isci: uplevel register hardware data structures and unsolicited frame handling
drivers/scsi/isci/host.c
/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/device.h>
#include <scsi/sas.h>
#include "host.h"
#include "isci.h"
#include "port.h"
#include "probe_roms.h"
#include "remote_device.h"
#include "request.h"
#include "scic_io_request.h"
#include "scic_sds_port_configuration_agent.h"
#include "sci_util.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "registers.h"
#include "scu_remote_node_context.h"
#include "scu_task_context.h"
#include "scu_unsolicited_frame.h"
#include "timers.h"

#define SCU_CONTEXT_RAM_INIT_STALL_TIME      200

/**
 * smu_dcc_get_max_ports() -
 *
 * This macro returns the maximum number of logical ports supported by the
 * hardware. The caller passes in the value read from the device context
 * capacity register and this macro will mask and shift the value appropriately.
 */
#define smu_dcc_get_max_ports(dcc_value) \
        (\
                (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
                 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
        )

/**
 * smu_dcc_get_max_task_context() -
 *
 * This macro returns the maximum number of task contexts supported by the
 * hardware. The caller passes in the value read from the device context
 * capacity register and this macro will mask and shift the value appropriately.
 */
#define smu_dcc_get_max_task_context(dcc_value) \
        (\
                (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
                 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
        )

/**
 * smu_dcc_get_max_remote_node_context() -
 *
 * This macro returns the maximum number of remote node contexts supported by
 * the hardware. The caller passes in the value read from the device context
 * capacity register and this macro will mask and shift the value appropriately.
 */
#define smu_dcc_get_max_remote_node_context(dcc_value) \
        (\
                (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
                 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
        )
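
/*
 * Illustrative sketch (not part of the driver): the capacity limits can be
 * decoded from a single read of the device context capacity register; each
 * macro above masks its field, shifts it down, and adds one because the
 * hardware reports "count - 1":
 *
 *      u32 dcc     = readl(&scic->smu_registers->device_context_capacity);
 *      u32 max_lp  = smu_dcc_get_max_ports(dcc);
 *      u32 max_tc  = smu_dcc_get_max_task_context(dcc);
 *      u32 max_rnc = smu_dcc_get_max_remote_node_context(dcc);
 */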

#define SCIC_SDS_CONTROLLER_MIN_TIMER_COUNT  3
#define SCIC_SDS_CONTROLLER_MAX_TIMER_COUNT  3

/**
 *
 *
 * The number of milliseconds to wait for a phy to start.
 */
#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT      100

/**
 *
 *
 * The number of milliseconds to wait while a given phy is consuming power
 * before allowing another set of phys to consume power. Ultimately, this will
 * be specified by OEM parameter.
 */
#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500

/**
 * NORMALIZE_PUT_POINTER() -
 *
 * This macro will normalize the completion queue put pointer so its value can
 * be used as an array index
 */
#define NORMALIZE_PUT_POINTER(x) \
        ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)


/**
 * NORMALIZE_EVENT_POINTER() -
 *
 * This macro will normalize the completion queue event entry so its value can
 * be used as an index.
 */
#define NORMALIZE_EVENT_POINTER(x) \
        (\
                ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
                >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
        )

/**
 * INCREMENT_COMPLETION_QUEUE_GET() -
 *
 * This macro will increment the controller's completion queue index value and
 * possibly toggle the cycle bit if the completion queue index wraps back to 0.
 */
#define INCREMENT_COMPLETION_QUEUE_GET(controller, index, cycle) \
        INCREMENT_QUEUE_GET(\
                (index), \
                (cycle), \
                (controller)->completion_queue_entries, \
                SMU_CQGR_CYCLE_BIT \
        )

/**
 * INCREMENT_EVENT_QUEUE_GET() -
 *
 * This macro will increment the controller's event queue index value and
 * possibly toggle the event cycle bit if the event queue index wraps back to 0.
 */
#define INCREMENT_EVENT_QUEUE_GET(controller, index, cycle) \
        INCREMENT_QUEUE_GET(\
                (index), \
                (cycle), \
                (controller)->completion_event_entries, \
                SMU_CQGR_EVENT_CYCLE_BIT \
        )

/**
 * NORMALIZE_GET_POINTER() -
 *
 * This macro will normalize the completion queue get pointer so its value can
 * be used as an index into an array
 */
#define NORMALIZE_GET_POINTER(x) \
        ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)

/**
 * NORMALIZE_GET_POINTER_CYCLE_BIT() -
 *
 * This macro will normalize the completion queue cycle pointer so it matches
 * the completion queue cycle bit
 */
#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
        ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))

/**
 * COMPLETION_QUEUE_CYCLE_BIT() -
 *
 * This macro will return the cycle bit of the completion queue entry
 */
#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
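
/*
 * A note on the cycle bit protocol (summary, not normative): the hardware
 * toggles the cycle bit it writes into each completion queue entry every
 * time the put pointer wraps, and software mirrors that toggle in its
 * cached get pointer. An entry is valid only while the entry's cycle bit
 * (bit 31) matches the get pointer's cycle bit; once they differ, software
 * has caught up with the hardware.
 */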

static bool scic_sds_controller_completion_queue_has_entries(
        struct scic_sds_controller *scic)
{
        u32 get_value = scic->completion_queue_get;
        u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;

        if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
            COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index]))
                return true;

        return false;
}

static bool scic_sds_controller_isr(struct scic_sds_controller *scic)
{
        if (scic_sds_controller_completion_queue_has_entries(scic)) {
                return true;
        } else {
                /*
                 * We have a spurious interrupt; it could be that we have
                 * already emptied the completion queue from a previous
                 * interrupt. */
                writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);

                /*
                 * There is a race in the hardware that could cause us not to be
                 * notified of an interrupt completion if we do not take this step.
                 * We will mask then unmask the interrupts so if there is another
                 * interrupt pending after the clearing of the interrupt source we
                 * get the next interrupt message. */
                writel(0xFF000000, &scic->smu_registers->interrupt_mask);
                writel(0, &scic->smu_registers->interrupt_mask);
        }

        return false;
}

irqreturn_t isci_msix_isr(int vec, void *data)
{
        struct isci_host *ihost = data;

        if (scic_sds_controller_isr(&ihost->sci))
                tasklet_schedule(&ihost->completion_tasklet);

        return IRQ_HANDLED;
}

static bool scic_sds_controller_error_isr(struct scic_sds_controller *scic)
{
        u32 interrupt_status;

        interrupt_status =
                readl(&scic->smu_registers->interrupt_status);
        interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);

        if (interrupt_status != 0) {
                /*
                 * There is an error interrupt pending so let it through and handle
                 * in the callback */
                return true;
        }

        /*
         * There is a race in the hardware that could cause us not to be notified
         * of an interrupt completion if we do not take this step. We will mask
         * then unmask the error interrupts so if there was another interrupt
         * pending we will be notified.
         * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
        writel(0xff, &scic->smu_registers->interrupt_mask);
        writel(0, &scic->smu_registers->interrupt_mask);

        return false;
}

static void scic_sds_controller_task_completion(struct scic_sds_controller *scic,
                                                u32 completion_entry)
{
        u32 index;
        struct scic_sds_request *io_request;

        index = SCU_GET_COMPLETION_INDEX(completion_entry);
        io_request = scic->io_request_table[index];

        /* Make sure that we really want to process this IO request */
        if (
                (io_request != NULL) &&
                (io_request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) &&
                (
                        scic_sds_io_tag_get_sequence(io_request->io_tag) ==
                        scic->io_request_sequence[index]
                )
           ) {
                /* Yep, this is a valid io request; pass it along to the
                 * io request handler */
                scic_sds_io_request_tc_completion(io_request, completion_entry);
        }
}

static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic,
                                                u32 completion_entry)
{
        u32 index;
        struct scic_sds_request *io_request;
        struct scic_sds_remote_device *device;

        index = SCU_GET_COMPLETION_INDEX(completion_entry);

        switch (scu_get_command_request_type(completion_entry)) {
        case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
        case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
                io_request = scic->io_request_table[index];
                dev_warn(scic_to_dev(scic),
                         "%s: SCIC SDS Completion type SDMA %x for io request "
                         "%p\n",
                         __func__,
                         completion_entry,
                         io_request);
                /* @todo For a post TC operation we need to fail the IO
                 * request
                 */
                break;

        case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
        case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
        case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
                device = scic->device_table[index];
                dev_warn(scic_to_dev(scic),
                         "%s: SCIC SDS Completion type SDMA %x for remote "
                         "device %p\n",
                         __func__,
                         completion_entry,
                         device);
                /* @todo For a post RNC operation we need to fail the
                 * device
                 */
                break;

        default:
                dev_warn(scic_to_dev(scic),
                         "%s: SCIC SDS Completion unknown SDMA completion "
                         "type %x\n",
                         __func__,
                         completion_entry);
                break;
        }
}

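/*
 * Routing summary for unsolicited frames (descriptive only): address frames
 * and frames that arrive without a valid remote node index -- e.g. a
 * signature FIS from a direct attached SATA device that has not been created
 * yet -- are handed to the phy's frame handler, while frames tagged with a
 * known remote node index are handed to the owning remote device. Anything
 * unroutable releases the frame back to the hardware.
 */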
static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *scic,
                                                  u32 completion_entry)
{
        u32 index;
        u32 frame_index;

        struct isci_host *ihost = scic_to_ihost(scic);
        struct scu_unsolicited_frame_header *frame_header;
        struct scic_sds_phy *phy;
        struct scic_sds_remote_device *device;

        enum sci_status result = SCI_FAILURE;

        frame_index = SCU_GET_FRAME_INDEX(completion_entry);

        frame_header = scic->uf_control.buffers.array[frame_index].header;
        scic->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;

        if (SCU_GET_FRAME_ERROR(completion_entry)) {
                /*
                 * / @todo If the IAF frame or SIGNATURE FIS frame has an error will
                 * / this cause a problem? We expect the phy initialization will
                 * / fail if there is an error in the frame. */
                scic_sds_controller_release_frame(scic, frame_index);
                return;
        }

        if (frame_header->is_address_frame) {
                index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
                phy = &ihost->phys[index].sci;
                result = scic_sds_phy_frame_handler(phy, frame_index);
        } else {

                index = SCU_GET_COMPLETION_INDEX(completion_entry);

                if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
                        /*
                         * This is a signature fis or a frame from a direct attached SATA
                         * device that has not yet been created. In either case forward
                         * the frame to the PE and let it take care of the frame data. */
                        index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
                        phy = &ihost->phys[index].sci;
                        result = scic_sds_phy_frame_handler(phy, frame_index);
                } else {
                        if (index < scic->remote_node_entries)
                                device = scic->device_table[index];
                        else
                                device = NULL;

                        if (device != NULL)
                                result = scic_sds_remote_device_frame_handler(device, frame_index);
                        else
                                scic_sds_controller_release_frame(scic, frame_index);
                }
        }

        if (result != SCI_SUCCESS) {
                /*
                 * / @todo Is there any reason to report some additional error message
                 * / when we get this failure notification? */
        }
}

static void scic_sds_controller_event_completion(struct scic_sds_controller *scic,
                                                 u32 completion_entry)
{
        struct isci_host *ihost = scic_to_ihost(scic);
        struct scic_sds_request *io_request;
        struct scic_sds_remote_device *device;
        struct scic_sds_phy *phy;
        u32 index;

        index = SCU_GET_COMPLETION_INDEX(completion_entry);

        switch (scu_get_event_type(completion_entry)) {
        case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
                /* / @todo The driver did something wrong and we need to fix the condition. */
                dev_err(scic_to_dev(scic),
                        "%s: SCIC Controller 0x%p received SMU command error "
                        "0x%x\n",
                        __func__,
                        scic,
                        completion_entry);
                break;

        case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
        case SCU_EVENT_TYPE_SMU_ERROR:
        case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
                /*
                 * / @todo This is a hardware failure and it's likely that we want to
                 * / reset the controller. */
                dev_err(scic_to_dev(scic),
                        "%s: SCIC Controller 0x%p received fatal controller "
                        "event 0x%x\n",
                        __func__,
                        scic,
                        completion_entry);
                break;

        case SCU_EVENT_TYPE_TRANSPORT_ERROR:
                io_request = scic->io_request_table[index];
                scic_sds_io_request_event_handler(io_request, completion_entry);
                break;

        case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
                switch (scu_get_event_specifier(completion_entry)) {
                case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
                case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
                        io_request = scic->io_request_table[index];
                        if (io_request != NULL)
                                scic_sds_io_request_event_handler(io_request, completion_entry);
                        else
                                dev_warn(scic_to_dev(scic),
                                         "%s: SCIC Controller 0x%p received "
                                         "event 0x%x for io request object "
                                         "that doesn't exist.\n",
                                         __func__,
                                         scic,
                                         completion_entry);

                        break;

                case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
                        device = scic->device_table[index];
                        if (device != NULL)
                                scic_sds_remote_device_event_handler(device, completion_entry);
                        else
                                dev_warn(scic_to_dev(scic),
                                         "%s: SCIC Controller 0x%p received "
                                         "event 0x%x for remote device object "
                                         "that doesn't exist.\n",
                                         __func__,
                                         scic,
                                         completion_entry);

                        break;
                }
                break;

        case SCU_EVENT_TYPE_BROADCAST_CHANGE:
                /*
                 * direct the broadcast change event to the phy first and then let
                 * the phy redirect the broadcast change to the port object */
        case SCU_EVENT_TYPE_ERR_CNT_EVENT:
                /*
                 * direct error counter event to the phy object since that is where
                 * we get the event notification. This is a type 4 event. */
        case SCU_EVENT_TYPE_OSSP_EVENT:
                index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
                phy = &ihost->phys[index].sci;
                scic_sds_phy_event_handler(phy, completion_entry);
                break;

        case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
        case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
        case SCU_EVENT_TYPE_RNC_OPS_MISC:
                if (index < scic->remote_node_entries) {
                        device = scic->device_table[index];

                        if (device != NULL)
                                scic_sds_remote_device_event_handler(device, completion_entry);
                } else
                        dev_err(scic_to_dev(scic),
                                "%s: SCIC Controller 0x%p received event 0x%x "
                                "for remote device object 0x%0x that doesn't "
                                "exist.\n",
                                __func__,
                                scic,
                                completion_entry,
                                index);

                break;

        default:
                dev_warn(scic_to_dev(scic),
                         "%s: SCIC Controller received unknown event code %x\n",
                         __func__,
                         completion_entry);
                break;
        }
}

static void scic_sds_controller_process_completions(struct scic_sds_controller *scic)
{
        u32 completion_count = 0;
        u32 completion_entry;
        u32 get_index;
        u32 get_cycle;
        u32 event_index;
        u32 event_cycle;

        dev_dbg(scic_to_dev(scic),
                "%s: completion queue beginning get:0x%08x\n",
                __func__,
                scic->completion_queue_get);

        /* Get the component parts of the completion queue */
        get_index = NORMALIZE_GET_POINTER(scic->completion_queue_get);
        get_cycle = SMU_CQGR_CYCLE_BIT & scic->completion_queue_get;

        event_index = NORMALIZE_EVENT_POINTER(scic->completion_queue_get);
        event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & scic->completion_queue_get;

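        /*
         * Note (descriptive): the single completion queue get register packs
         * two cursors, the completion get pointer and the event get pointer,
         * each with its own cycle bit, which is why both are unpacked here
         * and re-assembled before the write back at the bottom of this
         * function.
         */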
        while (
                NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
                == COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index])
              ) {
                completion_count++;

                completion_entry = scic->completion_queue[get_index];
                INCREMENT_COMPLETION_QUEUE_GET(scic, get_index, get_cycle);

                dev_dbg(scic_to_dev(scic),
                        "%s: completion queue entry:0x%08x\n",
                        __func__,
                        completion_entry);

                switch (SCU_GET_COMPLETION_TYPE(completion_entry)) {
                case SCU_COMPLETION_TYPE_TASK:
                        scic_sds_controller_task_completion(scic, completion_entry);
                        break;

                case SCU_COMPLETION_TYPE_SDMA:
                        scic_sds_controller_sdma_completion(scic, completion_entry);
                        break;

                case SCU_COMPLETION_TYPE_UFI:
                        scic_sds_controller_unsolicited_frame(scic, completion_entry);
                        break;

                case SCU_COMPLETION_TYPE_EVENT:
                        INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
                        scic_sds_controller_event_completion(scic, completion_entry);
                        break;

                case SCU_COMPLETION_TYPE_NOTIFY:
                        /*
                         * Presently we do the same thing with a notify event that we do with the
                         * other event codes. */
                        INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
                        scic_sds_controller_event_completion(scic, completion_entry);
                        break;

                default:
                        dev_warn(scic_to_dev(scic),
                                 "%s: SCIC Controller received unknown "
                                 "completion type %x\n",
                                 __func__,
                                 completion_entry);
                        break;
                }
        }

        /* Update the get register if we completed one or more entries */
        if (completion_count > 0) {
                scic->completion_queue_get =
                        SMU_CQGR_GEN_BIT(ENABLE) |
                        SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
                        event_cycle |
                        SMU_CQGR_GEN_VAL(EVENT_POINTER, event_index) |
                        get_cycle |
                        SMU_CQGR_GEN_VAL(POINTER, get_index);

                writel(scic->completion_queue_get,
                       &scic->smu_registers->completion_queue_get);
        }

        dev_dbg(scic_to_dev(scic),
                "%s: completion queue ending get:0x%08x\n",
                __func__,
                scic->completion_queue_get);
}

static void scic_sds_controller_error_handler(struct scic_sds_controller *scic)
{
        u32 interrupt_status;

        interrupt_status =
                readl(&scic->smu_registers->interrupt_status);

        if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
            scic_sds_controller_completion_queue_has_entries(scic)) {

                scic_sds_controller_process_completions(scic);
                writel(SMU_ISR_QUEUE_SUSPEND, &scic->smu_registers->interrupt_status);
        } else {
                dev_err(scic_to_dev(scic), "%s: status: %#x\n", __func__,
                        interrupt_status);

                sci_base_state_machine_change_state(&scic->state_machine,
                                                    SCI_BASE_CONTROLLER_STATE_FAILED);

                return;
        }

        /* If we don't process any completions I am not sure that we want to do this.
         * We are in the middle of a hardware fault and should probably be reset.
         */
        writel(0, &scic->smu_registers->interrupt_mask);
}

irqreturn_t isci_intx_isr(int vec, void *data)
{
        irqreturn_t ret = IRQ_NONE;
        struct isci_host *ihost = data;
        struct scic_sds_controller *scic = &ihost->sci;

        if (scic_sds_controller_isr(scic)) {
                writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
                tasklet_schedule(&ihost->completion_tasklet);
                ret = IRQ_HANDLED;
        } else if (scic_sds_controller_error_isr(scic)) {
                spin_lock(&ihost->scic_lock);
                scic_sds_controller_error_handler(scic);
                spin_unlock(&ihost->scic_lock);
                ret = IRQ_HANDLED;
        }

        return ret;
}

irqreturn_t isci_error_isr(int vec, void *data)
{
        struct isci_host *ihost = data;

        if (scic_sds_controller_error_isr(&ihost->sci))
                scic_sds_controller_error_handler(&ihost->sci);

        return IRQ_HANDLED;
}

/**
 * isci_host_start_complete() - This function is called by the core library,
 *    through the ISCI Module, to indicate controller start status.
 * @isci_host: This parameter specifies the ISCI host object
 * @completion_status: This parameter specifies the completion status from the
 *    core library.
 *
 */
static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
{
        if (completion_status != SCI_SUCCESS)
                dev_info(&ihost->pdev->dev,
                         "controller start timed out, continuing...\n");
        isci_host_change_state(ihost, isci_ready);
        clear_bit(IHOST_START_PENDING, &ihost->flags);
        wake_up(&ihost->eventq);
}

int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
        struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;

        if (test_bit(IHOST_START_PENDING, &ihost->flags))
                return 0;

        /* todo: use sas_flush_discovery once it is upstream */
        scsi_flush_work(shost);

        dev_dbg(&ihost->pdev->dev,
                "%s: ihost->status = %d, time = %ld\n",
                __func__, isci_host_get_state(ihost), time);

        return 1;
}

/**
 * scic_controller_get_suggested_start_timeout() - This method returns the
 *    suggested scic_controller_start() timeout amount. The user is free to
 *    use any timeout value, but this method provides the suggested minimum
 *    start timeout value. The returned value is based upon empirical
 *    information determined as a result of interoperability testing.
 * @controller: the handle to the controller object for which to return the
 *    suggested start timeout.
 *
 * This method returns the number of milliseconds for the suggested start
 * operation timeout.
 */
static u32 scic_controller_get_suggested_start_timeout(
        struct scic_sds_controller *sc)
{
        /* Validate the user supplied parameters. */
        if (sc == NULL)
                return 0;

        /*
         * The suggested minimum timeout value for a controller start operation:
         *
         *     Signature FIS Timeout
         *   + Phy Start Timeout
         *   + Number of Phy Spin Up Intervals
         *   ---------------------------------
         *   Number of milliseconds for the controller start operation.
         *
         * NOTE: The number of phy spin up intervals will be equivalent
         *       to the number of phys divided by the number of phys allowed
         *       per interval - 1 (once OEM parameters are supported).
         *       Currently we assume only 1 phy per interval. */
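
        /*
         * Worked example (an illustration, assuming SCI_MAX_PHYS == 4 and
         * the constants defined above): 100ms of phy start timeout plus
         * 3 * 500ms of power control intervals gives
         * SCIC_SDS_SIGNATURE_FIS_TIMEOUT + 1600ms.
         */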

        return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
                + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
                + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
}

static void scic_controller_enable_interrupts(
        struct scic_sds_controller *scic)
{
        BUG_ON(scic->smu_registers == NULL);
        writel(0, &scic->smu_registers->interrupt_mask);
}

void scic_controller_disable_interrupts(
        struct scic_sds_controller *scic)
{
        BUG_ON(scic->smu_registers == NULL);
        writel(0xffffffff, &scic->smu_registers->interrupt_mask);
}

static void scic_sds_controller_enable_port_task_scheduler(
        struct scic_sds_controller *scic)
{
        u32 port_task_scheduler_value;

        port_task_scheduler_value =
                readl(&scic->scu_registers->peg0.ptsg.control);
        port_task_scheduler_value |=
                (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
                 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
        writel(port_task_scheduler_value,
               &scic->scu_registers->peg0.ptsg.control);
}

static void scic_sds_controller_assign_task_entries(struct scic_sds_controller *scic)
{
        u32 task_assignment;

        /*
         * Assign all the TCs to function 0
         * TODO: Do we actually need to read this register to write it back?
         */

        task_assignment =
                readl(&scic->smu_registers->task_context_assignment[0]);

        task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
                (SMU_TCA_GEN_VAL(ENDING, scic->task_context_entries - 1)) |
                (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));

        writel(task_assignment,
               &scic->smu_registers->task_context_assignment[0]);
}

static void scic_sds_controller_initialize_completion_queue(struct scic_sds_controller *scic)
{
        u32 index;
        u32 completion_queue_control_value;
        u32 completion_queue_get_value;
        u32 completion_queue_put_value;

        scic->completion_queue_get = 0;

        completion_queue_control_value = (
                SMU_CQC_QUEUE_LIMIT_SET(scic->completion_queue_entries - 1)
                | SMU_CQC_EVENT_LIMIT_SET(scic->completion_event_entries - 1)
                );

        writel(completion_queue_control_value,
               &scic->smu_registers->completion_queue_control);

        /* Set the completion queue get pointer and enable the queue */
        completion_queue_get_value = (
                (SMU_CQGR_GEN_VAL(POINTER, 0))
                | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
                | (SMU_CQGR_GEN_BIT(ENABLE))
                | (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
                );

        writel(completion_queue_get_value,
               &scic->smu_registers->completion_queue_get);

        /* Set the completion queue put pointer */
        completion_queue_put_value = (
                (SMU_CQPR_GEN_VAL(POINTER, 0))
                | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
                );

        writel(completion_queue_put_value,
               &scic->smu_registers->completion_queue_put);

        /* Initialize the cycle bit of the completion queue entries */
        for (index = 0; index < scic->completion_queue_entries; index++) {
                /*
                 * If get.cycle_bit != completion_queue.cycle_bit
                 * it's not a valid completion queue entry,
                 * so at system start all entries are invalid */
                scic->completion_queue[index] = 0x80000000;
        }
}

static void scic_sds_controller_initialize_unsolicited_frame_queue(struct scic_sds_controller *scic)
{
        u32 frame_queue_control_value;
        u32 frame_queue_get_value;
        u32 frame_queue_put_value;

        /* Write the queue size */
        frame_queue_control_value =
                SCU_UFQC_GEN_VAL(QUEUE_SIZE,
                                 scic->uf_control.address_table.count);

        writel(frame_queue_control_value,
               &scic->scu_registers->sdma.unsolicited_frame_queue_control);

        /* Setup the get pointer for the unsolicited frame queue */
        frame_queue_get_value = (
                SCU_UFQGP_GEN_VAL(POINTER, 0)
                | SCU_UFQGP_GEN_BIT(ENABLE_BIT)
                );

        writel(frame_queue_get_value,
               &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
        /* Setup the put pointer for the unsolicited frame queue */
        frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
        writel(frame_queue_put_value,
               &scic->scu_registers->sdma.unsolicited_frame_put_pointer);
}

/**
 * This method will attempt to transition into the ready state for the
 * controller and indicate that the controller start operation has completed
 * if all criteria are met.
 * @scic: This parameter indicates the controller object for which
 *    to transition to ready.
 * @status: This parameter indicates the status value to be passed into the
 *    call to scic_cb_controller_start_complete().
 *
 * none.
 */
static void scic_sds_controller_transition_to_ready(
        struct scic_sds_controller *scic,
        enum sci_status status)
{
        struct isci_host *ihost = scic_to_ihost(scic);

        if (scic->state_machine.current_state_id ==
            SCI_BASE_CONTROLLER_STATE_STARTING) {
                /*
                 * We move into the ready state, because some of the phys/ports
                 * may be up and operational.
                 */
                sci_base_state_machine_change_state(&scic->state_machine,
                                                    SCI_BASE_CONTROLLER_STATE_READY);

                isci_host_start_complete(ihost, status);
        }
}

static void scic_sds_controller_phy_timer_stop(struct scic_sds_controller *scic)
{
        isci_timer_stop(scic->phy_startup_timer);

        scic->phy_startup_timer_pending = false;
}

static void scic_sds_controller_phy_timer_start(struct scic_sds_controller *scic)
{
        isci_timer_start(scic->phy_startup_timer,
                         SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);

        scic->phy_startup_timer_pending = true;
}

/**
 * scic_sds_controller_start_next_phy - start phy
 * @scic: controller
 *
 * If all the phys have been started, then attempt to transition the
 * controller to the READY state and inform the user
 * (scic_cb_controller_start_complete()).
 */
static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_controller *scic)
{
        struct isci_host *ihost = scic_to_ihost(scic);
        struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
        struct scic_sds_phy *sci_phy;
        enum sci_status status;

        status = SCI_SUCCESS;

        if (scic->phy_startup_timer_pending)
                return status;

        if (scic->next_phy_to_start >= SCI_MAX_PHYS) {
                bool is_controller_start_complete = true;
                u32 state;
                u8 index;

                for (index = 0; index < SCI_MAX_PHYS; index++) {
                        sci_phy = &ihost->phys[index].sci;
                        state = sci_phy->state_machine.current_state_id;

                        if (!scic_sds_phy_get_port(sci_phy))
                                continue;

                        /* The controller start operation is complete iff:
                         * - all links have been given an opportunity to start
                         * - have no indication of a connected device
                         * - have an indication of a connected device and it has
                         *   finished the link training process.
                         */
                        if ((sci_phy->is_in_link_training == false &&
                             state == SCI_BASE_PHY_STATE_INITIAL) ||
                            (sci_phy->is_in_link_training == false &&
                             state == SCI_BASE_PHY_STATE_STOPPED) ||
                            (sci_phy->is_in_link_training == true &&
                             state == SCI_BASE_PHY_STATE_STARTING)) {
                                is_controller_start_complete = false;
                                break;
                        }
                }

                /*
                 * The controller has successfully finished the start process.
                 * Inform the SCI Core user and transition to the READY state. */
                if (is_controller_start_complete == true) {
                        scic_sds_controller_transition_to_ready(scic, SCI_SUCCESS);
                        scic_sds_controller_phy_timer_stop(scic);
                }
        } else {
                sci_phy = &ihost->phys[scic->next_phy_to_start].sci;

                if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
                        if (scic_sds_phy_get_port(sci_phy) == NULL) {
                                scic->next_phy_to_start++;

                                /* Caution: recursion ahead, be forewarned.
                                 *
                                 * The PHY was never added to a PORT in MPC mode
                                 * so start the next phy in sequence. This phy
                                 * will never go link up and will not draw power;
                                 * the OEM parameters either configured the phy
                                 * incorrectly for the PORT or it was never
                                 * assigned to a PORT.
                                 */
                                return scic_sds_controller_start_next_phy(scic);
                        }
                }

                status = scic_sds_phy_start(sci_phy);

                if (status == SCI_SUCCESS) {
                        scic_sds_controller_phy_timer_start(scic);
                } else {
                        dev_warn(scic_to_dev(scic),
                                 "%s: Controller start operation failed "
                                 "to start phy %d because of status "
                                 "%d.\n",
                                 __func__,
                                 ihost->phys[scic->next_phy_to_start].sci.phy_index,
                                 status);
                }

                scic->next_phy_to_start++;
        }

        return status;
}

static void scic_sds_controller_phy_startup_timeout_handler(void *_scic)
{
        struct scic_sds_controller *scic = _scic;
        enum sci_status status;

        scic->phy_startup_timer_pending = false;
        status = SCI_FAILURE;
        while (status != SCI_SUCCESS)
                status = scic_sds_controller_start_next_phy(scic);
}

static enum sci_status scic_controller_start(struct scic_sds_controller *scic,
                                             u32 timeout)
{
        struct isci_host *ihost = scic_to_ihost(scic);
        enum sci_status result;
        u16 index;

        if (scic->state_machine.current_state_id !=
            SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
                dev_warn(scic_to_dev(scic),
                         "SCIC Controller start operation requested in "
                         "invalid state\n");
                return SCI_FAILURE_INVALID_STATE;
        }

        /* Build the TCi free pool */
        sci_pool_initialize(scic->tci_pool);
        for (index = 0; index < scic->task_context_entries; index++)
                sci_pool_put(scic->tci_pool, index);

        /* Build the RNi free pool */
        scic_sds_remote_node_table_initialize(
                        &scic->available_remote_nodes,
                        scic->remote_node_entries);

        /*
         * Before anything else lets make sure we will not be
         * interrupted by the hardware.
         */
        scic_controller_disable_interrupts(scic);

        /* Enable the port task scheduler */
        scic_sds_controller_enable_port_task_scheduler(scic);

        /* Assign all the task entries to scic physical function */
        scic_sds_controller_assign_task_entries(scic);

        /* Now initialize the completion queue */
        scic_sds_controller_initialize_completion_queue(scic);

        /* Initialize the unsolicited frame queue for use */
        scic_sds_controller_initialize_unsolicited_frame_queue(scic);

        /* Start all of the ports on this controller */
        for (index = 0; index < scic->logical_port_entries; index++) {
                struct scic_sds_port *sci_port = &ihost->ports[index].sci;

                result = sci_port->state_handlers->start_handler(sci_port);
                if (result)
                        return result;
        }

        scic_sds_controller_start_next_phy(scic);

        isci_timer_start(scic->timeout_timer, timeout);

        sci_base_state_machine_change_state(&scic->state_machine,
                                            SCI_BASE_CONTROLLER_STATE_STARTING);

        return SCI_SUCCESS;
}

void isci_host_scan_start(struct Scsi_Host *shost)
{
        struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
        unsigned long tmo = scic_controller_get_suggested_start_timeout(&ihost->sci);

        set_bit(IHOST_START_PENDING, &ihost->flags);

        spin_lock_irq(&ihost->scic_lock);
        scic_controller_start(&ihost->sci, tmo);
        scic_controller_enable_interrupts(&ihost->sci);
        spin_unlock_irq(&ihost->scic_lock);
}

static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
{
        isci_host_change_state(ihost, isci_stopped);
        scic_controller_disable_interrupts(&ihost->sci);
        clear_bit(IHOST_STOP_PENDING, &ihost->flags);
        wake_up(&ihost->eventq);
}

static void scic_sds_controller_completion_handler(struct scic_sds_controller *scic)
{
        /* Empty out the completion queue */
        if (scic_sds_controller_completion_queue_has_entries(scic))
                scic_sds_controller_process_completions(scic);

        /* Clear the interrupt and enable all interrupts again */
        writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
        /* Could we write the value of SMU_ISR_COMPLETION? */
        writel(0xFF000000, &scic->smu_registers->interrupt_mask);
        writel(0, &scic->smu_registers->interrupt_mask);
}

/**
 * isci_host_completion_routine() - This function is the delayed service
 *    routine that calls the sci core library's completion handler. It's
 *    scheduled as a tasklet from the interrupt service routine when interrupts
 *    are in use, or set as the timeout function in polled mode.
 * @data: This parameter specifies the ISCI host object
 *
 */
static void isci_host_completion_routine(unsigned long data)
{
        struct isci_host *isci_host = (struct isci_host *)data;
        struct list_head completed_request_list;
        struct list_head errored_request_list;
        struct list_head *current_position;
        struct list_head *next_position;
        struct isci_request *request;
        struct isci_request *next_request;
        struct sas_task *task;

        INIT_LIST_HEAD(&completed_request_list);
        INIT_LIST_HEAD(&errored_request_list);

        spin_lock_irq(&isci_host->scic_lock);

        scic_sds_controller_completion_handler(&isci_host->sci);

        /* Take the lists of completed I/Os from the host. */
        list_splice_init(&isci_host->requests_to_complete,
                         &completed_request_list);

        /* Take the list of errored I/Os from the host. */
        list_splice_init(&isci_host->requests_to_errorback,
                         &errored_request_list);

        spin_unlock_irq(&isci_host->scic_lock);

        /* Process any completions in the lists. */
        list_for_each_safe(current_position, next_position,
                           &completed_request_list) {

                request = list_entry(current_position, struct isci_request,
                                     completed_node);
                task = isci_request_access_task(request);

                /* Normal notification (task_done) */
                dev_dbg(&isci_host->pdev->dev,
                        "%s: Normal - request/task = %p/%p\n",
                        __func__,
                        request,
                        task);

                /* Return the task to libsas */
                if (task != NULL) {

                        task->lldd_task = NULL;
                        if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {

                                /* If the task is already in the abort path,
                                 * the task_done callback cannot be called.
                                 */
                                task->task_done(task);
                        }
                }
                /* Free the request object. */
                isci_request_free(isci_host, request);
        }
        list_for_each_entry_safe(request, next_request, &errored_request_list,
                                 completed_node) {

                task = isci_request_access_task(request);

                /* Use sas_task_abort */
                dev_warn(&isci_host->pdev->dev,
                         "%s: Error - request/task = %p/%p\n",
                         __func__,
                         request,
                         task);

                if (task != NULL) {

                        /* Put the task into the abort path if it's not there
                         * already.
                         */
                        if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
                                sas_task_abort(task);

                } else {
                        /* This is a case where the request has completed with a
                         * status such that it needed further target servicing,
                         * but the sas_task reference has already been removed
                         * from the request. Since it was errored, it was not
                         * being aborted, so there is nothing to do except free
                         * it.
                         */

                        spin_lock_irq(&isci_host->scic_lock);
                        /* Remove the request from the remote device's list
                         * of pending requests.
                         */
                        list_del_init(&request->dev_node);
                        spin_unlock_irq(&isci_host->scic_lock);

                        /* Free the request object. */
                        isci_request_free(isci_host, request);
                }
        }
}

/**
 * scic_controller_stop() - This method will stop an individual controller
 *    object. This method will invoke the associated user callback upon
 *    completion. The completion callback is called when the following
 *    conditions are met: -# the method return status is SCI_SUCCESS. -# the
 *    controller has been quiesced. This method will ensure that all IO
 *    requests are quiesced, phys are stopped, and all additional operation by
 *    the hardware is halted.
 * @controller: the handle to the controller object to stop.
 * @timeout: This parameter specifies the number of milliseconds in which the
 *    stop operation should complete.
 *
 * The controller must be in the STARTED or STOPPED state. Indicate if the
 * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
 * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
 * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
 * controller is not either in the STARTED or STOPPED states.
 */
static enum sci_status scic_controller_stop(struct scic_sds_controller *scic,
                                            u32 timeout)
{
        if (scic->state_machine.current_state_id !=
            SCI_BASE_CONTROLLER_STATE_READY) {
                dev_warn(scic_to_dev(scic),
                         "SCIC Controller stop operation requested in "
                         "invalid state\n");
                return SCI_FAILURE_INVALID_STATE;
        }

        isci_timer_start(scic->timeout_timer, timeout);
        sci_base_state_machine_change_state(&scic->state_machine,
                                            SCI_BASE_CONTROLLER_STATE_STOPPING);
        return SCI_SUCCESS;
}

/**
 * scic_controller_reset() - This method will reset the supplied core
 *    controller regardless of the state of said controller. This operation is
 *    considered destructive. In other words, all current operations are wiped
 *    out. No IO completions for outstanding devices occur. Outstanding IO
 *    requests are not aborted or completed at the actual remote device.
 * @controller: the handle to the controller object to reset.
 *
 * Indicate if the controller reset method succeeded or failed in some way.
 * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
 * the controller reset operation is unable to complete.
 */
static enum sci_status scic_controller_reset(struct scic_sds_controller *scic)
{
        switch (scic->state_machine.current_state_id) {
        case SCI_BASE_CONTROLLER_STATE_RESET:
        case SCI_BASE_CONTROLLER_STATE_READY:
        case SCI_BASE_CONTROLLER_STATE_STOPPED:
        case SCI_BASE_CONTROLLER_STATE_FAILED:
                /*
                 * The reset operation is not a graceful cleanup, just
                 * perform the state transition.
                 */
                sci_base_state_machine_change_state(&scic->state_machine,
                                                    SCI_BASE_CONTROLLER_STATE_RESETTING);
                return SCI_SUCCESS;
        default:
                dev_warn(scic_to_dev(scic),
                         "SCIC Controller reset operation requested in "
                         "invalid state\n");
                return SCI_FAILURE_INVALID_STATE;
        }
}

void isci_host_deinit(struct isci_host *ihost)
{
        int i;

        isci_host_change_state(ihost, isci_stopping);
        for (i = 0; i < SCI_MAX_PORTS; i++) {
                struct isci_port *iport = &ihost->ports[i];
                struct isci_remote_device *idev, *d;

                list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
                        isci_remote_device_change_state(idev, isci_stopping);
                        isci_remote_device_stop(ihost, idev);
                }
        }

        set_bit(IHOST_STOP_PENDING, &ihost->flags);

        spin_lock_irq(&ihost->scic_lock);
        scic_controller_stop(&ihost->sci, SCIC_CONTROLLER_STOP_TIMEOUT);
        spin_unlock_irq(&ihost->scic_lock);

        wait_for_stop(ihost);
        scic_controller_reset(&ihost->sci);
        isci_timer_list_destroy(ihost);
}

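/*
 * A note on the register mapping (descriptive, not normative): the SCU and
 * SMU register sets sit behind PCI BARs that are shared by the controller
 * instances on the device, with each instance owning a fixed-size window,
 * hence the "BAR base + window size * controller id" arithmetic below.
 */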
static void __iomem *scu_base(struct isci_host *isci_host)
{
        struct pci_dev *pdev = isci_host->pdev;
        int id = isci_host->id;

        return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
}

static void __iomem *smu_base(struct isci_host *isci_host)
{
        struct pci_dev *pdev = isci_host->pdev;
        int id = isci_host->id;

        return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
}

static void isci_user_parameters_get(
        struct isci_host *isci_host,
        union scic_user_parameters *scic_user_params)
{
        struct scic_sds_user_parameters *u = &scic_user_params->sds1;
        int i;

        for (i = 0; i < SCI_MAX_PHYS; i++) {
                struct sci_phy_user_params *u_phy = &u->phys[i];

                u_phy->max_speed_generation = phy_gen;

                /* we are not exporting these for now */
                u_phy->align_insertion_frequency = 0x7f;
                u_phy->in_connection_align_insertion_frequency = 0xff;
                u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
        }

        u->stp_inactivity_timeout = stp_inactive_to;
        u->ssp_inactivity_timeout = ssp_inactive_to;
        u->stp_max_occupancy_timeout = stp_max_occ_to;
        u->ssp_max_occupancy_timeout = ssp_max_occ_to;
        u->no_outbound_task_timeout = no_outbound_task_to;
        u->max_number_concurrent_device_spin_up = max_concurr_spinup;
}

static void scic_sds_controller_initial_state_enter(void *object)
{
        struct scic_sds_controller *scic = object;

        sci_base_state_machine_change_state(&scic->state_machine,
                                            SCI_BASE_CONTROLLER_STATE_RESET);
}

static inline void scic_sds_controller_starting_state_exit(void *object)
{
        struct scic_sds_controller *scic = object;

        isci_timer_stop(scic->timeout_timer);
}

#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
#define INTERRUPT_COALESCE_TIMEOUT_MAX_US                    2700000
#define INTERRUPT_COALESCE_NUMBER_MAX                        256
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN                7
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX                28

/**
 * scic_controller_set_interrupt_coalescence() - This method allows the user to
 *    configure the interrupt coalescence.
 * @controller: This parameter represents the handle to the controller object
 *    for which its interrupt coalesce register is overridden.
 * @coalesce_number: Used to control the number of entries in the Completion
 *    Queue before an interrupt is generated. If the number of entries exceed
 *    this number, an interrupt will be generated. The valid range of the input
 *    is [0, 256]. A setting of 0 results in coalescing being disabled.
 * @coalesce_timeout: Timeout value in microseconds. The valid range of the
 *    input is [0, 2700000]. A setting of 0 is allowed and results in no
 *    interrupt coalescing timeout.
 *
 * Indicate if the user successfully set the interrupt coalesce parameters.
 * SCI_SUCCESS The user successfully updated the interrupt coalescence.
 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
 */
static enum sci_status scic_controller_set_interrupt_coalescence(
        struct scic_sds_controller *scic_controller,
        u32 coalesce_number,
        u32 coalesce_timeout)
{
        u8 timeout_encode = 0;
        u32 min = 0;
        u32 max = 0;

        /* Check if the input parameters fall in the range. */
        if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
                return SCI_FAILURE_INVALID_PARAMETER_VALUE;

        /*
         * Defined encoding for interrupt coalescing timeout:
         *      Value   Min     Max     Units
         *      -----   ---     ---     -----
         *      0       -       -       Disabled
         *      1       13.3    20.0    ns
         *      2       26.7    40.0
         *      3       53.3    80.0
         *      4       106.7   160.0
         *      5       213.3   320.0
         *      6       426.7   640.0
         *      7       853.3   1280.0
         *      8       1.7     2.6     us
         *      9       3.4     5.1
         *      10      6.8     10.2
         *      11      13.7    20.5
         *      12      27.3    41.0
         *      13      54.6    81.9
         *      14      109.2   163.8
         *      15      218.5   327.7
         *      16      436.9   655.4
         *      17      873.8   1310.7
         *      18      1.7     2.6     ms
         *      19      3.5     5.2
         *      20      7.0     10.5
         *      21      14.0    21.0
         *      22      28.0    41.9
         *      23      55.9    83.9
         *      24      111.8   167.8
         *      25      223.7   335.5
         *      26      447.4   671.1
         *      27      894.8   1342.2
         *      28      1.8     2.7     s
         *      Others Undefined */
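
        /*
         * Worked example (an illustration against the table above): the
         * default timeout of 250 us programmed from the ready state falls
         * in the 218.5 - 327.7 us bin, so the loop below settles on a
         * timeout_encode of 15.
         */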

        /*
         * Use the table above to decide the encode of interrupt coalescing timeout
         * value for register writing. */
        if (coalesce_timeout == 0)
                timeout_encode = 0;
        else {
                /* make the timeout value in unit of (10 ns). */
                coalesce_timeout = coalesce_timeout * 100;
                min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
                max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;

                /* get the encode of timeout for register writing. */
                for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
                     timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
                     timeout_encode++) {
                        if (min <= coalesce_timeout && max > coalesce_timeout)
                                break;
                        else if (coalesce_timeout >= max && coalesce_timeout < min * 2
                                 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
                                if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
                                        break;
                                else {
                                        timeout_encode++;
                                        break;
                                }
                        } else {
                                max = max * 2;
                                min = min * 2;
                        }
                }

                if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
                        /* the value is out of range. */
                        return SCI_FAILURE_INVALID_PARAMETER_VALUE;
        }

        writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
               SMU_ICC_GEN_VAL(TIMER, timeout_encode),
               &scic_controller->smu_registers->interrupt_coalesce_control);

        scic_controller->interrupt_coalesce_number = (u16)coalesce_number;
        scic_controller->interrupt_coalesce_timeout = coalesce_timeout / 100;

        return SCI_SUCCESS;
}

static void scic_sds_controller_ready_state_enter(void *object)
{
        struct scic_sds_controller *scic = object;

        /* set the default interrupt coalescence number and timeout value. */
        scic_controller_set_interrupt_coalescence(scic, 0x10, 250);
}

static void scic_sds_controller_ready_state_exit(void *object)
{
        struct scic_sds_controller *scic = object;

        /* disable interrupt coalescence. */
        scic_controller_set_interrupt_coalescence(scic, 0, 0);
}

static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller *scic)
{
        u32 index;
        enum sci_status status;
        enum sci_status phy_status;
        struct isci_host *ihost = scic_to_ihost(scic);

        status = SCI_SUCCESS;

        for (index = 0; index < SCI_MAX_PHYS; index++) {
                phy_status = scic_sds_phy_stop(&ihost->phys[index].sci);

                if (phy_status != SCI_SUCCESS &&
                    phy_status != SCI_FAILURE_INVALID_STATE) {
                        status = SCI_FAILURE;

                        dev_warn(scic_to_dev(scic),
                                 "%s: Controller stop operation failed to stop "
                                 "phy %d because of status %d.\n",
                                 __func__,
                                 ihost->phys[index].sci.phy_index, phy_status);
                }
        }

        return status;
}

static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller *scic)
{
        u32 index;
        enum sci_status port_status;
        enum sci_status status = SCI_SUCCESS;
        struct isci_host *ihost = scic_to_ihost(scic);

        for (index = 0; index < scic->logical_port_entries; index++) {
                struct scic_sds_port *sci_port = &ihost->ports[index].sci;
                scic_sds_port_handler_t stop;

                stop = sci_port->state_handlers->stop_handler;
                port_status = stop(sci_port);

                if ((port_status != SCI_SUCCESS) &&
                    (port_status != SCI_FAILURE_INVALID_STATE)) {
                        status = SCI_FAILURE;

                        dev_warn(scic_to_dev(scic),
                                 "%s: Controller stop operation failed to "
                                 "stop port %d because of status %d.\n",
                                 __func__,
                                 sci_port->logical_port_index,
                                 port_status);
                }
        }

        return status;
}

static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controller *scic)
{
        u32 index;
        enum sci_status status;
        enum sci_status device_status;

        status = SCI_SUCCESS;

        for (index = 0; index < scic->remote_node_entries; index++) {
                if (scic->device_table[index] != NULL) {
                        /* / @todo What timeout value do we want to provide to this request? */
                        device_status = scic_remote_device_stop(scic->device_table[index], 0);

                        if ((device_status != SCI_SUCCESS) &&
                            (device_status != SCI_FAILURE_INVALID_STATE)) {
                                dev_warn(scic_to_dev(scic),
                                         "%s: Controller stop operation failed "
                                         "to stop device 0x%p because of "
                                         "status %d.\n",
                                         __func__,
                                         scic->device_table[index], device_status);
                        }
                }
        }

        return status;
}

static void scic_sds_controller_stopping_state_enter(void *object)
{
        struct scic_sds_controller *scic = object;

        /* Stop all of the components for this controller */
        scic_sds_controller_stop_phys(scic);
        scic_sds_controller_stop_ports(scic);
        scic_sds_controller_stop_devices(scic);
}

static void scic_sds_controller_stopping_state_exit(void *object)
{
        struct scic_sds_controller *scic = object;

        isci_timer_stop(scic->timeout_timer);
}

/**
 * scic_sds_controller_reset_hardware() -
 *
 * This method will reset the controller hardware.
 */
static void scic_sds_controller_reset_hardware(struct scic_sds_controller *scic)
{
        /* Disable interrupts so we don't take any spurious interrupts */
        scic_controller_disable_interrupts(scic);

        /* Reset the SCU */
        writel(0xFFFFFFFF, &scic->smu_registers->soft_reset_control);

        /* Delay for 1ms before clearing the CQP and UFQPR. */
        udelay(1000);

        /* The write to the CQGR clears the CQP */
        writel(0x00000000, &scic->smu_registers->completion_queue_get);

        /* The write to the UFQGP clears the UFQPR */
        writel(0, &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
}

static void scic_sds_controller_resetting_state_enter(void *object)
{
        struct scic_sds_controller *scic = object;

        scic_sds_controller_reset_hardware(scic);
        sci_base_state_machine_change_state(&scic->state_machine,
                                            SCI_BASE_CONTROLLER_STATE_RESET);
}

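/*
 * Rough lifecycle sketch (derived from the handlers in this file; some
 * transitions are driven by code outside this excerpt): INITIAL falls
 * straight through to RESET, initialization walks RESET -> INITIALIZING ->
 * INITIALIZED, scic_controller_start() moves the controller through
 * STARTING to READY, scic_controller_stop() enters STOPPING, and
 * scic_controller_reset() funnels RESET, READY, STOPPED, or FAILED
 * controllers through RESETTING back to RESET.
 */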
static const struct sci_base_state scic_sds_controller_state_table[] = {
        [SCI_BASE_CONTROLLER_STATE_INITIAL] = {
                .enter_state = scic_sds_controller_initial_state_enter,
        },
        [SCI_BASE_CONTROLLER_STATE_RESET] = {},
        [SCI_BASE_CONTROLLER_STATE_INITIALIZING] = {},
        [SCI_BASE_CONTROLLER_STATE_INITIALIZED] = {},
        [SCI_BASE_CONTROLLER_STATE_STARTING] = {
                .exit_state = scic_sds_controller_starting_state_exit,
        },
        [SCI_BASE_CONTROLLER_STATE_READY] = {
                .enter_state = scic_sds_controller_ready_state_enter,
                .exit_state = scic_sds_controller_ready_state_exit,
        },
        [SCI_BASE_CONTROLLER_STATE_RESETTING] = {
                .enter_state = scic_sds_controller_resetting_state_enter,
        },
        [SCI_BASE_CONTROLLER_STATE_STOPPING] = {
                .enter_state = scic_sds_controller_stopping_state_enter,
                .exit_state = scic_sds_controller_stopping_state_exit,
        },
        [SCI_BASE_CONTROLLER_STATE_STOPPED] = {},
        [SCI_BASE_CONTROLLER_STATE_FAILED] = {}
};
1698
1699 static void scic_sds_controller_set_default_config_parameters(struct scic_sds_controller *scic)
1700 {
1701 /* these defaults are overridden by the platform / firmware */
1702 struct isci_host *ihost = scic_to_ihost(scic);
1703 u16 index;
1704
1705 /* Default to APC mode. */
1706 scic->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
1707
1708 /* Default to APC mode. */
1709 scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1;
1710
1711 /* Default to no SSC operation. */
1712 scic->oem_parameters.sds1.controller.do_enable_ssc = false;
1713
1714 /* Initialize all of the port parameter information to narrow ports. */
1715 for (index = 0; index < SCI_MAX_PORTS; index++) {
1716 scic->oem_parameters.sds1.ports[index].phy_mask = 0;
1717 }
1718
1719 /* Initialize all of the phy parameter information. */
1720 for (index = 0; index < SCI_MAX_PHYS; index++) {
1721 /* Default to 6G (i.e. Gen 3) for now. */
1722 scic->user_parameters.sds1.phys[index].max_speed_generation = 3;
1723
1724 /* the frequencies cannot be 0 */
1725 scic->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f;
1726 scic->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff;
1727 scic->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
1728
1729 /*
1730 		 * Previous Vitesse-based expanders had an arbitration issue that
1731 		 * is worked around by making the upper 32 bits of the SAS address
1732 		 * greater than the Vitesse company identifier.
1733 		 * Hence the usage of 0x5FCFFFFF. */
1734 scic->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id;
1735 scic->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF;
1736 }
1737
1738 scic->user_parameters.sds1.stp_inactivity_timeout = 5;
1739 scic->user_parameters.sds1.ssp_inactivity_timeout = 5;
1740 scic->user_parameters.sds1.stp_max_occupancy_timeout = 5;
1741 scic->user_parameters.sds1.ssp_max_occupancy_timeout = 20;
1742 scic->user_parameters.sds1.no_outbound_task_timeout = 20;
1743 }
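/*
 * Illustrative sketch (not from the driver): a platform override replacing
 * these defaults would assign the same fields before initialization
 * completes, e.g. forcing manual port configuration with a hypothetical
 * two-phy wide port on phys 0-1:
 *
 *	scic->oem_parameters.sds1.controller.mode_type =
 *		SCIC_PORT_MANUAL_CONFIGURATION_MODE;
 *	scic->oem_parameters.sds1.ports[0].phy_mask = 0x3;
 *
 * The mask value is an assumption; the legality rules are enforced by
 * scic_oem_parameters_validate() below.
 */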
1744
1745
1746
1747 /**
1748 * scic_controller_construct() - This method will attempt to construct a
1749 * controller object utilizing the supplied parameter information.
1750 * @c: This parameter specifies the controller to be constructed.
1751 * @scu_base: mapped base address of the scu registers
1752 * @smu_base: mapped base address of the smu registers
1753 *
1754 * Indicate if the controller was successfully constructed or if it failed in
1755 * some way. SCI_SUCCESS This value is returned if the controller was
1756 * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned
1757 * if the interrupt coalescence timer may cause SAS compliance issues for SMP
1758 * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE
1759 * This value is returned if the controller does not support the supplied type.
1760 * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the
1761 * controller does not support the supplied initialization data version.
1762 */
1763 static enum sci_status scic_controller_construct(struct scic_sds_controller *scic,
1764 void __iomem *scu_base,
1765 void __iomem *smu_base)
1766 {
1767 struct isci_host *ihost = scic_to_ihost(scic);
1768 u8 i;
1769
1770 sci_base_state_machine_construct(&scic->state_machine,
1771 scic, scic_sds_controller_state_table,
1772 SCI_BASE_CONTROLLER_STATE_INITIAL);
1773
1774 sci_base_state_machine_start(&scic->state_machine);
1775
1776 scic->scu_registers = scu_base;
1777 scic->smu_registers = smu_base;
1778
1779 scic_sds_port_configuration_agent_construct(&scic->port_agent);
1780
1781 	/* Construct the ports, plus the dummy port at index SCI_MAX_PORTS */
1782 	for (i = 0; i < SCI_MAX_PORTS; i++)
1783 		scic_sds_port_construct(&ihost->ports[i].sci, i, scic);
1784 	scic_sds_port_construct(&ihost->ports[i].sci, SCIC_SDS_DUMMY_PORT, scic);
1785
1786 /* Construct the phys for this controller */
1787 for (i = 0; i < SCI_MAX_PHYS; i++) {
1788 /* Add all the PHYs to the dummy port */
1789 scic_sds_phy_construct(&ihost->phys[i].sci,
1790 &ihost->ports[SCI_MAX_PORTS].sci, i);
1791 }
1792
1793 scic->invalid_phy_mask = 0;
1794
1795 /* Set the default maximum values */
1796 scic->completion_event_entries = SCU_EVENT_COUNT;
1797 scic->completion_queue_entries = SCU_COMPLETION_QUEUE_COUNT;
1798 scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES;
1799 scic->logical_port_entries = SCI_MAX_PORTS;
1800 scic->task_context_entries = SCU_IO_REQUEST_COUNT;
1801 scic->uf_control.buffers.count = SCU_UNSOLICITED_FRAME_COUNT;
1802 scic->uf_control.address_table.count = SCU_UNSOLICITED_FRAME_COUNT;
1803
1804 /* Initialize the User and OEM parameters to default values. */
1805 scic_sds_controller_set_default_config_parameters(scic);
1806
1807 return scic_controller_reset(scic);
1808 }
1809
1810 int scic_oem_parameters_validate(struct scic_sds_oem_params *oem)
1811 {
1812 int i;
1813
1814 for (i = 0; i < SCI_MAX_PORTS; i++)
1815 if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
1816 return -EINVAL;
1817
1818 for (i = 0; i < SCI_MAX_PHYS; i++)
1819 if (oem->phys[i].sas_address.high == 0 &&
1820 oem->phys[i].sas_address.low == 0)
1821 return -EINVAL;
1822
1823 if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
1824 for (i = 0; i < SCI_MAX_PHYS; i++)
1825 if (oem->ports[i].phy_mask != 0)
1826 return -EINVAL;
1827 } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
1828 u8 phy_mask = 0;
1829
1830 for (i = 0; i < SCI_MAX_PHYS; i++)
1831 phy_mask |= oem->ports[i].phy_mask;
1832
1833 if (phy_mask == 0)
1834 return -EINVAL;
1835 } else
1836 return -EINVAL;
1837
1838 if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
1839 return -EINVAL;
1840
1841 return 0;
1842 }
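/*
 * Worked example (values assumed for illustration): a manual-mode OEM block
 * passes the checks above when the ports collectively claim at least one phy
 * and every phy carries a non-zero SAS address:
 *
 *	oem.controller.mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
 *	oem.ports[0].phy_mask = 0x3;
 *	oem.ports[1].phy_mask = 0xC;
 *	for each phy i:
 *		oem.phys[i].sas_address.high = 0x5FCFFFFF;
 *		oem.phys[i].sas_address.low  = 0x1 + i;
 *
 * An all-zero set of port masks in manual mode, any non-zero mask in APC
 * mode, or a zero SAS address each yield -EINVAL.
 */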
1843
1844 static enum sci_status scic_oem_parameters_set(struct scic_sds_controller *scic,
1845 union scic_oem_parameters *scic_parms)
1846 {
1847 u32 state = scic->state_machine.current_state_id;
1848
1849 if (state == SCI_BASE_CONTROLLER_STATE_RESET ||
1850 state == SCI_BASE_CONTROLLER_STATE_INITIALIZING ||
1851 state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
1852
1853 if (scic_oem_parameters_validate(&scic_parms->sds1))
1854 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1855 scic->oem_parameters.sds1 = scic_parms->sds1;
1856
1857 return SCI_SUCCESS;
1858 }
1859
1860 return SCI_FAILURE_INVALID_STATE;
1861 }
1862
1863 void scic_oem_parameters_get(
1864 struct scic_sds_controller *scic,
1865 union scic_oem_parameters *scic_parms)
1866 {
1867 memcpy(scic_parms, (&scic->oem_parameters), sizeof(*scic_parms));
1868 }
1869
1870 static void scic_sds_controller_timeout_handler(void *_scic)
1871 {
1872 struct scic_sds_controller *scic = _scic;
1873 struct isci_host *ihost = scic_to_ihost(scic);
1874 struct sci_base_state_machine *sm = &scic->state_machine;
1875
1876 if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STARTING)
1877 scic_sds_controller_transition_to_ready(scic, SCI_FAILURE_TIMEOUT);
1878 else if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STOPPING) {
1879 sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_FAILED);
1880 isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
1881 	} else /* @todo Now what do we want to do in this case? */
1882 dev_err(scic_to_dev(scic),
1883 "%s: Controller timer fired when controller was not "
1884 "in a state being timed.\n",
1885 __func__);
1886 }
1887
1888 static enum sci_status scic_sds_controller_initialize_phy_startup(struct scic_sds_controller *scic)
1889 {
1890 struct isci_host *ihost = scic_to_ihost(scic);
1891
1892 scic->phy_startup_timer = isci_timer_create(ihost,
1893 scic,
1894 scic_sds_controller_phy_startup_timeout_handler);
1895
1896 	if (scic->phy_startup_timer == NULL)
1897 		return SCI_FAILURE_INSUFFICIENT_RESOURCES;
1898 
1899 	/* begin the startup sequence at phy 0 with no timer pending */
1900 	scic->next_phy_to_start = 0;
1901 	scic->phy_startup_timer_pending = false;
1902
1903 return SCI_SUCCESS;
1904 }
1905
1906 static void scic_sds_controller_power_control_timer_start(struct scic_sds_controller *scic)
1907 {
1908 isci_timer_start(scic->power_control.timer,
1909 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1910
1911 scic->power_control.timer_started = true;
1912 }
1913
1914 static void scic_sds_controller_power_control_timer_stop(struct scic_sds_controller *scic)
1915 {
1916 if (scic->power_control.timer_started) {
1917 isci_timer_stop(scic->power_control.timer);
1918 scic->power_control.timer_started = false;
1919 }
1920 }
1921
1922 static void scic_sds_controller_power_control_timer_restart(struct scic_sds_controller *scic)
1923 {
1924 scic_sds_controller_power_control_timer_stop(scic);
1925 scic_sds_controller_power_control_timer_start(scic);
1926 }
1927
1928 static void scic_sds_controller_power_control_timer_handler(
1929 void *controller)
1930 {
1931 struct scic_sds_controller *scic;
1932
1933 scic = (struct scic_sds_controller *)controller;
1934
1935 scic->power_control.phys_granted_power = 0;
1936
1937 if (scic->power_control.phys_waiting == 0) {
1938 scic->power_control.timer_started = false;
1939 } else {
1940 struct scic_sds_phy *sci_phy = NULL;
1941 u8 i;
1942
1943 for (i = 0;
1944 (i < SCI_MAX_PHYS)
1945 && (scic->power_control.phys_waiting != 0);
1946 i++) {
1947 if (scic->power_control.requesters[i] != NULL) {
1948 if (scic->power_control.phys_granted_power <
1949 scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
1950 sci_phy = scic->power_control.requesters[i];
1951 scic->power_control.requesters[i] = NULL;
1952 scic->power_control.phys_waiting--;
1953 scic->power_control.phys_granted_power++;
1954 scic_sds_phy_consume_power_handler(sci_phy);
1955 } else {
1956 break;
1957 }
1958 }
1959 }
1960
1961 /*
1962 * It doesn't matter if the power list is empty, we need to start the
1963 * timer in case another phy becomes ready.
1964 */
1965 scic_sds_controller_power_control_timer_start(scic);
1966 }
1967 }
1968
1969 /**
1970 * This method inserts the phy into the staggered spinup control queue.
1971 * @scic: the controller that owns the power control queue
1972 * @sci_phy: the phy requesting spinup power
1973 *
1974 */
1975 void scic_sds_controller_power_control_queue_insert(
1976 struct scic_sds_controller *scic,
1977 struct scic_sds_phy *sci_phy)
1978 {
1979 BUG_ON(sci_phy == NULL);
1980
1981 if (scic->power_control.phys_granted_power <
1982 scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
1983 scic->power_control.phys_granted_power++;
1984 scic_sds_phy_consume_power_handler(sci_phy);
1985
1986 /*
1987 * stop and start the power_control timer. When the timer fires, the
1988 * no_of_phys_granted_power will be set to 0
1989 */
1990 scic_sds_controller_power_control_timer_restart(scic);
1991 } else {
1992 /* Add the phy in the waiting list */
1993 scic->power_control.requesters[sci_phy->phy_index] = sci_phy;
1994 scic->power_control.phys_waiting++;
1995 }
1996 }
1997
1998 /**
1999 * This method removes the phy from the staggered spinup control queue.
2000 * @scic: the controller that owns the power control queue
2001 * @sci_phy: the phy to remove from the wait list
2002 *
2003 */
2004 void scic_sds_controller_power_control_queue_remove(
2005 struct scic_sds_controller *scic,
2006 struct scic_sds_phy *sci_phy)
2007 {
2008 BUG_ON(sci_phy == NULL);
2009
2010 if (scic->power_control.requesters[sci_phy->phy_index] != NULL) {
2011 scic->power_control.phys_waiting--;
2012 }
2013
2014 scic->power_control.requesters[sci_phy->phy_index] = NULL;
2015 }
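/*
 * Worked example (illustrative): with max_concurrent_dev_spin_up == 1 and
 * four phys requesting power in the same interval:
 *
 *	scic_sds_controller_power_control_queue_insert(scic, phy0);  granted
 *	scic_sds_controller_power_control_queue_insert(scic, phy1);  queued
 *	scic_sds_controller_power_control_queue_insert(scic, phy2);  queued
 *	scic_sds_controller_power_control_queue_insert(scic, phy3);  queued
 *
 * Each timer expiration resets phys_granted_power to 0 and grants exactly
 * one waiter, so spin-ups proceed one per
 * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL until the wait list drains.
 */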
2016
2017 #define AFE_REGISTER_WRITE_DELAY 10
2018
2019 /* Initialize the AFE for this phy index. We need to read the AFE setup from
2020 * the OEM parameters
2021 */
2022 static void scic_sds_controller_afe_initialization(struct scic_sds_controller *scic)
2023 {
2024 const struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
2025 u32 afe_status;
2026 u32 phy_id;
2027
2028 /* Clear DFX Status registers */
2029 writel(0x0081000f, &scic->scu_registers->afe.afe_dfx_master_control0);
2030 udelay(AFE_REGISTER_WRITE_DELAY);
2031
2032 if (is_b0()) {
2033 /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
2034 * Timer, PM Stagger Timer */
2035 writel(0x0007BFFF, &scic->scu_registers->afe.afe_pmsn_master_control2);
2036 udelay(AFE_REGISTER_WRITE_DELAY);
2037 }
2038
2039 /* Configure bias currents to normal */
2040 if (is_a0())
2041 writel(0x00005500, &scic->scu_registers->afe.afe_bias_control);
2042 else if (is_a2())
2043 writel(0x00005A00, &scic->scu_registers->afe.afe_bias_control);
2044 else if (is_b0())
2045 writel(0x00005F00, &scic->scu_registers->afe.afe_bias_control);
2046
2047 udelay(AFE_REGISTER_WRITE_DELAY);
2048
2049 /* Enable PLL */
2050 if (is_b0())
2051 writel(0x80040A08, &scic->scu_registers->afe.afe_pll_control0);
2052 else
2053 writel(0x80040908, &scic->scu_registers->afe.afe_pll_control0);
2054
2055 udelay(AFE_REGISTER_WRITE_DELAY);
2056
2057 /* Wait for the PLL to lock */
2058 do {
2059 afe_status = readl(&scic->scu_registers->afe.afe_common_block_status);
2060 udelay(AFE_REGISTER_WRITE_DELAY);
2061 } while ((afe_status & 0x00001000) == 0);
2062
2063 if (is_a0() || is_a2()) {
2064 /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
2065 writel(0x7bcc96ad, &scic->scu_registers->afe.afe_pmsn_master_control0);
2066 udelay(AFE_REGISTER_WRITE_DELAY);
2067 }
2068
2069 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
2070 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
2071
2072 if (is_b0()) {
2073 /* Configure transmitter SSC parameters */
2074 writel(0x00030000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
2075 udelay(AFE_REGISTER_WRITE_DELAY);
2076 } else {
2077 /*
2078 			 * All defaults, except the Receive Word Alignment/Comma Detect
2079 * Enable....(0xe800) */
2080 writel(0x00004512, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
2081 udelay(AFE_REGISTER_WRITE_DELAY);
2082
2083 writel(0x0050100F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
2084 udelay(AFE_REGISTER_WRITE_DELAY);
2085 }
2086
2087 /*
2088 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
2089 * & increase TX int & ext bias 20%....(0xe85c) */
2090 if (is_a0())
2091 writel(0x000003D4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
2092 else if (is_a2())
2093 writel(0x000003F0, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
2094 else {
2095 /* Power down TX and RX (PWRDNTX and PWRDNRX) */
2096 writel(0x000003d7, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
2097 udelay(AFE_REGISTER_WRITE_DELAY);
2098
2099 /*
2100 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
2101 * & increase TX int & ext bias 20%....(0xe85c) */
2102 writel(0x000003d4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
2103 }
2104 udelay(AFE_REGISTER_WRITE_DELAY);
2105
2106 if (is_a0() || is_a2()) {
2107 /* Enable TX equalization (0xe824) */
2108 writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
2109 udelay(AFE_REGISTER_WRITE_DELAY);
2110 }
2111
2112 /*
2113 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
2114 * RDD=0x0(RX Detect Enabled) ....(0xe800) */
2115 writel(0x00004100, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
2116 udelay(AFE_REGISTER_WRITE_DELAY);
2117
2118 /* Leave DFE/FFE on */
2119 if (is_a0())
2120 writel(0x3F09983F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
2121 else if (is_a2())
2122 writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
2123 else {
2124 writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
2125 udelay(AFE_REGISTER_WRITE_DELAY);
2126 /* Enable TX equalization (0xe824) */
2127 writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
2128 }
2129 udelay(AFE_REGISTER_WRITE_DELAY);
2130
2131 writel(oem_phy->afe_tx_amp_control0,
2132 &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
2133 udelay(AFE_REGISTER_WRITE_DELAY);
2134
2135 writel(oem_phy->afe_tx_amp_control1,
2136 &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
2137 udelay(AFE_REGISTER_WRITE_DELAY);
2138
2139 writel(oem_phy->afe_tx_amp_control2,
2140 &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
2141 udelay(AFE_REGISTER_WRITE_DELAY);
2142
2143 writel(oem_phy->afe_tx_amp_control3,
2144 &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
2145 udelay(AFE_REGISTER_WRITE_DELAY);
2146 }
2147
2148 /* Transfer control to the PEs */
2149 writel(0x00010f00, &scic->scu_registers->afe.afe_dfx_master_control0);
2150 udelay(AFE_REGISTER_WRITE_DELAY);
2151 }
2152
2153 static enum sci_status scic_controller_set_mode(struct scic_sds_controller *scic,
2154 enum sci_controller_mode operating_mode)
2155 {
2156 enum sci_status status = SCI_SUCCESS;
2157
2158 if ((scic->state_machine.current_state_id ==
2159 SCI_BASE_CONTROLLER_STATE_INITIALIZING) ||
2160 (scic->state_machine.current_state_id ==
2161 SCI_BASE_CONTROLLER_STATE_INITIALIZED)) {
2162 switch (operating_mode) {
2163 case SCI_MODE_SPEED:
2164 scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES;
2165 scic->task_context_entries = SCU_IO_REQUEST_COUNT;
2166 scic->uf_control.buffers.count =
2167 SCU_UNSOLICITED_FRAME_COUNT;
2168 scic->completion_event_entries = SCU_EVENT_COUNT;
2169 scic->completion_queue_entries =
2170 SCU_COMPLETION_QUEUE_COUNT;
2171 break;
2172
2173 case SCI_MODE_SIZE:
2174 scic->remote_node_entries = SCI_MIN_REMOTE_DEVICES;
2175 scic->task_context_entries = SCI_MIN_IO_REQUESTS;
2176 scic->uf_control.buffers.count =
2177 SCU_MIN_UNSOLICITED_FRAMES;
2178 scic->completion_event_entries = SCU_MIN_EVENTS;
2179 scic->completion_queue_entries =
2180 SCU_MIN_COMPLETION_QUEUE_ENTRIES;
2181 break;
2182
2183 default:
2184 status = SCI_FAILURE_INVALID_PARAMETER_VALUE;
2185 break;
2186 }
2187 } else
2188 status = SCI_FAILURE_INVALID_STATE;
2189
2190 return status;
2191 }
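/*
 * Usage note: scic_controller_initialize() below requests the performance
 * configuration once the hardware capacities are known:
 *
 *	scic_controller_set_mode(scic, SCI_MODE_SPEED);
 *
 * A memory-constrained caller could instead pass SCI_MODE_SIZE from the
 * INITIALIZING or INITIALIZED states to shrink the remote node, task
 * context, unsolicited frame, event, and completion queue allocations to
 * their minimums; any other state yields SCI_FAILURE_INVALID_STATE.
 */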
2192
2193 static void scic_sds_controller_initialize_power_control(struct scic_sds_controller *scic)
2194 {
2195 struct isci_host *ihost = scic_to_ihost(scic);
2196 scic->power_control.timer = isci_timer_create(ihost,
2197 scic,
2198 scic_sds_controller_power_control_timer_handler);
2199
2200 memset(scic->power_control.requesters, 0,
2201 sizeof(scic->power_control.requesters));
2202
2203 scic->power_control.phys_waiting = 0;
2204 scic->power_control.phys_granted_power = 0;
2205 }
2206
2207 static enum sci_status scic_controller_initialize(struct scic_sds_controller *scic)
2208 {
2209 struct sci_base_state_machine *sm = &scic->state_machine;
2210 enum sci_status result = SCI_SUCCESS;
2211 struct isci_host *ihost = scic_to_ihost(scic);
2212 u32 index, state;
2213
2214 if (scic->state_machine.current_state_id !=
2215 SCI_BASE_CONTROLLER_STATE_RESET) {
2216 dev_warn(scic_to_dev(scic),
2217 "SCIC Controller initialize operation requested "
2218 "in invalid state\n");
2219 return SCI_FAILURE_INVALID_STATE;
2220 }
2221
2222 sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_INITIALIZING);
2223
2224 scic->timeout_timer = isci_timer_create(ihost, scic,
2225 scic_sds_controller_timeout_handler);
2226
2227 scic_sds_controller_initialize_phy_startup(scic);
2228
2229 scic_sds_controller_initialize_power_control(scic);
2230
2231 /*
2232 * There is nothing to do here for B0 since we do not have to
2233 * program the AFE registers.
2234 	 * @todo The AFE settings are supposed to be correct for B0, but
2235 	 * presently they seem to be wrong. */
2236 scic_sds_controller_afe_initialization(scic);
2237
2238 if (result == SCI_SUCCESS) {
2239 u32 status;
2240 u32 terminate_loop;
2241
2242 /* Take the hardware out of reset */
2243 writel(0, &scic->smu_registers->soft_reset_control);
2244
2245 /*
2246 		 * @todo Provide a meaningful error code for hardware failure
2247 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
2248 result = SCI_FAILURE;
2249 terminate_loop = 100;
2250
2251 while (terminate_loop-- && (result != SCI_SUCCESS)) {
2252 /* Loop until the hardware reports success */
2253 udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
2254 status = readl(&scic->smu_registers->control_status);
2255
2256 if ((status & SCU_RAM_INIT_COMPLETED) ==
2257 SCU_RAM_INIT_COMPLETED)
2258 result = SCI_SUCCESS;
2259 }
2260 }
2261
2262 if (result == SCI_SUCCESS) {
2263 u32 max_supported_ports;
2264 u32 max_supported_devices;
2265 u32 max_supported_io_requests;
2266 u32 device_context_capacity;
2267
2268 /*
2269 		 * Determine the actual device capacities that the
2270 		 * hardware will support */
2271 device_context_capacity =
2272 readl(&scic->smu_registers->device_context_capacity);
2273
2274
2275 max_supported_ports = smu_dcc_get_max_ports(device_context_capacity);
2276 max_supported_devices = smu_dcc_get_max_remote_node_context(device_context_capacity);
2277 max_supported_io_requests = smu_dcc_get_max_task_context(device_context_capacity);
2278
2279 /*
2280 * Make all PEs that are unassigned match up with the
2281 * logical ports
2282 */
2283 for (index = 0; index < max_supported_ports; index++) {
2284 struct scu_port_task_scheduler_group_registers __iomem
2285 *ptsg = &scic->scu_registers->peg0.ptsg;
2286
2287 writel(index, &ptsg->protocol_engine[index]);
2288 }
2289
2290 /* Record the smaller of the two capacity values */
2291 scic->logical_port_entries =
2292 min(max_supported_ports, scic->logical_port_entries);
2293
2294 scic->task_context_entries =
2295 min(max_supported_io_requests,
2296 scic->task_context_entries);
2297
2298 scic->remote_node_entries =
2299 min(max_supported_devices, scic->remote_node_entries);
2300
2301 /*
2302 * Now that we have the correct hardware reported minimum values
2303 * build the MDL for the controller. Default to a performance
2304 * configuration.
2305 */
2306 scic_controller_set_mode(scic, SCI_MODE_SPEED);
2307 }
2308
2309 /* Initialize hardware PCI Relaxed ordering in DMA engines */
2310 if (result == SCI_SUCCESS) {
2311 u32 dma_configuration;
2312
2313 /* Configure the payload DMA */
2314 dma_configuration =
2315 readl(&scic->scu_registers->sdma.pdma_configuration);
2316 dma_configuration |=
2317 SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2318 writel(dma_configuration,
2319 &scic->scu_registers->sdma.pdma_configuration);
2320
2321 /* Configure the control DMA */
2322 dma_configuration =
2323 readl(&scic->scu_registers->sdma.cdma_configuration);
2324 dma_configuration |=
2325 SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2326 writel(dma_configuration,
2327 &scic->scu_registers->sdma.cdma_configuration);
2328 }
2329
2330 /*
2331 * Initialize the PHYs before the PORTs because the PHY registers
2332 * are accessed during the port initialization.
2333 */
2334 if (result == SCI_SUCCESS) {
2335 /* Initialize the phys */
2336 for (index = 0;
2337 (result == SCI_SUCCESS) && (index < SCI_MAX_PHYS);
2338 index++) {
2339 result = scic_sds_phy_initialize(
2340 &ihost->phys[index].sci,
2341 &scic->scu_registers->peg0.pe[index].tl,
2342 &scic->scu_registers->peg0.pe[index].ll);
2343 }
2344 }
2345
2346 if (result == SCI_SUCCESS) {
2347 /* Initialize the logical ports */
2348 for (index = 0;
2349 (index < scic->logical_port_entries) &&
2350 (result == SCI_SUCCESS);
2351 index++) {
2352 result = scic_sds_port_initialize(
2353 &ihost->ports[index].sci,
2354 &scic->scu_registers->peg0.ptsg.port[index],
2355 &scic->scu_registers->peg0.ptsg.protocol_engine,
2356 &scic->scu_registers->peg0.viit[index]);
2357 }
2358 }
2359
2360 if (result == SCI_SUCCESS)
2361 result = scic_sds_port_configuration_agent_initialize(
2362 scic,
2363 &scic->port_agent);
2364
2365 /* Advance the controller state machine */
2366 if (result == SCI_SUCCESS)
2367 state = SCI_BASE_CONTROLLER_STATE_INITIALIZED;
2368 else
2369 state = SCI_BASE_CONTROLLER_STATE_FAILED;
2370 sci_base_state_machine_change_state(sm, state);
2371
2372 return result;
2373 }
2374
2375 static enum sci_status scic_user_parameters_set(
2376 struct scic_sds_controller *scic,
2377 union scic_user_parameters *scic_parms)
2378 {
2379 u32 state = scic->state_machine.current_state_id;
2380
2381 if (state == SCI_BASE_CONTROLLER_STATE_RESET ||
2382 state == SCI_BASE_CONTROLLER_STATE_INITIALIZING ||
2383 state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
2384 u16 index;
2385
2386 /*
2387 * Validate the user parameters. If they are not legal, then
2388 * return a failure.
2389 */
2390 for (index = 0; index < SCI_MAX_PHYS; index++) {
2391 struct sci_phy_user_params *user_phy;
2392
2393 user_phy = &scic_parms->sds1.phys[index];
2394
2395 if (!((user_phy->max_speed_generation <=
2396 SCIC_SDS_PARM_MAX_SPEED) &&
2397 (user_phy->max_speed_generation >
2398 SCIC_SDS_PARM_NO_SPEED)))
2399 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2400
2401 			/*
2402 			 * the align insertion frequencies must all be usable: the
2403 			 * in-connection value at least 3, the other two non-zero
2404 			 */
2405 			if ((user_phy->in_connection_align_insertion_frequency <
2406 			     3) ||
2407 			    (user_phy->align_insertion_frequency == 0) ||
2408 			    (user_phy->
2409 			     notify_enable_spin_up_insertion_frequency ==
2410 			     0))
2411 				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2412 }
2413
2414 if ((scic_parms->sds1.stp_inactivity_timeout == 0) ||
2415 (scic_parms->sds1.ssp_inactivity_timeout == 0) ||
2416 (scic_parms->sds1.stp_max_occupancy_timeout == 0) ||
2417 (scic_parms->sds1.ssp_max_occupancy_timeout == 0) ||
2418 (scic_parms->sds1.no_outbound_task_timeout == 0))
2419 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2420
2421 memcpy(&scic->user_parameters, scic_parms, sizeof(*scic_parms));
2422
2423 return SCI_SUCCESS;
2424 }
2425
2426 return SCI_FAILURE_INVALID_STATE;
2427 }
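/*
 * Illustrative call sequence (the speed-cap tweak is hypothetical): user
 * parameters are fetched, adjusted, and re-applied while the controller is
 * still in the RESET state:
 *
 *	union scic_user_parameters u;
 *
 *	isci_user_parameters_get(ihost, &u);
 *	u.sds1.phys[0].max_speed_generation = 2;	cap phy 0 at gen 2
 *	status = scic_user_parameters_set(&ihost->sci, &u);
 *
 * isci_host_init() below performs this same get/set pairing with the
 * unmodified defaults.
 */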
2428
2429 static int scic_controller_mem_init(struct scic_sds_controller *scic)
2430 {
2431 struct device *dev = scic_to_dev(scic);
2432 dma_addr_t dma_handle;
2433 enum sci_status result;
2434
2435 scic->completion_queue = dmam_alloc_coherent(dev,
2436 scic->completion_queue_entries * sizeof(u32),
2437 &dma_handle, GFP_KERNEL);
2438 if (!scic->completion_queue)
2439 return -ENOMEM;
2440
2441 writel(lower_32_bits(dma_handle),
2442 &scic->smu_registers->completion_queue_lower);
2443 writel(upper_32_bits(dma_handle),
2444 &scic->smu_registers->completion_queue_upper);
2445
2446 scic->remote_node_context_table = dmam_alloc_coherent(dev,
2447 scic->remote_node_entries *
2448 sizeof(union scu_remote_node_context),
2449 &dma_handle, GFP_KERNEL);
2450 if (!scic->remote_node_context_table)
2451 return -ENOMEM;
2452
2453 writel(lower_32_bits(dma_handle),
2454 &scic->smu_registers->remote_node_context_lower);
2455 writel(upper_32_bits(dma_handle),
2456 &scic->smu_registers->remote_node_context_upper);
2457
2458 scic->task_context_table = dmam_alloc_coherent(dev,
2459 scic->task_context_entries *
2460 sizeof(struct scu_task_context),
2461 &dma_handle, GFP_KERNEL);
2462 if (!scic->task_context_table)
2463 return -ENOMEM;
2464
2465 writel(lower_32_bits(dma_handle),
2466 &scic->smu_registers->host_task_table_lower);
2467 writel(upper_32_bits(dma_handle),
2468 &scic->smu_registers->host_task_table_upper);
2469
2470 result = scic_sds_unsolicited_frame_control_construct(scic);
2471 if (result)
2472 return result;
2473
2474 /*
2475 * Inform the silicon as to the location of the UF headers and
2476 * address table.
2477 */
2478 writel(lower_32_bits(scic->uf_control.headers.physical_address),
2479 &scic->scu_registers->sdma.uf_header_base_address_lower);
2480 writel(upper_32_bits(scic->uf_control.headers.physical_address),
2481 &scic->scu_registers->sdma.uf_header_base_address_upper);
2482
2483 writel(lower_32_bits(scic->uf_control.address_table.physical_address),
2484 &scic->scu_registers->sdma.uf_address_table_lower);
2485 writel(upper_32_bits(scic->uf_control.address_table.physical_address),
2486 &scic->scu_registers->sdma.uf_address_table_upper);
2487
2488 return 0;
2489 }
2490
2491 int isci_host_init(struct isci_host *isci_host)
2492 {
2493 int err = 0, i;
2494 enum sci_status status;
2495 union scic_oem_parameters oem;
2496 union scic_user_parameters scic_user_params;
2497 struct isci_pci_info *pci_info = to_pci_info(isci_host->pdev);
2498
2499 isci_timer_list_construct(isci_host);
2500
2501 spin_lock_init(&isci_host->state_lock);
2502 spin_lock_init(&isci_host->scic_lock);
2503 spin_lock_init(&isci_host->queue_lock);
2504 init_waitqueue_head(&isci_host->eventq);
2505
2506 isci_host_change_state(isci_host, isci_starting);
2507 isci_host->can_queue = ISCI_CAN_QUEUE_VAL;
2508
2509 status = scic_controller_construct(&isci_host->sci, scu_base(isci_host),
2510 smu_base(isci_host));
2511
2512 if (status != SCI_SUCCESS) {
2513 dev_err(&isci_host->pdev->dev,
2514 "%s: scic_controller_construct failed - status = %x\n",
2515 __func__,
2516 status);
2517 return -ENODEV;
2518 }
2519
2520 isci_host->sas_ha.dev = &isci_host->pdev->dev;
2521 isci_host->sas_ha.lldd_ha = isci_host;
2522
2523 /*
2524 * grab initial values stored in the controller object for OEM and USER
2525 * parameters
2526 */
2527 isci_user_parameters_get(isci_host, &scic_user_params);
2528 status = scic_user_parameters_set(&isci_host->sci,
2529 &scic_user_params);
2530 if (status != SCI_SUCCESS) {
2531 dev_warn(&isci_host->pdev->dev,
2532 "%s: scic_user_parameters_set failed\n",
2533 __func__);
2534 return -ENODEV;
2535 }
2536
2537 scic_oem_parameters_get(&isci_host->sci, &oem);
2538
2539 /* grab any OEM parameters specified in orom */
2540 if (pci_info->orom) {
2541 status = isci_parse_oem_parameters(&oem,
2542 pci_info->orom,
2543 isci_host->id);
2544 if (status != SCI_SUCCESS) {
2545 dev_warn(&isci_host->pdev->dev,
2546 "parsing firmware oem parameters failed\n");
2547 return -EINVAL;
2548 }
2549 }
2550
2551 status = scic_oem_parameters_set(&isci_host->sci, &oem);
2552 if (status != SCI_SUCCESS) {
2553 dev_warn(&isci_host->pdev->dev,
2554 "%s: scic_oem_parameters_set failed\n",
2555 __func__);
2556 return -ENODEV;
2557 }
2558
2559 tasklet_init(&isci_host->completion_tasklet,
2560 isci_host_completion_routine, (unsigned long)isci_host);
2561
2562 INIT_LIST_HEAD(&isci_host->requests_to_complete);
2563 INIT_LIST_HEAD(&isci_host->requests_to_errorback);
2564
2565 spin_lock_irq(&isci_host->scic_lock);
2566 status = scic_controller_initialize(&isci_host->sci);
2567 spin_unlock_irq(&isci_host->scic_lock);
2568 if (status != SCI_SUCCESS) {
2569 dev_warn(&isci_host->pdev->dev,
2570 "%s: scic_controller_initialize failed -"
2571 " status = 0x%x\n",
2572 __func__, status);
2573 return -ENODEV;
2574 }
2575
2576 err = scic_controller_mem_init(&isci_host->sci);
2577 if (err)
2578 return err;
2579
2580 isci_host->dma_pool = dmam_pool_create(DRV_NAME, &isci_host->pdev->dev,
2581 sizeof(struct isci_request),
2582 SLAB_HWCACHE_ALIGN, 0);
2583
2584 if (!isci_host->dma_pool)
2585 return -ENOMEM;
2586
2587 for (i = 0; i < SCI_MAX_PORTS; i++)
2588 isci_port_init(&isci_host->ports[i], isci_host, i);
2589
2590 for (i = 0; i < SCI_MAX_PHYS; i++)
2591 isci_phy_init(&isci_host->phys[i], isci_host, i);
2592
2593 for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
2594 struct isci_remote_device *idev = &isci_host->devices[i];
2595
2596 INIT_LIST_HEAD(&idev->reqs_in_process);
2597 INIT_LIST_HEAD(&idev->node);
2598 spin_lock_init(&idev->state_lock);
2599 }
2600
2601 return 0;
2602 }
2603
2604 void scic_sds_controller_link_up(struct scic_sds_controller *scic,
2605 struct scic_sds_port *port, struct scic_sds_phy *phy)
2606 {
2607 switch (scic->state_machine.current_state_id) {
2608 case SCI_BASE_CONTROLLER_STATE_STARTING:
2609 scic_sds_controller_phy_timer_stop(scic);
2610 scic->port_agent.link_up_handler(scic, &scic->port_agent,
2611 port, phy);
2612 scic_sds_controller_start_next_phy(scic);
2613 break;
2614 case SCI_BASE_CONTROLLER_STATE_READY:
2615 scic->port_agent.link_up_handler(scic, &scic->port_agent,
2616 port, phy);
2617 break;
2618 default:
2619 dev_dbg(scic_to_dev(scic),
2620 "%s: SCIC Controller linkup event from phy %d in "
2621 "unexpected state %d\n", __func__, phy->phy_index,
2622 scic->state_machine.current_state_id);
2623 }
2624 }
2625
2626 void scic_sds_controller_link_down(struct scic_sds_controller *scic,
2627 struct scic_sds_port *port, struct scic_sds_phy *phy)
2628 {
2629 switch (scic->state_machine.current_state_id) {
2630 case SCI_BASE_CONTROLLER_STATE_STARTING:
2631 case SCI_BASE_CONTROLLER_STATE_READY:
2632 scic->port_agent.link_down_handler(scic, &scic->port_agent,
2633 port, phy);
2634 break;
2635 default:
2636 dev_dbg(scic_to_dev(scic),
2637 "%s: SCIC Controller linkdown event from phy %d in "
2638 "unexpected state %d\n",
2639 __func__,
2640 phy->phy_index,
2641 scic->state_machine.current_state_id);
2642 }
2643 }
2644
2645 /**
2646 * This is a helper method to determine if any remote devices on this
2647 * controller are still in the stopping state.
2648 *
2649 */
2650 static bool scic_sds_controller_has_remote_devices_stopping(
2651 struct scic_sds_controller *controller)
2652 {
2653 u32 index;
2654
2655 for (index = 0; index < controller->remote_node_entries; index++) {
2656 if ((controller->device_table[index] != NULL) &&
2657 (controller->device_table[index]->state_machine.current_state_id
2658 == SCI_BASE_REMOTE_DEVICE_STATE_STOPPING))
2659 return true;
2660 }
2661
2662 return false;
2663 }
2664
2665 /**
2666 * This method is called by the remote device to inform the controller
2667 * object that the remote device has stopped.
2668 */
2669 void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic,
2670 struct scic_sds_remote_device *sci_dev)
2671 {
2672 if (scic->state_machine.current_state_id !=
2673 SCI_BASE_CONTROLLER_STATE_STOPPING) {
2674 dev_dbg(scic_to_dev(scic),
2675 "SCIC Controller 0x%p remote device stopped event "
2676 "from device 0x%p in unexpected state %d\n",
2677 scic, sci_dev,
2678 scic->state_machine.current_state_id);
2679 return;
2680 }
2681
2682 if (!scic_sds_controller_has_remote_devices_stopping(scic)) {
2683 sci_base_state_machine_change_state(&scic->state_machine,
2684 SCI_BASE_CONTROLLER_STATE_STOPPED);
2685 }
2686 }
2687
2688 /**
2689 * This method will write to the SCU PCP register the request value. The method
2690 * is used to suspend/resume ports, devices, and phys.
2691 * @scic: the controller on which to post the request
2692 * @request: the post context command to write
2693 *
2694 */
2695 void scic_sds_controller_post_request(
2696 struct scic_sds_controller *scic,
2697 u32 request)
2698 {
2699 dev_dbg(scic_to_dev(scic),
2700 "%s: SCIC Controller 0x%p post request 0x%08x\n",
2701 __func__,
2702 scic,
2703 request);
2704
2705 writel(request, &scic->smu_registers->post_context_port);
2706 }
2707
2708 /**
2709 * This method will copy the soft copy of the task context into the physical
2710 * memory accessible by the controller.
2711 * @scic: This parameter specifies the controller for which to copy
2712 * the task context.
2713 * @sci_req: This parameter specifies the request for which the task
2714 * context is being copied.
2715 *
2716 * After this call is made the SCIC_SDS_IO_REQUEST object will always point to
2717 * the physical memory version of the task context. Thus, all subsequent
2718 * updates to the task context are performed in the TC table (i.e. DMAable
2719 * memory). none
2720 */
2721 void scic_sds_controller_copy_task_context(
2722 struct scic_sds_controller *scic,
2723 struct scic_sds_request *sci_req)
2724 {
2725 struct scu_task_context *task_context_buffer;
2726
2727 task_context_buffer = scic_sds_controller_get_task_context_buffer(
2728 scic, sci_req->io_tag);
2729
2730 memcpy(task_context_buffer,
2731 sci_req->task_context_buffer,
2732 offsetof(struct scu_task_context, sgl_snapshot_ac));
2733
2734 /*
2735 	 * The soft copy of the TC has now been copied into the TC table
2736 	 * accessible by the silicon. Thus, any further changes to
2737 	 * the TC (e.g. TC termination) occur in the appropriate location. */
2738 sci_req->task_context_buffer = task_context_buffer;
2739 }
2740
2741 /**
2742 * This method returns the task context buffer for the given io tag.
2743 * @scic: the controller that owns the task context table
2744 * @io_tag: the io tag whose index selects the table entry
2745 *
2746 * Returns a struct scu_task_context pointer, or NULL for an invalid index.
2747 */
2748 struct scu_task_context *scic_sds_controller_get_task_context_buffer(
2749 struct scic_sds_controller *scic,
2750 u16 io_tag
2751 ) {
2752 u16 task_index = scic_sds_io_tag_get_index(io_tag);
2753
2754 if (task_index < scic->task_context_entries) {
2755 return &scic->task_context_table[task_index];
2756 }
2757
2758 return NULL;
2759 }
2760
2761 struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic,
2762 u16 io_tag)
2763 {
2764 u16 task_index;
2765 u16 task_sequence;
2766
2767 task_index = scic_sds_io_tag_get_index(io_tag);
2768
2769 if (task_index < scic->task_context_entries) {
2770 if (scic->io_request_table[task_index] != NULL) {
2771 task_sequence = scic_sds_io_tag_get_sequence(io_tag);
2772
2773 if (task_sequence == scic->io_request_sequence[task_index]) {
2774 return scic->io_request_table[task_index];
2775 }
2776 }
2777 }
2778
2779 return NULL;
2780 }
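/*
 * Illustrative staleness check: an io tag encodes a task context index plus
 * a sequence number. Replaying a tag after its slot was freed and recycled
 * fails the sequence comparison above:
 *
 *	u16 tag = scic_controller_allocate_io_tag(scic);
 *	...run an I/O to completion and free the tag...
 *	scic_request_by_tag(scic, tag);    returns NULL once
 *					   io_request_sequence[] has advanced
 */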
2781
2782 /**
2783 * This method allocates a remote node index and reserves the remote node
2784 * context space for use. This method can fail if there are no more remote
2785 * node indexes available.
2786 * @scic: This is the controller object which contains the set of
2787 * free remote node ids
2788 * @sci_dev: This is the device object which is requesting a remote node
2789 * id
2790 * @node_id: This is the remote node id that is assigned to the device if one
2791 * is available
2792 *
2793 * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there are no remote
2794 * node indexes available.
2795 */
2796 enum sci_status scic_sds_controller_allocate_remote_node_context(
2797 struct scic_sds_controller *scic,
2798 struct scic_sds_remote_device *sci_dev,
2799 u16 *node_id)
2800 {
2801 u16 node_index;
2802 u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);
2803
2804 node_index = scic_sds_remote_node_table_allocate_remote_node(
2805 &scic->available_remote_nodes, remote_node_count
2806 );
2807
2808 if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
2809 scic->device_table[node_index] = sci_dev;
2810
2811 *node_id = node_index;
2812
2813 return SCI_SUCCESS;
2814 }
2815
2816 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
2817 }
2818
2819 /**
2820 * This method frees the remote node index back to the available pool. Once
2821 * this is done the remote node context buffer is no longer valid and can
2822 * not be used.
2823 * @scic:
2824 * @sci_dev:
2825 * @node_id:
2826 *
2827 */
2828 void scic_sds_controller_free_remote_node_context(
2829 struct scic_sds_controller *scic,
2830 struct scic_sds_remote_device *sci_dev,
2831 u16 node_id)
2832 {
2833 u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);
2834
2835 if (scic->device_table[node_id] == sci_dev) {
2836 scic->device_table[node_id] = NULL;
2837
2838 scic_sds_remote_node_table_release_remote_node_index(
2839 &scic->available_remote_nodes, remote_node_count, node_id
2840 );
2841 }
2842 }
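/*
 * Illustrative pairing (sketch): a remote device reserves its context at
 * start-of-day and releases it on stop:
 *
 *	u16 rni;
 *
 *	if (scic_sds_controller_allocate_remote_node_context(scic, sci_dev,
 *							     &rni) == SCI_SUCCESS) {
 *		...program the buffer from
 *		   scic_sds_controller_get_remote_node_context_buffer()...
 *		scic_sds_controller_free_remote_node_context(scic, sci_dev, rni);
 *	}
 *
 * Both calls take the device so that scic_sds_remote_device_node_count()
 * can size the reservation; some device types may consume more than one
 * index.
 */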
2843
2844 /**
2845 * This method returns the union scu_remote_node_context for the specified remote
2846 * node id.
2847 * @scic:
2848 * @node_id:
2849 *
2850 * union scu_remote_node_context*
2851 */
2852 union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
2853 struct scic_sds_controller *scic,
2854 u16 node_id
2855 ) {
2856 if (
2857 (node_id < scic->remote_node_entries)
2858 && (scic->device_table[node_id] != NULL)
2859 ) {
2860 return &scic->remote_node_context_table[node_id];
2861 }
2862
2863 return NULL;
2864 }
2865
2866 /**
2867 *
2868 * @response_buffer: This is the buffer into which the D2H register FIS will be
2869 * constructed.
2870 * @frame_header: This is the frame header returned by the hardware.
2871 * @frame_buffer: This is the frame buffer returned by the hardware.
2872 *
2873 * This method will combine the frame header and frame buffer to create a SATA
2874 * D2H register FIS.
2875 */
2876 void scic_sds_controller_copy_sata_response(
2877 void *response_buffer,
2878 void *frame_header,
2879 void *frame_buffer)
2880 {
2881 memcpy(response_buffer, frame_header, sizeof(u32));
2882
2883 memcpy(response_buffer + sizeof(u32),
2884 frame_buffer,
2885 sizeof(struct dev_to_host_fis) - sizeof(u32));
2886 }
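/*
 * Layout sketch: the first dword of the D2H register FIS arrives in the
 * unsolicited frame header and the remainder in the frame buffer, so the
 * two memcpy calls above reassemble it as:
 *
 *	response_buffer[0..3] = frame_header[0..3]
 *	response_buffer[4..sizeof(struct dev_to_host_fis) - 1] =
 *		frame_buffer[0..sizeof(struct dev_to_host_fis) - 5]
 */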
2887
2888 /**
2889 * This method releases the frame; once this is done, the frame is available
2890 * for re-use by the hardware. The data contained in the frame header and frame
2891 * buffer is no longer valid. The UF queue get pointer is only updated if UF
2892 * control indicates this is appropriate.
2893 * @scic:
2894 * @frame_index:
2895 *
2896 */
2897 void scic_sds_controller_release_frame(
2898 struct scic_sds_controller *scic,
2899 u32 frame_index)
2900 {
2901 if (scic_sds_unsolicited_frame_control_release_frame(
2902 &scic->uf_control, frame_index) == true)
2903 writel(scic->uf_control.get,
2904 &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
2905 }
2906
2907 /**
2908 * scic_controller_start_io() - This method is called by the SCI user to
2909 * send/start an IO request. If the method invocation is successful, then
2910 * the IO request has been queued to the hardware for processing.
2911 * @controller: the handle to the controller object for which to start an IO
2912 * request.
2913 * @remote_device: the handle to the remote device object for which to start an
2914 * IO request.
2915 * @io_request: the handle to the io request object to start.
2916 * @io_tag: This parameter specifies a previously allocated IO tag that the
2917 * user desires to be utilized for this request. This parameter is optional.
2918 * The user is allowed to supply SCI_CONTROLLER_INVALID_IO_TAG as the value
2919 * for this parameter.
2920 *
2921 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
2922 * to ensure that each of the methods that may allocate or free available IO
2923 * tags are handled in a mutually exclusive manner. This method is one of said
2924 * methods requiring proper critical code section protection (e.g. semaphore,
2925 * spin-lock, etc.). - For SATA, the user is required to manage NCQ tags. As a
2926 * result, it is expected the user will have set the NCQ tag field in the host
2927 * to device register FIS prior to calling this method. There is also a
2928 * requirement for the user to call scic_stp_io_set_ncq_tag() prior to invoking
2929 * the scic_controller_start_io() method. See scic_controller_allocate_tag() for
2930 * more information on allocating a tag. Indicate if the controller
2931 * successfully started the IO request. SCI_SUCCESS if the IO request was
2932 * successfully started. Determine the failure situations and return values.
2933 */
2934 enum sci_status scic_controller_start_io(
2935 struct scic_sds_controller *scic,
2936 struct scic_sds_remote_device *rdev,
2937 struct scic_sds_request *req,
2938 u16 io_tag)
2939 {
2940 enum sci_status status;
2941
2942 if (scic->state_machine.current_state_id !=
2943 SCI_BASE_CONTROLLER_STATE_READY) {
2944 dev_warn(scic_to_dev(scic), "invalid state to start I/O");
2945 return SCI_FAILURE_INVALID_STATE;
2946 }
2947
2948 status = scic_sds_remote_device_start_io(scic, rdev, req);
2949 if (status != SCI_SUCCESS)
2950 return status;
2951
2952 scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;
2953 scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(req));
2954 return SCI_SUCCESS;
2955 }
2956
2957 /**
2958 * scic_controller_terminate_request() - This method is called by the SCI Core
2959 * user to terminate an ongoing (i.e. started) core IO request. This does
2960 * not abort the IO request at the target, but rather removes the IO request
2961 * from the host controller.
2962 * @controller: the handle to the controller object for which to terminate a
2963 * request.
2964 * @remote_device: the handle to the remote device object for which to
2965 * terminate a request.
2966 * @request: the handle to the io or task management request object to
2967 * terminate.
2968 *
2969 * Indicate if the controller successfully began the terminate process for the
2970 * IO request. SCI_SUCCESS if the terminate process was successfully started
2971 * for the request. Determine the failure situations and return values.
2972 */
2973 enum sci_status scic_controller_terminate_request(
2974 struct scic_sds_controller *scic,
2975 struct scic_sds_remote_device *rdev,
2976 struct scic_sds_request *req)
2977 {
2978 enum sci_status status;
2979
2980 if (scic->state_machine.current_state_id !=
2981 SCI_BASE_CONTROLLER_STATE_READY) {
2982 dev_warn(scic_to_dev(scic),
2983 "invalid state to terminate request\n");
2984 return SCI_FAILURE_INVALID_STATE;
2985 }
2986
2987 status = scic_sds_io_request_terminate(req);
2988 if (status != SCI_SUCCESS)
2989 return status;
2990
2991 /*
2992 * Utilize the original post context command and or in the POST_TC_ABORT
2993 * request sub-type.
2994 */
2995 scic_sds_controller_post_request(scic,
2996 scic_sds_request_get_post_context(req) |
2997 SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
2998 return SCI_SUCCESS;
2999 }
3000
3001 /**
3002 * scic_controller_complete_io() - This method will perform core specific
3003 * completion operations for an IO request. After this method is invoked,
3004 * the user should consider the IO request as invalid until it is properly
3005 * reused (i.e. re-constructed).
3006 * @controller: The handle to the controller object for which to complete the
3007 * IO request.
3008 * @remote_device: The handle to the remote device object for which to complete
3009 * the IO request.
3010 * @io_request: the handle to the io request object to complete.
3011 *
3012 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
3013 * to ensure that each of the methods that may allocate or free available IO
3014 * tags are handled in a mutually exclusive manner. This method is one of said
3015 * methods requiring proper critical code section protection (e.g. semaphore,
3016 * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
3017 * Core user, using the scic_controller_allocate_io_tag() method, then it is
3018 * the responsibility of the caller to invoke the scic_controller_free_io_tag()
3019 * method to free the tag (i.e. this method will not free the IO tag). Indicate
3020 * if the controller successfully completed the IO request. SCI_SUCCESS if the
3021 * completion process was successful.
3022 */
3023 enum sci_status scic_controller_complete_io(
3024 struct scic_sds_controller *scic,
3025 struct scic_sds_remote_device *rdev,
3026 struct scic_sds_request *request)
3027 {
3028 enum sci_status status;
3029 u16 index;
3030
3031 switch (scic->state_machine.current_state_id) {
3032 case SCI_BASE_CONTROLLER_STATE_STOPPING:
3033 /* XXX: Implement this function */
3034 return SCI_FAILURE;
3035 case SCI_BASE_CONTROLLER_STATE_READY:
3036 status = scic_sds_remote_device_complete_io(scic, rdev, request);
3037 if (status != SCI_SUCCESS)
3038 return status;
3039
3040 index = scic_sds_io_tag_get_index(request->io_tag);
3041 scic->io_request_table[index] = NULL;
3042 return SCI_SUCCESS;
3043 default:
3044 dev_warn(scic_to_dev(scic), "invalid state to complete I/O");
3045 return SCI_FAILURE_INVALID_STATE;
3046 }
3047
3048 }
3049
3050 enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req)
3051 {
3052 struct scic_sds_controller *scic = sci_req->owning_controller;
3053
3054 if (scic->state_machine.current_state_id !=
3055 SCI_BASE_CONTROLLER_STATE_READY) {
3056 dev_warn(scic_to_dev(scic), "invalid state to continue I/O");
3057 return SCI_FAILURE_INVALID_STATE;
3058 }
3059
3060 scic->io_request_table[scic_sds_io_tag_get_index(sci_req->io_tag)] = sci_req;
3061 scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(sci_req));
3062 return SCI_SUCCESS;
3063 }
3064
3065 /**
3066 * scic_controller_start_task() - This method is called by the SCIC user to
3067 * send/start a framework task management request.
3068 * @controller: the handle to the controller object for which to start the task
3069 * management request.
3070 * @remote_device: the handle to the remote device object for which to start
3071 * the task management request.
3072 * @task_request: the handle to the task request object to start.
3073 * @io_tag: This parameter specifies a previously allocated IO tag that the
3074 *    user desires to be utilized for this request. Note this is not the io_tag
3075 * of the request being managed. It is to be utilized for the task request
3076 * itself. This parameter is optional. The user is allowed to supply
3077 * SCI_CONTROLLER_INVALID_IO_TAG as the value for this parameter.
3078 *
3079 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
3080 * to ensure that each of the methods that may allocate or free available IO
3081 * tags are handled in a mutually exclusive manner. This method is one of said
3082 * methods requiring proper critical code section protection (e.g. semaphore,
3083 * spin-lock, etc.). - The user must synchronize this task with completion
3084 * queue processing. If they are not synchronized then it is possible for the
3085 * io requests that are being managed by the task request to complete before
3086 * the task request itself starts. See scic_controller_allocate_tag() for more
3087 * information on allocating a tag. Indicate if the controller successfully
3088 * started the IO request. SCI_TASK_SUCCESS if the task request was
3089 * successfully started. SCI_TASK_FAILURE_REQUIRES_SCSI_ABORT This value is
3090 * returned if there is/are task(s) outstanding that require termination or
3091 * completion before this request can succeed.
3092 */
3093 enum sci_task_status scic_controller_start_task(
3094 struct scic_sds_controller *scic,
3095 struct scic_sds_remote_device *rdev,
3096 struct scic_sds_request *req,
3097 u16 task_tag)
3098 {
3099 enum sci_status status;
3100
3101 if (scic->state_machine.current_state_id !=
3102 SCI_BASE_CONTROLLER_STATE_READY) {
3103 dev_warn(scic_to_dev(scic),
3104 "%s: SCIC Controller starting task from invalid "
3105 "state\n",
3106 __func__);
3107 return SCI_TASK_FAILURE_INVALID_STATE;
3108 }
3109
3110 status = scic_sds_remote_device_start_task(scic, rdev, req);
3111 switch (status) {
3112 case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
3113 scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;
3114
3115 /*
3116 		 * We will let the framework know this task request started successfully,
3117 		 * although the core is still working on starting the request (it will post
3118 		 * the TC when the RNC is resumed).
3119 */
3120 return SCI_SUCCESS;
3121 case SCI_SUCCESS:
3122 scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;
3123
3124 scic_sds_controller_post_request(scic,
3125 scic_sds_request_get_post_context(req));
3126 break;
3127 default:
3128 break;
3129 }
3130
3131 return status;
3132 }
3133
3134 /**
3135 * scic_controller_allocate_io_tag() - This method will allocate a tag from the
3136 * pool of free IO tags. Direct allocation of IO tags by the SCI Core user
3137 * is optional. The scic_controller_start_io() method will allocate an IO
3138 * tag if this method is not utilized and the tag is not supplied to the IO
3139 * construct routine. Direct allocation of IO tags may provide additional
3140 * performance improvements in environments capable of supporting this usage
3141 * model. Additionally, direct allocation of IO tags also provides
3142 * additional flexibility to the SCI Core user. Specifically, the user may
3143 * retain IO tags across the lives of multiple IO requests.
3144 * @controller: the handle to the controller object for which to allocate the
3145 * tag.
3146 *
3147 * IO tags are a protected resource. It is incumbent upon the SCI Core user to
3148 * ensure that each of the methods that may allocate or free available IO tags
3149 * are handled in a mutually exclusive manner. This method is one of said
3150 * methods requiring proper critical code section protection (e.g. semaphore,
3151 * spin-lock, etc.). An unsigned integer representing an available IO tag.
3152 * SCI_CONTROLLER_INVALID_IO_TAG This value is returned if there are no
3153 * currently available tags to be allocated. All return other values indicate a
3154 * legitimate tag.
3155 */
3156 u16 scic_controller_allocate_io_tag(
3157 struct scic_sds_controller *scic)
3158 {
3159 u16 task_context;
3160 u16 sequence_count;
3161
3162 if (!sci_pool_empty(scic->tci_pool)) {
3163 sci_pool_get(scic->tci_pool, task_context);
3164
3165 sequence_count = scic->io_request_sequence[task_context];
3166
3167 return scic_sds_io_tag_construct(sequence_count, task_context);
3168 }
3169
3170 return SCI_CONTROLLER_INVALID_IO_TAG;
3171 }
3172
3173 /**
3174 * scic_controller_free_io_tag() - This method will free an IO tag to the pool
3175 * of free IO tags. This method provides the SCI Core user more flexibility
3176 * with regards to IO tags. The user may desire to keep an IO tag after an
3177 * IO request has completed, because they plan on re-using the tag for a
3178 * subsequent IO request. This method is only legal if the tag was
3179 * allocated via scic_controller_allocate_io_tag().
3180 * @controller: This parameter specifies the handle to the controller object
3181 * for which to free/return the tag.
3182 * @io_tag: This parameter represents the tag to be freed to the pool of
3183 * available tags.
3184 *
3185 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
3186 * to ensure that each of the methods that may allocate or free available IO
3187 * tags are handled in a mutually exclusive manner. This method is one of said
3188 * methods requiring proper critical code section protection (e.g. semaphore,
3189 * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
3190 * Core user, using the scic_controller_allocate_io_tag() method, then it is
3191 * the responsibility of the caller to invoke this method to free the tag. This
3192 * method returns an indication of whether the tag was successfully put back
3193 * (freed) to the pool of available tags. SCI_SUCCESS This return value
3194 * indicates the tag was successfully placed into the pool of available IO
3195 * tags. SCI_FAILURE_INVALID_IO_TAG This value is returned if the supplied tag
3196 * is not a valid IO tag value.
3197 */
3198 enum sci_status scic_controller_free_io_tag(
3199 struct scic_sds_controller *scic,
3200 u16 io_tag)
3201 {
3202 u16 sequence;
3203 u16 index;
3204
3205 BUG_ON(io_tag == SCI_CONTROLLER_INVALID_IO_TAG);
3206
3207 sequence = scic_sds_io_tag_get_sequence(io_tag);
3208 index = scic_sds_io_tag_get_index(io_tag);
3209
3210 if (!sci_pool_full(scic->tci_pool)) {
3211 if (sequence == scic->io_request_sequence[index]) {
3212 scic_sds_io_sequence_increment(
3213 scic->io_request_sequence[index]);
3214
3215 sci_pool_put(scic->tci_pool, index);
3216
3217 return SCI_SUCCESS;
3218 }
3219 }
3220
3221 return SCI_FAILURE_INVALID_IO_TAG;
3222 }
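/*
 * Illustrative tag lifecycle (sketch; per the kerneldoc above, the caller
 * must serialize all tag allocate/free operations):
 *
 *	u16 tag = scic_controller_allocate_io_tag(scic);
 *
 *	if (tag != SCI_CONTROLLER_INVALID_IO_TAG) {
 *		...construct the request around this tag...
 *		scic_controller_start_io(scic, rdev, req, tag);
 *		...I/O completes...
 *		scic_controller_complete_io(scic, rdev, req);
 *		scic_controller_free_io_tag(scic, tag);
 *	}
 *
 * Freeing increments io_request_sequence[] for the slot, which is what lets
 * scic_request_by_tag() reject stale tags.
 */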
3223
3224