/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/circ_buf.h>
#include <linux/device.h>
#include "probe_roms.h"
#include "remote_device.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "registers.h"
#include "scu_remote_node_context.h"
#include "scu_task_context.h"
#include "scu_unsolicited_frame.h"

#define SCU_CONTEXT_RAM_INIT_STALL_TIME      200

#define smu_max_ports(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
	)

#define smu_max_task_contexts(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
	)

#define smu_max_rncs(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
	)

#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT      100

/**
 * The number of milliseconds to wait while a given phy is consuming power
 * before allowing another set of phys to consume power. Ultimately, this will
 * be specified by OEM parameter.
 */
#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500

/**
 * NORMALIZE_PUT_POINTER() -
 *
 * This macro will normalize the completion queue put pointer so its value can
 * be used as an array index.
 */
#define NORMALIZE_PUT_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)

/**
 * NORMALIZE_EVENT_POINTER() -
 *
 * This macro will normalize the completion queue event entry so its value can
 * be used as an index.
 */
#define NORMALIZE_EVENT_POINTER(x) \
	(\
		((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
		>> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
	)

/**
 * INCREMENT_COMPLETION_QUEUE_GET() -
 *
 * This macro will increment the controller's completion queue index value and
 * possibly toggle the cycle bit if the completion queue index wraps back to 0.
 */
#define INCREMENT_COMPLETION_QUEUE_GET(controller, index, cycle) \
	INCREMENT_QUEUE_GET(\
		(index), \
		(cycle), \
		SCU_MAX_COMPLETION_QUEUE_ENTRIES, \
		SMU_CQGR_CYCLE_BIT \
		)

/**
 * INCREMENT_EVENT_QUEUE_GET() -
 *
 * This macro will increment the controller's event queue index value and
 * possibly toggle the event cycle bit if the event queue index wraps back to 0.
 */
#define INCREMENT_EVENT_QUEUE_GET(controller, index, cycle) \
	INCREMENT_QUEUE_GET(\
		(index), \
		(cycle), \
		SCU_MAX_EVENTS, \
		SMU_CQGR_EVENT_CYCLE_BIT \
		)

/**
 * NORMALIZE_GET_POINTER() -
 *
 * This macro will normalize the completion queue get pointer so its value can
 * be used as an index into an array.
 */
#define NORMALIZE_GET_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)

/**
 * NORMALIZE_GET_POINTER_CYCLE_BIT() -
 *
 * This macro will normalize the completion queue cycle pointer so it matches
 * the completion queue cycle bit.
 */
#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
	((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))

/**
 * COMPLETION_QUEUE_CYCLE_BIT() -
 *
 * This macro will return the cycle bit of the completion queue entry.
 */
#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
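
/*
 * Note on the cycle-bit scheme used by the macros above: the hardware and the
 * driver each keep a cycle bit that toggles every time their respective queue
 * pointer wraps.  An entry whose cycle bit (bit 31, per
 * COMPLETION_QUEUE_CYCLE_BIT) matches the driver's current get cycle is new
 * and valid; once the bits differ the driver has caught up with the hardware
 * put pointer.
 */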
/* Init the state machine and call the state entry function (if any) */
void sci_init_sm(struct sci_base_state_machine *sm,
		 const struct sci_base_state *state_table, u32 initial_state)
{
	sci_state_transition_t handler;

	sm->initial_state_id    = initial_state;
	sm->previous_state_id   = initial_state;
	sm->current_state_id    = initial_state;
	sm->state_table         = state_table;

	handler = sm->state_table[initial_state].enter_state;
	if (handler)
		handler(sm);
}

/* Call the state exit fn, update the current state, call the state entry fn */
void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
{
	sci_state_transition_t handler;

	handler = sm->state_table[sm->current_state_id].exit_state;
	if (handler)
		handler(sm);

	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next_state;

	handler = sm->state_table[sm->current_state_id].enter_state;
	if (handler)
		handler(sm);
}
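
/* Returns true when the entry at the current completion queue get pointer
 * carries a cycle bit that matches the driver's get cycle, i.e. the hardware
 * has posted at least one completion that has not yet been processed.
 */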
static bool scic_sds_controller_completion_queue_has_entries(
	struct scic_sds_controller *scic)
{
	u32 get_value = scic->completion_queue_get;
	u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;

	if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
	    COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index]))
		return true;

	return false;
}

static bool scic_sds_controller_isr(struct scic_sds_controller *scic)
{
	if (scic_sds_controller_completion_queue_has_entries(scic)) {
		return true;
	} else {
		/*
		 * We have a spurious interrupt; it could be that we have already
		 * emptied the completion queue from a previous interrupt. */
		writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);

		/*
		 * There is a race in the hardware that could cause us not to be notified
		 * of an interrupt completion if we do not take this step.  We will mask
		 * then unmask the interrupts so if there is another interrupt pending
		 * after the clearing of the interrupt source we get the next interrupt message. */
		writel(0xFF000000, &scic->smu_registers->interrupt_mask);
		writel(0, &scic->smu_registers->interrupt_mask);
	}

	return false;
}

irqreturn_t isci_msix_isr(int vec, void *data)
{
	struct isci_host *ihost = data;

	if (scic_sds_controller_isr(&ihost->sci))
		tasklet_schedule(&ihost->completion_tasklet);

	return IRQ_HANDLED;
}

static bool scic_sds_controller_error_isr(struct scic_sds_controller *scic)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&scic->smu_registers->interrupt_status);
	interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);

	if (interrupt_status != 0) {
		/*
		 * There is an error interrupt pending so let it through and handle
		 * in the callback */
		return true;
	}

	/*
	 * There is a race in the hardware that could cause us not to be notified
	 * of an interrupt completion if we do not take this step.  We will mask
	 * then unmask the error interrupts so if there was another interrupt
	 * pending we will be notified.
	 * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
	writel(0xff, &scic->smu_registers->interrupt_mask);
	writel(0, &scic->smu_registers->interrupt_mask);

	return false;
}
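
/* Look up the request that owns this task completion by its completion index
 * and, after validating the tag sequence number, hand the entry to the I/O
 * request layer.
 */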
static void scic_sds_controller_task_completion(struct scic_sds_controller *scic,
						u32 completion_entry)
{
	u32 index;
	struct scic_sds_request *sci_req;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);
	sci_req = scic->io_request_table[index];

	/* Make sure that we really want to process this IO request */
	if (sci_req && sci_req->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
	    ISCI_TAG_SEQ(sci_req->io_tag) == scic->io_request_sequence[index])
		/* Yep this is a valid io request pass it along to the io request handler */
		scic_sds_io_request_tc_completion(sci_req, completion_entry);
}

static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic,
						u32 completion_entry)
{
	u32 index;
	struct scic_sds_request *io_request;
	struct scic_sds_remote_device *device;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);

	switch (scu_get_command_request_type(completion_entry)) {
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
		io_request = scic->io_request_table[index];
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC SDS Completion type SDMA %x for io request "
			 "%p\n",
			 __func__, completion_entry, io_request);
		/* @todo For a post TC operation we need to fail the IO
		 * request
		 */
		break;

	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
		device = scic->device_table[index];
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC SDS Completion type SDMA %x for remote "
			 "device %p\n",
			 __func__, completion_entry, device);
		/* @todo For a port RNC operation we need to fail the
		 * device
		 */
		break;

	default:
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC SDS Completion unknown SDMA completion "
			 "type %x\n",
			 __func__, completion_entry);
		break;
	}
}

static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *scic,
						  u32 completion_entry)
{
	u32 index;
	u32 frame_index;

	struct isci_host *ihost = scic_to_ihost(scic);
	struct scu_unsolicited_frame_header *frame_header;
	struct scic_sds_phy *phy;
	struct scic_sds_remote_device *device;

	enum sci_status result = SCI_FAILURE;

	frame_index = SCU_GET_FRAME_INDEX(completion_entry);

	frame_header = scic->uf_control.buffers.array[frame_index].header;
	scic->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;

	if (SCU_GET_FRAME_ERROR(completion_entry)) {
		/*
		 * @todo If the IAF frame or SIGNATURE FIS frame has an error will
		 *       this cause a problem? We expect the phy initialization will
		 *       fail if there is an error in the frame. */
		scic_sds_controller_release_frame(scic, frame_index);
		return;
	}

	if (frame_header->is_address_frame) {
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
		phy = &ihost->phys[index].sci;
		result = scic_sds_phy_frame_handler(phy, frame_index);
	} else {

		index = SCU_GET_COMPLETION_INDEX(completion_entry);

		if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			/*
			 * This is a signature fis or a frame from a direct attached SATA
			 * device that has not yet been created.  In either case forward
			 * the frame to the PE and let it take care of the frame data. */
			index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
			phy = &ihost->phys[index].sci;
			result = scic_sds_phy_frame_handler(phy, frame_index);
		} else {
			if (index < scic->remote_node_entries)
				device = scic->device_table[index];
			else
				device = NULL;

			if (device != NULL)
				result = scic_sds_remote_device_frame_handler(device, frame_index);
			else
				scic_sds_controller_release_frame(scic, frame_index);
		}
	}

	if (result != SCI_SUCCESS) {
		/*
		 * @todo Is there any reason to report some additional error message
		 *       when we get this failure notification? */
	}
}

static void scic_sds_controller_event_completion(struct scic_sds_controller *scic,
						 u32 completion_entry)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scic_sds_request *io_request;
	struct scic_sds_remote_device *device;
	struct scic_sds_phy *phy;
	u32 index;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);

	switch (scu_get_event_type(completion_entry)) {
	case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
		/* @todo The driver did something wrong and we need to fix the condition. */
		dev_err(scic_to_dev(scic),
			"%s: SCIC Controller 0x%p received SMU command error "
			"0x%x\n",
			__func__, scic, completion_entry);
		break;

	case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
	case SCU_EVENT_TYPE_SMU_ERROR:
	case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
		/*
		 * @todo This is a hardware failure and it's likely that we want to
		 *       reset the controller. */
		dev_err(scic_to_dev(scic),
			"%s: SCIC Controller 0x%p received fatal controller "
			"event 0x%x\n",
			__func__, scic, completion_entry);
		break;

	case SCU_EVENT_TYPE_TRANSPORT_ERROR:
		io_request = scic->io_request_table[index];
		scic_sds_io_request_event_handler(io_request, completion_entry);
		break;

	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		switch (scu_get_event_specifier(completion_entry)) {
		case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
		case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
			io_request = scic->io_request_table[index];
			if (io_request != NULL)
				scic_sds_io_request_event_handler(io_request, completion_entry);
			else
				dev_warn(scic_to_dev(scic),
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for io request object "
					 "that doesn't exist.\n",
					 __func__, scic, completion_entry);
			break;

		case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
			device = scic->device_table[index];
			if (device != NULL)
				scic_sds_remote_device_event_handler(device, completion_entry);
			else
				dev_warn(scic_to_dev(scic),
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for remote device object "
					 "that doesn't exist.\n",
					 __func__, scic, completion_entry);
			break;
		}
		break;

	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
		/*
		 * direct the broadcast change event to the phy first and then let
		 * the phy redirect the broadcast change to the port object */
	case SCU_EVENT_TYPE_ERR_CNT_EVENT:
		/*
		 * direct error counter event to the phy object since that is where
		 * we get the event notification. This is a type 4 event. */
	case SCU_EVENT_TYPE_OSSP_EVENT:
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
		phy = &ihost->phys[index].sci;
		scic_sds_phy_event_handler(phy, completion_entry);
		break;

	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
		if (index < scic->remote_node_entries) {
			device = scic->device_table[index];

			if (device != NULL)
				scic_sds_remote_device_event_handler(device, completion_entry);
		} else
			dev_err(scic_to_dev(scic),
				"%s: SCIC Controller 0x%p received event 0x%x "
				"for remote device object 0x%0x that doesn't "
				"exist.\n",
				__func__, scic, completion_entry, index);
		break;

	default:
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC Controller received unknown event code %x\n",
			 __func__, completion_entry);
		break;
	}
}

static void scic_sds_controller_process_completions(struct scic_sds_controller *scic)
{
	u32 completion_count = 0;
	u32 completion_entry;
	u32 get_index;
	u32 get_cycle;
	u32 event_index;
	u32 event_cycle;

	dev_dbg(scic_to_dev(scic),
		"%s: completion queue beginning get:0x%08x\n",
		__func__,
		scic->completion_queue_get);

	/* Get the component parts of the completion queue */
	get_index = NORMALIZE_GET_POINTER(scic->completion_queue_get);
	get_cycle = SMU_CQGR_CYCLE_BIT & scic->completion_queue_get;

	event_index = NORMALIZE_EVENT_POINTER(scic->completion_queue_get);
	event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & scic->completion_queue_get;

	while (
		NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
		== COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index])
		) {
		completion_count++;

		completion_entry = scic->completion_queue[get_index];
		INCREMENT_COMPLETION_QUEUE_GET(scic, get_index, get_cycle);

		dev_dbg(scic_to_dev(scic),
			"%s: completion queue entry:0x%08x\n",
			__func__,
			completion_entry);

		switch (SCU_GET_COMPLETION_TYPE(completion_entry)) {
		case SCU_COMPLETION_TYPE_TASK:
			scic_sds_controller_task_completion(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_SDMA:
			scic_sds_controller_sdma_completion(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_UFI:
			scic_sds_controller_unsolicited_frame(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_EVENT:
			INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
			scic_sds_controller_event_completion(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_NOTIFY:
			/*
			 * Presently we do the same thing with a notify event that we do with the
			 * other event codes. */
			INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
			scic_sds_controller_event_completion(scic, completion_entry);
			break;

		default:
			dev_warn(scic_to_dev(scic),
				 "%s: SCIC Controller received unknown "
				 "completion type %x\n",
				 __func__,
				 completion_entry);
			break;
		}
	}

	/* Update the get register if we completed one or more entries */
	if (completion_count > 0) {
		scic->completion_queue_get =
			SMU_CQGR_GEN_BIT(ENABLE) |
			SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
			event_cycle |
			SMU_CQGR_GEN_VAL(EVENT_POINTER, event_index) |
			get_cycle |
			SMU_CQGR_GEN_VAL(POINTER, get_index);

		writel(scic->completion_queue_get,
		       &scic->smu_registers->completion_queue_get);
	}

	dev_dbg(scic_to_dev(scic),
		"%s: completion queue ending get:0x%08x\n",
		__func__,
		scic->completion_queue_get);
}

static void scic_sds_controller_error_handler(struct scic_sds_controller *scic)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&scic->smu_registers->interrupt_status);

	if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
	    scic_sds_controller_completion_queue_has_entries(scic)) {

		scic_sds_controller_process_completions(scic);
		writel(SMU_ISR_QUEUE_SUSPEND, &scic->smu_registers->interrupt_status);
	} else {
		dev_err(scic_to_dev(scic), "%s: status: %#x\n", __func__,
			interrupt_status);

		sci_change_state(&scic->sm, SCIC_FAILED);

		return;
	}

	/* If we don't process any completions I am not sure that we want to do this.
	 * We are in the middle of a hardware fault and should probably be reset.
	 */
	writel(0, &scic->smu_registers->interrupt_mask);
}
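
/* Legacy (INTx) interrupt handler: normal completions are deferred to the
 * completion tasklet, while queue error/suspend conditions are handled
 * immediately under the scic_lock.
 */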
irqreturn_t isci_intx_isr(int vec, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct isci_host *ihost = data;
	struct scic_sds_controller *scic = &ihost->sci;

	if (scic_sds_controller_isr(scic)) {
		writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
		tasklet_schedule(&ihost->completion_tasklet);
		ret = IRQ_HANDLED;
	} else if (scic_sds_controller_error_isr(scic)) {
		spin_lock(&ihost->scic_lock);
		scic_sds_controller_error_handler(scic);
		spin_unlock(&ihost->scic_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}

irqreturn_t isci_error_isr(int vec, void *data)
{
	struct isci_host *ihost = data;

	if (scic_sds_controller_error_isr(&ihost->sci))
		scic_sds_controller_error_handler(&ihost->sci);

	return IRQ_HANDLED;
}

/**
 * isci_host_start_complete() - This function is called by the core library,
 *    through the ISCI Module, to indicate controller start status.
 * @isci_host: This parameter specifies the ISCI host object
 * @completion_status: This parameter specifies the completion status from the
 *    core library.
 *
 */
static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	if (completion_status != SCI_SUCCESS)
		dev_info(&ihost->pdev->dev,
			"controller start timed out, continuing...\n");
	isci_host_change_state(ihost, isci_ready);
	clear_bit(IHOST_START_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;

	if (test_bit(IHOST_START_PENDING, &ihost->flags))
		return 0;

	/* todo: use sas_flush_discovery once it is upstream */
	scsi_flush_work(shost);

	scsi_flush_work(shost);

	dev_dbg(&ihost->pdev->dev,
		"%s: ihost->status = %d, time = %ld\n",
		__func__, isci_host_get_state(ihost), time);

	return 1;
}

/**
 * scic_controller_get_suggested_start_timeout() - This method returns the
 *    suggested scic_controller_start() timeout amount.  The user is free to
 *    use any timeout value, but this method provides the suggested minimum
 *    start timeout value.  The returned value is based upon empirical
 *    information determined as a result of interoperability testing.
 * @controller: the handle to the controller object for which to return the
 *    suggested start timeout.
 *
 * This method returns the number of milliseconds for the suggested start
 * operation timeout.
 */
static u32 scic_controller_get_suggested_start_timeout(
	struct scic_sds_controller *sc)
{
	/* Validate the user supplied parameters. */
	if (sc == NULL)
		return 0;

	/*
	 * The suggested minimum timeout value for a controller start operation:
	 *
	 *     Signature FIS Timeout
	 *   + Phy Start Timeout
	 *   + Number of Phy Spin Up Intervals
	 *   ---------------------------------
	 *   Number of milliseconds for the controller start operation.
	 *
	 * NOTE: The number of phy spin up intervals will be equivalent
	 *       to the number of phys divided by the number of phys allowed
	 *       per interval - 1 (once OEM parameters are supported).
	 *       Currently we assume only 1 phy per interval. */

	return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
		+ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
		+ ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
}
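
/* As a rough worked example (assuming SCI_MAX_PHYS == 4), the spin up term
 * above contributes (4 - 1) * 500 ms on top of the signature FIS timeout and
 * the 100 ms phy start timeout.
 */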
static void scic_controller_enable_interrupts(
	struct scic_sds_controller *scic)
{
	BUG_ON(scic->smu_registers == NULL);
	writel(0, &scic->smu_registers->interrupt_mask);
}

void scic_controller_disable_interrupts(
	struct scic_sds_controller *scic)
{
	BUG_ON(scic->smu_registers == NULL);
	writel(0xffffffff, &scic->smu_registers->interrupt_mask);
}

static void scic_sds_controller_enable_port_task_scheduler(
	struct scic_sds_controller *scic)
{
	u32 port_task_scheduler_value;

	port_task_scheduler_value =
		readl(&scic->scu_registers->peg0.ptsg.control);
	port_task_scheduler_value |=
		(SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
		 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
	writel(port_task_scheduler_value,
	       &scic->scu_registers->peg0.ptsg.control);
}

static void scic_sds_controller_assign_task_entries(struct scic_sds_controller *scic)
{
	u32 task_assignment;

	/*
	 * Assign all the TCs to function 0
	 * TODO: Do we actually need to read this register to write it back?
	 */

	task_assignment =
		readl(&scic->smu_registers->task_context_assignment[0]);

	task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
		(SMU_TCA_GEN_VAL(ENDING, scic->task_context_entries - 1)) |
		(SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));

	writel(task_assignment,
	       &scic->smu_registers->task_context_assignment[0]);
}

static void scic_sds_controller_initialize_completion_queue(struct scic_sds_controller *scic)
{
	u32 index;
	u32 completion_queue_control_value;
	u32 completion_queue_get_value;
	u32 completion_queue_put_value;

	scic->completion_queue_get = 0;

	completion_queue_control_value =
		(SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
		 SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));

	writel(completion_queue_control_value,
	       &scic->smu_registers->completion_queue_control);

	/* Set the completion queue get pointer and enable the queue */
	completion_queue_get_value = (
		(SMU_CQGR_GEN_VAL(POINTER, 0))
		| (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
		| (SMU_CQGR_GEN_BIT(ENABLE))
		| (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
		);

	writel(completion_queue_get_value,
	       &scic->smu_registers->completion_queue_get);

	/* Set the completion queue put pointer */
	completion_queue_put_value = (
		(SMU_CQPR_GEN_VAL(POINTER, 0))
		| (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
		);

	writel(completion_queue_put_value,
	       &scic->smu_registers->completion_queue_put);

	/* Initialize the cycle bit of the completion queue entries */
	for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
		/*
		 * If get.cycle_bit != completion_queue.cycle_bit
		 * it's not a valid completion queue entry,
		 * so at system start all entries are invalid */
		scic->completion_queue[index] = 0x80000000;
	}
}

static void scic_sds_controller_initialize_unsolicited_frame_queue(struct scic_sds_controller *scic)
{
	u32 frame_queue_control_value;
	u32 frame_queue_get_value;
	u32 frame_queue_put_value;

	/* Write the queue size */
	frame_queue_control_value =
		SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);

	writel(frame_queue_control_value,
	       &scic->scu_registers->sdma.unsolicited_frame_queue_control);

	/* Setup the get pointer for the unsolicited frame queue */
	frame_queue_get_value = (
		SCU_UFQGP_GEN_VAL(POINTER, 0)
		| SCU_UFQGP_GEN_BIT(ENABLE_BIT)
		);

	writel(frame_queue_get_value,
	       &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
	/* Setup the put pointer for the unsolicited frame queue */
	frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
	writel(frame_queue_put_value,
	       &scic->scu_registers->sdma.unsolicited_frame_put_pointer);
}

/**
 * This method will attempt to transition into the ready state for the
 *    controller and indicate that the controller start operation has completed
 *    if all criteria are met.
 * @scic: This parameter indicates the controller object for which
 *    to transition to ready.
 * @status: This parameter indicates the status value to be passed into the call
 *    to scic_cb_controller_start_complete().
 *
 */
static void scic_sds_controller_transition_to_ready(
	struct scic_sds_controller *scic,
	enum sci_status status)
{
	struct isci_host *ihost = scic_to_ihost(scic);

	if (scic->sm.current_state_id == SCIC_STARTING) {
		/*
		 * We move into the ready state, because some of the phys/ports
		 * may be up and operational.
		 */
		sci_change_state(&scic->sm, SCIC_READY);

		isci_host_start_complete(ihost, status);
	}
}

static bool is_phy_starting(struct scic_sds_phy *sci_phy)
{
	enum scic_sds_phy_states state;

	state = sci_phy->sm.current_state_id;
	switch (state) {
	case SCI_PHY_STARTING:
	case SCI_PHY_SUB_INITIAL:
	case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
	case SCI_PHY_SUB_AWAIT_IAF_UF:
	case SCI_PHY_SUB_AWAIT_SAS_POWER:
	case SCI_PHY_SUB_AWAIT_SATA_POWER:
	case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
	case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
	case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
	case SCI_PHY_SUB_FINAL:
		return true;
	default:
		return false;
	}
}

/**
 * scic_sds_controller_start_next_phy - start phy
 * @scic: controller
 *
 * If all the phys have been started, then attempt to transition the
 * controller to the READY state and inform the user
 * (scic_cb_controller_start_complete()).
 */
static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_controller *scic)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
	struct scic_sds_phy *sci_phy;
	enum sci_status status;

	status = SCI_SUCCESS;

	if (scic->phy_startup_timer_pending)
		return status;

	if (scic->next_phy_to_start >= SCI_MAX_PHYS) {
		bool is_controller_start_complete = true;
		u32 state;
		u8 index;

		for (index = 0; index < SCI_MAX_PHYS; index++) {
			sci_phy = &ihost->phys[index].sci;
			state = sci_phy->sm.current_state_id;

			if (!phy_get_non_dummy_port(sci_phy))
				continue;

			/* The controller start operation is complete iff:
			 * - all links have been given an opportunity to start
			 * - have no indication of a connected device
			 * - have an indication of a connected device and it has
			 *   finished the link training process.
			 */
			if ((sci_phy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
			    (sci_phy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
			    (sci_phy->is_in_link_training == true && is_phy_starting(sci_phy))) {
				is_controller_start_complete = false;
				break;
			}
		}

		/*
		 * The controller has successfully finished the start process.
		 * Inform the SCI Core user and transition to the READY state. */
		if (is_controller_start_complete == true) {
			scic_sds_controller_transition_to_ready(scic, SCI_SUCCESS);
			sci_del_timer(&scic->phy_timer);
			scic->phy_startup_timer_pending = false;
		}
	} else {
		sci_phy = &ihost->phys[scic->next_phy_to_start].sci;

		if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
			if (phy_get_non_dummy_port(sci_phy) == NULL) {
				scic->next_phy_to_start++;

				/* Caution, recursion ahead, be forewarned.
				 *
				 * The PHY was never added to a PORT in MPC mode
				 * so start the next phy in sequence.  This phy
				 * will never go link up and will not draw power;
				 * the OEM parameters either configured the phy
				 * incorrectly for the PORT or it was never
				 * assigned to a PORT.
				 */
				return scic_sds_controller_start_next_phy(scic);
			}
		}

		status = scic_sds_phy_start(sci_phy);

		if (status == SCI_SUCCESS) {
			sci_mod_timer(&scic->phy_timer,
				      SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
			scic->phy_startup_timer_pending = true;
		} else {
			dev_warn(scic_to_dev(scic),
				 "%s: Controller start operation failed "
				 "to start phy %d because of status "
				 "%d.\n",
				 __func__,
				 ihost->phys[scic->next_phy_to_start].sci.phy_index,
				 status);
		}

		scic->next_phy_to_start++;
	}

	return status;
}

static void phy_startup_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), phy_timer);
	struct isci_host *ihost = scic_to_ihost(scic);
	unsigned long flags;
	enum sci_status status;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	scic->phy_startup_timer_pending = false;

	do {
		status = scic_sds_controller_start_next_phy(scic);
	} while (status != SCI_SUCCESS);

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
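
/* The TCi (task context index) free pool is kept in a small circular buffer:
 * isci_tci_free() pushes a freed index at the tail, isci_tci_alloc() pops
 * from the head, and the CIRC_CNT/CIRC_SPACE helpers report how many indices
 * are in flight or still available.  SCI_MAX_IO_REQUESTS must be a power of
 * two for the masking below to work.
 */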
static void isci_tci_free(struct isci_host *ihost, u16 tci)
{
	u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);

	ihost->tci_pool[tail] = tci;
	ihost->tci_tail = tail + 1;
}

static u16 isci_tci_alloc(struct isci_host *ihost)
{
	u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
	u16 tci = ihost->tci_pool[head];

	ihost->tci_head = head + 1;
	return tci;
}

static u16 isci_tci_active(struct isci_host *ihost)
{
	return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
}

static u16 isci_tci_space(struct isci_host *ihost)
{
	return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
}
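
/* Bring the controller from the INITIALIZED state into STARTING: build the
 * TCi and RNi free pools, program the scheduler and queues, start every
 * logical port, kick off the staggered phy start sequence, and arm the start
 * timeout timer supplied by the caller.
 */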
static enum sci_status scic_controller_start(struct scic_sds_controller *scic,
					     u32 timeout)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	enum sci_status result;
	u16 index;

	if (scic->sm.current_state_id != SCIC_INITIALIZED) {
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller start operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Build the TCi free pool */
	BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
	ihost->tci_head = 0;
	ihost->tci_tail = 0;
	for (index = 0; index < scic->task_context_entries; index++)
		isci_tci_free(ihost, index);

	/* Build the RNi free pool */
	scic_sds_remote_node_table_initialize(
			&scic->available_remote_nodes,
			scic->remote_node_entries);

	/*
	 * Before anything else let's make sure we will not be
	 * interrupted by the hardware.
	 */
	scic_controller_disable_interrupts(scic);

	/* Enable the port task scheduler */
	scic_sds_controller_enable_port_task_scheduler(scic);

	/* Assign all the task entries to scic physical function */
	scic_sds_controller_assign_task_entries(scic);

	/* Now initialize the completion queue */
	scic_sds_controller_initialize_completion_queue(scic);

	/* Initialize the unsolicited frame queue for use */
	scic_sds_controller_initialize_unsolicited_frame_queue(scic);

	/* Start all of the ports on this controller */
	for (index = 0; index < scic->logical_port_entries; index++) {
		struct scic_sds_port *sci_port = &ihost->ports[index].sci;

		result = scic_sds_port_start(sci_port);
		if (result)
			return result;
	}

	scic_sds_controller_start_next_phy(scic);

	sci_mod_timer(&scic->timer, timeout);

	sci_change_state(&scic->sm, SCIC_STARTING);

	return SCI_SUCCESS;
}

void isci_host_scan_start(struct Scsi_Host *shost)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
	unsigned long tmo = scic_controller_get_suggested_start_timeout(&ihost->sci);

	set_bit(IHOST_START_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	scic_controller_start(&ihost->sci, tmo);
	scic_controller_enable_interrupts(&ihost->sci);
	spin_unlock_irq(&ihost->scic_lock);
}

static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	isci_host_change_state(ihost, isci_stopped);
	scic_controller_disable_interrupts(&ihost->sci);
	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

static void scic_sds_controller_completion_handler(struct scic_sds_controller *scic)
{
	/* Empty out the completion queue */
	if (scic_sds_controller_completion_queue_has_entries(scic))
		scic_sds_controller_process_completions(scic);

	/* Clear the interrupt and enable all interrupts again */
	writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
	/* Could we write the value of SMU_ISR_COMPLETION? */
	writel(0xFF000000, &scic->smu_registers->interrupt_mask);
	writel(0, &scic->smu_registers->interrupt_mask);
}

/**
 * isci_host_completion_routine() - This function is the delayed service
 *    routine that calls the sci core library's completion handler. It's
 *    scheduled as a tasklet from the interrupt service routine when interrupts
 *    are in use, or set as the timeout function in polled mode.
 * @data: This parameter specifies the ISCI host object
 *
 */
static void isci_host_completion_routine(unsigned long data)
{
	struct isci_host *isci_host = (struct isci_host *)data;
	struct list_head    completed_request_list;
	struct list_head    errored_request_list;
	struct list_head    *current_position;
	struct list_head    *next_position;
	struct isci_request *request;
	struct isci_request *next_request;
	struct sas_task     *task;

	INIT_LIST_HEAD(&completed_request_list);
	INIT_LIST_HEAD(&errored_request_list);

	spin_lock_irq(&isci_host->scic_lock);

	scic_sds_controller_completion_handler(&isci_host->sci);

	/* Take the lists of completed I/Os from the host. */
	list_splice_init(&isci_host->requests_to_complete,
			 &completed_request_list);

	/* Take the list of errored I/Os from the host. */
	list_splice_init(&isci_host->requests_to_errorback,
			 &errored_request_list);

	spin_unlock_irq(&isci_host->scic_lock);

	/* Process any completions in the lists. */
	list_for_each_safe(current_position, next_position,
			   &completed_request_list) {

		request = list_entry(current_position, struct isci_request,
				     completed_node);
		task = isci_request_access_task(request);

		/* Normal notification (task_done) */
		dev_dbg(&isci_host->pdev->dev,
			"%s: Normal - request/task = %p/%p\n",
			__func__, request, task);

		/* Return the task to libsas */
		if (task != NULL) {

			task->lldd_task = NULL;
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {

				/* If the task is already in the abort path,
				 * the task_done callback cannot be called.
				 */
				task->task_done(task);
			}
		}
		/* Free the request object. */
		isci_request_free(isci_host, request);
	}
	list_for_each_entry_safe(request, next_request, &errored_request_list,
				 completed_node) {

		task = isci_request_access_task(request);

		/* Use sas_task_abort */
		dev_warn(&isci_host->pdev->dev,
			 "%s: Error - request/task = %p/%p\n",
			 __func__, request, task);

		if (task != NULL) {

			/* Put the task into the abort path if it's not there
			 * already.
			 */
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
				sas_task_abort(task);

		} else {
			/* This is a case where the request has completed with a
			 * status such that it needed further target servicing,
			 * but the sas_task reference has already been removed
			 * from the request.  Since it was errored, it was not
			 * being aborted, so there is nothing to do except free
			 * it.
			 */

			spin_lock_irq(&isci_host->scic_lock);
			/* Remove the request from the remote device's list
			 * of pending requests.
			 */
			list_del_init(&request->dev_node);
			spin_unlock_irq(&isci_host->scic_lock);

			/* Free the request object. */
			isci_request_free(isci_host, request);
		}
	}
}

/**
 * scic_controller_stop() - This method will stop an individual controller
 *    object.  This method will invoke the associated user callback upon
 *    completion.  The completion callback is called when the following
 *    conditions are met: -# the method return status is SCI_SUCCESS. -# the
 *    controller has been quiesced. This method will ensure that all IO
 *    requests are quiesced, phys are stopped, and all additional operation by
 *    the hardware is halted.
 * @controller: the handle to the controller object to stop.
 * @timeout: This parameter specifies the number of milliseconds in which the
 *    stop operation should complete.
 *
 * The controller must be in the STARTED or STOPPED state. Indicate if the
 * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
 * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
 * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
 * controller is not either in the STARTED or STOPPED states.
 */
static enum sci_status scic_controller_stop(struct scic_sds_controller *scic,
					    u32 timeout)
{
	if (scic->sm.current_state_id != SCIC_READY) {
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller stop operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_mod_timer(&scic->timer, timeout);
	sci_change_state(&scic->sm, SCIC_STOPPING);
	return SCI_SUCCESS;
}

/**
 * scic_controller_reset() - This method will reset the supplied core
 *    controller regardless of the state of said controller.  This operation is
 *    considered destructive.  In other words, all current operations are wiped
 *    out.  No IO completions for outstanding devices occur.  Outstanding IO
 *    requests are not aborted or completed at the actual remote device.
 * @controller: the handle to the controller object to reset.
 *
 * Indicate if the controller reset method succeeded or failed in some way.
 * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
 * the controller reset operation is unable to complete.
 */
static enum sci_status scic_controller_reset(struct scic_sds_controller *scic)
{
	switch (scic->sm.current_state_id) {
	case SCIC_RESET:
	case SCIC_READY:
	case SCIC_STOPPED:
	case SCIC_FAILED:
		/*
		 * The reset operation is not a graceful cleanup, just
		 * perform the state transition.
		 */
		sci_change_state(&scic->sm, SCIC_RESETTING);
		return SCI_SUCCESS;
	default:
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller reset operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}
}

void isci_host_deinit(struct isci_host *ihost)
{
	int i;

	isci_host_change_state(ihost, isci_stopping);
	for (i = 0; i < SCI_MAX_PORTS; i++) {
		struct isci_port *iport = &ihost->ports[i];
		struct isci_remote_device *idev, *d;

		list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
			isci_remote_device_change_state(idev, isci_stopping);
			isci_remote_device_stop(ihost, idev);
		}
	}

	set_bit(IHOST_STOP_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	scic_controller_stop(&ihost->sci, SCIC_CONTROLLER_STOP_TIMEOUT);
	spin_unlock_irq(&ihost->scic_lock);

	wait_for_stop(ihost);
	scic_controller_reset(&ihost->sci);

	/* Cancel any/all outstanding port timers */
	for (i = 0; i < ihost->sci.logical_port_entries; i++) {
		struct scic_sds_port *sci_port = &ihost->ports[i].sci;
		del_timer_sync(&sci_port->timer.timer);
	}

	/* Cancel any/all outstanding phy timers */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct scic_sds_phy *sci_phy = &ihost->phys[i].sci;
		del_timer_sync(&sci_phy->sata_timer.timer);
	}

	del_timer_sync(&ihost->sci.port_agent.timer.timer);

	del_timer_sync(&ihost->sci.power_control.timer.timer);

	del_timer_sync(&ihost->sci.timer.timer);

	del_timer_sync(&ihost->sci.phy_timer.timer);
}
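
/* The SCU and SMU register blocks for each controller instance live at an
 * id-sized offset inside their respective mapped PCI BARs; these helpers
 * compute the per-controller base address from the pcim iomap table.
 */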
static void __iomem *scu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
}

static void __iomem *smu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
}

static void isci_user_parameters_get(
		struct isci_host *isci_host,
		union scic_user_parameters *scic_user_params)
{
	struct scic_sds_user_parameters *u = &scic_user_params->sds1;
	int i;

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct sci_phy_user_params *u_phy = &u->phys[i];

		u_phy->max_speed_generation = phy_gen;

		/* we are not exporting these for now */
		u_phy->align_insertion_frequency = 0x7f;
		u_phy->in_connection_align_insertion_frequency = 0xff;
		u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
	}

	u->stp_inactivity_timeout = stp_inactive_to;
	u->ssp_inactivity_timeout = ssp_inactive_to;
	u->stp_max_occupancy_timeout = stp_max_occ_to;
	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
	u->no_outbound_task_timeout = no_outbound_task_to;
	u->max_number_concurrent_device_spin_up = max_concurr_spinup;
}

static void scic_sds_controller_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);

	sci_change_state(&scic->sm, SCIC_RESET);
}

static inline void scic_sds_controller_starting_state_exit(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);

	sci_del_timer(&scic->timer);
}

#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
#define INTERRUPT_COALESCE_TIMEOUT_MAX_US                    2700000
#define INTERRUPT_COALESCE_NUMBER_MAX                        256
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN                7
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX                28

/**
 * scic_controller_set_interrupt_coalescence() - This method allows the user to
 *    configure the interrupt coalescence.
 * @controller: This parameter represents the handle to the controller object
 *    for which its interrupt coalesce register is overridden.
 * @coalesce_number: Used to control the number of entries in the Completion
 *    Queue before an interrupt is generated. If the number of entries exceeds
 *    this number, an interrupt will be generated. The valid range of the input
 *    is [0, 256]. A setting of 0 results in coalescing being disabled.
 * @coalesce_timeout: Timeout value in microseconds. The valid range of the
 *    input is [0, 2700000]. A setting of 0 is allowed and results in no
 *    interrupt coalescing timeout.
 *
 * Indicate if the user successfully set the interrupt coalesce parameters.
 * SCI_SUCCESS The user successfully updated the interrupt coalescence.
 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
 */
static enum sci_status scic_controller_set_interrupt_coalescence(
	struct scic_sds_controller *scic_controller,
	u32 coalesce_number,
	u32 coalesce_timeout)
{
	u8 timeout_encode = 0;
	u32 min = 0;
	u32 max = 0;

	/* Check if the input parameters fall in the range. */
	if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
		return SCI_FAILURE_INVALID_PARAMETER_VALUE;

	/*
	 *  Defined encoding for interrupt coalescing timeout:
	 *              Value   Min     Max     Units
	 *              -----   ---     ---     -----
	 *              Others  Undefined */

	/*
	 *  Use the table above to decide the encode of interrupt coalescing timeout
	 *  value for register writing. */
	if (coalesce_timeout == 0)
		timeout_encode = 0;
	else {
		/* make the timeout value in unit of (10 ns). */
		coalesce_timeout = coalesce_timeout * 100;
		min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
		max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;

		/* get the encode of timeout for register writing. */
		for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
		     timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
		     timeout_encode++) {
			if (min <= coalesce_timeout && max > coalesce_timeout)
				break;
			else if (coalesce_timeout >= max && coalesce_timeout < min * 2
				 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
				if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
					break;
				else {
					timeout_encode++;
					break;
				}
			} else {
				max = max * 2;
				min = min * 2;
			}
		}

		if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
			/* the value is out of range. */
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
	}

	writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
	       SMU_ICC_GEN_VAL(TIMER, timeout_encode),
	       &scic_controller->smu_registers->interrupt_coalesce_control);

	scic_controller->interrupt_coalesce_number = (u16)coalesce_number;
	scic_controller->interrupt_coalesce_timeout = coalesce_timeout / 100;

	return SCI_SUCCESS;
}

static void scic_sds_controller_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);

	/* set the default interrupt coalescence number and timeout value. */
	scic_controller_set_interrupt_coalescence(scic, 0x10, 250);
}

static void scic_sds_controller_ready_state_exit(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);

	/* disable interrupt coalescence. */
	scic_controller_set_interrupt_coalescence(scic, 0, 0);
}

static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller *scic)
{
	u32 index;
	enum sci_status status;
	enum sci_status phy_status;
	struct isci_host *ihost = scic_to_ihost(scic);

	status = SCI_SUCCESS;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		phy_status = scic_sds_phy_stop(&ihost->phys[index].sci);

		if (phy_status != SCI_SUCCESS &&
		    phy_status != SCI_FAILURE_INVALID_STATE) {
			status = SCI_FAILURE;

			dev_warn(scic_to_dev(scic),
				 "%s: Controller stop operation failed to stop "
				 "phy %d because of status %d.\n",
				 __func__,
				 ihost->phys[index].sci.phy_index, phy_status);
		}
	}

	return status;
}

static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller *scic)
{
	u32 index;
	enum sci_status port_status;
	enum sci_status status = SCI_SUCCESS;
	struct isci_host *ihost = scic_to_ihost(scic);

	for (index = 0; index < scic->logical_port_entries; index++) {
		struct scic_sds_port *sci_port = &ihost->ports[index].sci;

		port_status = scic_sds_port_stop(sci_port);

		if ((port_status != SCI_SUCCESS) &&
		    (port_status != SCI_FAILURE_INVALID_STATE)) {
			status = SCI_FAILURE;

			dev_warn(scic_to_dev(scic),
				 "%s: Controller stop operation failed to "
				 "stop port %d because of status %d.\n",
				 __func__,
				 sci_port->logical_port_index,
				 port_status);
		}
	}

	return status;
}

static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controller *scic)
{
	u32 index;
	enum sci_status status;
	enum sci_status device_status;

	status = SCI_SUCCESS;

	for (index = 0; index < scic->remote_node_entries; index++) {
		if (scic->device_table[index] != NULL) {
			/* @todo What timeout value do we want to provide to this request? */
			device_status = scic_remote_device_stop(scic->device_table[index], 0);

			if ((device_status != SCI_SUCCESS) &&
			    (device_status != SCI_FAILURE_INVALID_STATE)) {
				dev_warn(scic_to_dev(scic),
					 "%s: Controller stop operation failed "
					 "to stop device 0x%p because of "
					 "status %d.\n",
					 __func__,
					 scic->device_table[index], device_status);
			}
		}
	}

	return status;
}

static void scic_sds_controller_stopping_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);

	/* Stop all of the components for this controller */
	scic_sds_controller_stop_phys(scic);
	scic_sds_controller_stop_ports(scic);
	scic_sds_controller_stop_devices(scic);
}

static void scic_sds_controller_stopping_state_exit(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);

	sci_del_timer(&scic->timer);
}

/**
 * scic_sds_controller_reset_hardware() -
 *
 * This method will reset the controller hardware.
 */
static void scic_sds_controller_reset_hardware(struct scic_sds_controller *scic)
{
	/* Disable interrupts so we don't take any spurious interrupts */
	scic_controller_disable_interrupts(scic);

	/* Reset the SCU */
	writel(0xFFFFFFFF, &scic->smu_registers->soft_reset_control);

	/* Delay for 1ms before clearing the CQP and UFQPR. */
	udelay(1000);

	/* The write to the CQGR clears the CQP */
	writel(0x00000000, &scic->smu_registers->completion_queue_get);

	/* The write to the UFQGP clears the UFQPR */
	writel(0, &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
}

static void scic_sds_controller_resetting_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);

	scic_sds_controller_reset_hardware(scic);
	sci_change_state(&scic->sm, SCIC_RESET);
}

static const struct sci_base_state scic_sds_controller_state_table[] = {
	[SCIC_INITIAL] = {
		.enter_state = scic_sds_controller_initial_state_enter,
	},
	[SCIC_RESET] = {},
	[SCIC_INITIALIZING] = {},
	[SCIC_INITIALIZED] = {},
	[SCIC_STARTING] = {
		.exit_state  = scic_sds_controller_starting_state_exit,
	},
	[SCIC_READY] = {
		.enter_state = scic_sds_controller_ready_state_enter,
		.exit_state  = scic_sds_controller_ready_state_exit,
	},
	[SCIC_RESETTING] = {
		.enter_state = scic_sds_controller_resetting_state_enter,
	},
	[SCIC_STOPPING] = {
		.enter_state = scic_sds_controller_stopping_state_enter,
		.exit_state = scic_sds_controller_stopping_state_exit,
	},
	[SCIC_STOPPED] = {},
	[SCIC_FAILED] = {}
};

static void scic_sds_controller_set_default_config_parameters(struct scic_sds_controller *scic)
{
	/* these defaults are overridden by the platform / firmware */
	struct isci_host *ihost = scic_to_ihost(scic);
	u16 index;

	/* Default to APC mode. */
	scic->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;

	/* Default to 1 concurrent device spin up. */
	scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1;

	/* Default to no SSC operation. */
	scic->oem_parameters.sds1.controller.do_enable_ssc = false;

	/* Initialize all of the port parameter information to narrow ports. */
	for (index = 0; index < SCI_MAX_PORTS; index++) {
		scic->oem_parameters.sds1.ports[index].phy_mask = 0;
	}

	/* Initialize all of the phy parameter information. */
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		/* Default to 6G (i.e. Gen 3) for now. */
		scic->user_parameters.sds1.phys[index].max_speed_generation = 3;

		/* the frequencies cannot be 0 */
		scic->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f;
		scic->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff;
		scic->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;

		/*
		 * Previous Vitesse based expanders had an arbitration issue that
		 * is worked around by having the upper 32-bits of SAS address
		 * with a value greater than the Vitesse company identifier.
		 * Hence, usage of 0x5FCFFFFF. */
		scic->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id;
		scic->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF;
	}

	scic->user_parameters.sds1.stp_inactivity_timeout = 5;
	scic->user_parameters.sds1.ssp_inactivity_timeout = 5;
	scic->user_parameters.sds1.stp_max_occupancy_timeout = 5;
	scic->user_parameters.sds1.ssp_max_occupancy_timeout = 20;
	scic->user_parameters.sds1.no_outbound_task_timeout = 20;
}

static void controller_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), timer);
	struct isci_host *ihost = scic_to_ihost(scic);
	struct sci_base_state_machine *sm = &scic->sm;
	unsigned long flags;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	if (sm->current_state_id == SCIC_STARTING)
		scic_sds_controller_transition_to_ready(scic, SCI_FAILURE_TIMEOUT);
	else if (sm->current_state_id == SCIC_STOPPING) {
		sci_change_state(sm, SCIC_FAILED);
		isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
	} else	/* @todo Now what do we want to do in this case? */
		dev_err(scic_to_dev(scic),
			"%s: Controller timer fired when controller was not "
			"in a state being timed.\n",
			__func__);

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

/**
 * scic_controller_construct() - This method will attempt to construct a
 *    controller object utilizing the supplied parameter information.
 * @c: This parameter specifies the controller to be constructed.
 * @scu_base: mapped base address of the scu registers
 * @smu_base: mapped base address of the smu registers
 *
 * Indicate if the controller was successfully constructed or if it failed in
 * some way. SCI_SUCCESS This value is returned if the controller was
 * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned
 * if the interrupt coalescence timer may cause SAS compliance issues for SMP
 * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE
 * This value is returned if the controller does not support the supplied type.
 * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the
 * controller does not support the supplied initialization data version.
 */
static enum sci_status scic_controller_construct(struct scic_sds_controller *scic,
						 void __iomem *scu_base,
						 void __iomem *smu_base)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	u8 i;

	sci_init_sm(&scic->sm, scic_sds_controller_state_table, SCIC_INITIAL);

	scic->scu_registers = scu_base;
	scic->smu_registers = smu_base;

	scic_sds_port_configuration_agent_construct(&scic->port_agent);

	/* Construct the ports for this controller */
	for (i = 0; i < SCI_MAX_PORTS; i++)
		scic_sds_port_construct(&ihost->ports[i].sci, i, scic);
	scic_sds_port_construct(&ihost->ports[i].sci, SCIC_SDS_DUMMY_PORT, scic);

	/* Construct the phys for this controller */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		/* Add all the PHYs to the dummy port */
		scic_sds_phy_construct(&ihost->phys[i].sci,
				       &ihost->ports[SCI_MAX_PORTS].sci, i);
	}

	scic->invalid_phy_mask = 0;

	sci_init_timer(&scic->timer, controller_timeout);

	/* Initialize the User and OEM parameters to default values. */
	scic_sds_controller_set_default_config_parameters(scic);

	return scic_controller_reset(scic);
}

int scic_oem_parameters_validate(struct scic_sds_oem_params *oem)
{
	int i;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
			return -EINVAL;

	for (i = 0; i < SCI_MAX_PHYS; i++)
		if (oem->phys[i].sas_address.high == 0 &&
		    oem->phys[i].sas_address.low == 0)
			return -EINVAL;

	if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
		for (i = 0; i < SCI_MAX_PHYS; i++)
			if (oem->ports[i].phy_mask != 0)
				return -EINVAL;
	} else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
		u8 phy_mask = 0;

		for (i = 0; i < SCI_MAX_PHYS; i++)
			phy_mask |= oem->ports[i].phy_mask;

		if (phy_mask == 0)
			return -EINVAL;
	} else
		return -EINVAL;

	if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
		return -EINVAL;

	return 0;
}

static enum sci_status scic_oem_parameters_set(struct scic_sds_controller *scic,
					       union scic_oem_parameters *scic_parms)
{
	u32 state = scic->sm.current_state_id;

	if (state == SCIC_RESET ||
	    state == SCIC_INITIALIZING ||
	    state == SCIC_INITIALIZED) {

		if (scic_oem_parameters_validate(&scic_parms->sds1))
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
		scic->oem_parameters.sds1 = scic_parms->sds1;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INVALID_STATE;
}

void scic_oem_parameters_get(
	struct scic_sds_controller *scic,
	union scic_oem_parameters *scic_parms)
{
	memcpy(scic_parms, (&scic->oem_parameters), sizeof(*scic_parms));
}
static void power_control_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), power_control.timer);
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scic_sds_phy *sci_phy;
	unsigned long flags;
	u8 i;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	scic->power_control.phys_granted_power = 0;

	if (scic->power_control.phys_waiting == 0) {
		scic->power_control.timer_started = false;
		goto done;
	}

	for (i = 0; i < SCI_MAX_PHYS; i++) {

		if (scic->power_control.phys_waiting == 0)
			break;

		sci_phy = scic->power_control.requesters[i];
		if (sci_phy == NULL)
			continue;

		if (scic->power_control.phys_granted_power >=
		    scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up)
			break;

		scic->power_control.requesters[i] = NULL;
		scic->power_control.phys_waiting--;
		scic->power_control.phys_granted_power++;
		scic_sds_phy_consume_power_handler(sci_phy);
	}

	/*
	 * It doesn't matter if the power list is empty, we need to start the
	 * timer in case another phy becomes ready.
	 */
	sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
	scic->power_control.timer_started = true;

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
/*
 * This method inserts the phy in the stagger spinup control queue.
 */
void scic_sds_controller_power_control_queue_insert(
	struct scic_sds_controller *scic,
	struct scic_sds_phy *sci_phy)
{
	BUG_ON(sci_phy == NULL);

	if (scic->power_control.phys_granted_power <
	    scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
		scic->power_control.phys_granted_power++;
		scic_sds_phy_consume_power_handler(sci_phy);

		/*
		 * stop and start the power_control timer. When the timer fires,
		 * the no_of_phys_granted_power will be set to 0
		 */
		if (scic->power_control.timer_started)
			sci_del_timer(&scic->power_control.timer);

		sci_mod_timer(&scic->power_control.timer,
			      SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
		scic->power_control.timer_started = true;
	} else {
		/* Add the phy in the waiting list */
		scic->power_control.requesters[sci_phy->phy_index] = sci_phy;
		scic->power_control.phys_waiting++;
	}
}
/*
 * This method removes the phy from the stagger spinup control queue.
 */
void scic_sds_controller_power_control_queue_remove(
	struct scic_sds_controller *scic,
	struct scic_sds_phy *sci_phy)
{
	BUG_ON(sci_phy == NULL);

	if (scic->power_control.requesters[sci_phy->phy_index] != NULL) {
		scic->power_control.phys_waiting--;
	}

	scic->power_control.requesters[sci_phy->phy_index] = NULL;
}
#define AFE_REGISTER_WRITE_DELAY 10

/* Initialize the AFE for this phy index. We need to read the AFE setup from
 * the OEM parameters
 */
static void scic_sds_controller_afe_initialization(struct scic_sds_controller *scic)
{
	const struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
	u32 afe_status;
	u32 phy_id;

	/* Clear DFX Status registers */
	writel(0x0081000f, &scic->scu_registers->afe.afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);

	if (is_b0()) {
		/* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
		 * Timer, PM Stagger Timer */
		writel(0x0007BFFF, &scic->scu_registers->afe.afe_pmsn_master_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Configure bias currents to normal */
	if (is_a0())
		writel(0x00005500, &scic->scu_registers->afe.afe_bias_control);
	else if (is_a2())
		writel(0x00005A00, &scic->scu_registers->afe.afe_bias_control);
	else if (is_b0() || is_c0())
		writel(0x00005F00, &scic->scu_registers->afe.afe_bias_control);

	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Enable PLL */
	if (is_b0() || is_c0())
		writel(0x80040A08, &scic->scu_registers->afe.afe_pll_control0);
	else
		writel(0x80040908, &scic->scu_registers->afe.afe_pll_control0);

	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Wait for the PLL to lock */
	do {
		afe_status = readl(&scic->scu_registers->afe.afe_common_block_status);
		udelay(AFE_REGISTER_WRITE_DELAY);
	} while ((afe_status & 0x00001000) == 0);

	if (is_a0() || is_a2()) {
		/* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
		writel(0x7bcc96ad, &scic->scu_registers->afe.afe_pmsn_master_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
		const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];

		if (is_b0()) {
			/* Configure transmitter SSC parameters */
			writel(0x00030000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		} else if (is_c0()) {
			/* Configure transmitter SSC parameters */
			writel(0x0003000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * All defaults, except the Receive Word Alignment/Comma Detect
			 * Enable....(0xe800) */
			writel(0x00004500, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);
		} else {
			/*
			 * All defaults, except the Receive Word Alignment/Comma Detect
			 * Enable....(0xe800) */
			writel(0x00004512, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(0x0050100F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
		 * & increase TX int & ext bias 20%....(0xe85c) */
		if (is_a0())
			writel(0x000003D4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		else if (is_a2())
			writel(0x000003F0, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		else if (is_b0()) {
			/* Power down TX and RX (PWRDNTX and PWRDNRX) */
			writel(0x000003D7, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
			 * & increase TX int & ext bias 20%....(0xe85c) */
			writel(0x000003D4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		} else {
			writel(0x000001E7, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
			 * & increase TX int & ext bias 20%....(0xe85c) */
			writel(0x000001E4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		}
		udelay(AFE_REGISTER_WRITE_DELAY);

		if (is_a0() || is_a2()) {
			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
		 * RDD=0x0(RX Detect Enabled) ....(0xe800) */
		writel(0x00004100, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);

		/* Leave DFE/FFE on */
		if (is_a0())
			writel(0x3F09983F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
		else if (is_a2())
			writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
		else if (is_b0()) {
			writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);
			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
		} else {
			writel(0x0140DF0F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(0x3F6F103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
		}
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control0,
			&scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control1,
			&scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control2,
			&scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control3,
			&scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Transfer control to the PEs */
	writel(0x00010f00, &scic->scu_registers->afe.afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);
}
static void scic_sds_controller_initialize_power_control(struct scic_sds_controller *scic)
{
	sci_init_timer(&scic->power_control.timer, power_control_timeout);

	memset(scic->power_control.requesters, 0,
	       sizeof(scic->power_control.requesters));

	scic->power_control.phys_waiting = 0;
	scic->power_control.phys_granted_power = 0;
}
static enum sci_status scic_controller_initialize(struct scic_sds_controller *scic)
{
	struct sci_base_state_machine *sm = &scic->sm;
	struct isci_host *ihost = scic_to_ihost(scic);
	enum sci_status result = SCI_FAILURE;
	unsigned long i, state, val;

	if (scic->sm.current_state_id != SCIC_RESET) {
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller initialize operation requested "
			 "in invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_change_state(sm, SCIC_INITIALIZING);

	sci_init_timer(&scic->phy_timer, phy_startup_timeout);

	scic->next_phy_to_start = 0;
	scic->phy_startup_timer_pending = false;

	scic_sds_controller_initialize_power_control(scic);

	/*
	 * There is nothing to do here for B0 since we do not have to
	 * program the AFE registers.
	 * @todo The AFE settings are supposed to be correct for the B0 but
	 *       presently they seem to be wrong.
	 */
	scic_sds_controller_afe_initialization(scic);

	/* Take the hardware out of reset */
	writel(0, &scic->smu_registers->soft_reset_control);

	/*
	 * @todo Provide a meaningful error code for hardware failure
	 *       result = SCI_FAILURE_CONTROLLER_HARDWARE;
	 */
	for (i = 100; i >= 1; i--) {
		u32 status;

		/* Loop until the hardware reports success */
		udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
		status = readl(&scic->smu_registers->control_status);

		if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
			break;
	}

	/*
	 * Determine what are the actual device capacities that the
	 * hardware will support */
	val = readl(&scic->smu_registers->device_context_capacity);

	/* Record the smaller of the two capacity values */
	scic->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
	scic->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
	scic->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);

	/*
	 * Make all PEs that are unassigned match up with the
	 * logical ports
	 */
	for (i = 0; i < scic->logical_port_entries; i++) {
		struct scu_port_task_scheduler_group_registers __iomem
			*ptsg = &scic->scu_registers->peg0.ptsg;

		writel(i, &ptsg->protocol_engine[i]);
	}

	/* Initialize hardware PCI Relaxed ordering in DMA engines */
	val = readl(&scic->scu_registers->sdma.pdma_configuration);
	val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
	writel(val, &scic->scu_registers->sdma.pdma_configuration);

	val = readl(&scic->scu_registers->sdma.cdma_configuration);
	val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
	writel(val, &scic->scu_registers->sdma.cdma_configuration);

	/*
	 * Initialize the PHYs before the PORTs because the PHY registers
	 * are accessed during the port initialization.
	 */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		result = scic_sds_phy_initialize(&ihost->phys[i].sci,
						 &scic->scu_registers->peg0.pe[i].tl,
						 &scic->scu_registers->peg0.pe[i].ll);
		if (result != SCI_SUCCESS)
			goto out;
	}

	for (i = 0; i < scic->logical_port_entries; i++) {
		result = scic_sds_port_initialize(&ihost->ports[i].sci,
						  &scic->scu_registers->peg0.ptsg.port[i],
						  &scic->scu_registers->peg0.ptsg.protocol_engine,
						  &scic->scu_registers->peg0.viit[i]);

		if (result != SCI_SUCCESS)
			goto out;
	}

	result = scic_sds_port_configuration_agent_initialize(scic, &scic->port_agent);

 out:
	/* Advance the controller state machine */
	if (result == SCI_SUCCESS)
		state = SCIC_INITIALIZED;
	else
		state = SCIC_FAILED;
	sci_change_state(sm, state);

	return result;
}
static enum sci_status scic_user_parameters_set(
	struct scic_sds_controller *scic,
	union scic_user_parameters *scic_parms)
{
	u32 state = scic->sm.current_state_id;

	if (state == SCIC_RESET ||
	    state == SCIC_INITIALIZING ||
	    state == SCIC_INITIALIZED) {
		u16 index;

		/*
		 * Validate the user parameters. If they are not legal, then
		 * return a failure.
		 */
		for (index = 0; index < SCI_MAX_PHYS; index++) {
			struct sci_phy_user_params *user_phy;

			user_phy = &scic_parms->sds1.phys[index];

			if (!((user_phy->max_speed_generation <=
						SCIC_SDS_PARM_MAX_SPEED) &&
			      (user_phy->max_speed_generation >
						SCIC_SDS_PARM_NO_SPEED)))
				return SCI_FAILURE_INVALID_PARAMETER_VALUE;

			if (user_phy->in_connection_align_insertion_frequency <
					3)
				return SCI_FAILURE_INVALID_PARAMETER_VALUE;

			if ((user_phy->in_connection_align_insertion_frequency <
						3) ||
			    (user_phy->align_insertion_frequency == 0) ||
			    (user_phy->notify_enable_spin_up_insertion_frequency ==
						0))
				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
		}

		if ((scic_parms->sds1.stp_inactivity_timeout == 0) ||
		    (scic_parms->sds1.ssp_inactivity_timeout == 0) ||
		    (scic_parms->sds1.stp_max_occupancy_timeout == 0) ||
		    (scic_parms->sds1.ssp_max_occupancy_timeout == 0) ||
		    (scic_parms->sds1.no_outbound_task_timeout == 0))
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;

		memcpy(&scic->user_parameters, scic_parms, sizeof(*scic_parms));

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INVALID_STATE;
}
static int scic_controller_mem_init(struct scic_sds_controller *scic)
{
	struct device *dev = scic_to_dev(scic);
	dma_addr_t dma;
	size_t size;
	int err;

	size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
	scic->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
	if (!scic->completion_queue)
		return -ENOMEM;

	writel(lower_32_bits(dma), &scic->smu_registers->completion_queue_lower);
	writel(upper_32_bits(dma), &scic->smu_registers->completion_queue_upper);

	size = scic->remote_node_entries * sizeof(union scu_remote_node_context);
	scic->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
							      GFP_KERNEL);
	if (!scic->remote_node_context_table)
		return -ENOMEM;

	writel(lower_32_bits(dma), &scic->smu_registers->remote_node_context_lower);
	writel(upper_32_bits(dma), &scic->smu_registers->remote_node_context_upper);

	size = scic->task_context_entries * sizeof(struct scu_task_context);
	scic->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
	if (!scic->task_context_table)
		return -ENOMEM;

	writel(lower_32_bits(dma), &scic->smu_registers->host_task_table_lower);
	writel(upper_32_bits(dma), &scic->smu_registers->host_task_table_upper);

	err = scic_sds_unsolicited_frame_control_construct(scic);
	if (err)
		return err;

	/*
	 * Inform the silicon as to the location of the UF headers and
	 * address table.
	 */
	writel(lower_32_bits(scic->uf_control.headers.physical_address),
		&scic->scu_registers->sdma.uf_header_base_address_lower);
	writel(upper_32_bits(scic->uf_control.headers.physical_address),
		&scic->scu_registers->sdma.uf_header_base_address_upper);

	writel(lower_32_bits(scic->uf_control.address_table.physical_address),
		&scic->scu_registers->sdma.uf_address_table_lower);
	writel(upper_32_bits(scic->uf_control.address_table.physical_address),
		&scic->scu_registers->sdma.uf_address_table_upper);

	return 0;
}
int isci_host_init(struct isci_host *isci_host)
{
	int err = 0, i;
	enum sci_status status;
	union scic_oem_parameters oem;
	union scic_user_parameters scic_user_params;
	struct isci_pci_info *pci_info = to_pci_info(isci_host->pdev);

	spin_lock_init(&isci_host->state_lock);
	spin_lock_init(&isci_host->scic_lock);
	spin_lock_init(&isci_host->queue_lock);
	init_waitqueue_head(&isci_host->eventq);

	isci_host_change_state(isci_host, isci_starting);
	isci_host->can_queue = ISCI_CAN_QUEUE_VAL;

	status = scic_controller_construct(&isci_host->sci, scu_base(isci_host),
					   smu_base(isci_host));

	if (status != SCI_SUCCESS) {
		dev_err(&isci_host->pdev->dev,
			"%s: scic_controller_construct failed - status = %x\n",
			__func__, status);
		return -ENODEV;
	}

	isci_host->sas_ha.dev = &isci_host->pdev->dev;
	isci_host->sas_ha.lldd_ha = isci_host;

	/*
	 * grab initial values stored in the controller object for OEM and USER
	 * parameters
	 */
	isci_user_parameters_get(isci_host, &scic_user_params);
	status = scic_user_parameters_set(&isci_host->sci,
					  &scic_user_params);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: scic_user_parameters_set failed\n",
			 __func__);
		return -ENODEV;
	}

	scic_oem_parameters_get(&isci_host->sci, &oem);

	/* grab any OEM parameters specified in orom */
	if (pci_info->orom) {
		status = isci_parse_oem_parameters(&oem,
						   pci_info->orom,
						   isci_host->id);
		if (status != SCI_SUCCESS) {
			dev_warn(&isci_host->pdev->dev,
				 "parsing firmware oem parameters failed\n");
			return -EINVAL;
		}
	}

	status = scic_oem_parameters_set(&isci_host->sci, &oem);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: scic_oem_parameters_set failed\n",
			 __func__);
		return -ENODEV;
	}

	tasklet_init(&isci_host->completion_tasklet,
		     isci_host_completion_routine, (unsigned long)isci_host);

	INIT_LIST_HEAD(&isci_host->requests_to_complete);
	INIT_LIST_HEAD(&isci_host->requests_to_errorback);

	spin_lock_irq(&isci_host->scic_lock);
	status = scic_controller_initialize(&isci_host->sci);
	spin_unlock_irq(&isci_host->scic_lock);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: scic_controller_initialize failed -"
			 " status = 0x%x\n",
			 __func__, status);
		return -ENODEV;
	}

	err = scic_controller_mem_init(&isci_host->sci);
	if (err)
		return err;

	isci_host->dma_pool = dmam_pool_create(DRV_NAME, &isci_host->pdev->dev,
					       sizeof(struct isci_request),
					       SLAB_HWCACHE_ALIGN, 0);

	if (!isci_host->dma_pool)
		return -ENOMEM;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		isci_port_init(&isci_host->ports[i], isci_host, i);

	for (i = 0; i < SCI_MAX_PHYS; i++)
		isci_phy_init(&isci_host->phys[i], isci_host, i);

	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		struct isci_remote_device *idev = &isci_host->devices[i];

		INIT_LIST_HEAD(&idev->reqs_in_process);
		INIT_LIST_HEAD(&idev->node);
		spin_lock_init(&idev->state_lock);
	}

	return 0;
}
void scic_sds_controller_link_up(struct scic_sds_controller *scic,
		struct scic_sds_port *port, struct scic_sds_phy *phy)
{
	switch (scic->sm.current_state_id) {
	case SCIC_STARTING:
		sci_del_timer(&scic->phy_timer);
		scic->phy_startup_timer_pending = false;
		scic->port_agent.link_up_handler(scic, &scic->port_agent,
						 port, phy);
		scic_sds_controller_start_next_phy(scic);
		break;
	case SCIC_READY:
		scic->port_agent.link_up_handler(scic, &scic->port_agent,
						 port, phy);
		break;
	default:
		dev_dbg(scic_to_dev(scic),
			"%s: SCIC Controller linkup event from phy %d in "
			"unexpected state %d\n", __func__, phy->phy_index,
			scic->sm.current_state_id);
	}
}
void scic_sds_controller_link_down(struct scic_sds_controller *scic,
		struct scic_sds_port *port, struct scic_sds_phy *phy)
{
	switch (scic->sm.current_state_id) {
	case SCIC_STARTING:
	case SCIC_READY:
		scic->port_agent.link_down_handler(scic, &scic->port_agent,
						   port, phy);
		break;
	default:
		dev_dbg(scic_to_dev(scic),
			"%s: SCIC Controller linkdown event from phy %d in "
			"unexpected state %d\n",
			__func__, phy->phy_index,
			scic->sm.current_state_id);
	}
}
/*
 * This is a helper method to determine if any remote devices on this
 * controller are still in the stopping state.
 */
static bool scic_sds_controller_has_remote_devices_stopping(
	struct scic_sds_controller *controller)
{
	u32 index;

	for (index = 0; index < controller->remote_node_entries; index++) {
		if ((controller->device_table[index] != NULL) &&
		    (controller->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
			return true;
	}

	return false;
}
/*
 * This method is called by the remote device to inform the controller
 * object that the remote device has stopped.
 */
void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic,
					       struct scic_sds_remote_device *sci_dev)
{
	if (scic->sm.current_state_id != SCIC_STOPPING) {
		dev_dbg(scic_to_dev(scic),
			"SCIC Controller 0x%p remote device stopped event "
			"from device 0x%p in unexpected state %d\n",
			scic, sci_dev,
			scic->sm.current_state_id);
		return;
	}

	if (!scic_sds_controller_has_remote_devices_stopping(scic)) {
		sci_change_state(&scic->sm, SCIC_STOPPED);
	}
}
/*
 * This method will write the request value to the SCU PCP register. The
 * method is used to suspend/resume ports, devices, and phys.
 */
void scic_sds_controller_post_request(
	struct scic_sds_controller *scic,
	u32 request)
{
	dev_dbg(scic_to_dev(scic),
		"%s: SCIC Controller 0x%p post request 0x%08x\n",
		__func__, scic, request);

	writel(request, &scic->smu_registers->post_context_port);
}
/*
 * This method will copy the soft copy of the task context into the physical
 * memory accessible by the controller.
 * @scic: This parameter specifies the controller for which to copy
 *    the task context.
 * @sci_req: This parameter specifies the request for which the task
 *    context is being copied.
 *
 * After this call is made the SCIC_SDS_IO_REQUEST object will always point to
 * the physical memory version of the task context. Thus, all subsequent
 * updates to the task context are performed in the TC table (i.e. DMAable
 * memory).
 */
void scic_sds_controller_copy_task_context(
	struct scic_sds_controller *scic,
	struct scic_sds_request *sci_req)
{
	struct scu_task_context *task_context_buffer;

	task_context_buffer = scic_sds_controller_get_task_context_buffer(
		scic, sci_req->io_tag);

	memcpy(task_context_buffer,
	       sci_req->task_context_buffer,
	       offsetof(struct scu_task_context, sgl_snapshot_ac));

	/*
	 * Now that the soft copy of the TC has been copied into the TC
	 * table accessible by the silicon, any further changes to the TC
	 * (e.g. TC termination) occur in that location.
	 */
	sci_req->task_context_buffer = task_context_buffer;
}
struct scu_task_context *scic_sds_controller_get_task_context_buffer(struct scic_sds_controller *scic,
								      u16 io_tag)
{
	u16 tci = ISCI_TAG_TCI(io_tag);

	if (tci < scic->task_context_entries)
		return &scic->task_context_table[tci];

	return NULL;
}
struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic, u16 io_tag)
{
	u16 task_index;
	u16 task_sequence;

	task_index = ISCI_TAG_TCI(io_tag);

	if (task_index < scic->task_context_entries) {
		if (scic->io_request_table[task_index] != NULL) {
			task_sequence = ISCI_TAG_SEQ(io_tag);

			if (task_sequence == scic->io_request_sequence[task_index])
				return scic->io_request_table[task_index];
		}
	}

	return NULL;
}
/*
 * This method allocates a remote node index and reserves the remote node
 * context space for use. This method can fail if there are no more remote
 * node indexes available.
 * @scic: This is the controller object which contains the set of
 *    free remote node ids
 * @sci_dev: This is the device object which is requesting a remote node
 *    id
 * @node_id: This is the remote node id that is assigned to the device if one
 *    is available
 *
 * Returns SCI_FAILURE_INSUFFICIENT_RESOURCES if there are no remote node
 * indexes available.
 */
enum sci_status scic_sds_controller_allocate_remote_node_context(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *sci_dev,
	u16 *node_id)
{
	u16 node_index;
	u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);

	node_index = scic_sds_remote_node_table_allocate_remote_node(
		&scic->available_remote_nodes, remote_node_count);

	if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
		scic->device_table[node_index] = sci_dev;

		*node_id = node_index;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}
/*
 * This method frees the remote node index back to the available pool.  Once
 * this is done the remote node context buffer is no longer valid and can
 * not be used.
 */
void scic_sds_controller_free_remote_node_context(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *sci_dev,
	u16 node_id)
{
	u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);

	if (scic->device_table[node_id] == sci_dev) {
		scic->device_table[node_id] = NULL;

		scic_sds_remote_node_table_release_remote_node_index(
			&scic->available_remote_nodes, remote_node_count, node_id);
	}
}
/*
 * This method returns the union scu_remote_node_context for the specified
 * remote node id.
 *
 * Returns a union scu_remote_node_context pointer, or NULL.
 */
union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
	struct scic_sds_controller *scic,
	u16 node_id)
{
	if ((node_id < scic->remote_node_entries) &&
	    (scic->device_table[node_id] != NULL))
		return &scic->remote_node_context_table[node_id];

	return NULL;
}
/*
 * @response_buffer: This is the buffer into which the D2H register FIS will be
 *    constructed.
 * @frame_header: This is the frame header returned by the hardware.
 * @frame_buffer: This is the frame buffer returned by the hardware.
 *
 * This method will combine the frame header and frame buffer to create a SATA
 * D2H register FIS.
 */
void scic_sds_controller_copy_sata_response(
	void *response_buffer,
	void *frame_header,
	void *frame_buffer)
{
	memcpy(response_buffer, frame_header, sizeof(u32));

	memcpy(response_buffer + sizeof(u32),
	       frame_buffer,
	       sizeof(struct dev_to_host_fis) - sizeof(u32));
}
/*
 * This method releases the frame; once this is done the frame is available
 * for re-use by the hardware.  The data contained in the frame header and
 * frame buffer is no longer valid.  The UF queue get pointer is only updated
 * if UF control indicates this is appropriate.
 */
void scic_sds_controller_release_frame(
	struct scic_sds_controller *scic,
	u32 frame_index)
{
	if (scic_sds_unsolicited_frame_control_release_frame(
		    &scic->uf_control, frame_index) == true)
		writel(scic->uf_control.get,
		       &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
}
/**
 * scic_controller_start_io() - This method is called by the SCI user to
 *    send/start an IO request. If the method invocation is successful, then
 *    the IO request has been queued to the hardware for processing.
 * @controller: the handle to the controller object for which to start an IO
 *    request.
 * @remote_device: the handle to the remote device object for which to start an
 *    IO request.
 * @io_request: the handle to the io request object to start.
 * @io_tag: This parameter specifies a previously allocated IO tag that the
 *    user desires to be utilized for this request. This parameter is optional.
 *    The user is allowed to supply SCI_CONTROLLER_INVALID_IO_TAG as the value
 *    for this parameter.
 *
 * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
 * to ensure that each of the methods that may allocate or free available IO
 * tags are handled in a mutually exclusive manner.  This method is one of said
 * methods requiring proper critical code section protection (e.g. semaphore,
 * spin-lock, etc.). - For SATA, the user is required to manage NCQ tags.  As a
 * result, it is expected the user will have set the NCQ tag field in the host
 * to device register FIS prior to calling this method.  There is also a
 * requirement for the user to call scic_stp_io_set_ncq_tag() prior to invoking
 * the scic_controller_start_io() method. See scic_controller_allocate_tag()
 * for more information on allocating a tag. Indicate if the controller
 * successfully started the IO request. SCI_SUCCESS if the IO request was
 * successfully started. Determine the failure situations and return values.
 */
enum sci_status scic_controller_start_io(struct scic_sds_controller *scic,
					 struct scic_sds_remote_device *rdev,
					 struct scic_sds_request *req,
					 u16 io_tag)
{
	enum sci_status status;

	if (scic->sm.current_state_id != SCIC_READY) {
		dev_warn(scic_to_dev(scic), "invalid state to start I/O");
		return SCI_FAILURE_INVALID_STATE;
	}

	status = scic_sds_remote_device_start_io(scic, rdev, req);
	if (status != SCI_SUCCESS)
		return status;

	scic->io_request_table[ISCI_TAG_TCI(req->io_tag)] = req;
	scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(req));
	return SCI_SUCCESS;
}
/**
 * scic_controller_terminate_request() - This method is called by the SCI Core
 *    user to terminate an ongoing (i.e. started) core IO request.  This does
 *    not abort the IO request at the target, but rather removes the IO request
 *    from the host controller.
 * @controller: the handle to the controller object for which to terminate a
 *    request.
 * @remote_device: the handle to the remote device object for which to
 *    terminate a request.
 * @request: the handle to the io or task management request object to
 *    terminate.
 *
 * Indicate if the controller successfully began the terminate process for the
 * IO request. SCI_SUCCESS if the terminate process was successfully started
 * for the request. Determine the failure situations and return values.
 */
enum sci_status scic_controller_terminate_request(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *rdev,
	struct scic_sds_request *req)
{
	enum sci_status status;

	if (scic->sm.current_state_id != SCIC_READY) {
		dev_warn(scic_to_dev(scic),
			 "invalid state to terminate request\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	status = scic_sds_io_request_terminate(req);
	if (status != SCI_SUCCESS)
		return status;

	/*
	 * Utilize the original post context command and or in the POST_TC_ABORT
	 * request sub-type.
	 */
	scic_sds_controller_post_request(scic,
		scic_sds_request_get_post_context(req) |
		SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
	return SCI_SUCCESS;
}
/**
 * scic_controller_complete_io() - This method will perform core specific
 *    completion operations for an IO request.  After this method is invoked,
 *    the user should consider the IO request as invalid until it is properly
 *    reused (i.e. re-constructed).
 * @controller: The handle to the controller object for which to complete the
 *    IO request.
 * @remote_device: The handle to the remote device object for which to complete
 *    the IO request.
 * @io_request: the handle to the io request object to complete.
 *
 * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
 * to ensure that each of the methods that may allocate or free available IO
 * tags are handled in a mutually exclusive manner.  This method is one of said
 * methods requiring proper critical code section protection (e.g. semaphore,
 * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
 * Core user, using the scic_controller_allocate_io_tag() method, then it is
 * the responsibility of the caller to invoke the scic_controller_free_io_tag()
 * method to free the tag (i.e. this method will not free the IO tag). Indicate
 * if the controller successfully completed the IO request. SCI_SUCCESS if the
 * completion process was successful.
 */
enum sci_status scic_controller_complete_io(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *rdev,
	struct scic_sds_request *request)
{
	enum sci_status status;
	u16 index;

	switch (scic->sm.current_state_id) {
	case SCIC_STOPPING:
		/* XXX: Implement this function */
		return SCI_FAILURE;
	case SCIC_READY:
		status = scic_sds_remote_device_complete_io(scic, rdev, request);
		if (status != SCI_SUCCESS)
			return status;

		index = ISCI_TAG_TCI(request->io_tag);
		scic->io_request_table[index] = NULL;
		return SCI_SUCCESS;
	default:
		dev_warn(scic_to_dev(scic), "invalid state to complete I/O");
		return SCI_FAILURE_INVALID_STATE;
	}
}
enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;

	if (scic->sm.current_state_id != SCIC_READY) {
		dev_warn(scic_to_dev(scic), "invalid state to continue I/O");
		return SCI_FAILURE_INVALID_STATE;
	}

	scic->io_request_table[ISCI_TAG_TCI(sci_req->io_tag)] = sci_req;
	scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(sci_req));
	return SCI_SUCCESS;
}
/**
 * scic_controller_start_task() - This method is called by the SCIC user to
 *    send/start a framework task management request.
 * @controller: the handle to the controller object for which to start the task
 *    management request.
 * @remote_device: the handle to the remote device object for which to start
 *    the task management request.
 * @task_request: the handle to the task request object to start.
 * @io_tag: This parameter specifies a previously allocated IO tag that the
 *    user desires to be utilized for this request.  Note this is not the
 *    io_tag of the request being managed.  It is to be utilized for the task
 *    request itself. This parameter is optional.  The user is allowed to
 *    supply SCI_CONTROLLER_INVALID_IO_TAG as the value for this parameter.
 *
 * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
 * to ensure that each of the methods that may allocate or free available IO
 * tags are handled in a mutually exclusive manner.  This method is one of said
 * methods requiring proper critical code section protection (e.g. semaphore,
 * spin-lock, etc.). - The user must synchronize this task with completion
 * queue processing.  If they are not synchronized then it is possible for the
 * io requests that are being managed by the task request to complete before
 * the task request is started. See scic_controller_allocate_tag() for more
 * information on allocating a tag. Indicate if the controller successfully
 * started the IO request. SCI_TASK_SUCCESS if the task request was
 * successfully started. SCI_TASK_FAILURE_REQUIRES_SCSI_ABORT This value is
 * returned if there is/are task(s) outstanding that require termination or
 * completion before this request can succeed.
 */
enum sci_task_status scic_controller_start_task(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *rdev,
	struct scic_sds_request *req,
	u16 task_tag)
{
	enum sci_status status;

	if (scic->sm.current_state_id != SCIC_READY) {
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC Controller starting task from invalid "
			 "state\n",
			 __func__);
		return SCI_TASK_FAILURE_INVALID_STATE;
	}

	status = scic_sds_remote_device_start_task(scic, rdev, req);
	switch (status) {
	case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
		scic->io_request_table[ISCI_TAG_TCI(req->io_tag)] = req;

		/*
		 * We will let the framework know this task request started
		 * successfully, although the core is still working on starting
		 * the request (to post the tc when the RNC is resumed).
		 */
		return SCI_SUCCESS;
	case SCI_SUCCESS:
		scic->io_request_table[ISCI_TAG_TCI(req->io_tag)] = req;

		scic_sds_controller_post_request(scic,
			scic_sds_request_get_post_context(req));
		break;
	default:
		break;
	}

	return status;
}
/**
 * scic_controller_allocate_io_tag() - This method will allocate a tag from the
 *    pool of free IO tags. Direct allocation of IO tags by the SCI Core user
 *    is optional. The scic_controller_start_io() method will allocate an IO
 *    tag if this method is not utilized and the tag is not supplied to the IO
 *    construct routine. Direct allocation of IO tags may provide additional
 *    performance improvements in environments capable of supporting this usage
 *    model. Additionally, direct allocation of IO tags also provides
 *    additional flexibility to the SCI Core user. Specifically, the user may
 *    retain IO tags across the lives of multiple IO requests.
 * @controller: the handle to the controller object for which to allocate the
 *    tag.
 *
 * IO tags are a protected resource. It is incumbent upon the SCI Core user to
 * ensure that each of the methods that may allocate or free available IO tags
 * are handled in a mutually exclusive manner. This method is one of said
 * methods requiring proper critical code section protection (e.g. semaphore,
 * spin-lock, etc.). Returns an unsigned integer representing an available IO
 * tag. SCI_CONTROLLER_INVALID_IO_TAG This value is returned if there are no
 * currently available tags to be allocated. All other return values indicate
 * a legal tag.
 */
u16 scic_controller_allocate_io_tag(struct scic_sds_controller *scic)
{
	struct isci_host *ihost = scic_to_ihost(scic);

	if (isci_tci_space(ihost)) {
		u16 tci = isci_tci_alloc(ihost);
		u8 seq = scic->io_request_sequence[tci];

		return ISCI_TAG(seq, tci);
	}

	return SCI_CONTROLLER_INVALID_IO_TAG;
}
/**
 * scic_controller_free_io_tag() - This method will free an IO tag to the pool
 *    of free IO tags. This method provides the SCI Core user more flexibility
 *    with regards to IO tags. The user may desire to keep an IO tag after an
 *    IO request has completed, because they plan on re-using the tag for a
 *    subsequent IO request. This method is only legal if the tag was
 *    allocated via scic_controller_allocate_io_tag().
 * @controller: This parameter specifies the handle to the controller object
 *    for which to free/return the tag.
 * @io_tag: This parameter represents the tag to be freed to the pool of
 *    available tags.
 *
 * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
 * to ensure that each of the methods that may allocate or free available IO
 * tags are handled in a mutually exclusive manner.  This method is one of said
 * methods requiring proper critical code section protection (e.g. semaphore,
 * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
 * Core user, using the scic_controller_allocate_io_tag() method, then it is
 * the responsibility of the caller to invoke this method to free the tag. This
 * method returns an indication of whether the tag was successfully put back
 * (freed) to the pool of available tags. SCI_SUCCESS This return value
 * indicates the tag was successfully placed into the pool of available IO
 * tags. SCI_FAILURE_INVALID_IO_TAG This value is returned if the supplied tag
 * is not a valid IO tag value.
 */
enum sci_status scic_controller_free_io_tag(struct scic_sds_controller *scic,
					    u16 io_tag)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	u16 tci = ISCI_TAG_TCI(io_tag);
	u16 seq = ISCI_TAG_SEQ(io_tag);

	/* prevent tail from passing head */
	if (isci_tci_active(ihost) == 0)
		return SCI_FAILURE_INVALID_IO_TAG;

	if (seq == scic->io_request_sequence[tci]) {
		scic->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);

		isci_tci_free(ihost, ISCI_TAG_TCI(io_tag));

		return SCI_SUCCESS;
	}
	return SCI_FAILURE_INVALID_IO_TAG;
}