2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58 #include "remote_device.h"
61 #include "state_machine.h"
62 #include "remote_node_table.h"
63 #include "registers.h"
64 #include "scu_unsolicited_frame.h"
65 #include "unsolicited_frame_control.h"
66 #include "probe_roms.h"
68 struct scic_sds_request
;
69 struct scu_task_context
;
73 * struct scic_power_control -
75 * This structure defines the fields for managing power control for direct
76 * attached disk devices.
78 struct scic_power_control
{
80 * This field is set when the power control timer is running and cleared when
86 * Timer to control when the directed attached disks can consume power.
88 struct sci_timer timer
;
91 * This field is used to keep track of how many phys are put into the
97 * This field is used to keep track of how many phys have been granted to consume power
99 u8 phys_granted_power
;
102 * This field is an array of phys that we are waiting on. The phys are direct
103 * mapped into requesters via struct scic_sds_phy.phy_index
105 struct scic_sds_phy
*requesters
[SCI_MAX_PHYS
];
109 struct scic_sds_port_configuration_agent
;
110 typedef void (*port_config_fn
)(struct scic_sds_controller
*,
111 struct scic_sds_port_configuration_agent
*,
112 struct scic_sds_port
*, struct scic_sds_phy
*);
114 struct scic_sds_port_configuration_agent
{
115 u16 phy_configured_mask
;
120 } phy_valid_port_range
[SCI_MAX_PHYS
];
122 port_config_fn link_up_handler
;
123 port_config_fn link_down_handler
;
124 struct sci_timer timer
;
128 * struct scic_sds_controller -
130 * This structure represents the SCU controller object.
132 struct scic_sds_controller
{
134 * This field contains the information for the base controller state
137 struct sci_base_state_machine state_machine
;
140 * Timer for controller start/stop operations.
142 struct sci_timer timer
;
145 * This field contains the user parameters to be utilized for this
146 * core controller object.
148 union scic_user_parameters user_parameters
;
151 * This field contains the OEM parameters to be utilized for this
152 * core controller object.
154 union scic_oem_parameters oem_parameters
;
157 * This field contains the port configuration agent for this controller.
159 struct scic_sds_port_configuration_agent port_agent
;
162 * This field is the array of device objects that are currently constructed
163 * for this controller object. This table is used as a fast lookup of device
164 * objects that need to handle device completion notifications from the
165 * hardware. The table is RNi based.
167 struct scic_sds_remote_device
*device_table
[SCI_MAX_REMOTE_DEVICES
];
170 * This field is the array of IO request objects that are currently active for
171 * this controller object. This table is used as a fast lookup of the io
172 * request object that need to handle completion queue notifications. The
173 * table is TCi based.
175 struct scic_sds_request
*io_request_table
[SCI_MAX_IO_REQUESTS
];
178 * This field is the free RNi data structure
180 struct scic_remote_node_table available_remote_nodes
;
183 * This field is the TCi pool used to manage the task context index.
185 SCI_POOL_CREATE(tci_pool
, u16
, SCI_MAX_IO_REQUESTS
);
188 * This filed is the struct scic_power_control data used to controll when direct
189 * attached devices can consume power.
191 struct scic_power_control power_control
;
194 * This field is the array of sequence values for the IO Tag fields. Even
195 * though only 4 bits of the field is used for the sequence the sequence is 16
196 * bits in size so the sequence can be bitwise or'd with the TCi to build the
199 u16 io_request_sequence
[SCI_MAX_IO_REQUESTS
];
202 * This field in the array of sequence values for the RNi. These are used
203 * to control io request build to io request start operations. The sequence
204 * value is recorded into an io request when it is built and is checked on
205 * the io request start operation to make sure that there was not a device
206 * hot plug between the build and start operation.
208 u8 remote_device_sequence
[SCI_MAX_REMOTE_DEVICES
];
211 * This field is a pointer to the memory allocated by the driver for the task
212 * context table. This data is shared between the hardware and software.
214 struct scu_task_context
*task_context_table
;
217 * This field is a pointer to the memory allocated by the driver for the
218 * remote node context table. This table is shared between the hardware and
221 union scu_remote_node_context
*remote_node_context_table
;
224 * This field is a pointer to the completion queue. This memory is
225 * written to by the hardware and read by the software.
227 u32
*completion_queue
;
230 * This field is the software copy of the completion queue get pointer. The
231 * controller object writes this value to the hardware after processing the
232 * completion entries.
234 u32 completion_queue_get
;
237 * This field is the minimum of the number of hardware supported port entries
238 * and the software requested port entries.
240 u32 logical_port_entries
;
243 * This field is the minimum number of hardware supported completion queue
244 * entries and the software requested completion queue entries.
246 u32 completion_queue_entries
;
249 * This field is the minimum number of hardware supported event entries and
250 * the software requested event entries.
252 u32 completion_event_entries
;
255 * This field is the minimum number of devices supported by the hardware and
256 * the number of devices requested by the software.
258 u32 remote_node_entries
;
261 * This field is the minimum number of IO requests supported by the hardware
262 * and the number of IO requests requested by the software.
264 u32 task_context_entries
;
267 * This object contains all of the unsolicited frame specific
268 * data utilized by the core controller.
270 struct scic_sds_unsolicited_frame_control uf_control
;
272 /* Phy Startup Data */
274 * Timer for controller phy request startup. On controller start the
275 * controller will start each PHY individually in order of phy index.
277 struct sci_timer phy_timer
;
280 * This field is set when the phy_timer is running and is cleared when
281 * the phy_timer is stopped.
283 bool phy_startup_timer_pending
;
286 * This field is the index of the next phy start. It is initialized to 0 and
287 * increments for each phy index that is started.
289 u32 next_phy_to_start
;
292 * This field controlls the invalid link up notifications to the SCI_USER. If
293 * an invalid_link_up notification is reported a bit for the PHY index is set
294 * so further notifications are not made. Once the PHY object reports link up
295 * and is made part of a port then this bit for the PHY index is cleared.
300 * This field saves the current interrupt coalescing number of the controller.
302 u16 interrupt_coalesce_number
;
305 * This field saves the current interrupt coalescing timeout value in microseconds.
307 u32 interrupt_coalesce_timeout
;
310 * This field is a pointer to the memory mapped register space for the
311 * struct smu_registers.
313 struct smu_registers __iomem
*smu_registers
;
316 * This field is a pointer to the memory mapped register space for the
317 * struct scu_registers.
319 struct scu_registers __iomem
*scu_registers
;
324 struct scic_sds_controller sci
;
325 union scic_oem_parameters oem_parameters
;
327 int id
; /* unique within a given pci device */
328 struct list_head timers
;
329 void *core_ctrl_memory
;
330 struct dma_pool
*dma_pool
;
331 struct isci_phy phys
[SCI_MAX_PHYS
];
332 struct isci_port ports
[SCI_MAX_PORTS
+ 1]; /* includes dummy port */
333 struct sas_ha_struct sas_ha
;
336 spinlock_t queue_lock
;
337 spinlock_t state_lock
;
339 struct pci_dev
*pdev
;
341 enum isci_status status
;
342 #define IHOST_START_PENDING 0
343 #define IHOST_STOP_PENDING 1
345 wait_queue_head_t eventq
;
346 struct Scsi_Host
*shost
;
347 struct tasklet_struct completion_tasklet
;
348 struct list_head requests_to_complete
;
349 struct list_head requests_to_errorback
;
350 spinlock_t scic_lock
;
352 struct isci_remote_device devices
[SCI_MAX_REMOTE_DEVICES
];
356 * enum scic_sds_controller_states - This enumeration depicts all the states
357 * for the common controller state machine.
359 enum scic_sds_controller_states
{
361 * Simply the initial state for the base controller state machine.
363 SCI_BASE_CONTROLLER_STATE_INITIAL
= 0,
366 * This state indicates that the controller is reset. The memory for
367 * the controller is in it's initial state, but the controller requires
369 * This state is entered from the INITIAL state.
370 * This state is entered from the RESETTING state.
372 SCI_BASE_CONTROLLER_STATE_RESET
,
375 * This state is typically an action state that indicates the controller
376 * is in the process of initialization. In this state no new IO operations
378 * This state is entered from the RESET state.
380 SCI_BASE_CONTROLLER_STATE_INITIALIZING
,
383 * This state indicates that the controller has been successfully
384 * initialized. In this state no new IO operations are permitted.
385 * This state is entered from the INITIALIZING state.
387 SCI_BASE_CONTROLLER_STATE_INITIALIZED
,
390 * This state indicates the the controller is in the process of becoming
391 * ready (i.e. starting). In this state no new IO operations are permitted.
392 * This state is entered from the INITIALIZED state.
394 SCI_BASE_CONTROLLER_STATE_STARTING
,
397 * This state indicates the controller is now ready. Thus, the user
398 * is able to perform IO operations on the controller.
399 * This state is entered from the STARTING state.
401 SCI_BASE_CONTROLLER_STATE_READY
,
404 * This state is typically an action state that indicates the controller
405 * is in the process of resetting. Thus, the user is unable to perform
406 * IO operations on the controller. A reset is considered destructive in
408 * This state is entered from the READY state.
409 * This state is entered from the FAILED state.
410 * This state is entered from the STOPPED state.
412 SCI_BASE_CONTROLLER_STATE_RESETTING
,
415 * This state indicates that the controller is in the process of stopping.
416 * In this state no new IO operations are permitted, but existing IO
417 * operations are allowed to complete.
418 * This state is entered from the READY state.
420 SCI_BASE_CONTROLLER_STATE_STOPPING
,
423 * This state indicates that the controller has successfully been stopped.
424 * In this state no new IO operations are permitted.
425 * This state is entered from the STOPPING state.
427 SCI_BASE_CONTROLLER_STATE_STOPPED
,
430 * This state indicates that the controller could not successfully be
431 * initialized. In this state no new IO operations are permitted.
432 * This state is entered from the INITIALIZING state.
433 * This state is entered from the STARTING state.
434 * This state is entered from the STOPPING state.
435 * This state is entered from the RESETTING state.
437 SCI_BASE_CONTROLLER_STATE_FAILED
,
439 SCI_BASE_CONTROLLER_MAX_STATES
446 * struct isci_pci_info - This class represents the pci function containing the
447 * controllers. Depending on PCI SKU, there could be up to 2 controllers in
/* Total possible MSI-X vectors for the PCI function: the per-controller
 * vector count times the maximum number of controllers per function. */
#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
452 struct isci_pci_info
{
453 struct msix_entry msix_entries
[SCI_MAX_MSIX_INT
];
454 struct isci_host
*hosts
[SCI_MAX_CONTROLLERS
];
455 struct isci_orom
*orom
;
/* Fetch the per-PCI-function bookkeeping structure stored in drvdata. */
static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}
/*
 * for_each_isci_host() - iterate over the isci_host instances attached to
 * a PCI function.  Walks to_pci_info(pdev)->hosts[] in index order and
 * terminates at the first NULL entry or at the end of the array.
 * NOTE: @pdev is evaluated multiple times per iteration; pass a plain
 * pointer, not an expression with side effects.
 */
#define for_each_isci_host(id, ihost, pdev) \
	for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
	     id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
	     ihost = to_pci_info(pdev)->hosts[++id])
468 static inline enum isci_status
isci_host_get_state(struct isci_host
*isci_host
)
470 return isci_host
->status
;
473 static inline void isci_host_change_state(struct isci_host
*isci_host
,
474 enum isci_status status
)
478 dev_dbg(&isci_host
->pdev
->dev
,
479 "%s: isci_host = %p, state = 0x%x",
483 spin_lock_irqsave(&isci_host
->state_lock
, flags
);
484 isci_host
->status
= status
;
485 spin_unlock_irqrestore(&isci_host
->state_lock
, flags
);
489 static inline int isci_host_can_queue(struct isci_host
*isci_host
, int num
)
494 spin_lock_irqsave(&isci_host
->queue_lock
, flags
);
495 if ((isci_host
->can_queue
- num
) < 0) {
496 dev_dbg(&isci_host
->pdev
->dev
,
497 "%s: isci_host->can_queue = %d\n",
499 isci_host
->can_queue
);
500 ret
= -SAS_QUEUE_FULL
;
503 isci_host
->can_queue
-= num
;
505 spin_unlock_irqrestore(&isci_host
->queue_lock
, flags
);
510 static inline void isci_host_can_dequeue(struct isci_host
*isci_host
, int num
)
514 spin_lock_irqsave(&isci_host
->queue_lock
, flags
);
515 isci_host
->can_queue
+= num
;
516 spin_unlock_irqrestore(&isci_host
->queue_lock
, flags
);
519 static inline void wait_for_start(struct isci_host
*ihost
)
521 wait_event(ihost
->eventq
, !test_bit(IHOST_START_PENDING
, &ihost
->flags
));
524 static inline void wait_for_stop(struct isci_host
*ihost
)
526 wait_event(ihost
->eventq
, !test_bit(IHOST_STOP_PENDING
, &ihost
->flags
));
529 static inline void wait_for_device_start(struct isci_host
*ihost
, struct isci_remote_device
*idev
)
531 wait_event(ihost
->eventq
, !test_bit(IDEV_START_PENDING
, &idev
->flags
));
534 static inline void wait_for_device_stop(struct isci_host
*ihost
, struct isci_remote_device
*idev
)
536 wait_event(ihost
->eventq
, !test_bit(IDEV_STOP_PENDING
, &idev
->flags
));
539 static inline struct isci_host
*dev_to_ihost(struct domain_device
*dev
)
541 return dev
->port
->ha
->lldd_ha
;
544 static inline struct isci_host
*scic_to_ihost(struct scic_sds_controller
*scic
)
546 /* XXX delete after merging scic_sds_contoller and isci_host */
547 struct isci_host
*ihost
= container_of(scic
, typeof(*ihost
), sci
);
/*
 * INCREMENT_QUEUE_GET() -
 *
 * This macro will increment the specified index and, if the index wraps
 * to 0, it will toggle the cycle bit.
 */
/* Advance @index through a circular queue of @entry_count entries; on
 * wrap to 0, XOR @cycle with @bit_toggle so producers/consumers can
 * detect wrap-around.  Wrapped in do/while(0) so it expands safely as a
 * single statement (e.g. in an unbraced if/else).  Interior lines were
 * truncated in this copy and have been restored. */
#define INCREMENT_QUEUE_GET(index, cycle, entry_count, bit_toggle) \
	do { \
		if ((index) + 1 == (entry_count)) { \
			(index) = 0; \
			(cycle) = (cycle) ^ (bit_toggle); \
		} else { \
			(index) = (index) + 1; \
		} \
	} while (0)
/*
 * scic_sds_controller_get_protocol_engine_group() -
 *
 * This macro returns the protocol engine group for this controller
 * object.  Presently only protocol engine group 0 is supported, so the
 * group is a compile-time constant.
 */
#define scic_sds_controller_get_protocol_engine_group(controller) 0
/*
 * scic_sds_io_tag_construct() -
 *
 * This macro constructs an IO tag from the sequence and index values.
 */
/* Build a 16-bit IO tag: 4-bit sequence in bits [15:12], 12-bit task
 * context index (TCi) in bits [11:0]. */
#define scic_sds_io_tag_construct(sequence, task_index) \
	(((sequence) << 12) | (task_index))
/*
 * scic_sds_io_tag_get_sequence() -
 *
 * This macro returns the IO sequence from the IO tag value.
 */
/* Extract the 4-bit sequence number from bits [15:12] of an IO tag;
 * inverse of the sequence part of scic_sds_io_tag_construct(). */
#define scic_sds_io_tag_get_sequence(io_tag) \
	(((io_tag) >> 12) & 0xF)
/*
 * scic_sds_io_tag_get_index() -
 *
 * This macro returns the TCi from the IO tag value.
 */
/* Extract the 12-bit task context index (TCi) from bits [11:0] of an IO
 * tag; complement of scic_sds_io_tag_get_sequence() and inverse of the
 * index part of scic_sds_io_tag_construct().  The mask expression was
 * truncated in this copy and has been restored. */
#define scic_sds_io_tag_get_index(io_tag) \
	((io_tag) & 0x0FFF)
/*
 * scic_sds_io_sequence_increment() -
 *
 * This is a helper macro to increment the IO sequence count.  We may
 * find in the future that it will be faster to store the sequence count
 * in such a way that we don't perform the shift operation to build IO
 * tag values, so we need a way to increment them correctly.
 */
/* Advance a 4-bit IO sequence counter in place, wrapping 15 -> 0. */
#define scic_sds_io_sequence_increment(value) \
	((value) = ((value) + 1) & 0xF)
611 /* expander attached sata devices require 3 rnc slots */
612 static inline int scic_sds_remote_device_node_count(struct scic_sds_remote_device
*sci_dev
)
614 struct domain_device
*dev
= sci_dev_to_domain(sci_dev
);
616 if ((dev
->dev_type
== SATA_DEV
|| (dev
->tproto
& SAS_PROTOCOL_STP
)) &&
617 !sci_dev
->is_direct_attached
)
618 return SCU_STP_REMOTE_NODE_COUNT
;
619 return SCU_SSP_REMOTE_NODE_COUNT
;
/*
 * scic_sds_controller_set_invalid_phy() -
 *
 * This macro will set the bit in the invalid phy mask for this
 * controller object.  This is used to control messages reported for
 * invalid link up notifications.
 */
/* Mark @phy's index in @controller's invalid-phy mask so an invalid
 * link-up for this phy is reported at most once. */
#define scic_sds_controller_set_invalid_phy(controller, phy) \
	((controller)->invalid_phy_mask |= (1 << (phy)->phy_index))
/*
 * scic_sds_controller_clear_invalid_phy() -
 *
 * This macro will clear the bit in the invalid phy mask for this
 * controller object.  This is used to control messages reported for
 * invalid link up notifications.
 */
/* Clear @phy's index from @controller's invalid-phy mask, re-arming
 * invalid link-up reporting for that phy. */
#define scic_sds_controller_clear_invalid_phy(controller, phy) \
	((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
642 static inline struct device
*scic_to_dev(struct scic_sds_controller
*scic
)
644 return &scic_to_ihost(scic
)->pdev
->dev
;
647 static inline struct device
*sciphy_to_dev(struct scic_sds_phy
*sci_phy
)
649 struct isci_phy
*iphy
= sci_phy_to_iphy(sci_phy
);
651 if (!iphy
|| !iphy
->isci_port
|| !iphy
->isci_port
->isci_host
)
654 return &iphy
->isci_port
->isci_host
->pdev
->dev
;
657 static inline struct device
*sciport_to_dev(struct scic_sds_port
*sci_port
)
659 struct isci_port
*iport
= sci_port_to_iport(sci_port
);
661 if (!iport
|| !iport
->isci_host
)
664 return &iport
->isci_host
->pdev
->dev
;
667 static inline struct device
*scirdev_to_dev(struct scic_sds_remote_device
*sci_dev
)
669 struct isci_remote_device
*idev
=
670 container_of(sci_dev
, typeof(*idev
), sci
);
672 if (!idev
|| !idev
->isci_port
|| !idev
->isci_port
->isci_host
)
675 return &idev
->isci_port
->isci_host
->pdev
->dev
;
/* Silicon revision of the SCU hardware, compared against the
 * ISCI_SI_REV* constants by the is_a0()/is_a2()/is_b0() helpers below.
 * NOTE(review): presumably assigned once during driver probe --
 * confirm in the corresponding .c file. */
extern int isci_si_rev;
686 static inline bool is_a0(void)
688 return isci_si_rev
== ISCI_SI_REVA0
;
691 static inline bool is_a2(void)
693 return isci_si_rev
== ISCI_SI_REVA2
;
696 static inline bool is_b0(void)
698 return isci_si_rev
> ISCI_SI_REVA2
;
701 void scic_sds_controller_post_request(struct scic_sds_controller
*scic
,
703 void scic_sds_controller_release_frame(struct scic_sds_controller
*scic
,
705 void scic_sds_controller_copy_sata_response(void *response_buffer
,
708 enum sci_status
scic_sds_controller_allocate_remote_node_context(struct scic_sds_controller
*scic
,
709 struct scic_sds_remote_device
*sci_dev
,
711 void scic_sds_controller_free_remote_node_context(
712 struct scic_sds_controller
*scic
,
713 struct scic_sds_remote_device
*sci_dev
,
715 union scu_remote_node_context
*scic_sds_controller_get_remote_node_context_buffer(
716 struct scic_sds_controller
*scic
,
719 struct scic_sds_request
*scic_request_by_tag(struct scic_sds_controller
*scic
,
722 struct scu_task_context
*scic_sds_controller_get_task_context_buffer(
723 struct scic_sds_controller
*scic
,
726 void scic_sds_controller_power_control_queue_insert(
727 struct scic_sds_controller
*scic
,
728 struct scic_sds_phy
*sci_phy
);
730 void scic_sds_controller_power_control_queue_remove(
731 struct scic_sds_controller
*scic
,
732 struct scic_sds_phy
*sci_phy
);
734 void scic_sds_controller_link_up(
735 struct scic_sds_controller
*scic
,
736 struct scic_sds_port
*sci_port
,
737 struct scic_sds_phy
*sci_phy
);
739 void scic_sds_controller_link_down(
740 struct scic_sds_controller
*scic
,
741 struct scic_sds_port
*sci_port
,
742 struct scic_sds_phy
*sci_phy
);
744 void scic_sds_controller_remote_device_stopped(
745 struct scic_sds_controller
*scic
,
746 struct scic_sds_remote_device
*sci_dev
);
748 void scic_sds_controller_copy_task_context(
749 struct scic_sds_controller
*scic
,
750 struct scic_sds_request
*this_request
);
752 void scic_sds_controller_register_setup(struct scic_sds_controller
*scic
);
754 enum sci_status
scic_controller_continue_io(struct scic_sds_request
*sci_req
);
755 int isci_host_scan_finished(struct Scsi_Host
*, unsigned long);
756 void isci_host_scan_start(struct Scsi_Host
*);
758 int isci_host_init(struct isci_host
*);
760 void isci_host_init_controller_names(
761 struct isci_host
*isci_host
,
762 unsigned int controller_idx
);
764 void isci_host_deinit(
767 void isci_host_port_link_up(
769 struct scic_sds_port
*,
770 struct scic_sds_phy
*);
771 int isci_host_dev_found(struct domain_device
*);
773 void isci_host_remote_device_start_complete(
775 struct isci_remote_device
*,
778 void scic_controller_disable_interrupts(
779 struct scic_sds_controller
*scic
);
781 enum sci_status
scic_controller_start_io(
782 struct scic_sds_controller
*scic
,
783 struct scic_sds_remote_device
*remote_device
,
784 struct scic_sds_request
*io_request
,
787 enum sci_task_status
scic_controller_start_task(
788 struct scic_sds_controller
*scic
,
789 struct scic_sds_remote_device
*remote_device
,
790 struct scic_sds_request
*task_request
,
793 enum sci_status
scic_controller_terminate_request(
794 struct scic_sds_controller
*scic
,
795 struct scic_sds_remote_device
*remote_device
,
796 struct scic_sds_request
*request
);
798 enum sci_status
scic_controller_complete_io(
799 struct scic_sds_controller
*scic
,
800 struct scic_sds_remote_device
*remote_device
,
801 struct scic_sds_request
*io_request
);
803 u16
scic_controller_allocate_io_tag(
804 struct scic_sds_controller
*scic
);
806 enum sci_status
scic_controller_free_io_tag(
807 struct scic_sds_controller
*scic
,
810 void scic_sds_port_configuration_agent_construct(
811 struct scic_sds_port_configuration_agent
*port_agent
);
813 enum sci_status
scic_sds_port_configuration_agent_initialize(
814 struct scic_sds_controller
*controller
,
815 struct scic_sds_port_configuration_agent
*port_agent
);