isci: additional state machine cleanup
[deliverable/linux.git] / drivers / scsi / isci / host.h
1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55 #ifndef _SCI_HOST_H_
56 #define _SCI_HOST_H_
57
58 #include "remote_device.h"
59 #include "phy.h"
60 #include "pool.h"
61 #include "isci.h"
62 #include "remote_node_table.h"
63 #include "registers.h"
64 #include "scu_unsolicited_frame.h"
65 #include "unsolicited_frame_control.h"
66 #include "probe_roms.h"
67
68 struct scic_sds_request;
69 struct scu_task_context;
70
71
/**
 * struct scic_power_control -
 *
 * This structure defines the fields for managing power control for direct
 * attached disk devices.  Phys queue here and are granted permission to
 * consume power from a timer context.
 */
struct scic_power_control {
	/**
	 * This field is set when the power control timer is running and cleared
	 * when it is not.
	 */
	bool timer_started;

	/**
	 * Timer to control when the direct attached disks can consume power.
	 */
	struct sci_timer timer;

	/**
	 * This field is used to keep track of how many phys are put into the
	 * requesters field.
	 */
	u8 phys_waiting;

	/**
	 * This field is used to keep track of how many phys have been granted
	 * permission to consume power.
	 */
	u8 phys_granted_power;

	/**
	 * This field is an array of phys that we are waiting on.  The phys are
	 * direct mapped into requesters via struct scic_sds_phy.phy_index.
	 */
	struct scic_sds_phy *requesters[SCI_MAX_PHYS];

};
108
struct scic_sds_port_configuration_agent;
/* Callback type the port agent uses to route phy link up / link down
 * notifications to the handler for the current configuration mode. */
typedef void (*port_config_fn)(struct scic_sds_controller *,
			       struct scic_sds_port_configuration_agent *,
			       struct scic_sds_port *, struct scic_sds_phy *);

/* Tracks which phys are configured/ready and dispatches link state
 * changes via the link_up/link_down handlers. */
struct scic_sds_port_configuration_agent {
	u16 phy_configured_mask;
	u16 phy_ready_mask;
	struct {
		u8 min_index;	/* lowest phy index valid for this port */
		u8 max_index;	/* highest phy index valid for this port */
	} phy_valid_port_range[SCI_MAX_PHYS];
	bool timer_pending;
	port_config_fn link_up_handler;
	port_config_fn link_down_handler;
	struct sci_timer timer;
};
126
/**
 * struct scic_sds_controller -
 *
 * This structure represents the SCU controller object.
 */
struct scic_sds_controller {
	/**
	 * This field contains the information for the base controller state
	 * machine.
	 */
	struct sci_base_state_machine sm;

	/**
	 * Timer for controller start/stop operations.
	 */
	struct sci_timer timer;

	/**
	 * This field contains the user parameters to be utilized for this
	 * core controller object.
	 */
	union scic_user_parameters user_parameters;

	/**
	 * This field contains the OEM parameters to be utilized for this
	 * core controller object.
	 */
	union scic_oem_parameters oem_parameters;

	/**
	 * This field contains the port configuration agent for this controller.
	 */
	struct scic_sds_port_configuration_agent port_agent;

	/**
	 * This field is the array of device objects that are currently constructed
	 * for this controller object.  This table is used as a fast lookup of device
	 * objects that need to handle device completion notifications from the
	 * hardware.  The table is RNi (remote node index) based.
	 */
	struct scic_sds_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];

	/**
	 * This field is the array of IO request objects that are currently active for
	 * this controller object.  This table is used as a fast lookup of the io
	 * request object that needs to handle completion queue notifications.  The
	 * table is TCi (task context index) based.
	 */
	struct scic_sds_request *io_request_table[SCI_MAX_IO_REQUESTS];

	/**
	 * This field is the free RNi data structure.
	 */
	struct scic_remote_node_table available_remote_nodes;

	/**
	 * This field is the TCi pool used to manage the task context index.
	 */
	SCI_POOL_CREATE(tci_pool, u16, SCI_MAX_IO_REQUESTS);

	/**
	 * This field is the struct scic_power_control data used to control when
	 * direct attached devices can consume power.
	 */
	struct scic_power_control power_control;

	/**
	 * This field is the array of sequence values for the IO Tag fields.  Even
	 * though only 4 bits of the field are used for the sequence, the sequence
	 * is 16 bits in size so the sequence can be bitwise or'd with the TCi to
	 * build the IO Tag value.
	 */
	u16 io_request_sequence[SCI_MAX_IO_REQUESTS];

	/**
	 * This field is the array of sequence values for the RNi.  These are used
	 * to control io request build to io request start operations.  The sequence
	 * value is recorded into an io request when it is built and is checked on
	 * the io request start operation to make sure that there was not a device
	 * hot plug between the build and start operation.
	 */
	u8 remote_device_sequence[SCI_MAX_REMOTE_DEVICES];

	/**
	 * This field is a pointer to the memory allocated by the driver for the task
	 * context table.  This data is shared between the hardware and software.
	 */
	struct scu_task_context *task_context_table;

	/**
	 * This field is a pointer to the memory allocated by the driver for the
	 * remote node context table.  This table is shared between the hardware and
	 * software.
	 */
	union scu_remote_node_context *remote_node_context_table;

	/**
	 * This field is a pointer to the completion queue.  This memory is
	 * written to by the hardware and read by the software.
	 */
	u32 *completion_queue;

	/**
	 * This field is the software copy of the completion queue get pointer.  The
	 * controller object writes this value to the hardware after processing the
	 * completion entries.
	 */
	u32 completion_queue_get;

	/**
	 * This field is the minimum of the number of hardware supported port entries
	 * and the software requested port entries.
	 */
	u32 logical_port_entries;

	/**
	 * This field is the minimum number of hardware supported completion queue
	 * entries and the software requested completion queue entries.
	 */
	u32 completion_queue_entries;

	/**
	 * This field is the minimum number of hardware supported event entries and
	 * the software requested event entries.
	 */
	u32 completion_event_entries;

	/**
	 * This field is the minimum number of devices supported by the hardware and
	 * the number of devices requested by the software.
	 */
	u32 remote_node_entries;

	/**
	 * This field is the minimum number of IO requests supported by the hardware
	 * and the number of IO requests requested by the software.
	 */
	u32 task_context_entries;

	/**
	 * This object contains all of the unsolicited frame specific
	 * data utilized by the core controller.
	 */
	struct scic_sds_unsolicited_frame_control uf_control;

	/* Phy Startup Data */
	/**
	 * Timer for controller phy request startup.  On controller start the
	 * controller will start each PHY individually in order of phy index.
	 */
	struct sci_timer phy_timer;

	/**
	 * This field is set when the phy_timer is running and is cleared when
	 * the phy_timer is stopped.
	 */
	bool phy_startup_timer_pending;

	/**
	 * This field is the index of the next phy start.  It is initialized to 0 and
	 * increments for each phy index that is started.
	 */
	u32 next_phy_to_start;

	/**
	 * This field controls the invalid link up notifications to the SCI_USER.  If
	 * an invalid_link_up notification is reported a bit for the PHY index is set
	 * so further notifications are not made.  Once the PHY object reports link up
	 * and is made part of a port then this bit for the PHY index is cleared.
	 */
	u8 invalid_phy_mask;

	/*
	 * This field saves the current interrupt coalescing number of the controller.
	 */
	u16 interrupt_coalesce_number;

	/*
	 * This field saves the current interrupt coalescing timeout value in
	 * microseconds.
	 */
	u32 interrupt_coalesce_timeout;

	/**
	 * This field is a pointer to the memory mapped register space for the
	 * struct smu_registers.
	 */
	struct smu_registers __iomem *smu_registers;

	/**
	 * This field is a pointer to the memory mapped register space for the
	 * struct scu_registers.
	 */
	struct scu_registers __iomem *scu_registers;

};
322
/*
 * struct isci_host - LLDD view of one SCU controller; pairs the core
 * controller object with the libsas/SCSI midlayer bookkeeping.
 */
struct isci_host {
	struct scic_sds_controller sci;	/* embedded core controller */
	union scic_oem_parameters oem_parameters;

	int id; /* unique within a given pci device */
	void *core_ctrl_memory;
	struct dma_pool *dma_pool;
	struct isci_phy phys[SCI_MAX_PHYS];
	struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
	struct sas_ha_struct sas_ha;

	int can_queue;			/* remaining queue depth; see isci_host_can_queue() */
	spinlock_t queue_lock;		/* protects can_queue */
	spinlock_t state_lock;		/* protects status */

	struct pci_dev *pdev;

	enum isci_status status;
	/* bit numbers for 'flags' below; eventq waiters sleep on these */
#define IHOST_START_PENDING 0
#define IHOST_STOP_PENDING 1
	unsigned long flags;
	wait_queue_head_t eventq;	/* woken when *_PENDING bits clear */
	struct Scsi_Host *shost;
	struct tasklet_struct completion_tasklet;
	struct list_head requests_to_complete;
	struct list_head requests_to_errorback;
	spinlock_t scic_lock;

	struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
};
353
/**
 * enum scic_sds_controller_states - This enumeration depicts all the states
 *    for the common controller state machine.
 */
enum scic_sds_controller_states {
	/**
	 * Simply the initial state for the base controller state machine.
	 */
	SCIC_INITIAL = 0,

	/**
	 * This state indicates that the controller is reset.  The memory for
	 * the controller is in its initial state, but the controller requires
	 * initialization.
	 * This state is entered from the INITIAL state.
	 * This state is entered from the RESETTING state.
	 */
	SCIC_RESET,

	/**
	 * This state is typically an action state that indicates the controller
	 * is in the process of initialization.  In this state no new IO operations
	 * are permitted.
	 * This state is entered from the RESET state.
	 */
	SCIC_INITIALIZING,

	/**
	 * This state indicates that the controller has been successfully
	 * initialized.  In this state no new IO operations are permitted.
	 * This state is entered from the INITIALIZING state.
	 */
	SCIC_INITIALIZED,

	/**
	 * This state indicates that the controller is in the process of becoming
	 * ready (i.e. starting).  In this state no new IO operations are permitted.
	 * This state is entered from the INITIALIZED state.
	 */
	SCIC_STARTING,

	/**
	 * This state indicates the controller is now ready.  Thus, the user
	 * is able to perform IO operations on the controller.
	 * This state is entered from the STARTING state.
	 */
	SCIC_READY,

	/**
	 * This state is typically an action state that indicates the controller
	 * is in the process of resetting.  Thus, the user is unable to perform
	 * IO operations on the controller.  A reset is considered destructive in
	 * most cases.
	 * This state is entered from the READY state.
	 * This state is entered from the FAILED state.
	 * This state is entered from the STOPPED state.
	 */
	SCIC_RESETTING,

	/**
	 * This state indicates that the controller is in the process of stopping.
	 * In this state no new IO operations are permitted, but existing IO
	 * operations are allowed to complete.
	 * This state is entered from the READY state.
	 */
	SCIC_STOPPING,

	/**
	 * This state indicates that the controller has successfully been stopped.
	 * In this state no new IO operations are permitted.
	 * This state is entered from the STOPPING state.
	 */
	SCIC_STOPPED,

	/**
	 * This state indicates that the controller could not successfully be
	 * initialized.  In this state no new IO operations are permitted.
	 * This state is entered from the INITIALIZING state.
	 * This state is entered from the STARTING state.
	 * This state is entered from the STOPPING state.
	 * This state is entered from the RESETTING state.
	 */
	SCIC_FAILED,
};
438
439
440
441 /**
442 * struct isci_pci_info - This class represents the pci function containing the
443 * controllers. Depending on PCI SKU, there could be up to 2 controllers in
444 * the PCI function.
445 */
446 #define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
447
448 struct isci_pci_info {
449 struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
450 struct isci_host *hosts[SCI_MAX_CONTROLLERS];
451 struct isci_orom *orom;
452 };
453
/* Fetch the per-pci-function isci bookkeeping stored as pci driver data. */
static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
{
	struct isci_pci_info *pci_info = pci_get_drvdata(pdev);

	return pci_info;
}
458
/*
 * Iterate over the hosts of a pci function; terminates at the first NULL
 * slot or the end of the hosts[] array, whichever comes first.
 */
#define for_each_isci_host(id, ihost, pdev) \
	for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
	     id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
	     ihost = to_pci_info(pdev)->hosts[++id])
463
464 static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
465 {
466 return isci_host->status;
467 }
468
/*
 * Set the driver-level status of @isci_host.  The write is serialized
 * against readers via state_lock; the debug trace is emitted before
 * taking the lock.
 */
static inline void isci_host_change_state(struct isci_host *isci_host,
					  enum isci_status status)
{
	unsigned long flags;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_host = %p, state = 0x%x",
		__func__,
		isci_host,
		status);
	spin_lock_irqsave(&isci_host->state_lock, flags);
	isci_host->status = status;
	spin_unlock_irqrestore(&isci_host->state_lock, flags);

}
484
485 static inline int isci_host_can_queue(struct isci_host *isci_host, int num)
486 {
487 int ret = 0;
488 unsigned long flags;
489
490 spin_lock_irqsave(&isci_host->queue_lock, flags);
491 if ((isci_host->can_queue - num) < 0) {
492 dev_dbg(&isci_host->pdev->dev,
493 "%s: isci_host->can_queue = %d\n",
494 __func__,
495 isci_host->can_queue);
496 ret = -SAS_QUEUE_FULL;
497
498 } else
499 isci_host->can_queue -= num;
500
501 spin_unlock_irqrestore(&isci_host->queue_lock, flags);
502
503 return ret;
504 }
505
/* Return @num previously reserved queue slots; pairs with isci_host_can_queue(). */
static inline void isci_host_can_dequeue(struct isci_host *isci_host, int num)
{
	unsigned long flags;

	spin_lock_irqsave(&isci_host->queue_lock, flags);
	isci_host->can_queue += num;
	spin_unlock_irqrestore(&isci_host->queue_lock, flags);
}
514
/* Sleep until the host start-pending flag clears (start completed). */
static inline void wait_for_start(struct isci_host *ihost)
{
	wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
}
519
/* Sleep until the host stop-pending flag clears (stop completed). */
static inline void wait_for_stop(struct isci_host *ihost)
{
	wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags));
}
524
/* Sleep until @idev's start-pending flag clears (device start completed). */
static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev)
{
	wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags));
}
529
/* Sleep until @idev's stop-pending flag clears (device stop completed). */
static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
{
	wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags));
}
534
535 static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
536 {
537 return dev->port->ha->lldd_ha;
538 }
539
540 static inline struct isci_host *scic_to_ihost(struct scic_sds_controller *scic)
541 {
542 /* XXX delete after merging scic_sds_contoller and isci_host */
543 struct isci_host *ihost = container_of(scic, typeof(*ihost), sci);
544
545 return ihost;
546 }
547
/**
 * INCREMENT_QUEUE_GET() -
 *
 * This macro will increment the specified index and if the index wraps
 * to 0 it will toggle the cycle bit.  Wrapped in do/while(0) so the
 * macro expands to a single statement and is safe in unbraced if/else
 * bodies; all arguments are parenthesized to avoid operator-precedence
 * surprises when callers pass expressions.
 */
#define INCREMENT_QUEUE_GET(index, cycle, entry_count, bit_toggle)	\
	do {								\
		if ((index) + 1 == (entry_count)) {			\
			(index) = 0;					\
			(cycle) ^= (bit_toggle);			\
		} else {						\
			(index) = (index) + 1;				\
		}							\
	} while (0)
563
/**
 * scic_sds_controller_get_protocol_engine_group() -
 *
 * This macro returns the protocol engine group for this controller object.
 * Presently we only support protocol engine group 0, so just return that.
 * The controller argument is intentionally unused.
 */
#define scic_sds_controller_get_protocol_engine_group(controller) 0
571
/**
 * scic_sds_io_tag_construct() -
 *
 * Build an IO tag: the 4-bit sequence occupies bits 15:12, the TCi the
 * low 12 bits.
 */
#define scic_sds_io_tag_construct(sequence, task_index)	\
	(((sequence) << 12) | (task_index))

/**
 * scic_sds_io_tag_get_sequence() -
 *
 * Extract the 4-bit sequence number from an IO tag.
 */
#define scic_sds_io_tag_get_sequence(io_tag) \
	(((io_tag) & 0xF000) >> 12)

/**
 * scic_sds_io_tag_get_index() -
 *
 * Extract the TCi (low 12 bits) from an IO tag.
 */
#define scic_sds_io_tag_get_index(io_tag) \
	((io_tag) & 0x0FFF)

/**
 * scic_sds_io_sequence_increment() -
 *
 * Helper macro to increment the io sequence count modulo 16.  We may find
 * in the future that it will be faster to store the sequence count in such
 * a way that we don't perform the shift operation to build io tag values,
 * so we need a way to increment them correctly.
 */
#define scic_sds_io_sequence_increment(value) \
	((value) = (((value) + 1) & 0x000F))
606
607 /* expander attached sata devices require 3 rnc slots */
608 static inline int scic_sds_remote_device_node_count(struct scic_sds_remote_device *sci_dev)
609 {
610 struct domain_device *dev = sci_dev_to_domain(sci_dev);
611
612 if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
613 !sci_dev->is_direct_attached)
614 return SCU_STP_REMOTE_NODE_COUNT;
615 return SCU_SSP_REMOTE_NODE_COUNT;
616 }
617
/**
 * scic_sds_controller_set_invalid_phy() -
 *
 * This macro will set the bit in the invalid phy mask for this controller
 * object.  This is used to control messages reported for invalid link up
 * notifications (only the first one per phy is reported).
 */
#define scic_sds_controller_set_invalid_phy(controller, phy) \
	((controller)->invalid_phy_mask |= (1 << (phy)->phy_index))

/**
 * scic_sds_controller_clear_invalid_phy() -
 *
 * This macro will clear the bit in the invalid phy mask for this controller
 * object.  This is used to control messages reported for invalid link up
 * notifications (cleared once the phy joins a port).
 */
#define scic_sds_controller_clear_invalid_phy(controller, phy) \
	((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
637
638 static inline struct device *scic_to_dev(struct scic_sds_controller *scic)
639 {
640 return &scic_to_ihost(scic)->pdev->dev;
641 }
642
643 static inline struct device *sciphy_to_dev(struct scic_sds_phy *sci_phy)
644 {
645 struct isci_phy *iphy = sci_phy_to_iphy(sci_phy);
646
647 if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host)
648 return NULL;
649
650 return &iphy->isci_port->isci_host->pdev->dev;
651 }
652
653 static inline struct device *sciport_to_dev(struct scic_sds_port *sci_port)
654 {
655 struct isci_port *iport = sci_port_to_iport(sci_port);
656
657 if (!iport || !iport->isci_host)
658 return NULL;
659
660 return &iport->isci_host->pdev->dev;
661 }
662
663 static inline struct device *scirdev_to_dev(struct scic_sds_remote_device *sci_dev)
664 {
665 struct isci_remote_device *idev =
666 container_of(sci_dev, typeof(*idev), sci);
667
668 if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
669 return NULL;
670
671 return &idev->isci_port->isci_host->pdev->dev;
672 }
673
/* Silicon revisions the driver distinguishes, in ascending order. */
enum {
	ISCI_SI_REVA0,
	ISCI_SI_REVA2,
	ISCI_SI_REVB0,
};

/* Detected silicon revision (one of the ISCI_SI_REV* values above). */
extern int isci_si_rev;
681
/* True only on A0 silicon. */
static inline bool is_a0(void)
{
	return isci_si_rev == ISCI_SI_REVA0;
}
686
/* True only on A2 silicon. */
static inline bool is_a2(void)
{
	return isci_si_rev == ISCI_SI_REVA2;
}
691
/* True on B0 and anything newer than A2 (note: '>', not '=='). */
static inline bool is_b0(void)
{
	return isci_si_rev > ISCI_SI_REVA2;
}
696
/* --- core controller services ------------------------------------------ */

void scic_sds_controller_post_request(struct scic_sds_controller *scic,
				      u32 request);
void scic_sds_controller_release_frame(struct scic_sds_controller *scic,
				       u32 frame_index);
void scic_sds_controller_copy_sata_response(void *response_buffer,
					    void *frame_header,
					    void *frame_buffer);
enum sci_status scic_sds_controller_allocate_remote_node_context(struct scic_sds_controller *scic,
								 struct scic_sds_remote_device *sci_dev,
								 u16 *node_id);
void scic_sds_controller_free_remote_node_context(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *sci_dev,
	u16 node_id);
union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
	struct scic_sds_controller *scic,
	u16 node_id);

struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic,
					     u16 io_tag);

struct scu_task_context *scic_sds_controller_get_task_context_buffer(
	struct scic_sds_controller *scic,
	u16 io_tag);

/* --- power control queue (see struct scic_power_control) --------------- */

void scic_sds_controller_power_control_queue_insert(
	struct scic_sds_controller *scic,
	struct scic_sds_phy *sci_phy);

void scic_sds_controller_power_control_queue_remove(
	struct scic_sds_controller *scic,
	struct scic_sds_phy *sci_phy);

/* --- link state notifications ------------------------------------------ */

void scic_sds_controller_link_up(
	struct scic_sds_controller *scic,
	struct scic_sds_port *sci_port,
	struct scic_sds_phy *sci_phy);

void scic_sds_controller_link_down(
	struct scic_sds_controller *scic,
	struct scic_sds_port *sci_port,
	struct scic_sds_phy *sci_phy);

void scic_sds_controller_remote_device_stopped(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *sci_dev);

void scic_sds_controller_copy_task_context(
	struct scic_sds_controller *scic,
	struct scic_sds_request *this_request);

void scic_sds_controller_register_setup(struct scic_sds_controller *scic);

enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req);

/* --- LLDD (isci_host) lifecycle ---------------------------------------- */

int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
void isci_host_scan_start(struct Scsi_Host *);

int isci_host_init(struct isci_host *);

void isci_host_init_controller_names(
	struct isci_host *isci_host,
	unsigned int controller_idx);

void isci_host_deinit(
	struct isci_host *);

void isci_host_port_link_up(
	struct isci_host *,
	struct scic_sds_port *,
	struct scic_sds_phy *);
int isci_host_dev_found(struct domain_device *);

void isci_host_remote_device_start_complete(
	struct isci_host *,
	struct isci_remote_device *,
	enum sci_status);

/* --- core controller IO path ------------------------------------------- */

void scic_controller_disable_interrupts(
	struct scic_sds_controller *scic);

enum sci_status scic_controller_start_io(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *remote_device,
	struct scic_sds_request *io_request,
	u16 io_tag);

enum sci_task_status scic_controller_start_task(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *remote_device,
	struct scic_sds_request *task_request,
	u16 io_tag);

enum sci_status scic_controller_terminate_request(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *remote_device,
	struct scic_sds_request *request);

enum sci_status scic_controller_complete_io(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *remote_device,
	struct scic_sds_request *io_request);

/* --- IO tag allocation -------------------------------------------------- */

u16 scic_controller_allocate_io_tag(
	struct scic_sds_controller *scic);

enum sci_status scic_controller_free_io_tag(
	struct scic_sds_controller *scic,
	u16 io_tag);

/* --- port configuration agent ------------------------------------------- */

void scic_sds_port_configuration_agent_construct(
	struct scic_sds_port_configuration_agent *port_agent);

enum sci_status scic_sds_port_configuration_agent_initialize(
	struct scic_sds_controller *controller,
	struct scic_sds_port_configuration_agent *port_agent);
#endif /* _SCI_HOST_H_ */
This page took 0.076221 seconds and 5 git commands to generate.