isci: remove 'min memory' infrastructure
drivers/scsi/isci/host.h
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _SCI_HOST_H_
#define _SCI_HOST_H_

#include "remote_device.h"
#include "phy.h"
#include "pool.h"
#include "isci.h"
#include "remote_node_table.h"
#include "registers.h"
#include "scu_unsolicited_frame.h"
#include "unsolicited_frame_control.h"
#include "probe_roms.h"

struct scic_sds_request;
struct scu_task_context;


/**
 * struct scic_power_control -
 *
 * This structure defines the fields for managing power control for direct
 * attached disk devices.
 */
struct scic_power_control {
        /**
         * This field is set when the power control timer is running and cleared when
         * it is not.
         */
        bool timer_started;

        /**
         * Timer to control when the direct attached disks can consume power.
         */
        struct sci_timer timer;

        /**
         * This field is used to keep track of how many phys are put into the
         * requesters field.
         */
        u8 phys_waiting;

        /**
         * This field is used to keep track of how many phys have been granted
         * permission to consume power.
         */
        u8 phys_granted_power;

        /**
         * This field is an array of phys that we are waiting on. The phys are direct
         * mapped into requesters via struct scic_sds_phy.phy_index
         */
        struct scic_sds_phy *requesters[SCI_MAX_PHYS];

};

struct scic_sds_port_configuration_agent;
typedef void (*port_config_fn)(struct scic_sds_controller *,
                               struct scic_sds_port_configuration_agent *,
                               struct scic_sds_port *, struct scic_sds_phy *);

struct scic_sds_port_configuration_agent {
        u16 phy_configured_mask;
        u16 phy_ready_mask;
        struct {
                u8 min_index;
                u8 max_index;
        } phy_valid_port_range[SCI_MAX_PHYS];
        bool timer_pending;
        port_config_fn link_up_handler;
        port_config_fn link_down_handler;
        struct sci_timer timer;
};

/**
 * struct scic_sds_controller -
 *
 * This structure represents the SCU controller object.
 */
struct scic_sds_controller {
        /**
         * This field contains the information for the base controller state
         * machine.
         */
        struct sci_base_state_machine sm;

        /**
         * Timer for controller start/stop operations.
         */
        struct sci_timer timer;

        /**
         * This field contains the user parameters to be utilized for this
         * core controller object.
         */
        union scic_user_parameters user_parameters;

        /**
         * This field contains the OEM parameters to be utilized for this
         * core controller object.
         */
        union scic_oem_parameters oem_parameters;

        /**
         * This field contains the port configuration agent for this controller.
         */
        struct scic_sds_port_configuration_agent port_agent;

        /**
         * This field is the array of device objects that are currently constructed
         * for this controller object. This table is used as a fast lookup of device
         * objects that need to handle device completion notifications from the
         * hardware. The table is RNi based.
         */
        struct scic_sds_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];

        /**
         * This field is the array of IO request objects that are currently active for
         * this controller object. This table is used as a fast lookup of the io
         * request objects that need to handle completion queue notifications. The
         * table is TCi based.
         */
        struct scic_sds_request *io_request_table[SCI_MAX_IO_REQUESTS];

        /**
         * This field is the free RNi data structure.
         */
        struct scic_remote_node_table available_remote_nodes;

        /**
         * This field is the TCi pool used to manage the task context index.
         */
        SCI_POOL_CREATE(tci_pool, u16, SCI_MAX_IO_REQUESTS);

        /**
         * This field is the struct scic_power_control data used to control when direct
         * attached devices can consume power.
         */
        struct scic_power_control power_control;

        /**
         * This field is the array of sequence values for the IO Tag fields. Even
         * though only 4 bits of the field are used for the sequence, the sequence is
         * 16 bits in size so the sequence can be bitwise or'd with the TCi to build
         * the IO Tag value.
         */
        u16 io_request_sequence[SCI_MAX_IO_REQUESTS];

        /**
         * This field is the array of sequence values for the RNi. These are used
         * to control io request build to io request start operations. The sequence
         * value is recorded into an io request when it is built and is checked on
         * the io request start operation to make sure that there was not a device
         * hot plug between the build and start operation.
         */
        u8 remote_device_sequence[SCI_MAX_REMOTE_DEVICES];

        /**
         * This field is a pointer to the memory allocated by the driver for the task
         * context table. This data is shared between the hardware and software.
         */
        struct scu_task_context *task_context_table;

        /**
         * This field is a pointer to the memory allocated by the driver for the
         * remote node context table. This table is shared between the hardware and
         * software.
         */
        union scu_remote_node_context *remote_node_context_table;

        /**
         * This field is a pointer to the completion queue. This memory is
         * written to by the hardware and read by the software.
         */
        u32 *completion_queue;

        /**
         * This field is the software copy of the completion queue get pointer. The
         * controller object writes this value to the hardware after processing the
         * completion entries.
         */
        u32 completion_queue_get;

        /**
         * This field is the minimum of the number of hardware supported port entries
         * and the software requested port entries.
         */
        u32 logical_port_entries;

        /**
         * This field is the minimum of the number of devices supported by the
         * hardware and the number of devices requested by the software.
         */
        u32 remote_node_entries;

        /**
         * This field is the minimum of the number of IO requests supported by the
         * hardware and the number of IO requests requested by the software.
         */
        u32 task_context_entries;

        /**
         * This object contains all of the unsolicited frame specific
         * data utilized by the core controller.
         */
        struct scic_sds_unsolicited_frame_control uf_control;

        /* Phy Startup Data */
        /**
         * Timer for controller phy request startup. On controller start the
         * controller will start each PHY individually in order of phy index.
         */
        struct sci_timer phy_timer;

        /**
         * This field is set when the phy_timer is running and is cleared when
         * the phy_timer is stopped.
         */
        bool phy_startup_timer_pending;

        /**
         * This field is the index of the next phy start. It is initialized to 0 and
         * increments for each phy index that is started.
         */
        u32 next_phy_to_start;

        /**
         * This field controls the invalid link up notifications to the SCI_USER. If
         * an invalid_link_up notification is reported a bit for the PHY index is set
         * so further notifications are not made. Once the PHY object reports link up
         * and is made part of a port then this bit for the PHY index is cleared.
         */
        u8 invalid_phy_mask;

        /*
         * This field saves the current interrupt coalescing number of the controller.
         */
        u16 interrupt_coalesce_number;

        /*
         * This field saves the current interrupt coalescing timeout value in microseconds.
         */
        u32 interrupt_coalesce_timeout;

        /**
         * This field is a pointer to the memory mapped register space for the
         * struct smu_registers.
         */
        struct smu_registers __iomem *smu_registers;

        /**
         * This field is a pointer to the memory mapped register space for the
         * struct scu_registers.
         */
        struct scu_registers __iomem *scu_registers;

};

struct isci_host {
        struct scic_sds_controller sci;
        union scic_oem_parameters oem_parameters;

        int id; /* unique within a given pci device */
        struct dma_pool *dma_pool;
        struct isci_phy phys[SCI_MAX_PHYS];
        struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
        struct sas_ha_struct sas_ha;

        int can_queue;
        spinlock_t queue_lock;
        spinlock_t state_lock;

        struct pci_dev *pdev;

        enum isci_status status;
        #define IHOST_START_PENDING 0
        #define IHOST_STOP_PENDING 1
        unsigned long flags;
        wait_queue_head_t eventq;
        struct Scsi_Host *shost;
        struct tasklet_struct completion_tasklet;
        struct list_head requests_to_complete;
        struct list_head requests_to_errorback;
        spinlock_t scic_lock;

        struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
};

/**
 * enum scic_sds_controller_states - This enumeration depicts all the states
 * for the common controller state machine.
 */
enum scic_sds_controller_states {
        /**
         * Simply the initial state for the base controller state machine.
         */
        SCIC_INITIAL = 0,

        /**
         * This state indicates that the controller is reset. The memory for
         * the controller is in its initial state, but the controller requires
         * initialization.
         * This state is entered from the INITIAL state.
         * This state is entered from the RESETTING state.
         */
        SCIC_RESET,

        /**
         * This state is typically an action state that indicates the controller
         * is in the process of initialization. In this state no new IO operations
         * are permitted.
         * This state is entered from the RESET state.
         */
        SCIC_INITIALIZING,

        /**
         * This state indicates that the controller has been successfully
         * initialized. In this state no new IO operations are permitted.
         * This state is entered from the INITIALIZING state.
         */
        SCIC_INITIALIZED,

        /**
         * This state indicates that the controller is in the process of becoming
         * ready (i.e. starting). In this state no new IO operations are permitted.
         * This state is entered from the INITIALIZED state.
         */
        SCIC_STARTING,

        /**
         * This state indicates the controller is now ready. Thus, the user
         * is able to perform IO operations on the controller.
         * This state is entered from the STARTING state.
         */
        SCIC_READY,

        /**
         * This state is typically an action state that indicates the controller
         * is in the process of resetting. Thus, the user is unable to perform
         * IO operations on the controller. A reset is considered destructive in
         * most cases.
         * This state is entered from the READY state.
         * This state is entered from the FAILED state.
         * This state is entered from the STOPPED state.
         */
        SCIC_RESETTING,

        /**
         * This state indicates that the controller is in the process of stopping.
         * In this state no new IO operations are permitted, but existing IO
         * operations are allowed to complete.
         * This state is entered from the READY state.
         */
        SCIC_STOPPING,

        /**
         * This state indicates that the controller has successfully been stopped.
         * In this state no new IO operations are permitted.
         * This state is entered from the STOPPING state.
         */
        SCIC_STOPPED,

        /**
         * This state indicates that the controller could not successfully be
         * initialized. In this state no new IO operations are permitted.
         * This state is entered from the INITIALIZING state.
         * This state is entered from the STARTING state.
         * This state is entered from the STOPPING state.
         * This state is entered from the RESETTING state.
         */
        SCIC_FAILED,
};


/**
 * struct isci_pci_info - This class represents the pci function containing the
 * controllers. Depending on PCI SKU, there could be up to 2 controllers in
 * the PCI function.
 */
#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)

struct isci_pci_info {
        struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
        struct isci_host *hosts[SCI_MAX_CONTROLLERS];
        struct isci_orom *orom;
};

static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
{
        return pci_get_drvdata(pdev);
}

#define for_each_isci_host(id, ihost, pdev) \
        for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
             id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
             ihost = to_pci_info(pdev)->hosts[++id])
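
/*
 * Illustrative sketch only (not part of the original header): the iterator
 * above walks the hosts[] array in struct isci_pci_info and stops at the
 * first NULL slot, so it visits only the controllers that were actually
 * brought up on this PCI function.  A typical caller looks like:
 *
 *	int i;
 *	struct isci_host *ihost;
 *
 *	for_each_isci_host(i, ihost, pdev)
 *		isci_host_deinit(ihost);
 */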

static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
{
        return isci_host->status;
}

static inline void isci_host_change_state(struct isci_host *isci_host,
                                          enum isci_status status)
{
        unsigned long flags;

        dev_dbg(&isci_host->pdev->dev,
                "%s: isci_host = %p, state = 0x%x",
                __func__,
                isci_host,
                status);
        spin_lock_irqsave(&isci_host->state_lock, flags);
        isci_host->status = status;
        spin_unlock_irqrestore(&isci_host->state_lock, flags);

}

static inline int isci_host_can_queue(struct isci_host *isci_host, int num)
{
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&isci_host->queue_lock, flags);
        if ((isci_host->can_queue - num) < 0) {
                dev_dbg(&isci_host->pdev->dev,
                        "%s: isci_host->can_queue = %d\n",
                        __func__,
                        isci_host->can_queue);
                ret = -SAS_QUEUE_FULL;

        } else
                isci_host->can_queue -= num;

        spin_unlock_irqrestore(&isci_host->queue_lock, flags);

        return ret;
}

static inline void isci_host_can_dequeue(struct isci_host *isci_host, int num)
{
        unsigned long flags;

        spin_lock_irqsave(&isci_host->queue_lock, flags);
        isci_host->can_queue += num;
        spin_unlock_irqrestore(&isci_host->queue_lock, flags);
}
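
/*
 * Illustrative sketch only: isci_host_can_queue() reserves queue depth under
 * queue_lock and returns -SAS_QUEUE_FULL when the reservation would drive
 * can_queue negative; isci_host_can_dequeue() gives the reservation back.  A
 * hypothetical submission path would pair them like this (submit_the_io() is
 * a made-up helper for the example):
 *
 *	if (isci_host_can_queue(ihost, 1))
 *		return -SAS_QUEUE_FULL;
 *
 *	status = submit_the_io(ihost);
 *	if (status)
 *		isci_host_can_dequeue(ihost, 1);
 */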

static inline void wait_for_start(struct isci_host *ihost)
{
        wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
}

static inline void wait_for_stop(struct isci_host *ihost)
{
        wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags));
}

static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev)
{
        wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags));
}

static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
{
        wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags));
}

static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
{
        return dev->port->ha->lldd_ha;
}

static inline struct isci_host *scic_to_ihost(struct scic_sds_controller *scic)
{
        /* XXX delete after merging scic_sds_controller and isci_host */
        struct isci_host *ihost = container_of(scic, typeof(*ihost), sci);

        return ihost;
}

/**
 * INCREMENT_QUEUE_GET() -
 *
 * This macro will increment the specified index and, if the index wraps to 0,
 * it will toggle the cycle bit.
 */
#define INCREMENT_QUEUE_GET(index, cycle, entry_count, bit_toggle) \
        { \
                if ((index) + 1 == entry_count) { \
                        (index) = 0; \
                        (cycle) = (cycle) ^ (bit_toggle); \
                } else { \
                        index = index + 1; \
                } \
        }
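
/*
 * Illustrative sketch only: with a 4-entry queue and a cycle bit of 0x8000
 * (values chosen purely for the example), stepping the get index past the
 * last entry wraps it to 0 and flips the cycle bit, which is how the
 * software copy of the completion queue get pointer tracks hardware
 * wrap-around:
 *
 *	u32 index = 3, cycle = 0;
 *
 *	INCREMENT_QUEUE_GET(index, cycle, 4, 0x8000);
 *	// index == 0, cycle == 0x8000
 *	INCREMENT_QUEUE_GET(index, cycle, 4, 0x8000);
 *	// index == 1, cycle == 0x8000
 */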

/**
 * scic_sds_controller_get_protocol_engine_group() -
 *
 * This macro returns the protocol engine group for this controller object.
 * Presently we only support protocol engine group 0 so just return that
 */
#define scic_sds_controller_get_protocol_engine_group(controller) 0

/**
 * scic_sds_io_tag_construct() -
 *
 * This macro constructs an IO tag from the sequence and index values.
 */
#define scic_sds_io_tag_construct(sequence, task_index) \
        ((sequence) << 12 | (task_index))

/**
 * scic_sds_io_tag_get_sequence() -
 *
 * This macro returns the IO sequence from the IO tag value.
 */
#define scic_sds_io_tag_get_sequence(io_tag) \
        (((io_tag) & 0xF000) >> 12)

/**
 * scic_sds_io_tag_get_index() -
 *
 * This macro returns the TCi from the io tag value
 */
#define scic_sds_io_tag_get_index(io_tag) \
        ((io_tag) & 0x0FFF)

/**
 * scic_sds_io_sequence_increment() -
 *
 * This is a helper macro to increment the io sequence count. We may find in
 * the future that it will be faster to store the sequence count in such a way
 * that we don't perform the shift operation to build io tag values, so we
 * still need a way to increment the sequence correctly.
 */
#define scic_sds_io_sequence_increment(value) \
        ((value) = (((value) + 1) & 0x000F))
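
/*
 * Illustrative sketch only: an IO tag packs the 4-bit sequence into bits
 * 15:12 and the TCi into bits 11:0, so the accessors above simply undo the
 * construction.  With a hypothetical sequence of 0xf and TCi of 0x02a:
 *
 *	u16 seq = 0xf;
 *	u16 tag = scic_sds_io_tag_construct(seq, 0x02a);
 *	// tag == 0xf02a
 *	// scic_sds_io_tag_get_sequence(tag) == 0xf
 *	// scic_sds_io_tag_get_index(tag) == 0x02a
 *
 *	scic_sds_io_sequence_increment(seq);
 *	// seq == 0x0, the 4-bit sequence wraps modulo 16
 */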

/* expander attached sata devices require 3 rnc slots */
static inline int scic_sds_remote_device_node_count(struct scic_sds_remote_device *sci_dev)
{
        struct domain_device *dev = sci_dev_to_domain(sci_dev);

        if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
            !sci_dev->is_direct_attached)
                return SCU_STP_REMOTE_NODE_COUNT;
        return SCU_SSP_REMOTE_NODE_COUNT;
}

/**
 * scic_sds_controller_set_invalid_phy() -
 *
 * This macro will set the bit in the invalid phy mask for this controller
 * object. This is used to control messages reported for invalid link up
 * notifications.
 */
#define scic_sds_controller_set_invalid_phy(controller, phy) \
        ((controller)->invalid_phy_mask |= (1 << (phy)->phy_index))

/**
 * scic_sds_controller_clear_invalid_phy() -
 *
 * This macro will clear the bit in the invalid phy mask for this controller
 * object. This is used to control messages reported for invalid link up
 * notifications.
 */
#define scic_sds_controller_clear_invalid_phy(controller, phy) \
        ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))

static inline struct device *scic_to_dev(struct scic_sds_controller *scic)
{
        return &scic_to_ihost(scic)->pdev->dev;
}

static inline struct device *sciphy_to_dev(struct scic_sds_phy *sci_phy)
{
        struct isci_phy *iphy = sci_phy_to_iphy(sci_phy);

        if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host)
                return NULL;

        return &iphy->isci_port->isci_host->pdev->dev;
}

static inline struct device *sciport_to_dev(struct scic_sds_port *sci_port)
{
        struct isci_port *iport = sci_port_to_iport(sci_port);

        if (!iport || !iport->isci_host)
                return NULL;

        return &iport->isci_host->pdev->dev;
}

static inline struct device *scirdev_to_dev(struct scic_sds_remote_device *sci_dev)
{
        struct isci_remote_device *idev =
                        container_of(sci_dev, typeof(*idev), sci);

        if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
                return NULL;

        return &idev->isci_port->isci_host->pdev->dev;
}

enum {
        ISCI_SI_REVA0,
        ISCI_SI_REVA2,
        ISCI_SI_REVB0,
        ISCI_SI_REVC0
};

extern int isci_si_rev;

static inline bool is_a0(void)
{
        return isci_si_rev == ISCI_SI_REVA0;
}

static inline bool is_a2(void)
{
        return isci_si_rev == ISCI_SI_REVA2;
}

static inline bool is_b0(void)
{
        return isci_si_rev == ISCI_SI_REVB0;
}

static inline bool is_c0(void)
{
        return isci_si_rev > ISCI_SI_REVB0;
}

void scic_sds_controller_post_request(struct scic_sds_controller *scic,
                                      u32 request);
void scic_sds_controller_release_frame(struct scic_sds_controller *scic,
                                       u32 frame_index);
void scic_sds_controller_copy_sata_response(void *response_buffer,
                                            void *frame_header,
                                            void *frame_buffer);
enum sci_status scic_sds_controller_allocate_remote_node_context(struct scic_sds_controller *scic,
                                                                 struct scic_sds_remote_device *sci_dev,
                                                                 u16 *node_id);
void scic_sds_controller_free_remote_node_context(
        struct scic_sds_controller *scic,
        struct scic_sds_remote_device *sci_dev,
        u16 node_id);
union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
        struct scic_sds_controller *scic,
        u16 node_id);

struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic,
                                             u16 io_tag);

struct scu_task_context *scic_sds_controller_get_task_context_buffer(
        struct scic_sds_controller *scic,
        u16 io_tag);

void scic_sds_controller_power_control_queue_insert(
        struct scic_sds_controller *scic,
        struct scic_sds_phy *sci_phy);

void scic_sds_controller_power_control_queue_remove(
        struct scic_sds_controller *scic,
        struct scic_sds_phy *sci_phy);

void scic_sds_controller_link_up(
        struct scic_sds_controller *scic,
        struct scic_sds_port *sci_port,
        struct scic_sds_phy *sci_phy);

void scic_sds_controller_link_down(
        struct scic_sds_controller *scic,
        struct scic_sds_port *sci_port,
        struct scic_sds_phy *sci_phy);

void scic_sds_controller_remote_device_stopped(
        struct scic_sds_controller *scic,
        struct scic_sds_remote_device *sci_dev);

void scic_sds_controller_copy_task_context(
        struct scic_sds_controller *scic,
        struct scic_sds_request *this_request);

void scic_sds_controller_register_setup(struct scic_sds_controller *scic);

enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req);
int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
void isci_host_scan_start(struct Scsi_Host *);

int isci_host_init(struct isci_host *);

void isci_host_init_controller_names(
        struct isci_host *isci_host,
        unsigned int controller_idx);

void isci_host_deinit(
        struct isci_host *);

void isci_host_port_link_up(
        struct isci_host *,
        struct scic_sds_port *,
        struct scic_sds_phy *);
int isci_host_dev_found(struct domain_device *);

void isci_host_remote_device_start_complete(
        struct isci_host *,
        struct isci_remote_device *,
        enum sci_status);

void scic_controller_disable_interrupts(
        struct scic_sds_controller *scic);

enum sci_status scic_controller_start_io(
        struct scic_sds_controller *scic,
        struct scic_sds_remote_device *remote_device,
        struct scic_sds_request *io_request,
        u16 io_tag);

enum sci_task_status scic_controller_start_task(
        struct scic_sds_controller *scic,
        struct scic_sds_remote_device *remote_device,
        struct scic_sds_request *task_request,
        u16 io_tag);

enum sci_status scic_controller_terminate_request(
        struct scic_sds_controller *scic,
        struct scic_sds_remote_device *remote_device,
        struct scic_sds_request *request);

enum sci_status scic_controller_complete_io(
        struct scic_sds_controller *scic,
        struct scic_sds_remote_device *remote_device,
        struct scic_sds_request *io_request);

u16 scic_controller_allocate_io_tag(
        struct scic_sds_controller *scic);

enum sci_status scic_controller_free_io_tag(
        struct scic_sds_controller *scic,
        u16 io_tag);

void scic_sds_port_configuration_agent_construct(
        struct scic_sds_port_configuration_agent *port_agent);

enum sci_status scic_sds_port_configuration_agent_initialize(
        struct scic_sds_controller *controller,
        struct scic_sds_port_configuration_agent *port_agent);
#endif