drivers/scsi/device_handler/scsi_dh_rdac.c
/*
 * LSI/Engenio/NetApp E-Series RDAC SCSI Device Handler
 *
 * Copyright (C) 2005 Mike Christie. All rights reserved.
 * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/module.h>

#define RDAC_NAME "rdac"
#define RDAC_RETRY_COUNT 5

/*
 * LSI mode page stuff
 *
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */
#define RDAC_QUIESCENCE_TIME 20
/*
 * Page Codes
 */
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c

/*
 * Controller mode definitions
 */
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02

/*
 * RDAC Options field
 */
#define RDAC_FORCED_QUIESENCE 0x02

#define RDAC_TIMEOUT (60 * HZ)
#define RDAC_RETRIES 3

struct rdac_mode_6_hdr {
        u8 data_len;
        u8 medium_type;
        u8 device_params;
        u8 block_desc_len;
};

struct rdac_mode_10_hdr {
        u16 data_len;
        u8 medium_type;
        u8 device_params;
        u16 reserved;
        u16 block_desc_len;
};

struct rdac_mode_common {
        u8 controller_serial[16];
        u8 alt_controller_serial[16];
        u8 rdac_mode[2];
        u8 alt_rdac_mode[2];
        u8 quiescence_timeout;
        u8 rdac_options;
};

struct rdac_pg_legacy {
        struct rdac_mode_6_hdr hdr;
        u8 page_code;
        u8 page_len;
        struct rdac_mode_common common;
#define MODE6_MAX_LUN 32
        u8 lun_table[MODE6_MAX_LUN];
        u8 reserved2[32];
        u8 reserved3;
        u8 reserved4;
};

struct rdac_pg_expanded {
        struct rdac_mode_10_hdr hdr;
        u8 page_code;
        u8 subpage_code;
        u8 page_len[2];
        struct rdac_mode_common common;
        u8 lun_table[256];
        u8 reserved3;
        u8 reserved4;
};
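
/*
 * Note: the legacy page (used with MODE SELECT(6)) can describe at most
 * MODE6_MAX_LUN (32) LUNs, while the expanded page (used with
 * MODE SELECT(10)) covers 256.  Which layout is used for a given
 * controller is decided later in set_mode_select() from the C2
 * inquiry data.
 */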

struct c9_inquiry {
        u8 peripheral_info;
        u8 page_code;   /* 0xC9 */
        u8 reserved1;
        u8 page_len;
        u8 page_id[4];  /* "vace" */
        u8 avte_cvp;
        u8 path_prio;
        u8 reserved2[38];
};

#define SUBSYS_ID_LEN 16
#define SLOT_ID_LEN 2
#define ARRAY_LABEL_LEN 31

struct c4_inquiry {
        u8 peripheral_info;
        u8 page_code;   /* 0xC4 */
        u8 reserved1;
        u8 page_len;
        u8 page_id[4];  /* "subs" */
        u8 subsys_id[SUBSYS_ID_LEN];
        u8 revision[4];
        u8 slot_id[SLOT_ID_LEN];
        u8 reserved[2];
};

#define UNIQUE_ID_LEN 16
struct c8_inquiry {
        u8 peripheral_info;
        u8 page_code;   /* 0xC8 */
        u8 reserved1;
        u8 page_len;
        u8 page_id[4];  /* "edid" */
        u8 reserved2[3];
        u8 vol_uniq_id_len;
        u8 vol_uniq_id[16];
        u8 vol_user_label_len;
        u8 vol_user_label[60];
        u8 array_uniq_id_len;
        u8 array_unique_id[UNIQUE_ID_LEN];
        u8 array_user_label_len;
        u8 array_user_label[60];
        u8 lun[8];
};

struct rdac_controller {
        u8 array_id[UNIQUE_ID_LEN];
        int use_ms10;
        struct kref kref;
        struct list_head node;  /* list of all controllers */
        union {
                struct rdac_pg_legacy legacy;
                struct rdac_pg_expanded expanded;
        } mode_select;
        u8 index;
        u8 array_name[ARRAY_LABEL_LEN];
        struct Scsi_Host *host;
        spinlock_t ms_lock;
        int ms_queued;
        struct work_struct ms_work;
        struct scsi_device *ms_sdev;
        struct list_head ms_head;
        struct list_head dh_list;
};
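
/*
 * Controller objects are shared: get_controller() matches on
 * (array_id, index, host) and takes a kref on an existing entry, and
 * release_controller() removes the entry from ctlr_list once the last
 * device handler detaches.  Lookups and teardown are serialized by
 * list_lock.
 */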

struct c2_inquiry {
        u8 peripheral_info;
        u8 page_code;   /* 0xC2 */
        u8 reserved1;
        u8 page_len;
        u8 page_id[4];  /* "swr4" */
        u8 sw_version[3];
        u8 sw_date[3];
        u8 features_enabled;
        u8 max_lun_supported;
        u8 partitions[239];     /* Total allocation length should be 0xFF */
};

struct rdac_dh_data {
        struct list_head node;
        struct rdac_controller *ctlr;
        struct scsi_device *sdev;
#define UNINITIALIZED_LUN (1 << 8)
        unsigned lun;

#define RDAC_MODE 0
#define RDAC_MODE_AVT 1
#define RDAC_MODE_IOSHIP 2
        unsigned char mode;

#define RDAC_STATE_ACTIVE 0
#define RDAC_STATE_PASSIVE 1
        unsigned char state;

#define RDAC_LUN_UNOWNED 0
#define RDAC_LUN_OWNED 1
        char lun_state;

#define RDAC_PREFERRED 0
#define RDAC_NON_PREFERRED 1
        char preferred;

        unsigned char sense[SCSI_SENSE_BUFFERSIZE];
        union {
                struct c2_inquiry c2;
                struct c4_inquiry c4;
                struct c8_inquiry c8;
                struct c9_inquiry c9;
        } inq;
};

static const char *mode[] = {
        "RDAC",
        "AVT",
        "IOSHIP",
};
static const char *lun_state[] =
{
        "unowned",
        "owned",
};

struct rdac_queue_data {
        struct list_head entry;
        struct rdac_dh_data *h;
        activate_complete callback_fn;
        void *callback_data;
};
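
/*
 * One rdac_queue_data is allocated per activation request and chained
 * on the owning controller's ms_head list; send_mode_select() later
 * drains that list and completes every callback with the result of a
 * single, batched MODE SELECT.
 */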

static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
static struct workqueue_struct *kmpath_rdacd;
static void send_mode_select(struct work_struct *work);

/*
 * Module parameter to enable rdac debug logging.
 * 2 bits for each type of logging, only two types defined for now.
 * Can be enhanced if required at a later point.
 */
static int rdac_logging = 1;
module_param(rdac_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, "
                "Default is 1 - failover logging enabled, "
                "set it to 0xF to enable all the logs");

#define RDAC_LOG_FAILOVER 0
#define RDAC_LOG_SENSE 2

#define RDAC_LOG_BITS 2

#define RDAC_LOG_LEVEL(SHIFT) \
        ((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1))

#define RDAC_LOG(SHIFT, sdev, f, arg...) \
do { \
        if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \
                sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
} while (0)
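
/*
 * Example: the failover level lives in bits 0-1 of rdac_logging and the
 * sense level in bits 2-3, so rdac_logging=0x5 enables level-1 logging
 * for both, while the default of 1 logs failover events only.  Because
 * the parameter is writable (S_IWUSR), it can typically be changed at
 * runtime via /sys/module/scsi_dh_rdac/parameters/rdac_logging.
 */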

static struct request *get_rdac_req(struct scsi_device *sdev,
                        void *buffer, unsigned buflen, int rw)
{
        struct request *rq;
        struct request_queue *q = sdev->request_queue;

        rq = blk_get_request(q, rw, GFP_NOIO);

        if (IS_ERR(rq)) {
                sdev_printk(KERN_INFO, sdev,
                            "get_rdac_req: blk_get_request failed.\n");
                return NULL;
        }
        blk_rq_set_block_pc(rq);

        if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
                blk_put_request(rq);
                sdev_printk(KERN_INFO, sdev,
                            "get_rdac_req: blk_rq_map_kern failed.\n");
                return NULL;
        }

        rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
                         REQ_FAILFAST_DRIVER;
        rq->retries = RDAC_RETRIES;
        rq->timeout = RDAC_TIMEOUT;

        return rq;
}

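/*
 * Build the failover MODE SELECT for every LUN queued on @list: each
 * queued LUN is flagged with 0x81 in the page's lun_table and the
 * common header requests RDAC_MODE_TRANSFER_SPECIFIED_LUNS, i.e. the
 * array is asked to move ownership of exactly those LUNs to the
 * controller the command is sent through.
 */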
static struct request *rdac_failover_get(struct scsi_device *sdev,
                        struct rdac_dh_data *h, struct list_head *list)
{
        struct request *rq;
        struct rdac_mode_common *common;
        unsigned data_size;
        struct rdac_queue_data *qdata;
        u8 *lun_table;

        if (h->ctlr->use_ms10) {
                struct rdac_pg_expanded *rdac_pg;

                data_size = sizeof(struct rdac_pg_expanded);
                rdac_pg = &h->ctlr->mode_select.expanded;
                memset(rdac_pg, 0, data_size);
                common = &rdac_pg->common;
                rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
                rdac_pg->subpage_code = 0x1;
                rdac_pg->page_len[0] = 0x01;
                rdac_pg->page_len[1] = 0x28;
                lun_table = rdac_pg->lun_table;
        } else {
                struct rdac_pg_legacy *rdac_pg;

                data_size = sizeof(struct rdac_pg_legacy);
                rdac_pg = &h->ctlr->mode_select.legacy;
                memset(rdac_pg, 0, data_size);
                common = &rdac_pg->common;
                rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
                rdac_pg->page_len = 0x68;
                lun_table = rdac_pg->lun_table;
        }
        common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
        common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
        common->rdac_options = RDAC_FORCED_QUIESENCE;

        list_for_each_entry(qdata, list, entry) {
                lun_table[qdata->h->lun] = 0x81;
        }

        /* get request for block layer packet command */
        rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
        if (!rq)
                return NULL;

        /* Prepare the command. */
        if (h->ctlr->use_ms10) {
                rq->cmd[0] = MODE_SELECT_10;
                rq->cmd[7] = data_size >> 8;
                rq->cmd[8] = data_size & 0xff;
        } else {
                rq->cmd[0] = MODE_SELECT;
                rq->cmd[4] = data_size;
        }
        rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

        rq->sense = h->sense;
        memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
        rq->sense_len = 0;

        return rq;
}

static void release_controller(struct kref *kref)
{
        struct rdac_controller *ctlr;
        ctlr = container_of(kref, struct rdac_controller, kref);

        list_del(&ctlr->node);
        kfree(ctlr);
}

static struct rdac_controller *get_controller(int index, char *array_name,
                        u8 *array_id, struct scsi_device *sdev)
{
        struct rdac_controller *ctlr, *tmp;

        list_for_each_entry(tmp, &ctlr_list, node) {
                if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) &&
                    (tmp->index == index) &&
                    (tmp->host == sdev->host)) {
                        kref_get(&tmp->kref);
                        return tmp;
                }
        }
        ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
        if (!ctlr)
                return NULL;

        /* initialize fields of controller */
        memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN);
        ctlr->index = index;
        ctlr->host = sdev->host;
        memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);

        kref_init(&ctlr->kref);
        ctlr->use_ms10 = -1;
        ctlr->ms_queued = 0;
        ctlr->ms_sdev = NULL;
        spin_lock_init(&ctlr->ms_lock);
        INIT_WORK(&ctlr->ms_work, send_mode_select);
        INIT_LIST_HEAD(&ctlr->ms_head);
        list_add(&ctlr->node, &ctlr_list);
        INIT_LIST_HEAD(&ctlr->dh_list);

        return ctlr;
}

static int submit_inquiry(struct scsi_device *sdev, int page_code,
                        unsigned int len, struct rdac_dh_data *h)
{
        struct request *rq;
        struct request_queue *q = sdev->request_queue;
        int err = SCSI_DH_RES_TEMP_UNAVAIL;

        rq = get_rdac_req(sdev, &h->inq, len, READ);
        if (!rq)
                goto done;

        /* Prepare the command. */
        rq->cmd[0] = INQUIRY;
        rq->cmd[1] = 1;
        rq->cmd[2] = page_code;
        rq->cmd[4] = len;
        rq->cmd_len = COMMAND_SIZE(INQUIRY);

        rq->sense = h->sense;
        memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
        rq->sense_len = 0;

        err = blk_execute_rq(q, NULL, rq, 1);
        if (err == -EIO)
                err = SCSI_DH_IO;

        blk_put_request(rq);
done:
        return err;
}

static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
                        char *array_name, u8 *array_id)
{
        int err, i;
        struct c8_inquiry *inqp;

        err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
        if (err == SCSI_DH_OK) {
                inqp = &h->inq.c8;
                if (inqp->page_code != 0xc8)
                        return SCSI_DH_NOSYS;
                if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
                    inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
                        return SCSI_DH_NOSYS;
                h->lun = inqp->lun[7]; /* Uses only the last byte */

                for (i = 0; i < ARRAY_LABEL_LEN - 1; ++i)
                        *(array_name+i) = inqp->array_user_label[(2*i)+1];

                *(array_name+ARRAY_LABEL_LEN-1) = '\0';
                memset(array_id, 0, UNIQUE_ID_LEN);
                memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len);
        }
        return err;
}

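/*
 * check_ownership() decodes the C9 ("vace") inquiry page: bit 5 of
 * avte_cvp selects IOSHIP mode, bit 7 AVT mode, otherwise plain RDAC;
 * bit 0 reports current ownership, and bit 0 of path_prio marks the
 * preferred path.  The resulting access state is propagated to every
 * device hanging off the same controller via its dh_list.
 */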
static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
{
        int err, access_state;
        struct rdac_dh_data *tmp;
        struct c9_inquiry *inqp;

        h->state = RDAC_STATE_ACTIVE;
        err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
        if (err == SCSI_DH_OK) {
                inqp = &h->inq.c9;
                /* detect the operating mode */
                if ((inqp->avte_cvp >> 5) & 0x1)
                        h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */
                else if (inqp->avte_cvp >> 7)
                        h->mode = RDAC_MODE_AVT; /* LUN in AVT mode */
                else
                        h->mode = RDAC_MODE; /* LUN in RDAC mode */

                /* Update ownership */
                if (inqp->avte_cvp & 0x1) {
                        h->lun_state = RDAC_LUN_OWNED;
                        access_state = SCSI_ACCESS_STATE_OPTIMAL;
                } else {
                        h->lun_state = RDAC_LUN_UNOWNED;
                        if (h->mode == RDAC_MODE) {
                                h->state = RDAC_STATE_PASSIVE;
                                access_state = SCSI_ACCESS_STATE_STANDBY;
                        } else
                                access_state = SCSI_ACCESS_STATE_ACTIVE;
                }

                /* Update path prio */
                if (inqp->path_prio & 0x1) {
                        h->preferred = RDAC_PREFERRED;
                        access_state |= SCSI_ACCESS_STATE_PREFERRED;
                } else
                        h->preferred = RDAC_NON_PREFERRED;
                rcu_read_lock();
                list_for_each_entry_rcu(tmp, &h->ctlr->dh_list, node) {
                        /* h->sdev should always be valid */
                        BUG_ON(!tmp->sdev);
                        tmp->sdev->access_state = access_state;
                }
                rcu_read_unlock();
        }

        return err;
}

static int initialize_controller(struct scsi_device *sdev,
                struct rdac_dh_data *h, char *array_name, u8 *array_id)
{
        int err, index;
        struct c4_inquiry *inqp;

        err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
        if (err == SCSI_DH_OK) {
                inqp = &h->inq.c4;
                /* get the controller index */
                if (inqp->slot_id[1] == 0x31)
                        index = 0;
                else
                        index = 1;

                spin_lock(&list_lock);
                h->ctlr = get_controller(index, array_name, array_id, sdev);
                if (!h->ctlr)
                        err = SCSI_DH_RES_TEMP_UNAVAIL;
                else {
                        list_add_rcu(&h->node, &h->ctlr->dh_list);
                        h->sdev = sdev;
                }
                spin_unlock(&list_lock);
        }
        return err;
}

static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
        int err;
        struct c2_inquiry *inqp;

        err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
        if (err == SCSI_DH_OK) {
                inqp = &h->inq.c2;
                /*
                 * If MODE6_MAX_LUN or more LUNs are supported, use
                 * mode select 10
                 */
                if (inqp->max_lun_supported >= MODE6_MAX_LUN)
                        h->ctlr->use_ms10 = 1;
                else
                        h->ctlr->use_ms10 = 0;
        }
        return err;
}

static int mode_select_handle_sense(struct scsi_device *sdev,
                                        unsigned char *sensebuf)
{
        struct scsi_sense_hdr sense_hdr;
        int err = SCSI_DH_IO, ret;
        struct rdac_dh_data *h = sdev->handler_data;

        ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
        if (!ret)
                goto done;

        switch (sense_hdr.sense_key) {
        case NO_SENSE:
        case ABORTED_COMMAND:
        case UNIT_ATTENTION:
                err = SCSI_DH_RETRY;
                break;
        case NOT_READY:
                if (sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x01)
                        /* LUN Not Ready and is in the Process of Becoming
                         * Ready
                         */
                        err = SCSI_DH_RETRY;
                break;
        case ILLEGAL_REQUEST:
                if (sense_hdr.asc == 0x91 && sense_hdr.ascq == 0x36)
                        /*
                         * Command Lock contention
                         */
                        err = SCSI_DH_IMM_RETRY;
                break;
        default:
                break;
        }

        RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
                "MODE_SELECT returned with sense %02x/%02x/%02x",
                (char *) h->ctlr->array_name, h->ctlr->index,
                sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);

done:
        return err;
}

static void send_mode_select(struct work_struct *work)
{
        struct rdac_controller *ctlr =
                container_of(work, struct rdac_controller, ms_work);
        struct request *rq;
        struct scsi_device *sdev = ctlr->ms_sdev;
        struct rdac_dh_data *h = sdev->handler_data;
        struct request_queue *q = sdev->request_queue;
        int err, retry_cnt = RDAC_RETRY_COUNT;
        struct rdac_queue_data *tmp, *qdata;
        LIST_HEAD(list);

        spin_lock(&ctlr->ms_lock);
        list_splice_init(&ctlr->ms_head, &list);
        ctlr->ms_queued = 0;
        ctlr->ms_sdev = NULL;
        spin_unlock(&ctlr->ms_lock);

retry:
        err = SCSI_DH_RES_TEMP_UNAVAIL;
        rq = rdac_failover_get(sdev, h, &list);
        if (!rq)
                goto done;

        RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
                "%s MODE_SELECT command",
                (char *) h->ctlr->array_name, h->ctlr->index,
                (retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");

        err = blk_execute_rq(q, NULL, rq, 1);
        blk_put_request(rq);
        if (err != SCSI_DH_OK) {
                err = mode_select_handle_sense(sdev, h->sense);
                if (err == SCSI_DH_RETRY && retry_cnt--)
                        goto retry;
                if (err == SCSI_DH_IMM_RETRY)
                        goto retry;
        }
        if (err == SCSI_DH_OK) {
                h->state = RDAC_STATE_ACTIVE;
                RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
                        "MODE_SELECT completed",
                        (char *) h->ctlr->array_name, h->ctlr->index);
        }

done:
        list_for_each_entry_safe(qdata, tmp, &list, entry) {
                list_del(&qdata->entry);
                if (err == SCSI_DH_OK)
                        qdata->h->state = RDAC_STATE_ACTIVE;
                if (qdata->callback_fn)
                        qdata->callback_fn(qdata->callback_data, err);
                kfree(qdata);
        }
        return;
}

static int queue_mode_select(struct scsi_device *sdev,
                        activate_complete fn, void *data)
{
        struct rdac_queue_data *qdata;
        struct rdac_controller *ctlr;

        qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
        if (!qdata)
                return SCSI_DH_RETRY;

        qdata->h = sdev->handler_data;
        qdata->callback_fn = fn;
        qdata->callback_data = data;

        ctlr = qdata->h->ctlr;
        spin_lock(&ctlr->ms_lock);
        list_add_tail(&qdata->entry, &ctlr->ms_head);
        if (!ctlr->ms_queued) {
                ctlr->ms_queued = 1;
                ctlr->ms_sdev = sdev;
                queue_work(kmpath_rdacd, &ctlr->ms_work);
        }
        spin_unlock(&ctlr->ms_lock);
        return SCSI_DH_OK;
}

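/*
 * A MODE SELECT is only queued when it can actually change anything:
 * in RDAC mode whenever the LUN is unowned, and in IOSHIP mode only
 * when the LUN is unowned and this is the preferred path.  In AVT mode
 * the array is expected to transfer LUNs on its own, so activation
 * completes immediately.
 */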
static int rdac_activate(struct scsi_device *sdev,
                        activate_complete fn, void *data)
{
        struct rdac_dh_data *h = sdev->handler_data;
        int err = SCSI_DH_OK;
        int act = 0;

        err = check_ownership(sdev, h);
        if (err != SCSI_DH_OK)
                goto done;

        switch (h->mode) {
        case RDAC_MODE:
                if (h->lun_state == RDAC_LUN_UNOWNED)
                        act = 1;
                break;
        case RDAC_MODE_IOSHIP:
                if ((h->lun_state == RDAC_LUN_UNOWNED) &&
                    (h->preferred == RDAC_PREFERRED))
                        act = 1;
                break;
        default:
                break;
        }

        if (act) {
                err = queue_mode_select(sdev, fn, data);
                if (err == SCSI_DH_OK)
                        return 0;
        }
done:
        if (fn)
                fn(data, err);
        return 0;
}

static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
{
        struct rdac_dh_data *h = sdev->handler_data;
        int ret = BLKPREP_OK;

        if (h->state != RDAC_STATE_ACTIVE) {
                ret = BLKPREP_KILL;
                req->cmd_flags |= REQ_QUIET;
        }
        return ret;
}

static int rdac_check_sense(struct scsi_device *sdev,
                                struct scsi_sense_hdr *sense_hdr)
{
        struct rdac_dh_data *h = sdev->handler_data;

        RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, "
                "I/O returned with sense %02x/%02x/%02x",
                (char *) h->ctlr->array_name, h->ctlr->index,
                sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);

        switch (sense_hdr->sense_key) {
        case NOT_READY:
                if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
                        /* LUN Not Ready - Logical Unit Not Ready and is in
                         * the process of becoming ready
                         * Just retry.
                         */
                        return ADD_TO_MLQUEUE;
                if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
                        /* LUN Not Ready - Storage firmware incompatible
                         * Manual code synchronisation required.
                         *
                         * Nothing we can do here. Try to bypass the path.
                         */
                        return SUCCESS;
                if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
                        /* LUN Not Ready - Quiescence in progress
                         *
                         * Just retry and wait.
                         */
                        return ADD_TO_MLQUEUE;
                if (sense_hdr->asc == 0xA1 && sense_hdr->ascq == 0x02)
                        /* LUN Not Ready - Quiescence in progress
                         * or has been achieved
                         * Just retry.
                         */
                        return ADD_TO_MLQUEUE;
                break;
        case ILLEGAL_REQUEST:
                if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
                        /* Invalid Request - Current Logical Unit Ownership.
                         * Controller is not the current owner of the LUN,
                         * Fail the path, so that the other path can be used.
                         */
                        h->state = RDAC_STATE_PASSIVE;
                        return SUCCESS;
                }
                break;
        case UNIT_ATTENTION:
                if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
                        /*
                         * Power On, Reset, or Bus Device Reset, just retry.
                         */
                        return ADD_TO_MLQUEUE;
                if (sense_hdr->asc == 0x8b && sense_hdr->ascq == 0x02)
                        /*
                         * Quiescence in progress, just retry.
                         */
                        return ADD_TO_MLQUEUE;
                break;
        }
        /* success just means we do not care what scsi-ml does */
        return SCSI_RETURN_NOT_HANDLED;
}

static int rdac_bus_attach(struct scsi_device *sdev)
{
        struct rdac_dh_data *h;
        int err;
        char array_name[ARRAY_LABEL_LEN];
        char array_id[UNIQUE_ID_LEN];

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
                return -ENOMEM;
        h->lun = UNINITIALIZED_LUN;
        h->state = RDAC_STATE_ACTIVE;

        err = get_lun_info(sdev, h, array_name, array_id);
        if (err != SCSI_DH_OK)
                goto failed;

        err = initialize_controller(sdev, h, array_name, array_id);
        if (err != SCSI_DH_OK)
                goto failed;

        err = check_ownership(sdev, h);
        if (err != SCSI_DH_OK)
                goto clean_ctlr;

        err = set_mode_select(sdev, h);
        if (err != SCSI_DH_OK)
                goto clean_ctlr;

        sdev_printk(KERN_NOTICE, sdev,
                    "%s: LUN %d (%s) (%s)\n",
                    RDAC_NAME, h->lun, mode[(int)h->mode],
                    lun_state[(int)h->lun_state]);

        sdev->handler_data = h;
        return 0;

clean_ctlr:
        spin_lock(&list_lock);
        kref_put(&h->ctlr->kref, release_controller);
        spin_unlock(&list_lock);

failed:
        kfree(h);
        return -EINVAL;
}

static void rdac_bus_detach(struct scsi_device *sdev)
{
        struct rdac_dh_data *h = sdev->handler_data;

        if (h->ctlr && h->ctlr->ms_queued)
                flush_workqueue(kmpath_rdacd);

        spin_lock(&list_lock);
        if (h->ctlr) {
                list_del_rcu(&h->node);
                h->sdev = NULL;
                kref_put(&h->ctlr->kref, release_controller);
        }
        spin_unlock(&list_lock);
        sdev->handler_data = NULL;
        kfree(h);
}

static struct scsi_device_handler rdac_dh = {
        .name = RDAC_NAME,
        .module = THIS_MODULE,
        .prep_fn = rdac_prep_fn,
        .check_sense = rdac_check_sense,
        .attach = rdac_bus_attach,
        .detach = rdac_bus_detach,
        .activate = rdac_activate,
};

static int __init rdac_init(void)
{
        int r;

        r = scsi_register_device_handler(&rdac_dh);
        if (r != 0) {
                printk(KERN_ERR "Failed to register scsi device handler.\n");
                goto done;
        }

        /*
         * Create workqueue to handle mode selects for rdac
         */
        kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd");
        if (!kmpath_rdacd) {
                scsi_unregister_device_handler(&rdac_dh);
                printk(KERN_ERR "kmpath_rdacd creation failed.\n");

                r = -EINVAL;
        }
done:
        return r;
}

static void __exit rdac_exit(void)
{
        destroy_workqueue(kmpath_rdacd);
        scsi_unregister_device_handler(&rdac_dh);
}

module_init(rdac_init);
module_exit(rdac_exit);

MODULE_DESCRIPTION("Multipath LSI/Engenio/NetApp E-Series RDAC driver");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_VERSION("01.00.0000.0000");
MODULE_LICENSE("GPL");