lib: lz4: cleanup unaligned access efficiency detection
[deliverable/linux.git] / drivers / scsi / scsi_sysfs.c
1 /*
2 * scsi_sysfs.c
3 *
4 * SCSI sysfs interface routines.
5 *
6 * Created to pull SCSI mid layer sysfs routines into one file.
7 */
8
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/init.h>
12 #include <linux/blkdev.h>
13 #include <linux/device.h>
14 #include <linux/pm_runtime.h>
15
16 #include <scsi/scsi.h>
17 #include <scsi/scsi_device.h>
18 #include <scsi/scsi_host.h>
19 #include <scsi/scsi_tcq.h>
20 #include <scsi/scsi_dh.h>
21 #include <scsi/scsi_transport.h>
22 #include <scsi/scsi_driver.h>
23
24 #include "scsi_priv.h"
25 #include "scsi_logging.h"
26
27 static struct device_type scsi_dev_type;
28
/*
 * Table mapping enum scsi_device_state values to the strings shown in
 * and accepted by the per-device sysfs "state" attribute.  Searched
 * linearly by scsi_device_state_name() and store_state_field().
 */
static const struct {
	enum scsi_device_state value;
	char *name;
} sdev_states[] = {
	{ SDEV_CREATED, "created" },
	{ SDEV_RUNNING, "running" },
	{ SDEV_CANCEL, "cancel" },
	{ SDEV_DEL, "deleted" },
	{ SDEV_QUIESCE, "quiesce" },
	{ SDEV_OFFLINE, "offline" },
	{ SDEV_TRANSPORT_OFFLINE, "transport-offline" },
	{ SDEV_BLOCK, "blocked" },
	{ SDEV_CREATED_BLOCK, "created-blocked" },
};
43
44 const char *scsi_device_state_name(enum scsi_device_state state)
45 {
46 int i;
47 char *name = NULL;
48
49 for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
50 if (sdev_states[i].value == state) {
51 name = sdev_states[i].name;
52 break;
53 }
54 }
55 return name;
56 }
57
/*
 * Table mapping enum scsi_host_state values to the strings shown in
 * and accepted by the host sysfs "state" attribute.  Searched linearly
 * by scsi_host_state_name() and store_shost_state().
 */
static const struct {
	enum scsi_host_state value;
	char *name;
} shost_states[] = {
	{ SHOST_CREATED, "created" },
	{ SHOST_RUNNING, "running" },
	{ SHOST_CANCEL, "cancel" },
	{ SHOST_DEL, "deleted" },
	{ SHOST_RECOVERY, "recovery" },
	{ SHOST_CANCEL_RECOVERY, "cancel/recovery" },
	{ SHOST_DEL_RECOVERY, "deleted/recovery", },
};
70 const char *scsi_host_state_name(enum scsi_host_state state)
71 {
72 int i;
73 char *name = NULL;
74
75 for (i = 0; i < ARRAY_SIZE(shost_states); i++) {
76 if (shost_states[i].value == state) {
77 name = shost_states[i].name;
78 break;
79 }
80 }
81 return name;
82 }
83
/*
 * Table mapping ALUA-style access-state codes to the strings shown by
 * the "access_state" attribute.  Searched by scsi_access_state_name().
 */
static const struct {
	unsigned char value;
	char *name;
} sdev_access_states[] = {
	{ SCSI_ACCESS_STATE_OPTIMAL, "active/optimized" },
	{ SCSI_ACCESS_STATE_ACTIVE, "active/non-optimized" },
	{ SCSI_ACCESS_STATE_STANDBY, "standby" },
	{ SCSI_ACCESS_STATE_UNAVAILABLE, "unavailable" },
	{ SCSI_ACCESS_STATE_LBA, "lba-dependent" },
	{ SCSI_ACCESS_STATE_OFFLINE, "offline" },
	{ SCSI_ACCESS_STATE_TRANSITIONING, "transitioning" },
};
96
97 const char *scsi_access_state_name(unsigned char state)
98 {
99 int i;
100 char *name = NULL;
101
102 for (i = 0; i < ARRAY_SIZE(sdev_access_states); i++) {
103 if (sdev_access_states[i].value == state) {
104 name = sdev_access_states[i].name;
105 break;
106 }
107 }
108 return name;
109 }
110
111 static int check_set(unsigned long long *val, char *src)
112 {
113 char *last;
114
115 if (strncmp(src, "-", 20) == 0) {
116 *val = SCAN_WILD_CARD;
117 } else {
118 /*
119 * Doesn't check for int overflow
120 */
121 *val = simple_strtoull(src, &last, 0);
122 if (*last != '\0')
123 return 1;
124 }
125 return 0;
126 }
127
/*
 * scsi_scan() - parse a "<channel> <id> <lun>" triple written to the
 * host "scan" attribute and scan the matching device(s).  Each field
 * may be "-" to act as a wildcard.  Returns -EINVAL on parse failure,
 * otherwise the result of the scan.
 */
static int scsi_scan(struct Scsi_Host *shost, const char *str)
{
	char s1[15], s2[15], s3[17], junk;
	unsigned long long channel, id, lun;
	int res;

	/* The trailing %c must NOT match: exactly three fields allowed */
	res = sscanf(str, "%10s %10s %16s %c", s1, s2, s3, &junk);
	if (res != 3)
		return -EINVAL;
	if (check_set(&channel, s1))
		return -EINVAL;
	if (check_set(&id, s2))
		return -EINVAL;
	if (check_set(&lun, s3))
		return -EINVAL;
	/* A transport class may take over the scan entirely */
	if (shost->transportt->user_scan)
		res = shost->transportt->user_scan(shost, channel, id, lun);
	else
		res = scsi_scan_host_selected(shost, channel, id, lun, 1);
	return res;
}
149
/*
 * shost_show_function: macro to create an attr function that can be used to
 * show a non-bit field.  The generated show_<name>() formats shost->field
 * into the sysfs buffer (at most 20 bytes including the NUL).
 */
#define shost_show_function(name, field, format_string) \
static ssize_t \
show_##name (struct device *dev, struct device_attribute *attr, \
	     char *buf) \
{ \
	struct Scsi_Host *shost = class_to_shost(dev); \
	return snprintf (buf, 20, format_string, shost->field); \
}

/*
 * shost_rd_attr: macro to create a function and attribute variable for a
 * read only field.  shost_rd_attr2() lets the attribute name differ from
 * the member path (e.g. proc_name lives under shost->hostt).
 */
#define shost_rd_attr2(name, field, format_string) \
	shost_show_function(name, field, format_string) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);

#define shost_rd_attr(field, format_string) \
	shost_rd_attr2(field, field, format_string)
173
174 /*
175 * Create the actual show/store functions and data structures.
176 */
177
178 static ssize_t
179 store_scan(struct device *dev, struct device_attribute *attr,
180 const char *buf, size_t count)
181 {
182 struct Scsi_Host *shost = class_to_shost(dev);
183 int res;
184
185 res = scsi_scan(shost, buf);
186 if (res == 0)
187 res = count;
188 return res;
189 };
190 static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan);
191
192 static ssize_t
193 store_shost_state(struct device *dev, struct device_attribute *attr,
194 const char *buf, size_t count)
195 {
196 int i;
197 struct Scsi_Host *shost = class_to_shost(dev);
198 enum scsi_host_state state = 0;
199
200 for (i = 0; i < ARRAY_SIZE(shost_states); i++) {
201 const int len = strlen(shost_states[i].name);
202 if (strncmp(shost_states[i].name, buf, len) == 0 &&
203 buf[len] == '\n') {
204 state = shost_states[i].value;
205 break;
206 }
207 }
208 if (!state)
209 return -EINVAL;
210
211 if (scsi_host_set_state(shost, state))
212 return -EINVAL;
213 return count;
214 }
215
216 static ssize_t
217 show_shost_state(struct device *dev, struct device_attribute *attr, char *buf)
218 {
219 struct Scsi_Host *shost = class_to_shost(dev);
220 const char *name = scsi_host_state_name(shost->shost_state);
221
222 if (!name)
223 return -EINVAL;
224
225 return snprintf(buf, 20, "%s\n", name);
226 }
227
228 /* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */
229 struct device_attribute dev_attr_hstate =
230 __ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state);
231
232 static ssize_t
233 show_shost_mode(unsigned int mode, char *buf)
234 {
235 ssize_t len = 0;
236
237 if (mode & MODE_INITIATOR)
238 len = sprintf(buf, "%s", "Initiator");
239
240 if (mode & MODE_TARGET)
241 len += sprintf(buf + len, "%s%s", len ? ", " : "", "Target");
242
243 len += sprintf(buf + len, "\n");
244
245 return len;
246 }
247
248 static ssize_t
249 show_shost_supported_mode(struct device *dev, struct device_attribute *attr,
250 char *buf)
251 {
252 struct Scsi_Host *shost = class_to_shost(dev);
253 unsigned int supported_mode = shost->hostt->supported_mode;
254
255 if (supported_mode == MODE_UNKNOWN)
256 /* by default this should be initiator */
257 supported_mode = MODE_INITIATOR;
258
259 return show_shost_mode(supported_mode, buf);
260 }
261
262 static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL);
263
264 static ssize_t
265 show_shost_active_mode(struct device *dev,
266 struct device_attribute *attr, char *buf)
267 {
268 struct Scsi_Host *shost = class_to_shost(dev);
269
270 if (shost->active_mode == MODE_UNKNOWN)
271 return snprintf(buf, 20, "unknown\n");
272 else
273 return show_shost_mode(shost->active_mode, buf);
274 }
275
276 static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
277
278 static int check_reset_type(const char *str)
279 {
280 if (sysfs_streq(str, "adapter"))
281 return SCSI_ADAPTER_RESET;
282 else if (sysfs_streq(str, "firmware"))
283 return SCSI_FIRMWARE_RESET;
284 else
285 return 0;
286 }
287
288 static ssize_t
289 store_host_reset(struct device *dev, struct device_attribute *attr,
290 const char *buf, size_t count)
291 {
292 struct Scsi_Host *shost = class_to_shost(dev);
293 struct scsi_host_template *sht = shost->hostt;
294 int ret = -EINVAL;
295 int type;
296
297 type = check_reset_type(buf);
298 if (!type)
299 goto exit_store_host_reset;
300
301 if (sht->host_reset)
302 ret = sht->host_reset(shost, type);
303
304 exit_store_host_reset:
305 if (ret == 0)
306 ret = count;
307 return ret;
308 }
309
310 static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset);
311
312 static ssize_t
313 show_shost_eh_deadline(struct device *dev,
314 struct device_attribute *attr, char *buf)
315 {
316 struct Scsi_Host *shost = class_to_shost(dev);
317
318 if (shost->eh_deadline == -1)
319 return snprintf(buf, strlen("off") + 2, "off\n");
320 return sprintf(buf, "%u\n", shost->eh_deadline / HZ);
321 }
322
/*
 * store_shost_eh_deadline() - set the error-handling deadline in
 * seconds, or "off" to disable it (stored as -1).
 */
static ssize_t
store_shost_eh_deadline(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	int ret = -EINVAL;
	unsigned long deadline, flags;

	/*
	 * A deadline makes no sense when the transport runs its own EH
	 * strategy or the LLD cannot reset the host.
	 */
	if (shost->transportt &&
	    (shost->transportt->eh_strategy_handler ||
	     !shost->hostt->eh_host_reset_handler))
		return ret;

	if (!strncmp(buf, "off", strlen("off")))
		deadline = -1;
	else {
		ret = kstrtoul(buf, 10, &deadline);
		if (ret)
			return ret;
		/* eh_deadline holds jiffies in an unsigned int */
		if (deadline * HZ > UINT_MAX)
			return -EINVAL;
	}

	/* Host lock: don't change the deadline under a running recovery */
	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_in_recovery(shost))
		ret = -EBUSY;
	else {
		if (deadline == -1)
			shost->eh_deadline = -1;
		else
			shost->eh_deadline = deadline * HZ;

		ret = count;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	return ret;
}

static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline);
363
/*
 * Simple read-only host attributes generated straight from Scsi_Host
 * fields by the shost_rd_attr() macros above.
 */
shost_rd_attr(use_blk_mq, "%d\n");
shost_rd_attr(unique_id, "%u\n");
shost_rd_attr(cmd_per_lun, "%hd\n");
shost_rd_attr(can_queue, "%hd\n");
shost_rd_attr(sg_tablesize, "%hu\n");
shost_rd_attr(sg_prot_tablesize, "%hu\n");
shost_rd_attr(unchecked_isa_dma, "%d\n");
shost_rd_attr(prot_capabilities, "%u\n");
shost_rd_attr(prot_guard_type, "%hd\n");
shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
374
375 static ssize_t
376 show_host_busy(struct device *dev, struct device_attribute *attr, char *buf)
377 {
378 struct Scsi_Host *shost = class_to_shost(dev);
379 return snprintf(buf, 20, "%d\n", atomic_read(&shost->host_busy));
380 }
381 static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL);
382
/* Attributes exposed for every Scsi_Host under its sysfs directory */
static struct attribute *scsi_sysfs_shost_attrs[] = {
	&dev_attr_use_blk_mq.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_host_busy.attr,
	&dev_attr_cmd_per_lun.attr,
	&dev_attr_can_queue.attr,
	&dev_attr_sg_tablesize.attr,
	&dev_attr_sg_prot_tablesize.attr,
	&dev_attr_unchecked_isa_dma.attr,
	&dev_attr_proc_name.attr,
	&dev_attr_scan.attr,
	&dev_attr_hstate.attr,
	&dev_attr_supported_mode.attr,
	&dev_attr_active_mode.attr,
	&dev_attr_prot_capabilities.attr,
	&dev_attr_prot_guard_type.attr,
	&dev_attr_host_reset.attr,
	&dev_attr_eh_deadline.attr,
	NULL
};

struct attribute_group scsi_shost_attr_group = {
	.attrs = scsi_sysfs_shost_attrs,
};

const struct attribute_group *scsi_sysfs_shost_attr_groups[] = {
	&scsi_shost_attr_group,
	NULL
};
412
413 static void scsi_device_cls_release(struct device *class_dev)
414 {
415 struct scsi_device *sdev;
416
417 sdev = class_to_sdev(class_dev);
418 put_device(&sdev->sdev_gendev);
419 }
420
/*
 * Final teardown of a scsi_device.  Runs in process context because it
 * may sleep (cancel_work_sync(), blk_put_queue()); scheduled via
 * execute_in_process_context() from scsi_device_dev_release().
 */
static void scsi_device_dev_release_usercontext(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct device *parent;
	struct list_head *this, *tmp;
	unsigned long flags;

	sdev = container_of(work, struct scsi_device, ew.work);

	scsi_dh_release_device(sdev);

	/* Remember the parent before the sdev memory is freed below */
	parent = sdev->sdev_gendev.parent;

	/* Unlink from the host's bookkeeping lists under the host lock */
	spin_lock_irqsave(sdev->host->host_lock, flags);
	list_del(&sdev->siblings);
	list_del(&sdev->same_target_siblings);
	list_del(&sdev->starved_entry);
	spin_unlock_irqrestore(sdev->host->host_lock, flags);

	/* Event work must be idle before draining the event list */
	cancel_work_sync(&sdev->event_work);

	list_for_each_safe(this, tmp, &sdev->event_list) {
		struct scsi_event *evt;

		evt = list_entry(this, struct scsi_event, node);
		list_del(&evt->node);
		kfree(evt);
	}

	blk_put_queue(sdev->request_queue);
	/* NULL queue means the device can't be used */
	sdev->request_queue = NULL;

	kfree(sdev->vpd_pg83);
	kfree(sdev->vpd_pg80);
	kfree(sdev->inquiry);
	kfree(sdev);

	/* Drop the parent reference this sdev was holding, if any */
	if (parent)
		put_device(parent);
}
462
463 static void scsi_device_dev_release(struct device *dev)
464 {
465 struct scsi_device *sdp = to_scsi_device(dev);
466 execute_in_process_context(scsi_device_dev_release_usercontext,
467 &sdp->ew);
468 }
469
/* Class backing /sys/class/scsi_device */
static struct class sdev_class = {
	.name		= "scsi_device",
	.dev_release	= scsi_device_cls_release,
};
474
475 /* all probing is done in the individual ->probe routines */
476 static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
477 {
478 struct scsi_device *sdp;
479
480 if (dev->type != &scsi_dev_type)
481 return 0;
482
483 sdp = to_scsi_device(dev);
484 if (sdp->no_uld_attach)
485 return 0;
486 return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
487 }
488
489 static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
490 {
491 struct scsi_device *sdev;
492
493 if (dev->type != &scsi_dev_type)
494 return 0;
495
496 sdev = to_scsi_device(dev);
497
498 add_uevent_var(env, "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type);
499 return 0;
500 }
501
/* The "scsi" bus: devices attach here and ULDs bind via scsi_bus_match() */
struct bus_type scsi_bus_type = {
        .name		= "scsi",
        .match		= scsi_bus_match,
	.uevent		= scsi_bus_uevent,
#ifdef CONFIG_PM
	.pm		= &scsi_bus_pm_ops,
#endif
};
EXPORT_SYMBOL_GPL(scsi_bus_type);
511
512 int scsi_sysfs_register(void)
513 {
514 int error;
515
516 error = bus_register(&scsi_bus_type);
517 if (!error) {
518 error = class_register(&sdev_class);
519 if (error)
520 bus_unregister(&scsi_bus_type);
521 }
522
523 return error;
524 }
525
/* Tear down in reverse order of scsi_sysfs_register() */
void scsi_sysfs_unregister(void)
{
	class_unregister(&sdev_class);
	bus_unregister(&scsi_bus_type);
}
531
/*
 * sdev_show_function: macro to create an attr function that can be used to
 * show a non-bit field.  Formats sdev->field into the sysfs buffer
 * (at most 20 bytes including the NUL).
 */
#define sdev_show_function(field, format_string) \
static ssize_t \
sdev_show_##field (struct device *dev, struct device_attribute *attr, \
		   char *buf) \
{ \
	struct scsi_device *sdev; \
	sdev = to_scsi_device(dev); \
	return snprintf (buf, 20, format_string, sdev->field); \
} \

/*
 * sdev_rd_attr: macro to create a function and attribute variable for a
 * read only field.
 */
#define sdev_rd_attr(field, format_string) \
	sdev_show_function(field, format_string) \
static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL);


/*
 * sdev_rw_attr: create a function and attribute variable for a
 * read/write field.  The store side parses the new value back with
 * sscanf() using the same format string; parse failures are ignored.
 */
#define sdev_rw_attr(field, format_string) \
	sdev_show_function(field, format_string) \
 \
static ssize_t \
sdev_store_##field (struct device *dev, struct device_attribute *attr, \
		    const char *buf, size_t count) \
{ \
	struct scsi_device *sdev; \
	sdev = to_scsi_device(dev); \
	sscanf (buf, format_string, &sdev->field); \
	return count; \
} \
static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
572
/* Currently we don't export bit fields, but we might in future,
 * so leave this code in */
#if 0
/*
 * sdev_rw_attr_bit: create a function and attribute variable for a
 * read/write bit field.  (Disabled: kept as a template only.)
 */
#define sdev_rw_attr_bit(field) \
	sdev_show_function(field, "%d\n") \
 \
static ssize_t \
sdev_store_##field (struct device *dev, struct device_attribute *attr, \
		    const char *buf, size_t count) \
{ \
	int ret; \
	struct scsi_device *sdev; \
	ret = scsi_sdev_check_buf_bit(buf); \
	if (ret >= 0)	{ \
		sdev = to_scsi_device(dev); \
		sdev->field = ret; \
		ret = count; \
	} \
	return ret; \
} \
static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);

/*
 * scsi_sdev_check_buf_bit: return 0 if buf is "0", return 1 if buf is "1",
 * else return -EINVAL.
 */
static int scsi_sdev_check_buf_bit(const char *buf)
{
	if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) {
		if (buf[0] == '1')
			return 1;
		else if (buf[0] == '0')
			return 0;
		else
			return -EINVAL;
	} else
		return -EINVAL;
}
#endif
/*
 * Create the actual show/store functions and data structures.
 * The vendor/model/rev INQUIRY strings are fixed-width and not
 * NUL-terminated, hence the precision in the format strings.
 */
sdev_rd_attr (type, "%d\n");
sdev_rd_attr (scsi_level, "%d\n");
sdev_rd_attr (vendor, "%.8s\n");
sdev_rd_attr (model, "%.16s\n");
sdev_rd_attr (rev, "%.4s\n");
624
625 static ssize_t
626 sdev_show_device_busy(struct device *dev, struct device_attribute *attr,
627 char *buf)
628 {
629 struct scsi_device *sdev = to_scsi_device(dev);
630 return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_busy));
631 }
632 static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL);
633
634 static ssize_t
635 sdev_show_device_blocked(struct device *dev, struct device_attribute *attr,
636 char *buf)
637 {
638 struct scsi_device *sdev = to_scsi_device(dev);
639 return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_blocked));
640 }
641 static DEVICE_ATTR(device_blocked, S_IRUGO, sdev_show_device_blocked, NULL);
642
643 /*
644 * TODO: can we make these symlinks to the block layer ones?
645 */
646 static ssize_t
647 sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
648 {
649 struct scsi_device *sdev;
650 sdev = to_scsi_device(dev);
651 return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
652 }
653
654 static ssize_t
655 sdev_store_timeout (struct device *dev, struct device_attribute *attr,
656 const char *buf, size_t count)
657 {
658 struct scsi_device *sdev;
659 int timeout;
660 sdev = to_scsi_device(dev);
661 sscanf (buf, "%d\n", &timeout);
662 blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
663 return count;
664 }
665 static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);
666
667 static ssize_t
668 sdev_show_eh_timeout(struct device *dev, struct device_attribute *attr, char *buf)
669 {
670 struct scsi_device *sdev;
671 sdev = to_scsi_device(dev);
672 return snprintf(buf, 20, "%u\n", sdev->eh_timeout / HZ);
673 }
674
675 static ssize_t
676 sdev_store_eh_timeout(struct device *dev, struct device_attribute *attr,
677 const char *buf, size_t count)
678 {
679 struct scsi_device *sdev;
680 unsigned int eh_timeout;
681 int err;
682
683 if (!capable(CAP_SYS_ADMIN))
684 return -EACCES;
685
686 sdev = to_scsi_device(dev);
687 err = kstrtouint(buf, 10, &eh_timeout);
688 if (err)
689 return err;
690 sdev->eh_timeout = eh_timeout * HZ;
691
692 return count;
693 }
694 static DEVICE_ATTR(eh_timeout, S_IRUGO | S_IWUSR, sdev_show_eh_timeout, sdev_store_eh_timeout);
695
/* Writing anything to "rescan" revalidates the device */
static ssize_t
store_rescan_field (struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	scsi_rescan_device(dev);
	return count;
}
static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field);
704
705 static ssize_t
706 sdev_store_delete(struct device *dev, struct device_attribute *attr,
707 const char *buf, size_t count)
708 {
709 if (device_remove_file_self(dev, attr))
710 scsi_remove_device(to_scsi_device(dev));
711 return count;
712 };
713 static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
714
715 static ssize_t
716 store_state_field(struct device *dev, struct device_attribute *attr,
717 const char *buf, size_t count)
718 {
719 int i;
720 struct scsi_device *sdev = to_scsi_device(dev);
721 enum scsi_device_state state = 0;
722
723 for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
724 const int len = strlen(sdev_states[i].name);
725 if (strncmp(sdev_states[i].name, buf, len) == 0 &&
726 buf[len] == '\n') {
727 state = sdev_states[i].value;
728 break;
729 }
730 }
731 if (!state)
732 return -EINVAL;
733
734 if (scsi_device_set_state(sdev, state))
735 return -EINVAL;
736 return count;
737 }
738
739 static ssize_t
740 show_state_field(struct device *dev, struct device_attribute *attr, char *buf)
741 {
742 struct scsi_device *sdev = to_scsi_device(dev);
743 const char *name = scsi_device_state_name(sdev->sdev_state);
744
745 if (!name)
746 return -EINVAL;
747
748 return snprintf(buf, 20, "%s\n", name);
749 }
750
751 static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_state_field, store_state_field);
752
753 static ssize_t
754 show_queue_type_field(struct device *dev, struct device_attribute *attr,
755 char *buf)
756 {
757 struct scsi_device *sdev = to_scsi_device(dev);
758 const char *name = "none";
759
760 if (sdev->simple_tags)
761 name = "simple";
762
763 return snprintf(buf, 20, "%s\n", name);
764 }
765
766 static ssize_t
767 store_queue_type_field(struct device *dev, struct device_attribute *attr,
768 const char *buf, size_t count)
769 {
770 struct scsi_device *sdev = to_scsi_device(dev);
771
772 if (!sdev->tagged_supported)
773 return -EINVAL;
774
775 sdev_printk(KERN_INFO, sdev,
776 "ignoring write to deprecated queue_type attribute");
777 return count;
778 }
779
780 static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
781 store_queue_type_field);
782
/*
 * sdev_vpd_pg_attr(): generate a read-only binary sysfs attribute
 * exposing one cached VPD page.  The page buffer is read under RCU
 * because a rescan may replace it while a reader is copying it out.
 */
#define sdev_vpd_pg_attr(_page)						\
static ssize_t							\
show_vpd_##_page(struct file *filp, struct kobject *kobj,	\
		 struct bin_attribute *bin_attr,			\
		 char *buf, loff_t off, size_t count)			\
{									\
	struct device *dev = container_of(kobj, struct device, kobj);	\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	int ret;							\
	if (!sdev->vpd_##_page)						\
		return -EINVAL;						\
	rcu_read_lock();						\
	ret = memory_read_from_buffer(buf, count, &off,			\
				      rcu_dereference(sdev->vpd_##_page), \
				      sdev->vpd_##_page##_len);		\
	rcu_read_unlock();						\
	return ret;							\
}									\
static struct bin_attribute dev_attr_vpd_##_page = {		\
	.attr =	{.name = __stringify(vpd_##_page), .mode = S_IRUGO },	\
	.size = 0,							\
	.read = show_vpd_##_page,					\
};

sdev_vpd_pg_attr(pg83);
sdev_vpd_pg_attr(pg80);
809
810 static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
811 struct bin_attribute *bin_attr,
812 char *buf, loff_t off, size_t count)
813 {
814 struct device *dev = container_of(kobj, struct device, kobj);
815 struct scsi_device *sdev = to_scsi_device(dev);
816
817 if (!sdev->inquiry)
818 return -EINVAL;
819
820 return memory_read_from_buffer(buf, count, &off, sdev->inquiry,
821 sdev->inquiry_len);
822 }
823
824 static struct bin_attribute dev_attr_inquiry = {
825 .attr = {
826 .name = "inquiry",
827 .mode = S_IRUGO,
828 },
829 .size = 0,
830 .read = show_inquiry,
831 };
832
833 static ssize_t
834 show_iostat_counterbits(struct device *dev, struct device_attribute *attr,
835 char *buf)
836 {
837 return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8);
838 }
839
840 static DEVICE_ATTR(iocounterbits, S_IRUGO, show_iostat_counterbits, NULL);
841
/*
 * show_sdev_iostat(): read-only attribute printing one atomic I/O
 * counter in hex.  Counters may wrap at the width reported by the
 * iocounterbits attribute.
 */
#define show_sdev_iostat(field)						\
static ssize_t								\
show_iostat_##field(struct device *dev, struct device_attribute *attr,	\
		    char *buf)						\
{									\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	unsigned long long count = atomic_read(&sdev->field);		\
	return snprintf(buf, 20, "0x%llx\n", count);			\
}									\
static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)

show_sdev_iostat(iorequest_cnt);
show_sdev_iostat(iodone_cnt);
show_sdev_iostat(ioerr_cnt);
856
857 static ssize_t
858 sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
859 {
860 struct scsi_device *sdev;
861 sdev = to_scsi_device(dev);
862 return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", sdev->type);
863 }
864 static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL);
865
/*
 * Per-event sysfs attributes: each DECLARE_EVT() emits a show/store
 * pair that reads/writes one bit of sdev->supported_events.
 */
#define DECLARE_EVT_SHOW(name, Cap_name)				\
static ssize_t								\
sdev_show_evt_##name(struct device *dev, struct device_attribute *attr,	\
		     char *buf)						\
{									\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	int val = test_bit(SDEV_EVT_##Cap_name, sdev->supported_events);\
	return snprintf(buf, 20, "%d\n", val);				\
}

#define DECLARE_EVT_STORE(name, Cap_name)				\
static ssize_t								\
sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\
		      const char *buf, size_t count)			\
{									\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	int val = simple_strtoul(buf, NULL, 0);				\
	if (val == 0)							\
		clear_bit(SDEV_EVT_##Cap_name, sdev->supported_events);	\
	else if (val == 1)						\
		set_bit(SDEV_EVT_##Cap_name, sdev->supported_events);	\
	else								\
		return -EINVAL;						\
	return count;							\
}

/*
 * NOTE(review): the attribute mode is S_IRUGO only even though a store
 * method is generated — confirm whether evt_* attributes are meant to
 * be writable from userspace.
 */
#define DECLARE_EVT(name, Cap_name)					\
	DECLARE_EVT_SHOW(name, Cap_name)				\
	DECLARE_EVT_STORE(name, Cap_name)				\
	static DEVICE_ATTR(evt_##name, S_IRUGO, sdev_show_evt_##name,	\
			   sdev_store_evt_##name);
#define REF_EVT(name) &dev_attr_evt_##name.attr

DECLARE_EVT(media_change, MEDIA_CHANGE)
DECLARE_EVT(inquiry_change_reported, INQUIRY_CHANGE_REPORTED)
DECLARE_EVT(capacity_change_reported, CAPACITY_CHANGE_REPORTED)
DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED)
DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED)
DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED)
905
906 static ssize_t
907 sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
908 const char *buf, size_t count)
909 {
910 int depth, retval;
911 struct scsi_device *sdev = to_scsi_device(dev);
912 struct scsi_host_template *sht = sdev->host->hostt;
913
914 if (!sht->change_queue_depth)
915 return -EINVAL;
916
917 depth = simple_strtoul(buf, NULL, 0);
918
919 if (depth < 1 || depth > sdev->host->can_queue)
920 return -EINVAL;
921
922 retval = sht->change_queue_depth(sdev, depth);
923 if (retval < 0)
924 return retval;
925
926 sdev->max_queue_depth = sdev->queue_depth;
927
928 return count;
929 }
930 sdev_show_function(queue_depth, "%d\n");
931
932 static DEVICE_ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth,
933 sdev_store_queue_depth);
934
935 static ssize_t
936 sdev_show_wwid(struct device *dev, struct device_attribute *attr,
937 char *buf)
938 {
939 struct scsi_device *sdev = to_scsi_device(dev);
940 ssize_t count;
941
942 count = scsi_vpd_lun_id(sdev, buf, PAGE_SIZE);
943 if (count > 0) {
944 buf[count] = '\n';
945 count++;
946 }
947 return count;
948 }
949 static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
950
951 #ifdef CONFIG_SCSI_DH
952 static ssize_t
953 sdev_show_dh_state(struct device *dev, struct device_attribute *attr,
954 char *buf)
955 {
956 struct scsi_device *sdev = to_scsi_device(dev);
957
958 if (!sdev->handler)
959 return snprintf(buf, 20, "detached\n");
960
961 return snprintf(buf, 20, "%s\n", sdev->handler->name);
962 }
963
/*
 * sdev_store_dh_state() - attach a device handler by name, or send
 * "activate" to an attached handler.  Detaching is not supported here
 * and only logs a warning.
 */
static ssize_t
sdev_store_dh_state(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	int err = -EINVAL;

	/* Refuse once the device is on its way out */
	if (sdev->sdev_state == SDEV_CANCEL ||
	    sdev->sdev_state == SDEV_DEL)
		return -ENODEV;

	if (!sdev->handler) {
		/*
		 * Attach to a device handler
		 */
		err = scsi_dh_attach(sdev->request_queue, buf);
	} else if (!strncmp(buf, "activate", 8)) {
		/*
		 * Activate a device handler
		 */
		if (sdev->handler->activate)
			err = sdev->handler->activate(sdev, NULL, NULL);
		else
			err = 0;
	} else if (!strncmp(buf, "detach", 6)) {
		/*
		 * Detach from a device handler
		 */
		sdev_printk(KERN_WARNING, sdev,
			    "can't detach handler %s.\n",
			    sdev->handler->name);
		err = -EINVAL;
	}

	return err < 0 ? err : count;
}

static DEVICE_ATTR(dh_state, S_IRUGO | S_IWUSR, sdev_show_dh_state,
		   sdev_store_dh_state);
1003
1004 static ssize_t
1005 sdev_show_access_state(struct device *dev,
1006 struct device_attribute *attr,
1007 char *buf)
1008 {
1009 struct scsi_device *sdev = to_scsi_device(dev);
1010 unsigned char access_state;
1011 const char *access_state_name;
1012
1013 if (!sdev->handler)
1014 return -EINVAL;
1015
1016 access_state = (sdev->access_state & SCSI_ACCESS_STATE_MASK);
1017 access_state_name = scsi_access_state_name(access_state);
1018
1019 return sprintf(buf, "%s\n",
1020 access_state_name ? access_state_name : "unknown");
1021 }
1022 static DEVICE_ATTR(access_state, S_IRUGO, sdev_show_access_state, NULL);
1023
1024 static ssize_t
1025 sdev_show_preferred_path(struct device *dev,
1026 struct device_attribute *attr,
1027 char *buf)
1028 {
1029 struct scsi_device *sdev = to_scsi_device(dev);
1030
1031 if (!sdev->handler)
1032 return -EINVAL;
1033
1034 if (sdev->access_state & SCSI_ACCESS_STATE_PREFERRED)
1035 return sprintf(buf, "1\n");
1036 else
1037 return sprintf(buf, "0\n");
1038 }
1039 static DEVICE_ATTR(preferred_path, S_IRUGO, sdev_show_preferred_path, NULL);
1040 #endif
1041
1042 static ssize_t
1043 sdev_show_queue_ramp_up_period(struct device *dev,
1044 struct device_attribute *attr,
1045 char *buf)
1046 {
1047 struct scsi_device *sdev;
1048 sdev = to_scsi_device(dev);
1049 return snprintf(buf, 20, "%u\n",
1050 jiffies_to_msecs(sdev->queue_ramp_up_period));
1051 }
1052
1053 static ssize_t
1054 sdev_store_queue_ramp_up_period(struct device *dev,
1055 struct device_attribute *attr,
1056 const char *buf, size_t count)
1057 {
1058 struct scsi_device *sdev = to_scsi_device(dev);
1059 unsigned int period;
1060
1061 if (kstrtouint(buf, 10, &period))
1062 return -EINVAL;
1063
1064 sdev->queue_ramp_up_period = msecs_to_jiffies(period);
1065 return count;
1066 }
1067
1068 static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
1069 sdev_show_queue_ramp_up_period,
1070 sdev_store_queue_ramp_up_period);
1071
1072 static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
1073 struct attribute *attr, int i)
1074 {
1075 struct device *dev = container_of(kobj, struct device, kobj);
1076 struct scsi_device *sdev = to_scsi_device(dev);
1077
1078
1079 if (attr == &dev_attr_queue_depth.attr &&
1080 !sdev->host->hostt->change_queue_depth)
1081 return S_IRUGO;
1082
1083 if (attr == &dev_attr_queue_ramp_up_period.attr &&
1084 !sdev->host->hostt->change_queue_depth)
1085 return 0;
1086
1087 #ifdef CONFIG_SCSI_DH
1088 if (attr == &dev_attr_access_state.attr &&
1089 !sdev->handler)
1090 return 0;
1091 if (attr == &dev_attr_preferred_path.attr &&
1092 !sdev->handler)
1093 return 0;
1094 #endif
1095 return attr->mode;
1096 }
1097
1098 static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
1099 struct bin_attribute *attr, int i)
1100 {
1101 struct device *dev = container_of(kobj, struct device, kobj);
1102 struct scsi_device *sdev = to_scsi_device(dev);
1103
1104
1105 if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80)
1106 return 0;
1107
1108 if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83)
1109 return 0;
1110
1111 return S_IRUGO;
1112 }
1113
/* Default template for device attributes. May NOT be modified */
static struct attribute *scsi_sdev_attrs[] = {
	&dev_attr_device_blocked.attr,
	&dev_attr_type.attr,
	&dev_attr_scsi_level.attr,
	&dev_attr_device_busy.attr,
	&dev_attr_vendor.attr,
	&dev_attr_model.attr,
	&dev_attr_rev.attr,
	&dev_attr_rescan.attr,
	&dev_attr_delete.attr,
	&dev_attr_state.attr,
	&dev_attr_timeout.attr,
	&dev_attr_eh_timeout.attr,
	&dev_attr_iocounterbits.attr,
	&dev_attr_iorequest_cnt.attr,
	&dev_attr_iodone_cnt.attr,
	&dev_attr_ioerr_cnt.attr,
	&dev_attr_modalias.attr,
	&dev_attr_queue_depth.attr,
	&dev_attr_queue_type.attr,
	&dev_attr_wwid.attr,
#ifdef CONFIG_SCSI_DH
	&dev_attr_dh_state.attr,
	&dev_attr_access_state.attr,
	&dev_attr_preferred_path.attr,
#endif
	&dev_attr_queue_ramp_up_period.attr,
	/* REF_EVT() entries: per-event attributes (macro defined earlier
	 * in this file — presumably expands to the evt_* dev_attrs). */
	REF_EVT(media_change),
	REF_EVT(inquiry_change_reported),
	REF_EVT(capacity_change_reported),
	REF_EVT(soft_threshold_reached),
	REF_EVT(mode_parameter_change_reported),
	REF_EVT(lun_change_reported),
	NULL
};
1150
/* Binary attributes: raw VPD pages and the raw INQUIRY response.
 * Visibility is filtered by scsi_sdev_bin_attr_is_visible(). */
static struct bin_attribute *scsi_sdev_bin_attrs[] = {
	&dev_attr_vpd_pg83,
	&dev_attr_vpd_pg80,
	&dev_attr_inquiry,
	NULL
};
/* Bundle the default device attributes with their visibility callbacks. */
static struct attribute_group scsi_sdev_attr_group = {
	.attrs =	scsi_sdev_attrs,
	.bin_attrs =	scsi_sdev_bin_attrs,
	.is_visible =	scsi_sdev_attr_is_visible,
	.is_bin_visible = scsi_sdev_bin_attr_is_visible,
};
1163
/* NULL-terminated group list hooked into scsi_dev_type.groups below. */
static const struct attribute_group *scsi_sdev_attr_groups[] = {
	&scsi_sdev_attr_group,
	NULL
};
1168
/*
 * scsi_target_add - register a freshly created target with the driver core
 * @starget: target to add
 *
 * No-op (returns 0) unless the target is still in STARGET_CREATED.
 * On success the target moves to STARGET_RUNNING with runtime PM
 * active and enabled.  Returns the device_add() error on failure.
 */
static int scsi_target_add(struct scsi_target *starget)
{
	int error;

	if (starget->state != STARGET_CREATED)
		return 0;

	error = device_add(&starget->dev);
	if (error) {
		dev_err(&starget->dev, "target device_add failed, error %d\n", error);
		return error;
	}
	/* Transport class registration only after the core device exists. */
	transport_add_device(&starget->dev);
	starget->state = STARGET_RUNNING;

	pm_runtime_set_active(&starget->dev);
	pm_runtime_enable(&starget->dev);
	device_enable_async_suspend(&starget->dev);

	return 0;
}
1190
/**
 * scsi_sysfs_add_sdev - add scsi device to sysfs
 * @sdev: scsi_device to add
 *
 * Return value:
 * 	0 on Success / non-zero on Failure
 **/
int scsi_sysfs_add_sdev(struct scsi_device *sdev)
{
	int error, i;
	struct request_queue *rq = sdev->request_queue;
	struct scsi_target *starget = sdev->sdev_target;

	/* Device must be RUNNING before it becomes visible to userspace. */
	error = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (error)
		return error;

	/* Parent target has to be registered before its children. */
	error = scsi_target_add(starget);
	if (error)
		return error;

	transport_configure_device(&starget->dev);

	device_enable_async_suspend(&sdev->sdev_gendev);
	/* Keep the target resumed while the child's runtime PM is set up. */
	scsi_autopm_get_target(starget);
	pm_runtime_set_active(&sdev->sdev_gendev);
	pm_runtime_forbid(&sdev->sdev_gendev);
	pm_runtime_enable(&sdev->sdev_gendev);
	scsi_autopm_put_target(starget);

	/* Balanced by the scsi_autopm_put_device() at the end. */
	scsi_autopm_get_device(sdev);

	error = scsi_dh_add_device(sdev);
	if (error)
		/*
		 * device_handler is optional, so any error can be ignored
		 */
		sdev_printk(KERN_INFO, sdev,
				"failed to add device handler: %d\n", error);

	error = device_add(&sdev->sdev_gendev);
	if (error) {
		/*
		 * NOTE(review): error paths from here on return without the
		 * scsi_autopm_put_device() balancing the get above — confirm
		 * the caller's failure path restores the PM usage count.
		 */
		sdev_printk(KERN_INFO, sdev,
				"failed to add device: %d\n", error);
		scsi_dh_remove_device(sdev);
		return error;
	}

	/* Second sysfs face of the device: the sdev_class class device. */
	device_enable_async_suspend(&sdev->sdev_dev);
	error = device_add(&sdev->sdev_dev);
	if (error) {
		sdev_printk(KERN_INFO, sdev,
				"failed to add class device: %d\n", error);
		scsi_dh_remove_device(sdev);
		device_del(&sdev->sdev_gendev);
		return error;
	}
	transport_add_device(&sdev->sdev_gendev);
	/* From here __scsi_remove_device() takes the full teardown path. */
	sdev->is_visible = 1;

	error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);

	if (error)
		/* we're treating error on bsg register as non-fatal,
		 * so pretend nothing went wrong */
		sdev_printk(KERN_INFO, sdev,
			    "Failed to register bsg queue, errno=%d\n", error);

	/* add additional host specific attributes */
	if (sdev->host->hostt->sdev_attrs) {
		for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) {
			error = device_create_file(&sdev->sdev_gendev,
					sdev->host->hostt->sdev_attrs[i]);
			if (error)
				return error;
		}
	}

	scsi_autopm_put_device(sdev);
	return error;
}
1272
/*
 * __scsi_remove_device - tear down a scsi_device
 * @sdev: device to remove
 *
 * Caller must hold the host's scan_mutex (see scsi_remove_device()).
 * Unwinds scsi_sysfs_add_sdev() in reverse order, then drops the
 * device's final sysfs reference.
 */
void __scsi_remove_device(struct scsi_device *sdev)
{
	struct device *dev = &sdev->sdev_gendev;

	/*
	 * This cleanup path is not reentrant and while it is impossible
	 * to get a new reference with scsi_device_get() someone can still
	 * hold a previously acquired one.
	 */
	if (sdev->sdev_state == SDEV_DEL)
		return;

	if (sdev->is_visible) {
		/* CANCEL fails e.g. if EH owns the device; bail out then. */
		if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
			return;

		bsg_unregister_queue(sdev->request_queue);
		device_unregister(&sdev->sdev_dev);
		transport_remove_device(dev);
		scsi_dh_remove_device(sdev);
		device_del(dev);
	} else
		/* Never added: just drop the class device's reference. */
		put_device(&sdev->sdev_dev);

	/*
	 * Stop accepting new requests and wait until all queuecommand() and
	 * scsi_run_queue() invocations have finished before tearing down the
	 * device.
	 */
	scsi_device_set_state(sdev, SDEV_DEL);
	blk_cleanup_queue(sdev->request_queue);
	cancel_work_sync(&sdev->requeue_work);

	if (sdev->host->hostt->slave_destroy)
		sdev->host->hostt->slave_destroy(sdev);
	transport_destroy_device(dev);

	/*
	 * Paired with the kref_get() in scsi_sysfs_initialize().  We have
	 * removed sysfs visibility from the device, so make the target
	 * invisible if this was the last device underneath it.
	 */
	scsi_target_reap(scsi_target(sdev));

	put_device(dev);
}
1319
/**
 * scsi_remove_device - unregister a device from the scsi bus
 * @sdev: scsi_device to unregister
 *
 * Takes the host's scan_mutex to serialize against concurrent
 * scanning/removal, then delegates to __scsi_remove_device().
 **/
void scsi_remove_device(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;

	mutex_lock(&shost->scan_mutex);
	__scsi_remove_device(sdev);
	mutex_unlock(&shost->scan_mutex);
}
EXPORT_SYMBOL(scsi_remove_device);
1333
/*
 * __scsi_remove_target - remove every device belonging to a target
 * @starget: target whose child devices are torn down
 *
 * Walks the host's device list under host_lock; for each matching
 * device it drops the lock, removes the device, and restarts the scan,
 * since the list may have changed while the lock was released.
 */
static void __scsi_remove_target(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	unsigned long flags;
	struct scsi_device *sdev;

	spin_lock_irqsave(shost->host_lock, flags);
 restart:
	list_for_each_entry(sdev, &shost->__devices, siblings) {
		/* Skip devices of other targets, or ones already dying
		 * (scsi_device_get() fails for those). */
		if (sdev->channel != starget->channel ||
		    sdev->id != starget->id ||
		    scsi_device_get(sdev))
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
		spin_lock_irqsave(shost->host_lock, flags);
		goto restart;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
}
1355
/**
 * scsi_remove_target - try to remove a target and all its devices
 * @dev: generic starget or parent of generic stargets to be removed
 *
 * Note: This is slightly racy.  It is possible that if the user
 * requests the addition of another device then the target won't be
 * removed.
 */
void scsi_remove_target(struct device *dev)
{
	struct Scsi_Host *shost = dev_to_shost(dev->parent);
	struct scsi_target *starget, *last_target = NULL;
	unsigned long flags;

	/* Same drop-lock-and-restart idiom as __scsi_remove_target();
	 * last_target keeps us from reprocessing the target we just did. */
restart:
	spin_lock_irqsave(shost->host_lock, flags);
	list_for_each_entry(starget, &shost->__targets, siblings) {
		if (starget->state == STARGET_DEL ||
		    starget == last_target)
			continue;
		if (starget->dev.parent == dev || &starget->dev == dev) {
			/* Pin the target across the unlocked removal;
			 * released by scsi_target_reap() below. */
			kref_get(&starget->reap_ref);
			last_target = starget;
			spin_unlock_irqrestore(shost->host_lock, flags);
			__scsi_remove_target(starget);
			scsi_target_reap(starget);
			goto restart;
		}
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
}
EXPORT_SYMBOL(scsi_remove_target);
1387 EXPORT_SYMBOL(scsi_remove_target);
1388
/*
 * scsi_register_driver - register an upper-level driver on the scsi bus
 * @drv: driver to register; its bus is forced to scsi_bus_type
 *
 * Returns the driver_register() result.
 */
int scsi_register_driver(struct device_driver *drv)
{
	drv->bus = &scsi_bus_type;

	return driver_register(drv);
}
EXPORT_SYMBOL(scsi_register_driver);
1396
/*
 * scsi_register_interface - register a class interface on sdev_class
 * @intf: interface to register; its class is forced to sdev_class
 *
 * Returns the class_interface_register() result.
 */
int scsi_register_interface(struct class_interface *intf)
{
	intf->class = &sdev_class;

	return class_interface_register(intf);
}
EXPORT_SYMBOL(scsi_register_interface);
1404
1405 /**
1406 * scsi_sysfs_add_host - add scsi host to subsystem
1407 * @shost: scsi host struct to add to subsystem
1408 **/
1409 int scsi_sysfs_add_host(struct Scsi_Host *shost)
1410 {
1411 int error, i;
1412
1413 /* add host specific attributes */
1414 if (shost->hostt->shost_attrs) {
1415 for (i = 0; shost->hostt->shost_attrs[i]; i++) {
1416 error = device_create_file(&shost->shost_dev,
1417 shost->hostt->shost_attrs[i]);
1418 if (error)
1419 return error;
1420 }
1421 }
1422
1423 transport_register_device(&shost->shost_gendev);
1424 transport_configure_device(&shost->shost_gendev);
1425 return 0;
1426 }
1427
/* Device type shared by all scsi_device gendevs; hooks up release and
 * the default attribute groups, and is what scsi_is_sdev_device() tests. */
static struct device_type scsi_dev_type = {
	.name =		"scsi_device",
	.release =	scsi_device_dev_release,
	.groups =	scsi_sdev_attr_groups,
};
1433
/*
 * scsi_sysfs_device_initialize - set up a scsi_device's two sysfs faces
 * @sdev: newly allocated device
 *
 * Initializes the bus device (sdev_gendev) and the class device
 * (sdev_dev), names both "host:channel:id:lun", links the device into
 * its target's and host's lists, and pins the target until the device
 * is removed.
 */
void scsi_sysfs_device_initialize(struct scsi_device *sdev)
{
	unsigned long flags;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target  *starget = sdev->sdev_target;

	device_initialize(&sdev->sdev_gendev);
	sdev->sdev_gendev.bus = &scsi_bus_type;
	sdev->sdev_gendev.type = &scsi_dev_type;
	dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu",
		     sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);

	device_initialize(&sdev->sdev_dev);
	/* Class device holds a reference on the bus device. */
	sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev);
	sdev->sdev_dev.class = &sdev_class;
	dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%llu",
		     sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
	/*
	 * Get a default scsi_level from the target (derived from sibling
	 * devices).  This is the best we can do for guessing how to set
	 * sdev->lun_in_cdb for the initial INQUIRY command.  For LUN 0 the
	 * setting doesn't matter, because all the bits are zero anyway.
	 * But it does matter for higher LUNs.
	 */
	sdev->scsi_level = starget->scsi_level;
	if (sdev->scsi_level <= SCSI_2 &&
			sdev->scsi_level != SCSI_UNKNOWN &&
			!shost->no_scsi2_lun_in_cdb)
		sdev->lun_in_cdb = 1;

	transport_setup_device(&sdev->sdev_gendev);
	spin_lock_irqsave(shost->host_lock, flags);
	list_add_tail(&sdev->same_target_siblings, &starget->devices);
	list_add_tail(&sdev->siblings, &shost->__devices);
	spin_unlock_irqrestore(shost->host_lock, flags);
	/*
	 * device can now only be removed via __scsi_remove_device() so hold
	 * the target.  Target will be held in CREATED state until something
	 * beneath it becomes visible (in which case it moves to RUNNING)
	 */
	kref_get(&starget->reap_ref);
}
1476
/*
 * scsi_is_sdev_device - test whether a device is a scsi_device gendev
 * @dev: device to test
 *
 * True iff the device carries scsi_dev_type (set in
 * scsi_sysfs_device_initialize()), so to_scsi_device() is safe on it.
 */
int scsi_is_sdev_device(const struct device *dev)
{
	return dev->type == &scsi_dev_type;
}
EXPORT_SYMBOL(scsi_is_sdev_device);
1482
/* A blank transport template that is used in drivers that don't
 * yet implement Transport Attributes; the nested braces zero the
 * embedded anonymous container structures. */
struct scsi_transport_template blank_transport_template = { { { {NULL, }, }, }, };
This page took 0.076559 seconds and 5 git commands to generate.