[S390] cio: consolidate workqueues
[deliverable/linux.git] / drivers / s390 / cio / device.c
1 /*
2 * drivers/s390/cio/device.c
3 * bus driver for ccw devices
4 *
5 * Copyright IBM Corp. 2002,2008
6 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
7 * Cornelia Huck (cornelia.huck@de.ibm.com)
8 * Martin Schwidefsky (schwidefsky@de.ibm.com)
9 */
10
11 #define KMSG_COMPONENT "cio"
12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/spinlock.h>
17 #include <linux/errno.h>
18 #include <linux/err.h>
19 #include <linux/slab.h>
20 #include <linux/list.h>
21 #include <linux/device.h>
22 #include <linux/workqueue.h>
23 #include <linux/timer.h>
24
25 #include <asm/ccwdev.h>
26 #include <asm/cio.h>
27 #include <asm/param.h> /* HZ */
28 #include <asm/cmb.h>
29 #include <asm/isc.h>
30
31 #include "chp.h"
32 #include "cio.h"
33 #include "cio_debug.h"
34 #include "css.h"
35 #include "device.h"
36 #include "ioasm.h"
37 #include "io_sch.h"
38 #include "blacklist.h"
39
/* State of the delayed device recovery mechanism. */
static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);	/* protects recovery_phase/timer */
static int recovery_phase;		/* index into recovery_delay[] */
/* Backoff in seconds between successive recovery attempts. */
static const unsigned long recovery_delay[] = { 3, 30, 300 };
44
45 /******************* bus type handling ***********************/
46
47 /* The Linux driver model distinguishes between a bus type and
48 * the bus itself. Of course we only have one channel
49 * subsystem driver and one channel system per machine, but
50 * we still use the abstraction. T.R. says it's a good idea. */
51 static int
52 ccw_bus_match (struct device * dev, struct device_driver * drv)
53 {
54 struct ccw_device *cdev = to_ccwdev(dev);
55 struct ccw_driver *cdrv = to_ccwdrv(drv);
56 const struct ccw_device_id *ids = cdrv->ids, *found;
57
58 if (!ids)
59 return 0;
60
61 found = ccw_device_id_match(ids, &cdev->id);
62 if (!found)
63 return 0;
64
65 cdev->id.driver_info = found->driver_info;
66
67 return 1;
68 }
69
70 /* Store modalias string delimited by prefix/suffix string into buffer with
71 * specified size. Return length of resulting string (excluding trailing '\0')
72 * even if string doesn't fit buffer (snprintf semantics). */
73 static int snprint_alias(char *buf, size_t size,
74 struct ccw_device_id *id, const char *suffix)
75 {
76 int len;
77
78 len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
79 if (len > size)
80 return len;
81 buf += len;
82 size -= len;
83
84 if (id->dev_type != 0)
85 len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
86 id->dev_model, suffix);
87 else
88 len += snprintf(buf, size, "dtdm%s", suffix);
89
90 return len;
91 }
92
93 /* Set up environment variables for ccw device uevent. Return 0 on success,
94 * non-zero otherwise. */
/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise.
 * Exports CU_TYPE, CU_MODEL, DEV_TYPE, DEV_MODEL and MODALIAS so that
 * udev/modprobe can match the device against driver aliases. */
static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS= */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}
128
struct bus_type ccw_bus_type;

/* css_driver callbacks, implemented further down in this file. */
static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(unsigned long data);
/* Woken up whenever ccw_device_init_count drops to zero. */
wait_queue_head_t ccw_device_init_wq;
/* Number of ccw devices currently undergoing recognition. */
atomic_t ccw_device_init_count;

/* This driver handles I/O subchannels only. */
static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, io_subchannel_ids);
147
148 static int io_subchannel_prepare(struct subchannel *sch)
149 {
150 struct ccw_device *cdev;
151 /*
152 * Don't allow suspend while a ccw device registration
153 * is still outstanding.
154 */
155 cdev = sch_get_cdev(sch);
156 if (cdev && !device_is_registered(&cdev->dev))
157 return -EAGAIN;
158 return 0;
159 }
160
/* css "settle" callback: wait until all outstanding device recognition
 * has finished and all queued cio work (registration etc.) has run. */
static void io_subchannel_settle(void)
{
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	flush_workqueue(cio_work_q);
}
167
/* Driver for I/O subchannels, registered with the css bus. */
static struct css_driver io_subchannel_driver = {
	.owner = THIS_MODULE,
	.subchannel_type = io_subchannel_ids,
	.name = "io_subchannel",
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.prepare = io_subchannel_prepare,
	.settle = io_subchannel_settle,
};
181
182 int __init io_subchannel_init(void)
183 {
184 int ret;
185
186 init_waitqueue_head(&ccw_device_init_wq);
187 atomic_set(&ccw_device_init_count, 0);
188 setup_timer(&recovery_timer, recovery_func, 0);
189
190 ret = bus_register(&ccw_bus_type);
191 if (ret)
192 return ret;
193 ret = css_driver_register(&io_subchannel_driver);
194 if (ret)
195 bus_unregister(&ccw_bus_type);
196
197 return ret;
198 }
199
200
201 /************************ device handling **************************/
202
203 /*
204 * A ccw_device has some interfaces in sysfs in addition to the
205 * standard ones.
206 * The following entries are designed to export the information which
207 * resided in 2.4 in /proc/subchannels. Subchannel and device number
208 * are obvious, so they don't have an entry :)
209 * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
210 */
211 static ssize_t
212 chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
213 {
214 struct subchannel *sch = to_subchannel(dev);
215 struct chsc_ssd_info *ssd = &sch->ssd_info;
216 ssize_t ret = 0;
217 int chp;
218 int mask;
219
220 for (chp = 0; chp < 8; chp++) {
221 mask = 0x80 >> chp;
222 if (ssd->path_mask & mask)
223 ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
224 else
225 ret += sprintf(buf + ret, "00 ");
226 }
227 ret += sprintf (buf+ret, "\n");
228 return min((ssize_t)PAGE_SIZE, ret);
229 }
230
231 static ssize_t
232 pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf)
233 {
234 struct subchannel *sch = to_subchannel(dev);
235 struct pmcw *pmcw = &sch->schib.pmcw;
236
237 return sprintf (buf, "%02x %02x %02x\n",
238 pmcw->pim, pmcw->pam, pmcw->pom);
239 }
240
241 static ssize_t
242 devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
243 {
244 struct ccw_device *cdev = to_ccwdev(dev);
245 struct ccw_device_id *id = &(cdev->id);
246
247 if (id->dev_type != 0)
248 return sprintf(buf, "%04x/%02x\n",
249 id->dev_type, id->dev_model);
250 else
251 return sprintf(buf, "n/a\n");
252 }
253
254 static ssize_t
255 cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
256 {
257 struct ccw_device *cdev = to_ccwdev(dev);
258 struct ccw_device_id *id = &(cdev->id);
259
260 return sprintf(buf, "%04x/%02x\n",
261 id->cu_type, id->cu_model);
262 }
263
264 static ssize_t
265 modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
266 {
267 struct ccw_device *cdev = to_ccwdev(dev);
268 struct ccw_device_id *id = &(cdev->id);
269 int len;
270
271 len = snprint_alias(buf, PAGE_SIZE, id, "\n");
272
273 return len > PAGE_SIZE ? PAGE_SIZE : len;
274 }
275
276 static ssize_t
277 online_show (struct device *dev, struct device_attribute *attr, char *buf)
278 {
279 struct ccw_device *cdev = to_ccwdev(dev);
280
281 return sprintf(buf, cdev->online ? "1\n" : "0\n");
282 }
283
284 int ccw_device_is_orphan(struct ccw_device *cdev)
285 {
286 return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
287 }
288
/* Tear down the driver-core registration of @cdev: undo device_add()
 * if it happened and drop the initial device_initialize() reference
 * exactly once (guarded by the initialized flag). */
static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}
301
302 static void io_subchannel_quiesce(struct subchannel *);
303
/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	cdev->online = 0;
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		/* -EBUSY: I/O still pending on the subchannel; quiesce it
		 * and retry.  Preserve the FSM state across the quiesce. */
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warning("%s: The device entered boxed state while "
			   "being set offline\n", dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warning("%s: The device stopped operating while "
			   "being set offline\n", dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}
379
/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warning("%s: Setting the device online failed "
				   "because it is boxed\n",
				   dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warning("%s: Setting the device online failed "
				   "because it is not operational\n",
				   dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;
	cdev->online = 1;
	return 0;

rollback:
	/* The driver rejected the device: take it offline again. */
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}
477
/* Handle an "online=0" request.  Disconnected devices get an unregister
 * evaluation scheduled; otherwise take the device offline through its
 * driver. */
static int online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
		spin_unlock_irq(cdev->ccwlock);
	} else if (cdev->online && cdev->drv && cdev->drv->set_offline)
		return ccw_device_set_offline(cdev);
	return 0;
}
488
/* Re-run device recognition for boxed devices, then set the device
 * online.  Returns -EAGAIN if recognition failed, 0 otherwise.  Note:
 * any error from ccw_device_set_online() is deliberately not returned. */
static int online_store_recog_and_online(struct ccw_device *cdev)
{
	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_recognition(cdev);
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		ccw_device_set_online(cdev);
	return 0;
}
506
/* Handle an "online=1" request.  With @force set, try to break a boxed
 * state via an unconditional reserve (stlck) and retry recognition. */
static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		/* No sense id data yet: force a fresh recognition pass. */
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}
526
/* sysfs store for the "online" attribute: "0" -> offline, "1" -> online,
 * "force" -> force online (for boxed devices). */
static ssize_t online_store (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Reject while the device is in a transitional state. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED)
		return -EAGAIN;
	/* Allow only one online/offline transition at a time. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;

	/* Pin the driver module while its callbacks may run. */
	if (cdev->drv && !try_module_get(cdev->drv->owner)) {
		atomic_set(&cdev->private->onoff, 0);
		return -EINVAL;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		/* Value is parsed as hexadecimal. */
		ret = strict_strtoul(buf, 16, &i);
	}
	if (ret)
		goto out;
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
out:
	if (cdev->drv)
		module_put(cdev->drv->owner);
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}
570
571 static ssize_t
572 available_show (struct device *dev, struct device_attribute *attr, char *buf)
573 {
574 struct ccw_device *cdev = to_ccwdev(dev);
575 struct subchannel *sch;
576
577 if (ccw_device_is_orphan(cdev))
578 return sprintf(buf, "no device\n");
579 switch (cdev->private->state) {
580 case DEV_STATE_BOXED:
581 return sprintf(buf, "boxed\n");
582 case DEV_STATE_DISCONNECTED:
583 case DEV_STATE_DISCONNECTED_SENSE_ID:
584 case DEV_STATE_NOT_OPER:
585 sch = to_subchannel(dev->parent);
586 if (!sch->lpm)
587 return sprintf(buf, "no path\n");
588 else
589 return sprintf(buf, "no device\n");
590 default:
591 /* All other states considered fine. */
592 return sprintf(buf, "good\n");
593 }
594 }
595
/* Attributes exported for subchannels and ccw devices. */
static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);

/* Attributes that live on the subchannel device. */
static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	NULL,
};

static struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

/* Attributes that live on the ccw device itself. */
static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

/* Installed via cdev->dev.groups before device_add(). */
static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};
632
633 /* this is a simple abstraction for device_register that sets the
634 * correct bus type and adds the bus specific files */
635 static int ccw_device_register(struct ccw_device *cdev)
636 {
637 struct device *dev = &cdev->dev;
638 int ret;
639
640 dev->bus = &ccw_bus_type;
641 ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
642 cdev->private->dev_id.devno);
643 if (ret)
644 return ret;
645 return device_add(dev);
646 }
647
648 static int match_dev_id(struct device *dev, void *data)
649 {
650 struct ccw_device *cdev = to_ccwdev(dev);
651 struct ccw_dev_id *dev_id = data;
652
653 return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
654 }
655
656 static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
657 {
658 struct device *dev;
659
660 dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
661
662 return dev ? to_ccwdev(dev) : NULL;
663 }
664
/* Rebind @cdev to a (possibly different) matching driver by releasing
 * the current driver and re-running driver matching. */
static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
	int ret;

	if (device_is_registered(&cdev->dev)) {
		device_release_driver(&cdev->dev);
		ret = device_attach(&cdev->dev);
		WARN_ON(ret == -ENODEV);
	}
}
675
676 static void
677 ccw_device_release(struct device *dev)
678 {
679 struct ccw_device *cdev;
680
681 cdev = to_ccwdev(dev);
682 /* Release reference of parent subchannel. */
683 put_device(cdev->dev.parent);
684 kfree(cdev->private);
685 kfree(cdev);
686 }
687
688 static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
689 {
690 struct ccw_device *cdev;
691
692 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
693 if (cdev) {
694 cdev->private = kzalloc(sizeof(struct ccw_device_private),
695 GFP_KERNEL | GFP_DMA);
696 if (cdev->private)
697 return cdev;
698 }
699 kfree(cdev);
700 return ERR_PTR(-ENOMEM);
701 }
702
703 static void ccw_device_todo(struct work_struct *work);
704
/* Second half of ccw device creation: wire up private data, take the
 * needed references and run the device_initialize() half of
 * device_register().  Returns 0 or -ENODEV if the subchannel is going
 * away. */
static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	cdev->private->cdev = cdev;
	atomic_set(&cdev->private->onoff, 0);
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	if (!get_device(&sch->dev)) {
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	cdev->private->flags.initialized = 1;
	return 0;
}
724
/* Allocate and initialize a ccw device for @sch.  Returns the device
 * or an ERR_PTR. */
static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = io_subchannel_allocate_dev(sch);
	if (IS_ERR(cdev))
		return cdev;
	ret = io_subchannel_initialize_dev(sch, cdev);
	if (ret)
		return ERR_PTR(ret);
	return cdev;
}
738
739 static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
740
/* Create a fresh ccw device for @sch and start recognition on it; on
 * allocation failure the subchannel is unregistered. */
static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	/* Start recognition for the new ccw device. */
	io_subchannel_recog(cdev, sch);
}
755
/*
 * Register recognized device.
 * Runs from process context (todo work); also responsible for waking up
 * waiters on recognition completion.
 */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		goto out;
	}
	/*
	 * Now we know this subchannel will stay, we can throw
	 * our delayed uevent.
	 */
	dev_set_uevent_suppress(&sch->dev, 0);
	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	/* make it known to the system */
	ret = ccw_device_register(cdev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		/* Detach the device from the subchannel under the lock. */
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	wake_up(&cdev->private->wait_q);
out_err:
	if (atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}
819
/* Unregister the subchannel that @cdev resides on, holding a temporary
 * subchannel reference across the call. */
static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Get subchannel reference for local processing. */
	if (!get_device(cdev->dev.parent))
		return;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Release subchannel reference for local processing. */
	put_device(&sch->dev);
}
832
/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		/* Early boot: defer; registration happens after css init. */
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
		/* fall through */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}
862
/* Set up @cdev's private data and kick off asynchronous device
 * recognition (sense id) on @sch. */
static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	struct ccw_device_private *priv;

	cdev->ccwlock = sch->lock;

	/* Init private data. */
	priv = cdev->private;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;
	priv->schid = sch->schid;
	priv->state = DEV_STATE_NOT_OPER;
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	init_timer(&priv->timer);

	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
}
888
/* Move @cdev from its current parent subchannel to @sch: disable the
 * old subchannel, re-parent the device in the driver core and wire the
 * device up to the new subchannel.  On device_move() failure the old
 * subchannel is re-enabled.  Returns 0 or a negative error code. */
static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(old_sch->lock);
		if (rc == -EBUSY) {
			/* Release child reference for new parent. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to reenable the old subchannel. */
			spin_lock_irq(old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
			spin_unlock_irq(old_sch->lock);
		}
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(sch->lock);
	cdev->private->schid = sch->schid;
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}
952
953 static int ccw_device_move_to_orph(struct ccw_device *cdev)
954 {
955 struct subchannel *sch = to_subchannel(cdev->dev.parent);
956 struct channel_subsystem *css = to_css(sch->dev.parent);
957
958 return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
959 }
960
/* Interrupt handler for I/O subchannels: forward the interrupt to the
 * per-device state machine, if a ccw device is attached. */
static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(6, "IRQ");
	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}
972
/* Establish the default subchannel configuration: everything cleared
 * except concurrent sense, which is requested. */
void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
}
978
/* Derive path masks and interruption subclass for a subchannel from its
 * schib and channel-path state. */
static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;	/* console: assume all paths usable */
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}
996
/*
 * Note: We always return 0 so that we bind to the device even on error.
 * This is needed so that our remove function is called on unregister.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * The console subchannel already has an associated ccw_device.
		 * Throw the delayed uevent for the subchannel, register
		 * the ccw_device and exit.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		cdev = sch_get_cdev(sch);
		cdev->dev.groups = ccwdev_attr_groups;
		device_initialize(&cdev->dev);
		cdev->private->flags.initialized = 1;
		ccw_device_register(cdev);
		/*
		 * Check if the device is already online. If it is
		 * the reference count needs to be corrected since we
		 * didn't obtain a reference in ccw_device_set_online.
		 */
		if (cdev->private->state != DEV_STATE_NOT_OPER &&
		    cdev->private->state != DEV_STATE_OFFLINE &&
		    cdev->private->state != DEV_STATE_BOXED)
			get_device(&cdev->dev);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	sch->private = kzalloc(sizeof(struct io_subchannel_private),
			       GFP_KERNEL | GFP_DMA);
	if (!sch->private)
		goto out_schedule;
	/* Let the evaluation loop decide about device creation. */
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	/* Schedule unregistration of this subchannel; still return 0. */
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
}
1059
/* css_driver remove callback: quiesce the subchannel, detach and
 * unregister its ccw device, and free the subchannel private data. */
static int
io_subchannel_remove (struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;
	io_subchannel_quiesce(sch);
	/* Set ccw device to not operational and drop reference. */
	spin_lock_irq(cdev->ccwlock);
	sch_set_cdev(sch, NULL);
	cdev->private->state = DEV_STATE_NOT_OPER;
	spin_unlock_irq(cdev->ccwlock);
	ccw_device_unregister(cdev);
out_free:
	kfree(sch->private);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return 0;
}
1080
1081 static void io_subchannel_verify(struct subchannel *sch)
1082 {
1083 struct ccw_device *cdev;
1084
1085 cdev = sch_get_cdev(sch);
1086 if (cdev)
1087 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1088 }
1089
/*
 * A channel path identified by @mask went away.  Stop any I/O that is
 * currently running on that path and trigger path verification so the
 * device FSM can re-evaluate the remaining paths.
 */
static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		/* Let the device FSM terminate the request. */
		ccw_device_kill_io(cdev);
		goto out;
	}
	/* Not online: clear the subchannel directly. */
	if (cio_clear(sch))
		goto err;
out:
	/* Trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	/* Subchannel no longer operational. */
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}
1116
/*
 * Handle a channel-path event for this subchannel: adjust the operational
 * (opm) and logical (lpm) path masks and either terminate I/O on a path
 * that went away or trigger path verification for one that came back.
 * Returns 0 on success or -ENODEV if the subchannel is gone.
 */
static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	int mask;

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		/* Path does not belong to this subchannel. */
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		/* Only re-enable paths that are operational. */
		sch->lpm |= mask & sch->opm;
		io_subchannel_verify(sch);
		break;
	}
	return 0;
}
1150
/*
 * Quiesce the subchannel: terminate running I/O and disable the
 * subchannel.  May drop and re-acquire sch->lock while waiting for
 * outstanding I/O to complete.  Console subchannels are left alone.
 */
static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		/* Already disabled. */
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	/* -EBUSY: I/O still in flight; notify the driver it will fail. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			/* Wait (lock dropped) until the FSM leaves QUIESCE. */
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(sch->lock);
}
1182
/* Shutdown callback: quiesce the subchannel before the system goes down. */
static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}
1187
1188 static int device_is_disconnected(struct ccw_device *cdev)
1189 {
1190 if (!cdev)
1191 return 0;
1192 return (cdev->private->state == DEV_STATE_DISCONNECTED ||
1193 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
1194 }
1195
1196 static int recovery_check(struct device *dev, void *data)
1197 {
1198 struct ccw_device *cdev = to_ccwdev(dev);
1199 int *redo = data;
1200
1201 spin_lock_irq(cdev->ccwlock);
1202 switch (cdev->private->state) {
1203 case DEV_STATE_DISCONNECTED:
1204 CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
1205 cdev->private->dev_id.ssid,
1206 cdev->private->dev_id.devno);
1207 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1208 *redo = 1;
1209 break;
1210 case DEV_STATE_DISCONNECTED_SENSE_ID:
1211 *redo = 1;
1212 break;
1213 }
1214 spin_unlock_irq(cdev->ccwlock);
1215
1216 return 0;
1217 }
1218
/*
 * Recovery work: scan all ccw devices for disconnected ones and, if any
 * are found, re-arm the recovery timer with an increasing back-off delay
 * (recovery_delay[] seconds).
 */
static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			/* Back off up to the last (longest) delay. */
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(4, "recovery: end\n");
}
1236
1237 static DECLARE_WORK(recovery_work, recovery_work_func);
1238
/* Recovery timer callback; @data is unused. */
static void recovery_func(unsigned long data)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}
1247
/*
 * (Re-)start device recovery: reset the back-off phase and arm the
 * recovery timer with the shortest delay, unless a phase-0 run is
 * already pending.
 */
static void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(4, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}
1260
/*
 * Per-device callback for ccw_purge_blacklisted(): schedule
 * unregistration for offline devices that are on the blacklist.
 * Returns -EINTR to abort the bus iteration on a pending signal.
 */
static int purge_fn(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	spin_lock_irq(cdev->ccwlock);
	if (is_blacklisted(id->ssid, id->devno) &&
	    (cdev->private->state == DEV_STATE_OFFLINE)) {
		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
			      id->devno);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}
1280
/**
 * ccw_purge_blacklisted - purge unused, blacklisted devices
 *
 * Unregister all ccw devices that are offline and on the blacklist.
 * Always returns 0, even if the iteration was aborted by a signal.
 */
int ccw_purge_blacklisted(void)
{
	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
	return 0;
}
1292
/*
 * Put the device into the DISCONNECTED state and, if it was online,
 * kick off the recovery machinery so it is re-verified later.
 */
void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}
1303
/*
 * Mark the device not operational: stop its timer, disable the
 * subchannel and set the FSM state to NOT_OPER.
 */
void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}
1314
/* Actions io_subchannel_sch_event() may take, ordered by sch_get_action(). */
enum io_sch_action {
	IO_SCH_UNREG,		/* Unregister the subchannel. */
	IO_SCH_ORPH_UNREG,	/* Move cdev to orphanage, unregister sch. */
	IO_SCH_ATTACH,		/* Attach a ccw device to the subchannel. */
	IO_SCH_UNREG_ATTACH,	/* Unregister old cdev, then attach. */
	IO_SCH_ORPH_ATTACH,	/* Orphan old cdev, then attach. */
	IO_SCH_REPROBE,		/* Trigger device recognition. */
	IO_SCH_VERIFY,		/* Trigger path verification. */
	IO_SCH_DISC,		/* Mark the device disconnected. */
	IO_SCH_NOP,		/* Nothing to do. */
};
1326
/*
 * Determine which action io_subchannel_sch_event() should take, based
 * on the current state of the subchannel and its ccw device.  The
 * checks are ordered: not-operational handling first, then device-number
 * change, path loss, disconnection, and finally online verification.
 */
static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cio_update_schib(sch)) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (!ccw_device_notify(cdev, CIO_GONE))
			return IO_SCH_UNREG;
		/* Driver wants to keep the device: orphan it. */
		return IO_SCH_ORPH_UNREG;
	}
	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		/* A different device now answers on this subchannel. */
		if (!ccw_device_notify(cdev, CIO_GONE))
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		/* No usable path left. */
		if (!ccw_device_notify(cdev, CIO_NO_PATH))
			return IO_SCH_UNREG;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online)
		return IO_SCH_VERIFY;
	return IO_SCH_NOP;
}
1359
/**
 * io_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel and device. Return
 * zero when the event has been handled sufficiently or -EAGAIN when this
 * function should be called again in process context.
 */
static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
	unsigned long flags;
	struct ccw_device *cdev;
	struct ccw_dev_id dev_id;
	enum io_sch_action action;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;
	/* Defer while other work for this sch/cdev is still queued. */
	if (work_pending(&sch->todo_work))
		goto out_unlock;
	cdev = sch_get_cdev(sch);
	if (cdev && work_pending(&cdev->private->todo_work))
		goto out_unlock;
	action = sch_get_action(sch);
	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, process,
		      action);
	/* Perform immediate actions while holding the lock. */
	switch (action) {
	case IO_SCH_REPROBE:
		/* Trigger device recognition. */
		ccw_device_trigger_reprobe(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_VERIFY:
		/* Trigger path verification. */
		io_subchannel_verify(sch);
		rc = 0;
		goto out_unlock;
	case IO_SCH_DISC:
		ccw_device_set_disconnected(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		ccw_device_set_disconnected(cdev);
		break;
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_UNREG:
		if (cdev)
			ccw_device_set_notoper(cdev);
		break;
	case IO_SCH_NOP:
		rc = 0;
		goto out_unlock;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* All other actions require process context. */
	if (!process)
		goto out;
	/* Handle attached ccw device. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		/* Move ccw device to orphanage. */
		rc = ccw_device_move_to_orph(cdev);
		if (rc)
			goto out;
		break;
	case IO_SCH_UNREG_ATTACH:
		/* Unregister ccw device. */
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Handle subchannel. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_UNREG:
		css_sch_device_unregister(sch);
		break;
	case IO_SCH_ORPH_ATTACH:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_ATTACH:
		/* Look for a disconnected device with matching dev_id. */
		dev_id.ssid = sch->schid.ssid;
		dev_id.devno = sch->schib.pmcw.dev;
		cdev = get_ccwdev_by_dev_id(&dev_id);
		if (!cdev) {
			/* None found: create and recognize a new device. */
			sch_create_and_recog_new_device(sch);
			break;
		}
		rc = ccw_device_move_to_sch(cdev, sch);
		if (rc) {
			/* Release reference from get_ccwdev_by_dev_id() */
			put_device(&cdev->dev);
			goto out;
		}
		spin_lock_irqsave(sch->lock, flags);
		ccw_device_trigger_reprobe(cdev);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release reference from get_ccwdev_by_dev_id() */
		put_device(&cdev->dev);
		break;
	default:
		break;
	}
	return 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);
out:
	return rc;
}
1479
#ifdef CONFIG_CCW_CONSOLE
/* Statically allocated console device; there is at most one. */
static struct ccw_device console_cdev;
static struct ccw_device_private console_private;
static int console_cdev_in_use;	/* set via xchg() to claim the console */

static DEFINE_SPINLOCK(ccw_console_lock);

/* Return the lock protecting the console ccw device. */
spinlock_t * cio_get_console_lock(void)
{
	return &ccw_console_lock;
}
1491
/*
 * Bring the console ccw device online on @sch, waiting (busily, via
 * wait_cons_dev()) for recognition and the online transition to finish.
 * Returns 0 on success, -EIO if the device did not reach the expected
 * state, or a cio_commit_config() error code.
 */
static int ccw_device_console_enable(struct ccw_device *cdev,
				     struct subchannel *sch)
{
	int rc;

	/* Attach subchannel private data. */
	sch->private = cio_get_console_priv();
	memset(sch->private, 0, sizeof(struct io_subchannel_private));
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	/* Initialize the ccw_device structure. */
	cdev->dev.parent= &sch->dev;
	sch_set_cdev(sch, cdev);
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		wait_cons_dev();
	rc = -EIO;
	if (cdev->private->state != DEV_STATE_OFFLINE)
		goto out_unlock;
	ccw_device_online(cdev);
	while (!dev_fsm_final_state(cdev))
		wait_cons_dev();
	if (cdev->private->state != DEV_STATE_ONLINE)
		goto out_unlock;
	rc = 0;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	return rc;
}
1526
/*
 * Probe and enable the console ccw device.  Only one caller may hold
 * the console at a time (guarded by console_cdev_in_use).  Returns the
 * console ccw device, or an ERR_PTR() on failure.
 */
struct ccw_device *
ccw_device_probe_console(void)
{
	struct subchannel *sch;
	int ret;

	/* Atomically claim the single console slot. */
	if (xchg(&console_cdev_in_use, 1) != 0)
		return ERR_PTR(-EBUSY);
	sch = cio_probe_console();
	if (IS_ERR(sch)) {
		console_cdev_in_use = 0;
		return (void *) sch;
	}
	memset(&console_cdev, 0, sizeof(struct ccw_device));
	memset(&console_private, 0, sizeof(struct ccw_device_private));
	console_cdev.private = &console_private;
	console_private.cdev = &console_cdev;
	ret = ccw_device_console_enable(&console_cdev, sch);
	if (ret) {
		cio_release_console();
		console_cdev_in_use = 0;
		return ERR_PTR(ret);
	}
	console_cdev.online = 1;
	return &console_cdev;
}
1553
1554 static int ccw_device_pm_restore(struct device *dev);
1555
/*
 * Force the console device back into a usable state by running the
 * resume path on it.  Returns -ENODEV if no console device is in use.
 */
int ccw_device_force_console(void)
{
	if (!console_cdev_in_use)
		return -ENODEV;
	return ccw_device_pm_restore(&console_cdev.dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
1563 #endif
1564
1565 /*
1566 * get ccw_device matching the busid, but only if owned by cdrv
1567 */
/* Match callback for driver_find_device(): compare the bus id in @id. */
static int
__ccwdev_check_busid(struct device *dev, void *id)
{
	const char *bus_id = id;

	return strcmp(bus_id, dev_name(dev)) == 0;
}
1577
1578
/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a bus
 * id matching @bus_id.
 * Returns:
 *  If a match is found, the reference count of the found device is increased
 *  and it is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;
	struct device_driver *drv;

	/* Take a reference on the driver while iterating its devices. */
	drv = get_driver(&cdrv->driver);
	if (!drv)
		return NULL;

	dev = driver_find_device(drv, NULL, (void *)bus_id,
				 __ccwdev_check_busid);
	put_driver(drv);

	return dev ? to_ccwdev(dev) : NULL;
}
1606
1607 /************************** device driver handling ************************/
1608
1609 /* This is the implementation of the ccw_driver class. The probe, remove
1610 * and release methods are initially very similar to the device_driver
1611 * implementations, with the difference that they have ccw_device
1612 * arguments.
1613 *
1614 * A ccw driver also contains the information that is needed for
1615 * device matching.
1616 */
1617 static int
1618 ccw_device_probe (struct device *dev)
1619 {
1620 struct ccw_device *cdev = to_ccwdev(dev);
1621 struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
1622 int ret;
1623
1624 cdev->drv = cdrv; /* to let the driver call _set_online */
1625
1626 ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
1627
1628 if (ret) {
1629 cdev->drv = NULL;
1630 return ret;
1631 }
1632
1633 return 0;
1634 }
1635
/*
 * Bus remove callback: invoke the driver's remove function, take an
 * online device offline (waiting for the FSM to settle) and drop the
 * reference obtained when the device was set online.
 */
static int
ccw_device_remove (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);
	if (cdev->online) {
		cdev->online = 0;
		spin_lock_irq(cdev->ccwlock);
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	return 0;
}
1665
1666 static void ccw_device_shutdown(struct device *dev)
1667 {
1668 struct ccw_device *cdev;
1669
1670 cdev = to_ccwdev(dev);
1671 if (cdev->drv && cdev->drv->shutdown)
1672 cdev->drv->shutdown(cdev);
1673 disable_cmf(cdev);
1674 }
1675
/*
 * PM prepare callback: refuse to suspend (-EAGAIN) while todo work is
 * pending or the device is being set online/offline; otherwise forward
 * to the driver's prepare function, if any.
 */
static int ccw_device_pm_prepare(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (work_pending(&cdev->private->todo_work))
		return -EAGAIN;
	/* Fail while device is being set online/offline. */
	if (atomic_read(&cdev->private->onoff))
		return -EAGAIN;

	if (cdev->online && cdev->drv && cdev->drv->prepare)
		return cdev->drv->prepare(cdev);

	return 0;
}
1691
1692 static void ccw_device_pm_complete(struct device *dev)
1693 {
1694 struct ccw_device *cdev = to_ccwdev(dev);
1695
1696 if (cdev->online && cdev->drv && cdev->drv->complete)
1697 cdev->drv->complete(cdev);
1698 }
1699
/*
 * PM freeze callback: let the driver freeze, switch off channel
 * measurements (so the css stops writing to memory) and disable the
 * subchannel to stop all further device driver I/O.
 */
static int ccw_device_pm_freeze(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	/* Fail suspend while device is in transitional state. */
	if (!dev_fsm_final_state(cdev))
		return -EAGAIN;
	if (!cdev->online)
		return 0;
	if (cdev->drv && cdev->drv->freeze) {
		ret = cdev->drv->freeze(cdev);
		if (ret)
			return ret;
	}

	spin_lock_irq(sch->lock);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (cm_enabled) {
		/* Don't have the css write on memory. */
		ret = ccw_set_cmf(cdev, 0);
		if (ret)
			return ret;
	}
	/* From here on, disallow device driver I/O. */
	spin_lock_irq(sch->lock);
	ret = cio_disable_subchannel(sch);
	spin_unlock_irq(sch->lock);

	return ret;
}
1733
/*
 * PM thaw callback: re-enable the subchannel (allowing device driver
 * I/O again), restore channel measurements if they were active, and
 * forward to the driver's thaw function.
 */
static int ccw_device_pm_thaw(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	if (!cdev->online)
		return 0;

	spin_lock_irq(sch->lock);
	/* Allow device driver I/O again. */
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (ret)
		return ret;

	if (cm_enabled) {
		ret = ccw_set_cmf(cdev, 1);
		if (ret)
			return ret;
	}

	if (cdev->drv && cdev->drv->thaw)
		ret = cdev->drv->thaw(cdev);

	return ret;
}
1762
/*
 * Common restore work: re-run device recognition after resume and wait
 * for it to finish.  Console devices are skipped.  The resuming flag is
 * set during recognition so the FSM can distinguish this from normal
 * recognition.
 */
static void __ccw_device_pm_restore(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (cio_is_console(sch->schid))
		goto out;
	/*
	 * While we were sleeping, devices may have gone or become
	 * available again. Kick re-detection.
	 */
	spin_lock_irq(sch->lock);
	cdev->private->flags.resuming = 1;
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED);
out:
	cdev->private->flags.resuming = 0;
}
1782
1783 static int resume_handle_boxed(struct ccw_device *cdev)
1784 {
1785 cdev->private->state = DEV_STATE_BOXED;
1786 if (ccw_device_notify(cdev, CIO_BOXED))
1787 return 0;
1788 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1789 return -ENODEV;
1790 }
1791
1792 static int resume_handle_disc(struct ccw_device *cdev)
1793 {
1794 cdev->private->state = DEV_STATE_DISCONNECTED;
1795 if (ccw_device_notify(cdev, CIO_GONE))
1796 return 0;
1797 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1798 return -ENODEV;
1799 }
1800
/*
 * PM restore callback: re-recognize the device, verify it is still the
 * same device (devno and sense data), bring it back online if it was
 * online before suspend, restore channel measurements and finally call
 * the driver's restore function.  Devices that changed or disappeared
 * are scheduled for rebind or unregistration.
 */
static int ccw_device_pm_restore(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret = 0, cm_enabled;

	__ccw_device_pm_restore(cdev);
	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid)) {
		/* Console: just re-enable the subchannel. */
		cio_enable_subchannel(sch, (u32)(addr_t)sch);
		spin_unlock_irq(sch->lock);
		goto out_restore;
	}
	cdev->private->flags.donotify = 0;
	/* check recognition results */
	switch (cdev->private->state) {
	case DEV_STATE_OFFLINE:
		break;
	case DEV_STATE_BOXED:
		ret = resume_handle_boxed(cdev);
		spin_unlock_irq(sch->lock);
		if (ret)
			goto out;
		goto out_restore;
	case DEV_STATE_DISCONNECTED:
		goto out_disc_unlock;
	default:
		goto out_unreg_unlock;
	}
	/* check if the device id has changed */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno "
			      "changed from %04x to %04x)\n",
			      sch->schid.ssid, sch->schid.sch_no,
			      cdev->private->dev_id.devno,
			      sch->schib.pmcw.dev);
		goto out_unreg_unlock;
	}
	/* check if the device type has changed */
	if (!ccw_device_test_sense_data(cdev)) {
		ccw_device_update_sense_data(cdev);
		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		ret = -ENODEV;
		goto out_unlock;
	}
	if (!cdev->online) {
		ret = 0;
		goto out_unlock;
	}
	ret = ccw_device_online(cdev);
	if (ret)
		goto out_disc_unlock;

	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);

	/* Wait for the online transition to finish. */
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	if (cdev->private->state != DEV_STATE_ONLINE) {
		spin_lock_irq(sch->lock);
		goto out_disc_unlock;
	}
	if (cm_enabled) {
		ret = ccw_set_cmf(cdev, 1);
		if (ret) {
			/* cmf failure is not fatal for the resume. */
			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
				      "(rc=%d)\n", cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			ret = 0;
		}
	}

out_restore:
	if (cdev->online && cdev->drv && cdev->drv->restore)
		ret = cdev->drv->restore(cdev);
out:
	return ret;

out_disc_unlock:
	/* Called with sch->lock held. */
	ret = resume_handle_disc(cdev);
	spin_unlock_irq(sch->lock);
	if (ret)
		return ret;
	goto out_restore;

out_unreg_unlock:
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
	ret = -ENODEV;
out_unlock:
	spin_unlock_irq(sch->lock);
	return ret;
}
1892
/* Power management operations for devices on the ccw bus. */
static const struct dev_pm_ops ccw_pm_ops = {
	.prepare = ccw_device_pm_prepare,
	.complete = ccw_device_pm_complete,
	.freeze	= ccw_device_pm_freeze,
	.thaw = ccw_device_pm_thaw,
	.restore = ccw_device_pm_restore,
};
1900
/* The ccw bus type, connecting ccw devices with ccw drivers. */
struct bus_type ccw_bus_type = {
	.name   = "ccw",
	.match  = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe  = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
	.pm = &ccw_pm_ops,
};
1910
/**
 * ccw_driver_register() - register a ccw driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int ccw_driver_register(struct ccw_driver *cdriver)
{
	struct device_driver *drv = &cdriver->driver;

	drv->bus = &ccw_bus_type;
	drv->name = cdriver->name;
	drv->owner = cdriver->owner;

	return driver_register(drv);
}
1929
/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}
1940
1941 /* Helper func for qdio. */
1942 struct subchannel_id
1943 ccw_device_get_subchannel_id(struct ccw_device *cdev)
1944 {
1945 struct subchannel *sch;
1946
1947 sch = to_subchannel(cdev->dev.parent);
1948 return sch->schid;
1949 }
1950
/*
 * Work function for cdev->private->todo_work: read the scheduled todo
 * under the ccwdev lock, reset it, perform the operation and release
 * the reference taken by ccw_device_sched_todo().
 */
static void ccw_device_todo(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	enum cdev_todo todo;

	priv = container_of(work, struct ccw_device_private, todo_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	/* Find out todo. */
	spin_lock_irq(cdev->ccwlock);
	todo = priv->todo;
	priv->todo = CDEV_TODO_NOTHING;
	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
		      priv->dev_id.ssid, priv->dev_id.devno, todo);
	spin_unlock_irq(cdev->ccwlock);
	/* Perform todo. */
	switch (todo) {
	case CDEV_TODO_ENABLE_CMF:
		cmf_reenable(cdev);
		break;
	case CDEV_TODO_REBIND:
		ccw_device_do_unbind_bind(cdev);
		break;
	case CDEV_TODO_REGISTER:
		io_subchannel_register(cdev);
		break;
	case CDEV_TODO_UNREG_EVAL:
		if (!sch_is_pseudo_sch(sch))
			css_schedule_eval(sch->schid);
		/* fall-through */
	case CDEV_TODO_UNREG:
		/* Pseudo subchannels (orphanage) have no sch to unregister. */
		if (sch_is_pseudo_sch(sch))
			ccw_device_unregister(cdev);
		else
			ccw_device_call_sch_unregister(cdev);
		break;
	default:
		break;
	}
	/* Release workqueue ref. */
	put_device(&cdev->dev);
}
1995
/**
 * ccw_device_sched_todo - schedule ccw device operation
 * @cdev: ccw device
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with ccwdev lock held.
 */
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      todo);
	/* Higher enum values take precedence over lower ones. */
	if (cdev->private->todo >= todo)
		return;
	cdev->private->todo = todo;
	/* Get workqueue ref. */
	if (!get_device(&cdev->dev))
		return;
	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&cdev->dev);
	}
}
2021
2022 MODULE_LICENSE("GPL");
2023 EXPORT_SYMBOL(ccw_device_set_online);
2024 EXPORT_SYMBOL(ccw_device_set_offline);
2025 EXPORT_SYMBOL(ccw_driver_register);
2026 EXPORT_SYMBOL(ccw_driver_unregister);
2027 EXPORT_SYMBOL(get_ccwdev_by_busid);
2028 EXPORT_SYMBOL(ccw_bus_type);
2029 EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
This page took 0.070099 seconds and 6 git commands to generate.