/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"
int css_init_done = 0;
int max_ssid;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}
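/*
 * Note: the inner loop walks all subchannel numbers of one subchannel set
 * before the outer loop advances to the next ssid; iteration stops as soon
 * as the callback returns a non-zero value.
 */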
struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};
static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}
static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}
static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}
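/*
 * Typical use (see css_slow_path_func() below): pass slow_eval_known_fn and
 * slow_eval_unknown_fn so that already registered subchannels are handled
 * via the driver core while the remaining subchannel ids are probed
 * explicitly.
 */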
static void css_sch_todo(struct work_struct *work);

static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	INIT_WORK(&sch->todo_work, css_sch_todo);
	return sch;
}
static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		/* Reset intparm to zeroes. */
		sch->config.intparm = 0;
		cio_commit_config(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}
static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
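/*
 * Note: both helpers take sch->reg_mutex so that registration and
 * unregistration of the same subchannel cannot race with each other.
 */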
static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	if (todo == SCH_TODO_UNREG)
		css_sch_device_unregister(sch);
	/* Release workqueue ref. */
	put_device(&sch->dev);
}
/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(slow_path_wq, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
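/*
 * Reference handling: a device reference is taken before the work is queued
 * and dropped again either here (if the work was already pending) or at the
 * end of css_sch_todo() once the scheduled operation has been performed.
 */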
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}
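/*
 * Bit i of the path mask (taken from the PIM) corresponds to entry i of the
 * CHPID array, so only installed paths get their channel path registered.
 */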
void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}
static ssize_t
type_show(struct device *dev, struct device_attribute *attr,
	  char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t
modalias_show(struct device *dev, struct device_attribute *attr,
	      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};
static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}
int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			return PTR_ERR(sch);
	}
	ret = css_register_subchannel(sch);
	if (ret) {
		if (!cio_is_console(schid))
			put_device(&sch->dev);
	}
	return ret;
}
static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}
/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);
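/*
 * An I/O subchannel counts as valid only if its device number valid (dnv)
 * bit is set; a message subchannel only if its 'w' bit is set.
 */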
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
		      schid.sch_no);

	return css_probe_device(schid);
}
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}
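/*
 * Slow path state: slow_subchannel_set collects the subchannel ids that
 * still need to be evaluated and is protected by slow_subchannel_lock, while
 * css_eval_scheduled and css_eval_wq let channel_subsystem_init_sync() wait
 * until the initial evaluation has drained.
 */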
static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}
static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			break;
		default:
			rc = 0;
		}
	}
	return rc;
}
static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}
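/*
 * Note: css_schedule_eval() queues a single subchannel id for evaluation,
 * css_schedule_eval_all() queues every possible id, and the helper above
 * clears registered subchannels from an idset so that
 * css_schedule_eval_all_unreg() below can target only unregistered ones.
 */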
void css_schedule_eval_all_unreg(void)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(slow_path_wq);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	css_schedule_eval_all_unreg();
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 8) & 3;

	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = stap();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
	css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}
static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
	kfree(css);
}
static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	if (!css)
		return 0;
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
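/*
 * The cm_enable attribute exposes channel measurement control in sysfs:
 * reading reports css->cm_enabled, writing 0 or 1 disables or enables
 * measurements via chsc_secm().
 */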
static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}
static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int ret, i;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};
/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	void *secm_area;
	int ret, i;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 0, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;

			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 1, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;

			mutex_unlock(&css->mutex);
		}
		/* search for subchannels, which appeared during hibernation */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}
	return ret;
}

static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};
/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_determine_css_characteristics();
	if (ret == -ENOMEM)
		goto out;

	ret = chsc_alloc_sei_area();
	if (ret)
		goto out;

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	switch (ret) {
	case 0: /* Success. */
		max_ssid = __MAX_SSID;
		break;
	case -ENOMEM:
		goto out;
	default:
		max_ssid = 0;
	}

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret) {
			put_device(&css->pseudo_subchannel->dev);
			goto out_file;
		}
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_file:
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_CSS);
	chsc_free_sei_area();
	idset_free(slow_subchannel_set);
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}
static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;
	int i;

	for (i = 0; i <= __MAX_CSSID; i++) {
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device, &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_CSS);
	chsc_free_sei_area();
	idset_free(slow_subchannel_set);
	isc_unregister(IO_SCH_ISC);
}
static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;

	ret = io_subchannel_init();
	if (ret)
		css_bus_cleanup();

	return ret;
}
subsys_initcall(channel_subsystem_init);
static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		cssdrv->settle();
	return 0;
}
/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();
	/* Wait for the evaluation of subchannels to finish. */
	wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}
subsys_initcall_sync(channel_subsystem_init_sync);
int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}
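/*
 * The pseudo subchannel is the "defunct" placeholder device allocated in
 * setup_css() as a child of the css device; it never represents a real
 * subchannel.
 */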
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}
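/*
 * Matching walks the driver's subchannel_type table until the terminating
 * entry (match_flags == 0) and succeeds when the subchannel type sch->st
 * equals the table entry's type.
 */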
static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}
static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}

static const struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};
struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
	.pm = &css_pm_ops,
};
/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.name = cdrv->name;
	cdrv->drv.bus = &css_bus_type;
	cdrv->drv.owner = cdrv->owner;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
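/*
 * Illustrative sketch only (not taken from this file): a subchannel driver
 * typically provides a css_device_id table plus name and owner, then calls
 * css_driver_register(). The "foo" names below are hypothetical.
 *
 *	static struct css_device_id foo_subchannel_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *		{ },
 *	};
 *	static struct css_driver foo_subchannel_driver = {
 *		.owner = THIS_MODULE,
 *		.name = "foo_subchannel",
 *		.subchannel_type = foo_subchannel_ids,
 *	};
 *
 *	ret = css_driver_register(&foo_subchannel_driver);
 */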
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);