/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>

#include "cio_debug.h"

int css_init_done = 0;
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;
int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}
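
/*
 * Illustrative sketch (added, not part of the original driver): a minimal
 * callback of the form expected by for_each_subchannel(). The function and
 * counter names are hypothetical; returning 0 keeps the iteration going.
 */
static int __maybe_unused example_count_subchannels(struct subchannel_id schid,
						    void *data)
{
	unsigned int *count = data;	/* caller-provided counter */

	(*count)++;
	return 0;
}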
struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}
static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}
static void css_sch_todo(struct work_struct *work);

static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);

	return 0;
}

static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->lock);
	kfree(sch);
}
struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0)
		goto err;

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	device_initialize(&sch->dev);
	return sch;

err:
	kfree(sch);
	return ERR_PTR(ret);
}
static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};
int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}
static int css_probe_device(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}
/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch(schid, &schib)) {
		/* Subchannel is not provided. */
		return -ENXIO;
	}
	if (!css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
		      schid.sch_no);

	return css_probe_device(schid);
}
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}
/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);
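
/*
 * Illustrative sketch (added, not part of the original driver):
 * css_sched_sch_todo() must be called with the subchannel lock held, so a
 * caller that does not yet own the lock would wrap it as shown. The function
 * name is hypothetical.
 */
static void __maybe_unused example_request_eval(struct subchannel *sch)
{
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_EVAL);
	spin_unlock_irq(sch->lock);
}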
static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}
static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/* Allow scheduling here since the containing loop might
		 * take a while.  */
		cond_resched();
	}
	return rc;
}
static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}
void css_schedule_eval_all_unreg(unsigned long delay)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_all_unreg(1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
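
/*
 * Illustrative sketch (added, not part of the original driver): code that
 * learns about new channel devices only has to trigger a reprobe; the actual
 * evaluation runs later on cio_work_q. The function name is hypothetical.
 */
static void __maybe_unused example_on_new_hardware(void)
{
	css_schedule_reprobe();
}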
/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid =
			(css->cssid < 0) ? 0 : css->cssid;
	} else {
		css->global_pgid.pgid_high.cpu_addr = stap();
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css = to_css(dev);

	mutex_destroy(&css->mutex);
	kfree(css);
}
static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);

static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);
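
/*
 * Note (added): cm_enable is exposed as a sysfs attribute of the css device,
 * so channel measurement can be toggled from userspace by writing 0 or 1 to
 * it (parsed as hex by kstrtoul() above); the usual location is assumed to be
 * /sys/devices/css0/cm_enable.
 */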
static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}

static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
	&cssdev_attr_group,
	&cssdev_cm_attr_group,
	NULL,
};
static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	int ret;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return -ENOMEM;

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;

	mutex_init(&css->mutex);
	css->cssid = chsc_get_cssid(nr);
	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = css_sch_create_locks(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		device_unregister(&css->device);
		goto out_err;
	}

	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
}
static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	ret = NOTIFY_DONE;
	for_each_css(css) {
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for_each_css(css) {
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			ret = __chsc_do_secm(css, 0);
			ret = notifier_from_errno(ret);
			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for_each_css(css) {
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			ret = __chsc_do_secm(css, 1);
			ret = notifier_from_errno(ret);
			mutex_unlock(&css->mutex);
		}
		/* search for subchannels, which appeared during hibernation */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}

	return ret;
}
static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};
/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= MAX_CSS_IDX; i++) {
		ret = setup_css(i);
		if (ret)
			goto out_unregister;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_unregister:
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];

		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}
static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

	for_each_css(css) {
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);
static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);
void channel_subsystem_reinit(void)
{
	struct channel_path *chp;
	struct chp_id chpid;

	chsc_enable_facility(CHSC_SDA_OC_MSS);
	chp_id_for_each(&chpid) {
		chp = chpid_to_chp(chpid);
		if (chp)
			chp_update_desc(chp);
	}
	cmf_reactivate();
}
#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct file_operations cio_settle_proc_fops = {
	.open = nonseekable_open,
	.write = cio_settle_write,
	.llseek = no_llseek,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL,
			    &cio_settle_proc_fops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/
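
/*
 * Note (added): a write to /proc/cio_settle blocks the writer until pending
 * channel report words have been handled and the subchannel evaluation work
 * scheduled above has completed (see css_complete_work()).
 */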
int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}
static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}
static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}
static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	css_update_ssd_info(sch);
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}
static const struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};

static struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
	.pm = &css_pm_ops,
};
/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
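
/*
 * Illustrative sketch (added, not part of the original driver): a minimal
 * css_driver and its registration. The id table, driver name and all
 * example_* identifiers are hypothetical.
 */
static struct css_device_id example_subchannel_ids[] __maybe_unused = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};

static struct css_driver example_css_driver __maybe_unused = {
	.drv = {
		.owner = THIS_MODULE,
		.name  = "example",
	},
	.subchannel_type = example_subchannel_ids,
};

static int __maybe_unused example_register(void)
{
	return css_driver_register(&example_css_driver);
}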
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);