// SPDX-License-Identifier: GPL-2.0
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;

int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

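/*
 * Illustrative sketch (not part of the driver): a callback passed to
 * for_each_subchannel() is invoked for every possible subchannel id,
 * subchannel set by set, and stops the scan by returning non-zero.
 * Names below are hypothetical:
 *
 *	static int count_schids(struct subchannel_id schid, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		(*count)++;
 *		return 0;	(keep scanning)
 *	}
 *
 *	unsigned long count = 0;
 *	for_each_subchannel(count_schids, &count);
 */
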
struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

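/*
 * Illustrative sketch: for_each_subchannel_staged() first visits all
 * registered subchannels (fn_known), then every id left in the idset,
 * i.e. ids without a registered device (fn_unknown). A caller that only
 * cares about registered subchannels can pass fn_unknown == NULL and
 * skip the idset allocation entirely, e.g. (hypothetical names):
 *
 *	static int print_known(struct subchannel *sch, void *data)
 *	{
 *		dev_info(&sch->dev, "known subchannel\n");
 *		return 0;
 *	}
 *
 *	for_each_subchannel_staged(print_known, NULL, NULL);
 */
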
static void css_sch_todo(struct work_struct *work);

static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);

	return 0;
}

static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->driver_override);
	kfree(sch->lock);
	kfree(sch);
}

static int css_validate_subchannel(struct subchannel_id schid,
				   struct schib *schib)
{
	int err;

	switch (schib->pmcw.st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(schib))
			err = -ENODEV;
		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
			CIO_MSG_EVENT(6, "Blacklisted device detected "
				      "at devno %04X, subchannel set %x\n",
				      schib->pmcw.dev, schid.ssid);
			err = -ENODEV;
		} else
			err = 0;
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      schid.ssid, schid.sch_no, schib->pmcw.st);
out:
	return err;
}

struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
					struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	ret = css_validate_subchannel(schid, schib);
	if (ret < 0)
		return ERR_PTR(ret);

	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	sch->schid = schid;
	sch->schib = *schib;
	sch->st = schib->pmcw.st;

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	sch->dev.dma_mask = &sch->dma_mask;
	device_initialize(&sch->dev);
	/*
	 * The physical addresses for some of the dma structures that can
	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
	 */
	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
	if (ret)
		goto err;
	/*
	 * But we don't have such restrictions imposed on the stuff that
	 * is handled by the streaming API.
	 */
	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err;

	return sch;

err:
	kfree(sch);
	return ERR_PTR(ret);
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR_RO(type);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = sch->driver_override;
	if (strlen(driver_override)) {
		sch->driver_override = driver_override;
	} else {
		kfree(driver_override);
		sch->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

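/*
 * Usage note (illustrative): the driver_override attribute follows the
 * usual driver core convention. From userspace, e.g.:
 *
 *	echo vfio_ccw > /sys/bus/css/devices/0.0.0313/driver_override
 *
 * restricts matching of that subchannel to the named driver; writing an
 * empty string clears the override. The device path is an example.
 */
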
static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static ssize_t chpids_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int mask;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}
static DEVICE_ATTR_RO(chpids);

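/*
 * Example output (illustrative values): chpids prints one two-digit hex
 * channel-path id per possible path, "00" for unused path positions:
 *
 *	$ cat /sys/bus/css/devices/0.0.0313/chpids
 *	40 41 00 00 00 00 00 00
 */
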
static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf(buf, "%02x %02x %02x\n",
		       pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);

static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};

int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;

	if (sch->st == SUBCHANNEL_TYPE_IO)
		sch->dev.type = &io_subchannel_type;

	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid, schib);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}

static int
check_subchannel(struct device *dev, const void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = (void *)data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;
	int ccode;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch(schid, &schib);
	if (ccode)
		return (ccode == 3) ? -ENXIO : ccode;

	return css_probe_device(schid, &schib);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);

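/*
 * Illustrative caller sketch: per the kernel-doc above, css_sched_sch_todo()
 * must run with the subchannel lock held, e.g.:
 *
 *	spin_lock_irq(sch->lock);
 *	css_sched_sch_todo(sch, SCH_TODO_EVAL);
 *	spin_unlock_irq(sch->lock);
 *
 * The queued work is drained by css_sch_todo() below.
 */
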
static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

static struct idset *slow_subchannel_set;
static DEFINE_SPINLOCK(slow_subchannel_lock);
static DECLARE_WAIT_QUEUE_HEAD(css_eval_wq);
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	atomic_set(&css_eval_scheduled, 0);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
		/*
		 * The loop might take long time for platforms with lots of
		 * known devices. Allow scheduling here.
		 */
		cond_resched();
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/* Allow scheduling here since the containing loop might
		 * take a while. */
		cond_resched();
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

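/*
 * Summary of the slow-path machinery: css_schedule_eval() and friends
 * mark subchannel ids in slow_subchannel_set and queue slow_path_work;
 * css_slow_path_func() then walks all subchannels, re-evaluating the
 * marked ones, and wakes css_eval_wq once the set is empty so that
 * css_complete_work() can make progress.
 */
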
static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

void css_schedule_eval_all_unreg(unsigned long delay)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_all_unreg(1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid =
			css->id_valid ? css->cssid : 0;
	} else {
		css->global_pgid.pgid_high.cpu_addr = stap();
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css = to_css(dev);

	mutex_destroy(&css->mutex);
	kfree(css);
}

static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (!css->id_valid)
		return -EINVAL;

	return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);

static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);

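/*
 * Usage note (illustrative): channel measurement is toggled via sysfs,
 * e.g.:
 *
 *	echo 1 > /sys/devices/css0/cm_enable
 *
 * The attribute is only visible if the CHSC secm facility is available
 * (see cm_enable_mode() below).
 */
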
static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}

static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
	&cssdev_attr_group,
	&cssdev_cm_attr_group,
	NULL,
};

static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	int ret;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return -ENOMEM;

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;
	/*
	 * We currently allocate notifier bits with this (using
	 * css->device as the device argument with the DMA API)
	 * and are fine with 64 bit addresses.
	 */
	ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
	if (ret) {
		kfree(css);
		goto out_err;
	}

	mutex_init(&css->mutex);
	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
	if (!ret) {
		css->id_valid = true;
		pr_info("Partition identifier %01x.%01x\n", css->cssid,
			css->iid);
	}
	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = css_sch_create_locks(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		device_unregister(&css->device);
		goto out_err;
	}

	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	ret = NOTIFY_DONE;
	for_each_css(css) {
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
static struct gen_pool *cio_dma_pool;

/* Currently cio supports only a single css */
struct device *cio_get_dma_css_dev(void)
{
	return &channel_subsystems[0]->device;
}

struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
{
	struct gen_pool *gp_dma;
	void *cpu_addr;
	dma_addr_t dma_addr;
	int i;

	gp_dma = gen_pool_create(3, -1);
	if (!gp_dma)
		return NULL;
	for (i = 0; i < nr_pages; ++i) {
		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
					      CIO_DMA_GFP);
		if (!cpu_addr)
			return gp_dma;
		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
				  dma_addr, PAGE_SIZE, -1);
	}
	return gp_dma;
}

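/*
 * Note: gen_pool_create(3, -1) above creates a pool with a minimal
 * allocation order of 3, i.e. 8-byte granularity, usable from any NUMA
 * node. Pages that were already added are kept even if a later page
 * allocation fails, so a partially populated pool is still returned.
 */
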
static void __gp_dma_free_dma(struct gen_pool *pool,
			      struct gen_pool_chunk *chunk, void *data)
{
	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;

	dma_free_coherent((struct device *) data, chunk_size,
			  (void *) chunk->start_addr,
			  (dma_addr_t) chunk->phys_addr);
}

void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
{
	if (!gp_dma)
		return;
	/* this is quite ugly but no better idea */
	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
	gen_pool_destroy(gp_dma);
}

static int cio_dma_pool_init(void)
{
	/* No need to free up the resources: compiled in */
	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
	if (!cio_dma_pool)
		return -ENOMEM;
	return 0;
}

void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
			size_t size)
{
	dma_addr_t dma_addr;
	unsigned long addr;
	size_t chunk_size;

	if (!gp_dma)
		return NULL;
	addr = gen_pool_alloc(gp_dma, size);
	while (!addr) {
		chunk_size = round_up(size, PAGE_SIZE);
		addr = (unsigned long) dma_alloc_coherent(dma_dev,
					 chunk_size, &dma_addr, CIO_DMA_GFP);
		if (!addr)
			return NULL;
		gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
		addr = gen_pool_alloc(gp_dma, size);
	}
	return (void *) addr;
}

void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
{
	if (!cpu_addr)
		return;
	memset(cpu_addr, 0, size);
	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
}

/*
 * Allocate dma memory from the css global pool. Intended for memory not
 * specific to any single device within the css. The allocated memory
 * is not guaranteed to be 31-bit addressable.
 *
 * Caution: Not suitable for early stuff like console.
 */
void *cio_dma_zalloc(size_t size)
{
	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
}

void cio_dma_free(void *cpu_addr, size_t size)
{
	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
}

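/*
 * Illustrative usage sketch (hypothetical caller): allocations from the
 * css global pool come back zeroed and must be freed with the same size:
 *
 *	void *buf;
 *
 *	buf = cio_dma_zalloc(64);
 *	if (!buf)
 *		return -ENOMEM;
 *	... use the DMA-capable buffer ...
 *	cio_dma_free(buf, 64);
 */
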
/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success means we can support more than one subchannel set. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= MAX_CSS_IDX; i++) {
		ret = setup_css(i);
		if (ret)
			goto out_unregister;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = cio_dma_pool_init();
	if (ret)
		goto out_unregister_rn;
	airq_init();
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_unregister_rn:
	unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];

		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

	for_each_css(css) {
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct proc_ops cio_settle_proc_ops = {
	.proc_open	= nonseekable_open,
	.proc_write	= cio_settle_write,
	.proc_lseek	= no_llseek,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/

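/*
 * Usage note (illustrative): writing anything to /proc/cio_settle blocks
 * until pending CRWs are handled and subchannel evaluation has finished,
 * e.g.:
 *
 *	echo > /proc/cio_settle
 */
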
int sch_is_pseudo_sch(struct subchannel *sch)
{
	if (!sch->dev.parent)
		return 0;
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
		return 0;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

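/*
 * Illustrative sketch of a css driver registration (hypothetical names;
 * real users are e.g. the I/O subchannel driver):
 *
 *	static struct css_device_id my_subchannel_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *		{ },
 *	};
 *
 *	static struct css_driver my_css_driver = {
 *		.drv = {
 *			.name = "my_sch",
 *			.owner = THIS_MODULE,
 *		},
 *		.subchannel_type = my_subchannel_ids,
 *	};
 *
 *	ret = css_driver_register(&my_css_driver);
 */
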
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);