]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/s390/cio/chsc.c
[S390] cio: Extend adapter interrupt interface.
[mirror_ubuntu-zesty-kernel.git] / drivers / s390 / cio / chsc.c
CommitLineData
1da177e4
LT
/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *                            IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */
11
12#include <linux/module.h>
1da177e4
LT
13#include <linux/slab.h>
14#include <linux/init.h>
15#include <linux/device.h>
16
17#include <asm/cio.h>
e5854a58 18#include <asm/chpid.h>
1da177e4
LT
19
20#include "css.h"
21#include "cio.h"
22#include "cio_debug.h"
23#include "ioasm.h"
e6b6e10a 24#include "chp.h"
1da177e4
LT
25#include "chsc.h"
26
1da177e4
LT
27static void *sei_page;
28
7ad6a249
PO
29struct chsc_ssd_area {
30 struct chsc_header request;
31 u16 :10;
32 u16 ssid:2;
33 u16 :4;
34 u16 f_sch; /* first subchannel */
35 u16 :16;
36 u16 l_sch; /* last subchannel */
37 u32 :32;
38 struct chsc_header response;
39 u32 :32;
40 u8 sch_valid : 1;
41 u8 dev_valid : 1;
42 u8 st : 3; /* subchannel type */
43 u8 zeroes : 3;
44 u8 unit_addr; /* unit address */
45 u16 devno; /* device number */
46 u8 path_mask;
47 u8 fla_valid_mask;
48 u16 sch; /* subchannel */
49 u8 chpid[8]; /* chpids 0-7 */
50 u16 fla[8]; /* full link addresses 0-7 */
51} __attribute__ ((packed));
52
53int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
1da177e4 54{
7ad6a249
PO
55 unsigned long page;
56 struct chsc_ssd_area *ssd_area;
57 int ccode;
58 int ret;
59 int i;
60 int mask;
1da177e4 61
7ad6a249
PO
62 page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
63 if (!page)
64 return -ENOMEM;
65 ssd_area = (struct chsc_ssd_area *) page;
495a5b45
CH
66 ssd_area->request.length = 0x0010;
67 ssd_area->request.code = 0x0004;
7ad6a249
PO
68 ssd_area->ssid = schid.ssid;
69 ssd_area->f_sch = schid.sch_no;
70 ssd_area->l_sch = schid.sch_no;
1da177e4
LT
71
72 ccode = chsc(ssd_area);
7ad6a249 73 /* Check response. */
1da177e4 74 if (ccode > 0) {
7ad6a249
PO
75 ret = (ccode == 3) ? -ENODEV : -EBUSY;
76 goto out_free;
1da177e4 77 }
7ad6a249
PO
78 if (ssd_area->response.code != 0x0001) {
79 CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
80 schid.ssid, schid.sch_no,
1da177e4 81 ssd_area->response.code);
7ad6a249
PO
82 ret = -EIO;
83 goto out_free;
1da177e4 84 }
7ad6a249
PO
85 if (!ssd_area->sch_valid) {
86 ret = -ENODEV;
87 goto out_free;
1da177e4 88 }
7ad6a249
PO
89 /* Copy data */
90 ret = 0;
91 memset(ssd, 0, sizeof(struct chsc_ssd_info));
92 if ((ssd_area->st != 0) && (ssd_area->st != 2))
93 goto out_free;
94 ssd->path_mask = ssd_area->path_mask;
95 ssd->fla_valid_mask = ssd_area->fla_valid_mask;
96 for (i = 0; i < 8; i++) {
97 mask = 0x80 >> i;
98 if (ssd_area->path_mask & mask) {
99 chp_id_init(&ssd->chpid[i]);
100 ssd->chpid[i].id = ssd_area->chpid[i];
1da177e4 101 }
7ad6a249
PO
102 if (ssd_area->fla_valid_mask & mask)
103 ssd->fla[i] = ssd_area->fla[i];
1da177e4 104 }
7ad6a249
PO
105out_free:
106 free_page(page);
1da177e4
LT
107 return ret;
108}
109
387b734f
SB
110static int check_for_io_on_path(struct subchannel *sch, int mask)
111{
112 int cc;
113
114 cc = stsch(sch->schid, &sch->schib);
115 if (cc)
116 return 0;
117 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
118 return 1;
119 return 0;
120}
121
122static void terminate_internal_io(struct subchannel *sch)
123{
124 if (cio_clear(sch)) {
125 /* Recheck device in case clear failed. */
126 sch->lpm = 0;
83b3370c
PO
127 if (device_trigger_verify(sch) != 0)
128 css_schedule_eval(sch->schid);
387b734f
SB
129 return;
130 }
131 /* Request retry of internal operation. */
132 device_set_intretry(sch);
133 /* Call handler. */
134 if (sch->driver && sch->driver->termination)
602b20f2 135 sch->driver->termination(sch);
387b734f
SB
136}
137
1da177e4
LT
138static int
139s390_subchannel_remove_chpid(struct device *dev, void *data)
140{
141 int j;
142 int mask;
143 struct subchannel *sch;
e6b6e10a 144 struct chp_id *chpid;
1da177e4
LT
145 struct schib schib;
146
147 sch = to_subchannel(dev);
148 chpid = data;
7e8ae7bf
CH
149 for (j = 0; j < 8; j++) {
150 mask = 0x80 >> j;
151 if ((sch->schib.pmcw.pim & mask) &&
e6b6e10a 152 (sch->schib.pmcw.chpid[j] == chpid->id))
1da177e4 153 break;
7e8ae7bf 154 }
1da177e4
LT
155 if (j >= 8)
156 return 0;
157
2ec22984 158 spin_lock_irq(sch->lock);
1da177e4 159
a8237fc4 160 stsch(sch->schid, &schib);
1da177e4
LT
161 if (!schib.pmcw.dnv)
162 goto out_unreg;
163 memcpy(&sch->schib, &schib, sizeof(struct schib));
164 /* Check for single path devices. */
165 if (sch->schib.pmcw.pim == 0x80)
166 goto out_unreg;
1da177e4 167
387b734f
SB
168 if (check_for_io_on_path(sch, mask)) {
169 if (device_is_online(sch))
170 device_kill_io(sch);
171 else {
172 terminate_internal_io(sch);
173 /* Re-start path verification. */
174 if (sch->driver && sch->driver->verify)
602b20f2 175 sch->driver->verify(sch);
387b734f
SB
176 }
177 } else {
178 /* trigger path verification. */
179 if (sch->driver && sch->driver->verify)
602b20f2 180 sch->driver->verify(sch);
387b734f 181 else if (sch->lpm == mask)
1da177e4 182 goto out_unreg;
1da177e4
LT
183 }
184
2ec22984 185 spin_unlock_irq(sch->lock);
1da177e4 186 return 0;
387b734f 187
1da177e4 188out_unreg:
1da177e4 189 sch->lpm = 0;
387b734f 190 spin_unlock_irq(sch->lock);
83b3370c 191 css_schedule_eval(sch->schid);
1da177e4
LT
192 return 0;
193}
194
e6b6e10a 195void chsc_chp_offline(struct chp_id chpid)
1da177e4
LT
196{
197 char dbf_txt[15];
198
f86635fa 199 sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
1da177e4
LT
200 CIO_TRACE_EVENT(2, dbf_txt);
201
e6b6e10a 202 if (chp_get_status(chpid) <= 0)
1da177e4 203 return;
e6b6e10a 204 bus_for_each_dev(&css_bus_type, NULL, &chpid,
1da177e4 205 s390_subchannel_remove_chpid);
1da177e4
LT
206}
207
4d284cac 208static int
f97a56fb
CH
209s390_process_res_acc_new_sch(struct subchannel_id schid)
210{
211 struct schib schib;
f97a56fb
CH
212 /*
213 * We don't know the device yet, but since a path
214 * may be available now to the device we'll have
215 * to do recognition again.
216 * Since we don't have any idea about which chpid
217 * that beast may be on we'll have to do a stsch
218 * on all devices, grr...
219 */
fb6958a5 220 if (stsch_err(schid, &schib))
f97a56fb 221 /* We're through */
83b3370c 222 return -ENXIO;
f97a56fb
CH
223
224 /* Put it on the slow path. */
83b3370c 225 css_schedule_eval(schid);
f97a56fb
CH
226 return 0;
227}
228
7ad6a249
PO
229struct res_acc_data {
230 struct chp_id chpid;
231 u32 fla_mask;
232 u16 fla;
233};
234
235static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
236 struct res_acc_data *data)
237{
238 int i;
239 int mask;
240
241 for (i = 0; i < 8; i++) {
242 mask = 0x80 >> i;
243 if (!(ssd->path_mask & mask))
244 continue;
245 if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
246 continue;
247 if ((ssd->fla_valid_mask & mask) &&
248 ((ssd->fla[i] & data->fla_mask) != data->fla))
249 continue;
250 return mask;
251 }
252 return 0;
253}
254
1da177e4 255static int
f97a56fb 256__s390_process_res_acc(struct subchannel_id schid, void *data)
1da177e4 257{
f97a56fb
CH
258 int chp_mask, old_lpm;
259 struct res_acc_data *res_data;
1da177e4 260 struct subchannel *sch;
f97a56fb 261
12975aef 262 res_data = data;
f97a56fb
CH
263 sch = get_subchannel_by_schid(schid);
264 if (!sch)
265 /* Check if a subchannel is newly available. */
266 return s390_process_res_acc_new_sch(schid);
267
2ec22984 268 spin_lock_irq(sch->lock);
7ad6a249
PO
269 chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
270 if (chp_mask == 0)
271 goto out;
272 if (stsch(sch->schid, &sch->schib))
273 goto out;
f97a56fb
CH
274 old_lpm = sch->lpm;
275 sch->lpm = ((sch->schib.pmcw.pim &
276 sch->schib.pmcw.pam &
277 sch->schib.pmcw.pom)
278 | chp_mask) & sch->opm;
279 if (!old_lpm && sch->lpm)
280 device_trigger_reprobe(sch);
281 else if (sch->driver && sch->driver->verify)
602b20f2 282 sch->driver->verify(sch);
7ad6a249 283out:
2ec22984 284 spin_unlock_irq(sch->lock);
f97a56fb 285 put_device(&sch->dev);
dd9963f9 286 return 0;
f97a56fb
CH
287}
288
83b3370c 289static void s390_process_res_acc (struct res_acc_data *res_data)
f97a56fb 290{
1da177e4
LT
291 char dbf_txt[15];
292
e6b6e10a
PO
293 sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
294 res_data->chpid.id);
1da177e4 295 CIO_TRACE_EVENT( 2, dbf_txt);
f97a56fb
CH
296 if (res_data->fla != 0) {
297 sprintf(dbf_txt, "fla%x", res_data->fla);
1da177e4
LT
298 CIO_TRACE_EVENT( 2, dbf_txt);
299 }
300
301 /*
302 * I/O resources may have become accessible.
303 * Scan through all subchannels that may be concerned and
304 * do a validation on those.
305 * The more information we have (info), the less scanning
306 * will we have to do.
307 */
83b3370c 308 for_each_subchannel(__s390_process_res_acc, res_data);
1da177e4
LT
309}
310
311static int
312__get_chpid_from_lir(void *data)
313{
314 struct lir {
315 u8 iq;
316 u8 ic;
317 u16 sci;
318 /* incident-node descriptor */
319 u32 indesc[28];
320 /* attached-node descriptor */
321 u32 andesc[28];
322 /* incident-specific information */
323 u32 isinfo[28];
0f008aa3 324 } __attribute__ ((packed)) *lir;
1da177e4 325
12975aef 326 lir = data;
1da177e4
LT
327 if (!(lir->iq&0x80))
328 /* NULL link incident record */
329 return -EINVAL;
330 if (!(lir->indesc[0]&0xc0000000))
331 /* node descriptor not valid */
332 return -EINVAL;
333 if (!(lir->indesc[0]&0x10000000))
334 /* don't handle device-type nodes - FIXME */
335 return -EINVAL;
336 /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
337
338 return (u16) (lir->indesc[0]&0x000000ff);
339}
340
184357a5
PO
341struct chsc_sei_area {
342 struct chsc_header request;
343 u32 reserved1;
344 u32 reserved2;
345 u32 reserved3;
346 struct chsc_header response;
347 u32 reserved4;
348 u8 flags;
349 u8 vf; /* validity flags */
350 u8 rs; /* reporting source */
351 u8 cc; /* content code */
352 u16 fla; /* full link address */
353 u16 rsid; /* reporting source id */
354 u32 reserved5;
355 u32 reserved6;
356 u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
357 /* ccdf has to be big enough for a link-incident record */
358} __attribute__ ((packed));
359
83b3370c 360static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
184357a5 361{
f86635fa
PO
362 struct chp_id chpid;
363 int id;
184357a5
PO
364
365 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
366 sei_area->rs, sei_area->rsid);
367 if (sei_area->rs != 4)
83b3370c 368 return;
f86635fa
PO
369 id = __get_chpid_from_lir(sei_area->ccdf);
370 if (id < 0)
184357a5 371 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
f86635fa
PO
372 else {
373 chp_id_init(&chpid);
374 chpid.id = id;
e6b6e10a 375 chsc_chp_offline(chpid);
f86635fa 376 }
184357a5
PO
377}
378
83b3370c 379static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
1da177e4 380{
f97a56fb 381 struct res_acc_data res_data;
f86635fa 382 struct chp_id chpid;
184357a5 383 int status;
184357a5
PO
384
385 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
386 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
387 if (sei_area->rs != 4)
83b3370c 388 return;
f86635fa
PO
389 chp_id_init(&chpid);
390 chpid.id = sei_area->rsid;
184357a5 391 /* allocate a new channel path structure, if needed */
e6b6e10a 392 status = chp_get_status(chpid);
184357a5 393 if (status < 0)
e6b6e10a 394 chp_new(chpid);
184357a5 395 else if (!status)
83b3370c 396 return;
184357a5 397 memset(&res_data, 0, sizeof(struct res_acc_data));
e6b6e10a 398 res_data.chpid = chpid;
184357a5
PO
399 if ((sei_area->vf & 0xc0) != 0) {
400 res_data.fla = sei_area->fla;
401 if ((sei_area->vf & 0xc0) == 0xc0)
402 /* full link address */
403 res_data.fla_mask = 0xffff;
404 else
405 /* link address */
406 res_data.fla_mask = 0xff00;
407 }
83b3370c 408 s390_process_res_acc(&res_data);
184357a5
PO
409}
410
e5854a58
PO
411struct chp_config_data {
412 u8 map[32];
413 u8 op;
414 u8 pc;
415};
416
83b3370c 417static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
e5854a58
PO
418{
419 struct chp_config_data *data;
420 struct chp_id chpid;
421 int num;
422
423 CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
424 if (sei_area->rs != 0)
83b3370c 425 return;
e5854a58
PO
426 data = (struct chp_config_data *) &(sei_area->ccdf);
427 chp_id_init(&chpid);
428 for (num = 0; num <= __MAX_CHPID; num++) {
429 if (!chp_test_bit(data->map, num))
430 continue;
431 chpid.id = num;
432 printk(KERN_WARNING "cio: processing configure event %d for "
433 "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
434 switch (data->op) {
435 case 0:
436 chp_cfg_schedule(chpid, 1);
437 break;
438 case 1:
439 chp_cfg_schedule(chpid, 0);
440 break;
441 case 2:
442 chp_cfg_cancel_deconfigure(chpid);
443 break;
444 }
445 }
e5854a58
PO
446}
447
83b3370c 448static void chsc_process_sei(struct chsc_sei_area *sei_area)
184357a5 449{
184357a5 450 /* Check if we might have lost some information. */
83b3370c 451 if (sei_area->flags & 0x40) {
184357a5 452 CIO_CRW_EVENT(2, "chsc: event overflow\n");
83b3370c
PO
453 css_schedule_eval_all();
454 }
184357a5 455 /* which kind of information was stored? */
184357a5
PO
456 switch (sei_area->cc) {
457 case 1: /* link incident*/
83b3370c 458 chsc_process_sei_link_incident(sei_area);
184357a5
PO
459 break;
460 case 2: /* i/o resource accessibiliy */
83b3370c 461 chsc_process_sei_res_acc(sei_area);
184357a5 462 break;
e5854a58 463 case 8: /* channel-path-configuration notification */
83b3370c 464 chsc_process_sei_chp_config(sei_area);
e5854a58 465 break;
184357a5
PO
466 default: /* other stuff */
467 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
468 sei_area->cc);
469 break;
470 }
184357a5
PO
471}
472
83b3370c 473void chsc_process_crw(void)
184357a5
PO
474{
475 struct chsc_sei_area *sei_area;
1da177e4
LT
476
477 if (!sei_page)
83b3370c 478 return;
184357a5
PO
479 /* Access to sei_page is serialized through machine check handler
480 * thread, so no need for locking. */
1da177e4
LT
481 sei_area = sei_page;
482
483 CIO_TRACE_EVENT( 2, "prcss");
1da177e4 484 do {
1da177e4 485 memset(sei_area, 0, sizeof(*sei_area));
495a5b45
CH
486 sei_area->request.length = 0x0010;
487 sei_area->request.code = 0x000e;
184357a5
PO
488 if (chsc(sei_area))
489 break;
1da177e4 490
184357a5
PO
491 if (sei_area->response.code == 0x0001) {
492 CIO_CRW_EVENT(4, "chsc: sei successful\n");
83b3370c 493 chsc_process_sei(sei_area);
184357a5
PO
494 } else {
495 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
1da177e4 496 sei_area->response.code);
1da177e4
LT
497 break;
498 }
499 } while (sei_area->flags & 0x80);
1da177e4
LT
500}
501
4d284cac 502static int
f97a56fb
CH
503__chp_add_new_sch(struct subchannel_id schid)
504{
505 struct schib schib;
f97a56fb 506
758976f9 507 if (stsch_err(schid, &schib))
f97a56fb 508 /* We're through */
83b3370c 509 return -ENXIO;
f97a56fb
CH
510
511 /* Put it on the slow path. */
83b3370c 512 css_schedule_eval(schid);
f97a56fb
CH
513 return 0;
514}
515
516
1da177e4 517static int
f97a56fb 518__chp_add(struct subchannel_id schid, void *data)
1da177e4 519{
7e8ae7bf 520 int i, mask;
e6b6e10a 521 struct chp_id *chpid;
1da177e4 522 struct subchannel *sch;
f97a56fb 523
e6b6e10a 524 chpid = data;
f97a56fb
CH
525 sch = get_subchannel_by_schid(schid);
526 if (!sch)
527 /* Check if the subchannel is now available. */
528 return __chp_add_new_sch(schid);
2ec22984 529 spin_lock_irq(sch->lock);
7e8ae7bf
CH
530 for (i=0; i<8; i++) {
531 mask = 0x80 >> i;
532 if ((sch->schib.pmcw.pim & mask) &&
e6b6e10a 533 (sch->schib.pmcw.chpid[i] == chpid->id)) {
f97a56fb
CH
534 if (stsch(sch->schid, &sch->schib) != 0) {
535 /* Endgame. */
2ec22984 536 spin_unlock_irq(sch->lock);
f97a56fb
CH
537 return -ENXIO;
538 }
539 break;
540 }
7e8ae7bf 541 }
f97a56fb 542 if (i==8) {
2ec22984 543 spin_unlock_irq(sch->lock);
f97a56fb
CH
544 return 0;
545 }
546 sch->lpm = ((sch->schib.pmcw.pim &
547 sch->schib.pmcw.pam &
548 sch->schib.pmcw.pom)
7e8ae7bf 549 | mask) & sch->opm;
f97a56fb
CH
550
551 if (sch->driver && sch->driver->verify)
602b20f2 552 sch->driver->verify(sch);
f97a56fb 553
2ec22984 554 spin_unlock_irq(sch->lock);
f97a56fb
CH
555 put_device(&sch->dev);
556 return 0;
557}
558
83b3370c 559void chsc_chp_online(struct chp_id chpid)
f97a56fb 560{
1da177e4
LT
561 char dbf_txt[15];
562
f86635fa 563 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
1da177e4
LT
564 CIO_TRACE_EVENT(2, dbf_txt);
565
83b3370c
PO
566 if (chp_get_status(chpid) != 0)
567 for_each_subchannel(__chp_add, &chpid);
1da177e4
LT
568}
569
f86635fa
PO
570static void __s390_subchannel_vary_chpid(struct subchannel *sch,
571 struct chp_id chpid, int on)
1da177e4
LT
572{
573 int chp, old_lpm;
7ad6a249 574 int mask;
1da177e4
LT
575 unsigned long flags;
576
2ec22984 577 spin_lock_irqsave(sch->lock, flags);
1da177e4
LT
578 old_lpm = sch->lpm;
579 for (chp = 0; chp < 8; chp++) {
7ad6a249
PO
580 mask = 0x80 >> chp;
581 if (!(sch->ssd_info.path_mask & mask))
582 continue;
583 if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
1da177e4
LT
584 continue;
585
586 if (on) {
7ad6a249
PO
587 sch->opm |= mask;
588 sch->lpm |= mask;
1da177e4
LT
589 if (!old_lpm)
590 device_trigger_reprobe(sch);
591 else if (sch->driver && sch->driver->verify)
602b20f2 592 sch->driver->verify(sch);
24cb5b48
CH
593 break;
594 }
7ad6a249
PO
595 sch->opm &= ~mask;
596 sch->lpm &= ~mask;
597 if (check_for_io_on_path(sch, mask)) {
d23861ff
CH
598 if (device_is_online(sch))
599 /* Path verification is done after killing. */
600 device_kill_io(sch);
387b734f 601 else {
d23861ff
CH
602 /* Kill and retry internal I/O. */
603 terminate_internal_io(sch);
387b734f
SB
604 /* Re-start path verification. */
605 if (sch->driver && sch->driver->verify)
602b20f2 606 sch->driver->verify(sch);
387b734f 607 }
d23861ff 608 } else if (!sch->lpm) {
83b3370c
PO
609 if (device_trigger_verify(sch) != 0)
610 css_schedule_eval(sch->schid);
24cb5b48 611 } else if (sch->driver && sch->driver->verify)
602b20f2 612 sch->driver->verify(sch);
1da177e4
LT
613 break;
614 }
2ec22984 615 spin_unlock_irqrestore(sch->lock, flags);
1da177e4
LT
616}
617
f86635fa 618static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
1da177e4
LT
619{
620 struct subchannel *sch;
f86635fa 621 struct chp_id *chpid;
1da177e4
LT
622
623 sch = to_subchannel(dev);
624 chpid = data;
625
626 __s390_subchannel_vary_chpid(sch, *chpid, 0);
627 return 0;
628}
629
f86635fa 630static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
1da177e4
LT
631{
632 struct subchannel *sch;
f86635fa 633 struct chp_id *chpid;
1da177e4
LT
634
635 sch = to_subchannel(dev);
636 chpid = data;
637
638 __s390_subchannel_vary_chpid(sch, *chpid, 1);
639 return 0;
640}
641
f97a56fb
CH
642static int
643__s390_vary_chpid_on(struct subchannel_id schid, void *data)
644{
645 struct schib schib;
646 struct subchannel *sch;
647
648 sch = get_subchannel_by_schid(schid);
649 if (sch) {
650 put_device(&sch->dev);
651 return 0;
652 }
fb6958a5 653 if (stsch_err(schid, &schib))
f97a56fb
CH
654 /* We're through */
655 return -ENXIO;
656 /* Put it on the slow path. */
83b3370c 657 css_schedule_eval(schid);
f97a56fb
CH
658 return 0;
659}
660
e6b6e10a
PO
661/**
662 * chsc_chp_vary - propagate channel-path vary operation to subchannels
663 * @chpid: channl-path ID
664 * @on: non-zero for vary online, zero for vary offline
1da177e4 665 */
e6b6e10a 666int chsc_chp_vary(struct chp_id chpid, int on)
1da177e4 667{
1da177e4
LT
668 /*
669 * Redo PathVerification on the devices the chpid connects to
670 */
671
672 bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
673 s390_subchannel_vary_chpid_on :
674 s390_subchannel_vary_chpid_off);
f97a56fb
CH
675 if (on)
676 /* Scan for new devices on varied on path. */
677 for_each_subchannel(__s390_vary_chpid_on, NULL);
1da177e4
LT
678 return 0;
679}
680
495a5b45
CH
681static void
682chsc_remove_cmg_attr(struct channel_subsystem *css)
683{
684 int i;
685
686 for (i = 0; i <= __MAX_CHPID; i++) {
687 if (!css->chps[i])
688 continue;
e6b6e10a 689 chp_remove_cmg_attr(css->chps[i]);
495a5b45
CH
690 }
691}
692
693static int
694chsc_add_cmg_attr(struct channel_subsystem *css)
695{
696 int i, ret;
697
698 ret = 0;
699 for (i = 0; i <= __MAX_CHPID; i++) {
700 if (!css->chps[i])
701 continue;
e6b6e10a 702 ret = chp_add_cmg_attr(css->chps[i]);
495a5b45
CH
703 if (ret)
704 goto cleanup;
705 }
706 return ret;
707cleanup:
708 for (--i; i >= 0; i--) {
709 if (!css->chps[i])
710 continue;
e6b6e10a 711 chp_remove_cmg_attr(css->chps[i]);
495a5b45
CH
712 }
713 return ret;
714}
715
495a5b45
CH
716static int
717__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
718{
719 struct {
720 struct chsc_header request;
721 u32 operation_code : 2;
722 u32 : 30;
723 u32 key : 4;
724 u32 : 28;
725 u32 zeroes1;
726 u32 cub_addr1;
727 u32 zeroes2;
728 u32 cub_addr2;
729 u32 reserved[13];
730 struct chsc_header response;
731 u32 status : 8;
732 u32 : 4;
733 u32 fmt : 4;
734 u32 : 16;
0f008aa3 735 } __attribute__ ((packed)) *secm_area;
495a5b45
CH
736 int ret, ccode;
737
738 secm_area = page;
739 secm_area->request.length = 0x0050;
740 secm_area->request.code = 0x0016;
741
742 secm_area->key = PAGE_DEFAULT_KEY;
743 secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
744 secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
745
746 secm_area->operation_code = enable ? 0 : 1;
747
748 ccode = chsc(secm_area);
749 if (ccode > 0)
750 return (ccode == 3) ? -ENODEV : -EBUSY;
751
752 switch (secm_area->response.code) {
753 case 0x0001: /* Success. */
754 ret = 0;
755 break;
756 case 0x0003: /* Invalid block. */
757 case 0x0007: /* Invalid format. */
758 case 0x0008: /* Other invalid block. */
759 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
760 ret = -EINVAL;
761 break;
762 case 0x0004: /* Command not provided in model. */
763 CIO_CRW_EVENT(2, "Model does not provide secm\n");
764 ret = -EOPNOTSUPP;
765 break;
766 case 0x0102: /* cub adresses incorrect */
767 CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
768 ret = -EINVAL;
769 break;
770 case 0x0103: /* key error */
771 CIO_CRW_EVENT(2, "Access key error in secm\n");
772 ret = -EINVAL;
773 break;
774 case 0x0105: /* error while starting */
775 CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
776 ret = -EIO;
777 break;
778 default:
779 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
780 secm_area->response.code);
781 ret = -EIO;
782 }
783 return ret;
784}
785
786int
787chsc_secm(struct channel_subsystem *css, int enable)
788{
789 void *secm_area;
790 int ret;
791
792 secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
793 if (!secm_area)
794 return -ENOMEM;
795
796 mutex_lock(&css->mutex);
797 if (enable && !css->cm_enabled) {
798 css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
799 css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
800 if (!css->cub_addr1 || !css->cub_addr2) {
801 free_page((unsigned long)css->cub_addr1);
802 free_page((unsigned long)css->cub_addr2);
803 free_page((unsigned long)secm_area);
804 mutex_unlock(&css->mutex);
805 return -ENOMEM;
806 }
807 }
808 ret = __chsc_do_secm(css, enable, secm_area);
809 if (!ret) {
810 css->cm_enabled = enable;
811 if (css->cm_enabled) {
812 ret = chsc_add_cmg_attr(css);
813 if (ret) {
814 memset(secm_area, 0, PAGE_SIZE);
815 __chsc_do_secm(css, 0, secm_area);
816 css->cm_enabled = 0;
817 }
818 } else
819 chsc_remove_cmg_attr(css);
820 }
8c4941c5 821 if (!css->cm_enabled) {
495a5b45
CH
822 free_page((unsigned long)css->cub_addr1);
823 free_page((unsigned long)css->cub_addr2);
824 }
825 mutex_unlock(&css->mutex);
826 free_page((unsigned long)secm_area);
827 return ret;
828}
829
e6b6e10a
PO
830int chsc_determine_channel_path_description(struct chp_id chpid,
831 struct channel_path_desc *desc)
1da177e4
LT
832{
833 int ccode, ret;
834
835 struct {
836 struct chsc_header request;
837 u32 : 24;
838 u32 first_chpid : 8;
839 u32 : 24;
840 u32 last_chpid : 8;
841 u32 zeroes1;
842 struct chsc_header response;
843 u32 zeroes2;
844 struct channel_path_desc desc;
0f008aa3 845 } __attribute__ ((packed)) *scpd_area;
1da177e4
LT
846
847 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
848 if (!scpd_area)
849 return -ENOMEM;
850
495a5b45
CH
851 scpd_area->request.length = 0x0010;
852 scpd_area->request.code = 0x0002;
1da177e4 853
f86635fa
PO
854 scpd_area->first_chpid = chpid.id;
855 scpd_area->last_chpid = chpid.id;
1da177e4
LT
856
857 ccode = chsc(scpd_area);
858 if (ccode > 0) {
859 ret = (ccode == 3) ? -ENODEV : -EBUSY;
860 goto out;
861 }
862
863 switch (scpd_area->response.code) {
864 case 0x0001: /* Success. */
865 memcpy(desc, &scpd_area->desc,
866 sizeof(struct channel_path_desc));
867 ret = 0;
868 break;
869 case 0x0003: /* Invalid block. */
870 case 0x0007: /* Invalid format. */
871 case 0x0008: /* Other invalid block. */
872 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
873 ret = -EINVAL;
874 break;
875 case 0x0004: /* Command not provided in model. */
876 CIO_CRW_EVENT(2, "Model does not provide scpd\n");
877 ret = -EOPNOTSUPP;
878 break;
879 default:
880 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
881 scpd_area->response.code);
882 ret = -EIO;
883 }
884out:
885 free_page((unsigned long)scpd_area);
886 return ret;
887}
888
495a5b45
CH
889static void
890chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
891 struct cmg_chars *chars)
892{
893 switch (chp->cmg) {
894 case 2:
895 case 3:
896 chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
897 GFP_KERNEL);
898 if (chp->cmg_chars) {
899 int i, mask;
900 struct cmg_chars *cmg_chars;
901
902 cmg_chars = chp->cmg_chars;
903 for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
904 mask = 0x80 >> (i + 3);
905 if (cmcv & mask)
906 cmg_chars->values[i] = chars->values[i];
907 else
908 cmg_chars->values[i] = 0;
909 }
910 }
911 break;
912 default:
913 /* No cmg-dependent data. */
914 break;
915 }
916}
917
e6b6e10a 918int chsc_get_channel_measurement_chars(struct channel_path *chp)
495a5b45
CH
919{
920 int ccode, ret;
921
922 struct {
923 struct chsc_header request;
924 u32 : 24;
925 u32 first_chpid : 8;
926 u32 : 24;
927 u32 last_chpid : 8;
928 u32 zeroes1;
929 struct chsc_header response;
930 u32 zeroes2;
931 u32 not_valid : 1;
932 u32 shared : 1;
933 u32 : 22;
934 u32 chpid : 8;
935 u32 cmcv : 5;
936 u32 : 11;
937 u32 cmgq : 8;
938 u32 cmg : 8;
939 u32 zeroes3;
940 u32 data[NR_MEASUREMENT_CHARS];
0f008aa3 941 } __attribute__ ((packed)) *scmc_area;
495a5b45
CH
942
943 scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
944 if (!scmc_area)
945 return -ENOMEM;
946
947 scmc_area->request.length = 0x0010;
948 scmc_area->request.code = 0x0022;
949
f86635fa
PO
950 scmc_area->first_chpid = chp->chpid.id;
951 scmc_area->last_chpid = chp->chpid.id;
495a5b45
CH
952
953 ccode = chsc(scmc_area);
954 if (ccode > 0) {
955 ret = (ccode == 3) ? -ENODEV : -EBUSY;
956 goto out;
957 }
958
959 switch (scmc_area->response.code) {
960 case 0x0001: /* Success. */
961 if (!scmc_area->not_valid) {
962 chp->cmg = scmc_area->cmg;
963 chp->shared = scmc_area->shared;
964 chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
965 (struct cmg_chars *)
966 &scmc_area->data);
967 } else {
968 chp->cmg = -1;
969 chp->shared = -1;
970 }
971 ret = 0;
972 break;
973 case 0x0003: /* Invalid block. */
974 case 0x0007: /* Invalid format. */
975 case 0x0008: /* Invalid bit combination. */
976 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
977 ret = -EINVAL;
978 break;
979 case 0x0004: /* Command not provided. */
980 CIO_CRW_EVENT(2, "Model does not provide scmc\n");
981 ret = -EOPNOTSUPP;
982 break;
983 default:
984 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
985 scmc_area->response.code);
986 ret = -EIO;
987 }
988out:
989 free_page((unsigned long)scmc_area);
990 return ret;
991}
992
4434a38c 993int __init chsc_alloc_sei_area(void)
1da177e4
LT
994{
995 sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
996 if (!sei_page)
e556bbbd
CH
997 CIO_MSG_EVENT(0, "Can't allocate page for processing of "
998 "chsc machine checks!\n");
1da177e4
LT
999 return (sei_page ? 0 : -ENOMEM);
1000}
1001
4434a38c
CH
1002void __init chsc_free_sei_area(void)
1003{
1004 kfree(sei_page);
1005}
1006
fb6958a5
CH
1007int __init
1008chsc_enable_facility(int operation_code)
1009{
1010 int ret;
1011 struct {
1012 struct chsc_header request;
1013 u8 reserved1:4;
1014 u8 format:4;
1015 u8 reserved2;
1016 u16 operation_code;
1017 u32 reserved3;
1018 u32 reserved4;
1019 u32 operation_data_area[252];
1020 struct chsc_header response;
1021 u32 reserved5:4;
1022 u32 format2:4;
1023 u32 reserved6:24;
0f008aa3 1024 } __attribute__ ((packed)) *sda_area;
fb6958a5
CH
1025
1026 sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1027 if (!sda_area)
1028 return -ENOMEM;
495a5b45
CH
1029 sda_area->request.length = 0x0400;
1030 sda_area->request.code = 0x0031;
fb6958a5
CH
1031 sda_area->operation_code = operation_code;
1032
1033 ret = chsc(sda_area);
1034 if (ret > 0) {
1035 ret = (ret == 3) ? -ENODEV : -EBUSY;
1036 goto out;
1037 }
1038 switch (sda_area->response.code) {
15730ddb
CH
1039 case 0x0001: /* everything ok */
1040 ret = 0;
1041 break;
fb6958a5
CH
1042 case 0x0003: /* invalid request block */
1043 case 0x0007:
1044 ret = -EINVAL;
1045 break;
1046 case 0x0004: /* command not provided */
1047 case 0x0101: /* facility not provided */
1048 ret = -EOPNOTSUPP;
1049 break;
15730ddb
CH
1050 default: /* something went wrong */
1051 ret = -EIO;
fb6958a5
CH
1052 }
1053 out:
1054 free_page((unsigned long)sda_area);
1055 return ret;
1056}
1057
1da177e4
LT
1058struct css_general_char css_general_characteristics;
1059struct css_chsc_char css_chsc_characteristics;
1060
1061int __init
1062chsc_determine_css_characteristics(void)
1063{
1064 int result;
1065 struct {
1066 struct chsc_header request;
1067 u32 reserved1;
1068 u32 reserved2;
1069 u32 reserved3;
1070 struct chsc_header response;
1071 u32 reserved4;
1072 u32 general_char[510];
1073 u32 chsc_char[518];
0f008aa3 1074 } __attribute__ ((packed)) *scsc_area;
1da177e4
LT
1075
1076 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1077 if (!scsc_area) {
e556bbbd
CH
1078 CIO_MSG_EVENT(0, "Was not able to determine available"
1079 "CHSCs due to no memory.\n");
1da177e4
LT
1080 return -ENOMEM;
1081 }
1082
495a5b45
CH
1083 scsc_area->request.length = 0x0010;
1084 scsc_area->request.code = 0x0010;
1da177e4
LT
1085
1086 result = chsc(scsc_area);
1087 if (result) {
e556bbbd
CH
1088 CIO_MSG_EVENT(0, "Was not able to determine available CHSCs, "
1089 "cc=%i.\n", result);
1da177e4
LT
1090 result = -EIO;
1091 goto exit;
1092 }
1093
1094 if (scsc_area->response.code != 1) {
e556bbbd
CH
1095 CIO_MSG_EVENT(0, "Was not able to determine "
1096 "available CHSCs.\n");
1da177e4
LT
1097 result = -EIO;
1098 goto exit;
1099 }
1100 memcpy(&css_general_characteristics, scsc_area->general_char,
1101 sizeof(css_general_characteristics));
1102 memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
1103 sizeof(css_chsc_characteristics));
1104exit:
1105 free_page ((unsigned long) scsc_area);
1106 return result;
1107}
1108
1109EXPORT_SYMBOL_GPL(css_general_characteristics);
1110EXPORT_SYMBOL_GPL(css_chsc_characteristics);