/*
 * drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright IBM Corp. 1999,2008
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */
10
11#include <linux/module.h>
1da177e4
LT
12#include <linux/slab.h>
13#include <linux/init.h>
14#include <linux/device.h>
15
16#include <asm/cio.h>
e5854a58 17#include <asm/chpid.h>
1da177e4
LT
18
19#include "css.h"
20#include "cio.h"
21#include "cio_debug.h"
22#include "ioasm.h"
e6b6e10a 23#include "chp.h"
1da177e4
LT
24#include "chsc.h"
25
1da177e4
LT
26static void *sei_page;
27
b9c9a21a
CH
28static int chsc_error_from_response(int response)
29{
30 switch (response) {
31 case 0x0001:
32 return 0;
33 case 0x0002:
34 case 0x0003:
35 case 0x0006:
36 case 0x0007:
37 case 0x0008:
38 case 0x000a:
39 return -EINVAL;
40 case 0x0004:
41 return -EOPNOTSUPP;
42 default:
43 return -EIO;
44 }
45}
46
7ad6a249
PO
47struct chsc_ssd_area {
48 struct chsc_header request;
49 u16 :10;
50 u16 ssid:2;
51 u16 :4;
52 u16 f_sch; /* first subchannel */
53 u16 :16;
54 u16 l_sch; /* last subchannel */
55 u32 :32;
56 struct chsc_header response;
57 u32 :32;
58 u8 sch_valid : 1;
59 u8 dev_valid : 1;
60 u8 st : 3; /* subchannel type */
61 u8 zeroes : 3;
62 u8 unit_addr; /* unit address */
63 u16 devno; /* device number */
64 u8 path_mask;
65 u8 fla_valid_mask;
66 u16 sch; /* subchannel */
67 u8 chpid[8]; /* chpids 0-7 */
68 u16 fla[8]; /* full link addresses 0-7 */
69} __attribute__ ((packed));
70
71int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
1da177e4 72{
7ad6a249
PO
73 unsigned long page;
74 struct chsc_ssd_area *ssd_area;
75 int ccode;
76 int ret;
77 int i;
78 int mask;
1da177e4 79
7ad6a249
PO
80 page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
81 if (!page)
82 return -ENOMEM;
83 ssd_area = (struct chsc_ssd_area *) page;
495a5b45
CH
84 ssd_area->request.length = 0x0010;
85 ssd_area->request.code = 0x0004;
7ad6a249
PO
86 ssd_area->ssid = schid.ssid;
87 ssd_area->f_sch = schid.sch_no;
88 ssd_area->l_sch = schid.sch_no;
1da177e4
LT
89
90 ccode = chsc(ssd_area);
7ad6a249 91 /* Check response. */
1da177e4 92 if (ccode > 0) {
7ad6a249
PO
93 ret = (ccode == 3) ? -ENODEV : -EBUSY;
94 goto out_free;
1da177e4 95 }
b9c9a21a
CH
96 ret = chsc_error_from_response(ssd_area->response.code);
97 if (ret != 0) {
7ad6a249
PO
98 CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
99 schid.ssid, schid.sch_no,
1da177e4 100 ssd_area->response.code);
7ad6a249 101 goto out_free;
1da177e4 102 }
7ad6a249
PO
103 if (!ssd_area->sch_valid) {
104 ret = -ENODEV;
105 goto out_free;
1da177e4 106 }
7ad6a249
PO
107 /* Copy data */
108 ret = 0;
109 memset(ssd, 0, sizeof(struct chsc_ssd_info));
b279a4f5
CH
110 if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
111 (ssd_area->st != SUBCHANNEL_TYPE_MSG))
7ad6a249
PO
112 goto out_free;
113 ssd->path_mask = ssd_area->path_mask;
114 ssd->fla_valid_mask = ssd_area->fla_valid_mask;
115 for (i = 0; i < 8; i++) {
116 mask = 0x80 >> i;
117 if (ssd_area->path_mask & mask) {
118 chp_id_init(&ssd->chpid[i]);
119 ssd->chpid[i].id = ssd_area->chpid[i];
1da177e4 120 }
7ad6a249
PO
121 if (ssd_area->fla_valid_mask & mask)
122 ssd->fla[i] = ssd_area->fla[i];
1da177e4 123 }
7ad6a249
PO
124out_free:
125 free_page(page);
1da177e4
LT
126 return ret;
127}
128
e82a1567 129static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
1da177e4 130{
2ec22984 131 spin_lock_irq(sch->lock);
c820de39
CH
132 if (sch->driver && sch->driver->chp_event)
133 if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
1da177e4 134 goto out_unreg;
2ec22984 135 spin_unlock_irq(sch->lock);
1da177e4 136 return 0;
387b734f 137
1da177e4 138out_unreg:
1da177e4 139 sch->lpm = 0;
387b734f 140 spin_unlock_irq(sch->lock);
83b3370c 141 css_schedule_eval(sch->schid);
1da177e4
LT
142 return 0;
143}
144
e6b6e10a 145void chsc_chp_offline(struct chp_id chpid)
1da177e4
LT
146{
147 char dbf_txt[15];
148
f86635fa 149 sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
1da177e4
LT
150 CIO_TRACE_EVENT(2, dbf_txt);
151
e6b6e10a 152 if (chp_get_status(chpid) <= 0)
1da177e4 153 return;
22806dc1
CH
154 /* Wait until previous actions have settled. */
155 css_wait_for_slow_path();
e82a1567 156 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
1da177e4
LT
157}
158
e82a1567 159static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
f97a56fb
CH
160{
161 struct schib schib;
f97a56fb
CH
162 /*
163 * We don't know the device yet, but since a path
164 * may be available now to the device we'll have
165 * to do recognition again.
166 * Since we don't have any idea about which chpid
167 * that beast may be on we'll have to do a stsch
168 * on all devices, grr...
169 */
fb6958a5 170 if (stsch_err(schid, &schib))
f97a56fb 171 /* We're through */
83b3370c 172 return -ENXIO;
f97a56fb
CH
173
174 /* Put it on the slow path. */
83b3370c 175 css_schedule_eval(schid);
f97a56fb
CH
176 return 0;
177}
178
e82a1567 179static int __s390_process_res_acc(struct subchannel *sch, void *data)
1da177e4 180{
2ec22984 181 spin_lock_irq(sch->lock);
c820de39
CH
182 if (sch->driver && sch->driver->chp_event)
183 sch->driver->chp_event(sch, data, CHP_ONLINE);
2ec22984 184 spin_unlock_irq(sch->lock);
e82a1567 185
dd9963f9 186 return 0;
f97a56fb
CH
187}
188
83b3370c 189static void s390_process_res_acc (struct res_acc_data *res_data)
f97a56fb 190{
1da177e4
LT
191 char dbf_txt[15];
192
e6b6e10a
PO
193 sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
194 res_data->chpid.id);
1da177e4 195 CIO_TRACE_EVENT( 2, dbf_txt);
f97a56fb
CH
196 if (res_data->fla != 0) {
197 sprintf(dbf_txt, "fla%x", res_data->fla);
1da177e4
LT
198 CIO_TRACE_EVENT( 2, dbf_txt);
199 }
22806dc1
CH
200 /* Wait until previous actions have settled. */
201 css_wait_for_slow_path();
1da177e4
LT
202 /*
203 * I/O resources may have become accessible.
204 * Scan through all subchannels that may be concerned and
205 * do a validation on those.
206 * The more information we have (info), the less scanning
207 * will we have to do.
208 */
e82a1567
PO
209 for_each_subchannel_staged(__s390_process_res_acc,
210 s390_process_res_acc_new_sch, res_data);
1da177e4
LT
211}
212
213static int
214__get_chpid_from_lir(void *data)
215{
216 struct lir {
217 u8 iq;
218 u8 ic;
219 u16 sci;
220 /* incident-node descriptor */
221 u32 indesc[28];
222 /* attached-node descriptor */
223 u32 andesc[28];
224 /* incident-specific information */
225 u32 isinfo[28];
0f008aa3 226 } __attribute__ ((packed)) *lir;
1da177e4 227
12975aef 228 lir = data;
1da177e4
LT
229 if (!(lir->iq&0x80))
230 /* NULL link incident record */
231 return -EINVAL;
232 if (!(lir->indesc[0]&0xc0000000))
233 /* node descriptor not valid */
234 return -EINVAL;
235 if (!(lir->indesc[0]&0x10000000))
236 /* don't handle device-type nodes - FIXME */
237 return -EINVAL;
238 /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
239
240 return (u16) (lir->indesc[0]&0x000000ff);
241}
242
184357a5
PO
243struct chsc_sei_area {
244 struct chsc_header request;
245 u32 reserved1;
246 u32 reserved2;
247 u32 reserved3;
248 struct chsc_header response;
249 u32 reserved4;
250 u8 flags;
251 u8 vf; /* validity flags */
252 u8 rs; /* reporting source */
253 u8 cc; /* content code */
254 u16 fla; /* full link address */
255 u16 rsid; /* reporting source id */
256 u32 reserved5;
257 u32 reserved6;
258 u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
259 /* ccdf has to be big enough for a link-incident record */
260} __attribute__ ((packed));
261
83b3370c 262static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
184357a5 263{
f86635fa
PO
264 struct chp_id chpid;
265 int id;
184357a5
PO
266
267 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
268 sei_area->rs, sei_area->rsid);
269 if (sei_area->rs != 4)
83b3370c 270 return;
f86635fa
PO
271 id = __get_chpid_from_lir(sei_area->ccdf);
272 if (id < 0)
184357a5 273 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
f86635fa
PO
274 else {
275 chp_id_init(&chpid);
276 chpid.id = id;
e6b6e10a 277 chsc_chp_offline(chpid);
f86635fa 278 }
184357a5
PO
279}
280
83b3370c 281static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
1da177e4 282{
f97a56fb 283 struct res_acc_data res_data;
f86635fa 284 struct chp_id chpid;
184357a5 285 int status;
184357a5
PO
286
287 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
288 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
289 if (sei_area->rs != 4)
83b3370c 290 return;
f86635fa
PO
291 chp_id_init(&chpid);
292 chpid.id = sei_area->rsid;
184357a5 293 /* allocate a new channel path structure, if needed */
e6b6e10a 294 status = chp_get_status(chpid);
184357a5 295 if (status < 0)
e6b6e10a 296 chp_new(chpid);
184357a5 297 else if (!status)
83b3370c 298 return;
184357a5 299 memset(&res_data, 0, sizeof(struct res_acc_data));
e6b6e10a 300 res_data.chpid = chpid;
184357a5
PO
301 if ((sei_area->vf & 0xc0) != 0) {
302 res_data.fla = sei_area->fla;
303 if ((sei_area->vf & 0xc0) == 0xc0)
304 /* full link address */
305 res_data.fla_mask = 0xffff;
306 else
307 /* link address */
308 res_data.fla_mask = 0xff00;
309 }
83b3370c 310 s390_process_res_acc(&res_data);
184357a5
PO
311}
312
e5854a58
PO
313struct chp_config_data {
314 u8 map[32];
315 u8 op;
316 u8 pc;
317};
318
83b3370c 319static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
e5854a58
PO
320{
321 struct chp_config_data *data;
322 struct chp_id chpid;
323 int num;
324
325 CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
326 if (sei_area->rs != 0)
83b3370c 327 return;
e5854a58
PO
328 data = (struct chp_config_data *) &(sei_area->ccdf);
329 chp_id_init(&chpid);
330 for (num = 0; num <= __MAX_CHPID; num++) {
331 if (!chp_test_bit(data->map, num))
332 continue;
333 chpid.id = num;
334 printk(KERN_WARNING "cio: processing configure event %d for "
335 "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
336 switch (data->op) {
337 case 0:
338 chp_cfg_schedule(chpid, 1);
339 break;
340 case 1:
341 chp_cfg_schedule(chpid, 0);
342 break;
343 case 2:
344 chp_cfg_cancel_deconfigure(chpid);
345 break;
346 }
347 }
e5854a58
PO
348}
349
83b3370c 350static void chsc_process_sei(struct chsc_sei_area *sei_area)
184357a5 351{
184357a5 352 /* Check if we might have lost some information. */
83b3370c 353 if (sei_area->flags & 0x40) {
184357a5 354 CIO_CRW_EVENT(2, "chsc: event overflow\n");
83b3370c
PO
355 css_schedule_eval_all();
356 }
184357a5 357 /* which kind of information was stored? */
184357a5
PO
358 switch (sei_area->cc) {
359 case 1: /* link incident*/
83b3370c 360 chsc_process_sei_link_incident(sei_area);
184357a5
PO
361 break;
362 case 2: /* i/o resource accessibiliy */
83b3370c 363 chsc_process_sei_res_acc(sei_area);
184357a5 364 break;
e5854a58 365 case 8: /* channel-path-configuration notification */
83b3370c 366 chsc_process_sei_chp_config(sei_area);
e5854a58 367 break;
184357a5
PO
368 default: /* other stuff */
369 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
370 sei_area->cc);
371 break;
372 }
184357a5
PO
373}
374
83b3370c 375void chsc_process_crw(void)
184357a5
PO
376{
377 struct chsc_sei_area *sei_area;
1da177e4
LT
378
379 if (!sei_page)
83b3370c 380 return;
184357a5
PO
381 /* Access to sei_page is serialized through machine check handler
382 * thread, so no need for locking. */
1da177e4
LT
383 sei_area = sei_page;
384
385 CIO_TRACE_EVENT( 2, "prcss");
1da177e4 386 do {
1da177e4 387 memset(sei_area, 0, sizeof(*sei_area));
495a5b45
CH
388 sei_area->request.length = 0x0010;
389 sei_area->request.code = 0x000e;
184357a5
PO
390 if (chsc(sei_area))
391 break;
1da177e4 392
184357a5
PO
393 if (sei_area->response.code == 0x0001) {
394 CIO_CRW_EVENT(4, "chsc: sei successful\n");
83b3370c 395 chsc_process_sei(sei_area);
184357a5
PO
396 } else {
397 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
1da177e4 398 sei_area->response.code);
1da177e4
LT
399 break;
400 }
401 } while (sei_area->flags & 0x80);
1da177e4
LT
402}
403
83b3370c 404void chsc_chp_online(struct chp_id chpid)
f97a56fb 405{
1da177e4 406 char dbf_txt[15];
c820de39 407 struct res_acc_data res_data;
1da177e4 408
f86635fa 409 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
1da177e4
LT
410 CIO_TRACE_EVENT(2, dbf_txt);
411
22806dc1 412 if (chp_get_status(chpid) != 0) {
c820de39
CH
413 memset(&res_data, 0, sizeof(struct res_acc_data));
414 res_data.chpid = chpid;
22806dc1
CH
415 /* Wait until previous actions have settled. */
416 css_wait_for_slow_path();
c820de39
CH
417 for_each_subchannel_staged(__s390_process_res_acc, NULL,
418 &res_data);
22806dc1 419 }
1da177e4
LT
420}
421
f86635fa
PO
422static void __s390_subchannel_vary_chpid(struct subchannel *sch,
423 struct chp_id chpid, int on)
1da177e4 424{
1da177e4 425 unsigned long flags;
c820de39 426 struct res_acc_data res_data;
1da177e4 427
c820de39
CH
428 memset(&res_data, 0, sizeof(struct res_acc_data));
429 res_data.chpid = chpid;
2ec22984 430 spin_lock_irqsave(sch->lock, flags);
c820de39
CH
431 if (sch->driver && sch->driver->chp_event)
432 sch->driver->chp_event(sch, &res_data,
433 on ? CHP_VARY_ON : CHP_VARY_OFF);
2ec22984 434 spin_unlock_irqrestore(sch->lock, flags);
1da177e4
LT
435}
436
e82a1567 437static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
1da177e4 438{
e82a1567 439 struct chp_id *chpid = data;
1da177e4
LT
440
441 __s390_subchannel_vary_chpid(sch, *chpid, 0);
442 return 0;
443}
444
e82a1567 445static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
1da177e4 446{
e82a1567 447 struct chp_id *chpid = data;
1da177e4
LT
448
449 __s390_subchannel_vary_chpid(sch, *chpid, 1);
450 return 0;
451}
452
f97a56fb
CH
453static int
454__s390_vary_chpid_on(struct subchannel_id schid, void *data)
455{
456 struct schib schib;
f97a56fb 457
fb6958a5 458 if (stsch_err(schid, &schib))
f97a56fb
CH
459 /* We're through */
460 return -ENXIO;
461 /* Put it on the slow path. */
83b3370c 462 css_schedule_eval(schid);
f97a56fb
CH
463 return 0;
464}
465
e6b6e10a
PO
466/**
467 * chsc_chp_vary - propagate channel-path vary operation to subchannels
468 * @chpid: channl-path ID
469 * @on: non-zero for vary online, zero for vary offline
1da177e4 470 */
e6b6e10a 471int chsc_chp_vary(struct chp_id chpid, int on)
1da177e4 472{
22806dc1
CH
473 /* Wait until previous actions have settled. */
474 css_wait_for_slow_path();
1da177e4
LT
475 /*
476 * Redo PathVerification on the devices the chpid connects to
477 */
478
f97a56fb 479 if (on)
e82a1567
PO
480 for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
481 __s390_vary_chpid_on, &chpid);
482 else
483 for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
484 NULL, &chpid);
485
1da177e4
LT
486 return 0;
487}
488
495a5b45
CH
489static void
490chsc_remove_cmg_attr(struct channel_subsystem *css)
491{
492 int i;
493
494 for (i = 0; i <= __MAX_CHPID; i++) {
495 if (!css->chps[i])
496 continue;
e6b6e10a 497 chp_remove_cmg_attr(css->chps[i]);
495a5b45
CH
498 }
499}
500
501static int
502chsc_add_cmg_attr(struct channel_subsystem *css)
503{
504 int i, ret;
505
506 ret = 0;
507 for (i = 0; i <= __MAX_CHPID; i++) {
508 if (!css->chps[i])
509 continue;
e6b6e10a 510 ret = chp_add_cmg_attr(css->chps[i]);
495a5b45
CH
511 if (ret)
512 goto cleanup;
513 }
514 return ret;
515cleanup:
516 for (--i; i >= 0; i--) {
517 if (!css->chps[i])
518 continue;
e6b6e10a 519 chp_remove_cmg_attr(css->chps[i]);
495a5b45
CH
520 }
521 return ret;
522}
523
495a5b45
CH
524static int
525__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
526{
527 struct {
528 struct chsc_header request;
529 u32 operation_code : 2;
530 u32 : 30;
531 u32 key : 4;
532 u32 : 28;
533 u32 zeroes1;
534 u32 cub_addr1;
535 u32 zeroes2;
536 u32 cub_addr2;
537 u32 reserved[13];
538 struct chsc_header response;
539 u32 status : 8;
540 u32 : 4;
541 u32 fmt : 4;
542 u32 : 16;
0f008aa3 543 } __attribute__ ((packed)) *secm_area;
495a5b45
CH
544 int ret, ccode;
545
546 secm_area = page;
547 secm_area->request.length = 0x0050;
548 secm_area->request.code = 0x0016;
549
550 secm_area->key = PAGE_DEFAULT_KEY;
551 secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
552 secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
553
554 secm_area->operation_code = enable ? 0 : 1;
555
556 ccode = chsc(secm_area);
557 if (ccode > 0)
558 return (ccode == 3) ? -ENODEV : -EBUSY;
559
560 switch (secm_area->response.code) {
b9c9a21a
CH
561 case 0x0102:
562 case 0x0103:
495a5b45 563 ret = -EINVAL;
495a5b45 564 default:
b9c9a21a 565 ret = chsc_error_from_response(secm_area->response.code);
495a5b45 566 }
b9c9a21a
CH
567 if (ret != 0)
568 CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
569 secm_area->response.code);
495a5b45
CH
570 return ret;
571}
572
573int
574chsc_secm(struct channel_subsystem *css, int enable)
575{
576 void *secm_area;
577 int ret;
578
579 secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
580 if (!secm_area)
581 return -ENOMEM;
582
495a5b45
CH
583 if (enable && !css->cm_enabled) {
584 css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
585 css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
586 if (!css->cub_addr1 || !css->cub_addr2) {
587 free_page((unsigned long)css->cub_addr1);
588 free_page((unsigned long)css->cub_addr2);
589 free_page((unsigned long)secm_area);
495a5b45
CH
590 return -ENOMEM;
591 }
592 }
593 ret = __chsc_do_secm(css, enable, secm_area);
594 if (!ret) {
595 css->cm_enabled = enable;
596 if (css->cm_enabled) {
597 ret = chsc_add_cmg_attr(css);
598 if (ret) {
599 memset(secm_area, 0, PAGE_SIZE);
600 __chsc_do_secm(css, 0, secm_area);
601 css->cm_enabled = 0;
602 }
603 } else
604 chsc_remove_cmg_attr(css);
605 }
8c4941c5 606 if (!css->cm_enabled) {
495a5b45
CH
607 free_page((unsigned long)css->cub_addr1);
608 free_page((unsigned long)css->cub_addr2);
609 }
495a5b45
CH
610 free_page((unsigned long)secm_area);
611 return ret;
612}
613
e6b6e10a
PO
614int chsc_determine_channel_path_description(struct chp_id chpid,
615 struct channel_path_desc *desc)
1da177e4
LT
616{
617 int ccode, ret;
618
619 struct {
620 struct chsc_header request;
621 u32 : 24;
622 u32 first_chpid : 8;
623 u32 : 24;
624 u32 last_chpid : 8;
625 u32 zeroes1;
626 struct chsc_header response;
627 u32 zeroes2;
628 struct channel_path_desc desc;
0f008aa3 629 } __attribute__ ((packed)) *scpd_area;
1da177e4
LT
630
631 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
632 if (!scpd_area)
633 return -ENOMEM;
634
495a5b45
CH
635 scpd_area->request.length = 0x0010;
636 scpd_area->request.code = 0x0002;
1da177e4 637
f86635fa
PO
638 scpd_area->first_chpid = chpid.id;
639 scpd_area->last_chpid = chpid.id;
1da177e4
LT
640
641 ccode = chsc(scpd_area);
642 if (ccode > 0) {
643 ret = (ccode == 3) ? -ENODEV : -EBUSY;
644 goto out;
645 }
646
b9c9a21a
CH
647 ret = chsc_error_from_response(scpd_area->response.code);
648 if (ret == 0)
649 /* Success. */
1da177e4
LT
650 memcpy(desc, &scpd_area->desc,
651 sizeof(struct channel_path_desc));
b9c9a21a
CH
652 else
653 CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
1da177e4 654 scpd_area->response.code);
1da177e4
LT
655out:
656 free_page((unsigned long)scpd_area);
657 return ret;
658}
659
495a5b45
CH
660static void
661chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
662 struct cmg_chars *chars)
663{
664 switch (chp->cmg) {
665 case 2:
666 case 3:
667 chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
668 GFP_KERNEL);
669 if (chp->cmg_chars) {
670 int i, mask;
671 struct cmg_chars *cmg_chars;
672
673 cmg_chars = chp->cmg_chars;
674 for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
675 mask = 0x80 >> (i + 3);
676 if (cmcv & mask)
677 cmg_chars->values[i] = chars->values[i];
678 else
679 cmg_chars->values[i] = 0;
680 }
681 }
682 break;
683 default:
684 /* No cmg-dependent data. */
685 break;
686 }
687}
688
e6b6e10a 689int chsc_get_channel_measurement_chars(struct channel_path *chp)
495a5b45
CH
690{
691 int ccode, ret;
692
693 struct {
694 struct chsc_header request;
695 u32 : 24;
696 u32 first_chpid : 8;
697 u32 : 24;
698 u32 last_chpid : 8;
699 u32 zeroes1;
700 struct chsc_header response;
701 u32 zeroes2;
702 u32 not_valid : 1;
703 u32 shared : 1;
704 u32 : 22;
705 u32 chpid : 8;
706 u32 cmcv : 5;
707 u32 : 11;
708 u32 cmgq : 8;
709 u32 cmg : 8;
710 u32 zeroes3;
711 u32 data[NR_MEASUREMENT_CHARS];
0f008aa3 712 } __attribute__ ((packed)) *scmc_area;
495a5b45
CH
713
714 scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
715 if (!scmc_area)
716 return -ENOMEM;
717
718 scmc_area->request.length = 0x0010;
719 scmc_area->request.code = 0x0022;
720
f86635fa
PO
721 scmc_area->first_chpid = chp->chpid.id;
722 scmc_area->last_chpid = chp->chpid.id;
495a5b45
CH
723
724 ccode = chsc(scmc_area);
725 if (ccode > 0) {
726 ret = (ccode == 3) ? -ENODEV : -EBUSY;
727 goto out;
728 }
729
b9c9a21a
CH
730 ret = chsc_error_from_response(scmc_area->response.code);
731 if (ret == 0) {
732 /* Success. */
495a5b45
CH
733 if (!scmc_area->not_valid) {
734 chp->cmg = scmc_area->cmg;
735 chp->shared = scmc_area->shared;
736 chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
737 (struct cmg_chars *)
738 &scmc_area->data);
739 } else {
740 chp->cmg = -1;
741 chp->shared = -1;
742 }
b9c9a21a
CH
743 } else {
744 CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
495a5b45 745 scmc_area->response.code);
495a5b45
CH
746 }
747out:
748 free_page((unsigned long)scmc_area);
749 return ret;
750}
751
4434a38c 752int __init chsc_alloc_sei_area(void)
1da177e4
LT
753{
754 sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
755 if (!sei_page)
e556bbbd
CH
756 CIO_MSG_EVENT(0, "Can't allocate page for processing of "
757 "chsc machine checks!\n");
1da177e4
LT
758 return (sei_page ? 0 : -ENOMEM);
759}
760
4434a38c
CH
761void __init chsc_free_sei_area(void)
762{
763 kfree(sei_page);
764}
765
fb6958a5
CH
766int __init
767chsc_enable_facility(int operation_code)
768{
769 int ret;
770 struct {
771 struct chsc_header request;
772 u8 reserved1:4;
773 u8 format:4;
774 u8 reserved2;
775 u16 operation_code;
776 u32 reserved3;
777 u32 reserved4;
778 u32 operation_data_area[252];
779 struct chsc_header response;
780 u32 reserved5:4;
781 u32 format2:4;
782 u32 reserved6:24;
0f008aa3 783 } __attribute__ ((packed)) *sda_area;
fb6958a5
CH
784
785 sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
786 if (!sda_area)
787 return -ENOMEM;
495a5b45
CH
788 sda_area->request.length = 0x0400;
789 sda_area->request.code = 0x0031;
fb6958a5
CH
790 sda_area->operation_code = operation_code;
791
792 ret = chsc(sda_area);
793 if (ret > 0) {
794 ret = (ret == 3) ? -ENODEV : -EBUSY;
795 goto out;
796 }
b9c9a21a 797
fb6958a5 798 switch (sda_area->response.code) {
b9c9a21a 799 case 0x0101:
fb6958a5
CH
800 ret = -EOPNOTSUPP;
801 break;
b9c9a21a
CH
802 default:
803 ret = chsc_error_from_response(sda_area->response.code);
fb6958a5 804 }
b9c9a21a
CH
805 if (ret != 0)
806 CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
807 operation_code, sda_area->response.code);
fb6958a5
CH
808 out:
809 free_page((unsigned long)sda_area);
810 return ret;
811}
812
1da177e4
LT
813struct css_general_char css_general_characteristics;
814struct css_chsc_char css_chsc_characteristics;
815
816int __init
817chsc_determine_css_characteristics(void)
818{
819 int result;
820 struct {
821 struct chsc_header request;
822 u32 reserved1;
823 u32 reserved2;
824 u32 reserved3;
825 struct chsc_header response;
826 u32 reserved4;
827 u32 general_char[510];
828 u32 chsc_char[518];
0f008aa3 829 } __attribute__ ((packed)) *scsc_area;
1da177e4
LT
830
831 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
b9c9a21a 832 if (!scsc_area)
1da177e4 833 return -ENOMEM;
1da177e4 834
495a5b45
CH
835 scsc_area->request.length = 0x0010;
836 scsc_area->request.code = 0x0010;
1da177e4
LT
837
838 result = chsc(scsc_area);
839 if (result) {
b9c9a21a 840 result = (result == 3) ? -ENODEV : -EBUSY;
1da177e4
LT
841 goto exit;
842 }
843
b9c9a21a
CH
844 result = chsc_error_from_response(scsc_area->response.code);
845 if (result == 0) {
846 memcpy(&css_general_characteristics, scsc_area->general_char,
847 sizeof(css_general_characteristics));
848 memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
849 sizeof(css_chsc_characteristics));
850 } else
851 CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
852 scsc_area->response.code);
1da177e4
LT
853exit:
854 free_page ((unsigned long) scsc_area);
855 return result;
856}
857
858EXPORT_SYMBOL_GPL(css_general_characteristics);
859EXPORT_SYMBOL_GPL(css_chsc_characteristics);