// SPDX-License-Identifier: GPL-2.0
/*
 * CCW device PGID and path verification I/O handling.
 *
 * Copyright IBM Corp. 2002, 2009
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "cio.h"
#include "cio_debug.h"
#include "device.h"
#include "io_sch.h"

#define PGID_RETRIES	256
#define PGID_TIMEOUT	(10 * HZ)

static void verify_start(struct ccw_device *cdev);

/*
 * Process path verification data and report result.
 */
static void verify_done(struct ccw_device *cdev, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	int mpath = cdev->private->flags.mpath;
	int pgroup = cdev->private->flags.pgroup;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	if (sch->config.mp != mpath) {
		sch->config.mp = mpath;
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
		      "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
		      sch->vpm);
	ccw_device_verify_done(cdev, rc);
}

/*
 * Create channel program to perform a NOOP.
 */
static void nop_build_cp(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;

	cp->cmd_code = CCW_CMD_NOOP;
	cp->cda = 0;
	cp->count = 0;
	cp->flags = CCW_FLAG_SLI;
	req->cp = cp;
}

/*
 * Perform NOOP on a single path.
 */
static void nop_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
			      ~cdev->private->path_noirq_mask);
	if (!req->lpm)
		goto out_nopath;
	nop_build_cp(cdev);
	ccw_request_start(cdev);
	return;

out_nopath:
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Adjust NOOP I/O status.
 */
static enum io_status nop_filter(struct ccw_device *cdev, void *data,
				 struct irb *irb, enum io_status status)
{
	/* Only subchannel status might indicate a path error. */
	if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
		return IO_DONE;
	return status;
}

/*
 * Process NOOP request result for a single path.
 */
static void nop_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		sch->vpm |= req->lpm;
		break;
	case -ETIME:
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	nop_do(cdev);
	return;

err:
	verify_done(cdev, rc);
}

/*
 * Create channel program to perform SET PGID on a single path.
 */
static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;
	int i = pathmask_to_pos(req->lpm);
	struct pgid *pgid = &cdev->private->pgid[i];

	pgid->inf.fc = fn;
	cp->cmd_code = CCW_CMD_SET_PGID;
	cp->cda = (u32) (addr_t) pgid;
	cp->count = sizeof(*pgid);
	cp->flags = CCW_FLAG_SLI;
	req->cp = cp;
}

static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
{
	if (rc) {
		/* We don't know the path groups' state. Abort. */
		verify_done(cdev, rc);
		return;
	}
	/*
	 * Path groups have been reset. Restart path verification but
	 * leave paths in path_noirq_mask out.
	 */
	cdev->private->flags.pgid_unknown = 0;
	verify_start(cdev);
}

/*
 * Reset pathgroups and restart path verification, leaving unusable paths out.
 */
static void pgid_wipeout_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
		      id->ssid, id->devno, cdev->private->pgid_valid_mask,
		      cdev->private->path_noirq_mask);

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam;
	req->callback = pgid_wipeout_callback;
	fn = SPID_FUNC_DISBAND;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
}

/*
 * Perform establish/resign SET PGID on a single path.
 */
static void spid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	/* Use next available path that is not already in correct state. */
	req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
	if (!req->lpm)
		goto out_nopath;
	/* Channel program setup. */
	if (req->lpm & sch->opm)
		fn = SPID_FUNC_ESTABLISH;
	else
		fn = SPID_FUNC_RESIGN;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
	return;

out_nopath:
	if (cdev->private->flags.pgid_unknown) {
		/* At least one SPID could be partially done. */
		pgid_wipeout_start(cdev);
		return;
	}
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Process SET PGID request result for a single path.
 */
static void spid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		sch->vpm |= req->lpm & sch->opm;
		break;
	case -ETIME:
		cdev->private->flags.pgid_unknown = 1;
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	case -EOPNOTSUPP:
		if (cdev->private->flags.mpath) {
			/* Try without multipathing. */
			cdev->private->flags.mpath = 0;
			goto out_restart;
		}
		/* Try without pathgrouping. */
		cdev->private->flags.pgroup = 0;
		goto out_restart;
	default:
		goto err;
	}
	req->lpm >>= 1;
	spid_do(cdev);
	return;

out_restart:
	verify_start(cdev);
	return;
err:
	verify_done(cdev, rc);
}

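/* Start SET PGID processing on the first channel path. */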
static void spid_start(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = 0x80;
	req->singlepath = 1;
	req->callback = spid_callback;
	spid_do(cdev);
}

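/* Return non-zero if all PGID bytes except the first byte are zero. */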
static int pgid_is_reset(struct pgid *p)
{
	char *c;

	for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
		if (*c != 0)
			return 0;
	}
	return 1;
}

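/* Compare two PGIDs, ignoring the first byte. */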
static int pgid_cmp(struct pgid *p1, struct pgid *p2)
{
	return memcmp((char *) p1 + 1, (char *) p2 + 1,
		      sizeof(struct pgid) - 1);
}

/*
 * Determine pathgroup state from PGID data.
 */
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
			 int *mismatch, u8 *reserved, u8 *reset)
{
	struct pgid *pgid = &cdev->private->pgid[0];
	struct pgid *first = NULL;
	int lpm;
	int i;

	*mismatch = 0;
	*reserved = 0;
	*reset = 0;
	for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
		if ((cdev->private->pgid_valid_mask & lpm) == 0)
			continue;
		if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
			*reserved |= lpm;
		if (pgid_is_reset(pgid)) {
			*reset |= lpm;
			continue;
		}
		if (!first) {
			first = pgid;
			continue;
		}
		if (pgid_cmp(pgid, first) != 0)
			*mismatch = 1;
	}
	if (!first)
		first = &channel_subsystems[0]->global_pgid;
	*p = first;
}

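/*
 * Return a path mask of paths for which the sensed PGID data already
 * matches the requested grouping and multipathing state.
 */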
static u8 pgid_to_donepm(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct pgid *pgid;
	int i;
	int lpm;
	u8 donepm = 0;

	/* Set bits for paths which are already in the target state. */
	for (i = 0; i < 8; i++) {
		lpm = 0x80 >> i;
		if ((cdev->private->pgid_valid_mask & lpm) == 0)
			continue;
		pgid = &cdev->private->pgid[i];
		if (sch->opm & lpm) {
			if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
				continue;
		} else {
			if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
				continue;
		}
		if (cdev->private->flags.mpath) {
			if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
				continue;
		} else {
			if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
				continue;
		}
		donepm |= lpm;
	}

	return donepm;
}

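/* Copy the given PGID to all entries of the per-path PGID array. */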
static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
{
	int i;

	for (i = 0; i < 8; i++)
		memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
}

/*
 * Process SENSE PGID data and report result.
 */
static void snid_done(struct ccw_device *cdev, int rc)
{
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct pgid *pgid;
	int mismatch = 0;
	u8 reserved = 0;
	u8 reset = 0;
	u8 donepm;

	if (rc)
		goto out;
	pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
	if (reserved == cdev->private->pgid_valid_mask)
		rc = -EUSERS;
	else if (mismatch)
		rc = -EOPNOTSUPP;
	else {
		donepm = pgid_to_donepm(cdev);
		sch->vpm = donepm & sch->opm;
		cdev->private->pgid_reset_mask |= reset;
		cdev->private->pgid_todo_mask &=
			~(donepm | cdev->private->path_noirq_mask);
		pgid_fill(cdev, pgid);
	}
out:
	CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
		      "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
		      id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
		      cdev->private->pgid_todo_mask, mismatch, reserved, reset);
	switch (rc) {
	case 0:
		if (cdev->private->flags.pgid_unknown) {
			pgid_wipeout_start(cdev);
			return;
		}
		/* Anything left to do? */
		if (cdev->private->pgid_todo_mask == 0) {
			verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
			return;
		}
		/* Perform path-grouping. */
		spid_start(cdev);
		break;
	case -EOPNOTSUPP:
		/* Path-grouping not supported. */
		cdev->private->flags.pgroup = 0;
		cdev->private->flags.mpath = 0;
		verify_start(cdev);
		break;
	default:
		verify_done(cdev, rc);
	}
}

/*
 * Create channel program to perform a SENSE PGID on a single path.
 */
static void snid_build_cp(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;
	int i = pathmask_to_pos(req->lpm);

	/* Channel program setup. */
	cp->cmd_code = CCW_CMD_SENSE_PGID;
	cp->cda = (u32) (addr_t) &cdev->private->pgid[i];
	cp->count = sizeof(struct pgid);
	cp->flags = CCW_FLAG_SLI;
	req->cp = cp;
}

/*
 * Perform SENSE PGID on a single path.
 */
static void snid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int ret;

	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
			      ~cdev->private->path_noirq_mask);
	if (!req->lpm)
		goto out_nopath;
	snid_build_cp(cdev);
	ccw_request_start(cdev);
	return;

out_nopath:
	if (cdev->private->pgid_valid_mask)
		ret = 0;
	else if (cdev->private->path_noirq_mask)
		ret = -ETIME;
	else
		ret = -EACCES;
	snid_done(cdev, ret);
}

/*
 * Process SENSE PGID request result for a single path.
 */
static void snid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		cdev->private->pgid_valid_mask |= req->lpm;
		break;
	case -ETIME:
		cdev->private->flags.pgid_unknown = 1;
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	snid_do(cdev);
	return;

err:
	snid_done(cdev, rc);
}

/*
 * Perform path verification.
 */
static void verify_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	struct ccw_dev_id *devid = &cdev->private->dev_id;

	sch->vpm = 0;
	sch->lpm = sch->schib.pmcw.pam;

	/* Initialize PGID data. */
	memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
	cdev->private->pgid_valid_mask = 0;
	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
	cdev->private->path_notoper_mask = 0;

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = 0x80;
	req->singlepath = 1;
	if (cdev->private->flags.pgroup) {
		CIO_TRACE_EVENT(4, "snid");
		CIO_HEX_EVENT(4, devid, sizeof(*devid));
		req->callback = snid_callback;
		snid_do(cdev);
	} else {
		CIO_TRACE_EVENT(4, "nop");
		CIO_HEX_EVENT(4, devid, sizeof(*devid));
		req->filter = nop_filter;
		req->callback = nop_callback;
		nop_do(cdev);
	}
}

/**
 * ccw_device_verify_start - perform path verification
 * @cdev: ccw device
 *
 * Perform an I/O on each available channel path to @cdev to determine which
 * paths are operational. The resulting path mask is stored in sch->vpm.
 * If device options specify pathgrouping, establish a pathgroup for the
 * operational paths. When finished, call ccw_device_verify_done with a
 * return code specifying the result.
 */
void ccw_device_verify_start(struct ccw_device *cdev)
{
	CIO_TRACE_EVENT(4, "vrfy");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/*
	 * Initialize pathgroup and multipath state with target values.
	 * They may change in the course of path verification.
	 */
	cdev->private->flags.pgroup = cdev->private->options.pgroup;
	cdev->private->flags.mpath = cdev->private->options.mpath;
	cdev->private->flags.doverify = 0;
	cdev->private->path_noirq_mask = 0;
	verify_start(cdev);
}

/*
 * Process disband SET PGID request result.
 */
static void disband_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	cdev->private->flags.mpath = 0;
	if (sch->config.mp) {
		sch->config.mp = 0;
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
		      rc);
	ccw_device_disband_done(cdev, rc);
}

/**
 * ccw_device_disband_start - disband pathgroup
 * @cdev: ccw device
 *
 * Execute a SET PGID channel program on @cdev to disband a previously
 * established pathgroup. When finished, call ccw_device_disband_done with
 * a return code specifying the result.
 */
void ccw_device_disband_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	CIO_TRACE_EVENT(4, "disb");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam & sch->opm;
	req->singlepath = 1;
	req->callback = disband_callback;
	fn = SPID_FUNC_DISBAND;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
}

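/* Completion data for the steal lock operation. */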
struct stlck_data {
	struct completion done;
	int rc;
};

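/*
 * Build channel program performing an unconditional reserve (STLCK)
 * followed by a release.
 */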
static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;

	cp[0].cmd_code = CCW_CMD_STLCK;
	cp[0].cda = (u32) (addr_t) buf1;
	cp[0].count = 32;
	cp[0].flags = CCW_FLAG_CC;
	cp[1].cmd_code = CCW_CMD_RELEASE;
	cp[1].cda = (u32) (addr_t) buf2;
	cp[1].count = 32;
	cp[1].flags = 0;
	req->cp = cp;
}

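/* Store the result and signal completion of the steal lock request. */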
static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct stlck_data *sdata = data;

	sdata->rc = rc;
	complete(&sdata->done);
}

/**
 * ccw_device_stlck_start - perform unconditional release
 * @cdev: ccw device
 * @data: data pointer to be passed to ccw_device_stlck_done
 * @buf1: data pointer used in channel program
 * @buf2: data pointer used in channel program
 *
 * Execute a channel program on @cdev to release an existing PGID reservation.
 */
static void ccw_device_stlck_start(struct ccw_device *cdev, void *data,
				   void *buf1, void *buf2)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	CIO_TRACE_EVENT(4, "stlck");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam & sch->opm;
	req->data = data;
	req->callback = stlck_callback;
	stlck_build_cp(cdev, buf1, buf2);
	ccw_request_start(cdev);
}

/*
 * Perform unconditional reserve + release.
 */
int ccw_device_stlck(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct stlck_data data;
	u8 *buffer;
	int rc;

	/* Check if steal lock operation is valid for this device. */
	if (cdev->drv) {
		if (!cdev->private->options.force)
			return -EINVAL;
	}
	buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
	init_completion(&data.done);
	data.rc = -EIO;
	spin_lock_irq(sch->lock);
	rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
	if (rc)
		goto out_unlock;
	/* Perform operation. */
	cdev->private->state = DEV_STATE_STEAL_LOCK;
	ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
	spin_unlock_irq(sch->lock);
	/* Wait for operation to finish. */
	if (wait_for_completion_interruptible(&data.done)) {
		/* Got a signal. */
		spin_lock_irq(sch->lock);
		ccw_request_cancel(cdev);
		spin_unlock_irq(sch->lock);
		wait_for_completion(&data.done);
	}
	rc = data.rc;
	/* Check results. */
	spin_lock_irq(sch->lock);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_BOXED;
out_unlock:
	spin_unlock_irq(sch->lock);
	kfree(buffer);

	return rc;
}