/*
 * drivers/s390/cio/device_fsm.c
 * finite state machine for device handling
 *
 *    Copyright IBM Corp. 2002,2008
 *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/string.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/chpid.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "chsc.h"
#include "ioasm.h"
#include "chp.h"

static int timeout_log_enabled;

static int __init ccw_timeout_log_setup(char *unused)
{
	timeout_log_enabled = 1;
	return 1;
}

__setup("ccw_timeout_log", ccw_timeout_log_setup);

static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	union orb *orb;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	orb = &private->orb;
	cc = stsch(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
	       "device information:\n", get_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       orb, sizeof(*orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n",
	       dev_name(&cdev->dev));
	printk(KERN_WARNING "cio: subchannel bus id: %s\n",
	       dev_name(&sch->dev));
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

	if (orb->tm.b) {
		printk(KERN_WARNING "cio: orb indicates transport mode\n");
		printk(KERN_WARNING "cio: last tcw:\n");
		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->tm.tcw,
			       sizeof(struct tcw), 0);
	} else {
		printk(KERN_WARNING "cio: orb indicates command mode\n");
		if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
		    (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
			printk(KERN_WARNING "cio: last channel program "
			       "(intern):\n");
		else
			printk(KERN_WARNING "cio: last channel program:\n");

		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->cmd.cpa,
			       sizeof(struct ccw1), 0);
	}
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}

/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 */
static void
ccw_device_timeout(unsigned long data)
{
	struct ccw_device *cdev;

	cdev = (struct ccw_device *) data;
	spin_lock_irq(cdev->ccwlock);
	if (timeout_log_enabled)
		ccw_timeout_log(cdev);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}

/*
 * Set timeout
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}

/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, two tries
 * with cio_halt, 255 tries with cio_clear. If everything fails, panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	ret = stsch(sch->schid, &sch->schib);
	if (ret || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (!sch->schib.pmcw.ena)
		/* Not operational -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
		if (!scsw_is_tm(&sch->schib.scsw)) {
			ret = cio_cancel(sch);
			if (ret != -EINVAL)
				return ret;
		}
		/* cancel io unsuccessful or not applicable (transport mode).
		 * Continue with asynchronous instructions. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			if (ret != -EBUSY)
				return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear(sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	panic("Can't stop i/o on subchannel.\n");
}

static int
ccw_device_handle_oper(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	cdev->private->flags.recog_done = 1;
	/*
	 * Check if cu type and device type still match. If
	 * not, it is certainly another device and we have to
	 * de- and re-register.
	 */
	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
	    cdev->id.dev_model != cdev->private->senseid.dev_model) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_do_unreg_rereg);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		return 0;
	}
	cdev->private->flags.donotify = 1;
	return 1;
}

/*
 * The machine won't give us any notification by machine check if a chpid has
 * been varied online on the SE so we have to find out by magic (i. e. driving
 * the channel subsystem to device selection and updating our path masks).
 */
static void
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
{
	int mask, i;
	struct chp_id chpid;

	chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(sch->lpm & mask))
			continue;
		if (old_lpm & mask)
			continue;
		chpid.id = sch->schib.pmcw.chpid[i];
		if (!chp_is_registered(chpid))
			css_schedule_eval_all();
	}
}

/*
 * Stop device recognition.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int notify, old_lpm, same_dev;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;
	stsch(sch->schid, &sch->schib);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/* Check since device may again have become not operational. */
	if (!sch->schib.pmcw.dnv)
		state = DEV_STATE_NOT_OPER;
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
		if (state == DEV_STATE_NOT_OPER) {
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_DISCONNECTED;
			return;
		}
		/* Boxed devices don't need extra treatment. */
	}
	notify = 0;
	same_dev = 0; /* Keep the compiler quiet... */
	switch (state) {
	case DEV_STATE_NOT_OPER:
		CIO_MSG_EVENT(2, "SenseID : unknown device %04x on "
			      "subchannel 0.%x.%04x\n",
			      cdev->private->dev_id.devno,
			      sch->schid.ssid, sch->schid.sch_no);
		break;
	case DEV_STATE_OFFLINE:
		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
			same_dev = ccw_device_handle_oper(cdev);
			notify = 1;
		}
		/* fill out sense information */
		memset(&cdev->id, 0, sizeof(cdev->id));
		cdev->id.cu_type = cdev->private->senseid.cu_type;
		cdev->id.cu_model = cdev->private->senseid.cu_model;
		cdev->id.dev_type = cdev->private->senseid.dev_type;
		cdev->id.dev_model = cdev->private->senseid.dev_model;
		if (notify) {
			cdev->private->state = DEV_STATE_OFFLINE;
			if (same_dev) {
				/* Get device online again. */
				ccw_device_online(cdev);
				wake_up(&cdev->private->wait_q);
			}
			return;
		}
		/* Issue device info message. */
		CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
			      "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
			      "%04X/%02X\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno,
			      cdev->id.cu_type, cdev->id.cu_model,
			      cdev->id.dev_type, cdev->id.dev_model);
		break;
	case DEV_STATE_BOXED:
		CIO_MSG_EVENT(0, "SenseID : boxed device %04x on "
			      " subchannel 0.%x.%04x\n",
			      cdev->private->dev_id.devno,
			      sch->schid.ssid, sch->schid.sch_no);
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	if (state != DEV_STATE_NOT_OPER)
		wake_up(&cdev->private->wait_q);
}

/*
 * Function called from device_id.c after sense id has completed.
 */
void
ccw_device_sense_id_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:		/* Sense id stopped by timeout. */
		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

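/*
 * Forward an event to the driver's notify callback. Returns 0 if the device
 * has no driver, is not online or the driver has no notify callback;
 * otherwise returns the driver's verdict.
 */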
int ccw_device_notify(struct ccw_device *cdev, int event)
{
	if (!cdev->drv)
		return 0;
	if (!cdev->online)
		return 0;
	CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      event);
	return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
}

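/* Work queue callback: re-enable channel measurement for the device. */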
static void cmf_reenable_delayed(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	cmf_reenable(cdev);
}

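/*
 * Tell the driver that the device is operational again. If the driver takes
 * the device back, re-enable channel measurement via the work queue;
 * otherwise schedule de-/re-registration of the device.
 */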
static void ccw_device_oper_notify(struct ccw_device *cdev)
{
	if (ccw_device_notify(cdev, CIO_OPER)) {
		/* Reenable channel measurements, if needed. */
		PREPARE_WORK(&cdev->private->kick_work, cmf_reenable_delayed);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		return;
	}
	/* Driver doesn't want device back. */
	ccw_device_set_notoper(cdev);
	PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unreg_rereg);
	queue_work(ccw_device_work, &cdev->private->kick_work);
}

/*
 * Finished with online/offline processing.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;

	if (state == DEV_STATE_BOXED)
		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);

	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		ccw_device_oper_notify(cdev);
	}
	wake_up(&cdev->private->wait_q);
}

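/* Compare two path group IDs, ignoring the first byte. */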
static int cmp_pgid(struct pgid *p1, struct pgid *p2)
{
	char *c1;
	char *c2;

	c1 = (char *)p1;
	c2 = (char *)p2;

	return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
}

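/*
 * Find a path group ID common to all paths from the sense path group id
 * results. On a mismatch, path grouping is disabled for the device; if no
 * path reported a PGID yet, fall back to the global PGID.
 */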
static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
{
	int i;
	int last;

	last = 0;
	for (i = 0; i < 8; i++) {
		if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
			/* No PGID yet */
			continue;
		if (cdev->private->pgid[last].inf.ps.state1 ==
		    SNID_STATE1_RESET) {
			/* First non-zero PGID */
			last = i;
			continue;
		}
		if (cmp_pgid(&cdev->private->pgid[i],
			     &cdev->private->pgid[last]) == 0)
			/* Non-conflicting PGIDs */
			continue;

		/* PGID mismatch, can't pathgroup. */
		CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
			      "0.%x.%04x, can't pathgroup\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		cdev->private->options.pgroup = 0;
		return;
	}
	if (cdev->private->pgid[last].inf.ps.state1 ==
	    SNID_STATE1_RESET)
		/* No previous pgid found */
		memcpy(&cdev->private->pgid[0],
		       &channel_subsystems[0]->global_pgid,
		       sizeof(struct pgid));
	else
		/* Use existing pgid */
		memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
		       sizeof(struct pgid));
}

/*
 * Function called from device_pgid.c after sense path group id has completed.
 */
void
ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
		cdev->private->options.pgroup = 0;
		break;
	case 0: /* success */
	case -EACCES: /* partial success, some paths not operational */
		/* Check if all pgids are equal or 0. */
		__ccw_device_get_common_pgid(cdev);
		break;
	case -ETIME:		/* Sense path group id stopped by timeout. */
	case -EUSERS:		/* device is reserved for someone else. */
		ccw_device_done(cdev, DEV_STATE_BOXED);
		return;
	default:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return;
	}
	/* Start Path Group verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	cdev->private->flags.doverify = 0;
	ccw_device_verify_start(cdev);
}

/*
 * Start device recognition.
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	if (ret != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return ret;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after a
	 * timeout (or if sense pgid during path verification detects the
	 * device is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	ccw_device_sense_id_start(cdev);
	return 0;
}

/*
 * Handle timeout in device recognition.
 */
static void
ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
		break;
	case -ENODEV:
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		break;
	default:
		ccw_device_set_timeout(cdev, 3*HZ);
	}
}

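/*
 * Path verification has finished. Update the path data, repeat verification
 * if requested in the meantime, and complete the online transition (or handle
 * a verification error).
 */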
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	stsch(sch->schid, &sch->schib);
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		cdev->private->flags.doverify = 0;
		ccw_device_verify_start(cdev);
		return;
	}
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			memset(&cdev->private->irb, 0, sizeof(struct irb));
			cdev->private->irb.scsw.cmd.cc = 1;
			cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
			cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
			cdev->private->irb.scsw.cmd.stctl =
				SCSW_STCTL_STATUS_PEND;
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		break;
	case -ETIME:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		if (cdev->online) {
			ccw_device_set_timeout(cdev, 0);
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/*
 * Get device online.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Do we want to do path grouping? */
	if (!cdev->private->options.pgroup) {
		/* Start initial path verification. */
		cdev->private->state = DEV_STATE_VERIFY;
		cdev->private->flags.doverify = 0;
		ccw_device_verify_start(cdev);
		return 0;
	}
	/* Do a SensePGID first. */
	cdev->private->state = DEV_STATE_SENSE_PGID;
	ccw_device_sense_pgid_start(cdev);
	return 0;
}

void
ccw_device_disband_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		cdev->private->flags.donotify = 0;
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/*
 * Shutdown device.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Allow ccw_device_offline while disconnected. */
	if (cdev->private->state == DEV_STATE_DISCONNECTED ||
	    cdev->private->state == DEV_STATE_NOT_OPER) {
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return 0;
	}
	if (ccw_device_is_orphan(cdev)) {
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (scsw_actl(&sch->schib.scsw) != 0)
		return -EBUSY;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;
	/* Are we doing path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}

/*
 * Handle timeout in device online/offline process.
 */
static void
ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -ENODEV:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	default:
		ccw_device_set_timeout(cdev, 3*HZ);
	}
}

/*
 * Handle not oper event in device recognition.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}

/*
 * Handle not operational event in non-special state.
 */
static void ccw_device_generic_notoper(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	struct subchannel *sch;

	cdev->private->state = DEV_STATE_NOT_OPER;
	sch = to_subchannel(cdev->dev.parent);
	css_schedule_eval(sch->schid);
}

/*
 * Handle path verification event.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (cdev->private->state == DEV_STATE_W4SENSE) {
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	stsch(sch->schid, &sch->schib);

	if (scsw_actl(&sch->schib.scsw) != 0 ||
	    (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
	    (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	cdev->private->flags.doverify = 0;
	ccw_device_verify_start(cdev);
}

/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	int is_cmd;

	irb = (struct irb *) __LC_IRB;
	is_cmd = !scsw_is_tm(&irb->scsw);
	/* Check for unsolicited interrupt. */
	if (!scsw_is_solicited(&irb->scsw)) {
		if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler(cdev, 0, irb);
		if (cdev->private->flags.doverify)
			ccw_device_online_verify(cdev, 0);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (is_cmd && cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}

/*
 * Got a timeout in online state.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV)
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}

/*
 * Got an interrupt for a basic sense.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if (scsw_stctl(&irb->scsw) ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (scsw_cc(&irb->scsw) == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
				      "interrupt during w4sense...\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
			if (cdev->handler)
				cdev->handler(cdev, 0, irb);
		}
		return;
	}
	/*
	 * Check if a halt or clear has been issued in the meanwhile. If yes,
	 * only deliver the halt/clear interrupt to the device driver as if it
	 * had killed the original request.
	 */
	if (scsw_fctl(&irb->scsw) &
	    (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
		/* Retry Basic Sense if requested. */
		if (cdev->private->flags.intretry) {
			cdev->private->flags.intretry = 0;
			ccw_device_do_sense(cdev, irb);
			return;
		}
		cdev->private->flags.dosense = 0;
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		ccw_device_accumulate_irb(cdev, irb);
		goto call_handler;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
call_handler:
	cdev->private->state = DEV_STATE_ONLINE;
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}

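/*
 * Got the interrupt for a pending clear issued before path verification:
 * accumulate the status and start the delayed path verification.
 */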
static void
ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Accumulate status. We don't do basic sense. */
	ccw_device_accumulate_irb(cdev, irb);
	/* Remember to clear irb to avoid residuals. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	/* Try to start delayed device verification. */
	ccw_device_online_verify(cdev, 0);
	/* Note: Don't call handler for cio initiated clear! */
}

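/*
 * Got the interrupt that terminates the i/o we are killing: start delayed
 * path verification and report -EIO to the device driver.
 */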
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

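/*
 * Forcibly terminate the current i/o with cancel/halt/clear. If the
 * termination is still pending, retry from the timeout-kill state;
 * otherwise report -EIO to the device driver.
 */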
void ccw_device_kill_io(struct ccw_device *cdev)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Start verification after current task finished. */
	cdev->private->flags.doverify = 1;
}

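/*
 * Interrupt or timeout after a steal lock operation on a boxed device:
 * just wake up the waiter.
 */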
static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	switch (dev_event) {
	case DEV_EVENT_INTERRUPT:
		irb = (struct irb *) __LC_IRB;
		/* Check for unsolicited interrupt. */
		if ((scsw_stctl(&irb->scsw) ==
		     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
		    (!scsw_cc(&irb->scsw)))
			/* FIXME: we should restart stlck here, but this
			 * is extremely unlikely ... */
			goto out_wakeup;

		ccw_device_accumulate_irb(cdev, irb);
		/* We don't care about basic sense etc. */
		break;
	default: /* timeout */
		break;
	}
out_wakeup:
	wake_up(&cdev->private->wait_q);
}

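/*
 * Restart device recognition on a disconnected device: enable the subchannel
 * and kick off sense id with a 60s timeout.
 */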
static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
	ccw_device_sense_id_start(cdev);
}

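/*
 * A disconnected device has become accessible again: refresh the subchannel
 * data and either re-run device recognition or, if a different device now
 * answers on the subchannel, move the ccw device to the orphanage.
 */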
void ccw_device_trigger_reprobe(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	sch = to_subchannel(cdev->dev.parent);
	/* Update some values. */
	if (stsch(sch->schid, &sch->schib))
		return;
	if (!sch->schib.pmcw.dnv)
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/* Re-set some bits in the pmcw that were lost. */
	sch->schib.pmcw.csense = 1;
	sch->schib.pmcw.ena = 0;
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;
	/* We should also update ssd info, but this has to wait. */
	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_move_to_orphanage);
		queue_work(slow_path_wq, &cdev->private->kick_work);
	} else
		ccw_device_start_id(cdev, 0);
}

static void
ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * An interrupt in state offline means a previous disable was not
	 * successful. Try again.
	 */
	cio_disable_subchannel(sch);
}

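/*
 * Retry setting the channel measurement schib, then re-dispatch the event in
 * the online state.
 */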
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}

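/*
 * Retry copying the channel measurement block, then re-dispatch the event in
 * the online state.
 */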
static void ccw_device_update_cmfblock(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	cmf_retry_copy_block(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}

static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	if (dev_event == DEV_EVENT_NOTOPER)
		cdev->private->state = DEV_STATE_NOT_OPER;
	else
		cdev->private->state = DEV_STATE_OFFLINE;
	wake_up(&cdev->private->wait_q);
}

static void
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		cdev->private->state = DEV_STATE_OFFLINE;
		wake_up(&cdev->private->wait_q);
		break;
	case -ENODEV:
		cdev->private->state = DEV_STATE_NOT_OPER;
		wake_up(&cdev->private->wait_q);
		break;
	default:
		ccw_device_set_timeout(cdev, HZ/10);
	}
}

/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}

/*
 * Bug operation action.
 */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
	CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device "
		      "0.%x.%04x\n", cdev->private->state, dev_event,
		      cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	BUG();
}

/*
 * device state machine
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_bug,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_pgid_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_offline_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_verify_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disband_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_stlck_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_stlck_done,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_CLEAR_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_clear_verify,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, /* FIXME */
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_bug,
		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
	[DEV_STATE_CMFUPDATE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
	},
};

EXPORT_SYMBOL_GPL(ccw_device_set_timeout);