drivers/s390/block/dasd.c (mirror_ubuntu-artful-kernel.git, git.proxmox.com), as of commit "block: implement and use [__]blk_end_request_all()"
1 /*
2 * File...........: linux/drivers/s390/block/dasd.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9 *
10 */
11
12 #define KMSG_COMPONENT "dasd"
13 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
14
15 #include <linux/kmod.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/ctype.h>
19 #include <linux/major.h>
20 #include <linux/slab.h>
21 #include <linux/buffer_head.h>
22 #include <linux/hdreg.h>
23 #include <linux/async.h>
24
25 #include <asm/ccwdev.h>
26 #include <asm/ebcdic.h>
27 #include <asm/idals.h>
28 #include <asm/todclk.h>
29 #include <asm/itcw.h>
30
31 /* This is ugly... */
32 #define PRINTK_HEADER "dasd:"
33
34 #include "dasd_int.h"
35 /*
36 * SECTION: Constant definitions to be used within this file
37 */
38 #define DASD_CHANQ_MAX_SIZE 4
39
40 /*
41 * SECTION: exported variables of dasd.c
42 */
43 debug_info_t *dasd_debug_area;
44 struct dasd_discipline *dasd_diag_discipline_pointer;
45 void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
46
47 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
48 MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
49 " Copyright 2000 IBM Corporation");
50 MODULE_SUPPORTED_DEVICE("dasd");
51 MODULE_LICENSE("GPL");
52
53 /*
54 * SECTION: prototypes for static functions of dasd.c
55 */
56 static int dasd_alloc_queue(struct dasd_block *);
57 static void dasd_setup_queue(struct dasd_block *);
58 static void dasd_free_queue(struct dasd_block *);
59 static void dasd_flush_request_queue(struct dasd_block *);
60 static int dasd_flush_block_queue(struct dasd_block *);
61 static void dasd_device_tasklet(struct dasd_device *);
62 static void dasd_block_tasklet(struct dasd_block *);
63 static void do_kick_device(struct work_struct *);
64 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
65 static void dasd_device_timeout(unsigned long);
66 static void dasd_block_timeout(unsigned long);
67
68 /*
69 * SECTION: Operations on the device structure.
70 */
71 static wait_queue_head_t dasd_init_waitq;
72 static wait_queue_head_t dasd_flush_wq;
73 static wait_queue_head_t generic_waitq;
74
75 /*
76 * Allocate memory for a new device structure.
77 */
78 struct dasd_device *dasd_alloc_device(void)
79 {
80 struct dasd_device *device;
81
82 device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
83 if (!device)
84 return ERR_PTR(-ENOMEM);
85
86 /* Get two pages for normal block device operations. */
87 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
88 if (!device->ccw_mem) {
89 kfree(device);
90 return ERR_PTR(-ENOMEM);
91 }
92 /* Get one page for error recovery. */
93 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
94 if (!device->erp_mem) {
95 free_pages((unsigned long) device->ccw_mem, 1);
96 kfree(device);
97 return ERR_PTR(-ENOMEM);
98 }
99
100 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
101 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
102 spin_lock_init(&device->mem_lock);
103 atomic_set(&device->tasklet_scheduled, 0);
104 tasklet_init(&device->tasklet,
105 (void (*)(unsigned long)) dasd_device_tasklet,
106 (unsigned long) device);
107 INIT_LIST_HEAD(&device->ccw_queue);
108 init_timer(&device->timer);
109 device->timer.function = dasd_device_timeout;
110 device->timer.data = (unsigned long) device;
111 INIT_WORK(&device->kick_work, do_kick_device);
112 device->state = DASD_STATE_NEW;
113 device->target = DASD_STATE_NEW;
114
115 return device;
116 }
117
118 /*
119 * Free memory of a device structure.
120 */
121 void dasd_free_device(struct dasd_device *device)
122 {
123 kfree(device->private);
124 free_page((unsigned long) device->erp_mem);
125 free_pages((unsigned long) device->ccw_mem, 1);
126 kfree(device);
127 }
128
129 /*
130 * Allocate memory for a new block structure.
131 */
132 struct dasd_block *dasd_alloc_block(void)
133 {
134 struct dasd_block *block;
135
136 block = kzalloc(sizeof(*block), GFP_ATOMIC);
137 if (!block)
138 return ERR_PTR(-ENOMEM);
139 /* open_count = 0 means device online but not in use */
140 atomic_set(&block->open_count, -1);
141
142 spin_lock_init(&block->request_queue_lock);
143 atomic_set(&block->tasklet_scheduled, 0);
144 tasklet_init(&block->tasklet,
145 (void (*)(unsigned long)) dasd_block_tasklet,
146 (unsigned long) block);
147 INIT_LIST_HEAD(&block->ccw_queue);
148 spin_lock_init(&block->queue_lock);
149 init_timer(&block->timer);
150 block->timer.function = dasd_block_timeout;
151 block->timer.data = (unsigned long) block;
152
153 return block;
154 }
155
156 /*
157 * Free memory of a block structure.
158 */
159 void dasd_free_block(struct dasd_block *block)
160 {
161 kfree(block);
162 }
163
164 /*
165 * Make a new device known to the system.
166 */
167 static int dasd_state_new_to_known(struct dasd_device *device)
168 {
169 int rc;
170
171 /*
172 * As long as the device is not in state DASD_STATE_NEW we want to
173 * keep the reference count > 0.
174 */
175 dasd_get_device(device);
176
177 if (device->block) {
178 rc = dasd_alloc_queue(device->block);
179 if (rc) {
180 dasd_put_device(device);
181 return rc;
182 }
183 }
184 device->state = DASD_STATE_KNOWN;
185 return 0;
186 }
187
188 /*
189 * Let the system forget about a device.
190 */
191 static int dasd_state_known_to_new(struct dasd_device *device)
192 {
193 /* Disable extended error reporting for this device. */
194 dasd_eer_disable(device);
195 /* Forget the discipline information. */
196 if (device->discipline) {
197 if (device->discipline->uncheck_device)
198 device->discipline->uncheck_device(device);
199 module_put(device->discipline->owner);
200 }
201 device->discipline = NULL;
202 if (device->base_discipline)
203 module_put(device->base_discipline->owner);
204 device->base_discipline = NULL;
205 device->state = DASD_STATE_NEW;
206
207 if (device->block)
208 dasd_free_queue(device->block);
209
210 /* Give up reference we took in dasd_state_new_to_known. */
211 dasd_put_device(device);
212 return 0;
213 }
214
215 /*
216 * Allocate the gendisk and register the debug area for the device.
217 */
218 static int dasd_state_known_to_basic(struct dasd_device *device)
219 {
220 int rc;
221
222 /* Allocate and register gendisk structure. */
223 if (device->block) {
224 rc = dasd_gendisk_alloc(device->block);
225 if (rc)
226 return rc;
227 }
228 /* register 'device' debug area, used for all DBF_DEV_XXX calls */
229 device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
230 8 * sizeof(long));
231 debug_register_view(device->debug_area, &debug_sprintf_view);
232 debug_set_level(device->debug_area, DBF_WARNING);
233 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
234
235 device->state = DASD_STATE_BASIC;
236 return 0;
237 }
238
239 /*
240 * Free the gendisk and the debug area. Terminate any running i/o.
241 */
242 static int dasd_state_basic_to_known(struct dasd_device *device)
243 {
244 int rc;
245 if (device->block) {
246 dasd_gendisk_free(device->block);
247 dasd_block_clear_timer(device->block);
248 }
249 rc = dasd_flush_device_queue(device);
250 if (rc)
251 return rc;
252 dasd_device_clear_timer(device);
253
254 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
255 if (device->debug_area != NULL) {
256 debug_unregister(device->debug_area);
257 device->debug_area = NULL;
258 }
259 device->state = DASD_STATE_KNOWN;
260 return 0;
261 }
262
263 /*
264 * Do the initial analysis. The do_analysis function may return
265 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
266 * until the discipline decides to continue the startup sequence
267 * by calling the function dasd_change_state. The eckd discipline
268 * uses this to start a ccw that detects the format. The completion
269 * interrupt for this detection ccw uses the kernel event daemon to
270 * trigger the call to dasd_change_state. All this is done in the
271 * discipline code, see dasd_eckd.c.
272 * After the analysis ccw is done (do_analysis returned 0) the block
273 * device is set up.
274 * In case the analysis returns an error, the device setup is stopped
275 * (a fake disk was already added to allow formatting).
276 */
277 static int dasd_state_basic_to_ready(struct dasd_device *device)
278 {
279 int rc;
280 struct dasd_block *block;
281
282 rc = 0;
283 block = device->block;
284 /* make disk known with correct capacity */
285 if (block) {
286 if (block->base->discipline->do_analysis != NULL)
287 rc = block->base->discipline->do_analysis(block);
288 if (rc) {
289 if (rc != -EAGAIN)
290 device->state = DASD_STATE_UNFMT;
291 return rc;
292 }
293 dasd_setup_queue(block);
294 set_capacity(block->gdp,
295 block->blocks << block->s2b_shift);
296 device->state = DASD_STATE_READY;
297 rc = dasd_scan_partitions(block);
298 if (rc)
299 device->state = DASD_STATE_BASIC;
300 } else {
301 device->state = DASD_STATE_READY;
302 }
303 return rc;
304 }
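
/*
 * Minimal sketch of the deferred-analysis pattern described above. The
 * callback name my_analysis_done is hypothetical (a real discipline wires
 * this up in its own code, see dasd_eckd.c); it only illustrates how the
 * completion of the format-detection ccw re-enters the state machine via
 * the kernel event daemon.
 */
static void my_analysis_done(struct dasd_ccw_req *cqr, void *data)
{
	struct dasd_device *device = cqr->startdev;

	/* schedules do_kick_device, which calls dasd_change_state */
	dasd_kick_device(device);
}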
305
306 /*
307 * Remove device from block device layer. Destroy dirty buffers.
308 * Forget format information. Check if the target level is basic
309 * and if it is create fake disk for formatting.
310 */
311 static int dasd_state_ready_to_basic(struct dasd_device *device)
312 {
313 int rc;
314
315 device->state = DASD_STATE_BASIC;
316 if (device->block) {
317 struct dasd_block *block = device->block;
318 rc = dasd_flush_block_queue(block);
319 if (rc) {
320 device->state = DASD_STATE_READY;
321 return rc;
322 }
323 dasd_destroy_partitions(block);
324 dasd_flush_request_queue(block);
325 block->blocks = 0;
326 block->bp_block = 0;
327 block->s2b_shift = 0;
328 }
329 return 0;
330 }
331
332 /*
333 * Back to basic.
334 */
335 static int dasd_state_unfmt_to_basic(struct dasd_device *device)
336 {
337 device->state = DASD_STATE_BASIC;
338 return 0;
339 }
340
341 /*
342 * Make the device online and schedule the bottom half to start
343 * the requeueing of requests from the linux request queue to the
344 * ccw queue.
345 */
346 static int
347 dasd_state_ready_to_online(struct dasd_device * device)
348 {
349 int rc;
350 struct gendisk *disk;
351 struct disk_part_iter piter;
352 struct hd_struct *part;
353
354 if (device->discipline->ready_to_online) {
355 rc = device->discipline->ready_to_online(device);
356 if (rc)
357 return rc;
358 }
359 device->state = DASD_STATE_ONLINE;
360 if (device->block) {
361 dasd_schedule_block_bh(device->block);
362 disk = device->block->bdev->bd_disk;
363 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
364 while ((part = disk_part_iter_next(&piter)))
365 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
366 disk_part_iter_exit(&piter);
367 }
368 return 0;
369 }
370
371 /*
372 * Stop the requeueing of requests again.
373 */
374 static int dasd_state_online_to_ready(struct dasd_device *device)
375 {
376 int rc;
377 struct gendisk *disk;
378 struct disk_part_iter piter;
379 struct hd_struct *part;
380
381 if (device->discipline->online_to_ready) {
382 rc = device->discipline->online_to_ready(device);
383 if (rc)
384 return rc;
385 }
386 device->state = DASD_STATE_READY;
387 if (device->block) {
388 disk = device->block->bdev->bd_disk;
389 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
390 while ((part = disk_part_iter_next(&piter)))
391 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
392 disk_part_iter_exit(&piter);
393 }
394 return 0;
395 }
396
397 /*
398 * Device startup state changes.
399 */
400 static int dasd_increase_state(struct dasd_device *device)
401 {
402 int rc;
403
404 rc = 0;
405 if (device->state == DASD_STATE_NEW &&
406 device->target >= DASD_STATE_KNOWN)
407 rc = dasd_state_new_to_known(device);
408
409 if (!rc &&
410 device->state == DASD_STATE_KNOWN &&
411 device->target >= DASD_STATE_BASIC)
412 rc = dasd_state_known_to_basic(device);
413
414 if (!rc &&
415 device->state == DASD_STATE_BASIC &&
416 device->target >= DASD_STATE_READY)
417 rc = dasd_state_basic_to_ready(device);
418
419 if (!rc &&
420 device->state == DASD_STATE_UNFMT &&
421 device->target > DASD_STATE_UNFMT)
422 rc = -EPERM;
423
424 if (!rc &&
425 device->state == DASD_STATE_READY &&
426 device->target >= DASD_STATE_ONLINE)
427 rc = dasd_state_ready_to_online(device);
428
429 return rc;
430 }
431
432 /*
433 * Device shutdown state changes.
434 */
435 static int dasd_decrease_state(struct dasd_device *device)
436 {
437 int rc;
438
439 rc = 0;
440 if (device->state == DASD_STATE_ONLINE &&
441 device->target <= DASD_STATE_READY)
442 rc = dasd_state_online_to_ready(device);
443
444 if (!rc &&
445 device->state == DASD_STATE_READY &&
446 device->target <= DASD_STATE_BASIC)
447 rc = dasd_state_ready_to_basic(device);
448
449 if (!rc &&
450 device->state == DASD_STATE_UNFMT &&
451 device->target <= DASD_STATE_BASIC)
452 rc = dasd_state_unfmt_to_basic(device);
453
454 if (!rc &&
455 device->state == DASD_STATE_BASIC &&
456 device->target <= DASD_STATE_KNOWN)
457 rc = dasd_state_basic_to_known(device);
458
459 if (!rc &&
460 device->state == DASD_STATE_KNOWN &&
461 device->target <= DASD_STATE_NEW)
462 rc = dasd_state_known_to_new(device);
463
464 return rc;
465 }
466
467 /*
468 * This is the main startup/shutdown routine.
469 */
470 static void dasd_change_state(struct dasd_device *device)
471 {
472 int rc;
473
474 if (device->state == device->target)
475 /* Already where we want to go today... */
476 return;
477 if (device->state < device->target)
478 rc = dasd_increase_state(device);
479 else
480 rc = dasd_decrease_state(device);
481 if (rc && rc != -EAGAIN)
482 device->target = device->state;
483
484 if (device->state == device->target) {
485 wake_up(&dasd_init_waitq);
486 dasd_put_device(device);
487 }
488
489 /* let user-space know that the device status changed */
490 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
491 }
492
493 /*
494 * Kick starter for devices that did not complete the startup/shutdown
495 * procedure or were sleeping because of a pending state.
496 * dasd_kick_device will schedule a call to do_kick_device to the kernel
497 * event daemon.
498 */
499 static void do_kick_device(struct work_struct *work)
500 {
501 struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
502 dasd_change_state(device);
503 dasd_schedule_device_bh(device);
504 dasd_put_device(device);
505 }
506
507 void dasd_kick_device(struct dasd_device *device)
508 {
509 dasd_get_device(device);
510 /* queue call to do_kick_device to the kernel event daemon. */
511 schedule_work(&device->kick_work);
512 }
513
514 /*
515 * Set the target state for a device and start the state change.
516 */
517 void dasd_set_target_state(struct dasd_device *device, int target)
518 {
519 dasd_get_device(device);
520 /* If we are in probeonly mode stop at DASD_STATE_READY. */
521 if (dasd_probeonly && target > DASD_STATE_READY)
522 target = DASD_STATE_READY;
523 if (device->target != target) {
524 if (device->state == target) {
525 wake_up(&dasd_init_waitq);
526 dasd_put_device(device);
527 }
528 device->target = target;
529 }
530 if (device->state != device->target)
531 dasd_change_state(device);
532 }
533
534 /*
535 * Enable a device and wait until it has reached its target state.
536 */
537 static inline int _wait_for_device(struct dasd_device *device)
538 {
539 return (device->state == device->target);
540 }
541
542 void dasd_enable_device(struct dasd_device *device)
543 {
544 dasd_set_target_state(device, DASD_STATE_ONLINE);
545 if (device->state <= DASD_STATE_KNOWN)
546 /* No discipline for device found. */
547 dasd_set_target_state(device, DASD_STATE_NEW);
548 /* Now wait for the devices to come up. */
549 wait_event(dasd_init_waitq, _wait_for_device(device));
550 }
551
552 /*
553 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
554 */
555 #ifdef CONFIG_DASD_PROFILE
556
557 struct dasd_profile_info_t dasd_global_profile;
558 unsigned int dasd_profile_level = DASD_PROFILE_OFF;
559
560 /*
561 * Increments counter in global and local profiling structures.
562 */
563 #define dasd_profile_counter(value, counter, block) \
564 { \
565 int index; \
566 for (index = 0; index < 31 && value >> (2+index); index++); \
567 dasd_global_profile.counter[index]++; \
568 block->profile.counter[index]++; \
569 }
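
/*
 * Worked example (illustrative): the loop above selects a power-of-two
 * histogram bucket. Values below 4 land in counter[0]; otherwise bucket i
 * collects values v with 2^(i+1) <= v < 2^(i+2). For value = 100 the first
 * shift that reaches zero is 100 >> 7, so index ends up as 5.
 */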
570
571 /*
572 * Add profiling information for cqr before execution.
573 */
574 static void dasd_profile_start(struct dasd_block *block,
575 struct dasd_ccw_req *cqr,
576 struct request *req)
577 {
578 struct list_head *l;
579 unsigned int counter;
580
581 if (dasd_profile_level != DASD_PROFILE_ON)
582 return;
583
584 /* count the length of the chanq for statistics */
585 counter = 0;
586 list_for_each(l, &block->ccw_queue)
587 if (++counter >= 31)
588 break;
589 dasd_global_profile.dasd_io_nr_req[counter]++;
590 block->profile.dasd_io_nr_req[counter]++;
591 }
592
593 /*
594 * Add profiling information for cqr after execution.
595 */
596 static void dasd_profile_end(struct dasd_block *block,
597 struct dasd_ccw_req *cqr,
598 struct request *req)
599 {
600 long strtime, irqtime, endtime, tottime; /* in microseconds */
601 long tottimeps, sectors;
602
603 if (dasd_profile_level != DASD_PROFILE_ON)
604 return;
605
606 sectors = req->nr_sectors;
607 if (!cqr->buildclk || !cqr->startclk ||
608 !cqr->stopclk || !cqr->endclk ||
609 !sectors)
610 return;
611
612 strtime = ((cqr->startclk - cqr->buildclk) >> 12);
613 irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
614 endtime = ((cqr->endclk - cqr->stopclk) >> 12);
615 tottime = ((cqr->endclk - cqr->buildclk) >> 12);
616 tottimeps = tottime / sectors;
617
618 if (!dasd_global_profile.dasd_io_reqs)
619 memset(&dasd_global_profile, 0,
620 sizeof(struct dasd_profile_info_t));
621 dasd_global_profile.dasd_io_reqs++;
622 dasd_global_profile.dasd_io_sects += sectors;
623
624 if (!block->profile.dasd_io_reqs)
625 memset(&block->profile, 0,
626 sizeof(struct dasd_profile_info_t));
627 block->profile.dasd_io_reqs++;
628 block->profile.dasd_io_sects += sectors;
629
630 dasd_profile_counter(sectors, dasd_io_secs, block);
631 dasd_profile_counter(tottime, dasd_io_times, block);
632 dasd_profile_counter(tottimeps, dasd_io_timps, block);
633 dasd_profile_counter(strtime, dasd_io_time1, block);
634 dasd_profile_counter(irqtime, dasd_io_time2, block);
635 dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
636 dasd_profile_counter(endtime, dasd_io_time3, block);
637 }
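
/*
 * Note on the ">> 12" conversions above: get_clock() returns the s390 TOD
 * clock, in which bit 51 increments once per microsecond, so shifting a
 * clock difference right by 12 yields the elapsed time in microseconds,
 * matching the "in microseconds" comment on the declarations.
 */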
638 #else
639 #define dasd_profile_start(block, cqr, req) do {} while (0)
640 #define dasd_profile_end(block, cqr, req) do {} while (0)
641 #endif /* CONFIG_DASD_PROFILE */
642
643 /*
644 * Allocate memory for a channel program with 'cplength' channel
645 * command words and 'datasize' additional space. There are two
646 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
647 * memory and 2) dasd_smalloc_request uses the static ccw memory
648 * that gets allocated for each device.
649 */
650 struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
651 int datasize,
652 struct dasd_device *device)
653 {
654 struct dasd_ccw_req *cqr;
655
656 /* Sanity checks */
657 BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
658 (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
659
660 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
661 if (cqr == NULL)
662 return ERR_PTR(-ENOMEM);
663 cqr->cpaddr = NULL;
664 if (cplength > 0) {
665 cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
666 GFP_ATOMIC | GFP_DMA);
667 if (cqr->cpaddr == NULL) {
668 kfree(cqr);
669 return ERR_PTR(-ENOMEM);
670 }
671 }
672 cqr->data = NULL;
673 if (datasize > 0) {
674 cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
675 if (cqr->data == NULL) {
676 kfree(cqr->cpaddr);
677 kfree(cqr);
678 return ERR_PTR(-ENOMEM);
679 }
680 }
681 strncpy((char *) &cqr->magic, magic, 4);
682 ASCEBC((char *) &cqr->magic, 4);
683 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
684 dasd_get_device(device);
685 return cqr;
686 }
687
688 struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
689 int datasize,
690 struct dasd_device *device)
691 {
692 unsigned long flags;
693 struct dasd_ccw_req *cqr;
694 char *data;
695 int size;
696
697 /* Sanity checks */
698 BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
699 (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
700
701 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
702 if (cplength > 0)
703 size += cplength * sizeof(struct ccw1);
704 if (datasize > 0)
705 size += datasize;
706 spin_lock_irqsave(&device->mem_lock, flags);
707 cqr = (struct dasd_ccw_req *)
708 dasd_alloc_chunk(&device->ccw_chunks, size);
709 spin_unlock_irqrestore(&device->mem_lock, flags);
710 if (cqr == NULL)
711 return ERR_PTR(-ENOMEM);
712 memset(cqr, 0, sizeof(struct dasd_ccw_req));
713 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
714 cqr->cpaddr = NULL;
715 if (cplength > 0) {
716 cqr->cpaddr = (struct ccw1 *) data;
717 data += cplength*sizeof(struct ccw1);
718 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
719 }
720 cqr->data = NULL;
721 if (datasize > 0) {
722 cqr->data = data;
723 memset(cqr->data, 0, datasize);
724 }
725 strncpy((char *) &cqr->magic, magic, 4);
726 ASCEBC((char *) &cqr->magic, 4);
727 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
728 dasd_get_device(device);
729 return cqr;
730 }
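
/*
 * Usage sketch for the allocation helpers above. Everything here is
 * illustrative: the "EXMP" magic, the single NOP channel command (command
 * code 0x03) and the helper name example_build_nop are made up and not
 * part of the driver; the point is merely the alloc / fill / mark-FILLED
 * sequence that real disciplines follow.
 */
static struct dasd_ccw_req *example_build_nop(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request("EXMP", 1 /* cplength */, 0 /* datasize */,
				   device);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	ccw->cmd_code = 0x03;	/* NOP */
	ccw->flags = 0;
	ccw->count = 0;
	cqr->startdev = device;
	cqr->expires = 5 * HZ;
	cqr->retries = 2;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}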
731
732 /*
733 * Free memory of a channel program. This function needs to free all the
734 * idal lists that might have been created by dasd_set_cda and the
735 * struct dasd_ccw_req itself.
736 */
737 void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
738 {
739 #ifdef CONFIG_64BIT
740 struct ccw1 *ccw;
741
742 /* Clear any idals used for the request. */
743 ccw = cqr->cpaddr;
744 do {
745 clear_normalized_cda(ccw);
746 } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
747 #endif
748 kfree(cqr->cpaddr);
749 kfree(cqr->data);
750 kfree(cqr);
751 dasd_put_device(device);
752 }
753
754 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
755 {
756 unsigned long flags;
757
758 spin_lock_irqsave(&device->mem_lock, flags);
759 dasd_free_chunk(&device->ccw_chunks, cqr);
760 spin_unlock_irqrestore(&device->mem_lock, flags);
761 dasd_put_device(device);
762 }
763
764 /*
765 * Check discipline magic in cqr.
766 */
767 static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
768 {
769 struct dasd_device *device;
770
771 if (cqr == NULL)
772 return -EINVAL;
773 device = cqr->startdev;
774 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
775 DBF_DEV_EVENT(DBF_WARNING, device,
776 " dasd_ccw_req 0x%08x magic doesn't match"
777 " discipline 0x%08x",
778 cqr->magic,
779 *(unsigned int *) device->discipline->name);
780 return -EINVAL;
781 }
782 return 0;
783 }
784
785 /*
786 * Terminate the current i/o and set the request to clear_pending.
787 * Timer keeps device running.
788 * ccw_device_clear can fail if the i/o subsystem
789 * is in a bad mood.
790 */
791 int dasd_term_IO(struct dasd_ccw_req *cqr)
792 {
793 struct dasd_device *device;
794 int retries, rc;
795 char errorstring[ERRORLENGTH];
796
797 /* Check the cqr */
798 rc = dasd_check_cqr(cqr);
799 if (rc)
800 return rc;
801 retries = 0;
802 device = (struct dasd_device *) cqr->startdev;
803 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
804 rc = ccw_device_clear(device->cdev, (long) cqr);
805 switch (rc) {
806 case 0: /* termination successful */
807 cqr->retries--;
808 cqr->status = DASD_CQR_CLEAR_PENDING;
809 cqr->stopclk = get_clock();
810 cqr->starttime = 0;
811 DBF_DEV_EVENT(DBF_DEBUG, device,
812 "terminate cqr %p successful",
813 cqr);
814 break;
815 case -ENODEV:
816 DBF_DEV_EVENT(DBF_ERR, device, "%s",
817 "device gone, retry");
818 break;
819 case -EIO:
820 DBF_DEV_EVENT(DBF_ERR, device, "%s",
821 "I/O error, retry");
822 break;
823 case -EINVAL:
824 case -EBUSY:
825 DBF_DEV_EVENT(DBF_ERR, device, "%s",
826 "device busy, retry later");
827 break;
828 default:
829 /* internal error 10 - unknown rc*/
830 snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
831 dev_err(&device->cdev->dev, "An error occurred in the "
832 "DASD device driver, reason=%s\n", errorstring);
833 BUG();
834 break;
835 }
836 retries++;
837 }
838 dasd_schedule_device_bh(device);
839 return rc;
840 }
841
842 /*
843 * Start the i/o. This start_IO can fail if the channel is really busy.
844 * In that case set up a timer to start the request later.
845 */
846 int dasd_start_IO(struct dasd_ccw_req *cqr)
847 {
848 struct dasd_device *device;
849 int rc;
850 char errorstring[ERRORLENGTH];
851
852 /* Check the cqr */
853 rc = dasd_check_cqr(cqr);
854 if (rc)
855 return rc;
856 device = (struct dasd_device *) cqr->startdev;
857 if (cqr->retries < 0) {
858 /* internal error 14 - start_IO run out of retries */
859 sprintf(errorstring, "14 %p", cqr);
860 dev_err(&device->cdev->dev, "An error occurred in the DASD "
861 "device driver, reason=%s\n", errorstring);
862 cqr->status = DASD_CQR_ERROR;
863 return -EIO;
864 }
865 cqr->startclk = get_clock();
866 cqr->starttime = jiffies;
867 cqr->retries--;
868 if (cqr->cpmode == 1) {
869 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
870 (long) cqr, cqr->lpm);
871 } else {
872 rc = ccw_device_start(device->cdev, cqr->cpaddr,
873 (long) cqr, cqr->lpm, 0);
874 }
875 switch (rc) {
876 case 0:
877 cqr->status = DASD_CQR_IN_IO;
878 DBF_DEV_EVENT(DBF_DEBUG, device,
879 "start_IO: request %p started successful",
880 cqr);
881 break;
882 case -EBUSY:
883 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
884 "start_IO: device busy, retry later");
885 break;
886 case -ETIMEDOUT:
887 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
888 "start_IO: request timeout, retry later");
889 break;
890 case -EACCES:
891 /* -EACCES indicates that the request used only a
892 * subset of the available paths and all these
893 * paths are gone.
894 * Do a retry with all available paths.
895 */
896 cqr->lpm = LPM_ANYPATH;
897 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
898 "start_IO: selected pathes gone,"
899 " retry on all pathes");
900 break;
901 case -ENODEV:
902 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
903 "start_IO: -ENODEV device gone, retry");
904 break;
905 case -EIO:
906 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
907 "start_IO: -EIO device gone, retry");
908 break;
909 default:
910 /* internal error 11 - unknown rc */
911 snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
912 dev_err(&device->cdev->dev,
913 "An error occurred in the DASD device driver, "
914 "reason=%s\n", errorstring);
915 BUG();
916 break;
917 }
918 return rc;
919 }
920
921 /*
922 * Timeout function for dasd devices. This is used for different purposes
923 * 1) missing interrupt handler for normal operation
924 * 2) delayed start of request where start_IO failed with -EBUSY
925 * 3) timeout for missing state change interrupts
926 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
927 * DASD_CQR_QUEUED for 2) and 3).
928 */
929 static void dasd_device_timeout(unsigned long ptr)
930 {
931 unsigned long flags;
932 struct dasd_device *device;
933
934 device = (struct dasd_device *) ptr;
935 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
936 /* re-activate request queue */
937 device->stopped &= ~DASD_STOPPED_PENDING;
938 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
939 dasd_schedule_device_bh(device);
940 }
941
942 /*
943 * Setup timeout for a device in jiffies.
944 */
945 void dasd_device_set_timer(struct dasd_device *device, int expires)
946 {
947 if (expires == 0)
948 del_timer(&device->timer);
949 else
950 mod_timer(&device->timer, jiffies + expires);
951 }
952
953 /*
954 * Clear timeout for a device.
955 */
956 void dasd_device_clear_timer(struct dasd_device *device)
957 {
958 del_timer(&device->timer);
959 }
960
961 static void dasd_handle_killed_request(struct ccw_device *cdev,
962 unsigned long intparm)
963 {
964 struct dasd_ccw_req *cqr;
965 struct dasd_device *device;
966
967 if (!intparm)
968 return;
969 cqr = (struct dasd_ccw_req *) intparm;
970 if (cqr->status != DASD_CQR_IN_IO) {
971 DBF_EVENT(DBF_DEBUG,
972 "invalid status in handle_killed_request: "
973 "bus_id %s, status %02x",
974 dev_name(&cdev->dev), cqr->status);
975 return;
976 }
977
978 device = (struct dasd_device *) cqr->startdev;
979 if (device == NULL ||
980 device != dasd_device_from_cdev_locked(cdev) ||
981 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
982 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
983 "bus_id %s", dev_name(&cdev->dev));
984 return;
985 }
986
987 /* Schedule request to be retried. */
988 cqr->status = DASD_CQR_QUEUED;
989
990 dasd_device_clear_timer(device);
991 dasd_schedule_device_bh(device);
992 dasd_put_device(device);
993 }
994
995 void dasd_generic_handle_state_change(struct dasd_device *device)
996 {
997 /* First of all start sense subsystem status request. */
998 dasd_eer_snss(device);
999
1000 device->stopped &= ~DASD_STOPPED_PENDING;
1001 dasd_schedule_device_bh(device);
1002 if (device->block)
1003 dasd_schedule_block_bh(device->block);
1004 }
1005
1006 /*
1007 * Interrupt handler for "normal" ssch-io based dasd devices.
1008 */
1009 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1010 struct irb *irb)
1011 {
1012 struct dasd_ccw_req *cqr, *next;
1013 struct dasd_device *device;
1014 unsigned long long now;
1015 int expires;
1016
1017 if (IS_ERR(irb)) {
1018 switch (PTR_ERR(irb)) {
1019 case -EIO:
1020 break;
1021 case -ETIMEDOUT:
1022 DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n",
1023 __func__, dev_name(&cdev->dev));
1024 break;
1025 default:
1026 DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n",
1027 __func__, dev_name(&cdev->dev), PTR_ERR(irb));
1028 }
1029 dasd_handle_killed_request(cdev, intparm);
1030 return;
1031 }
1032
1033 now = get_clock();
1034
1035 /* check for unsolicited interrupts */
1036 cqr = (struct dasd_ccw_req *) intparm;
1037 if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
1038 (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
1039 (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) {
1040 if (cqr && cqr->status == DASD_CQR_IN_IO)
1041 cqr->status = DASD_CQR_QUEUED;
1042 device = dasd_device_from_cdev_locked(cdev);
1043 if (!IS_ERR(device)) {
1044 dasd_device_clear_timer(device);
1045 device->discipline->handle_unsolicited_interrupt(device,
1046 irb);
1047 dasd_put_device(device);
1048 }
1049 return;
1050 }
1051
1052 device = (struct dasd_device *) cqr->startdev;
1053 if (!device ||
1054 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1055 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
1056 "bus_id %s", dev_name(&cdev->dev));
1057 return;
1058 }
1059
1060 /* Check for clear pending */
1061 if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1062 scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
1063 cqr->status = DASD_CQR_CLEARED;
1064 dasd_device_clear_timer(device);
1065 wake_up(&dasd_flush_wq);
1066 dasd_schedule_device_bh(device);
1067 return;
1068 }
1069
1070 /* check status - the request might have been killed by dyn detach */
1071 if (cqr->status != DASD_CQR_IN_IO) {
1072 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
1073 "status %02x", dev_name(&cdev->dev), cqr->status);
1074 return;
1075 }
1076
1077 next = NULL;
1078 expires = 0;
1079 if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1080 scsw_cstat(&irb->scsw) == 0) {
1081 /* request was completed successfully */
1082 cqr->status = DASD_CQR_SUCCESS;
1083 cqr->stopclk = now;
1084 /* Start first request on queue if possible -> fast_io. */
1085 if (cqr->devlist.next != &device->ccw_queue) {
1086 next = list_entry(cqr->devlist.next,
1087 struct dasd_ccw_req, devlist);
1088 }
1089 } else { /* error */
1090 memcpy(&cqr->irb, irb, sizeof(struct irb));
1091 /* log sense for every failed I/O to s390 debug feature */
1092 dasd_log_sense_dbf(cqr, irb);
1093 if (device->features & DASD_FEATURE_ERPLOG) {
1094 dasd_log_sense(cqr, irb);
1095 }
1096
1097 /*
1098 * If we don't want complex ERP for this request, then just
1099 * reset this and retry it in the fastpath
1100 */
1101 if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
1102 cqr->retries > 0) {
1103 if (cqr->lpm == LPM_ANYPATH)
1104 DBF_DEV_EVENT(DBF_DEBUG, device,
1105 "default ERP in fastpath "
1106 "(%i retries left)",
1107 cqr->retries);
1108 cqr->lpm = LPM_ANYPATH;
1109 cqr->status = DASD_CQR_QUEUED;
1110 next = cqr;
1111 } else
1112 cqr->status = DASD_CQR_ERROR;
1113 }
1114 if (next && (next->status == DASD_CQR_QUEUED) &&
1115 (!device->stopped)) {
1116 if (device->discipline->start_IO(next) == 0)
1117 expires = next->expires;
1118 }
1119 if (expires != 0)
1120 dasd_device_set_timer(device, expires);
1121 else
1122 dasd_device_clear_timer(device);
1123 dasd_schedule_device_bh(device);
1124 }
1125
1126 /*
1127 * If we have an error on a dasd_block layer request then we cancel
1128 * and return all further requests from the same dasd_block as well.
1129 */
1130 static void __dasd_device_recovery(struct dasd_device *device,
1131 struct dasd_ccw_req *ref_cqr)
1132 {
1133 struct list_head *l, *n;
1134 struct dasd_ccw_req *cqr;
1135
1136 /*
1137 * only requeue requests that came from the dasd_block layer
1138 */
1139 if (!ref_cqr->block)
1140 return;
1141
1142 list_for_each_safe(l, n, &device->ccw_queue) {
1143 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1144 if (cqr->status == DASD_CQR_QUEUED &&
1145 ref_cqr->block == cqr->block) {
1146 cqr->status = DASD_CQR_CLEARED;
1147 }
1148 }
1149 }
1150
1151 /*
1152 * Remove those ccw requests from the queue that need to be returned
1153 * to the upper layer.
1154 */
1155 static void __dasd_device_process_ccw_queue(struct dasd_device *device,
1156 struct list_head *final_queue)
1157 {
1158 struct list_head *l, *n;
1159 struct dasd_ccw_req *cqr;
1160
1161 /* Process request with final status. */
1162 list_for_each_safe(l, n, &device->ccw_queue) {
1163 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1164
1165 /* Stop list processing at the first non-final request. */
1166 if (cqr->status == DASD_CQR_QUEUED ||
1167 cqr->status == DASD_CQR_IN_IO ||
1168 cqr->status == DASD_CQR_CLEAR_PENDING)
1169 break;
1170 if (cqr->status == DASD_CQR_ERROR) {
1171 __dasd_device_recovery(device, cqr);
1172 }
1173 /* Rechain finished requests to final queue */
1174 list_move_tail(&cqr->devlist, final_queue);
1175 }
1176 }
1177
1178 /*
1179 * the cqrs from the final queue are returned to the upper layer
1180 * by setting a dasd_block state and calling the callback function
1181 */
1182 static void __dasd_device_process_final_queue(struct dasd_device *device,
1183 struct list_head *final_queue)
1184 {
1185 struct list_head *l, *n;
1186 struct dasd_ccw_req *cqr;
1187 struct dasd_block *block;
1188 void (*callback)(struct dasd_ccw_req *, void *data);
1189 void *callback_data;
1190 char errorstring[ERRORLENGTH];
1191
1192 list_for_each_safe(l, n, final_queue) {
1193 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1194 list_del_init(&cqr->devlist);
1195 block = cqr->block;
1196 callback = cqr->callback;
1197 callback_data = cqr->callback_data;
1198 if (block)
1199 spin_lock_bh(&block->queue_lock);
1200 switch (cqr->status) {
1201 case DASD_CQR_SUCCESS:
1202 cqr->status = DASD_CQR_DONE;
1203 break;
1204 case DASD_CQR_ERROR:
1205 cqr->status = DASD_CQR_NEED_ERP;
1206 break;
1207 case DASD_CQR_CLEARED:
1208 cqr->status = DASD_CQR_TERMINATED;
1209 break;
1210 default:
1211 /* internal error 12 - wrong cqr status*/
1212 snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
1213 dev_err(&device->cdev->dev,
1214 "An error occurred in the DASD device driver, "
1215 "reason=%s\n", errorstring);
1216 BUG();
1217 }
1218 if (cqr->callback != NULL)
1219 (callback)(cqr, callback_data);
1220 if (block)
1221 spin_unlock_bh(&block->queue_lock);
1222 }
1223 }
1224
1225 /*
1226 * Take a look at the first request on the ccw queue and check
1227 * if it reached its expire time. If so, terminate the IO.
1228 */
1229 static void __dasd_device_check_expire(struct dasd_device *device)
1230 {
1231 struct dasd_ccw_req *cqr;
1232
1233 if (list_empty(&device->ccw_queue))
1234 return;
1235 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1236 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
1237 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
1238 if (device->discipline->term_IO(cqr) != 0) {
1239 /* Hmpf, try again in 5 sec */
1240 dev_err(&device->cdev->dev,
1241 "cqr %p timed out (%is) but cannot be "
1242 "ended, retrying in 5 s\n",
1243 cqr, (cqr->expires/HZ));
1244 cqr->expires += 5*HZ;
1245 dasd_device_set_timer(device, 5*HZ);
1246 } else {
1247 dev_err(&device->cdev->dev,
1248 "cqr %p timed out (%is), %i retries "
1249 "remaining\n", cqr, (cqr->expires/HZ),
1250 cqr->retries);
1251 }
1252 }
1253 }
1254
1255 /*
1256 * Take a look at the first request on the ccw queue and check
1257 * if it needs to be started.
1258 */
1259 static void __dasd_device_start_head(struct dasd_device *device)
1260 {
1261 struct dasd_ccw_req *cqr;
1262 int rc;
1263
1264 if (list_empty(&device->ccw_queue))
1265 return;
1266 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1267 if (cqr->status != DASD_CQR_QUEUED)
1268 return;
1269 /* when device is stopped, return request to previous layer */
1270 if (device->stopped) {
1271 cqr->status = DASD_CQR_CLEARED;
1272 dasd_schedule_device_bh(device);
1273 return;
1274 }
1275
1276 rc = device->discipline->start_IO(cqr);
1277 if (rc == 0)
1278 dasd_device_set_timer(device, cqr->expires);
1279 else if (rc == -EACCES) {
1280 dasd_schedule_device_bh(device);
1281 } else
1282 /* Hmpf, try again in 1/2 sec */
1283 dasd_device_set_timer(device, 50);
1284 }
1285
1286 /*
1287 * Go through all requests on the dasd_device request queue,
1288 * terminate them on the cdev if necessary, and return them to the
1289 * submitting layer via callback.
1290 * Note:
1291 * Make sure that all 'submitting layers' still exist when
1292 * this function is called! In other words, when 'device' is a base
1293 * device then all block layer requests must have been removed beforehand
1294 * via dasd_flush_block_queue.
1295 */
1296 int dasd_flush_device_queue(struct dasd_device *device)
1297 {
1298 struct dasd_ccw_req *cqr, *n;
1299 int rc;
1300 struct list_head flush_queue;
1301
1302 INIT_LIST_HEAD(&flush_queue);
1303 spin_lock_irq(get_ccwdev_lock(device->cdev));
1304 rc = 0;
1305 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
1306 /* Check status and move request to flush_queue */
1307 switch (cqr->status) {
1308 case DASD_CQR_IN_IO:
1309 rc = device->discipline->term_IO(cqr);
1310 if (rc) {
1311 /* unable to terminate request */
1312 dev_err(&device->cdev->dev,
1313 "Flushing the DASD request queue "
1314 "failed for request %p\n", cqr);
1315 /* stop flush processing */
1316 goto finished;
1317 }
1318 break;
1319 case DASD_CQR_QUEUED:
1320 cqr->stopclk = get_clock();
1321 cqr->status = DASD_CQR_CLEARED;
1322 break;
1323 default: /* no need to modify the others */
1324 break;
1325 }
1326 list_move_tail(&cqr->devlist, &flush_queue);
1327 }
1328 finished:
1329 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1330 /*
1331 * After this point all requests must be in state CLEAR_PENDING,
1332 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
1333 * one of the others.
1334 */
1335 list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
1336 wait_event(dasd_flush_wq,
1337 (cqr->status != DASD_CQR_CLEAR_PENDING));
1338 /*
1339 * Now set each request back to TERMINATED, DONE or NEED_ERP
1340 * and call the callback function of flushed requests
1341 */
1342 __dasd_device_process_final_queue(device, &flush_queue);
1343 return rc;
1344 }
1345
1346 /*
1347 * Acquire the device lock and process queues for the device.
1348 */
1349 static void dasd_device_tasklet(struct dasd_device *device)
1350 {
1351 struct list_head final_queue;
1352
1353 atomic_set (&device->tasklet_scheduled, 0);
1354 INIT_LIST_HEAD(&final_queue);
1355 spin_lock_irq(get_ccwdev_lock(device->cdev));
1356 /* Check expire time of first request on the ccw queue. */
1357 __dasd_device_check_expire(device);
1358 /* find final requests on ccw queue */
1359 __dasd_device_process_ccw_queue(device, &final_queue);
1360 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1361 /* Now call the callback function of requests with final status */
1362 __dasd_device_process_final_queue(device, &final_queue);
1363 spin_lock_irq(get_ccwdev_lock(device->cdev));
1364 /* Now check if the head of the ccw queue needs to be started. */
1365 __dasd_device_start_head(device);
1366 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1367 dasd_put_device(device);
1368 }
1369
1370 /*
1371 * Schedules a call to dasd_device_tasklet over the device's tasklet.
1372 */
1373 void dasd_schedule_device_bh(struct dasd_device *device)
1374 {
1375 /* Protect against rescheduling. */
1376 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
1377 return;
1378 dasd_get_device(device);
1379 tasklet_hi_schedule(&device->tasklet);
1380 }
1381
1382 /*
1383 * Queue a request to the head of the device ccw_queue.
1384 * Start the I/O if possible.
1385 */
1386 void dasd_add_request_head(struct dasd_ccw_req *cqr)
1387 {
1388 struct dasd_device *device;
1389 unsigned long flags;
1390
1391 device = cqr->startdev;
1392 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1393 cqr->status = DASD_CQR_QUEUED;
1394 list_add(&cqr->devlist, &device->ccw_queue);
1395 /* let the bh start the request to keep them in order */
1396 dasd_schedule_device_bh(device);
1397 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1398 }
1399
1400 /*
1401 * Queue a request to the tail of the device ccw_queue.
1402 * Start the I/O if possible.
1403 */
1404 void dasd_add_request_tail(struct dasd_ccw_req *cqr)
1405 {
1406 struct dasd_device *device;
1407 unsigned long flags;
1408
1409 device = cqr->startdev;
1410 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1411 cqr->status = DASD_CQR_QUEUED;
1412 list_add_tail(&cqr->devlist, &device->ccw_queue);
1413 /* let the bh start the request to keep them in order */
1414 dasd_schedule_device_bh(device);
1415 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1416 }
1417
1418 /*
1419 * Wakeup helper for the 'sleep_on' functions.
1420 */
1421 static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1422 {
1423 wake_up((wait_queue_head_t *) data);
1424 }
1425
1426 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
1427 {
1428 struct dasd_device *device;
1429 int rc;
1430
1431 device = cqr->startdev;
1432 spin_lock_irq(get_ccwdev_lock(device->cdev));
1433 rc = ((cqr->status == DASD_CQR_DONE ||
1434 cqr->status == DASD_CQR_NEED_ERP ||
1435 cqr->status == DASD_CQR_TERMINATED) &&
1436 list_empty(&cqr->devlist));
1437 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1438 return rc;
1439 }
1440
1441 /*
1442 * Queue a request to the tail of the device ccw_queue and wait for
1443 * its completion.
1444 */
1445 int dasd_sleep_on(struct dasd_ccw_req *cqr)
1446 {
1447 struct dasd_device *device;
1448 int rc;
1449
1450 device = cqr->startdev;
1451
1452 cqr->callback = dasd_wakeup_cb;
1453 cqr->callback_data = (void *) &generic_waitq;
1454 dasd_add_request_tail(cqr);
1455 wait_event(generic_waitq, _wait_for_wakeup(cqr));
1456
1457 /* Request status is either done or failed. */
1458 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1459 return rc;
1460 }
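
/*
 * Synchronous usage sketch: submit a request built with the (hypothetical)
 * example_build_nop helper from the allocation sketch above and wait for
 * its completion, then hand the memory back to the per-device chunk pool.
 */
static int example_run_sync(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = example_build_nop(device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	rc = dasd_sleep_on(cqr);	/* queued at the tail, woken by the bh */
	dasd_sfree_request(cqr, device);
	return rc;
}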
1461
1462 /*
1463 * Queue a request to the tail of the device ccw_queue and wait
1464 * interruptibly for its completion.
1465 */
1466 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
1467 {
1468 struct dasd_device *device;
1469 int rc;
1470
1471 device = cqr->startdev;
1472 cqr->callback = dasd_wakeup_cb;
1473 cqr->callback_data = (void *) &generic_waitq;
1474 dasd_add_request_tail(cqr);
1475 rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
1476 if (rc == -ERESTARTSYS) {
1477 dasd_cancel_req(cqr);
1478 /* wait (non-interruptible) for final status */
1479 wait_event(generic_waitq, _wait_for_wakeup(cqr));
1480 }
1481 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1482 return rc;
1483 }
1484
1485 /*
1486 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
1487 * for eckd devices) the currently running request has to be terminated
1488 * and be put back to status queued, before the special request is added
1489 * to the head of the queue. Then the special request is waited on normally.
1490 */
1491 static inline int _dasd_term_running_cqr(struct dasd_device *device)
1492 {
1493 struct dasd_ccw_req *cqr;
1494
1495 if (list_empty(&device->ccw_queue))
1496 return 0;
1497 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1498 return device->discipline->term_IO(cqr);
1499 }
1500
1501 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
1502 {
1503 struct dasd_device *device;
1504 int rc;
1505
1506 device = cqr->startdev;
1507 spin_lock_irq(get_ccwdev_lock(device->cdev));
1508 rc = _dasd_term_running_cqr(device);
1509 if (rc) {
1510 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1511 return rc;
1512 }
1513
1514 cqr->callback = dasd_wakeup_cb;
1515 cqr->callback_data = (void *) &generic_waitq;
1516 cqr->status = DASD_CQR_QUEUED;
1517 list_add(&cqr->devlist, &device->ccw_queue);
1518
1519 /* let the bh start the request to keep them in order */
1520 dasd_schedule_device_bh(device);
1521
1522 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1523
1524 wait_event(generic_waitq, _wait_for_wakeup(cqr));
1525
1526 /* Request status is either done or failed. */
1527 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1528 return rc;
1529 }
1530
1531 /*
1532 * Cancels a request that was started with dasd_sleep_on.
1533 * This is useful to time out requests. The request will be
1534 * terminated if it is currently in i/o.
1535 * Returns 1 if the request has been terminated.
1536 * 0 if there was no need to terminate the request (not started yet)
1537 * negative error code if termination failed
1538 * Cancellation of a request is an asynchronous operation! The calling
1539 * function has to wait until the request is properly returned via callback.
1540 */
1541 int dasd_cancel_req(struct dasd_ccw_req *cqr)
1542 {
1543 struct dasd_device *device = cqr->startdev;
1544 unsigned long flags;
1545 int rc;
1546
1547 rc = 0;
1548 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1549 switch (cqr->status) {
1550 case DASD_CQR_QUEUED:
1551 /* request was not started - just set to cleared */
1552 cqr->status = DASD_CQR_CLEARED;
1553 break;
1554 case DASD_CQR_IN_IO:
1555 /* request in IO - terminate IO and release again */
1556 rc = device->discipline->term_IO(cqr);
1557 if (rc) {
1558 dev_err(&device->cdev->dev,
1559 "Cancelling request %p failed with rc=%d\n",
1560 cqr, rc);
1561 } else {
1562 cqr->stopclk = get_clock();
1563 rc = 1;
1564 }
1565 break;
1566 default: /* already finished or clear pending - do nothing */
1567 break;
1568 }
1569 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1570 dasd_schedule_device_bh(device);
1571 return rc;
1572 }
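
/*
 * Sketch of the "time out a request" usage mentioned above (illustrative;
 * the 30 * HZ limit is an arbitrary example value): if the wait times out,
 * cancel the request and, because cancellation is asynchronous, wait again
 * for the final status delivered via the callback.
 */
static int example_sleep_on_timeout(struct dasd_ccw_req *cqr)
{
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &generic_waitq;
	dasd_add_request_tail(cqr);
	if (!wait_event_timeout(generic_waitq, _wait_for_wakeup(cqr), 30 * HZ)) {
		dasd_cancel_req(cqr);
		wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}
	return (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
}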
1573
1574
1575 /*
1576 * SECTION: Operations of the dasd_block layer.
1577 */
1578
1579 /*
1580 * Timeout function for dasd_block. This is used when the block layer
1581 * is waiting for something that may not come reliably, (e.g. a state
1582 * change interrupt)
1583 */
1584 static void dasd_block_timeout(unsigned long ptr)
1585 {
1586 unsigned long flags;
1587 struct dasd_block *block;
1588
1589 block = (struct dasd_block *) ptr;
1590 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
1591 /* re-activate request queue */
1592 block->base->stopped &= ~DASD_STOPPED_PENDING;
1593 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
1594 dasd_schedule_block_bh(block);
1595 }
1596
1597 /*
1598 * Setup timeout for a dasd_block in jiffies.
1599 */
1600 void dasd_block_set_timer(struct dasd_block *block, int expires)
1601 {
1602 if (expires == 0)
1603 del_timer(&block->timer);
1604 else
1605 mod_timer(&block->timer, jiffies + expires);
1606 }
1607
1608 /*
1609 * Clear timeout for a dasd_block.
1610 */
1611 void dasd_block_clear_timer(struct dasd_block *block)
1612 {
1613 del_timer(&block->timer);
1614 }
1615
1616 /*
1617 * Process finished error recovery ccw.
1618 */
1619 static inline void __dasd_block_process_erp(struct dasd_block *block,
1620 struct dasd_ccw_req *cqr)
1621 {
1622 dasd_erp_fn_t erp_fn;
1623 struct dasd_device *device = block->base;
1624
1625 if (cqr->status == DASD_CQR_DONE)
1626 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1627 else
1628 dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
1629 erp_fn = device->discipline->erp_postaction(cqr);
1630 erp_fn(cqr);
1631 }
1632
1633 /*
1634 * Fetch requests from the block device queue.
1635 */
1636 static void __dasd_process_request_queue(struct dasd_block *block)
1637 {
1638 struct request_queue *queue;
1639 struct request *req;
1640 struct dasd_ccw_req *cqr;
1641 struct dasd_device *basedev;
1642 unsigned long flags;
1643 queue = block->request_queue;
1644 basedev = block->base;
1645 /* No queue ? Then there is nothing to do. */
1646 if (queue == NULL)
1647 return;
1648
1649 /*
1650 * We requeue request from the block device queue to the ccw
1651 * queue only in two states. In state DASD_STATE_READY the
1652 * partition detection is done and we need to requeue requests
1653 * for that. State DASD_STATE_ONLINE is normal block device
1654 * operation.
1655 */
1656 if (basedev->state < DASD_STATE_READY)
1657 return;
1658 /* Now we try to fetch requests from the request queue */
1659 while (!blk_queue_plugged(queue) &&
1660 elv_next_request(queue)) {
1661
1662 req = elv_next_request(queue);
1663
1664 if (basedev->features & DASD_FEATURE_READONLY &&
1665 rq_data_dir(req) == WRITE) {
1666 DBF_DEV_EVENT(DBF_ERR, basedev,
1667 "Rejecting write request %p",
1668 req);
1669 blkdev_dequeue_request(req);
1670 __blk_end_request_all(req, -EIO);
1671 continue;
1672 }
1673 cqr = basedev->discipline->build_cp(basedev, block, req);
1674 if (IS_ERR(cqr)) {
1675 if (PTR_ERR(cqr) == -EBUSY)
1676 break; /* normal end condition */
1677 if (PTR_ERR(cqr) == -ENOMEM)
1678 break; /* terminate request queue loop */
1679 if (PTR_ERR(cqr) == -EAGAIN) {
1680 /*
1681 * The current request cannot be built right
1682 * now, we have to try later. If this request
1683 * is the head-of-queue we stop the device
1684 * for 1/2 second.
1685 */
1686 if (!list_empty(&block->ccw_queue))
1687 break;
1688 spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
1689 basedev->stopped |= DASD_STOPPED_PENDING;
1690 spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
1691 dasd_block_set_timer(block, HZ/2);
1692 break;
1693 }
1694 DBF_DEV_EVENT(DBF_ERR, basedev,
1695 "CCW creation failed (rc=%ld) "
1696 "on request %p",
1697 PTR_ERR(cqr), req);
1698 blkdev_dequeue_request(req);
1699 __blk_end_request_all(req, -EIO);
1700 continue;
1701 }
1702 /*
1703 * Note: callback is set to dasd_return_cqr_cb in
1704 * __dasd_block_start_head to cover erp requests as well
1705 */
1706 cqr->callback_data = (void *) req;
1707 cqr->status = DASD_CQR_FILLED;
1708 blkdev_dequeue_request(req);
1709 list_add_tail(&cqr->blocklist, &block->ccw_queue);
1710 dasd_profile_start(block, cqr, req);
1711 }
1712 }
1713
1714 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1715 {
1716 struct request *req;
1717 int status;
1718 int error = 0;
1719
1720 req = (struct request *) cqr->callback_data;
1721 dasd_profile_end(cqr->block, cqr, req);
1722 status = cqr->block->base->discipline->free_cp(cqr, req);
1723 if (status <= 0)
1724 error = status ? status : -EIO;
1725 __blk_end_request_all(req, error);
1726 }
1727
1728 /*
1729 * Process ccw request queue.
1730 */
1731 static void __dasd_process_block_ccw_queue(struct dasd_block *block,
1732 struct list_head *final_queue)
1733 {
1734 struct list_head *l, *n;
1735 struct dasd_ccw_req *cqr;
1736 dasd_erp_fn_t erp_fn;
1737 unsigned long flags;
1738 struct dasd_device *base = block->base;
1739
1740 restart:
1741 /* Process request with final status. */
1742 list_for_each_safe(l, n, &block->ccw_queue) {
1743 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1744 if (cqr->status != DASD_CQR_DONE &&
1745 cqr->status != DASD_CQR_FAILED &&
1746 cqr->status != DASD_CQR_NEED_ERP &&
1747 cqr->status != DASD_CQR_TERMINATED)
1748 continue;
1749
1750 if (cqr->status == DASD_CQR_TERMINATED) {
1751 base->discipline->handle_terminated_request(cqr);
1752 goto restart;
1753 }
1754
1755 /* Process requests that may be recovered */
1756 if (cqr->status == DASD_CQR_NEED_ERP) {
1757 erp_fn = base->discipline->erp_action(cqr);
1758 erp_fn(cqr);
1759 goto restart;
1760 }
1761
1762 /* log sense for fatal error */
1763 if (cqr->status == DASD_CQR_FAILED) {
1764 dasd_log_sense(cqr, &cqr->irb);
1765 }
1766
1767 /* First of all call extended error reporting. */
1768 if (dasd_eer_enabled(base) &&
1769 cqr->status == DASD_CQR_FAILED) {
1770 dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
1771
1772 /* restart request */
1773 cqr->status = DASD_CQR_FILLED;
1774 cqr->retries = 255;
1775 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
1776 base->stopped |= DASD_STOPPED_QUIESCE;
1777 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
1778 flags);
1779 goto restart;
1780 }
1781
1782 /* Process finished ERP request. */
1783 if (cqr->refers) {
1784 __dasd_block_process_erp(block, cqr);
1785 goto restart;
1786 }
1787
1788 /* Rechain finished requests to final queue */
1789 cqr->endclk = get_clock();
1790 list_move_tail(&cqr->blocklist, final_queue);
1791 }
1792 }
1793
1794 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
1795 {
1796 dasd_schedule_block_bh(cqr->block);
1797 }
1798
1799 static void __dasd_block_start_head(struct dasd_block *block)
1800 {
1801 struct dasd_ccw_req *cqr;
1802
1803 if (list_empty(&block->ccw_queue))
1804 return;
1805 /* We always begin with the first request on the queue, as some
1806 * of the previously started requests have to be enqueued on a
1807 * dasd_device again for error recovery.
1808 */
1809 list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
1810 if (cqr->status != DASD_CQR_FILLED)
1811 continue;
1812 /* Non-temporary stop condition will trigger fail fast */
1813 if (block->base->stopped & ~DASD_STOPPED_PENDING &&
1814 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
1815 (!dasd_eer_enabled(block->base))) {
1816 cqr->status = DASD_CQR_FAILED;
1817 dasd_schedule_block_bh(block);
1818 continue;
1819 }
1820 /* Don't try to start requests if device is stopped */
1821 if (block->base->stopped)
1822 return;
1823
1824 /* just a fail safe check, should not happen */
1825 if (!cqr->startdev)
1826 cqr->startdev = block->base;
1827
1828 /* make sure that the requests we submit find their way back */
1829 cqr->callback = dasd_return_cqr_cb;
1830
1831 dasd_add_request_tail(cqr);
1832 }
1833 }
1834
1835 /*
1836 * Central dasd_block layer routine. Takes requests from the generic
1837 * block layer request queue, creates ccw requests, enqueues them on
1838 * a dasd_device and processes ccw requests that have been returned.
1839 */
1840 static void dasd_block_tasklet(struct dasd_block *block)
1841 {
1842 struct list_head final_queue;
1843 struct list_head *l, *n;
1844 struct dasd_ccw_req *cqr;
1845
1846 atomic_set(&block->tasklet_scheduled, 0);
1847 INIT_LIST_HEAD(&final_queue);
1848 spin_lock(&block->queue_lock);
1849 /* Finish off requests on ccw queue */
1850 __dasd_process_block_ccw_queue(block, &final_queue);
1851 spin_unlock(&block->queue_lock);
1852 /* Now call the callback function of requests with final status */
1853 spin_lock_irq(&block->request_queue_lock);
1854 list_for_each_safe(l, n, &final_queue) {
1855 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1856 list_del_init(&cqr->blocklist);
1857 __dasd_cleanup_cqr(cqr);
1858 }
1859 spin_lock(&block->queue_lock);
1860 /* Get new request from the block device request queue */
1861 __dasd_process_request_queue(block);
1862 /* Now check if the head of the ccw queue needs to be started. */
1863 __dasd_block_start_head(block);
1864 spin_unlock(&block->queue_lock);
1865 spin_unlock_irq(&block->request_queue_lock);
1866 dasd_put_device(block->base);
1867 }
1868
1869 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
1870 {
1871 wake_up(&dasd_flush_wq);
1872 }
1873
1874 /*
1875 * Go through all requests on the dasd_block request queue, cancel them
1876 * on the respective dasd_device, and return them to the generic
1877 * block layer.
1878 */
1879 static int dasd_flush_block_queue(struct dasd_block *block)
1880 {
1881 struct dasd_ccw_req *cqr, *n;
1882 int rc, i;
1883 struct list_head flush_queue;
1884
1885 INIT_LIST_HEAD(&flush_queue);
1886 spin_lock_bh(&block->queue_lock);
1887 rc = 0;
1888 restart:
1889 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
1890 /* if this request is currently owned by a dasd_device, cancel it */
1891 if (cqr->status >= DASD_CQR_QUEUED)
1892 rc = dasd_cancel_req(cqr);
1893 if (rc < 0)
1894 break;
1895 /* Rechain request (including erp chain) so it won't be
1896 * touched by the dasd_block_tasklet anymore.
1897 * Replace the callback so we notice when the request
1898 * is returned from the dasd_device layer.
1899 */
1900 cqr->callback = _dasd_wake_block_flush_cb;
1901 for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
1902 list_move_tail(&cqr->blocklist, &flush_queue);
1903 if (i > 1)
1904 /* moved more than one request - need to restart */
1905 goto restart;
1906 }
1907 spin_unlock_bh(&block->queue_lock);
1908 /* Now call the callback function of flushed requests */
1909 restart_cb:
1910 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
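/* wait until the dasd_device layer has given the request back;
 * the replaced callback _dasd_wake_block_flush_cb wakes us up */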
1911 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
1912 /* Process finished ERP request. */
1913 if (cqr->refers) {
1914 spin_lock_bh(&block->queue_lock);
1915 __dasd_block_process_erp(block, cqr);
1916 spin_unlock_bh(&block->queue_lock);
1917 /* restart list_for_xx loop since dasd_process_erp
1918 * might remove multiple elements */
1919 goto restart_cb;
1920 }
1921 /* call the callback function */
1922 spin_lock_irq(&block->request_queue_lock);
1923 cqr->endclk = get_clock();
1924 list_del_init(&cqr->blocklist);
1925 __dasd_cleanup_cqr(cqr);
1926 spin_unlock_irq(&block->request_queue_lock);
1927 }
1928 return rc;
1929 }
1930
1931 /*
1932 * Schedules a call to dasd_block_tasklet over the block's tasklet.
1933 */
1934 void dasd_schedule_block_bh(struct dasd_block *block)
1935 {
1936 /* Protect against rescheduling. */
1937 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
1938 return;
1939 /* the life cycle of the block is bound to its base device */
1940 dasd_get_device(block->base);
1941 tasklet_hi_schedule(&block->tasklet);
1942 }
1943
1944
1945 /*
1946 * SECTION: external block device operations
1947 * (request queue handling, open, release, etc.)
1948 */
1949
1950 /*
1951 * Dasd request queue function. Called from the block layer.
1952 */
1953 static void do_dasd_request(struct request_queue *queue)
1954 {
1955 struct dasd_block *block;
1956
1957 block = queue->queuedata;
1958 spin_lock(&block->queue_lock);
1959 /* Get new request from the block device request queue */
1960 __dasd_process_request_queue(block);
1961 /* Now check if the head of the ccw queue needs to be started. */
1962 __dasd_block_start_head(block);
1963 spin_unlock(&block->queue_lock);
1964 }
1965
1966 /*
1967 * Allocate and initialize request queue and default I/O scheduler.
1968 */
1969 static int dasd_alloc_queue(struct dasd_block *block)
1970 {
1971 int rc;
1972
1973 block->request_queue = blk_init_queue(do_dasd_request,
1974 &block->request_queue_lock);
1975 if (block->request_queue == NULL)
1976 return -ENOMEM;
1977
1978 block->request_queue->queuedata = block;
1979
1980 elevator_exit(block->request_queue->elevator);
1981 block->request_queue->elevator = NULL;
1982 rc = elevator_init(block->request_queue, "deadline");
1983 if (rc) {
1984 blk_cleanup_queue(block->request_queue);
1985 return rc;
1986 }
1987 return 0;
1988 }
1989
1990 /*
1991 * Configure the request queue limits (block size, maximum request size and segments).
1992 */
1993 static void dasd_setup_queue(struct dasd_block *block)
1994 {
1995 int max;
1996
1997 blk_queue_hardsect_size(block->request_queue, block->bp_block);
1998 max = block->base->discipline->max_blocks << block->s2b_shift;
1999 blk_queue_max_sectors(block->request_queue, max);
2000 blk_queue_max_phys_segments(block->request_queue, -1L);
2001 blk_queue_max_hw_segments(block->request_queue, -1L);
2002 /* with page sized segments we can translate each segment into
2003 * one idaw/tidaw
2004 */
2005 blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
2006 blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
2007 blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
2008 }
2009
2010 /*
2011 * Deactivate and free request queue.
2012 */
2013 static void dasd_free_queue(struct dasd_block *block)
2014 {
2015 if (block->request_queue) {
2016 blk_cleanup_queue(block->request_queue);
2017 block->request_queue = NULL;
2018 }
2019 }
2020
2021 /*
2022 * Flush all requests remaining on the request queue and end them with -EIO.
2023 */
2024 static void dasd_flush_request_queue(struct dasd_block *block)
2025 {
2026 struct request *req;
2027
2028 if (!block->request_queue)
2029 return;
2030
2031 spin_lock_irq(&block->request_queue_lock);
2032 while ((req = elv_next_request(block->request_queue))) {
2033 blkdev_dequeue_request(req);
2034 __blk_end_request_all(req, -EIO);
2035 }
2036 spin_unlock_irq(&block->request_queue_lock);
2037 }
2038
2039 static int dasd_open(struct block_device *bdev, fmode_t mode)
2040 {
2041 struct dasd_block *block = bdev->bd_disk->private_data;
2042 struct dasd_device *base = block->base;
2043 int rc;
2044
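/* take an open reference first; it is dropped again on the
 * error paths below and in dasd_release */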
2045 atomic_inc(&block->open_count);
2046 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
2047 rc = -ENODEV;
2048 goto unlock;
2049 }
2050
2051 if (!try_module_get(base->discipline->owner)) {
2052 rc = -EINVAL;
2053 goto unlock;
2054 }
2055
2056 if (dasd_probeonly) {
2057 dev_info(&base->cdev->dev,
2058 "Accessing the DASD failed because it is in "
2059 "probeonly mode\n");
2060 rc = -EPERM;
2061 goto out;
2062 }
2063
2064 if (base->state <= DASD_STATE_BASIC) {
2065 DBF_DEV_EVENT(DBF_ERR, base, " %s",
2066 " Cannot open unrecognized device");
2067 rc = -ENODEV;
2068 goto out;
2069 }
2070
2071 return 0;
2072
2073 out:
2074 module_put(base->discipline->owner);
2075 unlock:
2076 atomic_dec(&block->open_count);
2077 return rc;
2078 }
2079
2080 static int dasd_release(struct gendisk *disk, fmode_t mode)
2081 {
2082 struct dasd_block *block = disk->private_data;
2083
2084 atomic_dec(&block->open_count);
2085 module_put(block->base->discipline->owner);
2086 return 0;
2087 }
2088
2089 /*
2090 * Return disk geometry.
2091 */
2092 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
2093 {
2094 struct dasd_block *block;
2095 struct dasd_device *base;
2096
2097 block = bdev->bd_disk->private_data;
2098 if (!block)
2099 return -ENODEV;
2100 base = block->base;
2101
2102 if (!base->discipline ||
2103 !base->discipline->fill_geometry)
2104 return -EINVAL;
2105
2106 base->discipline->fill_geometry(block, geo);
2107 geo->start = get_start_sect(bdev) >> block->s2b_shift;
2108 return 0;
2109 }
2110
2111 struct block_device_operations
2112 dasd_device_operations = {
2113 .owner = THIS_MODULE,
2114 .open = dasd_open,
2115 .release = dasd_release,
2116 .ioctl = dasd_ioctl,
2117 .compat_ioctl = dasd_ioctl,
2118 .getgeo = dasd_getgeo,
2119 };
2120
2121 /*******************************************************************************
2122 * end of block device operations
2123 */
2124
2125 static void
2126 dasd_exit(void)
2127 {
2128 #ifdef CONFIG_PROC_FS
2129 dasd_proc_exit();
2130 #endif
2131 dasd_eer_exit();
2132 if (dasd_page_cache != NULL) {
2133 kmem_cache_destroy(dasd_page_cache);
2134 dasd_page_cache = NULL;
2135 }
2136 dasd_gendisk_exit();
2137 dasd_devmap_exit();
2138 if (dasd_debug_area != NULL) {
2139 debug_unregister(dasd_debug_area);
2140 dasd_debug_area = NULL;
2141 }
2142 }
2143
2144 /*
2145 * SECTION: common functions for ccw_driver use
2146 */
2147
2148 static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
2149 {
2150 struct ccw_device *cdev = data;
2151 int ret;
2152
2153 ret = ccw_device_set_online(cdev);
2154 if (ret)
2155 pr_warning("%s: Setting the DASD online failed with rc=%d\n",
2156 dev_name(&cdev->dev), ret);
2157 else {
2158 struct dasd_device *device = dasd_device_from_cdev(cdev);
2159 wait_event(dasd_init_waitq, _wait_for_device(device));
2160 dasd_put_device(device);
2161 }
2162 }
2163
2164 /*
2165 * Initial attempt at a probe function. This can be simplified once
2166 * the other detection code is gone.
2167 */
2168 int dasd_generic_probe(struct ccw_device *cdev,
2169 struct dasd_discipline *discipline)
2170 {
2171 int ret;
2172
2173 ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
2174 if (ret) {
2175 DBF_EVENT(DBF_WARNING,
2176 "dasd_generic_probe: could not set ccw-device options "
2177 "for %s\n", dev_name(&cdev->dev));
2178 return ret;
2179 }
2180 ret = dasd_add_sysfs_files(cdev);
2181 if (ret) {
2182 DBF_EVENT(DBF_WARNING,
2183 "dasd_generic_probe: could not add sysfs entries "
2184 "for %s\n", dev_name(&cdev->dev));
2185 return ret;
2186 }
2187 cdev->handler = &dasd_int_handler;
2188
2189 /*
2190 * Automatically online either all dasd devices (dasd_autodetect)
2191 * or all devices specified with dasd= parameters during
2192 * initial probe.
2193 */
2194 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
2195 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
2196 async_schedule(dasd_generic_auto_online, cdev);
2197 return 0;
2198 }
2199
2200 /*
2201 * This will one day be called from a global not_oper handler.
2202 * It is also used by driver_unregister during module unload.
2203 */
2204 void dasd_generic_remove(struct ccw_device *cdev)
2205 {
2206 struct dasd_device *device;
2207 struct dasd_block *block;
2208
2209 cdev->handler = NULL;
2210
2211 dasd_remove_sysfs_files(cdev);
2212 device = dasd_device_from_cdev(cdev);
2213 if (IS_ERR(device))
2214 return;
2215 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2216 /* Already doing offline processing */
2217 dasd_put_device(device);
2218 return;
2219 }
2220 /*
2221 * This device is removed unconditionally. Set offline
2222 * flag to prevent dasd_open from opening it while it is
2223 * not quite down yet.
2224 */
2225 dasd_set_target_state(device, DASD_STATE_NEW);
2226 /* dasd_delete_device destroys the device reference. */
2227 block = device->block;
2228 device->block = NULL;
2229 dasd_delete_device(device);
2230 /*
2231 * the life cycle of the block is bound to the device, so delete it
2232 * only after the device has been safely removed
2233 */
2234 if (block)
2235 dasd_free_block(block);
2236 }
2237
2238 /*
2239 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
2240 * the device is detected for the first time and is supposed to be used
2241 * or the user has started activation through sysfs.
2242 */
2243 int dasd_generic_set_online(struct ccw_device *cdev,
2244 struct dasd_discipline *base_discipline)
2245 {
2246 struct dasd_discipline *discipline;
2247 struct dasd_device *device;
2248 int rc;
2249
2250 /* first online clears initial online feature flag */
2251 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
2252 device = dasd_create_device(cdev);
2253 if (IS_ERR(device))
2254 return PTR_ERR(device);
2255
2256 discipline = base_discipline;
2257 if (device->features & DASD_FEATURE_USEDIAG) {
2258 if (!dasd_diag_discipline_pointer) {
2259 pr_warning("%s Setting the DASD online failed because "
2260 "of missing DIAG discipline\n",
2261 dev_name(&cdev->dev));
2262 dasd_delete_device(device);
2263 return -ENODEV;
2264 }
2265 discipline = dasd_diag_discipline_pointer;
2266 }
2267 if (!try_module_get(base_discipline->owner)) {
2268 dasd_delete_device(device);
2269 return -EINVAL;
2270 }
2271 if (!try_module_get(discipline->owner)) {
2272 module_put(base_discipline->owner);
2273 dasd_delete_device(device);
2274 return -EINVAL;
2275 }
2276 device->base_discipline = base_discipline;
2277 device->discipline = discipline;
2278
2279 /* check_device will allocate block device if necessary */
2280 rc = discipline->check_device(device);
2281 if (rc) {
2282 pr_warning("%s Setting the DASD online with discipline %s "
2283 "failed with rc=%i\n",
2284 dev_name(&cdev->dev), discipline->name, rc);
2285 module_put(discipline->owner);
2286 module_put(base_discipline->owner);
2287 dasd_delete_device(device);
2288 return rc;
2289 }
2290
2291 dasd_set_target_state(device, DASD_STATE_ONLINE);
2292 if (device->state <= DASD_STATE_KNOWN) {
2293 pr_warning("%s Setting the DASD online failed because of a "
2294 "missing discipline\n", dev_name(&cdev->dev));
2295 rc = -ENODEV;
2296 dasd_set_target_state(device, DASD_STATE_NEW);
2297 if (device->block)
2298 dasd_free_block(device->block);
2299 dasd_delete_device(device);
2300 } else
2301 pr_debug("dasd_generic device %s found\n",
2302 dev_name(&cdev->dev));
2303 dasd_put_device(device);
2304 return rc;
2305 }
2306
2307 int dasd_generic_set_offline(struct ccw_device *cdev)
2308 {
2309 struct dasd_device *device;
2310 struct dasd_block *block;
2311 int max_count, open_count;
2312
2313 device = dasd_device_from_cdev(cdev);
2314 if (IS_ERR(device))
2315 return PTR_ERR(device);
2316 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2317 /* Already doing offline processing */
2318 dasd_put_device(device);
2319 return 0;
2320 }
2321 /*
2322 * We must make sure that this device is currently not in use.
2323 * The open_count is increased for every opener, which includes
2324 * the blkdev_get in dasd_scan_partitions. We are only interested
2325 * in the other openers.
2326 */
2327 if (device->block) {
2328 max_count = device->block->bdev ? 0 : -1;
2329 open_count = atomic_read(&device->block->open_count);
2330 if (open_count > max_count) {
2331 if (open_count > 0)
2332 pr_warning("%s: The DASD cannot be set offline "
2333 "with open count %i\n",
2334 dev_name(&cdev->dev), open_count);
2335 else
2336 pr_warning("%s: The DASD cannot be set offline "
2337 "while it is in use\n",
2338 dev_name(&cdev->dev));
2339 clear_bit(DASD_FLAG_OFFLINE, &device->flags);
2340 dasd_put_device(device);
2341 return -EBUSY;
2342 }
2343 }
2344 dasd_set_target_state(device, DASD_STATE_NEW);
2345 /* dasd_delete_device destroys the device reference. */
2346 block = device->block;
2347 device->block = NULL;
2348 dasd_delete_device(device);
2349 /*
2350 * the life cycle of the block is bound to the device, so delete it
2351 * only after the device has been safely removed
2352 */
2353 if (block)
2354 dasd_free_block(block);
2355 return 0;
2356 }
2357
2358 int dasd_generic_notify(struct ccw_device *cdev, int event)
2359 {
2360 struct dasd_device *device;
2361 struct dasd_ccw_req *cqr;
2362 int ret;
2363
2364 device = dasd_device_from_cdev_locked(cdev);
2365 if (IS_ERR(device))
2366 return 0;
2367 ret = 0;
2368 switch (event) {
2369 case CIO_GONE:
2370 case CIO_BOXED:
2371 case CIO_NO_PATH:
2372 /* First of all call extended error reporting. */
2373 dasd_eer_write(device, NULL, DASD_EER_NOPATH);
2374
2375 if (device->state < DASD_STATE_BASIC)
2376 break;
2377 /* Device is active. We want to keep it. */
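/* Requests that are currently in I/O are set back to queued and get an
 * extra retry; they are restarted once a path becomes operational
 * again (CIO_OPER below). */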
2378 list_for_each_entry(cqr, &device->ccw_queue, devlist)
2379 if (cqr->status == DASD_CQR_IN_IO) {
2380 cqr->status = DASD_CQR_QUEUED;
2381 cqr->retries++;
2382 }
2383 device->stopped |= DASD_STOPPED_DC_WAIT;
2384 dasd_device_clear_timer(device);
2385 dasd_schedule_device_bh(device);
2386 ret = 1;
2387 break;
2388 case CIO_OPER:
2389 /* FIXME: add a sanity check. */
2390 device->stopped &= ~DASD_STOPPED_DC_WAIT;
2391 dasd_schedule_device_bh(device);
2392 if (device->block)
2393 dasd_schedule_block_bh(device->block);
2394 ret = 1;
2395 break;
2396 }
2397 dasd_put_device(device);
2398 return ret;
2399 }
2400
2401 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2402 void *rdc_buffer,
2403 int rdc_buffer_size,
2404 char *magic)
2405 {
2406 struct dasd_ccw_req *cqr;
2407 struct ccw1 *ccw;
2408
2409 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
2410
2411 if (IS_ERR(cqr)) {
2412 /* internal error 13 - Allocating the RDC request failed */
2413 dev_err(&device->cdev->dev,
2414 "An error occurred in the DASD device driver, "
2415 "reason=%s\n", "13");
2416 return cqr;
2417 }
2418
2419 ccw = cqr->cpaddr;
2420 ccw->cmd_code = CCW_CMD_RDC;
2421 ccw->cda = (__u32)(addr_t)rdc_buffer;
2422 ccw->count = rdc_buffer_size;
2423
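/* the request runs on the device itself; give it 10 seconds
 * (10 * HZ jiffies) to complete and allow two retries */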
2424 cqr->startdev = device;
2425 cqr->memdev = device;
2426 cqr->expires = 10*HZ;
2427 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2428 cqr->retries = 2;
2429 cqr->buildclk = get_clock();
2430 cqr->status = DASD_CQR_FILLED;
2431 return cqr;
2432 }
2433
2434
2435 int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
2436 void **rdc_buffer, int rdc_buffer_size)
2437 {
2438 int ret;
2439 struct dasd_ccw_req *cqr;
2440
2441 cqr = dasd_generic_build_rdc(device, *rdc_buffer, rdc_buffer_size,
2442 magic);
2443 if (IS_ERR(cqr))
2444 return PTR_ERR(cqr);
2445
2446 ret = dasd_sleep_on(cqr);
2447 dasd_sfree_request(cqr, cqr->memdev);
2448 return ret;
2449 }
2450 EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
2451
2452 /*
2453 * In command mode and transport mode we need to look for sense
2454 * data in different places. The sense data itself is always
2455 * an array of 32 bytes, so we can unify the sense data access
2456 * for both modes.
2457 */
2458 char *dasd_get_sense(struct irb *irb)
2459 {
2460 struct tsb *tsb = NULL;
2461 char *sense = NULL;
2462
2463 if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
2464 if (irb->scsw.tm.tcw)
2465 tsb = tcw_get_tsb((struct tcw *)(unsigned long)
2466 irb->scsw.tm.tcw);
2467 if (tsb && tsb->length == 64 && tsb->flags)
2468 switch (tsb->flags & 0x07) {
2469 case 1: /* tsa_iostat */
2470 sense = tsb->tsa.iostat.sense;
2471 break;
2472 case 2: /* tsa_ddpc */
2473 sense = tsb->tsa.ddpc.sense;
2474 break;
2475 default:
2476 /* currently we don't use interrogate data */
2477 break;
2478 }
2479 } else if (irb->esw.esw0.erw.cons) {
2480 sense = irb->ecw;
2481 }
2482 return sense;
2483 }
2484 EXPORT_SYMBOL_GPL(dasd_get_sense);
2485
2486 static int __init dasd_init(void)
2487 {
2488 int rc;
2489
2490 init_waitqueue_head(&dasd_init_waitq);
2491 init_waitqueue_head(&dasd_flush_wq);
2492 init_waitqueue_head(&generic_waitq);
2493
2494 /* register 'common' DASD debug area, used for all DBF_XXX calls */
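/* arguments: 1 page per debug area, 1 debug area, 8 * sizeof(long) bytes per debug entry */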
2495 dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
2496 if (dasd_debug_area == NULL) {
2497 rc = -ENOMEM;
2498 goto failed;
2499 }
2500 debug_register_view(dasd_debug_area, &debug_sprintf_view);
2501 debug_set_level(dasd_debug_area, DBF_WARNING);
2502
2503 DBF_EVENT(DBF_EMERG, "%s", "debug area created");
2504
2505 dasd_diag_discipline_pointer = NULL;
2506
2507 rc = dasd_devmap_init();
2508 if (rc)
2509 goto failed;
2510 rc = dasd_gendisk_init();
2511 if (rc)
2512 goto failed;
2513 rc = dasd_parse();
2514 if (rc)
2515 goto failed;
2516 rc = dasd_eer_init();
2517 if (rc)
2518 goto failed;
2519 #ifdef CONFIG_PROC_FS
2520 rc = dasd_proc_init();
2521 if (rc)
2522 goto failed;
2523 #endif
2524
2525 return 0;
2526 failed:
2527 pr_info("The DASD device driver could not be initialized\n");
2528 dasd_exit();
2529 return rc;
2530 }
2531
2532 module_init(dasd_init);
2533 module_exit(dasd_exit);
2534
2535 EXPORT_SYMBOL(dasd_debug_area);
2536 EXPORT_SYMBOL(dasd_diag_discipline_pointer);
2537
2538 EXPORT_SYMBOL(dasd_add_request_head);
2539 EXPORT_SYMBOL(dasd_add_request_tail);
2540 EXPORT_SYMBOL(dasd_cancel_req);
2541 EXPORT_SYMBOL(dasd_device_clear_timer);
2542 EXPORT_SYMBOL(dasd_block_clear_timer);
2543 EXPORT_SYMBOL(dasd_enable_device);
2544 EXPORT_SYMBOL(dasd_int_handler);
2545 EXPORT_SYMBOL(dasd_kfree_request);
2546 EXPORT_SYMBOL(dasd_kick_device);
2547 EXPORT_SYMBOL(dasd_kmalloc_request);
2548 EXPORT_SYMBOL(dasd_schedule_device_bh);
2549 EXPORT_SYMBOL(dasd_schedule_block_bh);
2550 EXPORT_SYMBOL(dasd_set_target_state);
2551 EXPORT_SYMBOL(dasd_device_set_timer);
2552 EXPORT_SYMBOL(dasd_block_set_timer);
2553 EXPORT_SYMBOL(dasd_sfree_request);
2554 EXPORT_SYMBOL(dasd_sleep_on);
2555 EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2556 EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2557 EXPORT_SYMBOL(dasd_smalloc_request);
2558 EXPORT_SYMBOL(dasd_start_IO);
2559 EXPORT_SYMBOL(dasd_term_IO);
2560
2561 EXPORT_SYMBOL_GPL(dasd_generic_probe);
2562 EXPORT_SYMBOL_GPL(dasd_generic_remove);
2563 EXPORT_SYMBOL_GPL(dasd_generic_notify);
2564 EXPORT_SYMBOL_GPL(dasd_generic_set_online);
2565 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
2566 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
2567 EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
2568 EXPORT_SYMBOL_GPL(dasd_alloc_block);
2569 EXPORT_SYMBOL_GPL(dasd_free_block);