/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */
#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>

#define PRINTK_HEADER "dasd:"

/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_SLEEPON_START_TAG	(void *) 1
#define DASD_SLEEPON_END_TAG	(void *) 2
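/*
 * The sleep-on tags are kept in cqr->callback_data: START_TAG marks a
 * request that has been queued by the sleep_on infrastructure, END_TAG is
 * set by dasd_wakeup_cb once the request has reached a final status.
 */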
/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;
/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	device->timer.function = dasd_device_timeout;
	device->timer.data = (unsigned long) device;
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->restore_device, do_restore_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}
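/*
 * Note on the GFP_DMA allocations above: channel programs built from the
 * ccw/erp pools are accessed by the channel subsystem via 31-bit real
 * addresses, so both pools must come from storage below 2 GB; the chunk
 * lists then hand out pieces of these pages under mem_lock.
 */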
/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	spin_lock_init(&block->request_queue_lock);
	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet,
		     (void (*)(unsigned long)) dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	init_timer(&block->timer);
	block->timer.function = dasd_block_timeout;
	block->timer.data = (unsigned long) block;
	spin_lock_init(&block->profile.lock);

	return block;
}

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
	}
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}
/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		if (device->block->debugfs_dentry)
			debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	if (device->debugfs_dentry)
		debugfs_remove(device->debugfs_dentry);

	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}
/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is setup.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN)
				device->state = DASD_STATE_UNFMT;
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc)
			device->state = DASD_STATE_BASIC;
	} else
		device->state = DASD_STATE_READY;
	return rc;
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_flush_request_queue(block);
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}
/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->ready_to_online) {
		rc = device->discipline->ready_to_online(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			disk = device->block->gdp;
			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
			return 0;
		}
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_READY;
	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}
/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}
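/*
 * Resulting state machine, driven by device->target:
 *
 *	NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *	                    ^
 *	                    |
 *	                  UNFMT (entered when the analysis fails;
 *	                         the only exit is back to BASIC)
 */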
/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);
	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
	schedule_work(&device->reload_device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * dasd_restore_device will schedule a call to do_restore_device to the kernel
 * event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  restore_device);
	device->cdev->drv->restore(device->cdev);
	dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_restore_device to the kernel event daemon. */
	schedule_work(&device->restore_device);
}

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}

/*
 * Enable devices with device numbers in [from..to].
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile_info dasd_global_profile_data;
static struct dentry *dasd_global_profile_dentry;
static struct dentry *dasd_debugfs_global_entry;
/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	if (dasd_global_profile_level) {
		dasd_global_profile_data.dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile_data.dasd_read_nr_req[counter]++;
	}

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
	device = cqr->startdev;
	if (device->profile.data) {
		counter = 1; /* request is not yet queued on the start device */
		list_for_each(l, &device->ccw_queue)
			if (++counter >= 31)
				break;
	}
	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		device->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			device->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&device->profile.lock);
}
/*
 * Add profiling information for cqr after execution.
 */

#define dasd_profile_counter(value, index)			   \
{								   \
	for (index = 0; index < 31 && value >> (2+index); index++) \
		;						   \
}
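/*
 * This produces log2-scaled histogram buckets: index 0 collects values
 * below 4, index i (for i >= 1) collects values in [2^(i+1), 2^(i+2)).
 * Worked example: value = 100 ends up at index 5, because 100 >> 6 is
 * still nonzero while 100 >> 7 is zero, and indeed 100 lies in [64, 128).
 */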
static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
{
	/* in case of an overflow, reset the whole profile */
	if (data->dasd_io_reqs == UINT_MAX) {
		memset(data, 0, sizeof(*data));
		getnstimeofday(&data->starttod);
	}
	data->dasd_io_reqs++;
	data->dasd_io_sects += sectors;
	if (is_alias)
		data->dasd_io_alias++;
	if (is_tpm)
		data->dasd_io_tpm++;

	data->dasd_io_secs[sectors_ind]++;
	data->dasd_io_times[tottime_ind]++;
	data->dasd_io_timps[tottimeps_ind]++;
	data->dasd_io_time1[strtime_ind]++;
	data->dasd_io_time2[irqtime_ind]++;
	data->dasd_io_time2ps[irqtimeps_ind]++;
	data->dasd_io_time3[endtime_ind]++;

	if (is_read) {
		data->dasd_read_reqs++;
		data->dasd_read_sects += sectors;
		if (is_alias)
			data->dasd_read_alias++;
		if (is_tpm)
			data->dasd_read_tpm++;
		data->dasd_read_secs[sectors_ind]++;
		data->dasd_read_times[tottime_ind]++;
		data->dasd_read_time1[strtime_ind]++;
		data->dasd_read_time2[irqtime_ind]++;
		data->dasd_read_time3[endtime_ind]++;
	}
}
static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	if (dasd_global_profile_level) {
		dasd_profile_end_add_data(&dasd_global_profile_data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}

	spin_lock(&block->profile.lock);
	if (block->profile.data)
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data)
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	spin_unlock(&device->profile.lock);
}
void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
	getnstimeofday(&data->starttod);
	spin_unlock_bh(&profile->lock);
}

void dasd_global_profile_reset(void)
{
	memset(&dasd_global_profile_data, 0, sizeof(dasd_global_profile_data));
	getnstimeofday(&dasd_global_profile_data.starttod);
}

int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	getnstimeofday(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
	spin_lock_bh(&profile->lock);
	kfree(profile->data);
	profile->data = NULL;
	spin_unlock_bh(&profile->lock);
}
char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}
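/*
 * Note for callers: the returned buffer is vmalloc'ed and must be released
 * with vfree() after parsing, as the debugfs write handlers below do.
 */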
static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (!rc)
			rc = user_len;
	} else if (strncmp(str, "off", 3) == 0) {
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
	vfree(buffer);
	return rc;
}

static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int i;

	for (i = 0; i < 32; i++)
		seq_printf(m, "%u ", array[i]);
	seq_putc(m, '\n');
}
static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
	seq_printf(m, "start_time %ld.%09ld\n",
		   data->starttod.tv_sec, data->starttod.tv_nsec);
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
	seq_printf(m, "histogram_sectors ");
	dasd_stats_array(m, data->dasd_io_secs);
	seq_printf(m, "histogram_io_times ");
	dasd_stats_array(m, data->dasd_io_times);
	seq_printf(m, "histogram_io_times_weighted ");
	dasd_stats_array(m, data->dasd_io_timps);
	seq_printf(m, "histogram_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_io_time1);
	seq_printf(m, "histogram_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_io_time2);
	seq_printf(m, "histogram_time_ssch_to_irq_weighted ");
	dasd_stats_array(m, data->dasd_io_time2ps);
	seq_printf(m, "histogram_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_io_time3);
	seq_printf(m, "histogram_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
	seq_printf(m, "histogram_read_sectors ");
	dasd_stats_array(m, data->dasd_read_secs);
	seq_printf(m, "histogram_read_times ");
	dasd_stats_array(m, data->dasd_read_times);
	seq_printf(m, "histogram_read_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_read_time1);
	seq_printf(m, "histogram_read_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_read_time2);
	seq_printf(m, "histogram_read_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_read_time3);
	seq_printf(m, "histogram_read_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_read_nr_req);
}
static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_printf(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;
	return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};
static ssize_t dasd_stats_global_write(struct file *file,
				       const char __user *user_buf,
				       size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	ssize_t rc;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);
	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_global_profile_reset();
	} else if (strncmp(str, "on", 2) == 0) {
		dasd_global_profile_reset();
		dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
	} else if (strncmp(str, "off", 3) == 0) {
		dasd_global_profile_level = DASD_PROFILE_OFF;
	} else
		rc = -EINVAL;
	vfree(buffer);
	return rc;
}

static int dasd_stats_global_show(struct seq_file *m, void *v)
{
	if (!dasd_global_profile_level) {
		seq_printf(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, &dasd_global_profile_data);
	return 0;
}

static int dasd_stats_global_open(struct inode *inode, struct file *file)
{
	return single_open(file, dasd_stats_global_show, NULL);
}

static const struct file_operations dasd_stats_global_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_global_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_global_write,
};
static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	umode_t mode;
	struct dentry *pde;

	if (!base_dentry)
		return;
	profile->dentry = NULL;
	profile->data = NULL;
	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, base_dentry,
				  profile, &dasd_stats_raw_fops);
	if (pde && !IS_ERR(pde))
		profile->dentry = pde;
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	dasd_profile_off(profile);
	if (profile->dentry) {
		debugfs_remove(profile->dentry);
		profile->dentry = NULL;
	}
}

static void dasd_statistics_removeroot(void)
{
	dasd_global_profile_level = DASD_PROFILE_OFF;
	if (dasd_global_profile_dentry) {
		debugfs_remove(dasd_global_profile_dentry);
		dasd_global_profile_dentry = NULL;
	}
	if (dasd_debugfs_global_entry)
		debugfs_remove(dasd_debugfs_global_entry);
	if (dasd_debugfs_root_entry)
		debugfs_remove(dasd_debugfs_root_entry);
}

static void dasd_statistics_createroot(void)
{
	umode_t mode;
	struct dentry *pde;

	dasd_debugfs_root_entry = NULL;
	dasd_debugfs_global_entry = NULL;
	dasd_global_profile_dentry = NULL;
	pde = debugfs_create_dir("dasd", NULL);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_root_entry = pde;
	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_global_entry = pde;

	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, dasd_debugfs_global_entry,
				  NULL, &dasd_stats_global_fops);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_global_profile_dentry = pde;
	return;

error:
	DBF_EVENT(DBF_ERR, "%s",
		  "Creation of the dasd debugfs interface failed");
	dasd_statistics_removeroot();
	return;
}
#else /* CONFIG_DASD_PROFILE */

#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
	seq_printf(m, "Statistics are not activated in this kernel\n");
	return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

#endif				/* CONFIG_DASD_PROFILE */
/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON(datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
*dasd_smalloc_request(int magic
, int cplength
,
1232 struct dasd_device
*device
)
1234 unsigned long flags
;
1235 struct dasd_ccw_req
*cqr
;
1239 size
= (sizeof(struct dasd_ccw_req
) + 7L) & -8L;
1241 size
+= cplength
* sizeof(struct ccw1
);
1244 spin_lock_irqsave(&device
->mem_lock
, flags
);
1245 cqr
= (struct dasd_ccw_req
*)
1246 dasd_alloc_chunk(&device
->ccw_chunks
, size
);
1247 spin_unlock_irqrestore(&device
->mem_lock
, flags
);
1249 return ERR_PTR(-ENOMEM
);
1250 memset(cqr
, 0, sizeof(struct dasd_ccw_req
));
1251 data
= (char *) cqr
+ ((sizeof(struct dasd_ccw_req
) + 7L) & -8L);
1254 cqr
->cpaddr
= (struct ccw1
*) data
;
1255 data
+= cplength
*sizeof(struct ccw1
);
1256 memset(cqr
->cpaddr
, 0, cplength
*sizeof(struct ccw1
));
1261 memset(cqr
->data
, 0, datasize
);
1264 set_bit(DASD_CQR_FLAGS_USE_ERP
, &cqr
->flags
);
1265 dasd_get_device(device
);
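/*
 * Usage sketch (illustrative only, not code from this driver): a discipline
 * typically takes a request from the per-device chunk pool, fills in the
 * channel program, executes it synchronously and returns the memory:
 *
 *	struct dasd_ccw_req *cqr;
 *	int rc;
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	(fill cqr->cpaddr[], cqr->data, cqr->startdev, cqr->retries and
 *	 cqr->expires, then mark the request ready for execution)
 *	cqr->status = DASD_CQR_FILLED;
 *	rc = dasd_sleep_on(cqr);
 *	dasd_sfree_request(cqr, device);
 *	return rc;
 */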
/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			/* internal error 10 - unknown rc*/
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}
/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO run out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
		cqr->lpm &= device->path_data.opm;
		if (!cqr->lpm)
			cqr->lpm = device->path_data.opm;
	}
	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g. the ppm) then
		 * we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
		 */
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
		} else if (cqr->lpm != device->path_data.opm) {
			cqr->lpm = device->path_data.opm;
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
			device->path_data.opm = 0;
			device->path_data.ppm = 0;
			device->path_data.npm = 0;
			device->path_data.tbvpm =
				ccw_device_get_path_mask(device->cdev);
		}
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		/* most likely caused in power management context */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}
/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
}
/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_clock();
	cqr = (struct dasd_ccw_req *) intparm;
	/* check for conditions that should be handled immediately */
	if (!cqr ||
	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	      scsw_cstat(&irb->scsw) == 0)) {
		if (cqr)
			memcpy(&cqr->irb, irb, sizeof(*irb));
		device = dasd_device_from_cdev_locked(cdev);
		if (IS_ERR(device))
			return;
		/* ignore unsolicited interrupts for DIAG discipline */
		if (device->discipline == dasd_diag_discipline_pointer) {
			dasd_put_device(device);
			return;
		}
		device->discipline->dump_sense_dbf(device, irb, "int");
		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
		dasd_put_device(device);
	}
	if (!cqr)
		return;

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == device->path_data.opm)
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
				cqr->lpm = device->path_data.opm;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
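/*
 * Note on the fast_io path above: on successful completion the next queued
 * request is started directly from interrupt context, so the channel is
 * kept busy without waiting for a tasklet round trip; the bottom half then
 * only has to process the final status and run the callbacks.
 */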
enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state != device->target ||
	    !device->discipline->check_for_device_change) {
		dasd_put_device(device);
		goto out;
	}
	if (device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "uc");
	device->discipline->check_for_device_change(device, NULL, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
}

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Stop list processing at the first non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			break;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}

/*
 * the cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;
	void (*callback)(struct dasd_ccw_req *, void *data);
	void *callback_data;
	char errorstring[ERRORLENGTH];

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		callback = cqr->callback;
		callback_data = cqr->callback_data;
		if (block)
			spin_lock_bh(&block->queue_lock);
		switch (cqr->status) {
		case DASD_CQR_SUCCESS:
			cqr->status = DASD_CQR_DONE;
			break;
		case DASD_CQR_ERROR:
			cqr->status = DASD_CQR_NEED_ERP;
			break;
		case DASD_CQR_CLEARED:
			cqr->status = DASD_CQR_TERMINATED;
			break;
		default:
			/* internal error 12 - wrong cqr status*/
			snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
			dev_err(&device->cdev->dev,
				"An error occurred in the DASD device driver, "
				"reason=%s\n", errorstring);
			BUG();
		}
		if (cqr->callback != NULL)
			(callback)(cqr, callback_data);
		if (block)
			spin_unlock_bh(&block->queue_lock);
	}
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus) but cannot be "
				"ended, retrying in 5 s\n",
				cqr, (cqr->expires/HZ));
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus), %i retries "
				"remaining\n", cqr, (cqr->expires/HZ),
				cqr->retries);
	}
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* when device is stopped, return request to previous layer
	 * exception: only the disconnect or unresumed bits are set and the
	 * cqr is a path verification request
	 */
	if (device->stopped &&
	    !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
	      && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
		cqr->intrc = -EAGAIN;
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}
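/*
 * To unravel the double negation above: a stopped device still accepts the
 * head request if the only stop reasons are DASD_STOPPED_DC_WAIT or
 * DASD_UNRESUMED_PM *and* the request is a path verification cqr; in every
 * other stopped case the request is returned to the previous layer as
 * CLEARED with intrc -EAGAIN.
 */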
static void __dasd_device_check_path_events(struct dasd_device *device)
{
	int rc;

	if (device->path_data.tbvpm) {
		if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
					DASD_UNRESUMED_PM))
			return;
		rc = device->discipline->verify_path(
			device, device->path_data.tbvpm);
		if (rc)
			dasd_device_set_timer(device, 50);
		else
			device->path_data.tbvpm = 0;
	}
}

/*
 * Go through all requests on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called! In other words, when 'device' is a base
 * device then all block layer requests must have been removed before
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Flushing the DASD request queue "
					"failed for request %p\n", cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			cqr->stopclk = get_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default: /* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}
/*
 * Acquire the device lock and process queues for the device.
 */
static void dasd_device_tasklet(struct dasd_device *device)
{
	struct list_head final_queue;

	atomic_set(&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	__dasd_device_check_path_events(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (waitqueue_active(&shutdown_waitq))
		wake_up(&shutdown_waitq);
	dasd_put_device(device);
}
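/*
 * Lock ordering note: the ccwdev lock is dropped before the final-queue
 * callbacks run, because __dasd_device_process_final_queue takes
 * block->queue_lock itself; the ccwdev lock is then re-acquired only to
 * start the request at the head of the ccw queue.
 */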
/*
 * Schedules a call to dasd_tasklet over the device tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}

void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped |= bits;
}
EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);

void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped &= ~bits;
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);

/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Wakeup helper for the 'sleep_on' functions.
 */
void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
	cqr->callback_data = DASD_SLEEPON_END_TAG;
	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
	wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_wakeup_cb);

static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}
/*
 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
 */
static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_FILLED)
		return 0;
	device = cqr->startdev;
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->status == DASD_CQR_TERMINATED) {
			device->discipline->handle_terminated_request(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = device->discipline->erp_action(cqr);
			erp_fn(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_FAILED)
			dasd_log_sense(cqr, &cqr->irb);
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			return 1;
		}
	}
	return 0;
}

static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
{
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->refers) /* erp is not done yet */
			return 1;
		return ((cqr->status != DASD_CQR_DONE) &&
			(cqr->status != DASD_CQR_FAILED));
	} else
		return (cqr->status == DASD_CQR_FILLED);
}
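/*
 * Together with __dasd_sleep_on_erp this drives the sleep-on loop below:
 * ERP requests are chained to the failing cqr via cqr->refers, so the loop
 * keeps running until the ERP chain is unwound and the original request
 * reaches DONE or FAILED (or, without ERP, leaves the FILLED state).
 */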
static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
{
	struct dasd_device *device;
	int rc;
	struct list_head ccw_queue;
	struct dasd_ccw_req *cqr;

	INIT_LIST_HEAD(&ccw_queue);
	maincqr->status = DASD_CQR_FILLED;
	device = maincqr->startdev;
	list_add(&maincqr->blocklist, &ccw_queue);
	for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
	     cqr = list_first_entry(&ccw_queue,
				    struct dasd_ccw_req, blocklist)) {

		if (__dasd_sleep_on_erp(cqr))
			continue;
		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
			continue;
		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			continue;
		}
		/* Non-temporary stop condition will trigger fail fast */
		if (device->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(device))) {
			cqr->status = DASD_CQR_FAILED;
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, !(device->stopped));
			if (rc == -ERESTARTSYS) {
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, !(device->stopped));

		cqr->callback = dasd_wakeup_cb;
		cqr->callback_data = DASD_SLEEPON_START_TAG;
		dasd_add_request_tail(cqr);
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, _wait_for_wakeup(cqr));
			if (rc == -ERESTARTSYS) {
				dasd_cancel_req(cqr);
				/* wait (non-interruptible) for final status */
				wait_event(generic_waitq,
					   _wait_for_wakeup(cqr));
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}

	maincqr->endclk = get_clock();
	if ((maincqr->status != DASD_CQR_DONE) &&
	    (maincqr->intrc != -ERESTARTSYS))
		dasd_log_sense(maincqr, &maincqr->irb);
	if (maincqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (maincqr->intrc)
		rc = maincqr->intrc;
	else
		rc = -EIO;
	return rc;
}
/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
        return _dasd_sleep_on(cqr, 0);
}
/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptibly for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
        return _dasd_sleep_on(cqr, 1);
}
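/*
 * Example (illustrative sketch, not part of the original driver): a
 * discipline typically drives a synchronous channel program like this,
 * where build_my_cqr() is a hypothetical helper returning a filled-in
 * struct dasd_ccw_req with startdev and memdev set:
 *
 *      struct dasd_ccw_req *cqr = build_my_cqr(device);
 *      int rc;
 *
 *      if (IS_ERR(cqr))
 *              return PTR_ERR(cqr);
 *      rc = dasd_sleep_on(cqr);        // or dasd_sleep_on_interruptible()
 *      dasd_sfree_request(cqr, cqr->memdev);
 *      return rc;
 */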
/*
 * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
 * for ECKD devices) the currently running request has to be terminated
 * and set back to status queued before the special request is added
 * to the head of the queue. The special request is then waited on normally.
 */
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;
        int rc;

        if (list_empty(&device->ccw_queue))
                return 0;
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
        rc = device->discipline->term_IO(cqr);
        if (!rc)
                /*
                 * CQR terminated because a more important request is pending.
                 * Undo decreasing of retry counter because this is
                 * not an error case.
                 */
                cqr->retries++;
        return rc;
}
int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        int rc;

        device = cqr->startdev;
        if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
            !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
                cqr->status = DASD_CQR_FAILED;
                cqr->intrc = -EPERM;
                return -EIO;
        }
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        rc = _dasd_term_running_cqr(device);
        if (rc) {
                spin_unlock_irq(get_ccwdev_lock(device->cdev));
                return rc;
        }
        cqr->callback = dasd_wakeup_cb;
        cqr->callback_data = DASD_SLEEPON_START_TAG;
        cqr->status = DASD_CQR_QUEUED;
        /*
         * add the new request as second in line; the terminated cqr
         * needs to be finished first
         */
        list_add(&cqr->devlist, device->ccw_queue.next);

        /* let the bh start the request to keep them in order */
        dasd_schedule_device_bh(device);

        spin_unlock_irq(get_ccwdev_lock(device->cdev));

        wait_event(generic_waitq, _wait_for_wakeup(cqr));

        if (cqr->status == DASD_CQR_DONE)
                rc = 0;
        else if (cqr->intrc)
                rc = cqr->intrc;
        else
                rc = -EIO;
        return rc;
}
)
2295 * Cancels a request that was started with dasd_sleep_on_req.
2296 * This is useful to timeout requests. The request will be
2297 * terminated if it is currently in i/o.
2298 * Returns 1 if the request has been terminated.
2299 * 0 if there was no need to terminate the request (not started yet)
2300 * negative error code if termination failed
2301 * Cancellation of a request is an asynchronous operation! The calling
2302 * function has to wait until the request is properly returned via callback.
2304 int dasd_cancel_req(struct dasd_ccw_req
*cqr
)
2306 struct dasd_device
*device
= cqr
->startdev
;
2307 unsigned long flags
;
2311 spin_lock_irqsave(get_ccwdev_lock(device
->cdev
), flags
);
2312 switch (cqr
->status
) {
2313 case DASD_CQR_QUEUED
:
2314 /* request was not started - just set to cleared */
2315 cqr
->status
= DASD_CQR_CLEARED
;
2317 case DASD_CQR_IN_IO
:
2318 /* request in IO - terminate IO and release again */
2319 rc
= device
->discipline
->term_IO(cqr
);
2321 dev_err(&device
->cdev
->dev
,
2322 "Cancelling request %p failed with rc=%d\n",
2325 cqr
->stopclk
= get_clock();
2328 default: /* already finished or clear pending - do nothing */
2331 spin_unlock_irqrestore(get_ccwdev_lock(device
->cdev
), flags
);
2332 dasd_schedule_device_bh(device
);
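/*
 * Example (illustrative sketch): because cancellation is asynchronous, a
 * caller that gives up on a request must still wait for the final callback
 * before releasing the cqr; this is the same pattern _dasd_sleep_on() uses
 * on -ERESTARTSYS:
 *
 *      dasd_cancel_req(cqr);
 *      wait_event(generic_waitq, _wait_for_wakeup(cqr));
 *      dasd_sfree_request(cqr, cqr->memdev);
 */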
/*
 * SECTION: Operations of the dasd_block layer.
 */

/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably (e.g. a state
 * change interrupt).
 */
static void dasd_block_timeout(unsigned long ptr)
{
        unsigned long flags;
        struct dasd_block *block;

        block = (struct dasd_block *) ptr;
        spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
        /* re-activate request queue */
        dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
        spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
        dasd_schedule_block_bh(block);
}
/*
 * Set up a timeout for a dasd_block in jiffies; an expiry of 0 clears it.
 */
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
        if (expires == 0)
                del_timer(&block->timer);
        else
                mod_timer(&block->timer, jiffies + expires);
}
/*
 * Clear timeout for a dasd_block.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
        del_timer(&block->timer);
}
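/*
 * Example (illustrative sketch): this timer pair implements a simple
 * back-off. __dasd_process_request_queue() below uses it to retry a
 * temporarily failing build_cp after half a second:
 *
 *      dasd_device_set_stop_bits(basedev, DASD_STOPPED_PENDING);
 *      dasd_block_set_timer(block, HZ/2);
 *
 * dasd_block_timeout() then removes DASD_STOPPED_PENDING again and
 * reschedules the block tasklet.
 */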
/*
 * Process finished error recovery ccw.
 */
static void __dasd_process_erp(struct dasd_device *device,
                               struct dasd_ccw_req *cqr)
{
        dasd_erp_fn_t erp_fn;

        if (cqr->status == DASD_CQR_DONE)
                DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
        else
                dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
        erp_fn = device->discipline->erp_postaction(cqr);
        erp_fn(cqr);
}
/*
 * Fetch requests from the block device queue.
 */
static void __dasd_process_request_queue(struct dasd_block *block)
{
        struct request_queue *queue;
        struct request *req;
        struct dasd_ccw_req *cqr;
        struct dasd_device *basedev;
        unsigned long flags;

        queue = block->request_queue;
        basedev = block->base;
        /* No queue? Then there is nothing to do. */
        if (queue == NULL)
                return;

        /*
         * We requeue requests from the block device queue to the ccw
         * queue only in two states. In state DASD_STATE_READY the
         * partition detection is done and we need to requeue requests
         * for that. State DASD_STATE_ONLINE is normal block device
         * operation.
         */
        if (basedev->state < DASD_STATE_READY) {
                while ((req = blk_fetch_request(block->request_queue)))
                        __blk_end_request_all(req, -EIO);
                return;
        }
        /* Now we try to fetch requests from the request queue */
        while ((req = blk_peek_request(queue))) {
                if (basedev->features & DASD_FEATURE_READONLY &&
                    rq_data_dir(req) == WRITE) {
                        DBF_DEV_EVENT(DBF_ERR, basedev,
                                      "Rejecting write request %p", req);
                        blk_start_request(req);
                        __blk_end_request_all(req, -EIO);
                        continue;
                }
                cqr = basedev->discipline->build_cp(basedev, block, req);
                if (IS_ERR(cqr)) {
                        if (PTR_ERR(cqr) == -EBUSY)
                                break;  /* normal end condition */
                        if (PTR_ERR(cqr) == -ENOMEM)
                                break;  /* terminate request queue loop */
                        if (PTR_ERR(cqr) == -EAGAIN) {
                                /*
                                 * The current request cannot be built right
                                 * now, we have to try later. If this request
                                 * is the head-of-queue we stop the device
                                 * for 1/2 second.
                                 */
                                if (!list_empty(&block->ccw_queue))
                                        break;
                                spin_lock_irqsave(
                                        get_ccwdev_lock(basedev->cdev), flags);
                                dasd_device_set_stop_bits(basedev,
                                                          DASD_STOPPED_PENDING);
                                spin_unlock_irqrestore(
                                        get_ccwdev_lock(basedev->cdev), flags);
                                dasd_block_set_timer(block, HZ/2);
                                break;
                        }
                        DBF_DEV_EVENT(DBF_ERR, basedev,
                                      "CCW creation failed (rc=%ld) "
                                      "on request %p", PTR_ERR(cqr), req);
                        blk_start_request(req);
                        __blk_end_request_all(req, -EIO);
                        continue;
                }
                /*
                 * Note: callback is set to dasd_return_cqr_cb in
                 * __dasd_block_start_head to cover erp requests as well
                 */
                cqr->callback_data = (void *) req;
                cqr->status = DASD_CQR_FILLED;
                blk_start_request(req);
                list_add_tail(&cqr->blocklist, &block->ccw_queue);
                dasd_profile_start(block, cqr, req);
        }
}
static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
        struct request *req;
        int status;
        int error = 0;

        req = (struct request *) cqr->callback_data;
        dasd_profile_end(cqr->block, cqr, req);
        status = cqr->block->base->discipline->free_cp(cqr, req);
        if (status <= 0)
                error = status ? status : -EIO;
        __blk_end_request_all(req, error);
}
/*
 * Process ccw request queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
                                           struct list_head *final_queue)
{
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;
        dasd_erp_fn_t erp_fn;
        unsigned long flags;
        struct dasd_device *base = block->base;

restart:
        /* Process requests with final status. */
        list_for_each_safe(l, n, &block->ccw_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, blocklist);
                if (cqr->status != DASD_CQR_DONE &&
                    cqr->status != DASD_CQR_FAILED &&
                    cqr->status != DASD_CQR_NEED_ERP &&
                    cqr->status != DASD_CQR_TERMINATED)
                        continue;

                if (cqr->status == DASD_CQR_TERMINATED) {
                        base->discipline->handle_terminated_request(cqr);
                        goto restart;
                }

                /* Process requests that may be recovered */
                if (cqr->status == DASD_CQR_NEED_ERP) {
                        erp_fn = base->discipline->erp_action(cqr);
                        if (IS_ERR(erp_fn(cqr)))
                                continue;
                        goto restart;
                }

                /* log sense for fatal error */
                if (cqr->status == DASD_CQR_FAILED)
                        dasd_log_sense(cqr, &cqr->irb);

                /* First of all call extended error reporting. */
                if (dasd_eer_enabled(base) &&
                    cqr->status == DASD_CQR_FAILED) {
                        dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

                        /* restart request */
                        cqr->status = DASD_CQR_FILLED;
                        cqr->retries = 255;
                        spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
                        dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
                        spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
                                               flags);
                        goto restart;
                }

                /* Process finished ERP request. */
                if (cqr->refers) {
                        __dasd_process_erp(base, cqr);
                        goto restart;
                }

                /* Rechain finished requests to final queue */
                cqr->endclk = get_clock();
                list_move_tail(&cqr->blocklist, final_queue);
        }
}
static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
        dasd_schedule_block_bh(cqr->block);
}
static void __dasd_block_start_head(struct dasd_block *block)
{
        struct dasd_ccw_req *cqr;

        if (list_empty(&block->ccw_queue))
                return;
        /* We always begin with the first request on the queue, as some
         * of the previously started requests have to be enqueued on a
         * dasd_device again for error recovery.
         */
        list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
                if (cqr->status != DASD_CQR_FILLED)
                        continue;
                if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
                    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
                        cqr->status = DASD_CQR_FAILED;
                        cqr->intrc = -EPERM;
                        dasd_schedule_block_bh(block);
                        continue;
                }
                /* Non-temporary stop condition will trigger fail fast */
                if (block->base->stopped & ~DASD_STOPPED_PENDING &&
                    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
                    (!dasd_eer_enabled(block->base))) {
                        cqr->status = DASD_CQR_FAILED;
                        dasd_schedule_block_bh(block);
                        continue;
                }
                /* Don't try to start requests if device is stopped */
                if (block->base->stopped)
                        return;

                /* just a fail safe check, should not happen */
                if (!cqr->startdev)
                        cqr->startdev = block->base;

                /* make sure that the requests we submit find their way back */
                cqr->callback = dasd_return_cqr_cb;

                dasd_add_request_tail(cqr);
        }
}
/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
static void dasd_block_tasklet(struct dasd_block *block)
{
        struct list_head final_queue;
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;

        atomic_set(&block->tasklet_scheduled, 0);
        INIT_LIST_HEAD(&final_queue);
        spin_lock(&block->queue_lock);
        /* Finish off requests on ccw queue */
        __dasd_process_block_ccw_queue(block, &final_queue);
        spin_unlock(&block->queue_lock);
        /* Now call the callback function of requests with final status */
        spin_lock_irq(&block->request_queue_lock);
        list_for_each_safe(l, n, &final_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, blocklist);
                list_del_init(&cqr->blocklist);
                __dasd_cleanup_cqr(cqr);
        }
        spin_lock(&block->queue_lock);
        /* Get new request from the block device request queue */
        __dasd_process_request_queue(block);
        /* Now check if the head of the ccw queue needs to be started. */
        __dasd_block_start_head(block);
        spin_unlock(&block->queue_lock);
        spin_unlock_irq(&block->request_queue_lock);
        if (waitqueue_active(&shutdown_waitq))
                wake_up(&shutdown_waitq);
        dasd_put_device(block->base);
}
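/*
 * Note on lock ordering in the tasklet above (an observation about this
 * file, not a documented API rule): block->queue_lock is first taken on
 * its own for the ccw queue work and later nested inside
 * block->request_queue_lock while requests are fetched from and completed
 * to the generic block layer. All paths in this file that hold both locks
 * nest queue_lock inside request_queue_lock, never the other way around.
 */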
static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
        wake_up(&dasd_flush_wq);
}
/*
 * Go through all requests on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
        struct dasd_ccw_req *cqr, *n;
        int rc, i;
        struct list_head flush_queue;

        INIT_LIST_HEAD(&flush_queue);
        spin_lock_bh(&block->queue_lock);
        rc = 0;
restart:
        list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
                /* if this request is currently owned by a dasd_device cancel it */
                if (cqr->status >= DASD_CQR_QUEUED)
                        rc = dasd_cancel_req(cqr);
                if (rc < 0)
                        break;
                /* Rechain request (including erp chain) so it won't be
                 * touched by the dasd_block_tasklet anymore.
                 * Replace the callback so we notice when the request
                 * is returned from the dasd_device layer.
                 */
                cqr->callback = _dasd_wake_block_flush_cb;
                for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
                        list_move_tail(&cqr->blocklist, &flush_queue);
                if (i > 1)
                        /* moved more than one request - need to restart */
                        goto restart;
        }
        spin_unlock_bh(&block->queue_lock);
        /* Now call the callback function of flushed requests */
restart_cb:
        list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
                wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
                /* Process finished ERP request. */
                if (cqr->refers) {
                        spin_lock_bh(&block->queue_lock);
                        __dasd_process_erp(block->base, cqr);
                        spin_unlock_bh(&block->queue_lock);
                        /* restart list_for_xx loop since dasd_process_erp
                         * might remove multiple elements */
                        goto restart_cb;
                }
                /* call the callback function */
                spin_lock_irq(&block->request_queue_lock);
                cqr->endclk = get_clock();
                list_del_init(&cqr->blocklist);
                __dasd_cleanup_cqr(cqr);
                spin_unlock_irq(&block->request_queue_lock);
        }
        return rc;
}
/*
 * Schedules a call to dasd_block_tasklet over the block's tasklet.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
        /* Protect against rescheduling. */
        if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
                return;
        /* life cycle of block is bound to its base device */
        dasd_get_device(block->base);
        tasklet_hi_schedule(&block->tasklet);
}
/*
 * SECTION: external block device operations
 * (request queue handling, open, release, etc.)
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
static void do_dasd_request(struct request_queue *queue)
{
        struct dasd_block *block;

        block = queue->queuedata;
        spin_lock(&block->queue_lock);
        /* Get new request from the block device request queue */
        __dasd_process_request_queue(block);
        /* Now check if the head of the ccw queue needs to be started. */
        __dasd_block_start_head(block);
        spin_unlock(&block->queue_lock);
}
/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int dasd_alloc_queue(struct dasd_block *block)
{
        int rc;

        block->request_queue = blk_init_queue(do_dasd_request,
                                              &block->request_queue_lock);
        if (block->request_queue == NULL)
                return -ENOMEM;

        block->request_queue->queuedata = block;

        elevator_exit(block->request_queue->elevator);
        block->request_queue->elevator = NULL;
        rc = elevator_init(block->request_queue, "deadline");
        if (rc) {
                blk_cleanup_queue(block->request_queue);
                return rc;
        }
        return 0;
}
/*
 * Configure the request queue limits.
 */
static void dasd_setup_queue(struct dasd_block *block)
{
        int max;

        if (block->base->features & DASD_FEATURE_USERAW) {
                /*
                 * the max_blocks value for raw_track access is 256;
                 * it is higher than the native ECKD value because we
                 * only need one ccw per track, so the max_hw_sectors are
                 * 2048 x 512B = 1024kB = 16 tracks
                 */
                max = 2048;
        } else {
                max = block->base->discipline->max_blocks << block->s2b_shift;
        }
        blk_queue_logical_block_size(block->request_queue,
                                     block->bp_block);
        blk_queue_max_hw_sectors(block->request_queue, max);
        blk_queue_max_segments(block->request_queue, -1L);
        /* with page sized segments we can translate each segment into
         * one idaw/tidaw
         */
        blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
        blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
}
/*
 * Deactivate and free request queue.
 */
static void dasd_free_queue(struct dasd_block *block)
{
        if (block->request_queue) {
                blk_cleanup_queue(block->request_queue);
                block->request_queue = NULL;
        }
}
/*
 * Flush requests on the request queue.
 */
static void dasd_flush_request_queue(struct dasd_block *block)
{
        struct request *req;

        if (!block->request_queue)
                return;

        spin_lock_irq(&block->request_queue_lock);
        while ((req = blk_fetch_request(block->request_queue)))
                __blk_end_request_all(req, -EIO);
        spin_unlock_irq(&block->request_queue_lock);
}
static int dasd_open(struct block_device *bdev, fmode_t mode)
{
        struct dasd_device *base;
        int rc;

        base = dasd_device_from_gendisk(bdev->bd_disk);
        if (!base)
                return -ENODEV;

        atomic_inc(&base->block->open_count);
        if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
                rc = -ENODEV;
                goto unlock;
        }

        if (!try_module_get(base->discipline->owner)) {
                rc = -EINVAL;
                goto unlock;
        }

        if (dasd_probeonly) {
                dev_info(&base->cdev->dev,
                         "Accessing the DASD failed because it is in "
                         "probeonly mode\n");
                rc = -EPERM;
                goto out;
        }

        if (base->state <= DASD_STATE_BASIC) {
                DBF_DEV_EVENT(DBF_ERR, base, " %s",
                              " Cannot open unrecognized device");
                rc = -ENODEV;
                goto out;
        }

        if ((mode & FMODE_WRITE) &&
            (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
             (base->features & DASD_FEATURE_READONLY))) {
                rc = -EROFS;
                goto out;
        }

        dasd_put_device(base);
        return 0;

out:
        module_put(base->discipline->owner);
unlock:
        atomic_dec(&base->block->open_count);
        dasd_put_device(base);
        return rc;
}
static int dasd_release(struct gendisk *disk, fmode_t mode)
{
        struct dasd_device *base;

        base = dasd_device_from_gendisk(disk);
        if (!base)
                return -ENODEV;

        atomic_dec(&base->block->open_count);
        module_put(base->discipline->owner);
        dasd_put_device(base);
        return 0;
}
/*
 * Return disk geometry.
 */
static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct dasd_device *base;

        base = dasd_device_from_gendisk(bdev->bd_disk);
        if (!base)
                return -ENODEV;

        if (!base->discipline ||
            !base->discipline->fill_geometry) {
                dasd_put_device(base);
                return -EINVAL;
        }
        base->discipline->fill_geometry(base->block, geo);
        geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
        dasd_put_device(base);
        return 0;
}
const struct block_device_operations
dasd_device_operations = {
        .owner          = THIS_MODULE,
        .open           = dasd_open,
        .release        = dasd_release,
        .ioctl          = dasd_ioctl,
        .compat_ioctl   = dasd_ioctl,
        .getgeo         = dasd_getgeo,
};
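/*
 * Example (illustrative sketch, assuming the gendisk setup done elsewhere
 * in this driver): a struct gendisk is pointed at this ops table roughly
 * like this before add_disk():
 *
 *      gdp->fops = &dasd_device_operations;    // gdp: struct gendisk *
 */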
/*******************************************************************************
 * end of block device operations
 */

static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
        dasd_proc_exit();
#endif
        dasd_eer_exit();
        if (dasd_page_cache != NULL) {
                kmem_cache_destroy(dasd_page_cache);
                dasd_page_cache = NULL;
        }
        dasd_gendisk_exit();
        dasd_devmap_exit();
        if (dasd_debug_area != NULL) {
                debug_unregister(dasd_debug_area);
                dasd_debug_area = NULL;
        }
        dasd_statistics_removeroot();
}
/*
 * SECTION: common functions for ccw_driver use
 */

/*
 * Is the device read-only?
 * Note that this function does not report the setting of the
 * readonly device attribute, but how it is configured in z/VM.
 */
int dasd_device_is_ro(struct dasd_device *device)
{
        struct ccw_dev_id dev_id;
        struct diag210 diag_data;
        int rc;

        if (!MACHINE_IS_VM)
                return 0;
        ccw_device_get_id(device->cdev, &dev_id);
        memset(&diag_data, 0, sizeof(diag_data));
        diag_data.vrdcdvno = dev_id.devno;
        diag_data.vrdclen = sizeof(diag_data);
        rc = diag210(&diag_data);
        if (rc == 0 || rc == 2) {
                return diag_data.vrdcvfla & 0x80;
        } else {
                DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
                          dev_id.devno, rc);
                return 0;
        }
}
EXPORT_SYMBOL_GPL(dasd_device_is_ro);
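/*
 * Example (illustrative sketch): a discipline can honour a z/VM read-only
 * minidisk during device setup like this:
 *
 *      if (dasd_device_is_ro(device))
 *              set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
 *
 * dasd_open() above then rejects FMODE_WRITE openers with -EROFS.
 */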
static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
{
        struct ccw_device *cdev = data;
        int ret;

        ret = ccw_device_set_online(cdev);
        if (ret)
                pr_warning("%s: Setting the DASD online failed with rc=%d\n",
                           dev_name(&cdev->dev), ret);
}
/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int dasd_generic_probe(struct ccw_device *cdev,
                       struct dasd_discipline *discipline)
{
        int ret;

        ret = dasd_add_sysfs_files(cdev);
        if (ret) {
                DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
                                "dasd_generic_probe: could not add "
                                "sysfs entries");
                return ret;
        }
        cdev->handler = &dasd_int_handler;

        /*
         * Automatically online either all dasd devices (dasd_autodetect)
         * or all devices specified with dasd= parameters during
         * initial probe.
         */
        if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
            (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
                async_schedule(dasd_generic_auto_online, cdev);
        return 0;
}
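/*
 * Example (illustrative sketch, not part of the original driver): a
 * discipline wires this up through its ccw_driver, e.g. a hypothetical
 * "foo" discipline:
 *
 *      static int dasd_foo_probe(struct ccw_device *cdev)
 *      {
 *              return dasd_generic_probe(cdev, &dasd_foo_discipline);
 *      }
 *
 * together with dasd_generic_remove, dasd_generic_set_online (passing the
 * same discipline) and dasd_generic_set_offline in the ccw_driver ops.
 */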
/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void dasd_generic_remove(struct ccw_device *cdev)
{
        struct dasd_device *device;
        struct dasd_block *block;

        cdev->handler = NULL;

        dasd_remove_sysfs_files(cdev);
        device = dasd_device_from_cdev(cdev);
        if (IS_ERR(device))
                return;
        if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
                /* Already doing offline processing */
                dasd_put_device(device);
                return;
        }
        /*
         * This device is removed unconditionally. Set offline
         * flag to prevent dasd_open from opening it while it is
         * not quite down yet.
         */
        dasd_set_target_state(device, DASD_STATE_NEW);
        /* dasd_delete_device destroys the device reference. */
        block = device->block;
        dasd_delete_device(device);
        /*
         * life cycle of block is bound to device, so delete it after
         * device was safely removed
         */
        if (block)
                dasd_free_block(block);
}
/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
int dasd_generic_set_online(struct ccw_device *cdev,
                            struct dasd_discipline *base_discipline)
{
        struct dasd_discipline *discipline;
        struct dasd_device *device;
        int rc;

        /* first online clears initial online feature flag */
        dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
        device = dasd_create_device(cdev);
        if (IS_ERR(device))
                return PTR_ERR(device);

        discipline = base_discipline;
        if (device->features & DASD_FEATURE_USEDIAG) {
                if (!dasd_diag_discipline_pointer) {
                        pr_warning("%s Setting the DASD online failed because "
                                   "of missing DIAG discipline\n",
                                   dev_name(&cdev->dev));
                        dasd_delete_device(device);
                        return -ENODEV;
                }
                discipline = dasd_diag_discipline_pointer;
        }
        if (!try_module_get(base_discipline->owner)) {
                dasd_delete_device(device);
                return -EINVAL;
        }
        if (!try_module_get(discipline->owner)) {
                module_put(base_discipline->owner);
                dasd_delete_device(device);
                return -EINVAL;
        }
        device->base_discipline = base_discipline;
        device->discipline = discipline;

        /* check_device will allocate block device if necessary */
        rc = discipline->check_device(device);
        if (rc) {
                pr_warning("%s Setting the DASD online with discipline %s "
                           "failed with rc=%i\n",
                           dev_name(&cdev->dev), discipline->name, rc);
                module_put(discipline->owner);
                module_put(base_discipline->owner);
                dasd_delete_device(device);
                return rc;
        }

        dasd_set_target_state(device, DASD_STATE_ONLINE);
        if (device->state <= DASD_STATE_KNOWN) {
                pr_warning("%s Setting the DASD online failed because of a "
                           "missing discipline\n", dev_name(&cdev->dev));
                rc = -ENODEV;
                dasd_set_target_state(device, DASD_STATE_NEW);
                if (device->block)
                        dasd_free_block(device->block);
                dasd_delete_device(device);
        } else
                pr_debug("dasd_generic device %s found\n",
                         dev_name(&cdev->dev));

        wait_event(dasd_init_waitq, _wait_for_device(device));

        dasd_put_device(device);
        return rc;
}
int dasd_generic_set_offline(struct ccw_device *cdev)
{
        struct dasd_device *device;
        struct dasd_block *block;
        int max_count, open_count;

        device = dasd_device_from_cdev(cdev);
        if (IS_ERR(device))
                return PTR_ERR(device);
        if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
                /* Already doing offline processing */
                dasd_put_device(device);
                return 0;
        }
        /*
         * We must make sure that this device is currently not in use.
         * The open_count is increased for every opener, that includes
         * the blkdev_get in dasd_scan_partitions. We are only interested
         * in the other openers.
         */
        if (device->block) {
                max_count = device->block->bdev ? 0 : -1;
                open_count = atomic_read(&device->block->open_count);
                if (open_count > max_count) {
                        if (open_count > 0)
                                pr_warning("%s: The DASD cannot be set offline "
                                           "with open count %i\n",
                                           dev_name(&cdev->dev), open_count);
                        else
                                pr_warning("%s: The DASD cannot be set offline "
                                           "while it is in use\n",
                                           dev_name(&cdev->dev));
                        clear_bit(DASD_FLAG_OFFLINE, &device->flags);
                        dasd_put_device(device);
                        return -EBUSY;
                }
        }
        dasd_set_target_state(device, DASD_STATE_NEW);
        /* dasd_delete_device destroys the device reference. */
        block = device->block;
        dasd_delete_device(device);
        /*
         * life cycle of block is bound to device, so delete it after
         * device was safely removed
         */
        if (block)
                dasd_free_block(block);
        return 0;
}
int dasd_generic_last_path_gone(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;

        dev_warn(&device->cdev->dev, "No operational channel path is left "
                 "for the device\n");
        DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
        /* First of all call extended error reporting. */
        dasd_eer_write(device, NULL, DASD_EER_NOPATH);

        if (device->state < DASD_STATE_BASIC)
                return 0;
        /* Device is active. We want to keep it. */
        list_for_each_entry(cqr, &device->ccw_queue, devlist)
                if ((cqr->status == DASD_CQR_IN_IO) ||
                    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
                        cqr->status = DASD_CQR_QUEUED;
                        cqr->retries++;
                }
        dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
        dasd_device_clear_timer(device);
        dasd_schedule_device_bh(device);
        return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
int dasd_generic_path_operational(struct dasd_device *device)
{
        dev_info(&device->cdev->dev, "A channel path to the device has become "
                 "operational\n");
        DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
        dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
        if (device->stopped & DASD_UNRESUMED_PM) {
                dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
                dasd_restore_device(device);
                return 1;
        }
        dasd_schedule_device_bh(device);
        if (device->block)
                dasd_schedule_block_bh(device->block);
        return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
int dasd_generic_notify(struct ccw_device *cdev, int event)
{
        struct dasd_device *device;
        int ret;

        device = dasd_device_from_cdev_locked(cdev);
        if (IS_ERR(device))
                return 0;
        ret = 0;
        switch (event) {
        case CIO_GONE:
        case CIO_BOXED:
        case CIO_NO_PATH:
                device->path_data.opm = 0;
                device->path_data.ppm = 0;
                device->path_data.npm = 0;
                ret = dasd_generic_last_path_gone(device);
                break;
        case CIO_OPER:
                ret = 1;
                if (device->path_data.opm)
                        ret = dasd_generic_path_operational(device);
                break;
        }
        dasd_put_device(device);
        return ret;
}
void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
        int chp;
        __u8 oldopm, eventlpm;
        struct dasd_device *device;

        device = dasd_device_from_cdev_locked(cdev);
        if (IS_ERR(device))
                return;
        for (chp = 0; chp < 8; chp++) {
                eventlpm = 0x80 >> chp;
                if (path_event[chp] & PE_PATH_GONE) {
                        oldopm = device->path_data.opm;
                        device->path_data.opm &= ~eventlpm;
                        device->path_data.ppm &= ~eventlpm;
                        device->path_data.npm &= ~eventlpm;
                        if (oldopm && !device->path_data.opm)
                                dasd_generic_last_path_gone(device);
                }
                if (path_event[chp] & PE_PATH_AVAILABLE) {
                        device->path_data.opm &= ~eventlpm;
                        device->path_data.ppm &= ~eventlpm;
                        device->path_data.npm &= ~eventlpm;
                        device->path_data.tbvpm |= eventlpm;
                        dasd_schedule_device_bh(device);
                }
                if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
                        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                                      "Pathgroup re-established\n");
                        if (device->discipline->kick_validate)
                                device->discipline->kick_validate(device);
                }
        }
        dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);
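/*
 * Worked example for the mask arithmetic above: channel paths are reported
 * MSB first, so chp 0 maps to LPM bit 0x80 and chp 5 to 0x80 >> 5 = 0x04.
 * A PE_PATH_GONE event on chp 5 therefore clears 0x04 from opm/ppm/npm,
 * and only when opm drops to zero is the last-path-gone handling invoked.
 */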
int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
        if (!device->path_data.opm && lpm) {
                device->path_data.opm = lpm;
                dasd_generic_path_operational(device);
        } else
                device->path_data.opm |= lpm;
        return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
        struct dasd_ccw_req *cqr, *n;
        int rc;
        struct list_head freeze_queue;
        struct dasd_device *device = dasd_device_from_cdev(cdev);

        if (IS_ERR(device))
                return PTR_ERR(device);

        /* mark device as suspended */
        set_bit(DASD_FLAG_SUSPENDED, &device->flags);

        if (device->discipline->freeze)
                rc = device->discipline->freeze(device);

        /* disallow new I/O */
        dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
        /* clear active requests */
        INIT_LIST_HEAD(&freeze_queue);
        spin_lock_irq(get_ccwdev_lock(cdev));
        rc = 0;
        list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
                /* Check status and move request to freeze_queue */
                if (cqr->status == DASD_CQR_IN_IO) {
                        rc = device->discipline->term_IO(cqr);
                        if (rc) {
                                /* unable to terminate request */
                                dev_err(&device->cdev->dev,
                                        "Unable to terminate request %p "
                                        "on suspend\n", cqr);
                                spin_unlock_irq(get_ccwdev_lock(cdev));
                                dasd_put_device(device);
                                return rc;
                        }
                }
                list_move_tail(&cqr->devlist, &freeze_queue);
        }

        spin_unlock_irq(get_ccwdev_lock(cdev));

        list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
                wait_event(dasd_flush_wq,
                           (cqr->status != DASD_CQR_CLEAR_PENDING));
                if (cqr->status == DASD_CQR_CLEARED)
                        cqr->status = DASD_CQR_QUEUED;
        }
        /* move freeze_queue to start of the ccw_queue */
        spin_lock_irq(get_ccwdev_lock(cdev));
        list_splice_tail(&freeze_queue, &device->ccw_queue);
        spin_unlock_irq(get_ccwdev_lock(cdev));

        dasd_put_device(device);
        return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
int dasd_generic_restore_device(struct ccw_device *cdev)
{
        struct dasd_device *device = dasd_device_from_cdev(cdev);
        int rc = 0;

        if (IS_ERR(device))
                return PTR_ERR(device);

        /* allow new IO again */
        dasd_device_remove_stop_bits(device,
                                     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));

        dasd_schedule_device_bh(device);

        /*
         * call discipline restore function
         * if device is stopped do nothing e.g. for disconnected devices
         */
        if (device->discipline->restore && !(device->stopped))
                rc = device->discipline->restore(device);
        if (rc || device->stopped)
                /*
                 * if the resume failed for the DASD we put it in
                 * an UNRESUMED stop state
                 */
                device->stopped |= DASD_UNRESUMED_PM;

        if (device->block)
                dasd_schedule_block_bh(device->block);

        clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
        dasd_put_device(device);
        return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
                                                   void *rdc_buffer,
                                                   int rdc_buffer_size,
                                                   int magic)
{
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
        unsigned long *idaw;

        cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

        if (IS_ERR(cqr)) {
                /* internal error 13 - Allocating the RDC request failed */
                dev_err(&device->cdev->dev,
                        "An error occurred in the DASD device driver, "
                        "reason=%s\n", "13");
                return cqr;
        }

        ccw = cqr->cpaddr;
        ccw->cmd_code = CCW_CMD_RDC;
        if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
                idaw = (unsigned long *) (cqr->data);
                ccw->cda = (__u32)(addr_t) idaw;
                ccw->flags = CCW_FLAG_IDA;
                idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
        } else {
                ccw->cda = (__u32)(addr_t) rdc_buffer;
                ccw->flags = 0;
        }

        ccw->count = rdc_buffer_size;
        cqr->startdev = device;
        cqr->memdev = device;
        cqr->expires = 10*HZ;
        cqr->buildclk = get_clock();
        cqr->status = DASD_CQR_FILLED;
        return cqr;
}
int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
                                void *rdc_buffer, int rdc_buffer_size)
{
        int ret;
        struct dasd_ccw_req *cqr;

        cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
                                     magic);
        if (IS_ERR(cqr))
                return PTR_ERR(cqr);

        ret = dasd_sleep_on(cqr);
        dasd_sfree_request(cqr, cqr->memdev);
        return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
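/*
 * Example (illustrative sketch, not part of the original driver): a
 * discipline reads its configuration data during check_device roughly like
 * this, where struct dasd_foo_characteristics and DASD_FOO_MAGIC are
 * hypothetical:
 *
 *      struct dasd_foo_characteristics rdc;
 *      int rc;
 *
 *      rc = dasd_generic_read_dev_chars(device, DASD_FOO_MAGIC,
 *                                       &rdc, sizeof(rdc));
 *      if (rc)
 *              dev_warn(&device->cdev->dev,
 *                       "Read device characteristics failed, rc=%d\n", rc);
 */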
/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
        struct tsb *tsb = NULL;
        char *sense = NULL;

        if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
                if (irb->scsw.tm.tcw)
                        tsb = tcw_get_tsb((struct tcw *)(unsigned long)
                                          irb->scsw.tm.tcw);
                if (tsb && tsb->length == 64 && tsb->flags)
                        switch (tsb->flags & 0x07) {
                        case 1: /* tsa_iostat */
                                sense = tsb->tsa.iostat.sense;
                                break;
                        case 2: /* tsa_ddpc */
                                sense = tsb->tsa.ddpc.sense;
                                break;
                        default:
                                /* currently we don't use interrogate data */
                                break;
                        }
        } else if (irb->esw.esw0.erw.cons) {
                sense = irb->ecw;
        }
        return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);
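/*
 * Example (illustrative sketch): interrupt handlers use this accessor
 * instead of reaching into the irb by hand; the sense byte tested below is
 * hypothetical:
 *
 *      char *sense = dasd_get_sense(irb);
 *
 *      if (sense && (sense[7] == 0x3d))
 *              handle_my_condition(device);    // hypothetical handler
 *
 * The returned pointer is either NULL or a 32-byte sense buffer that is
 * valid in both command mode and transport mode.
 */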
static inline int _wait_for_empty_queues(struct dasd_device *device)
{
        if (device->block)
                return list_empty(&device->ccw_queue) &&
                       list_empty(&device->block->ccw_queue);
        else
                return list_empty(&device->ccw_queue);
}
void dasd_generic_shutdown(struct ccw_device *cdev)
{
        struct dasd_device *device;

        device = dasd_device_from_cdev(cdev);
        if (IS_ERR(device))
                return;

        if (device->block)
                dasd_schedule_block_bh(device->block);

        dasd_schedule_device_bh(device);

        wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);
static int __init dasd_init(void)
{
        int rc;

        init_waitqueue_head(&dasd_init_waitq);
        init_waitqueue_head(&dasd_flush_wq);
        init_waitqueue_head(&generic_waitq);
        init_waitqueue_head(&shutdown_waitq);

        /* register 'common' DASD debug area, used for all DBF_XXX calls */
        dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
        if (dasd_debug_area == NULL) {
                rc = -ENOMEM;
                goto failed;
        }
        debug_register_view(dasd_debug_area, &debug_sprintf_view);
        debug_set_level(dasd_debug_area, DBF_WARNING);

        DBF_EVENT(DBF_EMERG, "%s", "debug area created");

        dasd_diag_discipline_pointer = NULL;

        dasd_statistics_createroot();

        rc = dasd_devmap_init();
        if (rc)
                goto failed;
        rc = dasd_gendisk_init();
        if (rc)
                goto failed;
        rc = dasd_parse();
        if (rc)
                goto failed;
        rc = dasd_eer_init();
        if (rc)
                goto failed;
#ifdef CONFIG_PROC_FS
        rc = dasd_proc_init();
        if (rc)
                goto failed;
#endif

        return 0;
failed:
        pr_info("The DASD device driver could not be initialized\n");
        dasd_exit();
        return rc;
}
module_init(dasd_init);
module_exit(dasd_exit);

EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);

EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_device_clear_timer);
EXPORT_SYMBOL(dasd_block_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_device_bh);
EXPORT_SYMBOL(dasd_schedule_block_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_device_set_timer);
EXPORT_SYMBOL(dasd_block_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);

EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
EXPORT_SYMBOL_GPL(dasd_alloc_block);
EXPORT_SYMBOL_GPL(dasd_free_block);
);