2 * File...........: linux/drivers/s390/block/dasd.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
13 #include <linux/config.h>
14 #include <linux/kmod.h>
15 #include <linux/init.h>
16 #include <linux/interrupt.h>
17 #include <linux/ctype.h>
18 #include <linux/major.h>
19 #include <linux/slab.h>
20 #include <linux/buffer_head.h>
22 #include <asm/ccwdev.h>
23 #include <asm/ebcdic.h>
24 #include <asm/idals.h>
25 #include <asm/todclk.h>
28 #define PRINTK_HEADER "dasd:"
32 * SECTION: Constant definitions to be used within this file
34 #define DASD_CHANQ_MAX_SIZE 4
37 * SECTION: exported variables of dasd.c
39 debug_info_t
*dasd_debug_area
;
40 struct dasd_discipline
*dasd_diag_discipline_pointer
;
42 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
43 MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
44 " Copyright 2000 IBM Corporation");
45 MODULE_SUPPORTED_DEVICE("dasd");
46 MODULE_PARM(dasd
, "1-" __MODULE_STRING(256) "s");
47 MODULE_LICENSE("GPL");
50 * SECTION: prototypes for static functions of dasd.c
52 static int dasd_alloc_queue(struct dasd_device
* device
);
53 static void dasd_setup_queue(struct dasd_device
* device
);
54 static void dasd_free_queue(struct dasd_device
* device
);
55 static void dasd_flush_request_queue(struct dasd_device
*);
56 static void dasd_int_handler(struct ccw_device
*, unsigned long, struct irb
*);
57 static void dasd_flush_ccw_queue(struct dasd_device
*, int);
58 static void dasd_tasklet(struct dasd_device
*);
59 static void do_kick_device(void *data
);
62 * SECTION: Operations on the device structure.
64 static wait_queue_head_t dasd_init_waitq
;
67 * Allocate memory for a new device structure.
70 dasd_alloc_device(void)
72 struct dasd_device
*device
;
74 device
= kmalloc(sizeof (struct dasd_device
), GFP_ATOMIC
);
76 return ERR_PTR(-ENOMEM
);
77 memset(device
, 0, sizeof (struct dasd_device
));
78 /* open_count = 0 means device online but not in use */
79 atomic_set(&device
->open_count
, -1);
81 /* Get two pages for normal block device operations. */
82 device
->ccw_mem
= (void *) __get_free_pages(GFP_ATOMIC
| GFP_DMA
, 1);
83 if (device
->ccw_mem
== NULL
) {
85 return ERR_PTR(-ENOMEM
);
87 /* Get one page for error recovery. */
88 device
->erp_mem
= (void *) get_zeroed_page(GFP_ATOMIC
| GFP_DMA
);
89 if (device
->erp_mem
== NULL
) {
90 free_pages((unsigned long) device
->ccw_mem
, 1);
92 return ERR_PTR(-ENOMEM
);
95 dasd_init_chunklist(&device
->ccw_chunks
, device
->ccw_mem
, PAGE_SIZE
*2);
96 dasd_init_chunklist(&device
->erp_chunks
, device
->erp_mem
, PAGE_SIZE
);
97 spin_lock_init(&device
->mem_lock
);
98 spin_lock_init(&device
->request_queue_lock
);
99 atomic_set (&device
->tasklet_scheduled
, 0);
100 tasklet_init(&device
->tasklet
,
101 (void (*)(unsigned long)) dasd_tasklet
,
102 (unsigned long) device
);
103 INIT_LIST_HEAD(&device
->ccw_queue
);
104 init_timer(&device
->timer
);
105 INIT_WORK(&device
->kick_work
, do_kick_device
, device
);
106 device
->state
= DASD_STATE_NEW
;
107 device
->target
= DASD_STATE_NEW
;
113 * Free memory of a device structure.
116 dasd_free_device(struct dasd_device
*device
)
118 kfree(device
->private);
119 free_page((unsigned long) device
->erp_mem
);
120 free_pages((unsigned long) device
->ccw_mem
, 1);
125 * Make a new device known to the system.
128 dasd_state_new_to_known(struct dasd_device
*device
)
133 * As long as the device is not in state DASD_STATE_NEW we want to
134 * keep the reference count > 0.
136 dasd_get_device(device
);
138 rc
= dasd_alloc_queue(device
);
140 dasd_put_device(device
);
144 device
->state
= DASD_STATE_KNOWN
;
149 * Let the system forget about a device.
152 dasd_state_known_to_new(struct dasd_device
* device
)
154 /* Forget the discipline information. */
155 device
->discipline
= NULL
;
156 device
->state
= DASD_STATE_NEW
;
158 dasd_free_queue(device
);
160 /* Give up reference we took in dasd_state_new_to_known. */
161 dasd_put_device(device
);
165 * Request the irq line for the device.
168 dasd_state_known_to_basic(struct dasd_device
* device
)
172 /* Allocate and register gendisk structure. */
173 rc
= dasd_gendisk_alloc(device
);
177 /* register 'device' debug area, used for all DBF_DEV_XXX calls */
178 device
->debug_area
= debug_register(device
->cdev
->dev
.bus_id
, 1, 2,
180 debug_register_view(device
->debug_area
, &debug_sprintf_view
);
181 debug_set_level(device
->debug_area
, DBF_EMERG
);
182 DBF_DEV_EVENT(DBF_EMERG
, device
, "%s", "debug area created");
184 device
->state
= DASD_STATE_BASIC
;
189 * Release the irq line for the device. Terminate any running i/o.
192 dasd_state_basic_to_known(struct dasd_device
* device
)
194 dasd_gendisk_free(device
);
195 dasd_flush_ccw_queue(device
, 1);
196 DBF_DEV_EVENT(DBF_EMERG
, device
, "%p debug area deleted", device
);
197 if (device
->debug_area
!= NULL
) {
198 debug_unregister(device
->debug_area
);
199 device
->debug_area
= NULL
;
201 device
->state
= DASD_STATE_KNOWN
;
205 * Do the initial analysis. The do_analysis function may return
206 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
207 * until the discipline decides to continue the startup sequence
208 * by calling the function dasd_change_state. The eckd disciplines
209 * uses this to start a ccw that detects the format. The completion
210 * interrupt for this detection ccw uses the kernel event daemon to
211 * trigger the call to dasd_change_state. All this is done in the
212 * discipline code, see dasd_eckd.c.
213 * After the analysis ccw is done (do_analysis returned 0 or error)
214 * the block device is setup. Either a fake disk is added to allow
215 * formatting or a proper device request queue is created.
218 dasd_state_basic_to_ready(struct dasd_device
* device
)
223 if (device
->discipline
->do_analysis
!= NULL
)
224 rc
= device
->discipline
->do_analysis(device
);
227 dasd_setup_queue(device
);
228 device
->state
= DASD_STATE_READY
;
229 if (dasd_scan_partitions(device
) != 0)
230 device
->state
= DASD_STATE_BASIC
;
235 * Remove device from block device layer. Destroy dirty buffers.
236 * Forget format information. Check if the target level is basic
237 * and if it is create fake disk for formatting.
240 dasd_state_ready_to_basic(struct dasd_device
* device
)
242 dasd_flush_ccw_queue(device
, 0);
243 dasd_destroy_partitions(device
);
244 dasd_flush_request_queue(device
);
246 device
->bp_block
= 0;
247 device
->s2b_shift
= 0;
248 device
->state
= DASD_STATE_BASIC
;
252 * Make the device online and schedule the bottom half to start
253 * the requeueing of requests from the linux request queue to the
257 dasd_state_ready_to_online(struct dasd_device
* device
)
259 device
->state
= DASD_STATE_ONLINE
;
260 dasd_schedule_bh(device
);
265 * Stop the requeueing of requests again.
268 dasd_state_online_to_ready(struct dasd_device
* device
)
270 device
->state
= DASD_STATE_READY
;
274 * Device startup state changes.
277 dasd_increase_state(struct dasd_device
*device
)
282 if (device
->state
== DASD_STATE_NEW
&&
283 device
->target
>= DASD_STATE_KNOWN
)
284 rc
= dasd_state_new_to_known(device
);
287 device
->state
== DASD_STATE_KNOWN
&&
288 device
->target
>= DASD_STATE_BASIC
)
289 rc
= dasd_state_known_to_basic(device
);
292 device
->state
== DASD_STATE_BASIC
&&
293 device
->target
>= DASD_STATE_READY
)
294 rc
= dasd_state_basic_to_ready(device
);
297 device
->state
== DASD_STATE_READY
&&
298 device
->target
>= DASD_STATE_ONLINE
)
299 rc
= dasd_state_ready_to_online(device
);
305 * Device shutdown state changes.
308 dasd_decrease_state(struct dasd_device
*device
)
310 if (device
->state
== DASD_STATE_ONLINE
&&
311 device
->target
<= DASD_STATE_READY
)
312 dasd_state_online_to_ready(device
);
314 if (device
->state
== DASD_STATE_READY
&&
315 device
->target
<= DASD_STATE_BASIC
)
316 dasd_state_ready_to_basic(device
);
318 if (device
->state
== DASD_STATE_BASIC
&&
319 device
->target
<= DASD_STATE_KNOWN
)
320 dasd_state_basic_to_known(device
);
322 if (device
->state
== DASD_STATE_KNOWN
&&
323 device
->target
<= DASD_STATE_NEW
)
324 dasd_state_known_to_new(device
);
330 * This is the main startup/shutdown routine.
333 dasd_change_state(struct dasd_device
*device
)
337 if (device
->state
== device
->target
)
338 /* Already where we want to go today... */
340 if (device
->state
< device
->target
)
341 rc
= dasd_increase_state(device
);
343 rc
= dasd_decrease_state(device
);
344 if (rc
&& rc
!= -EAGAIN
)
345 device
->target
= device
->state
;
347 if (device
->state
== device
->target
)
348 wake_up(&dasd_init_waitq
);
352 * Kick starter for devices that did not complete the startup/shutdown
353 * procedure or were sleeping because of a pending state.
354 * dasd_kick_device will schedule a call do do_kick_device to the kernel
/*
 * Work-queue callback for dasd_kick_device: run the state machine, kick the
 * bottom half and drop the reference taken by dasd_kick_device.
 */
static void
do_kick_device(void *data)
{
	struct dasd_device *device;

	device = (struct dasd_device *) data;
	dasd_change_state(device);
	dasd_schedule_bh(device);
	dasd_put_device(device);
}
369 dasd_kick_device(struct dasd_device
*device
)
371 dasd_get_device(device
);
372 /* queue call to dasd_kick_device to the kernel event daemon. */
373 schedule_work(&device
->kick_work
);
377 * Set the target state for a device and starts the state change.
380 dasd_set_target_state(struct dasd_device
*device
, int target
)
382 /* If we are in probeonly mode stop at DASD_STATE_READY. */
383 if (dasd_probeonly
&& target
> DASD_STATE_READY
)
384 target
= DASD_STATE_READY
;
385 if (device
->target
!= target
) {
386 if (device
->state
== target
)
387 wake_up(&dasd_init_waitq
);
388 device
->target
= target
;
390 if (device
->state
!= device
->target
)
391 dasd_change_state(device
);
395 * Enable devices with device numbers in [from..to].
398 _wait_for_device(struct dasd_device
*device
)
400 return (device
->state
== device
->target
);
404 dasd_enable_device(struct dasd_device
*device
)
406 dasd_set_target_state(device
, DASD_STATE_ONLINE
);
407 if (device
->state
<= DASD_STATE_KNOWN
)
408 /* No discipline for device found. */
409 dasd_set_target_state(device
, DASD_STATE_NEW
);
410 /* Now wait for the devices to come up. */
411 wait_event(dasd_init_waitq
, _wait_for_device(device
));
415 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
417 #ifdef CONFIG_DASD_PROFILE
419 struct dasd_profile_info_t dasd_global_profile
;
420 unsigned int dasd_profile_level
= DASD_PROFILE_OFF
;
423 * Increments counter in global and local profiling structures.
/*
 * Increments counter in global and local profiling structures.
 * 'value' is bucketed logarithmically: the loop finds the first bucket
 * index (0..30) for which value >> (2+index) is zero.
 */
#define dasd_profile_counter(value, counter, device) \
{ \
	int index; \
	for (index = 0; index < 31 && value >> (2+index); index++); \
	dasd_global_profile.counter[index]++; \
	device->profile.counter[index]++; \
}
434 * Add profiling information for cqr before execution.
437 dasd_profile_start(struct dasd_device
*device
, struct dasd_ccw_req
* cqr
,
441 unsigned int counter
;
443 if (dasd_profile_level
!= DASD_PROFILE_ON
)
446 /* count the length of the chanq for statistics */
448 list_for_each(l
, &device
->ccw_queue
)
451 dasd_global_profile
.dasd_io_nr_req
[counter
]++;
452 device
->profile
.dasd_io_nr_req
[counter
]++;
456 * Add profiling information for cqr after execution.
459 dasd_profile_end(struct dasd_device
*device
, struct dasd_ccw_req
* cqr
,
462 long strtime
, irqtime
, endtime
, tottime
; /* in microseconds */
463 long tottimeps
, sectors
;
465 if (dasd_profile_level
!= DASD_PROFILE_ON
)
468 sectors
= req
->nr_sectors
;
469 if (!cqr
->buildclk
|| !cqr
->startclk
||
470 !cqr
->stopclk
|| !cqr
->endclk
||
474 strtime
= ((cqr
->startclk
- cqr
->buildclk
) >> 12);
475 irqtime
= ((cqr
->stopclk
- cqr
->startclk
) >> 12);
476 endtime
= ((cqr
->endclk
- cqr
->stopclk
) >> 12);
477 tottime
= ((cqr
->endclk
- cqr
->buildclk
) >> 12);
478 tottimeps
= tottime
/ sectors
;
480 if (!dasd_global_profile
.dasd_io_reqs
)
481 memset(&dasd_global_profile
, 0,
482 sizeof (struct dasd_profile_info_t
));
483 dasd_global_profile
.dasd_io_reqs
++;
484 dasd_global_profile
.dasd_io_sects
+= sectors
;
486 if (!device
->profile
.dasd_io_reqs
)
487 memset(&device
->profile
, 0,
488 sizeof (struct dasd_profile_info_t
));
489 device
->profile
.dasd_io_reqs
++;
490 device
->profile
.dasd_io_sects
+= sectors
;
492 dasd_profile_counter(sectors
, dasd_io_secs
, device
);
493 dasd_profile_counter(tottime
, dasd_io_times
, device
);
494 dasd_profile_counter(tottimeps
, dasd_io_timps
, device
);
495 dasd_profile_counter(strtime
, dasd_io_time1
, device
);
496 dasd_profile_counter(irqtime
, dasd_io_time2
, device
);
497 dasd_profile_counter(irqtime
/ sectors
, dasd_io_time2ps
, device
);
498 dasd_profile_counter(endtime
, dasd_io_time3
, device
);
501 #define dasd_profile_start(device, cqr, req) do {} while (0)
502 #define dasd_profile_end(device, cqr, req) do {} while (0)
503 #endif /* CONFIG_DASD_PROFILE */
506 * Allocate memory for a channel program with 'cplength' channel
507 * command words and 'datasize' additional space. There are two
508 * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed
509 * memory and 2) dasd_smalloc_request uses the static ccw memory
510 * that gets allocated for each device.
512 struct dasd_ccw_req
*
513 dasd_kmalloc_request(char *magic
, int cplength
, int datasize
,
514 struct dasd_device
* device
)
516 struct dasd_ccw_req
*cqr
;
519 if ( magic
== NULL
|| datasize
> PAGE_SIZE
||
520 (cplength
*sizeof(struct ccw1
)) > PAGE_SIZE
)
523 cqr
= kmalloc(sizeof(struct dasd_ccw_req
), GFP_ATOMIC
);
525 return ERR_PTR(-ENOMEM
);
526 memset(cqr
, 0, sizeof(struct dasd_ccw_req
));
529 cqr
->cpaddr
= kmalloc(cplength
*sizeof(struct ccw1
),
530 GFP_ATOMIC
| GFP_DMA
);
531 if (cqr
->cpaddr
== NULL
) {
533 return ERR_PTR(-ENOMEM
);
535 memset(cqr
->cpaddr
, 0, cplength
*sizeof(struct ccw1
));
539 cqr
->data
= kmalloc(datasize
, GFP_ATOMIC
| GFP_DMA
);
540 if (cqr
->data
== NULL
) {
543 return ERR_PTR(-ENOMEM
);
545 memset(cqr
->data
, 0, datasize
);
547 strncpy((char *) &cqr
->magic
, magic
, 4);
548 ASCEBC((char *) &cqr
->magic
, 4);
549 set_bit(DASD_CQR_FLAGS_USE_ERP
, &cqr
->flags
);
550 dasd_get_device(device
);
554 struct dasd_ccw_req
*
555 dasd_smalloc_request(char *magic
, int cplength
, int datasize
,
556 struct dasd_device
* device
)
559 struct dasd_ccw_req
*cqr
;
564 if ( magic
== NULL
|| datasize
> PAGE_SIZE
||
565 (cplength
*sizeof(struct ccw1
)) > PAGE_SIZE
)
568 size
= (sizeof(struct dasd_ccw_req
) + 7L) & -8L;
570 size
+= cplength
* sizeof(struct ccw1
);
573 spin_lock_irqsave(&device
->mem_lock
, flags
);
574 cqr
= (struct dasd_ccw_req
*)
575 dasd_alloc_chunk(&device
->ccw_chunks
, size
);
576 spin_unlock_irqrestore(&device
->mem_lock
, flags
);
578 return ERR_PTR(-ENOMEM
);
579 memset(cqr
, 0, sizeof(struct dasd_ccw_req
));
580 data
= (char *) cqr
+ ((sizeof(struct dasd_ccw_req
) + 7L) & -8L);
583 cqr
->cpaddr
= (struct ccw1
*) data
;
584 data
+= cplength
*sizeof(struct ccw1
);
585 memset(cqr
->cpaddr
, 0, cplength
*sizeof(struct ccw1
));
590 memset(cqr
->data
, 0, datasize
);
592 strncpy((char *) &cqr
->magic
, magic
, 4);
593 ASCEBC((char *) &cqr
->magic
, 4);
594 set_bit(DASD_CQR_FLAGS_USE_ERP
, &cqr
->flags
);
595 dasd_get_device(device
);
600 * Free memory of a channel program. This function needs to free all the
601 * idal lists that might have been created by dasd_set_cda and the
602 * struct dasd_ccw_req itself.
605 dasd_kfree_request(struct dasd_ccw_req
* cqr
, struct dasd_device
* device
)
607 #ifdef CONFIG_ARCH_S390X
610 /* Clear any idals used for the request. */
613 clear_normalized_cda(ccw
);
614 } while (ccw
++->flags
& (CCW_FLAG_CC
| CCW_FLAG_DC
));
619 dasd_put_device(device
);
623 dasd_sfree_request(struct dasd_ccw_req
* cqr
, struct dasd_device
* device
)
627 spin_lock_irqsave(&device
->mem_lock
, flags
);
628 dasd_free_chunk(&device
->ccw_chunks
, cqr
);
629 spin_unlock_irqrestore(&device
->mem_lock
, flags
);
630 dasd_put_device(device
);
634 * Check discipline magic in cqr.
637 dasd_check_cqr(struct dasd_ccw_req
*cqr
)
639 struct dasd_device
*device
;
643 device
= cqr
->device
;
644 if (strncmp((char *) &cqr
->magic
, device
->discipline
->ebcname
, 4)) {
645 DEV_MESSAGE(KERN_WARNING
, device
,
646 " dasd_ccw_req 0x%08x magic doesn't match"
647 " discipline 0x%08x",
649 *(unsigned int *) device
->discipline
->name
);
656 * Terminate the current i/o and set the request to clear_pending.
657 * Timer keeps device runnig.
658 * ccw_device_clear can fail if the i/o subsystem
662 dasd_term_IO(struct dasd_ccw_req
* cqr
)
664 struct dasd_device
*device
;
668 rc
= dasd_check_cqr(cqr
);
672 device
= (struct dasd_device
*) cqr
->device
;
673 while ((retries
< 5) && (cqr
->status
== DASD_CQR_IN_IO
)) {
674 rc
= ccw_device_clear(device
->cdev
, (long) cqr
);
676 case 0: /* termination successful */
677 if (cqr
->retries
> 0) {
679 cqr
->status
= DASD_CQR_CLEAR
;
681 cqr
->status
= DASD_CQR_FAILED
;
682 cqr
->stopclk
= get_clock();
683 DBF_DEV_EVENT(DBF_DEBUG
, device
,
684 "terminate cqr %p successful",
688 DBF_DEV_EVENT(DBF_ERR
, device
, "%s",
689 "device gone, retry");
692 DBF_DEV_EVENT(DBF_ERR
, device
, "%s",
697 DBF_DEV_EVENT(DBF_ERR
, device
, "%s",
698 "device busy, retry later");
701 DEV_MESSAGE(KERN_ERR
, device
,
702 "line %d unknown RC=%d, please "
703 "report to linux390@de.ibm.com",
710 dasd_schedule_bh(device
);
715 * Start the i/o. This start_IO can fail if the channel is really busy.
716 * In that case set up a timer to start the request later.
719 dasd_start_IO(struct dasd_ccw_req
* cqr
)
721 struct dasd_device
*device
;
725 rc
= dasd_check_cqr(cqr
);
728 device
= (struct dasd_device
*) cqr
->device
;
729 if (cqr
->retries
< 0) {
730 DEV_MESSAGE(KERN_DEBUG
, device
,
731 "start_IO: request %p (%02x/%i) - no retry left.",
732 cqr
, cqr
->status
, cqr
->retries
);
733 cqr
->status
= DASD_CQR_FAILED
;
736 cqr
->startclk
= get_clock();
737 cqr
->starttime
= jiffies
;
739 rc
= ccw_device_start(device
->cdev
, cqr
->cpaddr
, (long) cqr
,
743 cqr
->status
= DASD_CQR_IN_IO
;
744 DBF_DEV_EVENT(DBF_DEBUG
, device
,
745 "start_IO: request %p started successful",
749 DBF_DEV_EVENT(DBF_ERR
, device
, "%s",
750 "start_IO: device busy, retry later");
753 DBF_DEV_EVENT(DBF_ERR
, device
, "%s",
754 "start_IO: request timeout, retry later");
757 /* -EACCES indicates that the request used only a
758 * subset of the available pathes and all these
760 * Do a retry with all available pathes.
762 cqr
->lpm
= LPM_ANYPATH
;
763 DBF_DEV_EVENT(DBF_ERR
, device
, "%s",
764 "start_IO: selected pathes gone,"
765 " retry on all pathes");
769 DBF_DEV_EVENT(DBF_ERR
, device
, "%s",
770 "start_IO: device gone, retry");
773 DEV_MESSAGE(KERN_ERR
, device
,
774 "line %d unknown RC=%d, please report"
775 " to linux390@de.ibm.com", __LINE__
, rc
);
783 * Timeout function for dasd devices. This is used for different purposes
784 * 1) missing interrupt handler for normal operation
785 * 2) delayed start of request where start_IO failed with -EBUSY
786 * 3) timeout for missing state change interrupts
787 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
788 * DASD_CQR_QUEUED for 2) and 3).
791 dasd_timeout_device(unsigned long ptr
)
794 struct dasd_device
*device
;
796 device
= (struct dasd_device
*) ptr
;
797 spin_lock_irqsave(get_ccwdev_lock(device
->cdev
), flags
);
798 /* re-activate request queue */
799 device
->stopped
&= ~DASD_STOPPED_PENDING
;
800 spin_unlock_irqrestore(get_ccwdev_lock(device
->cdev
), flags
);
801 dasd_schedule_bh(device
);
805 * Setup timeout for a device in jiffies.
808 dasd_set_timer(struct dasd_device
*device
, int expires
)
811 if (timer_pending(&device
->timer
))
812 del_timer(&device
->timer
);
815 if (timer_pending(&device
->timer
)) {
816 if (mod_timer(&device
->timer
, jiffies
+ expires
))
819 device
->timer
.function
= dasd_timeout_device
;
820 device
->timer
.data
= (unsigned long) device
;
821 device
->timer
.expires
= jiffies
+ expires
;
822 add_timer(&device
->timer
);
826 * Clear timeout for a device.
829 dasd_clear_timer(struct dasd_device
*device
)
831 if (timer_pending(&device
->timer
))
832 del_timer(&device
->timer
);
836 dasd_handle_killed_request(struct ccw_device
*cdev
, unsigned long intparm
)
838 struct dasd_ccw_req
*cqr
;
839 struct dasd_device
*device
;
841 cqr
= (struct dasd_ccw_req
*) intparm
;
842 if (cqr
->status
!= DASD_CQR_IN_IO
) {
844 "invalid status in handle_killed_request: "
845 "bus_id %s, status %02x",
846 cdev
->dev
.bus_id
, cqr
->status
);
850 device
= (struct dasd_device
*) cqr
->device
;
851 if (device
== NULL
||
852 device
!= dasd_device_from_cdev(cdev
) ||
853 strncmp(device
->discipline
->ebcname
, (char *) &cqr
->magic
, 4)) {
854 MESSAGE(KERN_DEBUG
, "invalid device in request: bus_id %s",
859 /* Schedule request to be retried. */
860 cqr
->status
= DASD_CQR_QUEUED
;
862 dasd_clear_timer(device
);
863 dasd_schedule_bh(device
);
864 dasd_put_device(device
);
868 dasd_handle_state_change_pending(struct dasd_device
*device
)
870 struct dasd_ccw_req
*cqr
;
871 struct list_head
*l
, *n
;
873 device
->stopped
&= ~DASD_STOPPED_PENDING
;
875 /* restart all 'running' IO on queue */
876 list_for_each_safe(l
, n
, &device
->ccw_queue
) {
877 cqr
= list_entry(l
, struct dasd_ccw_req
, list
);
878 if (cqr
->status
== DASD_CQR_IN_IO
) {
879 cqr
->status
= DASD_CQR_QUEUED
;
882 dasd_clear_timer(device
);
883 dasd_schedule_bh(device
);
887 * Interrupt handler for "normal" ssch-io based dasd devices.
890 dasd_int_handler(struct ccw_device
*cdev
, unsigned long intparm
,
893 struct dasd_ccw_req
*cqr
, *next
;
894 struct dasd_device
*device
;
895 unsigned long long now
;
901 switch (PTR_ERR(irb
)) {
903 dasd_handle_killed_request(cdev
, intparm
);
906 printk(KERN_WARNING
"%s(%s): request timed out\n",
907 __FUNCTION__
, cdev
->dev
.bus_id
);
908 //FIXME - dasd uses own timeout interface...
911 printk(KERN_WARNING
"%s(%s): unknown error %ld\n",
912 __FUNCTION__
, cdev
->dev
.bus_id
, PTR_ERR(irb
));
919 DBF_EVENT(DBF_ERR
, "Interrupt: bus_id %s CS/DS %04x ip %08x",
920 cdev
->dev
.bus_id
, ((irb
->scsw
.cstat
<<8)|irb
->scsw
.dstat
),
921 (unsigned int) intparm
);
923 /* first of all check for state change pending interrupt */
924 mask
= DEV_STAT_ATTENTION
| DEV_STAT_DEV_END
| DEV_STAT_UNIT_EXCEP
;
925 if ((irb
->scsw
.dstat
& mask
) == mask
) {
926 device
= dasd_device_from_cdev(cdev
);
927 if (!IS_ERR(device
)) {
928 dasd_handle_state_change_pending(device
);
929 dasd_put_device(device
);
934 cqr
= (struct dasd_ccw_req
*) intparm
;
936 /* check for unsolicited interrupts */
939 "unsolicited interrupt received: bus_id %s",
944 device
= (struct dasd_device
*) cqr
->device
;
945 if (device
== NULL
||
946 strncmp(device
->discipline
->ebcname
, (char *) &cqr
->magic
, 4)) {
947 MESSAGE(KERN_DEBUG
, "invalid device in request: bus_id %s",
952 /* Check for clear pending */
953 if (cqr
->status
== DASD_CQR_CLEAR
&&
954 irb
->scsw
.fctl
& SCSW_FCTL_CLEAR_FUNC
) {
955 cqr
->status
= DASD_CQR_QUEUED
;
956 dasd_clear_timer(device
);
957 dasd_schedule_bh(device
);
961 /* check status - the request might have been killed by dyn detach */
962 if (cqr
->status
!= DASD_CQR_IN_IO
) {
964 "invalid status: bus_id %s, status %02x",
965 cdev
->dev
.bus_id
, cqr
->status
);
968 DBF_DEV_EVENT(DBF_DEBUG
, device
, "Int: CS/DS 0x%04x for cqr %p",
969 ((irb
->scsw
.cstat
<< 8) | irb
->scsw
.dstat
), cqr
);
971 /* Find out the appropriate era_action. */
972 if (irb
->scsw
.fctl
& SCSW_FCTL_HALT_FUNC
)
973 era
= dasd_era_fatal
;
974 else if (irb
->scsw
.dstat
== (DEV_STAT_CHN_END
| DEV_STAT_DEV_END
) &&
975 irb
->scsw
.cstat
== 0 &&
976 !irb
->esw
.esw0
.erw
.cons
)
978 else if (!test_bit(DASD_CQR_FLAGS_USE_ERP
, &cqr
->flags
))
979 era
= dasd_era_fatal
; /* don't recover this request */
980 else if (irb
->esw
.esw0
.erw
.cons
)
981 era
= device
->discipline
->examine_error(cqr
, irb
);
983 era
= dasd_era_recover
;
985 DBF_DEV_EVENT(DBF_DEBUG
, device
, "era_code %d", era
);
987 if (era
== dasd_era_none
) {
988 cqr
->status
= DASD_CQR_DONE
;
990 /* Start first request on queue if possible -> fast_io. */
991 if (cqr
->list
.next
!= &device
->ccw_queue
) {
992 next
= list_entry(cqr
->list
.next
,
993 struct dasd_ccw_req
, list
);
994 if ((next
->status
== DASD_CQR_QUEUED
) &&
995 (!device
->stopped
)) {
996 if (device
->discipline
->start_IO(next
) == 0)
997 expires
= next
->expires
;
999 DEV_MESSAGE(KERN_DEBUG
, device
, "%s",
1000 "Interrupt fastpath "
1004 } else { /* error */
1005 memcpy(&cqr
->irb
, irb
, sizeof (struct irb
));
1007 /* dump sense data */
1008 dasd_log_sense(cqr
, irb
);
1011 case dasd_era_fatal
:
1012 cqr
->status
= DASD_CQR_FAILED
;
1015 case dasd_era_recover
:
1016 cqr
->status
= DASD_CQR_ERROR
;
1023 dasd_set_timer(device
, expires
);
1025 dasd_clear_timer(device
);
1026 dasd_schedule_bh(device
);
1030 * posts the buffer_cache about a finalized request
1033 dasd_end_request(struct request
*req
, int uptodate
)
1035 if (end_that_request_first(req
, uptodate
, req
->hard_nr_sectors
))
1037 add_disk_randomness(req
->rq_disk
);
1038 end_that_request_last(req
);
1042 * Process finished error recovery ccw.
1045 __dasd_process_erp(struct dasd_device
*device
, struct dasd_ccw_req
*cqr
)
1047 dasd_erp_fn_t erp_fn
;
1049 if (cqr
->status
== DASD_CQR_DONE
)
1050 DBF_DEV_EVENT(DBF_NOTICE
, device
, "%s", "ERP successful");
1052 DEV_MESSAGE(KERN_ERR
, device
, "%s", "ERP unsuccessful");
1053 erp_fn
= device
->discipline
->erp_postaction(cqr
);
1058 * Process ccw request queue.
1061 __dasd_process_ccw_queue(struct dasd_device
* device
,
1062 struct list_head
*final_queue
)
1064 struct list_head
*l
, *n
;
1065 struct dasd_ccw_req
*cqr
;
1066 dasd_erp_fn_t erp_fn
;
1069 /* Process request with final status. */
1070 list_for_each_safe(l
, n
, &device
->ccw_queue
) {
1071 cqr
= list_entry(l
, struct dasd_ccw_req
, list
);
1072 /* Stop list processing at the first non-final request. */
1073 if (cqr
->status
!= DASD_CQR_DONE
&&
1074 cqr
->status
!= DASD_CQR_FAILED
&&
1075 cqr
->status
!= DASD_CQR_ERROR
)
1077 /* Process requests with DASD_CQR_ERROR */
1078 if (cqr
->status
== DASD_CQR_ERROR
) {
1079 if (cqr
->irb
.scsw
.fctl
& SCSW_FCTL_HALT_FUNC
) {
1080 cqr
->status
= DASD_CQR_FAILED
;
1081 cqr
->stopclk
= get_clock();
1083 if (cqr
->irb
.esw
.esw0
.erw
.cons
) {
1084 erp_fn
= device
->discipline
->
1088 dasd_default_erp_action(cqr
);
1092 /* Process finished ERP request. */
1094 __dasd_process_erp(device
, cqr
);
1098 /* Rechain finished requests to final queue */
1099 cqr
->endclk
= get_clock();
1100 list_move_tail(&cqr
->list
, final_queue
);
1105 dasd_end_request_cb(struct dasd_ccw_req
* cqr
, void *data
)
1107 struct request
*req
;
1108 struct dasd_device
*device
;
1111 req
= (struct request
*) data
;
1112 device
= cqr
->device
;
1113 dasd_profile_end(device
, cqr
, req
);
1114 status
= cqr
->device
->discipline
->free_cp(cqr
,req
);
1115 spin_lock_irq(&device
->request_queue_lock
);
1116 dasd_end_request(req
, status
);
1117 spin_unlock_irq(&device
->request_queue_lock
);
1122 * Fetch requests from the block device queue.
1125 __dasd_process_blk_queue(struct dasd_device
* device
)
1127 request_queue_t
*queue
;
1128 struct request
*req
;
1129 struct dasd_ccw_req
*cqr
;
1132 queue
= device
->request_queue
;
1133 /* No queue ? Then there is nothing to do. */
1138 * We requeue request from the block device queue to the ccw
1139 * queue only in two states. In state DASD_STATE_READY the
1140 * partition detection is done and we need to requeue requests
1141 * for that. State DASD_STATE_ONLINE is normal block device
1144 if (device
->state
!= DASD_STATE_READY
&&
1145 device
->state
!= DASD_STATE_ONLINE
)
1148 /* Now we try to fetch requests from the request queue */
1149 list_for_each_entry(cqr
, &device
->ccw_queue
, list
)
1150 if (cqr
->status
== DASD_CQR_QUEUED
)
1152 while (!blk_queue_plugged(queue
) &&
1153 elv_next_request(queue
) &&
1154 nr_queued
< DASD_CHANQ_MAX_SIZE
) {
1155 req
= elv_next_request(queue
);
1157 if (device
->features
& DASD_FEATURE_READONLY
&&
1158 rq_data_dir(req
) == WRITE
) {
1159 DBF_DEV_EVENT(DBF_ERR
, device
,
1160 "Rejecting write request %p",
1162 blkdev_dequeue_request(req
);
1163 dasd_end_request(req
, 0);
1166 if (device
->stopped
& DASD_STOPPED_DC_EIO
) {
1167 blkdev_dequeue_request(req
);
1168 dasd_end_request(req
, 0);
1171 cqr
= device
->discipline
->build_cp(device
, req
);
1173 if (PTR_ERR(cqr
) == -ENOMEM
)
1174 break; /* terminate request queue loop */
1175 DBF_DEV_EVENT(DBF_ERR
, device
,
1176 "CCW creation failed (rc=%ld) "
1179 blkdev_dequeue_request(req
);
1180 dasd_end_request(req
, 0);
1183 cqr
->callback
= dasd_end_request_cb
;
1184 cqr
->callback_data
= (void *) req
;
1185 cqr
->status
= DASD_CQR_QUEUED
;
1186 blkdev_dequeue_request(req
);
1187 list_add_tail(&cqr
->list
, &device
->ccw_queue
);
1188 dasd_profile_start(device
, cqr
, req
);
1194 * Take a look at the first request on the ccw queue and check
1195 * if it reached its expire time. If so, terminate the IO.
1198 __dasd_check_expire(struct dasd_device
* device
)
1200 struct dasd_ccw_req
*cqr
;
1202 if (list_empty(&device
->ccw_queue
))
1204 cqr
= list_entry(device
->ccw_queue
.next
, struct dasd_ccw_req
, list
);
1205 if (cqr
->status
== DASD_CQR_IN_IO
&& cqr
->expires
!= 0) {
1206 if (time_after_eq(jiffies
, cqr
->expires
+ cqr
->starttime
)) {
1207 if (device
->discipline
->term_IO(cqr
) != 0)
1208 /* Hmpf, try again in 1/10 sec */
1209 dasd_set_timer(device
, 10);
1215 * Take a look at the first request on the ccw queue and check
1216 * if it needs to be started.
1219 __dasd_start_head(struct dasd_device
* device
)
1221 struct dasd_ccw_req
*cqr
;
1224 if (list_empty(&device
->ccw_queue
))
1226 cqr
= list_entry(device
->ccw_queue
.next
, struct dasd_ccw_req
, list
);
1227 if ((cqr
->status
== DASD_CQR_QUEUED
) &&
1228 (!device
->stopped
)) {
1229 /* try to start the first I/O that can be started */
1230 rc
= device
->discipline
->start_IO(cqr
);
1232 dasd_set_timer(device
, cqr
->expires
);
1233 else if (rc
== -EACCES
) {
1234 dasd_schedule_bh(device
);
1236 /* Hmpf, try again in 1/2 sec */
1237 dasd_set_timer(device
, 50);
1242 * Remove requests from the ccw queue.
1245 dasd_flush_ccw_queue(struct dasd_device
* device
, int all
)
1247 struct list_head flush_queue
;
1248 struct list_head
*l
, *n
;
1249 struct dasd_ccw_req
*cqr
;
1251 INIT_LIST_HEAD(&flush_queue
);
1252 spin_lock_irq(get_ccwdev_lock(device
->cdev
));
1253 list_for_each_safe(l
, n
, &device
->ccw_queue
) {
1254 cqr
= list_entry(l
, struct dasd_ccw_req
, list
);
1255 /* Flush all request or only block device requests? */
1256 if (all
== 0 && cqr
->callback
== dasd_end_request_cb
)
1258 if (cqr
->status
== DASD_CQR_IN_IO
)
1259 device
->discipline
->term_IO(cqr
);
1260 if (cqr
->status
!= DASD_CQR_DONE
||
1261 cqr
->status
!= DASD_CQR_FAILED
) {
1262 cqr
->status
= DASD_CQR_FAILED
;
1263 cqr
->stopclk
= get_clock();
1265 /* Process finished ERP request. */
1267 __dasd_process_erp(device
, cqr
);
1270 /* Rechain request on device request queue */
1271 cqr
->endclk
= get_clock();
1272 list_move_tail(&cqr
->list
, &flush_queue
);
1274 spin_unlock_irq(get_ccwdev_lock(device
->cdev
));
1275 /* Now call the callback function of flushed requests */
1276 list_for_each_safe(l
, n
, &flush_queue
) {
1277 cqr
= list_entry(l
, struct dasd_ccw_req
, list
);
1278 if (cqr
->callback
!= NULL
)
1279 (cqr
->callback
)(cqr
, cqr
->callback_data
);
/*
 * Acquire the device lock and process queues for the device.
 *
 * Tasklet (softirq) body scheduled by dasd_schedule_bh().  Order of
 * operations: clear the scheduled flag, finish off completed requests
 * under the ccwdev lock, run their callbacks lock-free, then refill the
 * ccw queue from the block layer and kick the queue head.  Drops the
 * device reference taken by dasd_schedule_bh().
 */
static void
dasd_tasklet(struct dasd_device * device)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Re-arm: allow dasd_schedule_bh() to schedule us again. */
	atomic_set (&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_check_expire(device);
	/* Finish off requests on ccw queue */
	__dasd_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	/* (done without the ccwdev lock held - callbacks may sleep/lock) */
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		list_del(&cqr->list);
		if (cqr->callback != NULL)
			(cqr->callback)(cqr, cqr->callback_data);
	}
	/* Lock order: request_queue_lock first, then ccwdev lock. */
	spin_lock_irq(&device->request_queue_lock);
	spin_lock(get_ccwdev_lock(device->cdev));
	/* Get new request from the block device request queue */
	__dasd_process_blk_queue(device);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_start_head(device);
	spin_unlock(get_ccwdev_lock(device->cdev));
	spin_unlock_irq(&device->request_queue_lock);
	/* Release the reference taken when the tasklet was scheduled. */
	dasd_put_device(device);
}
/*
 * Schedules a call to dasd_tasklet over the device tasklet.
 *
 * Takes a device reference (released by dasd_tasklet) and uses an
 * atomic compare-and-swap on tasklet_scheduled so at most one schedule
 * is outstanding at a time.
 */
void
dasd_schedule_bh(struct dasd_device * device)
{
	/* Protect against rescheduling. */
	if (atomic_compare_and_swap (0, 1, &device->tasklet_scheduled))
		return;	/* already scheduled - nothing to do */
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}
/*
 * Queue a request to the head of the ccw_queue. Start the I/O if
 * possible - the actual start is deferred to the bh so requests are
 * started in queue order.
 */
void
dasd_add_request_head(struct dasd_ccw_req *req)
{
	struct dasd_device *device;
	unsigned long flags;

	device = req->device;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	req->status = DASD_CQR_QUEUED;
	req->device = device;
	/* head insertion - this request runs before queued ones */
	list_add(&req->list, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
/*
 * Queue a request to the tail of the ccw_queue. Start the I/O if
 * possible - the actual start is deferred to the bh so requests are
 * started in queue order.  Identical to dasd_add_request_head except
 * for the insertion point.
 */
void
dasd_add_request_tail(struct dasd_ccw_req *req)
{
	struct dasd_device *device;
	unsigned long flags;

	device = req->device;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	req->status = DASD_CQR_QUEUED;
	req->device = device;
	list_add_tail(&req->list, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
/*
 * Request completion callback used by the dasd_sleep_on* family:
 * @data is the wait queue head the sleeping caller is blocked on.
 */
static void
dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up((wait_queue_head_t *) data);
}
/*
 * wait_event() condition for the dasd_sleep_on* family: non-zero once
 * the request has reached a final status (DONE or FAILED).  Status is
 * sampled under the ccwdev lock to serialize against the int handler.
 */
static inline int
_wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = cqr->status == DASD_CQR_DONE || cqr->status == DASD_CQR_FAILED;
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}
/*
 * Attempts to start a special ccw queue and waits for its completion.
 *
 * Queues @cqr at the tail of the device's ccw queue with a wakeup
 * callback and blocks (uninterruptibly) until the request reaches a
 * final status.  Returns 0 on success, -EIO if the request failed.
 */
int
dasd_sleep_on(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	/* on-stack wait queue; dasd_wakeup_cb wakes it on completion */
	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}
/*
 * Attempts to start a special ccw queue and wait interruptible
 * for its completion.
 *
 * Like dasd_sleep_on(), but the wait can be interrupted by a signal.
 * On interruption the request is terminated (if it is in I/O) and
 * removed from the queue.  Returns 0 on success, -EIO on failure,
 * or -ERESTARTSYS if interrupted.
 */
int
dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc, finished;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	/* NOTE(review): the retry-loop framing below was elided in the
	 * mangled source and is reconstructed — confirm against the
	 * pristine file. */
	finished = 0;
	while (!finished) {
		rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
		if (rc != -ERESTARTSYS) {
			/* Request status is either done or failed. */
			rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
			break;
		}
		/* Interrupted by a signal: try to terminate the request. */
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		if (cqr->status == DASD_CQR_IN_IO &&
		    device->discipline->term_IO(cqr) == 0) {
			list_del(&cqr->list);
			finished = 1;
		}
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	return rc;
}
/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 *
 * Caller must hold the ccwdev lock.  Returns 0 if there was nothing to
 * terminate or termination succeeded, otherwise the term_IO error.
 */
static inline int
_dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return 0;	/* nothing running - nothing to terminate */
	/* head of the ccw queue is the request currently in I/O */
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	rc = device->discipline->term_IO(cqr);
	if (rc == 0) {
		/* termination successful */
		cqr->status = DASD_CQR_QUEUED;
		/* reset clocks so the request is accounted as restarted */
		cqr->startclk = cqr->stopclk = 0;
	}
	return rc;
}
/*
 * Terminate the currently running request (if any), queue @cqr at the
 * HEAD of the ccw queue so it runs next, and wait uninterruptibly for
 * its completion.  Returns 0 on success, -EIO on failure, or the
 * term_IO error if the running request could not be terminated.
 */
int
dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	/* NOTE(review): early-exit on termination failure reconstructed
	 * from elided lines — confirm against the pristine file. */
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	/* head insertion - this request must run next */
	list_add(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}
/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 */
int
dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->device;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to failed */
		cqr->status = DASD_CQR_FAILED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		if (device->discipline->term_IO(cqr) != 0)
			/* what to do if unable to terminate ??????
			   e.g. not _IN_IO */
			cqr->status = DASD_CQR_FAILED;
		cqr->stopclk = get_clock();
		rc = 1;
		break;
	/* NOTE(review): the DASD_CQR_DONE label alongside FAILED was
	 * elided in the mangled source — confirm against the pristine
	 * file. */
	case DASD_CQR_DONE:
	case DASD_CQR_FAILED:
		/* already finished - do nothing */
		break;
	default:
		DEV_MESSAGE(KERN_ALERT, device,
			    "invalid status %02x in request",
			    cqr->status);
		BUG();
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	/* run the bh so queue processing notices the cancellation */
	dasd_schedule_bh(device);
	return rc;
}
1572 * SECTION: Block device operations (request queue, partitions, open, release).
/*
 * Dasd request queue function. Called from ll_rw_blk.c
 *
 * The block layer calls this with request_queue_lock already held, so
 * only the ccwdev lock is taken here (same order as dasd_tasklet).
 */
static void
do_dasd_request(request_queue_t * queue)
{
	struct dasd_device *device;

	device = (struct dasd_device *) queue->queuedata;
	spin_lock(get_ccwdev_lock(device->cdev));
	/* Get new request from the block device request queue */
	__dasd_process_blk_queue(device);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_start_head(device);
	spin_unlock(get_ccwdev_lock(device->cdev));
}
/*
 * Allocate and initialize request queue and default I/O scheduler.
 *
 * Replaces the elevator chosen by blk_init_queue with "deadline".
 * Returns 0 on success or a negative error code.
 */
static int
dasd_alloc_queue(struct dasd_device * device)
{
	int rc;

	device->request_queue = blk_init_queue(do_dasd_request,
					       &device->request_queue_lock);
	if (device->request_queue == NULL)
		return -ENOMEM;

	device->request_queue->queuedata = device;

	/* swap the default elevator for the deadline scheduler */
	elevator_exit(device->request_queue->elevator);
	rc = elevator_init(device->request_queue, "deadline");
	/* NOTE(review): error-path framing reconstructed from elided
	 * lines — confirm against the pristine file. */
	if (rc) {
		blk_cleanup_queue(device->request_queue);
		return rc;
	}
	return 0;
}
/*
 * Allocate and initialize request queue.
 *
 * Configures the block layer limits for this device: sector size from
 * bp_block, max sectors from the discipline's max_blocks (converted to
 * 512-byte sectors via s2b_shift).  The -1L arguments mean "no limit"
 * for segment counts/sizes/boundary.
 */
static void
dasd_setup_queue(struct dasd_device * device)
{
	int max;

	blk_queue_hardsect_size(device->request_queue, device->bp_block);
	max = device->discipline->max_blocks << device->s2b_shift;
	blk_queue_max_sectors(device->request_queue, max);
	blk_queue_max_phys_segments(device->request_queue, -1L);
	blk_queue_max_hw_segments(device->request_queue, -1L);
	blk_queue_max_segment_size(device->request_queue, -1L);
	blk_queue_segment_boundary(device->request_queue, -1L);
	blk_queue_ordered(device->request_queue, 1);
}
1635 * Deactivate and free request queue.
1638 dasd_free_queue(struct dasd_device
* device
)
1640 if (device
->request_queue
) {
1641 blk_cleanup_queue(device
->request_queue
);
1642 device
->request_queue
= NULL
;
/*
 * Flush request on the request queue.
 *
 * Ends every pending block-layer request with an error (uptodate = 0)
 * and dequeues it, under request_queue_lock.
 */
static void
dasd_flush_request_queue(struct dasd_device * device)
{
	struct request *req;

	if (!device->request_queue)
		return;	/* no queue allocated - nothing to flush */

	spin_lock_irq(&device->request_queue_lock);
	while (!list_empty(&device->request_queue->queue_head)) {
		req = elv_next_request(device->request_queue);
		/* NOTE(review): NULL-check reconstructed from elided
		 * lines — confirm against the pristine file. */
		if (req == NULL)
			break;
		/* fail the request back to the block layer */
		dasd_end_request(req, 0);
		blkdev_dequeue_request(req);
	}
	spin_unlock_irq(&device->request_queue_lock);
}
/*
 * Block device open: bump the open count, pin the discipline module
 * and refuse access for offline, probeonly or not-yet-analyzed devices.
 *
 * NOTE(review): the error codes and goto framing below were elided in
 * the mangled source and are reconstructed — confirm against the
 * pristine file.
 */
static int
dasd_open(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_device *device = disk->private_data;
	int rc;

	atomic_inc(&device->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	/* pin the discipline module while the device is open */
	if (!try_module_get(device->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		DEV_MESSAGE(KERN_INFO, device, "%s",
			    "No access to device due to probeonly mode");
		rc = -EPERM;
		goto out;
	}

	if (device->state < DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, device, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	return 0;

out:
	module_put(device->discipline->owner);
unlock:
	atomic_dec(&device->open_count);
	return rc;
}
/*
 * Block device release: undo dasd_open - drop the open count and the
 * discipline module reference.
 */
static int
dasd_release(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_device *device = disk->private_data;

	atomic_dec(&device->open_count);
	module_put(device->discipline->owner);
	return 0;
}
1720 struct block_device_operations
1721 dasd_device_operations
= {
1722 .owner
= THIS_MODULE
,
1724 .release
= dasd_release
,
1725 .ioctl
= dasd_ioctl
,
1732 #ifdef CONFIG_PROC_FS
1736 if (dasd_page_cache
!= NULL
) {
1737 kmem_cache_destroy(dasd_page_cache
);
1738 dasd_page_cache
= NULL
;
1740 dasd_gendisk_exit();
1742 devfs_remove("dasd");
1743 if (dasd_debug_area
!= NULL
) {
1744 debug_unregister(dasd_debug_area
);
1745 dasd_debug_area
= NULL
;
1750 * SECTION: common functions for ccw_driver use
/* initial attempt at a probe function. this can be simplified once
 * the other detection code is gone */
int
dasd_generic_probe (struct ccw_device *cdev,
		    struct dasd_discipline *discipline)
{
	int ret;

	/* create the sysfs attributes for this ccw device */
	ret = dasd_add_sysfs_files(cdev);
	if (ret) {
		printk(KERN_WARNING
		       "dasd_generic_probe: could not add sysfs entries "
		       "for %s\n", cdev->dev.bus_id);
	} else {
		/* hook up the common DASD interrupt handler */
		cdev->handler = &dasd_int_handler;
	}
	return ret;
}
/* this will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload */
void
dasd_generic_remove (struct ccw_device *cdev)
{
	struct dasd_device *device;

	/* detach the interrupt handler first */
	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	/* NOTE(review): IS_ERR early-return reconstructed from elided
	 * lines — confirm against the pristine file. */
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set offline
	 * flag to prevent dasd_open from opening it while it is
	 * no quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	dasd_delete_device(device);
}
/* activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs */
int
dasd_generic_set_online (struct ccw_device *cdev,
			 struct dasd_discipline *discipline)
{
	struct dasd_device *device;
	int rc;

	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	/* DIAG access overrides the caller-supplied discipline */
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			printk (KERN_WARNING
				"dasd_generic couldn't online device %s "
				"- discipline DIAG not available\n",
				cdev->dev.bus_id);
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	device->discipline = discipline;

	/* let the discipline verify that it can drive this device */
	rc = discipline->check_device(device);
	if (rc) {
		printk (KERN_WARNING
			"dasd_generic couldn't online device %s "
			"with discipline %s rc=%i\n",
			cdev->dev.bus_id, discipline->name, rc);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		/* online transition did not get past device recognition */
		printk (KERN_WARNING
			"dasd_generic discipline not found for %s\n",
			cdev->dev.bus_id);
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 cdev->dev.bus_id);

	/* FIXME: we have to wait for the root device but we don't want
	 * to wait for each single device but for all at once. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);

	return rc;
}
/*
 * Deactivate a device (sysfs offline / driver unbind).  Refuses while
 * the device has users other than the partition-scan blkdev_get.
 */
int
dasd_generic_set_offline (struct ccw_device *cdev)
{
	struct dasd_device *device;
	int max_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	max_count = device->bdev ? 0 : -1;
	if (atomic_read(&device->open_count) > max_count) {
		printk (KERN_WARNING "Can't offline dasd device with open"
			" count = %i.\n",
			atomic_read(&device->open_count));
		/* back out: allow opens again and drop our reference */
		clear_bit(DASD_FLAG_OFFLINE, &device->flags);
		dasd_put_device(device);
		return -EBUSY;
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	dasd_delete_device(device);

	return 0;
}
/*
 * Common-I/O-layer notification handler: react to the device
 * disappearing / losing its path (fail or park in-flight requests and
 * stop the device) and to it becoming operational again (clear the
 * stop bits and restart queue processing).
 *
 * NOTE(review): the switch/case framing (event labels, return value
 * handling) was elided in the mangled source and is reconstructed —
 * confirm against the pristine file.
 */
int
dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int ret;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return 0;
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_NO_PATH:
		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
			/* fail everything in flight, stop with EIO */
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO)
					cqr->status = DASD_CQR_FAILED;
			device->stopped |= DASD_STOPPED_DC_EIO;
			dasd_schedule_bh(device);
		} else {
			/* park in-flight requests for a retry later */
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO)
					cqr->status = DASD_CQR_QUEUED;
			device->stopped |= DASD_STOPPED_DC_WAIT;
			dasd_set_timer(device, 0);
		}
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
		dasd_schedule_bh(device);
		ret = 1;
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	dasd_put_device(device);
	return ret;
}
/*
 * Automatically online either all dasd devices (dasd_autodetect) or
 * all devices specified with dasd= parameters.
 *
 * driver_for_each_device() callback: sets a single ccw device online
 * when autodetect is active or its bus id was given on the dasd=
 * parameter list.
 */
static int
__dasd_auto_online(struct device *dev, void *data)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (dasd_autodetect || dasd_busid_known(cdev->dev.bus_id) == 0)
		ccw_device_set_online(cdev);
	return 0;
}
/*
 * Walk all devices bound to @dasd_discipline_driver and online the
 * eligible ones via __dasd_auto_online.
 */
void
dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver)
{
	struct device_driver *drv;

	drv = get_driver(&dasd_discipline_driver->driver);
	driver_for_each_device(drv, NULL, NULL, __dasd_auto_online);
	/* NOTE(review): matching put_driver() reconstructed from elided
	 * lines — confirm against the pristine file. */
	put_driver(drv);
}
1974 init_waitqueue_head(&dasd_init_waitq
);
1976 /* register 'common' DASD debug area, used for all DBF_XXX calls */
1977 dasd_debug_area
= debug_register("dasd", 1, 2, 8 * sizeof (long));
1978 if (dasd_debug_area
== NULL
) {
1982 debug_register_view(dasd_debug_area
, &debug_sprintf_view
);
1983 debug_set_level(dasd_debug_area
, DBF_EMERG
);
1985 DBF_EVENT(DBF_EMERG
, "%s", "debug area created");
1987 dasd_diag_discipline_pointer
= NULL
;
1989 rc
= devfs_mk_dir("dasd");
1992 rc
= dasd_devmap_init();
1995 rc
= dasd_gendisk_init();
2001 rc
= dasd_ioctl_init();
2004 #ifdef CONFIG_PROC_FS
2005 rc
= dasd_proc_init();
2012 MESSAGE(KERN_INFO
, "%s", "initialization not performed due to errors");
module_init(dasd_init);
module_exit(dasd_exit);

/* shared state used by the discipline modules (eckd, fba, diag) */
EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);

/* request queueing / lifecycle API for discipline modules */
EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);

/* generic ccw-driver hooks, GPL-only */
EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_auto_online);
2051 * Overrides for Emacs so that we follow Linus's tabbing style.
2052 * Emacs will notice this stuff at the end of the file and automatically
2053 * adjust the settings for this buffer only. This must remain at the end
2055 * ---------------------------------------------------------------------------
2058 * c-brace-imaginary-offset: 0
2059 * c-brace-offset: -4
2060 * c-argdecl-indent: 4
2061 * c-label-offset: -4
2062 * c-continued-statement-offset: 4
2063 * c-continued-brace-offset: 0
2064 * indent-tabs-mode: 1