/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_SLEEPON_START_TAG	(void *) 1
#define DASD_SLEEPON_END_TAG	(void *) 2

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	device->timer.function = dasd_device_timeout;
	device->timer.data = (unsigned long) device;
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->restore_device, do_restore_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}
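
/*
 * Illustrative sketch (not part of the driver): dasd_alloc_device()
 * reports failure through ERR_PTR() rather than NULL, so callers must
 * test the result with IS_ERR() before use and release it again with
 * dasd_free_device().
 */
static struct dasd_device * __maybe_unused example_device_setup(void)
{
	struct dasd_device *device;

	device = dasd_alloc_device();
	if (IS_ERR(device))
		return device;	/* propagates ERR_PTR(-ENOMEM) */
	/* ... fill in discipline specific fields here ... */
	return device;
}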

/*
 * Allocate memory for a new block device structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	spin_lock_init(&block->request_queue_lock);
	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet,
		     (void (*)(unsigned long)) dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	init_timer(&block->timer);
	block->timer.function = dasd_block_timeout;
	block->timer.data = (unsigned long) block;
	spin_lock_init(&block->profile.lock);

	return block;
}

/*
 * Free memory of a block device structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
	}
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		if (device->block->debugfs_dentry)
			debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	if (device->debugfs_dentry)
		debugfs_remove(device->debugfs_dentry);

	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}
/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN)
				device->state = DASD_STATE_UNFMT;
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc)
			device->state = DASD_STATE_BASIC;
	} else {
		device->state = DASD_STATE_READY;
	}
	return rc;
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;

		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_flush_request_queue(block);
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int dasd_state_ready_to_online(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->ready_to_online) {
		rc = device->discipline->ready_to_online(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			disk = device->block->gdp;
			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
			return 0;
		}
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_READY;
	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  kick_work);

	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}
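
/*
 * Illustrative use (an assumption for clarity, not code from this
 * file): a discipline whose analysis ccw has completed in interrupt
 * context would continue the state machine from process context with
 *
 *	dasd_kick_device(device);
 *
 * which takes a reference, lets the kernel event daemon run
 * do_kick_device() and thereby dasd_change_state(), and drops the
 * reference again when done.
 */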

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);

	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
	schedule_work(&device->reload_device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * dasd_restore_device will schedule a call to do_restore_device to the kernel
 * event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  restore_device);

	device->cdev->drv->restore(device->cdev);
	dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_restore_device to the kernel event daemon. */
	schedule_work(&device->restore_device);
}

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}
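
/*
 * Example (illustrative): tearing a device down completely amounts to
 * lowering the target state in a single call,
 *
 *	dasd_set_target_state(device, DASD_STATE_NEW);
 *
 * after which dasd_change_state() walks down one level at a time:
 * ONLINE -> READY -> BASIC -> KNOWN -> NEW.
 */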

/*
 * Wait until a device has reached its target state.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile_info dasd_global_profile_data;
static struct dentry *dasd_global_profile_dentry;
static struct dentry *dasd_debugfs_global_entry;

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	if (dasd_global_profile_level) {
		dasd_global_profile_data.dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile_data.dasd_read_nr_req[counter]++;
	}

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
	device = cqr->startdev;
	if (device->profile.data) {
		counter = 1; /* request is not yet queued on the start device */
		list_for_each(l, &device->ccw_queue)
			if (++counter >= 31)
				break;
	}
	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		device->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			device->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&device->profile.lock);
}

/*
 * Add profiling information for cqr after execution.
 */

#define dasd_profile_counter(value, index) \
{ \
	for (index = 0; index < 31 && value >> (2+index); index++) \
		; \
}
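
/*
 * Worked example (added for clarity): the macro computes a logarithmic
 * histogram bucket.  dasd_profile_counter(100, i) yields i == 5, since
 * 100 >> 7 is the first shift that becomes zero; values below 4 fall
 * into bucket 0 and values of 2^32 and above saturate in bucket 31.
 */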

static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
{
	/* in case of an overflow, reset the whole profile */
	if (data->dasd_io_reqs == UINT_MAX) {
		memset(data, 0, sizeof(*data));
		getnstimeofday(&data->starttod);
	}
	data->dasd_io_reqs++;
	data->dasd_io_sects += sectors;
	if (is_alias)
		data->dasd_io_alias++;
	if (is_tpm)
		data->dasd_io_tpm++;

	data->dasd_io_secs[sectors_ind]++;
	data->dasd_io_times[tottime_ind]++;
	data->dasd_io_timps[tottimeps_ind]++;
	data->dasd_io_time1[strtime_ind]++;
	data->dasd_io_time2[irqtime_ind]++;
	data->dasd_io_time2ps[irqtimeps_ind]++;
	data->dasd_io_time3[endtime_ind]++;

	if (is_read) {
		data->dasd_read_reqs++;
		data->dasd_read_sects += sectors;
		if (is_alias)
			data->dasd_read_alias++;
		if (is_tpm)
			data->dasd_read_tpm++;
		data->dasd_read_secs[sectors_ind]++;
		data->dasd_read_times[tottime_ind]++;
		data->dasd_read_time1[strtime_ind]++;
		data->dasd_read_time2[irqtime_ind]++;
		data->dasd_read_time3[endtime_ind]++;
	}
}

static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	if (dasd_global_profile_level) {
		dasd_profile_end_add_data(&dasd_global_profile_data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}

	spin_lock(&block->profile.lock);
	if (block->profile.data)
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data)
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
	getnstimeofday(&data->starttod);
	spin_unlock_bh(&profile->lock);
}

void dasd_global_profile_reset(void)
{
	memset(&dasd_global_profile_data, 0, sizeof(dasd_global_profile_data));
	getnstimeofday(&dasd_global_profile_data.starttod);
}

int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	getnstimeofday(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
	spin_lock_bh(&profile->lock);
	kfree(profile->data);
	profile->data = NULL;
	spin_unlock_bh(&profile->lock);
}

char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}

static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (!rc)
			rc = user_len;
	} else if (strncmp(str, "off", 3) == 0) {
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
	vfree(buffer);
	return rc;
}
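
/*
 * Usage note (added; paths assume debugfs is mounted at
 * /sys/kernel/debug): the per-device and per-block statistics files
 * created below accept exactly the commands parsed above, e.g.
 *
 *	echo on    > /sys/kernel/debug/dasd/<device>/statistics
 *	echo reset > /sys/kernel/debug/dasd/<device>/statistics
 *	echo off   > /sys/kernel/debug/dasd/<device>/statistics
 *
 * and reading the file dumps the histograms printed by
 * dasd_stats_seq_print().
 */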

static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int i;

	for (i = 0; i < 32; i++)
		seq_printf(m, "%u ", array[i]);
	seq_putc(m, '\n');
}

static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
	seq_printf(m, "start_time %ld.%09ld\n",
		   data->starttod.tv_sec, data->starttod.tv_nsec);
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
	seq_printf(m, "histogram_sectors ");
	dasd_stats_array(m, data->dasd_io_secs);
	seq_printf(m, "histogram_io_times ");
	dasd_stats_array(m, data->dasd_io_times);
	seq_printf(m, "histogram_io_times_weighted ");
	dasd_stats_array(m, data->dasd_io_timps);
	seq_printf(m, "histogram_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_io_time1);
	seq_printf(m, "histogram_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_io_time2);
	seq_printf(m, "histogram_time_ssch_to_irq_weighted ");
	dasd_stats_array(m, data->dasd_io_time2ps);
	seq_printf(m, "histogram_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_io_time3);
	seq_printf(m, "histogram_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
	seq_printf(m, "histogram_read_sectors ");
	dasd_stats_array(m, data->dasd_read_secs);
	seq_printf(m, "histogram_read_times ");
	dasd_stats_array(m, data->dasd_read_times);
	seq_printf(m, "histogram_read_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_read_time1);
	seq_printf(m, "histogram_read_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_read_time2);
	seq_printf(m, "histogram_read_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_read_time3);
	seq_printf(m, "histogram_read_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_printf(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;

	return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};

static ssize_t dasd_stats_global_write(struct file *file,
				       const char __user *user_buf,
				       size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	ssize_t rc;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);
	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_global_profile_reset();
	} else if (strncmp(str, "on", 2) == 0) {
		dasd_global_profile_reset();
		dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
	} else if (strncmp(str, "off", 3) == 0) {
		dasd_global_profile_level = DASD_PROFILE_OFF;
	} else
		rc = -EINVAL;
	vfree(buffer);
	return rc;
}

static int dasd_stats_global_show(struct seq_file *m, void *v)
{
	if (!dasd_global_profile_level) {
		seq_printf(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, &dasd_global_profile_data);
	return 0;
}

static int dasd_stats_global_open(struct inode *inode, struct file *file)
{
	return single_open(file, dasd_stats_global_show, NULL);
}

static const struct file_operations dasd_stats_global_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_global_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_global_write,
};

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	umode_t mode;
	struct dentry *pde;

	if (!base_dentry)
		return;
	profile->dentry = NULL;
	profile->data = NULL;
	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, base_dentry,
				  profile, &dasd_stats_raw_fops);
	if (pde && !IS_ERR(pde))
		profile->dentry = pde;
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	dasd_profile_off(profile);
	if (profile->dentry) {
		debugfs_remove(profile->dentry);
		profile->dentry = NULL;
	}
}

static void dasd_statistics_removeroot(void)
{
	dasd_global_profile_level = DASD_PROFILE_OFF;
	if (dasd_global_profile_dentry) {
		debugfs_remove(dasd_global_profile_dentry);
		dasd_global_profile_dentry = NULL;
	}
	if (dasd_debugfs_global_entry)
		debugfs_remove(dasd_debugfs_global_entry);
	if (dasd_debugfs_root_entry)
		debugfs_remove(dasd_debugfs_root_entry);
}

static void dasd_statistics_createroot(void)
{
	umode_t mode;
	struct dentry *pde;

	dasd_debugfs_root_entry = NULL;
	dasd_debugfs_global_entry = NULL;
	dasd_global_profile_dentry = NULL;
	pde = debugfs_create_dir("dasd", NULL);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_root_entry = pde;
	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_global_entry = pde;

	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, dasd_debugfs_global_entry,
				  NULL, &dasd_stats_global_fops);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_global_profile_dentry = pde;
	return;

error:
	DBF_EVENT(DBF_ERR, "%s",
		  "Creation of the dasd debugfs interface failed");
	dasd_statistics_removeroot();
	return;
}

#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
	seq_printf(m, "Statistics are not activated in this kernel\n");
	return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

#endif				/* CONFIG_DASD_PROFILE */

/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON(datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
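
/*
 * Illustrative sketch (not part of the driver): how a discipline might
 * build a minimal single-CCW request from the static per-device memory
 * pool.  The magic value, the NOP command code (0x03) and the field
 * values are chosen for illustration only; real disciplines set their
 * own commands, retry counts and timeouts.  The request must later be
 * released with dasd_sfree_request().
 */
static struct dasd_ccw_req * __maybe_unused
example_build_nop_request(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_MAGIC, 1 /* cplength */,
				   0 /* datasize */, device);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	ccw->cmd_code = 0x03;	/* channel command: no-operation */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 5;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}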

/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * The timer keeps the device running.
 * ccw_device_clear can fail if the i/o subsystem is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			/* internal error 10 - unknown rc */
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO run out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
		cqr->lpm &= device->path_data.opm;
		if (!cqr->lpm)
			cqr->lpm = device->path_data.opm;
	}
	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g. the ppm) then
		 * we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
		 */
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
		} else if (cqr->lpm != device->path_data.opm) {
			cqr->lpm = device->path_data.opm;
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
			device->path_data.opm = 0;
			device->path_data.ppm = 0;
			device->path_data.npm = 0;
			device->path_data.tbvpm =
				ccw_device_get_path_mask(device->cdev);
		}
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		/* most likely caused in power management context */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}

/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}
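
/*
 * Example (illustrative; the interval is an assumption): a caller that
 * saw start_IO() fail with -EBUSY would typically arm a retry after
 * half a second with
 *
 *	dasd_device_set_timer(device, HZ / 2);
 *
 * and cancel it again with dasd_device_clear_timer(device) once the
 * request has been started or aborted.
 */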

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
}

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_clock();
	cqr = (struct dasd_ccw_req *) intparm;
	/* check for conditions that should be handled immediately */
	if (!cqr ||
	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	      scsw_cstat(&irb->scsw) == 0)) {
		if (cqr)
			memcpy(&cqr->irb, irb, sizeof(*irb));
		device = dasd_device_from_cdev_locked(cdev);
		if (IS_ERR(device))
			return;
		/* ignore unsolicited interrupts for DIAG discipline */
		if (device->discipline == dasd_diag_discipline_pointer) {
			dasd_put_device(device);
			return;
		}
		device->discipline->dump_sense_dbf(device, irb, "int");
		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
		dasd_put_device(device);
	}
	if (!cqr)
		return;

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == device->path_data.opm)
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
1691 | cqr->lpm = device->path_data.opm; | |
1692 | cqr->status = DASD_CQR_QUEUED; | |
1693 | next = cqr; | |
1694 | } else | |
1695 | cqr->status = DASD_CQR_ERROR; | |
1696 | } | |
1697 | if (next && (next->status == DASD_CQR_QUEUED) && | |
1698 | (!device->stopped)) { | |
1699 | if (device->discipline->start_IO(next) == 0) | |
1700 | expires = next->expires; | |
1701 | } | |
1702 | if (expires != 0) | |
1703 | dasd_device_set_timer(device, expires); | |
1704 | else | |
1705 | dasd_device_clear_timer(device); | |
1706 | dasd_schedule_device_bh(device); | |
1707 | } | |
1708 | ||
1709 | enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb) | |
1710 | { | |
1711 | struct dasd_device *device; | |
1712 | ||
1713 | device = dasd_device_from_cdev_locked(cdev); | |
1714 | ||
1715 | if (IS_ERR(device)) | |
1716 | goto out; | |
1717 | if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || | |
1718 | device->state != device->target || | |
1719 | !device->discipline->check_for_device_change){ | |
1720 | dasd_put_device(device); | |
1721 | goto out; | |
1722 | } | |
1723 | if (device->discipline->dump_sense_dbf) | |
1724 | device->discipline->dump_sense_dbf(device, irb, "uc"); | |
1725 | device->discipline->check_for_device_change(device, NULL, irb); | |
1726 | dasd_put_device(device); | |
1727 | out: | |
1728 | return UC_TODO_RETRY; | |
1729 | } | |
1730 | EXPORT_SYMBOL_GPL(dasd_generic_uc_handler); | |
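/*
 * Editor's note: disciplines are expected to hook this helper into their
 * ccw_driver (e.g. .uc_handler = dasd_generic_uc_handler) so that unit
 * checks are inspected for device changes before being retried.
 */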
1731 | ||
1732 | /* | |
1733 | * If we have an error on a dasd_block layer request then we cancel | |
1734 | * and return all further requests from the same dasd_block as well. | |
1735 | */ | |
1736 | static void __dasd_device_recovery(struct dasd_device *device, | |
1737 | struct dasd_ccw_req *ref_cqr) | |
1738 | { | |
1739 | struct list_head *l, *n; | |
1740 | struct dasd_ccw_req *cqr; | |
1741 | ||
1742 | /* | |
1743 | * only requeue requests that came from the dasd_block layer | |
1744 | */ | |
1745 | if (!ref_cqr->block) | |
1746 | return; | |
1747 | ||
1748 | list_for_each_safe(l, n, &device->ccw_queue) { | |
1749 | cqr = list_entry(l, struct dasd_ccw_req, devlist); | |
1750 | if (cqr->status == DASD_CQR_QUEUED && | |
1751 | ref_cqr->block == cqr->block) { | |
1752 | cqr->status = DASD_CQR_CLEARED; | |
1753 | } | |
1754 | } | |
1755 | } | |
1756 | ||
1757 | /* | |
1758 | * Remove those ccw requests from the queue that need to be returned | |
1759 | * to the upper layer. | |
1760 | */ | |
1761 | static void __dasd_device_process_ccw_queue(struct dasd_device *device, | |
1762 | struct list_head *final_queue) | |
1763 | { | |
1764 | struct list_head *l, *n; | |
1765 | struct dasd_ccw_req *cqr; | |
1766 | ||
1767 | /* Process requests with final status. */ | |
1768 | list_for_each_safe(l, n, &device->ccw_queue) { | |
1769 | cqr = list_entry(l, struct dasd_ccw_req, devlist); | |
1770 | ||
1771 | /* Stop list processing at the first non-final request. */ | |
1772 | if (cqr->status == DASD_CQR_QUEUED || | |
1773 | cqr->status == DASD_CQR_IN_IO || | |
1774 | cqr->status == DASD_CQR_CLEAR_PENDING) | |
1775 | break; | |
1776 | if (cqr->status == DASD_CQR_ERROR) { | |
1777 | __dasd_device_recovery(device, cqr); | |
1778 | } | |
1779 | /* Rechain finished requests to final queue */ | |
1780 | list_move_tail(&cqr->devlist, final_queue); | |
1781 | } | |
1782 | } | |
1783 | ||
1784 | /* | |
1785 | * the cqrs from the final queue are returned to the upper layer | |
1786 | * by setting their final status and calling the callback function | |
1787 | */ | |
1788 | static void __dasd_device_process_final_queue(struct dasd_device *device, | |
1789 | struct list_head *final_queue) | |
1790 | { | |
1791 | struct list_head *l, *n; | |
1792 | struct dasd_ccw_req *cqr; | |
1793 | struct dasd_block *block; | |
1794 | void (*callback)(struct dasd_ccw_req *, void *data); | |
1795 | void *callback_data; | |
1796 | char errorstring[ERRORLENGTH]; | |
1797 | ||
1798 | list_for_each_safe(l, n, final_queue) { | |
1799 | cqr = list_entry(l, struct dasd_ccw_req, devlist); | |
1800 | list_del_init(&cqr->devlist); | |
1801 | block = cqr->block; | |
1802 | callback = cqr->callback; | |
1803 | callback_data = cqr->callback_data; | |
1804 | if (block) | |
1805 | spin_lock_bh(&block->queue_lock); | |
1806 | switch (cqr->status) { | |
1807 | case DASD_CQR_SUCCESS: | |
1808 | cqr->status = DASD_CQR_DONE; | |
1809 | break; | |
1810 | case DASD_CQR_ERROR: | |
1811 | cqr->status = DASD_CQR_NEED_ERP; | |
1812 | break; | |
1813 | case DASD_CQR_CLEARED: | |
1814 | cqr->status = DASD_CQR_TERMINATED; | |
1815 | break; | |
1816 | default: | |
1817 | /* internal error 12 - wrong cqr status*/ | |
1818 | snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status); | |
1819 | dev_err(&device->cdev->dev, | |
1820 | "An error occurred in the DASD device driver, " | |
1821 | "reason=%s\n", errorstring); | |
1822 | BUG(); | |
1823 | } | |
1824 | if (cqr->callback != NULL) | |
1825 | (callback)(cqr, callback_data); | |
1826 | if (block) | |
1827 | spin_unlock_bh(&block->queue_lock); | |
1828 | } | |
1829 | } | |
1830 | ||
1831 | /* | |
1832 | * Take a look at the first request on the ccw queue and check | |
1833 | * if it reached its expire time. If so, terminate the IO. | |
1834 | */ | |
1835 | static void __dasd_device_check_expire(struct dasd_device *device) | |
1836 | { | |
1837 | struct dasd_ccw_req *cqr; | |
1838 | ||
1839 | if (list_empty(&device->ccw_queue)) | |
1840 | return; | |
1841 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); | |
1842 | if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && | |
1843 | (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { | |
1844 | if (device->discipline->term_IO(cqr) != 0) { | |
1845 | /* Hmpf, try again in 5 sec */ | |
1846 | dev_err(&device->cdev->dev, | |
1847 | "cqr %p timed out (%lus) but cannot be " | |
1848 | "ended, retrying in 5 s\n", | |
1849 | cqr, (cqr->expires/HZ)); | |
1850 | cqr->expires += 5*HZ; | |
1851 | dasd_device_set_timer(device, 5*HZ); | |
1852 | } else { | |
1853 | dev_err(&device->cdev->dev, | |
1854 | "cqr %p timed out (%lus), %i retries " | |
1855 | "remaining\n", cqr, (cqr->expires/HZ), | |
1856 | cqr->retries); | |
1857 | } | |
1858 | } | |
1859 | } | |
1860 | ||
1861 | /* | |
1862 | * Take a look at the first request on the ccw queue and check | |
1863 | * if it needs to be started. | |
1864 | */ | |
1865 | static void __dasd_device_start_head(struct dasd_device *device) | |
1866 | { | |
1867 | struct dasd_ccw_req *cqr; | |
1868 | int rc; | |
1869 | ||
1870 | if (list_empty(&device->ccw_queue)) | |
1871 | return; | |
1872 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); | |
1873 | if (cqr->status != DASD_CQR_QUEUED) | |
1874 | return; | |
1875 | /* when the device is stopped, return the request to the previous layer; | |
1876 | * exception: only the disconnect or unresumed bits are set and the | |
1877 | * cqr is a path verification request | |
1878 | */ | |
1879 | if (device->stopped && | |
1880 | !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM)) | |
1881 | && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) { | |
1882 | cqr->intrc = -EAGAIN; | |
1883 | cqr->status = DASD_CQR_CLEARED; | |
1884 | dasd_schedule_device_bh(device); | |
1885 | return; | |
1886 | } | |
1887 | ||
1888 | rc = device->discipline->start_IO(cqr); | |
1889 | if (rc == 0) | |
1890 | dasd_device_set_timer(device, cqr->expires); | |
1891 | else if (rc == -EACCES) { | |
1892 | dasd_schedule_device_bh(device); | |
1893 | } else | |
1894 | /* Hmpf, try again in 1/2 sec */ | |
1895 | dasd_device_set_timer(device, 50); | |
1896 | } | |
1897 | ||
1898 | static void __dasd_device_check_path_events(struct dasd_device *device) | |
1899 | { | |
1900 | int rc; | |
1901 | ||
1902 | if (device->path_data.tbvpm) { | |
1903 | if (device->stopped & ~(DASD_STOPPED_DC_WAIT | | |
1904 | DASD_UNRESUMED_PM)) | |
1905 | return; | |
1906 | rc = device->discipline->verify_path( | |
1907 | device, device->path_data.tbvpm); | |
1908 | if (rc) | |
1909 | dasd_device_set_timer(device, 50); | |
1910 | else | |
1911 | device->path_data.tbvpm = 0; | |
1912 | } | |
1913 | } | |
1914 | ||
1915 | /* | |
1916 | * Go through all requests on the dasd_device request queue, | |
1917 | * terminate them on the cdev if necessary, and return them to the | |
1918 | * submitting layer via callback. | |
1919 | * Note: | |
1920 | * Make sure that all 'submitting layers' still exist when | |
1921 | * this function is called! In other words, when 'device' is a base | |
1922 | * device, all block layer requests must have been removed beforehand | |
1923 | * via dasd_flush_block_queue. | |
1924 | */ | |
1925 | int dasd_flush_device_queue(struct dasd_device *device) | |
1926 | { | |
1927 | struct dasd_ccw_req *cqr, *n; | |
1928 | int rc; | |
1929 | struct list_head flush_queue; | |
1930 | ||
1931 | INIT_LIST_HEAD(&flush_queue); | |
1932 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | |
1933 | rc = 0; | |
1934 | list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { | |
1935 | /* Check status and move request to flush_queue */ | |
1936 | switch (cqr->status) { | |
1937 | case DASD_CQR_IN_IO: | |
1938 | rc = device->discipline->term_IO(cqr); | |
1939 | if (rc) { | |
1940 | /* unable to terminate request */ | |
1941 | dev_err(&device->cdev->dev, | |
1942 | "Flushing the DASD request queue " | |
1943 | "failed for request %p\n", cqr); | |
1944 | /* stop flush processing */ | |
1945 | goto finished; | |
1946 | } | |
1947 | break; | |
1948 | case DASD_CQR_QUEUED: | |
1949 | cqr->stopclk = get_clock(); | |
1950 | cqr->status = DASD_CQR_CLEARED; | |
1951 | break; | |
1952 | default: /* no need to modify the others */ | |
1953 | break; | |
1954 | } | |
1955 | list_move_tail(&cqr->devlist, &flush_queue); | |
1956 | } | |
1957 | finished: | |
1958 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | |
1959 | /* | |
1960 | * After this point all requests must be in state CLEAR_PENDING, | |
1961 | * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become | |
1962 | * one of the others. | |
1963 | */ | |
1964 | list_for_each_entry_safe(cqr, n, &flush_queue, devlist) | |
1965 | wait_event(dasd_flush_wq, | |
1966 | (cqr->status != DASD_CQR_CLEAR_PENDING)); | |
1967 | /* | |
1968 | * Now set each request back to TERMINATED, DONE or NEED_ERP | |
1969 | * and call the callback function of flushed requests | |
1970 | */ | |
1971 | __dasd_device_process_final_queue(device, &flush_queue); | |
1972 | return rc; | |
1973 | } | |
1974 | ||
1975 | /* | |
1976 | * Acquire the device lock and process queues for the device. | |
1977 | */ | |
1978 | static void dasd_device_tasklet(struct dasd_device *device) | |
1979 | { | |
1980 | struct list_head final_queue; | |
1981 | ||
1982 | atomic_set (&device->tasklet_scheduled, 0); | |
1983 | INIT_LIST_HEAD(&final_queue); | |
1984 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | |
1985 | /* Check expire time of first request on the ccw queue. */ | |
1986 | __dasd_device_check_expire(device); | |
1987 | /* find final requests on ccw queue */ | |
1988 | __dasd_device_process_ccw_queue(device, &final_queue); | |
1989 | __dasd_device_check_path_events(device); | |
1990 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | |
1991 | /* Now call the callback function of requests with final status */ | |
1992 | __dasd_device_process_final_queue(device, &final_queue); | |
1993 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | |
1994 | /* Now check if the head of the ccw queue needs to be started. */ | |
1995 | __dasd_device_start_head(device); | |
1996 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | |
1997 | if (waitqueue_active(&shutdown_waitq)) | |
1998 | wake_up(&shutdown_waitq); | |
1999 | dasd_put_device(device); | |
2000 | } | |
2001 | ||
2002 | /* | |
2003 | * Schedules a call to dasd_device_tasklet over the device tasklet. | |
2004 | */ | |
2005 | void dasd_schedule_device_bh(struct dasd_device *device) | |
2006 | { | |
2007 | /* Protect against rescheduling. */ | |
2008 | if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) | |
2009 | return; | |
2010 | dasd_get_device(device); | |
2011 | tasklet_hi_schedule(&device->tasklet); | |
2012 | } | |
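/*
 * Editor's note: dasd_schedule_device_bh() takes a device reference via
 * dasd_get_device() before scheduling; the matching dasd_put_device() is
 * issued at the end of dasd_device_tasklet() above, so the device cannot
 * go away while the tasklet is pending.
 */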
2013 | ||
2014 | void dasd_device_set_stop_bits(struct dasd_device *device, int bits) | |
2015 | { | |
2016 | device->stopped |= bits; | |
2017 | } | |
2018 | EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); | |
2019 | ||
2020 | void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) | |
2021 | { | |
2022 | device->stopped &= ~bits; | |
2023 | if (!device->stopped) | |
2024 | wake_up(&generic_waitq); | |
2025 | } | |
2026 | EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); | |
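/*
 * Editor's note: a usage sketch (not part of the driver). Callers
 * manipulate the stop bits under the ccw device lock, e.g. to pause
 * request processing temporarily:
 *
 *	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 *	dasd_device_set_stop_bits(device, DASD_STOPPED_PENDING);
 *	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 *	...
 *	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
 *	dasd_schedule_device_bh(device);
 *
 * Compare __dasd_process_request_queue() and dasd_device_timeout().
 */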
2027 | ||
2028 | /* | |
2029 | * Queue a request to the head of the device ccw_queue. | |
2030 | * Start the I/O if possible. | |
2031 | */ | |
2032 | void dasd_add_request_head(struct dasd_ccw_req *cqr) | |
2033 | { | |
2034 | struct dasd_device *device; | |
2035 | unsigned long flags; | |
2036 | ||
2037 | device = cqr->startdev; | |
2038 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | |
2039 | cqr->status = DASD_CQR_QUEUED; | |
2040 | list_add(&cqr->devlist, &device->ccw_queue); | |
2041 | /* let the bh start the requests to keep them in order */ | |
2042 | dasd_schedule_device_bh(device); | |
2043 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | |
2044 | } | |
2045 | ||
2046 | /* | |
2047 | * Queue a request to the tail of the device ccw_queue. | |
2048 | * Start the I/O if possible. | |
2049 | */ | |
2050 | void dasd_add_request_tail(struct dasd_ccw_req *cqr) | |
2051 | { | |
2052 | struct dasd_device *device; | |
2053 | unsigned long flags; | |
2054 | ||
2055 | device = cqr->startdev; | |
2056 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | |
2057 | cqr->status = DASD_CQR_QUEUED; | |
2058 | list_add_tail(&cqr->devlist, &device->ccw_queue); | |
2059 | /* let the bh start the requests to keep them in order */ | |
2060 | dasd_schedule_device_bh(device); | |
2061 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | |
2062 | } | |
2063 | ||
2064 | /* | |
2065 | * Wakeup helper for the 'sleep_on' functions. | |
2066 | */ | |
2067 | void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) | |
2068 | { | |
2069 | spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); | |
2070 | cqr->callback_data = DASD_SLEEPON_END_TAG; | |
2071 | spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); | |
2072 | wake_up(&generic_waitq); | |
2073 | } | |
2074 | EXPORT_SYMBOL_GPL(dasd_wakeup_cb); | |
2075 | ||
2076 | static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) | |
2077 | { | |
2078 | struct dasd_device *device; | |
2079 | int rc; | |
2080 | ||
2081 | device = cqr->startdev; | |
2082 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | |
2083 | rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); | |
2084 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | |
2085 | return rc; | |
2086 | } | |
2087 | ||
2088 | /* | |
2089 | * Checks if error recovery is necessary; returns 1 if yes, 0 otherwise. | |
2090 | */ | |
2091 | static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) | |
2092 | { | |
2093 | struct dasd_device *device; | |
2094 | dasd_erp_fn_t erp_fn; | |
2095 | ||
2096 | if (cqr->status == DASD_CQR_FILLED) | |
2097 | return 0; | |
2098 | device = cqr->startdev; | |
2099 | if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { | |
2100 | if (cqr->status == DASD_CQR_TERMINATED) { | |
2101 | device->discipline->handle_terminated_request(cqr); | |
2102 | return 1; | |
2103 | } | |
2104 | if (cqr->status == DASD_CQR_NEED_ERP) { | |
2105 | erp_fn = device->discipline->erp_action(cqr); | |
2106 | erp_fn(cqr); | |
2107 | return 1; | |
2108 | } | |
2109 | if (cqr->status == DASD_CQR_FAILED) | |
2110 | dasd_log_sense(cqr, &cqr->irb); | |
2111 | if (cqr->refers) { | |
2112 | __dasd_process_erp(device, cqr); | |
2113 | return 1; | |
2114 | } | |
2115 | } | |
2116 | return 0; | |
2117 | } | |
2118 | ||
2119 | static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) | |
2120 | { | |
2121 | if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { | |
2122 | if (cqr->refers) /* erp is not done yet */ | |
2123 | return 1; | |
2124 | return ((cqr->status != DASD_CQR_DONE) && | |
2125 | (cqr->status != DASD_CQR_FAILED)); | |
2126 | } else | |
2127 | return (cqr->status == DASD_CQR_FILLED); | |
2128 | } | |
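/*
 * Editor's summary of the cqr states used above: a request normally moves
 * FILLED -> QUEUED -> IN_IO -> SUCCESS -> DONE. On error it moves
 * IN_IO -> ERROR -> NEED_ERP, and the ERP request chained via cqr->refers
 * is processed until __dasd_process_erp() resolves the chain to DONE or
 * FAILED. Termination takes IN_IO -> CLEAR_PENDING -> CLEARED ->
 * TERMINATED.
 */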
2129 | ||
2130 | static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) | |
2131 | { | |
2132 | struct dasd_device *device; | |
2133 | int rc; | |
2134 | struct list_head ccw_queue; | |
2135 | struct dasd_ccw_req *cqr; | |
2136 | ||
2137 | INIT_LIST_HEAD(&ccw_queue); | |
2138 | maincqr->status = DASD_CQR_FILLED; | |
2139 | device = maincqr->startdev; | |
2140 | list_add(&maincqr->blocklist, &ccw_queue); | |
2141 | for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); | |
2142 | cqr = list_first_entry(&ccw_queue, | |
2143 | struct dasd_ccw_req, blocklist)) { | |
2144 | ||
2145 | if (__dasd_sleep_on_erp(cqr)) | |
2146 | continue; | |
2147 | if (cqr->status != DASD_CQR_FILLED) /* could be failed */ | |
2148 | continue; | |
2149 | if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && | |
2150 | !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { | |
2151 | cqr->status = DASD_CQR_FAILED; | |
2152 | cqr->intrc = -EPERM; | |
2153 | continue; | |
2154 | } | |
2155 | /* Non-temporary stop condition will trigger fail fast */ | |
2156 | if (device->stopped & ~DASD_STOPPED_PENDING && | |
2157 | test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && | |
2158 | (!dasd_eer_enabled(device))) { | |
2159 | cqr->status = DASD_CQR_FAILED; | |
2160 | continue; | |
2161 | } | |
2162 | /* Don't try to start requests if device is stopped */ | |
2163 | if (interruptible) { | |
2164 | rc = wait_event_interruptible( | |
2165 | generic_waitq, !(device->stopped)); | |
2166 | if (rc == -ERESTARTSYS) { | |
2167 | cqr->status = DASD_CQR_FAILED; | |
2168 | maincqr->intrc = rc; | |
2169 | continue; | |
2170 | } | |
2171 | } else | |
2172 | wait_event(generic_waitq, !(device->stopped)); | |
2173 | ||
2174 | if (!cqr->callback) | |
2175 | cqr->callback = dasd_wakeup_cb; | |
2176 | ||
2177 | cqr->callback_data = DASD_SLEEPON_START_TAG; | |
2178 | dasd_add_request_tail(cqr); | |
2179 | if (interruptible) { | |
2180 | rc = wait_event_interruptible( | |
2181 | generic_waitq, _wait_for_wakeup(cqr)); | |
2182 | if (rc == -ERESTARTSYS) { | |
2183 | dasd_cancel_req(cqr); | |
2184 | /* wait (non-interruptible) for final status */ | |
2185 | wait_event(generic_waitq, | |
2186 | _wait_for_wakeup(cqr)); | |
2187 | cqr->status = DASD_CQR_FAILED; | |
2188 | maincqr->intrc = rc; | |
2189 | continue; | |
2190 | } | |
2191 | } else | |
2192 | wait_event(generic_waitq, _wait_for_wakeup(cqr)); | |
2193 | } | |
2194 | ||
2195 | maincqr->endclk = get_clock(); | |
2196 | if ((maincqr->status != DASD_CQR_DONE) && | |
2197 | (maincqr->intrc != -ERESTARTSYS)) | |
2198 | dasd_log_sense(maincqr, &maincqr->irb); | |
2199 | if (maincqr->status == DASD_CQR_DONE) | |
2200 | rc = 0; | |
2201 | else if (maincqr->intrc) | |
2202 | rc = maincqr->intrc; | |
2203 | else | |
2204 | rc = -EIO; | |
2205 | return rc; | |
2206 | } | |
2207 | ||
2208 | /* | |
2209 | * Queue a request to the tail of the device ccw_queue and wait for | |
2210 | * its completion. | |
2211 | */ | |
2212 | int dasd_sleep_on(struct dasd_ccw_req *cqr) | |
2213 | { | |
2214 | return _dasd_sleep_on(cqr, 0); | |
2215 | } | |
2216 | ||
2217 | /* | |
2218 | * Queue a request to the tail of the device ccw_queue and wait | |
2219 | * interruptible for its completion. | |
2220 | */ | |
2221 | int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) | |
2222 | { | |
2223 | return _dasd_sleep_on(cqr, 1); | |
2224 | } | |
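/*
 * Editor's note: a minimal usage sketch (not part of the driver),
 * assuming a discipline has already built a valid cqr:
 *
 *	cqr->startdev = device;
 *	cqr->retries = 2;
 *	rc = dasd_sleep_on(cqr);
 *	if (rc)
 *		return rc;	// request failed or was interrupted
 *
 * _dasd_sleep_on() takes care of queueing, ERP and the final status.
 */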
2225 | ||
2226 | /* | |
2227 | * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock | |
2228 | * for eckd devices) the currently running request has to be terminated | |
2229 | * and put back to status queued, before the special request is added | |
2230 | * to the head of the queue. Then the special request is waited on normally. | |
2231 | */ | |
2232 | static inline int _dasd_term_running_cqr(struct dasd_device *device) | |
2233 | { | |
2234 | struct dasd_ccw_req *cqr; | |
2235 | int rc; | |
2236 | ||
2237 | if (list_empty(&device->ccw_queue)) | |
2238 | return 0; | |
2239 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); | |
2240 | rc = device->discipline->term_IO(cqr); | |
2241 | if (!rc) | |
2242 | /* | |
2243 | * CQR terminated because a more important request is pending. | |
2244 | * Undo decreasing of retry counter because this is | |
2245 | * not an error case. | |
2246 | */ | |
2247 | cqr->retries++; | |
2248 | return rc; | |
2249 | } | |
2250 | ||
2251 | int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) | |
2252 | { | |
2253 | struct dasd_device *device; | |
2254 | int rc; | |
2255 | ||
2256 | device = cqr->startdev; | |
2257 | if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && | |
2258 | !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { | |
2259 | cqr->status = DASD_CQR_FAILED; | |
2260 | cqr->intrc = -EPERM; | |
2261 | return -EIO; | |
2262 | } | |
2263 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | |
2264 | rc = _dasd_term_running_cqr(device); | |
2265 | if (rc) { | |
2266 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | |
2267 | return rc; | |
2268 | } | |
2269 | cqr->callback = dasd_wakeup_cb; | |
2270 | cqr->callback_data = DASD_SLEEPON_START_TAG; | |
2271 | cqr->status = DASD_CQR_QUEUED; | |
2272 | /* | |
2273 | * add the new request as second on the queue; | |
2274 | * the terminated cqr needs to be finished first | |
2275 | */ | |
2276 | list_add(&cqr->devlist, device->ccw_queue.next); | |
2277 | ||
2278 | /* let the bh start the requests to keep them in order */ | |
2279 | dasd_schedule_device_bh(device); | |
2280 | ||
2281 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | |
2282 | ||
2283 | wait_event(generic_waitq, _wait_for_wakeup(cqr)); | |
2284 | ||
2285 | if (cqr->status == DASD_CQR_DONE) | |
2286 | rc = 0; | |
2287 | else if (cqr->intrc) | |
2288 | rc = cqr->intrc; | |
2289 | else | |
2290 | rc = -EIO; | |
2291 | return rc; | |
2292 | } | |
2293 | ||
2294 | /* | |
2295 | * Cancels a request that was started with dasd_sleep_on_req. | |
2296 | * This is useful for timing out requests. The request will be | |
2297 | * terminated if it is currently in i/o. | |
2298 | * Returns 1 if the request has been terminated. | |
2299 | * 0 if there was no need to terminate the request (not started yet) | |
2300 | * negative error code if termination failed | |
2301 | * Cancellation of a request is an asynchronous operation! The calling | |
2302 | * function has to wait until the request is properly returned via callback. | |
2303 | */ | |
2304 | int dasd_cancel_req(struct dasd_ccw_req *cqr) | |
2305 | { | |
2306 | struct dasd_device *device = cqr->startdev; | |
2307 | unsigned long flags; | |
2308 | int rc; | |
2309 | ||
2310 | rc = 0; | |
2311 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | |
2312 | switch (cqr->status) { | |
2313 | case DASD_CQR_QUEUED: | |
2314 | /* request was not started - just set to cleared */ | |
2315 | cqr->status = DASD_CQR_CLEARED; | |
2316 | break; | |
2317 | case DASD_CQR_IN_IO: | |
2318 | /* request in IO - terminate IO and release again */ | |
2319 | rc = device->discipline->term_IO(cqr); | |
2320 | if (rc) { | |
2321 | dev_err(&device->cdev->dev, | |
2322 | "Cancelling request %p failed with rc=%d\n", | |
2323 | cqr, rc); | |
2324 | } else { | |
2325 | cqr->stopclk = get_clock(); | |
2326 | } | |
2327 | break; | |
2328 | default: /* already finished or clear pending - do nothing */ | |
2329 | break; | |
2330 | } | |
2331 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | |
2332 | dasd_schedule_device_bh(device); | |
2333 | return rc; | |
2334 | } | |
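/*
 * Editor's note: a sketch of the timeout pattern (not part of the
 * driver). Since cancellation is asynchronous, a caller that cancels a
 * sleep_on request must still wait for the callback:
 *
 *	if (dasd_cancel_req(cqr) >= 0)
 *		wait_event(generic_waitq, _wait_for_wakeup(cqr));
 *
 * This matches what _dasd_sleep_on() does when interrupted by a signal.
 */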
2335 | ||
2336 | ||
2337 | /* | |
2338 | * SECTION: Operations of the dasd_block layer. | |
2339 | */ | |
2340 | ||
2341 | /* | |
2342 | * Timeout function for dasd_block. This is used when the block layer | |
2343 | * is waiting for something that may not arrive reliably (e.g. a state | |
2344 | * change interrupt). | |
2345 | */ | |
2346 | static void dasd_block_timeout(unsigned long ptr) | |
2347 | { | |
2348 | unsigned long flags; | |
2349 | struct dasd_block *block; | |
2350 | ||
2351 | block = (struct dasd_block *) ptr; | |
2352 | spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); | |
2353 | /* re-activate request queue */ | |
2354 | dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); | |
2355 | spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); | |
2356 | dasd_schedule_block_bh(block); | |
2357 | } | |
2358 | ||
2359 | /* | |
2360 | * Setup timeout for a dasd_block in jiffies. | |
2361 | */ | |
2362 | void dasd_block_set_timer(struct dasd_block *block, int expires) | |
2363 | { | |
2364 | if (expires == 0) | |
2365 | del_timer(&block->timer); | |
2366 | else | |
2367 | mod_timer(&block->timer, jiffies + expires); | |
2368 | } | |
2369 | ||
2370 | /* | |
2371 | * Clear timeout for a dasd_block. | |
2372 | */ | |
2373 | void dasd_block_clear_timer(struct dasd_block *block) | |
2374 | { | |
2375 | del_timer(&block->timer); | |
2376 | } | |
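/*
 * Editor's note: __dasd_process_request_queue() below arms this timer
 * when a request cannot be built right now:
 *
 *	dasd_device_set_stop_bits(basedev, DASD_STOPPED_PENDING);
 *	dasd_block_set_timer(block, HZ/2);
 *
 * After half a second dasd_block_timeout() removes the stop bit again
 * and reschedules the block tasklet.
 */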
2377 | ||
2378 | /* | |
2379 | * Process finished error recovery ccw. | |
2380 | */ | |
2381 | static void __dasd_process_erp(struct dasd_device *device, | |
2382 | struct dasd_ccw_req *cqr) | |
2383 | { | |
2384 | dasd_erp_fn_t erp_fn; | |
2385 | ||
2386 | if (cqr->status == DASD_CQR_DONE) | |
2387 | DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); | |
2388 | else | |
2389 | dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); | |
2390 | erp_fn = device->discipline->erp_postaction(cqr); | |
2391 | erp_fn(cqr); | |
2392 | } | |
2393 | ||
2394 | /* | |
2395 | * Fetch requests from the block device queue. | |
2396 | */ | |
2397 | static void __dasd_process_request_queue(struct dasd_block *block) | |
2398 | { | |
2399 | struct request_queue *queue; | |
2400 | struct request *req; | |
2401 | struct dasd_ccw_req *cqr; | |
2402 | struct dasd_device *basedev; | |
2403 | unsigned long flags; | |
2404 | queue = block->request_queue; | |
2405 | basedev = block->base; | |
2406 | /* No queue? Then there is nothing to do. */ | |
2407 | if (queue == NULL) | |
2408 | return; | |
2409 | ||
2410 | /* | |
2411 | * We requeue requests from the block device queue to the ccw | |
2412 | * queue only in two states. In state DASD_STATE_READY the | |
2413 | * partition detection is done and we need to requeue requests | |
2414 | * for that. State DASD_STATE_ONLINE is normal block device | |
2415 | * operation. | |
2416 | */ | |
2417 | if (basedev->state < DASD_STATE_READY) { | |
2418 | while ((req = blk_fetch_request(block->request_queue))) | |
2419 | __blk_end_request_all(req, -EIO); | |
2420 | return; | |
2421 | } | |
2422 | /* Now we try to fetch requests from the request queue */ | |
2423 | while ((req = blk_peek_request(queue))) { | |
2424 | if (basedev->features & DASD_FEATURE_READONLY && | |
2425 | rq_data_dir(req) == WRITE) { | |
2426 | DBF_DEV_EVENT(DBF_ERR, basedev, | |
2427 | "Rejecting write request %p", | |
2428 | req); | |
2429 | blk_start_request(req); | |
2430 | __blk_end_request_all(req, -EIO); | |
2431 | continue; | |
2432 | } | |
2433 | cqr = basedev->discipline->build_cp(basedev, block, req); | |
2434 | if (IS_ERR(cqr)) { | |
2435 | if (PTR_ERR(cqr) == -EBUSY) | |
2436 | break; /* normal end condition */ | |
2437 | if (PTR_ERR(cqr) == -ENOMEM) | |
2438 | break; /* terminate request queue loop */ | |
2439 | if (PTR_ERR(cqr) == -EAGAIN) { | |
2440 | /* | |
2441 | * The current request cannot be built right | |
2442 | * now, so we have to try again later. If this request | |
2443 | * is the head-of-queue we stop the device | |
2444 | * for 1/2 second. | |
2445 | */ | |
2446 | if (!list_empty(&block->ccw_queue)) | |
2447 | break; | |
2448 | spin_lock_irqsave( | |
2449 | get_ccwdev_lock(basedev->cdev), flags); | |
2450 | dasd_device_set_stop_bits(basedev, | |
2451 | DASD_STOPPED_PENDING); | |
2452 | spin_unlock_irqrestore( | |
2453 | get_ccwdev_lock(basedev->cdev), flags); | |
2454 | dasd_block_set_timer(block, HZ/2); | |
2455 | break; | |
2456 | } | |
2457 | DBF_DEV_EVENT(DBF_ERR, basedev, | |
2458 | "CCW creation failed (rc=%ld) " | |
2459 | "on request %p", | |
2460 | PTR_ERR(cqr), req); | |
2461 | blk_start_request(req); | |
2462 | __blk_end_request_all(req, -EIO); | |
2463 | continue; | |
2464 | } | |
2465 | /* | |
2466 | * Note: callback is set to dasd_return_cqr_cb in | |
2467 | * __dasd_block_start_head to cover erp requests as well | |
2468 | */ | |
2469 | cqr->callback_data = (void *) req; | |
2470 | cqr->status = DASD_CQR_FILLED; | |
2471 | blk_start_request(req); | |
2472 | list_add_tail(&cqr->blocklist, &block->ccw_queue); | |
2473 | dasd_profile_start(block, cqr, req); | |
2474 | } | |
2475 | } | |
2476 | ||
2477 | static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) | |
2478 | { | |
2479 | struct request *req; | |
2480 | int status; | |
2481 | int error = 0; | |
2482 | ||
2483 | req = (struct request *) cqr->callback_data; | |
2484 | dasd_profile_end(cqr->block, cqr, req); | |
2485 | status = cqr->block->base->discipline->free_cp(cqr, req); | |
2486 | if (status <= 0) | |
2487 | error = status ? status : -EIO; | |
2488 | __blk_end_request_all(req, error); | |
2489 | } | |
2490 | ||
2491 | /* | |
2492 | * Process ccw request queue. | |
2493 | */ | |
2494 | static void __dasd_process_block_ccw_queue(struct dasd_block *block, | |
2495 | struct list_head *final_queue) | |
2496 | { | |
2497 | struct list_head *l, *n; | |
2498 | struct dasd_ccw_req *cqr; | |
2499 | dasd_erp_fn_t erp_fn; | |
2500 | unsigned long flags; | |
2501 | struct dasd_device *base = block->base; | |
2502 | ||
2503 | restart: | |
2504 | /* Process requests with final status. */ | |
2505 | list_for_each_safe(l, n, &block->ccw_queue) { | |
2506 | cqr = list_entry(l, struct dasd_ccw_req, blocklist); | |
2507 | if (cqr->status != DASD_CQR_DONE && | |
2508 | cqr->status != DASD_CQR_FAILED && | |
2509 | cqr->status != DASD_CQR_NEED_ERP && | |
2510 | cqr->status != DASD_CQR_TERMINATED) | |
2511 | continue; | |
2512 | ||
2513 | if (cqr->status == DASD_CQR_TERMINATED) { | |
2514 | base->discipline->handle_terminated_request(cqr); | |
2515 | goto restart; | |
2516 | } | |
2517 | ||
2518 | /* Process requests that may be recovered */ | |
2519 | if (cqr->status == DASD_CQR_NEED_ERP) { | |
2520 | erp_fn = base->discipline->erp_action(cqr); | |
2521 | if (IS_ERR(erp_fn(cqr))) | |
2522 | continue; | |
2523 | goto restart; | |
2524 | } | |
2525 | ||
2526 | /* log sense for fatal error */ | |
2527 | if (cqr->status == DASD_CQR_FAILED) { | |
2528 | dasd_log_sense(cqr, &cqr->irb); | |
2529 | } | |
2530 | ||
2531 | /* First of all call extended error reporting. */ | |
2532 | if (dasd_eer_enabled(base) && | |
2533 | cqr->status == DASD_CQR_FAILED) { | |
2534 | dasd_eer_write(base, cqr, DASD_EER_FATALERROR); | |
2535 | ||
2536 | /* restart request */ | |
2537 | cqr->status = DASD_CQR_FILLED; | |
2538 | cqr->retries = 255; | |
2539 | spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); | |
2540 | dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); | |
2541 | spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), | |
2542 | flags); | |
2543 | goto restart; | |
2544 | } | |
2545 | ||
2546 | /* Process finished ERP request. */ | |
2547 | if (cqr->refers) { | |
2548 | __dasd_process_erp(base, cqr); | |
2549 | goto restart; | |
2550 | } | |
2551 | ||
2552 | /* Rechain finished requests to final queue */ | |
2553 | cqr->endclk = get_clock(); | |
2554 | list_move_tail(&cqr->blocklist, final_queue); | |
2555 | } | |
2556 | } | |
2557 | ||
2558 | static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) | |
2559 | { | |
2560 | dasd_schedule_block_bh(cqr->block); | |
2561 | } | |
2562 | ||
2563 | static void __dasd_block_start_head(struct dasd_block *block) | |
2564 | { | |
2565 | struct dasd_ccw_req *cqr; | |
2566 | ||
2567 | if (list_empty(&block->ccw_queue)) | |
2568 | return; | |
2569 | /* We always begin with the first requests on the queue, as some | |
2570 | * of the previously started requests have to be enqueued on a | |
2571 | * dasd_device again for error recovery. | |
2572 | */ | |
2573 | list_for_each_entry(cqr, &block->ccw_queue, blocklist) { | |
2574 | if (cqr->status != DASD_CQR_FILLED) | |
2575 | continue; | |
2576 | if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && | |
2577 | !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { | |
2578 | cqr->status = DASD_CQR_FAILED; | |
2579 | cqr->intrc = -EPERM; | |
2580 | dasd_schedule_block_bh(block); | |
2581 | continue; | |
2582 | } | |
2583 | /* Non-temporary stop condition will trigger fail fast */ | |
2584 | if (block->base->stopped & ~DASD_STOPPED_PENDING && | |
2585 | test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && | |
2586 | (!dasd_eer_enabled(block->base))) { | |
2587 | cqr->status = DASD_CQR_FAILED; | |
2588 | dasd_schedule_block_bh(block); | |
2589 | continue; | |
2590 | } | |
2591 | /* Don't try to start requests if device is stopped */ | |
2592 | if (block->base->stopped) | |
2593 | return; | |
2594 | ||
2595 | /* just a fail-safe check, should not happen */ | |
2596 | if (!cqr->startdev) | |
2597 | cqr->startdev = block->base; | |
2598 | ||
2599 | /* make sure that the requests we submit find their way back */ | |
2600 | cqr->callback = dasd_return_cqr_cb; | |
2601 | ||
2602 | dasd_add_request_tail(cqr); | |
2603 | } | |
2604 | } | |
2605 | ||
2606 | /* | |
2607 | * Central dasd_block layer routine. Takes requests from the generic | |
2608 | * block layer request queue, creates ccw requests, enqueues them on | |
2609 | * a dasd_device and processes ccw requests that have been returned. | |
2610 | */ | |
2611 | static void dasd_block_tasklet(struct dasd_block *block) | |
2612 | { | |
2613 | struct list_head final_queue; | |
2614 | struct list_head *l, *n; | |
2615 | struct dasd_ccw_req *cqr; | |
2616 | ||
2617 | atomic_set(&block->tasklet_scheduled, 0); | |
2618 | INIT_LIST_HEAD(&final_queue); | |
2619 | spin_lock(&block->queue_lock); | |
2620 | /* Finish off requests on ccw queue */ | |
2621 | __dasd_process_block_ccw_queue(block, &final_queue); | |
2622 | spin_unlock(&block->queue_lock); | |
2623 | /* Now call the callback function of requests with final status */ | |
2624 | spin_lock_irq(&block->request_queue_lock); | |
2625 | list_for_each_safe(l, n, &final_queue) { | |
2626 | cqr = list_entry(l, struct dasd_ccw_req, blocklist); | |
2627 | list_del_init(&cqr->blocklist); | |
2628 | __dasd_cleanup_cqr(cqr); | |
2629 | } | |
2630 | spin_lock(&block->queue_lock); | |
2631 | /* Get new request from the block device request queue */ | |
2632 | __dasd_process_request_queue(block); | |
2633 | /* Now check if the head of the ccw queue needs to be started. */ | |
2634 | __dasd_block_start_head(block); | |
2635 | spin_unlock(&block->queue_lock); | |
2636 | spin_unlock_irq(&block->request_queue_lock); | |
2637 | if (waitqueue_active(&shutdown_waitq)) | |
2638 | wake_up(&shutdown_waitq); | |
2639 | dasd_put_device(block->base); | |
2640 | } | |
2641 | ||
2642 | static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) | |
2643 | { | |
2644 | wake_up(&dasd_flush_wq); | |
2645 | } | |
2646 | ||
2647 | /* | |
2648 | * Go through all requests on the dasd_block request queue, cancel them | |
2649 | * on the respective dasd_device, and return them to the generic | |
2650 | * block layer. | |
2651 | */ | |
2652 | static int dasd_flush_block_queue(struct dasd_block *block) | |
2653 | { | |
2654 | struct dasd_ccw_req *cqr, *n; | |
2655 | int rc, i; | |
2656 | struct list_head flush_queue; | |
2657 | ||
2658 | INIT_LIST_HEAD(&flush_queue); | |
2659 | spin_lock_bh(&block->queue_lock); | |
2660 | rc = 0; | |
2661 | restart: | |
2662 | list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { | |
2663 | /* if this request is currently owned by a dasd_device, cancel it */ | |
2664 | if (cqr->status >= DASD_CQR_QUEUED) | |
2665 | rc = dasd_cancel_req(cqr); | |
2666 | if (rc < 0) | |
2667 | break; | |
2668 | /* Rechain request (including erp chain) so it won't be | |
2669 | * touched by the dasd_block_tasklet anymore. | |
2670 | * Replace the callback so we notice when the request | |
2671 | * is returned from the dasd_device layer. | |
2672 | */ | |
2673 | cqr->callback = _dasd_wake_block_flush_cb; | |
2674 | for (i = 0; cqr != NULL; cqr = cqr->refers, i++) | |
2675 | list_move_tail(&cqr->blocklist, &flush_queue); | |
2676 | if (i > 1) | |
2677 | /* moved more than one request - need to restart */ | |
2678 | goto restart; | |
2679 | } | |
2680 | spin_unlock_bh(&block->queue_lock); | |
2681 | /* Now call the callback function of flushed requests */ | |
2682 | restart_cb: | |
2683 | list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { | |
2684 | wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); | |
2685 | /* Process finished ERP request. */ | |
2686 | if (cqr->refers) { | |
2687 | spin_lock_bh(&block->queue_lock); | |
2688 | __dasd_process_erp(block->base, cqr); | |
2689 | spin_unlock_bh(&block->queue_lock); | |
2690 | /* restart list_for_xx loop since __dasd_process_erp | |
2691 | * might remove multiple elements */ | |
2692 | goto restart_cb; | |
2693 | } | |
2694 | /* call the callback function */ | |
2695 | spin_lock_irq(&block->request_queue_lock); | |
2696 | cqr->endclk = get_clock(); | |
2697 | list_del_init(&cqr->blocklist); | |
2698 | __dasd_cleanup_cqr(cqr); | |
2699 | spin_unlock_irq(&block->request_queue_lock); | |
2700 | } | |
2701 | return rc; | |
2702 | } | |
2703 | ||
2704 | /* | |
2705 | * Schedules a call to dasd_tasklet over the device tasklet. | |
2706 | */ | |
2707 | void dasd_schedule_block_bh(struct dasd_block *block) | |
2708 | { | |
2709 | /* Protect against rescheduling. */ | |
2710 | if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) | |
2711 | return; | |
2712 | /* life cycle of block is bound to its base device */ | |
2713 | dasd_get_device(block->base); | |
2714 | tasklet_hi_schedule(&block->tasklet); | |
2715 | } | |
2716 | ||
2717 | ||
2718 | /* | |
2719 | * SECTION: external block device operations | |
2720 | * (request queue handling, open, release, etc.) | |
2721 | */ | |
2722 | ||
2723 | /* | |
2724 | * Dasd request queue function. Called from ll_rw_blk.c | |
2725 | */ | |
2726 | static void do_dasd_request(struct request_queue *queue) | |
2727 | { | |
2728 | struct dasd_block *block; | |
2729 | ||
2730 | block = queue->queuedata; | |
2731 | spin_lock(&block->queue_lock); | |
2732 | /* Get new request from the block device request queue */ | |
2733 | __dasd_process_request_queue(block); | |
2734 | /* Now check if the head of the ccw queue needs to be started. */ | |
2735 | __dasd_block_start_head(block); | |
2736 | spin_unlock(&block->queue_lock); | |
2737 | } | |
2738 | ||
2739 | /* | |
2740 | * Allocate and initialize request queue and default I/O scheduler. | |
2741 | */ | |
2742 | static int dasd_alloc_queue(struct dasd_block *block) | |
2743 | { | |
2744 | int rc; | |
2745 | ||
2746 | block->request_queue = blk_init_queue(do_dasd_request, | |
2747 | &block->request_queue_lock); | |
2748 | if (block->request_queue == NULL) | |
2749 | return -ENOMEM; | |
2750 | ||
2751 | block->request_queue->queuedata = block; | |
2752 | ||
2753 | elevator_exit(block->request_queue->elevator); | |
2754 | block->request_queue->elevator = NULL; | |
2755 | rc = elevator_init(block->request_queue, "deadline"); | |
2756 | if (rc) { | |
2757 | blk_cleanup_queue(block->request_queue); | |
2758 | return rc; | |
2759 | } | |
2760 | return 0; | |
2761 | } | |
2762 | ||
2763 | /* | |
2764 | * Allocate and initialize request queue. | |
2765 | */ | |
2766 | static void dasd_setup_queue(struct dasd_block *block) | |
2767 | { | |
2768 | int max; | |
2769 | ||
2770 | if (block->base->features & DASD_FEATURE_USERAW) { | |
2771 | /* | |
2772 | * the max_blocks value for raw_track access is 256 | |
2773 | * it is higher than the native ECKD value because we | |
2774 | * only need one ccw per track | |
2775 | * so the max_hw_sectors are | |
2776 | * 2048 x 512B = 1024kB = 16 tracks | |
2777 | */ | |
2778 | max = 2048; | |
2779 | } else { | |
2780 | max = block->base->discipline->max_blocks << block->s2b_shift; | |
2781 | } | |
2782 | blk_queue_logical_block_size(block->request_queue, | |
2783 | block->bp_block); | |
2784 | blk_queue_max_hw_sectors(block->request_queue, max); | |
2785 | blk_queue_max_segments(block->request_queue, -1L); | |
2786 | /* with page-sized segments we can translate each segment into | |
2787 | * one idaw/tidaw | |
2788 | */ | |
2789 | blk_queue_max_segment_size(block->request_queue, PAGE_SIZE); | |
2790 | blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1); | |
2791 | } | |
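/*
 * Editor's note: a worked example with hypothetical numbers. For
 * 4096-byte blocks s2b_shift is 3 (eight 512-byte sectors per block),
 * so a discipline reporting max_blocks = 96 yields
 * max = 96 << 3 = 768 sectors, i.e. 384kB per request. The raw-track
 * maximum of 2048 sectors corresponds to the 1024kB noted above.
 */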
2792 | ||
2793 | /* | |
2794 | * Deactivate and free request queue. | |
2795 | */ | |
2796 | static void dasd_free_queue(struct dasd_block *block) | |
2797 | { | |
2798 | if (block->request_queue) { | |
2799 | blk_cleanup_queue(block->request_queue); | |
2800 | block->request_queue = NULL; | |
2801 | } | |
2802 | } | |
2803 | ||
2804 | /* | |
2805 | * Flush requests on the request queue. | |
2806 | */ | |
2807 | static void dasd_flush_request_queue(struct dasd_block *block) | |
2808 | { | |
2809 | struct request *req; | |
2810 | ||
2811 | if (!block->request_queue) | |
2812 | return; | |
2813 | ||
2814 | spin_lock_irq(&block->request_queue_lock); | |
2815 | while ((req = blk_fetch_request(block->request_queue))) | |
2816 | __blk_end_request_all(req, -EIO); | |
2817 | spin_unlock_irq(&block->request_queue_lock); | |
2818 | } | |
2819 | ||
2820 | static int dasd_open(struct block_device *bdev, fmode_t mode) | |
2821 | { | |
2822 | struct dasd_device *base; | |
2823 | int rc; | |
2824 | ||
2825 | base = dasd_device_from_gendisk(bdev->bd_disk); | |
2826 | if (!base) | |
2827 | return -ENODEV; | |
2828 | ||
2829 | atomic_inc(&base->block->open_count); | |
2830 | if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { | |
2831 | rc = -ENODEV; | |
2832 | goto unlock; | |
2833 | } | |
2834 | ||
2835 | if (!try_module_get(base->discipline->owner)) { | |
2836 | rc = -EINVAL; | |
2837 | goto unlock; | |
2838 | } | |
2839 | ||
2840 | if (dasd_probeonly) { | |
2841 | dev_info(&base->cdev->dev, | |
2842 | "Accessing the DASD failed because it is in " | |
2843 | "probeonly mode\n"); | |
2844 | rc = -EPERM; | |
2845 | goto out; | |
2846 | } | |
2847 | ||
2848 | if (base->state <= DASD_STATE_BASIC) { | |
2849 | DBF_DEV_EVENT(DBF_ERR, base, " %s", | |
2850 | " Cannot open unrecognized device"); | |
2851 | rc = -ENODEV; | |
2852 | goto out; | |
2853 | } | |
2854 | ||
2855 | if ((mode & FMODE_WRITE) && | |
2856 | (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || | |
2857 | (base->features & DASD_FEATURE_READONLY))) { | |
2858 | rc = -EROFS; | |
2859 | goto out; | |
2860 | } | |
2861 | ||
2862 | dasd_put_device(base); | |
2863 | return 0; | |
2864 | ||
2865 | out: | |
2866 | module_put(base->discipline->owner); | |
2867 | unlock: | |
2868 | atomic_dec(&base->block->open_count); | |
2869 | dasd_put_device(base); | |
2870 | return rc; | |
2871 | } | |
2872 | ||
2873 | static int dasd_release(struct gendisk *disk, fmode_t mode) | |
2874 | { | |
2875 | struct dasd_device *base; | |
2876 | ||
2877 | base = dasd_device_from_gendisk(disk); | |
2878 | if (!base) | |
2879 | return -ENODEV; | |
2880 | ||
2881 | atomic_dec(&base->block->open_count); | |
2882 | module_put(base->discipline->owner); | |
2883 | dasd_put_device(base); | |
2884 | return 0; | |
2885 | } | |
2886 | ||
2887 | /* | |
2888 | * Return disk geometry. | |
2889 | */ | |
2890 | static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |
2891 | { | |
2892 | struct dasd_device *base; | |
2893 | ||
2894 | base = dasd_device_from_gendisk(bdev->bd_disk); | |
2895 | if (!base) | |
2896 | return -ENODEV; | |
2897 | ||
2898 | if (!base->discipline || | |
2899 | !base->discipline->fill_geometry) { | |
2900 | dasd_put_device(base); | |
2901 | return -EINVAL; | |
2902 | } | |
2903 | base->discipline->fill_geometry(base->block, geo); | |
2904 | geo->start = get_start_sect(bdev) >> base->block->s2b_shift; | |
2905 | dasd_put_device(base); | |
2906 | return 0; | |
2907 | } | |
2908 | ||
2909 | const struct block_device_operations | |
2910 | dasd_device_operations = { | |
2911 | .owner = THIS_MODULE, | |
2912 | .open = dasd_open, | |
2913 | .release = dasd_release, | |
2914 | .ioctl = dasd_ioctl, | |
2915 | .compat_ioctl = dasd_ioctl, | |
2916 | .getgeo = dasd_getgeo, | |
2917 | }; | |
2918 | ||
2919 | /******************************************************************************* | |
2920 | * end of block device operations | |
2921 | */ | |
2922 | ||
2923 | static void | |
2924 | dasd_exit(void) | |
2925 | { | |
2926 | #ifdef CONFIG_PROC_FS | |
2927 | dasd_proc_exit(); | |
2928 | #endif | |
2929 | dasd_eer_exit(); | |
2930 | if (dasd_page_cache != NULL) { | |
2931 | kmem_cache_destroy(dasd_page_cache); | |
2932 | dasd_page_cache = NULL; | |
2933 | } | |
2934 | dasd_gendisk_exit(); | |
2935 | dasd_devmap_exit(); | |
2936 | if (dasd_debug_area != NULL) { | |
2937 | debug_unregister(dasd_debug_area); | |
2938 | dasd_debug_area = NULL; | |
2939 | } | |
2940 | dasd_statistics_removeroot(); | |
2941 | } | |
2942 | ||
2943 | /* | |
2944 | * SECTION: common functions for ccw_driver use | |
2945 | */ | |
2946 | ||
2947 | /* | |
2948 | * Is the device read-only? | |
2949 | * Note that this function does not report the setting of the | |
2950 | * readonly device attribute, but how it is configured in z/VM. | |
2951 | */ | |
2952 | int dasd_device_is_ro(struct dasd_device *device) | |
2953 | { | |
2954 | struct ccw_dev_id dev_id; | |
2955 | struct diag210 diag_data; | |
2956 | int rc; | |
2957 | ||
2958 | if (!MACHINE_IS_VM) | |
2959 | return 0; | |
2960 | ccw_device_get_id(device->cdev, &dev_id); | |
2961 | memset(&diag_data, 0, sizeof(diag_data)); | |
2962 | diag_data.vrdcdvno = dev_id.devno; | |
2963 | diag_data.vrdclen = sizeof(diag_data); | |
2964 | rc = diag210(&diag_data); | |
2965 | if (rc == 0 || rc == 2) { | |
2966 | return diag_data.vrdcvfla & 0x80; | |
2967 | } else { | |
2968 | DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", | |
2969 | dev_id.devno, rc); | |
2970 | return 0; | |
2971 | } | |
2972 | } | |
2973 | EXPORT_SYMBOL_GPL(dasd_device_is_ro); | |
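/*
 * Editor's note: a usage sketch (not part of the driver). Disciplines
 * would typically call this during device setup to honor z/VM read-only
 * minidisks:
 *
 *	if (dasd_device_is_ro(device))
 *		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
 *
 * dasd_open() above then rejects FMODE_WRITE opens with -EROFS.
 */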
2974 | ||
2975 | static void dasd_generic_auto_online(void *data, async_cookie_t cookie) | |
2976 | { | |
2977 | struct ccw_device *cdev = data; | |
2978 | int ret; | |
2979 | ||
2980 | ret = ccw_device_set_online(cdev); | |
2981 | if (ret) | |
2982 | pr_warning("%s: Setting the DASD online failed with rc=%d\n", | |
2983 | dev_name(&cdev->dev), ret); | |
2984 | } | |
2985 | ||
2986 | /* | |
2987 | * Initial attempt at a probe function. This can be simplified once | |
2988 | * the other detection code is gone. | |
2989 | */ | |
2990 | int dasd_generic_probe(struct ccw_device *cdev, | |
2991 | struct dasd_discipline *discipline) | |
2992 | { | |
2993 | int ret; | |
2994 | ||
2995 | ret = dasd_add_sysfs_files(cdev); | |
2996 | if (ret) { | |
2997 | DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", | |
2998 | "dasd_generic_probe: could not add " | |
2999 | "sysfs entries"); | |
3000 | return ret; | |
3001 | } | |
3002 | cdev->handler = &dasd_int_handler; | |
3003 | ||
3004 | /* | |
3005 | * Automatically online either all dasd devices (dasd_autodetect) | |
3006 | * or all devices specified with dasd= parameters during | |
3007 | * initial probe. | |
3008 | */ | |
3009 | if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || | |
3010 | (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) | |
3011 | async_schedule(dasd_generic_auto_online, cdev); | |
3012 | return 0; | |
3013 | } | |
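/*
 * Editor's note: a sketch with hypothetical identifiers. A discipline
 * driver forwards its ccw_driver probe callback here:
 *
 *	static int my_dasd_probe(struct ccw_device *cdev)
 *	{
 *		return dasd_generic_probe(cdev, &my_dasd_discipline);
 *	}
 *
 * and analogously forwards set_online to dasd_generic_set_online().
 */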
3014 | ||
3015 | /* | |
3016 | * This will one day be called from a global not_oper handler. | |
3017 | * It is also used by driver_unregister during module unload. | |
3018 | */ | |
3019 | void dasd_generic_remove(struct ccw_device *cdev) | |
3020 | { | |
3021 | struct dasd_device *device; | |
3022 | struct dasd_block *block; | |
3023 | ||
3024 | cdev->handler = NULL; | |
3025 | ||
3026 | dasd_remove_sysfs_files(cdev); | |
3027 | device = dasd_device_from_cdev(cdev); | |
3028 | if (IS_ERR(device)) | |
3029 | return; | |
3030 | if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { | |
3031 | /* Already doing offline processing */ | |
3032 | dasd_put_device(device); | |
3033 | return; | |
3034 | } | |
3035 | /* | |
3036 | * This device is removed unconditionally. Set the offline | |
3037 | * flag to prevent dasd_open from opening it while it is | |
3038 | * not quite down yet. | |
3039 | */ | |
3040 | dasd_set_target_state(device, DASD_STATE_NEW); | |
3041 | /* dasd_delete_device destroys the device reference. */ | |
3042 | block = device->block; | |
3043 | dasd_delete_device(device); | |
3044 | /* | |
3045 | * life cycle of block is bound to device, so delete it after | |
3046 | * device was safely removed | |
3047 | */ | |
3048 | if (block) | |
3049 | dasd_free_block(block); | |
3050 | } | |
3051 | ||
3052 | /* | |
3053 | * Activate a device. This is called from dasd_{eckd,fba}_probe() when either | |
3054 | * the device is detected for the first time and is supposed to be used | |
3055 | * or the user has started activation through sysfs. | |
3056 | */ | |
3057 | int dasd_generic_set_online(struct ccw_device *cdev, | |
3058 | struct dasd_discipline *base_discipline) | |
3059 | { | |
3060 | struct dasd_discipline *discipline; | |
3061 | struct dasd_device *device; | |
3062 | int rc; | |
3063 | ||
3064 | /* first online clears initial online feature flag */ | |
3065 | dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); | |
3066 | device = dasd_create_device(cdev); | |
3067 | if (IS_ERR(device)) | |
3068 | return PTR_ERR(device); | |
3069 | ||
3070 | discipline = base_discipline; | |
3071 | if (device->features & DASD_FEATURE_USEDIAG) { | |
3072 | if (!dasd_diag_discipline_pointer) { | |
3073 | pr_warning("%s Setting the DASD online failed because " | |
3074 | "of missing DIAG discipline\n", | |
3075 | dev_name(&cdev->dev)); | |
3076 | dasd_delete_device(device); | |
3077 | return -ENODEV; | |
3078 | } | |
3079 | discipline = dasd_diag_discipline_pointer; | |
3080 | } | |
3081 | if (!try_module_get(base_discipline->owner)) { | |
3082 | dasd_delete_device(device); | |
3083 | return -EINVAL; | |
3084 | } | |
3085 | if (!try_module_get(discipline->owner)) { | |
3086 | module_put(base_discipline->owner); | |
3087 | dasd_delete_device(device); | |
3088 | return -EINVAL; | |
3089 | } | |
3090 | device->base_discipline = base_discipline; | |
3091 | device->discipline = discipline; | |
3092 | ||
3093 | /* check_device will allocate block device if necessary */ | |
3094 | rc = discipline->check_device(device); | |
3095 | if (rc) { | |
3096 | pr_warning("%s Setting the DASD online with discipline %s " | |
3097 | "failed with rc=%i\n", | |
3098 | dev_name(&cdev->dev), discipline->name, rc); | |
3099 | module_put(discipline->owner); | |
3100 | module_put(base_discipline->owner); | |
3101 | dasd_delete_device(device); | |
3102 | return rc; | |
3103 | } | |
3104 | ||
3105 | dasd_set_target_state(device, DASD_STATE_ONLINE); | |
3106 | if (device->state <= DASD_STATE_KNOWN) { | |
3107 | pr_warning("%s Setting the DASD online failed because of a " | |
3108 | "missing discipline\n", dev_name(&cdev->dev)); | |
3109 | rc = -ENODEV; | |
3110 | dasd_set_target_state(device, DASD_STATE_NEW); | |
3111 | if (device->block) | |
3112 | dasd_free_block(device->block); | |
3113 | dasd_delete_device(device); | |
3114 | } else | |
3115 | pr_debug("dasd_generic device %s found\n", | |
3116 | dev_name(&cdev->dev)); | |
3117 | ||
3118 | wait_event(dasd_init_waitq, _wait_for_device(device)); | |
3119 | ||
3120 | dasd_put_device(device); | |
3121 | return rc; | |
3122 | } | |
3123 | ||
3124 | int dasd_generic_set_offline(struct ccw_device *cdev) | |
3125 | { | |
3126 | struct dasd_device *device; | |
3127 | struct dasd_block *block; | |
3128 | int max_count, open_count; | |
3129 | ||
3130 | device = dasd_device_from_cdev(cdev); | |
3131 | if (IS_ERR(device)) | |
3132 | return PTR_ERR(device); | |
3133 | if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { | |
3134 | /* Already doing offline processing */ | |
3135 | dasd_put_device(device); | |
3136 | return 0; | |
3137 | } | |
3138 | /* | |
3139 | * We must make sure that this device is currently not in use. | |
3140 | * The open_count is increased for every opener; that includes | |
3141 | * the blkdev_get in dasd_scan_partitions. We are only interested | |
3142 | * in the other openers. | |
3143 | */ | |
3144 | if (device->block) { | |
3145 | max_count = device->block->bdev ? 0 : -1; | |
3146 | open_count = atomic_read(&device->block->open_count); | |
3147 | if (open_count > max_count) { | |
3148 | if (open_count > 0) | |
3149 | pr_warning("%s: The DASD cannot be set offline " | |
3150 | "with open count %i\n", | |
3151 | dev_name(&cdev->dev), open_count); | |
3152 | else | |
3153 | pr_warning("%s: The DASD cannot be set offline " | |
3154 | "while it is in use\n", | |
3155 | dev_name(&cdev->dev)); | |
3156 | clear_bit(DASD_FLAG_OFFLINE, &device->flags); | |
3157 | dasd_put_device(device); | |
3158 | return -EBUSY; | |
3159 | } | |
3160 | } | |
3161 | dasd_set_target_state(device, DASD_STATE_NEW); | |
3162 | /* dasd_delete_device destroys the device reference. */ | |
3163 | block = device->block; | |
3164 | dasd_delete_device(device); | |
3165 | /* | |
3166 | * The life cycle of the block structure is bound to the device, | |
3167 | * so delete it only after the device has been safely removed. | |
3168 | */ | |
3169 | if (block) | |
3170 | dasd_free_block(block); | |
3171 | return 0; | |
3172 | } | |
3173 | ||
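| /* | |
| * Handle the loss of the last operational channel path: trigger | |
| * extended error reporting, requeue started requests and stop the | |
| * device until a path becomes operational again. | |
| */ | |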
3174 | int dasd_generic_last_path_gone(struct dasd_device *device) | |
3175 | { | |
3176 | struct dasd_ccw_req *cqr; | |
3177 | ||
3178 | dev_warn(&device->cdev->dev, "No operational channel path is left " | |
3179 | "for the device\n"); | |
3180 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); | |
3181 | /* First of all, call extended error reporting. */ | |
3182 | dasd_eer_write(device, NULL, DASD_EER_NOPATH); | |
3183 | ||
3184 | if (device->state < DASD_STATE_BASIC) | |
3185 | return 0; | |
3186 | /* Device is active. We want to keep it. */ | |
3187 | list_for_each_entry(cqr, &device->ccw_queue, devlist) | |
3188 | if ((cqr->status == DASD_CQR_IN_IO) || | |
3189 | (cqr->status == DASD_CQR_CLEAR_PENDING)) { | |
3190 | cqr->status = DASD_CQR_QUEUED; | |
3191 | cqr->retries++; | |
3192 | } | |
3193 | dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); | |
3194 | dasd_device_clear_timer(device); | |
3195 | dasd_schedule_device_bh(device); | |
3196 | return 1; | |
3197 | } | |
3198 | EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); | |
3199 | ||
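| /* | |
| * A channel path has become operational again: clear the | |
| * DASD_STOPPED_DC_WAIT stop bit, trigger a restore if the device | |
| * is still unresumed after a suspend, and restart queue processing. | |
| */ | |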
3200 | int dasd_generic_path_operational(struct dasd_device *device) | |
3201 | { | |
3202 | dev_info(&device->cdev->dev, "A channel path to the device has become " | |
3203 | "operational\n"); | |
3204 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); | |
3205 | dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); | |
3206 | if (device->stopped & DASD_UNRESUMED_PM) { | |
3207 | dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM); | |
3208 | dasd_restore_device(device); | |
3209 | return 1; | |
3210 | } | |
3211 | dasd_schedule_device_bh(device); | |
3212 | if (device->block) | |
3213 | dasd_schedule_block_bh(device->block); | |
3214 | return 1; | |
3215 | } | |
3216 | EXPORT_SYMBOL_GPL(dasd_generic_path_operational); | |
3217 | ||
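| /* | |
| * Notifier for CIO events: CIO_GONE, CIO_BOXED and CIO_NO_PATH | |
| * are treated as loss of all channel paths, CIO_OPER as the | |
| * return of an operational path. | |
| */ | |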
3218 | int dasd_generic_notify(struct ccw_device *cdev, int event) | |
3219 | { | |
3220 | struct dasd_device *device; | |
3221 | int ret; | |
3222 | ||
3223 | device = dasd_device_from_cdev_locked(cdev); | |
3224 | if (IS_ERR(device)) | |
3225 | return 0; | |
3226 | ret = 0; | |
3227 | switch (event) { | |
3228 | case CIO_GONE: | |
3229 | case CIO_BOXED: | |
3230 | case CIO_NO_PATH: | |
3231 | device->path_data.opm = 0; | |
3232 | device->path_data.ppm = 0; | |
3233 | device->path_data.npm = 0; | |
3234 | ret = dasd_generic_last_path_gone(device); | |
3235 | break; | |
3236 | case CIO_OPER: | |
3237 | ret = 1; | |
3238 | if (device->path_data.opm) | |
3239 | ret = dasd_generic_path_operational(device); | |
3240 | break; | |
3241 | } | |
3242 | dasd_put_device(device); | |
3243 | return ret; | |
3244 | } | |
3245 | ||
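| /* | |
| * Handle per-path events: update the operational, preferred and | |
| * non-preferred path masks for each of the eight possible channel | |
| * paths, trigger verification of newly available paths and | |
| * revalidation of re-established path groups. | |
| */ | |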
3246 | void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) | |
3247 | { | |
3248 | int chp; | |
3249 | __u8 oldopm, eventlpm; | |
3250 | struct dasd_device *device; | |
3251 | ||
3252 | device = dasd_device_from_cdev_locked(cdev); | |
3253 | if (IS_ERR(device)) | |
3254 | return; | |
3255 | for (chp = 0; chp < 8; chp++) { | |
3256 | eventlpm = 0x80 >> chp; | |
3257 | if (path_event[chp] & PE_PATH_GONE) { | |
3258 | oldopm = device->path_data.opm; | |
3259 | device->path_data.opm &= ~eventlpm; | |
3260 | device->path_data.ppm &= ~eventlpm; | |
3261 | device->path_data.npm &= ~eventlpm; | |
3262 | if (oldopm && !device->path_data.opm) | |
3263 | dasd_generic_last_path_gone(device); | |
3264 | } | |
3265 | if (path_event[chp] & PE_PATH_AVAILABLE) { | |
3266 | device->path_data.opm &= ~eventlpm; | |
3267 | device->path_data.ppm &= ~eventlpm; | |
3268 | device->path_data.npm &= ~eventlpm; | |
3269 | device->path_data.tbvpm |= eventlpm; | |
3270 | dasd_schedule_device_bh(device); | |
3271 | } | |
3272 | if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { | |
3273 | DBF_DEV_EVENT(DBF_WARNING, device, "%s", | |
3274 | "Pathgroup re-established\n"); | |
3275 | if (device->discipline->kick_validate) | |
3276 | device->discipline->kick_validate(device); | |
3277 | } | |
3278 | } | |
3279 | dasd_put_device(device); | |
3280 | } | |
3281 | EXPORT_SYMBOL_GPL(dasd_generic_path_event); | |
3282 | ||
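| /* | |
| * Add the given path mask to the operational path mask; if no path | |
| * was operational before, resume the device via | |
| * dasd_generic_path_operational. | |
| */ | |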
3283 | int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) | |
3284 | { | |
3285 | if (!device->path_data.opm && lpm) { | |
3286 | device->path_data.opm = lpm; | |
3287 | dasd_generic_path_operational(device); | |
3288 | } else | |
3289 | device->path_data.opm |= lpm; | |
3290 | return 0; | |
3291 | } | |
3292 | EXPORT_SYMBOL_GPL(dasd_generic_verify_path); | |
3293 | ||
3294 | ||
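| /* | |
| * Prepare the device for suspend: mark it as suspended, disallow | |
| * new I/O, terminate started requests and requeue them so that | |
| * they are restarted on resume. Returns the result of the | |
| * discipline freeze function, if one is defined. | |
| */ | |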
3295 | int dasd_generic_pm_freeze(struct ccw_device *cdev) | |
3296 | { | |
3297 | struct dasd_ccw_req *cqr, *n; | |
3298 | int rc, rc_freeze = 0; | |
3299 | struct list_head freeze_queue; | |
3300 | struct dasd_device *device = dasd_device_from_cdev(cdev); | |
3301 | ||
3302 | if (IS_ERR(device)) | |
3303 | return PTR_ERR(device); | |
3304 | ||
3305 | /* mark device as suspended */ | |
3306 | set_bit(DASD_FLAG_SUSPENDED, &device->flags); | |
3307 | ||
3308 | if (device->discipline->freeze) | |
3309 | rc_freeze = device->discipline->freeze(device); | |
3310 | ||
3311 | /* disallow new I/O */ | |
3312 | dasd_device_set_stop_bits(device, DASD_STOPPED_PM); | |
3313 | /* clear active requests */ | |
3314 | INIT_LIST_HEAD(&freeze_queue); | |
3315 | spin_lock_irq(get_ccwdev_lock(cdev)); | |
3316 | rc = 0; | |
3317 | list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { | |
3318 | /* Check status and move request to freeze_queue */ | |
3319 | if (cqr->status == DASD_CQR_IN_IO) { | |
3320 | rc = device->discipline->term_IO(cqr); | |
3321 | if (rc) { | |
3322 | /* unable to terminate request */ | |
3323 | dev_err(&device->cdev->dev, | |
3324 | "Unable to terminate request %p " | |
3325 | "on suspend\n", cqr); | |
3326 | spin_unlock_irq(get_ccwdev_lock(cdev)); | |
3327 | dasd_put_device(device); | |
3328 | return rc; | |
3329 | } | |
3330 | } | |
3331 | list_move_tail(&cqr->devlist, &freeze_queue); | |
3332 | } | |
3333 | ||
3334 | spin_unlock_irq(get_ccwdev_lock(cdev)); | |
3335 | ||
3336 | list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) { | |
3337 | wait_event(dasd_flush_wq, | |
3338 | (cqr->status != DASD_CQR_CLEAR_PENDING)); | |
3339 | if (cqr->status == DASD_CQR_CLEARED) | |
3340 | cqr->status = DASD_CQR_QUEUED; | |
3341 | } | |
3342 | /* splice the frozen requests back onto the end of the ccw_queue */ | |
3343 | spin_lock_irq(get_ccwdev_lock(cdev)); | |
3344 | list_splice_tail(&freeze_queue, &device->ccw_queue); | |
3345 | spin_unlock_irq(get_ccwdev_lock(cdev)); | |
3346 | ||
3347 | dasd_put_device(device); | |
3348 | return rc_freeze; | |
3349 | } | |
3350 | EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze); | |
3351 | ||
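| /* | |
| * Resume the device: allow I/O again and call the discipline | |
| * restore function. A failed restore leaves the device in the | |
| * DASD_UNRESUMED_PM stop state. | |
| */ | |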
3352 | int dasd_generic_restore_device(struct ccw_device *cdev) | |
3353 | { | |
3354 | struct dasd_device *device = dasd_device_from_cdev(cdev); | |
3355 | int rc = 0; | |
3356 | ||
3357 | if (IS_ERR(device)) | |
3358 | return PTR_ERR(device); | |
3359 | ||
3360 | /* allow new I/O again */ | |
3361 | dasd_device_remove_stop_bits(device, | |
3362 | (DASD_STOPPED_PM | DASD_UNRESUMED_PM)); | |
3363 | ||
3364 | dasd_schedule_device_bh(device); | |
3365 | ||
3366 | /* | |
3367 | * Call the discipline restore function. Do nothing if the | |
3368 | * device is stopped, e.g. for disconnected devices. | |
3369 | */ | |
3370 | if (device->discipline->restore && !(device->stopped)) | |
3371 | rc = device->discipline->restore(device); | |
3372 | if (rc || device->stopped) | |
3373 | /* | |
3374 | * If the resume failed for the DASD, put it into | |
3375 | * the UNRESUMED stop state. | |
3376 | */ | |
3377 | device->stopped |= DASD_UNRESUMED_PM; | |
3378 | ||
3379 | if (device->block) | |
3380 | dasd_schedule_block_bh(device->block); | |
3381 | ||
3382 | clear_bit(DASD_FLAG_SUSPENDED, &device->flags); | |
3383 | dasd_put_device(device); | |
3384 | return 0; | |
3385 | } | |
3386 | EXPORT_SYMBOL_GPL(dasd_generic_restore_device); | |
3387 | ||
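| /* | |
| * Build a channel program with a single Read Device Characteristics | |
| * CCW, using an IDAL for the output buffer where necessary. | |
| */ | |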
3388 | static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, | |
3389 | void *rdc_buffer, | |
3390 | int rdc_buffer_size, | |
3391 | int magic) | |
3392 | { | |
3393 | struct dasd_ccw_req *cqr; | |
3394 | struct ccw1 *ccw; | |
3395 | unsigned long *idaw; | |
3396 | ||
3397 | cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device); | |
3398 | ||
3399 | if (IS_ERR(cqr)) { | |
3400 | /* internal error 13 - Allocating the RDC request failed */ | |
3401 | dev_err(&device->cdev->dev, | |
3402 | "An error occurred in the DASD device driver, " | |
3403 | "reason=%s\n", "13"); | |
3404 | return cqr; | |
3405 | } | |
3406 | ||
3407 | ccw = cqr->cpaddr; | |
3408 | ccw->cmd_code = CCW_CMD_RDC; | |
3409 | if (idal_is_needed(rdc_buffer, rdc_buffer_size)) { | |
3410 | idaw = (unsigned long *) (cqr->data); | |
3411 | ccw->cda = (__u32)(addr_t) idaw; | |
3412 | ccw->flags = CCW_FLAG_IDA; | |
3413 | idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size); | |
3414 | } else { | |
3415 | ccw->cda = (__u32)(addr_t) rdc_buffer; | |
3416 | ccw->flags = 0; | |
3417 | } | |
3418 | ||
3419 | ccw->count = rdc_buffer_size; | |
3420 | cqr->startdev = device; | |
3421 | cqr->memdev = device; | |
3422 | cqr->expires = 10*HZ; | |
3423 | cqr->retries = 256; | |
3424 | cqr->buildclk = get_clock(); | |
3425 | cqr->status = DASD_CQR_FILLED; | |
3426 | return cqr; | |
3427 | } | |
3428 | ||
3429 | ||
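| /* | |
| * Synchronously execute a Read Device Characteristics request and | |
| * wait for its completion. | |
| */ | |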
3430 | int dasd_generic_read_dev_chars(struct dasd_device *device, int magic, | |
3431 | void *rdc_buffer, int rdc_buffer_size) | |
3432 | { | |
3433 | int ret; | |
3434 | struct dasd_ccw_req *cqr; | |
3435 | ||
3436 | cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size, | |
3437 | magic); | |
3438 | if (IS_ERR(cqr)) | |
3439 | return PTR_ERR(cqr); | |
3440 | ||
3441 | ret = dasd_sleep_on(cqr); | |
3442 | dasd_sfree_request(cqr, cqr->memdev); | |
3443 | return ret; | |
3444 | } | |
3445 | EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars); | |
3446 | ||
3447 | /* | |
3448 | * In command mode and transport mode we need to look for sense | |
3449 | * data in different places. The sense data itself is always | |
3450 | * an array of 32 bytes, so we can unify the sense data access | |
3451 | * for both modes. | |
3452 | */ | |
3453 | char *dasd_get_sense(struct irb *irb) | |
3454 | { | |
3455 | struct tsb *tsb = NULL; | |
3456 | char *sense = NULL; | |
3457 | ||
3458 | if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) { | |
3459 | if (irb->scsw.tm.tcw) | |
3460 | tsb = tcw_get_tsb((struct tcw *)(unsigned long) | |
3461 | irb->scsw.tm.tcw); | |
3462 | if (tsb && tsb->length == 64 && tsb->flags) | |
3463 | switch (tsb->flags & 0x07) { | |
3464 | case 1: /* tsa_iostat */ | |
3465 | sense = tsb->tsa.iostat.sense; | |
3466 | break; | |
3467 | case 2: /* tsa_ddpc */ | |
3468 | sense = tsb->tsa.ddpc.sense; | |
3469 | break; | |
3470 | default: | |
3471 | /* currently we don't use interrogate data */ | |
3472 | break; | |
3473 | } | |
3474 | } else if (irb->esw.esw0.erw.cons) { | |
3475 | sense = irb->ecw; | |
3476 | } | |
3477 | return sense; | |
3478 | } | |
3479 | EXPORT_SYMBOL_GPL(dasd_get_sense); | |
3480 | ||
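| /* Check if the device and, if present, the block ccw queue are empty. */ | |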
3481 | static inline int _wait_for_empty_queues(struct dasd_device *device) | |
3482 | { | |
3483 | if (device->block) | |
3484 | return list_empty(&device->ccw_queue) && | |
3485 | list_empty(&device->block->ccw_queue); | |
3486 | else | |
3487 | return list_empty(&device->ccw_queue); | |
3488 | } | |
3489 | ||
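| /* | |
| * On shutdown, kick the queues one last time and wait until all | |
| * outstanding requests have been processed. | |
| */ | |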
3490 | void dasd_generic_shutdown(struct ccw_device *cdev) | |
3491 | { | |
3492 | struct dasd_device *device; | |
3493 | ||
3494 | device = dasd_device_from_cdev(cdev); | |
3495 | if (IS_ERR(device)) | |
3496 | return; | |
3497 | ||
3498 | if (device->block) | |
3499 | dasd_schedule_block_bh(device->block); | |
3500 | ||
3501 | dasd_schedule_device_bh(device); | |
3502 | ||
3503 | wait_event(shutdown_waitq, _wait_for_empty_queues(device)); | |
3504 | } | |
3505 | EXPORT_SYMBOL_GPL(dasd_generic_shutdown); | |
3506 | ||
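| /* | |
| * Driver initialization: set up the wait queues and the debug | |
| * feature area, then initialize the devmap, gendisk, eer and proc | |
| * components and parse the module/kernel parameters. | |
| */ | |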
3507 | static int __init dasd_init(void) | |
3508 | { | |
3509 | int rc; | |
3510 | ||
3511 | init_waitqueue_head(&dasd_init_waitq); | |
3512 | init_waitqueue_head(&dasd_flush_wq); | |
3513 | init_waitqueue_head(&generic_waitq); | |
3514 | init_waitqueue_head(&shutdown_waitq); | |
3515 | ||
3516 | /* register 'common' DASD debug area, used for all DBF_XXX calls */ | |
3517 | dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long)); | |
3518 | if (dasd_debug_area == NULL) { | |
3519 | rc = -ENOMEM; | |
3520 | goto failed; | |
3521 | } | |
3522 | debug_register_view(dasd_debug_area, &debug_sprintf_view); | |
3523 | debug_set_level(dasd_debug_area, DBF_WARNING); | |
3524 | ||
3525 | DBF_EVENT(DBF_EMERG, "%s", "debug area created"); | |
3526 | ||
3527 | dasd_diag_discipline_pointer = NULL; | |
3528 | ||
3529 | dasd_statistics_createroot(); | |
3530 | ||
3531 | rc = dasd_devmap_init(); | |
3532 | if (rc) | |
3533 | goto failed; | |
3534 | rc = dasd_gendisk_init(); | |
3535 | if (rc) | |
3536 | goto failed; | |
3537 | rc = dasd_parse(); | |
3538 | if (rc) | |
3539 | goto failed; | |
3540 | rc = dasd_eer_init(); | |
3541 | if (rc) | |
3542 | goto failed; | |
3543 | #ifdef CONFIG_PROC_FS | |
3544 | rc = dasd_proc_init(); | |
3545 | if (rc) | |
3546 | goto failed; | |
3547 | #endif | |
3548 | ||
3549 | return 0; | |
3550 | failed: | |
3551 | pr_info("The DASD device driver could not be initialized\n"); | |
3552 | dasd_exit(); | |
3553 | return rc; | |
3554 | } | |
3555 | ||
3556 | module_init(dasd_init); | |
3557 | module_exit(dasd_exit); | |
3558 | ||
3559 | EXPORT_SYMBOL(dasd_debug_area); | |
3560 | EXPORT_SYMBOL(dasd_diag_discipline_pointer); | |
3561 | ||
3562 | EXPORT_SYMBOL(dasd_add_request_head); | |
3563 | EXPORT_SYMBOL(dasd_add_request_tail); | |
3564 | EXPORT_SYMBOL(dasd_cancel_req); | |
3565 | EXPORT_SYMBOL(dasd_device_clear_timer); | |
3566 | EXPORT_SYMBOL(dasd_block_clear_timer); | |
3567 | EXPORT_SYMBOL(dasd_enable_device); | |
3568 | EXPORT_SYMBOL(dasd_int_handler); | |
3569 | EXPORT_SYMBOL(dasd_kfree_request); | |
3570 | EXPORT_SYMBOL(dasd_kick_device); | |
3571 | EXPORT_SYMBOL(dasd_kmalloc_request); | |
3572 | EXPORT_SYMBOL(dasd_schedule_device_bh); | |
3573 | EXPORT_SYMBOL(dasd_schedule_block_bh); | |
3574 | EXPORT_SYMBOL(dasd_set_target_state); | |
3575 | EXPORT_SYMBOL(dasd_device_set_timer); | |
3576 | EXPORT_SYMBOL(dasd_block_set_timer); | |
3577 | EXPORT_SYMBOL(dasd_sfree_request); | |
3578 | EXPORT_SYMBOL(dasd_sleep_on); | |
3579 | EXPORT_SYMBOL(dasd_sleep_on_immediatly); | |
3580 | EXPORT_SYMBOL(dasd_sleep_on_interruptible); | |
3581 | EXPORT_SYMBOL(dasd_smalloc_request); | |
3582 | EXPORT_SYMBOL(dasd_start_IO); | |
3583 | EXPORT_SYMBOL(dasd_term_IO); | |
3584 | ||
3585 | EXPORT_SYMBOL_GPL(dasd_generic_probe); | |
3586 | EXPORT_SYMBOL_GPL(dasd_generic_remove); | |
3587 | EXPORT_SYMBOL_GPL(dasd_generic_notify); | |
3588 | EXPORT_SYMBOL_GPL(dasd_generic_set_online); | |
3589 | EXPORT_SYMBOL_GPL(dasd_generic_set_offline); | |
3590 | EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); | |
3591 | EXPORT_SYMBOL_GPL(dasd_flush_device_queue); | |
3592 | EXPORT_SYMBOL_GPL(dasd_alloc_block); | |
3593 | EXPORT_SYMBOL_GPL(dasd_free_block); |