]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * drivers/s390/char/tape_block.c | |
3 | * block device frontend for tape device driver | |
4 | * | |
5 | * S390 and zSeries version | |
6 | * Copyright (C) 2001,2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | |
7 | * Author(s): Carsten Otte <cotte@de.ibm.com> | |
8 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> | |
9 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | |
10 | * Stefan Bader <shbader@de.ibm.com> | |
11 | */ | |
12 | ||
13 | #include <linux/fs.h> | |
14 | #include <linux/config.h> | |
15 | #include <linux/module.h> | |
16 | #include <linux/blkdev.h> | |
17 | #include <linux/interrupt.h> | |
18 | #include <linux/buffer_head.h> | |
19 | ||
20 | #include <asm/debug.h> | |
21 | ||
22 | #define TAPE_DBF_AREA tape_core_dbf | |
23 | ||
24 | #include "tape.h" | |
25 | ||
26 | #define PRINTK_HEADER "TAPE_BLOCK: " | |
27 | ||
28 | #define TAPEBLOCK_MAX_SEC 100 | |
29 | #define TAPEBLOCK_MIN_REQUEUE 3 | |
30 | ||
31 | /* | |
32 | * 2003/11/25 Stefan Bader <shbader@de.ibm.com> | |
33 | * | |
34 | * In 2.5/2.6 the block device request function is very likely to be called | |
35 | * with disabled interrupts (e.g. generic_unplug_device). So the driver can't | |
36 | * just call any function that tries to allocate CCW requests from that con- | |
37 | * text since it might sleep. There are two choices to work around this: | |
38 | * a) do not allocate with kmalloc but use its own memory pool | |
39 | * b) take requests from the queue outside that context, knowing that | |
40 | * allocation might sleep | |
41 | */ | |
42 | ||
/*
 * file operation structure for tape block frontend
 */
static int tapeblock_open(struct inode *, struct file *);
static int tapeblock_release(struct inode *, struct file *);
static int tapeblock_ioctl(struct inode *, struct file *, unsigned int,
			   unsigned long);
static int tapeblock_medium_changed(struct gendisk *);
static int tapeblock_revalidate_disk(struct gendisk *);

/* Block-device methods handed to the block layer via alloc_disk()'s fops. */
static struct block_device_operations tapeblock_fops = {
	.owner		 = THIS_MODULE,
	.open		 = tapeblock_open,
	.release	 = tapeblock_release,
	.ioctl		 = tapeblock_ioctl,
	.media_changed	 = tapeblock_medium_changed,
	.revalidate_disk = tapeblock_revalidate_disk,
};
61 | ||
62 | static int tapeblock_major = 0; | |
63 | ||
/*
 * Schedule the requeue work for <device> unless it is already pending.
 *
 * requeue_scheduled is flipped 0 -> 1 here and reset to 0 at the end of
 * tapeblock_requeue(); a non-zero return from atomic_compare_and_swap()
 * presumably means the flag was already set (s390-specific primitive —
 * confirm against asm/atomic.h), i.e. a requeue is already in flight.
 */
static void
tapeblock_trigger_requeue(struct tape_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_compare_and_swap(0, 1, &device->blk_data.requeue_scheduled))
		return;
	schedule_work(&device->blk_data.requeue_task);
}
72 | ||
/*
 * Post finished request.
 *
 * Completes all sectors of <req> with status <uptodate> and returns the
 * request to the block layer.  end_that_request_first() is expected to
 * consume the whole request (return 0) because hard_nr_sectors covers
 * everything; anything left over is a logic error, hence BUG().
 */
static inline void
tapeblock_end_request(struct request *req, int uptodate)
{
	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
		BUG();
	end_that_request_last(req);
}
83 | ||
/*
 * Completion callback for a tape CCW request built by
 * tapeblock_start_request() (set as ccw_req->callback there).
 *
 * Finishes the associated block request, updates the cached tape
 * position (or invalidates it on error), frees the CCW request and
 * kicks the requeue work if more requests are waiting on either queue.
 * <data> is the struct request stored in ccw_req->callback_data.
 */
static void
__tapeblock_end_request(struct tape_request *ccw_req, void *data)
{
	struct tape_device *device;
	struct request *req;

	DBF_LH(6, "__tapeblock_end_request()\n");

	device = ccw_req->device;
	req = (struct request *) data;
	tapeblock_end_request(req, ccw_req->rc == 0);
	if (ccw_req->rc == 0)
		/* Update position. */
		device->blk_data.block_position =
			(req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B;
	else
		/* We lost the position information due to an error. */
		device->blk_data.block_position = -1;
	device->discipline->free_bread(ccw_req);
	/* More work queued on either the ccw or the block queue? */
	if (!list_empty(&device->req_queue) ||
	    elv_next_request(device->blk_data.request_queue))
		tapeblock_trigger_requeue(device);
}
107 | ||
/*
 * Feed the tape device CCW queue with requests supplied in a list.
 *
 * Builds a CCW request for block request <req> via the discipline's
 * bread() — which may allocate and therefore sleep, so this must run in
 * process context (see the note at the top of this file) — and starts
 * it asynchronously.  On any failure the block request is completed
 * with an error status.  Returns 0 on success or a negative error code.
 */
static inline int
tapeblock_start_request(struct tape_device *device, struct request *req)
{
	struct tape_request *	ccw_req;
	int			rc;

	DBF_LH(6, "tapeblock_start_request(%p, %p)\n", device, req);

	ccw_req = device->discipline->bread(device, req);
	if (IS_ERR(ccw_req)) {
		DBF_EVENT(1, "TBLOCK: bread failed\n");
		tapeblock_end_request(req, 0);
		return PTR_ERR(ccw_req);
	}
	/* __tapeblock_end_request() finishes <req> when the I/O completes. */
	ccw_req->callback = __tapeblock_end_request;
	ccw_req->callback_data = (void *) req;
	ccw_req->retries = TAPEBLOCK_RETRIES;

	rc = tape_do_io_async(device, ccw_req);
	if (rc) {
		/*
		 * Start/enqueueing failed. No retries in
		 * this case.
		 */
		tapeblock_end_request(req, 0);
		device->discipline->free_bread(ccw_req);
	}

	return rc;
}
141 | ||
142 | /* | |
143 | * Move requests from the block device request queue to the tape device ccw | |
144 | * queue. | |
145 | */ | |
146 | static void | |
147 | tapeblock_requeue(void *data) { | |
148 | struct tape_device * device; | |
149 | request_queue_t * queue; | |
150 | int nr_queued; | |
151 | struct request * req; | |
152 | struct list_head * l; | |
153 | int rc; | |
154 | ||
155 | device = (struct tape_device *) data; | |
156 | if (!device) | |
157 | return; | |
158 | ||
159 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | |
160 | queue = device->blk_data.request_queue; | |
161 | ||
162 | /* Count number of requests on ccw queue. */ | |
163 | nr_queued = 0; | |
164 | list_for_each(l, &device->req_queue) | |
165 | nr_queued++; | |
166 | spin_unlock(get_ccwdev_lock(device->cdev)); | |
167 | ||
168 | spin_lock(&device->blk_data.request_queue_lock); | |
169 | while ( | |
170 | !blk_queue_plugged(queue) && | |
171 | elv_next_request(queue) && | |
172 | nr_queued < TAPEBLOCK_MIN_REQUEUE | |
173 | ) { | |
174 | req = elv_next_request(queue); | |
175 | if (rq_data_dir(req) == WRITE) { | |
176 | DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); | |
177 | blkdev_dequeue_request(req); | |
178 | tapeblock_end_request(req, 0); | |
179 | continue; | |
180 | } | |
181 | spin_unlock_irq(&device->blk_data.request_queue_lock); | |
182 | rc = tapeblock_start_request(device, req); | |
183 | spin_lock_irq(&device->blk_data.request_queue_lock); | |
184 | blkdev_dequeue_request(req); | |
185 | nr_queued++; | |
186 | } | |
187 | spin_unlock_irq(&device->blk_data.request_queue_lock); | |
188 | atomic_set(&device->blk_data.requeue_scheduled, 0); | |
189 | } | |
190 | ||
191 | /* | |
192 | * Tape request queue function. Called from ll_rw_blk.c | |
193 | */ | |
194 | static void | |
195 | tapeblock_request_fn(request_queue_t *queue) | |
196 | { | |
197 | struct tape_device *device; | |
198 | ||
199 | device = (struct tape_device *) queue->queuedata; | |
200 | DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device); | |
201 | if (device == NULL) | |
202 | BUG(); | |
203 | ||
204 | tapeblock_trigger_requeue(device); | |
205 | } | |
206 | ||
/*
 * This function is called for every new tapedevice.
 *
 * Allocates and initializes the block request queue and the gendisk
 * for <device>.  Returns 0 on success or a negative error code; on
 * failure the request queue is torn down again.
 */
int
tapeblock_setup_device(struct tape_device * device)
{
	struct tape_blk_data *	blkdat;
	struct gendisk *	disk;
	int			rc;

	blkdat = &device->blk_data;
	spin_lock_init(&blkdat->request_queue_lock);
	atomic_set(&blkdat->requeue_scheduled, 0);

	blkdat->request_queue = blk_init_queue(
		tapeblock_request_fn,
		&blkdat->request_queue_lock
	);
	if (!blkdat->request_queue)
		return -ENOMEM;

	/* Swap the default elevator for "noop". */
	elevator_exit(blkdat->request_queue->elevator);
	rc = elevator_init(blkdat->request_queue, "noop");
	if (rc)
		goto cleanup_queue;

	blk_queue_hardsect_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
	blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
	/* -1L: effectively "no limit" for segment count/size/boundary. */
	blk_queue_max_phys_segments(blkdat->request_queue, -1L);
	blk_queue_max_hw_segments(blkdat->request_queue, -1L);
	blk_queue_max_segment_size(blkdat->request_queue, -1L);
	blk_queue_segment_boundary(blkdat->request_queue, -1L);

	disk = alloc_disk(1);
	if (!disk) {
		rc = -ENOMEM;
		goto cleanup_queue;
	}

	disk->major = tapeblock_major;
	disk->first_minor = device->first_minor;
	disk->fops = &tapeblock_fops;
	/* The gendisk keeps its own reference to the tape device. */
	disk->private_data = tape_get_device_reference(device);
	disk->queue = blkdat->request_queue;
	set_capacity(disk, 0);
	sprintf(disk->disk_name, "btibm%d",
		device->first_minor / TAPE_MINORS_PER_DEV);

	blkdat->disk = disk;
	blkdat->medium_changed = 1;
	/* Second reference held via queuedata (both dropped in cleanup). */
	blkdat->request_queue->queuedata = tape_get_device_reference(device);

	add_disk(disk);

	/* Third reference: passed as the requeue work's data argument. */
	INIT_WORK(&blkdat->requeue_task, tapeblock_requeue,
		tape_get_device_reference(device));

	return 0;

cleanup_queue:
	blk_cleanup_queue(blkdat->request_queue);
	blkdat->request_queue = NULL;

	return rc;
}
272 | ||
/*
 * Tear down the block frontend of a tape device: undo
 * tapeblock_setup_device() and drop the three device references taken
 * there (work data, gendisk private_data, queuedata).
 */
void
tapeblock_cleanup_device(struct tape_device *device)
{
	/* Make sure no requeue work is still running for this device. */
	flush_scheduled_work();
	device->blk_data.requeue_task.data = tape_put_device(device);

	if (!device->blk_data.disk) {
		PRINT_ERR("(%s): No gendisk to clean up!\n",
			device->cdev->dev.bus_id);
		goto cleanup_queue;
	}

	del_gendisk(device->blk_data.disk);
	device->blk_data.disk->private_data =
		tape_put_device(device->blk_data.disk->private_data);
	put_disk(device->blk_data.disk);

	device->blk_data.disk = NULL;
cleanup_queue:
	device->blk_data.request_queue->queuedata = tape_put_device(device);

	blk_cleanup_queue(device->blk_data.request_queue);
	device->blk_data.request_queue = NULL;
}
297 | ||
/*
 * Detect number of blocks of the tape.
 * FIXME: can we extent this to detect the blocks size as well ?
 *
 * Seeks forward to the next filemark (MTFSFM), reads the position
 * (MTTELL) as the image end, seeks back one file (MTBSF) and reads the
 * position again as the image start; the difference is the capacity.
 * tape_mtop(..., MTTELL, ...) presumably returns the block position as
 * a non-negative rc — the DBF messages below rely on that.
 * Only runs after a medium change; returns 0 on success or a negative
 * error code.
 */
static int
tapeblock_revalidate_disk(struct gendisk *disk)
{
	struct tape_device *	device;
	unsigned int		nr_of_blks;
	int			rc;

	device = (struct tape_device *) disk->private_data;
	if (!device)
		BUG();

	/* Nothing to do if the medium was not exchanged. */
	if (!device->blk_data.medium_changed)
		return 0;

	PRINT_INFO("Detecting media size...\n");
	rc = tape_mtop(device, MTFSFM, 1);
	if (rc)
		return rc;

	rc = tape_mtop(device, MTTELL, 1);
	if (rc < 0)
		return rc;

	DBF_LH(3, "Image file ends at %d\n", rc);
	nr_of_blks = rc;

	/* This will fail for the first file. Catch the error by checking the
	 * position. */
	tape_mtop(device, MTBSF, 1);

	rc = tape_mtop(device, MTTELL, 1);
	if (rc < 0)
		return rc;

	/* Start beyond end means the positions are inconsistent. */
	if (rc > nr_of_blks)
		return -EINVAL;

	DBF_LH(3, "Image file starts at %d\n", rc);
	device->bof = rc;
	nr_of_blks -= rc;

	PRINT_INFO("Found %i blocks on media\n", nr_of_blks);
	/* Capacity is in 512-byte sectors; convert from hardsect size. */
	set_capacity(device->blk_data.disk,
		nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));

	device->blk_data.block_position = 0;
	device->blk_data.medium_changed = 0;
	return 0;
}
351 | ||
352 | static int | |
353 | tapeblock_medium_changed(struct gendisk *disk) | |
354 | { | |
355 | struct tape_device *device; | |
356 | ||
357 | device = (struct tape_device *) disk->private_data; | |
358 | DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n", | |
359 | device, device->blk_data.medium_changed); | |
360 | ||
361 | return device->blk_data.medium_changed; | |
362 | } | |
363 | ||
/*
 * Block frontend tape device open function.
 *
 * Takes a reference to the tape device, opens it, revalidates the disk
 * (detects capacity) and switches the device into the TS_BLKUSE state.
 * Opening is refused with -EPERM while end-of-file marks are missing.
 * Returns 0 on success or a negative error code; on failure all steps
 * taken so far are rolled back.
 */
static int
tapeblock_open(struct inode *inode, struct file *filp)
{
	struct gendisk *	disk;
	struct tape_device *	device;
	int			rc;

	disk = inode->i_bdev->bd_disk;
	device = tape_get_device_reference(disk->private_data);

	if (device->required_tapemarks) {
		DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
		PRINT_ERR("TBLOCK: Refusing to open tape with missing"
			" end of file marks.\n");
		rc = -EPERM;
		goto put_device;
	}

	rc = tape_open(device);
	if (rc)
		goto put_device;

	rc = tapeblock_revalidate_disk(disk);
	if (rc)
		goto release;

	/*
	 * Note: The reference to <device> is hold until the release function
	 * is called.
	 */
	tape_state_set(device, TS_BLKUSE);
	return 0;

release:
	tape_release(device);
put_device:
	tape_put_device(device);
	return rc;
}
406 | ||
407 | /* | |
408 | * Block frontend tape device release function. | |
409 | * | |
410 | * Note: One reference to the tape device was made by the open function. So | |
411 | * we just get the pointer here and release the reference. | |
412 | */ | |
413 | static int | |
414 | tapeblock_release(struct inode *inode, struct file *filp) | |
415 | { | |
416 | struct gendisk *disk = inode->i_bdev->bd_disk; | |
417 | struct tape_device *device = disk->private_data; | |
418 | ||
419 | tape_state_set(device, TS_IN_USE); | |
420 | tape_release(device); | |
421 | tape_put_device(device); | |
422 | ||
423 | return 0; | |
424 | } | |
425 | ||
426 | /* | |
427 | * Support of some generic block device IOCTLs. | |
428 | */ | |
429 | static int | |
430 | tapeblock_ioctl( | |
431 | struct inode * inode, | |
432 | struct file * file, | |
433 | unsigned int command, | |
434 | unsigned long arg | |
435 | ) { | |
436 | int rc; | |
437 | int minor; | |
438 | struct gendisk *disk = inode->i_bdev->bd_disk; | |
439 | struct tape_device *device = disk->private_data; | |
440 | ||
441 | rc = 0; | |
442 | disk = inode->i_bdev->bd_disk; | |
443 | if (!disk) | |
444 | BUG(); | |
445 | device = disk->private_data; | |
446 | if (!device) | |
447 | BUG(); | |
448 | minor = iminor(inode); | |
449 | ||
450 | DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command); | |
451 | DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor); | |
452 | ||
453 | switch (command) { | |
454 | /* Refuse some IOCTL calls without complaining (mount). */ | |
455 | case 0x5310: /* CDROMMULTISESSION */ | |
456 | rc = -EINVAL; | |
457 | break; | |
458 | default: | |
459 | PRINT_WARN("invalid ioctl 0x%x\n", command); | |
460 | rc = -EINVAL; | |
461 | } | |
462 | ||
463 | return rc; | |
464 | } | |
465 | ||
/*
 * Initialize block device frontend.
 *
 * Registers the block major number.  With tapeblock_major == 0 the
 * kernel assigns a dynamic major, which register_blkdev() returns and
 * which is then stored for later use.  Returns 0 on success or the
 * negative error code from register_blkdev().
 */
int
tapeblock_init(void)
{
	int rc;

	/* Register the tape major number to the kernel */
	rc = register_blkdev(tapeblock_major, "tBLK");
	if (rc < 0)
		return rc;

	if (tapeblock_major == 0)
		tapeblock_major = rc;
	PRINT_INFO("tape gets major %d for block device\n", tapeblock_major);
	return 0;
}
484 | ||
/*
 * Deregister major for block device frontend
 *
 * Counterpart of tapeblock_init(): releases the "tBLK" major number.
 */
void
tapeblock_exit(void)
{
	unregister_blkdev(tapeblock_major, "tBLK");
}