]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * drivers/s390/char/tape_block.c | |
3 | * block device frontend for tape device driver | |
4 | * | |
5 | * S390 and zSeries version | |
6 | * Copyright (C) 2001,2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | |
7 | * Author(s): Carsten Otte <cotte@de.ibm.com> | |
8 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> | |
9 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | |
10 | * Stefan Bader <shbader@de.ibm.com> | |
11 | */ | |
12 | ||
13 | #include <linux/fs.h> | |
1da177e4 LT |
14 | #include <linux/module.h> |
15 | #include <linux/blkdev.h> | |
16 | #include <linux/interrupt.h> | |
17 | #include <linux/buffer_head.h> | |
18 | ||
19 | #include <asm/debug.h> | |
20 | ||
21 | #define TAPE_DBF_AREA tape_core_dbf | |
22 | ||
23 | #include "tape.h" | |
24 | ||
25 | #define PRINTK_HEADER "TAPE_BLOCK: " | |
26 | ||
27 | #define TAPEBLOCK_MAX_SEC 100 | |
28 | #define TAPEBLOCK_MIN_REQUEUE 3 | |
29 | ||
30 | /* | |
31 | * 2003/11/25 Stefan Bader <shbader@de.ibm.com> | |
32 | * | |
33 | * In 2.5/2.6 the block device request function is very likely to be called | |
34 | * with disabled interrupts (e.g. generic_unplug_device). So the driver can't | |
35 | * just call any function that tries to allocate CCW requests from that con- | |
36 | * text since it might sleep. There are two choices to work around this: | |
37 | * a) do not allocate with kmalloc but use its own memory pool | |
38 | * b) take requests from the queue outside that context, knowing that | |
39 | * allocation might sleep | |
40 | */ | |
41 | ||
42 | /* | |
43 | * file operation structure for tape block frontend | |
44 | */ | |
/*
 * file operation structure for tape block frontend
 */
static int tapeblock_open(struct inode *, struct file *);
static int tapeblock_release(struct inode *, struct file *);
static int tapeblock_ioctl(struct inode *, struct file *, unsigned int,
				unsigned long);
static int tapeblock_medium_changed(struct gendisk *);
static int tapeblock_revalidate_disk(struct gendisk *);

static struct block_device_operations tapeblock_fops = {
	.owner		 = THIS_MODULE,
	.open		 = tapeblock_open,
	.release	 = tapeblock_release,
	.ioctl		 = tapeblock_ioctl,
	.media_changed	 = tapeblock_medium_changed,
	.revalidate_disk = tapeblock_revalidate_disk,
};

/* Block major number; 0 requests dynamic allocation in tapeblock_init(). */
static int tapeblock_major = 0;
62 | ||
63 | static void | |
64 | tapeblock_trigger_requeue(struct tape_device *device) | |
65 | { | |
66 | /* Protect against rescheduling. */ | |
973bd993 | 67 | if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0) |
1da177e4 LT |
68 | return; |
69 | schedule_work(&device->blk_data.requeue_task); | |
70 | } | |
71 | ||
72 | /* | |
73 | * Post finished request. | |
74 | */ | |
/*
 * Post finished request.
 */
static inline void
tapeblock_end_request(struct request *req, int uptodate)
{
	/* Complete every sector of the request in one go; a non-zero
	 * return would mean a partial completion, which must not happen
	 * here. */
	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
		BUG();
	end_that_request_last(req, uptodate);
}
82 | ||
83 | static void | |
84 | __tapeblock_end_request(struct tape_request *ccw_req, void *data) | |
85 | { | |
86 | struct tape_device *device; | |
87 | struct request *req; | |
88 | ||
89 | DBF_LH(6, "__tapeblock_end_request()\n"); | |
90 | ||
91 | device = ccw_req->device; | |
92 | req = (struct request *) data; | |
93 | tapeblock_end_request(req, ccw_req->rc == 0); | |
94 | if (ccw_req->rc == 0) | |
95 | /* Update position. */ | |
96 | device->blk_data.block_position = | |
97 | (req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B; | |
98 | else | |
99 | /* We lost the position information due to an error. */ | |
100 | device->blk_data.block_position = -1; | |
101 | device->discipline->free_bread(ccw_req); | |
102 | if (!list_empty(&device->req_queue) || | |
103 | elv_next_request(device->blk_data.request_queue)) | |
104 | tapeblock_trigger_requeue(device); | |
105 | } | |
106 | ||
107 | /* | |
108 | * Feed the tape device CCW queue with requests supplied in a list. | |
109 | */ | |
110 | static inline int | |
111 | tapeblock_start_request(struct tape_device *device, struct request *req) | |
112 | { | |
113 | struct tape_request * ccw_req; | |
114 | int rc; | |
115 | ||
116 | DBF_LH(6, "tapeblock_start_request(%p, %p)\n", device, req); | |
117 | ||
118 | ccw_req = device->discipline->bread(device, req); | |
119 | if (IS_ERR(ccw_req)) { | |
120 | DBF_EVENT(1, "TBLOCK: bread failed\n"); | |
121 | tapeblock_end_request(req, 0); | |
122 | return PTR_ERR(ccw_req); | |
123 | } | |
124 | ccw_req->callback = __tapeblock_end_request; | |
125 | ccw_req->callback_data = (void *) req; | |
126 | ccw_req->retries = TAPEBLOCK_RETRIES; | |
127 | ||
128 | rc = tape_do_io_async(device, ccw_req); | |
129 | if (rc) { | |
130 | /* | |
131 | * Start/enqueueing failed. No retries in | |
132 | * this case. | |
133 | */ | |
134 | tapeblock_end_request(req, 0); | |
135 | device->discipline->free_bread(ccw_req); | |
136 | } | |
137 | ||
138 | return rc; | |
139 | } | |
140 | ||
141 | /* | |
142 | * Move requests from the block device request queue to the tape device ccw | |
143 | * queue. | |
144 | */ | |
145 | static void | |
146 | tapeblock_requeue(void *data) { | |
147 | struct tape_device * device; | |
148 | request_queue_t * queue; | |
149 | int nr_queued; | |
150 | struct request * req; | |
151 | struct list_head * l; | |
152 | int rc; | |
153 | ||
154 | device = (struct tape_device *) data; | |
155 | if (!device) | |
156 | return; | |
157 | ||
158 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | |
159 | queue = device->blk_data.request_queue; | |
160 | ||
161 | /* Count number of requests on ccw queue. */ | |
162 | nr_queued = 0; | |
163 | list_for_each(l, &device->req_queue) | |
164 | nr_queued++; | |
165 | spin_unlock(get_ccwdev_lock(device->cdev)); | |
166 | ||
167 | spin_lock(&device->blk_data.request_queue_lock); | |
168 | while ( | |
169 | !blk_queue_plugged(queue) && | |
170 | elv_next_request(queue) && | |
171 | nr_queued < TAPEBLOCK_MIN_REQUEUE | |
172 | ) { | |
173 | req = elv_next_request(queue); | |
174 | if (rq_data_dir(req) == WRITE) { | |
175 | DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); | |
176 | blkdev_dequeue_request(req); | |
177 | tapeblock_end_request(req, 0); | |
178 | continue; | |
179 | } | |
180 | spin_unlock_irq(&device->blk_data.request_queue_lock); | |
181 | rc = tapeblock_start_request(device, req); | |
182 | spin_lock_irq(&device->blk_data.request_queue_lock); | |
183 | blkdev_dequeue_request(req); | |
184 | nr_queued++; | |
185 | } | |
186 | spin_unlock_irq(&device->blk_data.request_queue_lock); | |
187 | atomic_set(&device->blk_data.requeue_scheduled, 0); | |
188 | } | |
189 | ||
/*
 * Tape request queue function. Called from ll_rw_blk.c
 */
static void
tapeblock_request_fn(request_queue_t *queue)
{
	struct tape_device *device;

	device = (struct tape_device *) queue->queuedata;
	DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device);
	BUG_ON(device == NULL);
	/*
	 * This may run with interrupts disabled, so no CCW allocation is
	 * done here; the real work is deferred to the requeue task (see
	 * the comment at the top of this file).
	 */
	tapeblock_trigger_requeue(device);
}
203 | ||
204 | /* | |
205 | * This function is called for every new tapedevice | |
206 | */ | |
207 | int | |
208 | tapeblock_setup_device(struct tape_device * device) | |
209 | { | |
210 | struct tape_blk_data * blkdat; | |
211 | struct gendisk * disk; | |
212 | int rc; | |
213 | ||
214 | blkdat = &device->blk_data; | |
215 | spin_lock_init(&blkdat->request_queue_lock); | |
216 | atomic_set(&blkdat->requeue_scheduled, 0); | |
217 | ||
218 | blkdat->request_queue = blk_init_queue( | |
219 | tapeblock_request_fn, | |
220 | &blkdat->request_queue_lock | |
221 | ); | |
222 | if (!blkdat->request_queue) | |
223 | return -ENOMEM; | |
224 | ||
225 | elevator_exit(blkdat->request_queue->elevator); | |
226 | rc = elevator_init(blkdat->request_queue, "noop"); | |
227 | if (rc) | |
228 | goto cleanup_queue; | |
229 | ||
230 | blk_queue_hardsect_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE); | |
231 | blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC); | |
232 | blk_queue_max_phys_segments(blkdat->request_queue, -1L); | |
233 | blk_queue_max_hw_segments(blkdat->request_queue, -1L); | |
234 | blk_queue_max_segment_size(blkdat->request_queue, -1L); | |
235 | blk_queue_segment_boundary(blkdat->request_queue, -1L); | |
236 | ||
237 | disk = alloc_disk(1); | |
238 | if (!disk) { | |
239 | rc = -ENOMEM; | |
240 | goto cleanup_queue; | |
241 | } | |
242 | ||
243 | disk->major = tapeblock_major; | |
244 | disk->first_minor = device->first_minor; | |
245 | disk->fops = &tapeblock_fops; | |
246 | disk->private_data = tape_get_device_reference(device); | |
247 | disk->queue = blkdat->request_queue; | |
248 | set_capacity(disk, 0); | |
249 | sprintf(disk->disk_name, "btibm%d", | |
250 | device->first_minor / TAPE_MINORS_PER_DEV); | |
251 | ||
252 | blkdat->disk = disk; | |
253 | blkdat->medium_changed = 1; | |
254 | blkdat->request_queue->queuedata = tape_get_device_reference(device); | |
255 | ||
256 | add_disk(disk); | |
257 | ||
258 | INIT_WORK(&blkdat->requeue_task, tapeblock_requeue, | |
259 | tape_get_device_reference(device)); | |
260 | ||
261 | return 0; | |
262 | ||
263 | cleanup_queue: | |
264 | blk_cleanup_queue(blkdat->request_queue); | |
265 | blkdat->request_queue = NULL; | |
266 | ||
267 | return rc; | |
268 | } | |
269 | ||
/* Tear down the block frontend of a tape device: stop the requeue task,
 * unregister the gendisk and release the request queue together with the
 * device references taken in tapeblock_setup_device(). */
void
tapeblock_cleanup_device(struct tape_device *device)
{
	/* Wait for a possibly running requeue task before freeing anything. */
	flush_scheduled_work();
	/* Drop the device reference held by the requeue task. */
	device->blk_data.requeue_task.data = tape_put_device(device);

	if (!device->blk_data.disk) {
		PRINT_ERR("(%s): No gendisk to clean up!\n",
			device->cdev->dev.bus_id);
		goto cleanup_queue;
	}

	del_gendisk(device->blk_data.disk);
	/* Drop the reference stored in disk->private_data. */
	device->blk_data.disk->private_data =
		tape_put_device(device->blk_data.disk->private_data);
	put_disk(device->blk_data.disk);

	device->blk_data.disk = NULL;
cleanup_queue:
	/* Drop the reference stored in request_queue->queuedata. */
	device->blk_data.request_queue->queuedata = tape_put_device(device);

	blk_cleanup_queue(device->blk_data.request_queue);
	device->blk_data.request_queue = NULL;
}
294 | ||
/*
 * Detect number of blocks of the tape.
 * FIXME: can we extent this to detect the blocks size as well ?
 */
static int
tapeblock_revalidate_disk(struct gendisk *disk)
{
	struct tape_device *	device;
	unsigned int		nr_of_blks;
	int			rc;

	device = (struct tape_device *) disk->private_data;
	BUG_ON(!device);

	/* Nothing to do unless a medium change was recorded. */
	if (!device->blk_data.medium_changed)
		return 0;

	PRINT_INFO("Detecting media size...\n");
	/* Space forward over the file mark... */
	rc = tape_mtop(device, MTFSFM, 1);
	if (rc)
		return rc;

	/* ...and read the block number there: the end of the image file. */
	rc = tape_mtop(device, MTTELL, 1);
	if (rc < 0)
		return rc;

	DBF_LH(3, "Image file ends at %d\n", rc);
	nr_of_blks = rc;

	/* This will fail for the first file. Catch the error by checking the
	 * position. */
	tape_mtop(device, MTBSF, 1);

	rc = tape_mtop(device, MTTELL, 1);
	if (rc < 0)
		return rc;

	/* A start position behind the detected end is inconsistent. */
	if (rc > nr_of_blks)
		return -EINVAL;

	DBF_LH(3, "Image file starts at %d\n", rc);
	device->bof = rc;
	nr_of_blks -= rc;

	PRINT_INFO("Found %i blocks on media\n", nr_of_blks);
	/* Capacity is given to the block layer in 512-byte sectors. */
	set_capacity(device->blk_data.disk,
		nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));

	device->blk_data.block_position = 0;
	device->blk_data.medium_changed = 0;
	return 0;
}
347 | ||
348 | static int | |
349 | tapeblock_medium_changed(struct gendisk *disk) | |
350 | { | |
351 | struct tape_device *device; | |
352 | ||
353 | device = (struct tape_device *) disk->private_data; | |
354 | DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n", | |
355 | device, device->blk_data.medium_changed); | |
356 | ||
357 | return device->blk_data.medium_changed; | |
358 | } | |
359 | ||
/*
 * Block frontend tape device open function.
 */
static int
tapeblock_open(struct inode *inode, struct file *filp)
{
	struct gendisk *	disk;
	struct tape_device *	device;
	int			rc;

	disk = inode->i_bdev->bd_disk;
	/* Take a reference for the lifetime of the open file. */
	device = tape_get_device_reference(disk->private_data);

	/* Refuse tapes that still need trailing tapemarks written. */
	if (device->required_tapemarks) {
		DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
		PRINT_ERR("TBLOCK: Refusing to open tape with missing"
			" end of file marks.\n");
		rc = -EPERM;
		goto put_device;
	}

	rc = tape_open(device);
	if (rc)
		goto put_device;

	rc = tapeblock_revalidate_disk(disk);
	if (rc)
		goto release;

	/*
	 * Note: The reference to <device> is hold until the release function
	 * is called.
	 */
	tape_state_set(device, TS_BLKUSE);
	return 0;

release:
	tape_release(device);
put_device:
	tape_put_device(device);
	return rc;
}
402 | ||
/*
 * Block frontend tape device release function.
 *
 * Note: One reference to the tape device was made by the open function. So
 * we just get the pointer here and release the reference.
 */
static int
tapeblock_release(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct tape_device *device = disk->private_data;

	/* Leave TS_BLKUSE (set by tapeblock_open) again. */
	tape_state_set(device, TS_IN_USE);
	tape_release(device);
	/* Drop the reference taken in tapeblock_open(). */
	tape_put_device(device);

	return 0;
}
421 | ||
422 | /* | |
423 | * Support of some generic block device IOCTLs. | |
424 | */ | |
425 | static int | |
426 | tapeblock_ioctl( | |
427 | struct inode * inode, | |
428 | struct file * file, | |
429 | unsigned int command, | |
430 | unsigned long arg | |
431 | ) { | |
432 | int rc; | |
433 | int minor; | |
f976069a PO |
434 | struct gendisk *disk; |
435 | struct tape_device *device; | |
1da177e4 LT |
436 | |
437 | rc = 0; | |
438 | disk = inode->i_bdev->bd_disk; | |
3a8dc893 | 439 | BUG_ON(!disk); |
1da177e4 | 440 | device = disk->private_data; |
3a8dc893 | 441 | BUG_ON(!device); |
1da177e4 LT |
442 | minor = iminor(inode); |
443 | ||
444 | DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command); | |
445 | DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor); | |
446 | ||
447 | switch (command) { | |
448 | /* Refuse some IOCTL calls without complaining (mount). */ | |
449 | case 0x5310: /* CDROMMULTISESSION */ | |
450 | rc = -EINVAL; | |
451 | break; | |
452 | default: | |
453 | PRINT_WARN("invalid ioctl 0x%x\n", command); | |
454 | rc = -EINVAL; | |
455 | } | |
456 | ||
457 | return rc; | |
458 | } | |
459 | ||
460 | /* | |
461 | * Initialize block device frontend. | |
462 | */ | |
463 | int | |
464 | tapeblock_init(void) | |
465 | { | |
466 | int rc; | |
467 | ||
468 | /* Register the tape major number to the kernel */ | |
469 | rc = register_blkdev(tapeblock_major, "tBLK"); | |
470 | if (rc < 0) | |
471 | return rc; | |
472 | ||
473 | if (tapeblock_major == 0) | |
474 | tapeblock_major = rc; | |
475 | PRINT_INFO("tape gets major %d for block device\n", tapeblock_major); | |
476 | return 0; | |
477 | } | |
478 | ||
/*
 * Deregister major for block device frontend
 */
void
tapeblock_exit(void)
{
	/* Release the major registered in tapeblock_init(). */
	unregister_blkdev(tapeblock_major, "tBLK");
}