1 /*
2 * IDE ATAPI streaming tape driver.
3 *
4 * Copyright (C) 1995-1999 Gadi Oxman <gadio@netvision.net.il>
5 * Copyright (C) 2003-2005 Bartlomiej Zolnierkiewicz
6 *
7 * This driver was constructed as a student project in the software laboratory
8 * of the faculty of electrical engineering at the Technion - Israel Institute
9 * of Technology, under the guidance of Avner Lottem and Dr. Ilana David.
10 *
11 * It is hereby placed under the terms of the GNU general public license.
12 * (See linux/COPYING).
13 *
14 * For a historical changelog see
15 * Documentation/ide/ChangeLog.ide-tape.1995-2002
16 */
17
18 #define IDETAPE_VERSION "1.20"
19
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/string.h>
23 #include <linux/kernel.h>
24 #include <linux/delay.h>
25 #include <linux/timer.h>
26 #include <linux/mm.h>
27 #include <linux/interrupt.h>
28 #include <linux/jiffies.h>
29 #include <linux/major.h>
30 #include <linux/errno.h>
31 #include <linux/genhd.h>
32 #include <linux/slab.h>
33 #include <linux/pci.h>
34 #include <linux/ide.h>
35 #include <linux/smp_lock.h>
36 #include <linux/completion.h>
37 #include <linux/bitops.h>
38 #include <linux/mutex.h>
39 #include <scsi/scsi.h>
40
41 #include <asm/byteorder.h>
42 #include <linux/irq.h>
43 #include <linux/uaccess.h>
44 #include <linux/io.h>
45 #include <asm/unaligned.h>
46 #include <linux/mtio.h>
47
48 enum {
49 /* output errors only */
50 DBG_ERR = (1 << 0),
51 /* output all sense key/asc */
52 DBG_SENSE = (1 << 1),
53 /* info regarding all chrdev-related procedures */
54 DBG_CHRDEV = (1 << 2),
55 /* all remaining procedures */
56 DBG_PROCS = (1 << 3),
57 /* buffer alloc info (pc_stack & rq_stack) */
58 DBG_PCRQ_STACK = (1 << 4),
59 };
60
61 /* define to see debug info */
62 #define IDETAPE_DEBUG_LOG 0
63
64 #if IDETAPE_DEBUG_LOG
65 #define debug_log(lvl, fmt, args...) \
66 { \
67 if (tape->debug_mask & lvl) \
68 printk(KERN_INFO "ide-tape: " fmt, ## args); \
69 }
70 #else
71 #define debug_log(lvl, fmt, args...) do {} while (0)
72 #endif
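/*
 * Usage note (illustration, not original driver text): with IDETAPE_DEBUG_LOG
 * set to 1 and the DBG_PROCS bit enabled in tape->debug_mask, a call such as
 * the one used throughout this file,
 *
 *	debug_log(DBG_PROCS, "Enter %s\n", __func__);
 *
 * expands to a KERN_INFO printk prefixed with "ide-tape: "; with
 * IDETAPE_DEBUG_LOG left at 0 it compiles away to an empty statement. Note
 * that the macro relies on a local variable named "tape" being in scope.
 */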
73
74 /**************************** Tunable parameters *****************************/
75
76
77 /*
78 * Pipelined mode parameters.
79 *
80 * We try to use the minimum number of stages which is enough to keep the tape
81 * constantly streaming. To accomplish that, we implement a feedback loop around
82 * the maximum number of stages:
83 *
84 * We start from MIN maximum stages (we will not even use MIN stages if we don't
85 * need them), increment it by (MAX-MIN)/RATE whenever we sense that the
86 * pipeline is empty, until we reach the optimum value or until we reach MAX.
87 */
88 #define IDETAPE_MIN_PIPELINE_STAGES 1
89 #define IDETAPE_MAX_PIPELINE_STAGES 400
90 #define IDETAPE_INCREASE_STAGES_RATE 20
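/*
 * Rough sketch of the feedback step described above (an illustration only;
 * the increment is read here as (MAX-MIN)/RATE stages per adjustment, and the
 * driver's actual update in the chrdev read/write paths may differ in detail):
 *
 *	if (pipeline_was_empty && tape->max_stages < tape->max_pipeline)
 *		tape->max_stages += (tape->max_pipeline - tape->min_pipeline) /
 *					IDETAPE_INCREASE_STAGES_RATE;
 *	tape->max_stages = min(tape->max_stages, tape->max_pipeline);
 */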
91
92 /*
93 * After each failed packet command we issue a request sense command and retry
94 * the packet command IDETAPE_MAX_PC_RETRIES times.
95 *
96 * Setting IDETAPE_MAX_PC_RETRIES to 0 will disable retries.
97 */
98 #define IDETAPE_MAX_PC_RETRIES 3
99
100 /*
101 * With each packet command, we allocate a buffer of IDETAPE_PC_BUFFER_SIZE
102 * bytes. This is used for several packet commands (Not for READ/WRITE commands)
103 */
104 #define IDETAPE_PC_BUFFER_SIZE 256
105
106 /*
107 * In various places in the driver, we need to allocate storage
108 * for packet commands and requests, which will remain valid while
109 * we leave the driver to wait for an interrupt or a timeout event.
110 */
111 #define IDETAPE_PC_STACK (10 + IDETAPE_MAX_PC_RETRIES)
112
113 /*
114 * Some drives (for example, Seagate STT3401A Travan) require a very long
115 * timeout, because they don't return an interrupt or clear their busy bit
116 * until after the command completes (even retension commands).
117 */
118 #define IDETAPE_WAIT_CMD (900*HZ)
119
120 /*
121 * The following parameter selects the point in the internal tape fifo at
122 * which we will start to refill the buffer. Decreasing it will improve the
123 * system's latency and interactive response, while using a high value
124 * might improve system throughput.
125 */
126 #define IDETAPE_FIFO_THRESHOLD 2
127
128 /*
129 * DSC polling parameters.
130 *
131 * Polling for DSC (a single bit in the status register) is a very important
132 * function in ide-tape. There are two cases in which we poll for DSC:
133 *
134 * 1. Before a read/write packet command, to ensure that we can transfer data
135 * from/to the tape's data buffers, without causing an actual media access.
136 * In case the tape is not ready yet, we take out our request from the device
137 * request queue, so that ide.c could service requests from the other device
138 * on the same interface in the meantime.
139 *
140 * 2. After the successful initialization of a "media access packet command",
141 * which is a command that can take a long time to complete (the interval can
142 * range from several seconds to even an hour). Again, we postpone our request
143 * in the middle to free the bus for the other device. The polling frequency
144 * here should be lower than the read/write frequency since those media access
145 * commands are slow. We start from a "fast" frequency - IDETAPE_DSC_MA_FAST
146 * (1 second), and if we don't receive DSC after IDETAPE_DSC_MA_THRESHOLD
147 * (5 min), we switch it to a lower frequency - IDETAPE_DSC_MA_SLOW (1 min).
148 *
149 * We also set a timeout for the timer, in case something goes wrong. The
150 * timeout should be longer than the maximum execution time of a tape operation.
151 */
152
153 /* DSC timings. */
154 #define IDETAPE_DSC_RW_MIN 5*HZ/100 /* 50 msec */
155 #define IDETAPE_DSC_RW_MAX 40*HZ/100 /* 400 msec */
156 #define IDETAPE_DSC_RW_TIMEOUT 2*60*HZ /* 2 minutes */
157 #define IDETAPE_DSC_MA_FAST 2*HZ /* 2 seconds */
158 #define IDETAPE_DSC_MA_THRESHOLD 5*60*HZ /* 5 minutes */
159 #define IDETAPE_DSC_MA_SLOW 30*HZ /* 30 seconds */
160 #define IDETAPE_DSC_MA_TIMEOUT 2*60*60*HZ /* 2 hours */
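/*
 * How the media-access timings above are applied (condensed from
 * idetape_pc_intr() and idetape_do_request() further below; the real code
 * also distinguishes the REQ_IDETAPE_PC2 case on timeout). On a successful
 * initiation of a media access command we start polling quickly:
 *
 *	tape->dsc_polling_start = jiffies;
 *	tape->dsc_poll_freq = IDETAPE_DSC_MA_FAST;
 *	tape->dsc_timeout = jiffies + IDETAPE_DSC_MA_TIMEOUT;
 *
 * and on each subsequent poll we slow down or give up:
 *
 *	if (time_after(jiffies, tape->dsc_timeout))
 *		return ide_do_reset(drive);
 *	else if (time_after(jiffies, tape->dsc_polling_start +
 *					IDETAPE_DSC_MA_THRESHOLD))
 *		tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
 */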
161
162 /*************************** End of tunable parameters ***********************/
163
164 /* Read/Write error simulation */
165 #define SIMULATE_ERRORS 0
166
167 /* tape directions */
168 enum {
169 IDETAPE_DIR_NONE = (1 << 0),
170 IDETAPE_DIR_READ = (1 << 1),
171 IDETAPE_DIR_WRITE = (1 << 2),
172 };
173
174 struct idetape_bh {
175 u32 b_size;
176 atomic_t b_count;
177 struct idetape_bh *b_reqnext;
178 char *b_data;
179 };
180
181 /* Tape door status */
182 #define DOOR_UNLOCKED 0
183 #define DOOR_LOCKED 1
184 #define DOOR_EXPLICITLY_LOCKED 2
185
186 /* Some defines for the SPACE command */
187 #define IDETAPE_SPACE_OVER_FILEMARK 1
188 #define IDETAPE_SPACE_TO_EOD 3
189
190 /* Some defines for the LOAD UNLOAD command */
191 #define IDETAPE_LU_LOAD_MASK 1
192 #define IDETAPE_LU_RETENSION_MASK 2
193 #define IDETAPE_LU_EOT_MASK 4
194
195 /*
196 * Special requests for our block device strategy routine.
197 *
198 * In order to service a character device command, we add special requests to
199 * the tail of our block device request queue and wait for their completion.
200 */
201
202 enum {
203 REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */
204 REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */
205 REQ_IDETAPE_READ = (1 << 2),
206 REQ_IDETAPE_WRITE = (1 << 3),
207 };
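/*
 * Illustration (mirroring idetape_queue_pc_tail() further below): the
 * character device layer turns a packet command into such a special request,
 * queues it at the tail of the block device queue and waits for it:
 *
 *	struct request rq;
 *
 *	idetape_init_rq(&rq, REQ_IDETAPE_PC1);
 *	rq.buffer = (char *) pc;
 *	rq.rq_disk = tape->disk;
 *	return ide_do_drive_cmd(drive, &rq, ide_wait);
 */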
208
209 /* Error codes returned in rq->errors to the higher part of the driver. */
210 #define IDETAPE_ERROR_GENERAL 101
211 #define IDETAPE_ERROR_FILEMARK 102
212 #define IDETAPE_ERROR_EOD 103
213
214 /* Structures related to the MODE SELECT / MODE SENSE packet commands. */
215 #define IDETAPE_BLOCK_DESCRIPTOR 0
216 #define IDETAPE_CAPABILITIES_PAGE 0x2a
217
218 /* Tape flag bits values. */
219 enum {
220 IDETAPE_FLAG_IGNORE_DSC = (1 << 0),
221 /* 0 = the tape position is unknown */
222 IDETAPE_FLAG_ADDRESS_VALID = (1 << 1),
223 /* Device already opened */
224 IDETAPE_FLAG_BUSY = (1 << 2),
225 /* Error detected in a pipeline stage */
226 IDETAPE_FLAG_PIPELINE_ERR = (1 << 3),
227 /* Attempt to auto-detect the current user block size */
228 IDETAPE_FLAG_DETECT_BS = (1 << 4),
229 /* Currently on a filemark */
230 IDETAPE_FLAG_FILEMARK = (1 << 5),
231 /* DRQ interrupt device */
232 IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 6),
233 /* pipeline active */
234 IDETAPE_FLAG_PIPELINE_ACTIVE = (1 << 7),
235 /* 0 = no tape is loaded, so we don't rewind after ejecting */
236 IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 8),
237 };
238
239 /* A pipeline stage. */
240 typedef struct idetape_stage_s {
241 struct request rq; /* The corresponding request */
242 struct idetape_bh *bh; /* The data buffers */
243 struct idetape_stage_s *next; /* Pointer to the next stage */
244 } idetape_stage_t;
245
246 /*
247 * Most of our global data which we need to save even as we leave the driver due
248 * to an interrupt or a timer event is stored in the struct defined below.
249 */
250 typedef struct ide_tape_obj {
251 ide_drive_t *drive;
252 ide_driver_t *driver;
253 struct gendisk *disk;
254 struct kref kref;
255
256 /*
257 * Since a typical character device operation requires more
258 * than one packet command, we provide here enough memory
259 * for the maximum number of interconnected packet commands.
260 * The packet commands are stored in the circular array pc_stack.
261 * pc_stack_index points to the next free entry, and wraps around
262 * to the start when we get to the last array entry.
263 *
264 * pc points to the current processed packet command.
265 *
266 * failed_pc points to the last failed packet command, or contains
267 * NULL if we do not need to retry any packet command. This is
268 * required since an additional packet command is needed before the
269 * retry, to get detailed information on what went wrong.
270 */
271 /* Current packet command */
272 struct ide_atapi_pc *pc;
273 /* Last failed packet command */
274 struct ide_atapi_pc *failed_pc;
275 /* Packet command stack */
276 struct ide_atapi_pc pc_stack[IDETAPE_PC_STACK];
277 /* Next free packet command storage space */
278 int pc_stack_index;
279 struct request rq_stack[IDETAPE_PC_STACK];
280 /* We implement a circular array */
281 int rq_stack_index;
282
283 /*
284 * DSC polling variables.
285 *
286 * While polling for DSC we use postponed_rq to postpone the current
287 * request so that ide.c will be able to service pending requests on the
288 * other device. Note that at most we will have only one DSC (usually
289 * data transfer) request in the device request queue. Additional
290 * requests can be queued in our internal pipeline, but they will be
291 * visible to ide.c only one at a time.
292 */
293 struct request *postponed_rq;
294 /* The time in which we started polling for DSC */
295 unsigned long dsc_polling_start;
296 /* Timer used to poll for dsc */
297 struct timer_list dsc_timer;
298 /* Read/Write dsc polling frequency */
299 unsigned long best_dsc_rw_freq;
300 unsigned long dsc_poll_freq;
301 unsigned long dsc_timeout;
302
303 /* Read position information */
304 u8 partition;
305 /* Current block */
306 unsigned int first_frame;
307
308 /* Last error information */
309 u8 sense_key, asc, ascq;
310
311 /* Character device operation */
312 unsigned int minor;
313 /* device name */
314 char name[4];
315 /* Current character device data transfer direction */
316 u8 chrdev_dir;
317
318 /* tape block size, usually 512 or 1024 bytes */
319 unsigned short blk_size;
320 int user_bs_factor;
321
322 /* Copy of the tape's Capabilities and Mechanical Page */
323 u8 caps[20];
324
325 /*
326 * Active data transfer request parameters.
327 *
328 * At most, there is only one ide-tape originated data transfer request
329 * in the device request queue. This allows ide.c to easily service
330 * requests from the other device when we postpone our active request.
331 * In the pipelined operation mode, we use our internal pipeline
332 * structure to hold more data requests. The data buffer size is chosen
333 * based on the tape's recommendation.
334 */
335 /* ptr to the request which is waiting in the device request queue */
336 struct request *active_data_rq;
337 /* Data buffer size chosen based on the tape's recommendation */
338 int stage_size;
339 idetape_stage_t *merge_stage;
340 int merge_stage_size;
341 struct idetape_bh *bh;
342 char *b_data;
343 int b_count;
344
345 /*
346 * Pipeline parameters.
347 *
348 * To accomplish non-pipelined mode, we simply set the following
349 * variables to zero (or NULL, where appropriate).
350 */
351 /* Number of currently used stages */
352 int nr_stages;
353 /* Number of pending stages */
354 int nr_pending_stages;
355 /* We will not allocate more than this number of stages */
356 int max_stages, min_pipeline, max_pipeline;
357 /* The first stage which will be removed from the pipeline */
358 idetape_stage_t *first_stage;
359 /* The currently active stage */
360 idetape_stage_t *active_stage;
361 /* Will be serviced after the currently active request */
362 idetape_stage_t *next_stage;
363 /* New requests will be added to the pipeline here */
364 idetape_stage_t *last_stage;
365 int pages_per_stage;
366 /* Wasted space in each stage */
367 int excess_bh_size;
368
369 /* Status/Action flags: long for set_bit */
370 unsigned long flags;
371 /* protects the ide-tape queue */
372 spinlock_t lock;
373
374 /* Measures average tape speed */
375 unsigned long avg_time;
376 int avg_size;
377 int avg_speed;
378
379 /* the door is currently locked */
380 int door_locked;
381 /* the tape hardware is write protected */
382 char drv_write_prot;
383 /* the tape is write protected (hardware or opened as read-only) */
384 char write_prot;
385
386 /*
387 * Limit the number of times a request can be postponed, to avoid an
388 * infinite postpone deadlock.
389 */
390 int postpone_cnt;
391
392 /* Speed control at the tape buffers input/output */
393 unsigned long insert_time;
394 int insert_size;
395 int insert_speed;
396 int measure_insert_time;
397
398 u32 debug_mask;
399 } idetape_tape_t;
400
401 static DEFINE_MUTEX(idetape_ref_mutex);
402
403 static struct class *idetape_sysfs_class;
404
405 #define to_ide_tape(obj) container_of(obj, struct ide_tape_obj, kref)
406
407 #define ide_tape_g(disk) \
408 container_of((disk)->private_data, struct ide_tape_obj, driver)
409
410 static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
411 {
412 struct ide_tape_obj *tape = NULL;
413
414 mutex_lock(&idetape_ref_mutex);
415 tape = ide_tape_g(disk);
416 if (tape)
417 kref_get(&tape->kref);
418 mutex_unlock(&idetape_ref_mutex);
419 return tape;
420 }
421
422 static void ide_tape_release(struct kref *);
423
424 static void ide_tape_put(struct ide_tape_obj *tape)
425 {
426 mutex_lock(&idetape_ref_mutex);
427 kref_put(&tape->kref, ide_tape_release);
428 mutex_unlock(&idetape_ref_mutex);
429 }
430
431 /*
432 * The variables below are used for the character device interface. Additional
433 * state variables are defined in our ide_drive_t structure.
434 */
435 static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];
436
437 #define ide_tape_f(file) ((file)->private_data)
438
439 static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
440 {
441 struct ide_tape_obj *tape = NULL;
442
443 mutex_lock(&idetape_ref_mutex);
444 tape = idetape_devs[i];
445 if (tape)
446 kref_get(&tape->kref);
447 mutex_unlock(&idetape_ref_mutex);
448 return tape;
449 }
450
451 static void idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
452 unsigned int bcount)
453 {
454 struct idetape_bh *bh = pc->bh;
455 int count;
456
457 while (bcount) {
458 if (bh == NULL) {
459 printk(KERN_ERR "ide-tape: bh == NULL in "
460 "idetape_input_buffers\n");
461 ide_atapi_discard_data(drive, bcount);
462 return;
463 }
464 count = min(
465 (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
466 bcount);
467 HWIF(drive)->atapi_input_bytes(drive, bh->b_data +
468 atomic_read(&bh->b_count), count);
469 bcount -= count;
470 atomic_add(count, &bh->b_count);
471 if (atomic_read(&bh->b_count) == bh->b_size) {
472 bh = bh->b_reqnext;
473 if (bh)
474 atomic_set(&bh->b_count, 0);
475 }
476 }
477 pc->bh = bh;
478 }
479
480 static void idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
481 unsigned int bcount)
482 {
483 struct idetape_bh *bh = pc->bh;
484 int count;
485
486 while (bcount) {
487 if (bh == NULL) {
488 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
489 __func__);
490 return;
491 }
492 count = min((unsigned int)pc->b_count, (unsigned int)bcount);
493 HWIF(drive)->atapi_output_bytes(drive, pc->b_data, count);
494 bcount -= count;
495 pc->b_data += count;
496 pc->b_count -= count;
497 if (!pc->b_count) {
498 bh = bh->b_reqnext;
499 pc->bh = bh;
500 if (bh) {
501 pc->b_data = bh->b_data;
502 pc->b_count = atomic_read(&bh->b_count);
503 }
504 }
505 }
506 }
507
508 static void idetape_update_buffers(struct ide_atapi_pc *pc)
509 {
510 struct idetape_bh *bh = pc->bh;
511 int count;
512 unsigned int bcount = pc->xferred;
513
514 if (pc->flags & PC_FLAG_WRITING)
515 return;
516 while (bcount) {
517 if (bh == NULL) {
518 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
519 __func__);
520 return;
521 }
522 count = min((unsigned int)bh->b_size, (unsigned int)bcount);
523 atomic_set(&bh->b_count, count);
524 if (atomic_read(&bh->b_count) == bh->b_size)
525 bh = bh->b_reqnext;
526 bcount -= count;
527 }
528 pc->bh = bh;
529 }
530
531 /*
532 * idetape_next_pc_storage returns a pointer to a place in which we can
533 * safely store a packet command, even though we intend to leave the
534 * driver. A storage space for a maximum of IDETAPE_PC_STACK packet
535 * commands is allocated at initialization time.
536 */
537 static struct ide_atapi_pc *idetape_next_pc_storage(ide_drive_t *drive)
538 {
539 idetape_tape_t *tape = drive->driver_data;
540
541 debug_log(DBG_PCRQ_STACK, "pc_stack_index=%d\n", tape->pc_stack_index);
542
543 if (tape->pc_stack_index == IDETAPE_PC_STACK)
544 tape->pc_stack_index = 0;
545 return (&tape->pc_stack[tape->pc_stack_index++]);
546 }
547
548 /*
549 * idetape_next_rq_storage is used along with idetape_next_pc_storage.
550 * Since we queue packet commands in the request queue, we need to
551 * allocate a request, along with the allocation of a packet command.
552 */
553
554 /**************************************************************
555 * *
556 * This should get fixed to use kmalloc(.., GFP_ATOMIC) *
557 * followed later on by kfree(). -ml *
558 * *
559 **************************************************************/
560
561 static struct request *idetape_next_rq_storage(ide_drive_t *drive)
562 {
563 idetape_tape_t *tape = drive->driver_data;
564
565 debug_log(DBG_PCRQ_STACK, "rq_stack_index=%d\n", tape->rq_stack_index);
566
567 if (tape->rq_stack_index == IDETAPE_PC_STACK)
568 tape->rq_stack_index = 0;
569 return (&tape->rq_stack[tape->rq_stack_index++]);
570 }
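/*
 * Typical paired usage of the two helpers above (taken from
 * idetape_retry_pc() further below): grab safe storage for both the packet
 * command and its request, then queue them at the head of the request queue:
 *
 *	pc = idetape_next_pc_storage(drive);
 *	rq = idetape_next_rq_storage(drive);
 *	idetape_create_request_sense_cmd(pc);
 *	idetape_queue_pc_head(drive, pc, rq);
 */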
571
572 static void idetape_init_pc(struct ide_atapi_pc *pc)
573 {
574 memset(pc->c, 0, 12);
575 pc->retries = 0;
576 pc->flags = 0;
577 pc->req_xfer = 0;
578 pc->buf = pc->pc_buf;
579 pc->buf_size = IDETAPE_PC_BUFFER_SIZE;
580 pc->bh = NULL;
581 pc->b_data = NULL;
582 }
583
584 /*
585 * called on each failed packet command retry to analyze the request sense. We
586 * currently do not utilize this information.
587 */
588 static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
589 {
590 idetape_tape_t *tape = drive->driver_data;
591 struct ide_atapi_pc *pc = tape->failed_pc;
592
593 tape->sense_key = sense[2] & 0xF;
594 tape->asc = sense[12];
595 tape->ascq = sense[13];
596
597 debug_log(DBG_ERR, "pc = %x, sense key = %x, asc = %x, ascq = %x\n",
598 pc->c[0], tape->sense_key, tape->asc, tape->ascq);
599
600 /* Correct pc->xferred by asking the tape. */
601 if (pc->flags & PC_FLAG_DMA_ERROR) {
602 pc->xferred = pc->req_xfer -
603 tape->blk_size *
604 be32_to_cpu(get_unaligned((u32 *)&sense[3]));
605 idetape_update_buffers(pc);
606 }
607
608 /*
609 * If error was the result of a zero-length read or write command,
610 * with sense key=5, asc=0x22, ascq=0, let it slide. Some drives
611 * (i.e. Seagate STT3401A Travan) don't support 0-length read/writes.
612 */
613 if ((pc->c[0] == READ_6 || pc->c[0] == WRITE_6)
614 /* length == 0 */
615 && pc->c[4] == 0 && pc->c[3] == 0 && pc->c[2] == 0) {
616 if (tape->sense_key == 5) {
617 /* don't report an error, everything's ok */
618 pc->error = 0;
619 /* don't retry read/write */
620 pc->flags |= PC_FLAG_ABORT;
621 }
622 }
623 if (pc->c[0] == READ_6 && (sense[2] & 0x80)) {
624 pc->error = IDETAPE_ERROR_FILEMARK;
625 pc->flags |= PC_FLAG_ABORT;
626 }
627 if (pc->c[0] == WRITE_6) {
628 if ((sense[2] & 0x40) || (tape->sense_key == 0xd
629 && tape->asc == 0x0 && tape->ascq == 0x2)) {
630 pc->error = IDETAPE_ERROR_EOD;
631 pc->flags |= PC_FLAG_ABORT;
632 }
633 }
634 if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
635 if (tape->sense_key == 8) {
636 pc->error = IDETAPE_ERROR_EOD;
637 pc->flags |= PC_FLAG_ABORT;
638 }
639 if (!(pc->flags & PC_FLAG_ABORT) &&
640 pc->xferred)
641 pc->retries = IDETAPE_MAX_PC_RETRIES + 1;
642 }
643 }
644
645 static void idetape_activate_next_stage(ide_drive_t *drive)
646 {
647 idetape_tape_t *tape = drive->driver_data;
648 idetape_stage_t *stage = tape->next_stage;
649 struct request *rq = &stage->rq;
650
651 debug_log(DBG_PROCS, "Enter %s\n", __func__);
652
653 if (stage == NULL) {
654 printk(KERN_ERR "ide-tape: bug: Trying to activate a non"
655 " existing stage\n");
656 return;
657 }
658
659 rq->rq_disk = tape->disk;
660 rq->buffer = NULL;
661 rq->special = (void *)stage->bh;
662 tape->active_data_rq = rq;
663 tape->active_stage = stage;
664 tape->next_stage = stage->next;
665 }
666
667 /* Free a stage along with its related buffers completely. */
668 static void __idetape_kfree_stage(idetape_stage_t *stage)
669 {
670 struct idetape_bh *prev_bh, *bh = stage->bh;
671 int size;
672
673 while (bh != NULL) {
674 if (bh->b_data != NULL) {
675 size = (int) bh->b_size;
676 while (size > 0) {
677 free_page((unsigned long) bh->b_data);
678 size -= PAGE_SIZE;
679 bh->b_data += PAGE_SIZE;
680 }
681 }
682 prev_bh = bh;
683 bh = bh->b_reqnext;
684 kfree(prev_bh);
685 }
686 kfree(stage);
687 }
688
689 /*
690 * Finish servicing a request and insert a pending pipeline request into the
691 * main device queue.
692 */
693 static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
694 {
695 struct request *rq = HWGROUP(drive)->rq;
696 idetape_tape_t *tape = drive->driver_data;
697 unsigned long flags;
698 int error;
699
700 debug_log(DBG_PROCS, "Enter %s\n", __func__);
701
702 switch (uptodate) {
703 case 0: error = IDETAPE_ERROR_GENERAL; break;
704 case 1: error = 0; break;
705 default: error = uptodate;
706 }
707 rq->errors = error;
708 if (error)
709 tape->failed_pc = NULL;
710
711 if (!blk_special_request(rq)) {
712 ide_end_request(drive, uptodate, nr_sects);
713 return 0;
714 }
715
716 spin_lock_irqsave(&tape->lock, flags);
717
718 ide_end_drive_cmd(drive, 0, 0);
719
720 clear_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
721 spin_unlock_irqrestore(&tape->lock, flags);
722 return 0;
723 }
724
725 static ide_startstop_t idetape_request_sense_callback(ide_drive_t *drive)
726 {
727 idetape_tape_t *tape = drive->driver_data;
728
729 debug_log(DBG_PROCS, "Enter %s\n", __func__);
730
731 if (!tape->pc->error) {
732 idetape_analyze_error(drive, tape->pc->buf);
733 idetape_end_request(drive, 1, 0);
734 } else {
735 printk(KERN_ERR "ide-tape: Error in REQUEST SENSE itself - "
736 "Aborting request!\n");
737 idetape_end_request(drive, 0, 0);
738 }
739 return ide_stopped;
740 }
741
742 static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc)
743 {
744 idetape_init_pc(pc);
745 pc->c[0] = REQUEST_SENSE;
746 pc->c[4] = 20;
747 pc->req_xfer = 20;
748 pc->idetape_callback = &idetape_request_sense_callback;
749 }
750
751 static void idetape_init_rq(struct request *rq, u8 cmd)
752 {
753 memset(rq, 0, sizeof(*rq));
754 rq->cmd_type = REQ_TYPE_SPECIAL;
755 rq->cmd[0] = cmd;
756 }
757
758 /*
759 * Generate a new packet command request in front of the request queue, before
760 * the current request, so that it will be processed immediately, on the next
761 * pass through the driver. The function below is called from the request
762 * handling part of the driver (the "bottom" part). Safe storage for the request
763 * should be allocated with idetape_next_{pc,rq}_storage() prior to that.
764 *
765 * Memory for those requests is pre-allocated at initialization time, and is
766 * limited to IDETAPE_PC_STACK requests. We assume that we have enough space for
767 * the maximum possible number of inter-dependent packet commands.
768 *
769 * The higher level of the driver - the ioctl handler and the character device
770 * handling functions - should queue requests to the lower level part and wait
771 * for their completion using idetape_queue_pc_tail or idetape_queue_rw_tail.
772 */
773 static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
774 struct request *rq)
775 {
776 struct ide_tape_obj *tape = drive->driver_data;
777
778 idetape_init_rq(rq, REQ_IDETAPE_PC1);
779 rq->buffer = (char *) pc;
780 rq->rq_disk = tape->disk;
781 (void) ide_do_drive_cmd(drive, rq, ide_preempt);
782 }
783
784 /*
785 * idetape_retry_pc is called when an error was detected during the
786 * last packet command. We queue a request sense packet command at
787 * the head of the request list.
788 */
789 static ide_startstop_t idetape_retry_pc(ide_drive_t *drive)
790 {
791 idetape_tape_t *tape = drive->driver_data;
792 struct ide_atapi_pc *pc;
793 struct request *rq;
794
795 (void)ide_read_error(drive);
796 pc = idetape_next_pc_storage(drive);
797 rq = idetape_next_rq_storage(drive);
798 idetape_create_request_sense_cmd(pc);
799 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
800 idetape_queue_pc_head(drive, pc, rq);
801 return ide_stopped;
802 }
803
804 /*
805 * Postpone the current request so that ide.c will be able to service requests
806 * from another device on the same hwgroup while we are polling for DSC.
807 */
808 static void idetape_postpone_request(ide_drive_t *drive)
809 {
810 idetape_tape_t *tape = drive->driver_data;
811
812 debug_log(DBG_PROCS, "Enter %s\n", __func__);
813
814 tape->postponed_rq = HWGROUP(drive)->rq;
815 ide_stall_queue(drive, tape->dsc_poll_freq);
816 }
817
818 typedef void idetape_io_buf(ide_drive_t *, struct ide_atapi_pc *, unsigned int);
819
820 /*
821 * This is the usual interrupt handler which will be called during a packet
822 * command. We will transfer some of the data (as requested by the drive) and
823 * will re-point the interrupt handler to us. When data transfer is finished, we
824 * will act according to the algorithm described before
825 * idetape_issue_pc.
826 */
827 static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
828 {
829 ide_hwif_t *hwif = drive->hwif;
830 idetape_tape_t *tape = drive->driver_data;
831 struct ide_atapi_pc *pc = tape->pc;
832 xfer_func_t *xferfunc;
833 idetape_io_buf *iobuf;
834 unsigned int temp;
835 #if SIMULATE_ERRORS
836 static int error_sim_count;
837 #endif
838 u16 bcount;
839 u8 stat, ireason;
840
841 debug_log(DBG_PROCS, "Enter %s - interrupt handler\n", __func__);
842
843 /* Clear the interrupt */
844 stat = ide_read_status(drive);
845
846 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
847 if (hwif->dma_ops->dma_end(drive) || (stat & ERR_STAT)) {
848 /*
849 * A DMA error is sometimes expected. For example,
850 * if the tape is crossing a filemark during a
851 * READ command, it will issue an irq and position
852 * itself before the filemark, so that only a partial
853 * data transfer will occur (which causes the DMA
854 * error). In that case, we will later ask the tape
855 * how many bytes of the original request were
856 * actually transferred (we can't receive that
857 * information from the DMA engine on most chipsets).
858 */
859
860 /*
861 * On the contrary, a DMA error is never expected;
862 * it usually indicates a hardware error or abort.
863 * If the tape crosses a filemark during a READ
864 * command, it will issue an irq and position itself
865 * after the filemark (not before). Only a partial
866 * data transfer will occur, but no DMA error.
867 * (AS, 19 Apr 2001)
868 */
869 pc->flags |= PC_FLAG_DMA_ERROR;
870 } else {
871 pc->xferred = pc->req_xfer;
872 idetape_update_buffers(pc);
873 }
874 debug_log(DBG_PROCS, "DMA finished\n");
875
876 }
877
878 /* No more interrupts */
879 if ((stat & DRQ_STAT) == 0) {
880 debug_log(DBG_SENSE, "Packet command completed, %d bytes"
881 " transferred\n", pc->xferred);
882
883 pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
884 local_irq_enable();
885
886 #if SIMULATE_ERRORS
887 if ((pc->c[0] == WRITE_6 || pc->c[0] == READ_6) &&
888 (++error_sim_count % 100) == 0) {
889 printk(KERN_INFO "ide-tape: %s: simulating error\n",
890 tape->name);
891 stat |= ERR_STAT;
892 }
893 #endif
894 if ((stat & ERR_STAT) && pc->c[0] == REQUEST_SENSE)
895 stat &= ~ERR_STAT;
896 if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) {
897 /* Error detected */
898 debug_log(DBG_ERR, "%s: I/O error\n", tape->name);
899
900 if (pc->c[0] == REQUEST_SENSE) {
901 printk(KERN_ERR "ide-tape: I/O error in request"
902 " sense command\n");
903 return ide_do_reset(drive);
904 }
905 debug_log(DBG_ERR, "[cmd %x]: check condition\n",
906 pc->c[0]);
907
908 /* Retry operation */
909 return idetape_retry_pc(drive);
910 }
911 pc->error = 0;
912 if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) &&
913 (stat & SEEK_STAT) == 0) {
914 /* Media access command */
915 tape->dsc_polling_start = jiffies;
916 tape->dsc_poll_freq = IDETAPE_DSC_MA_FAST;
917 tape->dsc_timeout = jiffies + IDETAPE_DSC_MA_TIMEOUT;
918 /* Allow ide.c to handle other requests */
919 idetape_postpone_request(drive);
920 return ide_stopped;
921 }
922 if (tape->failed_pc == pc)
923 tape->failed_pc = NULL;
924 /* Command finished - Call the callback function */
925 return pc->idetape_callback(drive);
926 }
927
928 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
929 pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
930 printk(KERN_ERR "ide-tape: The tape wants to issue more "
931 "interrupts in DMA mode\n");
932 printk(KERN_ERR "ide-tape: DMA disabled, reverting to PIO\n");
933 ide_dma_off(drive);
934 return ide_do_reset(drive);
935 }
936 /* Get the number of bytes to transfer on this interrupt. */
937 bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
938 hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
939
940 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
941
942 if (ireason & CD) {
943 printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__);
944 return ide_do_reset(drive);
945 }
946 if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) {
947 /* Hopefully, we will never get here */
948 printk(KERN_ERR "ide-tape: We wanted to %s, ",
949 (ireason & IO) ? "Write" : "Read");
950 printk(KERN_ERR "ide-tape: but the tape wants us to %s !\n",
951 (ireason & IO) ? "Read" : "Write");
952 return ide_do_reset(drive);
953 }
954 if (!(pc->flags & PC_FLAG_WRITING)) {
955 /* Reading - Check that we have enough space */
956 temp = pc->xferred + bcount;
957 if (temp > pc->req_xfer) {
958 if (temp > pc->buf_size) {
959 printk(KERN_ERR "ide-tape: The tape wants to "
960 "send us more data than expected "
961 "- discarding data\n");
962 ide_atapi_discard_data(drive, bcount);
963 ide_set_handler(drive, &idetape_pc_intr,
964 IDETAPE_WAIT_CMD, NULL);
965 return ide_started;
966 }
967 debug_log(DBG_SENSE, "The tape wants to send us more "
968 "data than expected - allowing transfer\n");
969 }
970 iobuf = &idetape_input_buffers;
971 xferfunc = hwif->atapi_input_bytes;
972 } else {
973 iobuf = &idetape_output_buffers;
974 xferfunc = hwif->atapi_output_bytes;
975 }
976
977 if (pc->bh)
978 iobuf(drive, pc, bcount);
979 else
980 xferfunc(drive, pc->cur_pos, bcount);
981
982 /* Update the current position */
983 pc->xferred += bcount;
984 pc->cur_pos += bcount;
985
986 debug_log(DBG_SENSE, "[cmd %x] transferred %d bytes on that intr.\n",
987 pc->c[0], bcount);
988
989 /* And set the interrupt handler again */
990 ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
991 return ide_started;
992 }
993
994 /*
995 * Packet Command Interface
996 *
997 * The current Packet Command is available in tape->pc, and will not change
998 * until we finish handling it. Each packet command is associated with a
999 * callback function that will be called when the command is finished.
1000 *
1001 * The handling will be done in several stages:
1002 *
1003 * 1. idetape_issue_pc will send the packet command to the drive, and will set
1004 * the interrupt handler to idetape_pc_intr.
1005 *
1006 * 2. On each interrupt, idetape_pc_intr will be called. This step will be
1007 * repeated until the device signals us that no more interrupts will be issued.
1008 *
1009 * 3. ATAPI Tape media access commands have immediate status with a delayed
1010 * process. In case of a successful initiation of a media access packet command,
1011 * the DSC bit will be set when the actual execution of the command is finished.
1012 * Since the tape drive will not issue an interrupt, we have to poll for this
1013 * event. In this case, we define the request as "low priority request" by
1014 * setting rq_status to IDETAPE_RQ_POSTPONED, set a timer to poll for DSC and
1015 * exit the driver.
1016 *
1017 * ide.c will then give higher priority to requests which originate from the
1018 * other device, until we change rq_status to RQ_ACTIVE.
1019 *
1020 * 4. When the packet command is finished, it will be checked for errors.
1021 *
1022 * 5. In case an error was found, we queue a request sense packet command in
1023 * front of the request queue and retry the operation up to
1024 * IDETAPE_MAX_PC_RETRIES times.
1025 *
1026 * 6. In case no error was found, or we decided to give up and not to retry
1027 * again, the callback function will be called and then we will handle the next
1028 * request.
1029 */
1030 static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
1031 {
1032 ide_hwif_t *hwif = drive->hwif;
1033 idetape_tape_t *tape = drive->driver_data;
1034 struct ide_atapi_pc *pc = tape->pc;
1035 int retries = 100;
1036 ide_startstop_t startstop;
1037 u8 ireason;
1038
1039 if (ide_wait_stat(&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) {
1040 printk(KERN_ERR "ide-tape: Strange, packet command initiated "
1041 "yet DRQ isn't asserted\n");
1042 return startstop;
1043 }
1044 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
1045 while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
1046 printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
1047 "a packet command, retrying\n");
1048 udelay(100);
1049 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
1050 if (retries == 0) {
1051 printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while "
1052 "issuing a packet command, ignoring\n");
1053 ireason |= CD;
1054 ireason &= ~IO;
1055 }
1056 }
1057 if ((ireason & CD) == 0 || (ireason & IO)) {
1058 printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
1059 "a packet command\n");
1060 return ide_do_reset(drive);
1061 }
1062 /* Set the interrupt routine */
1063 ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
1064 #ifdef CONFIG_BLK_DEV_IDEDMA
1065 /* Begin DMA, if necessary */
1066 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS)
1067 hwif->dma_ops->dma_start(drive);
1068 #endif
1069 /* Send the actual packet */
1070 HWIF(drive)->atapi_output_bytes(drive, pc->c, 12);
1071 return ide_started;
1072 }
1073
1074 static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
1075 struct ide_atapi_pc *pc)
1076 {
1077 ide_hwif_t *hwif = drive->hwif;
1078 idetape_tape_t *tape = drive->driver_data;
1079 int dma_ok = 0;
1080 u16 bcount;
1081
1082 if (tape->pc->c[0] == REQUEST_SENSE &&
1083 pc->c[0] == REQUEST_SENSE) {
1084 printk(KERN_ERR "ide-tape: possible ide-tape.c bug - "
1085 "Two request sense in serial were issued\n");
1086 }
1087
1088 if (tape->failed_pc == NULL && pc->c[0] != REQUEST_SENSE)
1089 tape->failed_pc = pc;
1090 /* Set the current packet command */
1091 tape->pc = pc;
1092
1093 if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
1094 (pc->flags & PC_FLAG_ABORT)) {
1095 /*
1096 * We will "abort" retrying a packet command in case legitimate
1097 * error code was received (crossing a filemark, or end of the
1098 * media, for example).
1099 */
1100 if (!(pc->flags & PC_FLAG_ABORT)) {
1101 if (!(pc->c[0] == TEST_UNIT_READY &&
1102 tape->sense_key == 2 && tape->asc == 4 &&
1103 (tape->ascq == 1 || tape->ascq == 8))) {
1104 printk(KERN_ERR "ide-tape: %s: I/O error, "
1105 "pc = %2x, key = %2x, "
1106 "asc = %2x, ascq = %2x\n",
1107 tape->name, pc->c[0],
1108 tape->sense_key, tape->asc,
1109 tape->ascq);
1110 }
1111 /* Giving up */
1112 pc->error = IDETAPE_ERROR_GENERAL;
1113 }
1114 tape->failed_pc = NULL;
1115 return pc->idetape_callback(drive);
1116 }
1117 debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
1118
1119 pc->retries++;
1120 /* We haven't transferred any data yet */
1121 pc->xferred = 0;
1122 pc->cur_pos = pc->buf;
1123 /* Request to transfer the entire buffer at once */
1124 bcount = pc->req_xfer;
1125
1126 if (pc->flags & PC_FLAG_DMA_ERROR) {
1127 pc->flags &= ~PC_FLAG_DMA_ERROR;
1128 printk(KERN_WARNING "ide-tape: DMA disabled, "
1129 "reverting to PIO\n");
1130 ide_dma_off(drive);
1131 }
1132 if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
1133 dma_ok = !hwif->dma_ops->dma_setup(drive);
1134
1135 ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
1136 IDE_TFLAG_OUT_DEVICE, bcount, dma_ok);
1137
1138 if (dma_ok)
1139 /* Will begin DMA later */
1140 pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
1141 if (test_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags)) {
1142 ide_execute_command(drive, WIN_PACKETCMD, &idetape_transfer_pc,
1143 IDETAPE_WAIT_CMD, NULL);
1144 return ide_started;
1145 } else {
1146 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
1147 return idetape_transfer_pc(drive);
1148 }
1149 }
1150
1151 static ide_startstop_t idetape_pc_callback(ide_drive_t *drive)
1152 {
1153 idetape_tape_t *tape = drive->driver_data;
1154
1155 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1156
1157 idetape_end_request(drive, tape->pc->error ? 0 : 1, 0);
1158 return ide_stopped;
1159 }
1160
1161 /* A mode sense command is used to "sense" tape parameters. */
1162 static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
1163 {
1164 idetape_init_pc(pc);
1165 pc->c[0] = MODE_SENSE;
1166 if (page_code != IDETAPE_BLOCK_DESCRIPTOR)
1167 /* DBD = 1 - Don't return block descriptors */
1168 pc->c[1] = 8;
1169 pc->c[2] = page_code;
1170 /*
1171 * Changed pc->c[3] to 0 (255 will at best return unused info).
1172 *
1173 * For SCSI this byte is defined as subpage instead of high byte
1174 * of length and some IDE drives seem to interpret it this way
1175 * and return an error when 255 is used.
1176 */
1177 pc->c[3] = 0;
1178 /* We will just discard data in that case */
1179 pc->c[4] = 255;
1180 if (page_code == IDETAPE_BLOCK_DESCRIPTOR)
1181 pc->req_xfer = 12;
1182 else if (page_code == IDETAPE_CAPABILITIES_PAGE)
1183 pc->req_xfer = 24;
1184 else
1185 pc->req_xfer = 50;
1186 pc->idetape_callback = &idetape_pc_callback;
1187 }
1188
1189 static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
1190 {
1191 idetape_tape_t *tape = drive->driver_data;
1192 struct ide_atapi_pc *pc = tape->pc;
1193 u8 stat;
1194
1195 stat = ide_read_status(drive);
1196
1197 if (stat & SEEK_STAT) {
1198 if (stat & ERR_STAT) {
1199 /* Error detected */
1200 if (pc->c[0] != TEST_UNIT_READY)
1201 printk(KERN_ERR "ide-tape: %s: I/O error, ",
1202 tape->name);
1203 /* Retry operation */
1204 return idetape_retry_pc(drive);
1205 }
1206 pc->error = 0;
1207 if (tape->failed_pc == pc)
1208 tape->failed_pc = NULL;
1209 } else {
1210 pc->error = IDETAPE_ERROR_GENERAL;
1211 tape->failed_pc = NULL;
1212 }
1213 return pc->idetape_callback(drive);
1214 }
1215
1216 static ide_startstop_t idetape_rw_callback(ide_drive_t *drive)
1217 {
1218 idetape_tape_t *tape = drive->driver_data;
1219 struct request *rq = HWGROUP(drive)->rq;
1220 int blocks = tape->pc->xferred / tape->blk_size;
1221
1222 tape->avg_size += blocks * tape->blk_size;
1223 tape->insert_size += blocks * tape->blk_size;
1224 if (tape->insert_size > 1024 * 1024)
1225 tape->measure_insert_time = 1;
1226 if (tape->measure_insert_time) {
1227 tape->measure_insert_time = 0;
1228 tape->insert_time = jiffies;
1229 tape->insert_size = 0;
1230 }
1231 if (time_after(jiffies, tape->insert_time))
1232 tape->insert_speed = tape->insert_size / 1024 * HZ /
1233 (jiffies - tape->insert_time);
1234 if (time_after_eq(jiffies, tape->avg_time + HZ)) {
1235 tape->avg_speed = tape->avg_size * HZ /
1236 (jiffies - tape->avg_time) / 1024;
1237 tape->avg_size = 0;
1238 tape->avg_time = jiffies;
1239 }
1240 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1241
1242 tape->first_frame += blocks;
1243 rq->current_nr_sectors -= blocks;
1244
1245 if (!tape->pc->error)
1246 idetape_end_request(drive, 1, 0);
1247 else
1248 idetape_end_request(drive, tape->pc->error, 0);
1249 return ide_stopped;
1250 }
1251
1252 static void idetape_create_read_cmd(idetape_tape_t *tape,
1253 struct ide_atapi_pc *pc,
1254 unsigned int length, struct idetape_bh *bh)
1255 {
1256 idetape_init_pc(pc);
1257 pc->c[0] = READ_6;
1258 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
1259 pc->c[1] = 1;
1260 pc->idetape_callback = &idetape_rw_callback;
1261 pc->bh = bh;
1262 atomic_set(&bh->b_count, 0);
1263 pc->buf = NULL;
1264 pc->buf_size = length * tape->blk_size;
1265 pc->req_xfer = pc->buf_size;
1266 if (pc->req_xfer == tape->stage_size)
1267 pc->flags |= PC_FLAG_DMA_RECOMMENDED;
1268 }
1269
1270 static void idetape_create_write_cmd(idetape_tape_t *tape,
1271 struct ide_atapi_pc *pc,
1272 unsigned int length, struct idetape_bh *bh)
1273 {
1274 idetape_init_pc(pc);
1275 pc->c[0] = WRITE_6;
1276 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
1277 pc->c[1] = 1;
1278 pc->idetape_callback = &idetape_rw_callback;
1279 pc->flags |= PC_FLAG_WRITING;
1280 pc->bh = bh;
1281 pc->b_data = bh->b_data;
1282 pc->b_count = atomic_read(&bh->b_count);
1283 pc->buf = NULL;
1284 pc->buf_size = length * tape->blk_size;
1285 pc->req_xfer = pc->buf_size;
1286 if (pc->req_xfer == tape->stage_size)
1287 pc->flags |= PC_FLAG_DMA_RECOMMENDED;
1288 }
1289
1290 static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1291 struct request *rq, sector_t block)
1292 {
1293 idetape_tape_t *tape = drive->driver_data;
1294 struct ide_atapi_pc *pc = NULL;
1295 struct request *postponed_rq = tape->postponed_rq;
1296 u8 stat;
1297
1298 debug_log(DBG_SENSE, "sector: %ld, nr_sectors: %ld,"
1299 " current_nr_sectors: %d\n",
1300 rq->sector, rq->nr_sectors, rq->current_nr_sectors);
1301
1302 if (!blk_special_request(rq)) {
1303 /* We do not support buffer cache originated requests. */
1304 printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
1305 "request queue (%d)\n", drive->name, rq->cmd_type);
1306 ide_end_request(drive, 0, 0);
1307 return ide_stopped;
1308 }
1309
1310 /* Retry a failed packet command */
1311 if (tape->failed_pc && tape->pc->c[0] == REQUEST_SENSE)
1312 return idetape_issue_pc(drive, tape->failed_pc);
1313
1314 if (postponed_rq != NULL)
1315 if (rq != postponed_rq) {
1316 printk(KERN_ERR "ide-tape: ide-tape.c bug - "
1317 "Two DSC requests were queued\n");
1318 idetape_end_request(drive, 0, 0);
1319 return ide_stopped;
1320 }
1321
1322 tape->postponed_rq = NULL;
1323
1324 /*
1325 * If the tape is still busy, postpone our request and service
1326 * the other device meanwhile.
1327 */
1328 stat = ide_read_status(drive);
1329
1330 if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2))
1331 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
1332
1333 if (drive->post_reset == 1) {
1334 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
1335 drive->post_reset = 0;
1336 }
1337
1338 if (time_after(jiffies, tape->insert_time))
1339 tape->insert_speed = tape->insert_size / 1024 * HZ /
1340 (jiffies - tape->insert_time);
1341 if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) &&
1342 (stat & SEEK_STAT) == 0) {
1343 if (postponed_rq == NULL) {
1344 tape->dsc_polling_start = jiffies;
1345 tape->dsc_poll_freq = tape->best_dsc_rw_freq;
1346 tape->dsc_timeout = jiffies + IDETAPE_DSC_RW_TIMEOUT;
1347 } else if (time_after(jiffies, tape->dsc_timeout)) {
1348 printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
1349 tape->name);
1350 if (rq->cmd[0] & REQ_IDETAPE_PC2) {
1351 idetape_media_access_finished(drive);
1352 return ide_stopped;
1353 } else {
1354 return ide_do_reset(drive);
1355 }
1356 } else if (time_after(jiffies,
1357 tape->dsc_polling_start +
1358 IDETAPE_DSC_MA_THRESHOLD))
1359 tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
1360 idetape_postpone_request(drive);
1361 return ide_stopped;
1362 }
1363 if (rq->cmd[0] & REQ_IDETAPE_READ) {
1364 tape->postpone_cnt = 0;
1365 pc = idetape_next_pc_storage(drive);
1366 idetape_create_read_cmd(tape, pc, rq->current_nr_sectors,
1367 (struct idetape_bh *)rq->special);
1368 goto out;
1369 }
1370 if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
1371 tape->postpone_cnt = 0;
1372 pc = idetape_next_pc_storage(drive);
1373 idetape_create_write_cmd(tape, pc, rq->current_nr_sectors,
1374 (struct idetape_bh *)rq->special);
1375 goto out;
1376 }
1377 if (rq->cmd[0] & REQ_IDETAPE_PC1) {
1378 pc = (struct ide_atapi_pc *) rq->buffer;
1379 rq->cmd[0] &= ~(REQ_IDETAPE_PC1);
1380 rq->cmd[0] |= REQ_IDETAPE_PC2;
1381 goto out;
1382 }
1383 if (rq->cmd[0] & REQ_IDETAPE_PC2) {
1384 idetape_media_access_finished(drive);
1385 return ide_stopped;
1386 }
1387 BUG();
1388 out:
1389 return idetape_issue_pc(drive, pc);
1390 }
1391
1392 /* Pipeline related functions */
1393
1394 /*
1395 * The function below uses __get_free_page to allocate a pipeline stage, along
1396 * with all the necessary small buffers which together make a buffer of size
1397 * tape->stage_size (or a bit more). We attempt to combine sequential pages as
1398 * much as possible.
1399 *
1400 * It returns a pointer to the new allocated stage, or NULL if we can't (or
1401 * don't want to) allocate a stage.
1402 *
1403 * Pipeline stages are optional and are used to increase performance. If we
1404 * can't allocate them, we'll manage without them.
1405 */
1406 static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full,
1407 int clear)
1408 {
1409 idetape_stage_t *stage;
1410 struct idetape_bh *prev_bh, *bh;
1411 int pages = tape->pages_per_stage;
1412 char *b_data = NULL;
1413
1414 stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL);
1415 if (!stage)
1416 return NULL;
1417 stage->next = NULL;
1418
1419 stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
1420 bh = stage->bh;
1421 if (bh == NULL)
1422 goto abort;
1423 bh->b_reqnext = NULL;
1424 bh->b_data = (char *) __get_free_page(GFP_KERNEL);
1425 if (!bh->b_data)
1426 goto abort;
1427 if (clear)
1428 memset(bh->b_data, 0, PAGE_SIZE);
1429 bh->b_size = PAGE_SIZE;
1430 atomic_set(&bh->b_count, full ? bh->b_size : 0);
1431
1432 while (--pages) {
1433 b_data = (char *) __get_free_page(GFP_KERNEL);
1434 if (!b_data)
1435 goto abort;
1436 if (clear)
1437 memset(b_data, 0, PAGE_SIZE);
1438 if (bh->b_data == b_data + PAGE_SIZE) {
1439 bh->b_size += PAGE_SIZE;
1440 bh->b_data -= PAGE_SIZE;
1441 if (full)
1442 atomic_add(PAGE_SIZE, &bh->b_count);
1443 continue;
1444 }
1445 if (b_data == bh->b_data + bh->b_size) {
1446 bh->b_size += PAGE_SIZE;
1447 if (full)
1448 atomic_add(PAGE_SIZE, &bh->b_count);
1449 continue;
1450 }
1451 prev_bh = bh;
1452 bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
1453 if (!bh) {
1454 free_page((unsigned long) b_data);
1455 goto abort;
1456 }
1457 bh->b_reqnext = NULL;
1458 bh->b_data = b_data;
1459 bh->b_size = PAGE_SIZE;
1460 atomic_set(&bh->b_count, full ? bh->b_size : 0);
1461 prev_bh->b_reqnext = bh;
1462 }
1463 bh->b_size -= tape->excess_bh_size;
1464 if (full)
1465 atomic_sub(tape->excess_bh_size, &bh->b_count);
1466 return stage;
1467 abort:
1468 __idetape_kfree_stage(stage);
1469 return NULL;
1470 }
1471
1472 static int idetape_copy_stage_from_user(idetape_tape_t *tape,
1473 const char __user *buf, int n)
1474 {
1475 struct idetape_bh *bh = tape->bh;
1476 int count;
1477 int ret = 0;
1478
1479 while (n) {
1480 if (bh == NULL) {
1481 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
1482 __func__);
1483 return 1;
1484 }
1485 count = min((unsigned int)
1486 (bh->b_size - atomic_read(&bh->b_count)),
1487 (unsigned int)n);
1488 if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf,
1489 count))
1490 ret = 1;
1491 n -= count;
1492 atomic_add(count, &bh->b_count);
1493 buf += count;
1494 if (atomic_read(&bh->b_count) == bh->b_size) {
1495 bh = bh->b_reqnext;
1496 if (bh)
1497 atomic_set(&bh->b_count, 0);
1498 }
1499 }
1500 tape->bh = bh;
1501 return ret;
1502 }
1503
1504 static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
1505 int n)
1506 {
1507 struct idetape_bh *bh = tape->bh;
1508 int count;
1509 int ret = 0;
1510
1511 while (n) {
1512 if (bh == NULL) {
1513 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
1514 __func__);
1515 return 1;
1516 }
1517 count = min(tape->b_count, n);
1518 if (copy_to_user(buf, tape->b_data, count))
1519 ret = 1;
1520 n -= count;
1521 tape->b_data += count;
1522 tape->b_count -= count;
1523 buf += count;
1524 if (!tape->b_count) {
1525 bh = bh->b_reqnext;
1526 tape->bh = bh;
1527 if (bh) {
1528 tape->b_data = bh->b_data;
1529 tape->b_count = atomic_read(&bh->b_count);
1530 }
1531 }
1532 }
1533 return ret;
1534 }
1535
1536 static void idetape_init_merge_stage(idetape_tape_t *tape)
1537 {
1538 struct idetape_bh *bh = tape->merge_stage->bh;
1539
1540 tape->bh = bh;
1541 if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
1542 atomic_set(&bh->b_count, 0);
1543 else {
1544 tape->b_data = bh->b_data;
1545 tape->b_count = atomic_read(&bh->b_count);
1546 }
1547 }
1548
1549 /* Install a completion in a pending request and sleep until it is serviced. The
1550 * caller should ensure that the request will not be serviced before we install
1551 * the completion (usually by disabling interrupts).
1552 */
1553 static void idetape_wait_for_request(ide_drive_t *drive, struct request *rq)
1554 {
1555 DECLARE_COMPLETION_ONSTACK(wait);
1556 idetape_tape_t *tape = drive->driver_data;
1557
1558 if (rq == NULL || !blk_special_request(rq)) {
1559 printk(KERN_ERR "ide-tape: bug: Trying to sleep on non-valid"
1560 " request\n");
1561 return;
1562 }
1563 rq->end_io_data = &wait;
1564 rq->end_io = blk_end_sync_rq;
1565 spin_unlock_irq(&tape->lock);
1566 wait_for_completion(&wait);
1567 /* The stage and its struct request have been deallocated */
1568 spin_lock_irq(&tape->lock);
1569 }
1570
1571 static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive)
1572 {
1573 idetape_tape_t *tape = drive->driver_data;
1574 u8 *readpos = tape->pc->buf;
1575
1576 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1577
1578 if (!tape->pc->error) {
1579 debug_log(DBG_SENSE, "BOP - %s\n",
1580 (readpos[0] & 0x80) ? "Yes" : "No");
1581 debug_log(DBG_SENSE, "EOP - %s\n",
1582 (readpos[0] & 0x40) ? "Yes" : "No");
1583
1584 if (readpos[0] & 0x4) {
1585 printk(KERN_INFO "ide-tape: Block location is unknown "
1586 "to the tape\n");
1587 clear_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
1588 idetape_end_request(drive, 0, 0);
1589 } else {
1590 debug_log(DBG_SENSE, "Block Location - %u\n",
1591 be32_to_cpu(*(u32 *)&readpos[4]));
1592
1593 tape->partition = readpos[1];
1594 tape->first_frame =
1595 be32_to_cpu(*(u32 *)&readpos[4]);
1596 set_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
1597 idetape_end_request(drive, 1, 0);
1598 }
1599 } else {
1600 idetape_end_request(drive, 0, 0);
1601 }
1602 return ide_stopped;
1603 }
1604
1605 /*
1606 * Write a filemark if write_filemark=1. Flush the device buffers without
1607 * writing a filemark otherwise.
1608 */
1609 static void idetape_create_write_filemark_cmd(ide_drive_t *drive,
1610 struct ide_atapi_pc *pc, int write_filemark)
1611 {
1612 idetape_init_pc(pc);
1613 pc->c[0] = WRITE_FILEMARKS;
1614 pc->c[4] = write_filemark;
1615 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1616 pc->idetape_callback = &idetape_pc_callback;
1617 }
1618
1619 static void idetape_create_test_unit_ready_cmd(struct ide_atapi_pc *pc)
1620 {
1621 idetape_init_pc(pc);
1622 pc->c[0] = TEST_UNIT_READY;
1623 pc->idetape_callback = &idetape_pc_callback;
1624 }
1625
1626 /*
1627 * We add a special packet command request to the tail of the request queue, and
1628 * wait for it to be serviced. This is not to be called from within the request
1629 * handling part of the driver! Here we allocate data on the stack and it is
1630 * valid until the request is finished. This is not the case for the bottom part
1631 * of the driver, where we are always leaving the functions to wait for an
1632 * interrupt or a timer event.
1633 *
1634 * From the bottom part of the driver, we should allocate safe memory using
1635 * idetape_next_pc_storage() and idetape_next_rq_storage(), and add the request
1636 * to the request list without waiting for it to be serviced! In that case, we
1637 * usually use idetape_queue_pc_head().
1638 */
1639 static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
1640 {
1641 struct ide_tape_obj *tape = drive->driver_data;
1642 struct request rq;
1643
1644 idetape_init_rq(&rq, REQ_IDETAPE_PC1);
1645 rq.buffer = (char *) pc;
1646 rq.rq_disk = tape->disk;
1647 return ide_do_drive_cmd(drive, &rq, ide_wait);
1648 }
1649
1650 static void idetape_create_load_unload_cmd(ide_drive_t *drive,
1651 struct ide_atapi_pc *pc, int cmd)
1652 {
1653 idetape_init_pc(pc);
1654 pc->c[0] = START_STOP;
1655 pc->c[4] = cmd;
1656 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1657 pc->idetape_callback = &idetape_pc_callback;
1658 }
1659
1660 static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
1661 {
1662 idetape_tape_t *tape = drive->driver_data;
1663 struct ide_atapi_pc pc;
1664 int load_attempted = 0;
1665
1666 /* Wait for the tape to become ready */
1667 set_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
1668 timeout += jiffies;
1669 while (time_before(jiffies, timeout)) {
1670 idetape_create_test_unit_ready_cmd(&pc);
1671 if (!idetape_queue_pc_tail(drive, &pc))
1672 return 0;
1673 if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
1674 || (tape->asc == 0x3A)) {
1675 /* no media */
1676 if (load_attempted)
1677 return -ENOMEDIUM;
1678 idetape_create_load_unload_cmd(drive, &pc,
1679 IDETAPE_LU_LOAD_MASK);
1680 idetape_queue_pc_tail(drive, &pc);
1681 load_attempted = 1;
1682 /* not about to be ready */
1683 } else if (!(tape->sense_key == 2 && tape->asc == 4 &&
1684 (tape->ascq == 1 || tape->ascq == 8)))
1685 return -EIO;
1686 msleep(100);
1687 }
1688 return -EIO;
1689 }
1690
1691 static int idetape_flush_tape_buffers(ide_drive_t *drive)
1692 {
1693 struct ide_atapi_pc pc;
1694 int rc;
1695
1696 idetape_create_write_filemark_cmd(drive, &pc, 0);
1697 rc = idetape_queue_pc_tail(drive, &pc);
1698 if (rc)
1699 return rc;
1700 idetape_wait_ready(drive, 60 * 5 * HZ);
1701 return 0;
1702 }
1703
1704 static void idetape_create_read_position_cmd(struct ide_atapi_pc *pc)
1705 {
1706 idetape_init_pc(pc);
1707 pc->c[0] = READ_POSITION;
1708 pc->req_xfer = 20;
1709 pc->idetape_callback = &idetape_read_position_callback;
1710 }
1711
1712 static int idetape_read_position(ide_drive_t *drive)
1713 {
1714 idetape_tape_t *tape = drive->driver_data;
1715 struct ide_atapi_pc pc;
1716 int position;
1717
1718 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1719
1720 idetape_create_read_position_cmd(&pc);
1721 if (idetape_queue_pc_tail(drive, &pc))
1722 return -1;
1723 position = tape->first_frame;
1724 return position;
1725 }
1726
1727 static void idetape_create_locate_cmd(ide_drive_t *drive,
1728 struct ide_atapi_pc *pc,
1729 unsigned int block, u8 partition, int skip)
1730 {
1731 idetape_init_pc(pc);
1732 pc->c[0] = POSITION_TO_ELEMENT;
1733 pc->c[1] = 2;
1734 put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[3]);
1735 pc->c[8] = partition;
1736 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1737 pc->idetape_callback = &idetape_pc_callback;
1738 }
1739
1740 static int idetape_create_prevent_cmd(ide_drive_t *drive,
1741 struct ide_atapi_pc *pc, int prevent)
1742 {
1743 idetape_tape_t *tape = drive->driver_data;
1744
1745 /* device supports locking according to capabilities page */
1746 if (!(tape->caps[6] & 0x01))
1747 return 0;
1748
1749 idetape_init_pc(pc);
1750 pc->c[0] = ALLOW_MEDIUM_REMOVAL;
1751 pc->c[4] = prevent;
1752 pc->idetape_callback = &idetape_pc_callback;
1753 return 1;
1754 }
1755
1756 static int __idetape_discard_read_pipeline(ide_drive_t *drive)
1757 {
1758 idetape_tape_t *tape = drive->driver_data;
1759 unsigned long flags;
1760 int cnt;
1761
1762 if (tape->chrdev_dir != IDETAPE_DIR_READ)
1763 return 0;
1764
1765 /* Remove merge stage. */
1766 cnt = tape->merge_stage_size / tape->blk_size;
1767 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
1768 ++cnt; /* Filemarks count as 1 sector */
1769 tape->merge_stage_size = 0;
1770 if (tape->merge_stage != NULL) {
1771 __idetape_kfree_stage(tape->merge_stage);
1772 tape->merge_stage = NULL;
1773 }
1774
1775 /* Clear pipeline flags. */
1776 clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
1777 tape->chrdev_dir = IDETAPE_DIR_NONE;
1778
1779 /* Remove pipeline stages. */
1780 if (tape->first_stage == NULL)
1781 return 0;
1782
1783 spin_lock_irqsave(&tape->lock, flags);
1784 tape->next_stage = NULL;
1785 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags))
1786 idetape_wait_for_request(drive, tape->active_data_rq);
1787 spin_unlock_irqrestore(&tape->lock, flags);
1788
1789 while (tape->first_stage != NULL) {
1790 struct request *rq_ptr = &tape->first_stage->rq;
1791
1792 cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
1793 if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
1794 ++cnt;
		__idetape_remove_stage_head(drive);
1795 }
1796 tape->nr_pending_stages = 0;
1797 tape->max_stages = tape->min_pipeline;
1798 return cnt;
1799 }
1800
1801 /*
1802 * Position the tape to the requested block using the LOCATE packet command.
1803 * A READ POSITION command is then issued to check where we are positioned. Like
1804 * all higher level operations, we queue the commands at the tail of the request
1805 * queue and wait for their completion.
1806 */
1807 static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
1808 u8 partition, int skip)
1809 {
1810 idetape_tape_t *tape = drive->driver_data;
1811 int retval;
1812 struct ide_atapi_pc pc;
1813
1814 if (tape->chrdev_dir == IDETAPE_DIR_READ)
1815 __idetape_discard_read_pipeline(drive);
1816 idetape_wait_ready(drive, 60 * 5 * HZ);
1817 idetape_create_locate_cmd(drive, &pc, block, partition, skip);
1818 retval = idetape_queue_pc_tail(drive, &pc);
1819 if (retval)
1820 return (retval);
1821
1822 idetape_create_read_position_cmd(&pc);
1823 return (idetape_queue_pc_tail(drive, &pc));
1824 }
1825
1826 static void idetape_discard_read_pipeline(ide_drive_t *drive,
1827 int restore_position)
1828 {
1829 idetape_tape_t *tape = drive->driver_data;
1830 int cnt;
1831 int seek, position;
1832
1833 cnt = __idetape_discard_read_pipeline(drive);
1834 if (restore_position) {
1835 position = idetape_read_position(drive);
1836 seek = position > cnt ? position - cnt : 0;
1837 if (idetape_position_tape(drive, seek, 0, 0)) {
1838 printk(KERN_INFO "ide-tape: %s: position_tape failed in"
1839 " discard_pipeline()\n", tape->name);
1840 return;
1841 }
1842 }
1843 }
1844
1845 /*
1846 * Generate a read/write request for the block device interface and wait for it
1847 * to be serviced.
1848 */
1849 static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
1850 struct idetape_bh *bh)
1851 {
1852 idetape_tape_t *tape = drive->driver_data;
1853 struct request rq;
1854
1855 debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
1856
1857 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
1858 printk(KERN_ERR "ide-tape: bug: the pipeline is active in %s\n",
1859 __func__);
1860 return (0);
1861 }
1862
1863 idetape_init_rq(&rq, cmd);
1864 rq.rq_disk = tape->disk;
1865 rq.special = (void *)bh;
1866 rq.sector = tape->first_frame;
1867 rq.nr_sectors = blocks;
1868 rq.current_nr_sectors = blocks;
1869 (void) ide_do_drive_cmd(drive, &rq, ide_wait);
1870
1871 if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
1872 return 0;
1873
1874 if (tape->merge_stage)
1875 idetape_init_merge_stage(tape);
1876 if (rq.errors == IDETAPE_ERROR_GENERAL)
1877 return -EIO;
1878 return (tape->blk_size * (blocks-rq.current_nr_sectors));
1879 }
1880
1881 /* start servicing the pipeline stages, starting from tape->next_stage. */
1882 static void idetape_plug_pipeline(ide_drive_t *drive)
1883 {
1884 idetape_tape_t *tape = drive->driver_data;
1885
1886 if (tape->next_stage == NULL)
1887 return;
1888 if (!test_and_set_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
1889 idetape_activate_next_stage(drive);
1890 (void) ide_do_drive_cmd(drive, tape->active_data_rq, ide_end);
1891 }
1892 }
1893
1894 static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc)
1895 {
1896 idetape_init_pc(pc);
1897 pc->c[0] = INQUIRY;
1898 pc->c[4] = 254;
1899 pc->req_xfer = 254;
1900 pc->idetape_callback = &idetape_pc_callback;
1901 }
1902
1903 static void idetape_create_rewind_cmd(ide_drive_t *drive,
1904 struct ide_atapi_pc *pc)
1905 {
1906 idetape_init_pc(pc);
1907 pc->c[0] = REZERO_UNIT;
1908 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1909 pc->idetape_callback = &idetape_pc_callback;
1910 }
1911
1912 static void idetape_create_erase_cmd(struct ide_atapi_pc *pc)
1913 {
1914 idetape_init_pc(pc);
1915 pc->c[0] = ERASE;
1916 pc->c[1] = 1;
1917 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1918 pc->idetape_callback = &idetape_pc_callback;
1919 }
1920
1921 static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
1922 {
1923 idetape_init_pc(pc);
1924 pc->c[0] = SPACE;
1925 put_unaligned(cpu_to_be32(count), (unsigned int *) &pc->c[1]);
1926 pc->c[1] = cmd;
1927 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1928 pc->idetape_callback = &idetape_pc_callback;
1929 }
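
/*
 * Note on the two writes above: the 32-bit big-endian count first lands in
 * c[1..4]; overwriting c[1] with the space code afterwards leaves the low
 * 24 bits of the count in c[2..4], matching the SPACE command layout (code
 * byte followed by a 3-byte count).
 */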
1930
1931 /* Queue up a character device originated write request. */
1932 static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
1933 {
1934 idetape_tape_t *tape = drive->driver_data;
1935 unsigned long flags;
1936
1937 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
1938
1939 /* Attempt to allocate a new stage. Beware possible race conditions. */
1940 while (1) {
1941 spin_lock_irqsave(&tape->lock, flags);
1942 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
1943 idetape_wait_for_request(drive, tape->active_data_rq);
1944 spin_unlock_irqrestore(&tape->lock, flags);
1945 } else {
1946 spin_unlock_irqrestore(&tape->lock, flags);
1947 idetape_plug_pipeline(drive);
1948 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
1949 &tape->flags))
1950 continue;
1951 return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
1952 blocks, tape->merge_stage->bh);
1953 }
1954 }
1955 }
1956
1957 /*
1958 * Wait until all pending pipeline requests are serviced. Typically called on
1959 * device close.
1960 */
1961 static void idetape_wait_for_pipeline(ide_drive_t *drive)
1962 {
1963 idetape_tape_t *tape = drive->driver_data;
1964 unsigned long flags;
1965
1966 while (tape->next_stage || test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
1967 &tape->flags)) {
1968 idetape_plug_pipeline(drive);
1969 spin_lock_irqsave(&tape->lock, flags);
1970 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags))
1971 idetape_wait_for_request(drive, tape->active_data_rq);
1972 spin_unlock_irqrestore(&tape->lock, flags);
1973 }
1974 }
1975
1976 static void idetape_empty_write_pipeline(ide_drive_t *drive)
1977 {
1978 idetape_tape_t *tape = drive->driver_data;
1979 int blocks, min;
1980 struct idetape_bh *bh;
1981
1982 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
1983 printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline,"
1984 " but we are not writing.\n");
1985 return;
1986 }
1987 if (tape->merge_stage_size > tape->stage_size) {
1988 printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n");
1989 tape->merge_stage_size = tape->stage_size;
1990 }
1991 if (tape->merge_stage_size) {
1992 blocks = tape->merge_stage_size / tape->blk_size;
1993 if (tape->merge_stage_size % tape->blk_size) {
1994 unsigned int i;
1995
1996 blocks++;
1997 i = tape->blk_size - tape->merge_stage_size %
1998 tape->blk_size;
1999 bh = tape->bh->b_reqnext;
2000 while (bh) {
2001 atomic_set(&bh->b_count, 0);
2002 bh = bh->b_reqnext;
2003 }
2004 bh = tape->bh;
2005 while (i) {
2006 if (bh == NULL) {
2007 printk(KERN_INFO "ide-tape: bug,"
2008 " bh NULL\n");
2009 break;
2010 }
2011 min = min(i, (unsigned int)(bh->b_size -
2012 atomic_read(&bh->b_count)));
2013 memset(bh->b_data + atomic_read(&bh->b_count),
2014 0, min);
2015 atomic_add(min, &bh->b_count);
2016 i -= min;
2017 bh = bh->b_reqnext;
2018 }
2019 }
2020 (void) idetape_add_chrdev_write_request(drive, blocks);
2021 tape->merge_stage_size = 0;
2022 }
2023 idetape_wait_for_pipeline(drive);
2024 if (tape->merge_stage != NULL) {
2025 __idetape_kfree_stage(tape->merge_stage);
2026 tape->merge_stage = NULL;
2027 }
2028 clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
2029 tape->chrdev_dir = IDETAPE_DIR_NONE;
2030
2031 /*
2032 * On the next backup, perform the feedback loop again. (I don't want to
2033 * keep sense information between backups, as some systems are
2034 * constantly on, and the system load can be totally different on the
2035 * next backup).
2036 */
2037 tape->max_stages = tape->min_pipeline;
2038 if (tape->first_stage != NULL ||
2039 tape->next_stage != NULL ||
2040 tape->last_stage != NULL ||
2041 tape->nr_stages != 0) {
2042 printk(KERN_ERR "ide-tape: ide-tape pipeline bug, "
2043 "first_stage %p, next_stage %p, "
2044 "last_stage %p, nr_stages %d\n",
2045 tape->first_stage, tape->next_stage,
2046 tape->last_stage, tape->nr_stages);
2047 }
2048 }
2049
2050 static int idetape_init_read(ide_drive_t *drive, int max_stages)
2051 {
2052 idetape_tape_t *tape = drive->driver_data;
2053 int bytes_read;
2054
2055 /* Initialize read operation */
2056 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
2057 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
2058 idetape_empty_write_pipeline(drive);
2059 idetape_flush_tape_buffers(drive);
2060 }
2061 if (tape->merge_stage || tape->merge_stage_size) {
2062 printk(KERN_ERR "ide-tape: merge_stage_size should be"
2063 " 0 now\n");
2064 tape->merge_stage_size = 0;
2065 }
2066 tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
2067 if (!tape->merge_stage)
2068 return -ENOMEM;
2069 tape->chrdev_dir = IDETAPE_DIR_READ;
2070
2071 /*
2072 * Issue a read 0 command to ensure that DSC handshake is
2073 * switched from completion mode to buffer available mode.
2074 * No point in issuing this if DSC overlap isn't supported, some
2075 * drives (Seagate STT3401A) will return an error.
2076 */
2077 if (drive->dsc_overlap) {
2078 bytes_read = idetape_queue_rw_tail(drive,
2079 REQ_IDETAPE_READ, 0,
2080 tape->merge_stage->bh);
2081 if (bytes_read < 0) {
2082 __idetape_kfree_stage(tape->merge_stage);
2083 tape->merge_stage = NULL;
2084 tape->chrdev_dir = IDETAPE_DIR_NONE;
2085 return bytes_read;
2086 }
2087 }
2088 }
2089
2090 if (!test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
2091 if (tape->nr_pending_stages >= 3 * max_stages / 4) {
2092 tape->measure_insert_time = 1;
2093 tape->insert_time = jiffies;
2094 tape->insert_size = 0;
2095 tape->insert_speed = 0;
2096 idetape_plug_pipeline(drive);
2097 }
2098 }
2099 return 0;
2100 }
2101
2102 /*
2103 * Called from idetape_chrdev_read() to service a character device read request
2104 * and add read-ahead requests to our pipeline.
2105 */
2106 static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
2107 {
2108 idetape_tape_t *tape = drive->driver_data;
2109
2110 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
2111
2112 /* If we are at a filemark, return a read length of 0 */
2113 if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
2114 return 0;
2115
2116 idetape_init_read(drive, tape->max_stages);
2117
2118 if (test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
2119 return 0;
2120
2121 return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
2122 tape->merge_stage->bh);
2123 }
2124
2125 static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
2126 {
2127 idetape_tape_t *tape = drive->driver_data;
2128 struct idetape_bh *bh;
2129 int blocks;
2130
2131 while (bcount) {
2132 unsigned int count;
2133
2134 bh = tape->merge_stage->bh;
2135 count = min(tape->stage_size, bcount);
2136 bcount -= count;
2137 blocks = count / tape->blk_size;
2138 while (count) {
2139 atomic_set(&bh->b_count,
2140 min(count, (unsigned int)bh->b_size));
2141 memset(bh->b_data, 0, atomic_read(&bh->b_count));
2142 count -= atomic_read(&bh->b_count);
2143 bh = bh->b_reqnext;
2144 }
2145 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
2146 tape->merge_stage->bh);
2147 }
2148 }
2149
2150 /*
2151 * Rewinds the tape to the Beginning Of the current Partition (BOP). We
2152 * currently support only one partition.
2153 */
2154 static int idetape_rewind_tape(ide_drive_t *drive)
2155 {
2156 int retval;
2157 struct ide_atapi_pc pc;
2158 idetape_tape_t *tape;
2159 tape = drive->driver_data;
2160
2161 debug_log(DBG_SENSE, "Enter %s\n", __func__);
2162
2163 idetape_create_rewind_cmd(drive, &pc);
2164 retval = idetape_queue_pc_tail(drive, &pc);
2165 if (retval)
2166 return retval;
2167
2168 idetape_create_read_position_cmd(&pc);
2169 retval = idetape_queue_pc_tail(drive, &pc);
2170 if (retval)
2171 return retval;
2172 return 0;
2173 }
2174
2175 /* mtio.h compatible commands should be issued to the chrdev interface. */
2176 static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
2177 unsigned long arg)
2178 {
2179 idetape_tape_t *tape = drive->driver_data;
2180 void __user *argp = (void __user *)arg;
2181
2182 struct idetape_config {
2183 int dsc_rw_frequency;
2184 int dsc_media_access_frequency;
2185 int nr_stages;
2186 } config;
2187
2188 debug_log(DBG_PROCS, "Enter %s\n", __func__);
2189
2190 switch (cmd) {
2191 case 0x0340:
2192 if (copy_from_user(&config, argp, sizeof(config)))
2193 return -EFAULT;
2194 tape->best_dsc_rw_freq = config.dsc_rw_frequency;
2195 tape->max_stages = config.nr_stages;
2196 break;
2197 case 0x0350:
2198 config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
2199 config.nr_stages = tape->max_stages;
2200 if (copy_to_user(argp, &config, sizeof(config)))
2201 return -EFAULT;
2202 break;
2203 default:
2204 return -EIO;
2205 }
2206 return 0;
2207 }
2208
2209 static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
2210 int mt_count)
2211 {
2212 idetape_tape_t *tape = drive->driver_data;
2213 struct ide_atapi_pc pc;
2214 int retval, count = 0;
2215 int sprev = !!(tape->caps[4] & 0x20);
2216
2217 if (mt_count == 0)
2218 return 0;
2219 if (MTBSF == mt_op || MTBSFM == mt_op) {
2220 if (!sprev)
2221 return -EIO;
2222 mt_count = -mt_count;
2223 }
2224
2225 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
2226 tape->merge_stage_size = 0;
2227 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
2228 ++count;
2229 idetape_discard_read_pipeline(drive, 0);
2230 }
2231
2232 /*
2233 * The filemark was not found in our internal pipeline; now we can issue
2234 * the space command.
2235 */
2236 switch (mt_op) {
2237 case MTFSF:
2238 case MTBSF:
2239 idetape_create_space_cmd(&pc, mt_count - count,
2240 IDETAPE_SPACE_OVER_FILEMARK);
2241 return idetape_queue_pc_tail(drive, &pc);
2242 case MTFSFM:
2243 case MTBSFM:
2244 if (!sprev)
2245 return -EIO;
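		/*
		 * MTFSFM/MTBSFM are emulated here: first space over the
		 * filemarks with MTFSF, then space one filemark back (MTFSFM)
		 * or forward (MTBSFM) so the tape is left on the BOT resp.
		 * EOT side of the filemark, as mtio.h specifies.
		 */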
2246 retval = idetape_space_over_filemarks(drive, MTFSF,
2247 mt_count - count);
2248 if (retval)
2249 return retval;
2250 count = (MTBSFM == mt_op ? 1 : -1);
2251 return idetape_space_over_filemarks(drive, MTFSF, count);
2252 default:
2253 printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
2254 mt_op);
2255 return -EIO;
2256 }
2257 }
2258
2259 /*
2260 * Our character device read / write functions.
2261 *
2262 * The tape is optimized to maximize throughput when it is transferring an
2263 * integral multiple of the "continuous transfer limit", which is a parameter of
2264 * the specific tape (26kB on my particular tape, 32kB for Onstream).
2265 *
2266 * As of version 1.3 of the driver, the character device provides an abstract
2267 * continuous view of the media - any mix of block sizes (even 1 byte) on the
2268 * same backup/restore procedure is supported. The driver will internally
2269 * convert the requests to the recommended transfer unit, so that a mismatch
2270 * between the user's block size and the recommended size only results in a
2271 * slight increase in driver overhead, without hurting performance.
2272 * This is not applicable to Onstream.
2273 */
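
/*
 * A worked example of that repackaging (figures for illustration only): with
 * blk_size = 512 and a reported ctl (caps[12]) of 52, stage_size is
 * 52 * 512 = 26624 bytes, i.e. the 26kB mentioned above; user reads and
 * writes of any size are gathered into requests of that size before being
 * queued to the drive.
 */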
2274 static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
2275 size_t count, loff_t *ppos)
2276 {
2277 struct ide_tape_obj *tape = ide_tape_f(file);
2278 ide_drive_t *drive = tape->drive;
2279 ssize_t bytes_read, temp, actually_read = 0, rc;
2280 ssize_t ret = 0;
2281 u16 ctl = *(u16 *)&tape->caps[12];
2282
2283 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
2284
2285 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
2286 if (test_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags))
2287 if (count > tape->blk_size &&
2288 (count % tape->blk_size) == 0)
2289 tape->user_bs_factor = count / tape->blk_size;
2290 }
2291 rc = idetape_init_read(drive, tape->max_stages);
2292 if (rc < 0)
2293 return rc;
2294 if (count == 0)
2295 return (0);
2296 if (tape->merge_stage_size) {
2297 actually_read = min((unsigned int)(tape->merge_stage_size),
2298 (unsigned int)count);
2299 if (idetape_copy_stage_to_user(tape, buf, actually_read))
2300 ret = -EFAULT;
2301 buf += actually_read;
2302 tape->merge_stage_size -= actually_read;
2303 count -= actually_read;
2304 }
2305 while (count >= tape->stage_size) {
2306 bytes_read = idetape_add_chrdev_read_request(drive, ctl);
2307 if (bytes_read <= 0)
2308 goto finish;
2309 if (idetape_copy_stage_to_user(tape, buf, bytes_read))
2310 ret = -EFAULT;
2311 buf += bytes_read;
2312 count -= bytes_read;
2313 actually_read += bytes_read;
2314 }
2315 if (count) {
2316 bytes_read = idetape_add_chrdev_read_request(drive, ctl);
2317 if (bytes_read <= 0)
2318 goto finish;
2319 temp = min((unsigned long)count, (unsigned long)bytes_read);
2320 if (idetape_copy_stage_to_user(tape, buf, temp))
2321 ret = -EFAULT;
2322 actually_read += temp;
2323 tape->merge_stage_size = bytes_read-temp;
2324 }
2325 finish:
2326 if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) {
2327 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
2328
2329 idetape_space_over_filemarks(drive, MTFSF, 1);
2330 return 0;
2331 }
2332
2333 return ret ? ret : actually_read;
2334 }
2335
2336 static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
2337 size_t count, loff_t *ppos)
2338 {
2339 struct ide_tape_obj *tape = ide_tape_f(file);
2340 ide_drive_t *drive = tape->drive;
2341 ssize_t actually_written = 0;
2342 ssize_t ret = 0;
2343 u16 ctl = *(u16 *)&tape->caps[12];
2344
2345 /* The drive is write protected. */
2346 if (tape->write_prot)
2347 return -EACCES;
2348
2349 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
2350
2351 /* Initialize write operation */
2352 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
2353 if (tape->chrdev_dir == IDETAPE_DIR_READ)
2354 idetape_discard_read_pipeline(drive, 1);
2355 if (tape->merge_stage || tape->merge_stage_size) {
2356 printk(KERN_ERR "ide-tape: merge_stage_size "
2357 "should be 0 now\n");
2358 tape->merge_stage_size = 0;
2359 }
2360 tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
2361 if (!tape->merge_stage)
2362 return -ENOMEM;
2363 tape->chrdev_dir = IDETAPE_DIR_WRITE;
2364 idetape_init_merge_stage(tape);
2365
2366 /*
2367 * Issue a write 0 command to ensure that DSC handshake is
2368 * switched from completion mode to buffer available mode. No
2369 * point in issuing this if DSC overlap isn't supported, some
2370 * drives (Seagate STT3401A) will return an error.
2371 */
2372 if (drive->dsc_overlap) {
2373 ssize_t retval = idetape_queue_rw_tail(drive,
2374 REQ_IDETAPE_WRITE, 0,
2375 tape->merge_stage->bh);
2376 if (retval < 0) {
2377 __idetape_kfree_stage(tape->merge_stage);
2378 tape->merge_stage = NULL;
2379 tape->chrdev_dir = IDETAPE_DIR_NONE;
2380 return retval;
2381 }
2382 }
2383 }
2384 if (count == 0)
2385 return (0);
2386 if (tape->merge_stage_size) {
2387 if (tape->merge_stage_size >= tape->stage_size) {
2388 printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
2389 tape->merge_stage_size = 0;
2390 }
2391 actually_written = min((unsigned int)
2392 (tape->stage_size - tape->merge_stage_size),
2393 (unsigned int)count);
2394 if (idetape_copy_stage_from_user(tape, buf, actually_written))
2395 ret = -EFAULT;
2396 buf += actually_written;
2397 tape->merge_stage_size += actually_written;
2398 count -= actually_written;
2399
2400 if (tape->merge_stage_size == tape->stage_size) {
2401 ssize_t retval;
2402 tape->merge_stage_size = 0;
2403 retval = idetape_add_chrdev_write_request(drive, ctl);
2404 if (retval <= 0)
2405 return (retval);
2406 }
2407 }
2408 while (count >= tape->stage_size) {
2409 ssize_t retval;
2410 if (idetape_copy_stage_from_user(tape, buf, tape->stage_size))
2411 ret = -EFAULT;
2412 buf += tape->stage_size;
2413 count -= tape->stage_size;
2414 retval = idetape_add_chrdev_write_request(drive, ctl);
2415 actually_written += tape->stage_size;
2416 if (retval <= 0)
2417 return (retval);
2418 }
2419 if (count) {
2420 actually_written += count;
2421 if (idetape_copy_stage_from_user(tape, buf, count))
2422 ret = -EFAULT;
2423 tape->merge_stage_size += count;
2424 }
2425 return ret ? ret : actually_written;
2426 }
2427
2428 static int idetape_write_filemark(ide_drive_t *drive)
2429 {
2430 struct ide_atapi_pc pc;
2431
2432 /* Write a filemark */
2433 idetape_create_write_filemark_cmd(drive, &pc, 1);
2434 if (idetape_queue_pc_tail(drive, &pc)) {
2435 printk(KERN_ERR "ide-tape: Couldn't write a filemark\n");
2436 return -EIO;
2437 }
2438 return 0;
2439 }
2440
2441 /*
2442 * Called from idetape_chrdev_ioctl when the general mtio MTIOCTOP ioctl is
2443 * requested.
2444 *
2445 * Note: MTBSF and MTBSFM are not supported when the tape doesn't support
2446 * spacing over filemarks in the reverse direction. In this case, MTFSFM is also
2447 * usually not supported (it is supported in the rare case in which we crossed
2448 * the filemark during our read-ahead pipelined operation mode).
2449 *
2450 * The following commands are currently not supported:
2451 *
2452 * MTFSS, MTBSS, MTWSM, MTSETDENSITY, MTSETDRVBUFFER, MT_ST_BOOLEANS,
2453 * MT_ST_WRITE_THRESHOLD.
2454 */
2455 static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2456 {
2457 idetape_tape_t *tape = drive->driver_data;
2458 struct ide_atapi_pc pc;
2459 int i, retval;
2460
2461 debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n",
2462 mt_op, mt_count);
2463
2464 /* Commands which need our pipelined read-ahead stages. */
2465 switch (mt_op) {
2466 case MTFSF:
2467 case MTFSFM:
2468 case MTBSF:
2469 case MTBSFM:
2470 if (!mt_count)
2471 return 0;
2472 return idetape_space_over_filemarks(drive, mt_op, mt_count);
2473 default:
2474 break;
2475 }
2476
2477 switch (mt_op) {
2478 case MTWEOF:
2479 if (tape->write_prot)
2480 return -EACCES;
2481 idetape_discard_read_pipeline(drive, 1);
2482 for (i = 0; i < mt_count; i++) {
2483 retval = idetape_write_filemark(drive);
2484 if (retval)
2485 return retval;
2486 }
2487 return 0;
2488 case MTREW:
2489 idetape_discard_read_pipeline(drive, 0);
2490 if (idetape_rewind_tape(drive))
2491 return -EIO;
2492 return 0;
2493 case MTLOAD:
2494 idetape_discard_read_pipeline(drive, 0);
2495 idetape_create_load_unload_cmd(drive, &pc,
2496 IDETAPE_LU_LOAD_MASK);
2497 return idetape_queue_pc_tail(drive, &pc);
2498 case MTUNLOAD:
2499 case MTOFFL:
2500 /*
2501 * If door is locked, attempt to unlock before
2502 * attempting to eject.
2503 */
2504 if (tape->door_locked) {
2505 if (idetape_create_prevent_cmd(drive, &pc, 0))
2506 if (!idetape_queue_pc_tail(drive, &pc))
2507 tape->door_locked = DOOR_UNLOCKED;
2508 }
2509 idetape_discard_read_pipeline(drive, 0);
2510 idetape_create_load_unload_cmd(drive, &pc,
2511 !IDETAPE_LU_LOAD_MASK);
2512 retval = idetape_queue_pc_tail(drive, &pc);
2513 if (!retval)
2514 clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
2515 return retval;
2516 case MTNOP:
2517 idetape_discard_read_pipeline(drive, 0);
2518 return idetape_flush_tape_buffers(drive);
2519 case MTRETEN:
2520 idetape_discard_read_pipeline(drive, 0);
2521 idetape_create_load_unload_cmd(drive, &pc,
2522 IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
2523 return idetape_queue_pc_tail(drive, &pc);
2524 case MTEOM:
2525 idetape_create_space_cmd(&pc, 0, IDETAPE_SPACE_TO_EOD);
2526 return idetape_queue_pc_tail(drive, &pc);
2527 case MTERASE:
2528 (void)idetape_rewind_tape(drive);
2529 idetape_create_erase_cmd(&pc);
2530 return idetape_queue_pc_tail(drive, &pc);
2531 case MTSETBLK:
2532 if (mt_count) {
2533 if (mt_count < tape->blk_size ||
2534 mt_count % tape->blk_size)
2535 return -EIO;
2536 tape->user_bs_factor = mt_count / tape->blk_size;
2537 clear_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
2538 } else
2539 set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
2540 return 0;
2541 case MTSEEK:
2542 idetape_discard_read_pipeline(drive, 0);
2543 return idetape_position_tape(drive,
2544 mt_count * tape->user_bs_factor, tape->partition, 0);
2545 case MTSETPART:
2546 idetape_discard_read_pipeline(drive, 0);
2547 return idetape_position_tape(drive, 0, mt_count, 0);
2548 case MTFSR:
2549 case MTBSR:
2550 case MTLOCK:
2551 if (!idetape_create_prevent_cmd(drive, &pc, 1))
2552 return 0;
2553 retval = idetape_queue_pc_tail(drive, &pc);
2554 if (retval)
2555 return retval;
2556 tape->door_locked = DOOR_EXPLICITLY_LOCKED;
2557 return 0;
2558 case MTUNLOCK:
2559 if (!idetape_create_prevent_cmd(drive, &pc, 0))
2560 return 0;
2561 retval = idetape_queue_pc_tail(drive, &pc);
2562 if (retval)
2563 return retval;
2564 tape->door_locked = DOOR_UNLOCKED;
2565 return 0;
2566 default:
2567 printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
2568 mt_op);
2569 return -EIO;
2570 }
2571 }
2572
2573 /*
2574 * Our character device ioctls. General mtio.h magnetic io commands are
2575 * supported here, and not in the corresponding block interface. Our own
2576 * ide-tape ioctls are supported on both interfaces.
2577 */
2578 static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
2579 unsigned int cmd, unsigned long arg)
2580 {
2581 struct ide_tape_obj *tape = ide_tape_f(file);
2582 ide_drive_t *drive = tape->drive;
2583 struct mtop mtop;
2584 struct mtget mtget;
2585 struct mtpos mtpos;
2586 int block_offset = 0, position = tape->first_frame;
2587 void __user *argp = (void __user *)arg;
2588
2589 debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd);
2590
2591 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
2592 idetape_empty_write_pipeline(drive);
2593 idetape_flush_tape_buffers(drive);
2594 }
2595 if (cmd == MTIOCGET || cmd == MTIOCPOS) {
2596 idetape_wait_for_pipeline(drive);
2597 block_offset = tape->merge_stage_size /
2598 (tape->blk_size * tape->user_bs_factor);
2599 position = idetape_read_position(drive);
2600 if (position < 0)
2601 return -EIO;
2602 }
2603 switch (cmd) {
2604 case MTIOCTOP:
2605 if (copy_from_user(&mtop, argp, sizeof(struct mtop)))
2606 return -EFAULT;
2607 return idetape_mtioctop(drive, mtop.mt_op, mtop.mt_count);
2608 case MTIOCGET:
2609 memset(&mtget, 0, sizeof(struct mtget));
2610 mtget.mt_type = MT_ISSCSI2;
2611 mtget.mt_blkno = position / tape->user_bs_factor - block_offset;
2612 mtget.mt_dsreg =
2613 ((tape->blk_size * tape->user_bs_factor)
2614 << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK;
2615
2616 if (tape->drv_write_prot)
2617 mtget.mt_gstat |= GMT_WR_PROT(0xffffffff);
2618
2619 if (copy_to_user(argp, &mtget, sizeof(struct mtget)))
2620 return -EFAULT;
2621 return 0;
2622 case MTIOCPOS:
2623 mtpos.mt_blkno = position / tape->user_bs_factor - block_offset;
2624 if (copy_to_user(argp, &mtpos, sizeof(struct mtpos)))
2625 return -EFAULT;
2626 return 0;
2627 default:
2628 if (tape->chrdev_dir == IDETAPE_DIR_READ)
2629 idetape_discard_read_pipeline(drive, 1);
2630 return idetape_blkdev_ioctl(drive, cmd, arg);
2631 }
2632 }
2633
2634 /*
2635 * Do a mode sense page 0 with block descriptor and if it succeeds set the tape
2636 * block size with the reported value.
2637 */
2638 static void ide_tape_get_bsize_from_bdesc(ide_drive_t *drive)
2639 {
2640 idetape_tape_t *tape = drive->driver_data;
2641 struct ide_atapi_pc pc;
2642
2643 idetape_create_mode_sense_cmd(&pc, IDETAPE_BLOCK_DESCRIPTOR);
2644 if (idetape_queue_pc_tail(drive, &pc)) {
2645 printk(KERN_ERR "ide-tape: Can't get block descriptor\n");
2646 if (tape->blk_size == 0) {
2647 printk(KERN_WARNING "ide-tape: Cannot deal with zero "
2648 "block size, assuming 32k\n");
2649 tape->blk_size = 32768;
2650 }
2651 return;
2652 }
2653 tape->blk_size = (pc.buf[4 + 5] << 16) +
2654 (pc.buf[4 + 6] << 8) +
2655 pc.buf[4 + 7];
2656 tape->drv_write_prot = (pc.buf[2] & 0x80) >> 7;
2657 }
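
/*
 * For illustration (made-up data): the block length sits in bytes 5..7 of
 * the block descriptor, which starts at pc.buf[4] here, so a descriptor
 * containing ... 00 02 00 decodes as (0x00 << 16) + (0x02 << 8) + 0x00 =
 * 512 bytes per block; bit 7 of pc.buf[2] carries the write-protect status.
 */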
2658
2659 static int idetape_chrdev_open(struct inode *inode, struct file *filp)
2660 {
2661 unsigned int minor = iminor(inode), i = minor & ~0xc0;
2662 ide_drive_t *drive;
2663 idetape_tape_t *tape;
2664 struct ide_atapi_pc pc;
2665 int retval;
2666
2667 if (i >= MAX_HWIFS * MAX_DRIVES)
2668 return -ENXIO;
2669
2670 tape = ide_tape_chrdev_get(i);
2671 if (!tape)
2672 return -ENXIO;
2673
2674 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
2675
2676 /*
2677 * We really want to do nonseekable_open(inode, filp); here, but some
2678 * versions of tar incorrectly call lseek on tapes and bail out if that
2679 * fails. So we disallow pread() and pwrite(), but permit lseeks.
2680 */
2681 filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
2682
2683 drive = tape->drive;
2684
2685 filp->private_data = tape;
2686
2687 if (test_and_set_bit(IDETAPE_FLAG_BUSY, &tape->flags)) {
2688 retval = -EBUSY;
2689 goto out_put_tape;
2690 }
2691
2692 retval = idetape_wait_ready(drive, 60 * HZ);
2693 if (retval) {
2694 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
2695 printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
2696 goto out_put_tape;
2697 }
2698
2699 idetape_read_position(drive);
2700 if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags))
2701 (void)idetape_rewind_tape(drive);
2702
2703 if (tape->chrdev_dir != IDETAPE_DIR_READ)
2704 clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
2705
2706 /* Read block size and write protect status from drive. */
2707 ide_tape_get_bsize_from_bdesc(drive);
2708
2709 /* Set write protect flag if device is opened as read-only. */
2710 if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
2711 tape->write_prot = 1;
2712 else
2713 tape->write_prot = tape->drv_write_prot;
2714
2715 /* Make sure drive isn't write protected if user wants to write. */
2716 if (tape->write_prot) {
2717 if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
2718 (filp->f_flags & O_ACCMODE) == O_RDWR) {
2719 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
2720 retval = -EROFS;
2721 goto out_put_tape;
2722 }
2723 }
2724
2725 /* Lock the tape drive door so user can't eject. */
2726 if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
2727 if (idetape_create_prevent_cmd(drive, &pc, 1)) {
2728 if (!idetape_queue_pc_tail(drive, &pc)) {
2729 if (tape->door_locked != DOOR_EXPLICITLY_LOCKED)
2730 tape->door_locked = DOOR_LOCKED;
2731 }
2732 }
2733 }
2734 return 0;
2735
2736 out_put_tape:
2737 ide_tape_put(tape);
2738 return retval;
2739 }
2740
2741 static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
2742 {
2743 idetape_tape_t *tape = drive->driver_data;
2744
2745 idetape_empty_write_pipeline(drive);
2746 tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0);
2747 if (tape->merge_stage != NULL) {
2748 idetape_pad_zeros(drive, tape->blk_size *
2749 (tape->user_bs_factor - 1));
2750 __idetape_kfree_stage(tape->merge_stage);
2751 tape->merge_stage = NULL;
2752 }
2753 idetape_write_filemark(drive);
2754 idetape_flush_tape_buffers(drive);
2755 idetape_flush_tape_buffers(drive);
2756 }
2757
2758 static int idetape_chrdev_release(struct inode *inode, struct file *filp)
2759 {
2760 struct ide_tape_obj *tape = ide_tape_f(filp);
2761 ide_drive_t *drive = tape->drive;
2762 struct ide_atapi_pc pc;
2763 unsigned int minor = iminor(inode);
2764
2765 lock_kernel();
2766 tape = drive->driver_data;
2767
2768 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
2769
2770 if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
2771 idetape_write_release(drive, minor);
2772 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
2773 if (minor < 128)
2774 idetape_discard_read_pipeline(drive, 1);
2775 else
2776 idetape_wait_for_pipeline(drive);
2777 }
2778
2779 if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags))
2780 (void) idetape_rewind_tape(drive);
2781 if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
2782 if (tape->door_locked == DOOR_LOCKED) {
2783 if (idetape_create_prevent_cmd(drive, &pc, 0)) {
2784 if (!idetape_queue_pc_tail(drive, &pc))
2785 tape->door_locked = DOOR_UNLOCKED;
2786 }
2787 }
2788 }
2789 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
2790 ide_tape_put(tape);
2791 unlock_kernel();
2792 return 0;
2793 }
2794
2795 /*
2796 * Check the contents of the ATAPI IDENTIFY command results. We return:
2797 *
2798 * 1 - If the tape can be supported by us, based on the information we have so
2799 * far.
2800 *
2801 * 0 - If this tape drive is not currently supported by us.
2802 */
2803 static int idetape_identify_device(ide_drive_t *drive)
2804 {
2805 u8 gcw[2], protocol, device_type, removable, packet_size;
2806
2807 if (drive->id_read == 0)
2808 return 1;
2809
2810 *((unsigned short *) &gcw) = drive->id->config;
2811
2812 protocol = (gcw[1] & 0xC0) >> 6;
2813 device_type = gcw[1] & 0x1F;
2814 removable = !!(gcw[0] & 0x80);
2815 packet_size = gcw[0] & 0x3;
2816
2817 /* Check that we can support this device */
2818 if (protocol != 2)
2819 printk(KERN_ERR "ide-tape: Protocol (0x%02x) is not ATAPI\n",
2820 protocol);
2821 else if (device_type != 1)
2822 printk(KERN_ERR "ide-tape: Device type (0x%02x) is not set "
2823 "to tape\n", device_type);
2824 else if (!removable)
2825 printk(KERN_ERR "ide-tape: The removable flag is not set\n");
2826 else if (packet_size != 0) {
2827 printk(KERN_ERR "ide-tape: Packet size (0x%02x) is not 12"
2828 " bytes\n", packet_size);
2829 } else
2830 return 1;
2831 return 0;
2832 }
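
/*
 * Worked example (assuming a little-endian host, so gcw[0] is the low byte
 * of the config word): an IDENTIFY config value of 0x8180 decodes as
 * protocol = 2 (ATAPI), device_type = 1 (sequential access, i.e. tape),
 * removable = 1 and packet_size = 0 (12-byte packets), which is the only
 * combination accepted above.
 */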
2833
2834 static void idetape_get_inquiry_results(ide_drive_t *drive)
2835 {
2836 idetape_tape_t *tape = drive->driver_data;
2837 struct ide_atapi_pc pc;
2838 char fw_rev[6], vendor_id[10], product_id[18];
2839
2840 idetape_create_inquiry_cmd(&pc);
2841 if (idetape_queue_pc_tail(drive, &pc)) {
2842 printk(KERN_ERR "ide-tape: %s: can't get INQUIRY results\n",
2843 tape->name);
2844 return;
2845 }
2846 memcpy(vendor_id, &pc.buf[8], 8);
2847 memcpy(product_id, &pc.buf[16], 16);
2848 memcpy(fw_rev, &pc.buf[32], 4);
2849
2850 ide_fixstring(vendor_id, 10, 0);
2851 ide_fixstring(product_id, 18, 0);
2852 ide_fixstring(fw_rev, 6, 0);
2853
2854 printk(KERN_INFO "ide-tape: %s <-> %s: %s %s rev %s\n",
2855 drive->name, tape->name, vendor_id, product_id, fw_rev);
2856 }
2857
2858 /*
2859 * Ask the tape about its various parameters. In particular, we will adjust our
2860 * data transfer buffer size to the recommended value as returned by the tape.
2861 */
2862 static void idetape_get_mode_sense_results(ide_drive_t *drive)
2863 {
2864 idetape_tape_t *tape = drive->driver_data;
2865 struct ide_atapi_pc pc;
2866 u8 *caps;
2867 u8 speed, max_speed;
2868
2869 idetape_create_mode_sense_cmd(&pc, IDETAPE_CAPABILITIES_PAGE);
2870 if (idetape_queue_pc_tail(drive, &pc)) {
2871 printk(KERN_ERR "ide-tape: Can't get tape parameters - assuming"
2872 " some default values\n");
2873 tape->blk_size = 512;
2874 put_unaligned(52, (u16 *)&tape->caps[12]);
2875 put_unaligned(540, (u16 *)&tape->caps[14]);
2876 put_unaligned(6*52, (u16 *)&tape->caps[16]);
2877 return;
2878 }
2879 caps = pc.buf + 4 + pc.buf[3];
2880
2881 /* convert to host order and save for later use */
2882 speed = be16_to_cpu(*(u16 *)&caps[14]);
2883 max_speed = be16_to_cpu(*(u16 *)&caps[8]);
2884
2885 put_unaligned(max_speed, (u16 *)&caps[8]);
2886 put_unaligned(be16_to_cpu(*(u16 *)&caps[12]), (u16 *)&caps[12]);
2887 put_unaligned(speed, (u16 *)&caps[14]);
2888 put_unaligned(be16_to_cpu(*(u16 *)&caps[16]), (u16 *)&caps[16]);
2889
2890 if (!speed) {
2891 printk(KERN_INFO "ide-tape: %s: invalid tape speed "
2892 "(assuming 650KB/sec)\n", drive->name);
2893 put_unaligned(650, (u16 *)&caps[14]);
2894 }
2895 if (!max_speed) {
2896 printk(KERN_INFO "ide-tape: %s: invalid max_speed "
2897 "(assuming 650KB/sec)\n", drive->name);
2898 put_unaligned(650, (u16 *)&caps[8]);
2899 }
2900
2901 memcpy(&tape->caps, caps, 20);
2902 if (caps[7] & 0x02)
2903 tape->blk_size = 512;
2904 else if (caps[7] & 0x04)
2905 tape->blk_size = 1024;
2906 }
2907
2908 #ifdef CONFIG_IDE_PROC_FS
2909 static void idetape_add_settings(ide_drive_t *drive)
2910 {
2911 idetape_tape_t *tape = drive->driver_data;
2912
2913 ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff,
2914 1, 2, (u16 *)&tape->caps[16], NULL);
2915 ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff,
2916 tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
2917 ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff,
2918 tape->stage_size / 1024, 1, &tape->max_stages, NULL);
2919 ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff,
2920 tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
2921 ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0,
2922 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages,
2923 NULL);
2924 ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0,
2925 0xffff, tape->stage_size / 1024, 1,
2926 &tape->nr_pending_stages, NULL);
2927 ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff,
2928 1, 1, (u16 *)&tape->caps[14], NULL);
2929 ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1,
2930 1024, &tape->stage_size, NULL);
2931 ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN,
2932 IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq,
2933 NULL);
2934 ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1,
2935 1, &drive->dsc_overlap, NULL);
2936 ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff,
2937 1, 1, &tape->avg_speed, NULL);
2938 ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1,
2939 1, &tape->debug_mask, NULL);
2940 }
2941 #else
2942 static inline void idetape_add_settings(ide_drive_t *drive) { ; }
2943 #endif
2944
2945 /*
2946 * The function below is called to:
2947 *
2948 * 1. Initialize our various state variables.
2949 * 2. Ask the tape for its capabilities.
2950 * 3. Allocate a buffer which will be used for data transfer. The buffer size
2951 * is chosen based on the recommendation which we received in step 2.
2952 *
2953 * Note that at this point ide.c already assigned us an irq, so that we can
2954 * queue requests here and wait for their completion.
2955 */
2956 static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
2957 {
2958 unsigned long t1, tmid, tn, t;
2959 int speed;
2960 int stage_size;
2961 u8 gcw[2];
2962 struct sysinfo si;
2963 u16 *ctl = (u16 *)&tape->caps[12];
2964
2965 spin_lock_init(&tape->lock);
2966 drive->dsc_overlap = 1;
2967 if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
2968 printk(KERN_INFO "ide-tape: %s: disabling DSC overlap\n",
2969 tape->name);
2970 drive->dsc_overlap = 0;
2971 }
2972 /* Seagate Travan drives do not support DSC overlap. */
2973 if (strstr(drive->id->model, "Seagate STT3401"))
2974 drive->dsc_overlap = 0;
2975 tape->minor = minor;
2976 tape->name[0] = 'h';
2977 tape->name[1] = 't';
2978 tape->name[2] = '0' + minor;
2979 tape->chrdev_dir = IDETAPE_DIR_NONE;
2980 tape->pc = tape->pc_stack;
2981 *((unsigned short *) &gcw) = drive->id->config;
2982
2983 /* Command packet DRQ type */
2984 if (((gcw[0] & 0x60) >> 5) == 1)
2985 set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags);
2986
2987 tape->min_pipeline = 10;
2988 tape->max_pipeline = 10;
2989 tape->max_stages = 10;
2990
2991 idetape_get_inquiry_results(drive);
2992 idetape_get_mode_sense_results(drive);
2993 ide_tape_get_bsize_from_bdesc(drive);
2994 tape->user_bs_factor = 1;
2995 tape->stage_size = *ctl * tape->blk_size;
2996 while (tape->stage_size > 0xffff) {
2997 printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
2998 *ctl /= 2;
2999 tape->stage_size = *ctl * tape->blk_size;
3000 }
3001 stage_size = tape->stage_size;
3002 tape->pages_per_stage = stage_size / PAGE_SIZE;
3003 if (stage_size % PAGE_SIZE) {
3004 tape->pages_per_stage++;
3005 tape->excess_bh_size = PAGE_SIZE - stage_size % PAGE_SIZE;
3006 }
3007
3008 /* Select the "best" DSC read/write polling freq and pipeline size. */
3009 speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
3010
3011 tape->max_stages = speed * 1000 * 10 / tape->stage_size;
3012
3013 /* Limit memory use for pipeline to 10% of physical memory */
3014 si_meminfo(&si);
3015 if (tape->max_stages * tape->stage_size >
3016 si.totalram * si.mem_unit / 10)
3017 tape->max_stages =
3018 si.totalram * si.mem_unit / (10 * tape->stage_size);
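	/*
	 * E.g. (illustrative figures): with 128MB of RAM and 32kB stages the
	 * 10% cap works out to 13421772 / 32768 ~= 409 stages, which the
	 * min() below then clamps to IDETAPE_MAX_PIPELINE_STAGES.
	 */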
3019
3020 tape->max_stages = min(tape->max_stages, IDETAPE_MAX_PIPELINE_STAGES);
3021 tape->min_pipeline = min(tape->max_stages, IDETAPE_MIN_PIPELINE_STAGES);
3022 tape->max_pipeline =
3023 min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
3024 if (tape->max_stages == 0) {
3025 tape->max_stages = 1;
3026 tape->min_pipeline = 1;
3027 tape->max_pipeline = 1;
3028 }
3029
3030 t1 = (tape->stage_size * HZ) / (speed * 1000);
3031 tmid = (*(u16 *)&tape->caps[16] * 32 * HZ) / (speed * 125);
3032 tn = (IDETAPE_FIFO_THRESHOLD * tape->stage_size * HZ) / (speed * 1000);
3033
3034 if (tape->max_stages)
3035 t = tn;
3036 else
3037 t = t1;
3038
3039 /*
3040 * Ensure that the number we got makes sense; limit it within
3041 * IDETAPE_DSC_RW_MIN and IDETAPE_DSC_RW_MAX.
3042 */
3043 tape->best_dsc_rw_freq = max_t(unsigned long,
3044 min_t(unsigned long, t, IDETAPE_DSC_RW_MAX),
3045 IDETAPE_DSC_RW_MIN);
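	/*
	 * Rough numbers for illustration: at speed = 650 (KB/sec) and a
	 * 26624 byte stage, t1 = 26624 * HZ / 650000, i.e. roughly 41ms
	 * expressed in jiffies, before the clamp to the
	 * IDETAPE_DSC_RW_MIN/MAX window above.
	 */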
3046 printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
3047 "%dkB pipeline, %lums tDSC%s\n",
3048 drive->name, tape->name, *(u16 *)&tape->caps[14],
3049 (*(u16 *)&tape->caps[16] * 512) / tape->stage_size,
3050 tape->stage_size / 1024,
3051 tape->max_stages * tape->stage_size / 1024,
3052 tape->best_dsc_rw_freq * 1000 / HZ,
3053 drive->using_dma ? ", DMA":"");
3054
3055 idetape_add_settings(drive);
3056 }
3057
3058 static void ide_tape_remove(ide_drive_t *drive)
3059 {
3060 idetape_tape_t *tape = drive->driver_data;
3061
3062 ide_proc_unregister_driver(drive, tape->driver);
3063
3064 ide_unregister_region(tape->disk);
3065
3066 ide_tape_put(tape);
3067 }
3068
3069 static void ide_tape_release(struct kref *kref)
3070 {
3071 struct ide_tape_obj *tape = to_ide_tape(kref);
3072 ide_drive_t *drive = tape->drive;
3073 struct gendisk *g = tape->disk;
3074
3075 BUG_ON(tape->first_stage != NULL || tape->merge_stage_size);
3076
3077 drive->dsc_overlap = 0;
3078 drive->driver_data = NULL;
3079 device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor));
3080 device_destroy(idetape_sysfs_class,
3081 MKDEV(IDETAPE_MAJOR, tape->minor + 128));
3082 idetape_devs[tape->minor] = NULL;
3083 g->private_data = NULL;
3084 put_disk(g);
3085 kfree(tape);
3086 }
3087
3088 #ifdef CONFIG_IDE_PROC_FS
3089 static int proc_idetape_read_name
3090 (char *page, char **start, off_t off, int count, int *eof, void *data)
3091 {
3092 ide_drive_t *drive = (ide_drive_t *) data;
3093 idetape_tape_t *tape = drive->driver_data;
3094 char *out = page;
3095 int len;
3096
3097 len = sprintf(out, "%s\n", tape->name);
3098 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
3099 }
3100
3101 static ide_proc_entry_t idetape_proc[] = {
3102 { "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL },
3103 { "name", S_IFREG|S_IRUGO, proc_idetape_read_name, NULL },
3104 { NULL, 0, NULL, NULL }
3105 };
3106 #endif
3107
3108 static int ide_tape_probe(ide_drive_t *);
3109
3110 static ide_driver_t idetape_driver = {
3111 .gen_driver = {
3112 .owner = THIS_MODULE,
3113 .name = "ide-tape",
3114 .bus = &ide_bus_type,
3115 },
3116 .probe = ide_tape_probe,
3117 .remove = ide_tape_remove,
3118 .version = IDETAPE_VERSION,
3119 .media = ide_tape,
3120 .supports_dsc_overlap = 1,
3121 .do_request = idetape_do_request,
3122 .end_request = idetape_end_request,
3123 .error = __ide_error,
3124 .abort = __ide_abort,
3125 #ifdef CONFIG_IDE_PROC_FS
3126 .proc = idetape_proc,
3127 #endif
3128 };
3129
3130 /* Our character device supporting functions, passed to register_chrdev. */
3131 static const struct file_operations idetape_fops = {
3132 .owner = THIS_MODULE,
3133 .read = idetape_chrdev_read,
3134 .write = idetape_chrdev_write,
3135 .ioctl = idetape_chrdev_ioctl,
3136 .open = idetape_chrdev_open,
3137 .release = idetape_chrdev_release,
3138 };
3139
3140 static int idetape_open(struct inode *inode, struct file *filp)
3141 {
3142 struct gendisk *disk = inode->i_bdev->bd_disk;
3143 struct ide_tape_obj *tape;
3144
3145 tape = ide_tape_get(disk);
3146 if (!tape)
3147 return -ENXIO;
3148
3149 return 0;
3150 }
3151
3152 static int idetape_release(struct inode *inode, struct file *filp)
3153 {
3154 struct gendisk *disk = inode->i_bdev->bd_disk;
3155 struct ide_tape_obj *tape = ide_tape_g(disk);
3156
3157 ide_tape_put(tape);
3158
3159 return 0;
3160 }
3161
3162 static int idetape_ioctl(struct inode *inode, struct file *file,
3163 unsigned int cmd, unsigned long arg)
3164 {
3165 struct block_device *bdev = inode->i_bdev;
3166 struct ide_tape_obj *tape = ide_tape_g(bdev->bd_disk);
3167 ide_drive_t *drive = tape->drive;
3168 int err = generic_ide_ioctl(drive, file, bdev, cmd, arg);
3169 if (err == -EINVAL)
3170 err = idetape_blkdev_ioctl(drive, cmd, arg);
3171 return err;
3172 }
3173
3174 static struct block_device_operations idetape_block_ops = {
3175 .owner = THIS_MODULE,
3176 .open = idetape_open,
3177 .release = idetape_release,
3178 .ioctl = idetape_ioctl,
3179 };
3180
3181 static int ide_tape_probe(ide_drive_t *drive)
3182 {
3183 idetape_tape_t *tape;
3184 struct gendisk *g;
3185 int minor;
3186
3187 if (!strstr("ide-tape", drive->driver_req))
3188 goto failed;
3189 if (!drive->present)
3190 goto failed;
3191 if (drive->media != ide_tape)
3192 goto failed;
3193 if (!idetape_identify_device(drive)) {
3194 printk(KERN_ERR "ide-tape: %s: not supported by this version of"
3195 " the driver\n", drive->name);
3196 goto failed;
3197 }
3198 if (drive->scsi) {
3199 printk(KERN_INFO "ide-tape: passing drive %s to ide-scsi"
3200 " emulation.\n", drive->name);
3201 goto failed;
3202 }
3203 tape = kzalloc(sizeof(idetape_tape_t), GFP_KERNEL);
3204 if (tape == NULL) {
3205 printk(KERN_ERR "ide-tape: %s: Can't allocate a tape struct\n",
3206 drive->name);
3207 goto failed;
3208 }
3209
3210 g = alloc_disk(1 << PARTN_BITS);
3211 if (!g)
3212 goto out_free_tape;
3213
3214 ide_init_disk(g, drive);
3215
3216 ide_proc_register_driver(drive, &idetape_driver);
3217
3218 kref_init(&tape->kref);
3219
3220 tape->drive = drive;
3221 tape->driver = &idetape_driver;
3222 tape->disk = g;
3223
3224 g->private_data = &tape->driver;
3225
3226 drive->driver_data = tape;
3227
3228 mutex_lock(&idetape_ref_mutex);
3229 for (minor = 0; idetape_devs[minor]; minor++)
3230 ;
3231 idetape_devs[minor] = tape;
3232 mutex_unlock(&idetape_ref_mutex);
3233
3234 idetape_setup(drive, tape, minor);
3235
3236 device_create(idetape_sysfs_class, &drive->gendev,
3237 MKDEV(IDETAPE_MAJOR, minor), "%s", tape->name);
3238 device_create(idetape_sysfs_class, &drive->gendev,
3239 MKDEV(IDETAPE_MAJOR, minor + 128), "n%s", tape->name);
3240
3241 g->fops = &idetape_block_ops;
3242 ide_register_region(g);
3243
3244 return 0;
3245
3246 out_free_tape:
3247 kfree(tape);
3248 failed:
3249 return -ENODEV;
3250 }
3251
3252 static void __exit idetape_exit(void)
3253 {
3254 driver_unregister(&idetape_driver.gen_driver);
3255 class_destroy(idetape_sysfs_class);
3256 unregister_chrdev(IDETAPE_MAJOR, "ht");
3257 }
3258
3259 static int __init idetape_init(void)
3260 {
3261 int error = 1;
3262 idetape_sysfs_class = class_create(THIS_MODULE, "ide_tape");
3263 if (IS_ERR(idetape_sysfs_class)) {
3264 idetape_sysfs_class = NULL;
3265 printk(KERN_ERR "Unable to create sysfs class for ide tapes\n");
3266 error = -EBUSY;
3267 goto out;
3268 }
3269
3270 if (register_chrdev(IDETAPE_MAJOR, "ht", &idetape_fops)) {
3271 printk(KERN_ERR "ide-tape: Failed to register chrdev"
3272 " interface\n");
3273 error = -EBUSY;
3274 goto out_free_class;
3275 }
3276
3277 error = driver_register(&idetape_driver.gen_driver);
3278 if (error)
3279 goto out_free_driver;
3280
3281 return 0;
3282
3283 out_free_driver:
3284 driver_unregister(&idetape_driver.gen_driver);
3285 out_free_class:
3286 class_destroy(idetape_sysfs_class);
3287 out:
3288 return error;
3289 }
3290
3291 MODULE_ALIAS("ide:*m-tape*");
3292 module_init(idetape_init);
3293 module_exit(idetape_exit);
3294 MODULE_ALIAS_CHARDEV_MAJOR(IDETAPE_MAJOR);
3295 MODULE_DESCRIPTION("ATAPI Streaming TAPE Driver");
3296 MODULE_LICENSE("GPL");