1 /*
2 * IDE ATAPI streaming tape driver.
3 *
4 * Copyright (C) 1995-1999 Gadi Oxman <gadio@netvision.net.il>
5 * Copyright (C) 2003-2005 Bartlomiej Zolnierkiewicz
6 *
7 * This driver was constructed as a student project in the software laboratory
8 * of the faculty of electrical engineering in the Technion - Israel's
9 * Institute Of Technology, with the guide of Avner Lottem and Dr. Ilana David.
10 *
11 * It is hereby placed under the terms of the GNU general public license.
12 * (See linux/COPYING).
13 *
14 * For a historical changelog see
15 * Documentation/ide/ChangeLog.ide-tape.1995-2002
16 */
17
18 #define IDETAPE_VERSION "1.20"
19
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/string.h>
23 #include <linux/kernel.h>
24 #include <linux/delay.h>
25 #include <linux/timer.h>
26 #include <linux/mm.h>
27 #include <linux/interrupt.h>
28 #include <linux/jiffies.h>
29 #include <linux/major.h>
30 #include <linux/errno.h>
31 #include <linux/genhd.h>
32 #include <linux/slab.h>
33 #include <linux/pci.h>
34 #include <linux/ide.h>
35 #include <linux/smp_lock.h>
36 #include <linux/completion.h>
37 #include <linux/bitops.h>
38 #include <linux/mutex.h>
39 #include <scsi/scsi.h>
40
41 #include <asm/byteorder.h>
42 #include <linux/irq.h>
43 #include <linux/uaccess.h>
44 #include <linux/io.h>
45 #include <asm/unaligned.h>
46 #include <linux/mtio.h>
47
48 enum {
49 /* output errors only */
50 DBG_ERR = (1 << 0),
51 /* output all sense key/asc */
52 DBG_SENSE = (1 << 1),
53 /* info regarding all chrdev-related procedures */
54 DBG_CHRDEV = (1 << 2),
55 /* all remaining procedures */
56 DBG_PROCS = (1 << 3),
57 /* buffer alloc info (pc_stack & rq_stack) */
58 DBG_PCRQ_STACK = (1 << 4),
59 };
60
61 /* define to see debug info */
62 #define IDETAPE_DEBUG_LOG 0
63
64 #if IDETAPE_DEBUG_LOG
65 #define debug_log(lvl, fmt, args...) \
66 { \
67 if (tape->debug_mask & lvl) \
68 printk(KERN_INFO "ide-tape: " fmt, ## args); \
69 }
70 #else
71 #define debug_log(lvl, fmt, args...) do {} while (0)
72 #endif
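/*
 * Example usage (output is produced only when IDETAPE_DEBUG_LOG is enabled
 * and the corresponding bit is set in tape->debug_mask):
 *
 *	debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
 */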
73
74 /**************************** Tunable parameters *****************************/
75
76
77 /*
78 * Pipelined mode parameters.
79 *
80 * We try to use the minimum number of stages which is enough to keep the tape
81 * constantly streaming. To accomplish that, we implement a feedback loop around
82 * the maximum number of stages:
83 *
84 * We start from MIN maximum stages (we will not even use MIN stages if we don't
85 * need them), increment it by RATE*(MAX-MIN) whenever we sense that the
86 * pipeline is empty, until we reach the optimum value or until we reach MAX.
87 */
88 #define IDETAPE_MIN_PIPELINE_STAGES 1
89 #define IDETAPE_MAX_PIPELINE_STAGES 400
90 #define IDETAPE_INCREASE_STAGES_RATE 20
91
92 /*
93 * After each failed packet command we issue a request sense command and retry
94 * the packet command IDETAPE_MAX_PC_RETRIES times.
95 *
96 * Setting IDETAPE_MAX_PC_RETRIES to 0 will disable retries.
97 */
98 #define IDETAPE_MAX_PC_RETRIES 3
99
100 /*
101 * With each packet command, we allocate a buffer of IDETAPE_PC_BUFFER_SIZE
102 * bytes. This is used for several packet commands (Not for READ/WRITE commands)
103 */
104 #define IDETAPE_PC_BUFFER_SIZE 256
105
106 /*
107 * In various places in the driver, we need to allocate storage
108 * for packet commands and requests, which will remain valid while
109 * we leave the driver to wait for an interrupt or a timeout event.
110 */
111 #define IDETAPE_PC_STACK (10 + IDETAPE_MAX_PC_RETRIES)
112
113 /*
114 * Some drives (for example, Seagate STT3401A Travan) require a very long
115 * timeout, because they don't return an interrupt or clear their busy bit
116 * until after the command completes (even retension commands).
117 */
118 #define IDETAPE_WAIT_CMD (900*HZ)
119
120 /*
121 * The following parameter selects the point in the internal tape fifo at which
122 * we will start to refill the buffer. Decreasing it will improve the system's
123 * latency and interactive response, while using a higher value might improve
124 * system throughput.
125 */
126 #define IDETAPE_FIFO_THRESHOLD 2
127
128 /*
129 * DSC polling parameters.
130 *
131 * Polling for DSC (a single bit in the status register) is a very important
132 * function in ide-tape. There are two cases in which we poll for DSC:
133 *
134 * 1. Before a read/write packet command, to ensure that we can transfer data
135 * from/to the tape's data buffers, without causing an actual media access.
136 * In case the tape is not ready yet, we remove our request from the device
137 * request queue, so that ide.c can service requests from the other device
138 * on the same interface in the meantime.
139 *
140 * 2. After the successful initialization of a "media access packet command",
141 * which is a command that can take a long time to complete (the interval can
142 * range from several seconds to even an hour). Again, we postpone our request
143 * in the middle to free the bus for the other device. The polling frequency
144 * here should be lower than the read/write frequency since those media access
145 * commands are slow. We start from a "fast" frequency - IDETAPE_DSC_MA_FAST
146 * (2 seconds), and if we don't receive DSC after IDETAPE_DSC_MA_THRESHOLD
147 * (5 minutes), we switch to a lower frequency - IDETAPE_DSC_MA_SLOW (30 seconds).
148 *
149 * We also set a timeout for the timer, in case something goes wrong. The
150 * timeout should be longer than the maximum execution time of a tape operation.
151 */
152
153 /* DSC timings. */
154 #define IDETAPE_DSC_RW_MIN (5 * HZ / 100) /* 50 msec */
155 #define IDETAPE_DSC_RW_MAX (40 * HZ / 100) /* 400 msec */
156 #define IDETAPE_DSC_RW_TIMEOUT (2 * 60 * HZ) /* 2 minutes */
157 #define IDETAPE_DSC_MA_FAST (2 * HZ) /* 2 seconds */
158 #define IDETAPE_DSC_MA_THRESHOLD (5 * 60 * HZ) /* 5 minutes */
159 #define IDETAPE_DSC_MA_SLOW (30 * HZ) /* 30 seconds */
160 #define IDETAPE_DSC_MA_TIMEOUT (2 * 60 * 60 * HZ) /* 2 hours */
161
162 /*************************** End of tunable parameters ***********************/
163
164 /* Read/Write error simulation */
165 #define SIMULATE_ERRORS 0
166
167 /* tape directions */
168 enum {
169 IDETAPE_DIR_NONE = (1 << 0),
170 IDETAPE_DIR_READ = (1 << 1),
171 IDETAPE_DIR_WRITE = (1 << 2),
172 };
173
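/*
 * Scatter/gather element for the driver's internal data buffers: b_data points
 * to the buffer, b_size is its total size, b_count is the number of bytes
 * currently in use and b_reqnext links the elements into a singly linked list.
 */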
174 struct idetape_bh {
175 u32 b_size;
176 atomic_t b_count;
177 struct idetape_bh *b_reqnext;
178 char *b_data;
179 };
180
181 /* Tape door status */
182 #define DOOR_UNLOCKED 0
183 #define DOOR_LOCKED 1
184 #define DOOR_EXPLICITLY_LOCKED 2
185
186 /* Some defines for the SPACE command */
187 #define IDETAPE_SPACE_OVER_FILEMARK 1
188 #define IDETAPE_SPACE_TO_EOD 3
189
190 /* Some defines for the LOAD UNLOAD command */
191 #define IDETAPE_LU_LOAD_MASK 1
192 #define IDETAPE_LU_RETENSION_MASK 2
193 #define IDETAPE_LU_EOT_MASK 4
194
195 /*
196 * Special requests for our block device strategy routine.
197 *
198 * In order to service a character device command, we add special requests to
199 * the tail of our block device request queue and wait for their completion.
200 */
201
202 enum {
203 REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */
204 REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */
205 REQ_IDETAPE_READ = (1 << 2),
206 REQ_IDETAPE_WRITE = (1 << 3),
207 };
208
209 /* Error codes returned in rq->errors to the higher part of the driver. */
210 #define IDETAPE_ERROR_GENERAL 101
211 #define IDETAPE_ERROR_FILEMARK 102
212 #define IDETAPE_ERROR_EOD 103
213
214 /* Structures related to the SELECT SENSE / MODE SENSE packet commands. */
215 #define IDETAPE_BLOCK_DESCRIPTOR 0
216 #define IDETAPE_CAPABILITIES_PAGE 0x2a
217
218 /* Tape flag bits values. */
219 enum {
220 IDETAPE_FLAG_IGNORE_DSC = (1 << 0),
221 /* 0 When the tape position is unknown */
222 IDETAPE_FLAG_ADDRESS_VALID = (1 << 1),
223 /* Device already opened */
224 IDETAPE_FLAG_BUSY = (1 << 2),
225 /* Error detected in a pipeline stage */
226 IDETAPE_FLAG_PIPELINE_ERR = (1 << 3),
227 /* Attempt to auto-detect the current user block size */
228 IDETAPE_FLAG_DETECT_BS = (1 << 4),
229 /* Currently on a filemark */
230 IDETAPE_FLAG_FILEMARK = (1 << 5),
231 /* DRQ interrupt device */
232 IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 6),
233 /* pipeline active */
234 IDETAPE_FLAG_PIPELINE_ACTIVE = (1 << 7),
235 /* 0 = no tape is loaded, so we don't rewind after ejecting */
236 IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 8),
237 };
238
239 /* A pipeline stage. */
240 typedef struct idetape_stage_s {
241 struct request rq; /* The corresponding request */
242 struct idetape_bh *bh; /* The data buffers */
243 struct idetape_stage_s *next; /* Pointer to the next stage */
244 } idetape_stage_t;
245
246 /*
247 * Most of our global data which we need to save even as we leave the driver due
248 * to an interrupt or a timer event is stored in the struct defined below.
249 */
250 typedef struct ide_tape_obj {
251 ide_drive_t *drive;
252 ide_driver_t *driver;
253 struct gendisk *disk;
254 struct kref kref;
255
256 /*
257 * Since a typical character device operation requires more
258 * than one packet command, we provide here enough memory
259 * for the maximum of interconnected packet commands.
260 * The packet commands are stored in the circular array pc_stack.
261 * pc_stack_index points to the last used entry, and wraps around
262 * to the start when we get to the last array entry.
263 *
264 * pc points to the current processed packet command.
265 *
266 * failed_pc points to the last failed packet command, or contains
267 * NULL if we do not need to retry any packet command. This is
268 * required since an additional packet command is needed before the
269 * retry, to get detailed information on what went wrong.
270 */
271 /* Current packet command */
272 struct ide_atapi_pc *pc;
273 /* Last failed packet command */
274 struct ide_atapi_pc *failed_pc;
275 /* Packet command stack */
276 struct ide_atapi_pc pc_stack[IDETAPE_PC_STACK];
277 /* Next free packet command storage space */
278 int pc_stack_index;
279 struct request rq_stack[IDETAPE_PC_STACK];
280 /* We implement a circular array */
281 int rq_stack_index;
282
283 /*
284 * DSC polling variables.
285 *
286 * While polling for DSC we use postponed_rq to postpone the current
287 * request so that ide.c will be able to service pending requests on the
288 * other device. Note that at most we will have only one DSC (usually
289 * data transfer) request in the device request queue. Additional
290 * requests can be queued in our internal pipeline, but they will be
291 * visible to ide.c only one at a time.
292 */
293 struct request *postponed_rq;
294 /* The time in which we started polling for DSC */
295 unsigned long dsc_polling_start;
296 /* Timer used to poll for dsc */
297 struct timer_list dsc_timer;
298 /* Read/Write dsc polling frequency */
299 unsigned long best_dsc_rw_freq;
300 unsigned long dsc_poll_freq;
301 unsigned long dsc_timeout;
302
303 /* Read position information */
304 u8 partition;
305 /* Current block */
306 unsigned int first_frame;
307
308 /* Last error information */
309 u8 sense_key, asc, ascq;
310
311 /* Character device operation */
312 unsigned int minor;
313 /* device name */
314 char name[4];
315 /* Current character device data transfer direction */
316 u8 chrdev_dir;
317
318 /* tape block size, usually 512 or 1024 bytes */
319 unsigned short blk_size;
320 int user_bs_factor;
321
322 /* Copy of the tape's Capabilities and Mechanical Page */
323 u8 caps[20];
324
325 /*
326 * Active data transfer request parameters.
327 *
328 * At most, there is only one ide-tape originated data transfer request
329 * in the device request queue. This allows ide.c to easily service
330 * requests from the other device when we postpone our active request.
331 * In the pipelined operation mode, we use our internal pipeline
332 * structure to hold more data requests. The data buffer size is chosen
333 * based on the tape's recommendation.
334 */
335 /* ptr to the request which is waiting in the device request queue */
336 struct request *active_data_rq;
337 /* Data buffer size chosen based on the tape's recommendation */
338 int stage_size;
339 idetape_stage_t *merge_stage;
340 int merge_stage_size;
341 struct idetape_bh *bh;
342 char *b_data;
343 int b_count;
344
345 /*
346 * Pipeline parameters.
347 *
348 * To accomplish non-pipelined mode, we simply set the following
349 * variables to zero (or NULL, where appropriate).
350 */
351 /* Number of currently used stages */
352 int nr_stages;
353 /* Number of pending stages */
354 int nr_pending_stages;
355 /* We will not allocate more than this number of stages */
356 int max_stages, min_pipeline, max_pipeline;
357 /* The first stage which will be removed from the pipeline */
358 idetape_stage_t *first_stage;
359 /* The currently active stage */
360 idetape_stage_t *active_stage;
361 /* Will be serviced after the currently active request */
362 idetape_stage_t *next_stage;
363 /* New requests will be added to the pipeline here */
364 idetape_stage_t *last_stage;
365 int pages_per_stage;
366 /* Wasted space in each stage */
367 int excess_bh_size;
368
369 /* Status/Action flags: long for set_bit */
370 unsigned long flags;
371 /* protects the ide-tape queue */
372 spinlock_t lock;
373
374 /* Measures average tape speed */
375 unsigned long avg_time;
376 int avg_size;
377 int avg_speed;
378
379 /* the door is currently locked */
380 int door_locked;
381 /* the tape hardware is write protected */
382 char drv_write_prot;
383 /* the tape is write protected (hardware or opened as read-only) */
384 char write_prot;
385
386 /*
387 * Limit the number of times a request can be postponed, to avoid an
388 * infinite postpone deadlock.
389 */
390 int postpone_cnt;
391
392 /* Speed control at the tape buffers input/output */
393 unsigned long insert_time;
394 int insert_size;
395 int insert_speed;
396 int measure_insert_time;
397
398 u32 debug_mask;
399 } idetape_tape_t;
400
401 static DEFINE_MUTEX(idetape_ref_mutex);
402
403 static struct class *idetape_sysfs_class;
404
405 #define to_ide_tape(obj) container_of(obj, struct ide_tape_obj, kref)
406
407 #define ide_tape_g(disk) \
408 container_of((disk)->private_data, struct ide_tape_obj, driver)
409
410 static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
411 {
412 struct ide_tape_obj *tape = NULL;
413
414 mutex_lock(&idetape_ref_mutex);
415 tape = ide_tape_g(disk);
416 if (tape)
417 kref_get(&tape->kref);
418 mutex_unlock(&idetape_ref_mutex);
419 return tape;
420 }
421
422 static void ide_tape_release(struct kref *);
423
424 static void ide_tape_put(struct ide_tape_obj *tape)
425 {
426 mutex_lock(&idetape_ref_mutex);
427 kref_put(&tape->kref, ide_tape_release);
428 mutex_unlock(&idetape_ref_mutex);
429 }
430
431 /*
432 * The variables below are used for the character device interface. Additional
433 * state variables are defined in our ide_drive_t structure.
434 */
435 static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];
436
437 #define ide_tape_f(file) ((file)->private_data)
438
439 static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
440 {
441 struct ide_tape_obj *tape = NULL;
442
443 mutex_lock(&idetape_ref_mutex);
444 tape = idetape_devs[i];
445 if (tape)
446 kref_get(&tape->kref);
447 mutex_unlock(&idetape_ref_mutex);
448 return tape;
449 }
450
451 static void idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
452 unsigned int bcount)
453 {
454 struct idetape_bh *bh = pc->bh;
455 int count;
456
457 while (bcount) {
458 if (bh == NULL) {
459 printk(KERN_ERR "ide-tape: bh == NULL in "
460 "idetape_input_buffers\n");
461 ide_atapi_discard_data(drive, bcount);
462 return;
463 }
464 count = min(
465 (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
466 bcount);
467 HWIF(drive)->atapi_input_bytes(drive, bh->b_data +
468 atomic_read(&bh->b_count), count);
469 bcount -= count;
470 atomic_add(count, &bh->b_count);
471 if (atomic_read(&bh->b_count) == bh->b_size) {
472 bh = bh->b_reqnext;
473 if (bh)
474 atomic_set(&bh->b_count, 0);
475 }
476 }
477 pc->bh = bh;
478 }
479
480 static void idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
481 unsigned int bcount)
482 {
483 struct idetape_bh *bh = pc->bh;
484 int count;
485
486 while (bcount) {
487 if (bh == NULL) {
488 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
489 __func__);
490 return;
491 }
492 count = min((unsigned int)pc->b_count, (unsigned int)bcount);
493 HWIF(drive)->atapi_output_bytes(drive, pc->b_data, count);
494 bcount -= count;
495 pc->b_data += count;
496 pc->b_count -= count;
497 if (!pc->b_count) {
498 bh = bh->b_reqnext;
499 pc->bh = bh;
500 if (bh) {
501 pc->b_data = bh->b_data;
502 pc->b_count = atomic_read(&bh->b_count);
503 }
504 }
505 }
506 }
507
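/*
 * After a partial transfer (e.g. a DMA error), walk the buffer list and adjust
 * the per-buffer byte counts so that they match the number of bytes actually
 * transferred (pc->xferred). Only needed for reads; writes return early.
 */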
508 static void idetape_update_buffers(struct ide_atapi_pc *pc)
509 {
510 struct idetape_bh *bh = pc->bh;
511 int count;
512 unsigned int bcount = pc->xferred;
513
514 if (pc->flags & PC_FLAG_WRITING)
515 return;
516 while (bcount) {
517 if (bh == NULL) {
518 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
519 __func__);
520 return;
521 }
522 count = min((unsigned int)bh->b_size, (unsigned int)bcount);
523 atomic_set(&bh->b_count, count);
524 if (atomic_read(&bh->b_count) == bh->b_size)
525 bh = bh->b_reqnext;
526 bcount -= count;
527 }
528 pc->bh = bh;
529 }
530
531 /*
532 * idetape_next_pc_storage returns a pointer to a place in which we can
533 * safely store a packet command, even though we intend to leave the
534 * driver. A storage space for a maximum of IDETAPE_PC_STACK packet
535 * commands is allocated at initialization time.
536 */
537 static struct ide_atapi_pc *idetape_next_pc_storage(ide_drive_t *drive)
538 {
539 idetape_tape_t *tape = drive->driver_data;
540
541 debug_log(DBG_PCRQ_STACK, "pc_stack_index=%d\n", tape->pc_stack_index);
542
543 if (tape->pc_stack_index == IDETAPE_PC_STACK)
544 tape->pc_stack_index = 0;
545 return (&tape->pc_stack[tape->pc_stack_index++]);
546 }
547
548 /*
549 * idetape_next_rq_storage is used along with idetape_next_pc_storage.
550 * Since we queue packet commands in the request queue, we need to
551 * allocate a request, along with the allocation of a packet command.
552 */
553
554 /**************************************************************
555 * *
556 * This should get fixed to use kmalloc(.., GFP_ATOMIC) *
557 * followed later on by kfree(). -ml *
558 * *
559 **************************************************************/
560
561 static struct request *idetape_next_rq_storage(ide_drive_t *drive)
562 {
563 idetape_tape_t *tape = drive->driver_data;
564
565 debug_log(DBG_PCRQ_STACK, "rq_stack_index=%d\n", tape->rq_stack_index);
566
567 if (tape->rq_stack_index == IDETAPE_PC_STACK)
568 tape->rq_stack_index = 0;
569 return (&tape->rq_stack[tape->rq_stack_index++]);
570 }
571
572 static void idetape_init_pc(struct ide_atapi_pc *pc)
573 {
574 memset(pc->c, 0, 12);
575 pc->retries = 0;
576 pc->flags = 0;
577 pc->req_xfer = 0;
578 pc->buf = pc->pc_buf;
579 pc->buf_size = IDETAPE_PC_BUFFER_SIZE;
580 pc->bh = NULL;
581 pc->b_data = NULL;
582 }
583
584 /*
585 * Called on each failed packet command retry to analyze the request sense. We
586 * currently do not utilize this information.
587 */
588 static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
589 {
590 idetape_tape_t *tape = drive->driver_data;
591 struct ide_atapi_pc *pc = tape->failed_pc;
592
593 tape->sense_key = sense[2] & 0xF;
594 tape->asc = sense[12];
595 tape->ascq = sense[13];
596
597 debug_log(DBG_ERR, "pc = %x, sense key = %x, asc = %x, ascq = %x\n",
598 pc->c[0], tape->sense_key, tape->asc, tape->ascq);
599
600 /* Correct pc->xferred by asking the tape. */
601 if (pc->flags & PC_FLAG_DMA_ERROR) {
602 pc->xferred = pc->req_xfer -
603 tape->blk_size *
604 be32_to_cpu(get_unaligned((u32 *)&sense[3]));
605 idetape_update_buffers(pc);
606 }
607
608 /*
609 * If the error was the result of a zero-length read or write command,
610 * with sense key=5, asc=0x22, ascq=0, let it slide. Some drives
611 * (i.e. Seagate STT3401A Travan) don't support 0-length read/writes.
612 */
613 if ((pc->c[0] == READ_6 || pc->c[0] == WRITE_6)
614 /* length == 0 */
615 && pc->c[4] == 0 && pc->c[3] == 0 && pc->c[2] == 0) {
616 if (tape->sense_key == 5) {
617 /* don't report an error, everything's ok */
618 pc->error = 0;
619 /* don't retry read/write */
620 pc->flags |= PC_FLAG_ABORT;
621 }
622 }
623 if (pc->c[0] == READ_6 && (sense[2] & 0x80)) {
624 pc->error = IDETAPE_ERROR_FILEMARK;
625 pc->flags |= PC_FLAG_ABORT;
626 }
627 if (pc->c[0] == WRITE_6) {
628 if ((sense[2] & 0x40) || (tape->sense_key == 0xd
629 && tape->asc == 0x0 && tape->ascq == 0x2)) {
630 pc->error = IDETAPE_ERROR_EOD;
631 pc->flags |= PC_FLAG_ABORT;
632 }
633 }
634 if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
635 if (tape->sense_key == 8) {
636 pc->error = IDETAPE_ERROR_EOD;
637 pc->flags |= PC_FLAG_ABORT;
638 }
639 if (!(pc->flags & PC_FLAG_ABORT) &&
640 pc->xferred)
641 pc->retries = IDETAPE_MAX_PC_RETRIES + 1;
642 }
643 }
644
645 static void idetape_activate_next_stage(ide_drive_t *drive)
646 {
647 idetape_tape_t *tape = drive->driver_data;
648 idetape_stage_t *stage = tape->next_stage;
649 struct request *rq = &stage->rq;
650
651 debug_log(DBG_PROCS, "Enter %s\n", __func__);
652
653 if (stage == NULL) {
654 printk(KERN_ERR "ide-tape: bug: Trying to activate a non"
655 " existing stage\n");
656 return;
657 }
658
659 rq->rq_disk = tape->disk;
660 rq->buffer = NULL;
661 rq->special = (void *)stage->bh;
662 tape->active_data_rq = rq;
663 tape->active_stage = stage;
664 tape->next_stage = stage->next;
665 }
666
667 /* Free a stage along with its related buffers completely. */
668 static void __idetape_kfree_stage(idetape_stage_t *stage)
669 {
670 struct idetape_bh *prev_bh, *bh = stage->bh;
671 int size;
672
673 while (bh != NULL) {
674 if (bh->b_data != NULL) {
675 size = (int) bh->b_size;
676 while (size > 0) {
677 free_page((unsigned long) bh->b_data);
678 size -= PAGE_SIZE;
679 bh->b_data += PAGE_SIZE;
680 }
681 }
682 prev_bh = bh;
683 bh = bh->b_reqnext;
684 kfree(prev_bh);
685 }
686 kfree(stage);
687 }
688
689 static void idetape_kfree_stage(idetape_tape_t *tape, idetape_stage_t *stage)
690 {
691 __idetape_kfree_stage(stage);
692 }
693
694 /*
695 * Remove tape->first_stage from the pipeline. The caller should avoid race
696 * conditions.
697 */
698 static void idetape_remove_stage_head(ide_drive_t *drive)
699 {
700 idetape_tape_t *tape = drive->driver_data;
701 idetape_stage_t *stage;
702
703 debug_log(DBG_PROCS, "Enter %s\n", __func__);
704
705 if (tape->first_stage == NULL) {
706 printk(KERN_ERR "ide-tape: bug: tape->first_stage is NULL\n");
707 return;
708 }
709 if (tape->active_stage == tape->first_stage) {
710 printk(KERN_ERR "ide-tape: bug: Trying to free our active "
711 "pipeline stage\n");
712 return;
713 }
714 stage = tape->first_stage;
715 tape->first_stage = stage->next;
716 idetape_kfree_stage(tape, stage);
717 tape->nr_stages--;
718 if (tape->first_stage == NULL) {
719 tape->last_stage = NULL;
720 if (tape->next_stage != NULL)
721 printk(KERN_ERR "ide-tape: bug: tape->next_stage !="
722 " NULL\n");
723 if (tape->nr_stages)
724 printk(KERN_ERR "ide-tape: bug: nr_stages should be 0 "
725 "now\n");
726 }
727 }
728
729 /*
730 * This will free all the pipeline stages starting from new_last_stage->next
731 * to the end of the list, and point tape->last_stage to new_last_stage.
732 */
733 static void idetape_abort_pipeline(ide_drive_t *drive,
734 idetape_stage_t *new_last_stage)
735 {
736 idetape_tape_t *tape = drive->driver_data;
737 idetape_stage_t *stage = new_last_stage->next;
738 idetape_stage_t *nstage;
739
740 debug_log(DBG_PROCS, "%s: Enter %s\n", tape->name, __func__);
741
742 while (stage) {
743 nstage = stage->next;
744 idetape_kfree_stage(tape, stage);
745 --tape->nr_stages;
746 --tape->nr_pending_stages;
747 stage = nstage;
748 }
749 if (new_last_stage)
750 new_last_stage->next = NULL;
751 tape->last_stage = new_last_stage;
752 tape->next_stage = NULL;
753 }
754
755 /*
756 * Finish servicing a request and insert a pending pipeline request into the
757 * main device queue.
758 */
759 static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
760 {
761 struct request *rq = HWGROUP(drive)->rq;
762 idetape_tape_t *tape = drive->driver_data;
763 unsigned long flags;
764 int error;
765 int remove_stage = 0;
766 idetape_stage_t *active_stage;
767
768 debug_log(DBG_PROCS, "Enter %s\n", __func__);
769
770 switch (uptodate) {
771 case 0: error = IDETAPE_ERROR_GENERAL; break;
772 case 1: error = 0; break;
773 default: error = uptodate;
774 }
775 rq->errors = error;
776 if (error)
777 tape->failed_pc = NULL;
778
779 if (!blk_special_request(rq)) {
780 ide_end_request(drive, uptodate, nr_sects);
781 return 0;
782 }
783
784 spin_lock_irqsave(&tape->lock, flags);
785
786 /* The request was a pipelined data transfer request */
787 if (tape->active_data_rq == rq) {
788 active_stage = tape->active_stage;
789 tape->active_stage = NULL;
790 tape->active_data_rq = NULL;
791 tape->nr_pending_stages--;
792 if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
793 remove_stage = 1;
794 if (error) {
795 set_bit(IDETAPE_FLAG_PIPELINE_ERR,
796 &tape->flags);
797 if (error == IDETAPE_ERROR_EOD)
798 idetape_abort_pipeline(drive,
799 active_stage);
800 }
801 } else if (rq->cmd[0] & REQ_IDETAPE_READ) {
802 if (error == IDETAPE_ERROR_EOD) {
803 set_bit(IDETAPE_FLAG_PIPELINE_ERR,
804 &tape->flags);
805 idetape_abort_pipeline(drive, active_stage);
806 }
807 }
808 if (tape->next_stage != NULL) {
809 idetape_activate_next_stage(drive);
810
811 /* Insert the next request into the request queue. */
812 (void)ide_do_drive_cmd(drive, tape->active_data_rq,
813 ide_end);
814 } else if (!error) {
815 /*
816 * This is part of the feedback loop which tries to
817 * find the optimum number of stages. We start from
818 * a minimum for the maximum number of stages and, when
819 * we sense that the pipeline is empty, we try to
820 * increase it, until we reach the upper limit
821 * (tape->max_pipeline).
822 */
823 int i = (tape->max_pipeline - tape->min_pipeline) / 10;
824
825 tape->max_stages += max(i, 1);
826 tape->max_stages = max(tape->max_stages,
827 tape->min_pipeline);
828 tape->max_stages = min(tape->max_stages,
829 tape->max_pipeline);
830 }
831 }
832 ide_end_drive_cmd(drive, 0, 0);
833
834 if (remove_stage)
835 idetape_remove_stage_head(drive);
836 if (tape->active_data_rq == NULL)
837 clear_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
838 spin_unlock_irqrestore(&tape->lock, flags);
839 return 0;
840 }
841
842 static ide_startstop_t idetape_request_sense_callback(ide_drive_t *drive)
843 {
844 idetape_tape_t *tape = drive->driver_data;
845
846 debug_log(DBG_PROCS, "Enter %s\n", __func__);
847
848 if (!tape->pc->error) {
849 idetape_analyze_error(drive, tape->pc->buf);
850 idetape_end_request(drive, 1, 0);
851 } else {
852 printk(KERN_ERR "ide-tape: Error in REQUEST SENSE itself - "
853 "Aborting request!\n");
854 idetape_end_request(drive, 0, 0);
855 }
856 return ide_stopped;
857 }
858
859 static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc)
860 {
861 idetape_init_pc(pc);
862 pc->c[0] = REQUEST_SENSE;
863 pc->c[4] = 20;
864 pc->req_xfer = 20;
865 pc->idetape_callback = &idetape_request_sense_callback;
866 }
867
868 static void idetape_init_rq(struct request *rq, u8 cmd)
869 {
870 memset(rq, 0, sizeof(*rq));
871 rq->cmd_type = REQ_TYPE_SPECIAL;
872 rq->cmd[0] = cmd;
873 }
874
875 /*
876 * Generate a new packet command request in front of the request queue, before
877 * the current request, so that it will be processed immediately, on the next
878 * pass through the driver. The function below is called from the request
879 * handling part of the driver (the "bottom" part). Safe storage for the request
880 * should be allocated with ide_tape_next_{pc,rq}_storage() prior to that.
881 *
882 * Memory for those requests is pre-allocated at initialization time, and is
883 * limited to IDETAPE_PC_STACK requests. We assume that we have enough space for
884 * the maximum possible number of inter-dependent packet commands.
885 *
886 * The higher level of the driver - The ioctl handler and the character device
887 * handling functions should queue request to the lower level part and wait for
888 * their completion using idetape_queue_pc_tail or idetape_queue_rw_tail.
889 */
890 static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
891 struct request *rq)
892 {
893 struct ide_tape_obj *tape = drive->driver_data;
894
895 idetape_init_rq(rq, REQ_IDETAPE_PC1);
896 rq->buffer = (char *) pc;
897 rq->rq_disk = tape->disk;
898 (void) ide_do_drive_cmd(drive, rq, ide_preempt);
899 }
900
901 /*
902 * idetape_retry_pc is called when an error was detected during the
903 * last packet command. We queue a request sense packet command at
904 * the head of the request queue.
905 */
906 static ide_startstop_t idetape_retry_pc(ide_drive_t *drive)
907 {
908 idetape_tape_t *tape = drive->driver_data;
909 struct ide_atapi_pc *pc;
910 struct request *rq;
911
912 (void)ide_read_error(drive);
913 pc = idetape_next_pc_storage(drive);
914 rq = idetape_next_rq_storage(drive);
915 idetape_create_request_sense_cmd(pc);
916 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
917 idetape_queue_pc_head(drive, pc, rq);
918 return ide_stopped;
919 }
920
921 /*
922 * Postpone the current request so that ide.c will be able to service requests
923 * from another device on the same hwgroup while we are polling for DSC.
924 */
925 static void idetape_postpone_request(ide_drive_t *drive)
926 {
927 idetape_tape_t *tape = drive->driver_data;
928
929 debug_log(DBG_PROCS, "Enter %s\n", __func__);
930
931 tape->postponed_rq = HWGROUP(drive)->rq;
932 ide_stall_queue(drive, tape->dsc_poll_freq);
933 }
934
935 typedef void idetape_io_buf(ide_drive_t *, struct ide_atapi_pc *, unsigned int);
936
937 /*
938 * This is the usual interrupt handler which will be called during a packet
939 * command. We will transfer some of the data (as requested by the drive) and
940 * will re-point the interrupt handler to us. When the data transfer is finished, we
941 * will act according to the algorithm described before
942 * idetape_issue_pc.
943 */
944 static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
945 {
946 ide_hwif_t *hwif = drive->hwif;
947 idetape_tape_t *tape = drive->driver_data;
948 struct ide_atapi_pc *pc = tape->pc;
949 xfer_func_t *xferfunc;
950 idetape_io_buf *iobuf;
951 unsigned int temp;
952 #if SIMULATE_ERRORS
953 static int error_sim_count;
954 #endif
955 u16 bcount;
956 u8 stat, ireason;
957
958 debug_log(DBG_PROCS, "Enter %s - interrupt handler\n", __func__);
959
960 /* Clear the interrupt */
961 stat = ide_read_status(drive);
962
963 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
964 if (hwif->dma_ops->dma_end(drive) || (stat & ERR_STAT)) {
965 /*
966 * A DMA error is sometimes expected. For example,
967 * if the tape is crossing a filemark during a
968 * READ command, it will issue an irq and position
969 * itself before the filemark, so that only a partial
970 * data transfer will occur (which causes the DMA
971 * error). In that case, we will later ask the tape
972 * how much bytes of the original request were
973 * actually transferred (we can't receive that
974 * information from the DMA engine on most chipsets).
975 */
976
977 /*
978 * On the contrary, a DMA error is never expected;
979 * it usually indicates a hardware error or abort.
980 * If the tape crosses a filemark during a READ
981 * command, it will issue an irq and position itself
982 * after the filemark (not before). Only a partial
983 * data transfer will occur, but no DMA error.
984 * (AS, 19 Apr 2001)
985 */
986 pc->flags |= PC_FLAG_DMA_ERROR;
987 } else {
988 pc->xferred = pc->req_xfer;
989 idetape_update_buffers(pc);
990 }
991 debug_log(DBG_PROCS, "DMA finished\n");
992
993 }
994
995 /* No more interrupts */
996 if ((stat & DRQ_STAT) == 0) {
997 debug_log(DBG_SENSE, "Packet command completed, %d bytes"
998 " transferred\n", pc->xferred);
999
1000 pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
1001 local_irq_enable();
1002
1003 #if SIMULATE_ERRORS
1004 if ((pc->c[0] == WRITE_6 || pc->c[0] == READ_6) &&
1005 (++error_sim_count % 100) == 0) {
1006 printk(KERN_INFO "ide-tape: %s: simulating error\n",
1007 tape->name);
1008 stat |= ERR_STAT;
1009 }
1010 #endif
1011 if ((stat & ERR_STAT) && pc->c[0] == REQUEST_SENSE)
1012 stat &= ~ERR_STAT;
1013 if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) {
1014 /* Error detected */
1015 debug_log(DBG_ERR, "%s: I/O error\n", tape->name);
1016
1017 if (pc->c[0] == REQUEST_SENSE) {
1018 printk(KERN_ERR "ide-tape: I/O error in request"
1019 " sense command\n");
1020 return ide_do_reset(drive);
1021 }
1022 debug_log(DBG_ERR, "[cmd %x]: check condition\n",
1023 pc->c[0]);
1024
1025 /* Retry operation */
1026 return idetape_retry_pc(drive);
1027 }
1028 pc->error = 0;
1029 if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) &&
1030 (stat & SEEK_STAT) == 0) {
1031 /* Media access command */
1032 tape->dsc_polling_start = jiffies;
1033 tape->dsc_poll_freq = IDETAPE_DSC_MA_FAST;
1034 tape->dsc_timeout = jiffies + IDETAPE_DSC_MA_TIMEOUT;
1035 /* Allow ide.c to handle other requests */
1036 idetape_postpone_request(drive);
1037 return ide_stopped;
1038 }
1039 if (tape->failed_pc == pc)
1040 tape->failed_pc = NULL;
1041 /* Command finished - Call the callback function */
1042 return pc->idetape_callback(drive);
1043 }
1044
1045 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
1046 pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
1047 printk(KERN_ERR "ide-tape: The tape wants to issue more "
1048 "interrupts in DMA mode\n");
1049 printk(KERN_ERR "ide-tape: DMA disabled, reverting to PIO\n");
1050 ide_dma_off(drive);
1051 return ide_do_reset(drive);
1052 }
1053 /* Get the number of bytes to transfer on this interrupt. */
1054 bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
1055 hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
1056
1057 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
1058
1059 if (ireason & CD) {
1060 printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__);
1061 return ide_do_reset(drive);
1062 }
1063 if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) {
1064 /* Hopefully, we will never get here */
1065 printk(KERN_ERR "ide-tape: We wanted to %s, ",
1066 (ireason & IO) ? "Write" : "Read");
1067 printk(KERN_ERR "ide-tape: but the tape wants us to %s !\n",
1068 (ireason & IO) ? "Read" : "Write");
1069 return ide_do_reset(drive);
1070 }
1071 if (!(pc->flags & PC_FLAG_WRITING)) {
1072 /* Reading - Check that we have enough space */
1073 temp = pc->xferred + bcount;
1074 if (temp > pc->req_xfer) {
1075 if (temp > pc->buf_size) {
1076 printk(KERN_ERR "ide-tape: The tape wants to "
1077 "send us more data than expected "
1078 "- discarding data\n");
1079 ide_atapi_discard_data(drive, bcount);
1080 ide_set_handler(drive, &idetape_pc_intr,
1081 IDETAPE_WAIT_CMD, NULL);
1082 return ide_started;
1083 }
1084 debug_log(DBG_SENSE, "The tape wants to send us more "
1085 "data than expected - allowing transfer\n");
1086 }
1087 iobuf = &idetape_input_buffers;
1088 xferfunc = hwif->atapi_input_bytes;
1089 } else {
1090 iobuf = &idetape_output_buffers;
1091 xferfunc = hwif->atapi_output_bytes;
1092 }
1093
1094 if (pc->bh)
1095 iobuf(drive, pc, bcount);
1096 else
1097 xferfunc(drive, pc->cur_pos, bcount);
1098
1099 /* Update the current position */
1100 pc->xferred += bcount;
1101 pc->cur_pos += bcount;
1102
1103 debug_log(DBG_SENSE, "[cmd %x] transferred %d bytes on that intr.\n",
1104 pc->c[0], bcount);
1105
1106 /* And set the interrupt handler again */
1107 ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
1108 return ide_started;
1109 }
1110
1111 /*
1112 * Packet Command Interface
1113 *
1114 * The current Packet Command is available in tape->pc, and will not change
1115 * until we finish handling it. Each packet command is associated with a
1116 * callback function that will be called when the command is finished.
1117 *
1118 * The handling will be done in the following stages:
1119 *
1120 * 1. idetape_issue_pc will send the packet command to the drive, and will set
1121 * the interrupt handler to idetape_pc_intr.
1122 *
1123 * 2. On each interrupt, idetape_pc_intr will be called. This step will be
1124 * repeated until the device signals us that no more interrupts will be issued.
1125 *
1126 * 3. ATAPI Tape media access commands have immediate status with a delayed
1127 * process. In case of a successful initiation of a media access packet command,
1128 * the DSC bit will be set when the actual execution of the command is finished.
1129 * Since the tape drive will not issue an interrupt, we have to poll for this
1130 * event. In this case, we define the request as "low priority request" by
1131 * setting rq_status to IDETAPE_RQ_POSTPONED, set a timer to poll for DSC and
1132 * exit the driver.
1133 *
1134 * ide.c will then give higher priority to requests which originate from the
1135 * other device, until we change rq_status to RQ_ACTIVE.
1136 *
1137 * 4. When the packet command is finished, it will be checked for errors.
1138 *
1139 * 5. In case an error was found, we queue a request sense packet command in
1140 * front of the request queue and retry the operation up to
1141 * IDETAPE_MAX_PC_RETRIES times.
1142 *
1143 * 6. In case no error was found, or we decided to give up and not to retry
1144 * again, the callback function will be called and then we will handle the next
1145 * request.
1146 */
1147 static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
1148 {
1149 ide_hwif_t *hwif = drive->hwif;
1150 idetape_tape_t *tape = drive->driver_data;
1151 struct ide_atapi_pc *pc = tape->pc;
1152 int retries = 100;
1153 ide_startstop_t startstop;
1154 u8 ireason;
1155
1156 if (ide_wait_stat(&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) {
1157 printk(KERN_ERR "ide-tape: Strange, packet command initiated "
1158 "yet DRQ isn't asserted\n");
1159 return startstop;
1160 }
1161 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
1162 while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
1163 printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
1164 "a packet command, retrying\n");
1165 udelay(100);
1166 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
1167 if (retries == 0) {
1168 printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while "
1169 "issuing a packet command, ignoring\n");
1170 ireason |= CD;
1171 ireason &= ~IO;
1172 }
1173 }
1174 if ((ireason & CD) == 0 || (ireason & IO)) {
1175 printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
1176 "a packet command\n");
1177 return ide_do_reset(drive);
1178 }
1179 /* Set the interrupt routine */
1180 ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
1181 #ifdef CONFIG_BLK_DEV_IDEDMA
1182 /* Begin DMA, if necessary */
1183 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS)
1184 hwif->dma_ops->dma_start(drive);
1185 #endif
1186 /* Send the actual packet */
1187 HWIF(drive)->atapi_output_bytes(drive, pc->c, 12);
1188 return ide_started;
1189 }
1190
1191 static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
1192 struct ide_atapi_pc *pc)
1193 {
1194 ide_hwif_t *hwif = drive->hwif;
1195 idetape_tape_t *tape = drive->driver_data;
1196 int dma_ok = 0;
1197 u16 bcount;
1198
1199 if (tape->pc->c[0] == REQUEST_SENSE &&
1200 pc->c[0] == REQUEST_SENSE) {
1201 printk(KERN_ERR "ide-tape: possible ide-tape.c bug - "
1202 "Two request sense in serial were issued\n");
1203 }
1204
1205 if (tape->failed_pc == NULL && pc->c[0] != REQUEST_SENSE)
1206 tape->failed_pc = pc;
1207 /* Set the current packet command */
1208 tape->pc = pc;
1209
1210 if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
1211 (pc->flags & PC_FLAG_ABORT)) {
1212 /*
1213 * We will "abort" retrying a packet command in case legitimate
1214 * error code was received (crossing a filemark, or end of the
1215 * media, for example).
1216 */
1217 if (!(pc->flags & PC_FLAG_ABORT)) {
1218 if (!(pc->c[0] == TEST_UNIT_READY &&
1219 tape->sense_key == 2 && tape->asc == 4 &&
1220 (tape->ascq == 1 || tape->ascq == 8))) {
1221 printk(KERN_ERR "ide-tape: %s: I/O error, "
1222 "pc = %2x, key = %2x, "
1223 "asc = %2x, ascq = %2x\n",
1224 tape->name, pc->c[0],
1225 tape->sense_key, tape->asc,
1226 tape->ascq);
1227 }
1228 /* Giving up */
1229 pc->error = IDETAPE_ERROR_GENERAL;
1230 }
1231 tape->failed_pc = NULL;
1232 return pc->idetape_callback(drive);
1233 }
1234 debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
1235
1236 pc->retries++;
1237 /* We haven't transferred any data yet */
1238 pc->xferred = 0;
1239 pc->cur_pos = pc->buf;
1240 /* Request to transfer the entire buffer at once */
1241 bcount = pc->req_xfer;
1242
1243 if (pc->flags & PC_FLAG_DMA_ERROR) {
1244 pc->flags &= ~PC_FLAG_DMA_ERROR;
1245 printk(KERN_WARNING "ide-tape: DMA disabled, "
1246 "reverting to PIO\n");
1247 ide_dma_off(drive);
1248 }
1249 if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
1250 dma_ok = !hwif->dma_ops->dma_setup(drive);
1251
1252 ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
1253 IDE_TFLAG_OUT_DEVICE, bcount, dma_ok);
1254
1255 if (dma_ok)
1256 /* Will begin DMA later */
1257 pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
1258 if (test_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags)) {
1259 ide_execute_command(drive, WIN_PACKETCMD, &idetape_transfer_pc,
1260 IDETAPE_WAIT_CMD, NULL);
1261 return ide_started;
1262 } else {
1263 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
1264 return idetape_transfer_pc(drive);
1265 }
1266 }
1267
1268 static ide_startstop_t idetape_pc_callback(ide_drive_t *drive)
1269 {
1270 idetape_tape_t *tape = drive->driver_data;
1271
1272 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1273
1274 idetape_end_request(drive, tape->pc->error ? 0 : 1, 0);
1275 return ide_stopped;
1276 }
1277
1278 /* A mode sense command is used to "sense" tape parameters. */
1279 static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
1280 {
1281 idetape_init_pc(pc);
1282 pc->c[0] = MODE_SENSE;
1283 if (page_code != IDETAPE_BLOCK_DESCRIPTOR)
1284 /* DBD = 1 - Don't return block descriptors */
1285 pc->c[1] = 8;
1286 pc->c[2] = page_code;
1287 /*
1288 * Changed pc->c[3] to 0 (255 will at best return unused info).
1289 *
1290 * For SCSI this byte is defined as subpage instead of high byte
1291 * of length and some IDE drives seem to interpret it this way
1292 * and return an error when 255 is used.
1293 */
1294 pc->c[3] = 0;
1295 /* We will just discard data in that case */
1296 pc->c[4] = 255;
1297 if (page_code == IDETAPE_BLOCK_DESCRIPTOR)
1298 pc->req_xfer = 12;
1299 else if (page_code == IDETAPE_CAPABILITIES_PAGE)
1300 pc->req_xfer = 24;
1301 else
1302 pc->req_xfer = 50;
1303 pc->idetape_callback = &idetape_pc_callback;
1304 }
1305
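/*
 * Called when the REQ_IDETAPE_PC2 request is serviced after a media access
 * command: if DSC is set, check for errors and complete the command through
 * its callback (queuing a request sense retry on error); otherwise fail it
 * with IDETAPE_ERROR_GENERAL.
 */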
1306 static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
1307 {
1308 idetape_tape_t *tape = drive->driver_data;
1309 struct ide_atapi_pc *pc = tape->pc;
1310 u8 stat;
1311
1312 stat = ide_read_status(drive);
1313
1314 if (stat & SEEK_STAT) {
1315 if (stat & ERR_STAT) {
1316 /* Error detected */
1317 if (pc->c[0] != TEST_UNIT_READY)
1318 printk(KERN_ERR "ide-tape: %s: I/O error, ",
1319 tape->name);
1320 /* Retry operation */
1321 return idetape_retry_pc(drive);
1322 }
1323 pc->error = 0;
1324 if (tape->failed_pc == pc)
1325 tape->failed_pc = NULL;
1326 } else {
1327 pc->error = IDETAPE_ERROR_GENERAL;
1328 tape->failed_pc = NULL;
1329 }
1330 return pc->idetape_callback(drive);
1331 }
1332
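/*
 * Completion callback for READ/WRITE packet commands: update the throughput
 * statistics and the current frame number, then finish the request.
 */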
1333 static ide_startstop_t idetape_rw_callback(ide_drive_t *drive)
1334 {
1335 idetape_tape_t *tape = drive->driver_data;
1336 struct request *rq = HWGROUP(drive)->rq;
1337 int blocks = tape->pc->xferred / tape->blk_size;
1338
1339 tape->avg_size += blocks * tape->blk_size;
1340 tape->insert_size += blocks * tape->blk_size;
1341 if (tape->insert_size > 1024 * 1024)
1342 tape->measure_insert_time = 1;
1343 if (tape->measure_insert_time) {
1344 tape->measure_insert_time = 0;
1345 tape->insert_time = jiffies;
1346 tape->insert_size = 0;
1347 }
1348 if (time_after(jiffies, tape->insert_time))
1349 tape->insert_speed = tape->insert_size / 1024 * HZ /
1350 (jiffies - tape->insert_time);
1351 if (time_after_eq(jiffies, tape->avg_time + HZ)) {
1352 tape->avg_speed = tape->avg_size * HZ /
1353 (jiffies - tape->avg_time) / 1024;
1354 tape->avg_size = 0;
1355 tape->avg_time = jiffies;
1356 }
1357 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1358
1359 tape->first_frame += blocks;
1360 rq->current_nr_sectors -= blocks;
1361
1362 if (!tape->pc->error)
1363 idetape_end_request(drive, 1, 0);
1364 else
1365 idetape_end_request(drive, tape->pc->error, 0);
1366 return ide_stopped;
1367 }
1368
1369 static void idetape_create_read_cmd(idetape_tape_t *tape,
1370 struct ide_atapi_pc *pc,
1371 unsigned int length, struct idetape_bh *bh)
1372 {
1373 idetape_init_pc(pc);
1374 pc->c[0] = READ_6;
1375 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
1376 pc->c[1] = 1;
1377 pc->idetape_callback = &idetape_rw_callback;
1378 pc->bh = bh;
1379 atomic_set(&bh->b_count, 0);
1380 pc->buf = NULL;
1381 pc->buf_size = length * tape->blk_size;
1382 pc->req_xfer = pc->buf_size;
1383 if (pc->req_xfer == tape->stage_size)
1384 pc->flags |= PC_FLAG_DMA_RECOMMENDED;
1385 }
1386
1387 static void idetape_create_write_cmd(idetape_tape_t *tape,
1388 struct ide_atapi_pc *pc,
1389 unsigned int length, struct idetape_bh *bh)
1390 {
1391 idetape_init_pc(pc);
1392 pc->c[0] = WRITE_6;
1393 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
1394 pc->c[1] = 1;
1395 pc->idetape_callback = &idetape_rw_callback;
1396 pc->flags |= PC_FLAG_WRITING;
1397 pc->bh = bh;
1398 pc->b_data = bh->b_data;
1399 pc->b_count = atomic_read(&bh->b_count);
1400 pc->buf = NULL;
1401 pc->buf_size = length * tape->blk_size;
1402 pc->req_xfer = pc->buf_size;
1403 if (pc->req_xfer == tape->stage_size)
1404 pc->flags |= PC_FLAG_DMA_RECOMMENDED;
1405 }
1406
1407 static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1408 struct request *rq, sector_t block)
1409 {
1410 idetape_tape_t *tape = drive->driver_data;
1411 struct ide_atapi_pc *pc = NULL;
1412 struct request *postponed_rq = tape->postponed_rq;
1413 u8 stat;
1414
1415 debug_log(DBG_SENSE, "sector: %ld, nr_sectors: %ld,"
1416 " current_nr_sectors: %d\n",
1417 rq->sector, rq->nr_sectors, rq->current_nr_sectors);
1418
1419 if (!blk_special_request(rq)) {
1420 /* We do not support buffer cache originated requests. */
1421 printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
1422 "request queue (%d)\n", drive->name, rq->cmd_type);
1423 ide_end_request(drive, 0, 0);
1424 return ide_stopped;
1425 }
1426
1427 /* Retry a failed packet command */
1428 if (tape->failed_pc && tape->pc->c[0] == REQUEST_SENSE)
1429 return idetape_issue_pc(drive, tape->failed_pc);
1430
1431 if (postponed_rq != NULL)
1432 if (rq != postponed_rq) {
1433 printk(KERN_ERR "ide-tape: ide-tape.c bug - "
1434 "Two DSC requests were queued\n");
1435 idetape_end_request(drive, 0, 0);
1436 return ide_stopped;
1437 }
1438
1439 tape->postponed_rq = NULL;
1440
1441 /*
1442 * If the tape is still busy, postpone our request and service
1443 * the other device meanwhile.
1444 */
1445 stat = ide_read_status(drive);
1446
1447 if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2))
1448 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
1449
1450 if (drive->post_reset == 1) {
1451 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
1452 drive->post_reset = 0;
1453 }
1454
1455 if (time_after(jiffies, tape->insert_time))
1456 tape->insert_speed = tape->insert_size / 1024 * HZ /
1457 (jiffies - tape->insert_time);
1458 if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) &&
1459 (stat & SEEK_STAT) == 0) {
1460 if (postponed_rq == NULL) {
1461 tape->dsc_polling_start = jiffies;
1462 tape->dsc_poll_freq = tape->best_dsc_rw_freq;
1463 tape->dsc_timeout = jiffies + IDETAPE_DSC_RW_TIMEOUT;
1464 } else if (time_after(jiffies, tape->dsc_timeout)) {
1465 printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
1466 tape->name);
1467 if (rq->cmd[0] & REQ_IDETAPE_PC2) {
1468 idetape_media_access_finished(drive);
1469 return ide_stopped;
1470 } else {
1471 return ide_do_reset(drive);
1472 }
1473 } else if (time_after(jiffies,
1474 tape->dsc_polling_start +
1475 IDETAPE_DSC_MA_THRESHOLD))
1476 tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
1477 idetape_postpone_request(drive);
1478 return ide_stopped;
1479 }
1480 if (rq->cmd[0] & REQ_IDETAPE_READ) {
1481 tape->postpone_cnt = 0;
1482 pc = idetape_next_pc_storage(drive);
1483 idetape_create_read_cmd(tape, pc, rq->current_nr_sectors,
1484 (struct idetape_bh *)rq->special);
1485 goto out;
1486 }
1487 if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
1488 tape->postpone_cnt = 0;
1489 pc = idetape_next_pc_storage(drive);
1490 idetape_create_write_cmd(tape, pc, rq->current_nr_sectors,
1491 (struct idetape_bh *)rq->special);
1492 goto out;
1493 }
1494 if (rq->cmd[0] & REQ_IDETAPE_PC1) {
1495 pc = (struct ide_atapi_pc *) rq->buffer;
1496 rq->cmd[0] &= ~(REQ_IDETAPE_PC1);
1497 rq->cmd[0] |= REQ_IDETAPE_PC2;
1498 goto out;
1499 }
1500 if (rq->cmd[0] & REQ_IDETAPE_PC2) {
1501 idetape_media_access_finished(drive);
1502 return ide_stopped;
1503 }
1504 BUG();
1505 out:
1506 return idetape_issue_pc(drive, pc);
1507 }
1508
1509 /* Pipeline related functions */
1510
1511 /*
1512 * The function below uses __get_free_page to allocate a pipeline stage, along
1513 * with all the necessary small buffers which together make a buffer of size
1514 * tape->stage_size (or a bit more). We attempt to combine sequential pages as
1515 * much as possible.
1516 *
1517 * It returns a pointer to the newly allocated stage, or NULL if we can't (or
1518 * don't want to) allocate a stage.
1519 *
1520 * Pipeline stages are optional and are used to increase performance. If we
1521 * can't allocate them, we'll manage without them.
1522 */
1523 static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full,
1524 int clear)
1525 {
1526 idetape_stage_t *stage;
1527 struct idetape_bh *prev_bh, *bh;
1528 int pages = tape->pages_per_stage;
1529 char *b_data = NULL;
1530
1531 stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL);
1532 if (!stage)
1533 return NULL;
1534 stage->next = NULL;
1535
1536 stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
1537 bh = stage->bh;
1538 if (bh == NULL)
1539 goto abort;
1540 bh->b_reqnext = NULL;
1541 bh->b_data = (char *) __get_free_page(GFP_KERNEL);
1542 if (!bh->b_data)
1543 goto abort;
1544 if (clear)
1545 memset(bh->b_data, 0, PAGE_SIZE);
1546 bh->b_size = PAGE_SIZE;
1547 atomic_set(&bh->b_count, full ? bh->b_size : 0);
1548
1549 while (--pages) {
1550 b_data = (char *) __get_free_page(GFP_KERNEL);
1551 if (!b_data)
1552 goto abort;
1553 if (clear)
1554 memset(b_data, 0, PAGE_SIZE);
1555 if (bh->b_data == b_data + PAGE_SIZE) {
1556 bh->b_size += PAGE_SIZE;
1557 bh->b_data -= PAGE_SIZE;
1558 if (full)
1559 atomic_add(PAGE_SIZE, &bh->b_count);
1560 continue;
1561 }
1562 if (b_data == bh->b_data + bh->b_size) {
1563 bh->b_size += PAGE_SIZE;
1564 if (full)
1565 atomic_add(PAGE_SIZE, &bh->b_count);
1566 continue;
1567 }
1568 prev_bh = bh;
1569 bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
1570 if (!bh) {
1571 free_page((unsigned long) b_data);
1572 goto abort;
1573 }
1574 bh->b_reqnext = NULL;
1575 bh->b_data = b_data;
1576 bh->b_size = PAGE_SIZE;
1577 atomic_set(&bh->b_count, full ? bh->b_size : 0);
1578 prev_bh->b_reqnext = bh;
1579 }
1580 bh->b_size -= tape->excess_bh_size;
1581 if (full)
1582 atomic_sub(tape->excess_bh_size, &bh->b_count);
1583 return stage;
1584 abort:
1585 __idetape_kfree_stage(stage);
1586 return NULL;
1587 }
1588
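/*
 * Copy n bytes from the user buffer into the buffer list starting at tape->bh,
 * advancing tape->bh as buffers fill up. Returns 1 if copy_from_user() could
 * not copy everything, 0 otherwise.
 */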
1589 static int idetape_copy_stage_from_user(idetape_tape_t *tape,
1590 const char __user *buf, int n)
1591 {
1592 struct idetape_bh *bh = tape->bh;
1593 int count;
1594 int ret = 0;
1595
1596 while (n) {
1597 if (bh == NULL) {
1598 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
1599 __func__);
1600 return 1;
1601 }
1602 count = min((unsigned int)
1603 (bh->b_size - atomic_read(&bh->b_count)),
1604 (unsigned int)n);
1605 if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf,
1606 count))
1607 ret = 1;
1608 n -= count;
1609 atomic_add(count, &bh->b_count);
1610 buf += count;
1611 if (atomic_read(&bh->b_count) == bh->b_size) {
1612 bh = bh->b_reqnext;
1613 if (bh)
1614 atomic_set(&bh->b_count, 0);
1615 }
1616 }
1617 tape->bh = bh;
1618 return ret;
1619 }
1620
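/*
 * Copy n bytes from the buffer list (tracked by tape->b_data and
 * tape->b_count) to the user buffer, advancing tape->bh as buffers drain.
 * Returns 1 if copy_to_user() could not copy everything, 0 otherwise.
 */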
1621 static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
1622 int n)
1623 {
1624 struct idetape_bh *bh = tape->bh;
1625 int count;
1626 int ret = 0;
1627
1628 while (n) {
1629 if (bh == NULL) {
1630 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
1631 __func__);
1632 return 1;
1633 }
1634 count = min(tape->b_count, n);
1635 if (copy_to_user(buf, tape->b_data, count))
1636 ret = 1;
1637 n -= count;
1638 tape->b_data += count;
1639 tape->b_count -= count;
1640 buf += count;
1641 if (!tape->b_count) {
1642 bh = bh->b_reqnext;
1643 tape->bh = bh;
1644 if (bh) {
1645 tape->b_data = bh->b_data;
1646 tape->b_count = atomic_read(&bh->b_count);
1647 }
1648 }
1649 }
1650 return ret;
1651 }
1652
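/*
 * Reset the merge stage buffer pointers according to the current character
 * device transfer direction: start with an empty buffer for writes, or start
 * draining the first buffer for reads.
 */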
1653 static void idetape_init_merge_stage(idetape_tape_t *tape)
1654 {
1655 struct idetape_bh *bh = tape->merge_stage->bh;
1656
1657 tape->bh = bh;
1658 if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
1659 atomic_set(&bh->b_count, 0);
1660 else {
1661 tape->b_data = bh->b_data;
1662 tape->b_count = atomic_read(&bh->b_count);
1663 }
1664 }
1665
1666 /* Install a completion in a pending request and sleep until it is serviced. The
1667 * caller should ensure that the request will not be serviced before we install
1668 * the completion (usually by disabling interrupts).
1669 */
1670 static void idetape_wait_for_request(ide_drive_t *drive, struct request *rq)
1671 {
1672 DECLARE_COMPLETION_ONSTACK(wait);
1673 idetape_tape_t *tape = drive->driver_data;
1674
1675 if (rq == NULL || !blk_special_request(rq)) {
1676 printk(KERN_ERR "ide-tape: bug: Trying to sleep on non-valid"
1677 " request\n");
1678 return;
1679 }
1680 rq->end_io_data = &wait;
1681 rq->end_io = blk_end_sync_rq;
1682 spin_unlock_irq(&tape->lock);
1683 wait_for_completion(&wait);
1684 /* The stage and its struct request have been deallocated */
1685 spin_lock_irq(&tape->lock);
1686 }
1687
1688 static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive)
1689 {
1690 idetape_tape_t *tape = drive->driver_data;
1691 u8 *readpos = tape->pc->buf;
1692
1693 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1694
1695 if (!tape->pc->error) {
1696 debug_log(DBG_SENSE, "BOP - %s\n",
1697 (readpos[0] & 0x80) ? "Yes" : "No");
1698 debug_log(DBG_SENSE, "EOP - %s\n",
1699 (readpos[0] & 0x40) ? "Yes" : "No");
1700
1701 if (readpos[0] & 0x4) {
1702 printk(KERN_INFO "ide-tape: Block location is unknown "
1703 "to the tape\n");
1704 clear_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
1705 idetape_end_request(drive, 0, 0);
1706 } else {
1707 debug_log(DBG_SENSE, "Block Location - %u\n",
1708 be32_to_cpu(*(u32 *)&readpos[4]));
1709
1710 tape->partition = readpos[1];
1711 tape->first_frame =
1712 be32_to_cpu(*(u32 *)&readpos[4]);
1713 set_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
1714 idetape_end_request(drive, 1, 0);
1715 }
1716 } else {
1717 idetape_end_request(drive, 0, 0);
1718 }
1719 return ide_stopped;
1720 }
1721
1722 /*
1723 * Write a filemark if write_filemark=1. Flush the device buffers without
1724 * writing a filemark otherwise.
1725 */
1726 static void idetape_create_write_filemark_cmd(ide_drive_t *drive,
1727 struct ide_atapi_pc *pc, int write_filemark)
1728 {
1729 idetape_init_pc(pc);
1730 pc->c[0] = WRITE_FILEMARKS;
1731 pc->c[4] = write_filemark;
1732 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1733 pc->idetape_callback = &idetape_pc_callback;
1734 }
1735
1736 static void idetape_create_test_unit_ready_cmd(struct ide_atapi_pc *pc)
1737 {
1738 idetape_init_pc(pc);
1739 pc->c[0] = TEST_UNIT_READY;
1740 pc->idetape_callback = &idetape_pc_callback;
1741 }
1742
1743 /*
1744 * We add a special packet command request to the tail of the request queue, and
1745 * wait for it to be serviced. This is not to be called from within the request
1746 * handling part of the driver! Here we allocate data on the stack and it is
1747 * valid until the request is finished. This is not the case for the bottom part
1748 * of the driver, where we are always leaving the functions to wait for an
1749 * interrupt or a timer event.
1750 *
1751 * From the bottom part of the driver, we should allocate safe memory using
1752 * idetape_next_pc_storage() and ide_tape_next_rq_storage(), and add the request
1753 * to the request list without waiting for it to be serviced! In that case, we
1754 * usually use idetape_queue_pc_head().
1755 */
1756 static int __idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
1757 {
1758 struct ide_tape_obj *tape = drive->driver_data;
1759 struct request rq;
1760
1761 idetape_init_rq(&rq, REQ_IDETAPE_PC1);
1762 rq.buffer = (char *) pc;
1763 rq.rq_disk = tape->disk;
1764 return ide_do_drive_cmd(drive, &rq, ide_wait);
1765 }
1766
1767 static void idetape_create_load_unload_cmd(ide_drive_t *drive,
1768 struct ide_atapi_pc *pc, int cmd)
1769 {
1770 idetape_init_pc(pc);
1771 pc->c[0] = START_STOP;
1772 pc->c[4] = cmd;
1773 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1774 pc->idetape_callback = &idetape_pc_callback;
1775 }
1776
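/*
 * Poll TEST UNIT READY until the drive reports ready or the timeout expires.
 * If the drive reports that no medium is present or that an initializing
 * command is required (standard "not ready" sense data), try to load the
 * medium once; while the drive is merely becoming ready, keep polling.
 */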
1777 static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
1778 {
1779 idetape_tape_t *tape = drive->driver_data;
1780 struct ide_atapi_pc pc;
1781 int load_attempted = 0;
1782
1783 /* Wait for the tape to become ready */
1784 set_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
1785 timeout += jiffies;
1786 while (time_before(jiffies, timeout)) {
1787 idetape_create_test_unit_ready_cmd(&pc);
1788 if (!__idetape_queue_pc_tail(drive, &pc))
1789 return 0;
1790 if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
1791 || (tape->asc == 0x3A)) {
1792 /* no media */
1793 if (load_attempted)
1794 return -ENOMEDIUM;
1795 idetape_create_load_unload_cmd(drive, &pc,
1796 IDETAPE_LU_LOAD_MASK);
1797 __idetape_queue_pc_tail(drive, &pc);
1798 load_attempted = 1;
1799 /* not about to be ready */
1800 } else if (!(tape->sense_key == 2 && tape->asc == 4 &&
1801 (tape->ascq == 1 || tape->ascq == 8)))
1802 return -EIO;
1803 msleep(100);
1804 }
1805 return -EIO;
1806 }
1807
1808 static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
1809 {
1810 return __idetape_queue_pc_tail(drive, pc);
1811 }
1812
1813 static int idetape_flush_tape_buffers(ide_drive_t *drive)
1814 {
1815 struct ide_atapi_pc pc;
1816 int rc;
1817
1818 idetape_create_write_filemark_cmd(drive, &pc, 0);
1819 rc = idetape_queue_pc_tail(drive, &pc);
1820 if (rc)
1821 return rc;
1822 idetape_wait_ready(drive, 60 * 5 * HZ);
1823 return 0;
1824 }
1825
1826 static void idetape_create_read_position_cmd(struct ide_atapi_pc *pc)
1827 {
1828 idetape_init_pc(pc);
1829 pc->c[0] = READ_POSITION;
1830 pc->req_xfer = 20;
1831 pc->idetape_callback = &idetape_read_position_callback;
1832 }
1833
1834 static int idetape_read_position(ide_drive_t *drive)
1835 {
1836 idetape_tape_t *tape = drive->driver_data;
1837 struct ide_atapi_pc pc;
1838 int position;
1839
1840 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1841
1842 idetape_create_read_position_cmd(&pc);
1843 if (idetape_queue_pc_tail(drive, &pc))
1844 return -1;
1845 position = tape->first_frame;
1846 return position;
1847 }
1848
1849 static void idetape_create_locate_cmd(ide_drive_t *drive,
1850 struct ide_atapi_pc *pc,
1851 unsigned int block, u8 partition, int skip)
1852 {
1853 idetape_init_pc(pc);
1854 pc->c[0] = POSITION_TO_ELEMENT;
1855 pc->c[1] = 2;
1856 put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[3]);
1857 pc->c[8] = partition;
1858 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1859 pc->idetape_callback = &idetape_pc_callback;
1860 }
1861
1862 static int idetape_create_prevent_cmd(ide_drive_t *drive,
1863 struct ide_atapi_pc *pc, int prevent)
1864 {
1865 idetape_tape_t *tape = drive->driver_data;
1866
1867 /* bail out if the device does not report locking support (capabilities page) */
1868 if (!(tape->caps[6] & 0x01))
1869 return 0;
1870
1871 idetape_init_pc(pc);
1872 pc->c[0] = ALLOW_MEDIUM_REMOVAL;
1873 pc->c[4] = prevent;
1874 pc->idetape_callback = &idetape_pc_callback;
1875 return 1;
1876 }
1877
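/*
 * Throw away all buffered read-ahead data (the merge stage and any pending
 * pipeline stages) and return the number of tape blocks that were discarded,
 * counting a buffered filemark as one block, so the caller can reposition the
 * tape accordingly.
 */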
1878 static int __idetape_discard_read_pipeline(ide_drive_t *drive)
1879 {
1880 idetape_tape_t *tape = drive->driver_data;
1881 unsigned long flags;
1882 int cnt;
1883
1884 if (tape->chrdev_dir != IDETAPE_DIR_READ)
1885 return 0;
1886
1887 /* Remove merge stage. */
1888 cnt = tape->merge_stage_size / tape->blk_size;
1889 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
1890 ++cnt; /* Filemarks count as 1 sector */
1891 tape->merge_stage_size = 0;
1892 if (tape->merge_stage != NULL) {
1893 __idetape_kfree_stage(tape->merge_stage);
1894 tape->merge_stage = NULL;
1895 }
1896
1897 /* Clear pipeline flags. */
1898 clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
1899 tape->chrdev_dir = IDETAPE_DIR_NONE;
1900
1901 /* Remove pipeline stages. */
1902 if (tape->first_stage == NULL)
1903 return 0;
1904
1905 spin_lock_irqsave(&tape->lock, flags);
1906 tape->next_stage = NULL;
1907 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags))
1908 idetape_wait_for_request(drive, tape->active_data_rq);
1909 spin_unlock_irqrestore(&tape->lock, flags);
1910
1911 while (tape->first_stage != NULL) {
1912 struct request *rq_ptr = &tape->first_stage->rq;
1913
1914 cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
1915 if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
1916 ++cnt;
1917 idetape_remove_stage_head(drive);
1918 }
1919 tape->nr_pending_stages = 0;
1920 tape->max_stages = tape->min_pipeline;
1921 return cnt;
1922 }
1923
1924 /*
1925 * Position the tape to the requested block using the LOCATE packet command.
1926 * A READ POSITION command is then issued to check where we are positioned. Like
1927 * all higher level operations, we queue the commands at the tail of the request
1928 * queue and wait for their completion.
1929 */
1930 static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
1931 u8 partition, int skip)
1932 {
1933 idetape_tape_t *tape = drive->driver_data;
1934 int retval;
1935 struct ide_atapi_pc pc;
1936
1937 if (tape->chrdev_dir == IDETAPE_DIR_READ)
1938 __idetape_discard_read_pipeline(drive);
1939 idetape_wait_ready(drive, 60 * 5 * HZ);
1940 idetape_create_locate_cmd(drive, &pc, block, partition, skip);
1941 retval = idetape_queue_pc_tail(drive, &pc);
1942 if (retval)
1943 return (retval);
1944
1945 idetape_create_read_position_cmd(&pc);
1946 return (idetape_queue_pc_tail(drive, &pc));
1947 }
1948
1949 static void idetape_discard_read_pipeline(ide_drive_t *drive,
1950 int restore_position)
1951 {
1952 idetape_tape_t *tape = drive->driver_data;
1953 int cnt;
1954 int seek, position;
1955
1956 cnt = __idetape_discard_read_pipeline(drive);
1957 if (restore_position) {
1958 position = idetape_read_position(drive);
1959 seek = position > cnt ? position - cnt : 0;
1960 if (idetape_position_tape(drive, seek, 0, 0)) {
1961 printk(KERN_INFO "ide-tape: %s: position_tape failed in"
1962 " discard_pipeline()\n", tape->name);
1963 return;
1964 }
1965 }
1966 }
1967
1968 /*
1969 * Generate a read/write request for the block device interface and wait for it
1970 * to be serviced.
1971 */
1972 static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
1973 struct idetape_bh *bh)
1974 {
1975 idetape_tape_t *tape = drive->driver_data;
1976 struct request rq;
1977
1978 debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
1979
1980 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
1981 printk(KERN_ERR "ide-tape: bug: the pipeline is active in %s\n",
1982 __func__);
1983 return (0);
1984 }
1985
1986 idetape_init_rq(&rq, cmd);
1987 rq.rq_disk = tape->disk;
1988 rq.special = (void *)bh;
1989 rq.sector = tape->first_frame;
1990 rq.nr_sectors = blocks;
1991 rq.current_nr_sectors = blocks;
1992 (void) ide_do_drive_cmd(drive, &rq, ide_wait);
1993
1994 if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
1995 return 0;
1996
1997 if (tape->merge_stage)
1998 idetape_init_merge_stage(tape);
1999 if (rq.errors == IDETAPE_ERROR_GENERAL)
2000 return -EIO;
2001 return (tape->blk_size * (blocks-rq.current_nr_sectors));
2002 }
2003
2004 /* Start servicing the pipeline stages, beginning with tape->next_stage. */
2005 static void idetape_plug_pipeline(ide_drive_t *drive)
2006 {
2007 idetape_tape_t *tape = drive->driver_data;
2008
2009 if (tape->next_stage == NULL)
2010 return;
2011 if (!test_and_set_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
2012 idetape_activate_next_stage(drive);
2013 (void) ide_do_drive_cmd(drive, tape->active_data_rq, ide_end);
2014 }
2015 }
2016
2017 static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc)
2018 {
2019 idetape_init_pc(pc);
2020 pc->c[0] = INQUIRY;
2021 pc->c[4] = 254;
2022 pc->req_xfer = 254;
2023 pc->idetape_callback = &idetape_pc_callback;
2024 }
2025
2026 static void idetape_create_rewind_cmd(ide_drive_t *drive,
2027 struct ide_atapi_pc *pc)
2028 {
2029 idetape_init_pc(pc);
2030 pc->c[0] = REZERO_UNIT;
2031 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
2032 pc->idetape_callback = &idetape_pc_callback;
2033 }
2034
2035 static void idetape_create_erase_cmd(struct ide_atapi_pc *pc)
2036 {
2037 idetape_init_pc(pc);
2038 pc->c[0] = ERASE;
2039 pc->c[1] = 1;
2040 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
2041 pc->idetape_callback = &idetape_pc_callback;
2042 }
2043
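/*
 * Build a SPACE command. Note that the count is first written big endian into
 * bytes 1-4 and byte 1 is then overwritten with the space code, so the 24-bit
 * count effectively ends up in bytes 2-4 of the CDB.
 */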
2044 static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
2045 {
2046 idetape_init_pc(pc);
2047 pc->c[0] = SPACE;
2048 put_unaligned(cpu_to_be32(count), (unsigned int *) &pc->c[1]);
2049 pc->c[1] = cmd;
2050 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
2051 pc->idetape_callback = &idetape_pc_callback;
2052 }
2053
2054 /* Queue up a character device originated write request. */
2055 static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
2056 {
2057 idetape_tape_t *tape = drive->driver_data;
2058 unsigned long flags;
2059
2060 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
2061
2062 /* Wait for the pipeline to drain, then queue the request. Beware possible races. */
2063 while (1) {
2064 spin_lock_irqsave(&tape->lock, flags);
2065 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
2066 idetape_wait_for_request(drive, tape->active_data_rq);
2067 spin_unlock_irqrestore(&tape->lock, flags);
2068 } else {
2069 spin_unlock_irqrestore(&tape->lock, flags);
2070 idetape_plug_pipeline(drive);
2071 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
2072 &tape->flags))
2073 continue;
2074 return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
2075 blocks, tape->merge_stage->bh);
2076 }
2077 }
2078 }
2079
2080 /*
2081 * Wait until all pending pipeline requests are serviced. Typically called on
2082 * device close.
2083 */
2084 static void idetape_wait_for_pipeline(ide_drive_t *drive)
2085 {
2086 idetape_tape_t *tape = drive->driver_data;
2087 unsigned long flags;
2088
2089 while (tape->next_stage || test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
2090 &tape->flags)) {
2091 idetape_plug_pipeline(drive);
2092 spin_lock_irqsave(&tape->lock, flags);
2093 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags))
2094 idetape_wait_for_request(drive, tape->active_data_rq);
2095 spin_unlock_irqrestore(&tape->lock, flags);
2096 }
2097 }
2098
2099 static void idetape_empty_write_pipeline(ide_drive_t *drive)
2100 {
2101 idetape_tape_t *tape = drive->driver_data;
2102 int blocks, min;
2103 struct idetape_bh *bh;
2104
2105 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
2106 printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline,"
2107 " but we are not writing.\n");
2108 return;
2109 }
2110 if (tape->merge_stage_size > tape->stage_size) {
2111 printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n");
2112 tape->merge_stage_size = tape->stage_size;
2113 }
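/*
 * Flush whatever is left in the merge buffer. If it ends in a partial block,
 * zero-fill the remainder so that only whole blocks are sent to the drive.
 */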
2114 if (tape->merge_stage_size) {
2115 blocks = tape->merge_stage_size / tape->blk_size;
2116 if (tape->merge_stage_size % tape->blk_size) {
2117 unsigned int i;
2118
2119 blocks++;
2120 i = tape->blk_size - tape->merge_stage_size %
2121 tape->blk_size;
2122 bh = tape->bh->b_reqnext;
2123 while (bh) {
2124 atomic_set(&bh->b_count, 0);
2125 bh = bh->b_reqnext;
2126 }
2127 bh = tape->bh;
2128 while (i) {
2129 if (bh == NULL) {
2130 printk(KERN_INFO "ide-tape: bug,"
2131 " bh NULL\n");
2132 break;
2133 }
2134 min = min(i, (unsigned int)(bh->b_size -
2135 atomic_read(&bh->b_count)));
2136 memset(bh->b_data + atomic_read(&bh->b_count),
2137 0, min);
2138 atomic_add(min, &bh->b_count);
2139 i -= min;
2140 bh = bh->b_reqnext;
2141 }
2142 }
2143 (void) idetape_add_chrdev_write_request(drive, blocks);
2144 tape->merge_stage_size = 0;
2145 }
2146 idetape_wait_for_pipeline(drive);
2147 if (tape->merge_stage != NULL) {
2148 __idetape_kfree_stage(tape->merge_stage);
2149 tape->merge_stage = NULL;
2150 }
2151 clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
2152 tape->chrdev_dir = IDETAPE_DIR_NONE;
2153
2154 /*
2155 * On the next backup, perform the feedback loop again. (We don't want to
2156 * keep the tuned pipeline size between backups, as some systems are
2157 * constantly on, and the system load can be totally different on the
2158 * next backup).
2159 */
2160 tape->max_stages = tape->min_pipeline;
2161 if (tape->first_stage != NULL ||
2162 tape->next_stage != NULL ||
2163 tape->last_stage != NULL ||
2164 tape->nr_stages != 0) {
2165 printk(KERN_ERR "ide-tape: ide-tape pipeline bug, "
2166 "first_stage %p, next_stage %p, "
2167 "last_stage %p, nr_stages %d\n",
2168 tape->first_stage, tape->next_stage,
2169 tape->last_stage, tape->nr_stages);
2170 }
2171 }
2172
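/*
 * Prepare the character device for reading: drain and flush any pending write
 * data, allocate the merge stage, switch the DSC handshake to buffer available
 * mode with a zero-length read, and kick the read-ahead pipeline when enough
 * stages are pending.
 */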
2173 static int idetape_init_read(ide_drive_t *drive, int max_stages)
2174 {
2175 idetape_tape_t *tape = drive->driver_data;
2176 int bytes_read;
2177
2178 /* Initialize read operation */
2179 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
2180 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
2181 idetape_empty_write_pipeline(drive);
2182 idetape_flush_tape_buffers(drive);
2183 }
2184 if (tape->merge_stage || tape->merge_stage_size) {
2185 printk(KERN_ERR "ide-tape: merge_stage_size should be"
2186 " 0 now\n");
2187 tape->merge_stage_size = 0;
2188 }
2189 tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
2190 if (!tape->merge_stage)
2191 return -ENOMEM;
2192 tape->chrdev_dir = IDETAPE_DIR_READ;
2193
2194 /*
2195 * Issue a read 0 command to ensure that DSC handshake is
2196 * switched from completion mode to buffer available mode.
2197 * No point in issuing this if DSC overlap isn't supported, some
2198 * drives (Seagate STT3401A) will return an error.
2199 */
2200 if (drive->dsc_overlap) {
2201 bytes_read = idetape_queue_rw_tail(drive,
2202 REQ_IDETAPE_READ, 0,
2203 tape->merge_stage->bh);
2204 if (bytes_read < 0) {
2205 __idetape_kfree_stage(tape->merge_stage);
2206 tape->merge_stage = NULL;
2207 tape->chrdev_dir = IDETAPE_DIR_NONE;
2208 return bytes_read;
2209 }
2210 }
2211 }
2212
2213 if (!test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
2214 if (tape->nr_pending_stages >= 3 * max_stages / 4) {
2215 tape->measure_insert_time = 1;
2216 tape->insert_time = jiffies;
2217 tape->insert_size = 0;
2218 tape->insert_speed = 0;
2219 idetape_plug_pipeline(drive);
2220 }
2221 }
2222 return 0;
2223 }
2224
2225 /*
2226 * Called from idetape_chrdev_read() to service a character device read request
2227 * and add read-ahead requests to our pipeline.
2228 */
2229 static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
2230 {
2231 idetape_tape_t *tape = drive->driver_data;
2232
2233 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
2234
2235 /* If we are at a filemark, return a read length of 0 */
2236 if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
2237 return 0;
2238
2239 idetape_init_read(drive, tape->max_stages);
2240
2241 if (test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
2242 return 0;
2243
2244 return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
2245 tape->merge_stage->bh);
2246 }
2247
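/*
 * Write bcount zeroed bytes to the tape, one merge stage at a time; used on
 * release to pad the last user block with zeros.
 */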
2248 static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
2249 {
2250 idetape_tape_t *tape = drive->driver_data;
2251 struct idetape_bh *bh;
2252 int blocks;
2253
2254 while (bcount) {
2255 unsigned int count;
2256
2257 bh = tape->merge_stage->bh;
2258 count = min(tape->stage_size, bcount);
2259 bcount -= count;
2260 blocks = count / tape->blk_size;
2261 while (count) {
2262 atomic_set(&bh->b_count,
2263 min(count, (unsigned int)bh->b_size));
2264 memset(bh->b_data, 0, atomic_read(&bh->b_count));
2265 count -= atomic_read(&bh->b_count);
2266 bh = bh->b_reqnext;
2267 }
2268 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
2269 tape->merge_stage->bh);
2270 }
2271 }
2272
2273 /*
2274 * Rewinds the tape to the Beginning Of the current Partition (BOP). We
2275 * currently support only one partition.
2276 */
2277 static int idetape_rewind_tape(ide_drive_t *drive)
2278 {
2279 int retval;
2280 struct ide_atapi_pc pc;
2281 idetape_tape_t *tape;
2282 tape = drive->driver_data;
2283
2284 debug_log(DBG_SENSE, "Enter %s\n", __func__);
2285
2286 idetape_create_rewind_cmd(drive, &pc);
2287 retval = idetape_queue_pc_tail(drive, &pc);
2288 if (retval)
2289 return retval;
2290
2291 idetape_create_read_position_cmd(&pc);
2292 retval = idetape_queue_pc_tail(drive, &pc);
2293 if (retval)
2294 return retval;
2295 return 0;
2296 }
2297
2298 /* mtio.h compatible commands should be issued to the chrdev interface. */
2299 static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
2300 unsigned long arg)
2301 {
2302 idetape_tape_t *tape = drive->driver_data;
2303 void __user *argp = (void __user *)arg;
2304
2305 struct idetape_config {
2306 int dsc_rw_frequency;
2307 int dsc_media_access_frequency;
2308 int nr_stages;
2309 } config;
2310
2311 debug_log(DBG_PROCS, "Enter %s\n", __func__);
2312
2313 switch (cmd) {
2314 case 0x0340:
2315 if (copy_from_user(&config, argp, sizeof(config)))
2316 return -EFAULT;
2317 tape->best_dsc_rw_freq = config.dsc_rw_frequency;
2318 tape->max_stages = config.nr_stages;
2319 break;
2320 case 0x0350:
2321 config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
2322 config.nr_stages = tape->max_stages;
2323 if (copy_to_user(argp, &config, sizeof(config)))
2324 return -EFAULT;
2325 break;
2326 default:
2327 return -EIO;
2328 }
2329 return 0;
2330 }
2331
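/*
 * Space over mt_count filemarks in the direction implied by mt_op. A filemark
 * that is already buffered in our read-ahead pipeline is consumed first, and
 * only the remainder is handed to the drive as a SPACE command.
 */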
2332 static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
2333 int mt_count)
2334 {
2335 idetape_tape_t *tape = drive->driver_data;
2336 struct ide_atapi_pc pc;
2337 int retval, count = 0;
2338 int sprev = !!(tape->caps[4] & 0x20);
2339
2340 if (mt_count == 0)
2341 return 0;
2342 if (MTBSF == mt_op || MTBSFM == mt_op) {
2343 if (!sprev)
2344 return -EIO;
2345 mt_count = -mt_count;
2346 }
2347
2348 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
2349 tape->merge_stage_size = 0;
2350 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
2351 ++count;
2352 idetape_discard_read_pipeline(drive, 0);
2353 }
2354
2355 /*
2356 * The filemark was not found in our internal pipeline; now we can issue
2357 * the space command.
2358 */
2359 switch (mt_op) {
2360 case MTFSF:
2361 case MTBSF:
2362 idetape_create_space_cmd(&pc, mt_count - count,
2363 IDETAPE_SPACE_OVER_FILEMARK);
2364 return idetape_queue_pc_tail(drive, &pc);
2365 case MTFSFM:
2366 case MTBSFM:
2367 if (!sprev)
2368 return -EIO;
2369 retval = idetape_space_over_filemarks(drive, MTFSF,
2370 mt_count - count);
2371 if (retval)
2372 return retval;
2373 count = (MTBSFM == mt_op ? 1 : -1);
2374 return idetape_space_over_filemarks(drive, MTFSF, count);
2375 default:
2376 printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
2377 mt_op);
2378 return -EIO;
2379 }
2380 }
2381
2382 /*
2383 * Our character device read / write functions.
2384 *
2385 * The tape is optimized to maximize throughput when it is transferring an
2386 * integral multiple of the "continuous transfer limit", which is a parameter
2387 * of the specific tape (26kB on my particular tape, 32kB for Onstream).
2388 *
2389 * As of version 1.3 of the driver, the character device provides an abstract
2390 * continuous view of the media - any mix of block sizes (even 1 byte) in the
2391 * same backup/restore procedure is supported. The driver internally converts
2392 * the requests to the recommended transfer unit, so that a mismatch between
2393 * the user's block size and the recommended size only results in (slightly)
2394 * increased driver overhead and no longer hurts performance.
2395 * This is not applicable to Onstream.
2396 */
2397 static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
2398 size_t count, loff_t *ppos)
2399 {
2400 struct ide_tape_obj *tape = ide_tape_f(file);
2401 ide_drive_t *drive = tape->drive;
2402 ssize_t bytes_read, temp, actually_read = 0, rc;
2403 ssize_t ret = 0;
2404 u16 ctl = *(u16 *)&tape->caps[12];
2405
2406 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
2407
2408 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
2409 if (test_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags))
2410 if (count > tape->blk_size &&
2411 (count % tape->blk_size) == 0)
2412 tape->user_bs_factor = count / tape->blk_size;
2413 }
2414 rc = idetape_init_read(drive, tape->max_stages);
2415 if (rc < 0)
2416 return rc;
2417 if (count == 0)
2418 return (0);
2419 if (tape->merge_stage_size) {
2420 actually_read = min((unsigned int)(tape->merge_stage_size),
2421 (unsigned int)count);
2422 if (idetape_copy_stage_to_user(tape, buf, actually_read))
2423 ret = -EFAULT;
2424 buf += actually_read;
2425 tape->merge_stage_size -= actually_read;
2426 count -= actually_read;
2427 }
2428 while (count >= tape->stage_size) {
2429 bytes_read = idetape_add_chrdev_read_request(drive, ctl);
2430 if (bytes_read <= 0)
2431 goto finish;
2432 if (idetape_copy_stage_to_user(tape, buf, bytes_read))
2433 ret = -EFAULT;
2434 buf += bytes_read;
2435 count -= bytes_read;
2436 actually_read += bytes_read;
2437 }
2438 if (count) {
2439 bytes_read = idetape_add_chrdev_read_request(drive, ctl);
2440 if (bytes_read <= 0)
2441 goto finish;
2442 temp = min((unsigned long)count, (unsigned long)bytes_read);
2443 if (idetape_copy_stage_to_user(tape, buf, temp))
2444 ret = -EFAULT;
2445 actually_read += temp;
2446 tape->merge_stage_size = bytes_read-temp;
2447 }
2448 finish:
2449 if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) {
2450 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
2451
2452 idetape_space_over_filemarks(drive, MTFSF, 1);
2453 return 0;
2454 }
2455
2456 return ret ? ret : actually_read;
2457 }
2458
2459 static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
2460 size_t count, loff_t *ppos)
2461 {
2462 struct ide_tape_obj *tape = ide_tape_f(file);
2463 ide_drive_t *drive = tape->drive;
2464 ssize_t actually_written = 0;
2465 ssize_t ret = 0;
2466 u16 ctl = *(u16 *)&tape->caps[12];
2467
2468 /* The drive is write protected. */
2469 if (tape->write_prot)
2470 return -EACCES;
2471
2472 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
2473
2474 /* Initialize write operation */
2475 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
2476 if (tape->chrdev_dir == IDETAPE_DIR_READ)
2477 idetape_discard_read_pipeline(drive, 1);
2478 if (tape->merge_stage || tape->merge_stage_size) {
2479 printk(KERN_ERR "ide-tape: merge_stage_size "
2480 "should be 0 now\n");
2481 tape->merge_stage_size = 0;
2482 }
2483 tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
2484 if (!tape->merge_stage)
2485 return -ENOMEM;
2486 tape->chrdev_dir = IDETAPE_DIR_WRITE;
2487 idetape_init_merge_stage(tape);
2488
2489 /*
2490 * Issue a write 0 command to ensure that DSC handshake is
2491 * switched from completion mode to buffer available mode. No
2492 * point in issuing this if DSC overlap isn't supported, some
2493 * drives (Seagate STT3401A) will return an error.
2494 */
2495 if (drive->dsc_overlap) {
2496 ssize_t retval = idetape_queue_rw_tail(drive,
2497 REQ_IDETAPE_WRITE, 0,
2498 tape->merge_stage->bh);
2499 if (retval < 0) {
2500 __idetape_kfree_stage(tape->merge_stage);
2501 tape->merge_stage = NULL;
2502 tape->chrdev_dir = IDETAPE_DIR_NONE;
2503 return retval;
2504 }
2505 }
2506 }
2507 if (count == 0)
2508 return (0);
2509 if (tape->merge_stage_size) {
2510 if (tape->merge_stage_size >= tape->stage_size) {
2511 printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
2512 tape->merge_stage_size = 0;
2513 }
2514 actually_written = min((unsigned int)
2515 (tape->stage_size - tape->merge_stage_size),
2516 (unsigned int)count);
2517 if (idetape_copy_stage_from_user(tape, buf, actually_written))
2518 ret = -EFAULT;
2519 buf += actually_written;
2520 tape->merge_stage_size += actually_written;
2521 count -= actually_written;
2522
2523 if (tape->merge_stage_size == tape->stage_size) {
2524 ssize_t retval;
2525 tape->merge_stage_size = 0;
2526 retval = idetape_add_chrdev_write_request(drive, ctl);
2527 if (retval <= 0)
2528 return (retval);
2529 }
2530 }
2531 while (count >= tape->stage_size) {
2532 ssize_t retval;
2533 if (idetape_copy_stage_from_user(tape, buf, tape->stage_size))
2534 ret = -EFAULT;
2535 buf += tape->stage_size;
2536 count -= tape->stage_size;
2537 retval = idetape_add_chrdev_write_request(drive, ctl);
2538 actually_written += tape->stage_size;
2539 if (retval <= 0)
2540 return (retval);
2541 }
2542 if (count) {
2543 actually_written += count;
2544 if (idetape_copy_stage_from_user(tape, buf, count))
2545 ret = -EFAULT;
2546 tape->merge_stage_size += count;
2547 }
2548 return ret ? ret : actually_written;
2549 }
2550
2551 static int idetape_write_filemark(ide_drive_t *drive)
2552 {
2553 struct ide_atapi_pc pc;
2554
2555 /* Write a filemark */
2556 idetape_create_write_filemark_cmd(drive, &pc, 1);
2557 if (idetape_queue_pc_tail(drive, &pc)) {
2558 printk(KERN_ERR "ide-tape: Couldn't write a filemark\n");
2559 return -EIO;
2560 }
2561 return 0;
2562 }
2563
2564 /*
2565 * Called from idetape_chrdev_ioctl when the general mtio MTIOCTOP ioctl is
2566 * requested.
2567 *
2568 * Note: MTBSF and MTBSFM are not supported when the tape doesn't support
2569 * spacing over filemarks in the reverse direction. In this case, MTFSFM is also
2570 * usually not supported (it is supported in the rare case in which we crossed
2571 * the filemark during our read-ahead pipelined operation mode).
2572 *
2573 * The following commands are currently not supported:
2574 *
2575 * MTFSS, MTBSS, MTWSM, MTSETDENSITY, MTSETDRVBUFFER, MT_ST_BOOLEANS,
2576 * MT_ST_WRITE_THRESHOLD.
2577 */
2578 static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2579 {
2580 idetape_tape_t *tape = drive->driver_data;
2581 struct ide_atapi_pc pc;
2582 int i, retval;
2583
2584 debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n",
2585 mt_op, mt_count);
2586
2587 /* Commands which need our pipelined read-ahead stages. */
2588 switch (mt_op) {
2589 case MTFSF:
2590 case MTFSFM:
2591 case MTBSF:
2592 case MTBSFM:
2593 if (!mt_count)
2594 return 0;
2595 return idetape_space_over_filemarks(drive, mt_op, mt_count);
2596 default:
2597 break;
2598 }
2599
2600 switch (mt_op) {
2601 case MTWEOF:
2602 if (tape->write_prot)
2603 return -EACCES;
2604 idetape_discard_read_pipeline(drive, 1);
2605 for (i = 0; i < mt_count; i++) {
2606 retval = idetape_write_filemark(drive);
2607 if (retval)
2608 return retval;
2609 }
2610 return 0;
2611 case MTREW:
2612 idetape_discard_read_pipeline(drive, 0);
2613 if (idetape_rewind_tape(drive))
2614 return -EIO;
2615 return 0;
2616 case MTLOAD:
2617 idetape_discard_read_pipeline(drive, 0);
2618 idetape_create_load_unload_cmd(drive, &pc,
2619 IDETAPE_LU_LOAD_MASK);
2620 return idetape_queue_pc_tail(drive, &pc);
2621 case MTUNLOAD:
2622 case MTOFFL:
2623 /*
2624 * If door is locked, attempt to unlock before
2625 * attempting to eject.
2626 */
2627 if (tape->door_locked) {
2628 if (idetape_create_prevent_cmd(drive, &pc, 0))
2629 if (!idetape_queue_pc_tail(drive, &pc))
2630 tape->door_locked = DOOR_UNLOCKED;
2631 }
2632 idetape_discard_read_pipeline(drive, 0);
2633 idetape_create_load_unload_cmd(drive, &pc,
2634 !IDETAPE_LU_LOAD_MASK);
2635 retval = idetape_queue_pc_tail(drive, &pc);
2636 if (!retval)
2637 clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
2638 return retval;
2639 case MTNOP:
2640 idetape_discard_read_pipeline(drive, 0);
2641 return idetape_flush_tape_buffers(drive);
2642 case MTRETEN:
2643 idetape_discard_read_pipeline(drive, 0);
2644 idetape_create_load_unload_cmd(drive, &pc,
2645 IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
2646 return idetape_queue_pc_tail(drive, &pc);
2647 case MTEOM:
2648 idetape_create_space_cmd(&pc, 0, IDETAPE_SPACE_TO_EOD);
2649 return idetape_queue_pc_tail(drive, &pc);
2650 case MTERASE:
2651 (void)idetape_rewind_tape(drive);
2652 idetape_create_erase_cmd(&pc);
2653 return idetape_queue_pc_tail(drive, &pc);
2654 case MTSETBLK:
2655 if (mt_count) {
2656 if (mt_count < tape->blk_size ||
2657 mt_count % tape->blk_size)
2658 return -EIO;
2659 tape->user_bs_factor = mt_count / tape->blk_size;
2660 clear_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
2661 } else
2662 set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
2663 return 0;
2664 case MTSEEK:
2665 idetape_discard_read_pipeline(drive, 0);
2666 return idetape_position_tape(drive,
2667 mt_count * tape->user_bs_factor, tape->partition, 0);
2668 case MTSETPART:
2669 idetape_discard_read_pipeline(drive, 0);
2670 return idetape_position_tape(drive, 0, mt_count, 0);
2671 case MTFSR:
2672 case MTBSR:
2673 case MTLOCK:
2674 if (!idetape_create_prevent_cmd(drive, &pc, 1))
2675 return 0;
2676 retval = idetape_queue_pc_tail(drive, &pc);
2677 if (retval)
2678 return retval;
2679 tape->door_locked = DOOR_EXPLICITLY_LOCKED;
2680 return 0;
2681 case MTUNLOCK:
2682 if (!idetape_create_prevent_cmd(drive, &pc, 0))
2683 return 0;
2684 retval = idetape_queue_pc_tail(drive, &pc);
2685 if (retval)
2686 return retval;
2687 tape->door_locked = DOOR_UNLOCKED;
2688 return 0;
2689 default:
2690 printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
2691 mt_op);
2692 return -EIO;
2693 }
2694 }
2695
2696 /*
2697 * Our character device ioctls. General mtio.h magnetic io commands are
2698 * supported here, and not in the corresponding block interface. Our own
2699 * ide-tape ioctls are supported on both interfaces.
2700 */
2701 static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
2702 unsigned int cmd, unsigned long arg)
2703 {
2704 struct ide_tape_obj *tape = ide_tape_f(file);
2705 ide_drive_t *drive = tape->drive;
2706 struct mtop mtop;
2707 struct mtget mtget;
2708 struct mtpos mtpos;
2709 int block_offset = 0, position = tape->first_frame;
2710 void __user *argp = (void __user *)arg;
2711
2712 debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd);
2713
2714 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
2715 idetape_empty_write_pipeline(drive);
2716 idetape_flush_tape_buffers(drive);
2717 }
2718 if (cmd == MTIOCGET || cmd == MTIOCPOS) {
2719 idetape_wait_for_pipeline(drive);
2720 block_offset = tape->merge_stage_size /
2721 (tape->blk_size * tape->user_bs_factor);
2722 position = idetape_read_position(drive);
2723 if (position < 0)
2724 return -EIO;
2725 }
2726 switch (cmd) {
2727 case MTIOCTOP:
2728 if (copy_from_user(&mtop, argp, sizeof(struct mtop)))
2729 return -EFAULT;
2730 return idetape_mtioctop(drive, mtop.mt_op, mtop.mt_count);
2731 case MTIOCGET:
2732 memset(&mtget, 0, sizeof(struct mtget));
2733 mtget.mt_type = MT_ISSCSI2;
2734 mtget.mt_blkno = position / tape->user_bs_factor - block_offset;
2735 mtget.mt_dsreg =
2736 ((tape->blk_size * tape->user_bs_factor)
2737 << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK;
2738
2739 if (tape->drv_write_prot)
2740 mtget.mt_gstat |= GMT_WR_PROT(0xffffffff);
2741
2742 if (copy_to_user(argp, &mtget, sizeof(struct mtget)))
2743 return -EFAULT;
2744 return 0;
2745 case MTIOCPOS:
2746 mtpos.mt_blkno = position / tape->user_bs_factor - block_offset;
2747 if (copy_to_user(argp, &mtpos, sizeof(struct mtpos)))
2748 return -EFAULT;
2749 return 0;
2750 default:
2751 if (tape->chrdev_dir == IDETAPE_DIR_READ)
2752 idetape_discard_read_pipeline(drive, 1);
2753 return idetape_blkdev_ioctl(drive, cmd, arg);
2754 }
2755 }
2756
2757 /*
2758 * Do a mode sense page 0 with block descriptor and, if it succeeds, set the
2759 * tape block size to the reported value.
2760 */
2761 static void ide_tape_get_bsize_from_bdesc(ide_drive_t *drive)
2762 {
2763 idetape_tape_t *tape = drive->driver_data;
2764 struct ide_atapi_pc pc;
2765
2766 idetape_create_mode_sense_cmd(&pc, IDETAPE_BLOCK_DESCRIPTOR);
2767 if (idetape_queue_pc_tail(drive, &pc)) {
2768 printk(KERN_ERR "ide-tape: Can't get block descriptor\n");
2769 if (tape->blk_size == 0) {
2770 printk(KERN_WARNING "ide-tape: Cannot deal with zero "
2771 "block size, assuming 32k\n");
2772 tape->blk_size = 32768;
2773 }
2774 return;
2775 }
2776 tape->blk_size = (pc.buf[4 + 5] << 16) +
2777 (pc.buf[4 + 6] << 8) +
2778 pc.buf[4 + 7];
2779 tape->drv_write_prot = (pc.buf[2] & 0x80) >> 7;
2780 }
2781
2782 static int idetape_chrdev_open(struct inode *inode, struct file *filp)
2783 {
2784 unsigned int minor = iminor(inode), i = minor & ~0xc0;
2785 ide_drive_t *drive;
2786 idetape_tape_t *tape;
2787 struct ide_atapi_pc pc;
2788 int retval;
2789
2790 if (i >= MAX_HWIFS * MAX_DRIVES)
2791 return -ENXIO;
2792
2793 tape = ide_tape_chrdev_get(i);
2794 if (!tape)
2795 return -ENXIO;
2796
2797 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
2798
2799 /*
2800 * We really want to do nonseekable_open(inode, filp); here, but some
2801 * versions of tar incorrectly call lseek on tapes and bail out if that
2802 * fails. So we disallow pread() and pwrite(), but permit lseeks.
2803 */
2804 filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
2805
2806 drive = tape->drive;
2807
2808 filp->private_data = tape;
2809
2810 if (test_and_set_bit(IDETAPE_FLAG_BUSY, &tape->flags)) {
2811 retval = -EBUSY;
2812 goto out_put_tape;
2813 }
2814
2815 retval = idetape_wait_ready(drive, 60 * HZ);
2816 if (retval) {
2817 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
2818 printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
2819 goto out_put_tape;
2820 }
2821
2822 idetape_read_position(drive);
2823 if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags))
2824 (void)idetape_rewind_tape(drive);
2825
2826 if (tape->chrdev_dir != IDETAPE_DIR_READ)
2827 clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
2828
2829 /* Read block size and write protect status from drive. */
2830 ide_tape_get_bsize_from_bdesc(drive);
2831
2832 /* Set write protect flag if device is opened as read-only. */
2833 if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
2834 tape->write_prot = 1;
2835 else
2836 tape->write_prot = tape->drv_write_prot;
2837
2838 /* Make sure drive isn't write protected if user wants to write. */
2839 if (tape->write_prot) {
2840 if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
2841 (filp->f_flags & O_ACCMODE) == O_RDWR) {
2842 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
2843 retval = -EROFS;
2844 goto out_put_tape;
2845 }
2846 }
2847
2848 /* Lock the tape drive door so user can't eject. */
2849 if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
2850 if (idetape_create_prevent_cmd(drive, &pc, 1)) {
2851 if (!idetape_queue_pc_tail(drive, &pc)) {
2852 if (tape->door_locked != DOOR_EXPLICITLY_LOCKED)
2853 tape->door_locked = DOOR_LOCKED;
2854 }
2855 }
2856 }
2857 return 0;
2858
2859 out_put_tape:
2860 ide_tape_put(tape);
2861 return retval;
2862 }
2863
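/*
 * Finish a write session when the character device is released: pad the last
 * user block with zeros, write a filemark and flush the drive's buffers.
 */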
2864 static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
2865 {
2866 idetape_tape_t *tape = drive->driver_data;
2867
2868 idetape_empty_write_pipeline(drive);
2869 tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0);
2870 if (tape->merge_stage != NULL) {
2871 idetape_pad_zeros(drive, tape->blk_size *
2872 (tape->user_bs_factor - 1));
2873 __idetape_kfree_stage(tape->merge_stage);
2874 tape->merge_stage = NULL;
2875 }
2876 idetape_write_filemark(drive);
2877 idetape_flush_tape_buffers(drive);
2878 idetape_flush_tape_buffers(drive);
2879 }
2880
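/*
 * Release the character device. The rewinding devices (minor < 128) discard
 * the read-ahead data and rewind the tape; the non-rewinding "n" devices only
 * wait for the pipeline to drain and leave the tape where it is.
 */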
2881 static int idetape_chrdev_release(struct inode *inode, struct file *filp)
2882 {
2883 struct ide_tape_obj *tape = ide_tape_f(filp);
2884 ide_drive_t *drive = tape->drive;
2885 struct ide_atapi_pc pc;
2886 unsigned int minor = iminor(inode);
2887
2888 lock_kernel();
2889 tape = drive->driver_data;
2890
2891 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
2892
2893 if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
2894 idetape_write_release(drive, minor);
2895 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
2896 if (minor < 128)
2897 idetape_discard_read_pipeline(drive, 1);
2898 else
2899 idetape_wait_for_pipeline(drive);
2900 }
2901
2902 if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags))
2903 (void) idetape_rewind_tape(drive);
2904 if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
2905 if (tape->door_locked == DOOR_LOCKED) {
2906 if (idetape_create_prevent_cmd(drive, &pc, 0)) {
2907 if (!idetape_queue_pc_tail(drive, &pc))
2908 tape->door_locked = DOOR_UNLOCKED;
2909 }
2910 }
2911 }
2912 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
2913 ide_tape_put(tape);
2914 unlock_kernel();
2915 return 0;
2916 }
2917
2918 /*
2919 * Check the contents of the ATAPI IDENTIFY command results. We return:
2920 *
2921 * 1 - If the tape can be supported by us, based on the information we have so
2922 * far.
2923 *
2924 * 0 - If this tape drive is not currently supported by us.
2925 */
2926 static int idetape_identify_device(ide_drive_t *drive)
2927 {
2928 u8 gcw[2], protocol, device_type, removable, packet_size;
2929
2930 if (drive->id_read == 0)
2931 return 1;
2932
2933 *((unsigned short *) &gcw) = drive->id->config;
2934
2935 protocol = (gcw[1] & 0xC0) >> 6;
2936 device_type = gcw[1] & 0x1F;
2937 removable = !!(gcw[0] & 0x80);
2938 packet_size = gcw[0] & 0x3;
2939
2940 /* Check that we can support this device */
2941 if (protocol != 2)
2942 printk(KERN_ERR "ide-tape: Protocol (0x%02x) is not ATAPI\n",
2943 protocol);
2944 else if (device_type != 1)
2945 printk(KERN_ERR "ide-tape: Device type (0x%02x) is not set "
2946 "to tape\n", device_type);
2947 else if (!removable)
2948 printk(KERN_ERR "ide-tape: The removable flag is not set\n");
2949 else if (packet_size != 0) {
2950 printk(KERN_ERR "ide-tape: Packet size (0x%02x) is not 12"
2951 " bytes\n", packet_size);
2952 } else
2953 return 1;
2954 return 0;
2955 }
2956
2957 static void idetape_get_inquiry_results(ide_drive_t *drive)
2958 {
2959 idetape_tape_t *tape = drive->driver_data;
2960 struct ide_atapi_pc pc;
2961 char fw_rev[6], vendor_id[10], product_id[18];
2962
2963 idetape_create_inquiry_cmd(&pc);
2964 if (idetape_queue_pc_tail(drive, &pc)) {
2965 printk(KERN_ERR "ide-tape: %s: can't get INQUIRY results\n",
2966 tape->name);
2967 return;
2968 }
2969 memcpy(vendor_id, &pc.buf[8], 8);
2970 memcpy(product_id, &pc.buf[16], 16);
2971 memcpy(fw_rev, &pc.buf[32], 4);
2972
2973 ide_fixstring(vendor_id, 10, 0);
2974 ide_fixstring(product_id, 18, 0);
2975 ide_fixstring(fw_rev, 6, 0);
2976
2977 printk(KERN_INFO "ide-tape: %s <-> %s: %s %s rev %s\n",
2978 drive->name, tape->name, vendor_id, product_id, fw_rev);
2979 }
2980
2981 /*
2982 * Ask the tape about its various parameters. In particular, we will adjust our
2983 * data transfer buffer size to the recommended value as returned by the tape.
2984 */
2985 static void idetape_get_mode_sense_results(ide_drive_t *drive)
2986 {
2987 idetape_tape_t *tape = drive->driver_data;
2988 struct ide_atapi_pc pc;
2989 u8 *caps;
2990 u8 speed, max_speed;
2991
2992 idetape_create_mode_sense_cmd(&pc, IDETAPE_CAPABILITIES_PAGE);
2993 if (idetape_queue_pc_tail(drive, &pc)) {
2994 printk(KERN_ERR "ide-tape: Can't get tape parameters - assuming"
2995 " some default values\n");
2996 tape->blk_size = 512;
2997 put_unaligned(52, (u16 *)&tape->caps[12]);
2998 put_unaligned(540, (u16 *)&tape->caps[14]);
2999 put_unaligned(6*52, (u16 *)&tape->caps[16]);
3000 return;
3001 }
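/*
 * The capabilities page follows the 4-byte mode parameter header and the
 * block descriptor. The driver uses the maximum speed (bytes 8-9), the
 * continuous transfer limit (bytes 12-13), the current speed (bytes 14-15)
 * and the buffer size in 512-byte units (bytes 16-17), all stored big endian
 * by the drive.
 */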
3002 caps = pc.buf + 4 + pc.buf[3];
3003
3004 /* convert to host order and save for later use */
3005 speed = be16_to_cpu(*(u16 *)&caps[14]);
3006 max_speed = be16_to_cpu(*(u16 *)&caps[8]);
3007
3008 put_unaligned(max_speed, (u16 *)&caps[8]);
3009 put_unaligned(be16_to_cpu(*(u16 *)&caps[12]), (u16 *)&caps[12]);
3010 put_unaligned(speed, (u16 *)&caps[14]);
3011 put_unaligned(be16_to_cpu(*(u16 *)&caps[16]), (u16 *)&caps[16]);
3012
3013 if (!speed) {
3014 printk(KERN_INFO "ide-tape: %s: invalid tape speed "
3015 "(assuming 650KB/sec)\n", drive->name);
3016 put_unaligned(650, (u16 *)&caps[14]);
3017 }
3018 if (!max_speed) {
3019 printk(KERN_INFO "ide-tape: %s: invalid max_speed "
3020 "(assuming 650KB/sec)\n", drive->name);
3021 put_unaligned(650, (u16 *)&caps[8]);
3022 }
3023
3024 memcpy(&tape->caps, caps, 20);
3025 if (caps[7] & 0x02)
3026 tape->blk_size = 512;
3027 else if (caps[7] & 0x04)
3028 tape->blk_size = 1024;
3029 }
3030
3031 #ifdef CONFIG_IDE_PROC_FS
3032 static void idetape_add_settings(ide_drive_t *drive)
3033 {
3034 idetape_tape_t *tape = drive->driver_data;
3035
3036 ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff,
3037 1, 2, (u16 *)&tape->caps[16], NULL);
3038 ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff,
3039 tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
3040 ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff,
3041 tape->stage_size / 1024, 1, &tape->max_stages, NULL);
3042 ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff,
3043 tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
3044 ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0,
3045 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages,
3046 NULL);
3047 ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0,
3048 0xffff, tape->stage_size / 1024, 1,
3049 &tape->nr_pending_stages, NULL);
3050 ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff,
3051 1, 1, (u16 *)&tape->caps[14], NULL);
3052 ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1,
3053 1024, &tape->stage_size, NULL);
3054 ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN,
3055 IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq,
3056 NULL);
3057 ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1,
3058 1, &drive->dsc_overlap, NULL);
3059 ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff,
3060 1, 1, &tape->avg_speed, NULL);
3061 ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1,
3062 1, &tape->debug_mask, NULL);
3063 }
3064 #else
3065 static inline void idetape_add_settings(ide_drive_t *drive) { ; }
3066 #endif
3067
3068 /*
3069 * The function below is called to:
3070 *
3071 * 1. Initialize our various state variables.
3072 * 2. Ask the tape for its capabilities.
3073 * 3. Choose the size of the data transfer buffer, based on the recommendation
3074 * which we received in step 2.
3075 *
3076 * Note that at this point ide.c already assigned us an irq, so that we can
3077 * queue requests here and wait for their completion.
3078 */
3079 static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
3080 {
3081 unsigned long t1, tmid, tn, t;
3082 int speed;
3083 int stage_size;
3084 u8 gcw[2];
3085 struct sysinfo si;
3086 u16 *ctl = (u16 *)&tape->caps[12];
3087
3088 spin_lock_init(&tape->lock);
3089 drive->dsc_overlap = 1;
3090 if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
3091 printk(KERN_INFO "ide-tape: %s: disabling DSC overlap\n",
3092 tape->name);
3093 drive->dsc_overlap = 0;
3094 }
3095 /* Seagate Travan drives do not support DSC overlap. */
3096 if (strstr(drive->id->model, "Seagate STT3401"))
3097 drive->dsc_overlap = 0;
3098 tape->minor = minor;
3099 tape->name[0] = 'h';
3100 tape->name[1] = 't';
3101 tape->name[2] = '0' + minor;
3102 tape->chrdev_dir = IDETAPE_DIR_NONE;
3103 tape->pc = tape->pc_stack;
3104 *((unsigned short *) &gcw) = drive->id->config;
3105
3106 /* Command packet DRQ type */
3107 if (((gcw[0] & 0x60) >> 5) == 1)
3108 set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags);
3109
3110 tape->min_pipeline = 10;
3111 tape->max_pipeline = 10;
3112 tape->max_stages = 10;
3113
3114 idetape_get_inquiry_results(drive);
3115 idetape_get_mode_sense_results(drive);
3116 ide_tape_get_bsize_from_bdesc(drive);
3117 tape->user_bs_factor = 1;
3118 tape->stage_size = *ctl * tape->blk_size;
3119 while (tape->stage_size > 0xffff) {
3120 printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
3121 *ctl /= 2;
3122 tape->stage_size = *ctl * tape->blk_size;
3123 }
3124 stage_size = tape->stage_size;
3125 tape->pages_per_stage = stage_size / PAGE_SIZE;
3126 if (stage_size % PAGE_SIZE) {
3127 tape->pages_per_stage++;
3128 tape->excess_bh_size = PAGE_SIZE - stage_size % PAGE_SIZE;
3129 }
3130
3131 /* Select the "best" DSC read/write polling freq and pipeline size. */
3132 speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
3133
3134 tape->max_stages = speed * 1000 * 10 / tape->stage_size;
3135
3136 /* Limit memory use for pipeline to 10% of physical memory */
3137 si_meminfo(&si);
3138 if (tape->max_stages * tape->stage_size >
3139 si.totalram * si.mem_unit / 10)
3140 tape->max_stages =
3141 si.totalram * si.mem_unit / (10 * tape->stage_size);
3142
3143 tape->max_stages = min(tape->max_stages, IDETAPE_MAX_PIPELINE_STAGES);
3144 tape->min_pipeline = min(tape->max_stages, IDETAPE_MIN_PIPELINE_STAGES);
3145 tape->max_pipeline =
3146 min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
3147 if (tape->max_stages == 0) {
3148 tape->max_stages = 1;
3149 tape->min_pipeline = 1;
3150 tape->max_pipeline = 1;
3151 }
3152
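/*
 * DSC polling interval candidates: t1 is the time needed to transfer one
 * stage at the drive's nominal speed, tmid corresponds to emptying half of
 * the drive's internal buffer, and tn is the time to transfer
 * IDETAPE_FIFO_THRESHOLD stages. t is taken from tn when pipeline stages are
 * configured (t1 otherwise) and is clamped to the allowed DSC range below.
 */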
3153 t1 = (tape->stage_size * HZ) / (speed * 1000);
3154 tmid = (*(u16 *)&tape->caps[16] * 32 * HZ) / (speed * 125);
3155 tn = (IDETAPE_FIFO_THRESHOLD * tape->stage_size * HZ) / (speed * 1000);
3156
3157 if (tape->max_stages)
3158 t = tn;
3159 else
3160 t = t1;
3161
3162 /*
3163 * Ensure that the number we got makes sense; limit it within
3164 * IDETAPE_DSC_RW_MIN and IDETAPE_DSC_RW_MAX.
3165 */
3166 tape->best_dsc_rw_freq = max_t(unsigned long,
3167 min_t(unsigned long, t, IDETAPE_DSC_RW_MAX),
3168 IDETAPE_DSC_RW_MIN);
3169 printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
3170 "%dkB pipeline, %lums tDSC%s\n",
3171 drive->name, tape->name, *(u16 *)&tape->caps[14],
3172 (*(u16 *)&tape->caps[16] * 512) / tape->stage_size,
3173 tape->stage_size / 1024,
3174 tape->max_stages * tape->stage_size / 1024,
3175 tape->best_dsc_rw_freq * 1000 / HZ,
3176 drive->using_dma ? ", DMA":"");
3177
3178 idetape_add_settings(drive);
3179 }
3180
3181 static void ide_tape_remove(ide_drive_t *drive)
3182 {
3183 idetape_tape_t *tape = drive->driver_data;
3184
3185 ide_proc_unregister_driver(drive, tape->driver);
3186
3187 ide_unregister_region(tape->disk);
3188
3189 ide_tape_put(tape);
3190 }
3191
3192 static void ide_tape_release(struct kref *kref)
3193 {
3194 struct ide_tape_obj *tape = to_ide_tape(kref);
3195 ide_drive_t *drive = tape->drive;
3196 struct gendisk *g = tape->disk;
3197
3198 BUG_ON(tape->first_stage != NULL || tape->merge_stage_size);
3199
3200 drive->dsc_overlap = 0;
3201 drive->driver_data = NULL;
3202 device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor));
3203 device_destroy(idetape_sysfs_class,
3204 MKDEV(IDETAPE_MAJOR, tape->minor + 128));
3205 idetape_devs[tape->minor] = NULL;
3206 g->private_data = NULL;
3207 put_disk(g);
3208 kfree(tape);
3209 }
3210
3211 #ifdef CONFIG_IDE_PROC_FS
3212 static int proc_idetape_read_name
3213 (char *page, char **start, off_t off, int count, int *eof, void *data)
3214 {
3215 ide_drive_t *drive = (ide_drive_t *) data;
3216 idetape_tape_t *tape = drive->driver_data;
3217 char *out = page;
3218 int len;
3219
3220 len = sprintf(out, "%s\n", tape->name);
3221 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
3222 }
3223
3224 static ide_proc_entry_t idetape_proc[] = {
3225 { "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL },
3226 { "name", S_IFREG|S_IRUGO, proc_idetape_read_name, NULL },
3227 { NULL, 0, NULL, NULL }
3228 };
3229 #endif
3230
3231 static int ide_tape_probe(ide_drive_t *);
3232
3233 static ide_driver_t idetape_driver = {
3234 .gen_driver = {
3235 .owner = THIS_MODULE,
3236 .name = "ide-tape",
3237 .bus = &ide_bus_type,
3238 },
3239 .probe = ide_tape_probe,
3240 .remove = ide_tape_remove,
3241 .version = IDETAPE_VERSION,
3242 .media = ide_tape,
3243 .supports_dsc_overlap = 1,
3244 .do_request = idetape_do_request,
3245 .end_request = idetape_end_request,
3246 .error = __ide_error,
3247 .abort = __ide_abort,
3248 #ifdef CONFIG_IDE_PROC_FS
3249 .proc = idetape_proc,
3250 #endif
3251 };
3252
3253 /* Our character device supporting functions, passed to register_chrdev. */
3254 static const struct file_operations idetape_fops = {
3255 .owner = THIS_MODULE,
3256 .read = idetape_chrdev_read,
3257 .write = idetape_chrdev_write,
3258 .ioctl = idetape_chrdev_ioctl,
3259 .open = idetape_chrdev_open,
3260 .release = idetape_chrdev_release,
3261 };
3262
3263 static int idetape_open(struct inode *inode, struct file *filp)
3264 {
3265 struct gendisk *disk = inode->i_bdev->bd_disk;
3266 struct ide_tape_obj *tape;
3267
3268 tape = ide_tape_get(disk);
3269 if (!tape)
3270 return -ENXIO;
3271
3272 return 0;
3273 }
3274
3275 static int idetape_release(struct inode *inode, struct file *filp)
3276 {
3277 struct gendisk *disk = inode->i_bdev->bd_disk;
3278 struct ide_tape_obj *tape = ide_tape_g(disk);
3279
3280 ide_tape_put(tape);
3281
3282 return 0;
3283 }
3284
3285 static int idetape_ioctl(struct inode *inode, struct file *file,
3286 unsigned int cmd, unsigned long arg)
3287 {
3288 struct block_device *bdev = inode->i_bdev;
3289 struct ide_tape_obj *tape = ide_tape_g(bdev->bd_disk);
3290 ide_drive_t *drive = tape->drive;
3291 int err = generic_ide_ioctl(drive, file, bdev, cmd, arg);
3292 if (err == -EINVAL)
3293 err = idetape_blkdev_ioctl(drive, cmd, arg);
3294 return err;
3295 }
3296
3297 static struct block_device_operations idetape_block_ops = {
3298 .owner = THIS_MODULE,
3299 .open = idetape_open,
3300 .release = idetape_release,
3301 .ioctl = idetape_ioctl,
3302 };
3303
3304 static int ide_tape_probe(ide_drive_t *drive)
3305 {
3306 idetape_tape_t *tape;
3307 struct gendisk *g;
3308 int minor;
3309
3310 if (!strstr("ide-tape", drive->driver_req))
3311 goto failed;
3312 if (!drive->present)
3313 goto failed;
3314 if (drive->media != ide_tape)
3315 goto failed;
3316 if (!idetape_identify_device(drive)) {
3317 printk(KERN_ERR "ide-tape: %s: not supported by this version of"
3318 " the driver\n", drive->name);
3319 goto failed;
3320 }
3321 if (drive->scsi) {
3322 printk(KERN_INFO "ide-tape: passing drive %s to ide-scsi"
3323 " emulation.\n", drive->name);
3324 goto failed;
3325 }
3326 tape = kzalloc(sizeof(idetape_tape_t), GFP_KERNEL);
3327 if (tape == NULL) {
3328 printk(KERN_ERR "ide-tape: %s: Can't allocate a tape struct\n",
3329 drive->name);
3330 goto failed;
3331 }
3332
3333 g = alloc_disk(1 << PARTN_BITS);
3334 if (!g)
3335 goto out_free_tape;
3336
3337 ide_init_disk(g, drive);
3338
3339 ide_proc_register_driver(drive, &idetape_driver);
3340
3341 kref_init(&tape->kref);
3342
3343 tape->drive = drive;
3344 tape->driver = &idetape_driver;
3345 tape->disk = g;
3346
3347 g->private_data = &tape->driver;
3348
3349 drive->driver_data = tape;
3350
3351 mutex_lock(&idetape_ref_mutex);
3352 for (minor = 0; idetape_devs[minor]; minor++)
3353 ;
3354 idetape_devs[minor] = tape;
3355 mutex_unlock(&idetape_ref_mutex);
3356
3357 idetape_setup(drive, tape, minor);
3358
3359 device_create(idetape_sysfs_class, &drive->gendev,
3360 MKDEV(IDETAPE_MAJOR, minor), "%s", tape->name);
3361 device_create(idetape_sysfs_class, &drive->gendev,
3362 MKDEV(IDETAPE_MAJOR, minor + 128), "n%s", tape->name);
3363
3364 g->fops = &idetape_block_ops;
3365 ide_register_region(g);
3366
3367 return 0;
3368
3369 out_free_tape:
3370 kfree(tape);
3371 failed:
3372 return -ENODEV;
3373 }
3374
3375 static void __exit idetape_exit(void)
3376 {
3377 driver_unregister(&idetape_driver.gen_driver);
3378 class_destroy(idetape_sysfs_class);
3379 unregister_chrdev(IDETAPE_MAJOR, "ht");
3380 }
3381
3382 static int __init idetape_init(void)
3383 {
3384 int error = 1;
3385 idetape_sysfs_class = class_create(THIS_MODULE, "ide_tape");
3386 if (IS_ERR(idetape_sysfs_class)) {
3387 idetape_sysfs_class = NULL;
3388 printk(KERN_ERR "Unable to create sysfs class for ide tapes\n");
3389 error = -EBUSY;
3390 goto out;
3391 }
3392
3393 if (register_chrdev(IDETAPE_MAJOR, "ht", &idetape_fops)) {
3394 printk(KERN_ERR "ide-tape: Failed to register chrdev"
3395 " interface\n");
3396 error = -EBUSY;
3397 goto out_free_class;
3398 }
3399
3400 error = driver_register(&idetape_driver.gen_driver);
3401 if (error)
3402 goto out_free_driver;
3403
3404 return 0;
3405
3406 out_free_driver:
3407 driver_unregister(&idetape_driver.gen_driver);
3408 out_free_class:
3409 class_destroy(idetape_sysfs_class);
3410 out:
3411 return error;
3412 }
3413
3414 MODULE_ALIAS("ide:*m-tape*");
3415 module_init(idetape_init);
3416 module_exit(idetape_exit);
3417 MODULE_ALIAS_CHARDEV_MAJOR(IDETAPE_MAJOR);
3418 MODULE_DESCRIPTION("ATAPI Streaming TAPE Driver");
3419 MODULE_LICENSE("GPL");