1 /*
2 * IDE ATAPI streaming tape driver.
3 *
4 * Copyright (C) 1995-1999 Gadi Oxman <gadio@netvision.net.il>
5 * Copyright (C) 2003-2005 Bartlomiej Zolnierkiewicz
6 *
7 * This driver was constructed as a student project in the software laboratory
8 * of the faculty of electrical engineering in the Technion - Israel's
9 * Institute Of Technology, with the guide of Avner Lottem and Dr. Ilana David.
10 *
11 * It is hereby placed under the terms of the GNU general public license.
12 * (See linux/COPYING).
13 *
14 * For a historical changelog see
15 * Documentation/ide/ChangeLog.ide-tape.1995-2002
16 */
17
18 #define IDETAPE_VERSION "1.20"
19
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/string.h>
23 #include <linux/kernel.h>
24 #include <linux/delay.h>
25 #include <linux/timer.h>
26 #include <linux/mm.h>
27 #include <linux/interrupt.h>
28 #include <linux/jiffies.h>
29 #include <linux/major.h>
30 #include <linux/errno.h>
31 #include <linux/genhd.h>
32 #include <linux/slab.h>
33 #include <linux/pci.h>
34 #include <linux/ide.h>
35 #include <linux/smp_lock.h>
36 #include <linux/completion.h>
37 #include <linux/bitops.h>
38 #include <linux/mutex.h>
39 #include <scsi/scsi.h>
40
41 #include <asm/byteorder.h>
42 #include <linux/irq.h>
43 #include <linux/uaccess.h>
44 #include <linux/io.h>
45 #include <asm/unaligned.h>
46 #include <linux/mtio.h>
47
48 enum {
49 /* output errors only */
50 DBG_ERR = (1 << 0),
51 /* output all sense key/asc */
52 DBG_SENSE = (1 << 1),
53 /* info regarding all chrdev-related procedures */
54 DBG_CHRDEV = (1 << 2),
55 /* all remaining procedures */
56 DBG_PROCS = (1 << 3),
57 /* buffer alloc info (pc_stack & rq_stack) */
58 DBG_PCRQ_STACK = (1 << 4),
59 };
60
61 /* define to see debug info */
62 #define IDETAPE_DEBUG_LOG 0
63
64 #if IDETAPE_DEBUG_LOG
65 #define debug_log(lvl, fmt, args...) \
66 { \
67 if (tape->debug_mask & lvl) \
68 printk(KERN_INFO "ide-tape: " fmt, ## args); \
69 }
70 #else
71 #define debug_log(lvl, fmt, args...) do {} while (0)
72 #endif
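
/*
 * Usage sketch (illustrative only, not part of the original driver text):
 * with IDETAPE_DEBUG_LOG set to 1, a message is printed for every level
 * enabled in tape->debug_mask; with it left at 0 the macro compiles away
 * completely. For example:
 *
 *	tape->debug_mask |= DBG_SENSE | DBG_ERR;
 *	debug_log(DBG_SENSE, "sense key = %x, asc = %x\n",
 *		  tape->sense_key, tape->asc);
 */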
73
74 /**************************** Tunable parameters *****************************/
75
76
77 /*
78 * Pipelined mode parameters.
79 *
80 * We try to use the minimum number of stages which is enough to keep the tape
81 * constantly streaming. To accomplish that, we implement a feedback loop around
82 * the maximum number of stages:
83 *
84 * We start from MIN maximum stages (we will not even use MIN stages if we don't
85 * need them), increment it by RATE*(MAX-MIN) whenever we sense that the
86 * pipeline is empty, until we reach the optimum value or until we reach MAX.
87 */
88 #define IDETAPE_MIN_PIPELINE_STAGES 1
89 #define IDETAPE_MAX_PIPELINE_STAGES 400
90 #define IDETAPE_INCREASE_STAGES_RATE 20
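
/*
 * Illustrative sketch of the feedback step described above (the in-tree
 * version of this logic lives in idetape_end_request()): whenever the
 * pipeline is found empty after a successful transfer, the stage limit
 * grows by a tenth of the configured window and is clamped to the
 * [min_pipeline, max_pipeline] range:
 *
 *	int step = (tape->max_pipeline - tape->min_pipeline) / 10;
 *
 *	tape->max_stages += max(step, 1);
 *	tape->max_stages = max(tape->max_stages, tape->min_pipeline);
 *	tape->max_stages = min(tape->max_stages, tape->max_pipeline);
 */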
91
92 /*
93 * After each failed packet command we issue a request sense command and retry
94 * the packet command IDETAPE_MAX_PC_RETRIES times.
95 *
96 * Setting IDETAPE_MAX_PC_RETRIES to 0 will disable retries.
97 */
98 #define IDETAPE_MAX_PC_RETRIES 3
99
100 /*
101 * With each packet command, we allocate a buffer of IDETAPE_PC_BUFFER_SIZE
102 * bytes. This is used for several packet commands (Not for READ/WRITE commands)
103 */
104 #define IDETAPE_PC_BUFFER_SIZE 256
105
106 /*
107 * In various places in the driver, we need to allocate storage
108 * for packet commands and requests, which will remain valid while
109 * we leave the driver to wait for an interrupt or a timeout event.
110 */
111 #define IDETAPE_PC_STACK (10 + IDETAPE_MAX_PC_RETRIES)
112
113 /*
114 * Some drives (for example, Seagate STT3401A Travan) require a very long
115 * timeout, because they don't return an interrupt or clear their busy bit
116 * until after the command completes (even retension commands).
117 */
118 #define IDETAPE_WAIT_CMD (900*HZ)
119
120 /*
121 * The following parameter is used to select the point in the internal tape fifo
122 * in which we will start to refill the buffer. Decreasing the following
123 * parameter will improve the system's latency and interactive response, while
124 * using a high value might improve system throughput.
125 */
126 #define IDETAPE_FIFO_THRESHOLD 2
127
128 /*
129 * DSC polling parameters.
130 *
131 * Polling for DSC (a single bit in the status register) is a very important
132 * function in ide-tape. There are two cases in which we poll for DSC:
133 *
134 * 1. Before a read/write packet command, to ensure that we can transfer data
135 * from/to the tape's data buffers, without causing an actual media access.
136 * In case the tape is not ready yet, we take out our request from the device
137 * request queue, so that ide.c could service requests from the other device
138 * on the same interface in the meantime.
139 *
140 * 2. After the successful initialization of a "media access packet command",
141 * which is a command that can take a long time to complete (the interval can
142 * range from several seconds to even an hour). Again, we postpone our request
143 * in the middle to free the bus for the other device. The polling frequency
144 * here should be lower than the read/write frequency since those media access
145 * commands are slow. We start from a "fast" frequency - IDETAPE_DSC_MA_FAST
146 * (2 seconds), and if we don't receive DSC after IDETAPE_DSC_MA_THRESHOLD
147 * (5 min), we switch it to a lower frequency - IDETAPE_DSC_MA_SLOW (30 sec).
148 *
149 * We also set a timeout for the timer, in case something goes wrong. The
150 * timeout should be longer than the maximum execution time of a tape operation.
151 */
152
153 /* DSC timings. */
154 #define IDETAPE_DSC_RW_MIN 5*HZ/100 /* 50 msec */
155 #define IDETAPE_DSC_RW_MAX 40*HZ/100 /* 400 msec */
156 #define IDETAPE_DSC_RW_TIMEOUT 2*60*HZ /* 2 minutes */
157 #define IDETAPE_DSC_MA_FAST 2*HZ /* 2 seconds */
158 #define IDETAPE_DSC_MA_THRESHOLD 5*60*HZ /* 5 minutes */
159 #define IDETAPE_DSC_MA_SLOW 30*HZ /* 30 seconds */
160 #define IDETAPE_DSC_MA_TIMEOUT 2*60*60*HZ /* 2 hours */
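
/*
 * Illustrative sketch of how the media-access values above are used
 * (see idetape_pc_intr() and idetape_do_request() for the in-tree code):
 * polling starts at the fast rate and is relaxed once the threshold has
 * passed, until DSC is seen or the timeout expires:
 *
 *	tape->dsc_polling_start = jiffies;
 *	tape->dsc_poll_freq = IDETAPE_DSC_MA_FAST;
 *	tape->dsc_timeout = jiffies + IDETAPE_DSC_MA_TIMEOUT;
 *	...
 *	if (time_after(jiffies, tape->dsc_timeout))
 *		return ide_do_reset(drive);
 *	else if (time_after(jiffies, tape->dsc_polling_start +
 *			    IDETAPE_DSC_MA_THRESHOLD))
 *		tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
 */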
161
162 /*************************** End of tunable parameters ***********************/
163
164 /* Read/Write error simulation */
165 #define SIMULATE_ERRORS 0
166
167 /* tape directions */
168 enum {
169 IDETAPE_DIR_NONE = (1 << 0),
170 IDETAPE_DIR_READ = (1 << 1),
171 IDETAPE_DIR_WRITE = (1 << 2),
172 };
173
174 struct idetape_bh {
175 u32 b_size;
176 atomic_t b_count;
177 struct idetape_bh *b_reqnext;
178 char *b_data;
179 };
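
/*
 * Illustrative helper (a sketch, not used elsewhere in this driver):
 * b_count holds the number of valid bytes in b_data, so the free space
 * left in a buffer head is simply the difference below - the same
 * expression that idetape_input_buffers() and
 * idetape_copy_stage_from_user() compute inline.
 */
static inline u32 idetape_bh_free_space(struct idetape_bh *bh)
{
	return bh->b_size - atomic_read(&bh->b_count);
}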
180
181 /* Tape door status */
182 #define DOOR_UNLOCKED 0
183 #define DOOR_LOCKED 1
184 #define DOOR_EXPLICITLY_LOCKED 2
185
186 /* Some defines for the SPACE command */
187 #define IDETAPE_SPACE_OVER_FILEMARK 1
188 #define IDETAPE_SPACE_TO_EOD 3
189
190 /* Some defines for the LOAD UNLOAD command */
191 #define IDETAPE_LU_LOAD_MASK 1
192 #define IDETAPE_LU_RETENSION_MASK 2
193 #define IDETAPE_LU_EOT_MASK 4
194
195 /*
196 * Special requests for our block device strategy routine.
197 *
198 * In order to service a character device command, we add special requests to
199 * the tail of our block device request queue and wait for their completion.
200 */
201
202 enum {
203 REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */
204 REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */
205 REQ_IDETAPE_READ = (1 << 2),
206 REQ_IDETAPE_WRITE = (1 << 3),
207 };
208
209 /* Error codes returned in rq->errors to the higher part of the driver. */
210 #define IDETAPE_ERROR_GENERAL 101
211 #define IDETAPE_ERROR_FILEMARK 102
212 #define IDETAPE_ERROR_EOD 103
213
214 /* Structures related to the MODE SELECT / MODE SENSE packet commands. */
215 #define IDETAPE_BLOCK_DESCRIPTOR 0
216 #define IDETAPE_CAPABILITIES_PAGE 0x2a
217
218 /* Tape flag bits values. */
219 enum {
220 IDETAPE_FLAG_IGNORE_DSC = (1 << 0),
221 /* 0 When the tape position is unknown */
222 IDETAPE_FLAG_ADDRESS_VALID = (1 << 1),
223 /* Device already opened */
224 IDETAPE_FLAG_BUSY = (1 << 2),
225 /* Error detected in a pipeline stage */
226 IDETAPE_FLAG_PIPELINE_ERR = (1 << 3),
227 /* Attempt to auto-detect the current user block size */
228 IDETAPE_FLAG_DETECT_BS = (1 << 4),
229 /* Currently on a filemark */
230 IDETAPE_FLAG_FILEMARK = (1 << 5),
231 /* DRQ interrupt device */
232 IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 6),
233 /* pipeline active */
234 IDETAPE_FLAG_PIPELINE_ACTIVE = (1 << 7),
235 /* 0 = no tape is loaded, so we don't rewind after ejecting */
236 IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 8),
237 };
238
239 /* A pipeline stage. */
240 typedef struct idetape_stage_s {
241 struct request rq; /* The corresponding request */
242 struct idetape_bh *bh; /* The data buffers */
243 struct idetape_stage_s *next; /* Pointer to the next stage */
244 } idetape_stage_t;
245
246 /*
247 * Most of our global data which we need to save even as we leave the driver due
248 * to an interrupt or a timer event is stored in the struct defined below.
249 */
250 typedef struct ide_tape_obj {
251 ide_drive_t *drive;
252 ide_driver_t *driver;
253 struct gendisk *disk;
254 struct kref kref;
255
256 /*
257 * Since a typical character device operation requires more
258 * than one packet command, we provide here enough memory
259 * for the maximum number of interconnected packet commands.
260 * The packet commands are stored in the circular array pc_stack.
261 * pc_stack_index points to the next free entry, and wraps around
262 * to the start when we reach the end of the array.
263 *
264 * pc points to the current processed packet command.
265 *
266 * failed_pc points to the last failed packet command, or contains
267 * NULL if we do not need to retry any packet command. This is
268 * required since an additional packet command is needed before the
269 * retry, to get detailed information on what went wrong.
270 */
271 /* Current packet command */
272 struct ide_atapi_pc *pc;
273 /* Last failed packet command */
274 struct ide_atapi_pc *failed_pc;
275 /* Packet command stack */
276 struct ide_atapi_pc pc_stack[IDETAPE_PC_STACK];
277 /* Next free packet command storage space */
278 int pc_stack_index;
279 struct request rq_stack[IDETAPE_PC_STACK];
280 /* We implement a circular array */
281 int rq_stack_index;
282
283 /*
284 * DSC polling variables.
285 *
286 * While polling for DSC we use postponed_rq to postpone the current
287 * request so that ide.c will be able to service pending requests on the
288 * other device. Note that at most we will have only one DSC (usually
289 * data transfer) request in the device request queue. Additional
290 * requests can be queued in our internal pipeline, but they will be
291 * visible to ide.c only one at a time.
292 */
293 struct request *postponed_rq;
294 /* The time in which we started polling for DSC */
295 unsigned long dsc_polling_start;
296 /* Timer used to poll for dsc */
297 struct timer_list dsc_timer;
298 /* Read/Write dsc polling frequency */
299 unsigned long best_dsc_rw_freq;
300 unsigned long dsc_poll_freq;
301 unsigned long dsc_timeout;
302
303 /* Read position information */
304 u8 partition;
305 /* Current block */
306 unsigned int first_frame;
307
308 /* Last error information */
309 u8 sense_key, asc, ascq;
310
311 /* Character device operation */
312 unsigned int minor;
313 /* device name */
314 char name[4];
315 /* Current character device data transfer direction */
316 u8 chrdev_dir;
317
318 /* tape block size, usually 512 or 1024 bytes */
319 unsigned short blk_size;
320 int user_bs_factor;
321
322 /* Copy of the tape's Capabilities and Mechanical Page */
323 u8 caps[20];
324
325 /*
326 * Active data transfer request parameters.
327 *
328 * At most, there is only one ide-tape originated data transfer request
329 * in the device request queue. This allows ide.c to easily service
330 * requests from the other device when we postpone our active request.
331 * In the pipelined operation mode, we use our internal pipeline
332 * structure to hold more data requests. The data buffer size is chosen
333 * based on the tape's recommendation.
334 */
335 /* ptr to the request which is waiting in the device request queue */
336 struct request *active_data_rq;
337 /* Data buffer size chosen based on the tape's recommendation */
338 int stage_size;
339 idetape_stage_t *merge_stage;
340 int merge_stage_size;
341 struct idetape_bh *bh;
342 char *b_data;
343 int b_count;
344
345 /*
346 * Pipeline parameters.
347 *
348 * To accomplish non-pipelined mode, we simply set the following
349 * variables to zero (or NULL, where appropriate).
350 */
351 /* Number of currently used stages */
352 int nr_stages;
353 /* Number of pending stages */
354 int nr_pending_stages;
355 /* We will not allocate more than this number of stages */
356 int max_stages, min_pipeline, max_pipeline;
357 /* The first stage which will be removed from the pipeline */
358 idetape_stage_t *first_stage;
359 /* The currently active stage */
360 idetape_stage_t *active_stage;
361 /* Will be serviced after the currently active request */
362 idetape_stage_t *next_stage;
363 /* New requests will be added to the pipeline here */
364 idetape_stage_t *last_stage;
365 int pages_per_stage;
366 /* Wasted space in each stage */
367 int excess_bh_size;
368
369 /* Status/Action flags: long for set_bit */
370 unsigned long flags;
371 /* protects the ide-tape queue */
372 spinlock_t lock;
373
374 /* Measures average tape speed */
375 unsigned long avg_time;
376 int avg_size;
377 int avg_speed;
378
379 /* the door is currently locked */
380 int door_locked;
381 /* the tape hardware is write protected */
382 char drv_write_prot;
383 /* the tape is write protected (hardware or opened as read-only) */
384 char write_prot;
385
386 /*
387 * Limit the number of times a request can be postponed, to avoid an
388 * infinite postpone deadlock.
389 */
390 int postpone_cnt;
391
392 /* Speed control at the tape buffers input/output */
393 unsigned long insert_time;
394 int insert_size;
395 int insert_speed;
396 int measure_insert_time;
397
398 u32 debug_mask;
399 } idetape_tape_t;
400
401 static DEFINE_MUTEX(idetape_ref_mutex);
402
403 static struct class *idetape_sysfs_class;
404
405 #define to_ide_tape(obj) container_of(obj, struct ide_tape_obj, kref)
406
407 #define ide_tape_g(disk) \
408 container_of((disk)->private_data, struct ide_tape_obj, driver)
409
410 static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
411 {
412 struct ide_tape_obj *tape = NULL;
413
414 mutex_lock(&idetape_ref_mutex);
415 tape = ide_tape_g(disk);
416 if (tape)
417 kref_get(&tape->kref);
418 mutex_unlock(&idetape_ref_mutex);
419 return tape;
420 }
421
422 static void ide_tape_release(struct kref *);
423
424 static void ide_tape_put(struct ide_tape_obj *tape)
425 {
426 mutex_lock(&idetape_ref_mutex);
427 kref_put(&tape->kref, ide_tape_release);
428 mutex_unlock(&idetape_ref_mutex);
429 }
430
431 /*
432 * The variables below are used for the character device interface. Additional
433 * state variables are defined in our ide_drive_t structure.
434 */
435 static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];
436
437 #define ide_tape_f(file) ((file)->private_data)
438
439 static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
440 {
441 struct ide_tape_obj *tape = NULL;
442
443 mutex_lock(&idetape_ref_mutex);
444 tape = idetape_devs[i];
445 if (tape)
446 kref_get(&tape->kref);
447 mutex_unlock(&idetape_ref_mutex);
448 return tape;
449 }
450
451 static void idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
452 unsigned int bcount)
453 {
454 struct idetape_bh *bh = pc->bh;
455 int count;
456
457 while (bcount) {
458 if (bh == NULL) {
459 printk(KERN_ERR "ide-tape: bh == NULL in "
460 "idetape_input_buffers\n");
461 ide_atapi_discard_data(drive, bcount);
462 return;
463 }
464 count = min(
465 (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
466 bcount);
467 HWIF(drive)->atapi_input_bytes(drive, bh->b_data +
468 atomic_read(&bh->b_count), count);
469 bcount -= count;
470 atomic_add(count, &bh->b_count);
471 if (atomic_read(&bh->b_count) == bh->b_size) {
472 bh = bh->b_reqnext;
473 if (bh)
474 atomic_set(&bh->b_count, 0);
475 }
476 }
477 pc->bh = bh;
478 }
479
480 static void idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
481 unsigned int bcount)
482 {
483 struct idetape_bh *bh = pc->bh;
484 int count;
485
486 while (bcount) {
487 if (bh == NULL) {
488 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
489 __func__);
490 return;
491 }
492 count = min((unsigned int)pc->b_count, (unsigned int)bcount);
493 HWIF(drive)->atapi_output_bytes(drive, pc->b_data, count);
494 bcount -= count;
495 pc->b_data += count;
496 pc->b_count -= count;
497 if (!pc->b_count) {
498 bh = bh->b_reqnext;
499 pc->bh = bh;
500 if (bh) {
501 pc->b_data = bh->b_data;
502 pc->b_count = atomic_read(&bh->b_count);
503 }
504 }
505 }
506 }
507
508 static void idetape_update_buffers(struct ide_atapi_pc *pc)
509 {
510 struct idetape_bh *bh = pc->bh;
511 int count;
512 unsigned int bcount = pc->xferred;
513
514 if (pc->flags & PC_FLAG_WRITING)
515 return;
516 while (bcount) {
517 if (bh == NULL) {
518 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
519 __func__);
520 return;
521 }
522 count = min((unsigned int)bh->b_size, (unsigned int)bcount);
523 atomic_set(&bh->b_count, count);
524 if (atomic_read(&bh->b_count) == bh->b_size)
525 bh = bh->b_reqnext;
526 bcount -= count;
527 }
528 pc->bh = bh;
529 }
530
531 /*
532 * idetape_next_pc_storage returns a pointer to a place in which we can
533 * safely store a packet command, even though we intend to leave the
534 * driver. A storage space for a maximum of IDETAPE_PC_STACK packet
535 * commands is allocated at initialization time.
536 */
537 static struct ide_atapi_pc *idetape_next_pc_storage(ide_drive_t *drive)
538 {
539 idetape_tape_t *tape = drive->driver_data;
540
541 debug_log(DBG_PCRQ_STACK, "pc_stack_index=%d\n", tape->pc_stack_index);
542
543 if (tape->pc_stack_index == IDETAPE_PC_STACK)
544 tape->pc_stack_index = 0;
545 return (&tape->pc_stack[tape->pc_stack_index++]);
546 }
547
548 /*
549 * idetape_next_rq_storage is used along with idetape_next_pc_storage.
550 * Since we queue packet commands in the request queue, we need to
551 * allocate a request, along with the allocation of a packet command.
552 */
553
554 /**************************************************************
555 * *
556 * This should get fixed to use kmalloc(.., GFP_ATOMIC) *
557 * followed later on by kfree(). -ml *
558 * *
559 **************************************************************/
560
561 static struct request *idetape_next_rq_storage(ide_drive_t *drive)
562 {
563 idetape_tape_t *tape = drive->driver_data;
564
565 debug_log(DBG_PCRQ_STACK, "rq_stack_index=%d\n", tape->rq_stack_index);
566
567 if (tape->rq_stack_index == IDETAPE_PC_STACK)
568 tape->rq_stack_index = 0;
569 return (&tape->rq_stack[tape->rq_stack_index++]);
570 }
571
572 static void idetape_init_pc(struct ide_atapi_pc *pc)
573 {
574 memset(pc->c, 0, 12);
575 pc->retries = 0;
576 pc->flags = 0;
577 pc->req_xfer = 0;
578 pc->buf = pc->pc_buf;
579 pc->buf_size = IDETAPE_PC_BUFFER_SIZE;
580 pc->bh = NULL;
581 pc->b_data = NULL;
582 }
583
584 /*
585 * called on each failed packet command retry to analyze the request sense. We
586 * currently do not utilize this information.
587 */
588 static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
589 {
590 idetape_tape_t *tape = drive->driver_data;
591 struct ide_atapi_pc *pc = tape->failed_pc;
592
593 tape->sense_key = sense[2] & 0xF;
594 tape->asc = sense[12];
595 tape->ascq = sense[13];
596
597 debug_log(DBG_ERR, "pc = %x, sense key = %x, asc = %x, ascq = %x\n",
598 pc->c[0], tape->sense_key, tape->asc, tape->ascq);
599
600 /* Correct pc->xferred by asking the tape. */
601 if (pc->flags & PC_FLAG_DMA_ERROR) {
602 pc->xferred = pc->req_xfer -
603 tape->blk_size *
604 be32_to_cpu(get_unaligned((u32 *)&sense[3]));
605 idetape_update_buffers(pc);
606 }
607
608 /*
609 * If error was the result of a zero-length read or write command,
610 * with sense key=5, asc=0x22, ascq=0, let it slide. Some drives
611 * (i.e. Seagate STT3401A Travan) don't support 0-length read/writes.
612 */
613 if ((pc->c[0] == READ_6 || pc->c[0] == WRITE_6)
614 /* length == 0 */
615 && pc->c[4] == 0 && pc->c[3] == 0 && pc->c[2] == 0) {
616 if (tape->sense_key == 5) {
617 /* don't report an error, everything's ok */
618 pc->error = 0;
619 /* don't retry read/write */
620 pc->flags |= PC_FLAG_ABORT;
621 }
622 }
623 if (pc->c[0] == READ_6 && (sense[2] & 0x80)) {
624 pc->error = IDETAPE_ERROR_FILEMARK;
625 pc->flags |= PC_FLAG_ABORT;
626 }
627 if (pc->c[0] == WRITE_6) {
628 if ((sense[2] & 0x40) || (tape->sense_key == 0xd
629 && tape->asc == 0x0 && tape->ascq == 0x2)) {
630 pc->error = IDETAPE_ERROR_EOD;
631 pc->flags |= PC_FLAG_ABORT;
632 }
633 }
634 if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
635 if (tape->sense_key == 8) {
636 pc->error = IDETAPE_ERROR_EOD;
637 pc->flags |= PC_FLAG_ABORT;
638 }
639 if (!(pc->flags & PC_FLAG_ABORT) &&
640 pc->xferred)
641 pc->retries = IDETAPE_MAX_PC_RETRIES + 1;
642 }
643 }
644
645 static void idetape_activate_next_stage(ide_drive_t *drive)
646 {
647 idetape_tape_t *tape = drive->driver_data;
648 idetape_stage_t *stage = tape->next_stage;
649 struct request *rq = &stage->rq;
650
651 debug_log(DBG_PROCS, "Enter %s\n", __func__);
652
653 if (stage == NULL) {
654 printk(KERN_ERR "ide-tape: bug: Trying to activate a non"
655 " existing stage\n");
656 return;
657 }
658
659 rq->rq_disk = tape->disk;
660 rq->buffer = NULL;
661 rq->special = (void *)stage->bh;
662 tape->active_data_rq = rq;
663 tape->active_stage = stage;
664 tape->next_stage = stage->next;
665 }
666
667 /* Free a stage along with its related buffers completely. */
668 static void __idetape_kfree_stage(idetape_stage_t *stage)
669 {
670 struct idetape_bh *prev_bh, *bh = stage->bh;
671 int size;
672
673 while (bh != NULL) {
674 if (bh->b_data != NULL) {
675 size = (int) bh->b_size;
676 while (size > 0) {
677 free_page((unsigned long) bh->b_data);
678 size -= PAGE_SIZE;
679 bh->b_data += PAGE_SIZE;
680 }
681 }
682 prev_bh = bh;
683 bh = bh->b_reqnext;
684 kfree(prev_bh);
685 }
686 kfree(stage);
687 }
688
689 static void idetape_kfree_stage(idetape_tape_t *tape, idetape_stage_t *stage)
690 {
691 __idetape_kfree_stage(stage);
692 }
693
694 /*
695 * Remove tape->first_stage from the pipeline. The caller should avoid race
696 * conditions.
697 */
698 static void idetape_remove_stage_head(ide_drive_t *drive)
699 {
700 idetape_tape_t *tape = drive->driver_data;
701 idetape_stage_t *stage;
702
703 debug_log(DBG_PROCS, "Enter %s\n", __func__);
704
705 if (tape->first_stage == NULL) {
706 printk(KERN_ERR "ide-tape: bug: tape->first_stage is NULL\n");
707 return;
708 }
709 if (tape->active_stage == tape->first_stage) {
710 printk(KERN_ERR "ide-tape: bug: Trying to free our active "
711 "pipeline stage\n");
712 return;
713 }
714 stage = tape->first_stage;
715 tape->first_stage = stage->next;
716 idetape_kfree_stage(tape, stage);
717 tape->nr_stages--;
718 if (tape->first_stage == NULL) {
719 tape->last_stage = NULL;
720 if (tape->next_stage != NULL)
721 printk(KERN_ERR "ide-tape: bug: tape->next_stage !="
722 " NULL\n");
723 if (tape->nr_stages)
724 printk(KERN_ERR "ide-tape: bug: nr_stages should be 0 "
725 "now\n");
726 }
727 }
728
729 /*
730 * This will free all the pipeline stages starting from new_last_stage->next
731 * to the end of the list, and point tape->last_stage to new_last_stage.
732 */
733 static void idetape_abort_pipeline(ide_drive_t *drive,
734 idetape_stage_t *new_last_stage)
735 {
736 idetape_tape_t *tape = drive->driver_data;
737 idetape_stage_t *stage = new_last_stage->next;
738 idetape_stage_t *nstage;
739
740 debug_log(DBG_PROCS, "%s: Enter %s\n", tape->name, __func__);
741
742 while (stage) {
743 nstage = stage->next;
744 idetape_kfree_stage(tape, stage);
745 --tape->nr_stages;
746 --tape->nr_pending_stages;
747 stage = nstage;
748 }
749 if (new_last_stage)
750 new_last_stage->next = NULL;
751 tape->last_stage = new_last_stage;
752 tape->next_stage = NULL;
753 }
754
755 /*
756 * Finish servicing a request and insert a pending pipeline request into the
757 * main device queue.
758 */
759 static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
760 {
761 struct request *rq = HWGROUP(drive)->rq;
762 idetape_tape_t *tape = drive->driver_data;
763 unsigned long flags;
764 int error;
765 int remove_stage = 0;
766 idetape_stage_t *active_stage;
767
768 debug_log(DBG_PROCS, "Enter %s\n", __func__);
769
770 switch (uptodate) {
771 case 0: error = IDETAPE_ERROR_GENERAL; break;
772 case 1: error = 0; break;
773 default: error = uptodate;
774 }
775 rq->errors = error;
776 if (error)
777 tape->failed_pc = NULL;
778
779 if (!blk_special_request(rq)) {
780 ide_end_request(drive, uptodate, nr_sects);
781 return 0;
782 }
783
784 spin_lock_irqsave(&tape->lock, flags);
785
786 /* The request was a pipelined data transfer request */
787 if (tape->active_data_rq == rq) {
788 active_stage = tape->active_stage;
789 tape->active_stage = NULL;
790 tape->active_data_rq = NULL;
791 tape->nr_pending_stages--;
792 if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
793 remove_stage = 1;
794 if (error) {
795 set_bit(IDETAPE_FLAG_PIPELINE_ERR,
796 &tape->flags);
797 if (error == IDETAPE_ERROR_EOD)
798 idetape_abort_pipeline(drive,
799 active_stage);
800 }
801 } else if (rq->cmd[0] & REQ_IDETAPE_READ) {
802 if (error == IDETAPE_ERROR_EOD) {
803 set_bit(IDETAPE_FLAG_PIPELINE_ERR,
804 &tape->flags);
805 idetape_abort_pipeline(drive, active_stage);
806 }
807 }
808 if (tape->next_stage != NULL) {
809 idetape_activate_next_stage(drive);
810
811 /* Insert the next request into the request queue. */
812 (void)ide_do_drive_cmd(drive, tape->active_data_rq,
813 ide_end);
814 } else if (!error) {
815 /*
816 * This is a part of the feedback loop which tries to
817 * find the optimum number of stages. We are starting
818 * from a minimum maximum number of stages, and if we
819 * sense that the pipeline is empty, we try to increase
820 * it, until we reach the user compile time memory
821 * limit.
822 */
823 int i = (tape->max_pipeline - tape->min_pipeline) / 10;
824
825 tape->max_stages += max(i, 1);
826 tape->max_stages = max(tape->max_stages,
827 tape->min_pipeline);
828 tape->max_stages = min(tape->max_stages,
829 tape->max_pipeline);
830 }
831 }
832 ide_end_drive_cmd(drive, 0, 0);
833
834 if (remove_stage)
835 idetape_remove_stage_head(drive);
836 if (tape->active_data_rq == NULL)
837 clear_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
838 spin_unlock_irqrestore(&tape->lock, flags);
839 return 0;
840 }
841
842 static ide_startstop_t idetape_request_sense_callback(ide_drive_t *drive)
843 {
844 idetape_tape_t *tape = drive->driver_data;
845
846 debug_log(DBG_PROCS, "Enter %s\n", __func__);
847
848 if (!tape->pc->error) {
849 idetape_analyze_error(drive, tape->pc->buf);
850 idetape_end_request(drive, 1, 0);
851 } else {
852 printk(KERN_ERR "ide-tape: Error in REQUEST SENSE itself - "
853 "Aborting request!\n");
854 idetape_end_request(drive, 0, 0);
855 }
856 return ide_stopped;
857 }
858
859 static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc)
860 {
861 idetape_init_pc(pc);
862 pc->c[0] = REQUEST_SENSE;
863 pc->c[4] = 20;
864 pc->req_xfer = 20;
865 pc->idetape_callback = &idetape_request_sense_callback;
866 }
867
868 static void idetape_init_rq(struct request *rq, u8 cmd)
869 {
870 memset(rq, 0, sizeof(*rq));
871 rq->cmd_type = REQ_TYPE_SPECIAL;
872 rq->cmd[0] = cmd;
873 }
874
875 /*
876 * Generate a new packet command request in front of the request queue, before
877 * the current request, so that it will be processed immediately, on the next
878 * pass through the driver. The function below is called from the request
879 * handling part of the driver (the "bottom" part). Safe storage for the request
880 * should be allocated with idetape_next_{pc,rq}_storage() prior to that.
881 *
882 * Memory for those requests is pre-allocated at initialization time, and is
883 * limited to IDETAPE_PC_STACK requests. We assume that we have enough space for
884 * the maximum possible number of inter-dependent packet commands.
885 *
886 * The higher level of the driver - The ioctl handler and the character device
887 * handling functions should queue requests to the lower level part and wait for
888 * their completion using idetape_queue_pc_tail or idetape_queue_rw_tail.
889 */
890 static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
891 struct request *rq)
892 {
893 struct ide_tape_obj *tape = drive->driver_data;
894
895 idetape_init_rq(rq, REQ_IDETAPE_PC1);
896 rq->buffer = (char *) pc;
897 rq->rq_disk = tape->disk;
898 (void) ide_do_drive_cmd(drive, rq, ide_preempt);
899 }
900
901 /*
902 * idetape_retry_pc is called when an error was detected during the
903 * last packet command. We queue a request sense packet command at
904 * the head of the request list.
905 */
906 static ide_startstop_t idetape_retry_pc(ide_drive_t *drive)
907 {
908 idetape_tape_t *tape = drive->driver_data;
909 struct ide_atapi_pc *pc;
910 struct request *rq;
911
912 (void)ide_read_error(drive);
913 pc = idetape_next_pc_storage(drive);
914 rq = idetape_next_rq_storage(drive);
915 idetape_create_request_sense_cmd(pc);
916 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
917 idetape_queue_pc_head(drive, pc, rq);
918 return ide_stopped;
919 }
920
921 /*
922 * Postpone the current request so that ide.c will be able to service requests
923 * from another device on the same hwgroup while we are polling for DSC.
924 */
925 static void idetape_postpone_request(ide_drive_t *drive)
926 {
927 idetape_tape_t *tape = drive->driver_data;
928
929 debug_log(DBG_PROCS, "Enter %s\n", __func__);
930
931 tape->postponed_rq = HWGROUP(drive)->rq;
932 ide_stall_queue(drive, tape->dsc_poll_freq);
933 }
934
935 typedef void idetape_io_buf(ide_drive_t *, struct ide_atapi_pc *, unsigned int);
936
937 /*
938 * This is the usual interrupt handler which will be called during a packet
939 * command. We will transfer some of the data (as requested by the drive) and
940 * will re-point the interrupt handler to us. When the data transfer is finished, we
941 * will act according to the algorithm described before
942 * idetape_issue_pc.
943 */
944 static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
945 {
946 ide_hwif_t *hwif = drive->hwif;
947 idetape_tape_t *tape = drive->driver_data;
948 struct ide_atapi_pc *pc = tape->pc;
949 xfer_func_t *xferfunc;
950 idetape_io_buf *iobuf;
951 unsigned int temp;
952 #if SIMULATE_ERRORS
953 static int error_sim_count;
954 #endif
955 u16 bcount;
956 u8 stat, ireason;
957
958 debug_log(DBG_PROCS, "Enter %s - interrupt handler\n", __func__);
959
960 /* Clear the interrupt */
961 stat = ide_read_status(drive);
962
963 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
964 if (hwif->dma_ops->dma_end(drive) || (stat & ERR_STAT)) {
965 /*
966 * A DMA error is not expected here; it usually
967 * indicates a hardware error or an aborted
968 * transfer. Note that if the tape crosses a
969 * filemark during a READ command, it issues an
970 * irq and positions itself after the filemark,
971 * so only a partial data transfer occurs - but
972 * no DMA error is reported. We flag the failed
973 * transfer and will later ask the tape, via the
974 * request sense data, how many bytes of the
975 * original request were actually transferred
976 * (we can't get that information from the DMA
977 * engine on most chipsets).
978 */
986 pc->flags |= PC_FLAG_DMA_ERROR;
987 } else {
988 pc->xferred = pc->req_xfer;
989 idetape_update_buffers(pc);
990 }
991 debug_log(DBG_PROCS, "DMA finished\n");
992
993 }
994
995 /* No more interrupts */
996 if ((stat & DRQ_STAT) == 0) {
997 debug_log(DBG_SENSE, "Packet command completed, %d bytes"
998 " transferred\n", pc->xferred);
999
1000 pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
1001 local_irq_enable();
1002
1003 #if SIMULATE_ERRORS
1004 if ((pc->c[0] == WRITE_6 || pc->c[0] == READ_6) &&
1005 (++error_sim_count % 100) == 0) {
1006 printk(KERN_INFO "ide-tape: %s: simulating error\n",
1007 tape->name);
1008 stat |= ERR_STAT;
1009 }
1010 #endif
1011 if ((stat & ERR_STAT) && pc->c[0] == REQUEST_SENSE)
1012 stat &= ~ERR_STAT;
1013 if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) {
1014 /* Error detected */
1015 debug_log(DBG_ERR, "%s: I/O error\n", tape->name);
1016
1017 if (pc->c[0] == REQUEST_SENSE) {
1018 printk(KERN_ERR "ide-tape: I/O error in request"
1019 " sense command\n");
1020 return ide_do_reset(drive);
1021 }
1022 debug_log(DBG_ERR, "[cmd %x]: check condition\n",
1023 pc->c[0]);
1024
1025 /* Retry operation */
1026 return idetape_retry_pc(drive);
1027 }
1028 pc->error = 0;
1029 if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) &&
1030 (stat & SEEK_STAT) == 0) {
1031 /* Media access command */
1032 tape->dsc_polling_start = jiffies;
1033 tape->dsc_poll_freq = IDETAPE_DSC_MA_FAST;
1034 tape->dsc_timeout = jiffies + IDETAPE_DSC_MA_TIMEOUT;
1035 /* Allow ide.c to handle other requests */
1036 idetape_postpone_request(drive);
1037 return ide_stopped;
1038 }
1039 if (tape->failed_pc == pc)
1040 tape->failed_pc = NULL;
1041 /* Command finished - Call the callback function */
1042 return pc->idetape_callback(drive);
1043 }
1044
1045 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
1046 pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
1047 printk(KERN_ERR "ide-tape: The tape wants to issue more "
1048 "interrupts in DMA mode\n");
1049 printk(KERN_ERR "ide-tape: DMA disabled, reverting to PIO\n");
1050 ide_dma_off(drive);
1051 return ide_do_reset(drive);
1052 }
1053 /* Get the number of bytes to transfer on this interrupt. */
1054 bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
1055 hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
1056
1057 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
1058
1059 if (ireason & CD) {
1060 printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__);
1061 return ide_do_reset(drive);
1062 }
1063 if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) {
1064 /* Hopefully, we will never get here */
1065 printk(KERN_ERR "ide-tape: We wanted to %s, ",
1066 (ireason & IO) ? "Write" : "Read");
1067 printk(KERN_ERR "ide-tape: but the tape wants us to %s !\n",
1068 (ireason & IO) ? "Read" : "Write");
1069 return ide_do_reset(drive);
1070 }
1071 if (!(pc->flags & PC_FLAG_WRITING)) {
1072 /* Reading - Check that we have enough space */
1073 temp = pc->xferred + bcount;
1074 if (temp > pc->req_xfer) {
1075 if (temp > pc->buf_size) {
1076 printk(KERN_ERR "ide-tape: The tape wants to "
1077 "send us more data than expected "
1078 "- discarding data\n");
1079 ide_atapi_discard_data(drive, bcount);
1080 ide_set_handler(drive, &idetape_pc_intr,
1081 IDETAPE_WAIT_CMD, NULL);
1082 return ide_started;
1083 }
1084 debug_log(DBG_SENSE, "The tape wants to send us more "
1085 "data than expected - allowing transfer\n");
1086 }
1087 iobuf = &idetape_input_buffers;
1088 xferfunc = hwif->atapi_input_bytes;
1089 } else {
1090 iobuf = &idetape_output_buffers;
1091 xferfunc = hwif->atapi_output_bytes;
1092 }
1093
1094 if (pc->bh)
1095 iobuf(drive, pc, bcount);
1096 else
1097 xferfunc(drive, pc->cur_pos, bcount);
1098
1099 /* Update the current position */
1100 pc->xferred += bcount;
1101 pc->cur_pos += bcount;
1102
1103 debug_log(DBG_SENSE, "[cmd %x] transferred %d bytes on that intr.\n",
1104 pc->c[0], bcount);
1105
1106 /* And set the interrupt handler again */
1107 ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
1108 return ide_started;
1109 }
1110
1111 /*
1112 * Packet Command Interface
1113 *
1114 * The current Packet Command is available in tape->pc, and will not change
1115 * until we finish handling it. Each packet command is associated with a
1116 * callback function that will be called when the command is finished.
1117 *
1118 * The handling is done in the following stages:
1119 *
1120 * 1. idetape_issue_pc will send the packet command to the drive, and will set
1121 * the interrupt handler to idetape_pc_intr.
1122 *
1123 * 2. On each interrupt, idetape_pc_intr will be called. This step will be
1124 * repeated until the device signals us that no more interrupts will be issued.
1125 *
1126 * 3. ATAPI Tape media access commands have immediate status with a delayed
1127 * process. In case of a successful initiation of a media access packet command,
1128 * the DSC bit will be set when the actual execution of the command is finished.
1129 * Since the tape drive will not issue an interrupt, we have to poll for this
1130 * event. In this case, we define the request as "low priority request" by
1131 * setting rq_status to IDETAPE_RQ_POSTPONED, set a timer to poll for DSC and
1132 * exit the driver.
1133 *
1134 * ide.c will then give higher priority to requests which originate from the
1135 * other device, until we change rq_status to RQ_ACTIVE.
1136 *
1137 * 4. When the packet command is finished, it will be checked for errors.
1138 *
1139 * 5. In case an error was found, we queue a request sense packet command in
1140 * front of the request queue and retry the operation up to
1141 * IDETAPE_MAX_PC_RETRIES times.
1142 *
1143 * 6. In case no error was found, or we decided to give up and not to retry
1144 * again, the callback function will be called and then we will handle the next
1145 * request.
1146 */
1147 static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
1148 {
1149 ide_hwif_t *hwif = drive->hwif;
1150 idetape_tape_t *tape = drive->driver_data;
1151 struct ide_atapi_pc *pc = tape->pc;
1152 int retries = 100;
1153 ide_startstop_t startstop;
1154 u8 ireason;
1155
1156 if (ide_wait_stat(&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) {
1157 printk(KERN_ERR "ide-tape: Strange, packet command initiated "
1158 "yet DRQ isn't asserted\n");
1159 return startstop;
1160 }
1161 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
1162 while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
1163 printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing "
1164 "a packet command, retrying\n");
1165 udelay(100);
1166 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
1167 if (retries == 0) {
1168 printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while "
1169 "issuing a packet command, ignoring\n");
1170 ireason |= CD;
1171 ireason &= ~IO;
1172 }
1173 }
1174 if ((ireason & CD) == 0 || (ireason & IO)) {
1175 printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
1176 "a packet command\n");
1177 return ide_do_reset(drive);
1178 }
1179 /* Set the interrupt routine */
1180 ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
1181 #ifdef CONFIG_BLK_DEV_IDEDMA
1182 /* Begin DMA, if necessary */
1183 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS)
1184 hwif->dma_ops->dma_start(drive);
1185 #endif
1186 /* Send the actual packet */
1187 HWIF(drive)->atapi_output_bytes(drive, pc->c, 12);
1188 return ide_started;
1189 }
1190
1191 static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
1192 struct ide_atapi_pc *pc)
1193 {
1194 ide_hwif_t *hwif = drive->hwif;
1195 idetape_tape_t *tape = drive->driver_data;
1196 int dma_ok = 0;
1197 u16 bcount;
1198
1199 if (tape->pc->c[0] == REQUEST_SENSE &&
1200 pc->c[0] == REQUEST_SENSE) {
1201 printk(KERN_ERR "ide-tape: possible ide-tape.c bug - "
1202 "Two request sense in serial were issued\n");
1203 }
1204
1205 if (tape->failed_pc == NULL && pc->c[0] != REQUEST_SENSE)
1206 tape->failed_pc = pc;
1207 /* Set the current packet command */
1208 tape->pc = pc;
1209
1210 if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
1211 (pc->flags & PC_FLAG_ABORT)) {
1212 /*
1213 * We will "abort" retrying a packet command in case legitimate
1214 * error code was received (crossing a filemark, or end of the
1215 * media, for example).
1216 */
1217 if (!(pc->flags & PC_FLAG_ABORT)) {
1218 if (!(pc->c[0] == TEST_UNIT_READY &&
1219 tape->sense_key == 2 && tape->asc == 4 &&
1220 (tape->ascq == 1 || tape->ascq == 8))) {
1221 printk(KERN_ERR "ide-tape: %s: I/O error, "
1222 "pc = %2x, key = %2x, "
1223 "asc = %2x, ascq = %2x\n",
1224 tape->name, pc->c[0],
1225 tape->sense_key, tape->asc,
1226 tape->ascq);
1227 }
1228 /* Giving up */
1229 pc->error = IDETAPE_ERROR_GENERAL;
1230 }
1231 tape->failed_pc = NULL;
1232 return pc->idetape_callback(drive);
1233 }
1234 debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
1235
1236 pc->retries++;
1237 /* We haven't transferred any data yet */
1238 pc->xferred = 0;
1239 pc->cur_pos = pc->buf;
1240 /* Request to transfer the entire buffer at once */
1241 bcount = pc->req_xfer;
1242
1243 if (pc->flags & PC_FLAG_DMA_ERROR) {
1244 pc->flags &= ~PC_FLAG_DMA_ERROR;
1245 printk(KERN_WARNING "ide-tape: DMA disabled, "
1246 "reverting to PIO\n");
1247 ide_dma_off(drive);
1248 }
1249 if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
1250 dma_ok = !hwif->dma_ops->dma_setup(drive);
1251
1252 ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
1253 IDE_TFLAG_OUT_DEVICE, bcount, dma_ok);
1254
1255 if (dma_ok)
1256 /* Will begin DMA later */
1257 pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
1258 if (test_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags)) {
1259 ide_execute_command(drive, WIN_PACKETCMD, &idetape_transfer_pc,
1260 IDETAPE_WAIT_CMD, NULL);
1261 return ide_started;
1262 } else {
1263 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
1264 return idetape_transfer_pc(drive);
1265 }
1266 }
1267
1268 static ide_startstop_t idetape_pc_callback(ide_drive_t *drive)
1269 {
1270 idetape_tape_t *tape = drive->driver_data;
1271
1272 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1273
1274 idetape_end_request(drive, tape->pc->error ? 0 : 1, 0);
1275 return ide_stopped;
1276 }
1277
1278 /* A mode sense command is used to "sense" tape parameters. */
1279 static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
1280 {
1281 idetape_init_pc(pc);
1282 pc->c[0] = MODE_SENSE;
1283 if (page_code != IDETAPE_BLOCK_DESCRIPTOR)
1284 /* DBD = 1 - Don't return block descriptors */
1285 pc->c[1] = 8;
1286 pc->c[2] = page_code;
1287 /*
1288 * Changed pc->c[3] to 0 (255 will at best return unused info).
1289 *
1290 * For SCSI this byte is defined as subpage instead of high byte
1291 * of length and some IDE drives seem to interpret it this way
1292 * and return an error when 255 is used.
1293 */
1294 pc->c[3] = 0;
1295 /* We will just discard data in that case */
1296 pc->c[4] = 255;
1297 if (page_code == IDETAPE_BLOCK_DESCRIPTOR)
1298 pc->req_xfer = 12;
1299 else if (page_code == IDETAPE_CAPABILITIES_PAGE)
1300 pc->req_xfer = 24;
1301 else
1302 pc->req_xfer = 50;
1303 pc->idetape_callback = &idetape_pc_callback;
1304 }
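
/*
 * Usage sketch (illustrative only): this helper pairs naturally with
 * idetape_queue_pc_tail() below, the same way idetape_flush_tape_buffers()
 * queues its WRITE FILEMARKS command. On success the mode data (a mode
 * parameter header followed by the requested page) is available in pc.buf:
 *
 *	struct ide_atapi_pc pc;
 *
 *	idetape_create_mode_sense_cmd(&pc, IDETAPE_CAPABILITIES_PAGE);
 *	if (idetape_queue_pc_tail(drive, &pc)) {
 *		printk(KERN_ERR "ide-tape: Can't get mode sense results\n");
 *		return;
 *	}
 */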
1305
1306 static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
1307 {
1308 idetape_tape_t *tape = drive->driver_data;
1309 struct ide_atapi_pc *pc = tape->pc;
1310 u8 stat;
1311
1312 stat = ide_read_status(drive);
1313
1314 if (stat & SEEK_STAT) {
1315 if (stat & ERR_STAT) {
1316 /* Error detected */
1317 if (pc->c[0] != TEST_UNIT_READY)
1318 printk(KERN_ERR "ide-tape: %s: I/O error, ",
1319 tape->name);
1320 /* Retry operation */
1321 return idetape_retry_pc(drive);
1322 }
1323 pc->error = 0;
1324 if (tape->failed_pc == pc)
1325 tape->failed_pc = NULL;
1326 } else {
1327 pc->error = IDETAPE_ERROR_GENERAL;
1328 tape->failed_pc = NULL;
1329 }
1330 return pc->idetape_callback(drive);
1331 }
1332
1333 static ide_startstop_t idetape_rw_callback(ide_drive_t *drive)
1334 {
1335 idetape_tape_t *tape = drive->driver_data;
1336 struct request *rq = HWGROUP(drive)->rq;
1337 int blocks = tape->pc->xferred / tape->blk_size;
1338
1339 tape->avg_size += blocks * tape->blk_size;
1340 tape->insert_size += blocks * tape->blk_size;
1341 if (tape->insert_size > 1024 * 1024)
1342 tape->measure_insert_time = 1;
1343 if (tape->measure_insert_time) {
1344 tape->measure_insert_time = 0;
1345 tape->insert_time = jiffies;
1346 tape->insert_size = 0;
1347 }
1348 if (time_after(jiffies, tape->insert_time))
1349 tape->insert_speed = tape->insert_size / 1024 * HZ /
1350 (jiffies - tape->insert_time);
1351 if (time_after_eq(jiffies, tape->avg_time + HZ)) {
1352 tape->avg_speed = tape->avg_size * HZ /
1353 (jiffies - tape->avg_time) / 1024;
1354 tape->avg_size = 0;
1355 tape->avg_time = jiffies;
1356 }
1357 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1358
1359 tape->first_frame += blocks;
1360 rq->current_nr_sectors -= blocks;
1361
1362 if (!tape->pc->error)
1363 idetape_end_request(drive, 1, 0);
1364 else
1365 idetape_end_request(drive, tape->pc->error, 0);
1366 return ide_stopped;
1367 }
1368
1369 static void idetape_create_read_cmd(idetape_tape_t *tape,
1370 struct ide_atapi_pc *pc,
1371 unsigned int length, struct idetape_bh *bh)
1372 {
1373 idetape_init_pc(pc);
1374 pc->c[0] = READ_6;
1375 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
1376 pc->c[1] = 1;
1377 pc->idetape_callback = &idetape_rw_callback;
1378 pc->bh = bh;
1379 atomic_set(&bh->b_count, 0);
1380 pc->buf = NULL;
1381 pc->buf_size = length * tape->blk_size;
1382 pc->req_xfer = pc->buf_size;
1383 if (pc->req_xfer == tape->stage_size)
1384 pc->flags |= PC_FLAG_DMA_RECOMMENDED;
1385 }
1386
1387 static void idetape_create_write_cmd(idetape_tape_t *tape,
1388 struct ide_atapi_pc *pc,
1389 unsigned int length, struct idetape_bh *bh)
1390 {
1391 idetape_init_pc(pc);
1392 pc->c[0] = WRITE_6;
1393 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
1394 pc->c[1] = 1;
1395 pc->idetape_callback = &idetape_rw_callback;
1396 pc->flags |= PC_FLAG_WRITING;
1397 pc->bh = bh;
1398 pc->b_data = bh->b_data;
1399 pc->b_count = atomic_read(&bh->b_count);
1400 pc->buf = NULL;
1401 pc->buf_size = length * tape->blk_size;
1402 pc->req_xfer = pc->buf_size;
1403 if (pc->req_xfer == tape->stage_size)
1404 pc->flags |= PC_FLAG_DMA_RECOMMENDED;
1405 }
1406
1407 static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1408 struct request *rq, sector_t block)
1409 {
1410 idetape_tape_t *tape = drive->driver_data;
1411 struct ide_atapi_pc *pc = NULL;
1412 struct request *postponed_rq = tape->postponed_rq;
1413 u8 stat;
1414
1415 debug_log(DBG_SENSE, "sector: %ld, nr_sectors: %ld,"
1416 " current_nr_sectors: %d\n",
1417 rq->sector, rq->nr_sectors, rq->current_nr_sectors);
1418
1419 if (!blk_special_request(rq)) {
1420 /* We do not support buffer cache originated requests. */
1421 printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
1422 "request queue (%d)\n", drive->name, rq->cmd_type);
1423 ide_end_request(drive, 0, 0);
1424 return ide_stopped;
1425 }
1426
1427 /* Retry a failed packet command */
1428 if (tape->failed_pc && tape->pc->c[0] == REQUEST_SENSE)
1429 return idetape_issue_pc(drive, tape->failed_pc);
1430
1431 if (postponed_rq != NULL)
1432 if (rq != postponed_rq) {
1433 printk(KERN_ERR "ide-tape: ide-tape.c bug - "
1434 "Two DSC requests were queued\n");
1435 idetape_end_request(drive, 0, 0);
1436 return ide_stopped;
1437 }
1438
1439 tape->postponed_rq = NULL;
1440
1441 /*
1442 * If the tape is still busy, postpone our request and service
1443 * the other device meanwhile.
1444 */
1445 stat = ide_read_status(drive);
1446
1447 if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2))
1448 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
1449
1450 if (drive->post_reset == 1) {
1451 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
1452 drive->post_reset = 0;
1453 }
1454
1455 if (time_after(jiffies, tape->insert_time))
1456 tape->insert_speed = tape->insert_size / 1024 * HZ /
1457 (jiffies - tape->insert_time);
1458 if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) &&
1459 (stat & SEEK_STAT) == 0) {
1460 if (postponed_rq == NULL) {
1461 tape->dsc_polling_start = jiffies;
1462 tape->dsc_poll_freq = tape->best_dsc_rw_freq;
1463 tape->dsc_timeout = jiffies + IDETAPE_DSC_RW_TIMEOUT;
1464 } else if (time_after(jiffies, tape->dsc_timeout)) {
1465 printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
1466 tape->name);
1467 if (rq->cmd[0] & REQ_IDETAPE_PC2) {
1468 idetape_media_access_finished(drive);
1469 return ide_stopped;
1470 } else {
1471 return ide_do_reset(drive);
1472 }
1473 } else if (time_after(jiffies,
1474 tape->dsc_polling_start +
1475 IDETAPE_DSC_MA_THRESHOLD))
1476 tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
1477 idetape_postpone_request(drive);
1478 return ide_stopped;
1479 }
1480 if (rq->cmd[0] & REQ_IDETAPE_READ) {
1481 tape->postpone_cnt = 0;
1482 pc = idetape_next_pc_storage(drive);
1483 idetape_create_read_cmd(tape, pc, rq->current_nr_sectors,
1484 (struct idetape_bh *)rq->special);
1485 goto out;
1486 }
1487 if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
1488 tape->postpone_cnt = 0;
1489 pc = idetape_next_pc_storage(drive);
1490 idetape_create_write_cmd(tape, pc, rq->current_nr_sectors,
1491 (struct idetape_bh *)rq->special);
1492 goto out;
1493 }
1494 if (rq->cmd[0] & REQ_IDETAPE_PC1) {
1495 pc = (struct ide_atapi_pc *) rq->buffer;
1496 rq->cmd[0] &= ~(REQ_IDETAPE_PC1);
1497 rq->cmd[0] |= REQ_IDETAPE_PC2;
1498 goto out;
1499 }
1500 if (rq->cmd[0] & REQ_IDETAPE_PC2) {
1501 idetape_media_access_finished(drive);
1502 return ide_stopped;
1503 }
1504 BUG();
1505 out:
1506 return idetape_issue_pc(drive, pc);
1507 }
1508
1509 /* Pipeline related functions */
1510
1511 /*
1512 * The function below uses __get_free_page to allocate a pipeline stage, along
1513 * with all the necessary small buffers which together make a buffer of size
1514 * tape->stage_size (or a bit more). We attempt to combine sequential pages as
1515 * much as possible.
1516 *
1517 * It returns a pointer to the new allocated stage, or NULL if we can't (or
1518 * don't want to) allocate a stage.
1519 *
1520 * Pipeline stages are optional and are used to increase performance. If we
1521 * can't allocate them, we'll manage without them.
1522 */
1523 static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full,
1524 int clear)
1525 {
1526 idetape_stage_t *stage;
1527 struct idetape_bh *prev_bh, *bh;
1528 int pages = tape->pages_per_stage;
1529 char *b_data = NULL;
1530
1531 stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL);
1532 if (!stage)
1533 return NULL;
1534 stage->next = NULL;
1535
1536 stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
1537 bh = stage->bh;
1538 if (bh == NULL)
1539 goto abort;
1540 bh->b_reqnext = NULL;
1541 bh->b_data = (char *) __get_free_page(GFP_KERNEL);
1542 if (!bh->b_data)
1543 goto abort;
1544 if (clear)
1545 memset(bh->b_data, 0, PAGE_SIZE);
1546 bh->b_size = PAGE_SIZE;
1547 atomic_set(&bh->b_count, full ? bh->b_size : 0);
1548
1549 while (--pages) {
1550 b_data = (char *) __get_free_page(GFP_KERNEL);
1551 if (!b_data)
1552 goto abort;
1553 if (clear)
1554 memset(b_data, 0, PAGE_SIZE);
1555 if (bh->b_data == b_data + PAGE_SIZE) {
1556 bh->b_size += PAGE_SIZE;
1557 bh->b_data -= PAGE_SIZE;
1558 if (full)
1559 atomic_add(PAGE_SIZE, &bh->b_count);
1560 continue;
1561 }
1562 if (b_data == bh->b_data + bh->b_size) {
1563 bh->b_size += PAGE_SIZE;
1564 if (full)
1565 atomic_add(PAGE_SIZE, &bh->b_count);
1566 continue;
1567 }
1568 prev_bh = bh;
1569 bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
1570 if (!bh) {
1571 free_page((unsigned long) b_data);
1572 goto abort;
1573 }
1574 bh->b_reqnext = NULL;
1575 bh->b_data = b_data;
1576 bh->b_size = PAGE_SIZE;
1577 atomic_set(&bh->b_count, full ? bh->b_size : 0);
1578 prev_bh->b_reqnext = bh;
1579 }
1580 bh->b_size -= tape->excess_bh_size;
1581 if (full)
1582 atomic_sub(tape->excess_bh_size, &bh->b_count);
1583 return stage;
1584 abort:
1585 __idetape_kfree_stage(stage);
1586 return NULL;
1587 }
1588
1589 static int idetape_copy_stage_from_user(idetape_tape_t *tape,
1590 idetape_stage_t *stage, const char __user *buf, int n)
1591 {
1592 struct idetape_bh *bh = tape->bh;
1593 int count;
1594 int ret = 0;
1595
1596 while (n) {
1597 if (bh == NULL) {
1598 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
1599 __func__);
1600 return 1;
1601 }
1602 count = min((unsigned int)
1603 (bh->b_size - atomic_read(&bh->b_count)),
1604 (unsigned int)n);
1605 if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf,
1606 count))
1607 ret = 1;
1608 n -= count;
1609 atomic_add(count, &bh->b_count);
1610 buf += count;
1611 if (atomic_read(&bh->b_count) == bh->b_size) {
1612 bh = bh->b_reqnext;
1613 if (bh)
1614 atomic_set(&bh->b_count, 0);
1615 }
1616 }
1617 tape->bh = bh;
1618 return ret;
1619 }
1620
1621 static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
1622 int n)
1623 {
1624 struct idetape_bh *bh = tape->bh;
1625 int count;
1626 int ret = 0;
1627
1628 while (n) {
1629 if (bh == NULL) {
1630 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
1631 __func__);
1632 return 1;
1633 }
1634 count = min(tape->b_count, n);
1635 if (copy_to_user(buf, tape->b_data, count))
1636 ret = 1;
1637 n -= count;
1638 tape->b_data += count;
1639 tape->b_count -= count;
1640 buf += count;
1641 if (!tape->b_count) {
1642 bh = bh->b_reqnext;
1643 tape->bh = bh;
1644 if (bh) {
1645 tape->b_data = bh->b_data;
1646 tape->b_count = atomic_read(&bh->b_count);
1647 }
1648 }
1649 }
1650 return ret;
1651 }
1652
1653 static void idetape_init_merge_stage(idetape_tape_t *tape)
1654 {
1655 struct idetape_bh *bh = tape->merge_stage->bh;
1656
1657 tape->bh = bh;
1658 if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
1659 atomic_set(&bh->b_count, 0);
1660 else {
1661 tape->b_data = bh->b_data;
1662 tape->b_count = atomic_read(&bh->b_count);
1663 }
1664 }
1665
1666 /* Install a completion in a pending request and sleep until it is serviced. The
1667 * caller should ensure that the request will not be serviced before we install
1668 * the completion (usually by disabling interrupts).
1669 */
1670 static void idetape_wait_for_request(ide_drive_t *drive, struct request *rq)
1671 {
1672 DECLARE_COMPLETION_ONSTACK(wait);
1673 idetape_tape_t *tape = drive->driver_data;
1674
1675 if (rq == NULL || !blk_special_request(rq)) {
1676 printk(KERN_ERR "ide-tape: bug: Trying to sleep on non-valid"
1677 " request\n");
1678 return;
1679 }
1680 rq->end_io_data = &wait;
1681 rq->end_io = blk_end_sync_rq;
1682 spin_unlock_irq(&tape->lock);
1683 wait_for_completion(&wait);
1684 /* The stage and its struct request have been deallocated */
1685 spin_lock_irq(&tape->lock);
1686 }
1687
1688 static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive)
1689 {
1690 idetape_tape_t *tape = drive->driver_data;
1691 u8 *readpos = tape->pc->buf;
1692
1693 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1694
1695 if (!tape->pc->error) {
1696 debug_log(DBG_SENSE, "BOP - %s\n",
1697 (readpos[0] & 0x80) ? "Yes" : "No");
1698 debug_log(DBG_SENSE, "EOP - %s\n",
1699 (readpos[0] & 0x40) ? "Yes" : "No");
1700
1701 if (readpos[0] & 0x4) {
1702 printk(KERN_INFO "ide-tape: Block location is unknown"
1703 "to the tape\n");
1704 clear_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
1705 idetape_end_request(drive, 0, 0);
1706 } else {
1707 debug_log(DBG_SENSE, "Block Location - %u\n",
1708 be32_to_cpu(*(u32 *)&readpos[4]));
1709
1710 tape->partition = readpos[1];
1711 tape->first_frame =
1712 be32_to_cpu(*(u32 *)&readpos[4]);
1713 set_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
1714 idetape_end_request(drive, 1, 0);
1715 }
1716 } else {
1717 idetape_end_request(drive, 0, 0);
1718 }
1719 return ide_stopped;
1720 }
1721
1722 /*
1723 * Write a filemark if write_filemark=1. Flush the device buffers without
1724 * writing a filemark otherwise.
1725 */
1726 static void idetape_create_write_filemark_cmd(ide_drive_t *drive,
1727 struct ide_atapi_pc *pc, int write_filemark)
1728 {
1729 idetape_init_pc(pc);
1730 pc->c[0] = WRITE_FILEMARKS;
1731 pc->c[4] = write_filemark;
1732 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1733 pc->idetape_callback = &idetape_pc_callback;
1734 }
1735
1736 static void idetape_create_test_unit_ready_cmd(struct ide_atapi_pc *pc)
1737 {
1738 idetape_init_pc(pc);
1739 pc->c[0] = TEST_UNIT_READY;
1740 pc->idetape_callback = &idetape_pc_callback;
1741 }
1742
1743 /*
1744 * We add a special packet command request to the tail of the request queue, and
1745 * wait for it to be serviced. This is not to be called from within the request
1746 * handling part of the driver! Here we allocate data on the stack, and it remains
1747 * valid until the request is finished. This is not the case for the bottom part
1748 * of the driver, where we are always leaving the functions to wait for an
1749 * interrupt or a timer event.
1750 *
1751 * From the bottom part of the driver, we should allocate safe memory using
1752 * idetape_next_pc_storage() and ide_tape_next_rq_storage(), and add the request
1753 * to the request list without waiting for it to be serviced! In that case, we
1754 * usually use idetape_queue_pc_head().
1755 */
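/*
 * For illustration only (not part of the driver): a minimal sketch of the
 * on-stack pattern described above, mirroring what helpers such as
 * idetape_flush_tape_buffers() do further down:
 *
 *	struct ide_atapi_pc pc;
 *
 *	idetape_create_test_unit_ready_cmd(&pc);
 *	if (__idetape_queue_pc_tail(drive, &pc))
 *		return -EIO;	hypothetical error handling
 */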
1756 static int __idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
1757 {
1758 struct ide_tape_obj *tape = drive->driver_data;
1759 struct request rq;
1760
1761 idetape_init_rq(&rq, REQ_IDETAPE_PC1);
1762 rq.buffer = (char *) pc;
1763 rq.rq_disk = tape->disk;
1764 return ide_do_drive_cmd(drive, &rq, ide_wait);
1765 }
1766
1767 static void idetape_create_load_unload_cmd(ide_drive_t *drive,
1768 struct ide_atapi_pc *pc, int cmd)
1769 {
1770 idetape_init_pc(pc);
1771 pc->c[0] = START_STOP;
1772 pc->c[4] = cmd;
1773 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1774 pc->idetape_callback = &idetape_pc_callback;
1775 }
1776
1777 static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
1778 {
1779 idetape_tape_t *tape = drive->driver_data;
1780 struct ide_atapi_pc pc;
1781 int load_attempted = 0;
1782
1783 /* Wait for the tape to become ready */
1784 set_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
1785 timeout += jiffies;
1786 while (time_before(jiffies, timeout)) {
1787 idetape_create_test_unit_ready_cmd(&pc);
1788 if (!__idetape_queue_pc_tail(drive, &pc))
1789 return 0;
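/*
 * Sense 2/04/02 ("not ready, initializing command required") or
 * ASC 0x3a ("medium not present") indicate that no usable medium is
 * loaded, so we try a single load; 2/04/01 ("becoming ready") and
 * 2/04/08 are treated as transient and we keep polling; anything
 * else is a hard error.
 */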
1790 if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
1791 || (tape->asc == 0x3A)) {
1792 /* no media */
1793 if (load_attempted)
1794 return -ENOMEDIUM;
1795 idetape_create_load_unload_cmd(drive, &pc,
1796 IDETAPE_LU_LOAD_MASK);
1797 __idetape_queue_pc_tail(drive, &pc);
1798 load_attempted = 1;
1799 /* not about to be ready */
1800 } else if (!(tape->sense_key == 2 && tape->asc == 4 &&
1801 (tape->ascq == 1 || tape->ascq == 8)))
1802 return -EIO;
1803 msleep(100);
1804 }
1805 return -EIO;
1806 }
1807
1808 static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
1809 {
1810 return __idetape_queue_pc_tail(drive, pc);
1811 }
1812
1813 static int idetape_flush_tape_buffers(ide_drive_t *drive)
1814 {
1815 struct ide_atapi_pc pc;
1816 int rc;
1817
1818 idetape_create_write_filemark_cmd(drive, &pc, 0);
1819 rc = idetape_queue_pc_tail(drive, &pc);
1820 if (rc)
1821 return rc;
1822 idetape_wait_ready(drive, 60 * 5 * HZ);
1823 return 0;
1824 }
1825
1826 static void idetape_create_read_position_cmd(struct ide_atapi_pc *pc)
1827 {
1828 idetape_init_pc(pc);
1829 pc->c[0] = READ_POSITION;
1830 pc->req_xfer = 20;
1831 pc->idetape_callback = &idetape_read_position_callback;
1832 }
1833
1834 static int idetape_read_position(ide_drive_t *drive)
1835 {
1836 idetape_tape_t *tape = drive->driver_data;
1837 struct ide_atapi_pc pc;
1838 int position;
1839
1840 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1841
1842 idetape_create_read_position_cmd(&pc);
1843 if (idetape_queue_pc_tail(drive, &pc))
1844 return -1;
1845 position = tape->first_frame;
1846 return position;
1847 }
1848
1849 static void idetape_create_locate_cmd(ide_drive_t *drive,
1850 struct ide_atapi_pc *pc,
1851 unsigned int block, u8 partition, int skip)
1852 {
1853 idetape_init_pc(pc);
1854 pc->c[0] = POSITION_TO_ELEMENT;
1855 pc->c[1] = 2;
1856 put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[3]);
1857 pc->c[8] = partition;
1858 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1859 pc->idetape_callback = &idetape_pc_callback;
1860 }
1861
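/*
 * Build a PREVENT/ALLOW MEDIUM REMOVAL command. Returns 1 if a command was
 * prepared, or 0 if the drive does not advertise door locking in its
 * capabilities page (in which case there is nothing to queue).
 */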
1862 static int idetape_create_prevent_cmd(ide_drive_t *drive,
1863 struct ide_atapi_pc *pc, int prevent)
1864 {
1865 idetape_tape_t *tape = drive->driver_data;
1866
1867 /* nothing to do if the device does not support locking (capabilities page) */
1868 if (!(tape->caps[6] & 0x01))
1869 return 0;
1870
1871 idetape_init_pc(pc);
1872 pc->c[0] = ALLOW_MEDIUM_REMOVAL;
1873 pc->c[4] = prevent;
1874 pc->idetape_callback = &idetape_pc_callback;
1875 return 1;
1876 }
1877
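/*
 * Throw away everything buffered for the current read direction (the merge
 * stage and any queued read-ahead stages) and return the number of tape
 * blocks discarded, so that the caller can re-position the tape accordingly
 * (see idetape_discard_read_pipeline() below).
 */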
1878 static int __idetape_discard_read_pipeline(ide_drive_t *drive)
1879 {
1880 idetape_tape_t *tape = drive->driver_data;
1881 unsigned long flags;
1882 int cnt;
1883
1884 if (tape->chrdev_dir != IDETAPE_DIR_READ)
1885 return 0;
1886
1887 /* Remove merge stage. */
1888 cnt = tape->merge_stage_size / tape->blk_size;
1889 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
1890 ++cnt; /* Filemarks count as 1 sector */
1891 tape->merge_stage_size = 0;
1892 if (tape->merge_stage != NULL) {
1893 __idetape_kfree_stage(tape->merge_stage);
1894 tape->merge_stage = NULL;
1895 }
1896
1897 /* Clear pipeline flags. */
1898 clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
1899 tape->chrdev_dir = IDETAPE_DIR_NONE;
1900
1901 /* Remove pipeline stages. */
1902 if (tape->first_stage == NULL)
1903 return 0;
1904
1905 spin_lock_irqsave(&tape->lock, flags);
1906 tape->next_stage = NULL;
1907 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags))
1908 idetape_wait_for_request(drive, tape->active_data_rq);
1909 spin_unlock_irqrestore(&tape->lock, flags);
1910
1911 while (tape->first_stage != NULL) {
1912 struct request *rq_ptr = &tape->first_stage->rq;
1913
1914 cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
1915 if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
1916 ++cnt;
1917 idetape_remove_stage_head(drive);
1918 }
1919 tape->nr_pending_stages = 0;
1920 tape->max_stages = tape->min_pipeline;
1921 return cnt;
1922 }
1923
1924 /*
1925 * Position the tape to the requested block using the LOCATE packet command.
1926 * A READ POSITION command is then issued to check where we are positioned. Like
1927 * all higher level operations, we queue the commands at the tail of the request
1928 * queue and wait for their completion.
1929 */
1930 static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
1931 u8 partition, int skip)
1932 {
1933 idetape_tape_t *tape = drive->driver_data;
1934 int retval;
1935 struct ide_atapi_pc pc;
1936
1937 if (tape->chrdev_dir == IDETAPE_DIR_READ)
1938 __idetape_discard_read_pipeline(drive);
1939 idetape_wait_ready(drive, 60 * 5 * HZ);
1940 idetape_create_locate_cmd(drive, &pc, block, partition, skip);
1941 retval = idetape_queue_pc_tail(drive, &pc);
1942 if (retval)
1943 return (retval);
1944
1945 idetape_create_read_position_cmd(&pc);
1946 return (idetape_queue_pc_tail(drive, &pc));
1947 }
1948
1949 static void idetape_discard_read_pipeline(ide_drive_t *drive,
1950 int restore_position)
1951 {
1952 idetape_tape_t *tape = drive->driver_data;
1953 int cnt;
1954 int seek, position;
1955
1956 cnt = __idetape_discard_read_pipeline(drive);
1957 if (restore_position) {
1958 position = idetape_read_position(drive);
1959 seek = position > cnt ? position - cnt : 0;
1960 if (idetape_position_tape(drive, seek, 0, 0)) {
1961 printk(KERN_INFO "ide-tape: %s: position_tape failed in"
1962 " discard_pipeline()\n", tape->name);
1963 return;
1964 }
1965 }
1966 }
1967
1968 /*
1969 * Generate a read/write request for the block device interface and wait for it
1970 * to be serviced.
1971 */
1972 static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
1973 struct idetape_bh *bh)
1974 {
1975 idetape_tape_t *tape = drive->driver_data;
1976 struct request rq;
1977
1978 debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
1979
1980 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
1981 printk(KERN_ERR "ide-tape: bug: the pipeline is active in %s\n",
1982 __func__);
1983 return (0);
1984 }
1985
1986 idetape_init_rq(&rq, cmd);
1987 rq.rq_disk = tape->disk;
1988 rq.special = (void *)bh;
1989 rq.sector = tape->first_frame;
1990 rq.nr_sectors = blocks;
1991 rq.current_nr_sectors = blocks;
1992 (void) ide_do_drive_cmd(drive, &rq, ide_wait);
1993
1994 if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
1995 return 0;
1996
1997 if (tape->merge_stage)
1998 idetape_init_merge_stage(tape);
1999 if (rq.errors == IDETAPE_ERROR_GENERAL)
2000 return -EIO;
2001 return (tape->blk_size * (blocks-rq.current_nr_sectors));
2002 }
2003
2004 /* start servicing the pipeline stages, starting from tape->next_stage. */
2005 static void idetape_plug_pipeline(ide_drive_t *drive)
2006 {
2007 idetape_tape_t *tape = drive->driver_data;
2008
2009 if (tape->next_stage == NULL)
2010 return;
2011 if (!test_and_set_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
2012 idetape_activate_next_stage(drive);
2013 (void) ide_do_drive_cmd(drive, tape->active_data_rq, ide_end);
2014 }
2015 }
2016
2017 static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc)
2018 {
2019 idetape_init_pc(pc);
2020 pc->c[0] = INQUIRY;
2021 pc->c[4] = 254;
2022 pc->req_xfer = 254;
2023 pc->idetape_callback = &idetape_pc_callback;
2024 }
2025
2026 static void idetape_create_rewind_cmd(ide_drive_t *drive,
2027 struct ide_atapi_pc *pc)
2028 {
2029 idetape_init_pc(pc);
2030 pc->c[0] = REZERO_UNIT;
2031 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
2032 pc->idetape_callback = &idetape_pc_callback;
2033 }
2034
2035 static void idetape_create_erase_cmd(struct ide_atapi_pc *pc)
2036 {
2037 idetape_init_pc(pc);
2038 pc->c[0] = ERASE;
2039 pc->c[1] = 1;
2040 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
2041 pc->idetape_callback = &idetape_pc_callback;
2042 }
2043
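/*
 * Build a SPACE command. Note the CDB trick below: put_unaligned() writes
 * the 32-bit big-endian count at c[1]..c[4], and c[1] is then overwritten
 * with the space code, which leaves the low 24 bits of the count in
 * c[2]..c[4] where the SPACE command expects them.
 */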
2044 static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
2045 {
2046 idetape_init_pc(pc);
2047 pc->c[0] = SPACE;
2048 put_unaligned(cpu_to_be32(count), (unsigned int *) &pc->c[1]);
2049 pc->c[1] = cmd;
2050 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
2051 pc->idetape_callback = &idetape_pc_callback;
2052 }
2053
2054 static void idetape_wait_first_stage(ide_drive_t *drive)
2055 {
2056 idetape_tape_t *tape = drive->driver_data;
2057 unsigned long flags;
2058
2059 if (tape->first_stage == NULL)
2060 return;
2061 spin_lock_irqsave(&tape->lock, flags);
2062 if (tape->active_stage == tape->first_stage)
2063 idetape_wait_for_request(drive, tape->active_data_rq);
2064 spin_unlock_irqrestore(&tape->lock, flags);
2065 }
2066
2067 /* Queue up a character device originated write request. */
2068 static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
2069 {
2070 idetape_tape_t *tape = drive->driver_data;
2071 unsigned long flags;
2072
2073 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
2074
2075 /* Wait for the pipeline to drain, then issue our write synchronously. Beware possible race conditions. */
2076 while (1) {
2077 spin_lock_irqsave(&tape->lock, flags);
2078 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
2079 idetape_wait_for_request(drive, tape->active_data_rq);
2080 spin_unlock_irqrestore(&tape->lock, flags);
2081 } else {
2082 spin_unlock_irqrestore(&tape->lock, flags);
2083 idetape_plug_pipeline(drive);
2084 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
2085 &tape->flags))
2086 continue;
2087 return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
2088 blocks, tape->merge_stage->bh);
2089 }
2090 }
2091 }
2092
2093 /*
2094 * Wait until all pending pipeline requests are serviced. Typically called on
2095 * device close.
2096 */
2097 static void idetape_wait_for_pipeline(ide_drive_t *drive)
2098 {
2099 idetape_tape_t *tape = drive->driver_data;
2100 unsigned long flags;
2101
2102 while (tape->next_stage || test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
2103 &tape->flags)) {
2104 idetape_plug_pipeline(drive);
2105 spin_lock_irqsave(&tape->lock, flags);
2106 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags))
2107 idetape_wait_for_request(drive, tape->active_data_rq);
2108 spin_unlock_irqrestore(&tape->lock, flags);
2109 }
2110 }
2111
2112 static void idetape_empty_write_pipeline(ide_drive_t *drive)
2113 {
2114 idetape_tape_t *tape = drive->driver_data;
2115 int blocks, min;
2116 struct idetape_bh *bh;
2117
2118 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
2119 printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline,"
2120 " but we are not writing.\n");
2121 return;
2122 }
2123 if (tape->merge_stage_size > tape->stage_size) {
2124 printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n");
2125 tape->merge_stage_size = tape->stage_size;
2126 }
2127 if (tape->merge_stage_size) {
2128 blocks = tape->merge_stage_size / tape->blk_size;
2129 if (tape->merge_stage_size % tape->blk_size) {
2130 unsigned int i;
2131
2132 blocks++;
2133 i = tape->blk_size - tape->merge_stage_size %
2134 tape->blk_size;
2135 bh = tape->bh->b_reqnext;
2136 while (bh) {
2137 atomic_set(&bh->b_count, 0);
2138 bh = bh->b_reqnext;
2139 }
2140 bh = tape->bh;
2141 while (i) {
2142 if (bh == NULL) {
2143 printk(KERN_INFO "ide-tape: bug,"
2144 " bh NULL\n");
2145 break;
2146 }
2147 min = min(i, (unsigned int)(bh->b_size -
2148 atomic_read(&bh->b_count)));
2149 memset(bh->b_data + atomic_read(&bh->b_count),
2150 0, min);
2151 atomic_add(min, &bh->b_count);
2152 i -= min;
2153 bh = bh->b_reqnext;
2154 }
2155 }
2156 (void) idetape_add_chrdev_write_request(drive, blocks);
2157 tape->merge_stage_size = 0;
2158 }
2159 idetape_wait_for_pipeline(drive);
2160 if (tape->merge_stage != NULL) {
2161 __idetape_kfree_stage(tape->merge_stage);
2162 tape->merge_stage = NULL;
2163 }
2164 clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
2165 tape->chrdev_dir = IDETAPE_DIR_NONE;
2166
2167 /*
2168 * On the next backup, perform the feedback loop again. (I don't want to
2169 * keep the measured pipeline information between backups, as some
2170 * systems are constantly on, and the system load can be totally
2171 * different on the next backup.)
2172 */
2173 tape->max_stages = tape->min_pipeline;
2174 if (tape->first_stage != NULL ||
2175 tape->next_stage != NULL ||
2176 tape->last_stage != NULL ||
2177 tape->nr_stages != 0) {
2178 printk(KERN_ERR "ide-tape: ide-tape pipeline bug, "
2179 "first_stage %p, next_stage %p, "
2180 "last_stage %p, nr_stages %d\n",
2181 tape->first_stage, tape->next_stage,
2182 tape->last_stage, tape->nr_stages);
2183 }
2184 }
2185
2186 static int idetape_init_read(ide_drive_t *drive, int max_stages)
2187 {
2188 idetape_tape_t *tape = drive->driver_data;
2189 int bytes_read;
2190
2191 /* Initialize read operation */
2192 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
2193 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
2194 idetape_empty_write_pipeline(drive);
2195 idetape_flush_tape_buffers(drive);
2196 }
2197 if (tape->merge_stage || tape->merge_stage_size) {
2198 printk(KERN_ERR "ide-tape: merge_stage_size should be"
2199 " 0 now\n");
2200 tape->merge_stage_size = 0;
2201 }
2202 tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
2203 if (!tape->merge_stage)
2204 return -ENOMEM;
2205 tape->chrdev_dir = IDETAPE_DIR_READ;
2206
2207 /*
2208 * Issue a read 0 command to ensure that DSC handshake is
2209 * switched from completion mode to buffer available mode.
2210 * No point in issuing this if DSC overlap isn't supported, some
2211 * drives (Seagate STT3401A) will return an error.
2212 */
2213 if (drive->dsc_overlap) {
2214 bytes_read = idetape_queue_rw_tail(drive,
2215 REQ_IDETAPE_READ, 0,
2216 tape->merge_stage->bh);
2217 if (bytes_read < 0) {
2218 __idetape_kfree_stage(tape->merge_stage);
2219 tape->merge_stage = NULL;
2220 tape->chrdev_dir = IDETAPE_DIR_NONE;
2221 return bytes_read;
2222 }
2223 }
2224 }
2225
2226 if (!test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
2227 if (tape->nr_pending_stages >= 3 * max_stages / 4) {
2228 tape->measure_insert_time = 1;
2229 tape->insert_time = jiffies;
2230 tape->insert_size = 0;
2231 tape->insert_speed = 0;
2232 idetape_plug_pipeline(drive);
2233 }
2234 }
2235 return 0;
2236 }
2237
2238 /*
2239 * Called from idetape_chrdev_read() to service a character device read request
2240 * and add read-ahead requests to our pipeline.
2241 */
2242 static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
2243 {
2244 idetape_tape_t *tape = drive->driver_data;
2245
2246 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
2247
2248 /* If we are at a filemark, return a read length of 0 */
2249 if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
2250 return 0;
2251
2252 idetape_init_read(drive, tape->max_stages);
2253
2254 if (test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
2255 return 0;
2256
2257 return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
2258 tape->merge_stage->bh);
2259 }
2260
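/*
 * Write bcount zero bytes to the tape, one merge-stage buffer at a time.
 * Used when releasing the write path to pad a partially filled user block
 * out to a full block boundary.
 */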
2261 static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
2262 {
2263 idetape_tape_t *tape = drive->driver_data;
2264 struct idetape_bh *bh;
2265 int blocks;
2266
2267 while (bcount) {
2268 unsigned int count;
2269
2270 bh = tape->merge_stage->bh;
2271 count = min(tape->stage_size, bcount);
2272 bcount -= count;
2273 blocks = count / tape->blk_size;
2274 while (count) {
2275 atomic_set(&bh->b_count,
2276 min(count, (unsigned int)bh->b_size));
2277 memset(bh->b_data, 0, atomic_read(&bh->b_count));
2278 count -= atomic_read(&bh->b_count);
2279 bh = bh->b_reqnext;
2280 }
2281 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
2282 tape->merge_stage->bh);
2283 }
2284 }
2285
2286 static int idetape_pipeline_size(ide_drive_t *drive)
2287 {
2288 idetape_tape_t *tape = drive->driver_data;
2289 idetape_stage_t *stage;
2290 struct request *rq;
2291 int size = 0;
2292
2293 idetape_wait_for_pipeline(drive);
2294 stage = tape->first_stage;
2295 while (stage != NULL) {
2296 rq = &stage->rq;
2297 size += tape->blk_size * (rq->nr_sectors -
2298 rq->current_nr_sectors);
2299 if (rq->errors == IDETAPE_ERROR_FILEMARK)
2300 size += tape->blk_size;
2301 stage = stage->next;
2302 }
2303 size += tape->merge_stage_size;
2304 return size;
2305 }
2306
2307 /*
2308 * Rewinds the tape to the Beginning Of the current Partition (BOP). We
2309 * currently support only one partition.
2310 */
2311 static int idetape_rewind_tape(ide_drive_t *drive)
2312 {
2313 int retval;
2314 struct ide_atapi_pc pc;
2315 idetape_tape_t *tape;
2316 tape = drive->driver_data;
2317
2318 debug_log(DBG_SENSE, "Enter %s\n", __func__);
2319
2320 idetape_create_rewind_cmd(drive, &pc);
2321 retval = idetape_queue_pc_tail(drive, &pc);
2322 if (retval)
2323 return retval;
2324
2325 idetape_create_read_position_cmd(&pc);
2326 retval = idetape_queue_pc_tail(drive, &pc);
2327 if (retval)
2328 return retval;
2329 return 0;
2330 }
2331
2332 /* mtio.h compatible commands should be issued to the chrdev interface. */
2333 static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
2334 unsigned long arg)
2335 {
2336 idetape_tape_t *tape = drive->driver_data;
2337 void __user *argp = (void __user *)arg;
2338
2339 struct idetape_config {
2340 int dsc_rw_frequency;
2341 int dsc_media_access_frequency;
2342 int nr_stages;
2343 } config;
2344
2345 debug_log(DBG_PROCS, "Enter %s\n", __func__);
2346
2347 switch (cmd) {
2348 case 0x0340:
2349 if (copy_from_user(&config, argp, sizeof(config)))
2350 return -EFAULT;
2351 tape->best_dsc_rw_freq = config.dsc_rw_frequency;
2352 tape->max_stages = config.nr_stages;
2353 break;
2354 case 0x0350:
2355 config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
2356 config.nr_stages = tape->max_stages;
2357 if (copy_to_user(argp, &config, sizeof(config)))
2358 return -EFAULT;
2359 break;
2360 default:
2361 return -EIO;
2362 }
2363 return 0;
2364 }
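/*
 * Illustrative user-space sketch (an assumption, not shipped with the
 * driver) of the private 0x0340/0x0350 config ioctls handled above. User
 * space needs its own copy of the three-int struct, and the device node
 * name is hypothetical:
 *
 *	struct idetape_config cfg;
 *	int fd = open("/dev/ht0", O_RDONLY);
 *
 *	if (ioctl(fd, 0x0350, &cfg) == 0) {	read the current settings
 *		cfg.nr_stages = 100;
 *		ioctl(fd, 0x0340, &cfg);	and write them back
 *	}
 */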
2365
2366 /*
2367 * The function below is now a bit more complicated than just passing the
2368 * command to the tape since we may have crossed some filemarks during our
2369 * pipelined read-ahead mode. As a minor side effect, the pipeline enables us to
2370 * support MTFSFM when the filemark is in our internal pipeline even if the tape
2371 * doesn't support spacing over filemarks in the reverse direction.
2372 */
2373 static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
2374 int mt_count)
2375 {
2376 idetape_tape_t *tape = drive->driver_data;
2377 struct ide_atapi_pc pc;
2378 unsigned long flags;
2379 int retval, count = 0;
2380 int sprev = !!(tape->caps[4] & 0x20);
2381
2382 if (mt_count == 0)
2383 return 0;
2384 if (MTBSF == mt_op || MTBSFM == mt_op) {
2385 if (!sprev)
2386 return -EIO;
2387 mt_count = -mt_count;
2388 }
2389
2390 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
2391 /* it's a read-ahead buffer; scan it for crossed filemarks. */
2392 tape->merge_stage_size = 0;
2393 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
2394 ++count;
2395 while (tape->first_stage != NULL) {
2396 if (count == mt_count) {
2397 if (mt_op == MTFSFM)
2398 set_bit(IDETAPE_FLAG_FILEMARK,
2399 &tape->flags);
2400 return 0;
2401 }
2402 spin_lock_irqsave(&tape->lock, flags);
2403 if (tape->first_stage == tape->active_stage) {
2404 /*
2405 * We have reached the active stage in the read
2406 * pipeline. There is no point in allowing the
2407 * drive to continue reading any farther, so we
2408 * stop the pipeline.
2409 *
2410 * This section should be moved to a separate
2411 * subroutine because similar operations are
2412 * done in __idetape_discard_read_pipeline(),
2413 * for example.
2414 */
2415 tape->next_stage = NULL;
2416 spin_unlock_irqrestore(&tape->lock, flags);
2417 idetape_wait_first_stage(drive);
2418 tape->next_stage = tape->first_stage->next;
2419 } else
2420 spin_unlock_irqrestore(&tape->lock, flags);
2421 if (tape->first_stage->rq.errors ==
2422 IDETAPE_ERROR_FILEMARK)
2423 ++count;
2424 idetape_remove_stage_head(drive);
2425 }
2426 idetape_discard_read_pipeline(drive, 0);
2427 }
2428
2429 /*
2430 * The filemark was not found in our internal pipeline; now we can issue
2431 * the space command.
2432 */
2433 switch (mt_op) {
2434 case MTFSF:
2435 case MTBSF:
2436 idetape_create_space_cmd(&pc, mt_count - count,
2437 IDETAPE_SPACE_OVER_FILEMARK);
2438 return idetape_queue_pc_tail(drive, &pc);
2439 case MTFSFM:
2440 case MTBSFM:
2441 if (!sprev)
2442 return -EIO;
2443 retval = idetape_space_over_filemarks(drive, MTFSF,
2444 mt_count - count);
2445 if (retval)
2446 return retval;
2447 count = (MTBSFM == mt_op ? 1 : -1);
2448 return idetape_space_over_filemarks(drive, MTFSF, count);
2449 default:
2450 printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
2451 mt_op);
2452 return -EIO;
2453 }
2454 }
2455
2456 /*
2457 * Our character device read / write functions.
2458 *
2459 * The tape is optimized to maximize throughput when it is transferring an
2460 * integral number of the "continuous transfer limit", which is a parameter of
2461 * the specific tape (26kB on my particular tape, 32kB for Onstream).
2462 *
2463 * As of version 1.3 of the driver, the character device provides an abstract
2464 * continuous view of the media - any mix of block sizes (even 1 byte) on the
2465 * same backup/restore procedure is supported. The driver will internally
2466 * convert the requests to the recommended transfer unit, so that a mismatch
2467 * between the user's block size and the recommended size only results in a
2468 * (slightly) increased driver overhead and no longer hurts performance.
2469 * This is not applicable to Onstream.
2470 */
2471 static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
2472 size_t count, loff_t *ppos)
2473 {
2474 struct ide_tape_obj *tape = ide_tape_f(file);
2475 ide_drive_t *drive = tape->drive;
2476 ssize_t bytes_read, temp, actually_read = 0, rc;
2477 ssize_t ret = 0;
2478 u16 ctl = *(u16 *)&tape->caps[12];
2479
2480 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
2481
2482 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
2483 if (test_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags))
2484 if (count > tape->blk_size &&
2485 (count % tape->blk_size) == 0)
2486 tape->user_bs_factor = count / tape->blk_size;
2487 }
2488 rc = idetape_init_read(drive, tape->max_stages);
2489 if (rc < 0)
2490 return rc;
2491 if (count == 0)
2492 return (0);
2493 if (tape->merge_stage_size) {
2494 actually_read = min((unsigned int)(tape->merge_stage_size),
2495 (unsigned int)count);
2496 if (idetape_copy_stage_to_user(tape, buf, actually_read))
2497 ret = -EFAULT;
2498 buf += actually_read;
2499 tape->merge_stage_size -= actually_read;
2500 count -= actually_read;
2501 }
2502 while (count >= tape->stage_size) {
2503 bytes_read = idetape_add_chrdev_read_request(drive, ctl);
2504 if (bytes_read <= 0)
2505 goto finish;
2506 if (idetape_copy_stage_to_user(tape, buf, bytes_read))
2507 ret = -EFAULT;
2508 buf += bytes_read;
2509 count -= bytes_read;
2510 actually_read += bytes_read;
2511 }
2512 if (count) {
2513 bytes_read = idetape_add_chrdev_read_request(drive, ctl);
2514 if (bytes_read <= 0)
2515 goto finish;
2516 temp = min((unsigned long)count, (unsigned long)bytes_read);
2517 if (idetape_copy_stage_to_user(tape, buf, temp))
2518 ret = -EFAULT;
2519 actually_read += temp;
2520 tape->merge_stage_size = bytes_read-temp;
2521 }
2522 finish:
2523 if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) {
2524 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
2525
2526 idetape_space_over_filemarks(drive, MTFSF, 1);
2527 return 0;
2528 }
2529
2530 return ret ? ret : actually_read;
2531 }
2532
2533 static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
2534 size_t count, loff_t *ppos)
2535 {
2536 struct ide_tape_obj *tape = ide_tape_f(file);
2537 ide_drive_t *drive = tape->drive;
2538 ssize_t actually_written = 0;
2539 ssize_t ret = 0;
2540 u16 ctl = *(u16 *)&tape->caps[12];
2541
2542 /* The drive is write protected. */
2543 if (tape->write_prot)
2544 return -EACCES;
2545
2546 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
2547
2548 /* Initialize write operation */
2549 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
2550 if (tape->chrdev_dir == IDETAPE_DIR_READ)
2551 idetape_discard_read_pipeline(drive, 1);
2552 if (tape->merge_stage || tape->merge_stage_size) {
2553 printk(KERN_ERR "ide-tape: merge_stage_size "
2554 "should be 0 now\n");
2555 tape->merge_stage_size = 0;
2556 }
2557 tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
2558 if (!tape->merge_stage)
2559 return -ENOMEM;
2560 tape->chrdev_dir = IDETAPE_DIR_WRITE;
2561 idetape_init_merge_stage(tape);
2562
2563 /*
2564 * Issue a write 0 command to ensure that DSC handshake is
2565 * switched from completion mode to buffer available mode. No
2566 * point in issuing this if DSC overlap isn't supported, some
2567 * drives (Seagate STT3401A) will return an error.
2568 */
2569 if (drive->dsc_overlap) {
2570 ssize_t retval = idetape_queue_rw_tail(drive,
2571 REQ_IDETAPE_WRITE, 0,
2572 tape->merge_stage->bh);
2573 if (retval < 0) {
2574 __idetape_kfree_stage(tape->merge_stage);
2575 tape->merge_stage = NULL;
2576 tape->chrdev_dir = IDETAPE_DIR_NONE;
2577 return retval;
2578 }
2579 }
2580 }
2581 if (count == 0)
2582 return (0);
2583 if (tape->merge_stage_size) {
2584 if (tape->merge_stage_size >= tape->stage_size) {
2585 printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
2586 tape->merge_stage_size = 0;
2587 }
2588 actually_written = min((unsigned int)
2589 (tape->stage_size - tape->merge_stage_size),
2590 (unsigned int)count);
2591 if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
2592 actually_written))
2593 ret = -EFAULT;
2594 buf += actually_written;
2595 tape->merge_stage_size += actually_written;
2596 count -= actually_written;
2597
2598 if (tape->merge_stage_size == tape->stage_size) {
2599 ssize_t retval;
2600 tape->merge_stage_size = 0;
2601 retval = idetape_add_chrdev_write_request(drive, ctl);
2602 if (retval <= 0)
2603 return (retval);
2604 }
2605 }
2606 while (count >= tape->stage_size) {
2607 ssize_t retval;
2608 if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
2609 tape->stage_size))
2610 ret = -EFAULT;
2611 buf += tape->stage_size;
2612 count -= tape->stage_size;
2613 retval = idetape_add_chrdev_write_request(drive, ctl);
2614 actually_written += tape->stage_size;
2615 if (retval <= 0)
2616 return (retval);
2617 }
2618 if (count) {
2619 actually_written += count;
2620 if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
2621 count))
2622 ret = -EFAULT;
2623 tape->merge_stage_size += count;
2624 }
2625 return ret ? ret : actually_written;
2626 }
2627
2628 static int idetape_write_filemark(ide_drive_t *drive)
2629 {
2630 struct ide_atapi_pc pc;
2631
2632 /* Write a filemark */
2633 idetape_create_write_filemark_cmd(drive, &pc, 1);
2634 if (idetape_queue_pc_tail(drive, &pc)) {
2635 printk(KERN_ERR "ide-tape: Couldn't write a filemark\n");
2636 return -EIO;
2637 }
2638 return 0;
2639 }
2640
2641 /*
2642 * Called from idetape_chrdev_ioctl when the general mtio MTIOCTOP ioctl is
2643 * requested.
2644 *
2645 * Note: MTBSF and MTBSFM are not supported when the tape doesn't support
2646 * spacing over filemarks in the reverse direction. In this case, MTFSFM is also
2647 * usually not supported (it is supported in the rare case in which we crossed
2648 * the filemark during our read-ahead pipelined operation mode).
2649 *
2650 * The following commands are currently not supported:
2651 *
2652 * MTFSS, MTBSS, MTWSM, MTSETDENSITY, MTSETDRVBUFFER, MT_ST_BOOLEANS,
2653 * MT_ST_WRITE_THRESHOLD.
2654 */
2655 static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2656 {
2657 idetape_tape_t *tape = drive->driver_data;
2658 struct ide_atapi_pc pc;
2659 int i, retval;
2660
2661 debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n",
2662 mt_op, mt_count);
2663
2664 /* Commands which need our pipelined read-ahead stages. */
2665 switch (mt_op) {
2666 case MTFSF:
2667 case MTFSFM:
2668 case MTBSF:
2669 case MTBSFM:
2670 if (!mt_count)
2671 return 0;
2672 return idetape_space_over_filemarks(drive, mt_op, mt_count);
2673 default:
2674 break;
2675 }
2676
2677 switch (mt_op) {
2678 case MTWEOF:
2679 if (tape->write_prot)
2680 return -EACCES;
2681 idetape_discard_read_pipeline(drive, 1);
2682 for (i = 0; i < mt_count; i++) {
2683 retval = idetape_write_filemark(drive);
2684 if (retval)
2685 return retval;
2686 }
2687 return 0;
2688 case MTREW:
2689 idetape_discard_read_pipeline(drive, 0);
2690 if (idetape_rewind_tape(drive))
2691 return -EIO;
2692 return 0;
2693 case MTLOAD:
2694 idetape_discard_read_pipeline(drive, 0);
2695 idetape_create_load_unload_cmd(drive, &pc,
2696 IDETAPE_LU_LOAD_MASK);
2697 return idetape_queue_pc_tail(drive, &pc);
2698 case MTUNLOAD:
2699 case MTOFFL:
2700 /*
2701 * If door is locked, attempt to unlock before
2702 * attempting to eject.
2703 */
2704 if (tape->door_locked) {
2705 if (idetape_create_prevent_cmd(drive, &pc, 0))
2706 if (!idetape_queue_pc_tail(drive, &pc))
2707 tape->door_locked = DOOR_UNLOCKED;
2708 }
2709 idetape_discard_read_pipeline(drive, 0);
2710 idetape_create_load_unload_cmd(drive, &pc,
2711 !IDETAPE_LU_LOAD_MASK);
2712 retval = idetape_queue_pc_tail(drive, &pc);
2713 if (!retval)
2714 clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
2715 return retval;
2716 case MTNOP:
2717 idetape_discard_read_pipeline(drive, 0);
2718 return idetape_flush_tape_buffers(drive);
2719 case MTRETEN:
2720 idetape_discard_read_pipeline(drive, 0);
2721 idetape_create_load_unload_cmd(drive, &pc,
2722 IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
2723 return idetape_queue_pc_tail(drive, &pc);
2724 case MTEOM:
2725 idetape_create_space_cmd(&pc, 0, IDETAPE_SPACE_TO_EOD);
2726 return idetape_queue_pc_tail(drive, &pc);
2727 case MTERASE:
2728 (void)idetape_rewind_tape(drive);
2729 idetape_create_erase_cmd(&pc);
2730 return idetape_queue_pc_tail(drive, &pc);
2731 case MTSETBLK:
2732 if (mt_count) {
2733 if (mt_count < tape->blk_size ||
2734 mt_count % tape->blk_size)
2735 return -EIO;
2736 tape->user_bs_factor = mt_count / tape->blk_size;
2737 clear_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
2738 } else
2739 set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
2740 return 0;
2741 case MTSEEK:
2742 idetape_discard_read_pipeline(drive, 0);
2743 return idetape_position_tape(drive,
2744 mt_count * tape->user_bs_factor, tape->partition, 0);
2745 case MTSETPART:
2746 idetape_discard_read_pipeline(drive, 0);
2747 return idetape_position_tape(drive, 0, mt_count, 0);
2748 case MTFSR:
2749 case MTBSR:
2750 case MTLOCK:
2751 if (!idetape_create_prevent_cmd(drive, &pc, 1))
2752 return 0;
2753 retval = idetape_queue_pc_tail(drive, &pc);
2754 if (retval)
2755 return retval;
2756 tape->door_locked = DOOR_EXPLICITLY_LOCKED;
2757 return 0;
2758 case MTUNLOCK:
2759 if (!idetape_create_prevent_cmd(drive, &pc, 0))
2760 return 0;
2761 retval = idetape_queue_pc_tail(drive, &pc);
2762 if (retval)
2763 return retval;
2764 tape->door_locked = DOOR_UNLOCKED;
2765 return 0;
2766 default:
2767 printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
2768 mt_op);
2769 return -EIO;
2770 }
2771 }
2772
2773 /*
2774 * Our character device ioctls. General mtio.h magnetic io commands are
2775 * supported here, and not in the corresponding block interface. Our own
2776 * ide-tape ioctls are supported on both interfaces.
2777 */
2778 static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
2779 unsigned int cmd, unsigned long arg)
2780 {
2781 struct ide_tape_obj *tape = ide_tape_f(file);
2782 ide_drive_t *drive = tape->drive;
2783 struct mtop mtop;
2784 struct mtget mtget;
2785 struct mtpos mtpos;
2786 int block_offset = 0, position = tape->first_frame;
2787 void __user *argp = (void __user *)arg;
2788
2789 debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd);
2790
2791 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
2792 idetape_empty_write_pipeline(drive);
2793 idetape_flush_tape_buffers(drive);
2794 }
2795 if (cmd == MTIOCGET || cmd == MTIOCPOS) {
2796 block_offset = idetape_pipeline_size(drive) /
2797 (tape->blk_size * tape->user_bs_factor);
2798 position = idetape_read_position(drive);
2799 if (position < 0)
2800 return -EIO;
2801 }
2802 switch (cmd) {
2803 case MTIOCTOP:
2804 if (copy_from_user(&mtop, argp, sizeof(struct mtop)))
2805 return -EFAULT;
2806 return idetape_mtioctop(drive, mtop.mt_op, mtop.mt_count);
2807 case MTIOCGET:
2808 memset(&mtget, 0, sizeof(struct mtget));
2809 mtget.mt_type = MT_ISSCSI2;
2810 mtget.mt_blkno = position / tape->user_bs_factor - block_offset;
2811 mtget.mt_dsreg =
2812 ((tape->blk_size * tape->user_bs_factor)
2813 << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK;
2814
2815 if (tape->drv_write_prot)
2816 mtget.mt_gstat |= GMT_WR_PROT(0xffffffff);
2817
2818 if (copy_to_user(argp, &mtget, sizeof(struct mtget)))
2819 return -EFAULT;
2820 return 0;
2821 case MTIOCPOS:
2822 mtpos.mt_blkno = position / tape->user_bs_factor - block_offset;
2823 if (copy_to_user(argp, &mtpos, sizeof(struct mtpos)))
2824 return -EFAULT;
2825 return 0;
2826 default:
2827 if (tape->chrdev_dir == IDETAPE_DIR_READ)
2828 idetape_discard_read_pipeline(drive, 1);
2829 return idetape_blkdev_ioctl(drive, cmd, arg);
2830 }
2831 }
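/*
 * Illustrative user-space sketch (an assumption, not part of the driver)
 * of the MTIOCTOP/MTIOCGET paths handled above, using the non-rewinding
 * character node registered as "n<name>" during probe:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mtio.h>
 *
 *	struct mtop op = { .mt_op = MTWEOF, .mt_count = 1 };
 *	struct mtget status;
 *	int fd = open("/dev/nht0", O_RDWR);	hypothetical node name
 *
 *	if (ioctl(fd, MTIOCTOP, &op) == 0)	write one filemark
 *		ioctl(fd, MTIOCGET, &status);	then inspect mt_blkno etc.
 */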
2832
2833 /*
2834 * Do a mode sense page 0 with block descriptor and if it succeeds set the tape
2835 * block size to the reported value.
2836 */
2837 static void ide_tape_get_bsize_from_bdesc(ide_drive_t *drive)
2838 {
2839 idetape_tape_t *tape = drive->driver_data;
2840 struct ide_atapi_pc pc;
2841
2842 idetape_create_mode_sense_cmd(&pc, IDETAPE_BLOCK_DESCRIPTOR);
2843 if (idetape_queue_pc_tail(drive, &pc)) {
2844 printk(KERN_ERR "ide-tape: Can't get block descriptor\n");
2845 if (tape->blk_size == 0) {
2846 printk(KERN_WARNING "ide-tape: Cannot deal with zero "
2847 "block size, assuming 32k\n");
2848 tape->blk_size = 32768;
2849 }
2850 return;
2851 }
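/*
 * MODE SENSE(6) layout, for reference: buf[2] is the device-specific
 * parameter (bit 7 = write protect), the 8-byte block descriptor starts at
 * buf[4], and bytes 5..7 of the descriptor hold the block length -- hence
 * the buf[4 + 5..7] accesses below.
 */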
2852 tape->blk_size = (pc.buf[4 + 5] << 16) +
2853 (pc.buf[4 + 6] << 8) +
2854 pc.buf[4 + 7];
2855 tape->drv_write_prot = (pc.buf[2] & 0x80) >> 7;
2856 }
2857
2858 static int idetape_chrdev_open(struct inode *inode, struct file *filp)
2859 {
2860 unsigned int minor = iminor(inode), i = minor & ~0xc0;
2861 ide_drive_t *drive;
2862 idetape_tape_t *tape;
2863 struct ide_atapi_pc pc;
2864 int retval;
2865
2866 if (i >= MAX_HWIFS * MAX_DRIVES)
2867 return -ENXIO;
2868
2869 tape = ide_tape_chrdev_get(i);
2870 if (!tape)
2871 return -ENXIO;
2872
2873 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
2874
2875 /*
2876 * We really want to do nonseekable_open(inode, filp); here, but some
2877 * versions of tar incorrectly call lseek on tapes and bail out if that
2878 * fails. So we disallow pread() and pwrite(), but permit lseeks.
2879 */
2880 filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
2881
2882 drive = tape->drive;
2883
2884 filp->private_data = tape;
2885
2886 if (test_and_set_bit(IDETAPE_FLAG_BUSY, &tape->flags)) {
2887 retval = -EBUSY;
2888 goto out_put_tape;
2889 }
2890
2891 retval = idetape_wait_ready(drive, 60 * HZ);
2892 if (retval) {
2893 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
2894 printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
2895 goto out_put_tape;
2896 }
2897
2898 idetape_read_position(drive);
2899 if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags))
2900 (void)idetape_rewind_tape(drive);
2901
2902 if (tape->chrdev_dir != IDETAPE_DIR_READ)
2903 clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
2904
2905 /* Read block size and write protect status from drive. */
2906 ide_tape_get_bsize_from_bdesc(drive);
2907
2908 /* Set write protect flag if device is opened as read-only. */
2909 if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
2910 tape->write_prot = 1;
2911 else
2912 tape->write_prot = tape->drv_write_prot;
2913
2914 /* Make sure drive isn't write protected if user wants to write. */
2915 if (tape->write_prot) {
2916 if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
2917 (filp->f_flags & O_ACCMODE) == O_RDWR) {
2918 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
2919 retval = -EROFS;
2920 goto out_put_tape;
2921 }
2922 }
2923
2924 /* Lock the tape drive door so user can't eject. */
2925 if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
2926 if (idetape_create_prevent_cmd(drive, &pc, 1)) {
2927 if (!idetape_queue_pc_tail(drive, &pc)) {
2928 if (tape->door_locked != DOOR_EXPLICITLY_LOCKED)
2929 tape->door_locked = DOOR_LOCKED;
2930 }
2931 }
2932 }
2933 return 0;
2934
2935 out_put_tape:
2936 ide_tape_put(tape);
2937 return retval;
2938 }
2939
2940 static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
2941 {
2942 idetape_tape_t *tape = drive->driver_data;
2943
2944 idetape_empty_write_pipeline(drive);
2945 tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0);
2946 if (tape->merge_stage != NULL) {
2947 idetape_pad_zeros(drive, tape->blk_size *
2948 (tape->user_bs_factor - 1));
2949 __idetape_kfree_stage(tape->merge_stage);
2950 tape->merge_stage = NULL;
2951 }
2952 idetape_write_filemark(drive);
2953 idetape_flush_tape_buffers(drive);
2954 idetape_flush_tape_buffers(drive);
2955 }
2956
2957 static int idetape_chrdev_release(struct inode *inode, struct file *filp)
2958 {
2959 struct ide_tape_obj *tape = ide_tape_f(filp);
2960 ide_drive_t *drive = tape->drive;
2961 struct ide_atapi_pc pc;
2962 unsigned int minor = iminor(inode);
2963
2964 lock_kernel();
2965 tape = drive->driver_data;
2966
2967 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
2968
2969 if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
2970 idetape_write_release(drive, minor);
2971 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
2972 if (minor < 128)
2973 idetape_discard_read_pipeline(drive, 1);
2974 else
2975 idetape_wait_for_pipeline(drive);
2976 }
2977
2978 if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags))
2979 (void) idetape_rewind_tape(drive);
2980 if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
2981 if (tape->door_locked == DOOR_LOCKED) {
2982 if (idetape_create_prevent_cmd(drive, &pc, 0)) {
2983 if (!idetape_queue_pc_tail(drive, &pc))
2984 tape->door_locked = DOOR_UNLOCKED;
2985 }
2986 }
2987 }
2988 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
2989 ide_tape_put(tape);
2990 unlock_kernel();
2991 return 0;
2992 }
2993
2994 /*
2995 * Check the contents of the ATAPI IDENTIFY command results. We return:
2996 *
2997 * 1 - If the tape can be supported by us, based on the information we have so
2998 * far.
2999 *
3000 * 0 - If this tape drive is not currently supported by us.
3001 */
3002 static int idetape_identify_device(ide_drive_t *drive)
3003 {
3004 u8 gcw[2], protocol, device_type, removable, packet_size;
3005
3006 if (drive->id_read == 0)
3007 return 1;
3008
3009 *((unsigned short *) &gcw) = drive->id->config;
3010
3011 protocol = (gcw[1] & 0xC0) >> 6;
3012 device_type = gcw[1] & 0x1F;
3013 removable = !!(gcw[0] & 0x80);
3014 packet_size = gcw[0] & 0x3;
3015
3016 /* Check that we can support this device */
3017 if (protocol != 2)
3018 printk(KERN_ERR "ide-tape: Protocol (0x%02x) is not ATAPI\n",
3019 protocol);
3020 else if (device_type != 1)
3021 printk(KERN_ERR "ide-tape: Device type (0x%02x) is not set "
3022 "to tape\n", device_type);
3023 else if (!removable)
3024 printk(KERN_ERR "ide-tape: The removable flag is not set\n");
3025 else if (packet_size != 0) {
3026 printk(KERN_ERR "ide-tape: Packet size (0x%02x) is not 12"
3027 " bytes\n", packet_size);
3028 } else
3029 return 1;
3030 return 0;
3031 }
3032
3033 static void idetape_get_inquiry_results(ide_drive_t *drive)
3034 {
3035 idetape_tape_t *tape = drive->driver_data;
3036 struct ide_atapi_pc pc;
3037 char fw_rev[6], vendor_id[10], product_id[18];
3038
3039 idetape_create_inquiry_cmd(&pc);
3040 if (idetape_queue_pc_tail(drive, &pc)) {
3041 printk(KERN_ERR "ide-tape: %s: can't get INQUIRY results\n",
3042 tape->name);
3043 return;
3044 }
3045 memcpy(vendor_id, &pc.buf[8], 8);
3046 memcpy(product_id, &pc.buf[16], 16);
3047 memcpy(fw_rev, &pc.buf[32], 4);
3048
3049 ide_fixstring(vendor_id, 10, 0);
3050 ide_fixstring(product_id, 18, 0);
3051 ide_fixstring(fw_rev, 6, 0);
3052
3053 printk(KERN_INFO "ide-tape: %s <-> %s: %s %s rev %s\n",
3054 drive->name, tape->name, vendor_id, product_id, fw_rev);
3055 }
3056
3057 /*
3058 * Ask the tape about its various parameters. In particular, we will adjust our
3059 * data transfer buffer size to the recommended value as returned by the tape.
3060 */
3061 static void idetape_get_mode_sense_results(ide_drive_t *drive)
3062 {
3063 idetape_tape_t *tape = drive->driver_data;
3064 struct ide_atapi_pc pc;
3065 u8 *caps;
3066 u8 speed, max_speed;
3067
3068 idetape_create_mode_sense_cmd(&pc, IDETAPE_CAPABILITIES_PAGE);
3069 if (idetape_queue_pc_tail(drive, &pc)) {
3070 printk(KERN_ERR "ide-tape: Can't get tape parameters - assuming"
3071 " some default values\n");
3072 tape->blk_size = 512;
3073 put_unaligned(52, (u16 *)&tape->caps[12]);
3074 put_unaligned(540, (u16 *)&tape->caps[14]);
3075 put_unaligned(6*52, (u16 *)&tape->caps[16]);
3076 return;
3077 }
3078 caps = pc.buf + 4 + pc.buf[3];
3079
3080 /* convert to host order and save for later use */
3081 speed = be16_to_cpu(*(u16 *)&caps[14]);
3082 max_speed = be16_to_cpu(*(u16 *)&caps[8]);
3083
3084 put_unaligned(max_speed, (u16 *)&caps[8]);
3085 put_unaligned(be16_to_cpu(*(u16 *)&caps[12]), (u16 *)&caps[12]);
3086 put_unaligned(speed, (u16 *)&caps[14]);
3087 put_unaligned(be16_to_cpu(*(u16 *)&caps[16]), (u16 *)&caps[16]);
3088
3089 if (!speed) {
3090 printk(KERN_INFO "ide-tape: %s: invalid tape speed "
3091 "(assuming 650KB/sec)\n", drive->name);
3092 put_unaligned(650, (u16 *)&caps[14]);
3093 }
3094 if (!max_speed) {
3095 printk(KERN_INFO "ide-tape: %s: invalid max_speed "
3096 "(assuming 650KB/sec)\n", drive->name);
3097 put_unaligned(650, (u16 *)&caps[8]);
3098 }
3099
3100 memcpy(&tape->caps, caps, 20);
3101 if (caps[7] & 0x02)
3102 tape->blk_size = 512;
3103 else if (caps[7] & 0x04)
3104 tape->blk_size = 1024;
3105 }
3106
3107 #ifdef CONFIG_IDE_PROC_FS
3108 static void idetape_add_settings(ide_drive_t *drive)
3109 {
3110 idetape_tape_t *tape = drive->driver_data;
3111
3112 ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff,
3113 1, 2, (u16 *)&tape->caps[16], NULL);
3114 ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff,
3115 tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
3116 ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff,
3117 tape->stage_size / 1024, 1, &tape->max_stages, NULL);
3118 ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff,
3119 tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
3120 ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0,
3121 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages,
3122 NULL);
3123 ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0,
3124 0xffff, tape->stage_size / 1024, 1,
3125 &tape->nr_pending_stages, NULL);
3126 ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff,
3127 1, 1, (u16 *)&tape->caps[14], NULL);
3128 ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1,
3129 1024, &tape->stage_size, NULL);
3130 ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN,
3131 IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq,
3132 NULL);
3133 ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1,
3134 1, &drive->dsc_overlap, NULL);
3135 ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff,
3136 1, 1, &tape->avg_speed, NULL);
3137 ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1,
3138 1, &tape->debug_mask, NULL);
3139 }
3140 #else
3141 static inline void idetape_add_settings(ide_drive_t *drive) { ; }
3142 #endif
3143
3144 /*
3145 * The function below is called to:
3146 *
3147 * 1. Initialize our various state variables.
3148 * 2. Ask the tape for its capabilities.
3149 * 3. Allocate a buffer which will be used for data transfer. The buffer size
3150 * is chosen based on the recommendation which we received in step 2.
3151 *
3152 * Note that at this point ide.c already assigned us an irq, so that we can
3153 * queue requests here and wait for their completion.
3154 */
3155 static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
3156 {
3157 unsigned long t1, tmid, tn, t;
3158 int speed;
3159 int stage_size;
3160 u8 gcw[2];
3161 struct sysinfo si;
3162 u16 *ctl = (u16 *)&tape->caps[12];
3163
3164 spin_lock_init(&tape->lock);
3165 drive->dsc_overlap = 1;
3166 if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
3167 printk(KERN_INFO "ide-tape: %s: disabling DSC overlap\n",
3168 tape->name);
3169 drive->dsc_overlap = 0;
3170 }
3171 /* Seagate Travan drives do not support DSC overlap. */
3172 if (strstr(drive->id->model, "Seagate STT3401"))
3173 drive->dsc_overlap = 0;
3174 tape->minor = minor;
3175 tape->name[0] = 'h';
3176 tape->name[1] = 't';
3177 tape->name[2] = '0' + minor;
3178 tape->chrdev_dir = IDETAPE_DIR_NONE;
3179 tape->pc = tape->pc_stack;
3180 *((unsigned short *) &gcw) = drive->id->config;
3181
3182 /* Command packet DRQ type */
3183 if (((gcw[0] & 0x60) >> 5) == 1)
3184 set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags);
3185
3186 tape->min_pipeline = 10;
3187 tape->max_pipeline = 10;
3188 tape->max_stages = 10;
3189
3190 idetape_get_inquiry_results(drive);
3191 idetape_get_mode_sense_results(drive);
3192 ide_tape_get_bsize_from_bdesc(drive);
3193 tape->user_bs_factor = 1;
3194 tape->stage_size = *ctl * tape->blk_size;
3195 while (tape->stage_size > 0xffff) {
3196 printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
3197 *ctl /= 2;
3198 tape->stage_size = *ctl * tape->blk_size;
3199 }
3200 stage_size = tape->stage_size;
3201 tape->pages_per_stage = stage_size / PAGE_SIZE;
3202 if (stage_size % PAGE_SIZE) {
3203 tape->pages_per_stage++;
3204 tape->excess_bh_size = PAGE_SIZE - stage_size % PAGE_SIZE;
3205 }
3206
3207 /* Select the "best" DSC read/write polling freq and pipeline size. */
3208 speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
3209
3210 tape->max_stages = speed * 1000 * 10 / tape->stage_size;
3211
3212 /* Limit memory use for pipeline to 10% of physical memory */
3213 si_meminfo(&si);
3214 if (tape->max_stages * tape->stage_size >
3215 si.totalram * si.mem_unit / 10)
3216 tape->max_stages =
3217 si.totalram * si.mem_unit / (10 * tape->stage_size);
3218
3219 tape->max_stages = min(tape->max_stages, IDETAPE_MAX_PIPELINE_STAGES);
3220 tape->min_pipeline = min(tape->max_stages, IDETAPE_MIN_PIPELINE_STAGES);
3221 tape->max_pipeline =
3222 min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
3223 if (tape->max_stages == 0) {
3224 tape->max_stages = 1;
3225 tape->min_pipeline = 1;
3226 tape->max_pipeline = 1;
3227 }
3228
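/*
 * For a rough feel of the numbers below (illustrative example, assuming a
 * 650 KB/s drive and a 32 kB stage): t1 = 32768 * HZ / 650000, i.e. about
 * 50 ms worth of jiffies between DSC polls while one stage fills up.
 */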
3229 t1 = (tape->stage_size * HZ) / (speed * 1000);
3230 tmid = (*(u16 *)&tape->caps[16] * 32 * HZ) / (speed * 125);
3231 tn = (IDETAPE_FIFO_THRESHOLD * tape->stage_size * HZ) / (speed * 1000);
3232
3233 if (tape->max_stages)
3234 t = tn;
3235 else
3236 t = t1;
3237
3238 /*
3239 * Ensure that the number we got makes sense; limit it within
3240 * IDETAPE_DSC_RW_MIN and IDETAPE_DSC_RW_MAX.
3241 */
3242 tape->best_dsc_rw_freq = max_t(unsigned long,
3243 min_t(unsigned long, t, IDETAPE_DSC_RW_MAX),
3244 IDETAPE_DSC_RW_MIN);
3245 printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
3246 "%dkB pipeline, %lums tDSC%s\n",
3247 drive->name, tape->name, *(u16 *)&tape->caps[14],
3248 (*(u16 *)&tape->caps[16] * 512) / tape->stage_size,
3249 tape->stage_size / 1024,
3250 tape->max_stages * tape->stage_size / 1024,
3251 tape->best_dsc_rw_freq * 1000 / HZ,
3252 drive->using_dma ? ", DMA":"");
3253
3254 idetape_add_settings(drive);
3255 }
3256
3257 static void ide_tape_remove(ide_drive_t *drive)
3258 {
3259 idetape_tape_t *tape = drive->driver_data;
3260
3261 ide_proc_unregister_driver(drive, tape->driver);
3262
3263 ide_unregister_region(tape->disk);
3264
3265 ide_tape_put(tape);
3266 }
3267
3268 static void ide_tape_release(struct kref *kref)
3269 {
3270 struct ide_tape_obj *tape = to_ide_tape(kref);
3271 ide_drive_t *drive = tape->drive;
3272 struct gendisk *g = tape->disk;
3273
3274 BUG_ON(tape->first_stage != NULL || tape->merge_stage_size);
3275
3276 drive->dsc_overlap = 0;
3277 drive->driver_data = NULL;
3278 device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor));
3279 device_destroy(idetape_sysfs_class,
3280 MKDEV(IDETAPE_MAJOR, tape->minor + 128));
3281 idetape_devs[tape->minor] = NULL;
3282 g->private_data = NULL;
3283 put_disk(g);
3284 kfree(tape);
3285 }
3286
3287 #ifdef CONFIG_IDE_PROC_FS
3288 static int proc_idetape_read_name
3289 (char *page, char **start, off_t off, int count, int *eof, void *data)
3290 {
3291 ide_drive_t *drive = (ide_drive_t *) data;
3292 idetape_tape_t *tape = drive->driver_data;
3293 char *out = page;
3294 int len;
3295
3296 len = sprintf(out, "%s\n", tape->name);
3297 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
3298 }
3299
3300 static ide_proc_entry_t idetape_proc[] = {
3301 { "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL },
3302 { "name", S_IFREG|S_IRUGO, proc_idetape_read_name, NULL },
3303 { NULL, 0, NULL, NULL }
3304 };
3305 #endif
3306
3307 static int ide_tape_probe(ide_drive_t *);
3308
3309 static ide_driver_t idetape_driver = {
3310 .gen_driver = {
3311 .owner = THIS_MODULE,
3312 .name = "ide-tape",
3313 .bus = &ide_bus_type,
3314 },
3315 .probe = ide_tape_probe,
3316 .remove = ide_tape_remove,
3317 .version = IDETAPE_VERSION,
3318 .media = ide_tape,
3319 .supports_dsc_overlap = 1,
3320 .do_request = idetape_do_request,
3321 .end_request = idetape_end_request,
3322 .error = __ide_error,
3323 .abort = __ide_abort,
3324 #ifdef CONFIG_IDE_PROC_FS
3325 .proc = idetape_proc,
3326 #endif
3327 };
3328
3329 /* Our character device supporting functions, passed to register_chrdev. */
3330 static const struct file_operations idetape_fops = {
3331 .owner = THIS_MODULE,
3332 .read = idetape_chrdev_read,
3333 .write = idetape_chrdev_write,
3334 .ioctl = idetape_chrdev_ioctl,
3335 .open = idetape_chrdev_open,
3336 .release = idetape_chrdev_release,
3337 };
3338
3339 static int idetape_open(struct inode *inode, struct file *filp)
3340 {
3341 struct gendisk *disk = inode->i_bdev->bd_disk;
3342 struct ide_tape_obj *tape;
3343
3344 tape = ide_tape_get(disk);
3345 if (!tape)
3346 return -ENXIO;
3347
3348 return 0;
3349 }
3350
3351 static int idetape_release(struct inode *inode, struct file *filp)
3352 {
3353 struct gendisk *disk = inode->i_bdev->bd_disk;
3354 struct ide_tape_obj *tape = ide_tape_g(disk);
3355
3356 ide_tape_put(tape);
3357
3358 return 0;
3359 }
3360
3361 static int idetape_ioctl(struct inode *inode, struct file *file,
3362 unsigned int cmd, unsigned long arg)
3363 {
3364 struct block_device *bdev = inode->i_bdev;
3365 struct ide_tape_obj *tape = ide_tape_g(bdev->bd_disk);
3366 ide_drive_t *drive = tape->drive;
3367 int err = generic_ide_ioctl(drive, file, bdev, cmd, arg);
3368 if (err == -EINVAL)
3369 err = idetape_blkdev_ioctl(drive, cmd, arg);
3370 return err;
3371 }
3372
3373 static struct block_device_operations idetape_block_ops = {
3374 .owner = THIS_MODULE,
3375 .open = idetape_open,
3376 .release = idetape_release,
3377 .ioctl = idetape_ioctl,
3378 };
3379
3380 static int ide_tape_probe(ide_drive_t *drive)
3381 {
3382 idetape_tape_t *tape;
3383 struct gendisk *g;
3384 int minor;
3385
3386 if (!strstr("ide-tape", drive->driver_req))
3387 goto failed;
3388 if (!drive->present)
3389 goto failed;
3390 if (drive->media != ide_tape)
3391 goto failed;
3392 if (!idetape_identify_device(drive)) {
3393 printk(KERN_ERR "ide-tape: %s: not supported by this version of"
3394 " the driver\n", drive->name);
3395 goto failed;
3396 }
3397 if (drive->scsi) {
3398 printk(KERN_INFO "ide-tape: passing drive %s to ide-scsi"
3399 " emulation.\n", drive->name);
3400 goto failed;
3401 }
3402 tape = kzalloc(sizeof(idetape_tape_t), GFP_KERNEL);
3403 if (tape == NULL) {
3404 printk(KERN_ERR "ide-tape: %s: Can't allocate a tape struct\n",
3405 drive->name);
3406 goto failed;
3407 }
3408
3409 g = alloc_disk(1 << PARTN_BITS);
3410 if (!g)
3411 goto out_free_tape;
3412
3413 ide_init_disk(g, drive);
3414
3415 ide_proc_register_driver(drive, &idetape_driver);
3416
3417 kref_init(&tape->kref);
3418
3419 tape->drive = drive;
3420 tape->driver = &idetape_driver;
3421 tape->disk = g;
3422
3423 g->private_data = &tape->driver;
3424
3425 drive->driver_data = tape;
3426
3427 mutex_lock(&idetape_ref_mutex);
3428 for (minor = 0; idetape_devs[minor]; minor++)
3429 ;
3430 idetape_devs[minor] = tape;
3431 mutex_unlock(&idetape_ref_mutex);
3432
3433 idetape_setup(drive, tape, minor);
3434
3435 device_create(idetape_sysfs_class, &drive->gendev,
3436 MKDEV(IDETAPE_MAJOR, minor), "%s", tape->name);
3437 device_create(idetape_sysfs_class, &drive->gendev,
3438 MKDEV(IDETAPE_MAJOR, minor + 128), "n%s", tape->name);
3439
3440 g->fops = &idetape_block_ops;
3441 ide_register_region(g);
3442
3443 return 0;
3444
3445 out_free_tape:
3446 kfree(tape);
3447 failed:
3448 return -ENODEV;
3449 }
3450
3451 static void __exit idetape_exit(void)
3452 {
3453 driver_unregister(&idetape_driver.gen_driver);
3454 class_destroy(idetape_sysfs_class);
3455 unregister_chrdev(IDETAPE_MAJOR, "ht");
3456 }
3457
3458 static int __init idetape_init(void)
3459 {
3460 int error = 1;
3461 idetape_sysfs_class = class_create(THIS_MODULE, "ide_tape");
3462 if (IS_ERR(idetape_sysfs_class)) {
3463 idetape_sysfs_class = NULL;
3464 printk(KERN_ERR "Unable to create sysfs class for ide tapes\n");
3465 error = -EBUSY;
3466 goto out;
3467 }
3468
3469 if (register_chrdev(IDETAPE_MAJOR, "ht", &idetape_fops)) {
3470 printk(KERN_ERR "ide-tape: Failed to register chrdev"
3471 " interface\n");
3472 error = -EBUSY;
3473 goto out_free_class;
3474 }
3475
3476 error = driver_register(&idetape_driver.gen_driver);
3477 if (error)
3478 goto out_free_driver;
3479
3480 return 0;
3481
3482 out_free_driver:
3483 driver_unregister(&idetape_driver.gen_driver);
3484 out_free_class:
3485 class_destroy(idetape_sysfs_class);
3486 out:
3487 return error;
3488 }
3489
3490 MODULE_ALIAS("ide:*m-tape*");
3491 module_init(idetape_init);
3492 module_exit(idetape_exit);
3493 MODULE_ALIAS_CHARDEV_MAJOR(IDETAPE_MAJOR);
3494 MODULE_DESCRIPTION("ATAPI Streaming TAPE Driver");
3495 MODULE_LICENSE("GPL");