1da177e4
LT
1/*
2 * The low performance USB storage driver (ub).
3 *
4 * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
5 * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com)
6 *
7 * This work is a part of Linux kernel, is derived from it,
8 * and is not licensed separately. See file COPYING for details.
9 *
10 * TODO (sorted by decreasing priority)
ef45cb62 11 * -- Return sense now that rq allows it (we always auto-sense anyway).
1da177e4
LT
12 * -- set readonly flag for CDs, set removable flag for CF readers
13 * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
1da177e4 14 * -- verify the 13 conditions and do bulk resets
ba6abf13 15 * -- highmem
1da177e4
LT
16 * -- move top_sense and work_bcs into separate allocations (if they survive)
17 * for cache purists and esoteric architectures.
ba6abf13 18 * -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
1da177e4 19 * -- prune comments, they are too voluminous
1da177e4 20 * -- Resolve XXX's
1872bceb 21 * -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
1da177e4
LT
22 */
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/usb.h>
a00828e9 26#include <linux/usb_usual.h>
1da177e4 27#include <linux/blkdev.h>
1da177e4 28#include <linux/timer.h>
45711f1a 29#include <linux/scatterlist.h>
5a0e3ad6 30#include <linux/slab.h>
8a6cfeb6 31#include <linux/smp_lock.h>
1da177e4
LT
32#include <scsi/scsi.h>
33
34#define DRV_NAME "ub"
1da177e4
LT
35
36#define UB_MAJOR 180
37
1872bceb
PZ
38/*
 39 * The command state machine is the key model for understanding this driver.
40 *
41 * The general rule is that all transitions are done towards the bottom
42 * of the diagram, thus preventing any loops.
43 *
44 * An exception to that is how the STAT state is handled. A counter allows it
45 * to be re-entered along the path marked with [C].
46 *
47 * +--------+
48 * ! INIT !
49 * +--------+
50 * !
51 * ub_scsi_cmd_start fails ->--------------------------------------\
52 * ! !
53 * V !
54 * +--------+ !
55 * ! CMD ! !
56 * +--------+ !
57 * ! +--------+ !
58 * was -EPIPE -->-------------------------------->! CLEAR ! !
59 * ! +--------+ !
60 * ! ! !
61 * was error -->------------------------------------- ! --------->\
62 * ! ! !
63 * /--<-- cmd->dir == NONE ? ! !
64 * ! ! ! !
65 * ! V ! !
66 * ! +--------+ ! !
67 * ! ! DATA ! ! !
68 * ! +--------+ ! !
69 * ! ! +---------+ ! !
70 * ! was -EPIPE -->--------------->! CLR2STS ! ! !
71 * ! ! +---------+ ! !
72 * ! ! ! ! !
73 * ! ! was error -->---- ! --------->\
74 * ! was error -->--------------------- ! ------------- ! --------->\
75 * ! ! ! ! !
76 * ! V ! ! !
77 * \--->+--------+ ! ! !
78 * ! STAT !<--------------------------/ ! !
79 * /--->+--------+ ! !
80 * ! ! ! !
81 * [C] was -EPIPE -->-----------\ ! !
82 * ! ! ! ! !
83 * +<---- len == 0 ! ! !
84 * ! ! ! ! !
85 * ! was error -->--------------------------------------!---------->\
86 * ! ! ! ! !
87 * +<---- bad CSW ! ! !
88 * +<---- bad tag ! ! !
89 * ! ! V ! !
90 * ! ! +--------+ ! !
91 * ! ! ! CLRRS ! ! !
92 * ! ! +--------+ ! !
93 * ! ! ! ! !
94 * \------- ! --------------------[C]--------\ ! !
95 * ! ! ! !
96 * cmd->error---\ +--------+ ! !
97 * ! +--------------->! SENSE !<----------/ !
98 * STAT_FAIL----/ +--------+ !
99 * ! ! V
100 * ! V +--------+
101 * \--------------------------------\--------------------->! DONE !
102 * +--------+
103 */
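/*
 * (A note tying the diagram to the code below: the [C] path is implemented
 * by ub_state_stat_counted(), which bumps cmd->stat_count on every re-entry
 * and drops the command to SENSE once the count reaches 4, so the apparent
 * loop in the diagram is bounded.)
 */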
104
1da177e4 105/*
f4800078
PZ
106 * This many LUNs per USB device.
107 * Every one of them takes a host, see UB_MAX_HOSTS.
1da177e4 108 */
9f793d2c 109#define UB_MAX_LUNS 9
f4800078
PZ
110
111/*
112 */
113
4fb729f5 114#define UB_PARTS_PER_LUN 8
1da177e4
LT
115
116#define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */
117
118#define UB_SENSE_SIZE 18
119
120/*
121 */
122
123/* command block wrapper */
124struct bulk_cb_wrap {
125 __le32 Signature; /* contains 'USBC' */
126 u32 Tag; /* unique per command id */
127 __le32 DataTransferLength; /* size of data */
 128 u8 Flags; /* direction in bit 7 */
f4800078 129 u8 Lun; /* LUN */
1da177e4
LT
 130 u8 Length; /* of the CDB */
131 u8 CDB[UB_MAX_CDB_SIZE]; /* max command */
132};
133
134#define US_BULK_CB_WRAP_LEN 31
 135#define US_BULK_CB_SIGN 0x43425355 /* spells out 'USBC' */
136#define US_BULK_FLAG_IN 1
137#define US_BULK_FLAG_OUT 0
138
139/* command status wrapper */
140struct bulk_cs_wrap {
141 __le32 Signature; /* should = 'USBS' */
142 u32 Tag; /* same as original command */
143 __le32 Residue; /* amount not transferred */
144 u8 Status; /* see below */
145};
146
147#define US_BULK_CS_WRAP_LEN 13
148#define US_BULK_CS_SIGN 0x53425355 /* spells out 'USBS' */
1da177e4
LT
149#define US_BULK_STAT_OK 0
150#define US_BULK_STAT_FAIL 1
151#define US_BULK_STAT_PHASE 2
152
153/* bulk-only class specific requests */
154#define US_BULK_RESET_REQUEST 0xff
155#define US_BULK_GET_MAX_LUN 0xfe
156
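/*
 * For reference, the wire layout implied by the two wrappers above
 * (standard Bulk-Only Transport, matching the struct fields byte for byte):
 *
 *   CBW, 31 bytes: 0..3 Signature 'USBC', 4..7 Tag, 8..11 transfer length,
 *                  12 Flags (bit 7: 1 = IN), 13 LUN, 14 CDB length,
 *                  15..30 CDB.
 *   CSW, 13 bytes: 0..3 Signature 'USBS', 4..7 Tag (echoed), 8..11 Residue,
 *                  12 Status (0 OK, 1 failed, 2 phase error).
 */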
157/*
158 */
159struct ub_dev;
160
64bd8453 161#define UB_MAX_REQ_SG 9 /* cdrecord requires 32KB and maybe a header */
1da177e4
LT
162#define UB_MAX_SECTORS 64
163
164/*
165 * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
166 * even if a webcam hogs the bus, but some devices need time to spin up.
167 */
168#define UB_URB_TIMEOUT (HZ*2)
169#define UB_DATA_TIMEOUT (HZ*5) /* ZIP does spin-ups in the data phase */
170#define UB_STAT_TIMEOUT (HZ*5) /* Same spinups and eject for a dataless cmd. */
171#define UB_CTRL_TIMEOUT (HZ/2) /* 500ms ought to be enough to clear a stall */
172
173/*
174 * An instance of a SCSI command in transit.
175 */
176#define UB_DIR_NONE 0
177#define UB_DIR_READ 1
178#define UB_DIR_ILLEGAL2 2
179#define UB_DIR_WRITE 3
180
181#define UB_DIR_CHAR(c) (((c)==UB_DIR_WRITE)? 'w': \
182 (((c)==UB_DIR_READ)? 'r': 'n'))
183
184enum ub_scsi_cmd_state {
185 UB_CMDST_INIT, /* Initial state */
186 UB_CMDST_CMD, /* Command submitted */
187 UB_CMDST_DATA, /* Data phase */
188 UB_CMDST_CLR2STS, /* Clearing before requesting status */
189 UB_CMDST_STAT, /* Status phase */
190 UB_CMDST_CLEAR, /* Clearing a stall (halt, actually) */
1872bceb 191 UB_CMDST_CLRRS, /* Clearing before retrying status */
1da177e4
LT
192 UB_CMDST_SENSE, /* Sending Request Sense */
193 UB_CMDST_DONE /* Final state */
194};
195
1da177e4
LT
196struct ub_scsi_cmd {
197 unsigned char cdb[UB_MAX_CDB_SIZE];
198 unsigned char cdb_len;
199
200 unsigned char dir; /* 0 - none, 1 - read, 3 - write. */
1da177e4
LT
201 enum ub_scsi_cmd_state state;
202 unsigned int tag;
203 struct ub_scsi_cmd *next;
204
205 int error; /* Return code - valid upon done */
206 unsigned int act_len; /* Return size */
207 unsigned char key, asc, ascq; /* May be valid if error==-EIO */
208
209 int stat_count; /* Retries getting status. */
2c51ae70 210 unsigned int timeo; /* jiffies until rq->timeout changes */
1da177e4 211
1da177e4 212 unsigned int len; /* Requested length */
a1cf96ef
PZ
213 unsigned int current_sg;
214 unsigned int nsg; /* sgv[nsg] */
215 struct scatterlist sgv[UB_MAX_REQ_SG];
1da177e4 216
f4800078 217 struct ub_lun *lun;
1da177e4
LT
218 void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
219 void *back;
220};
221
2c26c9e6
PZ
222struct ub_request {
223 struct request *rq;
224 unsigned int current_try;
225 unsigned int nsg; /* sgv[nsg] */
226 struct scatterlist sgv[UB_MAX_REQ_SG];
227};
228
1da177e4
LT
229/*
230 */
231struct ub_capacity {
232 unsigned long nsec; /* Linux size - 512 byte sectors */
233 unsigned int bsize; /* Linux hardsect_size */
234 unsigned int bshift; /* Shift between 512 and hard sects */
235};
236
1da177e4
LT
237/*
238 * This is a direct take-off from linux/include/completion.h
239 * The difference is that I do not wait on this thing, just poll.
240 * When I want to wait (ub_probe), I just use the stock completion.
241 *
242 * Note that INIT_COMPLETION takes no lock. It is correct. But why
243 * in the bloody hell that thing takes struct instead of pointer to struct
244 * is quite beyond me. I just copied it from the stock completion.
245 */
246struct ub_completion {
247 unsigned int done;
248 spinlock_t lock;
249};
250
251static inline void ub_init_completion(struct ub_completion *x)
252{
253 x->done = 0;
254 spin_lock_init(&x->lock);
255}
256
257#define UB_INIT_COMPLETION(x) ((x).done = 0)
258
259static void ub_complete(struct ub_completion *x)
260{
261 unsigned long flags;
262
263 spin_lock_irqsave(&x->lock, flags);
264 x->done++;
265 spin_unlock_irqrestore(&x->lock, flags);
266}
267
268static int ub_is_completed(struct ub_completion *x)
269{
270 unsigned long flags;
271 int ret;
272
273 spin_lock_irqsave(&x->lock, flags);
274 ret = x->done;
275 spin_unlock_irqrestore(&x->lock, flags);
276 return ret;
277}
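/*
 * The polling consumer of work_done is ub_scsi_dispatch() below: it calls
 * ub_is_completed() and simply bails out of the tasklet when the work URB
 * has not finished yet, rather than sleeping on a completion.
 */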
278
279/*
280 */
281struct ub_scsi_cmd_queue {
282 int qlen, qmax;
283 struct ub_scsi_cmd *head, *tail;
284};
285
286/*
f4800078
PZ
287 * The block device instance (one per LUN).
288 */
289struct ub_lun {
290 struct ub_dev *udev;
291 struct list_head link;
292 struct gendisk *disk;
293 int id; /* Host index */
294 int num; /* LUN number */
295 char name[16];
296
297 int changed; /* Media was changed */
298 int removable;
299 int readonly;
f4800078 300
2c26c9e6
PZ
301 struct ub_request urq;
302
f4800078
PZ
303 /* Use Ingo's mempool if or when we have more than one command. */
304 /*
305 * Currently we never need more than one command for the whole device.
306 * However, giving every LUN a command is a cheap and automatic way
307 * to enforce fairness between them.
308 */
309 int cmda[1];
310 struct ub_scsi_cmd cmdv[1];
311
312 struct ub_capacity capacity;
313};
314
315/*
316 * The USB device instance.
1da177e4
LT
317 */
318struct ub_dev {
65b4fe55 319 spinlock_t *lock;
1da177e4
LT
320 atomic_t poison; /* The USB device is disconnected */
321 int openc; /* protected by ub_lock! */
322 /* kref is too implicit for our taste */
2c26c9e6 323 int reset; /* Reset is running */
0da13c8c 324 int bad_resid;
1da177e4 325 unsigned int tagcnt;
f4800078 326 char name[12];
1da177e4
LT
327 struct usb_device *dev;
328 struct usb_interface *intf;
329
f4800078 330 struct list_head luns;
1da177e4
LT
331
332 unsigned int send_bulk_pipe; /* cached pipe values */
333 unsigned int recv_bulk_pipe;
334 unsigned int send_ctrl_pipe;
335 unsigned int recv_ctrl_pipe;
336
337 struct tasklet_struct tasklet;
338
1da177e4
LT
339 struct ub_scsi_cmd_queue cmd_queue;
340 struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */
341 unsigned char top_sense[UB_SENSE_SIZE];
342
343 struct ub_completion work_done;
344 struct urb work_urb;
345 struct timer_list work_timer;
346 int last_pipe; /* What might need clearing */
1872bceb 347 __le32 signature; /* Learned signature */
1da177e4
LT
348 struct bulk_cb_wrap work_bcb;
349 struct bulk_cs_wrap work_bcs;
350 struct usb_ctrlrequest work_cr;
351
2c26c9e6
PZ
352 struct work_struct reset_work;
353 wait_queue_head_t reset_wait;
1da177e4
LT
354};
355
356/*
357 */
358static void ub_cleanup(struct ub_dev *sc);
6c1eb8c1 359static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
2c26c9e6
PZ
360static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
361 struct ub_scsi_cmd *cmd, struct ub_request *urq);
362static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
363 struct ub_scsi_cmd *cmd, struct ub_request *urq);
1da177e4 364static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
3755100d 365static void ub_end_rq(struct request *rq, unsigned int status);
2c26c9e6
PZ
366static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
367 struct ub_request *urq, struct ub_scsi_cmd *cmd);
1da177e4 368static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
7d12e780 369static void ub_urb_complete(struct urb *urb);
1da177e4
LT
370static void ub_scsi_action(unsigned long _dev);
371static void ub_scsi_dispatch(struct ub_dev *sc);
372static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
a1cf96ef 373static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
1da177e4 374static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
1872bceb 375static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
1da177e4 376static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
1872bceb 377static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
1da177e4
LT
378static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
379static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
380 int stalled_pipe);
381static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
2c2e4a2e 382static void ub_reset_enter(struct ub_dev *sc, int try);
c4028958 383static void ub_reset_task(struct work_struct *work);
f4800078
PZ
384static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
385static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
386 struct ub_capacity *ret);
2c2e4a2e
PZ
387static int ub_sync_reset(struct ub_dev *sc);
388static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
f4800078 389static int ub_probe_lun(struct ub_dev *sc, int lnum);
1da177e4
LT
390
391/*
392 */
a00828e9
PZ
393#ifdef CONFIG_USB_LIBUSUAL
394
e6e244b6 395#define ub_usb_ids usb_storage_usb_ids
a00828e9
PZ
396#else
397
577cdf0c 398static const struct usb_device_id ub_usb_ids[] = {
1da177e4
LT
399 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
400 { }
401};
402
403MODULE_DEVICE_TABLE(usb, ub_usb_ids);
a00828e9 404#endif /* CONFIG_USB_LIBUSUAL */
1da177e4
LT
405
406/*
407 * Find me a way to identify "next free minor" for add_disk(),
408 * and the array disappears the next day. However, the number of
409 * hosts has something to do with the naming and /proc/partitions.
410 * This has to be thought out in detail before changing.
 411 * If UB_MAX_HOSTS was 1000, we'd use a bitmap. Or a better data structure.
412 */
413#define UB_MAX_HOSTS 26
414static char ub_hostv[UB_MAX_HOSTS];
f4800078 415
65b4fe55
PZ
416#define UB_QLOCK_NUM 5
417static spinlock_t ub_qlockv[UB_QLOCK_NUM];
418static int ub_qlock_next = 0;
419
1da177e4
LT
420static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */
421
1da177e4
LT
422/*
423 * The id allocator.
424 *
425 * This also stores the host for indexing by minor, which is somewhat dirty.
426 */
427static int ub_id_get(void)
428{
429 unsigned long flags;
430 int i;
431
432 spin_lock_irqsave(&ub_lock, flags);
433 for (i = 0; i < UB_MAX_HOSTS; i++) {
434 if (ub_hostv[i] == 0) {
435 ub_hostv[i] = 1;
436 spin_unlock_irqrestore(&ub_lock, flags);
437 return i;
438 }
439 }
440 spin_unlock_irqrestore(&ub_lock, flags);
441 return -1;
442}
443
444static void ub_id_put(int id)
445{
446 unsigned long flags;
447
448 if (id < 0 || id >= UB_MAX_HOSTS) {
449 printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
450 return;
451 }
452
453 spin_lock_irqsave(&ub_lock, flags);
454 if (ub_hostv[id] == 0) {
455 spin_unlock_irqrestore(&ub_lock, flags);
456 printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
457 return;
458 }
459 ub_hostv[id] = 0;
460 spin_unlock_irqrestore(&ub_lock, flags);
461}
462
65b4fe55
PZ
463/*
464 * This is necessitated by the fact that blk_cleanup_queue does not
 465 * necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
 466 * Since our blk_init_queue() passes a spinlock common with ub_dev,
 467 * we have lifetime issues when ub_cleanup frees ub_dev.
468 */
469static spinlock_t *ub_next_lock(void)
470{
471 unsigned long flags;
472 spinlock_t *ret;
473
474 spin_lock_irqsave(&ub_lock, flags);
475 ret = &ub_qlockv[ub_qlock_next];
476 ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
477 spin_unlock_irqrestore(&ub_lock, flags);
478 return ret;
479}
480
1da177e4
LT
481/*
482 * Downcount for deallocation. This rides on two assumptions:
483 * - once something is poisoned, its refcount cannot grow
484 * - opens cannot happen at this time (del_gendisk was done)
485 * If the above is true, we can drop the lock, which we need for
486 * blk_cleanup_queue(): the silly thing may attempt to sleep.
487 * [Actually, it never needs to sleep for us, but it calls might_sleep()]
488 */
489static void ub_put(struct ub_dev *sc)
490{
491 unsigned long flags;
492
493 spin_lock_irqsave(&ub_lock, flags);
494 --sc->openc;
495 if (sc->openc == 0 && atomic_read(&sc->poison)) {
496 spin_unlock_irqrestore(&ub_lock, flags);
497 ub_cleanup(sc);
498 } else {
499 spin_unlock_irqrestore(&ub_lock, flags);
500 }
501}
502
503/*
504 * Final cleanup and deallocation.
505 */
506static void ub_cleanup(struct ub_dev *sc)
507{
f4800078
PZ
508 struct list_head *p;
509 struct ub_lun *lun;
165125e1 510 struct request_queue *q;
1da177e4 511
f4800078
PZ
512 while (!list_empty(&sc->luns)) {
513 p = sc->luns.next;
514 lun = list_entry(p, struct ub_lun, link);
515 list_del(p);
1da177e4 516
f4800078
PZ
517 /* I don't think queue can be NULL. But... Stolen from sx8.c */
518 if ((q = lun->disk->queue) != NULL)
519 blk_cleanup_queue(q);
520 /*
521 * If we zero disk->private_data BEFORE put_disk, we have
522 * to check for NULL all over the place in open, release,
523 * check_media and revalidate, because the block level
524 * semaphore is well inside the put_disk.
525 * But we cannot zero after the call, because *disk is gone.
526 * The sd.c is blatantly racy in this area.
527 */
528 /* disk->private_data = NULL; */
529 put_disk(lun->disk);
530 lun->disk = NULL;
531
532 ub_id_put(lun->id);
533 kfree(lun);
534 }
1da177e4 535
77ef6c4d
PZ
536 usb_set_intfdata(sc->intf, NULL);
537 usb_put_intf(sc->intf);
538 usb_put_dev(sc->dev);
1da177e4
LT
539 kfree(sc);
540}
541
542/*
543 * The "command allocator".
544 */
f4800078 545static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
1da177e4
LT
546{
547 struct ub_scsi_cmd *ret;
548
f4800078 549 if (lun->cmda[0])
1da177e4 550 return NULL;
f4800078
PZ
551 ret = &lun->cmdv[0];
552 lun->cmda[0] = 1;
1da177e4
LT
553 return ret;
554}
555
f4800078 556static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
1da177e4 557{
f4800078 558 if (cmd != &lun->cmdv[0]) {
1da177e4 559 printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
f4800078 560 lun->name, cmd);
1da177e4
LT
561 return;
562 }
f4800078
PZ
563 if (!lun->cmda[0]) {
564 printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
1da177e4
LT
565 return;
566 }
f4800078 567 lun->cmda[0] = 0;
1da177e4
LT
568}
569
570/*
571 * The command queue.
572 */
573static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
574{
575 struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
576
577 if (t->qlen++ == 0) {
578 t->head = cmd;
579 t->tail = cmd;
580 } else {
581 t->tail->next = cmd;
582 t->tail = cmd;
583 }
584
585 if (t->qlen > t->qmax)
586 t->qmax = t->qlen;
587}
588
589static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
590{
591 struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
592
593 if (t->qlen++ == 0) {
594 t->head = cmd;
595 t->tail = cmd;
596 } else {
597 cmd->next = t->head;
598 t->head = cmd;
599 }
600
601 if (t->qlen > t->qmax)
602 t->qmax = t->qlen;
603}
604
605static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
606{
607 struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
608 struct ub_scsi_cmd *cmd;
609
610 if (t->qlen == 0)
611 return NULL;
612 if (--t->qlen == 0)
613 t->tail = NULL;
614 cmd = t->head;
615 t->head = cmd->next;
616 cmd->next = NULL;
617 return cmd;
618}
619
620#define ub_cmdq_peek(sc) ((sc)->cmd_queue.head)
621
622/*
623 * The request function is our main entry point
624 */
625
165125e1 626static void ub_request_fn(struct request_queue *q)
1da177e4 627{
f4800078 628 struct ub_lun *lun = q->queuedata;
1da177e4
LT
629 struct request *rq;
630
9934c8c0 631 while ((rq = blk_peek_request(q)) != NULL) {
6c1eb8c1 632 if (ub_request_fn_1(lun, rq) != 0) {
1da177e4
LT
633 blk_stop_queue(q);
634 break;
635 }
636 }
637}
638
6c1eb8c1 639static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
1da177e4 640{
f4800078 641 struct ub_dev *sc = lun->udev;
1da177e4 642 struct ub_scsi_cmd *cmd;
2c26c9e6
PZ
643 struct ub_request *urq;
644 int n_elem;
1da177e4 645
d1ad4ea3 646 if (atomic_read(&sc->poison)) {
9934c8c0 647 blk_start_request(rq);
3755100d 648 ub_end_rq(rq, DID_NO_CONNECT << 16);
d1ad4ea3
PZ
649 return 0;
650 }
651
4c4762d1 652 if (lun->changed && rq->cmd_type != REQ_TYPE_BLOCK_PC) {
9934c8c0 653 blk_start_request(rq);
3755100d 654 ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
1da177e4
LT
655 return 0;
656 }
657
2c26c9e6
PZ
658 if (lun->urq.rq != NULL)
659 return -1;
f4800078 660 if ((cmd = ub_get_cmd(lun)) == NULL)
1da177e4
LT
661 return -1;
662 memset(cmd, 0, sizeof(struct ub_scsi_cmd));
663
9934c8c0 664 blk_start_request(rq);
2c26c9e6
PZ
665
666 urq = &lun->urq;
667 memset(urq, 0, sizeof(struct ub_request));
668 urq->rq = rq;
669
670 /*
671 * get scatterlist from block layer
672 */
541645be 673 sg_init_table(&urq->sgv[0], UB_MAX_REQ_SG);
2c26c9e6
PZ
674 n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
675 if (n_elem < 0) {
b5600339 676 /* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
2c26c9e6 677 printk(KERN_INFO "%s: failed request map (%d)\n",
b5600339 678 lun->name, n_elem);
2c26c9e6
PZ
679 goto drop;
680 }
681 if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */
682 printk(KERN_WARNING "%s: request with %d segments\n",
683 lun->name, n_elem);
684 goto drop;
685 }
686 urq->nsg = n_elem;
2c26c9e6 687
33659ebb 688 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
2c26c9e6 689 ub_cmd_build_packet(sc, lun, cmd, urq);
1da177e4 690 } else {
2c26c9e6 691 ub_cmd_build_block(sc, lun, cmd, urq);
1da177e4 692 }
1da177e4 693 cmd->state = UB_CMDST_INIT;
f4800078 694 cmd->lun = lun;
1da177e4 695 cmd->done = ub_rw_cmd_done;
2c26c9e6 696 cmd->back = urq;
1da177e4
LT
697
698 cmd->tag = sc->tagcnt++;
2c26c9e6
PZ
699 if (ub_submit_scsi(sc, cmd) != 0)
700 goto drop;
701
702 return 0;
1da177e4 703
2c26c9e6
PZ
704drop:
705 ub_put_cmd(lun, cmd);
3755100d 706 ub_end_rq(rq, DID_ERROR << 16);
1da177e4
LT
707 return 0;
708}
709
2c26c9e6
PZ
710static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
711 struct ub_scsi_cmd *cmd, struct ub_request *urq)
1da177e4 712{
2c26c9e6 713 struct request *rq = urq->rq;
a1cf96ef 714 unsigned int block, nblks;
1da177e4
LT
715
716 if (rq_data_dir(rq) == WRITE)
2c26c9e6 717 cmd->dir = UB_DIR_WRITE;
1da177e4 718 else
2c26c9e6 719 cmd->dir = UB_DIR_READ;
1da177e4 720
2c26c9e6
PZ
721 cmd->nsg = urq->nsg;
722 memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
1da177e4
LT
723
724 /*
725 * build the command
726 *
e1defc4f 727 * The call to blk_queue_logical_block_size() guarantees that request
1da177e4
LT
728 * is aligned, but it is given in terms of 512 byte units, always.
729 */
83096ebf
TH
730 block = blk_rq_pos(rq) >> lun->capacity.bshift;
731 nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;
ba6abf13 732
2c26c9e6 733 cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
1da177e4
LT
734 /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
735 cmd->cdb[2] = block >> 24;
736 cmd->cdb[3] = block >> 16;
737 cmd->cdb[4] = block >> 8;
738 cmd->cdb[5] = block;
739 cmd->cdb[7] = nblks >> 8;
740 cmd->cdb[8] = nblks;
741 cmd->cdb_len = 10;
742
1011c1b9 743 cmd->len = blk_rq_bytes(rq);
1da177e4
LT
744}
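/*
 * A worked example of the shift arithmetic above, with illustrative numbers
 * only: for a LUN with 2048-byte blocks, capacity.bshift == 2, so a request
 * starting at 512-byte sector 1000 and spanning 16 sectors becomes device
 * LBA 1000 >> 2 == 250 with a transfer length of 16 >> 2 == 4 logical
 * blocks in the READ(10)/WRITE(10) CDB built above.
 */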
745
2c26c9e6
PZ
746static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
747 struct ub_scsi_cmd *cmd, struct ub_request *urq)
1da177e4 748{
2c26c9e6 749 struct request *rq = urq->rq;
1da177e4 750
b0790410 751 if (blk_rq_bytes(rq) == 0) {
1da177e4
LT
752 cmd->dir = UB_DIR_NONE;
753 } else {
754 if (rq_data_dir(rq) == WRITE)
755 cmd->dir = UB_DIR_WRITE;
756 else
757 cmd->dir = UB_DIR_READ;
758 }
a1cf96ef 759
2c26c9e6
PZ
760 cmd->nsg = urq->nsg;
761 memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
a1cf96ef
PZ
762
763 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
764 cmd->cdb_len = rq->cmd_len;
765
b0790410 766 cmd->len = blk_rq_bytes(rq);
2c51ae70
PZ
767
768 /*
769 * To reapply this to every URB is not as incorrect as it looks.
770 * In return, we avoid any complicated tracking calculations.
771 */
772 cmd->timeo = rq->timeout;
1da177e4
LT
773}
774
775static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
776{
f4800078 777 struct ub_lun *lun = cmd->lun;
2c26c9e6
PZ
778 struct ub_request *urq = cmd->back;
779 struct request *rq;
d1ad4ea3 780 unsigned int scsi_status;
1da177e4 781
2c26c9e6
PZ
782 rq = urq->rq;
783
a1cf96ef 784 if (cmd->error == 0) {
33659ebb 785 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
5f49f631
TH
786 if (cmd->act_len >= rq->resid_len)
787 rq->resid_len = 0;
788 else
789 rq->resid_len -= cmd->act_len;
ef45cb62
PZ
790 scsi_status = 0;
791 } else {
792 if (cmd->act_len != cmd->len) {
ef45cb62
PZ
793 scsi_status = SAM_STAT_CHECK_CONDITION;
794 } else {
795 scsi_status = 0;
796 }
ba6abf13 797 }
a1cf96ef 798 } else {
33659ebb 799 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
a1cf96ef
PZ
800 /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
801 memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
802 rq->sense_len = UB_SENSE_SIZE;
803 if (sc->top_sense[0] != 0)
d1ad4ea3 804 scsi_status = SAM_STAT_CHECK_CONDITION;
a1cf96ef 805 else
d1ad4ea3 806 scsi_status = DID_ERROR << 16;
2c26c9e6 807 } else {
82fe26ba
PZ
808 if (cmd->error == -EIO &&
809 (cmd->key == 0 ||
810 cmd->key == MEDIUM_ERROR ||
811 cmd->key == UNIT_ATTENTION)) {
2c26c9e6
PZ
812 if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
813 return;
814 }
d1ad4ea3 815 scsi_status = SAM_STAT_CHECK_CONDITION;
a1cf96ef
PZ
816 }
817 }
ba6abf13 818
2c26c9e6
PZ
819 urq->rq = NULL;
820
f4800078 821 ub_put_cmd(lun, cmd);
3755100d 822 ub_end_rq(rq, scsi_status);
ba6abf13 823 blk_start_queue(lun->disk->queue);
1da177e4
LT
824}
825
3755100d 826static void ub_end_rq(struct request *rq, unsigned int scsi_status)
1da177e4 827{
7d699baf 828 int error;
d1ad4ea3
PZ
829
830 if (scsi_status == 0) {
7d699baf 831 error = 0;
d1ad4ea3 832 } else {
7d699baf 833 error = -EIO;
d1ad4ea3
PZ
834 rq->errors = scsi_status;
835 }
3755100d 836 __blk_end_request_all(rq, error);
1da177e4
LT
837}
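/*
 * A note on the scsi_status argument: callers pass either a SAM status byte
 * (e.g. SAM_STAT_CHECK_CONDITION) or a host code shifted into the upper
 * half (DID_ERROR << 16, DID_NO_CONNECT << 16). Anything nonzero is mapped
 * to -EIO here and also stored raw in rq->errors for the upper layers.
 */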
838
2c26c9e6
PZ
839static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
840 struct ub_request *urq, struct ub_scsi_cmd *cmd)
841{
842
843 if (atomic_read(&sc->poison))
844 return -ENXIO;
845
2c2e4a2e 846 ub_reset_enter(sc, urq->current_try);
2c26c9e6
PZ
847
848 if (urq->current_try >= 3)
849 return -EIO;
850 urq->current_try++;
b5600339
PZ
851
852 /* Remove this if anyone complains of flooding. */
853 printk(KERN_DEBUG "%s: dir %c len/act %d/%d "
2c26c9e6
PZ
854 "[sense %x %02x %02x] retry %d\n",
855 sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
856 cmd->key, cmd->asc, cmd->ascq, urq->current_try);
857
858 memset(cmd, 0, sizeof(struct ub_scsi_cmd));
859 ub_cmd_build_block(sc, lun, cmd, urq);
860
861 cmd->state = UB_CMDST_INIT;
862 cmd->lun = lun;
863 cmd->done = ub_rw_cmd_done;
864 cmd->back = urq;
865
866 cmd->tag = sc->tagcnt++;
867
868#if 0 /* Wasteful */
869 return ub_submit_scsi(sc, cmd);
870#else
871 ub_cmdq_add(sc, cmd);
872 return 0;
873#endif
874}
875
1da177e4
LT
876/*
877 * Submit a regular SCSI operation (not an auto-sense).
878 *
879 * The Iron Law of Good Submit Routine is:
880 * Zero return - callback is done, Nonzero return - callback is not done.
881 * No exceptions.
882 *
883 * Host is assumed locked.
1da177e4
LT
884 */
885static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
886{
887
888 if (cmd->state != UB_CMDST_INIT ||
889 (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
890 return -EINVAL;
891 }
892
893 ub_cmdq_add(sc, cmd);
894 /*
895 * We can call ub_scsi_dispatch(sc) right away here, but it's a little
896 * safer to jump to a tasklet, in case upper layers do something silly.
897 */
898 tasklet_schedule(&sc->tasklet);
899 return 0;
900}
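/*
 * An illustrative caller sketch of the rule above (not compiled; modeled on
 * the drop: path of ub_request_fn_1): on a nonzero return the submitter
 * still owns the command and must finish the request itself, because the
 * ->done callback will never run.
 */
#if 0
	cmd->tag = sc->tagcnt++;
	if (ub_submit_scsi(sc, cmd) != 0) {
		ub_put_cmd(lun, cmd);		/* callback will not release it */
		ub_end_rq(rq, DID_ERROR << 16);	/* finish the request ourselves */
	}
#endif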
901
902/*
903 * Submit the first URB for the queued command.
904 * This function does not deal with queueing in any way.
905 */
906static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
907{
908 struct bulk_cb_wrap *bcb;
909 int rc;
910
911 bcb = &sc->work_bcb;
912
913 /*
914 * ``If the allocation length is eighteen or greater, and a device
 915 * server returns less than eighteen bytes of data, the application
916 * client should assume that the bytes not transferred would have been
917 * zeroes had the device server returned those bytes.''
918 *
919 * We zero sense for all commands so that when a packet request
920 * fails it does not return a stale sense.
921 */
922 memset(&sc->top_sense, 0, UB_SENSE_SIZE);
923
924 /* set up the command wrapper */
925 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
926 bcb->Tag = cmd->tag; /* Endianness is not important */
927 bcb->DataTransferLength = cpu_to_le32(cmd->len);
928 bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
f4800078 929 bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
1da177e4
LT
930 bcb->Length = cmd->cdb_len;
931
932 /* copy the command payload */
933 memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);
934
935 UB_INIT_COMPLETION(sc->work_done);
936
937 sc->last_pipe = sc->send_bulk_pipe;
938 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
939 bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
1da177e4 940
1da177e4
LT
941 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
942 /* XXX Clear stalls */
1da177e4
LT
943 ub_complete(&sc->work_done);
944 return rc;
945 }
946
947 sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
948 add_timer(&sc->work_timer);
949
950 cmd->state = UB_CMDST_CMD;
1da177e4
LT
951 return 0;
952}
953
954/*
955 * Timeout handler.
956 */
957static void ub_urb_timeout(unsigned long arg)
958{
959 struct ub_dev *sc = (struct ub_dev *) arg;
960 unsigned long flags;
961
65b4fe55 962 spin_lock_irqsave(sc->lock, flags);
b31f821c
PZ
963 if (!ub_is_completed(&sc->work_done))
964 usb_unlink_urb(&sc->work_urb);
65b4fe55 965 spin_unlock_irqrestore(sc->lock, flags);
1da177e4
LT
966}
967
968/*
969 * Completion routine for the work URB.
970 *
971 * This can be called directly from usb_submit_urb (while we have
972 * the sc->lock taken) and from an interrupt (while we do NOT have
973 * the sc->lock taken). Therefore, bounce this off to a tasklet.
974 */
7d12e780 975static void ub_urb_complete(struct urb *urb)
1da177e4
LT
976{
977 struct ub_dev *sc = urb->context;
978
979 ub_complete(&sc->work_done);
980 tasklet_schedule(&sc->tasklet);
981}
982
983static void ub_scsi_action(unsigned long _dev)
984{
985 struct ub_dev *sc = (struct ub_dev *) _dev;
986 unsigned long flags;
987
65b4fe55 988 spin_lock_irqsave(sc->lock, flags);
1da177e4 989 ub_scsi_dispatch(sc);
65b4fe55 990 spin_unlock_irqrestore(sc->lock, flags);
1da177e4
LT
991}
992
993static void ub_scsi_dispatch(struct ub_dev *sc)
994{
995 struct ub_scsi_cmd *cmd;
996 int rc;
997
2c26c9e6 998 while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
1da177e4
LT
999 if (cmd->state == UB_CMDST_DONE) {
1000 ub_cmdq_pop(sc);
1001 (*cmd->done)(sc, cmd);
1002 } else if (cmd->state == UB_CMDST_INIT) {
1da177e4
LT
1003 if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
1004 break;
1005 cmd->error = rc;
1006 cmd->state = UB_CMDST_DONE;
1da177e4
LT
1007 } else {
1008 if (!ub_is_completed(&sc->work_done))
1009 break;
b31f821c 1010 del_timer(&sc->work_timer);
1da177e4
LT
1011 ub_scsi_urb_compl(sc, cmd);
1012 }
1013 }
1014}
1015
1016static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1017{
1018 struct urb *urb = &sc->work_urb;
1019 struct bulk_cs_wrap *bcs;
3444b26a 1020 int endp;
2c26c9e6 1021 int len;
1da177e4
LT
1022 int rc;
1023
1024 if (atomic_read(&sc->poison)) {
2c26c9e6
PZ
1025 ub_state_done(sc, cmd, -ENODEV);
1026 return;
1da177e4
LT
1027 }
1028
3444b26a
DV
1029 endp = usb_pipeendpoint(sc->last_pipe);
1030 if (usb_pipein(sc->last_pipe))
1031 endp |= USB_DIR_IN;
1032
1da177e4
LT
1033 if (cmd->state == UB_CMDST_CLEAR) {
1034 if (urb->status == -EPIPE) {
1035 /*
 1036 * STALL while clearing STALL.
1037 * The control pipe clears itself - nothing to do.
1da177e4 1038 */
f4800078
PZ
1039 printk(KERN_NOTICE "%s: stall on control pipe\n",
1040 sc->name);
1da177e4
LT
1041 goto Bad_End;
1042 }
1043
1044 /*
1045 * We ignore the result for the halt clear.
1046 */
1047
3444b26a 1048 usb_reset_endpoint(sc->dev, endp);
1da177e4
LT
1049
1050 ub_state_sense(sc, cmd);
1051
1052 } else if (cmd->state == UB_CMDST_CLR2STS) {
1053 if (urb->status == -EPIPE) {
f4800078
PZ
1054 printk(KERN_NOTICE "%s: stall on control pipe\n",
1055 sc->name);
1da177e4
LT
1056 goto Bad_End;
1057 }
1058
1059 /*
1060 * We ignore the result for the halt clear.
1061 */
1062
3444b26a 1063 usb_reset_endpoint(sc->dev, endp);
1da177e4
LT
1064
1065 ub_state_stat(sc, cmd);
1066
1872bceb
PZ
1067 } else if (cmd->state == UB_CMDST_CLRRS) {
1068 if (urb->status == -EPIPE) {
1872bceb
PZ
1069 printk(KERN_NOTICE "%s: stall on control pipe\n",
1070 sc->name);
1071 goto Bad_End;
1072 }
1073
1074 /*
1075 * We ignore the result for the halt clear.
1076 */
1077
3444b26a 1078 usb_reset_endpoint(sc->dev, endp);
1872bceb
PZ
1079
1080 ub_state_stat_counted(sc, cmd);
1081
1da177e4 1082 } else if (cmd->state == UB_CMDST_CMD) {
2c26c9e6
PZ
1083 switch (urb->status) {
1084 case 0:
1085 break;
1086 case -EOVERFLOW:
1087 goto Bad_End;
1088 case -EPIPE:
1da177e4
LT
1089 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1090 if (rc != 0) {
1091 printk(KERN_NOTICE "%s: "
f4800078
PZ
1092 "unable to submit clear (%d)\n",
1093 sc->name, rc);
1da177e4
LT
1094 /*
1095 * This is typically ENOMEM or some other such shit.
1096 * Retrying is pointless. Just do Bad End on it...
1097 */
2c26c9e6
PZ
1098 ub_state_done(sc, cmd, rc);
1099 return;
1da177e4
LT
1100 }
1101 cmd->state = UB_CMDST_CLEAR;
1da177e4 1102 return;
2c26c9e6
PZ
1103 case -ESHUTDOWN: /* unplug */
1104 case -EILSEQ: /* unplug timeout on uhci */
1105 ub_state_done(sc, cmd, -ENODEV);
1106 return;
1107 default:
1da177e4
LT
1108 goto Bad_End;
1109 }
1110 if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
1da177e4
LT
1111 goto Bad_End;
1112 }
1113
a1cf96ef 1114 if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
1da177e4
LT
1115 ub_state_stat(sc, cmd);
1116 return;
1117 }
1118
a1cf96ef
PZ
1119 // udelay(125); // usb-storage has this
1120 ub_data_start(sc, cmd);
1da177e4
LT
1121
1122 } else if (cmd->state == UB_CMDST_DATA) {
1123 if (urb->status == -EPIPE) {
1124 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1125 if (rc != 0) {
1126 printk(KERN_NOTICE "%s: "
f4800078
PZ
1127 "unable to submit clear (%d)\n",
1128 sc->name, rc);
2c26c9e6
PZ
1129 ub_state_done(sc, cmd, rc);
1130 return;
1da177e4
LT
1131 }
1132 cmd->state = UB_CMDST_CLR2STS;
1da177e4
LT
1133 return;
1134 }
1135 if (urb->status == -EOVERFLOW) {
1136 /*
1137 * A babble? Failure, but we must transfer CSW now.
1138 */
1139 cmd->error = -EOVERFLOW; /* A cheap trick... */
a1cf96ef
PZ
1140 ub_state_stat(sc, cmd);
1141 return;
1da177e4 1142 }
2c26c9e6
PZ
1143
1144 if (cmd->dir == UB_DIR_WRITE) {
1145 /*
1146 * Do not continue writes in case of a failure.
1147 * Doing so would cause sectors to be mixed up,
1148 * which is worse than sectors lost.
1149 *
1150 * We must try to read the CSW, or many devices
1151 * get confused.
1152 */
1153 len = urb->actual_length;
1154 if (urb->status != 0 ||
1155 len != cmd->sgv[cmd->current_sg].length) {
1156 cmd->act_len += len;
2c26c9e6
PZ
1157
1158 cmd->error = -EIO;
1159 ub_state_stat(sc, cmd);
1160 return;
1161 }
1162
1163 } else {
1164 /*
1165 * If an error occurs on read, we record it, and
 1166 * continue to fetch data in order to avoid a bubble.
 1167 *
 1168 * As a small shortcut, we stop if we detect that
 1169 * a CSW got mixed into the data.
1170 */
1171 if (urb->status != 0)
1172 cmd->error = -EIO;
1173
1174 len = urb->actual_length;
1175 if (urb->status != 0 ||
1176 len != cmd->sgv[cmd->current_sg].length) {
1177 if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
1178 goto Bad_End;
1179 }
1180 }
1da177e4 1181
a1cf96ef 1182 cmd->act_len += urb->actual_length;
1da177e4 1183
a1cf96ef
PZ
1184 if (++cmd->current_sg < cmd->nsg) {
1185 ub_data_start(sc, cmd);
1186 return;
1187 }
1da177e4
LT
1188 ub_state_stat(sc, cmd);
1189
1190 } else if (cmd->state == UB_CMDST_STAT) {
1191 if (urb->status == -EPIPE) {
1192 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1193 if (rc != 0) {
1194 printk(KERN_NOTICE "%s: "
f4800078
PZ
1195 "unable to submit clear (%d)\n",
1196 sc->name, rc);
2c26c9e6
PZ
1197 ub_state_done(sc, cmd, rc);
1198 return;
1da177e4 1199 }
1872bceb
PZ
1200
1201 /*
1202 * Having a stall when getting CSW is an error, so
 1203 * make sure upper levels are not oblivious to it.
1204 */
1205 cmd->error = -EIO; /* A cheap trick... */
1206
1207 cmd->state = UB_CMDST_CLRRS;
1da177e4
LT
1208 return;
1209 }
2c26c9e6
PZ
1210
1211 /* Catch everything, including -EOVERFLOW and other nasties. */
1da177e4
LT
1212 if (urb->status != 0)
1213 goto Bad_End;
1214
1215 if (urb->actual_length == 0) {
1872bceb 1216 ub_state_stat_counted(sc, cmd);
1da177e4
LT
1217 return;
1218 }
1219
1220 /*
1221 * Check the returned Bulk protocol status.
1872bceb 1222 * The status block has to be validated first.
1da177e4
LT
1223 */
1224
1225 bcs = &sc->work_bcs;
1872bceb
PZ
1226
1227 if (sc->signature == cpu_to_le32(0)) {
1da177e4 1228 /*
1872bceb
PZ
1229 * This is the first reply, so do not perform the check.
1230 * Instead, remember the signature the device uses
1231 * for future checks. But do not allow a nul.
1da177e4 1232 */
1872bceb
PZ
1233 sc->signature = bcs->Signature;
1234 if (sc->signature == cpu_to_le32(0)) {
1235 ub_state_stat_counted(sc, cmd);
1236 return;
1237 }
1238 } else {
1239 if (bcs->Signature != sc->signature) {
1240 ub_state_stat_counted(sc, cmd);
1241 return;
1242 }
1da177e4 1243 }
1da177e4
LT
1244
1245 if (bcs->Tag != cmd->tag) {
1246 /*
1247 * This usually happens when we disagree with the
1248 * device's microcode about something. For instance,
1249 * a few of them throw this after timeouts. They buffer
 1250 * commands and reply to commands we timed out before.
1251 * Without flushing these replies we loop forever.
1252 */
1872bceb 1253 ub_state_stat_counted(sc, cmd);
1da177e4
LT
1254 return;
1255 }
1256
0da13c8c
PZ
1257 if (!sc->bad_resid) {
1258 len = le32_to_cpu(bcs->Residue);
1259 if (len != cmd->len - cmd->act_len) {
1260 /*
1261 * Only start ignoring if this cmd ended well.
1262 */
1263 if (cmd->len == cmd->act_len) {
1264 printk(KERN_NOTICE "%s: "
1265 "bad residual %d of %d, ignoring\n",
1266 sc->name, len, cmd->len);
1267 sc->bad_resid = 1;
1268 }
1269 }
1872bceb
PZ
1270 }
1271
1da177e4
LT
1272 switch (bcs->Status) {
1273 case US_BULK_STAT_OK:
1274 break;
1275 case US_BULK_STAT_FAIL:
1276 ub_state_sense(sc, cmd);
1277 return;
1278 case US_BULK_STAT_PHASE:
1da177e4
LT
1279 goto Bad_End;
1280 default:
1281 printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
1282 sc->name, bcs->Status);
2c26c9e6
PZ
1283 ub_state_done(sc, cmd, -EINVAL);
1284 return;
1da177e4
LT
1285 }
1286
1287 /* Not zeroing error to preserve a babble indicator */
1872bceb
PZ
1288 if (cmd->error != 0) {
1289 ub_state_sense(sc, cmd);
1290 return;
1291 }
1da177e4 1292 cmd->state = UB_CMDST_DONE;
1da177e4
LT
1293 ub_cmdq_pop(sc);
1294 (*cmd->done)(sc, cmd);
1295
1296 } else if (cmd->state == UB_CMDST_SENSE) {
1297 ub_state_done(sc, cmd, -EIO);
1298
1299 } else {
9029b174 1300 printk(KERN_WARNING "%s: wrong command state %d\n",
f4800078 1301 sc->name, cmd->state);
2c26c9e6
PZ
1302 ub_state_done(sc, cmd, -EINVAL);
1303 return;
1da177e4
LT
1304 }
1305 return;
1306
1307Bad_End: /* Little Excel is dead */
1308 ub_state_done(sc, cmd, -EIO);
1309}
1310
a1cf96ef
PZ
1311/*
1312 * Factorization helper for the command state machine:
1313 * Initiate a data segment transfer.
1314 */
1315static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1316{
1317 struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
1318 int pipe;
1319 int rc;
1320
1321 UB_INIT_COMPLETION(sc->work_done);
1322
1323 if (cmd->dir == UB_DIR_READ)
1324 pipe = sc->recv_bulk_pipe;
1325 else
1326 pipe = sc->send_bulk_pipe;
1327 sc->last_pipe = pipe;
45711f1a
JA
1328 usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
1329 sg->length, ub_urb_complete, sc);
a1cf96ef
PZ
1330
1331 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1332 /* XXX Clear stalls */
a1cf96ef
PZ
1333 ub_complete(&sc->work_done);
1334 ub_state_done(sc, cmd, rc);
1335 return;
1336 }
1337
2c51ae70
PZ
1338 if (cmd->timeo)
1339 sc->work_timer.expires = jiffies + cmd->timeo;
1340 else
1341 sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
a1cf96ef
PZ
1342 add_timer(&sc->work_timer);
1343
1344 cmd->state = UB_CMDST_DATA;
a1cf96ef
PZ
1345}
1346
1da177e4
LT
1347/*
1348 * Factorization helper for the command state machine:
1349 * Finish the command.
1350 */
1351static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
1352{
1353
1354 cmd->error = rc;
1355 cmd->state = UB_CMDST_DONE;
1da177e4
LT
1356 ub_cmdq_pop(sc);
1357 (*cmd->done)(sc, cmd);
1358}
1359
1360/*
1361 * Factorization helper for the command state machine:
1362 * Submit a CSW read.
1363 */
1872bceb 1364static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1da177e4
LT
1365{
1366 int rc;
1367
1368 UB_INIT_COMPLETION(sc->work_done);
1369
1370 sc->last_pipe = sc->recv_bulk_pipe;
1371 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
1372 &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
1da177e4
LT
1373
1374 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1375 /* XXX Clear stalls */
1da177e4
LT
1376 ub_complete(&sc->work_done);
1377 ub_state_done(sc, cmd, rc);
1872bceb 1378 return -1;
1da177e4
LT
1379 }
1380
2c51ae70
PZ
1381 if (cmd->timeo)
1382 sc->work_timer.expires = jiffies + cmd->timeo;
1383 else
1384 sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
1da177e4 1385 add_timer(&sc->work_timer);
1872bceb 1386 return 0;
1da177e4
LT
1387}
1388
1389/*
1390 * Factorization helper for the command state machine:
1391 * Submit a CSW read and go to STAT state.
1392 */
1393static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1394{
1872bceb
PZ
1395
1396 if (__ub_state_stat(sc, cmd) != 0)
1397 return;
1da177e4
LT
1398
1399 cmd->stat_count = 0;
1400 cmd->state = UB_CMDST_STAT;
1872bceb
PZ
1401}
1402
1403/*
1404 * Factorization helper for the command state machine:
1405 * Submit a CSW read and go to STAT state with counter (along [C] path).
1406 */
1407static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1408{
1409
1410 if (++cmd->stat_count >= 4) {
1411 ub_state_sense(sc, cmd);
1412 return;
1413 }
1414
1415 if (__ub_state_stat(sc, cmd) != 0)
1416 return;
1417
1418 cmd->state = UB_CMDST_STAT;
1da177e4
LT
1419}
1420
1421/*
1422 * Factorization helper for the command state machine:
1423 * Submit a REQUEST SENSE and go to SENSE state.
1424 */
1425static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1426{
1427 struct ub_scsi_cmd *scmd;
a1cf96ef 1428 struct scatterlist *sg;
1da177e4
LT
1429 int rc;
1430
1431 if (cmd->cdb[0] == REQUEST_SENSE) {
1432 rc = -EPIPE;
1433 goto error;
1434 }
1435
1436 scmd = &sc->top_rqs_cmd;
a1cf96ef 1437 memset(scmd, 0, sizeof(struct ub_scsi_cmd));
1da177e4
LT
1438 scmd->cdb[0] = REQUEST_SENSE;
1439 scmd->cdb[4] = UB_SENSE_SIZE;
1440 scmd->cdb_len = 6;
1441 scmd->dir = UB_DIR_READ;
1442 scmd->state = UB_CMDST_INIT;
a1cf96ef
PZ
1443 scmd->nsg = 1;
1444 sg = &scmd->sgv[0];
4f33a9d9 1445 sg_init_table(sg, UB_MAX_REQ_SG);
642f1490
JA
1446 sg_set_page(sg, virt_to_page(sc->top_sense), UB_SENSE_SIZE,
1447 (unsigned long)sc->top_sense & (PAGE_SIZE-1));
1da177e4 1448 scmd->len = UB_SENSE_SIZE;
f4800078 1449 scmd->lun = cmd->lun;
1da177e4
LT
1450 scmd->done = ub_top_sense_done;
1451 scmd->back = cmd;
1452
1453 scmd->tag = sc->tagcnt++;
1454
1455 cmd->state = UB_CMDST_SENSE;
1da177e4
LT
1456
1457 ub_cmdq_insert(sc, scmd);
1458 return;
1459
1460error:
1461 ub_state_done(sc, cmd, rc);
1462}
1463
1464/*
1465 * A helper for the command's state machine:
1466 * Submit a stall clear.
1467 */
1468static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
1469 int stalled_pipe)
1470{
1471 int endp;
1472 struct usb_ctrlrequest *cr;
1473 int rc;
1474
1475 endp = usb_pipeendpoint(stalled_pipe);
1476 if (usb_pipein (stalled_pipe))
1477 endp |= USB_DIR_IN;
1478
1479 cr = &sc->work_cr;
1480 cr->bRequestType = USB_RECIP_ENDPOINT;
1481 cr->bRequest = USB_REQ_CLEAR_FEATURE;
1482 cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
1483 cr->wIndex = cpu_to_le16(endp);
1484 cr->wLength = cpu_to_le16(0);
1485
1486 UB_INIT_COMPLETION(sc->work_done);
1487
1488 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1489 (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
1da177e4
LT
1490
1491 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1492 ub_complete(&sc->work_done);
1493 return rc;
1494 }
1495
1496 sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
1497 add_timer(&sc->work_timer);
1498 return 0;
1499}
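/*
 * The control request built above is the standard CLEAR_FEATURE(ENDPOINT_HALT)
 * recovery, roughly an asynchronous counterpart of usb_clear_halt(): it rides
 * on the work URB so the state machine keeps running, and the data toggle is
 * reset separately via usb_reset_endpoint() in the completion paths above.
 */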
1500
1501/*
1502 */
1503static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
1504{
a1cf96ef 1505 unsigned char *sense = sc->top_sense;
1da177e4
LT
1506 struct ub_scsi_cmd *cmd;
1507
1da177e4
LT
1508 /*
1509 * Find the command which triggered the unit attention or a check,
1510 * save the sense into it, and advance its state machine.
1511 */
1512 if ((cmd = ub_cmdq_peek(sc)) == NULL) {
1513 printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
1514 return;
1515 }
1516 if (cmd != scmd->back) {
1517 printk(KERN_WARNING "%s: "
f4800078
PZ
1518 "sense done for wrong command 0x%x\n",
1519 sc->name, cmd->tag);
1da177e4
LT
1520 return;
1521 }
1522 if (cmd->state != UB_CMDST_SENSE) {
9029b174 1523 printk(KERN_WARNING "%s: sense done with bad cmd state %d\n",
f4800078 1524 sc->name, cmd->state);
1da177e4
LT
1525 return;
1526 }
1527
952ba222
PZ
1528 /*
1529 * Ignoring scmd->act_len, because the buffer was pre-zeroed.
1530 */
1da177e4
LT
1531 cmd->key = sense[2] & 0x0F;
1532 cmd->asc = sense[12];
1533 cmd->ascq = sense[13];
1534
1535 ub_scsi_urb_compl(sc, cmd);
1536}
1537
2c26c9e6
PZ
1538/*
1539 * Reset management
1540 */
1541
2c2e4a2e 1542static void ub_reset_enter(struct ub_dev *sc, int try)
2c26c9e6
PZ
1543{
1544
1545 if (sc->reset) {
1546 /* This happens often on multi-LUN devices. */
1547 return;
1548 }
2c2e4a2e 1549 sc->reset = try + 1;
2c26c9e6
PZ
1550
1551#if 0 /* Not needed because the disconnect waits for us. */
1552 unsigned long flags;
1553 spin_lock_irqsave(&ub_lock, flags);
1554 sc->openc++;
1555 spin_unlock_irqrestore(&ub_lock, flags);
1556#endif
1557
1558#if 0 /* We let them stop themselves. */
2c26c9e6 1559 struct ub_lun *lun;
a69228de 1560 list_for_each_entry(lun, &sc->luns, link) {
2c26c9e6
PZ
1561 blk_stop_queue(lun->disk->queue);
1562 }
1563#endif
1564
1565 schedule_work(&sc->reset_work);
1566}
1567
c4028958 1568static void ub_reset_task(struct work_struct *work)
2c26c9e6 1569{
c4028958 1570 struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
2c26c9e6 1571 unsigned long flags;
2c26c9e6 1572 struct ub_lun *lun;
011b15df 1573 int rc;
2c26c9e6
PZ
1574
1575 if (!sc->reset) {
1576 printk(KERN_WARNING "%s: Running reset unrequested\n",
1577 sc->name);
1578 return;
1579 }
1580
1581 if (atomic_read(&sc->poison)) {
b5600339 1582 ;
2c2e4a2e
PZ
1583 } else if ((sc->reset & 1) == 0) {
1584 ub_sync_reset(sc);
1585 msleep(700); /* usb-storage sleeps 6s (!) */
1586 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
1587 ub_probe_clear_stall(sc, sc->send_bulk_pipe);
2c26c9e6 1588 } else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
b5600339 1589 ;
2c26c9e6 1590 } else {
011b15df
AS
1591 rc = usb_lock_device_for_reset(sc->dev, sc->intf);
1592 if (rc < 0) {
2c26c9e6
PZ
1593 printk(KERN_NOTICE
1594 "%s: usb_lock_device_for_reset failed (%d)\n",
011b15df 1595 sc->name, rc);
2c26c9e6
PZ
1596 } else {
1597 rc = usb_reset_device(sc->dev);
1598 if (rc < 0) {
1599 printk(KERN_NOTICE "%s: "
 1600 "usb_reset_device failed (%d)\n",
1601 sc->name, rc);
1602 }
011b15df 1603 usb_unlock_device(sc->dev);
2c26c9e6
PZ
1604 }
1605 }
1606
1607 /*
1608 * In theory, no commands can be running while reset is active,
1609 * so nobody can ask for another reset, and so we do not need any
1610 * queues of resets or anything. We do need a spinlock though,
1611 * to interact with block layer.
 1612 * to interact with the block layer.
65b4fe55 1613 spin_lock_irqsave(sc->lock, flags);
2c26c9e6
PZ
1614 sc->reset = 0;
1615 tasklet_schedule(&sc->tasklet);
a69228de 1616 list_for_each_entry(lun, &sc->luns, link) {
2c26c9e6
PZ
1617 blk_start_queue(lun->disk->queue);
1618 }
1619 wake_up(&sc->reset_wait);
65b4fe55 1620 spin_unlock_irqrestore(sc->lock, flags);
2c26c9e6
PZ
1621}
1622
d73b7aff
PZ
1623/*
1624 * XXX Reset brackets are too much hassle to implement, so just stub them
1625 * in order to prevent forced unbinding (which deadlocks solid when our
1626 * ->disconnect method waits for the reset to complete and this kills keventd).
1627 *
1628 * XXX Tell Alan to move usb_unlock_device inside of usb_reset_device,
1629 * or else the post_reset is invoked, and restats I/O on a locked device.
1630 */
1631static int ub_pre_reset(struct usb_interface *iface) {
1632 return 0;
1633}
1634
1635static int ub_post_reset(struct usb_interface *iface) {
1636 return 0;
1637}
1638
1da177e4
LT
1639/*
1640 * This is called from a process context.
1641 */
f4800078 1642static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
1da177e4
LT
1643{
1644
f4800078 1645 lun->readonly = 0; /* XXX Query this from the device */
1da177e4 1646
f4800078
PZ
1647 lun->capacity.nsec = 0;
1648 lun->capacity.bsize = 512;
1649 lun->capacity.bshift = 0;
1da177e4 1650
f4800078 1651 if (ub_sync_tur(sc, lun) != 0)
1da177e4 1652 return; /* Not ready */
f4800078 1653 lun->changed = 0;
1da177e4 1654
f4800078 1655 if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1da177e4
LT
1656 /*
1657 * The retry here means something is wrong, either with the
1658 * device, with the transport, or with our code.
1659 * We keep this because sd.c has retries for capacity.
1660 */
f4800078
PZ
1661 if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1662 lun->capacity.nsec = 0;
1663 lun->capacity.bsize = 512;
1664 lun->capacity.bshift = 0;
1da177e4
LT
1665 }
1666 }
1667}
1668
1669/*
 1670 * The open function.
1671 * This is mostly needed to keep refcounting, but also to support
1672 * media checks on removable media drives.
1673 */
4099a966 1674static int ub_bd_open(struct block_device *bdev, fmode_t mode)
1da177e4 1675{
4099a966 1676 struct ub_lun *lun = bdev->bd_disk->private_data;
41fea55e 1677 struct ub_dev *sc = lun->udev;
1da177e4
LT
1678 unsigned long flags;
1679 int rc;
1680
1da177e4
LT
1681 spin_lock_irqsave(&ub_lock, flags);
1682 if (atomic_read(&sc->poison)) {
1683 spin_unlock_irqrestore(&ub_lock, flags);
1684 return -ENXIO;
1685 }
1686 sc->openc++;
1687 spin_unlock_irqrestore(&ub_lock, flags);
1688
f4800078 1689 if (lun->removable || lun->readonly)
4099a966 1690 check_disk_change(bdev);
1da177e4
LT
1691
1692 /*
1693 * The sd.c considers ->media_present and ->changed not equivalent,
1694 * under some pretty murky conditions (a failure of READ CAPACITY).
1695 * We may need it one day.
1696 */
4099a966 1697 if (lun->removable && lun->changed && !(mode & FMODE_NDELAY)) {
1da177e4
LT
1698 rc = -ENOMEDIUM;
1699 goto err_open;
1700 }
1701
4099a966 1702 if (lun->readonly && (mode & FMODE_WRITE)) {
1da177e4
LT
1703 rc = -EROFS;
1704 goto err_open;
1705 }
1706
1707 return 0;
1708
1709err_open:
1710 ub_put(sc);
1711 return rc;
1712}
1713
6e9624b8
AB
1714static int ub_bd_unlocked_open(struct block_device *bdev, fmode_t mode)
1715{
1716 int ret;
1717
1718 lock_kernel();
1719 ret = ub_bd_open(bdev, mode);
1720 unlock_kernel();
1721
1722 return ret;
1723}
1724
1725
1da177e4
LT
1726/*
1727 */
4099a966 1728static int ub_bd_release(struct gendisk *disk, fmode_t mode)
1da177e4 1729{
f4800078
PZ
1730 struct ub_lun *lun = disk->private_data;
1731 struct ub_dev *sc = lun->udev;
1da177e4 1732
6e9624b8 1733 lock_kernel();
1da177e4 1734 ub_put(sc);
6e9624b8
AB
1735 unlock_kernel();
1736
1da177e4
LT
1737 return 0;
1738}
1739
1740/*
1741 * The ioctl interface.
1742 */
4099a966 1743static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
1da177e4
LT
1744 unsigned int cmd, unsigned long arg)
1745{
4099a966 1746 struct gendisk *disk = bdev->bd_disk;
1da177e4 1747 void __user *usermem = (void __user *) arg;
8a6cfeb6
AB
1748 int ret;
1749
1750 lock_kernel();
1751 ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
1752 unlock_kernel();
1da177e4 1753
8a6cfeb6 1754 return ret;
1da177e4
LT
1755}
1756
1757/*
9029b174 1758 * This is called by check_disk_change if we reported a media change.
1da177e4
LT
 1759 * The main objective here is to discover the features of the media such as
1760 * the capacity, read-only status, etc. USB storage generally does not
1761 * need to be spun up, but if we needed it, this would be the place.
1762 *
1763 * This call can sleep.
1764 *
1765 * The return code is not used.
1766 */
1767static int ub_bd_revalidate(struct gendisk *disk)
1768{
f4800078
PZ
1769 struct ub_lun *lun = disk->private_data;
1770
1771 ub_revalidate(lun->udev, lun);
1da177e4
LT
1772
1773 /* XXX Support sector size switching like in sr.c */
e1defc4f 1774 blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
f4800078
PZ
1775 set_capacity(disk, lun->capacity.nsec);
1776 // set_disk_ro(sdkp->disk, lun->readonly);
1da177e4
LT
1777
1778 return 0;
1779}
1780
1781/*
1782 * The check is called by the block layer to verify if the media
1783 * is still available. It is supposed to be harmless, lightweight and
1784 * non-intrusive in case the media was not changed.
1785 *
1786 * This call can sleep.
1787 *
1788 * The return code is bool!
1789 */
1790static int ub_bd_media_changed(struct gendisk *disk)
1791{
f4800078 1792 struct ub_lun *lun = disk->private_data;
1da177e4 1793
f4800078 1794 if (!lun->removable)
1da177e4
LT
1795 return 0;
1796
1797 /*
 1798 * We always clean the check condition after every command, so this is
 1799 * not as dangerous as it looks. If the TEST_UNIT_READY fails here,
 1800 * the device is genuinely not ready, and operator or software
 1801 * intervention is required. One dangerous item might be a drive which
 1802 * spins itself down, and come the time to write dirty pages, this
 1803 * will fail, then the block layer discards the data. Since we never
1804 * spin drives up, such devices simply cannot be used with ub anyway.
1805 */
f4800078
PZ
1806 if (ub_sync_tur(lun->udev, lun) != 0) {
1807 lun->changed = 1;
1da177e4
LT
1808 return 1;
1809 }
1810
f4800078 1811 return lun->changed;
1da177e4
LT
1812}
1813
83d5cde4 1814static const struct block_device_operations ub_bd_fops = {
1da177e4 1815 .owner = THIS_MODULE,
6e9624b8 1816 .open = ub_bd_unlocked_open,
4099a966 1817 .release = ub_bd_release,
8a6cfeb6 1818 .ioctl = ub_bd_ioctl,
1da177e4
LT
1819 .media_changed = ub_bd_media_changed,
1820 .revalidate_disk = ub_bd_revalidate,
1821};
1822
1823/*
1824 * Common ->done routine for commands executed synchronously.
1825 */
1826static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1827{
1828 struct completion *cop = cmd->back;
1829 complete(cop);
1830}
1831
1832/*
1833 * Test if the device has a check condition on it, synchronously.
1834 */
f4800078 1835static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
1da177e4
LT
1836{
1837 struct ub_scsi_cmd *cmd;
1838 enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
1839 unsigned long flags;
1840 struct completion compl;
1841 int rc;
1842
1843 init_completion(&compl);
1844
1845 rc = -ENOMEM;
29da7937 1846 if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1da177e4 1847 goto err_alloc;
1da177e4
LT
1848
1849 cmd->cdb[0] = TEST_UNIT_READY;
1850 cmd->cdb_len = 6;
1851 cmd->dir = UB_DIR_NONE;
1852 cmd->state = UB_CMDST_INIT;
f4800078 1853 cmd->lun = lun; /* This may be NULL, but that's ok */
1da177e4
LT
1854 cmd->done = ub_probe_done;
1855 cmd->back = &compl;
1856
65b4fe55 1857 spin_lock_irqsave(sc->lock, flags);
1da177e4
LT
1858 cmd->tag = sc->tagcnt++;
1859
1860 rc = ub_submit_scsi(sc, cmd);
65b4fe55 1861 spin_unlock_irqrestore(sc->lock, flags);
1da177e4 1862
b5600339 1863 if (rc != 0)
1da177e4 1864 goto err_submit;
1da177e4
LT
1865
1866 wait_for_completion(&compl);
1867
1868 rc = cmd->error;
1869
1870 if (rc == -EIO && cmd->key != 0) /* Retries for benh's key */
1871 rc = cmd->key;
1872
1873err_submit:
1874 kfree(cmd);
1875err_alloc:
1876 return rc;
1877}
1878
1879/*
1880 * Read the SCSI capacity synchronously (for probing).
1881 */
f4800078
PZ
1882static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
1883 struct ub_capacity *ret)
1da177e4
LT
1884{
1885 struct ub_scsi_cmd *cmd;
a1cf96ef 1886 struct scatterlist *sg;
1da177e4
LT
1887 char *p;
1888 enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
1889 unsigned long flags;
1890 unsigned int bsize, shift;
1891 unsigned long nsec;
1892 struct completion compl;
1893 int rc;
1894
1895 init_completion(&compl);
1896
1897 rc = -ENOMEM;
29da7937 1898 if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1da177e4 1899 goto err_alloc;
1da177e4
LT
1900 p = (char *)cmd + sizeof(struct ub_scsi_cmd);
1901
1902 cmd->cdb[0] = 0x25;		/* READ CAPACITY(10) */
1903 cmd->cdb_len = 10;
1904 cmd->dir = UB_DIR_READ;
1905 cmd->state = UB_CMDST_INIT;
a1cf96ef
PZ
1906 cmd->nsg = 1;
1907 sg = &cmd->sgv[0];
4f33a9d9 1908 sg_init_table(sg, UB_MAX_REQ_SG);
642f1490 1909 sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1));
1da177e4 1910 cmd->len = 8;
f4800078 1911 cmd->lun = lun;
1da177e4
LT
1912 cmd->done = ub_probe_done;
1913 cmd->back = &compl;
1914
65b4fe55 1915 spin_lock_irqsave(sc->lock, flags);
1da177e4
LT
1916 cmd->tag = sc->tagcnt++;
1917
1918 rc = ub_submit_scsi(sc, cmd);
65b4fe55 1919 spin_unlock_irqrestore(sc->lock, flags);
1da177e4 1920
b5600339 1921 if (rc != 0)
1da177e4 1922 goto err_submit;
1da177e4
LT
1923
1924 wait_for_completion(&compl);
1925
1926 if (cmd->error != 0) {
1da177e4
LT
1927 rc = -EIO;
1928 goto err_read;
1929 }
1930 if (cmd->act_len != 8) {
1da177e4
LT
1931 rc = -EIO;
1932 goto err_read;
1933 }
1934
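	/*
	 * Editor's note (not in the original): the READ CAPACITY(10) reply
	 * is 8 bytes, both fields big-endian:
	 *   bytes 0..3 - LBA of the last addressable block (hence the +1 below)
	 *   bytes 4..7 - block size in bytes
	 */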
1935 /* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
1936 nsec = be32_to_cpu(*(__be32 *)p) + 1;
1937 bsize = be32_to_cpu(*(__be32 *)(p + 4));
1938 switch (bsize) {
1939 case 512: shift = 0; break;
1940 case 1024: shift = 1; break;
1941 case 2048: shift = 2; break;
1942 case 4096: shift = 3; break;
1943 default:
1da177e4
LT
1944 rc = -EDOM;
1945 goto err_inv_bsize;
1946 }
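	/*
	 * Editor's note (not in the original): capacity.nsec is kept in
	 * 512-byte sectors, which is the unit the block layer expects.
	 * As an illustration with made-up numbers, a device reporting
	 * 300000 blocks of 2048 bytes ends up with
	 * nsec = 300000 << 2 = 1200000 sectors.
	 */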
1947
1948 ret->bsize = bsize;
1949 ret->bshift = shift;
1950 ret->nsec = nsec << shift;
1951 rc = 0;
1952
1953err_inv_bsize:
1954err_read:
1955err_submit:
1956 kfree(cmd);
1957err_alloc:
1958 return rc;
1959}
1960
1961/*
 * Completion callback for the synchronous probe-time control URBs below.
1962 */
7d12e780 1963static void ub_probe_urb_complete(struct urb *urb)
1da177e4
LT
1964{
1965 struct completion *cop = urb->context;
1966 complete(cop);
1967}
1968
1969static void ub_probe_timeout(unsigned long arg)
1970{
1971 struct completion *cop = (struct completion *) arg;
1972 complete(cop);
1973}
1974
2c2e4a2e
PZ
1975/*
1976 * Reset with a Bulk reset.
1977 */
1978static int ub_sync_reset(struct ub_dev *sc)
1979{
1980 int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
1981 struct usb_ctrlrequest *cr;
1982 struct completion compl;
1983 struct timer_list timer;
1984 int rc;
1985
1986 init_completion(&compl);
1987
1988 cr = &sc->work_cr;
1989 cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
1990 cr->bRequest = US_BULK_RESET_REQUEST;
1991 cr->wValue = cpu_to_le16(0);
1992 cr->wIndex = cpu_to_le16(ifnum);
1993 cr->wLength = cpu_to_le16(0);
1994
1995 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1996 (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
2c2e4a2e
PZ
1997
1998 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
1999 printk(KERN_WARNING
2000 "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
2001 return rc;
2002 }
2003
2004 init_timer(&timer);
2005 timer.function = ub_probe_timeout;
2006 timer.data = (unsigned long) &compl;
2007 timer.expires = jiffies + UB_CTRL_TIMEOUT;
2008 add_timer(&timer);
2009
2010 wait_for_completion(&compl);
2011
2012 del_timer_sync(&timer);
2013 usb_kill_urb(&sc->work_urb);
2014
2015 return sc->work_urb.status;
2016}
2017
f4800078
PZ
2018/*
2019 * Get number of LUNs by the way of Bulk GetMaxLUN command.
2020 */
2021static int ub_sync_getmaxlun(struct ub_dev *sc)
2022{
2023 int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
2024 unsigned char *p;
2025 enum { ALLOC_SIZE = 1 };
2026 struct usb_ctrlrequest *cr;
2027 struct completion compl;
2028 struct timer_list timer;
2029 int nluns;
2030 int rc;
2031
2032 init_completion(&compl);
2033
2034 rc = -ENOMEM;
2035 if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
2036 goto err_alloc;
2037 *p = 55;	/* sentinel; if the reply leaves this untouched, treat it as no answer */
2038
2039 cr = &sc->work_cr;
2040 cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
2041 cr->bRequest = US_BULK_GET_MAX_LUN;
2042 cr->wValue = cpu_to_le16(0);
2043 cr->wIndex = cpu_to_le16(ifnum);
2044 cr->wLength = cpu_to_le16(1);
2045
2046 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
2047 (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
f4800078 2048
b5600339 2049 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0)
f4800078 2050 goto err_submit;
f4800078
PZ
2051
2052 init_timer(&timer);
2053 timer.function = ub_probe_timeout;
2054 timer.data = (unsigned long) &compl;
2055 timer.expires = jiffies + UB_CTRL_TIMEOUT;
2056 add_timer(&timer);
2057
2058 wait_for_completion(&compl);
2059
2060 del_timer_sync(&timer);
2061 usb_kill_urb(&sc->work_urb);
2062
b5600339 2063 if ((rc = sc->work_urb.status) < 0)
64bd8453 2064 goto err_io;
64bd8453 2065
f4800078 2066 if (sc->work_urb.actual_length != 1) {
f4800078
PZ
2067 nluns = 0;
2068 } else {
2069 if ((nluns = *p) == 55) {
2070 nluns = 0;
2071 } else {
2072 /* GetMaxLUN returns the maximum LUN number */
2073 nluns += 1;
2074 if (nluns > UB_MAX_LUNS)
2075 nluns = UB_MAX_LUNS;
2076 }
f4800078
PZ
2077 }
2078
2079 kfree(p);
2080 return nluns;
2081
64bd8453 2082err_io:
f4800078
PZ
2083err_submit:
2084 kfree(p);
2085err_alloc:
2086 return rc;
2087}
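/*
 * Editor's note (not part of the original source): Get Max LUN is the
 * class-specific control request defined by the USB Mass Storage Bulk-Only
 * Transport specification.  The device answers with the highest LUN number
 * it supports (zero-based), which is why 1 is added above; a reply of the
 * wrong length, or one that leaves the sentinel byte untouched, is treated
 * as "no answer" and ub_probe() then falls back to a single LUN.
 */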
2088
1da177e4
LT
2089/*
2090 * Clear initial stalls.
2091 */
2092static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
2093{
2094 int endp;
2095 struct usb_ctrlrequest *cr;
2096 struct completion compl;
2097 struct timer_list timer;
2098 int rc;
2099
2100 init_completion(&compl);
2101
2102 endp = usb_pipeendpoint(stalled_pipe);
2103 if (usb_pipein (stalled_pipe))
2104 endp |= USB_DIR_IN;
2105
2106 cr = &sc->work_cr;
2107 cr->bRequestType = USB_RECIP_ENDPOINT;
2108 cr->bRequest = USB_REQ_CLEAR_FEATURE;
2109 cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
2110 cr->wIndex = cpu_to_le16(endp);
2111 cr->wLength = cpu_to_le16(0);
2112
2113 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
2114 (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
1da177e4
LT
2115
2116 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
2117 printk(KERN_WARNING
2118 "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
2119 return rc;
2120 }
2121
2122 init_timer(&timer);
2123 timer.function = ub_probe_timeout;
2124 timer.data = (unsigned long) &compl;
2125 timer.expires = jiffies + UB_CTRL_TIMEOUT;
2126 add_timer(&timer);
2127
2128 wait_for_completion(&compl);
2129
2130 del_timer_sync(&timer);
2131 usb_kill_urb(&sc->work_urb);
2132
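	/*
	 * Editor's note (not in the original): CLEAR_FEATURE(ENDPOINT_HALT)
	 * clears the halt and resets the data toggle on the device side;
	 * usb_reset_endpoint() below resets the host controller's copy of
	 * the toggle so both ends stay in sync.
	 */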
3444b26a 2133 usb_reset_endpoint(sc->dev, endp);
1da177e4
LT
2134
2135 return 0;
2136}
2137
2138/*
2139 * Get the pipe settings.
2140 */
2141static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
2142 struct usb_interface *intf)
2143{
2144 struct usb_host_interface *altsetting = intf->cur_altsetting;
2145 struct usb_endpoint_descriptor *ep_in = NULL;
2146 struct usb_endpoint_descriptor *ep_out = NULL;
2147 struct usb_endpoint_descriptor *ep;
2148 int i;
2149
2150 /*
2151 * Find the endpoints we need.
2152 * We are expecting a minimum of 2 endpoints - in and out (bulk).
2153 * We will ignore any others.
2154 */
2155 for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
2156 ep = &altsetting->endpoint[i].desc;
2157
2158 /* Is it a BULK endpoint? */
db5e6df1 2159 if (usb_endpoint_xfer_bulk(ep)) {
1da177e4 2160 /* BULK in or out? */
db5e6df1 2161 if (usb_endpoint_dir_in(ep)) {
643616e6
PZ
2162 if (ep_in == NULL)
2163 ep_in = ep;
2164 } else {
2165 if (ep_out == NULL)
2166 ep_out = ep;
2167 }
1da177e4
LT
2168 }
2169 }
2170
2171 if (ep_in == NULL || ep_out == NULL) {
9029b174 2172 printk(KERN_NOTICE "%s: failed endpoint check\n", sc->name);
2c26c9e6 2173 return -ENODEV;
1da177e4
LT
2174 }
2175
2176 /* Calculate and store the pipe values */
2177 sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
2178 sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
2179 sc->send_bulk_pipe = usb_sndbulkpipe(dev,
db5e6df1 2180 usb_endpoint_num(ep_out));
1da177e4 2181 sc->recv_bulk_pipe = usb_rcvbulkpipe(dev,
db5e6df1 2182 usb_endpoint_num(ep_in));
1da177e4
LT
2183
2184 return 0;
2185}
2186
2187/*
2188 * Probing is done in process context, which allows us to cheat
2189 * and not build a state machine for the discovery.
2190 */
2191static int ub_probe(struct usb_interface *intf,
2192 const struct usb_device_id *dev_id)
2193{
2194 struct ub_dev *sc;
f4800078 2195 int nluns;
1da177e4
LT
2196 int rc;
2197 int i;
2198
a00828e9
PZ
2199 if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
2200 return -ENXIO;
2201
1da177e4 2202 rc = -ENOMEM;
29da7937 2203 if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
1da177e4 2204 goto err_core;
65b4fe55 2205 sc->lock = ub_next_lock();
f4800078 2206 INIT_LIST_HEAD(&sc->luns);
1da177e4
LT
2207 usb_init_urb(&sc->work_urb);
2208 tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
2209 atomic_set(&sc->poison, 0);
c4028958 2210 INIT_WORK(&sc->reset_work, ub_reset_task);
2c26c9e6 2211 init_waitqueue_head(&sc->reset_wait);
1da177e4
LT
2212
2213 init_timer(&sc->work_timer);
2214 sc->work_timer.data = (unsigned long) sc;
2215 sc->work_timer.function = ub_urb_timeout;
2216
2217 ub_init_completion(&sc->work_done);
2218 sc->work_done.done = 1; /* A little yuk, but oh well... */
2219
1da177e4
LT
2220 sc->dev = interface_to_usbdev(intf);
2221 sc->intf = intf;
2222 // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
1da177e4
LT
2223 usb_set_intfdata(intf, sc);
2224 usb_get_dev(sc->dev);
77ef6c4d
PZ
2225 /*
2226 * Since we give the interface struct to the block level through
2227 * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
2228 * oopses on close after a disconnect (kernels 2.6.16 and up).
2229 */
2230 usb_get_intf(sc->intf);
1da177e4 2231
f4800078
PZ
2232 snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
2233 sc->dev->bus->busnum, sc->dev->devnum);
2234
1da177e4
LT
2235 /* XXX Verify that we can handle the device (from descriptors) */
2236
2c26c9e6
PZ
2237 if (ub_get_pipes(sc, sc->dev, intf) != 0)
2238 goto err_dev_desc;
1da177e4 2239
1da177e4
LT
2240 /*
2241 * At this point, all USB initialization is done; move on to the upper layer.
2242 * We really hate halfway initialized structures, so from the
2243 * invariants perspective, this ub_dev is fully constructed at
2244 * this point.
2245 */
2246
2247 /*
2248 * This is needed to clear toggles. It is a problem only if we do
2249 * `rmmod ub && modprobe ub` without disconnects, but we like to be able to do that.
2250 */
c6c88834 2251#if 0 /* iPod Mini fails if we do this (big white iPod works) */
1da177e4
LT
2252 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
2253 ub_probe_clear_stall(sc, sc->send_bulk_pipe);
c6c88834 2254#endif
1da177e4
LT
2255
2256 /*
2257 * The way this is used by the startup code is a little specific.
2258 * A SCSI check causes a USB stall. Our common case code sees it
2259 * and clears the check, after which the device is ready for use.
2260 * But if a check was not present, any command other than
2261 * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
2262 *
2263 * If we neglect to clear the SCSI check, the first real command fails
2264 * (which is the capacity readout). We clear that and retry, but why
2265 * cause spurious retries for no reason?
2266 *
2267 * Revalidation may start with its own TEST_UNIT_READY, but that one
2268 * has to succeed, so we clear checks with an additional one here.
2269 * In any case it's not our business how revalidation is implemented.
2270 */
b5600339 2271 for (i = 0; i < 3; i++) { /* Retries for the schwag key from KS'04 */
f4800078 2272 if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;	/* success or hard error */
1da177e4
LT
2273 if (rc != 0x6) break;	/* retry only on UNIT ATTENTION (sense key 0x6) */
2274 msleep(10);
2275 }
2276
f4800078
PZ
2277 nluns = 1;
2278 for (i = 0; i < 3; i++) {
11a223ae 2279 if ((rc = ub_sync_getmaxlun(sc)) < 0)
f4800078 2280 break;
f4800078
PZ
2281 if (rc != 0) {
2282 nluns = rc;
2283 break;
2284 }
9f793d2c 2285 msleep(100);
f4800078 2286 }
1da177e4 2287
f4800078
PZ
2288 for (i = 0; i < nluns; i++) {
2289 ub_probe_lun(sc, i);
2290 }
2291 return 0;
2292
2c26c9e6 2293err_dev_desc:
f4800078 2294 usb_set_intfdata(intf, NULL);
77ef6c4d 2295 usb_put_intf(sc->intf);
f4800078
PZ
2296 usb_put_dev(sc->dev);
2297 kfree(sc);
2298err_core:
2299 return rc;
2300}
2301
2302static int ub_probe_lun(struct ub_dev *sc, int lnum)
2303{
2304 struct ub_lun *lun;
165125e1 2305 struct request_queue *q;
f4800078
PZ
2306 struct gendisk *disk;
2307 int rc;
2308
2309 rc = -ENOMEM;
29da7937 2310 if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
f4800078 2311 goto err_alloc;
f4800078
PZ
2312 lun->num = lnum;
2313
2314 rc = -ENOSR;
2315 if ((lun->id = ub_id_get()) == -1)
2316 goto err_id;
2317
2318 lun->udev = sc;
f4800078
PZ
2319
2320 snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
2321 lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);
2322
2323 lun->removable = 1; /* XXX Query this from the device */
2324 lun->changed = 1; /* only ub_revalidate clears this */
f4800078 2325 ub_revalidate(sc, lun);
1da177e4 2326
1da177e4 2327 rc = -ENOMEM;
4fb729f5 2328 if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
1da177e4
LT
2329 goto err_diskalloc;
2330
f4800078 2331 sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
1da177e4 2332 disk->major = UB_MAJOR;
4fb729f5 2333 disk->first_minor = lun->id * UB_PARTS_PER_LUN;
1da177e4 2334 disk->fops = &ub_bd_fops;
f4800078 2335 disk->private_data = lun;
64bd8453 2336 disk->driverfs_dev = &sc->intf->dev;
1da177e4
LT
2337
2338 rc = -ENOMEM;
65b4fe55 2339 if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
1da177e4
LT
2340 goto err_blkqinit;
2341
2342 disk->queue = q;
2343
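	/*
	 * Editor's note (not in the original): BLK_BOUNCE_HIGH below makes
	 * the block layer bounce highmem pages into low memory before the
	 * driver sees them, and the segment/sector limits mirror what one
	 * ub command can carry (at most UB_MAX_REQ_SG scatterlist entries).
	 */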
f4800078 2344 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
8a78362c 2345 blk_queue_max_segments(q, UB_MAX_REQ_SG);
f4800078 2346 blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */
086fa5ff 2347 blk_queue_max_hw_sectors(q, UB_MAX_SECTORS);
e1defc4f 2348 blk_queue_logical_block_size(q, lun->capacity.bsize);
1da177e4 2349
688e9fb1 2350 lun->disk = disk;
f4800078 2351 q->queuedata = lun;
688e9fb1 2352 list_add(&lun->link, &sc->luns);
1da177e4 2353
f4800078
PZ
2354 set_capacity(disk, lun->capacity.nsec);
2355 if (lun->removable)
1da177e4
LT
2356 disk->flags |= GENHD_FL_REMOVABLE;
2357
2358 add_disk(disk);
2359
2360 return 0;
2361
2362err_blkqinit:
2363 put_disk(disk);
2364err_diskalloc:
f4800078 2365 ub_id_put(lun->id);
1da177e4 2366err_id:
f4800078
PZ
2367 kfree(lun);
2368err_alloc:
1da177e4
LT
2369 return rc;
2370}
2371
2372static void ub_disconnect(struct usb_interface *intf)
2373{
2374 struct ub_dev *sc = usb_get_intfdata(intf);
f4800078 2375 struct ub_lun *lun;
1da177e4
LT
2376 unsigned long flags;
2377
2378 /*
2379 * Prevent ub_bd_release from pulling the rug from under us.
2380 * XXX This is starting to look like a kref.
2381 * XXX Why not take this ref at probe time?
2382 */
2383 spin_lock_irqsave(&ub_lock, flags);
2384 sc->openc++;
2385 spin_unlock_irqrestore(&ub_lock, flags);
2386
2387 /*
9029b174 2388 * Fence stall clearings, operations triggered by unlinks and so on.
1da177e4
LT
2389 * We do not attempt to unlink any URBs, because we do not trust the
2390 * unlink paths in HC drivers. Also, we get -84 upon disconnect anyway.
2391 */
2392 atomic_set(&sc->poison, 1);
2393
2c26c9e6
PZ
2394 /*
2395 * Wait for reset to end, if any.
2396 */
2397 wait_event(sc->reset_wait, !sc->reset);
2398
1da177e4
LT
2399 /*
2400 * Blow away queued commands.
2401 *
2402 * Actually, this never works, because before we get here
2403 * the HCD terminates outstanding URB(s). That causes our
2404 * SCSI command queue to advance, commands to fail to submit,
2405 * and the whole queue to drain. So, we just use this code to
2406 * print warnings.
2407 */
65b4fe55 2408 spin_lock_irqsave(sc->lock, flags);
1da177e4
LT
2409 {
2410 struct ub_scsi_cmd *cmd;
2411 int cnt = 0;
2c26c9e6 2412 while ((cmd = ub_cmdq_peek(sc)) != NULL) {
1da177e4
LT
2413 cmd->error = -ENOTCONN;
2414 cmd->state = UB_CMDST_DONE;
1da177e4
LT
2415 ub_cmdq_pop(sc);
2416 (*cmd->done)(sc, cmd);
2417 cnt++;
2418 }
2419 if (cnt != 0) {
2420 printk(KERN_WARNING "%s: "
2421 "%d was queued after shutdown\n", sc->name, cnt);
2422 }
2423 }
65b4fe55 2424 spin_unlock_irqrestore(sc->lock, flags);
1da177e4
LT
2425
2426 /*
2427 * Unregister the upper layer.
2428 */
a69228de 2429 list_for_each_entry(lun, &sc->luns, link) {
688e9fb1 2430 del_gendisk(lun->disk);
f4800078
PZ
2431 /*
2432 * I wish I could do:
75ad23bc 2433 * queue_flag_set(QUEUE_FLAG_DEAD, q);
f4800078
PZ
2434 * As it is, we rely on our internal poisoning and let
2435 * the upper levels spin furiously, failing all the I/O.
2436 */
2437 }
1da177e4
LT
2438
2439 /*
1da177e4
LT
2440 * Testing for -EINPROGRESS is always a bug, so we are bending
2441 * the rules a little.
2442 */
65b4fe55 2443 spin_lock_irqsave(sc->lock, flags);
1da177e4
LT
2444 if (sc->work_urb.status == -EINPROGRESS) { /* janitors: ignore */
2445 printk(KERN_WARNING "%s: "
2446 "URB is active after disconnect\n", sc->name);
2447 }
65b4fe55 2448 spin_unlock_irqrestore(sc->lock, flags);
1da177e4
LT
2449
2450 /*
9029b174 2451 * There is virtually no chance that another CPU runs a timeout so long
1da177e4
LT
2452 * after ub_urb_complete should have called del_timer, but only if HCD
2453 * didn't forget to deliver a callback on unlink.
2454 */
2455 del_timer_sync(&sc->work_timer);
2456
2457 /*
2458 * At this point there must be no commands coming from anyone
2459 * and no URBs left in transit.
2460 */
2461
1da177e4
LT
2462 ub_put(sc);
2463}
2464
2465static struct usb_driver ub_driver = {
1da177e4
LT
2466 .name = "ub",
2467 .probe = ub_probe,
2468 .disconnect = ub_disconnect,
2469 .id_table = ub_usb_ids,
d73b7aff
PZ
2470 .pre_reset = ub_pre_reset,
2471 .post_reset = ub_post_reset,
1da177e4
LT
2472};
2473
2474static int __init ub_init(void)
2475{
2476 int rc;
65b4fe55
PZ
2477 int i;
2478
2479 for (i = 0; i < UB_QLOCK_NUM; i++)
2480 spin_lock_init(&ub_qlockv[i]);
1da177e4 2481
1da177e4
LT
2482 if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
2483 goto err_regblkdev;
1da177e4
LT
2484
2485 if ((rc = usb_register(&ub_driver)) != 0)
2486 goto err_register;
2487
a00828e9 2488 usb_usual_set_present(USB_US_TYPE_UB);
1da177e4
LT
2489 return 0;
2490
2491err_register:
1da177e4
LT
2492 unregister_blkdev(UB_MAJOR, DRV_NAME);
2493err_regblkdev:
2494 return rc;
2495}
2496
2497static void __exit ub_exit(void)
2498{
2499 usb_deregister(&ub_driver);
2500
1da177e4 2501 unregister_blkdev(UB_MAJOR, DRV_NAME);
a00828e9 2502 usb_usual_clear_present(USB_US_TYPE_UB);
1da177e4
LT
2503}
2504
2505module_init(ub_init);
2506module_exit(ub_exit);
2507
2508MODULE_LICENSE("GPL");