/*
 *	sd.c Copyright (C) 1992 Drew Eckhardt
 *	     Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *
 *	Linux scsi disk driver
 *		Initial versions: Drew Eckhardt
 *		Subsequent revisions: Eric Youngdale
 *	Modification history:
 *	 - Drew Eckhardt <drew@colorado.edu> original
 *	 - Eric Youngdale <eric@andante.org> add scatter-gather, multiple
 *	   outstanding request, and other enhancements.
 *	   Support loadable low-level scsi drivers.
 *	 - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
 *	   eight major numbers.
 *	 - Richard Gooch <rgooch@atnf.csiro.au> support devfs.
 *	 - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in
 *	   sd_init and cleanups.
 *	 - Alex Davis <letmein@erols.com> Fix problem where partition info
 *	   not being read in sd_open. Fix problem where removable media
 *	   could be ejected after sd_open.
 *	 - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
 *	 - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox
 *	   <willy@debian.org>, Kurt Garloff <garloff@suse.de>:
 *	   Support 32k/1M disks.
 *
 *	Logging policy (needs CONFIG_SCSI_LOGGING defined):
 *	 - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
 *	 - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
 *	 - entering sd_ioctl: SCSI_LOG_IOCTL level 1
 *	 - entering other commands: SCSI_LOG_HLQUEUE level 3
 *	Note: when the logging level is set by the user, it must be greater
 *	than the level indicated above to trigger output.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>

#include "scsi_priv.h"
#include "scsi_logging.h"
MODULE_AUTHOR("Eric Youngdale");
MODULE_DESCRIPTION("SCSI disk (sd) driver");
MODULE_LICENSE("GPL");

MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)

static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
static int  sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static int  sd_probe(struct device *);
static int  sd_remove(struct device *);
static void sd_shutdown(struct device *);
static int sd_suspend_system(struct device *);
static int sd_suspend_runtime(struct device *);
static int sd_resume(struct device *);
static void sd_rescan(struct device *);
static int sd_done(struct scsi_cmnd *);
static int sd_eh_action(struct scsi_cmnd *, int);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct device *cdev);
static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
static void sd_print_result(struct scsi_disk *, int);

static DEFINE_SPINLOCK(sd_index_lock);
static DEFINE_IDA(sd_index_ida);

/* This semaphore is used to mediate the 0->1 reference get in the
 * face of object destruction (i.e. we can't allow a get on an
 * object after last put) */
static DEFINE_MUTEX(sd_ref_mutex);

static struct kmem_cache *sd_cdb_cache;
static mempool_t *sd_cdb_pool;
static const char *sd_cache_types[] = {
	"write through", "none", "write back",
	"write back, no read (daft)"

cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
	int i, ct = -1, rcd, wce, sp;
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	static const char temp[] = "temporary ";

	if (sdp->type != TYPE_DISK)
		/* no cache control on RBC devices; theoretically they
		 * can do it, but there's probably so many exceptions
		 * it's not worth the risk */

	if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
		buf += sizeof(temp) - 1;
		sdkp->cache_override = 1;

		sdkp->cache_override = 0;

	for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
		len = strlen(sd_cache_types[i]);
		if (strncmp(sd_cache_types[i], buf, len) == 0 &&

	rcd = ct & 0x01 ? 1 : 0;
	wce = ct & 0x02 ? 1 : 0;

	if (sdkp->cache_override) {

	if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
			    SD_MAX_RETRIES, &data, NULL))

	len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
		    data.block_descriptor_length);
	buffer_data = buffer + data.header_length +
		      data.block_descriptor_length;
	buffer_data[2] &= ~0x05;
	buffer_data[2] |= wce << 2 | rcd;
	sp = buffer_data[0] & 0x80 ? 1 : 0;

	if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
			     SD_MAX_RETRIES, &data, &sshdr)) {
		if (scsi_sense_valid(&sshdr))
			sd_print_sense_hdr(sdkp, &sshdr);

	revalidate_disk(sdkp->disk);
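
/*
 * Usage illustration (assumes the standard scsi_disk sysfs layout; not part
 * of the driver logic): the strings in sd_cache_types[] are exactly what
 * user space writes to the cache_type attribute, e.g. "write back" to enable
 * the drive's write cache.  A "temporary " prefix takes the cache_override
 * path above, which appears to update only the driver's cached WCE/RCD state
 * rather than issuing a MODE SELECT to the device.
 */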
manage_start_stop_show(struct device *dev, struct device_attribute *attr,
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	return snprintf(buf, 20, "%u\n", sdp->manage_start_stop);

manage_start_stop_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	if (!capable(CAP_SYS_ADMIN))

	sdp->manage_start_stop = simple_strtoul(buf, NULL, 10);

static DEVICE_ATTR_RW(manage_start_stop);

allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);

allow_restart_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	if (!capable(CAP_SYS_ADMIN))

	if (sdp->type != TYPE_DISK)

	sdp->allow_restart = simple_strtoul(buf, NULL, 10);

static DEVICE_ATTR_RW(allow_restart);

cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int ct = sdkp->RCD + 2*sdkp->WCE;

	return snprintf(buf, 40, "%s\n", sd_cache_types[ct]);

static DEVICE_ATTR_RW(cache_type);
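
/*
 * Illustration only -- a hypothetical helper that is not used by the driver.
 * It shows how the index into sd_cache_types[] is formed from the cached
 * mode-page bits, mirroring "ct = sdkp->RCD + 2*sdkp->WCE" in
 * cache_type_show() above.
 */
static inline const char *sd_cache_type_string(int wce, int rcd)
{
	/* 0 = "write through", 1 = "none", 2 = "write back",
	 * 3 = "write back, no read (daft)" */
	return sd_cache_types[rcd + 2 * wce];
}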
FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return snprintf(buf, 20, "%u\n", sdkp->DPOFUA);

static DEVICE_ATTR_RO(FUA);

protection_type_show(struct device *dev, struct device_attribute *attr,
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return snprintf(buf, 20, "%u\n", sdkp->protection_type);

protection_type_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	if (!capable(CAP_SYS_ADMIN))

	err = kstrtouint(buf, 10, &val);

	if (val >= 0 && val <= SD_DIF_TYPE3_PROTECTION)
		sdkp->protection_type = val;

static DEVICE_ATTR_RW(protection_type);

protection_mode_show(struct device *dev, struct device_attribute *attr,
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	unsigned int dif, dix;

	dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
	dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);

	if (!dix && scsi_host_dix_capable(sdp->host, SD_DIF_TYPE0_PROTECTION)) {

		return snprintf(buf, 20, "none\n");

	return snprintf(buf, 20, "%s%u\n", dix ? "dix" : "dif", dif);

static DEVICE_ATTR_RO(protection_mode);

app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return snprintf(buf, 20, "%u\n", sdkp->ATO);

static DEVICE_ATTR_RO(app_tag_own);

thin_provisioning_show(struct device *dev, struct device_attribute *attr,
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return snprintf(buf, 20, "%u\n", sdkp->lbpme);

static DEVICE_ATTR_RO(thin_provisioning);

static const char *lbp_mode[] = {
	[SD_LBP_FULL]		= "full",
	[SD_LBP_UNMAP]		= "unmap",
	[SD_LBP_WS16]		= "writesame_16",
	[SD_LBP_WS10]		= "writesame_10",
	[SD_LBP_ZERO]		= "writesame_zero",
	[SD_LBP_DISABLE]	= "disabled",
provisioning_mode_show(struct device *dev, struct device_attribute *attr,
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return snprintf(buf, 20, "%s\n", lbp_mode[sdkp->provisioning_mode]);

provisioning_mode_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	if (!capable(CAP_SYS_ADMIN))

	if (sdp->type != TYPE_DISK)

	if (!strncmp(buf, lbp_mode[SD_LBP_UNMAP], 20))
		sd_config_discard(sdkp, SD_LBP_UNMAP);
	else if (!strncmp(buf, lbp_mode[SD_LBP_WS16], 20))
		sd_config_discard(sdkp, SD_LBP_WS16);
	else if (!strncmp(buf, lbp_mode[SD_LBP_WS10], 20))
		sd_config_discard(sdkp, SD_LBP_WS10);
	else if (!strncmp(buf, lbp_mode[SD_LBP_ZERO], 20))
		sd_config_discard(sdkp, SD_LBP_ZERO);
	else if (!strncmp(buf, lbp_mode[SD_LBP_DISABLE], 20))
		sd_config_discard(sdkp, SD_LBP_DISABLE);

static DEVICE_ATTR_RW(provisioning_mode);

max_medium_access_timeouts_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return snprintf(buf, 20, "%u\n", sdkp->max_medium_access_timeouts);

max_medium_access_timeouts_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	if (!capable(CAP_SYS_ADMIN))

	err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);

	return err ? err : count;

static DEVICE_ATTR_RW(max_medium_access_timeouts);

max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return snprintf(buf, 20, "%u\n", sdkp->max_ws_blocks);

max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	if (!capable(CAP_SYS_ADMIN))

	if (sdp->type != TYPE_DISK)

	err = kstrtoul(buf, 10, &max);

		sdp->no_write_same = 1;
	else if (max <= SD_MAX_WS16_BLOCKS) {
		sdp->no_write_same = 0;
		sdkp->max_ws_blocks = max;

	sd_config_write_same(sdkp);

static DEVICE_ATTR_RW(max_write_same_blocks);
static struct attribute *sd_disk_attrs[] = {
	&dev_attr_cache_type.attr,
	&dev_attr_allow_restart.attr,
	&dev_attr_manage_start_stop.attr,
	&dev_attr_protection_type.attr,
	&dev_attr_protection_mode.attr,
	&dev_attr_app_tag_own.attr,
	&dev_attr_thin_provisioning.attr,
	&dev_attr_provisioning_mode.attr,
	&dev_attr_max_write_same_blocks.attr,
	&dev_attr_max_medium_access_timeouts.attr,

ATTRIBUTE_GROUPS(sd_disk);

static struct class sd_disk_class = {
	.owner		= THIS_MODULE,
	.dev_release	= scsi_disk_release,
	.dev_groups	= sd_disk_groups,

static const struct dev_pm_ops sd_pm_ops = {
	.suspend		= sd_suspend_system,
	.poweroff		= sd_suspend_system,
	.restore		= sd_resume,
	.runtime_suspend	= sd_suspend_runtime,
	.runtime_resume		= sd_resume,

static struct scsi_driver sd_template = {
	.owner			= THIS_MODULE,
	.shutdown		= sd_shutdown,
	.eh_action		= sd_eh_action,
/*
 * Dummy kobj_map->probe function.
 * The default ->probe function will call modprobe, which is
 * pointless as this module is already loaded.
 */
static struct kobject *sd_default_probe(dev_t devt, int *partno, void *data)

/*
 * Device no to disk mapping:
 *
 *       major         disc2     disc  p1
 *   |............|.............|....|....| <- dev_t
 *
 * Inside a major, we have 16k disks, however mapped non-
 * contiguously. The first 16 disks are for major0, the next
 * ones with major1, ... Disk 256 is for major0 again, disk 272
 *
 * As we stay compatible with our numbering scheme, we can reuse
 * the well-known SCSI majors 8, 65--71, 136--143.
 */
static int sd_major(int major_idx)
		return SCSI_DISK0_MAJOR;
		return SCSI_DISK1_MAJOR + major_idx - 1;
		return SCSI_DISK8_MAJOR + major_idx - 8;
		return 0;	/* shut up gcc */
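
/*
 * Worked example of the mapping above (derived from the comment and from
 * sd_major(); the exact packing of the minor bits into dev_t is not shown
 * here): disk indices 0-15 use SCSI_DISK0_MAJOR (8), 16-31 use
 * SCSI_DISK1_MAJOR (65) and so on up to index 127 (major 71), indices
 * 128-255 use majors 136-143, and index 256 wraps back to major 8 with a
 * higher minor range.
 */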
static struct scsi_disk *__scsi_disk_get(struct gendisk *disk)
	struct scsi_disk *sdkp = NULL;

	if (disk->private_data) {
		sdkp = scsi_disk(disk);
		if (scsi_device_get(sdkp->device) == 0)
			get_device(&sdkp->dev);

static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
	struct scsi_disk *sdkp;

	mutex_lock(&sd_ref_mutex);
	sdkp = __scsi_disk_get(disk);
	mutex_unlock(&sd_ref_mutex);

static struct scsi_disk *scsi_disk_get_from_dev(struct device *dev)
	struct scsi_disk *sdkp;

	mutex_lock(&sd_ref_mutex);
	sdkp = dev_get_drvdata(dev);
	sdkp = __scsi_disk_get(sdkp->disk);
	mutex_unlock(&sd_ref_mutex);

static void scsi_disk_put(struct scsi_disk *sdkp)
	struct scsi_device *sdev = sdkp->device;

	mutex_lock(&sd_ref_mutex);
	put_device(&sdkp->dev);
	scsi_device_put(sdev);
	mutex_unlock(&sd_ref_mutex);

static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
	unsigned int prot_op = SCSI_PROT_NORMAL;
	unsigned int dix = scsi_prot_sg_count(scmd);

	if (scmd->sc_data_direction == DMA_FROM_DEVICE) {
			prot_op = SCSI_PROT_READ_PASS;
		else if (dif && !dix)
			prot_op = SCSI_PROT_READ_STRIP;
		else if (!dif && dix)
			prot_op = SCSI_PROT_READ_INSERT;

			prot_op = SCSI_PROT_WRITE_PASS;
		else if (dif && !dix)
			prot_op = SCSI_PROT_WRITE_INSERT;
		else if (!dif && dix)
			prot_op = SCSI_PROT_WRITE_STRIP;

	scsi_set_prot_op(scmd, prot_op);
	scsi_set_prot_type(scmd, dif);
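
/*
 * Summary of the branches above (dif = disk formatted with protection,
 * dix = host supplied integrity buffers):
 *
 *	dif dix		READ			WRITE
 *	 1   1		SCSI_PROT_READ_PASS	SCSI_PROT_WRITE_PASS
 *	 1   0		SCSI_PROT_READ_STRIP	SCSI_PROT_WRITE_INSERT
 *	 0   1		SCSI_PROT_READ_INSERT	SCSI_PROT_WRITE_STRIP
 *	 0   0		SCSI_PROT_NORMAL	SCSI_PROT_NORMAL
 */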
static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
	struct request_queue *q = sdkp->disk->queue;
	unsigned int logical_block_size = sdkp->device->sector_size;
	unsigned int max_blocks = 0;

	q->limits.discard_zeroes_data = sdkp->lbprz;
	q->limits.discard_alignment = sdkp->unmap_alignment *
	q->limits.discard_granularity =
		max(sdkp->physical_block_size,
		    sdkp->unmap_granularity * logical_block_size);

	sdkp->provisioning_mode = mode;

		q->limits.max_discard_sectors = 0;
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);

		max_blocks = min_not_zero(sdkp->max_unmap_blocks,
					  (u32)SD_MAX_WS16_BLOCKS);

		max_blocks = min_not_zero(sdkp->max_ws_blocks,
					  (u32)SD_MAX_WS16_BLOCKS);

		max_blocks = min_not_zero(sdkp->max_ws_blocks,
					  (u32)SD_MAX_WS10_BLOCKS);

		max_blocks = min_not_zero(sdkp->max_ws_blocks,
					  (u32)SD_MAX_WS10_BLOCKS);
		q->limits.discard_zeroes_data = 1;

	q->limits.max_discard_sectors = max_blocks * (logical_block_size >> 9);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
/**
 * sd_setup_discard_cmnd - unmap blocks on thinly provisioned device
 * @sdp: scsi device to operate on
 * @rq: Request to prepare
 *
 * Will issue either UNMAP or WRITE SAME(16) depending on preference
 * indicated by target device.
 **/
static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	sector_t sector = blk_rq_pos(rq);
	unsigned int nr_sectors = blk_rq_sectors(rq);
	unsigned int nr_bytes = blk_rq_bytes(rq);

	sector >>= ilog2(sdp->sector_size) - 9;
	nr_sectors >>= ilog2(sdp->sector_size) - 9;
	rq->timeout = SD_TIMEOUT;

	memset(rq->cmd, 0, rq->cmd_len);

	page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
		return BLKPREP_DEFER;

	switch (sdkp->provisioning_mode) {
		buf = page_address(page);

		put_unaligned_be16(6 + 16, &buf[0]);
		put_unaligned_be16(16, &buf[2]);
		put_unaligned_be64(sector, &buf[8]);
		put_unaligned_be32(nr_sectors, &buf[16]);
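		/*
		 * Layout note: this fills a single-descriptor UNMAP
		 * parameter list -- bytes 0-1 hold the data length
		 * (6 + 16), bytes 2-3 the block descriptor data length
		 * (16), and the descriptor carries the start LBA at
		 * offset 8 and the block count at offset 16 of the list.
		 */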
		rq->cmd[0] = WRITE_SAME_16;
		rq->cmd[1] = 0x8; /* UNMAP */
		put_unaligned_be64(sector, &rq->cmd[2]);
		put_unaligned_be32(nr_sectors, &rq->cmd[10]);

		len = sdkp->device->sector_size;

		rq->cmd[0] = WRITE_SAME;
		if (sdkp->provisioning_mode == SD_LBP_WS10)
			rq->cmd[1] = 0x8; /* UNMAP */
		put_unaligned_be32(sector, &rq->cmd[2]);
		put_unaligned_be16(nr_sectors, &rq->cmd[7]);

		len = sdkp->device->sector_size;

	blk_add_request_payload(rq, page, len);
	ret = scsi_setup_blk_pc_cmnd(sdp, rq);
	rq->__data_len = nr_bytes;

	if (ret != BLKPREP_OK)
static void sd_config_write_same(struct scsi_disk *sdkp)
	struct request_queue *q = sdkp->disk->queue;
	unsigned int logical_block_size = sdkp->device->sector_size;

	if (sdkp->device->no_write_same) {
		sdkp->max_ws_blocks = 0;

	/* Some devices can not handle block counts above 0xffff despite
	 * supporting WRITE SAME(16). Consequently we default to 64k
	 * blocks per I/O unless the device explicitly advertises a
	 */
	if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
						   (u32)SD_MAX_WS16_BLOCKS);
	else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
						   (u32)SD_MAX_WS10_BLOCKS);

		sdkp->device->no_write_same = 1;
		sdkp->max_ws_blocks = 0;

	blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
					 (logical_block_size >> 9));
/**
 * sd_setup_write_same_cmnd - write the same data to multiple blocks
 * @sdp: scsi device to operate on
 * @rq: Request to prepare
 *
 * Will issue either WRITE SAME(10) or WRITE SAME(16) depending on
 * preference indicated by target device.
 **/
static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	struct bio *bio = rq->bio;
	sector_t sector = blk_rq_pos(rq);
	unsigned int nr_sectors = blk_rq_sectors(rq);
	unsigned int nr_bytes = blk_rq_bytes(rq);

	if (sdkp->device->no_write_same)

	BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);

	sector >>= ilog2(sdp->sector_size) - 9;
	nr_sectors >>= ilog2(sdp->sector_size) - 9;

	rq->__data_len = sdp->sector_size;
	rq->timeout = SD_WRITE_SAME_TIMEOUT;
	memset(rq->cmd, 0, rq->cmd_len);

	if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) {
		rq->cmd[0] = WRITE_SAME_16;
		put_unaligned_be64(sector, &rq->cmd[2]);
		put_unaligned_be32(nr_sectors, &rq->cmd[10]);

		rq->cmd[0] = WRITE_SAME;
		put_unaligned_be32(sector, &rq->cmd[2]);
		put_unaligned_be16(nr_sectors, &rq->cmd[7]);

	ret = scsi_setup_blk_pc_cmnd(sdp, rq);
	rq->__data_len = nr_bytes;
static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
	rq->timeout *= SD_FLUSH_TIMEOUT_MULTIPLIER;
	rq->retries = SD_MAX_RETRIES;
	rq->cmd[0] = SYNCHRONIZE_CACHE;

	return scsi_setup_blk_pc_cmnd(sdp, rq);

static void sd_unprep_fn(struct request_queue *q, struct request *rq)
	struct scsi_cmnd *SCpnt = rq->special;

	if (rq->cmd_flags & REQ_DISCARD) {
		struct bio *bio = rq->bio;

		__free_page(bio->bi_io_vec->bv_page);

	if (SCpnt->cmnd != rq->cmd) {
		mempool_free(SCpnt->cmnd, sd_cdb_pool);
/**
 *	sd_prep_fn - build a scsi (read or write) command from
 *	information in the request structure.
 *	@SCpnt: pointer to mid-level's per scsi command structure that
 *	contains request and into which the scsi command is written
 *
 *	Returns 1 if successful and 0 if error (or cannot be done now).
 **/
static int sd_prep_fn(struct request_queue *q, struct request *rq)
	struct scsi_cmnd *SCpnt;
	struct scsi_device *sdp = q->queuedata;
	struct gendisk *disk = rq->rq_disk;
	struct scsi_disk *sdkp;
	sector_t block = blk_rq_pos(rq);
	unsigned int this_count = blk_rq_sectors(rq);
	unsigned char protect;

	/*
	 * Discard requests come in as REQ_TYPE_FS but we turn them into
	 * block PC requests to make life easier.
	 */
	if (rq->cmd_flags & REQ_DISCARD) {
		ret = sd_setup_discard_cmnd(sdp, rq);
	} else if (rq->cmd_flags & REQ_WRITE_SAME) {
		ret = sd_setup_write_same_cmnd(sdp, rq);
	} else if (rq->cmd_flags & REQ_FLUSH) {
		ret = scsi_setup_flush_cmnd(sdp, rq);
	} else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		ret = scsi_setup_blk_pc_cmnd(sdp, rq);
	} else if (rq->cmd_type != REQ_TYPE_FS) {

	ret = scsi_setup_fs_cmnd(sdp, rq);
	if (ret != BLKPREP_OK)

	sdkp = scsi_disk(disk);

	/* from here on until we're complete, any goto out
	 * is used for a killable error condition */

	SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
					"sd_prep_fn: block=%llu, "
					(unsigned long long)block,

	if (!sdp || !scsi_device_online(sdp) ||
	    block + blk_rq_sectors(rq) > get_capacity(disk)) {
		SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
						"Finishing %u sectors\n",
						blk_rq_sectors(rq)));
		SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
						"Retry with 0x%p\n", SCpnt));

		/*
		 * quietly refuse to do anything to a changed disc until
		 * the changed bit has been reset
		 */
		/* printk("SCSI disk has been changed or is not present. Prohibiting further I/O.\n"); */

	/*
	 * Some SD card readers can't handle multi-sector accesses which touch
	 * the last one or two hardware sectors.  Split accesses as needed.
	 */
	threshold = get_capacity(disk) - SD_LAST_BUGGY_SECTORS *
		(sdp->sector_size / 512);

	if (unlikely(sdp->last_sector_bug && block + this_count > threshold)) {
		if (block < threshold) {
			/* Access up to the threshold but not beyond */
			this_count = threshold - block;

			/* Access only a single hardware sector */
			this_count = sdp->sector_size / 512;

	SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n",
					(unsigned long long)block));

	/*
	 * If we have a 1K hardware sectorsize, prevent access to single
	 * 512 byte sectors.  In theory we could handle this - in fact
	 * the scsi cdrom driver must be able to handle this because
	 * we typically use 1K blocksizes, and cdroms typically have
	 * 2K hardware sectorsizes.  Of course, things are simpler
	 * with the cdrom, since it is read-only.  For performance
	 * reasons, the filesystems should be able to handle this
	 * and not force the scsi disk driver to use bounce buffers
	 */
	if (sdp->sector_size == 1024) {
		if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
			scmd_printk(KERN_ERR, SCpnt,
				    "Bad block number requested\n");

		this_count = this_count >> 1;

	if (sdp->sector_size == 2048) {
		if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
			scmd_printk(KERN_ERR, SCpnt,
				    "Bad block number requested\n");

		this_count = this_count >> 2;

	if (sdp->sector_size == 4096) {
		if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
			scmd_printk(KERN_ERR, SCpnt,
				    "Bad block number requested\n");

		this_count = this_count >> 3;
	if (rq_data_dir(rq) == WRITE) {
		if (!sdp->writeable) {

		SCpnt->cmnd[0] = WRITE_6;
		SCpnt->sc_data_direction = DMA_TO_DEVICE;

		if (blk_integrity_rq(rq))
			sd_dif_prepare(rq, block, sdp->sector_size);

	} else if (rq_data_dir(rq) == READ) {
		SCpnt->cmnd[0] = READ_6;
		SCpnt->sc_data_direction = DMA_FROM_DEVICE;

		scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags);

	SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
					"%s %d/%u 512 byte blocks.\n",
					(rq_data_dir(rq) == WRITE) ?
					"writing" : "reading", this_count,
					blk_rq_sectors(rq)));
	/* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
	host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);

	if (host_dif == SD_DIF_TYPE2_PROTECTION) {
		SCpnt->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);

		if (unlikely(SCpnt->cmnd == NULL)) {
			ret = BLKPREP_DEFER;

		SCpnt->cmd_len = SD_EXT_CDB_SIZE;
		memset(SCpnt->cmnd, 0, SCpnt->cmd_len);
		SCpnt->cmnd[0] = VARIABLE_LENGTH_CMD;
		SCpnt->cmnd[7] = 0x18;
		SCpnt->cmnd[9] = (rq_data_dir(rq) == READ) ? READ_32 : WRITE_32;
		SCpnt->cmnd[10] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);

		SCpnt->cmnd[12] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
		SCpnt->cmnd[13] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
		SCpnt->cmnd[14] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
		SCpnt->cmnd[15] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0;
		SCpnt->cmnd[16] = (unsigned char) (block >> 24) & 0xff;
		SCpnt->cmnd[17] = (unsigned char) (block >> 16) & 0xff;
		SCpnt->cmnd[18] = (unsigned char) (block >> 8) & 0xff;
		SCpnt->cmnd[19] = (unsigned char) block & 0xff;

		/* Expected Indirect LBA */
		SCpnt->cmnd[20] = (unsigned char) (block >> 24) & 0xff;
		SCpnt->cmnd[21] = (unsigned char) (block >> 16) & 0xff;
		SCpnt->cmnd[22] = (unsigned char) (block >> 8) & 0xff;
		SCpnt->cmnd[23] = (unsigned char) block & 0xff;

		/* Transfer length */
		SCpnt->cmnd[28] = (unsigned char) (this_count >> 24) & 0xff;
		SCpnt->cmnd[29] = (unsigned char) (this_count >> 16) & 0xff;
		SCpnt->cmnd[30] = (unsigned char) (this_count >> 8) & 0xff;
		SCpnt->cmnd[31] = (unsigned char) this_count & 0xff;
	} else if (sdp->use_16_for_rw) {
		SCpnt->cmnd[0] += READ_16 - READ_6;
		SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
		SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
		SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
		SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
		SCpnt->cmnd[5] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0;
		SCpnt->cmnd[6] = (unsigned char) (block >> 24) & 0xff;
		SCpnt->cmnd[7] = (unsigned char) (block >> 16) & 0xff;
		SCpnt->cmnd[8] = (unsigned char) (block >> 8) & 0xff;
		SCpnt->cmnd[9] = (unsigned char) block & 0xff;
		SCpnt->cmnd[10] = (unsigned char) (this_count >> 24) & 0xff;
		SCpnt->cmnd[11] = (unsigned char) (this_count >> 16) & 0xff;
		SCpnt->cmnd[12] = (unsigned char) (this_count >> 8) & 0xff;
		SCpnt->cmnd[13] = (unsigned char) this_count & 0xff;
		SCpnt->cmnd[14] = SCpnt->cmnd[15] = 0;
	} else if ((this_count > 0xff) || (block > 0x1fffff) ||
		   scsi_device_protection(SCpnt->device) ||
		   SCpnt->device->use_10_for_rw) {
		if (this_count > 0xffff)
			this_count = 0xffff;

		SCpnt->cmnd[0] += READ_10 - READ_6;
		SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
		SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
		SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
		SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
		SCpnt->cmnd[5] = (unsigned char) block & 0xff;
		SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0;
		SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
		SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;

		if (unlikely(rq->cmd_flags & REQ_FUA)) {
			/*
			 * This happens only if this drive failed
			 * 10byte rw command with ILLEGAL_REQUEST
			 * during operation and thus turned off
			 */
			scmd_printk(KERN_ERR, SCpnt,
				    "FUA write on READ/WRITE(6) drive\n");

		SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f);
		SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff);
		SCpnt->cmnd[3] = (unsigned char) block & 0xff;
		SCpnt->cmnd[4] = (unsigned char) this_count;
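	/*
	 * Recap of the CDB selection above: a 32-byte variable-length CDB
	 * for DIF Type 2 formatted disks, READ/WRITE(16) when use_16_for_rw
	 * is set, READ/WRITE(10) for transfers above 0xff blocks, LBAs above
	 * 0x1fffff, protected transfers or use_10_for_rw devices, and
	 * READ/WRITE(6) otherwise (which has no FUA bit, hence the warning
	 * above).
	 */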
	SCpnt->sdb.length = this_count * sdp->sector_size;

	/* If DIF or DIX is enabled, tell HBA how to handle request */
	if (host_dif || scsi_prot_sg_count(SCpnt))
		sd_prot_op(SCpnt, host_dif);

	/*
	 * We shouldn't disconnect in the middle of a sector, so with a dumb
	 * host adapter, it's safe to assume that we can at least transfer
	 * this many bytes between each connect / disconnect.
	 */
	SCpnt->transfersize = sdp->sector_size;
	SCpnt->underflow = this_count << 9;
	SCpnt->allowed = SD_MAX_RETRIES;

	/*
	 * This indicates that the command is ready from our end to be
	 */
	return scsi_prep_return(q, rq, ret);
/**
 *	sd_open - open a scsi disk device
 *	@inode: only i_rdev member may be used
 *	@filp: only f_mode and f_flags may be used
 *
 *	Returns 0 if successful. Returns a negated errno value in case
 *
 *	Note: This can be called from a user context (e.g. fsck(1) )
 *	or from within the kernel (e.g. as a result of a mount(1) ).
 *	In the latter case @inode and @filp carry an abridged amount
 *	of information as noted above.
 *
 *	Locking: called with bdev->bd_mutex held.
 **/
static int sd_open(struct block_device *bdev, fmode_t mode)
	struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
	struct scsi_device *sdev;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));

	sdev = sdkp->device;

	/*
	 * If the device is in error recovery, wait until it is done.
	 * If the device is offline, then disallow any access to it.
	 */
	if (!scsi_block_when_processing_errors(sdev))

	if (sdev->removable || sdkp->write_prot)
		check_disk_change(bdev);

	/*
	 * If the drive is empty, just let the open fail.
	 */
	retval = -ENOMEDIUM;
	if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))

	/*
	 * If the device has the write protect tab set, have the open fail
	 * if the user expects to be able to write to the thing.
	 */
	if (sdkp->write_prot && (mode & FMODE_WRITE))

	/*
	 * It is possible that the disk changing stuff resulted in
	 * the device being taken offline.  If this is the case,
	 * report this to the user, and don't pretend that the
	 * open actually succeeded.
	 */
	if (!scsi_device_online(sdev))

	if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);

	scsi_disk_put(sdkp);
/**
 *	sd_release - invoked when the (last) close(2) is called on this
 *	@inode: only i_rdev member may be used
 *	@filp: only f_mode and f_flags may be used
 *
 *	Note: may block (uninterruptible) if error recovery is underway
 *
 *	Locking: called with bdev->bd_mutex held.
 **/
static void sd_release(struct gendisk *disk, fmode_t mode)
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdev = sdkp->device;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));

	if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);

	/*
	 * XXX and what if there are packets in flight and this close()
	 * XXX is followed by a "rmmod sd_mod"?
	 */
	scsi_disk_put(sdkp);
static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
	struct scsi_device *sdp = sdkp->device;
	struct Scsi_Host *host = sdp->host;

	/* default to most commonly used values */
	diskinfo[0] = 0x40;	/* 1 << 6 */
	diskinfo[1] = 0x20;	/* 1 << 5 */
	diskinfo[2] = sdkp->capacity >> 11;

	/* override with calculated, extended default, or driver values */
	if (host->hostt->bios_param)
		host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo);

	scsicam_bios_param(bdev, sdkp->capacity, diskinfo);

	geo->heads = diskinfo[0];
	geo->sectors = diskinfo[1];
	geo->cylinders = diskinfo[2];
/**
 *	sd_ioctl - process an ioctl
 *	@inode: only i_rdev/i_bdev members may be used
 *	@filp: only f_mode and f_flags may be used
 *	@cmd: ioctl command number
 *	@arg: this is third argument given to ioctl(2) system call.
 *	Often contains a pointer.
 *
 *	Returns 0 if successful (some ioctls return positive numbers on
 *	success as well). Returns a negated errno value in case of error.
 *
 *	Note: most ioctls are forwarded onto the block subsystem or further
 *	down in the scsi subsystem.
 **/
static int sd_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg)
	struct gendisk *disk = bdev->bd_disk;
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdp = sdkp->device;
	void __user *p = (void __user *)arg;

	SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
				    "cmd=0x%x\n", disk->disk_name, cmd));

	error = scsi_verify_blk_ioctl(bdev, cmd);

	/*
	 * If we are in the middle of error recovery, don't let anyone
	 * else try and use this device.  Also, if error recovery fails, it
	 * may try and take the device offline, in which case all further
	 * access to the device is prohibited.
	 */
	error = scsi_nonblockable_ioctl(sdp, cmd, p,
					(mode & FMODE_NDELAY) != 0);
	if (!scsi_block_when_processing_errors(sdp) || !error)

	/*
	 * Send SCSI addressing ioctls directly to mid level, send other
	 * ioctls to block level and then onto mid level if they can't be
	 */
		case SCSI_IOCTL_GET_IDLUN:
		case SCSI_IOCTL_GET_BUS_NUMBER:
			error = scsi_ioctl(sdp, cmd, p);

			error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
			if (error != -ENOTTY)

			error = scsi_ioctl(sdp, cmd, p);
static void set_media_not_present(struct scsi_disk *sdkp)
	if (sdkp->media_present)
		sdkp->device->changed = 1;

	if (sdkp->device->removable) {
		sdkp->media_present = 0;

static int media_not_present(struct scsi_disk *sdkp,
			     struct scsi_sense_hdr *sshdr)
	if (!scsi_sense_valid(sshdr))

	/* not invoked for commands that could return deferred errors */
	switch (sshdr->sense_key) {
	case UNIT_ATTENTION:
		/* medium not present */
		if (sshdr->asc == 0x3A) {
			set_media_not_present(sdkp);
/**
 *	sd_check_events - check media events
 *	@disk: kernel device descriptor
 *	@clearing: disk events currently being cleared
 *
 *	Returns mask of DISK_EVENT_*.
 *
 *	Note: this function is invoked from the block subsystem.
 **/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdp = sdkp->device;
	struct scsi_sense_hdr *sshdr = NULL;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));

	/*
	 * If the device is offline, don't send any commands - just pretend as
	 * if the command failed.  If the device ever comes back online, we
	 * can deal with it then.  It is only because of unrecoverable errors
	 * that we would ever take a device offline in the first place.
	 */
	if (!scsi_device_online(sdp)) {
		set_media_not_present(sdkp);

	/*
	 * Using TEST_UNIT_READY enables differentiation between drive with
	 * no cartridge loaded - NOT READY, drive with changed cartridge -
	 * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
	 *
	 * Drives that auto spin down. eg iomega jaz 1G, will be started
	 * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
	 * sd_revalidate() is called.
	 */
	if (scsi_block_when_processing_errors(sdp)) {
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
		retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,

	/* failed to execute TUR, assume media not present */
	if (host_byte(retval)) {
		set_media_not_present(sdkp);

	if (media_not_present(sdkp, sshdr))

	/*
	 * For removable scsi disk we have to recognise the presence
	 * of a disk in the drive.
	 */
	if (!sdkp->media_present)

	sdkp->media_present = 1;

	/*
	 * sdp->changed is set under the following conditions:
	 *
	 * Medium present state has changed in either direction.
	 * Device has indicated UNIT_ATTENTION.
	 */
	retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
static int sd_sync_cache(struct scsi_disk *sdkp)
	struct scsi_device *sdp = sdkp->device;
	const int timeout = sdp->request_queue->rq_timeout
		* SD_FLUSH_TIMEOUT_MULTIPLIER;
	struct scsi_sense_hdr sshdr;

	if (!scsi_device_online(sdp))

	for (retries = 3; retries > 0; --retries) {
		unsigned char cmd[10] = { 0 };

		cmd[0] = SYNCHRONIZE_CACHE;
		/*
		 * Leave the rest of the command zero to indicate
		 */
		res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
					     &sshdr, timeout, SD_MAX_RETRIES,

		sd_print_result(sdkp, res);

		if (driver_byte(res) & DRIVER_SENSE)
			sd_print_sense_hdr(sdkp, &sshdr);
		/* we need to evaluate the error return  */
		if (scsi_sense_valid(&sshdr) &&
		    (sshdr.asc == 0x3a ||	/* medium not present */
		     sshdr.asc == 0x20))	/* invalid command */
			/* this is no error here */

		switch (host_byte(res)) {
		/* ignore errors due to racing a disconnection */
		case DID_BAD_TARGET:
		case DID_NO_CONNECT:

		/* signal the upper layer it might try again */
		case DID_SOFT_ERROR:
static void sd_rescan(struct device *dev)
	struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);

	revalidate_disk(sdkp->disk);
	scsi_disk_put(sdkp);
#ifdef CONFIG_COMPAT
/*
 * This gets directly called from VFS. When the ioctl
 * is not recognized we go back to the other translation paths.
 */
static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
	struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;

	ret = scsi_verify_blk_ioctl(bdev, cmd);

	/*
	 * If we are in the middle of error recovery, don't let anyone
	 * else try and use this device.  Also, if error recovery fails, it
	 * may try and take the device offline, in which case all further
	 * access to the device is prohibited.
	 */
	if (!scsi_block_when_processing_errors(sdev))

	if (sdev->host->hostt->compat_ioctl) {
		ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);

	/*
	 * Let the static ioctl translation table take care of it.
	 */
	return -ENOIOCTLCMD;

static const struct block_device_operations sd_fops = {
	.owner			= THIS_MODULE,
	.release		= sd_release,
	.getgeo			= sd_getgeo,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= sd_compat_ioctl,
	.check_events		= sd_check_events,
	.revalidate_disk	= sd_revalidate_disk,
	.unlock_native_capacity	= sd_unlock_native_capacity,
/**
 *	sd_eh_action - error handling callback
 *	@scmd:		sd-issued command that has failed
 *	@eh_disp:	The recovery disposition suggested by the midlayer
 *
 *	This function is called by the SCSI midlayer upon completion of an
 *	error test command (currently TEST UNIT READY). The result of sending
 *	the eh command is passed in eh_disp.  We're looking for devices that
 *	fail medium access commands but are OK with non access commands like
 *	test unit ready (so wrongly see the device as having a successful
 **/
static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
	struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);

	if (!scsi_device_online(scmd->device) ||
	    !scsi_medium_access_command(scmd) ||
	    host_byte(scmd->result) != DID_TIME_OUT ||

	/*
	 * The device has timed out executing a medium access command.
	 * However, the TEST UNIT READY command sent during error
	 * handling completed successfully. Either the device is in the
	 * process of recovering or it has suffered an internal failure
	 * that prevents access to the storage medium.
	 */
	sdkp->medium_access_timed_out++;

	/*
	 * If the device keeps failing read/write commands but TEST UNIT
	 * READY always completes successfully we assume that medium
	 * access is no longer possible and take the device offline.
	 */
	if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
		scmd_printk(KERN_ERR, scmd,
			    "Medium access timeout failure. Offlining disk!\n");
		scsi_device_set_state(scmd->device, SDEV_OFFLINE);
static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
	u64 start_lba = blk_rq_pos(scmd->request);
	u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);

	/*
	 * resid is optional but mostly filled in.  When it's unused,
	 * its value is zero, so we assume the whole buffer transferred
	 */
	unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
	unsigned int good_bytes;

	if (scmd->request->cmd_type != REQ_TYPE_FS)

	info_valid = scsi_get_sense_info_fld(scmd->sense_buffer,
					     SCSI_SENSE_BUFFERSIZE,

	if (scsi_bufflen(scmd) <= scmd->device->sector_size)

	if (scmd->device->sector_size < 512) {
		/* only legitimate sector_size here is 256 */

		/* be careful ... don't want any overflows */
		unsigned int factor = scmd->device->sector_size / 512;
		do_div(start_lba, factor);
		do_div(end_lba, factor);

	/* The bad lba was reported incorrectly, we have no idea where
	 */
	if (bad_lba < start_lba || bad_lba >= end_lba)

	/* This computation should always be done in terms of
	 * the resolution of the device's medium.
	 */
	good_bytes = (bad_lba - start_lba) * scmd->device->sector_size;
	return min(good_bytes, transferred);
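
/*
 * Worked example of the computation above: a 16-sector READ of 512-byte
 * blocks starting at LBA 1000 that reports a bad LBA of 1004 yields
 * good_bytes = (1004 - 1000) * 512 = 2048, i.e. the first four blocks are
 * completed and the remainder of the request is failed.
 */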
/**
 *	sd_done - bottom half handler: called when the lower level
 *	driver has completed (successfully or otherwise) a scsi command.
 *	@SCpnt: mid-level's per command structure.
 *
 *	Note: potentially run from within an ISR. Must not block.
 **/
static int sd_done(struct scsi_cmnd *SCpnt)
	int result = SCpnt->result;
	unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
	struct scsi_sense_hdr sshdr;
	struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
	struct request *req = SCpnt->request;
	int sense_valid = 0;
	int sense_deferred = 0;
	unsigned char op = SCpnt->cmnd[0];
	unsigned char unmap = SCpnt->cmnd[1] & 8;

	if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) {
			good_bytes = blk_rq_bytes(req);
			scsi_set_resid(SCpnt, 0);

			scsi_set_resid(SCpnt, blk_rq_bytes(req));

	sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
		sense_deferred = scsi_sense_is_deferred(&sshdr);

#ifdef CONFIG_SCSI_LOGGING
	SCSI_LOG_HLCOMPLETE(1, scsi_print_result(SCpnt));
	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
					   "sd_done: sb[respc,sk,asc,"
					   "ascq]=%x,%x,%x,%x\n",
					   sshdr.response_code,
					   sshdr.sense_key, sshdr.asc,

	if (driver_byte(result) != DRIVER_SENSE &&
	    (!sense_valid || sense_deferred))

	sdkp->medium_access_timed_out = 0;

	switch (sshdr.sense_key) {
	case HARDWARE_ERROR:
		good_bytes = sd_completed_bytes(SCpnt);
	case RECOVERED_ERROR:
		good_bytes = scsi_bufflen(SCpnt);

		/* This indicates a false check condition, so ignore it.  An
		 * unknown amount of data was transferred so treat it as an
		 */
		scsi_print_sense("sd", SCpnt);
		memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	case ABORTED_COMMAND:
		if (sshdr.asc == 0x10)  /* DIF: Target detected corruption */
			good_bytes = sd_completed_bytes(SCpnt);
	case ILLEGAL_REQUEST:
		if (sshdr.asc == 0x10)  /* DIX: Host detected corruption */
			good_bytes = sd_completed_bytes(SCpnt);
		/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
		if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				sd_config_discard(sdkp, SD_LBP_DISABLE);

				sd_config_discard(sdkp, SD_LBP_DISABLE);

				sdkp->device->no_write_same = 1;
				sd_config_write_same(sdkp);

				req->__data_len = blk_rq_bytes(req);
				req->cmd_flags |= REQ_QUIET;

	if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
		sd_dif_complete(SCpnt, good_bytes);
/*
 * spinup disk - called only in sd_revalidate_disk()
 */
sd_spinup_disk(struct scsi_disk *sdkp)
	unsigned char cmd[10];
	unsigned long spintime_expire = 0;
	int retries, spintime;
	unsigned int the_result;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;

	/* Spin up drives, as required.  Only do this at boot time */
	/* Spinup needs to be done for module loads too. */

			cmd[0] = TEST_UNIT_READY;
			memset((void *) &cmd[1], 0, 9);

			the_result = scsi_execute_req(sdkp->device, cmd,
						      SD_MAX_RETRIES, NULL);

			/*
			 * If the drive has indicated to us that it
			 * doesn't have any media in it, don't bother
			 * with any more polling.
			 */
			if (media_not_present(sdkp, &sshdr))

			sense_valid = scsi_sense_valid(&sshdr);
		} while (retries < 3 &&
			 (!scsi_status_is_good(the_result) ||
			  ((driver_byte(the_result) & DRIVER_SENSE) &&
			  sense_valid && sshdr.sense_key == UNIT_ATTENTION)));

		if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
			/* no sense, TUR either succeeded or failed
			 * with a status error */
			if (!spintime && !scsi_status_is_good(the_result)) {
				sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
				sd_print_result(sdkp, the_result);

		/*
		 * The device does not want the automatic start to be issued.
		 */
		if (sdkp->device->no_start_on_add)

		if (sense_valid && sshdr.sense_key == NOT_READY) {
			if (sshdr.asc == 4 && sshdr.ascq == 3)
				break;	/* manual intervention required */
			if (sshdr.asc == 4 && sshdr.ascq == 0xb)
				break;	/* standby */
			if (sshdr.asc == 4 && sshdr.ascq == 0xc)
				break;	/* unavailable */
			/*
			 * Issue command to spin up drive when not ready
			 */
				sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
				cmd[0] = START_STOP;
				cmd[1] = 1;	/* Return immediately */
				memset((void *) &cmd[2], 0, 8);
				cmd[4] = 1;	/* Start spin cycle */
				if (sdkp->device->start_stop_pwr_cond)
				scsi_execute_req(sdkp->device, cmd, DMA_NONE,
						 SD_TIMEOUT, SD_MAX_RETRIES,
				spintime_expire = jiffies + 100 * HZ;

			/* Wait 1 second for next try */

		/*
		 * Wait for USB flash devices with slow firmware.
		 * Yes, this sense key/ASC combination shouldn't
		 * occur here.  It's characteristic of these devices.
		 */
		} else if (sense_valid &&
			   sshdr.sense_key == UNIT_ATTENTION &&
			   sshdr.asc == 0x28) {
			spintime_expire = jiffies + 5 * HZ;
			/* Wait 1 second for next try */

			/* we don't understand the sense code, so it's
			 * probably pointless to loop */
			sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
			sd_print_sense_hdr(sdkp, &sshdr);

	} while (spintime && time_before_eq(jiffies, spintime_expire));

	if (scsi_status_is_good(the_result))

	printk("not responding...\n");
/*
 * Determine whether disk supports Data Integrity Field.
 */
static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
	struct scsi_device *sdp = sdkp->device;

	if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)

	type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */

	if (type > SD_DIF_TYPE3_PROTECTION)

	else if (scsi_host_dif_capable(sdp->host, type))

	if (sdkp->first_scan || type != sdkp->protection_type)

			sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
				  " protection type %u. Disabling disk!\n",

			sd_printk(KERN_NOTICE, sdkp,
				  "Enabling DIF Type %u protection\n", type);

			sd_printk(KERN_NOTICE, sdkp,
				  "Disabling DIF Type %u protection\n", type);

	sdkp->protection_type = type;
static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
				struct scsi_sense_hdr *sshdr, int sense_valid,
	sd_print_result(sdkp, the_result);
	if (driver_byte(the_result) & DRIVER_SENSE)
		sd_print_sense_hdr(sdkp, sshdr);

	sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");

	/*
	 * Set dirty bit for removable devices if not ready -
	 * sometimes drives will not report this properly.
	 */
	if (sdp->removable &&
	    sense_valid && sshdr->sense_key == NOT_READY)
		set_media_not_present(sdkp);

	/*
	 * We used to set media_present to 0 here to indicate no media
	 * in the drive, but some drives fail read capacity even with
	 * media present, so we can't do that.
	 */
	sdkp->capacity = 0; /* unknown mapped to zero - as usual */
#if RC16_LEN > SD_BUF_SIZE
#error RC16_LEN must not be more than SD_BUF_SIZE

#define READ_CAPACITY_RETRIES_ON_RESET	10

static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
			    unsigned char *buffer)
	unsigned char cmd[16];
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
	unsigned int alignment;
	unsigned long long lba;
	unsigned sector_size;

	if (sdp->no_read_capacity_16)

		cmd[0] = SERVICE_ACTION_IN;
		cmd[1] = SAI_READ_CAPACITY_16;
		memset(buffer, 0, RC16_LEN);

		the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
					      buffer, RC16_LEN, &sshdr,
					      SD_TIMEOUT, SD_MAX_RETRIES, NULL);

		if (media_not_present(sdkp, &sshdr))

			sense_valid = scsi_sense_valid(&sshdr);
			    sshdr.sense_key == ILLEGAL_REQUEST &&
			    (sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
				/* Invalid Command Operation Code or
				 * Invalid Field in CDB, just retry
				 * silently with RC10 */
			    sshdr.sense_key == UNIT_ATTENTION &&
			    sshdr.asc == 0x29 && sshdr.ascq == 0x00)
				/* Device reset might occur several times,
				 * give it one more chance */
				if (--reset_retries > 0)

	} while (the_result && retries);

		sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY(16) failed\n");
		read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);

	sector_size = get_unaligned_be32(&buffer[8]);
	lba = get_unaligned_be64(&buffer[0]);

	if (sd_read_protection_type(sdkp, buffer) < 0) {

	if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
		sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
			"kernel compiled with support for large block "

	/* Logical blocks per physical block exponent */
	sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;

	/* Lowest aligned logical block */
	alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
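	/*
	 * Example of the two fields above: a buffer[13] low nibble of 3
	 * means 2^3 = 8 logical blocks per physical block (4096-byte
	 * physical sectors on a 512-byte logical-sector disk), and a
	 * lowest-aligned-LBA field of 1 yields an alignment offset of one
	 * logical block (512 bytes in that case).
	 */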
	blk_queue_alignment_offset(sdp->request_queue, alignment);
	if (alignment && sdkp->first_scan)
		sd_printk(KERN_NOTICE, sdkp,
			  "physical block alignment offset: %u\n", alignment);

	if (buffer[14] & 0x80) { /* LBPME */
		if (buffer[14] & 0x40) /* LBPRZ */
			sd_config_discard(sdkp, SD_LBP_WS16);

	sdkp->capacity = lba + 1;
static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
			    unsigned char *buffer)
	unsigned char cmd[16];
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
	unsigned sector_size;

		cmd[0] = READ_CAPACITY;
		memset(&cmd[1], 0, 9);
		memset(buffer, 0, 8);

		the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
					      SD_TIMEOUT, SD_MAX_RETRIES, NULL);

		if (media_not_present(sdkp, &sshdr))

			sense_valid = scsi_sense_valid(&sshdr);
			    sshdr.sense_key == UNIT_ATTENTION &&
			    sshdr.asc == 0x29 && sshdr.ascq == 0x00)
				/* Device reset might occur several times,
				 * give it one more chance */
				if (--reset_retries > 0)

	} while (the_result && retries);

		sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY failed\n");
		read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);

	sector_size = get_unaligned_be32(&buffer[4]);
	lba = get_unaligned_be32(&buffer[0]);

	if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) {
		/* Some buggy (usb cardreader) devices return an lba of
		   0xffffffff when they want to report a size of 0 (with
		   which they really mean no media is present) */
		sdkp->physical_block_size = sector_size;

	if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
		sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
			"kernel compiled with support for large block "

	sdkp->capacity = lba + 1;
	sdkp->physical_block_size = sector_size;
static int sd_try_rc16_first(struct scsi_device *sdp)
	if (sdp->host->max_cmd_len < 16)
	if (sdp->try_rc_10_first)
	if (sdp->scsi_level > SCSI_SPC_2)
	if (scsi_device_protection(sdp))
/*
 * read disk capacity
 */
sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
	struct scsi_device *sdp = sdkp->device;
	sector_t old_capacity = sdkp->capacity;

	if (sd_try_rc16_first(sdp)) {
		sector_size = read_capacity_16(sdkp, sdp, buffer);
		if (sector_size == -EOVERFLOW)
		if (sector_size == -ENODEV)
		if (sector_size < 0)
			sector_size = read_capacity_10(sdkp, sdp, buffer);
		if (sector_size < 0)

		sector_size = read_capacity_10(sdkp, sdp, buffer);
		if (sector_size == -EOVERFLOW)
		if (sector_size < 0)
		if ((sizeof(sdkp->capacity) > 4) &&
		    (sdkp->capacity > 0xffffffffULL)) {
			int old_sector_size = sector_size;
			sd_printk(KERN_NOTICE, sdkp, "Very big device. "
					"Trying to use READ CAPACITY(16).\n");
			sector_size = read_capacity_16(sdkp, sdp, buffer);
			if (sector_size < 0) {
				sd_printk(KERN_NOTICE, sdkp,
					"Using 0xffffffff as device size\n");
				sdkp->capacity = 1 + (sector_t) 0xffffffff;
				sector_size = old_sector_size;

	/* Some devices are known to return the total number of blocks,
	 * not the highest block number.  Some devices have versions
	 * which do this and others which do not.  Some devices we might
	 * suspect of doing this but we don't know for certain.
	 *
	 * If we know the reported capacity is wrong, decrement it.  If
	 * we can only guess, then assume the number of blocks is even
	 * (usually true but not always) and err on the side of lowering
	 */
	if (sdp->fix_capacity ||
	    (sdp->guess_capacity && (sdkp->capacity & 0x01))) {
		sd_printk(KERN_INFO, sdkp, "Adjusting the sector count "
				"from its reported value: %llu\n",
				(unsigned long long) sdkp->capacity);

	if (sector_size == 0) {
		sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, "

	if (sector_size != 512 &&
	    sector_size != 1024 &&
	    sector_size != 2048 &&
	    sector_size != 4096 &&
	    sector_size != 256) {
		sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
		/*
		 * The user might want to re-format the drive with
		 * a supported sectorsize.  Once this happens, it
		 * would be relatively trivial to set the thing up.
		 * For this reason, we leave the thing in the table.
		 */
		/*
		 * set a bogus sector size so the normal read/write
		 * logic in the block layer will eventually refuse any
		 * request on this device without tripping over power
		 * of two sector size assumptions
		 */

	blk_queue_logical_block_size(sdp->request_queue, sector_size);

		char cap_str_2[10], cap_str_10[10];
		u64 sz = (u64)sdkp->capacity << ilog2(sector_size);

		string_get_size(sz, STRING_UNITS_2, cap_str_2,
		string_get_size(sz, STRING_UNITS_10, cap_str_10,
				sizeof(cap_str_10));

		if (sdkp->first_scan || old_capacity != sdkp->capacity) {
			sd_printk(KERN_NOTICE, sdkp,
				  "%llu %d-byte logical blocks: (%s/%s)\n",
				  (unsigned long long)sdkp->capacity,
				  sector_size, cap_str_10, cap_str_2);

			if (sdkp->physical_block_size != sector_size)
				sd_printk(KERN_NOTICE, sdkp,
					  "%u-byte physical blocks\n",
					  sdkp->physical_block_size);

	sdp->use_16_for_rw = (sdkp->capacity > 0xffffffff);

	/* Rescale capacity to 512-byte units */
	if (sector_size == 4096)
		sdkp->capacity <<= 3;
	else if (sector_size == 2048)
		sdkp->capacity <<= 2;
	else if (sector_size == 1024)
		sdkp->capacity <<= 1;
	else if (sector_size == 256)
		sdkp->capacity >>= 1;
2252 blk_queue_physical_block_size(sdp
->request_queue
,
2253 sdkp
->physical_block_size
);
2254 sdkp
->device
->sector_size
= sector_size
;
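/*
 * Worked example of the 512-byte rescale above: a device reporting
 * 1,000,000 logical blocks of 4096 bytes ends up with
 * sdkp->capacity = 1000000 << 3 = 8,000,000 512-byte units, which is the
 * unit set_capacity() and the rest of the block layer expect.
 */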
/* called with buffer of length 512 */
static inline int
sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage,
                 unsigned char *buffer, int len, struct scsi_mode_data *data,
                 struct scsi_sense_hdr *sshdr)
{
        return scsi_mode_sense(sdp, dbd, modepage, buffer, len,
                               SD_TIMEOUT, SD_MAX_RETRIES, data,
                               sshdr);
}
/*
 * read write protect setting, if possible - called only in sd_revalidate_disk()
 * called with buffer of length SD_BUF_SIZE
 */
static void
sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
{
        int res;
        struct scsi_device *sdp = sdkp->device;
        struct scsi_mode_data data;
        int old_wp = sdkp->write_prot;

        set_disk_ro(sdkp->disk, 0);
        if (sdp->skip_ms_page_3f) {
                sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
                return;
        }

        if (sdp->use_192_bytes_for_3f) {
                res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 192, &data, NULL);
        } else {
                /*
                 * First attempt: ask for all pages (0x3F), but only 4 bytes.
                 * We have to start carefully: some devices hang if we ask
                 * for more than is available.
                 */
                res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 4, &data, NULL);

                /*
                 * Second attempt: ask for page 0.  When only page 0 is
                 * implemented, a request for page 3F may return Sense Key
                 * 5: Illegal Request, Sense Code 24: Invalid field in CDB.
                 */
                if (!scsi_status_is_good(res))
                        res = sd_do_mode_sense(sdp, 0, 0, buffer, 4, &data, NULL);

                /*
                 * Third attempt: ask 255 bytes, as we did earlier.
                 */
                if (!scsi_status_is_good(res))
                        res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 255,
                                               &data, NULL);
        }

        if (!scsi_status_is_good(res)) {
                sd_first_printk(KERN_WARNING, sdkp,
                          "Test WP failed, assume Write Enabled\n");
        } else {
                sdkp->write_prot = ((data.device_specific & 0x80) != 0);
                set_disk_ro(sdkp->disk, sdkp->write_prot);
                if (sdkp->first_scan || old_wp != sdkp->write_prot) {
                        sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
                                  sdkp->write_prot ? "on" : "off");
                        sd_printk(KERN_DEBUG, sdkp,
                                  "Mode Sense: %02x %02x %02x %02x\n",
                                  buffer[0], buffer[1], buffer[2], buffer[3]);
                }
        }
}
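/*
 * Note on the 0x80 mask above: the device-specific parameter byte of the
 * MODE SENSE parameter header carries the WP (write protect) flag in bit 7
 * for direct-access devices, so (data.device_specific & 0x80) != 0 means
 * the medium is write protected.
 */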
/*
 * sd_read_cache_type - called only from sd_revalidate_disk()
 * called with buffer of length SD_BUF_SIZE
 */
static void
sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
{
        int len = 0, res;
        struct scsi_device *sdp = sdkp->device;

        int dbd;
        int modepage;
        int first_len;
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;
        int old_wce = sdkp->WCE;
        int old_rcd = sdkp->RCD;
        int old_dpofua = sdkp->DPOFUA;

        if (sdkp->cache_override)
                return;

        first_len = 4;
        if (sdp->skip_ms_page_8) {
                if (sdp->type == TYPE_RBC)
                        goto defaults;
                else {
                        if (sdp->skip_ms_page_3f)
                                goto defaults;
                        modepage = 0x3F;
                        if (sdp->use_192_bytes_for_3f)
                                first_len = 192;
                        dbd = 0;
                }
        } else if (sdp->type == TYPE_RBC) {
                modepage = 6;
                dbd = 8;
        } else {
                modepage = 8;
                dbd = 0;
        }

        /* cautiously ask */
        res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len,
                        &data, &sshdr);

        if (!scsi_status_is_good(res))
                goto bad_sense;

        if (!data.header_length) {
                modepage = 6;
                first_len = 0;
                sd_first_printk(KERN_ERR, sdkp,
                                "Missing header in MODE_SENSE response\n");
        }

        /* that went OK, now ask for the proper length */
        len = data.length;

        /*
         * We're only interested in the first three bytes, actually.
         * But the data cache page is defined for the first 20.
         */
        if (len < 3)
                goto bad_sense;
        else if (len > SD_BUF_SIZE) {
                sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
                          "data from %d to %d bytes\n", len, SD_BUF_SIZE);
                len = SD_BUF_SIZE;
        }
        if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
                len = 192;

        /* Get the data */
        if (len > first_len)
                res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len,
                                &data, &sshdr);

        if (scsi_status_is_good(res)) {
                int offset = data.header_length + data.block_descriptor_length;

                while (offset < len) {
                        u8 page_code = buffer[offset] & 0x3F;
                        u8 spf       = buffer[offset] & 0x40;

                        if (page_code == 8 || page_code == 6) {
                                /* We're interested only in the first 3 bytes.
                                 */
                                if (len - offset <= 2) {
                                        sd_first_printk(KERN_ERR, sdkp,
                                                "Incomplete mode parameter "
                                                        "data\n");
                                        goto defaults;
                                } else {
                                        modepage = page_code;
                                        goto Page_found;
                                }
                        } else {
                                /* Go to the next page */
                                if (spf && len - offset > 3)
                                        offset += 4 + (buffer[offset+2] << 8) +
                                                buffer[offset+3];
                                else if (!spf && len - offset > 1)
                                        offset += 2 + buffer[offset+1];
                                else {
                                        sd_first_printk(KERN_ERR, sdkp,
                                                        "Incomplete mode "
                                                        "parameter data\n");
                                        goto defaults;
                                }
                        }
                }

                sd_first_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
                goto defaults;

        Page_found:
                if (modepage == 8) {
                        sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
                        sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
                } else {
                        sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0);
                        sdkp->RCD = 0;
                }

                sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
                if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
                        sd_first_printk(KERN_NOTICE, sdkp,
                                  "Uses READ/WRITE(6), disabling FUA\n");
                        sdkp->DPOFUA = 0;
                }

                if (sdkp->first_scan || old_wce != sdkp->WCE ||
                    old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
                        sd_printk(KERN_NOTICE, sdkp,
                                  "Write cache: %s, read cache: %s, %s\n",
                                  sdkp->WCE ? "enabled" : "disabled",
                                  sdkp->RCD ? "disabled" : "enabled",
                                  sdkp->DPOFUA ? "supports DPO and FUA"
                                  : "doesn't support DPO or FUA");

                return;
        }

bad_sense:
        if (scsi_sense_valid(&sshdr) &&
            sshdr.sense_key == ILLEGAL_REQUEST &&
            sshdr.asc == 0x24 && sshdr.ascq == 0x0)
                /* Invalid field in CDB */
                sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
        else
                sd_first_printk(KERN_ERR, sdkp,
                                "Asking for cache data failed\n");

defaults:
        if (sdp->wce_default_on) {
                sd_first_printk(KERN_NOTICE, sdkp,
                                "Assuming drive cache: write back\n");
                sdkp->WCE = 1;
        } else {
                sd_first_printk(KERN_ERR, sdkp,
                                "Assuming drive cache: write through\n");
                sdkp->WCE = 0;
        }
        sdkp->RCD = 0;
        sdkp->DPOFUA = 0;
}
/*
 * The ATO bit indicates whether the DIF application tag is available
 * for use by the operating system.
 */
static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
{
        int res, offset;
        struct scsi_device *sdp = sdkp->device;
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;

        if (sdp->type != TYPE_DISK)
                return;

        if (sdkp->protection_type == 0)
                return;

        res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
                              SD_MAX_RETRIES, &data, &sshdr);

        if (!scsi_status_is_good(res) || !data.header_length ||
            data.length < 6) {
                sd_first_printk(KERN_WARNING, sdkp,
                          "getting Control mode page failed, assume no ATO\n");

                if (scsi_sense_valid(&sshdr))
                        sd_print_sense_hdr(sdkp, &sshdr);

                return;
        }

        offset = data.header_length + data.block_descriptor_length;

        if ((buffer[offset] & 0x3f) != 0x0a) {
                sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
                return;
        }

        if ((buffer[offset + 5] & 0x80) == 0)
                return;

        sdkp->ATO = 1;
}
/**
 * sd_read_block_limits - Query disk device for preferred I/O sizes.
 * @sdkp: disk to query
 */
static void sd_read_block_limits(struct scsi_disk *sdkp)
{
        unsigned int sector_sz = sdkp->device->sector_size;
        const int vpd_len = 64;
        unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);

        if (!buffer ||
            /* Block Limits VPD */
            scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
                goto out;

        blk_queue_io_min(sdkp->disk->queue,
                         get_unaligned_be16(&buffer[6]) * sector_sz);
        blk_queue_io_opt(sdkp->disk->queue,
                         get_unaligned_be32(&buffer[12]) * sector_sz);

        if (buffer[3] == 0x3c) {
                unsigned int lba_count, desc_count;

                sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);

                if (!sdkp->lbpme)
                        goto out;

                lba_count = get_unaligned_be32(&buffer[20]);
                desc_count = get_unaligned_be32(&buffer[24]);

                if (lba_count && desc_count)
                        sdkp->max_unmap_blocks = lba_count;

                sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);

                if (buffer[32] & 0x80)
                        sdkp->unmap_alignment =
                                get_unaligned_be32(&buffer[32]) & ~(1 << 31);

                if (!sdkp->lbpvpd) { /* LBP VPD page not provided */

                        if (sdkp->max_unmap_blocks)
                                sd_config_discard(sdkp, SD_LBP_UNMAP);
                        else
                                sd_config_discard(sdkp, SD_LBP_WS16);

                } else {        /* LBP VPD page tells us what to use */

                        if (sdkp->lbpu && sdkp->max_unmap_blocks)
                                sd_config_discard(sdkp, SD_LBP_UNMAP);
                        else if (sdkp->lbpws)
                                sd_config_discard(sdkp, SD_LBP_WS16);
                        else if (sdkp->lbpws10)
                                sd_config_discard(sdkp, SD_LBP_WS10);
                        else
                                sd_config_discard(sdkp, SD_LBP_DISABLE);
                }
        }

 out:
        kfree(buffer);
}
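/*
 * The offsets used above follow the SBC Block Limits VPD page (0xB0):
 * bytes 6-7 optimal transfer length granularity, 12-15 optimal transfer
 * length, 20-23 maximum UNMAP LBA count, 24-27 maximum UNMAP block
 * descriptor count, 28-31 optimal UNMAP granularity, 32-35 UNMAP
 * granularity alignment (top bit = "alignment valid", hence the 0x80 test)
 * and 36-43 maximum WRITE SAME length.  buffer[3] == 0x3c means the device
 * returned the full 0x3c-byte page defined from SBC-3 onward.
 */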
/**
 * sd_read_block_characteristics - Query block dev. characteristics
 * @sdkp: disk to query
 */
static void sd_read_block_characteristics(struct scsi_disk *sdkp)
{
        unsigned char *buffer;
        u16 rot;
        const int vpd_len = 64;

        buffer = kmalloc(vpd_len, GFP_KERNEL);

        if (!buffer ||
            /* Block Device Characteristics VPD */
            scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
                goto out;

        rot = get_unaligned_be16(&buffer[4]);

        if (rot == 1)
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);

 out:
        kfree(buffer);
}
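/*
 * Bytes 4-5 of the Block Device Characteristics VPD page hold the medium
 * rotation rate: 0 means "not reported", 1 means non-rotating medium
 * (i.e. an SSD, which is why QUEUE_FLAG_NONROT is set above), and values
 * of 0x401 and up give the nominal rotation rate in RPM.
 */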
/**
 * sd_read_block_provisioning - Query provisioning VPD page
 * @sdkp: disk to query
 */
static void sd_read_block_provisioning(struct scsi_disk *sdkp)
{
        unsigned char *buffer;
        const int vpd_len = 8;

        if (sdkp->lbpme == 0)
                return;

        buffer = kmalloc(vpd_len, GFP_KERNEL);

        if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
                goto out;

        sdkp->lbpvpd    = 1;
        sdkp->lbpu      = (buffer[5] >> 7) & 1; /* UNMAP */
        sdkp->lbpws     = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
        sdkp->lbpws10   = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */

 out:
        kfree(buffer);
}
static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
{
        struct scsi_device *sdev = sdkp->device;

        if (sdev->host->no_write_same) {
                sdev->no_write_same = 1;

                return;
        }

        if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
                /* too large values might cause issues with arcmsr */
                int vpd_buf_len = 64;

                sdev->no_report_opcodes = 1;

                /* Disable WRITE SAME if REPORT SUPPORTED OPERATION
                 * CODES is unsupported and the device has an ATA
                 * Information VPD page (SAT).
                 */
                if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
                        sdev->no_write_same = 1;
        }

        if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
                sdkp->ws16 = 1;

        if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1)
                sdkp->ws10 = 1;
}
static int sd_try_extended_inquiry(struct scsi_device *sdp)
{
        /*
         * Although VPD inquiries can go to SCSI-2 type devices,
         * some USB ones crash on receiving them, and the pages
         * we currently ask for are for SPC-3 and beyond
         */
        if (sdp->scsi_level > SCSI_SPC_2 && !sdp->skip_vpd_pages)
                return 1;
        return 0;
}
/**
 * sd_revalidate_disk - called the first time a new disk is seen,
 * performs disk spin up, read_capacity, etc.
 * @disk: struct gendisk we care about
 **/
static int sd_revalidate_disk(struct gendisk *disk)
{
        struct scsi_disk *sdkp = scsi_disk(disk);
        struct scsi_device *sdp = sdkp->device;
        unsigned char *buffer;
        unsigned flush = 0;

        SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
                                      "sd_revalidate_disk\n"));

        /*
         * If the device is offline, don't try and read capacity or any
         * of the other niceties.
         */
        if (!scsi_device_online(sdp))
                goto out;

        buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
        if (!buffer) {
                sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
                          "allocation failure.\n");
                goto out;
        }

        sd_spinup_disk(sdkp);

        /*
         * Without media there is no reason to ask; moreover, some devices
         * react badly if we do.
         */
        if (sdkp->media_present) {
                sd_read_capacity(sdkp, buffer);

                if (sd_try_extended_inquiry(sdp)) {
                        sd_read_block_provisioning(sdkp);
                        sd_read_block_limits(sdkp);
                        sd_read_block_characteristics(sdkp);
                }

                sd_read_write_protect_flag(sdkp, buffer);
                sd_read_cache_type(sdkp, buffer);
                sd_read_app_tag_own(sdkp, buffer);
                sd_read_write_same(sdkp, buffer);
        }

        sdkp->first_scan = 0;

        /*
         * We now have all cache related info, determine how we deal
         * with flush requests.
         */
        if (sdkp->WCE) {
                flush |= REQ_FLUSH;
                if (sdkp->DPOFUA)
                        flush |= REQ_FUA;
        }

        blk_queue_flush(sdkp->disk->queue, flush);

        set_capacity(disk, sdkp->capacity);
        sd_config_write_same(sdkp);
        kfree(buffer);

 out:
        return 0;
}
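/*
 * Rationale for the flush setup above: a drive with a write-back cache
 * (WCE set) needs the block layer to issue cache flushes, so REQ_FLUSH is
 * advertised; if the drive also honors DPO/FUA, REQ_FUA writes can be sent
 * directly instead of being emulated with a post-write flush.
 */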
/**
 * sd_unlock_native_capacity - unlock native capacity
 * @disk: struct gendisk to set capacity for
 *
 * Block layer calls this function if it detects that partitions
 * on @disk reach beyond the end of the device.  If the SCSI host
 * implements ->unlock_native_capacity() method, it's invoked to
 * give it a chance to adjust the device capacity.
 *
 * Defined by block layer.  Might sleep.
 */
static void sd_unlock_native_capacity(struct gendisk *disk)
{
        struct scsi_device *sdev = scsi_disk(disk)->device;

        if (sdev->host->hostt->unlock_native_capacity)
                sdev->host->hostt->unlock_native_capacity(sdev);
}
/**
 * sd_format_disk_name - format disk name
 * @prefix: name prefix - ie. "sd" for SCSI disks
 * @index: index of the disk to format name for
 * @buf: output buffer
 * @buflen: length of the output buffer
 *
 * SCSI disk names start at sda.  The 26th device is sdz and the
 * 27th is sdaa.  The last one with a two-letter suffix is sdzz,
 * which is followed by sdaaa.
 *
 * This is basically base-26 counting with one extra 'nil' entry
 * at the beginning of every digit but the first, and can be
 * computed like a base-26 conversion with the index decremented
 * by one after each digit is produced.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
{
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p;
        int unit;

        p = end - 1;
        *p = '\0';
        unit = base;
        do {
                if (p == begin)
                        return -EINVAL;
                *--p = 'a' + (index % unit);
                index = (index / unit) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));

        return 0;
}
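/*
 * Example of the naming scheme implemented above:
 *   index   0 -> "sda"      index  25 -> "sdz"
 *   index  26 -> "sdaa"     index 701 -> "sdzz"
 *   index 702 -> "sdaaa"
 * A caller would typically invoke it as (illustrative only):
 *   char name[DISK_NAME_LEN];
 *   sd_format_disk_name("sd", index, name, DISK_NAME_LEN);
 */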
/*
 * The asynchronous part of sd_probe
 */
static void sd_probe_async(void *data, async_cookie_t cookie)
{
        struct scsi_disk *sdkp = data;
        struct scsi_device *sdp;
        struct gendisk *gd;
        u32 index;
        struct device *dev;

        sdp = sdkp->device;
        gd = sdkp->disk;
        index = sdkp->index;
        dev = &sdp->sdev_gendev;

        gd->major = sd_major((index & 0xf0) >> 4);
        gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
        gd->minors = SD_MINORS;

        gd->fops = &sd_fops;
        gd->private_data = &sdkp->driver;
        gd->queue = sdkp->device->request_queue;

        /* defaults, until the device tells us otherwise */
        sdp->sector_size = 512;
        sdkp->capacity = 0;
        sdkp->media_present = 1;
        sdkp->write_prot = 0;
        sdkp->cache_override = 0;
        sdkp->WCE = 0;
        sdkp->RCD = 0;
        sdkp->ATO = 0;
        sdkp->first_scan = 1;
        sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;

        sd_revalidate_disk(gd);

        blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
        blk_queue_unprep_rq(sdp->request_queue, sd_unprep_fn);

        gd->driverfs_dev = &sdp->sdev_gendev;
        gd->flags = GENHD_FL_EXT_DEVT;
        if (sdp->removable) {
                gd->flags |= GENHD_FL_REMOVABLE;
                gd->events |= DISK_EVENT_MEDIA_CHANGE;
        }

        blk_pm_runtime_init(sdp->request_queue, dev);
        add_disk(gd);
        if (sdkp->capacity)
                sd_dif_config_host(sdkp);

        sd_revalidate_disk(gd);

        sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
                  sdp->removable ? "removable " : "");
        scsi_autopm_put_device(sdp);
        put_device(&sdkp->dev);
}
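/*
 * Note on the device numbering above: sd_major() maps the four bits
 * (index & 0xf0) >> 4 onto the sixteen reserved SCSI disk majors
 * (8, 65-71 and 128-135), and the first_minor packing leaves SD_MINORS
 * (16 in the default, non-CONFIG_DEBUG_BLOCK_EXT_DEVT configuration)
 * minors per disk for partitions.  For example, index 0 becomes major 8,
 * minor 0 ("sda") and index 16 becomes major 65, minor 0 ("sdq").
 */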
/**
 * sd_probe - called during driver initialization and whenever a
 * new scsi device is attached to the system. It is called once
 * for each scsi device (not just disks) present.
 * @dev: pointer to device object
 *
 * Returns 0 if successful (or not interested in this scsi device
 * (e.g. scanner)); 1 when there is an error.
 *
 * Note: this function is invoked from the scsi mid-level.
 * This function sets up the mapping between a given
 * <host,channel,id,lun> (found in sdp) and a new device name
 * (e.g. /dev/sda). More precisely it is the block device major
 * and minor number that is chosen here.
 *
 * Assume sd_probe is not re-entrant (for time being)
 * Also think about sd_probe() and sd_remove() running coincidentally.
 **/
static int sd_probe(struct device *dev)
{
        struct scsi_device *sdp = to_scsi_device(dev);
        struct scsi_disk *sdkp;
        struct gendisk *gd;
        int index;
        int error;

        scsi_autopm_get_device(sdp);
        error = -ENODEV;
        if (sdp->type != TYPE_DISK && sdp->type != TYPE_MOD && sdp->type != TYPE_RBC)
                goto out;

        SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
                                        "sd_probe\n"));

        error = -ENOMEM;
        sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
        if (!sdkp)
                goto out;

        gd = alloc_disk(SD_MINORS);
        if (!gd)
                goto out_free;

        do {
                if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
                        goto out_put;

                spin_lock(&sd_index_lock);
                error = ida_get_new(&sd_index_ida, &index);
                spin_unlock(&sd_index_lock);
        } while (error == -EAGAIN);

        if (error) {
                sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
                goto out_put;
        }

        error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
        if (error) {
                sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
                goto out_free_index;
        }

        sdkp->device = sdp;
        sdkp->driver = &sd_template;
        sdkp->disk = gd;
        sdkp->index = index;
        atomic_set(&sdkp->openers, 0);
        atomic_set(&sdkp->device->ioerr_cnt, 0);

        if (!sdp->request_queue->rq_timeout) {
                if (sdp->type != TYPE_MOD)
                        blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
                else
                        blk_queue_rq_timeout(sdp->request_queue,
                                             SD_MOD_TIMEOUT);
        }

        device_initialize(&sdkp->dev);
        sdkp->dev.parent = dev;
        sdkp->dev.class = &sd_disk_class;
        dev_set_name(&sdkp->dev, "%s", dev_name(dev));

        if (device_add(&sdkp->dev))
                goto out_free_index;

        get_device(dev);
        dev_set_drvdata(dev, sdkp);

        get_device(&sdkp->dev); /* prevent release before async_schedule */
        async_schedule_domain(sd_probe_async, sdkp, &scsi_sd_probe_domain);

        return 0;

 out_free_index:
        spin_lock(&sd_index_lock);
        ida_remove(&sd_index_ida, index);
        spin_unlock(&sd_index_lock);
 out_put:
        put_disk(gd);
 out_free:
        kfree(sdkp);
 out:
        scsi_autopm_put_device(sdp);
        return error;
}
/**
 * sd_remove - called whenever a scsi disk (previously recognized by
 * sd_probe) is detached from the system. It is called (potentially
 * multiple times) during sd module unload.
 * @dev: pointer to the mid level scsi device object
 *
 * Note: this function is invoked from the scsi mid-level.
 * This function potentially frees up a device name (e.g. /dev/sdc)
 * that could be re-used by a subsequent sd_probe().
 * This function is not called when the built-in sd driver is "exit-ed".
 **/
static int sd_remove(struct device *dev)
{
        struct scsi_disk *sdkp;
        dev_t devt;

        sdkp = dev_get_drvdata(dev);
        devt = disk_devt(sdkp->disk);
        scsi_autopm_get_device(sdkp->device);

        async_synchronize_full_domain(&scsi_sd_pm_domain);
        async_synchronize_full_domain(&scsi_sd_probe_domain);
        blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
        blk_queue_unprep_rq(sdkp->device->request_queue, NULL);
        device_del(&sdkp->dev);
        del_gendisk(sdkp->disk);
        sd_shutdown(dev);

        blk_register_region(devt, SD_MINORS, NULL,
                            sd_default_probe, NULL, NULL);

        mutex_lock(&sd_ref_mutex);
        dev_set_drvdata(dev, NULL);
        put_device(&sdkp->dev);
        mutex_unlock(&sd_ref_mutex);

        return 0;
}
/**
 * scsi_disk_release - Called to free the scsi_disk structure
 * @dev: pointer to embedded class device
 *
 * sd_ref_mutex must be held entering this routine.  Because it is
 * called on last put, you should always use the scsi_disk_get()
 * scsi_disk_put() helpers which manipulate the semaphore directly
 * and never do a direct put_device.
 **/
static void scsi_disk_release(struct device *dev)
{
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        struct gendisk *disk = sdkp->disk;

        spin_lock(&sd_index_lock);
        ida_remove(&sd_index_ida, sdkp->index);
        spin_unlock(&sd_index_lock);

        disk->private_data = NULL;
        put_disk(disk);
        put_device(&sdkp->device->sdev_gendev);

        kfree(sdkp);
}
static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
{
        unsigned char cmd[6] = { START_STOP };  /* START_VALID */
        struct scsi_sense_hdr sshdr;
        struct scsi_device *sdp = sdkp->device;
        int res;

        if (start)
                cmd[4] |= 1;    /* START */

        if (sdp->start_stop_pwr_cond)
                cmd[4] |= start ? 1 << 4 : 3 << 4;      /* Active or Standby */

        if (!scsi_device_online(sdp))
                return -ENODEV;

        res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
                                     SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM);
        if (res) {
                sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n");
                sd_print_result(sdkp, res);
                if (driver_byte(res) & DRIVER_SENSE)
                        sd_print_sense_hdr(sdkp, &sshdr);
                if (scsi_sense_valid(&sshdr) &&
                    /* 0x3a is medium not present */
                    sshdr.asc == 0x3a)
                        res = 0;
        }

        /* SCSI error codes must not go to the generic layer */
        if (res)
                return -EIO;

        return 0;
}
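/*
 * CDB layout used above: in START STOP UNIT, byte 4 bit 0 is the START bit
 * and bits 7-4 hold the POWER CONDITION field, where 0x1 requests the
 * ACTIVE state and 0x3 requests STANDBY - which is exactly what
 * "start ? 1 << 4 : 3 << 4" encodes when the device supports power
 * conditions.
 */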
/*
 * Send a SYNCHRONIZE CACHE instruction down to the device through
 * the normal SCSI command structure.  Wait for the command to
 * complete.
 */
static void sd_shutdown(struct device *dev)
{
        struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);

        if (!sdkp)
                return;         /* this can happen */

        if (pm_runtime_suspended(dev))
                goto exit;

        if (sdkp->WCE && sdkp->media_present) {
                sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
                sd_sync_cache(sdkp);
        }

        if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
                sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
                sd_start_stop_device(sdkp, 0);
        }

exit:
        scsi_disk_put(sdkp);
}
static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
{
        struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
        int ret = 0;

        if (!sdkp)
                return 0;       /* this can happen */

        if (sdkp->WCE && sdkp->media_present) {
                sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
                ret = sd_sync_cache(sdkp);
                if (ret) {
                        /* ignore OFFLINE device */
                        if (ret == -ENODEV)
                                ret = 0;
                        goto done;
                }
        }

        if (sdkp->device->manage_start_stop) {
                sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
                /* an error is not worth aborting a system sleep */
                ret = sd_start_stop_device(sdkp, 0);
                if (ignore_stop_errors)
                        ret = 0;
        }

done:
        scsi_disk_put(sdkp);
        return ret;
}

static int sd_suspend_system(struct device *dev)
{
        return sd_suspend_common(dev, true);
}

static int sd_suspend_runtime(struct device *dev)
{
        return sd_suspend_common(dev, false);
}
static int sd_resume(struct device *dev)
{
        struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
        int ret = 0;

        if (!sdkp->device->manage_start_stop)
                goto done;

        sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
        ret = sd_start_stop_device(sdkp, 1);

done:
        scsi_disk_put(sdkp);
        return ret;
}
/**
 * init_sd - entry point for this driver (both when built in or when
 * a module).
 *
 * Note: this function registers this driver with the scsi mid-level.
 **/
static int __init init_sd(void)
{
        int majors = 0, i, err;

        SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));

        for (i = 0; i < SD_MAJORS; i++) {
                if (register_blkdev(sd_major(i), "sd") != 0)
                        continue;
                majors++;
                blk_register_region(sd_major(i), SD_MINORS, NULL,
                                    sd_default_probe, NULL, NULL);
        }

        if (!majors)
                return -ENODEV;

        err = class_register(&sd_disk_class);
        if (err)
                goto err_out;

        sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
                                         0, 0, NULL);
        if (!sd_cdb_cache) {
                printk(KERN_ERR "sd: can't init extended cdb cache\n");
                err = -ENOMEM;
                goto err_out_class;
        }

        sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache);
        if (!sd_cdb_pool) {
                printk(KERN_ERR "sd: can't init extended cdb pool\n");
                err = -ENOMEM;
                goto err_out_cache;
        }

        err = scsi_register_driver(&sd_template.gendrv);
        if (err)
                goto err_out_driver;

        return 0;

err_out_driver:
        mempool_destroy(sd_cdb_pool);

err_out_cache:
        kmem_cache_destroy(sd_cdb_cache);

err_out_class:
        class_unregister(&sd_disk_class);
err_out:
        for (i = 0; i < SD_MAJORS; i++)
                unregister_blkdev(sd_major(i), "sd");
        return err;
}
/**
 * exit_sd - exit point for this driver (when it is a module).
 *
 * Note: this function unregisters this driver from the scsi mid-level.
 **/
static void __exit exit_sd(void)
{
        int i;

        SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));

        scsi_unregister_driver(&sd_template.gendrv);
        mempool_destroy(sd_cdb_pool);
        kmem_cache_destroy(sd_cdb_cache);

        class_unregister(&sd_disk_class);

        for (i = 0; i < SD_MAJORS; i++) {
                blk_unregister_region(sd_major(i), SD_MINORS);
                unregister_blkdev(sd_major(i), "sd");
        }
}

module_init(init_sd);
module_exit(exit_sd);
static void sd_print_sense_hdr(struct scsi_disk *sdkp,
                               struct scsi_sense_hdr *sshdr)
{
        sd_printk(KERN_INFO, sdkp, " ");
        scsi_show_sense_hdr(sshdr);
        sd_printk(KERN_INFO, sdkp, " ");
        scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
}

static void sd_print_result(struct scsi_disk *sdkp, int result)
{
        sd_printk(KERN_INFO, sdkp, " ");
        scsi_show_result(result);
}