1/*
2 * sd.c Copyright (C) 1992 Drew Eckhardt
3 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
4 *
5 * Linux scsi disk driver
6 * Initial versions: Drew Eckhardt
7 * Subsequent revisions: Eric Youngdale
8 * Modification history:
9 * - Drew Eckhardt <drew@colorado.edu> original
10 * - Eric Youngdale <eric@andante.org> add scatter-gather, multiple
11 * outstanding request, and other enhancements.
12 * Support loadable low-level scsi drivers.
13 * - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
14 * eight major numbers.
15 * - Richard Gooch <rgooch@atnf.csiro.au> support devfs.
16 * - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in
17 * sd_init and cleanups.
18 * - Alex Davis <letmein@erols.com> Fix problem where partition info
19 * not being read in sd_open. Fix problem where removable media
20 * could be ejected after sd_open.
21 * - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
22 * - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox
23 * <willy@debian.org>, Kurt Garloff <garloff@suse.de>:
24 * Support 32k/1M disks.
25 *
26 * Logging policy (needs CONFIG_SCSI_LOGGING defined):
27 * - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
28 * - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
29 * - entering sd_ioctl: SCSI_LOG_IOCTL level 1
30 * - entering other commands: SCSI_LOG_HLQUEUE level 3
31 * Note: when the logging level is set by the user, it must be greater
32 * than the level indicated above to trigger output.
33 */
34
35#include <linux/module.h>
36#include <linux/fs.h>
37#include <linux/kernel.h>
38#include <linux/mm.h>
39#include <linux/bio.h>
40#include <linux/genhd.h>
41#include <linux/hdreg.h>
42#include <linux/errno.h>
43#include <linux/idr.h>
44#include <linux/interrupt.h>
45#include <linux/init.h>
46#include <linux/blkdev.h>
47#include <linux/blkpg.h>
48#include <linux/delay.h>
49#include <linux/mutex.h>
50#include <linux/string_helpers.h>
51#include <linux/async.h>
52#include <linux/slab.h>
53#include <linux/pm_runtime.h>
54#include <asm/uaccess.h>
55#include <asm/unaligned.h>
56
57#include <scsi/scsi.h>
58#include <scsi/scsi_cmnd.h>
59#include <scsi/scsi_dbg.h>
60#include <scsi/scsi_device.h>
61#include <scsi/scsi_driver.h>
62#include <scsi/scsi_eh.h>
63#include <scsi/scsi_host.h>
64#include <scsi/scsi_ioctl.h>
65#include <scsi/scsicam.h>
66
67#include "sd.h"
68#include "scsi_priv.h"
69#include "scsi_logging.h"
70
71MODULE_AUTHOR("Eric Youngdale");
72MODULE_DESCRIPTION("SCSI disk (sd) driver");
73MODULE_LICENSE("GPL");
74
75MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
76MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
77MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
78MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
79MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
80MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
81MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
82MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
83MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
84MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
85MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
86MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
87MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
88MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
89MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
90MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
91MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
92MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
93MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
94
95#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
96#define SD_MINORS 16
97#else
98#define SD_MINORS 0
99#endif
100
101static void sd_config_discard(struct scsi_disk *, unsigned int);
102static void sd_config_write_same(struct scsi_disk *);
103static int sd_revalidate_disk(struct gendisk *);
104static void sd_unlock_native_capacity(struct gendisk *disk);
105static int sd_probe(struct device *);
106static int sd_remove(struct device *);
107static void sd_shutdown(struct device *);
108static int sd_suspend_system(struct device *);
109static int sd_suspend_runtime(struct device *);
110static int sd_resume(struct device *);
111static void sd_rescan(struct device *);
112static int sd_init_command(struct scsi_cmnd *SCpnt);
113static void sd_uninit_command(struct scsi_cmnd *SCpnt);
114static int sd_done(struct scsi_cmnd *);
115static int sd_eh_action(struct scsi_cmnd *, int);
116static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
117static void scsi_disk_release(struct device *cdev);
118static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
119static void sd_print_result(const struct scsi_disk *, const char *, int);
120
121static DEFINE_SPINLOCK(sd_index_lock);
122static DEFINE_IDA(sd_index_ida);
123
124/* This semaphore is used to mediate the 0->1 reference get in the
125 * face of object destruction (i.e. we can't allow a get on an
126 * object after last put) */
127static DEFINE_MUTEX(sd_ref_mutex);
128
129static struct kmem_cache *sd_cdb_cache;
130static mempool_t *sd_cdb_pool;
131
132static const char *sd_cache_types[] = {
133 "write through", "none", "write back",
134 "write back, no read (daft)"
135};
136
137static void sd_set_flush_flag(struct scsi_disk *sdkp)
138{
139 unsigned flush = 0;
140
141 if (sdkp->WCE) {
142 flush |= REQ_FLUSH;
143 if (sdkp->DPOFUA)
144 flush |= REQ_FUA;
145 }
146
147 blk_queue_flush(sdkp->disk->queue, flush);
148}
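/*
 * Worked example for the flag mapping above: a disk whose caching mode
 * page reports WCE=1 and DPOFUA=1 gets both REQ_FLUSH and REQ_FUA set on
 * its request queue; WCE=1 with DPOFUA=0 sets only REQ_FLUSH; and a
 * write-through disk (WCE=0) leaves the queue with no flush capabilities,
 * so the block layer will not issue cache flushes to it.
 */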
149
ee959b00 150static ssize_t
151cache_type_store(struct device *dev, struct device_attribute *attr,
152 const char *buf, size_t count)
153{
154 int i, ct = -1, rcd, wce, sp;
ee959b00 155 struct scsi_disk *sdkp = to_scsi_disk(dev);
156 struct scsi_device *sdp = sdkp->device;
157 char buffer[64];
158 char *buffer_data;
159 struct scsi_mode_data data;
160 struct scsi_sense_hdr sshdr;
2ee3e26c 161 static const char temp[] = "temporary ";
162 int len;
163
164 if (sdp->type != TYPE_DISK)
165 /* no cache control on RBC devices; theoretically they
166 * can do it, but there's probably so many exceptions
167 * it's not worth the risk */
168 return -EINVAL;
169
170 if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
171 buf += sizeof(temp) - 1;
172 sdkp->cache_override = 1;
173 } else {
174 sdkp->cache_override = 0;
175 }
176
6391a113 177 for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
439d77f7 178 len = strlen(sd_cache_types[i]);
179 if (strncmp(sd_cache_types[i], buf, len) == 0 &&
180 buf[len] == '\n') {
181 ct = i;
182 break;
183 }
184 }
185 if (ct < 0)
186 return -EINVAL;
187 rcd = ct & 0x01 ? 1 : 0;
2eefd57b 188 wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;
189
190 if (sdkp->cache_override) {
191 sdkp->WCE = wce;
192 sdkp->RCD = rcd;
cb2fb68d 193 sd_set_flush_flag(sdkp);
194 return count;
195 }
196
197 if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
198 SD_MAX_RETRIES, &data, NULL))
199 return -EINVAL;
a9312fb8 200 len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
201 data.block_descriptor_length);
202 buffer_data = buffer + data.header_length +
203 data.block_descriptor_length;
204 buffer_data[2] &= ~0x05;
205 buffer_data[2] |= wce << 2 | rcd;
206 sp = buffer_data[0] & 0x80 ? 1 : 0;
2c5d16d6 207 buffer_data[0] &= ~0x80;
208
209 if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
210 SD_MAX_RETRIES, &data, &sshdr)) {
211 if (scsi_sense_valid(&sshdr))
e73aec82 212 sd_print_sense_hdr(sdkp, &sshdr);
213 return -EINVAL;
214 }
f98a8cae 215 revalidate_disk(sdkp->disk);
216 return count;
217}
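/*
 * Example of the store interface above (the path shown assumes the usual
 * scsi_disk class device naming of the h:c:t:l form):
 *
 *   echo "write back" > /sys/class/scsi_disk/0:0:0:0/cache_type
 *
 * issues a MODE SELECT that enables the write cache, while prefixing the
 * value with "temporary ", e.g. "temporary write back", only updates the
 * driver's cached WCE/RCD state (the cache_override path) without
 * touching the device's caching mode page.
 */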
218
ee959b00 219static ssize_t
220manage_start_stop_show(struct device *dev, struct device_attribute *attr,
221 char *buf)
222{
223 struct scsi_disk *sdkp = to_scsi_disk(dev);
224 struct scsi_device *sdp = sdkp->device;
225
226 return snprintf(buf, 20, "%u\n", sdp->manage_start_stop);
227}
228
229static ssize_t
230manage_start_stop_store(struct device *dev, struct device_attribute *attr,
231 const char *buf, size_t count)
c3c94c5a 232{
ee959b00 233 struct scsi_disk *sdkp = to_scsi_disk(dev);
234 struct scsi_device *sdp = sdkp->device;
235
236 if (!capable(CAP_SYS_ADMIN))
237 return -EACCES;
238
239 sdp->manage_start_stop = simple_strtoul(buf, NULL, 10);
240
241 return count;
242}
e1ea2351 243static DEVICE_ATTR_RW(manage_start_stop);
c3c94c5a 244
ee959b00 245static ssize_t
246allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
247{
248 struct scsi_disk *sdkp = to_scsi_disk(dev);
249
250 return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);
251}
252
253static ssize_t
254allow_restart_store(struct device *dev, struct device_attribute *attr,
255 const char *buf, size_t count)
a144c5ae 256{
ee959b00 257 struct scsi_disk *sdkp = to_scsi_disk(dev);
258 struct scsi_device *sdp = sdkp->device;
259
260 if (!capable(CAP_SYS_ADMIN))
261 return -EACCES;
262
263 if (sdp->type != TYPE_DISK)
264 return -EINVAL;
265
266 sdp->allow_restart = simple_strtoul(buf, NULL, 10);
267
268 return count;
269}
e1ea2351 270static DEVICE_ATTR_RW(allow_restart);
a144c5ae 271
ee959b00 272static ssize_t
e1ea2351 273cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
6bdaa1f1 274{
ee959b00 275 struct scsi_disk *sdkp = to_scsi_disk(dev);
276 int ct = sdkp->RCD + 2*sdkp->WCE;
277
278 return snprintf(buf, 40, "%s\n", sd_cache_types[ct]);
279}
e1ea2351 280static DEVICE_ATTR_RW(cache_type);
6bdaa1f1 281
ee959b00 282static ssize_t
e1ea2351 283FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
6bdaa1f1 284{
ee959b00 285 struct scsi_disk *sdkp = to_scsi_disk(dev);
286
287 return snprintf(buf, 20, "%u\n", sdkp->DPOFUA);
288}
e1ea2351 289static DEVICE_ATTR_RO(FUA);
6bdaa1f1 290
ee959b00 291static ssize_t
292protection_type_show(struct device *dev, struct device_attribute *attr,
293 char *buf)
294{
295 struct scsi_disk *sdkp = to_scsi_disk(dev);
296
297 return snprintf(buf, 20, "%u\n", sdkp->protection_type);
298}
299
8172499a 300static ssize_t
301protection_type_store(struct device *dev, struct device_attribute *attr,
302 const char *buf, size_t count)
303{
304 struct scsi_disk *sdkp = to_scsi_disk(dev);
305 unsigned int val;
306 int err;
307
308 if (!capable(CAP_SYS_ADMIN))
309 return -EACCES;
310
311 err = kstrtouint(buf, 10, &val);
312
313 if (err)
314 return err;
315
316 if (val >= 0 && val <= SD_DIF_TYPE3_PROTECTION)
317 sdkp->protection_type = val;
318
319 return count;
320}
e1ea2351 321static DEVICE_ATTR_RW(protection_type);
8172499a 322
518fa8e3 323static ssize_t
324protection_mode_show(struct device *dev, struct device_attribute *attr,
325 char *buf)
326{
327 struct scsi_disk *sdkp = to_scsi_disk(dev);
328 struct scsi_device *sdp = sdkp->device;
329 unsigned int dif, dix;
330
331 dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
332 dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);
333
334 if (!dix && scsi_host_dix_capable(sdp->host, SD_DIF_TYPE0_PROTECTION)) {
335 dif = 0;
336 dix = 1;
337 }
338
339 if (!dif && !dix)
340 return snprintf(buf, 20, "none\n");
341
342 return snprintf(buf, 20, "%s%u\n", dix ? "dix" : "dif", dif);
343}
e1ea2351 344static DEVICE_ATTR_RO(protection_mode);
518fa8e3 345
e0597d70 346static ssize_t
e1ea2351 347app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
348{
349 struct scsi_disk *sdkp = to_scsi_disk(dev);
350
351 return snprintf(buf, 20, "%u\n", sdkp->ATO);
352}
e1ea2351 353static DEVICE_ATTR_RO(app_tag_own);
e0597d70 354
e339c1a7 355static ssize_t
356thin_provisioning_show(struct device *dev, struct device_attribute *attr,
357 char *buf)
358{
359 struct scsi_disk *sdkp = to_scsi_disk(dev);
360
361 return snprintf(buf, 20, "%u\n", sdkp->lbpme);
362}
e1ea2351 363static DEVICE_ATTR_RO(thin_provisioning);
364
365static const char *lbp_mode[] = {
366 [SD_LBP_FULL] = "full",
367 [SD_LBP_UNMAP] = "unmap",
368 [SD_LBP_WS16] = "writesame_16",
369 [SD_LBP_WS10] = "writesame_10",
370 [SD_LBP_ZERO] = "writesame_zero",
371 [SD_LBP_DISABLE] = "disabled",
372};
373
374static ssize_t
375provisioning_mode_show(struct device *dev, struct device_attribute *attr,
376 char *buf)
377{
378 struct scsi_disk *sdkp = to_scsi_disk(dev);
379
380 return snprintf(buf, 20, "%s\n", lbp_mode[sdkp->provisioning_mode]);
381}
382
383static ssize_t
384provisioning_mode_store(struct device *dev, struct device_attribute *attr,
385 const char *buf, size_t count)
386{
387 struct scsi_disk *sdkp = to_scsi_disk(dev);
388 struct scsi_device *sdp = sdkp->device;
389
390 if (!capable(CAP_SYS_ADMIN))
391 return -EACCES;
392
393 if (sdp->type != TYPE_DISK)
394 return -EINVAL;
395
396 if (!strncmp(buf, lbp_mode[SD_LBP_UNMAP], 20))
397 sd_config_discard(sdkp, SD_LBP_UNMAP);
398 else if (!strncmp(buf, lbp_mode[SD_LBP_WS16], 20))
399 sd_config_discard(sdkp, SD_LBP_WS16);
400 else if (!strncmp(buf, lbp_mode[SD_LBP_WS10], 20))
401 sd_config_discard(sdkp, SD_LBP_WS10);
402 else if (!strncmp(buf, lbp_mode[SD_LBP_ZERO], 20))
403 sd_config_discard(sdkp, SD_LBP_ZERO);
404 else if (!strncmp(buf, lbp_mode[SD_LBP_DISABLE], 20))
405 sd_config_discard(sdkp, SD_LBP_DISABLE);
406 else
407 return -EINVAL;
408
409 return count;
e339c1a7 410}
e1ea2351 411static DEVICE_ATTR_RW(provisioning_mode);
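/*
 * Example of the store interface above: writing one of the lbp_mode[]
 * strings, e.g.
 *
 *   echo "unmap" > /sys/class/scsi_disk/0:0:0:0/provisioning_mode
 *
 * switches discard handling to UNMAP via sd_config_discard(), while
 * "disabled" turns discard support off entirely; any other string is
 * rejected with -EINVAL.
 */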
e339c1a7 412
18a4d0a2 413static ssize_t
414max_medium_access_timeouts_show(struct device *dev,
415 struct device_attribute *attr, char *buf)
416{
417 struct scsi_disk *sdkp = to_scsi_disk(dev);
418
419 return snprintf(buf, 20, "%u\n", sdkp->max_medium_access_timeouts);
420}
421
422static ssize_t
423max_medium_access_timeouts_store(struct device *dev,
424 struct device_attribute *attr, const char *buf,
425 size_t count)
426{
427 struct scsi_disk *sdkp = to_scsi_disk(dev);
428 int err;
429
430 if (!capable(CAP_SYS_ADMIN))
431 return -EACCES;
432
433 err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);
434
435 return err ? err : count;
436}
e1ea2351 437static DEVICE_ATTR_RW(max_medium_access_timeouts);
18a4d0a2 438
5db44863 439static ssize_t
440max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
441 char *buf)
442{
443 struct scsi_disk *sdkp = to_scsi_disk(dev);
444
445 return snprintf(buf, 20, "%u\n", sdkp->max_ws_blocks);
446}
447
448static ssize_t
449max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
450 const char *buf, size_t count)
451{
452 struct scsi_disk *sdkp = to_scsi_disk(dev);
453 struct scsi_device *sdp = sdkp->device;
454 unsigned long max;
455 int err;
456
457 if (!capable(CAP_SYS_ADMIN))
458 return -EACCES;
459
460 if (sdp->type != TYPE_DISK)
461 return -EINVAL;
462
463 err = kstrtoul(buf, 10, &max);
464
465 if (err)
466 return err;
467
468 if (max == 0)
469 sdp->no_write_same = 1;
470 else if (max <= SD_MAX_WS16_BLOCKS) {
471 sdp->no_write_same = 0;
5db44863 472 sdkp->max_ws_blocks = max;
66c28f97 473 }
474
475 sd_config_write_same(sdkp);
476
477 return count;
478}
479static DEVICE_ATTR_RW(max_write_same_blocks);
480
481static struct attribute *sd_disk_attrs[] = {
482 &dev_attr_cache_type.attr,
483 &dev_attr_FUA.attr,
484 &dev_attr_allow_restart.attr,
485 &dev_attr_manage_start_stop.attr,
486 &dev_attr_protection_type.attr,
487 &dev_attr_protection_mode.attr,
488 &dev_attr_app_tag_own.attr,
489 &dev_attr_thin_provisioning.attr,
490 &dev_attr_provisioning_mode.attr,
491 &dev_attr_max_write_same_blocks.attr,
492 &dev_attr_max_medium_access_timeouts.attr,
493 NULL,
6bdaa1f1 494};
e1ea2351 495ATTRIBUTE_GROUPS(sd_disk);
496
497static struct class sd_disk_class = {
498 .name = "scsi_disk",
499 .owner = THIS_MODULE,
ee959b00 500 .dev_release = scsi_disk_release,
e1ea2351 501 .dev_groups = sd_disk_groups,
6bdaa1f1 502};
1da177e4 503
691e3d31 504static const struct dev_pm_ops sd_pm_ops = {
95897910 505 .suspend = sd_suspend_system,
691e3d31 506 .resume = sd_resume,
95897910 507 .poweroff = sd_suspend_system,
691e3d31 508 .restore = sd_resume,
95897910 509 .runtime_suspend = sd_suspend_runtime,
510 .runtime_resume = sd_resume,
511};
512
1da177e4 513static struct scsi_driver sd_template = {
514 .gendrv = {
515 .name = "sd",
3af6b352 516 .owner = THIS_MODULE,
517 .probe = sd_probe,
518 .remove = sd_remove,
519 .shutdown = sd_shutdown,
691e3d31 520 .pm = &sd_pm_ops,
521 },
522 .rescan = sd_rescan,
523 .init_command = sd_init_command,
524 .uninit_command = sd_uninit_command,
7b3d9545 525 .done = sd_done,
18a4d0a2 526 .eh_action = sd_eh_action,
527};
528
529/*
530 * Dummy kobj_map->probe function.
531 * The default ->probe function will call modprobe, which is
532 * pointless as this module is already loaded.
533 */
534static struct kobject *sd_default_probe(dev_t devt, int *partno, void *data)
535{
536 return NULL;
537}
538
539/*
540 * Device no to disk mapping:
541 *
542 * major disc2 disc p1
543 * |............|.............|....|....| <- dev_t
544 * 31 20 19 8 7 4 3 0
545 *
546 * Inside a major, we have 16k disks, however mapped non-
547 * contiguously. The first 16 disks are for major0, the next
548 * ones with major1, ... Disk 256 is for major0 again, disk 272
549 * for major1, ...
550 * As we stay compatible with our numbering scheme, we can reuse
551 * the well-known SCSI majors 8, 65--71, 136--143.
552 */
553static int sd_major(int major_idx)
554{
555 switch (major_idx) {
556 case 0:
557 return SCSI_DISK0_MAJOR;
558 case 1 ... 7:
559 return SCSI_DISK1_MAJOR + major_idx - 1;
560 case 8 ... 15:
561 return SCSI_DISK8_MAJOR + major_idx - 8;
562 default:
563 BUG();
564 return 0; /* shut up gcc */
565 }
566}
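/*
 * Worked example of the mapping above, using the conventional values
 * SCSI_DISK0_MAJOR == 8 and SCSI_DISK1_MAJOR == 65: major_idx 0 yields
 * major 8, major_idx 1 yields 65, major_idx 7 yields 71, and indexes 8
 * and up continue from SCSI_DISK8_MAJOR. With SD_MINORS == 16 each major
 * carries 16 whole-disk nodes (each with 15 partition minors) before the
 * numbering wraps back to major_idx 0, as described in the comment block
 * above sd_major().
 */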
567
3d9a1f53 568static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
569{
570 struct scsi_disk *sdkp = NULL;
571
572 mutex_lock(&sd_ref_mutex);
573
574 if (disk->private_data) {
575 sdkp = scsi_disk(disk);
576 if (scsi_device_get(sdkp->device) == 0)
ee959b00 577 get_device(&sdkp->dev);
578 else
579 sdkp = NULL;
580 }
0b950672 581 mutex_unlock(&sd_ref_mutex);
582 return sdkp;
583}
584
585static void scsi_disk_put(struct scsi_disk *sdkp)
586{
587 struct scsi_device *sdev = sdkp->device;
588
0b950672 589 mutex_lock(&sd_ref_mutex);
ee959b00 590 put_device(&sdkp->dev);
1da177e4 591 scsi_device_put(sdev);
0b950672 592 mutex_unlock(&sd_ref_mutex);
593}
594
595static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
596 unsigned int dix, unsigned int dif)
35e1a5d9 597{
598 struct bio *bio = scmd->request->bio;
599 unsigned int prot_op = sd_prot_op(rq_data_dir(scmd->request), dix, dif);
600 unsigned int protect = 0;
601
602 if (dix) { /* DIX Type 0, 1, 2, 3 */
603 if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
604 scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;
605
606 if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
607 scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
608 }
609
610 if (dif != SD_DIF_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */
611 scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;
612
613 if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
614 scmd->prot_flags |= SCSI_PROT_REF_CHECK;
615 }
616
617 if (dif) { /* DIX/DIF Type 1, 2, 3 */
618 scmd->prot_flags |= SCSI_PROT_TRANSFER_PI;
619
620 if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
621 protect = 3 << 5; /* Disable target PI checking */
622 else
623 protect = 1 << 5; /* Enable target PI checking */
624 }
625
626 scsi_set_prot_op(scmd, prot_op);
627 scsi_set_prot_type(scmd, dif);
628 scmd->prot_flags &= sd_prot_flag_mask(prot_op);
629
630 return protect;
631}
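/*
 * Example of the protect byte computed above: for a Type 1 protected
 * disk (dif == 1) with integrity metadata attached to the bio (dix != 0)
 * and no NOCHECK flags, the command gets SCSI_PROT_GUARD_CHECK,
 * SCSI_PROT_REF_CHECK, SCSI_PROT_REF_INCREMENT and SCSI_PROT_TRANSFER_PI
 * set, and the returned value is 1 << 5 (RDPROTECT/WRPROTECT = 001b) so
 * the target checks PI as well; BIP_DISK_NOCHECK instead yields 3 << 5,
 * which tells the target to skip its own checking.
 */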
632
633static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
634{
635 struct request_queue *q = sdkp->disk->queue;
636 unsigned int logical_block_size = sdkp->device->sector_size;
637 unsigned int max_blocks = 0;
638
7985090a 639 q->limits.discard_zeroes_data = 0;
640
641 /*
642 * When LBPRZ is reported, discard alignment and granularity
643 * must be fixed to the logical block size. Otherwise the block
644 * layer will drop misaligned portions of the request which can
645 * lead to data corruption. If LBPRZ is not set, we honor the
646 * device preference.
647 */
648 if (sdkp->lbprz) {
649 q->limits.discard_alignment = 0;
650 q->limits.discard_granularity = 1;
651 } else {
652 q->limits.discard_alignment = sdkp->unmap_alignment *
653 logical_block_size;
654 q->limits.discard_granularity =
655 max(sdkp->physical_block_size,
656 sdkp->unmap_granularity * logical_block_size);
657 }
c98a0eb0 658
659 sdkp->provisioning_mode = mode;
660
661 switch (mode) {
662
663 case SD_LBP_DISABLE:
2bb4cd5c 664 blk_queue_max_discard_sectors(q, 0);
665 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
666 return;
667
668 case SD_LBP_UNMAP:
669 max_blocks = min_not_zero(sdkp->max_unmap_blocks,
670 (u32)SD_MAX_WS16_BLOCKS);
671 break;
672
673 case SD_LBP_WS16:
674 max_blocks = min_not_zero(sdkp->max_ws_blocks,
675 (u32)SD_MAX_WS16_BLOCKS);
7985090a 676 q->limits.discard_zeroes_data = sdkp->lbprz;
677 break;
678
679 case SD_LBP_WS10:
680 max_blocks = min_not_zero(sdkp->max_ws_blocks,
681 (u32)SD_MAX_WS10_BLOCKS);
7985090a 682 q->limits.discard_zeroes_data = sdkp->lbprz;
683 break;
684
685 case SD_LBP_ZERO:
686 max_blocks = min_not_zero(sdkp->max_ws_blocks,
687 (u32)SD_MAX_WS10_BLOCKS);
688 q->limits.discard_zeroes_data = 1;
689 break;
690 }
691
2bb4cd5c 692 blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
c98a0eb0 693 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
694}
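/*
 * Example of the limits configured above: a thin-provisioned disk with
 * 512-byte logical blocks, LBPRZ=0, an unmap granularity of 8 blocks and
 * a 4096-byte physical block size ends up with discard_granularity =
 * max(4096, 8 * 512) = 4096 bytes, and in SD_LBP_UNMAP mode the maximum
 * discard size is min_not_zero(max_unmap_blocks, SD_MAX_WS16_BLOCKS)
 * logical blocks, converted to 512-byte sectors for the block layer.
 */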
695
696/**
697 * sd_setup_discard_cmnd - unmap blocks on thinly provisioned device
698 * @sdp: scsi device to operate on
699 * @rq: Request to prepare
700 *
701 * Will issue either UNMAP or WRITE SAME(16) depending on preference
702 * indicated by target device.
703 **/
6a7b4398 704static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
e339c1a7 705{
706 struct request *rq = cmd->request;
707 struct scsi_device *sdp = cmd->device;
e339c1a7 708 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
709 sector_t sector = blk_rq_pos(rq);
710 unsigned int nr_sectors = blk_rq_sectors(rq);
711 unsigned int nr_bytes = blk_rq_bytes(rq);
66ac0280 712 unsigned int len;
f1126e95 713 int ret;
c98a0eb0 714 char *buf;
66ac0280 715 struct page *page;
e339c1a7 716
717 sector >>= ilog2(sdp->sector_size) - 9;
718 nr_sectors >>= ilog2(sdp->sector_size) - 9;
e339c1a7 719
720 page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
721 if (!page)
722 return BLKPREP_DEFER;
723
724 switch (sdkp->provisioning_mode) {
725 case SD_LBP_UNMAP:
726 buf = page_address(page);
e339c1a7 727
728 cmd->cmd_len = 10;
729 cmd->cmnd[0] = UNMAP;
730 cmd->cmnd[8] = 24;
731
732 put_unaligned_be16(6 + 16, &buf[0]);
733 put_unaligned_be16(16, &buf[2]);
734 put_unaligned_be64(sector, &buf[8]);
66ac0280 735 put_unaligned_be32(nr_sectors, &buf[16]);
e339c1a7 736
66ac0280 737 len = 24;
738 break;
739
740 case SD_LBP_WS16:
741 cmd->cmd_len = 16;
742 cmd->cmnd[0] = WRITE_SAME_16;
743 cmd->cmnd[1] = 0x8; /* UNMAP */
744 put_unaligned_be64(sector, &cmd->cmnd[2]);
745 put_unaligned_be32(nr_sectors, &cmd->cmnd[10]);
746
747 len = sdkp->device->sector_size;
748 break;
749
750 case SD_LBP_WS10:
751 case SD_LBP_ZERO:
752 cmd->cmd_len = 10;
753 cmd->cmnd[0] = WRITE_SAME;
c98a0eb0 754 if (sdkp->provisioning_mode == SD_LBP_WS10)
755 cmd->cmnd[1] = 0x8; /* UNMAP */
756 put_unaligned_be32(sector, &cmd->cmnd[2]);
757 put_unaligned_be16(nr_sectors, &cmd->cmnd[7]);
758
759 len = sdkp->device->sector_size;
760 break;
761
762 default:
09b9cc44 763 ret = BLKPREP_KILL;
c98a0eb0 764 goto out;
765 }
766
dc4a9307 767 rq->completion_data = page;
768 rq->timeout = SD_TIMEOUT;
769
770 cmd->transfersize = len;
e4200f8e 771 cmd->allowed = SD_MAX_RETRIES;
772
773 /*
774 * Initially __data_len is set to the amount of data that needs to be
775 * transferred to the target. This amount depends on whether WRITE SAME
776 * or UNMAP is being used. After the scatterlist has been mapped by
777 * scsi_init_io() we set __data_len to the size of the area to be
778 * discarded on disk. This allows us to report completion on the full
779 * amount of blocks described by the request.
780 */
66ac0280 781 blk_add_request_payload(rq, page, len);
3c356bde 782 ret = scsi_init_io(cmd);
26e85fcd 783 rq->__data_len = nr_bytes;
784
785out:
b4f42e28 786 if (ret != BLKPREP_OK)
610a6349 787 __free_page(page);
788 return ret;
789}
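/*
 * For reference, the 24-byte UNMAP parameter list built above looks like
 * this: bytes 0-1 hold the data length (22), bytes 2-3 the block
 * descriptor data length (16), and the single 16-byte descriptor at
 * bytes 8-23 holds the 64-bit starting LBA (bytes 8-15) and the 32-bit
 * number of logical blocks (bytes 16-19); the remaining bytes stay zero
 * because the page was allocated with __GFP_ZERO.
 */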
790
791static void sd_config_write_same(struct scsi_disk *sdkp)
792{
793 struct request_queue *q = sdkp->disk->queue;
794 unsigned int logical_block_size = sdkp->device->sector_size;
795
796 if (sdkp->device->no_write_same) {
797 sdkp->max_ws_blocks = 0;
798 goto out;
799 }
800
801 /* Some devices can not handle block counts above 0xffff despite
802 * supporting WRITE SAME(16). Consequently we default to 64k
803 * blocks per I/O unless the device explicitly advertises a
804 * bigger limit.
805 */
806 if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
807 sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
808 (u32)SD_MAX_WS16_BLOCKS);
809 else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
810 sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
811 (u32)SD_MAX_WS10_BLOCKS);
812 else {
813 sdkp->device->no_write_same = 1;
814 sdkp->max_ws_blocks = 0;
815 }
816
817out:
818 blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
819 (logical_block_size >> 9));
820}
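/*
 * Example of the queue limit set above: with max_ws_blocks == 0xFFFF and
 * 4096-byte logical blocks the WRITE SAME limit becomes
 * 0xFFFF * (4096 >> 9) = 524280 sectors of 512 bytes; a device flagged
 * no_write_same ends up with max_ws_blocks == 0 and therefore a limit of
 * zero, which disables REQ_WRITE_SAME for it.
 */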
821
822/**
823 * sd_setup_write_same_cmnd - write the same data to multiple blocks
59b1134c 824 * @cmd: command to prepare
825 *
826 * Will issue either WRITE SAME(10) or WRITE SAME(16) depending on
827 * preference indicated by target device.
828 **/
59b1134c 829static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
5db44863 830{
831 struct request *rq = cmd->request;
832 struct scsi_device *sdp = cmd->device;
833 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
834 struct bio *bio = rq->bio;
835 sector_t sector = blk_rq_pos(rq);
836 unsigned int nr_sectors = blk_rq_sectors(rq);
837 unsigned int nr_bytes = blk_rq_bytes(rq);
838 int ret;
839
840 if (sdkp->device->no_write_same)
841 return BLKPREP_KILL;
842
a4ad39b1 843 BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
844
845 sector >>= ilog2(sdp->sector_size) - 9;
846 nr_sectors >>= ilog2(sdp->sector_size) - 9;
847
5db44863 848 rq->timeout = SD_WRITE_SAME_TIMEOUT;
849
850 if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) {
851 cmd->cmd_len = 16;
852 cmd->cmnd[0] = WRITE_SAME_16;
853 put_unaligned_be64(sector, &cmd->cmnd[2]);
854 put_unaligned_be32(nr_sectors, &cmd->cmnd[10]);
5db44863 855 } else {
856 cmd->cmd_len = 10;
857 cmd->cmnd[0] = WRITE_SAME;
858 put_unaligned_be32(sector, &cmd->cmnd[2]);
859 put_unaligned_be16(nr_sectors, &cmd->cmnd[7]);
860 }
861
59b1134c 862 cmd->transfersize = sdp->sector_size;
a25ee548 863 cmd->allowed = SD_MAX_RETRIES;
5db44863 864
865 /*
866 * For WRITE_SAME the data transferred in the DATA IN buffer is
867 * different from the amount of data actually written to the target.
868 *
869 * We set up __data_len to the amount of data transferred from the
870 * DATA IN buffer so that blk_rq_map_sg set up the proper S/G list
871 * to transfer a single sector of data first, but then reset it to
872 * the amount of data to be written right after so that the I/O path
873 * knows how much to actually write.
874 */
875 rq->__data_len = sdp->sector_size;
3c356bde 876 ret = scsi_init_io(cmd);
59b1134c 877 rq->__data_len = nr_bytes;
878 return ret;
879}
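/*
 * Example of the CDB selection above: a request starting beyond LBA
 * 0xffffffff, covering more than 0xffff blocks, or issued to a device
 * with ws16 set is encoded as WRITE SAME(16); anything smaller falls
 * back to WRITE SAME(10), whose CDB only has room for a 32-bit LBA and
 * a 16-bit block count.
 */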
880
a118c6c1 881static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
90467c29 882{
883 struct request *rq = cmd->request;
884
885 /* flush requests don't perform I/O, zero the S/G table */
886 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
90467c29 887
888 cmd->cmnd[0] = SYNCHRONIZE_CACHE;
889 cmd->cmd_len = 10;
890 cmd->transfersize = 0;
891 cmd->allowed = SD_MAX_RETRIES;
892
26b9fd8b 893 rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
a118c6c1 894 return BLKPREP_OK;
895}
896
87949eee 897static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
1da177e4 898{
899 struct request *rq = SCpnt->request;
900 struct scsi_device *sdp = SCpnt->device;
776b23a0 901 struct gendisk *disk = rq->rq_disk;
af55ff67 902 struct scsi_disk *sdkp;
83096ebf 903 sector_t block = blk_rq_pos(rq);
18351070 904 sector_t threshold;
83096ebf 905 unsigned int this_count = blk_rq_sectors(rq);
906 unsigned int dif, dix;
907 int ret;
4e7392ec 908 unsigned char protect;
7f9a6bc4 909
3c356bde 910 ret = scsi_init_io(SCpnt);
911 if (ret != BLKPREP_OK)
912 goto out;
913 SCpnt = rq->special;
af55ff67 914 sdkp = scsi_disk(disk);
915
916 /* from here on until we're complete, any goto out
917 * is used for a killable error condition */
918 ret = BLKPREP_KILL;
1da177e4 919
920 SCSI_LOG_HLQUEUE(1,
921 scmd_printk(KERN_INFO, SCpnt,
922 "%s: block=%llu, count=%d\n",
923 __func__, (unsigned long long)block, this_count));
924
925 if (!sdp || !scsi_device_online(sdp) ||
83096ebf 926 block + blk_rq_sectors(rq) > get_capacity(disk)) {
fa0d34be 927 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
928 "Finishing %u sectors\n",
929 blk_rq_sectors(rq)));
930 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
931 "Retry with 0x%p\n", SCpnt));
7f9a6bc4 932 goto out;
933 }
934
935 if (sdp->changed) {
936 /*
937 * quietly refuse to do anything to a changed disc until
938 * the changed bit has been reset
939 */
3ff5588d 940 /* printk("SCSI disk has been changed or is not present. Prohibiting further I/O.\n"); */
7f9a6bc4 941 goto out;
1da177e4 942 }
7f9a6bc4 943
a0899d4d 944 /*
945 * Some SD card readers can't handle multi-sector accesses which touch
946 * the last one or two hardware sectors. Split accesses as needed.
a0899d4d 947 */
948 threshold = get_capacity(disk) - SD_LAST_BUGGY_SECTORS *
949 (sdp->sector_size / 512);
950
951 if (unlikely(sdp->last_sector_bug && block + this_count > threshold)) {
952 if (block < threshold) {
953 /* Access up to the threshold but not beyond */
954 this_count = threshold - block;
955 } else {
956 /* Access only a single hardware sector */
957 this_count = sdp->sector_size / 512;
958 }
959 }
a0899d4d 960
961 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n",
962 (unsigned long long)block));
963
964 /*
965 * If we have a 1K hardware sectorsize, prevent access to single
966 * 512 byte sectors. In theory we could handle this - in fact
967 * the scsi cdrom driver must be able to handle this because
968 * we typically use 1K blocksizes, and cdroms typically have
969 * 2K hardware sectorsizes. Of course, things are simpler
970 * with the cdrom, since it is read-only. For performance
971 * reasons, the filesystems should be able to handle this
972 * and not force the scsi disk driver to use bounce buffers
973 * for this.
974 */
975 if (sdp->sector_size == 1024) {
83096ebf 976 if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
977 scmd_printk(KERN_ERR, SCpnt,
978 "Bad block number requested\n");
7f9a6bc4 979 goto out;
980 } else {
981 block = block >> 1;
982 this_count = this_count >> 1;
983 }
984 }
985 if (sdp->sector_size == 2048) {
83096ebf 986 if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
987 scmd_printk(KERN_ERR, SCpnt,
988 "Bad block number requested\n");
7f9a6bc4 989 goto out;
990 } else {
991 block = block >> 2;
992 this_count = this_count >> 2;
993 }
994 }
995 if (sdp->sector_size == 4096) {
83096ebf 996 if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
997 scmd_printk(KERN_ERR, SCpnt,
998 "Bad block number requested\n");
7f9a6bc4 999 goto out;
1000 } else {
1001 block = block >> 3;
1002 this_count = this_count >> 3;
1003 }
1004 }
1005 if (rq_data_dir(rq) == WRITE) {
1da177e4 1006 SCpnt->cmnd[0] = WRITE_6;
af55ff67 1007
8c579ab6 1008 if (blk_integrity_rq(rq))
c611529e 1009 sd_dif_prepare(SCpnt);
af55ff67 1010
1011 } else if (rq_data_dir(rq) == READ) {
1012 SCpnt->cmnd[0] = READ_6;
1da177e4 1013 } else {
5953316d 1014 scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags);
7f9a6bc4 1015 goto out;
1016 }
1017
fa0d34be 1018 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
83096ebf 1019 "%s %d/%u 512 byte blocks.\n",
1020 (rq_data_dir(rq) == WRITE) ?
1021 "writing" : "reading", this_count,
83096ebf 1022 blk_rq_sectors(rq)));
1da177e4 1023
1024 dix = scsi_prot_sg_count(SCpnt);
1025 dif = scsi_host_dif_capable(SCpnt->device->host, sdkp->protection_type);
1026
1027 if (dif || dix)
1028 protect = sd_setup_protect_cmnd(SCpnt, dix, dif);
af55ff67 1029 else
1030 protect = 0;
1031
c611529e 1032 if (protect && sdkp->protection_type == SD_DIF_TYPE2_PROTECTION) {
1033 SCpnt->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
1034
1035 if (unlikely(SCpnt->cmnd == NULL)) {
1036 ret = BLKPREP_DEFER;
1037 goto out;
1038 }
af55ff67 1039
1040 SCpnt->cmd_len = SD_EXT_CDB_SIZE;
1041 memset(SCpnt->cmnd, 0, SCpnt->cmd_len);
1042 SCpnt->cmnd[0] = VARIABLE_LENGTH_CMD;
1043 SCpnt->cmnd[7] = 0x18;
1044 SCpnt->cmnd[9] = (rq_data_dir(rq) == READ) ? READ_32 : WRITE_32;
33659ebb 1045 SCpnt->cmnd[10] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
1046
1047 /* LBA */
1048 SCpnt->cmnd[12] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
1049 SCpnt->cmnd[13] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
1050 SCpnt->cmnd[14] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
1051 SCpnt->cmnd[15] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0;
1052 SCpnt->cmnd[16] = (unsigned char) (block >> 24) & 0xff;
1053 SCpnt->cmnd[17] = (unsigned char) (block >> 16) & 0xff;
1054 SCpnt->cmnd[18] = (unsigned char) (block >> 8) & 0xff;
1055 SCpnt->cmnd[19] = (unsigned char) block & 0xff;
1056
1057 /* Expected Indirect LBA */
1058 SCpnt->cmnd[20] = (unsigned char) (block >> 24) & 0xff;
1059 SCpnt->cmnd[21] = (unsigned char) (block >> 16) & 0xff;
1060 SCpnt->cmnd[22] = (unsigned char) (block >> 8) & 0xff;
1061 SCpnt->cmnd[23] = (unsigned char) block & 0xff;
1062
1063 /* Transfer length */
1064 SCpnt->cmnd[28] = (unsigned char) (this_count >> 24) & 0xff;
1065 SCpnt->cmnd[29] = (unsigned char) (this_count >> 16) & 0xff;
1066 SCpnt->cmnd[30] = (unsigned char) (this_count >> 8) & 0xff;
1067 SCpnt->cmnd[31] = (unsigned char) this_count & 0xff;
e430cbc8 1068 } else if (sdp->use_16_for_rw || (this_count > 0xffff)) {
1da177e4 1069 SCpnt->cmnd[0] += READ_16 - READ_6;
33659ebb 1070 SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
1071 SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
1072 SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
1073 SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
1074 SCpnt->cmnd[5] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0;
1075 SCpnt->cmnd[6] = (unsigned char) (block >> 24) & 0xff;
1076 SCpnt->cmnd[7] = (unsigned char) (block >> 16) & 0xff;
1077 SCpnt->cmnd[8] = (unsigned char) (block >> 8) & 0xff;
1078 SCpnt->cmnd[9] = (unsigned char) block & 0xff;
1079 SCpnt->cmnd[10] = (unsigned char) (this_count >> 24) & 0xff;
1080 SCpnt->cmnd[11] = (unsigned char) (this_count >> 16) & 0xff;
1081 SCpnt->cmnd[12] = (unsigned char) (this_count >> 8) & 0xff;
1082 SCpnt->cmnd[13] = (unsigned char) this_count & 0xff;
1083 SCpnt->cmnd[14] = SCpnt->cmnd[15] = 0;
1084 } else if ((this_count > 0xff) || (block > 0x1fffff) ||
af55ff67 1085 scsi_device_protection(SCpnt->device) ||
1da177e4 1086 SCpnt->device->use_10_for_rw) {
1da177e4 1087 SCpnt->cmnd[0] += READ_10 - READ_6;
33659ebb 1088 SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
1089 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
1090 SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
1091 SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
1092 SCpnt->cmnd[5] = (unsigned char) block & 0xff;
1093 SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0;
1094 SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
1095 SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
1096 } else {
33659ebb 1097 if (unlikely(rq->cmd_flags & REQ_FUA)) {
1098 /*
1099 * This happens only if this drive failed
1100 * 10byte rw command with ILLEGAL_REQUEST
1101 * during operation and thus turned off
1102 * use_10_for_rw.
1103 */
1104 scmd_printk(KERN_ERR, SCpnt,
1105 "FUA write on READ/WRITE(6) drive\n");
7f9a6bc4 1106 goto out;
1107 }
1108
1109 SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f);
1110 SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff);
1111 SCpnt->cmnd[3] = (unsigned char) block & 0xff;
1112 SCpnt->cmnd[4] = (unsigned char) this_count;
1113 SCpnt->cmnd[5] = 0;
1114 }
30b0c37b 1115 SCpnt->sdb.length = this_count * sdp->sector_size;
1116
1117 /*
1118 * We shouldn't disconnect in the middle of a sector, so with a dumb
1119 * host adapter, it's safe to assume that we can at least transfer
1120 * this many bytes between each connect / disconnect.
1121 */
1122 SCpnt->transfersize = sdp->sector_size;
1123 SCpnt->underflow = this_count << 9;
1124 SCpnt->allowed = SD_MAX_RETRIES;
1da177e4 1125
1126 /*
1127 * This indicates that the command is ready from our end to be
1128 * queued.
1129 */
1130 ret = BLKPREP_OK;
1131 out:
a1b73fc1 1132 return ret;
1133}
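/*
 * Worked example of the scaling above for a disk with 4096-byte logical
 * blocks: a READ of 32 512-byte sectors starting at 512-byte sector 80
 * arrives with block == 80 and this_count == 32; both are shifted right
 * by 3, so the CDB carries LBA 10 and a transfer length of 4 logical
 * blocks, while SCpnt->sdb.length is set to 4 * 4096 bytes.
 */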
1134
1135static int sd_init_command(struct scsi_cmnd *cmd)
1136{
1137 struct request *rq = cmd->request;
1138
1139 if (rq->cmd_flags & REQ_DISCARD)
1140 return sd_setup_discard_cmnd(cmd);
1141 else if (rq->cmd_flags & REQ_WRITE_SAME)
1142 return sd_setup_write_same_cmnd(cmd);
1143 else if (rq->cmd_flags & REQ_FLUSH)
1144 return sd_setup_flush_cmnd(cmd);
1145 else
1146 return sd_setup_read_write_cmnd(cmd);
1147}
1148
1149static void sd_uninit_command(struct scsi_cmnd *SCpnt)
1150{
1151 struct request *rq = SCpnt->request;
1152
1153 if (rq->cmd_flags & REQ_DISCARD)
1154 __free_page(rq->completion_data);
1155
1156 if (SCpnt->cmnd != rq->cmd) {
1157 mempool_free(SCpnt->cmnd, sd_cdb_pool);
1158 SCpnt->cmnd = NULL;
1159 SCpnt->cmd_len = 0;
1160 }
1161}
1162
1163/**
1164 * sd_open - open a scsi disk device
1165 * @inode: only i_rdev member may be used
1166 * @filp: only f_mode and f_flags may be used
1167 *
1168 * Returns 0 if successful. Returns a negated errno value in case
1169 * of error.
1170 *
1171 * Note: This can be called from a user context (e.g. fsck(1) )
1172 * or from within the kernel (e.g. as a result of a mount(1) ).
1173 * In the latter case @inode and @filp carry an abridged amount
1174 * of information as noted above.
1175 *
1176 * Locking: called with bdev->bd_mutex held.
1da177e4 1177 **/
0338e291 1178static int sd_open(struct block_device *bdev, fmode_t mode)
1da177e4 1179{
0338e291 1180 struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
1181 struct scsi_device *sdev;
1182 int retval;
1183
0338e291 1184 if (!sdkp)
1185 return -ENXIO;
1186
fa0d34be 1187 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
1188
1189 sdev = sdkp->device;
1190
1191 /*
1192 * If the device is in error recovery, wait until it is done.
1193 * If the device is offline, then disallow any access to it.
1194 */
1195 retval = -ENXIO;
1196 if (!scsi_block_when_processing_errors(sdev))
1197 goto error_out;
1198
1199 if (sdev->removable || sdkp->write_prot)
0338e291 1200 check_disk_change(bdev);
1201
1202 /*
1203 * If the drive is empty, just let the open fail.
1204 */
1205 retval = -ENOMEDIUM;
0338e291 1206 if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
1207 goto error_out;
1208
1209 /*
1210 * If the device has the write protect tab set, have the open fail
1211 * if the user expects to be able to write to the thing.
1212 */
1213 retval = -EROFS;
0338e291 1214 if (sdkp->write_prot && (mode & FMODE_WRITE))
1215 goto error_out;
1216
1217 /*
1218 * It is possible that the disk changing stuff resulted in
1219 * the device being taken offline. If this is the case,
1220 * report this to the user, and don't pretend that the
1221 * open actually succeeded.
1222 */
1223 retval = -ENXIO;
1224 if (!scsi_device_online(sdev))
1225 goto error_out;
1226
409f3499 1227 if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
1228 if (scsi_block_when_processing_errors(sdev))
1229 scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
1230 }
1231
1232 return 0;
1233
1234error_out:
1235 scsi_disk_put(sdkp);
1236 return retval;
1237}
1238
1239/**
1240 * sd_release - invoked when the (last) close(2) is called on this
1241 * scsi disk.
1242 * @inode: only i_rdev member may be used
1243 * @filp: only f_mode and f_flags may be used
1244 *
1245 * Returns 0.
1246 *
1247 * Note: may block (uninterruptible) if error recovery is underway
1248 * on this disk.
1249 *
1250 * Locking: called with bdev->bd_mutex held.
1da177e4 1251 **/
db2a144b 1252static void sd_release(struct gendisk *disk, fmode_t mode)
1da177e4 1253{
1254 struct scsi_disk *sdkp = scsi_disk(disk);
1255 struct scsi_device *sdev = sdkp->device;
1256
56937f7b 1257 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
1da177e4 1258
7e443312 1259 if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
1260 if (scsi_block_when_processing_errors(sdev))
1261 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
1262 }
1263
1264 /*
1265 * XXX and what if there are packets in flight and this close()
1266 * XXX is followed by a "rmmod sd_mod"?
1267 */
478a8a05 1268
1da177e4 1269 scsi_disk_put(sdkp);
1270}
1271
a885c8c4 1272static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1273{
1274 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
1275 struct scsi_device *sdp = sdkp->device;
1276 struct Scsi_Host *host = sdp->host;
1277 int diskinfo[4];
1278
1279 /* default to most commonly used values */
1280 diskinfo[0] = 0x40; /* 1 << 6 */
1281 diskinfo[1] = 0x20; /* 1 << 5 */
1282 diskinfo[2] = sdkp->capacity >> 11;
1283
1284 /* override with calculated, extended default, or driver values */
1285 if (host->hostt->bios_param)
1286 host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo);
1287 else
1288 scsicam_bios_param(bdev, sdkp->capacity, diskinfo);
1289
1290 geo->heads = diskinfo[0];
1291 geo->sectors = diskinfo[1];
1292 geo->cylinders = diskinfo[2];
1293 return 0;
1294}
1295
1296/**
1297 * sd_ioctl - process an ioctl
1298 * @inode: only i_rdev/i_bdev members may be used
1299 * @filp: only f_mode and f_flags may be used
1300 * @cmd: ioctl command number
1301 * @arg: this is third argument given to ioctl(2) system call.
1302 * Often contains a pointer.
1303 *
25985edc 1304 * Returns 0 if successful (some ioctls return positive numbers on
1305 * success as well). Returns a negated errno value in case of error.
1306 *
1307 * Note: most ioctls are forwarded to the block subsystem or further
1308 * down in the scsi subsystem.
1da177e4 1309 **/
0338e291 1310static int sd_ioctl(struct block_device *bdev, fmode_t mode,
1311 unsigned int cmd, unsigned long arg)
1312{
1da177e4 1313 struct gendisk *disk = bdev->bd_disk;
1314 struct scsi_disk *sdkp = scsi_disk(disk);
1315 struct scsi_device *sdp = sdkp->device;
1316 void __user *p = (void __user *)arg;
1317 int error;
1318
1319 SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
1320 "cmd=0x%x\n", disk->disk_name, cmd));
1da177e4 1321
1322 error = scsi_verify_blk_ioctl(bdev, cmd);
1323 if (error < 0)
1324 return error;
1325
1326 /*
1327 * If we are in the middle of error recovery, don't let anyone
1328 * else try and use this device. Also, if error recovery fails, it
1329 * may try and take the device offline, in which case all further
1330 * access to the device is prohibited.
1331 */
1332 error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
1333 (mode & FMODE_NDELAY) != 0);
1334 if (error)
8a6cfeb6 1335 goto out;
1da177e4 1336
1337 /*
1338 * Send SCSI addressing ioctls directly to mid level, send other
1339 * ioctls to block level and then onto mid level if they can't be
1340 * resolved.
1341 */
1342 switch (cmd) {
1343 case SCSI_IOCTL_GET_IDLUN:
1344 case SCSI_IOCTL_GET_BUS_NUMBER:
1345 error = scsi_ioctl(sdp, cmd, p);
1346 break;
1da177e4 1347 default:
577ebb37 1348 error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
1da177e4 1349 if (error != -ENOTTY)
1350 break;
1351 error = scsi_ioctl(sdp, cmd, p);
1352 break;
1da177e4 1353 }
8a6cfeb6 1354out:
8a6cfeb6 1355 return error;
1356}
1357
1358static void set_media_not_present(struct scsi_disk *sdkp)
1359{
1360 if (sdkp->media_present)
1361 sdkp->device->changed = 1;
1362
1363 if (sdkp->device->removable) {
1364 sdkp->media_present = 0;
1365 sdkp->capacity = 0;
1366 }
1367}
1368
1369static int media_not_present(struct scsi_disk *sdkp,
1370 struct scsi_sense_hdr *sshdr)
1371{
1372 if (!scsi_sense_valid(sshdr))
1373 return 0;
1374
1375 /* not invoked for commands that could return deferred errors */
1376 switch (sshdr->sense_key) {
1377 case UNIT_ATTENTION:
1378 case NOT_READY:
1379 /* medium not present */
1380 if (sshdr->asc == 0x3A) {
1381 set_media_not_present(sdkp);
1382 return 1;
1383 }
1384 }
1385 return 0;
1386}
1387
1388/**
1389 * sd_check_events - check media events
1390 * @disk: kernel device descriptor
1391 * @clearing: disk events currently being cleared
1da177e4 1392 *
2bae0093 1393 * Returns mask of DISK_EVENT_*.
1394 *
1395 * Note: this function is invoked from the block subsystem.
1396 **/
2bae0093 1397static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
1398{
1399 struct scsi_disk *sdkp = scsi_disk(disk);
1400 struct scsi_device *sdp = sdkp->device;
001aac25 1401 struct scsi_sense_hdr *sshdr = NULL;
1402 int retval;
1403
2bae0093 1404 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
1405
1406 /*
1407 * If the device is offline, don't send any commands - just pretend as
1408 * if the command failed. If the device ever comes back online, we
1409 * can deal with it then. It is only because of unrecoverable errors
1410 * that we would ever take a device offline in the first place.
1411 */
1412 if (!scsi_device_online(sdp)) {
1413 set_media_not_present(sdkp);
1414 goto out;
1415 }
1416
1417 /*
1418 * Using TEST_UNIT_READY enables differentiation between drive with
1419 * no cartridge loaded - NOT READY, drive with changed cartridge -
1420 * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
1421 *
1422 * Drives that auto spin down, e.g. iomega jaz 1G, will be started
1423 * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
1424 * sd_revalidate() is called.
1425 */
1426 retval = -ENODEV;
285e9670 1427
1428 if (scsi_block_when_processing_errors(sdp)) {
1429 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
1430 retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
1431 sshdr);
1432 }
1da177e4 1433
1434 /* failed to execute TUR, assume media not present */
1435 if (host_byte(retval)) {
285e9670 1436 set_media_not_present(sdkp);
1437 goto out;
1438 }
1da177e4 1439
1440 if (media_not_present(sdkp, sshdr))
1441 goto out;
1442
1443 /*
1444 * For removable scsi disk we have to recognise the presence
2bae0093 1445 * of a disk in the drive.
1da177e4 1446 */
1447 if (!sdkp->media_present)
1448 sdp->changed = 1;
1da177e4 1449 sdkp->media_present = 1;
285e9670 1450out:
3ff5588d 1451 /*
2bae0093 1452 * sdp->changed is set under the following conditions:
3ff5588d 1453 *
1454 * Medium present state has changed in either direction.
1455 * Device has indicated UNIT_ATTENTION.
3ff5588d 1456 */
001aac25 1457 kfree(sshdr);
1458 retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
1459 sdp->changed = 0;
1da177e4 1460 return retval;
1461}
1462
e73aec82 1463static int sd_sync_cache(struct scsi_disk *sdkp)
1da177e4 1464{
1da177e4 1465 int retries, res;
e73aec82 1466 struct scsi_device *sdp = sdkp->device;
1467 const int timeout = sdp->request_queue->rq_timeout
1468 * SD_FLUSH_TIMEOUT_MULTIPLIER;
ea73a9f2 1469 struct scsi_sense_hdr sshdr;
1470
1471 if (!scsi_device_online(sdp))
1472 return -ENODEV;
1473
1474 for (retries = 3; retries > 0; --retries) {
1475 unsigned char cmd[10] = { 0 };
1476
1477 cmd[0] = SYNCHRONIZE_CACHE;
1478 /*
1479 * Leave the rest of the command zero to indicate
1480 * flush everything.
1481 */
9b21493c 1482 res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
1483 &sshdr, timeout, SD_MAX_RETRIES,
1484 NULL, REQ_PM);
ea73a9f2 1485 if (res == 0)
1486 break;
1487 }
1488
e73aec82 1489 if (res) {
ef61329d 1490 sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
95897910 1491
1492 if (driver_byte(res) & DRIVER_SENSE)
1493 sd_print_sense_hdr(sdkp, &sshdr);
1494 /* we need to evaluate the error return */
1495 if (scsi_sense_valid(&sshdr) &&
1496 (sshdr.asc == 0x3a || /* medium not present */
1497 sshdr.asc == 0x20)) /* invalid command */
1498 /* this is no error here */
1499 return 0;
1500
1501 switch (host_byte(res)) {
1502 /* ignore errors due to racing a disconnection */
1503 case DID_BAD_TARGET:
1504 case DID_NO_CONNECT:
1505 return 0;
1506 /* signal the upper layer it might try again */
1507 case DID_BUS_BUSY:
1508 case DID_IMM_RETRY:
1509 case DID_REQUEUE:
1510 case DID_SOFT_ERROR:
1511 return -EBUSY;
1512 default:
1513 return -EIO;
1514 }
1da177e4 1515 }
3721050a 1516 return 0;
1517}
1518
1519static void sd_rescan(struct device *dev)
1520{
3d9a1f53 1521 struct scsi_disk *sdkp = dev_get_drvdata(dev);
39b7f1e2 1522
3d9a1f53 1523 revalidate_disk(sdkp->disk);
1524}
1525
1526
1527#ifdef CONFIG_COMPAT
1528/*
1529 * This gets directly called from VFS. When the ioctl
1530 * is not recognized we go back to the other translation paths.
1531 */
1532static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
1533 unsigned int cmd, unsigned long arg)
1da177e4 1534{
0338e291 1535 struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
21a9d4c9 1536 int error;
1da177e4 1537
1538 error = scsi_ioctl_block_when_processing_errors(sdev, cmd,
1539 (mode & FMODE_NDELAY) != 0);
1540 if (error)
1541 return error;
1da177e4 1542
1543 /*
1544 * Let the static ioctl translation table take care of it.
1545 */
1546 if (!sdev->host->hostt->compat_ioctl)
1547 return -ENOIOCTLCMD;
1548 return sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
1549}
1550#endif
1551
83d5cde4 1552static const struct block_device_operations sd_fops = {
1da177e4 1553 .owner = THIS_MODULE,
1554 .open = sd_open,
1555 .release = sd_release,
8a6cfeb6 1556 .ioctl = sd_ioctl,
a885c8c4 1557 .getgeo = sd_getgeo,
1da177e4 1558#ifdef CONFIG_COMPAT
0338e291 1559 .compat_ioctl = sd_compat_ioctl,
1da177e4 1560#endif
2bae0093 1561 .check_events = sd_check_events,
1da177e4 1562 .revalidate_disk = sd_revalidate_disk,
72ec24bd 1563 .unlock_native_capacity = sd_unlock_native_capacity,
1564};
1565
1566/**
1567 * sd_eh_action - error handling callback
1568 * @scmd: sd-issued command that has failed
1569 * @eh_disp: The recovery disposition suggested by the midlayer
1570 *
1571 * This function is called by the SCSI midlayer upon completion of an
1572 * error test command (currently TEST UNIT READY). The result of sending
1573 * the eh command is passed in eh_disp. We're looking for devices that
1574 * fail medium access commands but are OK with non access commands like
1575 * test unit ready (so wrongly see the device as having a successful
1576 * recovery)
18a4d0a2 1577 **/
2451079b 1578static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
1579{
1580 struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
1581
1582 if (!scsi_device_online(scmd->device) ||
1583 !scsi_medium_access_command(scmd) ||
1584 host_byte(scmd->result) != DID_TIME_OUT ||
1585 eh_disp != SUCCESS)
1586 return eh_disp;
1587
1588 /*
1589 * The device has timed out executing a medium access command.
1590 * However, the TEST UNIT READY command sent during error
1591 * handling completed successfully. Either the device is in the
1592 * process of recovering or it has suffered an internal failure
1593 * that prevents access to the storage medium.
1594 */
2451079b 1595 sdkp->medium_access_timed_out++;
1596
1597 /*
1598 * If the device keeps failing read/write commands but TEST UNIT
1599 * READY always completes successfully we assume that medium
1600 * access is no longer possible and take the device offline.
1601 */
1602 if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
1603 scmd_printk(KERN_ERR, scmd,
1604 "Medium access timeout failure. Offlining disk!\n");
1605 scsi_device_set_state(scmd->device, SDEV_OFFLINE);
1606
1607 return FAILED;
1608 }
1609
1610 return eh_disp;
1611}
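/*
 * Example: if max_medium_access_timeouts (tunable through the sysfs
 * attribute of the same name) is set to 2, the first medium-access
 * timeout that completes error handling with a successful TEST UNIT
 * READY is passed through unchanged, and the second consecutive one
 * (the counter is reset by any command that completes in sd_done)
 * triggers the "Medium access timeout failure" message and offlines
 * the device.
 */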
1612
1613static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
1614{
1615 u64 start_lba = blk_rq_pos(scmd->request);
1616 u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
74856fbf 1617 u64 factor = scmd->device->sector_size / 512;
1618 u64 bad_lba;
1619 int info_valid;
1620 /*
1621 * resid is optional but mostly filled in. When it's unused,
1622 * its value is zero, so we assume the whole buffer transferred
1623 */
1624 unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
1625 unsigned int good_bytes;
af55ff67 1626
33659ebb 1627 if (scmd->request->cmd_type != REQ_TYPE_FS)
1628 return 0;
1629
1630 info_valid = scsi_get_sense_info_fld(scmd->sense_buffer,
1631 SCSI_SENSE_BUFFERSIZE,
1632 &bad_lba);
1633 if (!info_valid)
1634 return 0;
1635
1636 if (scsi_bufflen(scmd) <= scmd->device->sector_size)
1637 return 0;
1638
1639 /* be careful ... don't want any overflows */
1640 do_div(start_lba, factor);
1641 do_div(end_lba, factor);
1642
1643 /* The bad lba was reported incorrectly, we have no idea where
1644 * the error is.
1645 */
1646 if (bad_lba < start_lba || bad_lba >= end_lba)
1647 return 0;
1648
1649 /* This computation should always be done in terms of
1650 * the resolution of the device's medium.
1651 */
1652 good_bytes = (bad_lba - start_lba) * scmd->device->sector_size;
1653 return min(good_bytes, transferred);
af55ff67
MP
1654}
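
/*
 * A minimal userspace sketch (not part of sd.c itself) of the arithmetic
 * sd_completed_bytes() uses once start_lba and bad_lba are both expressed
 * in device sectors: everything before the failing block counts as good,
 * capped by what the device actually transferred.  The helper name and the
 * sample numbers are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int good_bytes_before_bad_lba(uint64_t start_lba,
					      uint64_t bad_lba,
					      unsigned int sector_size,
					      unsigned int transferred)
{
	unsigned int good = (unsigned int)((bad_lba - start_lba) * sector_size);

	return good < transferred ? good : transferred;
}

int main(void)
{
	/* 8 x 4096-byte blocks starting at LBA 1000, medium error at LBA 1003 */
	printf("%u\n", good_bytes_before_bad_lba(1000, 1003, 4096, 8 * 4096));
	return 0;	/* prints 12288 */
}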
1655
1da177e4 1656/**
7b3d9545 1657 * sd_done - bottom half handler: called when the lower level
1da177e4
LT
1658 * driver has completed (successfully or otherwise) a scsi command.
1659 * @SCpnt: mid-level's per command structure.
1660 *
1661 * Note: potentially run from within an ISR. Must not block.
1662 **/
7b3d9545 1663static int sd_done(struct scsi_cmnd *SCpnt)
1da177e4
LT
1664{
1665 int result = SCpnt->result;
af55ff67 1666 unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
1da177e4 1667 struct scsi_sense_hdr sshdr;
4e7392ec 1668 struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
26e85fcd 1669 struct request *req = SCpnt->request;
1da177e4
LT
1670 int sense_valid = 0;
1671 int sense_deferred = 0;
c98a0eb0 1672 unsigned char op = SCpnt->cmnd[0];
5db44863 1673 unsigned char unmap = SCpnt->cmnd[1] & 8;
1da177e4 1674
5db44863 1675 if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) {
26e85fcd
MP
1676 if (!result) {
1677 good_bytes = blk_rq_bytes(req);
1678 scsi_set_resid(SCpnt, 0);
1679 } else {
1680 good_bytes = 0;
1681 scsi_set_resid(SCpnt, blk_rq_bytes(req));
1682 }
1683 }
6a32a8ae 1684
1da177e4
LT
1685 if (result) {
1686 sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
1687 if (sense_valid)
1688 sense_deferred = scsi_sense_is_deferred(&sshdr);
1689 }
2a863ba8
DJ
1690 sdkp->medium_access_timed_out = 0;
1691
03aba2f7
LT
1692 if (driver_byte(result) != DRIVER_SENSE &&
1693 (!sense_valid || sense_deferred))
1694 goto out;
1695
1696 switch (sshdr.sense_key) {
1697 case HARDWARE_ERROR:
1698 case MEDIUM_ERROR:
af55ff67 1699 good_bytes = sd_completed_bytes(SCpnt);
03aba2f7
LT
1700 break;
1701 case RECOVERED_ERROR:
af55ff67
MP
1702 good_bytes = scsi_bufflen(SCpnt);
1703 break;
10dab226
JW
1704 case NO_SENSE:
1705 /* This indicates a false check condition, so ignore it. An
1706 * unknown amount of data was transferred, so treat it as an
1707 * error.
1708 */
10dab226
JW
1709 SCpnt->result = 0;
1710 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1711 break;
c98a0eb0
MP
1712 case ABORTED_COMMAND:
1713 if (sshdr.asc == 0x10) /* DIF: Target detected corruption */
1714 good_bytes = sd_completed_bytes(SCpnt);
1715 break;
1716 case ILLEGAL_REQUEST:
1717 if (sshdr.asc == 0x10) /* DIX: Host detected corruption */
af55ff67 1718 good_bytes = sd_completed_bytes(SCpnt);
c98a0eb0 1719 /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
5db44863
MP
1720 if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
1721 switch (op) {
1722 case UNMAP:
1723 sd_config_discard(sdkp, SD_LBP_DISABLE);
1724 break;
1725 case WRITE_SAME_16:
1726 case WRITE_SAME:
1727 if (unmap)
1728 sd_config_discard(sdkp, SD_LBP_DISABLE);
1729 else {
1730 sdkp->device->no_write_same = 1;
1731 sd_config_write_same(sdkp);
1732
1733 good_bytes = 0;
1734 req->__data_len = blk_rq_bytes(req);
1735 req->cmd_flags |= REQ_QUIET;
1736 }
1737 }
1738 }
03aba2f7
LT
1739 break;
1740 default:
1741 break;
1da177e4 1742 }
03aba2f7 1743 out:
ef61329d
HR
1744 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
1745 "sd_done: completed %d of %d bytes\n",
1746 good_bytes, scsi_bufflen(SCpnt)));
1747
af55ff67
MP
1748 if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
1749 sd_dif_complete(SCpnt, good_bytes);
1750
7b3d9545 1751 return good_bytes;
1da177e4
LT
1752}
1753
1da177e4
LT
1754/*
1755 * spinup disk - called only in sd_revalidate_disk()
1756 */
1757static void
e73aec82 1758sd_spinup_disk(struct scsi_disk *sdkp)
ea73a9f2 1759{
1da177e4 1760 unsigned char cmd[10];
4451e472 1761 unsigned long spintime_expire = 0;
1da177e4
LT
1762 int retries, spintime;
1763 unsigned int the_result;
1764 struct scsi_sense_hdr sshdr;
1765 int sense_valid = 0;
1766
1767 spintime = 0;
1768
1769 /* Spin up drives, as required. This is done at boot time and
1770 * also when the module is loaded. */
1771 do {
1772 retries = 0;
1773
1774 do {
1775 cmd[0] = TEST_UNIT_READY;
1776 memset((void *) &cmd[1], 0, 9);
1777
ea73a9f2
JB
1778 the_result = scsi_execute_req(sdkp->device, cmd,
1779 DMA_NONE, NULL, 0,
1780 &sshdr, SD_TIMEOUT,
f4f4e47e 1781 SD_MAX_RETRIES, NULL);
1da177e4 1782
b4d38e38
AS
1783 /*
1784 * If the drive has indicated to us that it
1785 * doesn't have any media in it, don't bother
1786 * with any more polling.
1787 */
1788 if (media_not_present(sdkp, &sshdr))
1789 return;
1790
1da177e4 1791 if (the_result)
ea73a9f2 1792 sense_valid = scsi_sense_valid(&sshdr);
1da177e4
LT
1793 retries++;
1794 } while (retries < 3 &&
1795 (!scsi_status_is_good(the_result) ||
1796 ((driver_byte(the_result) & DRIVER_SENSE) &&
1797 sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
1798
1da177e4
LT
1799 if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
1800 /* no sense, TUR either succeeded or failed
1801 * with a status error */
e73aec82 1802 if (!spintime && !scsi_status_is_good(the_result)) {
ef61329d
HR
1803 sd_print_result(sdkp, "Test Unit Ready failed",
1804 the_result);
e73aec82 1805 }
1da177e4
LT
1806 break;
1807 }
ef61329d 1808
1da177e4
LT
1809 /*
1810 * The device does not want the automatic start to be issued.
1811 */
33dd6f92 1812 if (sdkp->device->no_start_on_add)
1da177e4 1813 break;
1da177e4 1814
33dd6f92
MW
1815 if (sense_valid && sshdr.sense_key == NOT_READY) {
1816 if (sshdr.asc == 4 && sshdr.ascq == 3)
1817 break; /* manual intervention required */
1818 if (sshdr.asc == 4 && sshdr.ascq == 0xb)
1819 break; /* standby */
1820 if (sshdr.asc == 4 && sshdr.ascq == 0xc)
1821 break; /* unavailable */
1822 /*
1823 * Issue command to spin up drive when not ready
1824 */
1da177e4 1825 if (!spintime) {
e73aec82 1826 sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
1da177e4
LT
1827 cmd[0] = START_STOP;
1828 cmd[1] = 1; /* Return immediately */
1829 memset((void *) &cmd[2], 0, 8);
1830 cmd[4] = 1; /* Start spin cycle */
d2886ea3
SR
1831 if (sdkp->device->start_stop_pwr_cond)
1832 cmd[4] |= 1 << 4;
ea73a9f2
JB
1833 scsi_execute_req(sdkp->device, cmd, DMA_NONE,
1834 NULL, 0, &sshdr,
f4f4e47e
FT
1835 SD_TIMEOUT, SD_MAX_RETRIES,
1836 NULL);
4451e472
AS
1837 spintime_expire = jiffies + 100 * HZ;
1838 spintime = 1;
1da177e4 1839 }
1da177e4
LT
1840 /* Wait 1 second for next try */
1841 msleep(1000);
1842 printk(".");
4451e472
AS
1843
1844 /*
1845 * Wait for USB flash devices with slow firmware.
1846 * Yes, this sense key/ASC combination shouldn't
1847 * occur here. It's characteristic of these devices.
1848 */
1849 } else if (sense_valid &&
1850 sshdr.sense_key == UNIT_ATTENTION &&
1851 sshdr.asc == 0x28) {
1852 if (!spintime) {
1853 spintime_expire = jiffies + 5 * HZ;
1854 spintime = 1;
1855 }
1856 /* Wait 1 second for next try */
1857 msleep(1000);
1da177e4
LT
1858 } else {
1859 /* we don't understand the sense code, so it's
1860 * probably pointless to loop */
1861 if (!spintime) {
e73aec82
MP
1862 sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
1863 sd_print_sense_hdr(sdkp, &sshdr);
1da177e4
LT
1864 }
1865 break;
1866 }
1867
4451e472 1868 } while (spintime && time_before_eq(jiffies, spintime_expire));
1da177e4
LT
1869
1870 if (spintime) {
1871 if (scsi_status_is_good(the_result))
1872 printk("ready\n");
1873 else
1874 printk("not responding...\n");
1875 }
1876}
1877
e0597d70
MP
1878
1879/*
1880 * Determine whether disk supports Data Integrity Field.
1881 */
fe542396 1882static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
e0597d70
MP
1883{
1884 struct scsi_device *sdp = sdkp->device;
1885 u8 type;
fe542396 1886 int ret = 0;
e0597d70
MP
1887
1888 if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
fe542396 1889 return ret;
35e1a5d9
MP
1890
1891 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
1892
fe542396
MP
1893 if (type > SD_DIF_TYPE3_PROTECTION)
1894 ret = -ENODEV;
1895 else if (scsi_host_dif_capable(sdp->host, type))
1896 ret = 1;
1897
1898 if (sdkp->first_scan || type != sdkp->protection_type)
1899 switch (ret) {
1900 case -ENODEV:
1901 sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
1902 " protection type %u. Disabling disk!\n",
1903 type);
1904 break;
1905 case 1:
1906 sd_printk(KERN_NOTICE, sdkp,
1907 "Enabling DIF Type %u protection\n", type);
1908 break;
1909 case 0:
1910 sd_printk(KERN_NOTICE, sdkp,
1911 "Disabling DIF Type %u protection\n", type);
1912 break;
1913 }
e0597d70 1914
be922f47
MP
1915 sdkp->protection_type = type;
1916
fe542396 1917 return ret;
e0597d70
MP
1918}
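
/*
 * An illustrative sketch (not part of sd.c itself) of the protection bits
 * in byte 12 of the READ CAPACITY(16) response that the function above
 * inspects: bit 0 is PROT_EN and bits 3:1 are P_TYPE, where P_TYPE 0 means
 * DIF Type 1.  The helper name is invented for the example.
 */
#include <stdio.h>

/* returns 0 when protection is disabled, otherwise the DIF type (1..3) */
static unsigned int rc16_dif_type(unsigned char byte12)
{
	if ((byte12 & 1) == 0)			/* PROT_EN clear */
		return 0;

	return ((byte12 >> 1) & 7) + 1;		/* P_TYPE 0 = Type 1 */
}

int main(void)
{
	printf("%u\n", rc16_dif_type(0x00));	/* 0: unprotected */
	printf("%u\n", rc16_dif_type(0x01));	/* 1: DIF Type 1 */
	printf("%u\n", rc16_dif_type(0x03));	/* 2: DIF Type 2 */
	return 0;
}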
1919
0da205e0
MW
1920static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
1921 struct scsi_sense_hdr *sshdr, int sense_valid,
1922 int the_result)
1923{
0da205e0
MW
1924 if (driver_byte(the_result) & DRIVER_SENSE)
1925 sd_print_sense_hdr(sdkp, sshdr);
1926 else
1927 sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
1928
1929 /*
1930 * Set dirty bit for removable devices if not ready -
1931 * sometimes drives will not report this properly.
1932 */
1933 if (sdp->removable &&
1934 sense_valid && sshdr->sense_key == NOT_READY)
2bae0093 1935 set_media_not_present(sdkp);
0da205e0
MW
1936
1937 /*
1938 * We used to set media_present to 0 here to indicate no media
1939 * in the drive, but some drives fail read capacity even with
1940 * media present, so we can't do that.
1941 */
1942 sdkp->capacity = 0; /* unknown mapped to zero - as usual */
1943}
1944
1945#define RC16_LEN 32
1946#if RC16_LEN > SD_BUF_SIZE
1947#error RC16_LEN must not be more than SD_BUF_SIZE
1948#endif
1949
3233ac19
JB
1950#define READ_CAPACITY_RETRIES_ON_RESET 10
1951
0da205e0
MW
1952static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1953 unsigned char *buffer)
ea73a9f2 1954{
1da177e4 1955 unsigned char cmd[16];
1da177e4
LT
1956 struct scsi_sense_hdr sshdr;
1957 int sense_valid = 0;
0da205e0 1958 int the_result;
3233ac19 1959 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
ea09bcc9 1960 unsigned int alignment;
0da205e0
MW
1961 unsigned long long lba;
1962 unsigned sector_size;
1da177e4 1963
5ce524bd
HG
1964 if (sdp->no_read_capacity_16)
1965 return -EINVAL;
1966
1da177e4 1967 do {
0da205e0 1968 memset(cmd, 0, 16);
eb846d9f 1969 cmd[0] = SERVICE_ACTION_IN_16;
0da205e0
MW
1970 cmd[1] = SAI_READ_CAPACITY_16;
1971 cmd[13] = RC16_LEN;
1972 memset(buffer, 0, RC16_LEN);
1973
ea73a9f2 1974 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
0da205e0
MW
1975 buffer, RC16_LEN, &sshdr,
1976 SD_TIMEOUT, SD_MAX_RETRIES, NULL);
1da177e4 1977
ea73a9f2 1978 if (media_not_present(sdkp, &sshdr))
0da205e0 1979 return -ENODEV;
1da177e4 1980
2b301307 1981 if (the_result) {
ea73a9f2 1982 sense_valid = scsi_sense_valid(&sshdr);
2b301307
MW
1983 if (sense_valid &&
1984 sshdr.sense_key == ILLEGAL_REQUEST &&
1985 (sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
1986 sshdr.ascq == 0x00)
1987 /* Invalid Command Operation Code or
1988 * Invalid Field in CDB, just retry
1989 * silently with RC10 */
1990 return -EINVAL;
3233ac19
JB
1991 if (sense_valid &&
1992 sshdr.sense_key == UNIT_ATTENTION &&
1993 sshdr.asc == 0x29 && sshdr.ascq == 0x00)
1994 /* Device reset might occur several times,
1995 * give it one more chance */
1996 if (--reset_retries > 0)
1997 continue;
2b301307 1998 }
1da177e4
LT
1999 retries--;
2000
2001 } while (the_result && retries);
2002
0da205e0 2003 if (the_result) {
ef61329d 2004 sd_print_result(sdkp, "Read Capacity(16) failed", the_result);
0da205e0
MW
2005 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
2006 return -EINVAL;
2007 }
e73aec82 2008
8f76d151
DH
2009 sector_size = get_unaligned_be32(&buffer[8]);
2010 lba = get_unaligned_be64(&buffer[0]);
0da205e0 2011
fe542396
MP
2012 if (sd_read_protection_type(sdkp, buffer) < 0) {
2013 sdkp->capacity = 0;
2014 return -ENODEV;
2015 }
0da205e0
MW
2016
2017 if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
2018 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
2019 "kernel compiled with support for large block "
2020 "devices.\n");
2021 sdkp->capacity = 0;
2022 return -EOVERFLOW;
2023 }
2024
ea09bcc9 2025 /* Logical blocks per physical block exponent */
526f7c79 2026 sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
ea09bcc9
MP
2027
2028 /* Lowest aligned logical block */
2029 alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
2030 blk_queue_alignment_offset(sdp->request_queue, alignment);
2031 if (alignment && sdkp->first_scan)
2032 sd_printk(KERN_NOTICE, sdkp,
2033 "physical block alignment offset: %u\n", alignment);
2034
c98a0eb0
MP
2035 if (buffer[14] & 0x80) { /* LBPME */
2036 sdkp->lbpme = 1;
e339c1a7 2037
c98a0eb0
MP
2038 if (buffer[14] & 0x40) /* LBPRZ */
2039 sdkp->lbprz = 1;
e339c1a7 2040
c98a0eb0 2041 sd_config_discard(sdkp, SD_LBP_WS16);
e339c1a7
MP
2042 }
2043
0da205e0
MW
2044 sdkp->capacity = lba + 1;
2045 return sector_size;
2046}
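
/*
 * A userspace sketch (not part of sd.c itself) of the READ CAPACITY(16)
 * parameter data layout read_capacity_16() relies on: bytes 0-7 hold the
 * last LBA, bytes 8-11 the logical block length, the low nibble of byte 13
 * the logical-per-physical block exponent, and byte 14 the LBPME/LBPRZ
 * flags plus the top bits of the lowest aligned LBA.  Struct and helper
 * names are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

struct rc16_info {
	uint64_t last_lba;
	uint32_t logical_block_size;
	uint32_t physical_block_size;
	uint32_t alignment_bytes;
	int lbpme, lbprz;
};

static uint64_t get_be(const unsigned char *p, int len)
{
	uint64_t v = 0;

	while (len--)
		v = (v << 8) | *p++;
	return v;
}

static void parse_rc16(const unsigned char *buf, struct rc16_info *info)
{
	info->last_lba = get_be(&buf[0], 8);
	info->logical_block_size = (uint32_t)get_be(&buf[8], 4);
	info->physical_block_size =
		info->logical_block_size << (buf[13] & 0xf);
	info->alignment_bytes =
		(((buf[14] & 0x3f) << 8) | buf[15]) * info->logical_block_size;
	info->lbpme = !!(buf[14] & 0x80);
	info->lbprz = !!(buf[14] & 0x40);
}

int main(void)
{
	/* last LBA 0x1fffff, 512-byte blocks, 8 logical per physical, LBPME */
	unsigned char buf[32] = { [5] = 0x1f, [6] = 0xff, [7] = 0xff,
				  [10] = 0x02, [13] = 3, [14] = 0x80 };
	struct rc16_info info;

	parse_rc16(buf, &info);
	printf("last lba %llu, lbs %u, pbs %u, lbpme %d\n",
	       (unsigned long long)info.last_lba, info.logical_block_size,
	       info.physical_block_size, info.lbpme);
	return 0;
}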
2047
2048static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
2049 unsigned char *buffer)
2050{
2051 unsigned char cmd[16];
2052 struct scsi_sense_hdr sshdr;
2053 int sense_valid = 0;
2054 int the_result;
3233ac19 2055 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
0da205e0
MW
2056 sector_t lba;
2057 unsigned sector_size;
2058
2059 do {
2060 cmd[0] = READ_CAPACITY;
2061 memset(&cmd[1], 0, 9);
2062 memset(buffer, 0, 8);
2063
2064 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
2065 buffer, 8, &sshdr,
2066 SD_TIMEOUT, SD_MAX_RETRIES, NULL);
2067
2068 if (media_not_present(sdkp, &sshdr))
2069 return -ENODEV;
2070
3233ac19 2071 if (the_result) {
0da205e0 2072 sense_valid = scsi_sense_valid(&sshdr);
3233ac19
JB
2073 if (sense_valid &&
2074 sshdr.sense_key == UNIT_ATTENTION &&
2075 sshdr.asc == 0x29 && sshdr.ascq == 0x00)
2076 /* Device reset might occur several times,
2077 * give it one more chance */
2078 if (--reset_retries > 0)
2079 continue;
2080 }
0da205e0
MW
2081 retries--;
2082
2083 } while (the_result && retries);
2084
2085 if (the_result) {
ef61329d 2086 sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
0da205e0
MW
2087 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
2088 return -EINVAL;
2089 }
2090
8f76d151
DH
2091 sector_size = get_unaligned_be32(&buffer[4]);
2092 lba = get_unaligned_be32(&buffer[0]);
0da205e0 2093
5ce524bd
HG
2094 if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) {
2095 /* Some buggy (USB card reader) devices return an lba of
2096 0xffffffff when they want to report a size of 0 (by
2097 which they really mean no media is present) */
2098 sdkp->capacity = 0;
5cc10350 2099 sdkp->physical_block_size = sector_size;
5ce524bd
HG
2100 return sector_size;
2101 }
2102
0da205e0
MW
2103 if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
2104 sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
2105 "kernel compiled with support for large block "
2106 "devices.\n");
2107 sdkp->capacity = 0;
2108 return -EOVERFLOW;
2109 }
2110
2111 sdkp->capacity = lba + 1;
526f7c79 2112 sdkp->physical_block_size = sector_size;
0da205e0
MW
2113 return sector_size;
2114}
2115
2b301307
MW
2116static int sd_try_rc16_first(struct scsi_device *sdp)
2117{
f87146bb
HR
2118 if (sdp->host->max_cmd_len < 16)
2119 return 0;
6a0bdffa
AS
2120 if (sdp->try_rc_10_first)
2121 return 0;
2b301307
MW
2122 if (sdp->scsi_level > SCSI_SPC_2)
2123 return 1;
2124 if (scsi_device_protection(sdp))
2125 return 1;
2126 return 0;
2127}
2128
0da205e0
MW
2129/*
2130 * read disk capacity
2131 */
2132static void
2133sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
2134{
2135 int sector_size;
2136 struct scsi_device *sdp = sdkp->device;
70a9b873 2137 sector_t old_capacity = sdkp->capacity;
0da205e0 2138
2b301307 2139 if (sd_try_rc16_first(sdp)) {
0da205e0
MW
2140 sector_size = read_capacity_16(sdkp, sdp, buffer);
2141 if (sector_size == -EOVERFLOW)
1da177e4 2142 goto got_data;
2b301307
MW
2143 if (sector_size == -ENODEV)
2144 return;
2145 if (sector_size < 0)
2146 sector_size = read_capacity_10(sdkp, sdp, buffer);
0da205e0
MW
2147 if (sector_size < 0)
2148 return;
1da177e4 2149 } else {
0da205e0
MW
2150 sector_size = read_capacity_10(sdkp, sdp, buffer);
2151 if (sector_size == -EOVERFLOW)
2152 goto got_data;
2153 if (sector_size < 0)
2154 return;
2155 if ((sizeof(sdkp->capacity) > 4) &&
2156 (sdkp->capacity > 0xffffffffULL)) {
2157 int old_sector_size = sector_size;
2158 sd_printk(KERN_NOTICE, sdkp, "Very big device. "
2159 "Trying to use READ CAPACITY(16).\n");
2160 sector_size = read_capacity_16(sdkp, sdp, buffer);
2161 if (sector_size < 0) {
2162 sd_printk(KERN_NOTICE, sdkp,
2163 "Using 0xffffffff as device size\n");
2164 sdkp->capacity = 1 + (sector_t) 0xffffffff;
2165 sector_size = old_sector_size;
2166 goto got_data;
2167 }
2168 }
2169 }
1da177e4 2170
5c211caa
AS
2171 /* Some devices are known to return the total number of blocks,
2172 * not the highest block number. Some devices have versions
2173 * which do this and others which do not. Some devices we might
2174 * suspect of doing this but we don't know for certain.
2175 *
2176 * If we know the reported capacity is wrong, decrement it. If
2177 * we can only guess, then assume the number of blocks is even
2178 * (usually true but not always) and err on the side of lowering
2179 * the capacity.
2180 */
2181 if (sdp->fix_capacity ||
2182 (sdp->guess_capacity && (sdkp->capacity & 0x01))) {
2183 sd_printk(KERN_INFO, sdkp, "Adjusting the sector count "
2184 "from its reported value: %llu\n",
2185 (unsigned long long) sdkp->capacity);
1da177e4 2186 --sdkp->capacity;
61bf54b7
ON
2187 }
2188
1da177e4
LT
2189got_data:
2190 if (sector_size == 0) {
2191 sector_size = 512;
e73aec82
MP
2192 sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, "
2193 "assuming 512.\n");
1da177e4
LT
2194 }
2195
2196 if (sector_size != 512 &&
2197 sector_size != 1024 &&
2198 sector_size != 2048 &&
74856fbf 2199 sector_size != 4096) {
e73aec82
MP
2200 sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
2201 sector_size);
1da177e4
LT
2202 /*
2203 * The user might want to re-format the drive with
2204 * a supported sector size. Once this happens, it
2205 * would be relatively trivial to set the device up.
2206 * For this reason, we leave it in the table.
2207 */
2208 sdkp->capacity = 0;
2209 /*
2210 * set a bogus sector size so the normal read/write
2211 * logic in the block layer will eventually refuse any
2212 * request on this device without tripping over power
2213 * of two sector size assumptions
2214 */
2215 sector_size = 512;
2216 }
e1defc4f 2217 blk_queue_logical_block_size(sdp->request_queue, sector_size);
7404ad3b 2218
1da177e4 2219 {
7404ad3b 2220 char cap_str_2[10], cap_str_10[10];
1da177e4 2221
b9f28d86
JB
2222 string_get_size(sdkp->capacity, sector_size,
2223 STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
2224 string_get_size(sdkp->capacity, sector_size,
2225 STRING_UNITS_10, cap_str_10,
7404ad3b 2226 sizeof(cap_str_10));
1da177e4 2227
ea09bcc9 2228 if (sdkp->first_scan || old_capacity != sdkp->capacity) {
70a9b873 2229 sd_printk(KERN_NOTICE, sdkp,
ea09bcc9 2230 "%llu %d-byte logical blocks: (%s/%s)\n",
70a9b873
MP
2231 (unsigned long long)sdkp->capacity,
2232 sector_size, cap_str_10, cap_str_2);
ea09bcc9 2233
526f7c79 2234 if (sdkp->physical_block_size != sector_size)
ea09bcc9
MP
2235 sd_printk(KERN_NOTICE, sdkp,
2236 "%u-byte physical blocks\n",
526f7c79 2237 sdkp->physical_block_size);
ea09bcc9 2238 }
1da177e4
LT
2239 }
2240
bcdb247c
MP
2241 if (sdkp->capacity > 0xffffffff) {
2242 sdp->use_16_for_rw = 1;
2243 sdkp->max_xfer_blocks = SD_MAX_XFER_BLOCKS;
2244 } else
2245 sdkp->max_xfer_blocks = SD_DEF_XFER_BLOCKS;
53ad570b 2246
1da177e4
LT
2247 /* Rescale capacity to 512-byte units */
2248 if (sector_size == 4096)
2249 sdkp->capacity <<= 3;
2250 else if (sector_size == 2048)
2251 sdkp->capacity <<= 2;
2252 else if (sector_size == 1024)
2253 sdkp->capacity <<= 1;
1da177e4 2254
526f7c79
MP
2255 blk_queue_physical_block_size(sdp->request_queue,
2256 sdkp->physical_block_size);
1da177e4
LT
2257 sdkp->device->sector_size = sector_size;
2258}
2259
2260/* called with buffer of length 512 */
2261static inline int
ea73a9f2
JB
2262sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage,
2263 unsigned char *buffer, int len, struct scsi_mode_data *data,
2264 struct scsi_sense_hdr *sshdr)
1da177e4 2265{
ea73a9f2 2266 return scsi_mode_sense(sdp, dbd, modepage, buffer, len,
1cf72699 2267 SD_TIMEOUT, SD_MAX_RETRIES, data,
ea73a9f2 2268 sshdr);
1da177e4
LT
2269}
2270
2271/*
2272 * read write protect setting, if possible - called only in sd_revalidate_disk()
48970800 2273 * called with buffer of length SD_BUF_SIZE
1da177e4
LT
2274 */
2275static void
e73aec82 2276sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
ea73a9f2 2277{
1da177e4 2278 int res;
ea73a9f2 2279 struct scsi_device *sdp = sdkp->device;
1da177e4 2280 struct scsi_mode_data data;
70a9b873 2281 int old_wp = sdkp->write_prot;
1da177e4
LT
2282
2283 set_disk_ro(sdkp->disk, 0);
ea73a9f2 2284 if (sdp->skip_ms_page_3f) {
b2bff6ce 2285 sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
1da177e4
LT
2286 return;
2287 }
2288
ea73a9f2
JB
2289 if (sdp->use_192_bytes_for_3f) {
2290 res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 192, &data, NULL);
1da177e4
LT
2291 } else {
2292 /*
2293 * First attempt: ask for all pages (0x3F), but only 4 bytes.
2294 * We have to start carefully: some devices hang if we ask
2295 * for more than is available.
2296 */
ea73a9f2 2297 res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 4, &data, NULL);
1da177e4
LT
2298
2299 /*
2300 * Second attempt: ask for page 0. When only page 0 is
2301 * implemented, a request for page 3F may return Sense Key
2302 * 5: Illegal Request, Sense Code 24: Invalid field in
2303 * CDB.
2304 */
2305 if (!scsi_status_is_good(res))
ea73a9f2 2306 res = sd_do_mode_sense(sdp, 0, 0, buffer, 4, &data, NULL);
1da177e4
LT
2307
2308 /*
2309 * Third attempt: ask for 255 bytes, as we did earlier.
2310 */
2311 if (!scsi_status_is_good(res))
ea73a9f2
JB
2312 res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 255,
2313 &data, NULL);
1da177e4
LT
2314 }
2315
2316 if (!scsi_status_is_good(res)) {
b2bff6ce 2317 sd_first_printk(KERN_WARNING, sdkp,
e73aec82 2318 "Test WP failed, assume Write Enabled\n");
1da177e4
LT
2319 } else {
2320 sdkp->write_prot = ((data.device_specific & 0x80) != 0);
2321 set_disk_ro(sdkp->disk, sdkp->write_prot);
70a9b873
MP
2322 if (sdkp->first_scan || old_wp != sdkp->write_prot) {
2323 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
2324 sdkp->write_prot ? "on" : "off");
2325 sd_printk(KERN_DEBUG, sdkp,
2326 "Mode Sense: %02x %02x %02x %02x\n",
2327 buffer[0], buffer[1], buffer[2], buffer[3]);
2328 }
1da177e4
LT
2329 }
2330}
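
/*
 * A tiny illustrative fragment (not part of sd.c itself): the write protect
 * state tested above is bit 7 of the device-specific parameter byte in the
 * MODE SENSE parameter header (bit 4 of the same byte is the DPOFUA flag
 * used by sd_read_cache_type() further down).
 */
#include <stdio.h>

int main(void)
{
	unsigned char device_specific = 0x90;	/* example header byte */

	printf("Write Protect is %s, DPOFUA %s\n",
	       (device_specific & 0x80) ? "on" : "off",
	       (device_specific & 0x10) ? "set" : "clear");
	return 0;
}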
2331
2332/*
2333 * sd_read_cache_type - called only from sd_revalidate_disk()
48970800 2334 * called with buffer of length SD_BUF_SIZE
1da177e4
LT
2335 */
2336static void
e73aec82 2337sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
631e8a13 2338{
1da177e4 2339 int len = 0, res;
ea73a9f2 2340 struct scsi_device *sdp = sdkp->device;
1da177e4 2341
631e8a13
AV
2342 int dbd;
2343 int modepage;
0bcaa111 2344 int first_len;
1da177e4
LT
2345 struct scsi_mode_data data;
2346 struct scsi_sense_hdr sshdr;
70a9b873
MP
2347 int old_wce = sdkp->WCE;
2348 int old_rcd = sdkp->RCD;
2349 int old_dpofua = sdkp->DPOFUA;
1da177e4 2350
39c60a09
JB
2351
2352 if (sdkp->cache_override)
2353 return;
2354
0bcaa111
LT
2355 first_len = 4;
2356 if (sdp->skip_ms_page_8) {
2357 if (sdp->type == TYPE_RBC)
2358 goto defaults;
2359 else {
2360 if (sdp->skip_ms_page_3f)
2361 goto defaults;
2362 modepage = 0x3F;
2363 if (sdp->use_192_bytes_for_3f)
2364 first_len = 192;
2365 dbd = 0;
2366 }
2367 } else if (sdp->type == TYPE_RBC) {
631e8a13
AV
2368 modepage = 6;
2369 dbd = 8;
2370 } else {
2371 modepage = 8;
2372 dbd = 0;
2373 }
2374
1da177e4 2375 /* cautiously ask */
0bcaa111
LT
2376 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len,
2377 &data, &sshdr);
1da177e4
LT
2378
2379 if (!scsi_status_is_good(res))
2380 goto bad_sense;
2381
6d73c851
AV
2382 if (!data.header_length) {
2383 modepage = 6;
0bcaa111 2384 first_len = 0;
b2bff6ce
MP
2385 sd_first_printk(KERN_ERR, sdkp,
2386 "Missing header in MODE_SENSE response\n");
6d73c851
AV
2387 }
2388
1da177e4
LT
2389 /* that went OK, now ask for the proper length */
2390 len = data.length;
2391
2392 /*
2393 * We're only interested in the first three bytes, actually.
2394 * But the data cache page is defined for the first 20.
2395 */
2396 if (len < 3)
2397 goto bad_sense;
0bcaa111 2398 else if (len > SD_BUF_SIZE) {
b2bff6ce 2399 sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
0bcaa111
LT
2400 "data from %d to %d bytes\n", len, SD_BUF_SIZE);
2401 len = SD_BUF_SIZE;
2402 }
2403 if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
2404 len = 192;
1da177e4
LT
2405
2406 /* Get the data */
0bcaa111
LT
2407 if (len > first_len)
2408 res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len,
2409 &data, &sshdr);
1da177e4
LT
2410
2411 if (scsi_status_is_good(res)) {
631e8a13 2412 int offset = data.header_length + data.block_descriptor_length;
1da177e4 2413
0bcaa111
LT
2414 while (offset < len) {
2415 u8 page_code = buffer[offset] & 0x3F;
2416 u8 spf = buffer[offset] & 0x40;
2417
2418 if (page_code == 8 || page_code == 6) {
2419 /* We're interested only in the first 3 bytes.
2420 */
2421 if (len - offset <= 2) {
b2bff6ce
MP
2422 sd_first_printk(KERN_ERR, sdkp,
2423 "Incomplete mode parameter "
2424 "data\n");
0bcaa111
LT
2425 goto defaults;
2426 } else {
2427 modepage = page_code;
2428 goto Page_found;
2429 }
2430 } else {
2431 /* Go to the next page */
2432 if (spf && len - offset > 3)
2433 offset += 4 + (buffer[offset+2] << 8) +
2434 buffer[offset+3];
2435 else if (!spf && len - offset > 1)
2436 offset += 2 + buffer[offset+1];
2437 else {
b2bff6ce
MP
2438 sd_first_printk(KERN_ERR, sdkp,
2439 "Incomplete mode "
2440 "parameter data\n");
0bcaa111
LT
2441 goto defaults;
2442 }
2443 }
48970800
AV
2444 }
2445
b2bff6ce 2446 sd_first_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
984f1733
AS
2447 goto defaults;
2448
0bcaa111 2449 Page_found:
631e8a13
AV
2450 if (modepage == 8) {
2451 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
2452 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
2453 } else {
2454 sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0);
2455 sdkp->RCD = 0;
2456 }
1da177e4 2457
007365ad 2458 sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
b14bf2d0
AS
2459 if (sdp->broken_fua) {
2460 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
2461 sdkp->DPOFUA = 0;
2462 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
b2bff6ce 2463 sd_first_printk(KERN_NOTICE, sdkp,
e73aec82 2464 "Uses READ/WRITE(6), disabling FUA\n");
007365ad
TH
2465 sdkp->DPOFUA = 0;
2466 }
2467
2eefd57b
SRT
2468 /* No cache flush allowed for write protected devices */
2469 if (sdkp->WCE && sdkp->write_prot)
2470 sdkp->WCE = 0;
2471
70a9b873
MP
2472 if (sdkp->first_scan || old_wce != sdkp->WCE ||
2473 old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
2474 sd_printk(KERN_NOTICE, sdkp,
2475 "Write cache: %s, read cache: %s, %s\n",
2476 sdkp->WCE ? "enabled" : "disabled",
2477 sdkp->RCD ? "disabled" : "enabled",
2478 sdkp->DPOFUA ? "supports DPO and FUA"
2479 : "doesn't support DPO or FUA");
1da177e4
LT
2480
2481 return;
2482 }
2483
2484bad_sense:
ea73a9f2 2485 if (scsi_sense_valid(&sshdr) &&
1da177e4
LT
2486 sshdr.sense_key == ILLEGAL_REQUEST &&
2487 sshdr.asc == 0x24 && sshdr.ascq == 0x0)
e73aec82 2488 /* Invalid field in CDB */
b2bff6ce 2489 sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
1da177e4 2490 else
b2bff6ce
MP
2491 sd_first_printk(KERN_ERR, sdkp,
2492 "Asking for cache data failed\n");
1da177e4
LT
2493
2494defaults:
b81478d8 2495 if (sdp->wce_default_on) {
b2bff6ce
MP
2496 sd_first_printk(KERN_NOTICE, sdkp,
2497 "Assuming drive cache: write back\n");
b81478d8
NJ
2498 sdkp->WCE = 1;
2499 } else {
b2bff6ce
MP
2500 sd_first_printk(KERN_ERR, sdkp,
2501 "Assuming drive cache: write through\n");
b81478d8
NJ
2502 sdkp->WCE = 0;
2503 }
1da177e4 2504 sdkp->RCD = 0;
48970800 2505 sdkp->DPOFUA = 0;
1da177e4
LT
2506}
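
/*
 * An illustrative sketch (not part of sd.c itself) of the two bits
 * sd_read_cache_type() extracts from byte 2 of the Caching mode page
 * (page 8): WCE (bit 2, write cache enable) and RCD (bit 0, read cache
 * disable).  The helper name is invented for the example.
 */
#include <stdio.h>

static void decode_caching_page_byte2(unsigned char b, int *wce, int *rcd)
{
	*wce = (b & 0x04) != 0;		/* Write Cache Enable */
	*rcd = (b & 0x01) != 0;		/* Read Cache Disable */
}

int main(void)
{
	int wce, rcd;

	decode_caching_page_byte2(0x04, &wce, &rcd);
	printf("Write cache: %s, read cache: %s\n",
	       wce ? "enabled" : "disabled",
	       rcd ? "disabled" : "enabled");
	return 0;	/* prints "Write cache: enabled, read cache: enabled" */
}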
2507
e0597d70
MP
2508/*
2509 * The ATO bit indicates whether the DIF application tag is available
2510 * for use by the operating system.
2511 */
439d77f7 2512static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
e0597d70
MP
2513{
2514 int res, offset;
2515 struct scsi_device *sdp = sdkp->device;
2516 struct scsi_mode_data data;
2517 struct scsi_sense_hdr sshdr;
2518
2519 if (sdp->type != TYPE_DISK)
2520 return;
2521
2522 if (sdkp->protection_type == 0)
2523 return;
2524
2525 res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
2526 SD_MAX_RETRIES, &data, &sshdr);
2527
2528 if (!scsi_status_is_good(res) || !data.header_length ||
2529 data.length < 6) {
b2bff6ce 2530 sd_first_printk(KERN_WARNING, sdkp,
e0597d70
MP
2531 "getting Control mode page failed, assume no ATO\n");
2532
2533 if (scsi_sense_valid(&sshdr))
2534 sd_print_sense_hdr(sdkp, &sshdr);
2535
2536 return;
2537 }
2538
2539 offset = data.header_length + data.block_descriptor_length;
2540
2541 if ((buffer[offset] & 0x3f) != 0x0a) {
b2bff6ce 2542 sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
e0597d70
MP
2543 return;
2544 }
2545
2546 if ((buffer[offset + 5] & 0x80) == 0)
2547 return;
2548
2549 sdkp->ATO = 1;
2550
2551 return;
2552}
2553
d11b6916
MP
2554/**
2555 * sd_read_block_limits - Query disk device for preferred I/O sizes.
2556 * @sdkp: disk to query
2557 */
2558static void sd_read_block_limits(struct scsi_disk *sdkp)
2559{
2560 unsigned int sector_sz = sdkp->device->sector_size;
bb2d3de1 2561 const int vpd_len = 64;
bcdb247c 2562 u32 max_xfer_length;
e3deec09 2563 unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
d11b6916 2564
e3deec09
JB
2565 if (!buffer ||
2566 /* Block Limits VPD */
2567 scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
2568 goto out;
d11b6916 2569
bcdb247c
MP
2570 max_xfer_length = get_unaligned_be32(&buffer[8]);
2571 if (max_xfer_length)
2572 sdkp->max_xfer_blocks = max_xfer_length;
2573
d11b6916
MP
2574 blk_queue_io_min(sdkp->disk->queue,
2575 get_unaligned_be16(&buffer[6]) * sector_sz);
2576 blk_queue_io_opt(sdkp->disk->queue,
2577 get_unaligned_be32(&buffer[12]) * sector_sz);
2578
c98a0eb0
MP
2579 if (buffer[3] == 0x3c) {
2580 unsigned int lba_count, desc_count;
e339c1a7 2581
5db44863 2582 sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);
e339c1a7 2583
c98a0eb0 2584 if (!sdkp->lbpme)
045d3fe7 2585 goto out;
045d3fe7 2586
c98a0eb0
MP
2587 lba_count = get_unaligned_be32(&buffer[20]);
2588 desc_count = get_unaligned_be32(&buffer[24]);
045d3fe7 2589
c98a0eb0
MP
2590 if (lba_count && desc_count)
2591 sdkp->max_unmap_blocks = lba_count;
e339c1a7 2592
c98a0eb0 2593 sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);
e339c1a7
MP
2594
2595 if (buffer[32] & 0x80)
c98a0eb0 2596 sdkp->unmap_alignment =
e339c1a7 2597 get_unaligned_be32(&buffer[32]) & ~(1 << 31);
c98a0eb0
MP
2598
2599 if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
2600
2601 if (sdkp->max_unmap_blocks)
2602 sd_config_discard(sdkp, SD_LBP_UNMAP);
2603 else
2604 sd_config_discard(sdkp, SD_LBP_WS16);
2605
2606 } else { /* LBP VPD page tells us what to use */
e461338b
MP
2607 if (sdkp->lbpu && sdkp->max_unmap_blocks && !sdkp->lbprz)
2608 sd_config_discard(sdkp, SD_LBP_UNMAP);
2609 else if (sdkp->lbpws)
c98a0eb0
MP
2610 sd_config_discard(sdkp, SD_LBP_WS16);
2611 else if (sdkp->lbpws10)
2612 sd_config_discard(sdkp, SD_LBP_WS10);
7985090a
MP
2613 else if (sdkp->lbpu && sdkp->max_unmap_blocks)
2614 sd_config_discard(sdkp, SD_LBP_UNMAP);
c98a0eb0
MP
2615 else
2616 sd_config_discard(sdkp, SD_LBP_DISABLE);
2617 }
e339c1a7
MP
2618 }
2619
e3deec09 2620 out:
d11b6916
MP
2621 kfree(buffer);
2622}
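
/*
 * A compact sketch (not part of sd.c itself) of the Block Limits VPD page
 * (0xb0) offsets sd_read_block_limits() consumes, all counted in logical
 * blocks: bytes 6-7 optimal transfer granularity, 8-11 maximum transfer
 * length, 12-15 optimal transfer length, 20-23 maximum unmap LBA count,
 * 28-31 optimal unmap granularity, 36-43 maximum WRITE SAME length.
 * Struct and helper names are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

struct block_limits {
	uint16_t opt_xfer_granularity;
	uint32_t max_xfer_len;
	uint32_t opt_xfer_len;
	uint32_t max_unmap_lba_count;
	uint32_t unmap_granularity;
	uint64_t max_write_same_len;
};

static uint32_t be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

static void parse_block_limits_vpd(const unsigned char *b,
				   struct block_limits *bl)
{
	bl->opt_xfer_granularity = (b[6] << 8) | b[7];
	bl->max_xfer_len = be32(&b[8]);
	bl->opt_xfer_len = be32(&b[12]);
	bl->max_unmap_lba_count = be32(&b[20]);
	bl->unmap_granularity = be32(&b[28]);
	bl->max_write_same_len = ((uint64_t)be32(&b[36]) << 32) | be32(&b[40]);
}

int main(void)
{
	unsigned char vpd[64] = { [7] = 8, [14] = 0x08 };
	struct block_limits bl;

	parse_block_limits_vpd(vpd, &bl);
	printf("granularity %u blocks, optimal %u blocks\n",
	       bl.opt_xfer_granularity, bl.opt_xfer_len);
	return 0;
}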
2623
3821d768
MP
2624/**
2625 * sd_read_block_characteristics - Query block dev. characteristics
2626 * @sdkp: disk to query
2627 */
2628static void sd_read_block_characteristics(struct scsi_disk *sdkp)
2629{
e3deec09 2630 unsigned char *buffer;
3821d768 2631 u16 rot;
bb2d3de1 2632 const int vpd_len = 64;
3821d768 2633
e3deec09 2634 buffer = kmalloc(vpd_len, GFP_KERNEL);
3821d768 2635
e3deec09
JB
2636 if (!buffer ||
2637 /* Block Device Characteristics VPD */
2638 scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
2639 goto out;
3821d768
MP
2640
2641 rot = get_unaligned_be16(&buffer[4]);
2642
b277da0a 2643 if (rot == 1) {
3821d768 2644 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
b277da0a
MS
2645 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, sdkp->disk->queue);
2646 }
3821d768 2647
e3deec09 2648 out:
3821d768
MP
2649 kfree(buffer);
2650}
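
/*
 * A small illustrative fragment (not part of sd.c itself): the MEDIUM
 * ROTATION RATE field sits in bytes 4-5 of the Block Device Characteristics
 * VPD page (0xb1); a value of 1 identifies a non-rotating (solid state)
 * medium, which is why the queue is flagged non-rotational above.
 */
#include <stdio.h>

int main(void)
{
	unsigned char vpd[64] = { [5] = 0x01 };	/* rotation rate = 1 */
	unsigned int rot = (vpd[4] << 8) | vpd[5];

	printf("%s\n", rot == 1 ? "non-rotating (SSD)"
				: rot ? "rotating" : "not reported");
	return 0;
}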
2651
045d3fe7 2652/**
c98a0eb0 2653 * sd_read_block_provisioning - Query provisioning VPD page
045d3fe7
MP
2654 * @sdkp: disk to query
2655 */
c98a0eb0 2656static void sd_read_block_provisioning(struct scsi_disk *sdkp)
045d3fe7
MP
2657{
2658 unsigned char *buffer;
2659 const int vpd_len = 8;
2660
c98a0eb0 2661 if (sdkp->lbpme == 0)
045d3fe7
MP
2662 return;
2663
2664 buffer = kmalloc(vpd_len, GFP_KERNEL);
2665
2666 if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
2667 goto out;
2668
c98a0eb0
MP
2669 sdkp->lbpvpd = 1;
2670 sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */
2671 sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
2672 sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */
045d3fe7
MP
2673
2674 out:
2675 kfree(buffer);
2676}
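
/*
 * A small illustrative fragment (not part of sd.c itself) of the three bits
 * sd_read_block_provisioning() reads from byte 5 of the Logical Block
 * Provisioning VPD page (0xb2): LBPU (bit 7), LBPWS (bit 6) and LBPWS10
 * (bit 5), i.e. which discard mechanisms the device advertises.
 */
#include <stdio.h>

int main(void)
{
	unsigned char byte5 = 0xc0;	/* example: UNMAP + WRITE SAME(16) */

	printf("UNMAP: %d, WS16 w/UNMAP: %d, WS10 w/UNMAP: %d\n",
	       (byte5 >> 7) & 1, (byte5 >> 6) & 1, (byte5 >> 5) & 1);
	return 0;
}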
2677
5db44863
MP
2678static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
2679{
66c28f97
MP
2680 struct scsi_device *sdev = sdkp->device;
2681
54b2b50c
MP
2682 if (sdev->host->no_write_same) {
2683 sdev->no_write_same = 1;
2684
2685 return;
2686 }
2687
66c28f97 2688 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
af73623f
BS
2689 /* too large values might cause issues with arcmsr */
2690 int vpd_buf_len = 64;
2691
66c28f97
MP
2692 sdev->no_report_opcodes = 1;
2693
2694 /* Disable WRITE SAME if REPORT SUPPORTED OPERATION
2695 * CODES is unsupported and the device has an ATA
2696 * Information VPD page (SAT).
2697 */
af73623f 2698 if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
66c28f97
MP
2699 sdev->no_write_same = 1;
2700 }
2701
2702 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
5db44863 2703 sdkp->ws16 = 1;
66c28f97
MP
2704
2705 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1)
2706 sdkp->ws10 = 1;
5db44863
MP
2707}
2708
ffd4bc2a
MP
2709static int sd_try_extended_inquiry(struct scsi_device *sdp)
2710{
c1d40a52
MP
2711 /* Attempt VPD inquiry if the device blacklist explicitly calls
2712 * for it.
2713 */
2714 if (sdp->try_vpd_pages)
2715 return 1;
ffd4bc2a
MP
2716 /*
2717 * Although VPD inquiries can go to SCSI-2 type devices,
2718 * some USB ones crash on receiving them, and the pages
2719 * we currently ask for are for SPC-3 and beyond
2720 */
09b6b51b 2721 if (sdp->scsi_level > SCSI_SPC_2 && !sdp->skip_vpd_pages)
ffd4bc2a
MP
2722 return 1;
2723 return 0;
2724}
2725
1da177e4
LT
2726/**
2727 * sd_revalidate_disk - called the first time a new disk is seen,
2728 * performs disk spin up, read_capacity, etc.
2729 * @disk: struct gendisk we care about
2730 **/
2731static int sd_revalidate_disk(struct gendisk *disk)
2732{
2733 struct scsi_disk *sdkp = scsi_disk(disk);
2734 struct scsi_device *sdp = sdkp->device;
1da177e4 2735 unsigned char *buffer;
bcdb247c 2736 unsigned int max_xfer;
1da177e4 2737
fa0d34be
MP
2738 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
2739 "sd_revalidate_disk\n"));
1da177e4
LT
2740
2741 /*
2742 * If the device is offline, don't try and read capacity or any
2743 * of the other niceties.
2744 */
2745 if (!scsi_device_online(sdp))
2746 goto out;
2747
a6123f14 2748 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
1da177e4 2749 if (!buffer) {
e73aec82
MP
2750 sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
2751 "allocation failure.\n");
ea73a9f2 2752 goto out;
1da177e4
LT
2753 }
2754
e73aec82 2755 sd_spinup_disk(sdkp);
1da177e4
LT
2756
2757 /*
2758 * Without media there is no reason to ask; moreover, some devices
2759 * react badly if we do.
2760 */
2761 if (sdkp->media_present) {
e73aec82 2762 sd_read_capacity(sdkp, buffer);
ffd4bc2a
MP
2763
2764 if (sd_try_extended_inquiry(sdp)) {
c98a0eb0 2765 sd_read_block_provisioning(sdkp);
ffd4bc2a
MP
2766 sd_read_block_limits(sdkp);
2767 sd_read_block_characteristics(sdkp);
2768 }
2769
e73aec82
MP
2770 sd_read_write_protect_flag(sdkp, buffer);
2771 sd_read_cache_type(sdkp, buffer);
e0597d70 2772 sd_read_app_tag_own(sdkp, buffer);
5db44863 2773 sd_read_write_same(sdkp, buffer);
1da177e4 2774 }
461d4e90 2775
70a9b873
MP
2776 sdkp->first_scan = 0;
2777
461d4e90
TH
2778 /*
2779 * We now have all cache related info, determine how we deal
4913efe4 2780 * with flush requests.
461d4e90 2781 */
cb2fb68d 2782 sd_set_flush_flag(sdkp);
461d4e90 2783
3a9794d3 2784 max_xfer = sdkp->max_xfer_blocks;
bcdb247c 2785 max_xfer <<= ilog2(sdp->sector_size) - 9;
3a9794d3 2786
4f258a46
MP
2787 sdkp->disk->queue->limits.max_sectors =
2788 min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
2789
1da177e4 2790 set_capacity(disk, sdkp->capacity);
5db44863 2791 sd_config_write_same(sdkp);
1da177e4
LT
2792 kfree(buffer);
2793
1da177e4
LT
2794 out:
2795 return 0;
2796}
2797
72ec24bd
TH
2798/**
2799 * sd_unlock_native_capacity - unlock native capacity
2800 * @disk: struct gendisk to set capacity for
2801 *
2802 * Block layer calls this function if it detects that partitions
2803 * on @disk reach beyond the end of the device. If the SCSI host
2804 * implements ->unlock_native_capacity() method, it's invoked to
2805 * give it a chance to adjust the device capacity.
2806 *
2807 * CONTEXT:
2808 * Defined by block layer. Might sleep.
2809 */
2810static void sd_unlock_native_capacity(struct gendisk *disk)
2811{
2812 struct scsi_device *sdev = scsi_disk(disk)->device;
2813
2814 if (sdev->host->hostt->unlock_native_capacity)
2815 sdev->host->hostt->unlock_native_capacity(sdev);
2816}
2817
3e1a7ff8
TH
2818/**
2819 * sd_format_disk_name - format disk name
2820 * @prefix: name prefix - ie. "sd" for SCSI disks
2821 * @index: index of the disk to format name for
2822 * @buf: output buffer
2823 * @buflen: length of the output buffer
2824 *
2825 * SCSI disk names start at sda. The 26th device is sdz and the
2826 * 27th is sdaa. The last name with a two-letter suffix is sdzz,
2827 * which is followed by sdaaa.
2828 *
2829 * This is essentially base-26 counting with one extra 'nil' entry
3ad2f3fb 2830 * for every digit after the first, so the name can be
3e1a7ff8
TH
2831 * computed much like a base-26 conversion, with the index
2832 * decremented by one after each digit is produced.
2833 *
2834 * CONTEXT:
2835 * Don't care.
2836 *
2837 * RETURNS:
2838 * 0 on success, -errno on failure.
2839 */
2840static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
2841{
2842 const int base = 'z' - 'a' + 1;
2843 char *begin = buf + strlen(prefix);
2844 char *end = buf + buflen;
2845 char *p;
2846 int unit;
2847
2848 p = end - 1;
2849 *p = '\0';
2850 unit = base;
2851 do {
2852 if (p == begin)
2853 return -EINVAL;
2854 *--p = 'a' + (index % unit);
2855 index = (index / unit) - 1;
2856 } while (index >= 0);
2857
2858 memmove(begin, p, end - p);
2859 memcpy(buf, prefix, strlen(prefix));
2860
2861 return 0;
2862}
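
/*
 * A standalone userspace copy of the naming rule above (an illustrative
 * sketch, not part of sd.c itself), handy for checking the index -> name
 * mapping by hand.
 */
#include <stdio.h>
#include <string.h>

static int format_disk_name(const char *prefix, int index, char *buf,
			    int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p = end - 1;

	*p = '\0';
	do {
		if (p == begin)
			return -1;
		*--p = 'a' + (index % base);
		index = (index / base) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));
	return 0;
}

int main(void)
{
	int samples[] = { 0, 25, 26, 701, 702 };
	char name[32];
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		format_disk_name("sd", samples[i], name, sizeof(name));
		printf("%d -> %s\n", samples[i], name);
	}
	return 0;	/* prints sda, sdz, sdaa, sdzz, sdaaa */
}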
2863
4ace92fc
AV
2864/*
2865 * The asynchronous part of sd_probe
2866 */
2867static void sd_probe_async(void *data, async_cookie_t cookie)
2868{
2869 struct scsi_disk *sdkp = data;
2870 struct scsi_device *sdp;
2871 struct gendisk *gd;
2872 u32 index;
2873 struct device *dev;
2874
2875 sdp = sdkp->device;
2876 gd = sdkp->disk;
2877 index = sdkp->index;
2878 dev = &sdp->sdev_gendev;
2879
1a03ae0f
MR
2880 gd->major = sd_major((index & 0xf0) >> 4);
2881 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
2882 gd->minors = SD_MINORS;
2883
4ace92fc
AV
2884 gd->fops = &sd_fops;
2885 gd->private_data = &sdkp->driver;
2886 gd->queue = sdkp->device->request_queue;
2887
70a9b873
MP
2888 /* defaults, until the device tells us otherwise */
2889 sdp->sector_size = 512;
2890 sdkp->capacity = 0;
2891 sdkp->media_present = 1;
2892 sdkp->write_prot = 0;
39c60a09 2893 sdkp->cache_override = 0;
70a9b873
MP
2894 sdkp->WCE = 0;
2895 sdkp->RCD = 0;
2896 sdkp->ATO = 0;
2897 sdkp->first_scan = 1;
18a4d0a2 2898 sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
70a9b873 2899
4ace92fc
AV
2900 sd_revalidate_disk(gd);
2901
4ace92fc 2902 gd->driverfs_dev = &sdp->sdev_gendev;
97fedbbe 2903 gd->flags = GENHD_FL_EXT_DEVT;
2bae0093 2904 if (sdp->removable) {
4ace92fc 2905 gd->flags |= GENHD_FL_REMOVABLE;
2bae0093
TH
2906 gd->events |= DISK_EVENT_MEDIA_CHANGE;
2907 }
4ace92fc 2908
10c580e4 2909 blk_pm_runtime_init(sdp->request_queue, dev);
4ace92fc 2910 add_disk(gd);
fe542396
MP
2911 if (sdkp->capacity)
2912 sd_dif_config_host(sdkp);
4ace92fc 2913
3821d768
MP
2914 sd_revalidate_disk(gd);
2915
4ace92fc
AV
2916 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
2917 sdp->removable ? "removable " : "");
478a8a05 2918 scsi_autopm_put_device(sdp);
ea038f63 2919 put_device(&sdkp->dev);
4ace92fc
AV
2920}
2921
1da177e4
LT
2922/**
2923 * sd_probe - called during driver initialization and whenever a
2924 * new scsi device is attached to the system. It is called once
2925 * for each scsi device (not just disks) present.
2926 * @dev: pointer to device object
2927 *
2928 * Returns 0 if successful (or not interested in this scsi device
2929 * (e.g. scanner)); 1 when there is an error.
2930 *
2931 * Note: this function is invoked from the scsi mid-level.
2932 * This function sets up the mapping between a given
2933 * <host,channel,id,lun> (found in sdp) and new device name
2934 * (e.g. /dev/sda). More precisely it is the block device major
2935 * and minor number that is chosen here.
2936 *
2db93ce8
PU
2937 * Assume sd_probe is not re-entrant (for time being)
2938 * Also think about sd_probe() and sd_remove() running concurrently.
1da177e4
LT
2939 **/
2940static int sd_probe(struct device *dev)
2941{
2942 struct scsi_device *sdp = to_scsi_device(dev);
2943 struct scsi_disk *sdkp;
2944 struct gendisk *gd;
439d77f7 2945 int index;
1da177e4
LT
2946 int error;
2947
6fe8c1db 2948 scsi_autopm_get_device(sdp);
1da177e4 2949 error = -ENODEV;
631e8a13 2950 if (sdp->type != TYPE_DISK && sdp->type != TYPE_MOD && sdp->type != TYPE_RBC)
1da177e4
LT
2951 goto out;
2952
9ccfc756 2953 SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
2db93ce8 2954 "sd_probe\n"));
1da177e4
LT
2955
2956 error = -ENOMEM;
24669f75 2957 sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
1da177e4
LT
2958 if (!sdkp)
2959 goto out;
2960
689d6fac 2961 gd = alloc_disk(SD_MINORS);
1da177e4
LT
2962 if (!gd)
2963 goto out_free;
2964
f27bac27
TH
2965 do {
2966 if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
2967 goto out_put;
1da177e4 2968
4034cc68 2969 spin_lock(&sd_index_lock);
f27bac27 2970 error = ida_get_new(&sd_index_ida, &index);
4034cc68 2971 spin_unlock(&sd_index_lock);
f27bac27 2972 } while (error == -EAGAIN);
1da177e4 2973
21208ae5
DK
2974 if (error) {
2975 sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
1da177e4 2976 goto out_put;
1a03ae0f
MR
2977 }
2978
3e1a7ff8 2979 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
21208ae5
DK
2980 if (error) {
2981 sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
f27bac27 2982 goto out_free_index;
21208ae5 2983 }
f27bac27 2984
1da177e4
LT
2985 sdkp->device = sdp;
2986 sdkp->driver = &sd_template;
2987 sdkp->disk = gd;
2988 sdkp->index = index;
409f3499 2989 atomic_set(&sdkp->openers, 0);
9e1a1537 2990 atomic_set(&sdkp->device->ioerr_cnt, 0);
1da177e4 2991
601e7638
JB
2992 if (!sdp->request_queue->rq_timeout) {
2993 if (sdp->type != TYPE_MOD)
2994 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
2995 else
2996 blk_queue_rq_timeout(sdp->request_queue,
2997 SD_MOD_TIMEOUT);
2998 }
2999
3000 device_initialize(&sdkp->dev);
478a8a05 3001 sdkp->dev.parent = dev;
601e7638 3002 sdkp->dev.class = &sd_disk_class;
02aa2a37 3003 dev_set_name(&sdkp->dev, "%s", dev_name(dev));
601e7638 3004
dee0586e
DC
3005 error = device_add(&sdkp->dev);
3006 if (error)
601e7638
JB
3007 goto out_free_index;
3008
478a8a05
AS
3009 get_device(dev);
3010 dev_set_drvdata(dev, sdkp);
601e7638 3011
ea038f63 3012 get_device(&sdkp->dev); /* prevent release before async_schedule */
a7a20d10 3013 async_schedule_domain(sd_probe_async, sdkp, &scsi_sd_probe_domain);
1da177e4
LT
3014
3015 return 0;
3016
f27bac27 3017 out_free_index:
4034cc68 3018 spin_lock(&sd_index_lock);
f27bac27 3019 ida_remove(&sd_index_ida, index);
4034cc68 3020 spin_unlock(&sd_index_lock);
6bdaa1f1 3021 out_put:
1da177e4 3022 put_disk(gd);
6bdaa1f1 3023 out_free:
1da177e4 3024 kfree(sdkp);
6bdaa1f1 3025 out:
6fe8c1db 3026 scsi_autopm_put_device(sdp);
1da177e4
LT
3027 return error;
3028}
3029
3030/**
3031 * sd_remove - called whenever a scsi disk (previously recognized by
3032 * sd_probe) is detached from the system. It is called (potentially
3033 * multiple times) during sd module unload.
3034 * @sdp: pointer to mid level scsi device object
3035 *
3036 * Note: this function is invoked from the scsi mid-level.
3037 * This function potentially frees up a device name (e.g. /dev/sdc)
3038 * that could be re-used by a subsequent sd_probe().
3039 * This function is not called when the built-in sd driver is "exit-ed".
3040 **/
3041static int sd_remove(struct device *dev)
3042{
601e7638 3043 struct scsi_disk *sdkp;
0761df9c 3044 dev_t devt;
1da177e4 3045
601e7638 3046 sdkp = dev_get_drvdata(dev);
0761df9c 3047 devt = disk_devt(sdkp->disk);
478a8a05
AS
3048 scsi_autopm_get_device(sdkp->device);
3049
3c31b52f 3050 async_synchronize_full_domain(&scsi_sd_pm_domain);
a7a20d10 3051 async_synchronize_full_domain(&scsi_sd_probe_domain);
ee959b00 3052 device_del(&sdkp->dev);
1da177e4
LT
3053 del_gendisk(sdkp->disk);
3054 sd_shutdown(dev);
39b7f1e2 3055
0761df9c
HR
3056 blk_register_region(devt, SD_MINORS, NULL,
3057 sd_default_probe, NULL, NULL);
3058
0b950672 3059 mutex_lock(&sd_ref_mutex);
39b7f1e2 3060 dev_set_drvdata(dev, NULL);
ee959b00 3061 put_device(&sdkp->dev);
0b950672 3062 mutex_unlock(&sd_ref_mutex);
1da177e4
LT
3063
3064 return 0;
3065}
3066
3067/**
3068 * scsi_disk_release - Called to free the scsi_disk structure
ee959b00 3069 * @dev: pointer to embedded class device
1da177e4 3070 *
0b950672 3071 * sd_ref_mutex must be held entering this routine. Because it is
1da177e4
LT
3072 * called on last put, you should always use the scsi_disk_get()
3073 * scsi_disk_put() helpers which manipulate the mutex directly
ee959b00 3074 * and never do a direct put_device.
1da177e4 3075 **/
ee959b00 3076static void scsi_disk_release(struct device *dev)
1da177e4 3077{
ee959b00 3078 struct scsi_disk *sdkp = to_scsi_disk(dev);
1da177e4
LT
3079 struct gendisk *disk = sdkp->disk;
3080
4034cc68 3081 spin_lock(&sd_index_lock);
f27bac27 3082 ida_remove(&sd_index_ida, sdkp->index);
4034cc68 3083 spin_unlock(&sd_index_lock);
1da177e4 3084
e727c42b 3085 blk_integrity_unregister(disk);
1da177e4 3086 disk->private_data = NULL;
1da177e4 3087 put_disk(disk);
39b7f1e2 3088 put_device(&sdkp->device->sdev_gendev);
1da177e4
LT
3089
3090 kfree(sdkp);
3091}
3092
cc5d2c8c 3093static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
c3c94c5a
TH
3094{
3095 unsigned char cmd[6] = { START_STOP }; /* START_VALID */
3096 struct scsi_sense_hdr sshdr;
cc5d2c8c 3097 struct scsi_device *sdp = sdkp->device;
c3c94c5a
TH
3098 int res;
3099
3100 if (start)
3101 cmd[4] |= 1; /* START */
3102
d2886ea3
SR
3103 if (sdp->start_stop_pwr_cond)
3104 cmd[4] |= start ? 1 << 4 : 3 << 4; /* Active or Standby */
3105
c3c94c5a
TH
3106 if (!scsi_device_online(sdp))
3107 return -ENODEV;
3108
9b21493c
LM
3109 res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
3110 SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM);
c3c94c5a 3111 if (res) {
ef61329d 3112 sd_print_result(sdkp, "Start/Stop Unit failed", res);
c3c94c5a 3113 if (driver_byte(res) & DRIVER_SENSE)
cc5d2c8c 3114 sd_print_sense_hdr(sdkp, &sshdr);
95897910
ON
3115 if (scsi_sense_valid(&sshdr) &&
3116 /* 0x3a is medium not present */
3117 sshdr.asc == 0x3a)
3118 res = 0;
c3c94c5a
TH
3119 }
3120
95897910
ON
3121 /* SCSI error codes must not go to the generic layer */
3122 if (res)
3123 return -EIO;
3124
3125 return 0;
c3c94c5a
TH
3126}
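
/*
 * An illustrative sketch (not part of sd.c itself) of how the six-byte
 * START STOP UNIT CDB used by sd_spinup_disk() and sd_start_stop_device()
 * is assembled: byte 1 bit 0 is IMMED, byte 4 bit 0 is START, and bits 7:4
 * of byte 4 are the POWER CONDITION field (1 = ACTIVE, 3 = STANDBY).  The
 * helper name is invented for the example.
 */
#include <stdio.h>
#include <string.h>

#define START_STOP_OPCODE	0x1b

static void build_start_stop_cdb(unsigned char cdb[6], int start, int immed,
				 int use_power_cond)
{
	memset(cdb, 0, 6);
	cdb[0] = START_STOP_OPCODE;
	cdb[1] = immed ? 1 : 0;			/* return before completion */
	cdb[4] = start ? 1 : 0;			/* START bit */
	if (use_power_cond)
		cdb[4] |= (start ? 1 : 3) << 4;	/* ACTIVE or STANDBY */
}

int main(void)
{
	unsigned char cdb[6];

	build_start_stop_cdb(cdb, 1, 1, 0);	/* spin up, return immediately */
	printf("%02x %02x %02x %02x %02x %02x\n",
	       cdb[0], cdb[1], cdb[2], cdb[3], cdb[4], cdb[5]);
	return 0;	/* prints 1b 01 00 00 01 00 */
}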
3127
1da177e4
LT
3128/*
3129 * Send a SYNCHRONIZE CACHE instruction down to the device through
3130 * the normal SCSI command structure. Wait for the command to
3131 * complete.
3132 */
3133static void sd_shutdown(struct device *dev)
3134{
3d9a1f53 3135 struct scsi_disk *sdkp = dev_get_drvdata(dev);
1da177e4
LT
3136
3137 if (!sdkp)
3138 return; /* this can happen */
3139
54f57588 3140 if (pm_runtime_suspended(dev))
3d9a1f53 3141 return;
54f57588 3142
95897910 3143 if (sdkp->WCE && sdkp->media_present) {
e73aec82
MP
3144 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
3145 sd_sync_cache(sdkp);
39b7f1e2 3146 }
c3c94c5a 3147
cc5d2c8c
JB
3148 if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
3149 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
3150 sd_start_stop_device(sdkp, 0);
c3c94c5a 3151 }
39b7f1e2 3152}
1da177e4 3153
95897910 3154static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
c3c94c5a 3155{
3d9a1f53 3156 struct scsi_disk *sdkp = dev_get_drvdata(dev);
09ff92fe 3157 int ret = 0;
c3c94c5a
TH
3158
3159 if (!sdkp)
3160 return 0; /* this can happen */
3161
95897910 3162 if (sdkp->WCE && sdkp->media_present) {
cc5d2c8c 3163 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
c3c94c5a 3164 ret = sd_sync_cache(sdkp);
95897910
ON
3165 if (ret) {
3166 /* ignore OFFLINE device */
3167 if (ret == -ENODEV)
3168 ret = 0;
09ff92fe 3169 goto done;
95897910 3170 }
c3c94c5a
TH
3171 }
3172
691e3d31 3173 if (sdkp->device->manage_start_stop) {
cc5d2c8c 3174 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
95897910 3175 /* an error is not worth aborting a system sleep */
cc5d2c8c 3176 ret = sd_start_stop_device(sdkp, 0);
95897910
ON
3177 if (ignore_stop_errors)
3178 ret = 0;
c3c94c5a
TH
3179 }
3180
09ff92fe 3181done:
09ff92fe 3182 return ret;
c3c94c5a
TH
3183}
3184
95897910
ON
3185static int sd_suspend_system(struct device *dev)
3186{
3187 return sd_suspend_common(dev, true);
3188}
3189
3190static int sd_suspend_runtime(struct device *dev)
3191{
3192 return sd_suspend_common(dev, false);
3193}
3194
c3c94c5a
TH
3195static int sd_resume(struct device *dev)
3196{
3d9a1f53 3197 struct scsi_disk *sdkp = dev_get_drvdata(dev);
c3c94c5a 3198
cc5d2c8c 3199 if (!sdkp->device->manage_start_stop)
3d9a1f53 3200 return 0;
c3c94c5a 3201
cc5d2c8c 3202 sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
3d9a1f53 3203 return sd_start_stop_device(sdkp, 1);
c3c94c5a
TH
3204}
3205
1da177e4
LT
3206/**
3207 * init_sd - entry point for this driver (both when built in or when
3208 * a module).
3209 *
3210 * Note: this function registers this driver with the scsi mid-level.
3211 **/
3212static int __init init_sd(void)
3213{
5e4009ba 3214 int majors = 0, i, err;
1da177e4
LT
3215
3216 SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));
3217
0761df9c
HR
3218 for (i = 0; i < SD_MAJORS; i++) {
3219 if (register_blkdev(sd_major(i), "sd") != 0)
3220 continue;
3221 majors++;
3222 blk_register_region(sd_major(i), SD_MINORS, NULL,
3223 sd_default_probe, NULL, NULL);
3224 }
1da177e4
LT
3225
3226 if (!majors)
3227 return -ENODEV;
3228
5e4009ba
JG
3229 err = class_register(&sd_disk_class);
3230 if (err)
3231 goto err_out;
6bdaa1f1 3232
4e7392ec
MP
3233 sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
3234 0, 0, NULL);
3235 if (!sd_cdb_cache) {
3236 printk(KERN_ERR "sd: can't init extended cdb cache\n");
8d964478 3237 err = -ENOMEM;
4e7392ec
MP
3238 goto err_out_class;
3239 }
3240
3241 sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache);
3242 if (!sd_cdb_pool) {
3243 printk(KERN_ERR "sd: can't init extended cdb pool\n");
8d964478 3244 err = -ENOMEM;
4e7392ec
MP
3245 goto err_out_cache;
3246 }
3247
afd5e34b
JD
3248 err = scsi_register_driver(&sd_template.gendrv);
3249 if (err)
3250 goto err_out_driver;
3251
5e4009ba
JG
3252 return 0;
3253
afd5e34b
JD
3254err_out_driver:
3255 mempool_destroy(sd_cdb_pool);
3256
4e7392ec
MP
3257err_out_cache:
3258 kmem_cache_destroy(sd_cdb_cache);
3259
5e4009ba
JG
3260err_out_class:
3261 class_unregister(&sd_disk_class);
3262err_out:
3263 for (i = 0; i < SD_MAJORS; i++)
3264 unregister_blkdev(sd_major(i), "sd");
3265 return err;
1da177e4
LT
3266}
3267
3268/**
3269 * exit_sd - exit point for this driver (when it is a module).
3270 *
3271 * Note: this function unregisters this driver from the scsi mid-level.
3272 **/
3273static void __exit exit_sd(void)
3274{
3275 int i;
3276
3277 SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
3278
afd5e34b 3279 scsi_unregister_driver(&sd_template.gendrv);
4e7392ec
MP
3280 mempool_destroy(sd_cdb_pool);
3281 kmem_cache_destroy(sd_cdb_cache);
3282
5e4009ba
JG
3283 class_unregister(&sd_disk_class);
3284
0761df9c
HR
3285 for (i = 0; i < SD_MAJORS; i++) {
3286 blk_unregister_region(sd_major(i), SD_MINORS);
1da177e4 3287 unregister_blkdev(sd_major(i), "sd");
0761df9c 3288 }
1da177e4
LT
3289}
3290
1da177e4
LT
3291module_init(init_sd);
3292module_exit(exit_sd);
e73aec82
MP
3293
3294static void sd_print_sense_hdr(struct scsi_disk *sdkp,
3295 struct scsi_sense_hdr *sshdr)
3296{
21045519
HR
3297 scsi_print_sense_hdr(sdkp->device,
3298 sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
e73aec82
MP
3299}
3300
ef61329d
HR
3301static void sd_print_result(const struct scsi_disk *sdkp, const char *msg,
3302 int result)
e73aec82 3303{
ef61329d
HR
3304 const char *hb_string = scsi_hostbyte_string(result);
3305 const char *db_string = scsi_driverbyte_string(result);
3306
3307 if (hb_string || db_string)
3308 sd_printk(KERN_INFO, sdkp,
3309 "%s: Result: hostbyte=%s driverbyte=%s\n", msg,
3310 hb_string ? hb_string : "invalid",
3311 db_string ? db_string : "invalid");
3312 else
3313 sd_printk(KERN_INFO, sdkp,
3314 "%s: Result: hostbyte=0x%02x driverbyte=0x%02x\n",
3315 msg, host_byte(result), driver_byte(result));
e73aec82
MP
3316}
3317