1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * sd.c Copyright (C) 1992 Drew Eckhardt
4 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
5 *
6 * Linux scsi disk driver
7 * Initial versions: Drew Eckhardt
8 * Subsequent revisions: Eric Youngdale
9 * Modification history:
10 * - Drew Eckhardt <drew@colorado.edu> original
11 * - Eric Youngdale <eric@andante.org> add scatter-gather, multiple
12 * outstanding request, and other enhancements.
13 * Support loadable low-level scsi drivers.
14 * - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
15 * eight major numbers.
16 * - Richard Gooch <rgooch@atnf.csiro.au> support devfs.
17 * - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in
18 * sd_init and cleanups.
19 * - Alex Davis <letmein@erols.com> Fix problem where partition info
20 * not being read in sd_open. Fix problem where removable media
21 * could be ejected after sd_open.
22 * - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
23 * - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox
24 * <willy@debian.org>, Kurt Garloff <garloff@suse.de>:
25 * Support 32k/1M disks.
26 *
27 * Logging policy (needs CONFIG_SCSI_LOGGING defined):
28 * - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
29 * - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
30 * - entering sd_ioctl: SCSI_LOG_IOCTL level 1
31 * - entering other commands: SCSI_LOG_HLQUEUE level 3
32 * Note: when the logging level is set by the user, it must be greater
33 * than the level indicated above to trigger output.
34 */
35
36 #include <linux/module.h>
37 #include <linux/fs.h>
38 #include <linux/kernel.h>
39 #include <linux/mm.h>
40 #include <linux/bio.h>
41 #include <linux/genhd.h>
42 #include <linux/hdreg.h>
43 #include <linux/errno.h>
44 #include <linux/idr.h>
45 #include <linux/interrupt.h>
46 #include <linux/init.h>
47 #include <linux/blkdev.h>
48 #include <linux/blkpg.h>
49 #include <linux/blk-pm.h>
50 #include <linux/delay.h>
51 #include <linux/mutex.h>
52 #include <linux/string_helpers.h>
53 #include <linux/async.h>
54 #include <linux/slab.h>
55 #include <linux/sed-opal.h>
56 #include <linux/pm_runtime.h>
57 #include <linux/pr.h>
58 #include <linux/t10-pi.h>
59 #include <linux/uaccess.h>
60 #include <asm/unaligned.h>
61
62 #include <scsi/scsi.h>
63 #include <scsi/scsi_cmnd.h>
64 #include <scsi/scsi_dbg.h>
65 #include <scsi/scsi_device.h>
66 #include <scsi/scsi_driver.h>
67 #include <scsi/scsi_eh.h>
68 #include <scsi/scsi_host.h>
69 #include <scsi/scsi_ioctl.h>
70 #include <scsi/scsicam.h>
71
72 #include "sd.h"
73 #include "scsi_priv.h"
74 #include "scsi_logging.h"
75
76 MODULE_AUTHOR("Eric Youngdale");
77 MODULE_DESCRIPTION("SCSI disk (sd) driver");
78 MODULE_LICENSE("GPL");
79
80 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
81 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
82 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
83 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
84 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
85 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
86 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
87 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
88 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
89 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
90 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
91 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
92 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
93 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
94 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
95 MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
96 MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
97 MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
98 MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
99 MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
100
101 #define SD_MINORS 16
102
103 static void sd_config_discard(struct scsi_disk *, unsigned int);
104 static void sd_config_write_same(struct scsi_disk *);
105 static int sd_revalidate_disk(struct gendisk *);
106 static void sd_unlock_native_capacity(struct gendisk *disk);
107 static int sd_probe(struct device *);
108 static int sd_remove(struct device *);
109 static void sd_shutdown(struct device *);
110 static int sd_suspend_system(struct device *);
111 static int sd_suspend_runtime(struct device *);
112 static int sd_resume(struct device *);
113 static void sd_rescan(struct device *);
114 static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
115 static void sd_uninit_command(struct scsi_cmnd *SCpnt);
116 static int sd_done(struct scsi_cmnd *);
117 static void sd_eh_reset(struct scsi_cmnd *);
118 static int sd_eh_action(struct scsi_cmnd *, int);
119 static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
120 static void scsi_disk_release(struct device *cdev);
121
122 static DEFINE_IDA(sd_index_ida);
123
124 /* This mutex is used to mediate the 0->1 reference get in the
125  * face of object destruction (i.e. we can't allow a get on an
126  * object after the last put) */
127 static DEFINE_MUTEX(sd_ref_mutex);
128
129 static struct kmem_cache *sd_cdb_cache;
130 static mempool_t *sd_cdb_pool;
131 static mempool_t *sd_page_pool;
132
133 static const char *sd_cache_types[] = {
134 "write through", "none", "write back",
135 "write back, no read (daft)"
136 };
137
138 static void sd_set_flush_flag(struct scsi_disk *sdkp)
139 {
140 bool wc = false, fua = false;
141
142 if (sdkp->WCE) {
143 wc = true;
144 if (sdkp->DPOFUA)
145 fua = true;
146 }
147
148 blk_queue_write_cache(sdkp->disk->queue, wc, fua);
149 }
150
151 static ssize_t
152 cache_type_store(struct device *dev, struct device_attribute *attr,
153 const char *buf, size_t count)
154 {
155 int ct, rcd, wce, sp;
156 struct scsi_disk *sdkp = to_scsi_disk(dev);
157 struct scsi_device *sdp = sdkp->device;
158 char buffer[64];
159 char *buffer_data;
160 struct scsi_mode_data data;
161 struct scsi_sense_hdr sshdr;
162 static const char temp[] = "temporary ";
163 int len;
164
165 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
166 /* no cache control on RBC devices; theoretically they
167 	 * can do it, but there are probably so many exceptions
168 * it's not worth the risk */
169 return -EINVAL;
170
171 if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
172 buf += sizeof(temp) - 1;
173 sdkp->cache_override = 1;
174 } else {
175 sdkp->cache_override = 0;
176 }
177
178 ct = sysfs_match_string(sd_cache_types, buf);
179 if (ct < 0)
180 return -EINVAL;
181
182 rcd = ct & 0x01 ? 1 : 0;
183 wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;
184
185 if (sdkp->cache_override) {
186 sdkp->WCE = wce;
187 sdkp->RCD = rcd;
188 sd_set_flush_flag(sdkp);
189 return count;
190 }
191
192 if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
193 sdkp->max_retries, &data, NULL))
194 return -EINVAL;
195 len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
196 data.block_descriptor_length);
197 buffer_data = buffer + data.header_length +
198 data.block_descriptor_length;
199 buffer_data[2] &= ~0x05;
200 buffer_data[2] |= wce << 2 | rcd;
201 sp = buffer_data[0] & 0x80 ? 1 : 0;
202 buffer_data[0] &= ~0x80;
203
204 /*
205 	 * Ensure the WP, DPOFUA, and RESERVED fields are cleared in the
206 	 * received mode parameter buffer before doing MODE SELECT.
207 */
208 data.device_specific = 0;
209
210 if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
211 sdkp->max_retries, &data, &sshdr)) {
212 if (scsi_sense_valid(&sshdr))
213 sd_print_sense_hdr(sdkp, &sshdr);
214 return -EINVAL;
215 }
216 sd_revalidate_disk(sdkp->disk);
217 return count;
218 }
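/*
 * Illustrative usage of the cache_type attribute (the SCSI address 0:0:0:0
 * below is only a placeholder): the accepted strings are the entries of
 * sd_cache_types[] above, optionally prefixed with "temporary " to change
 * only the in-kernel state without issuing a MODE SELECT to the device.
 *
 *   # cat /sys/class/scsi_disk/0:0:0:0/cache_type
 *   write back
 *   # echo "write through" > /sys/class/scsi_disk/0:0:0:0/cache_type
 *   # echo "temporary none" > /sys/class/scsi_disk/0:0:0:0/cache_type
 */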
219
220 static ssize_t
221 manage_start_stop_show(struct device *dev, struct device_attribute *attr,
222 char *buf)
223 {
224 struct scsi_disk *sdkp = to_scsi_disk(dev);
225 struct scsi_device *sdp = sdkp->device;
226
227 return sprintf(buf, "%u\n", sdp->manage_start_stop);
228 }
229
230 static ssize_t
231 manage_start_stop_store(struct device *dev, struct device_attribute *attr,
232 const char *buf, size_t count)
233 {
234 struct scsi_disk *sdkp = to_scsi_disk(dev);
235 struct scsi_device *sdp = sdkp->device;
236 bool v;
237
238 if (!capable(CAP_SYS_ADMIN))
239 return -EACCES;
240
241 if (kstrtobool(buf, &v))
242 return -EINVAL;
243
244 sdp->manage_start_stop = v;
245
246 return count;
247 }
248 static DEVICE_ATTR_RW(manage_start_stop);
249
250 static ssize_t
251 allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
252 {
253 struct scsi_disk *sdkp = to_scsi_disk(dev);
254
255 return sprintf(buf, "%u\n", sdkp->device->allow_restart);
256 }
257
258 static ssize_t
259 allow_restart_store(struct device *dev, struct device_attribute *attr,
260 const char *buf, size_t count)
261 {
262 bool v;
263 struct scsi_disk *sdkp = to_scsi_disk(dev);
264 struct scsi_device *sdp = sdkp->device;
265
266 if (!capable(CAP_SYS_ADMIN))
267 return -EACCES;
268
269 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
270 return -EINVAL;
271
272 if (kstrtobool(buf, &v))
273 return -EINVAL;
274
275 sdp->allow_restart = v;
276
277 return count;
278 }
279 static DEVICE_ATTR_RW(allow_restart);
280
281 static ssize_t
282 cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
283 {
284 struct scsi_disk *sdkp = to_scsi_disk(dev);
285 int ct = sdkp->RCD + 2*sdkp->WCE;
286
287 return sprintf(buf, "%s\n", sd_cache_types[ct]);
288 }
289 static DEVICE_ATTR_RW(cache_type);
290
291 static ssize_t
292 FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
293 {
294 struct scsi_disk *sdkp = to_scsi_disk(dev);
295
296 return sprintf(buf, "%u\n", sdkp->DPOFUA);
297 }
298 static DEVICE_ATTR_RO(FUA);
299
300 static ssize_t
301 protection_type_show(struct device *dev, struct device_attribute *attr,
302 char *buf)
303 {
304 struct scsi_disk *sdkp = to_scsi_disk(dev);
305
306 return sprintf(buf, "%u\n", sdkp->protection_type);
307 }
308
309 static ssize_t
310 protection_type_store(struct device *dev, struct device_attribute *attr,
311 const char *buf, size_t count)
312 {
313 struct scsi_disk *sdkp = to_scsi_disk(dev);
314 unsigned int val;
315 int err;
316
317 if (!capable(CAP_SYS_ADMIN))
318 return -EACCES;
319
320 err = kstrtouint(buf, 10, &val);
321
322 if (err)
323 return err;
324
325 if (val <= T10_PI_TYPE3_PROTECTION)
326 sdkp->protection_type = val;
327
328 return count;
329 }
330 static DEVICE_ATTR_RW(protection_type);
331
332 static ssize_t
333 protection_mode_show(struct device *dev, struct device_attribute *attr,
334 char *buf)
335 {
336 struct scsi_disk *sdkp = to_scsi_disk(dev);
337 struct scsi_device *sdp = sdkp->device;
338 unsigned int dif, dix;
339
340 dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
341 dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);
342
343 if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) {
344 dif = 0;
345 dix = 1;
346 }
347
348 if (!dif && !dix)
349 return sprintf(buf, "none\n");
350
351 return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif);
352 }
353 static DEVICE_ATTR_RO(protection_mode);
354
355 static ssize_t
356 app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
357 {
358 struct scsi_disk *sdkp = to_scsi_disk(dev);
359
360 return sprintf(buf, "%u\n", sdkp->ATO);
361 }
362 static DEVICE_ATTR_RO(app_tag_own);
363
364 static ssize_t
365 thin_provisioning_show(struct device *dev, struct device_attribute *attr,
366 char *buf)
367 {
368 struct scsi_disk *sdkp = to_scsi_disk(dev);
369
370 return sprintf(buf, "%u\n", sdkp->lbpme);
371 }
372 static DEVICE_ATTR_RO(thin_provisioning);
373
374 /* sysfs_match_string() requires dense arrays */
375 static const char *lbp_mode[] = {
376 [SD_LBP_FULL] = "full",
377 [SD_LBP_UNMAP] = "unmap",
378 [SD_LBP_WS16] = "writesame_16",
379 [SD_LBP_WS10] = "writesame_10",
380 [SD_LBP_ZERO] = "writesame_zero",
381 [SD_LBP_DISABLE] = "disabled",
382 };
383
384 static ssize_t
385 provisioning_mode_show(struct device *dev, struct device_attribute *attr,
386 char *buf)
387 {
388 struct scsi_disk *sdkp = to_scsi_disk(dev);
389
390 return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]);
391 }
392
393 static ssize_t
394 provisioning_mode_store(struct device *dev, struct device_attribute *attr,
395 const char *buf, size_t count)
396 {
397 struct scsi_disk *sdkp = to_scsi_disk(dev);
398 struct scsi_device *sdp = sdkp->device;
399 int mode;
400
401 if (!capable(CAP_SYS_ADMIN))
402 return -EACCES;
403
404 if (sd_is_zoned(sdkp)) {
405 sd_config_discard(sdkp, SD_LBP_DISABLE);
406 return count;
407 }
408
409 if (sdp->type != TYPE_DISK)
410 return -EINVAL;
411
412 mode = sysfs_match_string(lbp_mode, buf);
413 if (mode < 0)
414 return -EINVAL;
415
416 sd_config_discard(sdkp, mode);
417
418 return count;
419 }
420 static DEVICE_ATTR_RW(provisioning_mode);
421
422 /* sysfs_match_string() requires dense arrays */
423 static const char *zeroing_mode[] = {
424 [SD_ZERO_WRITE] = "write",
425 [SD_ZERO_WS] = "writesame",
426 [SD_ZERO_WS16_UNMAP] = "writesame_16_unmap",
427 [SD_ZERO_WS10_UNMAP] = "writesame_10_unmap",
428 };
429
430 static ssize_t
431 zeroing_mode_show(struct device *dev, struct device_attribute *attr,
432 char *buf)
433 {
434 struct scsi_disk *sdkp = to_scsi_disk(dev);
435
436 return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
437 }
438
439 static ssize_t
440 zeroing_mode_store(struct device *dev, struct device_attribute *attr,
441 const char *buf, size_t count)
442 {
443 struct scsi_disk *sdkp = to_scsi_disk(dev);
444 int mode;
445
446 if (!capable(CAP_SYS_ADMIN))
447 return -EACCES;
448
449 mode = sysfs_match_string(zeroing_mode, buf);
450 if (mode < 0)
451 return -EINVAL;
452
453 sdkp->zeroing_mode = mode;
454
455 return count;
456 }
457 static DEVICE_ATTR_RW(zeroing_mode);
458
459 static ssize_t
460 max_medium_access_timeouts_show(struct device *dev,
461 struct device_attribute *attr, char *buf)
462 {
463 struct scsi_disk *sdkp = to_scsi_disk(dev);
464
465 return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts);
466 }
467
468 static ssize_t
469 max_medium_access_timeouts_store(struct device *dev,
470 struct device_attribute *attr, const char *buf,
471 size_t count)
472 {
473 struct scsi_disk *sdkp = to_scsi_disk(dev);
474 int err;
475
476 if (!capable(CAP_SYS_ADMIN))
477 return -EACCES;
478
479 err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);
480
481 return err ? err : count;
482 }
483 static DEVICE_ATTR_RW(max_medium_access_timeouts);
484
485 static ssize_t
486 max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
487 char *buf)
488 {
489 struct scsi_disk *sdkp = to_scsi_disk(dev);
490
491 return sprintf(buf, "%u\n", sdkp->max_ws_blocks);
492 }
493
494 static ssize_t
495 max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
496 const char *buf, size_t count)
497 {
498 struct scsi_disk *sdkp = to_scsi_disk(dev);
499 struct scsi_device *sdp = sdkp->device;
500 unsigned long max;
501 int err;
502
503 if (!capable(CAP_SYS_ADMIN))
504 return -EACCES;
505
506 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
507 return -EINVAL;
508
509 err = kstrtoul(buf, 10, &max);
510
511 if (err)
512 return err;
513
514 if (max == 0)
515 sdp->no_write_same = 1;
516 else if (max <= SD_MAX_WS16_BLOCKS) {
517 sdp->no_write_same = 0;
518 sdkp->max_ws_blocks = max;
519 }
520
521 sd_config_write_same(sdkp);
522
523 return count;
524 }
525 static DEVICE_ATTR_RW(max_write_same_blocks);
526
527 static ssize_t
528 zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
529 {
530 struct scsi_disk *sdkp = to_scsi_disk(dev);
531
532 if (sdkp->device->type == TYPE_ZBC)
533 return sprintf(buf, "host-managed\n");
534 if (sdkp->zoned == 1)
535 return sprintf(buf, "host-aware\n");
536 if (sdkp->zoned == 2)
537 return sprintf(buf, "drive-managed\n");
538 return sprintf(buf, "none\n");
539 }
540 static DEVICE_ATTR_RO(zoned_cap);
541
542 static ssize_t
543 max_retries_store(struct device *dev, struct device_attribute *attr,
544 const char *buf, size_t count)
545 {
546 struct scsi_disk *sdkp = to_scsi_disk(dev);
547 struct scsi_device *sdev = sdkp->device;
548 int retries, err;
549
550 err = kstrtoint(buf, 10, &retries);
551 if (err)
552 return err;
553
554 if (retries == SCSI_CMD_RETRIES_NO_LIMIT || retries <= SD_MAX_RETRIES) {
555 sdkp->max_retries = retries;
556 return count;
557 }
558
559 sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n",
560 SD_MAX_RETRIES);
561 return -EINVAL;
562 }
563
564 static ssize_t
565 max_retries_show(struct device *dev, struct device_attribute *attr,
566 char *buf)
567 {
568 struct scsi_disk *sdkp = to_scsi_disk(dev);
569
570 return sprintf(buf, "%d\n", sdkp->max_retries);
571 }
572
573 static DEVICE_ATTR_RW(max_retries);
574
575 static struct attribute *sd_disk_attrs[] = {
576 &dev_attr_cache_type.attr,
577 &dev_attr_FUA.attr,
578 &dev_attr_allow_restart.attr,
579 &dev_attr_manage_start_stop.attr,
580 &dev_attr_protection_type.attr,
581 &dev_attr_protection_mode.attr,
582 &dev_attr_app_tag_own.attr,
583 &dev_attr_thin_provisioning.attr,
584 &dev_attr_provisioning_mode.attr,
585 &dev_attr_zeroing_mode.attr,
586 &dev_attr_max_write_same_blocks.attr,
587 &dev_attr_max_medium_access_timeouts.attr,
588 &dev_attr_zoned_cap.attr,
589 &dev_attr_max_retries.attr,
590 NULL,
591 };
592 ATTRIBUTE_GROUPS(sd_disk);
593
594 static struct class sd_disk_class = {
595 .name = "scsi_disk",
596 .owner = THIS_MODULE,
597 .dev_release = scsi_disk_release,
598 .dev_groups = sd_disk_groups,
599 };
600
601 static const struct dev_pm_ops sd_pm_ops = {
602 .suspend = sd_suspend_system,
603 .resume = sd_resume,
604 .poweroff = sd_suspend_system,
605 .restore = sd_resume,
606 .runtime_suspend = sd_suspend_runtime,
607 .runtime_resume = sd_resume,
608 };
609
610 static struct scsi_driver sd_template = {
611 .gendrv = {
612 .name = "sd",
613 .owner = THIS_MODULE,
614 .probe = sd_probe,
615 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
616 .remove = sd_remove,
617 .shutdown = sd_shutdown,
618 .pm = &sd_pm_ops,
619 },
620 .rescan = sd_rescan,
621 .init_command = sd_init_command,
622 .uninit_command = sd_uninit_command,
623 .done = sd_done,
624 .eh_action = sd_eh_action,
625 .eh_reset = sd_eh_reset,
626 };
627
628 /*
629  * Don't request a new module, as that could deadlock in a multipath
630  * environment.
631 */
632 static void sd_default_probe(dev_t devt)
633 {
634 }
635
636 /*
637  * Device number to disk mapping:
638 *
639 * major disc2 disc p1
640 * |............|.............|....|....| <- dev_t
641 * 31 20 19 8 7 4 3 0
642 *
643  * Inside a major, we have 16k disks, but they are mapped non-
644  * contiguously. The first 16 disks belong to major0, the next
645  * 16 to major1, ... Disk 256 is for major0 again, disk 272
646  * for major1, ...
647  * Since we stay compatible with our numbering scheme, we can reuse
648  * the well-known SCSI majors 8, 65--71, 128--135.
649 */
650 static int sd_major(int major_idx)
651 {
652 switch (major_idx) {
653 case 0:
654 return SCSI_DISK0_MAJOR;
655 case 1 ... 7:
656 return SCSI_DISK1_MAJOR + major_idx - 1;
657 case 8 ... 15:
658 return SCSI_DISK8_MAJOR + major_idx - 8;
659 default:
660 BUG();
661 return 0; /* shut up gcc */
662 }
663 }
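/*
 * Worked example of the mapping described above (illustrative only):
 * disks 0..15 map to major_idx 0 and thus SCSI_DISK0_MAJOR (8),
 * disks 16..31 map to major_idx 1 and thus SCSI_DISK1_MAJOR (65),
 * and so on up to major_idx 15; disk 256 wraps back to major_idx 0,
 * distinguished from disk 0 by the "disc2" bits of the dev_t layout
 * shown above.
 */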
664
665 static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
666 {
667 struct scsi_disk *sdkp = NULL;
668
669 mutex_lock(&sd_ref_mutex);
670
671 if (disk->private_data) {
672 sdkp = scsi_disk(disk);
673 if (scsi_device_get(sdkp->device) == 0)
674 get_device(&sdkp->dev);
675 else
676 sdkp = NULL;
677 }
678 mutex_unlock(&sd_ref_mutex);
679 return sdkp;
680 }
681
682 static void scsi_disk_put(struct scsi_disk *sdkp)
683 {
684 struct scsi_device *sdev = sdkp->device;
685
686 mutex_lock(&sd_ref_mutex);
687 put_device(&sdkp->dev);
688 scsi_device_put(sdev);
689 mutex_unlock(&sd_ref_mutex);
690 }
691
692 #ifdef CONFIG_BLK_SED_OPAL
693 static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
694 size_t len, bool send)
695 {
696 struct scsi_disk *sdkp = data;
697 struct scsi_device *sdev = sdkp->device;
698 u8 cdb[12] = { 0, };
699 int ret;
700
701 cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
702 cdb[1] = secp;
703 put_unaligned_be16(spsp, &cdb[2]);
704 put_unaligned_be32(len, &cdb[6]);
705
706 ret = scsi_execute(sdev, cdb, send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
707 buffer, len, NULL, NULL, SD_TIMEOUT, sdkp->max_retries, 0,
708 RQF_PM, NULL);
709 return ret <= 0 ? ret : -EIO;
710 }
711 #endif /* CONFIG_BLK_SED_OPAL */
712
713 /*
714 * Look up the DIX operation based on whether the command is read or
715 * write and whether dix and dif are enabled.
716 */
717 static unsigned int sd_prot_op(bool write, bool dix, bool dif)
718 {
719 /* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
720 static const unsigned int ops[] = { /* wrt dix dif */
721 SCSI_PROT_NORMAL, /* 0 0 0 */
722 SCSI_PROT_READ_STRIP, /* 0 0 1 */
723 SCSI_PROT_READ_INSERT, /* 0 1 0 */
724 SCSI_PROT_READ_PASS, /* 0 1 1 */
725 SCSI_PROT_NORMAL, /* 1 0 0 */
726 SCSI_PROT_WRITE_INSERT, /* 1 0 1 */
727 SCSI_PROT_WRITE_STRIP, /* 1 1 0 */
728 SCSI_PROT_WRITE_PASS, /* 1 1 1 */
729 };
730
731 return ops[write << 2 | dix << 1 | dif];
732 }
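/*
 * Example, derived from the lookup table above: a WRITE with DIX enabled
 * and DIF disabled (write=1, dix=1, dif=0) indexes entry 6 and yields
 * SCSI_PROT_WRITE_STRIP, i.e. protection information supplied by the
 * block layer is verified and stripped before the data reaches the
 * non-DIF device.
 */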
733
734 /*
735 * Returns a mask of the protection flags that are valid for a given DIX
736 * operation.
737 */
738 static unsigned int sd_prot_flag_mask(unsigned int prot_op)
739 {
740 static const unsigned int flag_mask[] = {
741 [SCSI_PROT_NORMAL] = 0,
742
743 [SCSI_PROT_READ_STRIP] = SCSI_PROT_TRANSFER_PI |
744 SCSI_PROT_GUARD_CHECK |
745 SCSI_PROT_REF_CHECK |
746 SCSI_PROT_REF_INCREMENT,
747
748 [SCSI_PROT_READ_INSERT] = SCSI_PROT_REF_INCREMENT |
749 SCSI_PROT_IP_CHECKSUM,
750
751 [SCSI_PROT_READ_PASS] = SCSI_PROT_TRANSFER_PI |
752 SCSI_PROT_GUARD_CHECK |
753 SCSI_PROT_REF_CHECK |
754 SCSI_PROT_REF_INCREMENT |
755 SCSI_PROT_IP_CHECKSUM,
756
757 [SCSI_PROT_WRITE_INSERT] = SCSI_PROT_TRANSFER_PI |
758 SCSI_PROT_REF_INCREMENT,
759
760 [SCSI_PROT_WRITE_STRIP] = SCSI_PROT_GUARD_CHECK |
761 SCSI_PROT_REF_CHECK |
762 SCSI_PROT_REF_INCREMENT |
763 SCSI_PROT_IP_CHECKSUM,
764
765 [SCSI_PROT_WRITE_PASS] = SCSI_PROT_TRANSFER_PI |
766 SCSI_PROT_GUARD_CHECK |
767 SCSI_PROT_REF_CHECK |
768 SCSI_PROT_REF_INCREMENT |
769 SCSI_PROT_IP_CHECKSUM,
770 };
771
772 return flag_mask[prot_op];
773 }
774
775 static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
776 unsigned int dix, unsigned int dif)
777 {
778 struct bio *bio = scmd->request->bio;
779 unsigned int prot_op = sd_prot_op(rq_data_dir(scmd->request), dix, dif);
780 unsigned int protect = 0;
781
782 if (dix) { /* DIX Type 0, 1, 2, 3 */
783 if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
784 scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;
785
786 if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
787 scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
788 }
789
790 if (dif != T10_PI_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */
791 scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;
792
793 if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
794 scmd->prot_flags |= SCSI_PROT_REF_CHECK;
795 }
796
797 if (dif) { /* DIX/DIF Type 1, 2, 3 */
798 scmd->prot_flags |= SCSI_PROT_TRANSFER_PI;
799
800 if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
801 protect = 3 << 5; /* Disable target PI checking */
802 else
803 protect = 1 << 5; /* Enable target PI checking */
804 }
805
806 scsi_set_prot_op(scmd, prot_op);
807 scsi_set_prot_type(scmd, dif);
808 scmd->prot_flags &= sd_prot_flag_mask(prot_op);
809
810 return protect;
811 }
812
813 static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
814 {
815 struct request_queue *q = sdkp->disk->queue;
816 unsigned int logical_block_size = sdkp->device->sector_size;
817 unsigned int max_blocks = 0;
818
819 q->limits.discard_alignment =
820 sdkp->unmap_alignment * logical_block_size;
821 q->limits.discard_granularity =
822 max(sdkp->physical_block_size,
823 sdkp->unmap_granularity * logical_block_size);
824 sdkp->provisioning_mode = mode;
825
826 switch (mode) {
827
828 case SD_LBP_FULL:
829 case SD_LBP_DISABLE:
830 blk_queue_max_discard_sectors(q, 0);
831 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
832 return;
833
834 case SD_LBP_UNMAP:
835 max_blocks = min_not_zero(sdkp->max_unmap_blocks,
836 (u32)SD_MAX_WS16_BLOCKS);
837 break;
838
839 case SD_LBP_WS16:
840 if (sdkp->device->unmap_limit_for_ws)
841 max_blocks = sdkp->max_unmap_blocks;
842 else
843 max_blocks = sdkp->max_ws_blocks;
844
845 max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
846 break;
847
848 case SD_LBP_WS10:
849 if (sdkp->device->unmap_limit_for_ws)
850 max_blocks = sdkp->max_unmap_blocks;
851 else
852 max_blocks = sdkp->max_ws_blocks;
853
854 max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
855 break;
856
857 case SD_LBP_ZERO:
858 max_blocks = min_not_zero(sdkp->max_ws_blocks,
859 (u32)SD_MAX_WS10_BLOCKS);
860 break;
861 }
862
863 blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
864 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
865 }
866
867 static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
868 {
869 struct scsi_device *sdp = cmd->device;
870 struct request *rq = cmd->request;
871 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
872 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
873 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
874 unsigned int data_len = 24;
875 char *buf;
876
877 rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
878 if (!rq->special_vec.bv_page)
879 return BLK_STS_RESOURCE;
880 clear_highpage(rq->special_vec.bv_page);
881 rq->special_vec.bv_offset = 0;
882 rq->special_vec.bv_len = data_len;
883 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
884
885 cmd->cmd_len = 10;
886 cmd->cmnd[0] = UNMAP;
887 cmd->cmnd[8] = 24;
888
889 buf = page_address(rq->special_vec.bv_page);
890 put_unaligned_be16(6 + 16, &buf[0]);
891 put_unaligned_be16(16, &buf[2]);
892 put_unaligned_be64(lba, &buf[8]);
893 put_unaligned_be32(nr_blocks, &buf[16]);
894
895 cmd->allowed = sdkp->max_retries;
896 cmd->transfersize = data_len;
897 rq->timeout = SD_TIMEOUT;
898
899 return scsi_alloc_sgtables(cmd);
900 }
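/*
 * Layout of the single-descriptor UNMAP parameter list built above
 * (24 bytes, all fields big-endian; shown here for illustration):
 *
 *   bytes  0- 1  UNMAP data length             = 22 (6 + 16)
 *   bytes  2- 3  block descriptor data length  = 16
 *   bytes  4- 7  reserved                      = 0
 *   bytes  8-15  LBA of the first block to unmap
 *   bytes 16-19  number of blocks to unmap
 *   bytes 20-23  reserved                      = 0
 */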
901
902 static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
903 bool unmap)
904 {
905 struct scsi_device *sdp = cmd->device;
906 struct request *rq = cmd->request;
907 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
908 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
909 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
910 u32 data_len = sdp->sector_size;
911
912 rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
913 if (!rq->special_vec.bv_page)
914 return BLK_STS_RESOURCE;
915 clear_highpage(rq->special_vec.bv_page);
916 rq->special_vec.bv_offset = 0;
917 rq->special_vec.bv_len = data_len;
918 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
919
920 cmd->cmd_len = 16;
921 cmd->cmnd[0] = WRITE_SAME_16;
922 if (unmap)
923 cmd->cmnd[1] = 0x8; /* UNMAP */
924 put_unaligned_be64(lba, &cmd->cmnd[2]);
925 put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
926
927 cmd->allowed = sdkp->max_retries;
928 cmd->transfersize = data_len;
929 rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
930
931 return scsi_alloc_sgtables(cmd);
932 }
933
934 static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
935 bool unmap)
936 {
937 struct scsi_device *sdp = cmd->device;
938 struct request *rq = cmd->request;
939 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
940 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
941 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
942 u32 data_len = sdp->sector_size;
943
944 rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
945 if (!rq->special_vec.bv_page)
946 return BLK_STS_RESOURCE;
947 clear_highpage(rq->special_vec.bv_page);
948 rq->special_vec.bv_offset = 0;
949 rq->special_vec.bv_len = data_len;
950 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
951
952 cmd->cmd_len = 10;
953 cmd->cmnd[0] = WRITE_SAME;
954 if (unmap)
955 cmd->cmnd[1] = 0x8; /* UNMAP */
956 put_unaligned_be32(lba, &cmd->cmnd[2]);
957 put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
958
959 cmd->allowed = sdkp->max_retries;
960 cmd->transfersize = data_len;
961 rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
962
963 return scsi_alloc_sgtables(cmd);
964 }
965
966 static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
967 {
968 struct request *rq = cmd->request;
969 struct scsi_device *sdp = cmd->device;
970 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
971 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
972 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
973
974 if (!(rq->cmd_flags & REQ_NOUNMAP)) {
975 switch (sdkp->zeroing_mode) {
976 case SD_ZERO_WS16_UNMAP:
977 return sd_setup_write_same16_cmnd(cmd, true);
978 case SD_ZERO_WS10_UNMAP:
979 return sd_setup_write_same10_cmnd(cmd, true);
980 }
981 }
982
983 if (sdp->no_write_same) {
984 rq->rq_flags |= RQF_QUIET;
985 return BLK_STS_TARGET;
986 }
987
988 if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
989 return sd_setup_write_same16_cmnd(cmd, false);
990
991 return sd_setup_write_same10_cmnd(cmd, false);
992 }
993
994 static void sd_config_write_same(struct scsi_disk *sdkp)
995 {
996 struct request_queue *q = sdkp->disk->queue;
997 unsigned int logical_block_size = sdkp->device->sector_size;
998
999 if (sdkp->device->no_write_same) {
1000 sdkp->max_ws_blocks = 0;
1001 goto out;
1002 }
1003
1004 	/* Some devices cannot handle block counts above 0xffff despite
1005 * supporting WRITE SAME(16). Consequently we default to 64k
1006 * blocks per I/O unless the device explicitly advertises a
1007 * bigger limit.
1008 */
1009 if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
1010 sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
1011 (u32)SD_MAX_WS16_BLOCKS);
1012 else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
1013 sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
1014 (u32)SD_MAX_WS10_BLOCKS);
1015 else {
1016 sdkp->device->no_write_same = 1;
1017 sdkp->max_ws_blocks = 0;
1018 }
1019
1020 if (sdkp->lbprz && sdkp->lbpws)
1021 sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP;
1022 else if (sdkp->lbprz && sdkp->lbpws10)
1023 sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP;
1024 else if (sdkp->max_ws_blocks)
1025 sdkp->zeroing_mode = SD_ZERO_WS;
1026 else
1027 sdkp->zeroing_mode = SD_ZERO_WRITE;
1028
1029 if (sdkp->max_ws_blocks &&
1030 sdkp->physical_block_size > logical_block_size) {
1031 /*
1032 * Reporting a maximum number of blocks that is not aligned
1033 * on the device physical size would cause a large write same
1034 * request to be split into physically unaligned chunks by
1035 * __blkdev_issue_write_zeroes() and __blkdev_issue_write_same()
1036 * even if the caller of these functions took care to align the
1037 * large request. So make sure the maximum reported is aligned
1038 * to the device physical block size. This is only an optional
1039 * optimization for regular disks, but this is mandatory to
1040 * avoid failure of large write same requests directed at
1041 * sequential write required zones of host-managed ZBC disks.
1042 */
1043 sdkp->max_ws_blocks =
1044 round_down(sdkp->max_ws_blocks,
1045 bytes_to_logical(sdkp->device,
1046 sdkp->physical_block_size));
1047 }
1048
1049 out:
1050 blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
1051 (logical_block_size >> 9));
1052 blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
1053 (logical_block_size >> 9));
1054 }
1055
1056 /**
1057 * sd_setup_write_same_cmnd - write the same data to multiple blocks
1058 * @cmd: command to prepare
1059 *
1060 * Will set up either WRITE SAME(10) or WRITE SAME(16) depending on
1061 * the preference indicated by the target device.
1062 **/
1063 static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
1064 {
1065 struct request *rq = cmd->request;
1066 struct scsi_device *sdp = cmd->device;
1067 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
1068 struct bio *bio = rq->bio;
1069 u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
1070 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
1071 blk_status_t ret;
1072
1073 if (sdkp->device->no_write_same)
1074 return BLK_STS_TARGET;
1075
1076 BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
1077
1078 rq->timeout = SD_WRITE_SAME_TIMEOUT;
1079
1080 if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) {
1081 cmd->cmd_len = 16;
1082 cmd->cmnd[0] = WRITE_SAME_16;
1083 put_unaligned_be64(lba, &cmd->cmnd[2]);
1084 put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
1085 } else {
1086 cmd->cmd_len = 10;
1087 cmd->cmnd[0] = WRITE_SAME;
1088 put_unaligned_be32(lba, &cmd->cmnd[2]);
1089 put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
1090 }
1091
1092 cmd->transfersize = sdp->sector_size;
1093 cmd->allowed = sdkp->max_retries;
1094
1095 /*
1096 * For WRITE SAME the data transferred via the DATA OUT buffer is
1097 * different from the amount of data actually written to the target.
1098 *
1099 * We set up __data_len to the amount of data transferred via the
1100 * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
1101 * to transfer a single sector of data first, but then reset it to
1102 * the amount of data to be written right after so that the I/O path
1103 * knows how much to actually write.
1104 */
1105 rq->__data_len = sdp->sector_size;
1106 ret = scsi_alloc_sgtables(cmd);
1107 rq->__data_len = blk_rq_bytes(rq);
1108
1109 return ret;
1110 }
1111
1112 static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
1113 {
1114 struct request *rq = cmd->request;
1115 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
1116
1117 /* flush requests don't perform I/O, zero the S/G table */
1118 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1119
1120 cmd->cmnd[0] = SYNCHRONIZE_CACHE;
1121 cmd->cmd_len = 10;
1122 cmd->transfersize = 0;
1123 cmd->allowed = sdkp->max_retries;
1124
1125 rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
1126 return BLK_STS_OK;
1127 }
1128
1129 static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
1130 sector_t lba, unsigned int nr_blocks,
1131 unsigned char flags)
1132 {
1133 cmd->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
1134 if (unlikely(cmd->cmnd == NULL))
1135 return BLK_STS_RESOURCE;
1136
1137 cmd->cmd_len = SD_EXT_CDB_SIZE;
1138 memset(cmd->cmnd, 0, cmd->cmd_len);
1139
1140 cmd->cmnd[0] = VARIABLE_LENGTH_CMD;
1141 cmd->cmnd[7] = 0x18; /* Additional CDB len */
1142 cmd->cmnd[9] = write ? WRITE_32 : READ_32;
1143 cmd->cmnd[10] = flags;
1144 put_unaligned_be64(lba, &cmd->cmnd[12]);
1145 put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */
1146 put_unaligned_be32(nr_blocks, &cmd->cmnd[28]);
1147
1148 return BLK_STS_OK;
1149 }
1150
1151 static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
1152 sector_t lba, unsigned int nr_blocks,
1153 unsigned char flags)
1154 {
1155 cmd->cmd_len = 16;
1156 cmd->cmnd[0] = write ? WRITE_16 : READ_16;
1157 cmd->cmnd[1] = flags;
1158 cmd->cmnd[14] = 0;
1159 cmd->cmnd[15] = 0;
1160 put_unaligned_be64(lba, &cmd->cmnd[2]);
1161 put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
1162
1163 return BLK_STS_OK;
1164 }
1165
1166 static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
1167 sector_t lba, unsigned int nr_blocks,
1168 unsigned char flags)
1169 {
1170 cmd->cmd_len = 10;
1171 cmd->cmnd[0] = write ? WRITE_10 : READ_10;
1172 cmd->cmnd[1] = flags;
1173 cmd->cmnd[6] = 0;
1174 cmd->cmnd[9] = 0;
1175 put_unaligned_be32(lba, &cmd->cmnd[2]);
1176 put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
1177
1178 return BLK_STS_OK;
1179 }
1180
1181 static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
1182 sector_t lba, unsigned int nr_blocks,
1183 unsigned char flags)
1184 {
1185 	/* Avoid having 0 blocks translated into 256 blocks. */
1186 if (WARN_ON_ONCE(nr_blocks == 0))
1187 return BLK_STS_IOERR;
1188
1189 if (unlikely(flags & 0x8)) {
1190 /*
1191 		 * This happens only if this drive failed a 10-byte READ/WRITE
1192 		 * command with ILLEGAL_REQUEST during operation and thus had
1193 		 * use_10_for_rw turned off.
1194 */
1195 scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n");
1196 return BLK_STS_IOERR;
1197 }
1198
1199 cmd->cmd_len = 6;
1200 cmd->cmnd[0] = write ? WRITE_6 : READ_6;
1201 cmd->cmnd[1] = (lba >> 16) & 0x1f;
1202 cmd->cmnd[2] = (lba >> 8) & 0xff;
1203 cmd->cmnd[3] = lba & 0xff;
1204 cmd->cmnd[4] = nr_blocks;
1205 cmd->cmnd[5] = 0;
1206
1207 return BLK_STS_OK;
1208 }
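/*
 * Worked example (illustrative only): a READ of 8 blocks starting at
 * LBA 0x12345 produces the 6-byte CDB
 *
 *   { READ_6, 0x01, 0x23, 0x45, 0x08, 0x00 }
 *
 * The 21-bit LBA is split across bytes 1-3 and the block count goes in
 * byte 4, which is why READ(6)/WRITE(6) is only used here for small LBAs
 * and transfers of at most 255 blocks.
 */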
1209
1210 static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
1211 {
1212 struct request *rq = cmd->request;
1213 struct scsi_device *sdp = cmd->device;
1214 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
1215 sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
1216 sector_t threshold;
1217 unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
1218 unsigned int mask = logical_to_sectors(sdp, 1) - 1;
1219 bool write = rq_data_dir(rq) == WRITE;
1220 unsigned char protect, fua;
1221 blk_status_t ret;
1222 unsigned int dif;
1223 bool dix;
1224
1225 ret = scsi_alloc_sgtables(cmd);
1226 if (ret != BLK_STS_OK)
1227 return ret;
1228
1229 ret = BLK_STS_IOERR;
1230 if (!scsi_device_online(sdp) || sdp->changed) {
1231 scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
1232 goto fail;
1233 }
1234
1235 if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {
1236 scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
1237 goto fail;
1238 }
1239
1240 if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
1241 scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
1242 goto fail;
1243 }
1244
1245 /*
1246 * Some SD card readers can't handle accesses which touch the
1247 * last one or two logical blocks. Split accesses as needed.
1248 */
1249 threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS;
1250
1251 if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) {
1252 if (lba < threshold) {
1253 /* Access up to the threshold but not beyond */
1254 nr_blocks = threshold - lba;
1255 } else {
1256 /* Access only a single logical block */
1257 nr_blocks = 1;
1258 }
1259 }
1260
1261 if (req_op(rq) == REQ_OP_ZONE_APPEND) {
1262 ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks);
1263 if (ret)
1264 goto fail;
1265 }
1266
1267 fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
1268 dix = scsi_prot_sg_count(cmd);
1269 dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);
1270
1271 if (dif || dix)
1272 protect = sd_setup_protect_cmnd(cmd, dix, dif);
1273 else
1274 protect = 0;
1275
1276 if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
1277 ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
1278 protect | fua);
1279 } else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
1280 ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
1281 protect | fua);
1282 } else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
1283 sdp->use_10_for_rw || protect) {
1284 ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
1285 protect | fua);
1286 } else {
1287 ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks,
1288 protect | fua);
1289 }
1290
1291 if (unlikely(ret != BLK_STS_OK))
1292 goto fail;
1293
1294 /*
1295 * We shouldn't disconnect in the middle of a sector, so with a dumb
1296 * host adapter, it's safe to assume that we can at least transfer
1297 * this many bytes between each connect / disconnect.
1298 */
1299 cmd->transfersize = sdp->sector_size;
1300 cmd->underflow = nr_blocks << 9;
1301 cmd->allowed = sdkp->max_retries;
1302 cmd->sdb.length = nr_blocks * sdp->sector_size;
1303
1304 SCSI_LOG_HLQUEUE(1,
1305 scmd_printk(KERN_INFO, cmd,
1306 "%s: block=%llu, count=%d\n", __func__,
1307 (unsigned long long)blk_rq_pos(rq),
1308 blk_rq_sectors(rq)));
1309 SCSI_LOG_HLQUEUE(2,
1310 scmd_printk(KERN_INFO, cmd,
1311 "%s %d/%u 512 byte blocks.\n",
1312 write ? "writing" : "reading", nr_blocks,
1313 blk_rq_sectors(rq)));
1314
1315 /*
1316 * This indicates that the command is ready from our end to be queued.
1317 */
1318 return BLK_STS_OK;
1319 fail:
1320 scsi_free_sgtables(cmd);
1321 return ret;
1322 }
1323
1324 static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
1325 {
1326 struct request *rq = cmd->request;
1327
1328 switch (req_op(rq)) {
1329 case REQ_OP_DISCARD:
1330 switch (scsi_disk(rq->rq_disk)->provisioning_mode) {
1331 case SD_LBP_UNMAP:
1332 return sd_setup_unmap_cmnd(cmd);
1333 case SD_LBP_WS16:
1334 return sd_setup_write_same16_cmnd(cmd, true);
1335 case SD_LBP_WS10:
1336 return sd_setup_write_same10_cmnd(cmd, true);
1337 case SD_LBP_ZERO:
1338 return sd_setup_write_same10_cmnd(cmd, false);
1339 default:
1340 return BLK_STS_TARGET;
1341 }
1342 case REQ_OP_WRITE_ZEROES:
1343 return sd_setup_write_zeroes_cmnd(cmd);
1344 case REQ_OP_WRITE_SAME:
1345 return sd_setup_write_same_cmnd(cmd);
1346 case REQ_OP_FLUSH:
1347 return sd_setup_flush_cmnd(cmd);
1348 case REQ_OP_READ:
1349 case REQ_OP_WRITE:
1350 case REQ_OP_ZONE_APPEND:
1351 return sd_setup_read_write_cmnd(cmd);
1352 case REQ_OP_ZONE_RESET:
1353 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
1354 false);
1355 case REQ_OP_ZONE_RESET_ALL:
1356 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
1357 true);
1358 case REQ_OP_ZONE_OPEN:
1359 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false);
1360 case REQ_OP_ZONE_CLOSE:
1361 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false);
1362 case REQ_OP_ZONE_FINISH:
1363 return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false);
1364 default:
1365 WARN_ON_ONCE(1);
1366 return BLK_STS_NOTSUPP;
1367 }
1368 }
1369
1370 static void sd_uninit_command(struct scsi_cmnd *SCpnt)
1371 {
1372 struct request *rq = SCpnt->request;
1373 u8 *cmnd;
1374
1375 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1376 mempool_free(rq->special_vec.bv_page, sd_page_pool);
1377
1378 if (SCpnt->cmnd != scsi_req(rq)->cmd) {
1379 cmnd = SCpnt->cmnd;
1380 SCpnt->cmnd = NULL;
1381 SCpnt->cmd_len = 0;
1382 mempool_free(cmnd, sd_cdb_pool);
1383 }
1384 }
1385
1386 static bool sd_need_revalidate(struct block_device *bdev,
1387 struct scsi_disk *sdkp)
1388 {
1389 if (sdkp->device->removable || sdkp->write_prot) {
1390 if (bdev_check_media_change(bdev))
1391 return true;
1392 }
1393
1394 /*
1395 * Force a full rescan after ioctl(BLKRRPART). While the disk state has
1396 * nothing to do with partitions, BLKRRPART is used to force a full
1397 * revalidate after things like a format for historical reasons.
1398 */
1399 return test_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
1400 }
1401
1402 /**
1403 * sd_open - open a scsi disk device
1404 * @bdev: Block device of the scsi disk to open
1405 * @mode: FMODE_* mask
1406 *
1407 * Returns 0 if successful. Returns a negated errno value in case
1408 * of error.
1409 *
1410  *	Note: This can be called from a user context (e.g. fsck(1))
1411  *	or from within the kernel (e.g. as a result of a mount(1)).
1412  *	In the latter case @bdev and @mode carry an abridged amount
1413  *	of information as noted above.
1414 *
1415 * Locking: called with bdev->bd_disk->open_mutex held.
1416 **/
1417 static int sd_open(struct block_device *bdev, fmode_t mode)
1418 {
1419 struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
1420 struct scsi_device *sdev;
1421 int retval;
1422
1423 if (!sdkp)
1424 return -ENXIO;
1425
1426 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
1427
1428 sdev = sdkp->device;
1429
1430 /*
1431 * If the device is in error recovery, wait until it is done.
1432 * If the device is offline, then disallow any access to it.
1433 */
1434 retval = -ENXIO;
1435 if (!scsi_block_when_processing_errors(sdev))
1436 goto error_out;
1437
1438 if (sd_need_revalidate(bdev, sdkp))
1439 sd_revalidate_disk(bdev->bd_disk);
1440
1441 /*
1442 * If the drive is empty, just let the open fail.
1443 */
1444 retval = -ENOMEDIUM;
1445 if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
1446 goto error_out;
1447
1448 /*
1449 * If the device has the write protect tab set, have the open fail
1450 * if the user expects to be able to write to the thing.
1451 */
1452 retval = -EROFS;
1453 if (sdkp->write_prot && (mode & FMODE_WRITE))
1454 goto error_out;
1455
1456 /*
1457 * It is possible that the disk changing stuff resulted in
1458 * the device being taken offline. If this is the case,
1459 * report this to the user, and don't pretend that the
1460 * open actually succeeded.
1461 */
1462 retval = -ENXIO;
1463 if (!scsi_device_online(sdev))
1464 goto error_out;
1465
1466 if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
1467 if (scsi_block_when_processing_errors(sdev))
1468 scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
1469 }
1470
1471 return 0;
1472
1473 error_out:
1474 scsi_disk_put(sdkp);
1475 return retval;
1476 }
1477
1478 /**
1479 * sd_release - invoked when the (last) close(2) is called on this
1480 * scsi disk.
1481 * @disk: disk to release
1482 * @mode: FMODE_* mask
1483 *
1484 * Returns 0.
1485 *
1486 * Note: may block (uninterruptible) if error recovery is underway
1487 * on this disk.
1488 *
1489 * Locking: called with bdev->bd_disk->open_mutex held.
1490 **/
1491 static void sd_release(struct gendisk *disk, fmode_t mode)
1492 {
1493 struct scsi_disk *sdkp = scsi_disk(disk);
1494 struct scsi_device *sdev = sdkp->device;
1495
1496 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
1497
1498 if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
1499 if (scsi_block_when_processing_errors(sdev))
1500 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
1501 }
1502
1503 scsi_disk_put(sdkp);
1504 }
1505
1506 static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1507 {
1508 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
1509 struct scsi_device *sdp = sdkp->device;
1510 struct Scsi_Host *host = sdp->host;
1511 sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
1512 int diskinfo[4];
1513
1514 /* default to most commonly used values */
1515 diskinfo[0] = 0x40; /* 1 << 6 */
1516 diskinfo[1] = 0x20; /* 1 << 5 */
1517 diskinfo[2] = capacity >> 11;
1518
1519 /* override with calculated, extended default, or driver values */
1520 if (host->hostt->bios_param)
1521 host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
1522 else
1523 scsicam_bios_param(bdev, capacity, diskinfo);
1524
1525 geo->heads = diskinfo[0];
1526 geo->sectors = diskinfo[1];
1527 geo->cylinders = diskinfo[2];
1528 return 0;
1529 }
1530
1531 /**
1532 * sd_ioctl_common - process an ioctl
1533 * @bdev: target block device
1534 * @mode: FMODE_* mask
1535 * @cmd: ioctl command number
1536 * @p: this is third argument given to ioctl(2) system call.
1537 * Often contains a pointer.
1538 *
1539 * Returns 0 if successful (some ioctls return positive numbers on
1540 * success as well). Returns a negated errno value in case of error.
1541 *
1542  *	Note: most ioctls are forwarded onto the block subsystem or further
1543 * down in the scsi subsystem.
1544 **/
1545 static int sd_ioctl_common(struct block_device *bdev, fmode_t mode,
1546 unsigned int cmd, void __user *p)
1547 {
1548 struct gendisk *disk = bdev->bd_disk;
1549 struct scsi_disk *sdkp = scsi_disk(disk);
1550 struct scsi_device *sdp = sdkp->device;
1551 int error;
1552
1553 SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
1554 "cmd=0x%x\n", disk->disk_name, cmd));
1555
1556 error = scsi_verify_blk_ioctl(bdev, cmd);
1557 if (error < 0)
1558 return error;
1559
1560 /*
1561 * If we are in the middle of error recovery, don't let anyone
1562 * else try and use this device. Also, if error recovery fails, it
1563 * may try and take the device offline, in which case all further
1564 * access to the device is prohibited.
1565 */
1566 error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
1567 (mode & FMODE_NDELAY) != 0);
1568 if (error)
1569 goto out;
1570
1571 if (is_sed_ioctl(cmd))
1572 return sed_ioctl(sdkp->opal_dev, cmd, p);
1573
1574 /*
1575 * Send SCSI addressing ioctls directly to mid level, send other
1576 * ioctls to block level and then onto mid level if they can't be
1577 * resolved.
1578 */
1579 switch (cmd) {
1580 case SCSI_IOCTL_GET_IDLUN:
1581 case SCSI_IOCTL_GET_BUS_NUMBER:
1582 error = scsi_ioctl(sdp, cmd, p);
1583 break;
1584 default:
1585 error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
1586 break;
1587 }
1588 out:
1589 return error;
1590 }
1591
1592 static void set_media_not_present(struct scsi_disk *sdkp)
1593 {
1594 if (sdkp->media_present)
1595 sdkp->device->changed = 1;
1596
1597 if (sdkp->device->removable) {
1598 sdkp->media_present = 0;
1599 sdkp->capacity = 0;
1600 }
1601 }
1602
1603 static int media_not_present(struct scsi_disk *sdkp,
1604 struct scsi_sense_hdr *sshdr)
1605 {
1606 if (!scsi_sense_valid(sshdr))
1607 return 0;
1608
1609 /* not invoked for commands that could return deferred errors */
1610 switch (sshdr->sense_key) {
1611 case UNIT_ATTENTION:
1612 case NOT_READY:
1613 /* medium not present */
1614 if (sshdr->asc == 0x3A) {
1615 set_media_not_present(sdkp);
1616 return 1;
1617 }
1618 }
1619 return 0;
1620 }
1621
1622 /**
1623 * sd_check_events - check media events
1624 * @disk: kernel device descriptor
1625 * @clearing: disk events currently being cleared
1626 *
1627 * Returns mask of DISK_EVENT_*.
1628 *
1629 * Note: this function is invoked from the block subsystem.
1630 **/
1631 static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
1632 {
1633 struct scsi_disk *sdkp = scsi_disk_get(disk);
1634 struct scsi_device *sdp;
1635 int retval;
1636 bool disk_changed;
1637
1638 if (!sdkp)
1639 return 0;
1640
1641 sdp = sdkp->device;
1642 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
1643
1644 /*
1645 * If the device is offline, don't send any commands - just pretend as
1646 * if the command failed. If the device ever comes back online, we
1647 * can deal with it then. It is only because of unrecoverable errors
1648 * that we would ever take a device offline in the first place.
1649 */
1650 if (!scsi_device_online(sdp)) {
1651 set_media_not_present(sdkp);
1652 goto out;
1653 }
1654
1655 /*
1656 	 * Using TEST_UNIT_READY enables differentiation between a drive with
1657 	 * no cartridge loaded (NOT READY), a drive with a changed cartridge
1658 	 * (UNIT ATTENTION), and one with the same cartridge (GOOD STATUS).
1659 	 *
1660 	 * Drives that auto spin down, e.g. the iomega jaz 1G, will be started
1661 	 * by sd_spinup_disk(), which is called from sd_revalidate_disk()
1662 	 * whenever the disk is revalidated.
1663 */
1664 if (scsi_block_when_processing_errors(sdp)) {
1665 struct scsi_sense_hdr sshdr = { 0, };
1666
1667 retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries,
1668 &sshdr);
1669
1670 /* failed to execute TUR, assume media not present */
1671 if (retval < 0 || host_byte(retval)) {
1672 set_media_not_present(sdkp);
1673 goto out;
1674 }
1675
1676 if (media_not_present(sdkp, &sshdr))
1677 goto out;
1678 }
1679
1680 /*
1681 	 * For removable SCSI disks we have to recognise the presence
1682 * of a disk in the drive.
1683 */
1684 if (!sdkp->media_present)
1685 sdp->changed = 1;
1686 sdkp->media_present = 1;
1687 out:
1688 /*
1689 * sdp->changed is set under the following conditions:
1690 *
1691 * Medium present state has changed in either direction.
1692 * Device has indicated UNIT_ATTENTION.
1693 */
1694 disk_changed = sdp->changed;
1695 sdp->changed = 0;
1696 scsi_disk_put(sdkp);
1697 return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
1698 }
1699
1700 static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
1701 {
1702 int retries, res;
1703 struct scsi_device *sdp = sdkp->device;
1704 const int timeout = sdp->request_queue->rq_timeout
1705 * SD_FLUSH_TIMEOUT_MULTIPLIER;
1706 struct scsi_sense_hdr my_sshdr;
1707
1708 if (!scsi_device_online(sdp))
1709 return -ENODEV;
1710
1711 /* caller might not be interested in sense, but we need it */
1712 if (!sshdr)
1713 sshdr = &my_sshdr;
1714
1715 for (retries = 3; retries > 0; --retries) {
1716 unsigned char cmd[10] = { 0 };
1717
1718 cmd[0] = SYNCHRONIZE_CACHE;
1719 /*
1720 * Leave the rest of the command zero to indicate
1721 * flush everything.
1722 */
1723 res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
1724 timeout, sdkp->max_retries, 0, RQF_PM, NULL);
1725 if (res == 0)
1726 break;
1727 }
1728
1729 if (res) {
1730 sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
1731
1732 if (res < 0)
1733 return res;
1734
1735 if (scsi_status_is_check_condition(res) &&
1736 scsi_sense_valid(sshdr)) {
1737 sd_print_sense_hdr(sdkp, sshdr);
1738
1739 /* we need to evaluate the error return */
1740 if (sshdr->asc == 0x3a || /* medium not present */
1741 sshdr->asc == 0x20 || /* invalid command */
1742 (sshdr->asc == 0x74 && sshdr->ascq == 0x71)) /* drive is password locked */
1743 /* this is no error here */
1744 return 0;
1745 }
1746
1747 switch (host_byte(res)) {
1748 /* ignore errors due to racing a disconnection */
1749 case DID_BAD_TARGET:
1750 case DID_NO_CONNECT:
1751 return 0;
1752 /* signal the upper layer it might try again */
1753 case DID_BUS_BUSY:
1754 case DID_IMM_RETRY:
1755 case DID_REQUEUE:
1756 case DID_SOFT_ERROR:
1757 return -EBUSY;
1758 default:
1759 return -EIO;
1760 }
1761 }
1762 return 0;
1763 }
1764
1765 static void sd_rescan(struct device *dev)
1766 {
1767 struct scsi_disk *sdkp = dev_get_drvdata(dev);
1768
1769 sd_revalidate_disk(sdkp->disk);
1770 }
1771
1772 static int sd_ioctl(struct block_device *bdev, fmode_t mode,
1773 unsigned int cmd, unsigned long arg)
1774 {
1775 void __user *p = (void __user *)arg;
1776 int ret;
1777
1778 ret = sd_ioctl_common(bdev, mode, cmd, p);
1779 if (ret != -ENOTTY)
1780 return ret;
1781
1782 return scsi_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
1783 }
1784
1785 #ifdef CONFIG_COMPAT
1786 static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
1787 unsigned int cmd, unsigned long arg)
1788 {
1789 void __user *p = compat_ptr(arg);
1790 int ret;
1791
1792 ret = sd_ioctl_common(bdev, mode, cmd, p);
1793 if (ret != -ENOTTY)
1794 return ret;
1795
1796 return scsi_compat_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
1797 }
1798 #endif
1799
1800 static char sd_pr_type(enum pr_type type)
1801 {
1802 switch (type) {
1803 case PR_WRITE_EXCLUSIVE:
1804 return 0x01;
1805 case PR_EXCLUSIVE_ACCESS:
1806 return 0x03;
1807 case PR_WRITE_EXCLUSIVE_REG_ONLY:
1808 return 0x05;
1809 case PR_EXCLUSIVE_ACCESS_REG_ONLY:
1810 return 0x06;
1811 case PR_WRITE_EXCLUSIVE_ALL_REGS:
1812 return 0x07;
1813 case PR_EXCLUSIVE_ACCESS_ALL_REGS:
1814 return 0x08;
1815 default:
1816 return 0;
1817 }
1818 };
1819
1820 static int sd_pr_command(struct block_device *bdev, u8 sa,
1821 u64 key, u64 sa_key, u8 type, u8 flags)
1822 {
1823 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
1824 struct scsi_device *sdev = sdkp->device;
1825 struct scsi_sense_hdr sshdr;
1826 int result;
1827 u8 cmd[16] = { 0, };
1828 u8 data[24] = { 0, };
1829
1830 cmd[0] = PERSISTENT_RESERVE_OUT;
1831 cmd[1] = sa;
1832 cmd[2] = type;
1833 put_unaligned_be32(sizeof(data), &cmd[5]);
1834
1835 put_unaligned_be64(key, &data[0]);
1836 put_unaligned_be64(sa_key, &data[8]);
1837 data[20] = flags;
1838
1839 result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
1840 &sshdr, SD_TIMEOUT, sdkp->max_retries, NULL);
1841
1842 if (scsi_status_is_check_condition(result) &&
1843 scsi_sense_valid(&sshdr)) {
1844 sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
1845 scsi_print_sense_hdr(sdev, NULL, &sshdr);
1846 }
1847
1848 return result;
1849 }
1850
1851 static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
1852 u32 flags)
1853 {
1854 if (flags & ~PR_FL_IGNORE_KEY)
1855 return -EOPNOTSUPP;
1856 return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
1857 old_key, new_key, 0,
1858 (1 << 0) /* APTPL */);
1859 }
1860
1861 static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
1862 u32 flags)
1863 {
1864 if (flags)
1865 return -EOPNOTSUPP;
1866 return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0);
1867 }
1868
1869 static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
1870 {
1871 return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0);
1872 }
1873
1874 static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
1875 enum pr_type type, bool abort)
1876 {
1877 return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
1878 sd_pr_type(type), 0);
1879 }
1880
1881 static int sd_pr_clear(struct block_device *bdev, u64 key)
1882 {
1883 return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
1884 }
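/*
 * Example of how these helpers translate a block layer persistent
 * reservation request (illustrative only): an IOC_PR_RESERVE ioctl with
 * type PR_WRITE_EXCLUSIVE reaches sd_pr_reserve(), which issues
 * PERSISTENT RESERVE OUT with service action 0x01 (RESERVE) and
 * reservation type 0x01 (Write Exclusive) via sd_pr_command().
 */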
1885
1886 static const struct pr_ops sd_pr_ops = {
1887 .pr_register = sd_pr_register,
1888 .pr_reserve = sd_pr_reserve,
1889 .pr_release = sd_pr_release,
1890 .pr_preempt = sd_pr_preempt,
1891 .pr_clear = sd_pr_clear,
1892 };
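
/*
 * Illustration only, not part of the driver: a minimal user-space sketch of
 * how the pr_ops above are reached through the block layer's persistent
 * reservation ioctls (<linux/pr.h>). The device node and keys are made up
 * and error handling is omitted.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/pr.h>
 *
 *	int fd = open("/dev/sdX", O_RDWR);
 *	struct pr_registration reg = { .old_key = 0, .new_key = 0x1234 };
 *	ioctl(fd, IOC_PR_REGISTER, &reg);        // -> sd_pr_register()
 *	struct pr_reservation rsv = { .key = 0x1234,
 *	                              .type = PR_WRITE_EXCLUSIVE };
 *	ioctl(fd, IOC_PR_RESERVE, &rsv);         // -> sd_pr_reserve()
 */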
1893
1894 static const struct block_device_operations sd_fops = {
1895 .owner = THIS_MODULE,
1896 .open = sd_open,
1897 .release = sd_release,
1898 .ioctl = sd_ioctl,
1899 .getgeo = sd_getgeo,
1900 #ifdef CONFIG_COMPAT
1901 .compat_ioctl = sd_compat_ioctl,
1902 #endif
1903 .check_events = sd_check_events,
1904 .unlock_native_capacity = sd_unlock_native_capacity,
1905 .report_zones = sd_zbc_report_zones,
1906 .pr_ops = &sd_pr_ops,
1907 };
1908
1909 /**
1910 * sd_eh_reset - reset error handling callback
1911 * @scmd: sd-issued command that has failed
1912 *
1913 * This function is called by the SCSI midlayer before starting
1914 * SCSI EH. When counting medium access failures we have to be
1915 * careful to register it only once per device and SCSI EH run;
1916 * there might be several timed out commands which will cause the
1917 * 'max_medium_access_timeouts' counter to trigger after the first
1918 * SCSI EH run and take the device offline.
1919 * So this function resets the internal counter before starting SCSI EH.
1920 **/
1921 static void sd_eh_reset(struct scsi_cmnd *scmd)
1922 {
1923 struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
1924
1925 /* New SCSI EH run, reset gate variable */
1926 sdkp->ignore_medium_access_errors = false;
1927 }
1928
1929 /**
1930 * sd_eh_action - error handling callback
1931 * @scmd: sd-issued command that has failed
1932 * @eh_disp: The recovery disposition suggested by the midlayer
1933 *
1934 * This function is called by the SCSI midlayer upon completion of an
1935 * error test command (currently TEST UNIT READY). The result of sending
1936 * the eh command is passed in eh_disp. We're looking for devices that
1937 * fail medium access commands but are OK with non-access commands like
1938 * TEST UNIT READY (and would therefore wrongly be seen as having
1939 * recovered successfully).
1940 **/
1941 static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
1942 {
1943 struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
1944 struct scsi_device *sdev = scmd->device;
1945
1946 if (!scsi_device_online(sdev) ||
1947 !scsi_medium_access_command(scmd) ||
1948 host_byte(scmd->result) != DID_TIME_OUT ||
1949 eh_disp != SUCCESS)
1950 return eh_disp;
1951
1952 /*
1953 * The device has timed out executing a medium access command.
1954 * However, the TEST UNIT READY command sent during error
1955 * handling completed successfully. Either the device is in the
1956 * process of recovering or it has suffered an internal failure
1957 * that prevents access to the storage medium.
1958 */
1959 if (!sdkp->ignore_medium_access_errors) {
1960 sdkp->medium_access_timed_out++;
1961 sdkp->ignore_medium_access_errors = true;
1962 }
1963
1964 /*
1965 * If the device keeps failing read/write commands but TEST UNIT
1966 * READY always completes successfully we assume that medium
1967 * access is no longer possible and take the device offline.
1968 */
1969 if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
1970 scmd_printk(KERN_ERR, scmd,
1971 "Medium access timeout failure. Offlining disk!\n");
1972 mutex_lock(&sdev->state_mutex);
1973 scsi_device_set_state(sdev, SDEV_OFFLINE);
1974 mutex_unlock(&sdev->state_mutex);
1975
1976 return SUCCESS;
1977 }
1978
1979 return eh_disp;
1980 }
1981
1982 static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
1983 {
1984 struct request *req = scmd->request;
1985 struct scsi_device *sdev = scmd->device;
1986 unsigned int transferred, good_bytes;
1987 u64 start_lba, end_lba, bad_lba;
1988
1989 /*
1990 * Some commands have a payload smaller than the device logical
1991 * block size (e.g. INQUIRY on a 4K disk).
1992 */
1993 if (scsi_bufflen(scmd) <= sdev->sector_size)
1994 return 0;
1995
1996 /* Check if we have 'bad_lba' information */
1997 if (!scsi_get_sense_info_fld(scmd->sense_buffer,
1998 SCSI_SENSE_BUFFERSIZE,
1999 &bad_lba))
2000 return 0;
2001
2002 /*
2003 * If the bad lba was reported incorrectly, we have no idea where
2004 * the error is.
2005 */
2006 start_lba = sectors_to_logical(sdev, blk_rq_pos(req));
2007 end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd));
2008 if (bad_lba < start_lba || bad_lba >= end_lba)
2009 return 0;
2010
2011 /*
2012 * resid is optional but mostly filled in. When it's unused,
2013 * its value is zero, so we assume the whole buffer was transferred
2014 */
2015 transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
2016
2017 /* This computation should always be done in terms of the
2018 * resolution of the device's medium.
2019 */
2020 good_bytes = logical_to_bytes(sdev, bad_lba - start_lba);
2021
2022 return min(good_bytes, transferred);
2023 }
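
/*
 * Worked example (illustration only, numbers made up): a READ of 8 logical
 * blocks starting at LBA 1000 on a device with 4096-byte logical blocks,
 * where the sense data reports bad_lba = 1003 and resid = 0:
 *
 *	start_lba   = 1000, end_lba = 1008
 *	transferred = 8 * 4096 = 32768
 *	good_bytes  = (1003 - 1000) * 4096 = 12288
 *
 * so the first three blocks count as completed and the remainder is failed
 * or retried by the SCSI midlayer.
 */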
2024
2025 /**
2026 * sd_done - bottom half handler: called when the lower level
2027 * driver has completed (successfully or otherwise) a scsi command.
2028 * @SCpnt: mid-level's per command structure.
2029 *
2030 * Note: potentially run from within an ISR. Must not block.
2031 **/
2032 static int sd_done(struct scsi_cmnd *SCpnt)
2033 {
2034 int result = SCpnt->result;
2035 unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
2036 unsigned int sector_size = SCpnt->device->sector_size;
2037 unsigned int resid;
2038 struct scsi_sense_hdr sshdr;
2039 struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
2040 struct request *req = SCpnt->request;
2041 int sense_valid = 0;
2042 int sense_deferred = 0;
2043
2044 switch (req_op(req)) {
2045 case REQ_OP_DISCARD:
2046 case REQ_OP_WRITE_ZEROES:
2047 case REQ_OP_WRITE_SAME:
2048 case REQ_OP_ZONE_RESET:
2049 case REQ_OP_ZONE_RESET_ALL:
2050 case REQ_OP_ZONE_OPEN:
2051 case REQ_OP_ZONE_CLOSE:
2052 case REQ_OP_ZONE_FINISH:
2053 if (!result) {
2054 good_bytes = blk_rq_bytes(req);
2055 scsi_set_resid(SCpnt, 0);
2056 } else {
2057 good_bytes = 0;
2058 scsi_set_resid(SCpnt, blk_rq_bytes(req));
2059 }
2060 break;
2061 default:
2062 /*
2063 * In case of bogus fw or device, we could end up having
2064 * an unaligned partial completion. Check this here and force
2065 * alignment.
2066 */
2067 resid = scsi_get_resid(SCpnt);
2068 if (resid & (sector_size - 1)) {
2069 sd_printk(KERN_INFO, sdkp,
2070 "Unaligned partial completion (resid=%u, sector_sz=%u)\n",
2071 resid, sector_size);
2072 scsi_print_command(SCpnt);
2073 resid = min(scsi_bufflen(SCpnt),
2074 round_up(resid, sector_size));
2075 scsi_set_resid(SCpnt, resid);
2076 }
2077 }
2078
2079 if (result) {
2080 sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
2081 if (sense_valid)
2082 sense_deferred = scsi_sense_is_deferred(&sshdr);
2083 }
2084 sdkp->medium_access_timed_out = 0;
2085
2086 if (!scsi_status_is_check_condition(result) &&
2087 (!sense_valid || sense_deferred))
2088 goto out;
2089
2090 switch (sshdr.sense_key) {
2091 case HARDWARE_ERROR:
2092 case MEDIUM_ERROR:
2093 good_bytes = sd_completed_bytes(SCpnt);
2094 break;
2095 case RECOVERED_ERROR:
2096 good_bytes = scsi_bufflen(SCpnt);
2097 break;
2098 case NO_SENSE:
2099 /* This indicates a false check condition, so ignore it. An
2100 * unknown amount of data was transferred so treat it as an
2101 * error.
2102 */
2103 SCpnt->result = 0;
2104 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2105 break;
2106 case ABORTED_COMMAND:
2107 if (sshdr.asc == 0x10) /* DIF: Target detected corruption */
2108 good_bytes = sd_completed_bytes(SCpnt);
2109 break;
2110 case ILLEGAL_REQUEST:
2111 switch (sshdr.asc) {
2112 case 0x10: /* DIX: Host detected corruption */
2113 good_bytes = sd_completed_bytes(SCpnt);
2114 break;
2115 case 0x20: /* INVALID COMMAND OPCODE */
2116 case 0x24: /* INVALID FIELD IN CDB */
2117 switch (SCpnt->cmnd[0]) {
2118 case UNMAP:
2119 sd_config_discard(sdkp, SD_LBP_DISABLE);
2120 break;
2121 case WRITE_SAME_16:
2122 case WRITE_SAME:
2123 if (SCpnt->cmnd[1] & 8) { /* UNMAP */
2124 sd_config_discard(sdkp, SD_LBP_DISABLE);
2125 } else {
2126 sdkp->device->no_write_same = 1;
2127 sd_config_write_same(sdkp);
2128 req->rq_flags |= RQF_QUIET;
2129 }
2130 break;
2131 }
2132 }
2133 break;
2134 default:
2135 break;
2136 }
2137
2138 out:
2139 if (sd_is_zoned(sdkp))
2140 good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr);
2141
2142 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
2143 "sd_done: completed %d of %d bytes\n",
2144 good_bytes, scsi_bufflen(SCpnt)));
2145
2146 return good_bytes;
2147 }
2148
2149 /*
2150 * spinup disk - called only in sd_revalidate_disk()
2151 */
2152 static void
2153 sd_spinup_disk(struct scsi_disk *sdkp)
2154 {
2155 unsigned char cmd[10];
2156 unsigned long spintime_expire = 0;
2157 int retries, spintime;
2158 unsigned int the_result;
2159 struct scsi_sense_hdr sshdr;
2160 int sense_valid = 0;
2161
2162 spintime = 0;
2163
2164 /* Spin up drives, as required. This is not limited to boot time:
2165 * it also needs to happen when the driver is loaded as a module. */
2166 do {
2167 retries = 0;
2168
2169 do {
2170 cmd[0] = TEST_UNIT_READY;
2171 memset((void *) &cmd[1], 0, 9);
2172
2173 the_result = scsi_execute_req(sdkp->device, cmd,
2174 DMA_NONE, NULL, 0,
2175 &sshdr, SD_TIMEOUT,
2176 sdkp->max_retries, NULL);
2177
2178 /*
2179 * If the drive has indicated to us that it
2180 * doesn't have any media in it, don't bother
2181 * with any more polling.
2182 */
2183 if (media_not_present(sdkp, &sshdr))
2184 return;
2185
2186 if (the_result)
2187 sense_valid = scsi_sense_valid(&sshdr);
2188 retries++;
2189 } while (retries < 3 &&
2190 (!scsi_status_is_good(the_result) ||
2191 (scsi_status_is_check_condition(the_result) &&
2192 sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
2193
2194 if (!scsi_status_is_check_condition(the_result)) {
2195 /* no sense, TUR either succeeded or failed
2196 * with a status error */
2197 if (!spintime && !scsi_status_is_good(the_result)) {
2198 sd_print_result(sdkp, "Test Unit Ready failed",
2199 the_result);
2200 }
2201 break;
2202 }
2203
2204 /*
2205 * The device does not want the automatic start to be issued.
2206 */
2207 if (sdkp->device->no_start_on_add)
2208 break;
2209
2210 if (sense_valid && sshdr.sense_key == NOT_READY) {
2211 if (sshdr.asc == 4 && sshdr.ascq == 3)
2212 break; /* manual intervention required */
2213 if (sshdr.asc == 4 && sshdr.ascq == 0xb)
2214 break; /* standby */
2215 if (sshdr.asc == 4 && sshdr.ascq == 0xc)
2216 break; /* unavailable */
2217 if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
2218 break; /* sanitize in progress */
2219 /*
2220 * Issue command to spin up drive when not ready
2221 */
2222 if (!spintime) {
2223 sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
2224 cmd[0] = START_STOP;
2225 cmd[1] = 1; /* Return immediately */
2226 memset((void *) &cmd[2], 0, 8);
2227 cmd[4] = 1; /* Start spin cycle */
2228 if (sdkp->device->start_stop_pwr_cond)
2229 cmd[4] |= 1 << 4;
2230 scsi_execute_req(sdkp->device, cmd, DMA_NONE,
2231 NULL, 0, &sshdr,
2232 SD_TIMEOUT, sdkp->max_retries,
2233 NULL);
2234 spintime_expire = jiffies + 100 * HZ;
2235 spintime = 1;
2236 }
2237 /* Wait 1 second for next try */
2238 msleep(1000);
2239 printk(KERN_CONT ".");
2240
2241 /*
2242 * Wait for USB flash devices with slow firmware.
2243 * Yes, this sense key/ASC combination shouldn't
2244 * occur here. It's characteristic of these devices.
2245 */
2246 } else if (sense_valid &&
2247 sshdr.sense_key == UNIT_ATTENTION &&
2248 sshdr.asc == 0x28) {
2249 if (!spintime) {
2250 spintime_expire = jiffies + 5 * HZ;
2251 spintime = 1;
2252 }
2253 /* Wait 1 second for next try */
2254 msleep(1000);
2255 } else {
2256 /* we don't understand the sense code, so it's
2257 * probably pointless to loop */
2258 if (!spintime) {
2259 sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
2260 sd_print_sense_hdr(sdkp, &sshdr);
2261 }
2262 break;
2263 }
2264
2265 } while (spintime && time_before_eq(jiffies, spintime_expire));
2266
2267 if (spintime) {
2268 if (scsi_status_is_good(the_result))
2269 printk(KERN_CONT "ready\n");
2270 else
2271 printk(KERN_CONT "not responding...\n");
2272 }
2273 }
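
/*
 * Illustration only: the START STOP UNIT CDB built above for a spin-up,
 * assuming start_stop_pwr_cond is not set:
 *
 *	cmd[0] = 0x1b  START STOP UNIT
 *	cmd[1] = 0x01  IMMED - return before the spin-up has completed
 *	cmd[4] = 0x01  START
 *
 * With start_stop_pwr_cond set, cmd[4] becomes 0x11, i.e. the POWER
 * CONDITION field requests ACTIVE rather than relying on the START bit
 * alone.
 */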
2274
2275 /*
2276 * Determine whether disk supports Data Integrity Field.
2277 */
2278 static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
2279 {
2280 struct scsi_device *sdp = sdkp->device;
2281 u8 type;
2282 int ret = 0;
2283
2284 if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
2285 sdkp->protection_type = 0;
2286 return ret;
2287 }
2288
2289 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
2290
2291 if (type > T10_PI_TYPE3_PROTECTION)
2292 ret = -ENODEV;
2293 else if (scsi_host_dif_capable(sdp->host, type))
2294 ret = 1;
2295
2296 if (sdkp->first_scan || type != sdkp->protection_type)
2297 switch (ret) {
2298 case -ENODEV:
2299 sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
2300 " protection type %u. Disabling disk!\n",
2301 type);
2302 break;
2303 case 1:
2304 sd_printk(KERN_NOTICE, sdkp,
2305 "Enabling DIF Type %u protection\n", type);
2306 break;
2307 case 0:
2308 sd_printk(KERN_NOTICE, sdkp,
2309 "Disabling DIF Type %u protection\n", type);
2310 break;
2311 }
2312
2313 sdkp->protection_type = type;
2314
2315 return ret;
2316 }
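
/*
 * Worked example (illustration only): READ CAPACITY(16) byte 12 = 0x01 means
 * PROT_EN = 1 and P_TYPE = 0, which the code above maps to protection type 1;
 * byte 12 = 0x03 maps to type 2. A value above T10_PI_TYPE3_PROTECTION is
 * rejected with -ENODEV, and PROT_EN = 0 disables protection entirely.
 */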
2317
2318 static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
2319 struct scsi_sense_hdr *sshdr, int sense_valid,
2320 int the_result)
2321 {
2322 if (sense_valid)
2323 sd_print_sense_hdr(sdkp, sshdr);
2324 else
2325 sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
2326
2327 /*
2328 * Set dirty bit for removable devices if not ready -
2329 * sometimes drives will not report this properly.
2330 */
2331 if (sdp->removable &&
2332 sense_valid && sshdr->sense_key == NOT_READY)
2333 set_media_not_present(sdkp);
2334
2335 /*
2336 * We used to set media_present to 0 here to indicate no media
2337 * in the drive, but some drives fail read capacity even with
2338 * media present, so we can't do that.
2339 */
2340 sdkp->capacity = 0; /* unknown mapped to zero - as usual */
2341 }
2342
2343 #define RC16_LEN 32
2344 #if RC16_LEN > SD_BUF_SIZE
2345 #error RC16_LEN must not be more than SD_BUF_SIZE
2346 #endif
2347
2348 #define READ_CAPACITY_RETRIES_ON_RESET 10
2349
2350 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
2351 unsigned char *buffer)
2352 {
2353 unsigned char cmd[16];
2354 struct scsi_sense_hdr sshdr;
2355 int sense_valid = 0;
2356 int the_result;
2357 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
2358 unsigned int alignment;
2359 unsigned long long lba;
2360 unsigned sector_size;
2361
2362 if (sdp->no_read_capacity_16)
2363 return -EINVAL;
2364
2365 do {
2366 memset(cmd, 0, 16);
2367 cmd[0] = SERVICE_ACTION_IN_16;
2368 cmd[1] = SAI_READ_CAPACITY_16;
2369 cmd[13] = RC16_LEN;
2370 memset(buffer, 0, RC16_LEN);
2371
2372 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
2373 buffer, RC16_LEN, &sshdr,
2374 SD_TIMEOUT, sdkp->max_retries, NULL);
2375
2376 if (media_not_present(sdkp, &sshdr))
2377 return -ENODEV;
2378
2379 if (the_result > 0) {
2380 sense_valid = scsi_sense_valid(&sshdr);
2381 if (sense_valid &&
2382 sshdr.sense_key == ILLEGAL_REQUEST &&
2383 (sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
2384 sshdr.ascq == 0x00)
2385 /* Invalid Command Operation Code or
2386 * Invalid Field in CDB, just retry
2387 * silently with RC10 */
2388 return -EINVAL;
2389 if (sense_valid &&
2390 sshdr.sense_key == UNIT_ATTENTION &&
2391 sshdr.asc == 0x29 && sshdr.ascq == 0x00)
2392 /* Device reset might occur several times,
2393 * give it one more chance */
2394 if (--reset_retries > 0)
2395 continue;
2396 }
2397 retries--;
2398
2399 } while (the_result && retries);
2400
2401 if (the_result) {
2402 sd_print_result(sdkp, "Read Capacity(16) failed", the_result);
2403 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
2404 return -EINVAL;
2405 }
2406
2407 sector_size = get_unaligned_be32(&buffer[8]);
2408 lba = get_unaligned_be64(&buffer[0]);
2409
2410 if (sd_read_protection_type(sdkp, buffer) < 0) {
2411 sdkp->capacity = 0;
2412 return -ENODEV;
2413 }
2414
2415 /* Logical blocks per physical block exponent */
2416 sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
2417
2418 /* RC basis */
2419 sdkp->rc_basis = (buffer[12] >> 4) & 0x3;
2420
2421 /* Lowest aligned logical block */
2422 alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
2423 blk_queue_alignment_offset(sdp->request_queue, alignment);
2424 if (alignment && sdkp->first_scan)
2425 sd_printk(KERN_NOTICE, sdkp,
2426 "physical block alignment offset: %u\n", alignment);
2427
2428 if (buffer[14] & 0x80) { /* LBPME */
2429 sdkp->lbpme = 1;
2430
2431 if (buffer[14] & 0x40) /* LBPRZ */
2432 sdkp->lbprz = 1;
2433
2434 sd_config_discard(sdkp, SD_LBP_WS16);
2435 }
2436
2437 sdkp->capacity = lba + 1;
2438 return sector_size;
2439 }
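
/*
 * Worked example (illustration only, values made up) of the parsing above:
 * a READ CAPACITY(16) response with bytes 0..7 = 0xe8e088af (last LBA),
 * bytes 8..11 = 512 (logical block length) and byte 13 = 0x03 yields
 *
 *	sdkp->capacity            = 0xe8e088af + 1 logical blocks
 *	sector_size               = 512
 *	sdkp->physical_block_size = (1 << 3) * 512 = 4096
 *
 * i.e. a 512-byte-emulation drive with 4096-byte physical blocks.
 */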
2440
2441 static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
2442 unsigned char *buffer)
2443 {
2444 unsigned char cmd[16];
2445 struct scsi_sense_hdr sshdr;
2446 int sense_valid = 0;
2447 int the_result;
2448 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
2449 sector_t lba;
2450 unsigned sector_size;
2451
2452 do {
2453 cmd[0] = READ_CAPACITY;
2454 memset(&cmd[1], 0, 9);
2455 memset(buffer, 0, 8);
2456
2457 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
2458 buffer, 8, &sshdr,
2459 SD_TIMEOUT, sdkp->max_retries, NULL);
2460
2461 if (media_not_present(sdkp, &sshdr))
2462 return -ENODEV;
2463
2464 if (the_result > 0) {
2465 sense_valid = scsi_sense_valid(&sshdr);
2466 if (sense_valid &&
2467 sshdr.sense_key == UNIT_ATTENTION &&
2468 sshdr.asc == 0x29 && sshdr.ascq == 0x00)
2469 /* Device reset might occur several times,
2470 * give it one more chance */
2471 if (--reset_retries > 0)
2472 continue;
2473 }
2474 retries--;
2475
2476 } while (the_result && retries);
2477
2478 if (the_result) {
2479 sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
2480 read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
2481 return -EINVAL;
2482 }
2483
2484 sector_size = get_unaligned_be32(&buffer[4]);
2485 lba = get_unaligned_be32(&buffer[0]);
2486
2487 if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) {
2488 /* Some buggy (usb cardreader) devices return an lba of
2489 0xffffffff when they want to report a size of 0 (with
2490 which they really mean no media is present) */
2491 sdkp->capacity = 0;
2492 sdkp->physical_block_size = sector_size;
2493 return sector_size;
2494 }
2495
2496 sdkp->capacity = lba + 1;
2497 sdkp->physical_block_size = sector_size;
2498 return sector_size;
2499 }
2500
2501 static int sd_try_rc16_first(struct scsi_device *sdp)
2502 {
2503 if (sdp->host->max_cmd_len < 16)
2504 return 0;
2505 if (sdp->try_rc_10_first)
2506 return 0;
2507 if (sdp->scsi_level > SCSI_SPC_2)
2508 return 1;
2509 if (scsi_device_protection(sdp))
2510 return 1;
2511 return 0;
2512 }
2513
2514 /*
2515 * read disk capacity
2516 */
2517 static void
2518 sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
2519 {
2520 int sector_size;
2521 struct scsi_device *sdp = sdkp->device;
2522
2523 if (sd_try_rc16_first(sdp)) {
2524 sector_size = read_capacity_16(sdkp, sdp, buffer);
2525 if (sector_size == -EOVERFLOW)
2526 goto got_data;
2527 if (sector_size == -ENODEV)
2528 return;
2529 if (sector_size < 0)
2530 sector_size = read_capacity_10(sdkp, sdp, buffer);
2531 if (sector_size < 0)
2532 return;
2533 } else {
2534 sector_size = read_capacity_10(sdkp, sdp, buffer);
2535 if (sector_size == -EOVERFLOW)
2536 goto got_data;
2537 if (sector_size < 0)
2538 return;
2539 if ((sizeof(sdkp->capacity) > 4) &&
2540 (sdkp->capacity > 0xffffffffULL)) {
2541 int old_sector_size = sector_size;
2542 sd_printk(KERN_NOTICE, sdkp, "Very big device. "
2543 "Trying to use READ CAPACITY(16).\n");
2544 sector_size = read_capacity_16(sdkp, sdp, buffer);
2545 if (sector_size < 0) {
2546 sd_printk(KERN_NOTICE, sdkp,
2547 "Using 0xffffffff as device size\n");
2548 sdkp->capacity = 1 + (sector_t) 0xffffffff;
2549 sector_size = old_sector_size;
2550 goto got_data;
2551 }
2552 /* Remember that READ CAPACITY(16) succeeded */
2553 sdp->try_rc_10_first = 0;
2554 }
2555 }
2556
2557 /* Some devices are known to return the total number of blocks,
2558 * not the highest block number. Some devices have versions
2559 * which do this and others which do not. Some devices we might
2560 * suspect of doing this but we don't know for certain.
2561 *
2562 * If we know the reported capacity is wrong, decrement it. If
2563 * we can only guess, then assume the number of blocks is even
2564 * (usually true but not always) and err on the side of lowering
2565 * the capacity.
2566 */
2567 if (sdp->fix_capacity ||
2568 (sdp->guess_capacity && (sdkp->capacity & 0x01))) {
2569 sd_printk(KERN_INFO, sdkp, "Adjusting the sector count "
2570 "from its reported value: %llu\n",
2571 (unsigned long long) sdkp->capacity);
2572 --sdkp->capacity;
2573 }
2574
2575 got_data:
2576 if (sector_size == 0) {
2577 sector_size = 512;
2578 sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, "
2579 "assuming 512.\n");
2580 }
2581
2582 if (sector_size != 512 &&
2583 sector_size != 1024 &&
2584 sector_size != 2048 &&
2585 sector_size != 4096) {
2586 sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
2587 sector_size);
2588 /*
2589 * The user might want to re-format the drive with
2590 * a supported sector size. Once this happens, it
2591 * would be relatively trivial to set the device up.
2592 * For this reason, we leave the device in the table.
2593 */
2594 sdkp->capacity = 0;
2595 /*
2596 * set a bogus sector size so the normal read/write
2597 * logic in the block layer will eventually refuse any
2598 * request on this device without tripping over power
2599 * of two sector size assumptions
2600 */
2601 sector_size = 512;
2602 }
2603 blk_queue_logical_block_size(sdp->request_queue, sector_size);
2604 blk_queue_physical_block_size(sdp->request_queue,
2605 sdkp->physical_block_size);
2606 sdkp->device->sector_size = sector_size;
2607
2608 if (sdkp->capacity > 0xffffffff)
2609 sdp->use_16_for_rw = 1;
2610
2611 }
2612
2613 /*
2614 * Print disk capacity
2615 */
2616 static void
2617 sd_print_capacity(struct scsi_disk *sdkp,
2618 sector_t old_capacity)
2619 {
2620 int sector_size = sdkp->device->sector_size;
2621 char cap_str_2[10], cap_str_10[10];
2622
2623 if (!sdkp->first_scan && old_capacity == sdkp->capacity)
2624 return;
2625
2626 string_get_size(sdkp->capacity, sector_size,
2627 STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
2628 string_get_size(sdkp->capacity, sector_size,
2629 STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
2630
2631 sd_printk(KERN_NOTICE, sdkp,
2632 "%llu %d-byte logical blocks: (%s/%s)\n",
2633 (unsigned long long)sdkp->capacity,
2634 sector_size, cap_str_10, cap_str_2);
2635
2636 if (sdkp->physical_block_size != sector_size)
2637 sd_printk(KERN_NOTICE, sdkp,
2638 "%u-byte physical blocks\n",
2639 sdkp->physical_block_size);
2640 }
2641
2642 /* called with buffer of length 512 */
2643 static inline int
2644 sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage,
2645 unsigned char *buffer, int len, struct scsi_mode_data *data,
2646 struct scsi_sense_hdr *sshdr)
2647 {
2648 return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len,
2649 SD_TIMEOUT, sdkp->max_retries, data,
2650 sshdr);
2651 }
2652
2653 /*
2654 * read write protect setting, if possible - called only in sd_revalidate_disk()
2655 * called with buffer of length SD_BUF_SIZE
2656 */
2657 static void
2658 sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
2659 {
2660 int res;
2661 struct scsi_device *sdp = sdkp->device;
2662 struct scsi_mode_data data;
2663 int old_wp = sdkp->write_prot;
2664
2665 set_disk_ro(sdkp->disk, 0);
2666 if (sdp->skip_ms_page_3f) {
2667 sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
2668 return;
2669 }
2670
2671 if (sdp->use_192_bytes_for_3f) {
2672 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL);
2673 } else {
2674 /*
2675 * First attempt: ask for all pages (0x3F), but only 4 bytes.
2676 * We have to start carefully: some devices hang if we ask
2677 * for more than is available.
2678 */
2679 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL);
2680
2681 /*
2682 * Second attempt: ask for page 0. When only page 0 is
2683 * implemented, a request for page 3F may return Sense Key
2684 * 5: Illegal Request, Sense Code 24: Invalid field in
2685 * CDB.
2686 */
2687 if (res < 0)
2688 res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL);
2689
2690 /*
2691 * Third attempt: ask for 255 bytes, as we did earlier.
2692 */
2693 if (res < 0)
2694 res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255,
2695 &data, NULL);
2696 }
2697
2698 if (res < 0) {
2699 sd_first_printk(KERN_WARNING, sdkp,
2700 "Test WP failed, assume Write Enabled\n");
2701 } else {
2702 sdkp->write_prot = ((data.device_specific & 0x80) != 0);
2703 set_disk_ro(sdkp->disk, sdkp->write_prot);
2704 if (sdkp->first_scan || old_wp != sdkp->write_prot) {
2705 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
2706 sdkp->write_prot ? "on" : "off");
2707 sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
2708 }
2709 }
2710 }
2711
2712 /*
2713 * sd_read_cache_type - called only from sd_revalidate_disk()
2714 * called with buffer of length SD_BUF_SIZE
2715 */
2716 static void
2717 sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2718 {
2719 int len = 0, res;
2720 struct scsi_device *sdp = sdkp->device;
2721
2722 int dbd;
2723 int modepage;
2724 int first_len;
2725 struct scsi_mode_data data;
2726 struct scsi_sense_hdr sshdr;
2727 int old_wce = sdkp->WCE;
2728 int old_rcd = sdkp->RCD;
2729 int old_dpofua = sdkp->DPOFUA;
2730
2731
2732 if (sdkp->cache_override)
2733 return;
2734
2735 first_len = 4;
2736 if (sdp->skip_ms_page_8) {
2737 if (sdp->type == TYPE_RBC)
2738 goto defaults;
2739 else {
2740 if (sdp->skip_ms_page_3f)
2741 goto defaults;
2742 modepage = 0x3F;
2743 if (sdp->use_192_bytes_for_3f)
2744 first_len = 192;
2745 dbd = 0;
2746 }
2747 } else if (sdp->type == TYPE_RBC) {
2748 modepage = 6;
2749 dbd = 8;
2750 } else {
2751 modepage = 8;
2752 dbd = 0;
2753 }
2754
2755 /* cautiously ask */
2756 res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len,
2757 &data, &sshdr);
2758
2759 if (res < 0)
2760 goto bad_sense;
2761
2762 if (!data.header_length) {
2763 modepage = 6;
2764 first_len = 0;
2765 sd_first_printk(KERN_ERR, sdkp,
2766 "Missing header in MODE_SENSE response\n");
2767 }
2768
2769 /* that went OK, now ask for the proper length */
2770 len = data.length;
2771
2772 /*
2773 * We're only interested in the first three bytes, actually.
2774 * But the data cache page is defined for the first 20.
2775 */
2776 if (len < 3)
2777 goto bad_sense;
2778 else if (len > SD_BUF_SIZE) {
2779 sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
2780 "data from %d to %d bytes\n", len, SD_BUF_SIZE);
2781 len = SD_BUF_SIZE;
2782 }
2783 if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
2784 len = 192;
2785
2786 /* Get the data */
2787 if (len > first_len)
2788 res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len,
2789 &data, &sshdr);
2790
2791 if (!res) {
2792 int offset = data.header_length + data.block_descriptor_length;
2793
2794 while (offset < len) {
2795 u8 page_code = buffer[offset] & 0x3F;
2796 u8 spf = buffer[offset] & 0x40;
2797
2798 if (page_code == 8 || page_code == 6) {
2799 /* We're interested only in the first 3 bytes.
2800 */
2801 if (len - offset <= 2) {
2802 sd_first_printk(KERN_ERR, sdkp,
2803 "Incomplete mode parameter "
2804 "data\n");
2805 goto defaults;
2806 } else {
2807 modepage = page_code;
2808 goto Page_found;
2809 }
2810 } else {
2811 /* Go to the next page */
2812 if (spf && len - offset > 3)
2813 offset += 4 + (buffer[offset+2] << 8) +
2814 buffer[offset+3];
2815 else if (!spf && len - offset > 1)
2816 offset += 2 + buffer[offset+1];
2817 else {
2818 sd_first_printk(KERN_ERR, sdkp,
2819 "Incomplete mode "
2820 "parameter data\n");
2821 goto defaults;
2822 }
2823 }
2824 }
2825
2826 sd_first_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
2827 goto defaults;
2828
2829 Page_found:
2830 if (modepage == 8) {
2831 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
2832 sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
2833 } else {
2834 sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0);
2835 sdkp->RCD = 0;
2836 }
2837
2838 sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
2839 if (sdp->broken_fua) {
2840 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
2841 sdkp->DPOFUA = 0;
2842 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
2843 !sdkp->device->use_16_for_rw) {
2844 sd_first_printk(KERN_NOTICE, sdkp,
2845 "Uses READ/WRITE(6), disabling FUA\n");
2846 sdkp->DPOFUA = 0;
2847 }
2848
2849 /* No cache flush allowed for write protected devices */
2850 if (sdkp->WCE && sdkp->write_prot)
2851 sdkp->WCE = 0;
2852
2853 if (sdkp->first_scan || old_wce != sdkp->WCE ||
2854 old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
2855 sd_printk(KERN_NOTICE, sdkp,
2856 "Write cache: %s, read cache: %s, %s\n",
2857 sdkp->WCE ? "enabled" : "disabled",
2858 sdkp->RCD ? "disabled" : "enabled",
2859 sdkp->DPOFUA ? "supports DPO and FUA"
2860 : "doesn't support DPO or FUA");
2861
2862 return;
2863 }
2864
2865 bad_sense:
2866 if (scsi_sense_valid(&sshdr) &&
2867 sshdr.sense_key == ILLEGAL_REQUEST &&
2868 sshdr.asc == 0x24 && sshdr.ascq == 0x0)
2869 /* Invalid field in CDB */
2870 sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
2871 else
2872 sd_first_printk(KERN_ERR, sdkp,
2873 "Asking for cache data failed\n");
2874
2875 defaults:
2876 if (sdp->wce_default_on) {
2877 sd_first_printk(KERN_NOTICE, sdkp,
2878 "Assuming drive cache: write back\n");
2879 sdkp->WCE = 1;
2880 } else {
2881 sd_first_printk(KERN_ERR, sdkp,
2882 "Assuming drive cache: write through\n");
2883 sdkp->WCE = 0;
2884 }
2885 sdkp->RCD = 0;
2886 sdkp->DPOFUA = 0;
2887 }
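
/*
 * Illustration only: for the common Caching mode page (page code 8), the
 * decode above uses byte 2 of the page, so buffer[offset + 2] = 0x04 gives
 * WCE = 1 (write-back caching) and RCD = 0 (read cache enabled), while
 * DPOFUA comes from bit 4 of the mode parameter header's device-specific
 * byte.
 */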
2888
2889 /*
2890 * The ATO bit indicates whether the DIF application tag is available
2891 * for use by the operating system.
2892 */
2893 static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
2894 {
2895 int res, offset;
2896 struct scsi_device *sdp = sdkp->device;
2897 struct scsi_mode_data data;
2898 struct scsi_sense_hdr sshdr;
2899
2900 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
2901 return;
2902
2903 if (sdkp->protection_type == 0)
2904 return;
2905
2906 res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
2907 sdkp->max_retries, &data, &sshdr);
2908
2909 if (res < 0 || !data.header_length ||
2910 data.length < 6) {
2911 sd_first_printk(KERN_WARNING, sdkp,
2912 "getting Control mode page failed, assume no ATO\n");
2913
2914 if (scsi_sense_valid(&sshdr))
2915 sd_print_sense_hdr(sdkp, &sshdr);
2916
2917 return;
2918 }
2919
2920 offset = data.header_length + data.block_descriptor_length;
2921
2922 if ((buffer[offset] & 0x3f) != 0x0a) {
2923 sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
2924 return;
2925 }
2926
2927 if ((buffer[offset + 5] & 0x80) == 0)
2928 return;
2929
2930 sdkp->ATO = 1;
2931
2932 return;
2933 }
2934
2935 /**
2936 * sd_read_block_limits - Query disk device for preferred I/O sizes.
2937 * @sdkp: disk to query
2938 */
2939 static void sd_read_block_limits(struct scsi_disk *sdkp)
2940 {
2941 unsigned int sector_sz = sdkp->device->sector_size;
2942 const int vpd_len = 64;
2943 unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
2944
2945 if (!buffer ||
2946 /* Block Limits VPD */
2947 scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
2948 goto out;
2949
2950 blk_queue_io_min(sdkp->disk->queue,
2951 get_unaligned_be16(&buffer[6]) * sector_sz);
2952
2953 sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
2954 sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);
2955
2956 if (buffer[3] == 0x3c) {
2957 unsigned int lba_count, desc_count;
2958
2959 sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);
2960
2961 if (!sdkp->lbpme)
2962 goto out;
2963
2964 lba_count = get_unaligned_be32(&buffer[20]);
2965 desc_count = get_unaligned_be32(&buffer[24]);
2966
2967 if (lba_count && desc_count)
2968 sdkp->max_unmap_blocks = lba_count;
2969
2970 sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);
2971
2972 if (buffer[32] & 0x80)
2973 sdkp->unmap_alignment =
2974 get_unaligned_be32(&buffer[32]) & ~(1 << 31);
2975
2976 if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
2977
2978 if (sdkp->max_unmap_blocks)
2979 sd_config_discard(sdkp, SD_LBP_UNMAP);
2980 else
2981 sd_config_discard(sdkp, SD_LBP_WS16);
2982
2983 } else { /* LBP VPD page tells us what to use */
2984 if (sdkp->lbpu && sdkp->max_unmap_blocks)
2985 sd_config_discard(sdkp, SD_LBP_UNMAP);
2986 else if (sdkp->lbpws)
2987 sd_config_discard(sdkp, SD_LBP_WS16);
2988 else if (sdkp->lbpws10)
2989 sd_config_discard(sdkp, SD_LBP_WS10);
2990 else
2991 sd_config_discard(sdkp, SD_LBP_DISABLE);
2992 }
2993 }
2994
2995 out:
2996 kfree(buffer);
2997 }
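
/*
 * Illustration only: the Block Limits VPD page (B0h) offsets consumed above,
 * with made-up values for a device using 512-byte logical blocks:
 *
 *	bytes  6..7   OPTIMAL TRANSFER LENGTH GRANULARITY = 8     -> io_min 4096
 *	bytes  8..11  MAXIMUM TRANSFER LENGTH              = 65535 blocks
 *	bytes 12..15  OPTIMAL TRANSFER LENGTH              = 2048 blocks (1 MiB)
 *	bytes 20..23  MAXIMUM UNMAP LBA COUNT              -> max_unmap_blocks
 *
 * opt_xfer_blocks is validated and turned into q->limits.io_opt later, in
 * sd_revalidate_disk().
 */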
2998
2999 /**
3000 * sd_read_block_characteristics - Query block dev. characteristics
3001 * @sdkp: disk to query
3002 */
3003 static void sd_read_block_characteristics(struct scsi_disk *sdkp)
3004 {
3005 struct request_queue *q = sdkp->disk->queue;
3006 unsigned char *buffer;
3007 u16 rot;
3008 const int vpd_len = 64;
3009
3010 buffer = kmalloc(vpd_len, GFP_KERNEL);
3011
3012 if (!buffer ||
3013 /* Block Device Characteristics VPD */
3014 scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
3015 goto out;
3016
3017 rot = get_unaligned_be16(&buffer[4]);
3018
3019 if (rot == 1) {
3020 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
3021 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
3022 }
3023
3024 if (sdkp->device->type == TYPE_ZBC) {
3025 /* Host-managed */
3026 blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM);
3027 } else {
3028 sdkp->zoned = (buffer[8] >> 4) & 3;
3029 if (sdkp->zoned == 1) {
3030 /* Host-aware */
3031 blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HA);
3032 } else {
3033 /* Regular disk or drive managed disk */
3034 blk_queue_set_zoned(sdkp->disk, BLK_ZONED_NONE);
3035 }
3036 }
3037
3038 if (!sdkp->first_scan)
3039 goto out;
3040
3041 if (blk_queue_is_zoned(q)) {
3042 sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
3043 q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
3044 } else {
3045 if (sdkp->zoned == 1)
3046 sd_printk(KERN_NOTICE, sdkp,
3047 "Host-aware SMR disk used as regular disk\n");
3048 else if (sdkp->zoned == 2)
3049 sd_printk(KERN_NOTICE, sdkp,
3050 "Drive-managed SMR disk\n");
3051 }
3052
3053 out:
3054 kfree(buffer);
3055 }
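
/*
 * Illustration only: in the Block Device Characteristics VPD page (B1h),
 * bytes 4..5 hold MEDIUM ROTATION RATE (0001h means non-rotational, i.e. an
 * SSD) and bits 5:4 of byte 8 hold the ZONED field read above (01b host
 * aware, 10b drive managed).
 */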
3056
3057 /**
3058 * sd_read_block_provisioning - Query provisioning VPD page
3059 * @sdkp: disk to query
3060 */
3061 static void sd_read_block_provisioning(struct scsi_disk *sdkp)
3062 {
3063 unsigned char *buffer;
3064 const int vpd_len = 8;
3065
3066 if (sdkp->lbpme == 0)
3067 return;
3068
3069 buffer = kmalloc(vpd_len, GFP_KERNEL);
3070
3071 if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
3072 goto out;
3073
3074 sdkp->lbpvpd = 1;
3075 sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */
3076 sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
3077 sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */
3078
3079 out:
3080 kfree(buffer);
3081 }
3082
3083 static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
3084 {
3085 struct scsi_device *sdev = sdkp->device;
3086
3087 if (sdev->host->no_write_same) {
3088 sdev->no_write_same = 1;
3089
3090 return;
3091 }
3092
3093 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
3094 /* too large values might cause issues with arcmsr */
3095 int vpd_buf_len = 64;
3096
3097 sdev->no_report_opcodes = 1;
3098
3099 /* Disable WRITE SAME if REPORT SUPPORTED OPERATION
3100 * CODES is unsupported and the device has an ATA
3101 * Information VPD page (SAT).
3102 */
3103 if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
3104 sdev->no_write_same = 1;
3105 }
3106
3107 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
3108 sdkp->ws16 = 1;
3109
3110 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1)
3111 sdkp->ws10 = 1;
3112 }
3113
3114 static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
3115 {
3116 struct scsi_device *sdev = sdkp->device;
3117
3118 if (!sdev->security_supported)
3119 return;
3120
3121 if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
3122 SECURITY_PROTOCOL_IN) == 1 &&
3123 scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
3124 SECURITY_PROTOCOL_OUT) == 1)
3125 sdkp->security = 1;
3126 }
3127
3128 /*
3129 * Determine the device's preferred I/O size for reads and writes
3130 * unless the reported value is unreasonably small, large, not a
3131 * multiple of the physical block size, or simply garbage.
3132 */
3133 static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
3134 unsigned int dev_max)
3135 {
3136 struct scsi_device *sdp = sdkp->device;
3137 unsigned int opt_xfer_bytes =
3138 logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3139
3140 if (sdkp->opt_xfer_blocks == 0)
3141 return false;
3142
3143 if (sdkp->opt_xfer_blocks > dev_max) {
3144 sd_first_printk(KERN_WARNING, sdkp,
3145 "Optimal transfer size %u logical blocks " \
3146 "> dev_max (%u logical blocks)\n",
3147 sdkp->opt_xfer_blocks, dev_max);
3148 return false;
3149 }
3150
3151 if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
3152 sd_first_printk(KERN_WARNING, sdkp,
3153 "Optimal transfer size %u logical blocks " \
3154 "> sd driver limit (%u logical blocks)\n",
3155 sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
3156 return false;
3157 }
3158
3159 if (opt_xfer_bytes < PAGE_SIZE) {
3160 sd_first_printk(KERN_WARNING, sdkp,
3161 "Optimal transfer size %u bytes < " \
3162 "PAGE_SIZE (%u bytes)\n",
3163 opt_xfer_bytes, (unsigned int)PAGE_SIZE);
3164 return false;
3165 }
3166
3167 if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
3168 sd_first_printk(KERN_WARNING, sdkp,
3169 "Optimal transfer size %u bytes not a " \
3170 "multiple of physical block size (%u bytes)\n",
3171 opt_xfer_bytes, sdkp->physical_block_size);
3172 return false;
3173 }
3174
3175 sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
3176 opt_xfer_bytes);
3177 return true;
3178 }
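
/*
 * Worked example (illustration only): with 512-byte logical blocks, a
 * 4096-byte physical block size and opt_xfer_blocks = 2048, the reported
 * optimal transfer size is 2048 * 512 = 1 MiB. It passes every check above
 * (non-zero, no larger than dev_max or SD_DEF_XFER_BLOCKS, at least
 * PAGE_SIZE, a multiple of 4096), so sd_revalidate_disk() uses it for
 * q->limits.io_opt and for rw_max.
 */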
3179
3180 /**
3181 * sd_revalidate_disk - called the first time a new disk is seen,
3182 * performs disk spin up, read_capacity, etc.
3183 * @disk: struct gendisk we care about
3184 **/
3185 static int sd_revalidate_disk(struct gendisk *disk)
3186 {
3187 struct scsi_disk *sdkp = scsi_disk(disk);
3188 struct scsi_device *sdp = sdkp->device;
3189 struct request_queue *q = sdkp->disk->queue;
3190 sector_t old_capacity = sdkp->capacity;
3191 unsigned char *buffer;
3192 unsigned int dev_max, rw_max;
3193
3194 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
3195 "sd_revalidate_disk\n"));
3196
3197 /*
3198 * If the device is offline, don't try and read capacity or any
3199 * of the other niceties.
3200 */
3201 if (!scsi_device_online(sdp))
3202 goto out;
3203
3204 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
3205 if (!buffer) {
3206 sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
3207 "allocation failure.\n");
3208 goto out;
3209 }
3210
3211 sd_spinup_disk(sdkp);
3212
3213 /*
3214 * Without media there is no reason to ask; moreover, some devices
3215 * react badly if we do.
3216 */
3217 if (sdkp->media_present) {
3218 sd_read_capacity(sdkp, buffer);
3219
3220 /*
3221 * set the default to rotational. All non-rotational devices
3222 * support the block characteristics VPD page, which will
3223 * cause this to be updated correctly and any device which
3224 * doesn't support it should be treated as rotational.
3225 */
3226 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
3227 blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
3228
3229 if (scsi_device_supports_vpd(sdp)) {
3230 sd_read_block_provisioning(sdkp);
3231 sd_read_block_limits(sdkp);
3232 sd_read_block_characteristics(sdkp);
3233 sd_zbc_read_zones(sdkp, buffer);
3234 }
3235
3236 sd_print_capacity(sdkp, old_capacity);
3237
3238 sd_read_write_protect_flag(sdkp, buffer);
3239 sd_read_cache_type(sdkp, buffer);
3240 sd_read_app_tag_own(sdkp, buffer);
3241 sd_read_write_same(sdkp, buffer);
3242 sd_read_security(sdkp, buffer);
3243 }
3244
3245 /*
3246 * We now have all cache related info, determine how we deal
3247 * with flush requests.
3248 */
3249 sd_set_flush_flag(sdkp);
3250
3251 /* Initial block count limit based on CDB TRANSFER LENGTH field size. */
3252 dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
3253
3254 /* Some devices report a maximum block count for READ/WRITE requests. */
3255 dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
3256 q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
3257
3258 if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
3259 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3260 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
3261 } else {
3262 q->limits.io_opt = 0;
3263 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
3264 (sector_t)BLK_DEF_MAX_SECTORS);
3265 }
3266
3267 /* Do not exceed controller limit */
3268 rw_max = min(rw_max, queue_max_hw_sectors(q));
3269
3270 /*
3271 * Only update max_sectors if previously unset or if the current value
3272 * exceeds the capabilities of the hardware.
3273 */
3274 if (sdkp->first_scan ||
3275 q->limits.max_sectors > q->limits.max_dev_sectors ||
3276 q->limits.max_sectors > q->limits.max_hw_sectors)
3277 q->limits.max_sectors = rw_max;
3278
3279 sdkp->first_scan = 0;
3280
3281 set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
3282 sd_config_write_same(sdkp);
3283 kfree(buffer);
3284
3285 /*
3286 * For a zoned drive, revalidating the zones can be done only once
3287 * the gendisk capacity is set. So if this fails, set back the gendisk
3288 * capacity to 0.
3289 */
3290 if (sd_zbc_revalidate_zones(sdkp))
3291 set_capacity_and_notify(disk, 0);
3292
3293 out:
3294 return 0;
3295 }
3296
3297 /**
3298 * sd_unlock_native_capacity - unlock native capacity
3299 * @disk: struct gendisk to set capacity for
3300 *
3301 * Block layer calls this function if it detects that partitions
3302 * on @disk reach beyond the end of the device. If the SCSI host
3303 * implements the ->unlock_native_capacity() method, it's invoked to
3304 * give it a chance to adjust the device capacity.
3305 *
3306 * CONTEXT:
3307 * Defined by block layer. Might sleep.
3308 */
3309 static void sd_unlock_native_capacity(struct gendisk *disk)
3310 {
3311 struct scsi_device *sdev = scsi_disk(disk)->device;
3312
3313 if (sdev->host->hostt->unlock_native_capacity)
3314 sdev->host->hostt->unlock_native_capacity(sdev);
3315 }
3316
3317 /**
3318 * sd_format_disk_name - format disk name
3319 * @prefix: name prefix - ie. "sd" for SCSI disks
3320 * @index: index of the disk to format name for
3321 * @buf: output buffer
3322 * @buflen: length of the output buffer
3323 *
3324 * SCSI disk names start at sda. The 26th device is sdz and the
3325 * 27th is sdaa. The last two-lettered name is sdzz, which is
3326 * followed by sdaaa.
3327 *
3328 * This is basically base-26 counting with one extra 'nil' entry
3329 * at the beginning of every digit past the first, and can be
3330 * computed like a base-26 conversion in which the index is
3331 * decremented by one after each digit is produced.
3332 *
3333 * CONTEXT:
3334 * Don't care.
3335 *
3336 * RETURNS:
3337 * 0 on success, -errno on failure.
3338 */
3339 static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
3340 {
3341 const int base = 'z' - 'a' + 1;
3342 char *begin = buf + strlen(prefix);
3343 char *end = buf + buflen;
3344 char *p;
3345 int unit;
3346
3347 p = end - 1;
3348 *p = '\0';
3349 unit = base;
3350 do {
3351 if (p == begin)
3352 return -EINVAL;
3353 *--p = 'a' + (index % unit);
3354 index = (index / unit) - 1;
3355 } while (index >= 0);
3356
3357 memmove(begin, p, end - p);
3358 memcpy(buf, prefix, strlen(prefix));
3359
3360 return 0;
3361 }
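
/*
 * Illustration only: with the "sd" prefix the mapping above gives
 *
 *	index   0 -> "sda"      index  25 -> "sdz"
 *	index  26 -> "sdaa"     index 701 -> "sdzz"
 *	index 702 -> "sdaaa"
 *
 * i.e. bijective base-26 naming rather than plain positional base-26.
 */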
3362
3363 /**
3364 * sd_probe - called during driver initialization and whenever a
3365 * new scsi device is attached to the system. It is called once
3366 * for each scsi device (not just disks) present.
3367 * @dev: pointer to device object
3368 *
3369 * Returns 0 if successful (or if not interested in this scsi device,
3370 * e.g. a scanner); 1 when there is an error.
3371 *
3372 * Note: this function is invoked from the scsi mid-level.
3373 * This function sets up the mapping between a given
3374 * <host,channel,id,lun> (found in sdp) and new device name
3375 * (e.g. /dev/sda). More precisely it is the block device major
3376 * and minor number that is chosen here.
3377 *
3378 * Assume sd_probe is not re-entrant (for the time being).
3379 * Also consider sd_probe() and sd_remove() running concurrently.
3380 **/
3381 static int sd_probe(struct device *dev)
3382 {
3383 struct scsi_device *sdp = to_scsi_device(dev);
3384 struct scsi_disk *sdkp;
3385 struct gendisk *gd;
3386 int index;
3387 int error;
3388
3389 scsi_autopm_get_device(sdp);
3390 error = -ENODEV;
3391 if (sdp->type != TYPE_DISK &&
3392 sdp->type != TYPE_ZBC &&
3393 sdp->type != TYPE_MOD &&
3394 sdp->type != TYPE_RBC)
3395 goto out;
3396
3397 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && sdp->type == TYPE_ZBC) {
3398 sdev_printk(KERN_WARNING, sdp,
3399 "Unsupported ZBC host-managed device.\n");
3400 goto out;
3401 }
3402
3403 SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
3404 "sd_probe\n"));
3405
3406 error = -ENOMEM;
3407 sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
3408 if (!sdkp)
3409 goto out;
3410
3411 gd = alloc_disk(SD_MINORS);
3412 if (!gd)
3413 goto out_free;
3414
3415 index = ida_alloc(&sd_index_ida, GFP_KERNEL);
3416 if (index < 0) {
3417 sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
3418 goto out_put;
3419 }
3420
3421 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
3422 if (error) {
3423 sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
3424 goto out_free_index;
3425 }
3426
3427 sdkp->device = sdp;
3428 sdkp->driver = &sd_template;
3429 sdkp->disk = gd;
3430 sdkp->index = index;
3431 sdkp->max_retries = SD_MAX_RETRIES;
3432 atomic_set(&sdkp->openers, 0);
3433 atomic_set(&sdkp->device->ioerr_cnt, 0);
3434
3435 if (!sdp->request_queue->rq_timeout) {
3436 if (sdp->type != TYPE_MOD)
3437 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
3438 else
3439 blk_queue_rq_timeout(sdp->request_queue,
3440 SD_MOD_TIMEOUT);
3441 }
3442
3443 device_initialize(&sdkp->dev);
3444 sdkp->dev.parent = dev;
3445 sdkp->dev.class = &sd_disk_class;
3446 dev_set_name(&sdkp->dev, "%s", dev_name(dev));
3447
3448 error = device_add(&sdkp->dev);
3449 if (error)
3450 goto out_free_index;
3451
3452 get_device(dev);
3453 dev_set_drvdata(dev, sdkp);
3454
3455 gd->major = sd_major((index & 0xf0) >> 4);
3456 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
3457
3458 gd->fops = &sd_fops;
3459 gd->private_data = &sdkp->driver;
3460 gd->queue = sdkp->device->request_queue;
3461
3462 /* defaults, until the device tells us otherwise */
3463 sdp->sector_size = 512;
3464 sdkp->capacity = 0;
3465 sdkp->media_present = 1;
3466 sdkp->write_prot = 0;
3467 sdkp->cache_override = 0;
3468 sdkp->WCE = 0;
3469 sdkp->RCD = 0;
3470 sdkp->ATO = 0;
3471 sdkp->first_scan = 1;
3472 sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
3473
3474 sd_revalidate_disk(gd);
3475
3476 gd->flags = GENHD_FL_EXT_DEVT;
3477 if (sdp->removable) {
3478 gd->flags |= GENHD_FL_REMOVABLE;
3479 gd->events |= DISK_EVENT_MEDIA_CHANGE;
3480 gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
3481 }
3482
3483 blk_pm_runtime_init(sdp->request_queue, dev);
3484 if (sdp->rpm_autosuspend) {
3485 pm_runtime_set_autosuspend_delay(dev,
3486 sdp->host->hostt->rpm_autosuspend_delay);
3487 }
3488 device_add_disk(dev, gd, NULL);
3489 if (sdkp->capacity)
3490 sd_dif_config_host(sdkp);
3491
3492 sd_revalidate_disk(gd);
3493
3494 if (sdkp->security) {
3495 sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit);
3496 if (sdkp->opal_dev)
3497 sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
3498 }
3499
3500 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
3501 sdp->removable ? "removable " : "");
3502 scsi_autopm_put_device(sdp);
3503
3504 return 0;
3505
3506 out_free_index:
3507 ida_free(&sd_index_ida, index);
3508 out_put:
3509 put_disk(gd);
3510 out_free:
3511 sd_zbc_release_disk(sdkp);
3512 kfree(sdkp);
3513 out:
3514 scsi_autopm_put_device(sdp);
3515 return error;
3516 }
3517
3518 /**
3519 * sd_remove - called whenever a scsi disk (previously recognized by
3520 * sd_probe) is detached from the system. It is called (potentially
3521 * multiple times) during sd module unload.
3522 * @dev: pointer to device object
3523 *
3524 * Note: this function is invoked from the scsi mid-level.
3525 * This function potentially frees up a device name (e.g. /dev/sdc)
3526 * that could be re-used by a subsequent sd_probe().
3527 * This function is not called when the built-in sd driver is "exit-ed".
3528 **/
3529 static int sd_remove(struct device *dev)
3530 {
3531 struct scsi_disk *sdkp;
3532
3533 sdkp = dev_get_drvdata(dev);
3534 scsi_autopm_get_device(sdkp->device);
3535
3536 async_synchronize_full_domain(&scsi_sd_pm_domain);
3537 device_del(&sdkp->dev);
3538 del_gendisk(sdkp->disk);
3539 sd_shutdown(dev);
3540
3541 free_opal_dev(sdkp->opal_dev);
3542
3543 mutex_lock(&sd_ref_mutex);
3544 dev_set_drvdata(dev, NULL);
3545 put_device(&sdkp->dev);
3546 mutex_unlock(&sd_ref_mutex);
3547
3548 return 0;
3549 }
3550
3551 /**
3552 * scsi_disk_release - Called to free the scsi_disk structure
3553 * @dev: pointer to embedded class device
3554 *
3555 * sd_ref_mutex must be held entering this routine. Because it is
3556 * called on last put, you should always use the scsi_disk_get() and
3557 * scsi_disk_put() helpers, which manipulate the mutex directly,
3558 * and never do a direct put_device.
3559 **/
3560 static void scsi_disk_release(struct device *dev)
3561 {
3562 struct scsi_disk *sdkp = to_scsi_disk(dev);
3563 struct gendisk *disk = sdkp->disk;
3564 struct request_queue *q = disk->queue;
3565
3566 ida_free(&sd_index_ida, sdkp->index);
3567
3568 /*
3569 * Wait until all requests that are in progress have completed.
3570 * This is necessary to avoid that e.g. scsi_end_request() crashes
3571 * due to clearing the disk->private_data pointer. Wait from inside
3572 * scsi_disk_release() instead of from sd_release() to avoid that
3573 * freezing and unfreezing the request queue affects user space I/O
3574 * in case multiple processes open a /dev/sd... node concurrently.
3575 */
3576 blk_mq_freeze_queue(q);
3577 blk_mq_unfreeze_queue(q);
3578
3579 disk->private_data = NULL;
3580 put_disk(disk);
3581 put_device(&sdkp->device->sdev_gendev);
3582
3583 sd_zbc_release_disk(sdkp);
3584
3585 kfree(sdkp);
3586 }
3587
3588 static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
3589 {
3590 unsigned char cmd[6] = { START_STOP }; /* START_VALID */
3591 struct scsi_sense_hdr sshdr;
3592 struct scsi_device *sdp = sdkp->device;
3593 int res;
3594
3595 if (start)
3596 cmd[4] |= 1; /* START */
3597
3598 if (sdp->start_stop_pwr_cond)
3599 cmd[4] |= start ? 1 << 4 : 3 << 4; /* Active or Standby */
3600
3601 if (!scsi_device_online(sdp))
3602 return -ENODEV;
3603
3604 res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
3605 SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL);
3606 if (res) {
3607 sd_print_result(sdkp, "Start/Stop Unit failed", res);
3608 if (res > 0 && scsi_sense_valid(&sshdr)) {
3609 sd_print_sense_hdr(sdkp, &sshdr);
3610 /* 0x3a is medium not present */
3611 if (sshdr.asc == 0x3a)
3612 res = 0;
3613 }
3614 }
3615
3616 /* SCSI error codes must not go to the generic layer */
3617 if (res)
3618 return -EIO;
3619
3620 return 0;
3621 }
3622
3623 /*
3624 * Send a SYNCHRONIZE CACHE instruction down to the device through
3625 * the normal SCSI command structure. Wait for the command to
3626 * complete.
3627 */
3628 static void sd_shutdown(struct device *dev)
3629 {
3630 struct scsi_disk *sdkp = dev_get_drvdata(dev);
3631
3632 if (!sdkp)
3633 return; /* this can happen */
3634
3635 if (pm_runtime_suspended(dev))
3636 return;
3637
3638 if (sdkp->WCE && sdkp->media_present) {
3639 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
3640 sd_sync_cache(sdkp, NULL);
3641 }
3642
3643 if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
3644 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
3645 sd_start_stop_device(sdkp, 0);
3646 }
3647 }
3648
3649 static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
3650 {
3651 struct scsi_disk *sdkp = dev_get_drvdata(dev);
3652 struct scsi_sense_hdr sshdr;
3653 int ret = 0;
3654
3655 if (!sdkp) /* E.g.: runtime suspend following sd_remove() */
3656 return 0;
3657
3658 if (sdkp->WCE && sdkp->media_present) {
3659 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
3660 ret = sd_sync_cache(sdkp, &sshdr);
3661
3662 if (ret) {
3663 /* ignore OFFLINE device */
3664 if (ret == -ENODEV)
3665 return 0;
3666
3667 if (!scsi_sense_valid(&sshdr) ||
3668 sshdr.sense_key != ILLEGAL_REQUEST)
3669 return ret;
3670
3671 /*
3672 * sshdr.sense_key == ILLEGAL_REQUEST means this drive
3673 * doesn't support sync. There's not much to do and
3674 * suspend shouldn't fail.
3675 */
3676 ret = 0;
3677 }
3678 }
3679
3680 if (sdkp->device->manage_start_stop) {
3681 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
3682 /* an error is not worth aborting a system sleep */
3683 ret = sd_start_stop_device(sdkp, 0);
3684 if (ignore_stop_errors)
3685 ret = 0;
3686 }
3687
3688 return ret;
3689 }
3690
3691 static int sd_suspend_system(struct device *dev)
3692 {
3693 return sd_suspend_common(dev, true);
3694 }
3695
3696 static int sd_suspend_runtime(struct device *dev)
3697 {
3698 return sd_suspend_common(dev, false);
3699 }
3700
3701 static int sd_resume(struct device *dev)
3702 {
3703 struct scsi_disk *sdkp = dev_get_drvdata(dev);
3704 int ret;
3705
3706 if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
3707 return 0;
3708
3709 if (!sdkp->device->manage_start_stop)
3710 return 0;
3711
3712 sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
3713 ret = sd_start_stop_device(sdkp, 1);
3714 if (!ret)
3715 opal_unlock_from_suspend(sdkp->opal_dev);
3716 return ret;
3717 }
3718
3719 /**
3720 * init_sd - entry point for this driver (both when built in or when
3721 * a module).
3722 *
3723 * Note: this function registers this driver with the scsi mid-level.
3724 **/
3725 static int __init init_sd(void)
3726 {
3727 int majors = 0, i, err;
3728
3729 SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));
3730
3731 for (i = 0; i < SD_MAJORS; i++) {
3732 if (__register_blkdev(sd_major(i), "sd", sd_default_probe))
3733 continue;
3734 majors++;
3735 }
3736
3737 if (!majors)
3738 return -ENODEV;
3739
3740 err = class_register(&sd_disk_class);
3741 if (err)
3742 goto err_out;
3743
3744 sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
3745 0, 0, NULL);
3746 if (!sd_cdb_cache) {
3747 printk(KERN_ERR "sd: can't init extended cdb cache\n");
3748 err = -ENOMEM;
3749 goto err_out_class;
3750 }
3751
3752 sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache);
3753 if (!sd_cdb_pool) {
3754 printk(KERN_ERR "sd: can't init extended cdb pool\n");
3755 err = -ENOMEM;
3756 goto err_out_cache;
3757 }
3758
3759 sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
3760 if (!sd_page_pool) {
3761 printk(KERN_ERR "sd: can't init discard page pool\n");
3762 err = -ENOMEM;
3763 goto err_out_ppool;
3764 }
3765
3766 err = scsi_register_driver(&sd_template.gendrv);
3767 if (err)
3768 goto err_out_driver;
3769
3770 return 0;
3771
3772 err_out_driver:
3773 mempool_destroy(sd_page_pool);
3774
3775 err_out_ppool:
3776 mempool_destroy(sd_cdb_pool);
3777
3778 err_out_cache:
3779 kmem_cache_destroy(sd_cdb_cache);
3780
3781 err_out_class:
3782 class_unregister(&sd_disk_class);
3783 err_out:
3784 for (i = 0; i < SD_MAJORS; i++)
3785 unregister_blkdev(sd_major(i), "sd");
3786 return err;
3787 }
3788
3789 /**
3790 * exit_sd - exit point for this driver (when it is a module).
3791 *
3792 * Note: this function unregisters this driver from the scsi mid-level.
3793 **/
3794 static void __exit exit_sd(void)
3795 {
3796 int i;
3797
3798 SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
3799
3800 scsi_unregister_driver(&sd_template.gendrv);
3801 mempool_destroy(sd_cdb_pool);
3802 mempool_destroy(sd_page_pool);
3803 kmem_cache_destroy(sd_cdb_cache);
3804
3805 class_unregister(&sd_disk_class);
3806
3807 for (i = 0; i < SD_MAJORS; i++)
3808 unregister_blkdev(sd_major(i), "sd");
3809 }
3810
3811 module_init(init_sd);
3812 module_exit(exit_sd);
3813
3814 void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
3815 {
3816 scsi_print_sense_hdr(sdkp->device,
3817 sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
3818 }
3819
3820 void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result)
3821 {
3822 const char *hb_string = scsi_hostbyte_string(result);
3823
3824 if (hb_string)
3825 sd_printk(KERN_INFO, sdkp,
3826 "%s: Result: hostbyte=%s driverbyte=%s\n", msg,
3827 hb_string ? hb_string : "invalid",
3828 "DRIVER_OK");
3829 else
3830 sd_printk(KERN_INFO, sdkp,
3831 "%s: Result: hostbyte=0x%02x driverbyte=%s\n",
3832 msg, host_byte(result), "DRIVER_OK");
3833 }