// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992 Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2020 Douglas Gilbert
 *
 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */


#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20200710";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7	/* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB 128
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_UA_CAPACITY_CHANGED 3
#define SDEBUG_UA_LUNS_CHANGED 4
#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
#define SDEBUG_NUM_UAS 7

/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234	/* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10		/* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE .
 */
#define SDEBUG_CANQUEUE_WORDS 3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN SDEBUG_CANQUEUE

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZONE_TYPE_CNV	= 0x1,
	ZBC_ZONE_TYPE_SWR	= 0x2,
	ZBC_ZONE_TYPE_SWP	= 0x3,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t num_in_q;
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;
	bool init_hrt;
	bool init_wq;
	bool init_poll;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;
};

struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};

static atomic_t sdebug_cmnd_count;	/* number of incoming commands */
static atomic_t sdebug_completions;	/* count of deferred completions */
static atomic_t sdebug_miss_cpus;	/* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;		/* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;	/* bumped when mq_poll returns > 0 */

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;	/* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};


static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	    0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	    SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK .
 */
#define SDEG_RES_IMMED_MASK 0x40000000

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};


/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },				/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
		{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static int poll_queues; /* iouring iopoll interface.*/
static struct sdebug_queue *sdebug_q_arr;	/* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;


/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}

static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}

static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}

/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}

/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}

/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}


static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;

cbf67842 1219/* Device identification VPD page. Returns number of bytes placed in arr */
760f3b03
DG
1220static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1221 int target_dev_id, int dev_id_num,
09ba24c1 1222 const char *dev_id_str, int dev_id_str_len,
bf476433 1223 const uuid_t *lu_name)
1da177e4 1224{
c65b1445
DG
1225 int num, port_a;
1226 char b[32];
1da177e4 1227
c65b1445 1228 port_a = target_dev_id + 1;
1da177e4
LT
1229 /* T10 vendor identifier field format (faked) */
1230 arr[0] = 0x2; /* ASCII */
1231 arr[1] = 0x1;
1232 arr[2] = 0x0;
e5203cf0
HR
1233 memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1234 memcpy(&arr[12], sdebug_inq_product_id, 16);
1da177e4
LT
1235 memcpy(&arr[28], dev_id_str, dev_id_str_len);
1236 num = 8 + 16 + dev_id_str_len;
1237 arr[3] = num;
1238 num += 4;
c65b1445 1239 if (dev_id_num >= 0) {
09ba24c1
DG
1240 if (sdebug_uuid_ctl) {
1241 /* Locally assigned UUID */
1242 arr[num++] = 0x1; /* binary (not necessarily sas) */
1243 arr[num++] = 0xa; /* PIV=0, lu, naa */
1244 arr[num++] = 0x0;
1245 arr[num++] = 0x12;
1246 arr[num++] = 0x10; /* uuid type=1, locally assigned */
1247 arr[num++] = 0x0;
1248 memcpy(arr + num, lu_name, 16);
1249 num += 16;
1250 } else {
1b37bd60 1251 /* NAA-3, Logical unit identifier (binary) */
09ba24c1
DG
1252 arr[num++] = 0x1; /* binary (not necessarily sas) */
1253 arr[num++] = 0x3; /* PIV=0, lu, naa */
1254 arr[num++] = 0x0;
1255 arr[num++] = 0x8;
1b37bd60 1256 put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
09ba24c1
DG
1257 num += 8;
1258 }
c65b1445
DG
1259 /* Target relative port number */
1260 arr[num++] = 0x61; /* proto=sas, binary */
1261 arr[num++] = 0x94; /* PIV=1, target port, rel port */
1262 arr[num++] = 0x0; /* reserved */
1263 arr[num++] = 0x4; /* length */
1264 arr[num++] = 0x0; /* reserved */
1265 arr[num++] = 0x0; /* reserved */
1266 arr[num++] = 0x0;
1267 arr[num++] = 0x1; /* relative port A */
1268 }
1b37bd60 1269 /* NAA-3, Target port identifier */
c65b1445
DG
1270 arr[num++] = 0x61; /* proto=sas, binary */
1271 arr[num++] = 0x93; /* piv=1, target port, naa */
1272 arr[num++] = 0x0;
1273 arr[num++] = 0x8;
1b37bd60 1274 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
773642d9 1275 num += 8;
1b37bd60 1276 /* NAA-3, Target port group identifier */
5a09e398
HR
1277 arr[num++] = 0x61; /* proto=sas, binary */
1278 arr[num++] = 0x95; /* piv=1, target port group id */
1279 arr[num++] = 0x0;
1280 arr[num++] = 0x4;
1281 arr[num++] = 0;
1282 arr[num++] = 0;
773642d9
DG
1283 put_unaligned_be16(port_group_id, arr + num);
1284 num += 2;
1b37bd60 1285 /* NAA-3, Target device identifier */
c65b1445
DG
1286 arr[num++] = 0x61; /* proto=sas, binary */
1287 arr[num++] = 0xa3; /* piv=1, target device, naa */
1288 arr[num++] = 0x0;
1289 arr[num++] = 0x8;
1b37bd60 1290 put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
773642d9 1291 num += 8;
c65b1445
DG
1292 /* SCSI name string: Target device identifier */
1293 arr[num++] = 0x63; /* proto=sas, UTF-8 */
1294 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
1295 arr[num++] = 0x0;
1296 arr[num++] = 24;
1b37bd60 1297 memcpy(arr + num, "naa.32222220", 12);
c65b1445
DG
1298 num += 12;
1299 snprintf(b, sizeof(b), "%08X", target_dev_id);
1300 memcpy(arr + num, b, 8);
1301 num += 8;
1302 memset(arr + num, 0, 4);
1303 num += 4;
1304 return num;
1305}
1306
c65b1445
DG
1307static unsigned char vpd84_data[] = {
1308/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1309 0x22,0x22,0x22,0x0,0xbb,0x1,
1310 0x22,0x22,0x22,0x0,0xbb,0x2,
1311};
1312
cbf67842 1313/* Software interface identification VPD page */
760f3b03 1314static int inquiry_vpd_84(unsigned char *arr)
c65b1445
DG
1315{
1316 memcpy(arr, vpd84_data, sizeof(vpd84_data));
1317 return sizeof(vpd84_data);
1318}
1319
cbf67842 1320/* Management network addresses VPD page */
760f3b03 1321static int inquiry_vpd_85(unsigned char *arr)
c65b1445
DG
1322{
1323 int num = 0;
91d4c752
JP
1324 const char *na1 = "https://www.kernel.org/config";
1325 const char *na2 = "http://www.kernel.org/log";
c65b1445
DG
1326 int plen, olen;
1327
1328 arr[num++] = 0x1; /* lu, storage config */
1329 arr[num++] = 0x0; /* reserved */
1330 arr[num++] = 0x0;
1331 olen = strlen(na1);
1332 plen = olen + 1;
1333 if (plen % 4)
1334 plen = ((plen / 4) + 1) * 4;
1335 arr[num++] = plen; /* length, null terminated, padded */
1336 memcpy(arr + num, na1, olen);
1337 memset(arr + num + olen, 0, plen - olen);
1338 num += plen;
1339
1340 arr[num++] = 0x4; /* lu, logging */
1341 arr[num++] = 0x0; /* reserved */
1342 arr[num++] = 0x0;
1343 olen = strlen(na2);
1344 plen = olen + 1;
1345 if (plen % 4)
1346 plen = ((plen / 4) + 1) * 4;
1347 arr[num++] = plen; /* length, null terminated, padded */
1348 memcpy(arr + num, na2, olen);
1349 memset(arr + num + olen, 0, plen - olen);
1350 num += plen;
1351
1352 return num;
1353}
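/*
 * Illustration added for this write-up (not part of the driver): the descriptor
 * length arithmetic used above, i.e. rounding the NUL-terminated address string
 * up to a multiple of four bytes.  A minimal sketch; the helper name and the
 * assoc_svc parameter are invented for the example.
 */
#include <string.h>

static int add_net_addr_descriptor(unsigned char *arr, unsigned char assoc_svc,
                                   const char *addr)
{
        int olen = strlen(addr);
        int plen = olen + 1;                    /* include the NUL terminator */

        if (plen % 4)
                plen = ((plen / 4) + 1) * 4;    /* pad to a 4 byte boundary */

        arr[0] = assoc_svc;                     /* e.g. 0x1: lu, storage config */
        arr[1] = 0;                             /* reserved */
        arr[2] = 0;
        arr[3] = plen;                          /* padded address length */
        memcpy(arr + 4, addr, olen);
        memset(arr + 4 + olen, 0, plen - olen); /* NUL pad */
        return 4 + plen;                        /* bytes consumed in arr */
}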
1354
1355/* SCSI ports VPD page */
760f3b03 1356static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
c65b1445
DG
1357{
1358 int num = 0;
1359 int port_a, port_b;
1360
1361 port_a = target_dev_id + 1;
1362 port_b = port_a + 1;
1363 arr[num++] = 0x0; /* reserved */
1364 arr[num++] = 0x0; /* reserved */
1365 arr[num++] = 0x0;
1366 arr[num++] = 0x1; /* relative port 1 (primary) */
1367 memset(arr + num, 0, 6);
1368 num += 6;
1369 arr[num++] = 0x0;
1370 arr[num++] = 12; /* length tp descriptor */
1371 /* naa-5 target port identifier (A) */
1372 arr[num++] = 0x61; /* proto=sas, binary */
1373 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1374 arr[num++] = 0x0; /* reserved */
1375 arr[num++] = 0x8; /* length */
1b37bd60 1376 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
773642d9 1377 num += 8;
c65b1445
DG
1378 arr[num++] = 0x0; /* reserved */
1379 arr[num++] = 0x0; /* reserved */
1380 arr[num++] = 0x0;
1381 arr[num++] = 0x2; /* relative port 2 (secondary) */
1382 memset(arr + num, 0, 6);
1383 num += 6;
1384 arr[num++] = 0x0;
1385 arr[num++] = 12; /* length tp descriptor */
1386 /* naa-5 target port identifier (B) */
1387 arr[num++] = 0x61; /* proto=sas, binary */
1388 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1389 arr[num++] = 0x0; /* reserved */
1390 arr[num++] = 0x8; /* length */
1b37bd60 1391 put_unaligned_be64(naa3_comp_a + port_b, arr + num);
773642d9 1392 num += 8;
c65b1445
DG
1393
1394 return num;
1395}
1396
1397
1398static unsigned char vpd89_data[] = {
1399/* from 4th byte */ 0,0,0,0,
1400'l','i','n','u','x',' ',' ',' ',
1401'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1402'1','2','3','4',
14030x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
14040xec,0,0,0,
14050x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
14060,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
14070x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
14080x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
14090x53,0x41,
14100x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
14110x20,0x20,
14120x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
14130x10,0x80,
14140,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
14150x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
14160x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
14170,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
14180x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
14190x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
14200,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
14210,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
14220,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
14230,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
14240x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
14250,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
14260xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
14270,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
14280,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
14290,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
14300,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
14310,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
14320,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
14330,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
14340,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
14350,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
14360,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
14370,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
14380,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
14390,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1440};
1441
cbf67842 1442/* ATA Information VPD page */
760f3b03 1443static int inquiry_vpd_89(unsigned char *arr)
c65b1445
DG
1444{
1445 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1446 return sizeof(vpd89_data);
1447}
1448
1449
1450static unsigned char vpdb0_data[] = {
1e49f785
DG
1451 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1452 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1453 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1454 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
c65b1445
DG
1455};
1456
cbf67842 1457/* Block limits VPD page (SBC-3) */
760f3b03 1458static int inquiry_vpd_b0(unsigned char *arr)
c65b1445 1459{
ea61fca5
MP
1460 unsigned int gran;
1461
c65b1445 1462 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
e308b3d1
MP
1463
1464 /* Optimal transfer length granularity */
86e6828a
LH
1465 if (sdebug_opt_xferlen_exp != 0 &&
1466 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1467 gran = 1 << sdebug_opt_xferlen_exp;
1468 else
1469 gran = 1 << sdebug_physblk_exp;
773642d9 1470 put_unaligned_be16(gran, arr + 2);
e308b3d1
MP
1471
1472 /* Maximum Transfer Length */
773642d9
DG
1473 if (sdebug_store_sectors > 0x400)
1474 put_unaligned_be32(sdebug_store_sectors, arr + 4);
44d92694 1475
e308b3d1 1476 /* Optimal Transfer Length */
773642d9 1477 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
e308b3d1 1478
773642d9 1479 if (sdebug_lbpu) {
e308b3d1 1480 /* Maximum Unmap LBA Count */
773642d9 1481 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
e308b3d1
MP
1482
1483 /* Maximum Unmap Block Descriptor Count */
773642d9 1484 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
44d92694
MP
1485 }
1486
e308b3d1 1487 /* Unmap Granularity Alignment */
773642d9
DG
1488 if (sdebug_unmap_alignment) {
1489 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
44d92694
MP
1490 arr[28] |= 0x80; /* UGAVALID */
1491 }
1492
e308b3d1 1493 /* Optimal Unmap Granularity */
773642d9 1494 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
6014759c 1495
5b94e232 1496 /* Maximum WRITE SAME Length */
773642d9 1497 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
5b94e232
MP
1498
1499 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
44d92694 1500
1da177e4
LT
1502}
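/*
 * Illustration added for this write-up (not part of the driver): a minimal
 * user-space decoder for a few of the Block Limits fields written above.
 * 'page' is assumed to point at the payload the driver builds (i.e. what it
 * places at &arr[4]), so offsets here are relative to that payload, matching
 * the put_unaligned_be*() calls above.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t get_be32(const unsigned char *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | p[3];
}

static void decode_block_limits(const unsigned char *page)
{
        printf("optimal transfer length granularity: %u\n",
               (page[2] << 8) | page[3]);
        printf("maximum transfer length: %u\n", get_be32(page + 4));
        printf("optimal transfer length: %u\n", get_be32(page + 8));
        printf("maximum unmap LBA count: %u\n", get_be32(page + 16));
        printf("unmap granularity alignment valid: %s\n",
               (page[28] & 0x80) ? "yes" : "no");
}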
1503
1e49f785 1504/* Block device characteristics VPD page (SBC-3) */
64e14ece 1505static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
eac6e8e4
MW
1506{
1507 memset(arr, 0, 0x3c);
1508 arr[0] = 0;
1e49f785
DG
1509 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1510 arr[2] = 0;
1511 arr[3] = 5; /* less than 1.8" */
64e14ece
DLM
1512 if (devip->zmodel == BLK_ZONED_HA)
1513 arr[4] = 1 << 4; /* zoned field = 01b */
eac6e8e4
MW
1514
1515 return 0x3c;
1516}
1da177e4 1517
760f3b03
DG
1518/* Logical block provisioning VPD page (SBC-4) */
1519static int inquiry_vpd_b2(unsigned char *arr)
6014759c 1520{
3f0bc3b3 1521 memset(arr, 0, 0x4);
6014759c 1522 arr[0] = 0; /* threshold exponent */
773642d9 1523 if (sdebug_lbpu)
6014759c 1524 arr[1] = 1 << 7;
773642d9 1525 if (sdebug_lbpws)
6014759c 1526 arr[1] |= 1 << 6;
773642d9 1527 if (sdebug_lbpws10)
5b94e232 1528 arr[1] |= 1 << 5;
760f3b03
DG
1529 if (sdebug_lbprz && scsi_debug_lbp())
1530 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1531 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1532 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1533 /* threshold_percentage=0 */
3f0bc3b3 1534 return 0x4;
6014759c
MP
1535}
1536
d36da305 1537/* Zoned block device characteristics VPD page (ZBC mandatory) */
f0d1cf93 1538static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
d36da305
DG
1539{
1540 memset(arr, 0, 0x3c);
1541 arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1542 /*
1543 * Set Optimal number of open sequential write preferred zones and
1544 * Optimal number of non-sequentially written sequential write
f0d1cf93
DG
1545 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1546 * fields set to zero, apart from Max. number of open swrz_s field.
d36da305
DG
1547 */
1548 put_unaligned_be32(0xffffffff, &arr[4]);
1549 put_unaligned_be32(0xffffffff, &arr[8]);
64e14ece 1550 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
f0d1cf93
DG
1551 put_unaligned_be32(devip->max_open, &arr[12]);
1552 else
1553 put_unaligned_be32(0xffffffff, &arr[12]);
d36da305
DG
1554 return 0x3c;
1555}
1556
1da177e4 1557#define SDEBUG_LONG_INQ_SZ 96
c65b1445 1558#define SDEBUG_MAX_INQ_ARR_SZ 584
1da177e4 1559
c2248fc9 1560static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1da177e4
LT
1561{
1562 unsigned char pq_pdt;
91d4c752 1563 unsigned char *arr;
01123ef4 1564 unsigned char *cmd = scp->cmnd;
128ec190
GK
1565 u32 alloc_len, n;
1566 int ret;
d36da305 1567 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1da177e4 1568
773642d9 1569 alloc_len = get_unaligned_be16(cmd + 3);
6f3cbf55
DG
1570 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1571 if (! arr)
1572 return DID_REQUEUE << 16;
760f3b03 1573 is_disk = (sdebug_ptype == TYPE_DISK);
64e14ece 1574 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
d36da305 1575 is_disk_zbc = (is_disk || is_zbc);
b01f6f83 1576 have_wlun = scsi_is_wlun(scp->device->lun);
c2248fc9 1577 if (have_wlun)
b01f6f83
DG
1578 pq_pdt = TYPE_WLUN; /* present, wlun */
1579 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1580 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
c65b1445 1581 else
773642d9 1582 pq_pdt = (sdebug_ptype & 0x1f);
1da177e4
LT
1583 arr[0] = pq_pdt;
1584 if (0x2 & cmd[1]) { /* CMDDT bit set */
22017ed2 1585 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
5a09e398 1586 kfree(arr);
1da177e4
LT
1587 return check_condition_result;
1588 } else if (0x1 & cmd[1]) { /* EVPD bit set */
128ec190
GK
1589 int lu_id_num, port_group_id, target_dev_id;
1590 u32 len;
c65b1445
DG
1591 char lu_id_str[6];
1592 int host_no = devip->sdbg_host->shost->host_no;
1da177e4 1593
5a09e398
HR
1594 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1595 (devip->channel & 0x7f);
b01f6f83 1596 if (sdebug_vpd_use_hostno == 0)
23183910 1597 host_no = 0;
c2248fc9 1598 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
c65b1445
DG
1599 (devip->target * 1000) + devip->lun);
1600 target_dev_id = ((host_no + 1) * 2000) +
1601 (devip->target * 1000) - 3;
1602 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1da177e4 1603 if (0 == cmd[2]) { /* supported vital product data pages */
c65b1445
DG
1604 arr[1] = cmd[2]; /*sanity */
1605 n = 4;
1606 arr[n++] = 0x0; /* this page */
1607 arr[n++] = 0x80; /* unit serial number */
1608 arr[n++] = 0x83; /* device identification */
1609 arr[n++] = 0x84; /* software interface ident. */
1610 arr[n++] = 0x85; /* management network addresses */
1611 arr[n++] = 0x86; /* extended inquiry */
1612 arr[n++] = 0x87; /* mode page policy */
1613 arr[n++] = 0x88; /* SCSI ports */
d36da305 1614 if (is_disk_zbc) { /* SBC or ZBC */
760f3b03
DG
1615 arr[n++] = 0x89; /* ATA information */
1616 arr[n++] = 0xb0; /* Block limits */
1617 arr[n++] = 0xb1; /* Block characteristics */
d36da305
DG
1618 if (is_disk)
1619 arr[n++] = 0xb2; /* LB Provisioning */
64e14ece 1620 if (is_zbc)
d36da305 1621 arr[n++] = 0xb6; /* ZB dev. char. */
760f3b03 1622 }
c65b1445 1623 arr[3] = n - 4; /* number of supported VPD pages */
1da177e4 1624 } else if (0x80 == cmd[2]) { /* unit serial number */
c65b1445 1625 arr[1] = cmd[2]; /*sanity */
1da177e4 1626 arr[3] = len;
c65b1445 1627 memcpy(&arr[4], lu_id_str, len);
1da177e4 1628 } else if (0x83 == cmd[2]) { /* device identification */
c65b1445 1629 arr[1] = cmd[2]; /*sanity */
760f3b03
DG
1630 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1631 target_dev_id, lu_id_num,
09ba24c1
DG
1632 lu_id_str, len,
1633 &devip->lu_name);
c65b1445
DG
1634 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1635 arr[1] = cmd[2]; /*sanity */
760f3b03 1636 arr[3] = inquiry_vpd_84(&arr[4]);
c65b1445
DG
1637 } else if (0x85 == cmd[2]) { /* Management network addresses */
1638 arr[1] = cmd[2]; /*sanity */
760f3b03 1639 arr[3] = inquiry_vpd_85(&arr[4]);
c65b1445
DG
1640 } else if (0x86 == cmd[2]) { /* extended inquiry */
1641 arr[1] = cmd[2]; /*sanity */
1642 arr[3] = 0x3c; /* number of following entries */
8475c811 1643 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
c6a44287 1644 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
760f3b03 1645 else if (have_dif_prot)
c6a44287
MP
1646 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1647 else
1648 arr[4] = 0x0; /* no protection stuff */
c65b1445
DG
1649 arr[5] = 0x7; /* head of q, ordered + simple q's */
1650 } else if (0x87 == cmd[2]) { /* mode page policy */
1651 arr[1] = cmd[2]; /*sanity */
1652 arr[3] = 0x8; /* number of following entries */
1653 arr[4] = 0x2; /* disconnect-reconnect mp */
1654 arr[6] = 0x80; /* mlus, shared */
1655 arr[8] = 0x18; /* protocol specific lu */
1656 arr[10] = 0x82; /* mlus, per initiator port */
1657 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1658 arr[1] = cmd[2]; /*sanity */
760f3b03 1659 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
d36da305 1660 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
c65b1445 1661 arr[1] = cmd[2]; /*sanity */
760f3b03 1662 n = inquiry_vpd_89(&arr[4]);
773642d9 1663 put_unaligned_be16(n, arr + 2);
d36da305 1664 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
c65b1445 1665 arr[1] = cmd[2]; /*sanity */
760f3b03 1666 arr[3] = inquiry_vpd_b0(&arr[4]);
d36da305 1667 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
eac6e8e4 1668 arr[1] = cmd[2]; /*sanity */
64e14ece 1669 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
760f3b03 1670 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
6014759c 1671 arr[1] = cmd[2]; /*sanity */
760f3b03 1672 arr[3] = inquiry_vpd_b2(&arr[4]);
d36da305
DG
1673 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1674 arr[1] = cmd[2]; /*sanity */
f0d1cf93 1675 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1da177e4 1676 } else {
22017ed2 1677 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
5a09e398 1678 kfree(arr);
1da177e4
LT
1679 return check_condition_result;
1680 }
128ec190 1681 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
5a09e398 1682 ret = fill_from_dev_buffer(scp, arr,
128ec190 1683 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
5a09e398
HR
1684 kfree(arr);
1685 return ret;
1da177e4
LT
1686 }
1687 /* drops through here for a standard inquiry */
773642d9
DG
1688 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
1689 arr[2] = sdebug_scsi_level;
1da177e4
LT
1690 arr[3] = 2; /* response_data_format==2 */
1691 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
f46eb0e9 1692 arr[5] = (int)have_dif_prot; /* PROTECT bit */
b01f6f83 1693 if (sdebug_vpd_use_hostno == 0)
70bdf202 1694 arr[5] |= 0x10; /* claim: implicit TPGS */
c65b1445 1695 arr[6] = 0x10; /* claim: MultiP */
1da177e4 1696 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
c65b1445 1697 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
e5203cf0
HR
1698 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1699 memcpy(&arr[16], sdebug_inq_product_id, 16);
1700 memcpy(&arr[32], sdebug_inq_product_rev, 4);
9b760fd8
DG
1701 /* Use Vendor Specific area to place driver date in ASCII hex */
1702 memcpy(&arr[36], sdebug_version_date, 8);
1da177e4 1703 /* version descriptors (2 bytes each) follow */
760f3b03
DG
1704 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1705 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
c65b1445 1706 n = 62;
760f3b03
DG
1707 if (is_disk) { /* SBC-4 no version claimed */
1708 put_unaligned_be16(0x600, arr + n);
1709 n += 2;
1710 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1711 put_unaligned_be16(0x525, arr + n);
1712 n += 2;
d36da305
DG
1713 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
1714 put_unaligned_be16(0x624, arr + n);
1715 n += 2;
1da177e4 1716 }
760f3b03 1717 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
5a09e398 1718 ret = fill_from_dev_buffer(scp, arr,
128ec190 1719 min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
5a09e398
HR
1720 kfree(arr);
1721 return ret;
1da177e4
LT
1722}
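/*
 * Illustration added for this write-up (not part of the driver): how an
 * initiator would lay out the 6 byte INQUIRY CDB that resp_inquiry() above
 * parses.  The EVPD bit lives in byte 1, the VPD page code in byte 2 and the
 * allocation length in bytes 3-4 (big-endian), matching the
 * get_unaligned_be16(cmd + 3) call above.  Sketch only; names are invented.
 */
#include <string.h>

static void build_inquiry_cdb(unsigned char cdb[6], int evpd,
                              unsigned char page_code, unsigned short alloc_len)
{
        memset(cdb, 0, 6);
        cdb[0] = 0x12;                          /* INQUIRY opcode */
        cdb[1] = evpd ? 0x01 : 0x00;            /* EVPD bit */
        cdb[2] = evpd ? page_code : 0;          /* VPD page, e.g. 0x83 */
        cdb[3] = (alloc_len >> 8) & 0xff;       /* allocation length (MSB) */
        cdb[4] = alloc_len & 0xff;              /* allocation length (LSB) */
        cdb[5] = 0;                             /* control byte */
}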
1723
84905d34 1724/* See resp_iec_m_pg() for how this data is manipulated */
fd32119b
DG
1725static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1726 0, 0, 0x0, 0x0};
1727
91d4c752
JP
1728static int resp_requests(struct scsi_cmnd *scp,
1729 struct sdebug_dev_info *devip)
1da177e4 1730{
01123ef4 1731 unsigned char *cmd = scp->cmnd;
84905d34
DG
1732 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
1733 bool dsense = !!(cmd[1] & 1);
128ec190
GK
1734 u32 alloc_len = cmd[4];
1735 u32 len = 18;
84905d34 1736 int stopped_state = atomic_read(&devip->stopped);
1da177e4 1737
c65b1445 1738 memset(arr, 0, sizeof(arr));
84905d34
DG
1739 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
1740 if (dsense) {
1741 arr[0] = 0x72;
1742 arr[1] = NOT_READY;
1743 arr[2] = LOGICAL_UNIT_NOT_READY;
1744 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1745 len = 8;
1746 } else {
1747 arr[0] = 0x70;
1748 arr[2] = NOT_READY; /* NOT_READY in sense_key */
1749 arr[7] = 0xa; /* 18 byte sense buffer */
1750 arr[12] = LOGICAL_UNIT_NOT_READY;
1751 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1752 }
1753 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1754 /* Information exceptions control mode page: TEST=1, MRIE=6 */
c2248fc9 1755 if (dsense) {
c65b1445
DG
1756 arr[0] = 0x72;
1757 arr[1] = 0x0; /* NO_SENSE in sense_key */
1758 arr[2] = THRESHOLD_EXCEEDED;
84905d34 1759 arr[3] = 0xff; /* Failure prediction(false) */
c2248fc9 1760 len = 8;
c65b1445
DG
1761 } else {
1762 arr[0] = 0x70;
1763 arr[2] = 0x0; /* NO_SENSE in sense_key */
1764 arr[7] = 0xa; /* 18 byte sense buffer */
1765 arr[12] = THRESHOLD_EXCEEDED;
84905d34 1766 arr[13] = 0xff; /* Failure prediction(false) */
c65b1445 1767 }
84905d34
DG
1768 } else { /* nothing to report */
1769 if (dsense) {
c65b1445 1770 len = 8;
84905d34
DG
1771 memset(arr, 0, len);
1772 arr[0] = 0x72;
c2248fc9 1773 } else {
84905d34 1774 memset(arr, 0, len);
c2248fc9 1775 arr[0] = 0x70;
c2248fc9 1776 arr[7] = 0xa;
c65b1445
DG
1777 }
1778 }
128ec190 1779 return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1da177e4
LT
1780}
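/*
 * Illustration added for this write-up (not part of the driver): the two sense
 * data layouts produced above.  Fixed format (0x70) keeps the sense key in
 * byte 2 and ASC/ASCQ in bytes 12/13; descriptor format (0x72) keeps them in
 * bytes 1, 2 and 3.  A minimal user-space decoder:
 */
static void decode_sense(const unsigned char *sb, unsigned char *key,
                         unsigned char *asc, unsigned char *ascq)
{
        if ((sb[0] & 0x7f) == 0x72 || (sb[0] & 0x7f) == 0x73) {
                *key = sb[1] & 0xf;             /* descriptor format */
                *asc = sb[2];
                *ascq = sb[3];
        } else {                                /* assume fixed format */
                *key = sb[2] & 0xf;
                *asc = sb[12];
                *ascq = sb[13];
        }
}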
1781
fc13638a 1782static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
c65b1445 1783{
01123ef4 1784 unsigned char *cmd = scp->cmnd;
fc13638a 1785 int power_cond, want_stop, stopped_state;
4f2c8bf6 1786 bool changing;
c65b1445 1787
c65b1445
DG
1788 power_cond = (cmd[4] & 0xf0) >> 4;
1789 if (power_cond) {
22017ed2 1790 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
c65b1445
DG
1791 return check_condition_result;
1792 }
fc13638a
DG
1793 want_stop = !(cmd[4] & 1);
1794 stopped_state = atomic_read(&devip->stopped);
1795 if (stopped_state == 2) {
1796 ktime_t now_ts = ktime_get_boottime();
1797
1798 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1799 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1800
1801 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1802 /* tur_ms_to_ready timer extinguished */
1803 atomic_set(&devip->stopped, 0);
1804 stopped_state = 0;
1805 }
1806 }
1807 if (stopped_state == 2) {
1808 if (want_stop) {
1809 stopped_state = 1; /* dummy up success */
1810 } else { /* Disallow tur_ms_to_ready delay to be overridden */
1811 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1812 return check_condition_result;
1813 }
1814 }
1815 }
1816 changing = (stopped_state != want_stop);
1817 if (changing)
1818 atomic_xchg(&devip->stopped, want_stop);
1819 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
4f2c8bf6
DG
1820 return SDEG_RES_IMMED_MASK;
1821 else
1822 return 0;
c65b1445
DG
1823}
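/*
 * Illustration added for this write-up (not part of the driver): the
 * tur_ms_to_ready delay check used above, reduced to plain arithmetic.  A
 * device created at create_ns only leaves the "initializing" stopped state
 * (stopped == 2) once tur_ms_to_ready milliseconds of boot time have elapsed.
 * Function and parameter names are invented for the sketch.
 */
#include <stdint.h>

static int ready_after_delay(uint64_t now_ns, uint64_t create_ns,
                             unsigned int tur_ms_to_ready)
{
        if (now_ns <= create_ns)
                return 0;
        return (now_ns - create_ns) >= (uint64_t)tur_ms_to_ready * 1000000;
}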
1824
28898873
FT
1825static sector_t get_sdebug_capacity(void)
1826{
773642d9
DG
1827 static const unsigned int gibibyte = 1073741824;
1828
1829 if (sdebug_virtual_gb > 0)
1830 return (sector_t)sdebug_virtual_gb *
1831 (gibibyte / sdebug_sector_size);
28898873
FT
1832 else
1833 return sdebug_store_sectors;
1834}
1835
1da177e4 1836#define SDEBUG_READCAP_ARR_SZ 8
91d4c752
JP
1837static int resp_readcap(struct scsi_cmnd *scp,
1838 struct sdebug_dev_info *devip)
1da177e4
LT
1839{
1840 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
c65b1445 1841 unsigned int capac;
1da177e4 1842
c65b1445 1843 /* following just in case virtual_gb changed */
28898873 1844 sdebug_capacity = get_sdebug_capacity();
1da177e4 1845 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
c65b1445
DG
1846 if (sdebug_capacity < 0xffffffff) {
1847 capac = (unsigned int)sdebug_capacity - 1;
773642d9
DG
1848 put_unaligned_be32(capac, arr + 0);
1849 } else
1850 put_unaligned_be32(0xffffffff, arr + 0);
1851 put_unaligned_be16(sdebug_sector_size, arr + 6);
1da177e4
LT
1852 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1853}
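/*
 * Illustration added for this write-up (not part of the driver): decoding the
 * 8 byte READ CAPACITY(10) response built above.  A returned last LBA of
 * 0xffffffff tells the initiator the capacity does not fit in 32 bits and that
 * READ CAPACITY(16) must be used instead.
 */
#include <stdint.h>

static int decode_readcap10(const unsigned char *resp, uint64_t *num_blocks,
                            uint32_t *block_size)
{
        uint32_t last_lba = ((uint32_t)resp[0] << 24) | (resp[1] << 16) |
                            (resp[2] << 8) | resp[3];

        *block_size = ((uint32_t)resp[4] << 24) | (resp[5] << 16) |
                      (resp[6] << 8) | resp[7];
        if (last_lba == 0xffffffff)
                return -1;                      /* use READ CAPACITY(16) */
        *num_blocks = (uint64_t)last_lba + 1;   /* response holds last LBA */
        return 0;
}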
1854
c65b1445 1855#define SDEBUG_READCAP16_ARR_SZ 32
91d4c752
JP
1856static int resp_readcap16(struct scsi_cmnd *scp,
1857 struct sdebug_dev_info *devip)
c65b1445 1858{
01123ef4 1859 unsigned char *cmd = scp->cmnd;
c65b1445 1860 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
80791561 1861 u32 alloc_len;
c65b1445 1862
773642d9 1863 alloc_len = get_unaligned_be32(cmd + 10);
c65b1445 1864 /* following just in case virtual_gb changed */
28898873 1865 sdebug_capacity = get_sdebug_capacity();
c65b1445 1866 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
773642d9
DG
1867 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1868 put_unaligned_be32(sdebug_sector_size, arr + 8);
1869 arr[13] = sdebug_physblk_exp & 0xf;
1870 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
44d92694 1871
be1dd78d 1872 if (scsi_debug_lbp()) {
5b94e232 1873 arr[14] |= 0x80; /* LBPME */
760f3b03
DG
1874 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1875 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1876 * in the wider field maps to 0 in this field.
1877 */
1878 if (sdebug_lbprz & 1) /* precisely what the draft requires */
1879 arr[14] |= 0x40;
be1dd78d 1880 }
44d92694 1881
773642d9 1882 arr[15] = sdebug_lowest_aligned & 0xff;
c6a44287 1883
760f3b03 1884 if (have_dif_prot) {
773642d9 1885 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
c6a44287
MP
1886 arr[12] |= 1; /* PROT_EN */
1887 }
1888
c65b1445 1889 return fill_from_dev_buffer(scp, arr,
80791561 1890 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
c65b1445
DG
1891}
1892
5a09e398
HR
1893#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1894
91d4c752
JP
1895static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1896 struct sdebug_dev_info *devip)
5a09e398 1897{
01123ef4 1898 unsigned char *cmd = scp->cmnd;
91d4c752 1899 unsigned char *arr;
5a09e398 1900 int host_no = devip->sdbg_host->shost->host_no;
5a09e398 1901 int port_group_a, port_group_b, port_a, port_b;
5876c023
YB
1902 u32 alen, n, rlen;
1903 int ret;
5a09e398 1904
773642d9 1905 alen = get_unaligned_be32(cmd + 6);
6f3cbf55
DG
1906 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1907 if (! arr)
1908 return DID_REQUEUE << 16;
5a09e398
HR
1909 /*
1910 * EVPD page 0x88 states we have two ports, one
1911 * real and a fake port with no device connected.
1912 * So we create two port groups with one port each
1913 * and set the group with port B to unavailable.
1914 */
1915 port_a = 0x1; /* relative port A */
1916 port_b = 0x2; /* relative port B */
1917 port_group_a = (((host_no + 1) & 0x7f) << 8) +
773642d9 1918 (devip->channel & 0x7f);
5a09e398 1919 port_group_b = (((host_no + 1) & 0x7f) << 8) +
773642d9 1920 (devip->channel & 0x7f) + 0x80;
5a09e398
HR
1921
1922 /*
1923 * The asymmetric access state is cycled according to the host_id.
1924 */
1925 n = 4;
b01f6f83 1926 if (sdebug_vpd_use_hostno == 0) {
773642d9
DG
1927 arr[n++] = host_no % 3; /* Asymm access state */
1928 arr[n++] = 0x0F; /* claim: all states are supported */
5a09e398 1929 } else {
773642d9
DG
1930 arr[n++] = 0x0; /* Active/Optimized path */
1931 arr[n++] = 0x01; /* only support active/optimized paths */
5a09e398 1932 }
773642d9
DG
1933 put_unaligned_be16(port_group_a, arr + n);
1934 n += 2;
5a09e398
HR
1935 arr[n++] = 0; /* Reserved */
1936 arr[n++] = 0; /* Status code */
1937 arr[n++] = 0; /* Vendor unique */
1938 arr[n++] = 0x1; /* One port per group */
1939 arr[n++] = 0; /* Reserved */
1940 arr[n++] = 0; /* Reserved */
773642d9
DG
1941 put_unaligned_be16(port_a, arr + n);
1942 n += 2;
5a09e398
HR
1943 arr[n++] = 3; /* Port unavailable */
1944 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
773642d9
DG
1945 put_unaligned_be16(port_group_b, arr + n);
1946 n += 2;
5a09e398
HR
1947 arr[n++] = 0; /* Reserved */
1948 arr[n++] = 0; /* Status code */
1949 arr[n++] = 0; /* Vendor unique */
1950 arr[n++] = 0x1; /* One port per group */
1951 arr[n++] = 0; /* Reserved */
1952 arr[n++] = 0; /* Reserved */
773642d9
DG
1953 put_unaligned_be16(port_b, arr + n);
1954 n += 2;
5a09e398
HR
1955
1956 rlen = n - 4;
773642d9 1957 put_unaligned_be32(rlen, arr + 0);
5a09e398
HR
1958
1959 /*
1960 * Return the smallest value of either
1961 * - The allocated length
1962 * - The constructed command length
1963 * - The maximum array size
1964 */
5876c023 1965 rlen = min(alen, n);
5a09e398 1966 ret = fill_from_dev_buffer(scp, arr,
5876c023 1967 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
5a09e398
HR
1968 kfree(arr);
1969 return ret;
1970}
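/*
 * Illustration added for this write-up (not part of the driver): the target
 * port group identifiers generated above.  Group A encodes the host number and
 * channel; group B is the same value with bit 7 of the low byte set, which is
 * how the two fake port groups stay distinct.  Sketch with an invented name.
 */
static unsigned short tpg_id(int host_no, int channel, int group_b)
{
        unsigned short id = (((host_no + 1) & 0x7f) << 8) + (channel & 0x7f);

        return group_b ? id + 0x80 : id;
}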
1971
fd32119b
DG
1972static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1973 struct sdebug_dev_info *devip)
38d5c833
DG
1974{
1975 bool rctd;
1976 u8 reporting_opts, req_opcode, sdeb_i, supp;
1977 u16 req_sa, u;
1978 u32 alloc_len, a_len;
1979 int k, offset, len, errsts, count, bump, na;
1980 const struct opcode_info_t *oip;
1981 const struct opcode_info_t *r_oip;
1982 u8 *arr;
1983 u8 *cmd = scp->cmnd;
1984
1985 rctd = !!(cmd[2] & 0x80);
1986 reporting_opts = cmd[2] & 0x7;
1987 req_opcode = cmd[3];
1988 req_sa = get_unaligned_be16(cmd + 4);
1989 alloc_len = get_unaligned_be32(cmd + 6);
6d310dfb 1990 if (alloc_len < 4 || alloc_len > 0xffff) {
38d5c833
DG
1991 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1992 return check_condition_result;
1993 }
1994 if (alloc_len > 8192)
1995 a_len = 8192;
1996 else
1997 a_len = alloc_len;
99531e60 1998 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
38d5c833
DG
1999 if (NULL == arr) {
2000 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2001 INSUFF_RES_ASCQ);
2002 return check_condition_result;
2003 }
2004 switch (reporting_opts) {
2005 case 0: /* all commands */
2006 /* count number of commands */
2007 for (count = 0, oip = opcode_info_arr;
2008 oip->num_attached != 0xff; ++oip) {
2009 if (F_INV_OP & oip->flags)
2010 continue;
2011 count += (oip->num_attached + 1);
2012 }
2013 bump = rctd ? 20 : 8;
2014 put_unaligned_be32(count * bump, arr);
2015 for (offset = 4, oip = opcode_info_arr;
2016 oip->num_attached != 0xff && offset < a_len; ++oip) {
2017 if (F_INV_OP & oip->flags)
2018 continue;
2019 na = oip->num_attached;
2020 arr[offset] = oip->opcode;
2021 put_unaligned_be16(oip->sa, arr + offset + 2);
2022 if (rctd)
2023 arr[offset + 5] |= 0x2;
2024 if (FF_SA & oip->flags)
2025 arr[offset + 5] |= 0x1;
2026 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2027 if (rctd)
2028 put_unaligned_be16(0xa, arr + offset + 8);
2029 r_oip = oip;
2030 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2031 if (F_INV_OP & oip->flags)
2032 continue;
2033 offset += bump;
2034 arr[offset] = oip->opcode;
2035 put_unaligned_be16(oip->sa, arr + offset + 2);
2036 if (rctd)
2037 arr[offset + 5] |= 0x2;
2038 if (FF_SA & oip->flags)
2039 arr[offset + 5] |= 0x1;
2040 put_unaligned_be16(oip->len_mask[0],
2041 arr + offset + 6);
2042 if (rctd)
2043 put_unaligned_be16(0xa,
2044 arr + offset + 8);
2045 }
2046 oip = r_oip;
2047 offset += bump;
2048 }
2049 break;
2050 case 1: /* one command: opcode only */
2051 case 2: /* one command: opcode plus service action */
2052 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2053 sdeb_i = opcode_ind_arr[req_opcode];
2054 oip = &opcode_info_arr[sdeb_i];
2055 if (F_INV_OP & oip->flags) {
2056 supp = 1;
2057 offset = 4;
2058 } else {
2059 if (1 == reporting_opts) {
2060 if (FF_SA & oip->flags) {
2061 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2062 2, 2);
2063 kfree(arr);
2064 return check_condition_result;
2065 }
2066 req_sa = 0;
2067 } else if (2 == reporting_opts &&
2068 0 == (FF_SA & oip->flags)) {
2069 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2070 kfree(arr); /* point at requested sa */
2071 return check_condition_result;
2072 }
2073 if (0 == (FF_SA & oip->flags) &&
2074 req_opcode == oip->opcode)
2075 supp = 3;
2076 else if (0 == (FF_SA & oip->flags)) {
2077 na = oip->num_attached;
2078 for (k = 0, oip = oip->arrp; k < na;
2079 ++k, ++oip) {
2080 if (req_opcode == oip->opcode)
2081 break;
2082 }
2083 supp = (k >= na) ? 1 : 3;
2084 } else if (req_sa != oip->sa) {
2085 na = oip->num_attached;
2086 for (k = 0, oip = oip->arrp; k < na;
2087 ++k, ++oip) {
2088 if (req_sa == oip->sa)
2089 break;
2090 }
2091 supp = (k >= na) ? 1 : 3;
2092 } else
2093 supp = 3;
2094 if (3 == supp) {
2095 u = oip->len_mask[0];
2096 put_unaligned_be16(u, arr + 2);
2097 arr[4] = oip->opcode;
2098 for (k = 1; k < u; ++k)
2099 arr[4 + k] = (k < 16) ?
2100 oip->len_mask[k] : 0xff;
2101 offset = 4 + u;
2102 } else
2103 offset = 4;
2104 }
2105 arr[1] = (rctd ? 0x80 : 0) | supp;
2106 if (rctd) {
2107 put_unaligned_be16(0xa, arr + offset);
2108 offset += 12;
2109 }
2110 break;
2111 default:
2112 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2113 kfree(arr);
2114 return check_condition_result;
2115 }
2116 offset = (offset < a_len) ? offset : a_len;
2117 len = (offset < alloc_len) ? offset : alloc_len;
2118 errsts = fill_from_dev_buffer(scp, arr, len);
2119 kfree(arr);
2120 return errsts;
2121}
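/*
 * Illustration added for this write-up (not part of the driver): walking the
 * "all commands" payload produced above.  Each descriptor is 8 bytes, or 20
 * bytes when RCTD requested command timeout descriptors; the first 4 bytes of
 * the payload give the total descriptor list length.  Sketch only.
 */
#include <stdint.h>
#include <stdio.h>

static void walk_rsoc_all(const unsigned char *buf, int rctd)
{
        uint32_t list_len = ((uint32_t)buf[0] << 24) | (buf[1] << 16) |
                            (buf[2] << 8) | buf[3];
        int bump = rctd ? 20 : 8;
        uint32_t off;

        for (off = 4; off + bump <= list_len + 4; off += bump) {
                unsigned char opcode = buf[off];
                unsigned short sa = (buf[off + 2] << 8) | buf[off + 3];
                int has_sa = buf[off + 5] & 0x1;        /* SERVACTV bit */

                if (has_sa)
                        printf("opcode 0x%02x / sa 0x%04x\n", opcode, sa);
                else
                        printf("opcode 0x%02x\n", opcode);
        }
}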
2122
fd32119b
DG
2123static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2124 struct sdebug_dev_info *devip)
38d5c833
DG
2125{
2126 bool repd;
2127 u32 alloc_len, len;
2128 u8 arr[16];
2129 u8 *cmd = scp->cmnd;
2130
2131 memset(arr, 0, sizeof(arr));
2132 repd = !!(cmd[2] & 0x80);
2133 alloc_len = get_unaligned_be32(cmd + 6);
2134 if (alloc_len < 4) {
2135 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2136 return check_condition_result;
2137 }
2138 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2139 arr[1] = 0x1; /* ITNRS */
2140 if (repd) {
2141 arr[3] = 0xc;
2142 len = 16;
2143 } else
2144 len = 4;
2145
2146 len = (len < alloc_len) ? len : alloc_len;
2147 return fill_from_dev_buffer(scp, arr, len);
2148}
2149
1da177e4
LT
2150/* <<Following mode page info copied from ST318451LW>> */
2151
91d4c752 2152static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
1da177e4
LT
2153{ /* Read-Write Error Recovery page for mode_sense */
2154 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2155 5, 0, 0xff, 0xff};
2156
2157 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2158 if (1 == pcontrol)
2159 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2160 return sizeof(err_recov_pg);
2161}
2162
91d4c752 2163static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
1da177e4
LT
2164{ /* Disconnect-Reconnect page for mode_sense */
2165 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2166 0, 0, 0, 0, 0, 0, 0, 0};
2167
2168 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2169 if (1 == pcontrol)
2170 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2171 return sizeof(disconnect_pg);
2172}
2173
91d4c752 2174static int resp_format_pg(unsigned char *p, int pcontrol, int target)
1da177e4 2175{ /* Format device page for mode_sense */
597136ab
MP
2176 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2177 0, 0, 0, 0, 0, 0, 0, 0,
2178 0, 0, 0, 0, 0x40, 0, 0, 0};
2179
2180 memcpy(p, format_pg, sizeof(format_pg));
773642d9
DG
2181 put_unaligned_be16(sdebug_sectors_per, p + 10);
2182 put_unaligned_be16(sdebug_sector_size, p + 12);
2183 if (sdebug_removable)
597136ab
MP
2184 p[20] |= 0x20; /* should agree with INQUIRY */
2185 if (1 == pcontrol)
2186 memset(p + 2, 0, sizeof(format_pg) - 2);
2187 return sizeof(format_pg);
1da177e4
LT
2188}
2189
fd32119b
DG
2190static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2191 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2192 0, 0, 0, 0};
2193
91d4c752 2194static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
1da177e4 2195{ /* Caching page for mode_sense */
cbf67842
DG
2196 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2197 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2198 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1da177e4
LT
2199 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2200
773642d9 2201 if (SDEBUG_OPT_N_WCE & sdebug_opts)
cbf67842 2202 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
1da177e4
LT
2203 memcpy(p, caching_pg, sizeof(caching_pg));
2204 if (1 == pcontrol)
cbf67842
DG
2205 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2206 else if (2 == pcontrol)
2207 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1da177e4
LT
2208 return sizeof(caching_pg);
2209}
2210
fd32119b
DG
2211static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2212 0, 0, 0x2, 0x4b};
2213
91d4c752 2214static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
1da177e4 2215{ /* Control mode page for mode_sense */
c65b1445 2216 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
9a051019 2217 0, 0, 0, 0};
c65b1445 2218 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1da177e4
LT
2219 0, 0, 0x2, 0x4b};
2220
773642d9 2221 if (sdebug_dsense)
1da177e4 2222 ctrl_m_pg[2] |= 0x4;
c65b1445
DG
2223 else
2224 ctrl_m_pg[2] &= ~0x4;
c6a44287 2225
773642d9 2226 if (sdebug_ato)
c6a44287
MP
2227 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2228
1da177e4
LT
2229 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2230 if (1 == pcontrol)
c65b1445
DG
2231 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2232 else if (2 == pcontrol)
2233 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1da177e4
LT
2234 return sizeof(ctrl_m_pg);
2235}
2236
c65b1445 2237
91d4c752 2238static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
1da177e4 2239{ /* Informational Exceptions control mode page for mode_sense */
c65b1445
DG
2240 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2241 0, 0, 0x0, 0x0};
2242 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2243 0, 0, 0x0, 0x0};
2244
1da177e4
LT
2245 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2246 if (1 == pcontrol)
c65b1445
DG
2247 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2248 else if (2 == pcontrol)
2249 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1da177e4
LT
2250 return sizeof(iec_m_pg);
2251}
2252
91d4c752 2253static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
c65b1445
DG
2254{ /* SAS SSP mode page - short format for mode_sense */
2255 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2256 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2257
2258 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2259 if (1 == pcontrol)
2260 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2261 return sizeof(sas_sf_m_pg);
2262}
2263
2264
91d4c752 2265static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
c65b1445
DG
2266 int target_dev_id)
2267{ /* SAS phy control and discover mode page for mode_sense */
2268 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2269 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
773642d9
DG
2270 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2271 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
c65b1445
DG
2272 0x2, 0, 0, 0, 0, 0, 0, 0,
2273 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2274 0, 0, 0, 0, 0, 0, 0, 0,
2275 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
773642d9
DG
2276 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2277 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
c65b1445
DG
2278 0x3, 0, 0, 0, 0, 0, 0, 0,
2279 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2280 0, 0, 0, 0, 0, 0, 0, 0,
2281 };
2282 int port_a, port_b;
2283
1b37bd60
DG
2284 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2285 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2286 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2287 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
c65b1445
DG
2288 port_a = target_dev_id + 1;
2289 port_b = port_a + 1;
2290 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
773642d9
DG
2291 put_unaligned_be32(port_a, p + 20);
2292 put_unaligned_be32(port_b, p + 48 + 20);
c65b1445
DG
2293 if (1 == pcontrol)
2294 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2295 return sizeof(sas_pcd_m_pg);
2296}
2297
91d4c752 2298static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
c65b1445
DG
2299{ /* SAS SSP shared protocol specific port mode subpage */
2300 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2301 0, 0, 0, 0, 0, 0, 0, 0,
2302 };
2303
2304 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2305 if (1 == pcontrol)
2306 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2307 return sizeof(sas_sha_m_pg);
2308}
2309
1da177e4
LT
2310#define SDEBUG_MAX_MSENSE_SZ 256
2311
fd32119b
DG
2312static int resp_mode_sense(struct scsi_cmnd *scp,
2313 struct sdebug_dev_info *devip)
1da177e4 2314{
23183910 2315 int pcontrol, pcode, subpcode, bd_len;
1da177e4 2316 unsigned char dev_spec;
128ec190
GK
2317 u32 alloc_len, offset, len;
2318 int target_dev_id;
c2248fc9 2319 int target = scp->device->id;
91d4c752 2320 unsigned char *ap;
1da177e4 2321 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
01123ef4 2322 unsigned char *cmd = scp->cmnd;
d36da305 2323 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
1da177e4 2324
760f3b03 2325 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
1da177e4
LT
2326 pcontrol = (cmd[2] & 0xc0) >> 6;
2327 pcode = cmd[2] & 0x3f;
2328 subpcode = cmd[3];
2329 msense_6 = (MODE_SENSE == cmd[0]);
760f3b03
DG
2330 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2331 is_disk = (sdebug_ptype == TYPE_DISK);
64e14ece 2332 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
d36da305 2333 if ((is_disk || is_zbc) && !dbd)
23183910
DG
2334 bd_len = llbaa ? 16 : 8;
2335 else
2336 bd_len = 0;
773642d9 2337 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
1da177e4
LT
2338 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2339 if (0x3 == pcontrol) { /* Saving values not supported */
cbf67842 2340 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
1da177e4
LT
2341 return check_condition_result;
2342 }
c65b1445
DG
2343 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2344 (devip->target * 1000) - 3;
d36da305
DG
2345 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2346 if (is_disk || is_zbc) {
b01f6f83 2347 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
9447b6ce
MP
2348 if (sdebug_wp)
2349 dev_spec |= 0x80;
2350 } else
23183910 2351 dev_spec = 0x0;
1da177e4
LT
2352 if (msense_6) {
2353 arr[2] = dev_spec;
23183910 2354 arr[3] = bd_len;
1da177e4
LT
2355 offset = 4;
2356 } else {
2357 arr[3] = dev_spec;
23183910
DG
2358 if (16 == bd_len)
2359 arr[4] = 0x1; /* set LONGLBA bit */
2360 arr[7] = bd_len; /* assume 255 or less */
1da177e4
LT
2361 offset = 8;
2362 }
2363 ap = arr + offset;
28898873
FT
2364 if ((bd_len > 0) && (!sdebug_capacity))
2365 sdebug_capacity = get_sdebug_capacity();
2366
23183910 2367 if (8 == bd_len) {
773642d9
DG
2368 if (sdebug_capacity > 0xfffffffe)
2369 put_unaligned_be32(0xffffffff, ap + 0);
2370 else
2371 put_unaligned_be32(sdebug_capacity, ap + 0);
2372 put_unaligned_be16(sdebug_sector_size, ap + 6);
23183910
DG
2373 offset += bd_len;
2374 ap = arr + offset;
2375 } else if (16 == bd_len) {
773642d9
DG
2376 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2377 put_unaligned_be32(sdebug_sector_size, ap + 12);
23183910
DG
2378 offset += bd_len;
2379 ap = arr + offset;
2380 }
1da177e4 2381
c65b1445
DG
2382 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2383 /* TODO: Control Extension page */
22017ed2 2384 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1da177e4
LT
2385 return check_condition_result;
2386 }
760f3b03
DG
2387 bad_pcode = false;
2388
1da177e4
LT
2389 switch (pcode) {
2390 case 0x1: /* Read-Write error recovery page, direct access */
2391 len = resp_err_recov_pg(ap, pcontrol, target);
2392 offset += len;
2393 break;
2394 case 0x2: /* Disconnect-Reconnect page, all devices */
2395 len = resp_disconnect_pg(ap, pcontrol, target);
2396 offset += len;
2397 break;
9a051019 2398 case 0x3: /* Format device page, direct access */
760f3b03
DG
2399 if (is_disk) {
2400 len = resp_format_pg(ap, pcontrol, target);
2401 offset += len;
2402 } else
2403 bad_pcode = true;
9a051019 2404 break;
1da177e4 2405 case 0x8: /* Caching page, direct access */
d36da305 2406 if (is_disk || is_zbc) {
760f3b03
DG
2407 len = resp_caching_pg(ap, pcontrol, target);
2408 offset += len;
2409 } else
2410 bad_pcode = true;
1da177e4
LT
2411 break;
2412 case 0xa: /* Control Mode page, all devices */
2413 len = resp_ctrl_m_pg(ap, pcontrol, target);
2414 offset += len;
2415 break;
c65b1445
DG
2416 case 0x19: /* if spc==1 then sas phy, control+discover */
2417 if ((subpcode > 0x2) && (subpcode < 0xff)) {
22017ed2 2418 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
c65b1445 2419 return check_condition_result;
9a051019 2420 }
c65b1445
DG
2421 len = 0;
2422 if ((0x0 == subpcode) || (0xff == subpcode))
2423 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2424 if ((0x1 == subpcode) || (0xff == subpcode))
2425 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2426 target_dev_id);
2427 if ((0x2 == subpcode) || (0xff == subpcode))
2428 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2429 offset += len;
2430 break;
1da177e4
LT
2431 case 0x1c: /* Informational Exceptions Mode page, all devices */
2432 len = resp_iec_m_pg(ap, pcontrol, target);
2433 offset += len;
2434 break;
2435 case 0x3f: /* Read all Mode pages */
c65b1445
DG
2436 if ((0 == subpcode) || (0xff == subpcode)) {
2437 len = resp_err_recov_pg(ap, pcontrol, target);
2438 len += resp_disconnect_pg(ap + len, pcontrol, target);
760f3b03
DG
2439 if (is_disk) {
2440 len += resp_format_pg(ap + len, pcontrol,
2441 target);
2442 len += resp_caching_pg(ap + len, pcontrol,
2443 target);
d36da305
DG
2444 } else if (is_zbc) {
2445 len += resp_caching_pg(ap + len, pcontrol,
2446 target);
760f3b03 2447 }
c65b1445
DG
2448 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2449 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2450 if (0xff == subpcode) {
2451 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2452 target, target_dev_id);
2453 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2454 }
2455 len += resp_iec_m_pg(ap + len, pcontrol, target);
760f3b03 2456 offset += len;
c65b1445 2457 } else {
22017ed2 2458 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
c65b1445 2459 return check_condition_result;
9a051019 2460 }
1da177e4
LT
2461 break;
2462 default:
760f3b03
DG
2463 bad_pcode = true;
2464 break;
2465 }
2466 if (bad_pcode) {
22017ed2 2467 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
1da177e4
LT
2468 return check_condition_result;
2469 }
2470 if (msense_6)
2471 arr[0] = offset - 1;
773642d9
DG
2472 else
2473 put_unaligned_be16((offset - 2), arr + 0);
128ec190 2474 return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
1da177e4
LT
2475}
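/*
 * Illustration added for this write-up (not part of the driver): the two MODE
 * SENSE header layouts filled in above.  MODE SENSE(6) uses a 4 byte header
 * with the mode data length in byte 0 and the block descriptor length in
 * byte 3; MODE SENSE(10) uses an 8 byte header with 16 bit fields.  The mode
 * data length excludes the length field itself, hence the +1/+2 below.
 */
static int mode_header_info(const unsigned char *resp, int msense_6,
                            int *mode_data_len, int *bd_len)
{
        if (msense_6) {
                *mode_data_len = resp[0] + 1;
                *bd_len = resp[3];
                return 4;       /* offset where block descriptors/pages start */
        }
        *mode_data_len = ((resp[0] << 8) | resp[1]) + 2;
        *bd_len = (resp[6] << 8) | resp[7];
        return 8;
}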
2476
c65b1445
DG
2477#define SDEBUG_MAX_MSELECT_SZ 512
2478
fd32119b
DG
2479static int resp_mode_select(struct scsi_cmnd *scp,
2480 struct sdebug_dev_info *devip)
c65b1445
DG
2481{
2482 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
c2248fc9 2483 int param_len, res, mpage;
c65b1445 2484 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
01123ef4 2485 unsigned char *cmd = scp->cmnd;
c2248fc9 2486 int mselect6 = (MODE_SELECT == cmd[0]);
c65b1445 2487
c65b1445
DG
2488 memset(arr, 0, sizeof(arr));
2489 pf = cmd[1] & 0x10;
2490 sp = cmd[1] & 0x1;
773642d9 2491 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
c65b1445 2492 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
22017ed2 2493 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
c65b1445
DG
2494 return check_condition_result;
2495 }
9a051019
DG
2496 res = fetch_to_dev_buffer(scp, arr, param_len);
2497 if (-1 == res)
773642d9
DG
2498 return DID_ERROR << 16;
2499 else if (sdebug_verbose && (res < param_len))
cbf67842
DG
2500 sdev_printk(KERN_INFO, scp->device,
2501 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2502 __func__, param_len, res);
773642d9
DG
2503 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2504 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
4d02b9ec
GK
2505 off = bd_len + (mselect6 ? 4 : 8);
2506 if (md_len > 2 || off >= res) {
22017ed2 2507 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
c65b1445
DG
2508 return check_condition_result;
2509 }
c65b1445
DG
2510 mpage = arr[off] & 0x3f;
2511 ps = !!(arr[off] & 0x80);
2512 if (ps) {
22017ed2 2513 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
c65b1445
DG
2514 return check_condition_result;
2515 }
2516 spf = !!(arr[off] & 0x40);
773642d9 2517 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
c65b1445
DG
2518 (arr[off + 1] + 2);
2519 if ((pg_len + off) > param_len) {
cbf67842 2520 mk_sense_buffer(scp, ILLEGAL_REQUEST,
c65b1445
DG
2521 PARAMETER_LIST_LENGTH_ERR, 0);
2522 return check_condition_result;
2523 }
2524 switch (mpage) {
cbf67842
DG
2525 case 0x8: /* Caching Mode page */
2526 if (caching_pg[1] == arr[off + 1]) {
2527 memcpy(caching_pg + 2, arr + off + 2,
2528 sizeof(caching_pg) - 2);
2529 goto set_mode_changed_ua;
2530 }
2531 break;
c65b1445
DG
2532 case 0xa: /* Control Mode page */
2533 if (ctrl_m_pg[1] == arr[off + 1]) {
2534 memcpy(ctrl_m_pg + 2, arr + off + 2,
2535 sizeof(ctrl_m_pg) - 2);
9447b6ce
MP
2536 if (ctrl_m_pg[4] & 0x8)
2537 sdebug_wp = true;
2538 else
2539 sdebug_wp = false;
773642d9 2540 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
cbf67842 2541 goto set_mode_changed_ua;
c65b1445
DG
2542 }
2543 break;
2544 case 0x1c: /* Informational Exceptions Mode page */
2545 if (iec_m_pg[1] == arr[off + 1]) {
2546 memcpy(iec_m_pg + 2, arr + off + 2,
2547 sizeof(iec_m_pg) - 2);
cbf67842 2548 goto set_mode_changed_ua;
c65b1445
DG
2549 }
2550 break;
2551 default:
2552 break;
2553 }
22017ed2 2554 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
c65b1445 2555 return check_condition_result;
cbf67842
DG
2556set_mode_changed_ua:
2557 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2558 return 0;
c65b1445
DG
2559}
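/*
 * Illustration added for this write-up (not part of the driver): the mode page
 * header parsing done above.  Byte 0 of a page carries the PS bit (0x80), the
 * SPF bit (0x40) and the page code; the total page length is byte 1 plus 2 for
 * a page_0 format page, or the 16 bit field in bytes 2-3 plus 4 for a sub-page
 * format page.
 */
static int mode_page_len(const unsigned char *pg)
{
        int spf = !!(pg[0] & 0x40);

        return spf ? (((pg[2] << 8) | pg[3]) + 4) : (pg[1] + 2);
}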
2560
91d4c752 2561static int resp_temp_l_pg(unsigned char *arr)
c65b1445
DG
2562{
2563 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2564 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2565 };
2566
9a051019
DG
2567 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2568 return sizeof(temp_l_pg);
c65b1445
DG
2569}
2570
91d4c752 2571static int resp_ie_l_pg(unsigned char *arr)
c65b1445
DG
2572{
2573 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2574 };
2575
9a051019 2576 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
c65b1445
DG
2577 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2578 arr[4] = THRESHOLD_EXCEEDED;
2579 arr[5] = 0xff;
2580 }
9a051019 2581 return sizeof(ie_l_pg);
c65b1445
DG
2582}
2583
2584#define SDEBUG_MAX_LSENSE_SZ 512
2585
9a051019
DG
2586static int resp_log_sense(struct scsi_cmnd *scp,
2587 struct sdebug_dev_info *devip)
c65b1445 2588{
128ec190
GK
2589 int ppc, sp, pcode, subpcode;
2590 u32 alloc_len, len, n;
c65b1445 2591 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
01123ef4 2592 unsigned char *cmd = scp->cmnd;
c65b1445 2593
c65b1445
DG
2594 memset(arr, 0, sizeof(arr));
2595 ppc = cmd[1] & 0x2;
2596 sp = cmd[1] & 0x1;
2597 if (ppc || sp) {
22017ed2 2598 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
c65b1445
DG
2599 return check_condition_result;
2600 }
c65b1445 2601 pcode = cmd[2] & 0x3f;
23183910 2602 subpcode = cmd[3] & 0xff;
773642d9 2603 alloc_len = get_unaligned_be16(cmd + 7);
c65b1445 2604 arr[0] = pcode;
23183910
DG
2605 if (0 == subpcode) {
2606 switch (pcode) {
2607 case 0x0: /* Supported log pages log page */
2608 n = 4;
2609 arr[n++] = 0x0; /* this page */
2610 arr[n++] = 0xd; /* Temperature */
2611 arr[n++] = 0x2f; /* Informational exceptions */
2612 arr[3] = n - 4;
2613 break;
2614 case 0xd: /* Temperature log page */
2615 arr[3] = resp_temp_l_pg(arr + 4);
2616 break;
2617 case 0x2f: /* Informational exceptions log page */
2618 arr[3] = resp_ie_l_pg(arr + 4);
2619 break;
2620 default:
22017ed2 2621 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
23183910
DG
2622 return check_condition_result;
2623 }
2624 } else if (0xff == subpcode) {
2625 arr[0] |= 0x40;
2626 arr[1] = subpcode;
2627 switch (pcode) {
2628 case 0x0: /* Supported log pages and subpages log page */
2629 n = 4;
2630 arr[n++] = 0x0;
2631 arr[n++] = 0x0; /* 0,0 page */
2632 arr[n++] = 0x0;
2633 arr[n++] = 0xff; /* this page */
2634 arr[n++] = 0xd;
2635 arr[n++] = 0x0; /* Temperature */
2636 arr[n++] = 0x2f;
2637 arr[n++] = 0x0; /* Informational exceptions */
2638 arr[3] = n - 4;
2639 break;
2640 case 0xd: /* Temperature subpages */
2641 n = 4;
2642 arr[n++] = 0xd;
2643 arr[n++] = 0x0; /* Temperature */
2644 arr[3] = n - 4;
2645 break;
2646 case 0x2f: /* Informational exceptions subpages */
2647 n = 4;
2648 arr[n++] = 0x2f;
2649 arr[n++] = 0x0; /* Informational exceptions */
2650 arr[3] = n - 4;
2651 break;
2652 default:
22017ed2 2653 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
23183910
DG
2654 return check_condition_result;
2655 }
2656 } else {
22017ed2 2657 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
c65b1445
DG
2658 return check_condition_result;
2659 }
128ec190 2660 len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
c65b1445 2661 return fill_from_dev_buffer(scp, arr,
128ec190 2662 min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
c65b1445
DG
2663}
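/*
 * Illustration added for this write-up (not part of the driver): the 4 byte
 * log page header produced above.  The page code sits in the low 6 bits of
 * byte 0 (bit 6 flags that a subpage code follows in byte 1) and the parameter
 * data length is the big-endian 16 bit value in bytes 2-3.
 */
static int log_page_total_len(const unsigned char *resp)
{
        return ((resp[2] << 8) | resp[3]) + 4;  /* header plus parameters */
}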
2664
f0d1cf93
DG
2665static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2666{
2667 return devip->nr_zones != 0;
2668}
2669
2670static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2671 unsigned long long lba)
2672{
108e36f0 2673 return &devip->zstate[lba >> devip->zsize_shift];
f0d1cf93
DG
2674}
2675
2676static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2677{
64e14ece 2678 return zsp->z_type == ZBC_ZONE_TYPE_CNV;
f0d1cf93
DG
2679}
2680
2681static void zbc_close_zone(struct sdebug_dev_info *devip,
2682 struct sdeb_zone_state *zsp)
2683{
2684 enum sdebug_z_cond zc;
2685
2686 if (zbc_zone_is_conv(zsp))
2687 return;
2688
2689 zc = zsp->z_cond;
2690 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2691 return;
2692
2693 if (zc == ZC2_IMPLICIT_OPEN)
2694 devip->nr_imp_open--;
2695 else
2696 devip->nr_exp_open--;
2697
2698 if (zsp->z_wp == zsp->z_start) {
2699 zsp->z_cond = ZC1_EMPTY;
2700 } else {
2701 zsp->z_cond = ZC4_CLOSED;
2702 devip->nr_closed++;
2703 }
2704}
2705
2706static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2707{
2708 struct sdeb_zone_state *zsp = &devip->zstate[0];
2709 unsigned int i;
2710
2711 for (i = 0; i < devip->nr_zones; i++, zsp++) {
2712 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2713 zbc_close_zone(devip, zsp);
2714 return;
2715 }
2716 }
2717}
2718
2719static void zbc_open_zone(struct sdebug_dev_info *devip,
2720 struct sdeb_zone_state *zsp, bool explicit)
2721{
2722 enum sdebug_z_cond zc;
2723
2724 if (zbc_zone_is_conv(zsp))
2725 return;
2726
2727 zc = zsp->z_cond;
2728 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2729 (!explicit && zc == ZC2_IMPLICIT_OPEN))
2730 return;
2731
2732 /* Close an implicit open zone if necessary */
2733 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2734 zbc_close_zone(devip, zsp);
2735 else if (devip->max_open &&
2736 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2737 zbc_close_imp_open_zone(devip);
2738
2739 if (zsp->z_cond == ZC4_CLOSED)
2740 devip->nr_closed--;
2741 if (explicit) {
2742 zsp->z_cond = ZC3_EXPLICIT_OPEN;
2743 devip->nr_exp_open++;
2744 } else {
2745 zsp->z_cond = ZC2_IMPLICIT_OPEN;
2746 devip->nr_imp_open++;
2747 }
2748}
2749
2750static void zbc_inc_wp(struct sdebug_dev_info *devip,
2751 unsigned long long lba, unsigned int num)
2752{
2753 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
64e14ece 2754 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
f0d1cf93
DG
2755
2756 if (zbc_zone_is_conv(zsp))
2757 return;
2758
64e14ece
DLM
2759 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2760 zsp->z_wp += num;
2761 if (zsp->z_wp >= zend)
2762 zsp->z_cond = ZC5_FULL;
2763 return;
2764 }
2765
2766 while (num) {
2767 if (lba != zsp->z_wp)
2768 zsp->z_non_seq_resource = true;
2769
2770 end = lba + num;
2771 if (end >= zend) {
2772 n = zend - lba;
2773 zsp->z_wp = zend;
2774 } else if (end > zsp->z_wp) {
2775 n = num;
2776 zsp->z_wp = end;
2777 } else {
2778 n = num;
2779 }
2780 if (zsp->z_wp >= zend)
2781 zsp->z_cond = ZC5_FULL;
2782
2783 num -= n;
2784 lba += n;
2785 if (num) {
2786 zsp++;
2787 zend = zsp->z_start + zsp->z_size;
2788 }
2789 }
f0d1cf93
DG
2790}
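/*
 * Illustration added for this write-up (not part of the driver): the write
 * pointer rule applied above for a sequential-write-required zone, reduced to
 * a tiny standalone model.  The struct and field names are invented for the
 * sketch.
 */
#include <stdint.h>

struct zone_model {
        uint64_t start;         /* first LBA of the zone */
        uint64_t size;          /* zone size in LBAs */
        uint64_t wp;            /* current write pointer */
        int full;               /* zone condition FULL reached */
};

static void zone_advance_wp(struct zone_model *z, uint32_t num)
{
        z->wp += num;
        if (z->wp >= z->start + z->size)
                z->full = 1;    /* equivalent of ZC5_FULL above */
}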
2791
2792static int check_zbc_access_params(struct scsi_cmnd *scp,
2793 unsigned long long lba, unsigned int num, bool write)
2794{
2795 struct scsi_device *sdp = scp->device;
2796 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2797 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2798 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2799
2800 if (!write) {
64e14ece
DLM
2801 if (devip->zmodel == BLK_ZONED_HA)
2802 return 0;
2803 /* For host-managed, reads cannot cross zone types boundaries */
f0d1cf93
DG
2804 if (zsp_end != zsp &&
2805 zbc_zone_is_conv(zsp) &&
2806 !zbc_zone_is_conv(zsp_end)) {
2807 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2808 LBA_OUT_OF_RANGE,
2809 READ_INVDATA_ASCQ);
2810 return check_condition_result;
2811 }
2812 return 0;
2813 }
2814
2815 /* No restrictions for writes within conventional zones */
2816 if (zbc_zone_is_conv(zsp)) {
2817 if (!zbc_zone_is_conv(zsp_end)) {
2818 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2819 LBA_OUT_OF_RANGE,
2820 WRITE_BOUNDARY_ASCQ);
2821 return check_condition_result;
2822 }
2823 return 0;
2824 }
2825
64e14ece
DLM
2826 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2827 /* Writes cannot cross sequential zone boundaries */
2828 if (zsp_end != zsp) {
2829 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2830 LBA_OUT_OF_RANGE,
2831 WRITE_BOUNDARY_ASCQ);
2832 return check_condition_result;
2833 }
2834 /* Cannot write full zones */
2835 if (zsp->z_cond == ZC5_FULL) {
2836 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2837 INVALID_FIELD_IN_CDB, 0);
2838 return check_condition_result;
2839 }
2840 /* Writes must be aligned to the zone WP */
2841 if (lba != zsp->z_wp) {
2842 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2843 LBA_OUT_OF_RANGE,
2844 UNALIGNED_WRITE_ASCQ);
2845 return check_condition_result;
2846 }
f0d1cf93
DG
2847 }
2848
2849 /* Handle implicit open of closed and empty zones */
2850 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2851 if (devip->max_open &&
2852 devip->nr_exp_open >= devip->max_open) {
2853 mk_sense_buffer(scp, DATA_PROTECT,
2854 INSUFF_RES_ASC,
2855 INSUFF_ZONE_ASCQ);
2856 return check_condition_result;
2857 }
2858 zbc_open_zone(devip, zsp, false);
2859 }
2860
2861 return 0;
2862}
2863
2864static inline int check_device_access_params
2865 (struct scsi_cmnd *scp, unsigned long long lba,
2866 unsigned int num, bool write)
1da177e4 2867{
f0d1cf93
DG
2868 struct scsi_device *sdp = scp->device;
2869 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2870
c65b1445 2871 if (lba + num > sdebug_capacity) {
22017ed2 2872 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
1da177e4
LT
2873 return check_condition_result;
2874 }
c65b1445
DG
2875 /* transfer length excessive (tie in to block limits VPD page) */
2876 if (num > sdebug_store_sectors) {
22017ed2 2877 /* needs work to find which cdb byte 'num' comes from */
cbf67842 2878 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
c65b1445
DG
2879 return check_condition_result;
2880 }
9447b6ce
MP
2881 if (write && unlikely(sdebug_wp)) {
2882 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2883 return check_condition_result;
2884 }
f0d1cf93
DG
2885 if (sdebug_dev_is_zoned(devip))
2886 return check_zbc_access_params(scp, lba, num, write);
2887
19789100
FT
2888 return 0;
2889}
2890
b6ff8ca7
DG
2891/*
2892 * Note: if BUG_ON() fires it usually indicates a problem with the parser
2893 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2894 * that access any of the "stores" in struct sdeb_store_info should call this
2895 * function with bug_if_fake_rw set to true.
2896 */
2897static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2898 bool bug_if_fake_rw)
87c715dc 2899{
b6ff8ca7
DG
2900 if (sdebug_fake_rw) {
2901 BUG_ON(bug_if_fake_rw); /* See note above */
2902 return NULL;
2903 }
2904 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
87c715dc
DG
2905}
2906
a4517511 2907/* Returns number of bytes copied or -1 if error. */
87c715dc
DG
2908static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2909 u32 sg_skip, u64 lba, u32 num, bool do_write)
19789100
FT
2910{
2911 int ret;
c2248fc9 2912 u64 block, rest = 0;
a4517511 2913 enum dma_data_direction dir;
87c715dc
DG
2914 struct scsi_data_buffer *sdb = &scp->sdb;
2915 u8 *fsp;
a4517511 2916
c2248fc9 2917 if (do_write) {
a4517511 2918 dir = DMA_TO_DEVICE;
4f2c8bf6 2919 write_since_sync = true;
a4517511 2920 } else {
a4517511 2921 dir = DMA_FROM_DEVICE;
a4517511 2922 }
19789100 2923
87c715dc 2924 if (!sdb->length || !sip)
a4517511 2925 return 0;
87c715dc 2926 if (scp->sc_data_direction != dir)
a4517511 2927 return -1;
87c715dc 2928 fsp = sip->storep;
19789100
FT
2929
2930 block = do_div(lba, sdebug_store_sectors);
2931 if (block + num > sdebug_store_sectors)
2932 rest = block + num - sdebug_store_sectors;
2933
386ecb12 2934 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
87c715dc 2935 fsp + (block * sdebug_sector_size),
0a7e69c7 2936 (num - rest) * sdebug_sector_size, sg_skip, do_write);
773642d9 2937 if (ret != (num - rest) * sdebug_sector_size)
a4517511
AM
2938 return ret;
2939
2940 if (rest) {
386ecb12 2941 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
87c715dc 2942 fsp, rest * sdebug_sector_size,
0a7e69c7
DG
2943 sg_skip + ((num - rest) * sdebug_sector_size),
2944 do_write);
a4517511 2945 }
19789100
FT
2946
2947 return ret;
2948}
2949
87c715dc
DG
2950/* Returns number of bytes copied or -1 if error. */
2951static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2952{
2953 struct scsi_data_buffer *sdb = &scp->sdb;
2954
2955 if (!sdb->length)
2956 return 0;
2957 if (scp->sc_data_direction != DMA_TO_DEVICE)
2958 return -1;
2959 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2960 num * sdebug_sector_size, 0, true);
2961}
2962
 2963 /* If the first num blocks at sip->storep+lba match the first num blocks of
 2964 * arr then copy the upper half of arr into sip->storep+lba (unless
38d5c833 2965 * compare_only is set) and return true. Return false if the comparison fails. */
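/*
 * Layout assumed for arr (see resp_comp_write() which fetches dnum = 2 * num
 * blocks into it): the first num logical blocks hold the verify (compare)
 * data and the following num blocks hold the data to write when the
 * comparison succeeds and compare_only is false.
 */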
87c715dc 2966static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
c3e2fe92 2967 const u8 *arr, bool compare_only)
38d5c833
DG
2968{
2969 bool res;
2970 u64 block, rest = 0;
2971 u32 store_blks = sdebug_store_sectors;
773642d9 2972 u32 lb_size = sdebug_sector_size;
87c715dc 2973 u8 *fsp = sip->storep;
38d5c833
DG
2974
2975 block = do_div(lba, store_blks);
2976 if (block + num > store_blks)
2977 rest = block + num - store_blks;
2978
87c715dc 2979 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
38d5c833
DG
2980 if (!res)
2981 return res;
2982 if (rest)
87c715dc 2983 res = memcmp(fsp, arr + ((num - rest) * lb_size),
38d5c833
DG
2984 rest * lb_size);
2985 if (!res)
2986 return res;
c3e2fe92
DG
2987 if (compare_only)
2988 return true;
38d5c833 2989 arr += num * lb_size;
87c715dc 2990 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
38d5c833 2991 if (rest)
87c715dc 2992 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
38d5c833
DG
2993 return res;
2994}
2995
51d648af 2996static __be16 dif_compute_csum(const void *buf, int len)
beb40ea4 2997{
51d648af 2998 __be16 csum;
beb40ea4 2999
773642d9 3000 if (sdebug_guard)
51d648af
AM
3001 csum = (__force __be16)ip_compute_csum(buf, len);
3002 else
beb40ea4 3003 csum = cpu_to_be16(crc_t10dif(buf, len));
51d648af 3004
beb40ea4
AM
3005 return csum;
3006}
3007
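/*
 * Check one protection information tuple against a logical block of data.
 * Returns 0 when the tuple is consistent, 0x01 on a guard tag mismatch and
 * 0x03 on a reference tag mismatch; callers use the non-zero value as the
 * ASCQ of the 0x10 (logical block guard/reference tag check failed)
 * additional sense code they build.
 */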
6ebf105c 3008static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
beb40ea4
AM
3009 sector_t sector, u32 ei_lba)
3010{
773642d9 3011 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
beb40ea4
AM
3012
3013 if (sdt->guard_tag != csum) {
c1287970 3014 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
beb40ea4
AM
3015 (unsigned long)sector,
3016 be16_to_cpu(sdt->guard_tag),
3017 be16_to_cpu(csum));
3018 return 0x01;
3019 }
8475c811 3020 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
beb40ea4 3021 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
c1287970
TW
3022 pr_err("REF check failed on sector %lu\n",
3023 (unsigned long)sector);
beb40ea4
AM
3024 return 0x03;
3025 }
8475c811 3026 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
beb40ea4 3027 be32_to_cpu(sdt->ref_tag) != ei_lba) {
c1287970
TW
3028 pr_err("REF check failed on sector %lu\n",
3029 (unsigned long)sector);
beb40ea4
AM
3030 return 0x03;
3031 }
3032 return 0;
3033}
3034
87c715dc 3035static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
65f72f2a 3036 unsigned int sectors, bool read)
c6a44287 3037{
be4e11be 3038 size_t resid;
c6a44287 3039 void *paddr;
87c715dc 3040 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
b6ff8ca7 3041 scp->device->hostdata, true);
87c715dc 3042 struct t10_pi_tuple *dif_storep = sip->dif_storep;
14faa944 3043 const void *dif_store_end = dif_storep + sdebug_store_sectors;
be4e11be 3044 struct sg_mapping_iter miter;
c6a44287 3045
e18d8bea
AM
3046 /* Bytes of protection data to copy into sgl */
3047 resid = sectors * sizeof(*dif_storep);
c6a44287 3048
87c715dc
DG
3049 sg_miter_start(&miter, scsi_prot_sglist(scp),
3050 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3051 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
be4e11be
AM
3052
3053 while (sg_miter_next(&miter) && resid > 0) {
87c715dc
DG
3054 size_t len = min_t(size_t, miter.length, resid);
3055 void *start = dif_store(sip, sector);
be4e11be 3056 size_t rest = 0;
14faa944
AM
3057
3058 if (dif_store_end < start + len)
3059 rest = start + len - dif_store_end;
c6a44287 3060
be4e11be 3061 paddr = miter.addr;
14faa944 3062
65f72f2a
AM
3063 if (read)
3064 memcpy(paddr, start, len - rest);
3065 else
3066 memcpy(start, paddr, len - rest);
3067
3068 if (rest) {
3069 if (read)
3070 memcpy(paddr + len - rest, dif_storep, rest);
3071 else
3072 memcpy(dif_storep, paddr + len - rest, rest);
3073 }
c6a44287 3074
e18d8bea 3075 sector += len / sizeof(*dif_storep);
c6a44287 3076 resid -= len;
c6a44287 3077 }
be4e11be 3078 sg_miter_stop(&miter);
bb8c063c
AM
3079}
3080
87c715dc 3081static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
bb8c063c
AM
3082 unsigned int sectors, u32 ei_lba)
3083{
f7be6772 3084 int ret = 0;
bb8c063c 3085 unsigned int i;
bb8c063c 3086 sector_t sector;
87c715dc 3087 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
b6ff8ca7 3088 scp->device->hostdata, true);
87c715dc 3089 struct t10_pi_tuple *sdt;
bb8c063c 3090
c45eabec 3091 for (i = 0; i < sectors; i++, ei_lba++) {
bb8c063c 3092 sector = start_sec + i;
87c715dc 3093 sdt = dif_store(sip, sector);
bb8c063c 3094
51d648af 3095 if (sdt->app_tag == cpu_to_be16(0xffff))
bb8c063c
AM
3096 continue;
3097
f7be6772
MP
3098 /*
3099 * Because scsi_debug acts as both initiator and
3100 * target we proceed to verify the PI even if
3101 * RDPROTECT=3. This is done so the "initiator" knows
3102 * which type of error to return. Otherwise we would
3103 * have to iterate over the PI twice.
3104 */
3105 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3106 ret = dif_verify(sdt, lba2fake_store(sip, sector),
3107 sector, ei_lba);
3108 if (ret) {
3109 dif_errors++;
3110 break;
3111 }
bb8c063c 3112 }
bb8c063c 3113 }
c6a44287 3114
87c715dc 3115 dif_copy_prot(scp, start_sec, sectors, true);
c6a44287
MP
3116 dix_reads++;
3117
f7be6772 3118 return ret;
c6a44287
MP
3119}
3120
fd32119b 3121static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
19789100 3122{
87c715dc 3123 bool check_prot;
c2248fc9
DG
3124 u32 num;
3125 u32 ei_lba;
19789100 3126 int ret;
87c715dc 3127 u64 lba;
b6ff8ca7 3128 struct sdeb_store_info *sip = devip2sip(devip, true);
87c715dc
DG
3129 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3130 u8 *cmd = scp->cmnd;
19789100 3131
c2248fc9
DG
3132 switch (cmd[0]) {
3133 case READ_16:
3134 ei_lba = 0;
3135 lba = get_unaligned_be64(cmd + 2);
3136 num = get_unaligned_be32(cmd + 10);
3137 check_prot = true;
3138 break;
3139 case READ_10:
3140 ei_lba = 0;
3141 lba = get_unaligned_be32(cmd + 2);
3142 num = get_unaligned_be16(cmd + 7);
3143 check_prot = true;
3144 break;
3145 case READ_6:
3146 ei_lba = 0;
3147 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3148 (u32)(cmd[1] & 0x1f) << 16;
3149 num = (0 == cmd[4]) ? 256 : cmd[4];
3150 check_prot = true;
3151 break;
3152 case READ_12:
3153 ei_lba = 0;
3154 lba = get_unaligned_be32(cmd + 2);
3155 num = get_unaligned_be32(cmd + 6);
3156 check_prot = true;
3157 break;
3158 case XDWRITEREAD_10:
3159 ei_lba = 0;
3160 lba = get_unaligned_be32(cmd + 2);
3161 num = get_unaligned_be16(cmd + 7);
3162 check_prot = false;
3163 break;
3164 default: /* assume READ(32) */
3165 lba = get_unaligned_be64(cmd + 12);
3166 ei_lba = get_unaligned_be32(cmd + 20);
3167 num = get_unaligned_be32(cmd + 28);
3168 check_prot = false;
3169 break;
3170 }
f46eb0e9 3171 if (unlikely(have_dif_prot && check_prot)) {
8475c811 3172 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
c2248fc9
DG
3173 (cmd[1] & 0xe0)) {
3174 mk_sense_invalid_opcode(scp);
3175 return check_condition_result;
3176 }
8475c811
CH
3177 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3178 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
c2248fc9
DG
3179 (cmd[1] & 0xe0) == 0)
3180 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3181 "to DIF device\n");
3182 }
3a90a63d
DG
3183 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3184 atomic_read(&sdeb_inject_pending))) {
3185 num /= 2;
3186 atomic_set(&sdeb_inject_pending, 0);
3187 }
c2248fc9 3188
9447b6ce
MP
3189 ret = check_device_access_params(scp, lba, num, false);
3190 if (ret)
3191 return ret;
f46eb0e9 3192 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
d9da891a
LO
3193 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3194 ((lba + num) > sdebug_medium_error_start))) {
c65b1445 3195 /* claim unrecoverable read error */
c2248fc9 3196 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
c65b1445 3197 /* set info field and valid bit for fixed descriptor */
c2248fc9
DG
3198 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3199 scp->sense_buffer[0] |= 0x80; /* Valid bit */
32f7ef73
DG
3200 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3201 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
c2248fc9 3202 put_unaligned_be32(ret, scp->sense_buffer + 3);
c65b1445 3203 }
c2248fc9 3204 scsi_set_resid(scp, scsi_bufflen(scp));
1da177e4
LT
3205 return check_condition_result;
3206 }
c6a44287 3207
67da413f 3208 read_lock(macc_lckp);
6c78cc06 3209
c6a44287 3210 /* DIX + T10 DIF */
f46eb0e9 3211 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
f7be6772
MP
3212 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3213 case 1: /* Guard tag error */
3214 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3215 read_unlock(macc_lckp);
3216 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3217 return check_condition_result;
3218 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3219 read_unlock(macc_lckp);
3220 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3221 return illegal_condition_result;
3222 }
3223 break;
3224 case 3: /* Reference tag error */
3225 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3226 read_unlock(macc_lckp);
3227 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3228 return check_condition_result;
3229 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3230 read_unlock(macc_lckp);
3231 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3232 return illegal_condition_result;
3233 }
3234 break;
c6a44287
MP
3235 }
3236 }
3237
87c715dc 3238 ret = do_device_access(sip, scp, 0, lba, num, false);
67da413f 3239 read_unlock(macc_lckp);
f46eb0e9 3240 if (unlikely(ret == -1))
a4517511
AM
3241 return DID_ERROR << 16;
3242
42d387be 3243 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
a4517511 3244
3a90a63d
DG
3245 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3246 atomic_read(&sdeb_inject_pending))) {
3247 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3248 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3249 atomic_set(&sdeb_inject_pending, 0);
c2248fc9 3250 return check_condition_result;
3a90a63d 3251 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
c2248fc9
DG
3252 /* Logical block guard check failed */
3253 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3a90a63d 3254 atomic_set(&sdeb_inject_pending, 0);
c2248fc9 3255 return illegal_condition_result;
3a90a63d 3256 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
c2248fc9 3257 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3a90a63d 3258 atomic_set(&sdeb_inject_pending, 0);
c2248fc9
DG
3259 return illegal_condition_result;
3260 }
3261 }
a4517511 3262 return 0;
1da177e4
LT
3263}
3264
c6a44287 3265static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
395cef03 3266 unsigned int sectors, u32 ei_lba)
c6a44287 3267{
be4e11be 3268 int ret;
6ebf105c 3269 struct t10_pi_tuple *sdt;
be4e11be 3270 void *daddr;
65f72f2a 3271 sector_t sector = start_sec;
c6a44287 3272 int ppage_offset;
be4e11be
AM
3273 int dpage_offset;
3274 struct sg_mapping_iter diter;
3275 struct sg_mapping_iter piter;
c6a44287 3276
c6a44287
MP
3277 BUG_ON(scsi_sg_count(SCpnt) == 0);
3278 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3279
be4e11be
AM
3280 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3281 scsi_prot_sg_count(SCpnt),
3282 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3283 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3284 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3285
3286 /* For each protection page */
3287 while (sg_miter_next(&piter)) {
3288 dpage_offset = 0;
3289 if (WARN_ON(!sg_miter_next(&diter))) {
3290 ret = 0x01;
3291 goto out;
3292 }
c6a44287 3293
be4e11be 3294 for (ppage_offset = 0; ppage_offset < piter.length;
6ebf105c 3295 ppage_offset += sizeof(struct t10_pi_tuple)) {
c6a44287 3296 /* If we're at the end of the current
be4e11be 3297 * data page, advance to the next one
c6a44287 3298 */
be4e11be
AM
3299 if (dpage_offset >= diter.length) {
3300 if (WARN_ON(!sg_miter_next(&diter))) {
3301 ret = 0x01;
3302 goto out;
3303 }
3304 dpage_offset = 0;
c6a44287
MP
3305 }
3306
be4e11be
AM
3307 sdt = piter.addr + ppage_offset;
3308 daddr = diter.addr + dpage_offset;
c6a44287 3309
f7be6772
MP
3310 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3311 ret = dif_verify(sdt, daddr, sector, ei_lba);
3312 if (ret)
3313 goto out;
395cef03
MP
3314 }
3315
c6a44287 3316 sector++;
395cef03 3317 ei_lba++;
773642d9 3318 dpage_offset += sdebug_sector_size;
c6a44287 3319 }
be4e11be
AM
3320 diter.consumed = dpage_offset;
3321 sg_miter_stop(&diter);
c6a44287 3322 }
be4e11be 3323 sg_miter_stop(&piter);
c6a44287 3324
65f72f2a 3325 dif_copy_prot(SCpnt, start_sec, sectors, false);
c6a44287
MP
3326 dix_writes++;
3327
3328 return 0;
3329
3330out:
3331 dif_errors++;
be4e11be
AM
3332 sg_miter_stop(&diter);
3333 sg_miter_stop(&piter);
c6a44287
MP
3334 return ret;
3335}
3336
b90ebc3d
AM
3337static unsigned long lba_to_map_index(sector_t lba)
3338{
773642d9
DG
3339 if (sdebug_unmap_alignment)
3340 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3341 sector_div(lba, sdebug_unmap_granularity);
b90ebc3d
AM
3342 return lba;
3343}
3344
3345static sector_t map_index_to_lba(unsigned long index)
44d92694 3346{
773642d9 3347 sector_t lba = index * sdebug_unmap_granularity;
a027b5b9 3348
773642d9
DG
3349 if (sdebug_unmap_alignment)
3350 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
a027b5b9 3351 return lba;
b90ebc3d 3352}
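/*
 * Worked example for the two helpers above, using illustrative (made-up)
 * parameter values: with sdebug_unmap_granularity = 8 and
 * sdebug_unmap_alignment = 4, LBAs are biased by granularity - alignment = 4
 * before dividing, so LBAs 12..19 all map to index 2, and map_index_to_lba(2)
 * returns 12, the first LBA covered by that bit in map_storep.
 */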
44d92694 3353
87c715dc
DG
3354static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3355 unsigned int *num)
b90ebc3d
AM
3356{
3357 sector_t end;
3358 unsigned int mapped;
3359 unsigned long index;
3360 unsigned long next;
44d92694 3361
b90ebc3d 3362 index = lba_to_map_index(lba);
87c715dc 3363 mapped = test_bit(index, sip->map_storep);
44d92694
MP
3364
3365 if (mapped)
87c715dc 3366 next = find_next_zero_bit(sip->map_storep, map_size, index);
44d92694 3367 else
87c715dc 3368 next = find_next_bit(sip->map_storep, map_size, index);
44d92694 3369
b90ebc3d 3370 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
44d92694 3371 *num = end - lba;
44d92694
MP
3372 return mapped;
3373}
3374
87c715dc
DG
3375static void map_region(struct sdeb_store_info *sip, sector_t lba,
3376 unsigned int len)
44d92694 3377{
44d92694
MP
3378 sector_t end = lba + len;
3379
44d92694 3380 while (lba < end) {
b90ebc3d 3381 unsigned long index = lba_to_map_index(lba);
44d92694 3382
b90ebc3d 3383 if (index < map_size)
87c715dc 3384 set_bit(index, sip->map_storep);
44d92694 3385
b90ebc3d 3386 lba = map_index_to_lba(index + 1);
44d92694
MP
3387 }
3388}
3389
87c715dc
DG
3390static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3391 unsigned int len)
44d92694 3392{
44d92694 3393 sector_t end = lba + len;
87c715dc 3394 u8 *fsp = sip->storep;
44d92694 3395
44d92694 3396 while (lba < end) {
b90ebc3d 3397 unsigned long index = lba_to_map_index(lba);
44d92694 3398
b90ebc3d 3399 if (lba == map_index_to_lba(index) &&
773642d9 3400 lba + sdebug_unmap_granularity <= end &&
b90ebc3d 3401 index < map_size) {
87c715dc 3402 clear_bit(index, sip->map_storep);
760f3b03 3403 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
87c715dc 3404 memset(fsp + lba * sdebug_sector_size,
760f3b03 3405 (sdebug_lbprz & 1) ? 0 : 0xff,
773642d9
DG
3406 sdebug_sector_size *
3407 sdebug_unmap_granularity);
b90ebc3d 3408 }
87c715dc
DG
3409 if (sip->dif_storep) {
3410 memset(sip->dif_storep + lba, 0xff,
3411 sizeof(*sip->dif_storep) *
773642d9 3412 sdebug_unmap_granularity);
e9926b43 3413 }
be1dd78d 3414 }
b90ebc3d 3415 lba = map_index_to_lba(index + 1);
44d92694
MP
3416 }
3417}
3418
fd32119b 3419static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1da177e4 3420{
87c715dc 3421 bool check_prot;
c2248fc9
DG
3422 u32 num;
3423 u32 ei_lba;
19789100 3424 int ret;
87c715dc 3425 u64 lba;
b6ff8ca7
DG
3426 struct sdeb_store_info *sip = devip2sip(devip, true);
3427 rwlock_t *macc_lckp = &sip->macc_lck;
87c715dc 3428 u8 *cmd = scp->cmnd;
1da177e4 3429
c2248fc9
DG
3430 switch (cmd[0]) {
3431 case WRITE_16:
3432 ei_lba = 0;
3433 lba = get_unaligned_be64(cmd + 2);
3434 num = get_unaligned_be32(cmd + 10);
3435 check_prot = true;
3436 break;
3437 case WRITE_10:
3438 ei_lba = 0;
3439 lba = get_unaligned_be32(cmd + 2);
3440 num = get_unaligned_be16(cmd + 7);
3441 check_prot = true;
3442 break;
3443 case WRITE_6:
3444 ei_lba = 0;
3445 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3446 (u32)(cmd[1] & 0x1f) << 16;
3447 num = (0 == cmd[4]) ? 256 : cmd[4];
3448 check_prot = true;
3449 break;
3450 case WRITE_12:
3451 ei_lba = 0;
3452 lba = get_unaligned_be32(cmd + 2);
3453 num = get_unaligned_be32(cmd + 6);
3454 check_prot = true;
3455 break;
3456 case 0x53: /* XDWRITEREAD(10) */
3457 ei_lba = 0;
3458 lba = get_unaligned_be32(cmd + 2);
3459 num = get_unaligned_be16(cmd + 7);
3460 check_prot = false;
3461 break;
3462 default: /* assume WRITE(32) */
3463 lba = get_unaligned_be64(cmd + 12);
3464 ei_lba = get_unaligned_be32(cmd + 20);
3465 num = get_unaligned_be32(cmd + 28);
3466 check_prot = false;
3467 break;
3468 }
f46eb0e9 3469 if (unlikely(have_dif_prot && check_prot)) {
8475c811 3470 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
c2248fc9
DG
3471 (cmd[1] & 0xe0)) {
3472 mk_sense_invalid_opcode(scp);
3473 return check_condition_result;
3474 }
8475c811
CH
3475 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3476 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
c2248fc9
DG
3477 (cmd[1] & 0xe0) == 0)
3478 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3479 "to DIF device\n");
3480 }
f0d1cf93
DG
3481
3482 write_lock(macc_lckp);
9447b6ce 3483 ret = check_device_access_params(scp, lba, num, true);
f0d1cf93
DG
3484 if (ret) {
3485 write_unlock(macc_lckp);
9447b6ce 3486 return ret;
f0d1cf93 3487 }
6c78cc06 3488
c6a44287 3489 /* DIX + T10 DIF */
f46eb0e9 3490 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
f7be6772
MP
3491 switch (prot_verify_write(scp, lba, num, ei_lba)) {
3492 case 1: /* Guard tag error */
3493 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3494 write_unlock(macc_lckp);
3495 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3496 return illegal_condition_result;
3497 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3498 write_unlock(macc_lckp);
3499 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3500 return check_condition_result;
3501 }
3502 break;
3503 case 3: /* Reference tag error */
3504 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3505 write_unlock(macc_lckp);
3506 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3507 return illegal_condition_result;
3508 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3509 write_unlock(macc_lckp);
3510 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3511 return check_condition_result;
3512 }
3513 break;
c6a44287
MP
3514 }
3515 }
3516
87c715dc 3517 ret = do_device_access(sip, scp, 0, lba, num, true);
f46eb0e9 3518 if (unlikely(scsi_debug_lbp()))
87c715dc 3519 map_region(sip, lba, num);
f0d1cf93
DG
3520 /* If ZBC zone then bump its write pointer */
3521 if (sdebug_dev_is_zoned(devip))
3522 zbc_inc_wp(devip, lba, num);
67da413f 3523 write_unlock(macc_lckp);
f46eb0e9 3524 if (unlikely(-1 == ret))
773642d9 3525 return DID_ERROR << 16;
c4837394
DG
3526 else if (unlikely(sdebug_verbose &&
3527 (ret < (num * sdebug_sector_size))))
c2248fc9 3528 sdev_printk(KERN_INFO, scp->device,
cbf67842 3529 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
773642d9 3530 my_name, num * sdebug_sector_size, ret);
44d92694 3531
3a90a63d
DG
3532 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3533 atomic_read(&sdeb_inject_pending))) {
3534 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3535 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3536 atomic_set(&sdeb_inject_pending, 0);
3537 return check_condition_result;
3538 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3539 /* Logical block guard check failed */
3540 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3541 atomic_set(&sdeb_inject_pending, 0);
3542 return illegal_condition_result;
3543 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3544 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3545 atomic_set(&sdeb_inject_pending, 0);
3546 return illegal_condition_result;
c2248fc9
DG
3547 }
3548 }
44d92694
MP
3549 return 0;
3550}
3551
481b5e5c
DG
3552/*
3553 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3554 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3555 */
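/*
 * Sketch of the data-out buffer layout as parsed below (derived from this
 * parser, not quoted from T10): the first lbdof logical blocks hold a 32 byte
 * header followed by num_lrd LBA range descriptors of 32 bytes each; every
 * descriptor carries the LBA (be64) at offset 0, the number of logical
 * blocks (be32) at offset 8 and, for the 32 byte cdb variant, the expected
 * initial LBA (be32) at offset 12. The write data for the first descriptor
 * starts at byte offset lbdof * sdebug_sector_size into the same buffer.
 */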
3556static int resp_write_scat(struct scsi_cmnd *scp,
3557 struct sdebug_dev_info *devip)
3558{
3559 u8 *cmd = scp->cmnd;
3560 u8 *lrdp = NULL;
3561 u8 *up;
b6ff8ca7
DG
3562 struct sdeb_store_info *sip = devip2sip(devip, true);
3563 rwlock_t *macc_lckp = &sip->macc_lck;
481b5e5c
DG
3564 u8 wrprotect;
3565 u16 lbdof, num_lrd, k;
3566 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3567 u32 lb_size = sdebug_sector_size;
3568 u32 ei_lba;
3569 u64 lba;
481b5e5c
DG
3570 int ret, res;
3571 bool is_16;
3572 static const u32 lrd_size = 32; /* + parameter list header size */
3573
3574 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3575 is_16 = false;
3576 wrprotect = (cmd[10] >> 5) & 0x7;
3577 lbdof = get_unaligned_be16(cmd + 12);
3578 num_lrd = get_unaligned_be16(cmd + 16);
3579 bt_len = get_unaligned_be32(cmd + 28);
3580 } else { /* that leaves WRITE SCATTERED(16) */
3581 is_16 = true;
3582 wrprotect = (cmd[2] >> 5) & 0x7;
3583 lbdof = get_unaligned_be16(cmd + 4);
3584 num_lrd = get_unaligned_be16(cmd + 8);
3585 bt_len = get_unaligned_be32(cmd + 10);
3586 if (unlikely(have_dif_prot)) {
3587 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3588 wrprotect) {
3589 mk_sense_invalid_opcode(scp);
3590 return illegal_condition_result;
3591 }
3592 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3593 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3594 wrprotect == 0)
3595 sdev_printk(KERN_ERR, scp->device,
3596 "Unprotected WR to DIF device\n");
3597 }
3598 }
3599 if ((num_lrd == 0) || (bt_len == 0))
3600 return 0; /* T10 says these do-nothings are not errors */
3601 if (lbdof == 0) {
3602 if (sdebug_verbose)
3603 sdev_printk(KERN_INFO, scp->device,
3604 "%s: %s: LB Data Offset field bad\n",
3605 my_name, __func__);
3606 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3607 return illegal_condition_result;
3608 }
3609 lbdof_blen = lbdof * lb_size;
3610 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3611 if (sdebug_verbose)
3612 sdev_printk(KERN_INFO, scp->device,
3613 "%s: %s: LBA range descriptors don't fit\n",
3614 my_name, __func__);
3615 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3616 return illegal_condition_result;
3617 }
3618 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3619 if (lrdp == NULL)
3620 return SCSI_MLQUEUE_HOST_BUSY;
3621 if (sdebug_verbose)
3622 sdev_printk(KERN_INFO, scp->device,
3623 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3624 my_name, __func__, lbdof_blen);
3625 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3626 if (res == -1) {
3627 ret = DID_ERROR << 16;
3628 goto err_out;
3629 }
3630
67da413f 3631 write_lock(macc_lckp);
481b5e5c
DG
3632 sg_off = lbdof_blen;
3633 /* Spec says Buffer xfer Length field in number of LBs in dout */
3634 cum_lb = 0;
3635 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3636 lba = get_unaligned_be64(up + 0);
3637 num = get_unaligned_be32(up + 8);
3638 if (sdebug_verbose)
3639 sdev_printk(KERN_INFO, scp->device,
3640 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3641 my_name, __func__, k, lba, num, sg_off);
3642 if (num == 0)
3643 continue;
9447b6ce 3644 ret = check_device_access_params(scp, lba, num, true);
481b5e5c
DG
3645 if (ret)
3646 goto err_out_unlock;
3647 num_by = num * lb_size;
3648 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3649
3650 if ((cum_lb + num) > bt_len) {
3651 if (sdebug_verbose)
3652 sdev_printk(KERN_INFO, scp->device,
3653 "%s: %s: sum of blocks > data provided\n",
3654 my_name, __func__);
3655 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3656 0);
3657 ret = illegal_condition_result;
3658 goto err_out_unlock;
3659 }
3660
3661 /* DIX + T10 DIF */
3662 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3663 int prot_ret = prot_verify_write(scp, lba, num,
3664 ei_lba);
3665
3666 if (prot_ret) {
3667 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3668 prot_ret);
3669 ret = illegal_condition_result;
3670 goto err_out_unlock;
3671 }
3672 }
3673
87c715dc 3674 ret = do_device_access(sip, scp, sg_off, lba, num, true);
f0d1cf93
DG
3675 /* If ZBC zone then bump its write pointer */
3676 if (sdebug_dev_is_zoned(devip))
3677 zbc_inc_wp(devip, lba, num);
481b5e5c 3678 if (unlikely(scsi_debug_lbp()))
87c715dc 3679 map_region(sip, lba, num);
481b5e5c
DG
3680 if (unlikely(-1 == ret)) {
3681 ret = DID_ERROR << 16;
3682 goto err_out_unlock;
3683 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3684 sdev_printk(KERN_INFO, scp->device,
3685 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3686 my_name, num_by, ret);
3687
3a90a63d
DG
3688 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3689 atomic_read(&sdeb_inject_pending))) {
3690 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3691 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3692 atomic_set(&sdeb_inject_pending, 0);
3693 ret = check_condition_result;
3694 goto err_out_unlock;
3695 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3696 /* Logical block guard check failed */
3697 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3698 atomic_set(&sdeb_inject_pending, 0);
3699 ret = illegal_condition_result;
3700 goto err_out_unlock;
3701 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3702 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3703 atomic_set(&sdeb_inject_pending, 0);
3704 ret = illegal_condition_result;
3705 goto err_out_unlock;
481b5e5c
DG
3706 }
3707 }
3708 sg_off += num_by;
3709 cum_lb += num;
3710 }
3711 ret = 0;
3712err_out_unlock:
67da413f 3713 write_unlock(macc_lckp);
481b5e5c
DG
3714err_out:
3715 kfree(lrdp);
3716 return ret;
3717}
3718
fd32119b
DG
3719static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3720 u32 ei_lba, bool unmap, bool ndob)
44d92694 3721{
f0d1cf93
DG
3722 struct scsi_device *sdp = scp->device;
3723 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
44d92694 3724 unsigned long long i;
40d07b52 3725 u64 block, lbaa;
87c715dc
DG
3726 u32 lb_size = sdebug_sector_size;
3727 int ret;
3728 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
b6ff8ca7
DG
3729 scp->device->hostdata, true);
3730 rwlock_t *macc_lckp = &sip->macc_lck;
40d07b52 3731 u8 *fs1p;
87c715dc 3732 u8 *fsp;
44d92694 3733
f0d1cf93
DG
3734 write_lock(macc_lckp);
3735
9447b6ce 3736 ret = check_device_access_params(scp, lba, num, true);
f0d1cf93
DG
3737 if (ret) {
3738 write_unlock(macc_lckp);
44d92694 3739 return ret;
f0d1cf93 3740 }
44d92694 3741
9ed8d3dc 3742 if (unmap && scsi_debug_lbp()) {
87c715dc 3743 unmap_region(sip, lba, num);
44d92694
MP
3744 goto out;
3745 }
40d07b52
DG
3746 lbaa = lba;
3747 block = do_div(lbaa, sdebug_store_sectors);
c2248fc9 3748 /* if ndob then zero 1 logical block, else fetch 1 logical block */
87c715dc
DG
3749 fsp = sip->storep;
3750 fs1p = fsp + (block * lb_size);
c2248fc9 3751 if (ndob) {
40d07b52 3752 memset(fs1p, 0, lb_size);
c2248fc9
DG
3753 ret = 0;
3754 } else
40d07b52 3755 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
44d92694
MP
3756
3757 if (-1 == ret) {
67da413f 3758 write_unlock(&sip->macc_lck);
773642d9 3759 return DID_ERROR << 16;
40d07b52 3760 } else if (sdebug_verbose && !ndob && (ret < lb_size))
c2248fc9 3761 sdev_printk(KERN_INFO, scp->device,
e33d7c56 3762 "%s: %s: lb size=%u, IO sent=%d bytes\n",
40d07b52 3763 my_name, "write same", lb_size, ret);
44d92694
MP
3764
3765 /* Copy first sector to remaining blocks */
40d07b52
DG
3766 for (i = 1 ; i < num ; i++) {
3767 lbaa = lba + i;
3768 block = do_div(lbaa, sdebug_store_sectors);
87c715dc 3769 memmove(fsp + (block * lb_size), fs1p, lb_size);
40d07b52 3770 }
9ed8d3dc 3771 if (scsi_debug_lbp())
87c715dc 3772 map_region(sip, lba, num);
f0d1cf93
DG
3773 /* If ZBC zone then bump its write pointer */
3774 if (sdebug_dev_is_zoned(devip))
3775 zbc_inc_wp(devip, lba, num);
44d92694 3776out:
67da413f 3777 write_unlock(macc_lckp);
44d92694 3778
1da177e4
LT
3779 return 0;
3780}
3781
fd32119b
DG
3782static int resp_write_same_10(struct scsi_cmnd *scp,
3783 struct sdebug_dev_info *devip)
c2248fc9
DG
3784{
3785 u8 *cmd = scp->cmnd;
3786 u32 lba;
3787 u16 num;
3788 u32 ei_lba = 0;
3789 bool unmap = false;
3790
3791 if (cmd[1] & 0x8) {
773642d9 3792 if (sdebug_lbpws10 == 0) {
c2248fc9
DG
3793 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3794 return check_condition_result;
3795 } else
3796 unmap = true;
3797 }
3798 lba = get_unaligned_be32(cmd + 2);
3799 num = get_unaligned_be16(cmd + 7);
773642d9 3800 if (num > sdebug_write_same_length) {
c2248fc9
DG
3801 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3802 return check_condition_result;
3803 }
3804 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3805}
3806
fd32119b
DG
3807static int resp_write_same_16(struct scsi_cmnd *scp,
3808 struct sdebug_dev_info *devip)
c2248fc9
DG
3809{
3810 u8 *cmd = scp->cmnd;
3811 u64 lba;
3812 u32 num;
3813 u32 ei_lba = 0;
3814 bool unmap = false;
3815 bool ndob = false;
3816
3817 if (cmd[1] & 0x8) { /* UNMAP */
773642d9 3818 if (sdebug_lbpws == 0) {
c2248fc9
DG
3819 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3820 return check_condition_result;
3821 } else
3822 unmap = true;
3823 }
3824 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3825 ndob = true;
3826 lba = get_unaligned_be64(cmd + 2);
3827 num = get_unaligned_be32(cmd + 10);
773642d9 3828 if (num > sdebug_write_same_length) {
c2248fc9
DG
3829 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3830 return check_condition_result;
3831 }
3832 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3833}
3834
acafd0b9
EM
3835/* Note the mode field is in the same position as the (lower) service action
3836 * field. For the Report supported operation codes command, SPC-4 suggests
3837 * each mode of this command should be reported separately; left for future work. */
fd32119b
DG
3838static int resp_write_buffer(struct scsi_cmnd *scp,
3839 struct sdebug_dev_info *devip)
acafd0b9
EM
3840{
3841 u8 *cmd = scp->cmnd;
3842 struct scsi_device *sdp = scp->device;
3843 struct sdebug_dev_info *dp;
3844 u8 mode;
3845
3846 mode = cmd[1] & 0x1f;
3847 switch (mode) {
3848 case 0x4: /* download microcode (MC) and activate (ACT) */
3849 /* set UAs on this device only */
3850 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3851 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3852 break;
3853 case 0x5: /* download MC, save and ACT */
3854 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3855 break;
3856 case 0x6: /* download MC with offsets and ACT */
3857 /* set UAs on most devices (LUs) in this target */
3858 list_for_each_entry(dp,
3859 &devip->sdbg_host->dev_info_list,
3860 dev_list)
3861 if (dp->target == sdp->id) {
3862 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3863 if (devip != dp)
3864 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3865 dp->uas_bm);
3866 }
3867 break;
3868 case 0x7: /* download MC with offsets, save, and ACT */
3869 /* set UA on all devices (LUs) in this target */
3870 list_for_each_entry(dp,
3871 &devip->sdbg_host->dev_info_list,
3872 dev_list)
3873 if (dp->target == sdp->id)
3874 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3875 dp->uas_bm);
3876 break;
3877 default:
3878 /* do nothing for this command for other mode values */
3879 break;
3880 }
3881 return 0;
3882}
3883
fd32119b
DG
3884static int resp_comp_write(struct scsi_cmnd *scp,
3885 struct sdebug_dev_info *devip)
38d5c833
DG
3886{
3887 u8 *cmd = scp->cmnd;
3888 u8 *arr;
b6ff8ca7
DG
3889 struct sdeb_store_info *sip = devip2sip(devip, true);
3890 rwlock_t *macc_lckp = &sip->macc_lck;
38d5c833
DG
3891 u64 lba;
3892 u32 dnum;
773642d9 3893 u32 lb_size = sdebug_sector_size;
38d5c833 3894 u8 num;
38d5c833 3895 int ret;
d467d31f 3896 int retval = 0;
38d5c833 3897
d467d31f 3898 lba = get_unaligned_be64(cmd + 2);
38d5c833
DG
3899 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3900 if (0 == num)
3901 return 0; /* degenerate case, not an error */
8475c811 3902 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
38d5c833
DG
3903 (cmd[1] & 0xe0)) {
3904 mk_sense_invalid_opcode(scp);
3905 return check_condition_result;
3906 }
8475c811
CH
3907 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3908 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
38d5c833
DG
3909 (cmd[1] & 0xe0) == 0)
3910 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3911 "to DIF device\n");
9447b6ce
MP
3912 ret = check_device_access_params(scp, lba, num, false);
3913 if (ret)
3914 return ret;
d467d31f 3915 dnum = 2 * num;
6396bb22 3916 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
d467d31f
DG
3917 if (NULL == arr) {
3918 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3919 INSUFF_RES_ASCQ);
3920 return check_condition_result;
3921 }
38d5c833 3922
67da413f 3923 write_lock(macc_lckp);
38d5c833 3924
87c715dc 3925 ret = do_dout_fetch(scp, dnum, arr);
38d5c833 3926 if (ret == -1) {
d467d31f
DG
3927 retval = DID_ERROR << 16;
3928 goto cleanup;
773642d9 3929 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
38d5c833
DG
3930 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3931 "indicated=%u, IO sent=%d bytes\n", my_name,
3932 dnum * lb_size, ret);
c3e2fe92 3933 if (!comp_write_worker(sip, lba, num, arr, false)) {
38d5c833 3934 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
d467d31f
DG
3935 retval = check_condition_result;
3936 goto cleanup;
38d5c833
DG
3937 }
3938 if (scsi_debug_lbp())
87c715dc 3939 map_region(sip, lba, num);
d467d31f 3940cleanup:
67da413f 3941 write_unlock(macc_lckp);
d467d31f
DG
3942 kfree(arr);
3943 return retval;
38d5c833
DG
3944}
3945
44d92694
MP
3946struct unmap_block_desc {
3947 __be64 lba;
3948 __be32 blocks;
3949 __be32 __reserved;
3950};
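/*
 * A sketch of the full UNMAP parameter list as checked by resp_unmap()
 * below (derived from that code, not quoted from SBC): an 8 byte header
 * whose first two bytes hold the data length (payload_len - 2) and whose
 * next two hold the block descriptor data length (descriptors * 16),
 * followed by the 16 byte descriptors defined above. A single-descriptor
 * request therefore carries a 24 byte payload.
 */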
3951
fd32119b 3952static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
44d92694
MP
3953{
3954 unsigned char *buf;
3955 struct unmap_block_desc *desc;
b6ff8ca7
DG
3956 struct sdeb_store_info *sip = devip2sip(devip, true);
3957 rwlock_t *macc_lckp = &sip->macc_lck;
44d92694
MP
3958 unsigned int i, payload_len, descriptors;
3959 int ret;
44d92694 3960
c2248fc9
DG
3961 if (!scsi_debug_lbp())
3962 return 0; /* fib and say its done */
3963 payload_len = get_unaligned_be16(scp->cmnd + 7);
3964 BUG_ON(scsi_bufflen(scp) != payload_len);
44d92694
MP
3965
3966 descriptors = (payload_len - 8) / 16;
773642d9 3967 if (descriptors > sdebug_unmap_max_desc) {
c2248fc9
DG
3968 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3969 return check_condition_result;
3970 }
44d92694 3971
b333a819 3972 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
c2248fc9
DG
3973 if (!buf) {
3974 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3975 INSUFF_RES_ASCQ);
44d92694 3976 return check_condition_result;
c2248fc9 3977 }
44d92694 3978
c2248fc9 3979 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
44d92694
MP
3980
3981 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3982 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3983
3984 desc = (void *)&buf[8];
3985
67da413f 3986 write_lock(macc_lckp);
6c78cc06 3987
44d92694
MP
3988 for (i = 0 ; i < descriptors ; i++) {
3989 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3990 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3991
9447b6ce 3992 ret = check_device_access_params(scp, lba, num, true);
44d92694
MP
3993 if (ret)
3994 goto out;
3995
87c715dc 3996 unmap_region(sip, lba, num);
44d92694
MP
3997 }
3998
3999 ret = 0;
4000
4001out:
67da413f 4002 write_unlock(macc_lckp);
44d92694
MP
4003 kfree(buf);
4004
4005 return ret;
4006}
4007
4008#define SDEBUG_GET_LBA_STATUS_LEN 32
4009
fd32119b
DG
4010static int resp_get_lba_status(struct scsi_cmnd *scp,
4011 struct sdebug_dev_info *devip)
44d92694 4012{
c2248fc9
DG
4013 u8 *cmd = scp->cmnd;
4014 u64 lba;
4015 u32 alloc_len, mapped, num;
44d92694 4016 int ret;
87c715dc 4017 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
44d92694 4018
c2248fc9
DG
4019 lba = get_unaligned_be64(cmd + 2);
4020 alloc_len = get_unaligned_be32(cmd + 10);
44d92694
MP
4021
4022 if (alloc_len < 24)
4023 return 0;
4024
9447b6ce 4025 ret = check_device_access_params(scp, lba, 1, false);
44d92694
MP
4026 if (ret)
4027 return ret;
4028
b6ff8ca7
DG
4029 if (scsi_debug_lbp()) {
4030 struct sdeb_store_info *sip = devip2sip(devip, true);
4031
87c715dc 4032 mapped = map_state(sip, lba, &num);
b6ff8ca7 4033 } else {
c2248fc9
DG
4034 mapped = 1;
4035 /* following just in case virtual_gb changed */
4036 sdebug_capacity = get_sdebug_capacity();
4037 if (sdebug_capacity - lba <= 0xffffffff)
4038 num = sdebug_capacity - lba;
4039 else
4040 num = 0xffffffff;
4041 }
44d92694
MP
4042
4043 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
c2248fc9
DG
4044 put_unaligned_be32(20, arr); /* Parameter Data Length */
4045 put_unaligned_be64(lba, arr + 8); /* LBA */
4046 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4047 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
44d92694 4048
c2248fc9 4049 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
44d92694
MP
4050}
4051
80c49563
DG
4052static int resp_sync_cache(struct scsi_cmnd *scp,
4053 struct sdebug_dev_info *devip)
4054{
4f2c8bf6 4055 int res = 0;
80c49563
DG
4056 u64 lba;
4057 u32 num_blocks;
4058 u8 *cmd = scp->cmnd;
4059
4060 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4061 lba = get_unaligned_be32(cmd + 2);
4062 num_blocks = get_unaligned_be16(cmd + 7);
4063 } else { /* SYNCHRONIZE_CACHE(16) */
4064 lba = get_unaligned_be64(cmd + 2);
4065 num_blocks = get_unaligned_be32(cmd + 10);
4066 }
4067 if (lba + num_blocks > sdebug_capacity) {
4068 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4069 return check_condition_result;
4070 }
fc13638a 4071 if (!write_since_sync || (cmd[1] & 0x2))
4f2c8bf6
DG
4072 res = SDEG_RES_IMMED_MASK;
4073 else /* delay if write_since_sync and IMMED clear */
4074 write_since_sync = false;
4075 return res;
80c49563
DG
4076}
4077
ed9f3e25
DG
4078/*
4079 * Assuming the LBA+num_blocks is not out-of-range, this function will return
4080 * CONDITION MET if the specified blocks will fit in (or already are in) the
4081 * cache, and a GOOD status otherwise. We model a disk with a big cache so
4082 * CONDITION MET is always yielded. The emulation does try to bring the
4083 * corresponding range of main memory into the cache associated with the CPU(s).
4084 */
4085static int resp_pre_fetch(struct scsi_cmnd *scp,
4086 struct sdebug_dev_info *devip)
4087{
4088 int res = 0;
4089 u64 lba;
4090 u64 block, rest = 0;
4091 u32 nblks;
4092 u8 *cmd = scp->cmnd;
b6ff8ca7
DG
4093 struct sdeb_store_info *sip = devip2sip(devip, true);
4094 rwlock_t *macc_lckp = &sip->macc_lck;
4095 u8 *fsp = sip->storep;
ed9f3e25
DG
4096
4097 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4098 lba = get_unaligned_be32(cmd + 2);
4099 nblks = get_unaligned_be16(cmd + 7);
4100 } else { /* PRE-FETCH(16) */
4101 lba = get_unaligned_be64(cmd + 2);
4102 nblks = get_unaligned_be32(cmd + 10);
4103 }
4104 if (lba + nblks > sdebug_capacity) {
4105 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4106 return check_condition_result;
4107 }
4108 if (!fsp)
4109 goto fini;
4110 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4111 block = do_div(lba, sdebug_store_sectors);
4112 if (block + nblks > sdebug_store_sectors)
4113 rest = block + nblks - sdebug_store_sectors;
4114
4115 /* Try to bring the PRE-FETCH range into CPU's cache */
4116 read_lock(macc_lckp);
4117 prefetch_range(fsp + (sdebug_sector_size * block),
4118 (nblks - rest) * sdebug_sector_size);
4119 if (rest)
4120 prefetch_range(fsp, rest * sdebug_sector_size);
4121 read_unlock(macc_lckp);
4122fini:
4123 if (cmd[1] & 0x2)
4124 res = SDEG_RES_IMMED_MASK;
4125 return res | condition_met_result;
4126}
4127
fb0cc8d1
DG
4128#define RL_BUCKET_ELEMS 8
4129
8d039e22
DG
4130/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4131 * (W-LUN), the normal Linux scanning logic does not associate it with a
4132 * device (e.g. /dev/sg7). The following magic will make that association:
4133 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4134 * where <n> is a host number. If there are multiple targets in a host then
4135 * the above will associate a W-LUN to each target. To only get a W-LUN
4136 * for target 2, then use "echo '- 2 49409' > scan" .
4137 */
4138static int resp_report_luns(struct scsi_cmnd *scp,
4139 struct sdebug_dev_info *devip)
1da177e4 4140{
8d039e22 4141 unsigned char *cmd = scp->cmnd;
1da177e4 4142 unsigned int alloc_len;
8d039e22 4143 unsigned char select_report;
22017ed2 4144 u64 lun;
8d039e22 4145 struct scsi_lun *lun_p;
fb0cc8d1 4146 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
8d039e22
DG
4147 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4148 unsigned int wlun_cnt; /* report luns W-LUN count */
4149 unsigned int tlun_cnt; /* total LUN count */
4150 unsigned int rlen; /* response length (in bytes) */
fb0cc8d1
DG
4151 int k, j, n, res;
4152 unsigned int off_rsp = 0;
4153 const int sz_lun = sizeof(struct scsi_lun);
1da177e4 4154
19c8ead7 4155 clear_luns_changed_on_target(devip);
8d039e22
DG
4156
4157 select_report = cmd[2];
4158 alloc_len = get_unaligned_be32(cmd + 6);
4159
4160 if (alloc_len < 4) {
4161 pr_err("alloc len too small %d\n", alloc_len);
4162 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1da177e4
LT
4163 return check_condition_result;
4164 }
8d039e22
DG
4165
4166 switch (select_report) {
4167 case 0: /* all LUNs apart from W-LUNs */
4168 lun_cnt = sdebug_max_luns;
4169 wlun_cnt = 0;
4170 break;
4171 case 1: /* only W-LUNs */
c65b1445 4172 lun_cnt = 0;
8d039e22
DG
4173 wlun_cnt = 1;
4174 break;
4175 case 2: /* all LUNs */
4176 lun_cnt = sdebug_max_luns;
4177 wlun_cnt = 1;
4178 break;
4179 case 0x10: /* only administrative LUs */
4180 case 0x11: /* see SPC-5 */
4181 case 0x12: /* only subsidiary LUs owned by referenced LU */
4182 default:
4183 pr_debug("select report invalid %d\n", select_report);
4184 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4185 return check_condition_result;
4186 }
4187
4188 if (sdebug_no_lun_0 && (lun_cnt > 0))
c65b1445 4189 --lun_cnt;
8d039e22
DG
4190
4191 tlun_cnt = lun_cnt + wlun_cnt;
fb0cc8d1
DG
4192 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4193 scsi_set_resid(scp, scsi_bufflen(scp));
8d039e22
DG
4194 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4195 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4196
fb0cc8d1 4197 /* loops rely on the response header being the same size as a LUN entry (both 8 bytes) */
8d039e22 4198 lun = sdebug_no_lun_0 ? 1 : 0;
fb0cc8d1
DG
4199 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4200 memset(arr, 0, sizeof(arr));
4201 lun_p = (struct scsi_lun *)&arr[0];
4202 if (k == 0) {
4203 put_unaligned_be32(rlen, &arr[0]);
4204 ++lun_p;
4205 j = 1;
4206 }
4207 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4208 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4209 break;
4210 int_to_scsilun(lun++, lun_p);
ad0c7775
DG
4211 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4212 lun_p->scsi_lun[0] |= 0x40;
fb0cc8d1
DG
4213 }
4214 if (j < RL_BUCKET_ELEMS)
4215 break;
4216 n = j * sz_lun;
4217 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4218 if (res)
4219 return res;
4220 off_rsp += n;
4221 }
4222 if (wlun_cnt) {
4223 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4224 ++j;
4225 }
4226 if (j > 0)
4227 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
8d039e22 4228 return res;
1da177e4
LT
4229}
4230
c3e2fe92
DG
4231static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4232{
4233 bool is_bytchk3 = false;
4234 u8 bytchk;
4235 int ret, j;
4236 u32 vnum, a_num, off;
4237 const u32 lb_size = sdebug_sector_size;
c3e2fe92
DG
4238 u64 lba;
4239 u8 *arr;
4240 u8 *cmd = scp->cmnd;
b6ff8ca7
DG
4241 struct sdeb_store_info *sip = devip2sip(devip, true);
4242 rwlock_t *macc_lckp = &sip->macc_lck;
c3e2fe92
DG
4243
4244 bytchk = (cmd[1] >> 1) & 0x3;
4245 if (bytchk == 0) {
4246 return 0; /* always claim internal verify okay */
4247 } else if (bytchk == 2) {
4248 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4249 return check_condition_result;
4250 } else if (bytchk == 3) {
4251 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4252 }
4253 switch (cmd[0]) {
4254 case VERIFY_16:
4255 lba = get_unaligned_be64(cmd + 2);
4256 vnum = get_unaligned_be32(cmd + 10);
4257 break;
4258 case VERIFY: /* is VERIFY(10) */
4259 lba = get_unaligned_be32(cmd + 2);
4260 vnum = get_unaligned_be16(cmd + 7);
4261 break;
4262 default:
4263 mk_sense_invalid_opcode(scp);
4264 return check_condition_result;
4265 }
fc6c6cd3
GK
4266 if (vnum == 0)
4267 return 0; /* not an error */
c3e2fe92
DG
4268 a_num = is_bytchk3 ? 1 : vnum;
4269 /* Treat following check like one for read (i.e. no write) access */
4270 ret = check_device_access_params(scp, lba, a_num, false);
4271 if (ret)
4272 return ret;
4273
4274 arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4275 if (!arr) {
4276 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4277 INSUFF_RES_ASCQ);
4278 return check_condition_result;
4279 }
4280 /* Not changing store, so only need read access */
67da413f 4281 read_lock(macc_lckp);
c3e2fe92
DG
4282
4283 ret = do_dout_fetch(scp, a_num, arr);
4284 if (ret == -1) {
4285 ret = DID_ERROR << 16;
4286 goto cleanup;
4287 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4288 sdev_printk(KERN_INFO, scp->device,
4289 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4290 my_name, __func__, a_num * lb_size, ret);
4291 }
4292 if (is_bytchk3) {
4293 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4294 memcpy(arr + off, arr, lb_size);
4295 }
4296 ret = 0;
4297 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4298 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4299 ret = check_condition_result;
4300 goto cleanup;
4301 }
4302cleanup:
67da413f 4303 read_unlock(macc_lckp);
c3e2fe92
DG
4304 kfree(arr);
4305 return ret;
4306}
4307
f0d1cf93
DG
4308#define RZONES_DESC_HD 64
4309
4310 /* Report zones depending on start LBA and reporting options */
4311static int resp_report_zones(struct scsi_cmnd *scp,
4312 struct sdebug_dev_info *devip)
4313{
4314 unsigned int i, max_zones, rep_max_zones, nrz = 0;
4315 int ret = 0;
4316 u32 alloc_len, rep_opts, rep_len;
4317 bool partial;
4318 u64 lba, zs_lba;
4319 u8 *arr = NULL, *desc;
4320 u8 *cmd = scp->cmnd;
4321 struct sdeb_zone_state *zsp;
b6ff8ca7 4322 struct sdeb_store_info *sip = devip2sip(devip, false);
f0d1cf93
DG
4323 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4324
4325 if (!sdebug_dev_is_zoned(devip)) {
4326 mk_sense_invalid_opcode(scp);
4327 return check_condition_result;
4328 }
4329 zs_lba = get_unaligned_be64(cmd + 2);
4330 alloc_len = get_unaligned_be32(cmd + 10);
fc6c6cd3
GK
4331 if (alloc_len == 0)
4332 return 0; /* not an error */
f0d1cf93
DG
4333 rep_opts = cmd[14] & 0x3f;
4334 partial = cmd[14] & 0x80;
4335
4336 if (zs_lba >= sdebug_capacity) {
4337 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4338 return check_condition_result;
4339 }
4340
108e36f0 4341 max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
f0d1cf93
DG
4342 rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4343 max_zones);
4344
62488126 4345 arr = kzalloc(alloc_len, GFP_ATOMIC);
f0d1cf93
DG
4346 if (!arr) {
4347 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4348 INSUFF_RES_ASCQ);
4349 return check_condition_result;
4350 }
4351
4352 read_lock(macc_lckp);
4353
4354 desc = arr + 64;
4355 for (i = 0; i < max_zones; i++) {
4356 lba = zs_lba + devip->zsize * i;
4357 if (lba > sdebug_capacity)
4358 break;
4359 zsp = zbc_zone(devip, lba);
4360 switch (rep_opts) {
4361 case 0x00:
4362 /* All zones */
4363 break;
4364 case 0x01:
4365 /* Empty zones */
4366 if (zsp->z_cond != ZC1_EMPTY)
4367 continue;
4368 break;
4369 case 0x02:
4370 /* Implicit open zones */
4371 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4372 continue;
4373 break;
4374 case 0x03:
4375 /* Explicit open zones */
4376 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4377 continue;
4378 break;
4379 case 0x04:
4380 /* Closed zones */
4381 if (zsp->z_cond != ZC4_CLOSED)
4382 continue;
4383 break;
4384 case 0x05:
4385 /* Full zones */
4386 if (zsp->z_cond != ZC5_FULL)
4387 continue;
4388 break;
4389 case 0x06:
4390 case 0x07:
4391 case 0x10:
f0d1cf93 4392 /*
64e14ece
DLM
4393 * Read-only, offline, reset WP recommended are
4394 * not emulated: no zones to report;
f0d1cf93
DG
4395 */
4396 continue;
64e14ece
DLM
4397 case 0x11:
4398 /* non-seq-resource set */
4399 if (!zsp->z_non_seq_resource)
4400 continue;
4401 break;
f0d1cf93
DG
4402 case 0x3f:
4403 /* Not write pointer (conventional) zones */
4404 if (!zbc_zone_is_conv(zsp))
4405 continue;
4406 break;
4407 default:
4408 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4409 INVALID_FIELD_IN_CDB, 0);
4410 ret = check_condition_result;
4411 goto fini;
4412 }
4413
4414 if (nrz < rep_max_zones) {
4415 /* Fill zone descriptor */
64e14ece 4416 desc[0] = zsp->z_type;
f0d1cf93 4417 desc[1] = zsp->z_cond << 4;
64e14ece
DLM
4418 if (zsp->z_non_seq_resource)
4419 desc[1] |= 1 << 1;
f0d1cf93
DG
4420 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4421 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4422 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4423 desc += 64;
4424 }
4425
4426 if (partial && nrz >= rep_max_zones)
4427 break;
4428
4429 nrz++;
4430 }
4431
4432 /* Report header */
4433 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4434 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4435
4436 rep_len = (unsigned long)desc - (unsigned long)arr;
128ec190 4437 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
f0d1cf93
DG
4438
4439fini:
4440 read_unlock(macc_lckp);
4441 kfree(arr);
4442 return ret;
4443}
4444
4445/* Logic transplanted from tcmu-runner, file_zbc.c */
4446static void zbc_open_all(struct sdebug_dev_info *devip)
4447{
4448 struct sdeb_zone_state *zsp = &devip->zstate[0];
4449 unsigned int i;
4450
4451 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4452 if (zsp->z_cond == ZC4_CLOSED)
4453 zbc_open_zone(devip, &devip->zstate[i], true);
4454 }
4455}
4456
4457static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4458{
4459 int res = 0;
4460 u64 z_id;
4461 enum sdebug_z_cond zc;
4462 u8 *cmd = scp->cmnd;
4463 struct sdeb_zone_state *zsp;
4464 bool all = cmd[14] & 0x01;
b6ff8ca7 4465 struct sdeb_store_info *sip = devip2sip(devip, false);
f0d1cf93
DG
4466 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4467
4468 if (!sdebug_dev_is_zoned(devip)) {
4469 mk_sense_invalid_opcode(scp);
4470 return check_condition_result;
4471 }
4472
4473 write_lock(macc_lckp);
4474
4475 if (all) {
4476 /* Check if all closed zones can be open */
4477 if (devip->max_open &&
4478 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4479 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4480 INSUFF_ZONE_ASCQ);
4481 res = check_condition_result;
4482 goto fini;
4483 }
4484 /* Open all closed zones */
4485 zbc_open_all(devip);
4486 goto fini;
4487 }
4488
4489 /* Open the specified zone */
4490 z_id = get_unaligned_be64(cmd + 2);
4491 if (z_id >= sdebug_capacity) {
4492 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4493 res = check_condition_result;
4494 goto fini;
4495 }
4496
4497 zsp = zbc_zone(devip, z_id);
4498 if (z_id != zsp->z_start) {
4499 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4500 res = check_condition_result;
4501 goto fini;
4502 }
4503 if (zbc_zone_is_conv(zsp)) {
4504 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4505 res = check_condition_result;
4506 goto fini;
4507 }
4508
4509 zc = zsp->z_cond;
4510 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4511 goto fini;
4512
4513 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4514 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4515 INSUFF_ZONE_ASCQ);
4516 res = check_condition_result;
4517 goto fini;
4518 }
4519
f0d1cf93
DG
4520 zbc_open_zone(devip, zsp, true);
4521fini:
4522 write_unlock(macc_lckp);
4523 return res;
4524}
4525
4526static void zbc_close_all(struct sdebug_dev_info *devip)
4527{
4528 unsigned int i;
4529
4530 for (i = 0; i < devip->nr_zones; i++)
4531 zbc_close_zone(devip, &devip->zstate[i]);
4532}
4533
4534static int resp_close_zone(struct scsi_cmnd *scp,
4535 struct sdebug_dev_info *devip)
4536{
4537 int res = 0;
4538 u64 z_id;
4539 u8 *cmd = scp->cmnd;
4540 struct sdeb_zone_state *zsp;
4541 bool all = cmd[14] & 0x01;
b6ff8ca7 4542 struct sdeb_store_info *sip = devip2sip(devip, false);
f0d1cf93
DG
4543 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4544
4545 if (!sdebug_dev_is_zoned(devip)) {
4546 mk_sense_invalid_opcode(scp);
4547 return check_condition_result;
4548 }
4549
4550 write_lock(macc_lckp);
4551
4552 if (all) {
4553 zbc_close_all(devip);
4554 goto fini;
4555 }
4556
4557 /* Close specified zone */
4558 z_id = get_unaligned_be64(cmd + 2);
4559 if (z_id >= sdebug_capacity) {
4560 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4561 res = check_condition_result;
4562 goto fini;
4563 }
4564
4565 zsp = zbc_zone(devip, z_id);
4566 if (z_id != zsp->z_start) {
4567 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4568 res = check_condition_result;
4569 goto fini;
4570 }
4571 if (zbc_zone_is_conv(zsp)) {
4572 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4573 res = check_condition_result;
4574 goto fini;
4575 }
4576
4577 zbc_close_zone(devip, zsp);
4578fini:
4579 write_unlock(macc_lckp);
4580 return res;
4581}
4582
4583static void zbc_finish_zone(struct sdebug_dev_info *devip,
4584 struct sdeb_zone_state *zsp, bool empty)
4585{
4586 enum sdebug_z_cond zc = zsp->z_cond;
4587
4588 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4589 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4590 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4591 zbc_close_zone(devip, zsp);
4592 if (zsp->z_cond == ZC4_CLOSED)
4593 devip->nr_closed--;
4594 zsp->z_wp = zsp->z_start + zsp->z_size;
4595 zsp->z_cond = ZC5_FULL;
4596 }
4597}
4598
4599static void zbc_finish_all(struct sdebug_dev_info *devip)
4600{
4601 unsigned int i;
4602
4603 for (i = 0; i < devip->nr_zones; i++)
4604 zbc_finish_zone(devip, &devip->zstate[i], false);
4605}
4606
4607static int resp_finish_zone(struct scsi_cmnd *scp,
4608 struct sdebug_dev_info *devip)
4609{
4610 struct sdeb_zone_state *zsp;
4611 int res = 0;
4612 u64 z_id;
4613 u8 *cmd = scp->cmnd;
4614 bool all = cmd[14] & 0x01;
b6ff8ca7 4615 struct sdeb_store_info *sip = devip2sip(devip, false);
f0d1cf93
DG
4616 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4617
4618 if (!sdebug_dev_is_zoned(devip)) {
4619 mk_sense_invalid_opcode(scp);
4620 return check_condition_result;
4621 }
4622
4623 write_lock(macc_lckp);
4624
4625 if (all) {
4626 zbc_finish_all(devip);
4627 goto fini;
4628 }
4629
4630 /* Finish the specified zone */
4631 z_id = get_unaligned_be64(cmd + 2);
4632 if (z_id >= sdebug_capacity) {
4633 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4634 res = check_condition_result;
4635 goto fini;
4636 }
4637
4638 zsp = zbc_zone(devip, z_id);
4639 if (z_id != zsp->z_start) {
4640 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4641 res = check_condition_result;
4642 goto fini;
4643 }
4644 if (zbc_zone_is_conv(zsp)) {
4645 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4646 res = check_condition_result;
4647 goto fini;
4648 }
4649
4650 zbc_finish_zone(devip, zsp, true);
4651fini:
4652 write_unlock(macc_lckp);
4653 return res;
4654}
4655
4656static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4657 struct sdeb_zone_state *zsp)
4658{
4659 enum sdebug_z_cond zc;
0d684d88 4660 struct sdeb_store_info *sip = devip2sip(devip, false);
f0d1cf93
DG
4661
4662 if (zbc_zone_is_conv(zsp))
4663 return;
4664
4665 zc = zsp->z_cond;
4666 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4667 zbc_close_zone(devip, zsp);
4668
4669 if (zsp->z_cond == ZC4_CLOSED)
4670 devip->nr_closed--;
4671
0d684d88
SK
4672 if (zsp->z_wp > zsp->z_start)
4673 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4674 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4675
64e14ece 4676 zsp->z_non_seq_resource = false;
f0d1cf93
DG
4677 zsp->z_wp = zsp->z_start;
4678 zsp->z_cond = ZC1_EMPTY;
4679}
4680
4681static void zbc_rwp_all(struct sdebug_dev_info *devip)
4682{
4683 unsigned int i;
4684
4685 for (i = 0; i < devip->nr_zones; i++)
4686 zbc_rwp_zone(devip, &devip->zstate[i]);
4687}
4688
4689static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4690{
4691 struct sdeb_zone_state *zsp;
4692 int res = 0;
4693 u64 z_id;
4694 u8 *cmd = scp->cmnd;
4695 bool all = cmd[14] & 0x01;
b6ff8ca7 4696 struct sdeb_store_info *sip = devip2sip(devip, false);
f0d1cf93
DG
4697 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4698
4699 if (!sdebug_dev_is_zoned(devip)) {
4700 mk_sense_invalid_opcode(scp);
4701 return check_condition_result;
4702 }
4703
4704 write_lock(macc_lckp);
4705
4706 if (all) {
4707 zbc_rwp_all(devip);
4708 goto fini;
4709 }
4710
4711 z_id = get_unaligned_be64(cmd + 2);
4712 if (z_id >= sdebug_capacity) {
4713 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4714 res = check_condition_result;
4715 goto fini;
4716 }
4717
4718 zsp = zbc_zone(devip, z_id);
4719 if (z_id != zsp->z_start) {
4720 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4721 res = check_condition_result;
4722 goto fini;
4723 }
4724 if (zbc_zone_is_conv(zsp)) {
4725 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4726 res = check_condition_result;
4727 goto fini;
4728 }
4729
4730 zbc_rwp_zone(devip, zsp);
4731fini:
4732 write_unlock(macc_lckp);
4733 return res;
4734}
4735
c4837394
DG
4736static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4737{
c10fa55f 4738 u16 hwq;
a6e76e6f 4739 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
c4837394 4740
f7c4cdc7 4741 hwq = blk_mq_unique_tag_to_hwq(tag);
c10fa55f 4742
f7c4cdc7
JG
4743 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4744 if (WARN_ON_ONCE(hwq >= submit_queues))
4745 hwq = 0;
c10fa55f 4746
458df78b 4747 return sdebug_q_arr + hwq;
c4837394
DG
4748}
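/*
 * Illustrative note (not part of the original source): the value returned
 * by blk_mq_unique_tag() encodes the hardware queue index in its upper
 * 16 bits and the per-queue tag in the lower 16 bits, so a unique tag of
 * 0x0003002a decodes to hwq 3 via blk_mq_unique_tag_to_hwq(), which is
 * how get_queue() above selects the matching sdebug_queue.
 */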
4749
c10fa55f
JG
4750static u32 get_tag(struct scsi_cmnd *cmnd)
4751{
a6e76e6f 4752 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
c10fa55f
JG
4753}
4754
c4837394 4755/* Queued (deferred) command completions converge here. */
fd32119b 4756static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
1da177e4 4757{
7382f9d8 4758 bool aborted = sd_dp->aborted;
c4837394 4759 int qc_idx;
cbf67842 4760 int retiring = 0;
1da177e4 4761 unsigned long iflags;
c4837394 4762 struct sdebug_queue *sqp;
cbf67842
DG
4763 struct sdebug_queued_cmd *sqcp;
4764 struct scsi_cmnd *scp;
4765 struct sdebug_dev_info *devip;
1da177e4 4766
7382f9d8
DG
4767 if (unlikely(aborted))
4768 sd_dp->aborted = false;
c4837394
DG
4769 qc_idx = sd_dp->qc_idx;
4770 sqp = sdebug_q_arr + sd_dp->sqa_idx;
4771 if (sdebug_statistics) {
4772 atomic_inc(&sdebug_completions);
4773 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4774 atomic_inc(&sdebug_miss_cpus);
4775 }
4776 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4777 pr_err("wild qc_idx=%d\n", qc_idx);
1da177e4
LT
4778 return;
4779 }
c4837394 4780 spin_lock_irqsave(&sqp->qc_lock, iflags);
4a0c6f43 4781 sd_dp->defer_t = SDEB_DEFER_NONE;
c4837394 4782 sqcp = &sqp->qc_arr[qc_idx];
cbf67842 4783 scp = sqcp->a_cmnd;
b01f6f83 4784 if (unlikely(scp == NULL)) {
c4837394 4785 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
c10fa55f
JG
4786 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4787 sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
cbf67842
DG
4788 return;
4789 }
4790 devip = (struct sdebug_dev_info *)scp->device->hostdata;
f46eb0e9 4791 if (likely(devip))
cbf67842
DG
4792 atomic_dec(&devip->num_in_q);
4793 else
c1287970 4794 pr_err("devip=NULL\n");
f46eb0e9 4795 if (unlikely(atomic_read(&retired_max_queue) > 0))
cbf67842
DG
4796 retiring = 1;
4797
4798 sqcp->a_cmnd = NULL;
c4837394
DG
4799 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4800 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
c1287970 4801 pr_err("Unexpected completion\n");
1da177e4
LT
4802 return;
4803 }
cbf67842
DG
4804
4805 if (unlikely(retiring)) { /* user has reduced max_queue */
4806 int k, retval;
4807
4808 retval = atomic_read(&retired_max_queue);
c4837394
DG
4809 if (qc_idx >= retval) {
4810 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
c1287970 4811 pr_err("index %d too large\n", retval);
cbf67842
DG
4812 return;
4813 }
c4837394 4814 k = find_last_bit(sqp->in_use_bm, retval);
773642d9 4815 if ((k < sdebug_max_queue) || (k == retval))
cbf67842
DG
4816 atomic_set(&retired_max_queue, 0);
4817 else
4818 atomic_set(&retired_max_queue, k + 1);
1da177e4 4819 }
c4837394 4820 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7382f9d8
DG
4821 if (unlikely(aborted)) {
4822 if (sdebug_verbose)
4823 pr_info("bypassing scsi_done() due to aborted cmd\n");
4824 return;
4825 }
cbf67842 4826 scp->scsi_done(scp); /* callback to mid level */
1da177e4
LT
4827}
4828
cbf67842 4829/* This function is called when the high resolution timer expires. */
fd32119b 4830static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
cbf67842 4831{
a10bc12a
DG
4832 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4833 hrt);
4834 sdebug_q_cmd_complete(sd_dp);
cbf67842
DG
4835 return HRTIMER_NORESTART;
4836}
1da177e4 4837
a10bc12a 4838/* This function is called when the scheduled work queue item runs. */
fd32119b 4839static void sdebug_q_cmd_wq_complete(struct work_struct *work)
a10bc12a
DG
4840{
4841 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4842 ew.work);
4843 sdebug_q_cmd_complete(sd_dp);
4844}
4845
09ba24c1 4846static bool got_shared_uuid;
bf476433 4847static uuid_t shared_uuid;
09ba24c1 4848
f0d1cf93
DG
4849static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4850{
4851 struct sdeb_zone_state *zsp;
4852 sector_t capacity = get_sdebug_capacity();
4853 sector_t zstart = 0;
4854 unsigned int i;
4855
4856 /*
98e0a689
DLM
4857 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4858 * a zone size allowing for at least 4 zones on the device. Otherwise,
f0d1cf93
DG
4859 * use the specified zone size checking that at least 2 zones can be
4860 * created for the device.
4861 */
98e0a689 4862 if (!sdeb_zbc_zone_size_mb) {
f0d1cf93
DG
4863 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4864 >> ilog2(sdebug_sector_size);
4865 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4866 devip->zsize >>= 1;
4867 if (devip->zsize < 2) {
4868 pr_err("Device capacity too small\n");
4869 return -EINVAL;
4870 }
4871 } else {
108e36f0
DLM
4872 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4873 pr_err("Zone size is not a power of 2\n");
4874 return -EINVAL;
4875 }
98e0a689 4876 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
f0d1cf93
DG
4877 >> ilog2(sdebug_sector_size);
4878 if (devip->zsize >= capacity) {
4879 pr_err("Zone size too large for device capacity\n");
4880 return -EINVAL;
4881 }
4882 }
4883
108e36f0 4884 devip->zsize_shift = ilog2(devip->zsize);
f0d1cf93
DG
4885 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4886
aa8fecf9
DLM
4887 if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4888 pr_err("Number of conventional zones too large\n");
4889 return -EINVAL;
4890 }
4891 devip->nr_conv_zones = sdeb_zbc_nr_conv;
4892
64e14ece
DLM
4893 if (devip->zmodel == BLK_ZONED_HM) {
4894 /* zbc_max_open_zones can be 0, meaning "not reported" */
4895 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4896 devip->max_open = (devip->nr_zones - 1) / 2;
4897 else
4898 devip->max_open = sdeb_zbc_max_open;
4899 }
f0d1cf93
DG
4900
4901 devip->zstate = kcalloc(devip->nr_zones,
4902 sizeof(struct sdeb_zone_state), GFP_KERNEL);
4903 if (!devip->zstate)
4904 return -ENOMEM;
4905
4906 for (i = 0; i < devip->nr_zones; i++) {
4907 zsp = &devip->zstate[i];
4908
4909 zsp->z_start = zstart;
4910
aa8fecf9 4911 if (i < devip->nr_conv_zones) {
64e14ece 4912 zsp->z_type = ZBC_ZONE_TYPE_CNV;
f0d1cf93
DG
4913 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4914 zsp->z_wp = (sector_t)-1;
4915 } else {
64e14ece
DLM
4916 if (devip->zmodel == BLK_ZONED_HM)
4917 zsp->z_type = ZBC_ZONE_TYPE_SWR;
4918 else
4919 zsp->z_type = ZBC_ZONE_TYPE_SWP;
f0d1cf93
DG
4920 zsp->z_cond = ZC1_EMPTY;
4921 zsp->z_wp = zsp->z_start;
4922 }
4923
4924 if (zsp->z_start + devip->zsize < capacity)
4925 zsp->z_size = devip->zsize;
4926 else
4927 zsp->z_size = capacity - zsp->z_start;
4928
4929 zstart += zsp->z_size;
4930 }
4931
4932 return 0;
4933}
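/*
 * Worked example (illustrative only; values are arbitrary): with
 * sdeb_zbc_zone_size_mb=4 and sdebug_sector_size=512, the calculation
 * above gives devip->zsize = (4 * SZ_1M) >> 9 = 8192 sectors, so a
 * capacity of 262144 sectors (128 MiB) yields nr_zones = 32; if the
 * capacity were not a multiple of the zone size, only the last zone
 * would be truncated (see the z_size assignment in the loop above).
 */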
4934
fd32119b
DG
4935static struct sdebug_dev_info *sdebug_device_create(
4936 struct sdebug_host_info *sdbg_host, gfp_t flags)
5cb2fc06
FT
4937{
4938 struct sdebug_dev_info *devip;
4939
4940 devip = kzalloc(sizeof(*devip), flags);
4941 if (devip) {
09ba24c1 4942 if (sdebug_uuid_ctl == 1)
bf476433 4943 uuid_gen(&devip->lu_name);
09ba24c1
DG
4944 else if (sdebug_uuid_ctl == 2) {
4945 if (got_shared_uuid)
4946 devip->lu_name = shared_uuid;
4947 else {
bf476433 4948 uuid_gen(&shared_uuid);
09ba24c1
DG
4949 got_shared_uuid = true;
4950 devip->lu_name = shared_uuid;
4951 }
4952 }
5cb2fc06 4953 devip->sdbg_host = sdbg_host;
f0d1cf93 4954 if (sdeb_zbc_in_use) {
64e14ece 4955 devip->zmodel = sdeb_zbc_model;
f0d1cf93
DG
4956 if (sdebug_device_create_zones(devip)) {
4957 kfree(devip);
4958 return NULL;
4959 }
64e14ece
DLM
4960 } else {
4961 devip->zmodel = BLK_ZONED_NONE;
f0d1cf93
DG
4962 }
4963 devip->sdbg_host = sdbg_host;
fc13638a
DG
4964 devip->create_ts = ktime_get_boottime();
4965 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5cb2fc06
FT
4966 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4967 }
4968 return devip;
4969}
4970
f46eb0e9 4971static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
1da177e4 4972{
f46eb0e9
DG
4973 struct sdebug_host_info *sdbg_host;
4974 struct sdebug_dev_info *open_devip = NULL;
4975 struct sdebug_dev_info *devip;
1da177e4 4976
d1e4c9c5
FT
4977 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4978 if (!sdbg_host) {
c1287970 4979 pr_err("Host info NULL\n");
1da177e4 4980 return NULL;
9a051019 4981 }
ad0c7775 4982
1da177e4
LT
4983 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4984 if ((devip->used) && (devip->channel == sdev->channel) &&
9a051019
DG
4985 (devip->target == sdev->id) &&
4986 (devip->lun == sdev->lun))
4987 return devip;
1da177e4
LT
4988 else {
4989 if ((!devip->used) && (!open_devip))
4990 open_devip = devip;
4991 }
4992 }
5cb2fc06
FT
4993 if (!open_devip) { /* try and make a new one */
4994 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4995 if (!open_devip) {
c1287970 4996 pr_err("out of memory at line %d\n", __LINE__);
1da177e4
LT
4997 return NULL;
4998 }
1da177e4 4999 }
a75869d1
FT
5000
5001 open_devip->channel = sdev->channel;
5002 open_devip->target = sdev->id;
5003 open_devip->lun = sdev->lun;
5004 open_devip->sdbg_host = sdbg_host;
cbf67842
DG
5005 atomic_set(&open_devip->num_in_q, 0);
5006 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
c2248fc9 5007 open_devip->used = true;
a75869d1 5008 return open_devip;
1da177e4
LT
5009}
5010
8dea0d02 5011static int scsi_debug_slave_alloc(struct scsi_device *sdp)
1da177e4 5012{
773642d9 5013 if (sdebug_verbose)
c1287970 5014 pr_info("slave_alloc <%u %u %u %llu>\n",
8dea0d02 5015 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
8dea0d02
FT
5016 return 0;
5017}
1da177e4 5018
8dea0d02
FT
5019static int scsi_debug_slave_configure(struct scsi_device *sdp)
5020{
f46eb0e9
DG
5021 struct sdebug_dev_info *devip =
5022 (struct sdebug_dev_info *)sdp->hostdata;
a34c4e98 5023
773642d9 5024 if (sdebug_verbose)
c1287970 5025 pr_info("slave_configure <%u %u %u %llu>\n",
8dea0d02 5026 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
b01f6f83
DG
5027 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5028 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5029 if (devip == NULL) {
f46eb0e9 5030 devip = find_build_dev_info(sdp);
b01f6f83 5031 if (devip == NULL)
f46eb0e9
DG
5032 return 1; /* no resources, will be marked offline */
5033 }
c8b09f6f 5034 sdp->hostdata = devip;
773642d9 5035 if (sdebug_no_uld)
78d4e5a0 5036 sdp->no_uld_attach = 1;
9b760fd8 5037 config_cdb_len(sdp);
8dea0d02
FT
5038 return 0;
5039}
5040
5041static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5042{
5043 struct sdebug_dev_info *devip =
5044 (struct sdebug_dev_info *)sdp->hostdata;
a34c4e98 5045
773642d9 5046 if (sdebug_verbose)
c1287970 5047 pr_info("slave_destroy <%u %u %u %llu>\n",
8dea0d02
FT
5048 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5049 if (devip) {
25985edc 5050 /* make this slot available for re-use */
c2248fc9 5051 devip->used = false;
8dea0d02
FT
5052 sdp->hostdata = NULL;
5053 }
5054}
5055
10bde980
DG
5056static void stop_qc_helper(struct sdebug_defer *sd_dp,
5057 enum sdeb_defer_type defer_t)
c4837394
DG
5058{
5059 if (!sd_dp)
5060 return;
10bde980 5061 if (defer_t == SDEB_DEFER_HRT)
c4837394 5062 hrtimer_cancel(&sd_dp->hrt);
10bde980 5063 else if (defer_t == SDEB_DEFER_WQ)
c4837394
DG
5064 cancel_work_sync(&sd_dp->ew.work);
5065}
5066
a10bc12a
DG
5067/* If @cmnd is found, delete its timer or work queue and return true;
5068 else return false */

5069static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
8dea0d02
FT
5070{
5071 unsigned long iflags;
c4837394 5072 int j, k, qmax, r_qmax;
10bde980 5073 enum sdeb_defer_type l_defer_t;
c4837394 5074 struct sdebug_queue *sqp;
8dea0d02 5075 struct sdebug_queued_cmd *sqcp;
cbf67842 5076 struct sdebug_dev_info *devip;
a10bc12a 5077 struct sdebug_defer *sd_dp;
8dea0d02 5078
c4837394
DG
5079 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5080 spin_lock_irqsave(&sqp->qc_lock, iflags);
5081 qmax = sdebug_max_queue;
5082 r_qmax = atomic_read(&retired_max_queue);
5083 if (r_qmax > qmax)
5084 qmax = r_qmax;
5085 for (k = 0; k < qmax; ++k) {
5086 if (test_bit(k, sqp->in_use_bm)) {
5087 sqcp = &sqp->qc_arr[k];
5088 if (cmnd != sqcp->a_cmnd)
5089 continue;
5090 /* found */
5091 devip = (struct sdebug_dev_info *)
5092 cmnd->device->hostdata;
5093 if (devip)
5094 atomic_dec(&devip->num_in_q);
5095 sqcp->a_cmnd = NULL;
5096 sd_dp = sqcp->sd_dp;
10bde980
DG
5097 if (sd_dp) {
5098 l_defer_t = sd_dp->defer_t;
5099 sd_dp->defer_t = SDEB_DEFER_NONE;
5100 } else
5101 l_defer_t = SDEB_DEFER_NONE;
c4837394 5102 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
10bde980 5103 stop_qc_helper(sd_dp, l_defer_t);
c4837394
DG
5104 clear_bit(k, sqp->in_use_bm);
5105 return true;
cbf67842 5106 }
8dea0d02 5107 }
c4837394 5108 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
8dea0d02 5109 }
a10bc12a 5110 return false;
8dea0d02
FT
5111}
5112
a10bc12a 5113/* Deletes (stops) timers or work queues of all queued commands */
8dea0d02
FT
5114static void stop_all_queued(void)
5115{
5116 unsigned long iflags;
c4837394 5117 int j, k;
10bde980 5118 enum sdeb_defer_type l_defer_t;
c4837394 5119 struct sdebug_queue *sqp;
8dea0d02 5120 struct sdebug_queued_cmd *sqcp;
cbf67842 5121 struct sdebug_dev_info *devip;
a10bc12a 5122 struct sdebug_defer *sd_dp;
8dea0d02 5123
c4837394
DG
5124 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5125 spin_lock_irqsave(&sqp->qc_lock, iflags);
5126 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5127 if (test_bit(k, sqp->in_use_bm)) {
5128 sqcp = &sqp->qc_arr[k];
5129 if (sqcp->a_cmnd == NULL)
5130 continue;
5131 devip = (struct sdebug_dev_info *)
5132 sqcp->a_cmnd->device->hostdata;
5133 if (devip)
5134 atomic_dec(&devip->num_in_q);
5135 sqcp->a_cmnd = NULL;
5136 sd_dp = sqcp->sd_dp;
10bde980
DG
5137 if (sd_dp) {
5138 l_defer_t = sd_dp->defer_t;
5139 sd_dp->defer_t = SDEB_DEFER_NONE;
5140 } else
5141 l_defer_t = SDEB_DEFER_NONE;
c4837394 5142 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
10bde980 5143 stop_qc_helper(sd_dp, l_defer_t);
c4837394
DG
5144 clear_bit(k, sqp->in_use_bm);
5145 spin_lock_irqsave(&sqp->qc_lock, iflags);
cbf67842 5146 }
8dea0d02 5147 }
c4837394 5148 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
8dea0d02 5149 }
1da177e4
LT
5150}
5151
cbf67842
DG
5152/* Free queued command memory on heap */
5153static void free_all_queued(void)
1da177e4 5154{
c4837394
DG
5155 int j, k;
5156 struct sdebug_queue *sqp;
cbf67842
DG
5157 struct sdebug_queued_cmd *sqcp;
5158
c4837394
DG
5159 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5160 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5161 sqcp = &sqp->qc_arr[k];
5162 kfree(sqcp->sd_dp);
5163 sqcp->sd_dp = NULL;
5164 }
cbf67842 5165 }
1da177e4
LT
5166}
5167
cbf67842 5168static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
1da177e4 5169{
a10bc12a
DG
5170 bool ok;
5171
cbf67842
DG
5172 ++num_aborts;
5173 if (SCpnt) {
a10bc12a
DG
5174 ok = stop_queued_cmnd(SCpnt);
5175 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5176 sdev_printk(KERN_INFO, SCpnt->device,
5177 "%s: command%s found\n", __func__,
5178 ok ? "" : " not");
cbf67842
DG
5179 }
5180 return SUCCESS;
1da177e4
LT
5181}
5182
91d4c752 5183static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
1da177e4 5184{
1da177e4 5185 ++num_dev_resets;
cbf67842
DG
5186 if (SCpnt && SCpnt->device) {
5187 struct scsi_device *sdp = SCpnt->device;
f46eb0e9
DG
5188 struct sdebug_dev_info *devip =
5189 (struct sdebug_dev_info *)sdp->hostdata;
cbf67842 5190
773642d9 5191 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
cbf67842 5192 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
1da177e4 5193 if (devip)
cbf67842
DG
5194 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5195 }
5196 return SUCCESS;
5197}
5198
5199static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5200{
5201 struct sdebug_host_info *sdbg_host;
5202 struct sdebug_dev_info *devip;
5203 struct scsi_device *sdp;
5204 struct Scsi_Host *hp;
5205 int k = 0;
5206
5207 ++num_target_resets;
5208 if (!SCpnt)
5209 goto lie;
5210 sdp = SCpnt->device;
5211 if (!sdp)
5212 goto lie;
773642d9 5213 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
cbf67842
DG
5214 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5215 hp = sdp->host;
5216 if (!hp)
5217 goto lie;
5218 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5219 if (sdbg_host) {
5220 list_for_each_entry(devip,
5221 &sdbg_host->dev_info_list,
5222 dev_list)
5223 if (devip->target == sdp->id) {
5224 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5225 ++k;
5226 }
1da177e4 5227 }
773642d9 5228 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
cbf67842
DG
5229 sdev_printk(KERN_INFO, sdp,
5230 "%s: %d device(s) found in target\n", __func__, k);
5231lie:
1da177e4
LT
5232 return SUCCESS;
5233}
5234
91d4c752 5235static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
1da177e4
LT
5236{
5237 struct sdebug_host_info *sdbg_host;
cbf67842 5238 struct sdebug_dev_info *devip;
9a051019
DG
5239 struct scsi_device *sdp;
5240 struct Scsi_Host *hp;
cbf67842 5241 int k = 0;
1da177e4 5242
1da177e4 5243 ++num_bus_resets;
cbf67842
DG
5244 if (!(SCpnt && SCpnt->device))
5245 goto lie;
5246 sdp = SCpnt->device;
773642d9 5247 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
cbf67842
DG
5248 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5249 hp = sdp->host;
5250 if (hp) {
d1e4c9c5 5251 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
1da177e4 5252 if (sdbg_host) {
cbf67842 5253 list_for_each_entry(devip,
9a051019 5254 &sdbg_host->dev_info_list,
cbf67842
DG
5255 dev_list) {
5256 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5257 ++k;
5258 }
1da177e4
LT
5259 }
5260 }
773642d9 5261 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
cbf67842
DG
5262 sdev_printk(KERN_INFO, sdp,
5263 "%s: %d device(s) found in host\n", __func__, k);
5264lie:
1da177e4
LT
5265 return SUCCESS;
5266}
5267
91d4c752 5268static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
1da177e4 5269{
91d4c752 5270 struct sdebug_host_info *sdbg_host;
cbf67842
DG
5271 struct sdebug_dev_info *devip;
5272 int k = 0;
1da177e4 5273
1da177e4 5274 ++num_host_resets;
773642d9 5275 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
cbf67842 5276 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
9a051019
DG
5277 spin_lock(&sdebug_host_list_lock);
5278 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
cbf67842
DG
5279 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5280 dev_list) {
5281 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5282 ++k;
5283 }
9a051019
DG
5284 }
5285 spin_unlock(&sdebug_host_list_lock);
1da177e4 5286 stop_all_queued();
773642d9 5287 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
cbf67842
DG
5288 sdev_printk(KERN_INFO, SCpnt->device,
5289 "%s: %d device(s) found\n", __func__, k);
1da177e4
LT
5290 return SUCCESS;
5291}
5292
87c715dc 5293static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
1da177e4 5294{
1442f76d 5295 struct msdos_partition *pp;
979e0dc3 5296 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
1da177e4
LT
5297 int sectors_per_part, num_sectors, k;
5298 int heads_by_sects, start_sec, end_sec;
5299
5300 /* assume partition table already zeroed */
773642d9 5301 if ((sdebug_num_parts < 1) || (store_size < 1048576))
1da177e4 5302 return;
773642d9
DG
5303 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5304 sdebug_num_parts = SDEBUG_MAX_PARTS;
c1287970 5305 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
1da177e4 5306 }
8c657235 5307 num_sectors = (int)get_sdebug_capacity();
1da177e4 5308 sectors_per_part = (num_sectors - sdebug_sectors_per)
773642d9 5309 / sdebug_num_parts;
1da177e4 5310 heads_by_sects = sdebug_heads * sdebug_sectors_per;
9a051019 5311 starts[0] = sdebug_sectors_per;
979e0dc3
JP
5312 max_part_secs = sectors_per_part;
5313 for (k = 1; k < sdebug_num_parts; ++k) {
1da177e4
LT
5314 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5315 * heads_by_sects;
979e0dc3
JP
5316 if (starts[k] - starts[k - 1] < max_part_secs)
5317 max_part_secs = starts[k] - starts[k - 1];
5318 }
773642d9
DG
5319 starts[sdebug_num_parts] = num_sectors;
5320 starts[sdebug_num_parts + 1] = 0;
1da177e4
LT
5321
5322 ramp[510] = 0x55; /* magic partition markings */
5323 ramp[511] = 0xAA;
1442f76d 5324 pp = (struct msdos_partition *)(ramp + 0x1be);
1da177e4
LT
5325 for (k = 0; starts[k + 1]; ++k, ++pp) {
5326 start_sec = starts[k];
979e0dc3 5327 end_sec = starts[k] + max_part_secs - 1;
1da177e4
LT
5328 pp->boot_ind = 0;
5329
5330 pp->cyl = start_sec / heads_by_sects;
5331 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5332 / sdebug_sectors_per;
5333 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5334
5335 pp->end_cyl = end_sec / heads_by_sects;
5336 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5337 / sdebug_sectors_per;
5338 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5339
150c3544
AM
5340 pp->start_sect = cpu_to_le32(start_sec);
5341 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
1da177e4
LT
5342 pp->sys_ind = 0x83; /* plain Linux partition */
5343 }
5344}
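/*
 * Worked example (illustrative only; geometry values are assumed, not
 * taken from this file): with sdebug_heads = 8 and sdebug_sectors_per =
 * 32, heads_by_sects = 256, so a partition starting at sector 4096 is
 * encoded above as cyl = 4096 / 256 = 16, head = (4096 - 16 * 256) / 32
 * = 0 and sector = (4096 % 32) + 1 = 1.
 */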
5345
c4837394
DG
5346static void block_unblock_all_queues(bool block)
5347{
5348 int j;
5349 struct sdebug_queue *sqp;
5350
5351 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5352 atomic_set(&sqp->blocked, (int)block);
5353}
5354
5355/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5356 * commands will be processed normally before triggers occur.
5357 */
5358static void tweak_cmnd_count(void)
5359{
5360 int count, modulo;
5361
5362 modulo = abs(sdebug_every_nth);
5363 if (modulo < 2)
5364 return;
5365 block_unblock_all_queues(true);
5366 count = atomic_read(&sdebug_cmnd_count);
5367 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5368 block_unblock_all_queues(false);
5369}
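/*
 * For example (illustrative): with every_nth = 100 and a current
 * sdebug_cmnd_count of 537, the count is rounded down to 500, so the
 * next trigger fires only after 99 more commands have been processed
 * normally, matching the comment above.
 */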
5370
5371static void clear_queue_stats(void)
5372{
5373 atomic_set(&sdebug_cmnd_count, 0);
5374 atomic_set(&sdebug_completions, 0);
5375 atomic_set(&sdebug_miss_cpus, 0);
5376 atomic_set(&sdebug_a_tsf, 0);
5377}
5378
3a90a63d 5379static bool inject_on_this_cmd(void)
c4837394 5380{
3a90a63d
DG
5381 if (sdebug_every_nth == 0)
5382 return false;
5383 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
c4837394
DG
5384}
5385
a2aede97
DG
5386#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5387
c4837394
DG
5388/* Complete the processing of the thread that queued a SCSI command to this
5389 * driver. It either completes the command by calling scsi_done() or
5390 * schedules an hrtimer or work queue and then returns 0. Returns
5391 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5392 */
fd32119b 5393static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
f66b8517
MW
5394 int scsi_result,
5395 int (*pfp)(struct scsi_cmnd *,
5396 struct sdebug_dev_info *),
5397 int delta_jiff, int ndelay)
1da177e4 5398{
a2aede97 5399 bool new_sd_dp;
3a90a63d 5400 bool inject = false;
a6e76e6f 5401 bool hipri = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_HIPRI;
3a90a63d 5402 int k, num_in_q, qdepth;
a2aede97
DG
5403 unsigned long iflags;
5404 u64 ns_from_boot = 0;
c4837394
DG
5405 struct sdebug_queue *sqp;
5406 struct sdebug_queued_cmd *sqcp;
299b6c07 5407 struct scsi_device *sdp;
a10bc12a 5408 struct sdebug_defer *sd_dp;
299b6c07 5409
b01f6f83
DG
5410 if (unlikely(devip == NULL)) {
5411 if (scsi_result == 0)
f46eb0e9
DG
5412 scsi_result = DID_NO_CONNECT << 16;
5413 goto respond_in_thread;
cbf67842 5414 }
299b6c07
TW
5415 sdp = cmnd->device;
5416
cd62b7da
DG
5417 if (delta_jiff == 0)
5418 goto respond_in_thread;
1da177e4 5419
c4837394
DG
5420 sqp = get_queue(cmnd);
5421 spin_lock_irqsave(&sqp->qc_lock, iflags);
5422 if (unlikely(atomic_read(&sqp->blocked))) {
5423 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5424 return SCSI_MLQUEUE_HOST_BUSY;
5425 }
cbf67842
DG
5426 num_in_q = atomic_read(&devip->num_in_q);
5427 qdepth = cmnd->device->queue_depth;
f46eb0e9 5428 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
cd62b7da 5429 if (scsi_result) {
c4837394 5430 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
cd62b7da
DG
5431 goto respond_in_thread;
5432 } else
5433 scsi_result = device_qfull_result;
c4837394 5434 } else if (unlikely(sdebug_every_nth &&
f46eb0e9
DG
5435 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5436 (scsi_result == 0))) {
cbf67842
DG
5437 if ((num_in_q == (qdepth - 1)) &&
5438 (atomic_inc_return(&sdebug_a_tsf) >=
773642d9 5439 abs(sdebug_every_nth))) {
cbf67842 5440 atomic_set(&sdebug_a_tsf, 0);
3a90a63d 5441 inject = true;
cd62b7da 5442 scsi_result = device_qfull_result;
1da177e4
LT
5443 }
5444 }
1da177e4 5445
c4837394 5446 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
f46eb0e9 5447 if (unlikely(k >= sdebug_max_queue)) {
c4837394 5448 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
cd62b7da
DG
5449 if (scsi_result)
5450 goto respond_in_thread;
773642d9 5451 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
cd62b7da 5452 scsi_result = device_qfull_result;
773642d9 5453 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
cbf67842 5454 sdev_printk(KERN_INFO, sdp,
cd62b7da 5455 "%s: max_queue=%d exceeded, %s\n",
773642d9 5456 __func__, sdebug_max_queue,
cd62b7da
DG
5457 (scsi_result ? "status: TASK SET FULL" :
5458 "report: host busy"));
5459 if (scsi_result)
5460 goto respond_in_thread;
5461 else
cbf67842
DG
5462 return SCSI_MLQUEUE_HOST_BUSY;
5463 }
74595c04 5464 set_bit(k, sqp->in_use_bm);
cbf67842 5465 atomic_inc(&devip->num_in_q);
c4837394 5466 sqcp = &sqp->qc_arr[k];
cbf67842 5467 sqcp->a_cmnd = cmnd;
c4837394 5468 cmnd->host_scribble = (unsigned char *)sqcp;
a10bc12a 5469 sd_dp = sqcp->sd_dp;
c4837394 5470 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
c4b57d89 5471
74595c04 5472 if (!sd_dp) {
10bde980 5473 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
74595c04
DG
5474 if (!sd_dp) {
5475 atomic_dec(&devip->num_in_q);
5476 clear_bit(k, sqp->in_use_bm);
10bde980 5477 return SCSI_MLQUEUE_HOST_BUSY;
74595c04 5478 }
a2aede97
DG
5479 new_sd_dp = true;
5480 } else {
5481 new_sd_dp = false;
10bde980 5482 }
f66b8517 5483
c10fa55f
JG
5484 /* Set the hostwide tag */
5485 if (sdebug_host_max_queue)
5486 sd_dp->hc_idx = get_tag(cmnd);
5487
771f712b 5488 if (hipri)
a2aede97
DG
5489 ns_from_boot = ktime_get_boottime_ns();
5490
5491 /* one of the resp_*() response functions is called here */
3a90a63d 5492 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
f66b8517 5493 if (cmnd->result & SDEG_RES_IMMED_MASK) {
f66b8517
MW
5494 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5495 delta_jiff = ndelay = 0;
5496 }
5497 if (cmnd->result == 0 && scsi_result != 0)
5498 cmnd->result = scsi_result;
3a90a63d
DG
5499 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5500 if (atomic_read(&sdeb_inject_pending)) {
5501 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5502 atomic_set(&sdeb_inject_pending, 0);
5503 cmnd->result = check_condition_result;
5504 }
5505 }
f66b8517
MW
5506
5507 if (unlikely(sdebug_verbose && cmnd->result))
5508 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5509 __func__, cmnd->result);
5510
10bde980 5511 if (delta_jiff > 0 || ndelay > 0) {
b333a819 5512 ktime_t kt;
cbf67842 5513
b333a819 5514 if (delta_jiff > 0) {
0c4bc91d
DG
5515 u64 ns = jiffies_to_nsecs(delta_jiff);
5516
5517 if (sdebug_random && ns < U32_MAX) {
5518 ns = prandom_u32_max((u32)ns);
5519 } else if (sdebug_random) {
5520 ns >>= 12; /* scale to 4 usec precision */
5521 if (ns < U32_MAX) /* over 4 hours max */
5522 ns = prandom_u32_max((u32)ns);
5523 ns <<= 12;
5524 }
5525 kt = ns_to_ktime(ns);
5526 } else { /* ndelay has a 4.2 second max */
5527 kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5528 (u32)ndelay;
a2aede97
DG
5529 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5530 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5531
5532 if (kt <= d) { /* elapsed duration >= kt */
223f91b4 5533 spin_lock_irqsave(&sqp->qc_lock, iflags);
a2aede97
DG
5534 sqcp->a_cmnd = NULL;
5535 atomic_dec(&devip->num_in_q);
5536 clear_bit(k, sqp->in_use_bm);
223f91b4 5537 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
a2aede97
DG
5538 if (new_sd_dp)
5539 kfree(sd_dp);
5540 /* call scsi_done() from this thread */
5541 cmnd->scsi_done(cmnd);
5542 return 0;
5543 }
5544 /* otherwise reduce kt by elapsed time */
5545 kt -= d;
5546 }
0c4bc91d 5547 }
771f712b
DG
5548 if (hipri) {
5549 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
4a0c6f43
DG
5550 spin_lock_irqsave(&sqp->qc_lock, iflags);
5551 if (!sd_dp->init_poll) {
5552 sd_dp->init_poll = true;
5553 sqcp->sd_dp = sd_dp;
5554 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5555 sd_dp->qc_idx = k;
5556 }
5557 sd_dp->defer_t = SDEB_DEFER_POLL;
5558 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5559 } else {
5560 if (!sd_dp->init_hrt) {
5561 sd_dp->init_hrt = true;
5562 sqcp->sd_dp = sd_dp;
5563 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5564 HRTIMER_MODE_REL_PINNED);
5565 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5566 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5567 sd_dp->qc_idx = k;
5568 }
5569 sd_dp->defer_t = SDEB_DEFER_HRT;
5570 /* schedule the invocation of scsi_done() for a later time */
5571 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
1da177e4 5572 }
c4837394
DG
5573 if (sdebug_statistics)
5574 sd_dp->issuing_cpu = raw_smp_processor_id();
c4837394 5575 } else { /* jdelay < 0, use work queue */
3a90a63d
DG
5576 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5577 atomic_read(&sdeb_inject_pending)))
7382f9d8 5578 sd_dp->aborted = true;
771f712b
DG
5579 if (hipri) {
5580 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
4a0c6f43
DG
5581 spin_lock_irqsave(&sqp->qc_lock, iflags);
5582 if (!sd_dp->init_poll) {
5583 sd_dp->init_poll = true;
5584 sqcp->sd_dp = sd_dp;
5585 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5586 sd_dp->qc_idx = k;
5587 }
5588 sd_dp->defer_t = SDEB_DEFER_POLL;
5589 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5590 } else {
5591 if (!sd_dp->init_wq) {
5592 sd_dp->init_wq = true;
5593 sqcp->sd_dp = sd_dp;
5594 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5595 sd_dp->qc_idx = k;
5596 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5597 }
5598 sd_dp->defer_t = SDEB_DEFER_WQ;
5599 schedule_work(&sd_dp->ew.work);
5600 }
5601 if (sdebug_statistics)
5602 sd_dp->issuing_cpu = raw_smp_processor_id();
5603 if (unlikely(sd_dp->aborted)) {
a6e76e6f
BVA
5604 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5605 scsi_cmd_to_rq(cmnd)->tag);
5606 blk_abort_request(scsi_cmd_to_rq(cmnd));
3a90a63d 5607 atomic_set(&sdeb_inject_pending, 0);
4a0c6f43 5608 sd_dp->aborted = false;
7382f9d8 5609 }
1da177e4 5610 }
3a90a63d
DG
5611 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5612 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5613 num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
cbf67842 5614 return 0;
cd62b7da
DG
5615
5616respond_in_thread: /* call back to mid-layer using invocation thread */
f66b8517
MW
5617 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5618 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5619 if (cmnd->result == 0 && scsi_result != 0)
5620 cmnd->result = scsi_result;
cd62b7da
DG
5621 cmnd->scsi_done(cmnd);
5622 return 0;
1da177e4 5623}
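/*
 * Summary of the completion paths taken above: delta_jiff == 0 (and
 * errors with no devip) respond in the submitting thread; a positive
 * delay or ndelay arms an hrtimer (SDEB_DEFER_HRT); a negative jdelay
 * defers to a work queue (SDEB_DEFER_WQ); REQ_HIPRI requests are instead
 * parked for the poll path (SDEB_DEFER_POLL).
 */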
cbf67842 5624
23183910
DG
5625/* Note: The following macros create attribute files in the
5626 /sys/module/scsi_debug/parameters directory. Unfortunately this
5627 driver is not notified when one of them changes, so it cannot trigger
5628 auxiliary actions as it can when the corresponding attribute in the
5629 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5630 */
773642d9
DG
5631module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5632module_param_named(ato, sdebug_ato, int, S_IRUGO);
9b760fd8 5633module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
773642d9 5634module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
c2206098 5635module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
773642d9
DG
5636module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5637module_param_named(dif, sdebug_dif, int, S_IRUGO);
5638module_param_named(dix, sdebug_dix, int, S_IRUGO);
5639module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5640module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5641module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5642module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5643module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
c10fa55f 5644module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
e5203cf0 5645module_param_string(inq_product, sdebug_inq_product_id,
5d807076 5646 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
e5203cf0 5647module_param_string(inq_rev, sdebug_inq_product_rev,
5d807076
DG
5648 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5649module_param_string(inq_vendor, sdebug_inq_vendor_id,
5650 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5651module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
773642d9
DG
5652module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5653module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5654module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
773642d9 5655module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
ad0c7775 5656module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
773642d9
DG
5657module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5658module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5d807076
DG
5659module_param_named(medium_error_count, sdebug_medium_error_count, int,
5660 S_IRUGO | S_IWUSR);
5661module_param_named(medium_error_start, sdebug_medium_error_start, int,
5662 S_IRUGO | S_IWUSR);
773642d9
DG
5663module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5664module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5665module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5666module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5667module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5668module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5d807076 5669module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
773642d9 5670module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
87c715dc
DG
5671module_param_named(per_host_store, sdebug_per_host_store, bool,
5672 S_IRUGO | S_IWUSR);
773642d9
DG
5673module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5674module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
0c4bc91d 5675module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
773642d9
DG
5676module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5677module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5678module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
c4837394 5679module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
773642d9 5680module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
c4837394 5681module_param_named(submit_queues, submit_queues, int, S_IRUGO);
c4b57d89 5682module_param_named(poll_queues, poll_queues, int, S_IRUGO);
fc13638a 5683module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
773642d9
DG
5684module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5685module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5686module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5687module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
09ba24c1 5688module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5d807076 5689module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
773642d9 5690module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5b94e232 5691 S_IRUGO | S_IWUSR);
9447b6ce 5692module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
773642d9 5693module_param_named(write_same_length, sdebug_write_same_length, int,
5b94e232 5694 S_IRUGO | S_IWUSR);
9267e0eb 5695module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
380603a5 5696module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
aa8fecf9 5697module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
98e0a689 5698module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
1da177e4
LT
5699
5700MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5701MODULE_DESCRIPTION("SCSI debug adapter driver");
5702MODULE_LICENSE("GPL");
b01f6f83 5703MODULE_VERSION(SDEBUG_VERSION);
1da177e4 5704
5d807076 5705MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5b94e232 5706MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
9b760fd8 5707MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
0759c666 5708MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
cbf67842 5709MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
c2248fc9 5710MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs (def=8)");
5b94e232
MP
5711MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5712MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
c65b1445 5713MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
beb87c33 5714MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
23183910 5715MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5b94e232 5716MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
185dd232 5717MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
c10fa55f
JG
5718MODULE_PARM_DESC(host_max_queue,
5719 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
e5203cf0 5720MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
9b760fd8
DG
5721MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5722 SDEBUG_VERSION "\")");
5d807076
DG
5723MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5724MODULE_PARM_DESC(lbprz,
5725 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5b94e232
MP
5726MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5727MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5728MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5729MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
ad0c7775 5730MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1->flat address method");
fc09acb7 5731MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
cbf67842 5732MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
d9da891a 5733MODULE_PARM_DESC(medium_error_count, "count of sectors over which to return MEDIUM error");
5d807076 5734MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
cbf67842 5735MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
c65b1445 5736MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
78d4e5a0 5737MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
1da177e4 5738MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
c65b1445 5739MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
32c5844a 5740MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5d807076 5741MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
6f3cbf55 5742MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5d807076 5743MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5b94e232 5744MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
fc09acb7 5745MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
1da177e4 5746MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0[disk])");
0c4bc91d 5747MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
d986788b 5748MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
760f3b03 5749MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7[SPC-5])");
ea61fca5 5750MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
c4837394 5751MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
c2248fc9 5752MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
c4837394 5753MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
fc13638a 5754MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5b94e232
MP
5755MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5756MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
6014759c
MP
5757MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0xffffffff)");
5758MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
09ba24c1
DG
5759MODULE_PARM_DESC(uuid_ctl,
5760 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
c2248fc9 5761MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5b94e232 5762MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
9447b6ce 5763MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5b94e232 5764MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
9267e0eb 5765MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
380603a5 5766MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
aa8fecf9 5767MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
98e0a689 5768MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
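/*
 * Example invocation (illustrative only; the parameter values are
 * arbitrary, but every name corresponds to a module parameter declared
 * above):
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=1 max_luns=2 \
 *            zbc=managed zone_size_mb=4 zone_nr_conv=1 delay=1
 */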
1da177e4 5769
760f3b03
DG
5770#define SDEBUG_INFO_LEN 256
5771static char sdebug_info[SDEBUG_INFO_LEN];
1da177e4 5772
91d4c752 5773static const char *scsi_debug_info(struct Scsi_Host *shp)
1da177e4 5774{
c4837394
DG
5775 int k;
5776
760f3b03
DG
5777 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5778 my_name, SDEBUG_VERSION, sdebug_version_date);
5779 if (k >= (SDEBUG_INFO_LEN - 1))
c4837394 5780 return sdebug_info;
760f3b03
DG
5781 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5782 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5783 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5784 "statistics", (int)sdebug_statistics);
1da177e4
LT
5785 return sdebug_info;
5786}
5787
cbf67842 5788/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
fd32119b
DG
5789static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5790 int length)
1da177e4 5791{
c8ed555a
AV
5792 char arr[16];
5793 int opts;
5794 int minLen = length > 15 ? 15 : length;
1da177e4 5795
c8ed555a
AV
5796 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5797 return -EACCES;
5798 memcpy(arr, buffer, minLen);
5799 arr[minLen] = '\0';
5800 if (1 != sscanf(arr, "%d", &opts))
5801 return -EINVAL;
773642d9
DG
5802 sdebug_opts = opts;
5803 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5804 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5805 if (sdebug_every_nth != 0)
c4837394 5806 tweak_cmnd_count();
c8ed555a
AV
5807 return length;
5808}
1da177e4 5809
cbf67842
DG
5810/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5811 * same for each scsi_debug host (if more than one). Some of the counters
5812 * in the output are not atomic, so they may be inaccurate on a busy system. */
c8ed555a
AV
5813static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5814{
c4837394
DG
5815 int f, j, l;
5816 struct sdebug_queue *sqp;
87c715dc 5817 struct sdebug_host_info *sdhp;
c4837394
DG
5818
5819 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5820 SDEBUG_VERSION, sdebug_version_date);
5821 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5822 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5823 sdebug_opts, sdebug_every_nth);
5824 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5825 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5826 sdebug_sector_size, "bytes");
5827 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5828 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5829 num_aborts);
5830 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5831 num_dev_resets, num_target_resets, num_bus_resets,
5832 num_host_resets);
5833 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5834 dix_reads, dix_writes, dif_errors);
458df78b
BVA
5835 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5836 sdebug_statistics);
4a0c6f43 5837 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
c4837394
DG
5838 atomic_read(&sdebug_cmnd_count),
5839 atomic_read(&sdebug_completions),
5840 "miss_cpus", atomic_read(&sdebug_miss_cpus),
4a0c6f43
DG
5841 atomic_read(&sdebug_a_tsf),
5842 atomic_read(&sdeb_mq_poll_count));
c4837394
DG
5843
5844 seq_printf(m, "submit_queues=%d\n", submit_queues);
5845 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5846 seq_printf(m, " queue %d:\n", j);
5847 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5848 if (f != sdebug_max_queue) {
5849 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5850 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
5851 "first,last bits", f, l);
5852 }
cbf67842 5853 }
87c715dc
DG
5854
5855 seq_printf(m, "this host_no=%d\n", host->host_no);
5856 if (!xa_empty(per_store_ap)) {
5857 bool niu;
5858 int idx;
5859 unsigned long l_idx;
5860 struct sdeb_store_info *sip;
5861
5862 seq_puts(m, "\nhost list:\n");
5863 j = 0;
5864 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5865 idx = sdhp->si_idx;
5866 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
5867 sdhp->shost->host_no, idx);
5868 ++j;
5869 }
5870 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5871 sdeb_most_recent_idx);
5872 j = 0;
5873 xa_for_each(per_store_ap, l_idx, sip) {
5874 niu = xa_get_mark(per_store_ap, l_idx,
5875 SDEB_XA_NOT_IN_USE);
5876 idx = (int)l_idx;
5877 seq_printf(m, " %d: idx=%d%s\n", j, idx,
5878 (niu ? " not_in_use" : ""));
5879 ++j;
5880 }
5881 }
c8ed555a 5882 return 0;
1da177e4
LT
5883}
5884
82069379 5885static ssize_t delay_show(struct device_driver *ddp, char *buf)
1da177e4 5886{
c2206098 5887 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
1da177e4 5888}
c4837394
DG
5889/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5890 * of delay is jiffies.
5891 */
82069379
AM
5892static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5893 size_t count)
1da177e4 5894{
c2206098 5895 int jdelay, res;
cbf67842 5896
b01f6f83 5897 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
cbf67842 5898 res = count;
c2206098 5899 if (sdebug_jdelay != jdelay) {
c4837394
DG
5900 int j, k;
5901 struct sdebug_queue *sqp;
5902
5903 block_unblock_all_queues(true);
5904 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5905 ++j, ++sqp) {
5906 k = find_first_bit(sqp->in_use_bm,
5907 sdebug_max_queue);
5908 if (k != sdebug_max_queue) {
5909 res = -EBUSY; /* queued commands */
5910 break;
5911 }
5912 }
5913 if (res > 0) {
c2206098 5914 sdebug_jdelay = jdelay;
773642d9 5915 sdebug_ndelay = 0;
cbf67842 5916 }
c4837394 5917 block_unblock_all_queues(false);
1da177e4 5918 }
cbf67842 5919 return res;
1da177e4
LT
5920 }
5921 return -EINVAL;
5922}
82069379 5923static DRIVER_ATTR_RW(delay);
1da177e4 5924
cbf67842
DG
5925static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5926{
773642d9 5927 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
cbf67842
DG
5928}
5929/* Returns -EBUSY if ndelay is being changed and commands are queued */
c2206098 5930/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
cbf67842 5931static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
fd32119b 5932 size_t count)
cbf67842 5933{
c4837394 5934 int ndelay, res;
cbf67842
DG
5935
5936 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
c4837394 5937 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
cbf67842 5938 res = count;
773642d9 5939 if (sdebug_ndelay != ndelay) {
c4837394
DG
5940 int j, k;
5941 struct sdebug_queue *sqp;
5942
5943 block_unblock_all_queues(true);
5944 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5945 ++j, ++sqp) {
5946 k = find_first_bit(sqp->in_use_bm,
5947 sdebug_max_queue);
5948 if (k != sdebug_max_queue) {
5949 res = -EBUSY; /* queued commands */
5950 break;
5951 }
5952 }
5953 if (res > 0) {
773642d9 5954 sdebug_ndelay = ndelay;
c2206098
DG
5955 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
5956 : DEF_JDELAY;
cbf67842 5957 }
c4837394 5958 block_unblock_all_queues(false);
cbf67842
DG
5959 }
5960 return res;
5961 }
5962 return -EINVAL;
5963}
5964static DRIVER_ATTR_RW(ndelay);
5965
82069379 5966static ssize_t opts_show(struct device_driver *ddp, char *buf)
1da177e4 5967{
773642d9 5968 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
1da177e4
LT
5969}
5970
82069379
AM
5971static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5972 size_t count)
1da177e4 5973{
9a051019 5974 int opts;
1da177e4
LT
5975 char work[20];
5976
9a051019
DG
5977 if (sscanf(buf, "%10s", work) == 1) {
5978 if (strncasecmp(work, "0x", 2) == 0) {
5979 if (kstrtoint(work + 2, 16, &opts) == 0)
1da177e4
LT
5980 goto opts_done;
5981 } else {
9a051019 5982 if (kstrtoint(work, 10, &opts) == 0)
1da177e4
LT
5983 goto opts_done;
5984 }
5985 }
5986 return -EINVAL;
5987opts_done:
773642d9
DG
5988 sdebug_opts = opts;
5989 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5990 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
c4837394 5991 tweak_cmnd_count();
1da177e4
LT
5992 return count;
5993}
82069379 5994static DRIVER_ATTR_RW(opts);
1da177e4 5995
82069379 5996static ssize_t ptype_show(struct device_driver *ddp, char *buf)
1da177e4 5997{
773642d9 5998 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
1da177e4 5999}
82069379
AM
6000static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6001 size_t count)
1da177e4 6002{
9a051019 6003 int n;
1da177e4 6004
f0d1cf93
DG
6005 /* Cannot change from or to TYPE_ZBC with sysfs */
6006 if (sdebug_ptype == TYPE_ZBC)
6007 return -EINVAL;
6008
1da177e4 6009 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
f0d1cf93
DG
6010 if (n == TYPE_ZBC)
6011 return -EINVAL;
773642d9 6012 sdebug_ptype = n;
1da177e4
LT
6013 return count;
6014 }
6015 return -EINVAL;
6016}
82069379 6017static DRIVER_ATTR_RW(ptype);
1da177e4 6018
82069379 6019static ssize_t dsense_show(struct device_driver *ddp, char *buf)
1da177e4 6020{
773642d9 6021 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
1da177e4 6022}
82069379
AM
6023static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6024 size_t count)
1da177e4 6025{
9a051019 6026 int n;
1da177e4
LT
6027
6028 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
773642d9 6029 sdebug_dsense = n;
1da177e4
LT
6030 return count;
6031 }
6032 return -EINVAL;
6033}
82069379 6034static DRIVER_ATTR_RW(dsense);
1da177e4 6035
82069379 6036static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
23183910 6037{
773642d9 6038 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
23183910 6039}
82069379
AM
6040static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6041 size_t count)
23183910 6042{
87c715dc 6043 int n, idx;
23183910
DG
6044
6045 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
87c715dc
DG
6046 bool want_store = (n == 0);
6047 struct sdebug_host_info *sdhp;
6048
cbf67842 6049 n = (n > 0);
773642d9 6050 sdebug_fake_rw = (sdebug_fake_rw > 0);
87c715dc
DG
6051 if (sdebug_fake_rw == n)
6052 return count; /* not transitioning so do nothing */
6053
6054 if (want_store) { /* 1 --> 0 transition, set up store */
6055 if (sdeb_first_idx < 0) {
6056 idx = sdebug_add_store();
6057 if (idx < 0)
6058 return idx;
6059 } else {
6060 idx = sdeb_first_idx;
6061 xa_clear_mark(per_store_ap, idx,
6062 SDEB_XA_NOT_IN_USE);
6063 }
6064 /* make all hosts use same store */
6065 list_for_each_entry(sdhp, &sdebug_host_list,
6066 host_list) {
6067 if (sdhp->si_idx != idx) {
6068 xa_set_mark(per_store_ap, sdhp->si_idx,
6069 SDEB_XA_NOT_IN_USE);
6070 sdhp->si_idx = idx;
cbf67842 6071 }
cbf67842 6072 }
87c715dc
DG
6073 sdeb_most_recent_idx = idx;
6074 } else { /* 0 --> 1 transition is trigger for shrink */
6075 sdebug_erase_all_stores(true /* apart from first */);
cbf67842 6076 }
87c715dc 6077 sdebug_fake_rw = n;
23183910
DG
6078 return count;
6079 }
6080 return -EINVAL;
6081}
82069379 6082static DRIVER_ATTR_RW(fake_rw);
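/*
 * Usage note (illustrative): writing 0 to
 * /sys/bus/pseudo/drivers/scsi_debug/fake_rw while it is currently 1
 * takes the 1 -> 0 transition above, creating (or reusing) a backing
 * store and pointing every host at it; writing 1 back takes the shrink
 * path via sdebug_erase_all_stores().
 */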
23183910 6083
82069379 6084static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
c65b1445 6085{
773642d9 6086 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
c65b1445 6087}
82069379
AM
6088static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6089 size_t count)
c65b1445 6090{
9a051019 6091 int n;
c65b1445
DG
6092
6093 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
773642d9 6094 sdebug_no_lun_0 = n;
c65b1445
DG
6095 return count;
6096 }
6097 return -EINVAL;
6098}
82069379 6099static DRIVER_ATTR_RW(no_lun_0);
c65b1445 6100
82069379 6101static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
1da177e4 6102{
773642d9 6103 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
1da177e4 6104}
82069379
AM
6105static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6106 size_t count)
1da177e4 6107{
9a051019 6108 int n;
1da177e4
LT
6109
6110 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
773642d9 6111 sdebug_num_tgts = n;
1da177e4
LT
6112 sdebug_max_tgts_luns();
6113 return count;
6114 }
6115 return -EINVAL;
6116}
82069379 6117static DRIVER_ATTR_RW(num_tgts);
1da177e4 6118
82069379 6119static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
1da177e4 6120{
773642d9 6121 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
1da177e4 6122}
82069379 6123static DRIVER_ATTR_RO(dev_size_mb);
1da177e4 6124
87c715dc
DG
6125static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6126{
6127 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6128}
6129
6130static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6131 size_t count)
6132{
6133 bool v;
6134
6135 if (kstrtobool(buf, &v))
6136 return -EINVAL;
6137
6138 sdebug_per_host_store = v;
6139 return count;
6140}
6141static DRIVER_ATTR_RW(per_host_store);
6142
82069379 6143static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
1da177e4 6144{
773642d9 6145 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
1da177e4 6146}
82069379 6147static DRIVER_ATTR_RO(num_parts);
1da177e4 6148
82069379 6149static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
1da177e4 6150{
773642d9 6151 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
1da177e4 6152}
82069379
AM
6153static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6154 size_t count)
1da177e4 6155{
9a051019 6156 int nth;
3a90a63d 6157 char work[20];
1da177e4 6158
3a90a63d
DG
6159 if (sscanf(buf, "%10s", work) == 1) {
6160 if (strncasecmp(work, "0x", 2) == 0) {
6161 if (kstrtoint(work + 2, 16, &nth) == 0)
6162 goto every_nth_done;
6163 } else {
6164 if (kstrtoint(work, 10, &nth) == 0)
6165 goto every_nth_done;
c4837394 6166 }
1da177e4
LT
6167 }
6168 return -EINVAL;
3a90a63d
DG
6169
6170every_nth_done:
6171 sdebug_every_nth = nth;
6172 if (nth && !sdebug_statistics) {
6173 pr_info("every_nth needs statistics=1, set it\n");
6174 sdebug_statistics = true;
6175 }
6176 tweak_cmnd_count();
6177 return count;
1da177e4 6178}
82069379 6179static DRIVER_ATTR_RW(every_nth);
1da177e4 6180
ad0c7775
DG
6181static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6182{
6183 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6184}
6185static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6186 size_t count)
6187{
6188 int n;
6189 bool changed;
6190
6191 if (kstrtoint(buf, 0, &n))
6192 return -EINVAL;
6193 if (n >= 0) {
6194 if (n > (int)SAM_LUN_AM_FLAT) {
6195 pr_warn("only LUN address methods 0 and 1 are supported\n");
6196 return -EINVAL;
6197 }
6198 changed = ((int)sdebug_lun_am != n);
6199 sdebug_lun_am = n;
6200 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6201 struct sdebug_host_info *sdhp;
6202 struct sdebug_dev_info *dp;
6203
6204 spin_lock(&sdebug_host_list_lock);
6205 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6206 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6207 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6208 }
6209 }
6210 spin_unlock(&sdebug_host_list_lock);
6211 }
6212 return count;
6213 }
6214 return -EINVAL;
6215}
6216static DRIVER_ATTR_RW(lun_format);
6217
82069379 6218static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
1da177e4 6219{
773642d9 6220 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
1da177e4 6221}
82069379
AM
6222static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6223 size_t count)
1da177e4 6224{
9a051019 6225 int n;
19c8ead7 6226 bool changed;
1da177e4
LT
6227
6228 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
8d039e22
DG
6229 if (n > 256) {
6230 pr_warn("max_luns can be no more than 256\n");
6231 return -EINVAL;
6232 }
773642d9
DG
6233 changed = (sdebug_max_luns != n);
6234 sdebug_max_luns = n;
1da177e4 6235 sdebug_max_tgts_luns();
773642d9 6236 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
19c8ead7
EM
6237 struct sdebug_host_info *sdhp;
6238 struct sdebug_dev_info *dp;
6239
6240 spin_lock(&sdebug_host_list_lock);
6241 list_for_each_entry(sdhp, &sdebug_host_list,
6242 host_list) {
6243 list_for_each_entry(dp, &sdhp->dev_info_list,
6244 dev_list) {
6245 set_bit(SDEBUG_UA_LUNS_CHANGED,
6246 dp->uas_bm);
6247 }
6248 }
6249 spin_unlock(&sdebug_host_list_lock);
6250 }
1da177e4
LT
6251 return count;
6252 }
6253 return -EINVAL;
6254}
82069379 6255static DRIVER_ATTR_RW(max_luns);
1da177e4 6256
82069379 6257static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
78d4e5a0 6258{
773642d9 6259 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
78d4e5a0 6260}
cbf67842
DG
 6261/* N.B. max_queue can be changed while there are queued commands. In-flight
6262 * commands beyond the new max_queue will be completed. */
82069379
AM
6263static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6264 size_t count)
78d4e5a0 6265{
c4837394
DG
6266 int j, n, k, a;
6267 struct sdebug_queue *sqp;
78d4e5a0
DG
6268
6269 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
c10fa55f
JG
6270 (n <= SDEBUG_CANQUEUE) &&
6271 (sdebug_host_max_queue == 0)) {
c4837394
DG
6272 block_unblock_all_queues(true);
6273 k = 0;
6274 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6275 ++j, ++sqp) {
6276 a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6277 if (a > k)
6278 k = a;
6279 }
773642d9 6280 sdebug_max_queue = n;
c4837394 6281 if (k == SDEBUG_CANQUEUE)
cbf67842
DG
6282 atomic_set(&retired_max_queue, 0);
6283 else if (k >= n)
6284 atomic_set(&retired_max_queue, k + 1);
6285 else
6286 atomic_set(&retired_max_queue, 0);
c4837394 6287 block_unblock_all_queues(false);
78d4e5a0
DG
6288 return count;
6289 }
6290 return -EINVAL;
6291}
82069379 6292static DRIVER_ATTR_RW(max_queue);
78d4e5a0 6293
c10fa55f
JG
6294static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6295{
6296 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6297}
6298
6299/*
6300 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6301 * in range [0, sdebug_host_max_queue), we can't change it.
6302 */
6303static DRIVER_ATTR_RO(host_max_queue);
6304
82069379 6305static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
78d4e5a0 6306{
773642d9 6307 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
78d4e5a0 6308}
82069379 6309static DRIVER_ATTR_RO(no_uld);
78d4e5a0 6310
82069379 6311static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
1da177e4 6312{
773642d9 6313 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
1da177e4 6314}
82069379 6315static DRIVER_ATTR_RO(scsi_level);
1da177e4 6316
82069379 6317static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
c65b1445 6318{
773642d9 6319 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
c65b1445 6320}
82069379
AM
6321static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6322 size_t count)
c65b1445 6323{
9a051019 6324 int n;
0d01c5df 6325 bool changed;
c65b1445 6326
f0d1cf93
DG
6327 /* Ignore capacity change for ZBC drives for now */
6328 if (sdeb_zbc_in_use)
6329 return -ENOTSUPP;
6330
c65b1445 6331 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
773642d9
DG
6332 changed = (sdebug_virtual_gb != n);
6333 sdebug_virtual_gb = n;
28898873 6334 sdebug_capacity = get_sdebug_capacity();
0d01c5df
DG
6335 if (changed) {
6336 struct sdebug_host_info *sdhp;
6337 struct sdebug_dev_info *dp;
6338
4bc6b634 6339 spin_lock(&sdebug_host_list_lock);
0d01c5df
DG
6340 list_for_each_entry(sdhp, &sdebug_host_list,
6341 host_list) {
6342 list_for_each_entry(dp, &sdhp->dev_info_list,
6343 dev_list) {
6344 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6345 dp->uas_bm);
6346 }
6347 }
4bc6b634 6348 spin_unlock(&sdebug_host_list_lock);
0d01c5df 6349 }
c65b1445
DG
6350 return count;
6351 }
6352 return -EINVAL;
6353}
82069379 6354static DRIVER_ATTR_RW(virtual_gb);
c65b1445 6355
82069379 6356static ssize_t add_host_show(struct device_driver *ddp, char *buf)
1da177e4 6357{
87c715dc
DG
 6358	/* the absolute number of hosts currently active is shown */
6359 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
1da177e4
LT
6360}
6361
82069379
AM
6362static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6363 size_t count)
1da177e4 6364{
87c715dc
DG
6365 bool found;
6366 unsigned long idx;
6367 struct sdeb_store_info *sip;
6368 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
f3df41cf 6369 int delta_hosts;
1da177e4 6370
f3df41cf 6371 if (sscanf(buf, "%d", &delta_hosts) != 1)
1da177e4 6372 return -EINVAL;
1da177e4
LT
6373 if (delta_hosts > 0) {
6374 do {
87c715dc
DG
6375 found = false;
6376 if (want_phs) {
6377 xa_for_each_marked(per_store_ap, idx, sip,
6378 SDEB_XA_NOT_IN_USE) {
6379 sdeb_most_recent_idx = (int)idx;
6380 found = true;
6381 break;
6382 }
6383 if (found) /* re-use case */
6384 sdebug_add_host_helper((int)idx);
6385 else
6386 sdebug_do_add_host(true);
6387 } else {
6388 sdebug_do_add_host(false);
6389 }
1da177e4
LT
6390 } while (--delta_hosts);
6391 } else if (delta_hosts < 0) {
6392 do {
87c715dc 6393 sdebug_do_remove_host(false);
1da177e4
LT
6394 } while (++delta_hosts);
6395 }
6396 return count;
6397}
82069379 6398static DRIVER_ATTR_RW(add_host);
1da177e4 6399
82069379 6400static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
23183910 6401{
773642d9 6402 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
23183910 6403}
82069379
AM
6404static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6405 size_t count)
23183910
DG
6406{
6407 int n;
6408
6409 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
773642d9 6410 sdebug_vpd_use_hostno = n;
23183910
DG
6411 return count;
6412 }
6413 return -EINVAL;
6414}
82069379 6415static DRIVER_ATTR_RW(vpd_use_hostno);
23183910 6416
c4837394
DG
6417static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6418{
6419 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6420}
6421static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6422 size_t count)
6423{
6424 int n;
6425
6426 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6427 if (n > 0)
6428 sdebug_statistics = true;
6429 else {
6430 clear_queue_stats();
6431 sdebug_statistics = false;
6432 }
6433 return count;
6434 }
6435 return -EINVAL;
6436}
6437static DRIVER_ATTR_RW(statistics);
6438
82069379 6439static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
597136ab 6440{
773642d9 6441 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
597136ab 6442}
82069379 6443static DRIVER_ATTR_RO(sector_size);
597136ab 6444
c4837394
DG
6445static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6446{
6447 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6448}
6449static DRIVER_ATTR_RO(submit_queues);
6450
82069379 6451static ssize_t dix_show(struct device_driver *ddp, char *buf)
c6a44287 6452{
773642d9 6453 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
c6a44287 6454}
82069379 6455static DRIVER_ATTR_RO(dix);
c6a44287 6456
82069379 6457static ssize_t dif_show(struct device_driver *ddp, char *buf)
c6a44287 6458{
773642d9 6459 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
c6a44287 6460}
82069379 6461static DRIVER_ATTR_RO(dif);
c6a44287 6462
82069379 6463static ssize_t guard_show(struct device_driver *ddp, char *buf)
c6a44287 6464{
773642d9 6465 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
c6a44287 6466}
82069379 6467static DRIVER_ATTR_RO(guard);
c6a44287 6468
82069379 6469static ssize_t ato_show(struct device_driver *ddp, char *buf)
c6a44287 6470{
773642d9 6471 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
c6a44287 6472}
82069379 6473static DRIVER_ATTR_RO(ato);
c6a44287 6474
82069379 6475static ssize_t map_show(struct device_driver *ddp, char *buf)
44d92694 6476{
87c715dc 6477 ssize_t count = 0;
44d92694 6478
5b94e232 6479 if (!scsi_debug_lbp())
44d92694
MP
6480 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6481 sdebug_store_sectors);
6482
87c715dc
DG
6483 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6484 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6485
6486 if (sip)
6487 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6488 (int)map_size, sip->map_storep);
6489 }
44d92694 6490 buf[count++] = '\n';
c7badc90 6491 buf[count] = '\0';
44d92694
MP
6492
6493 return count;
6494}
82069379 6495static DRIVER_ATTR_RO(map);
44d92694 6496
0c4bc91d
DG
6497static ssize_t random_show(struct device_driver *ddp, char *buf)
6498{
6499 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6500}
6501
6502static ssize_t random_store(struct device_driver *ddp, const char *buf,
6503 size_t count)
6504{
6505 bool v;
6506
6507 if (kstrtobool(buf, &v))
6508 return -EINVAL;
6509
6510 sdebug_random = v;
6511 return count;
6512}
6513static DRIVER_ATTR_RW(random);
6514
82069379 6515static ssize_t removable_show(struct device_driver *ddp, char *buf)
d986788b 6516{
773642d9 6517 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
d986788b 6518}
82069379
AM
6519static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6520 size_t count)
d986788b
MP
6521{
6522 int n;
6523
6524 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
773642d9 6525 sdebug_removable = (n > 0);
d986788b
MP
6526 return count;
6527 }
6528 return -EINVAL;
6529}
82069379 6530static DRIVER_ATTR_RW(removable);
d986788b 6531
cbf67842
DG
6532static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6533{
773642d9 6534 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
cbf67842 6535}
185dd232 6536/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
cbf67842
DG
6537static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6538 size_t count)
6539{
185dd232 6540 int n;
cbf67842
DG
6541
6542 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
185dd232
DG
6543 sdebug_host_lock = (n > 0);
6544 return count;
cbf67842
DG
6545 }
6546 return -EINVAL;
6547}
6548static DRIVER_ATTR_RW(host_lock);
6549
c2248fc9
DG
6550static ssize_t strict_show(struct device_driver *ddp, char *buf)
6551{
773642d9 6552 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
c2248fc9
DG
6553}
6554static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6555 size_t count)
6556{
6557 int n;
6558
6559 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
773642d9 6560 sdebug_strict = (n > 0);
c2248fc9
DG
6561 return count;
6562 }
6563 return -EINVAL;
6564}
6565static DRIVER_ATTR_RW(strict);
6566
09ba24c1
DG
6567static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6568{
6569 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6570}
6571static DRIVER_ATTR_RO(uuid_ctl);
6572
9b760fd8
DG
6573static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6574{
6575 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6576}
6577static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6578 size_t count)
6579{
6580 int ret, n;
6581
6582 ret = kstrtoint(buf, 0, &n);
6583 if (ret)
6584 return ret;
6585 sdebug_cdb_len = n;
6586 all_config_cdb_len();
6587 return count;
6588}
6589static DRIVER_ATTR_RW(cdb_len);
6590
9267e0eb
DG
6591static const char * const zbc_model_strs_a[] = {
6592 [BLK_ZONED_NONE] = "none",
6593 [BLK_ZONED_HA] = "host-aware",
6594 [BLK_ZONED_HM] = "host-managed",
6595};
6596
6597static const char * const zbc_model_strs_b[] = {
6598 [BLK_ZONED_NONE] = "no",
6599 [BLK_ZONED_HA] = "aware",
6600 [BLK_ZONED_HM] = "managed",
6601};
6602
6603static const char * const zbc_model_strs_c[] = {
6604 [BLK_ZONED_NONE] = "0",
6605 [BLK_ZONED_HA] = "1",
6606 [BLK_ZONED_HM] = "2",
6607};
6608
6609static int sdeb_zbc_model_str(const char *cp)
6610{
6611 int res = sysfs_match_string(zbc_model_strs_a, cp);
6612
6613 if (res < 0) {
6614 res = sysfs_match_string(zbc_model_strs_b, cp);
6615 if (res < 0) {
6616 res = sysfs_match_string(zbc_model_strs_c, cp);
47742bde 6617 if (res < 0)
9267e0eb
DG
6618 return -EINVAL;
6619 }
6620 }
6621 return res;
6622}
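/*
 * Illustrative mapping (derived from the three tables above, not an
 * authoritative statement of the interface): the zbc= parameter string is
 * accepted in any of the spellings found in zbc_model_strs_a/_b/_c, so
 * "host-managed", "managed" and "2" all resolve to BLK_ZONED_HM, while
 * "none", "no" and "0" all resolve to BLK_ZONED_NONE.
 */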
6623
6624static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6625{
6626 return scnprintf(buf, PAGE_SIZE, "%s\n",
6627 zbc_model_strs_a[sdeb_zbc_model]);
6628}
6629static DRIVER_ATTR_RO(zbc);
cbf67842 6630
fc13638a
DG
6631static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6632{
6633 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6634}
6635static DRIVER_ATTR_RO(tur_ms_to_ready);
6636
82069379 6637/* Note: The following array creates attribute files in the
23183910
DG
6638 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6639 files (over those found in the /sys/module/scsi_debug/parameters
6640 directory) is that auxiliary actions can be triggered when an attribute
87c715dc 6641 is changed. For example, see add_host_store() above.
23183910 6642 */
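/*
 * A minimal usage sketch (illustrative shell commands, not part of the
 * driver): writing to these attribute files changes the corresponding
 * parameter and triggers the *_store() side effects above, e.g.
 *
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *   echo 0 > /sys/bus/pseudo/drivers/scsi_debug/fake_rw
 *
 * The first adds one more simulated host; the second, when fake_rw was
 * previously 1, sets up a backing store (see add_host_store() and
 * fake_rw_store() above).
 */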
6ecaff7f 6643
82069379
AM
6644static struct attribute *sdebug_drv_attrs[] = {
6645 &driver_attr_delay.attr,
6646 &driver_attr_opts.attr,
6647 &driver_attr_ptype.attr,
6648 &driver_attr_dsense.attr,
6649 &driver_attr_fake_rw.attr,
c10fa55f 6650 &driver_attr_host_max_queue.attr,
82069379
AM
6651 &driver_attr_no_lun_0.attr,
6652 &driver_attr_num_tgts.attr,
6653 &driver_attr_dev_size_mb.attr,
6654 &driver_attr_num_parts.attr,
6655 &driver_attr_every_nth.attr,
ad0c7775 6656 &driver_attr_lun_format.attr,
82069379
AM
6657 &driver_attr_max_luns.attr,
6658 &driver_attr_max_queue.attr,
6659 &driver_attr_no_uld.attr,
6660 &driver_attr_scsi_level.attr,
6661 &driver_attr_virtual_gb.attr,
6662 &driver_attr_add_host.attr,
87c715dc 6663 &driver_attr_per_host_store.attr,
82069379
AM
6664 &driver_attr_vpd_use_hostno.attr,
6665 &driver_attr_sector_size.attr,
c4837394
DG
6666 &driver_attr_statistics.attr,
6667 &driver_attr_submit_queues.attr,
82069379
AM
6668 &driver_attr_dix.attr,
6669 &driver_attr_dif.attr,
6670 &driver_attr_guard.attr,
6671 &driver_attr_ato.attr,
6672 &driver_attr_map.attr,
0c4bc91d 6673 &driver_attr_random.attr,
82069379 6674 &driver_attr_removable.attr,
cbf67842
DG
6675 &driver_attr_host_lock.attr,
6676 &driver_attr_ndelay.attr,
c2248fc9 6677 &driver_attr_strict.attr,
09ba24c1 6678 &driver_attr_uuid_ctl.attr,
9b760fd8 6679 &driver_attr_cdb_len.attr,
fc13638a 6680 &driver_attr_tur_ms_to_ready.attr,
9267e0eb 6681 &driver_attr_zbc.attr,
82069379
AM
6682 NULL,
6683};
6684ATTRIBUTE_GROUPS(sdebug_drv);
1da177e4 6685
11ddceca 6686static struct device *pseudo_primary;
8dea0d02 6687
1da177e4
LT
6688static int __init scsi_debug_init(void)
6689{
87c715dc 6690 bool want_store = (sdebug_fake_rw == 0);
5f2578e5 6691 unsigned long sz;
87c715dc
DG
6692 int k, ret, hosts_to_add;
6693 int idx = -1;
1da177e4 6694
87c715dc
DG
6695 ramdisk_lck_a[0] = &atomic_rw;
6696 ramdisk_lck_a[1] = &atomic_rw2;
cbf67842
DG
6697 atomic_set(&retired_max_queue, 0);
6698
773642d9 6699 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
c1287970 6700 pr_warn("ndelay must be less than 1 second, ignored\n");
773642d9
DG
6701 sdebug_ndelay = 0;
6702 } else if (sdebug_ndelay > 0)
c2206098 6703 sdebug_jdelay = JDELAY_OVERRIDDEN;
cbf67842 6704
773642d9 6705 switch (sdebug_sector_size) {
597136ab
MP
6706 case 512:
6707 case 1024:
6708 case 2048:
6709 case 4096:
6710 break;
6711 default:
773642d9 6712 pr_err("invalid sector_size %d\n", sdebug_sector_size);
597136ab
MP
6713 return -EINVAL;
6714 }
6715
773642d9 6716 switch (sdebug_dif) {
8475c811 6717 case T10_PI_TYPE0_PROTECTION:
f46eb0e9 6718 break;
8475c811
CH
6719 case T10_PI_TYPE1_PROTECTION:
6720 case T10_PI_TYPE2_PROTECTION:
6721 case T10_PI_TYPE3_PROTECTION:
f46eb0e9 6722 have_dif_prot = true;
c6a44287
MP
6723 break;
6724
6725 default:
c1287970 6726 pr_err("dif must be 0, 1, 2 or 3\n");
c6a44287
MP
6727 return -EINVAL;
6728 }
6729
aa5334c4
ML
6730 if (sdebug_num_tgts < 0) {
6731 pr_err("num_tgts must be >= 0\n");
6732 return -EINVAL;
6733 }
6734
773642d9 6735 if (sdebug_guard > 1) {
c1287970 6736 pr_err("guard must be 0 or 1\n");
c6a44287
MP
6737 return -EINVAL;
6738 }
6739
773642d9 6740 if (sdebug_ato > 1) {
c1287970 6741 pr_err("ato must be 0 or 1\n");
c6a44287
MP
6742 return -EINVAL;
6743 }
6744
773642d9
DG
6745 if (sdebug_physblk_exp > 15) {
6746 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
ea61fca5
MP
6747 return -EINVAL;
6748 }
ad0c7775
DG
6749
6750 sdebug_lun_am = sdebug_lun_am_i;
6751 if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6752 pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6753 sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6754 }
6755
8d039e22 6756 if (sdebug_max_luns > 256) {
ad0c7775
DG
6757 if (sdebug_max_luns > 16384) {
6758 pr_warn("max_luns can be no more than 16384, use default\n");
6759 sdebug_max_luns = DEF_MAX_LUNS;
6760 }
6761 sdebug_lun_am = SAM_LUN_AM_FLAT;
8d039e22 6762 }
ea61fca5 6763
773642d9
DG
6764 if (sdebug_lowest_aligned > 0x3fff) {
6765 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
ea61fca5
MP
6766 return -EINVAL;
6767 }
6768
c4837394
DG
6769 if (submit_queues < 1) {
6770 pr_err("submit_queues must be 1 or more\n");
6771 return -EINVAL;
6772 }
c87bf24c
JG
6773
6774 if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6775 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6776 return -EINVAL;
6777 }
6778
c10fa55f
JG
6779 if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6780 (sdebug_host_max_queue < 0)) {
 6781		pr_err("host_max_queue must be in range [0, %d]\n",
6782 SDEBUG_CANQUEUE);
6783 return -EINVAL;
6784 }
6785
6786 if (sdebug_host_max_queue &&
6787 (sdebug_max_queue != sdebug_host_max_queue)) {
6788 sdebug_max_queue = sdebug_host_max_queue;
6789 pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6790 sdebug_max_queue);
6791 }
6792
c4837394
DG
6793 sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6794 GFP_KERNEL);
6795 if (sdebug_q_arr == NULL)
6796 return -ENOMEM;
6797 for (k = 0; k < submit_queues; ++k)
6798 spin_lock_init(&sdebug_q_arr[k].qc_lock);
6799
f0d1cf93 6800 /*
9267e0eb
DG
6801 * check for host managed zoned block device specified with
6802 * ptype=0x14 or zbc=XXX.
f0d1cf93 6803 */
9267e0eb
DG
6804 if (sdebug_ptype == TYPE_ZBC) {
6805 sdeb_zbc_model = BLK_ZONED_HM;
6806 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6807 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6808 if (k < 0) {
6809 ret = k;
3b01d7ea 6810 goto free_q_arr;
9267e0eb
DG
6811 }
6812 sdeb_zbc_model = k;
6813 switch (sdeb_zbc_model) {
6814 case BLK_ZONED_NONE:
64e14ece 6815 case BLK_ZONED_HA:
9267e0eb
DG
6816 sdebug_ptype = TYPE_DISK;
6817 break;
6818 case BLK_ZONED_HM:
6819 sdebug_ptype = TYPE_ZBC;
6820 break;
9267e0eb
DG
6821 default:
6822 pr_err("Invalid ZBC model\n");
3b01d7ea
DL
6823 ret = -EINVAL;
6824 goto free_q_arr;
9267e0eb
DG
6825 }
6826 }
6827 if (sdeb_zbc_model != BLK_ZONED_NONE) {
f0d1cf93 6828 sdeb_zbc_in_use = true;
9267e0eb
DG
6829 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6830 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6831 }
f0d1cf93 6832
9267e0eb
DG
6833 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6834 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
773642d9
DG
6835 if (sdebug_dev_size_mb < 1)
6836 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
6837 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6838 sdebug_store_sectors = sz / sdebug_sector_size;
28898873 6839 sdebug_capacity = get_sdebug_capacity();
1da177e4
LT
6840
6841 /* play around with geometry, don't waste too much on track 0 */
6842 sdebug_heads = 8;
6843 sdebug_sectors_per = 32;
773642d9 6844 if (sdebug_dev_size_mb >= 256)
1da177e4 6845 sdebug_heads = 64;
773642d9 6846 else if (sdebug_dev_size_mb >= 16)
fa785f0a 6847 sdebug_heads = 32;
1da177e4
LT
6848 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6849 (sdebug_sectors_per * sdebug_heads);
6850 if (sdebug_cylinders_per >= 1024) {
6851 /* other LLDs do this; implies >= 1GB ram disk ... */
6852 sdebug_heads = 255;
6853 sdebug_sectors_per = 63;
6854 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6855 (sdebug_sectors_per * sdebug_heads);
6856 }
5b94e232 6857 if (scsi_debug_lbp()) {
773642d9
DG
6858 sdebug_unmap_max_blocks =
6859 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6014759c 6860
773642d9
DG
6861 sdebug_unmap_max_desc =
6862 clamp(sdebug_unmap_max_desc, 0U, 256U);
6014759c 6863
773642d9
DG
6864 sdebug_unmap_granularity =
6865 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6014759c 6866
773642d9
DG
6867 if (sdebug_unmap_alignment &&
6868 sdebug_unmap_granularity <=
6869 sdebug_unmap_alignment) {
c1287970 6870 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
c4837394 6871 ret = -EINVAL;
87c715dc 6872 goto free_q_arr;
44d92694 6873 }
87c715dc
DG
6874 }
6875 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6876 if (want_store) {
6877 idx = sdebug_add_store();
6878 if (idx < 0) {
6879 ret = idx;
6880 goto free_q_arr;
44d92694 6881 }
44d92694
MP
6882 }
6883
9b906779
NB
6884 pseudo_primary = root_device_register("pseudo_0");
6885 if (IS_ERR(pseudo_primary)) {
c1287970 6886 pr_warn("root_device_register() error\n");
9b906779 6887 ret = PTR_ERR(pseudo_primary);
6ecaff7f
RD
6888 goto free_vm;
6889 }
6890 ret = bus_register(&pseudo_lld_bus);
6891 if (ret < 0) {
c1287970 6892 pr_warn("bus_register error: %d\n", ret);
6ecaff7f
RD
6893 goto dev_unreg;
6894 }
6895 ret = driver_register(&sdebug_driverfs_driver);
6896 if (ret < 0) {
c1287970 6897 pr_warn("driver_register error: %d\n", ret);
6ecaff7f
RD
6898 goto bus_unreg;
6899 }
1da177e4 6900
87c715dc 6901 hosts_to_add = sdebug_add_host;
773642d9 6902 sdebug_add_host = 0;
1da177e4 6903
87c715dc
DG
6904 for (k = 0; k < hosts_to_add; k++) {
6905 if (want_store && k == 0) {
6906 ret = sdebug_add_host_helper(idx);
6907 if (ret < 0) {
6908 pr_err("add_host_helper k=%d, error=%d\n",
6909 k, -ret);
6910 break;
6911 }
6912 } else {
6913 ret = sdebug_do_add_host(want_store &&
6914 sdebug_per_host_store);
6915 if (ret < 0) {
6916 pr_err("add_host k=%d error=%d\n", k, -ret);
6917 break;
6918 }
9a051019
DG
6919 }
6920 }
773642d9 6921 if (sdebug_verbose)
87c715dc 6922 pr_info("built %d host(s)\n", sdebug_num_hosts);
c1287970 6923
1da177e4 6924 return 0;
6ecaff7f 6925
6ecaff7f
RD
6926bus_unreg:
6927 bus_unregister(&pseudo_lld_bus);
6928dev_unreg:
9b906779 6929 root_device_unregister(pseudo_primary);
6ecaff7f 6930free_vm:
87c715dc 6931 sdebug_erase_store(idx, NULL);
c4837394
DG
6932free_q_arr:
6933 kfree(sdebug_q_arr);
6ecaff7f 6934 return ret;
1da177e4
LT
6935}
6936
6937static void __exit scsi_debug_exit(void)
6938{
87c715dc 6939 int k = sdebug_num_hosts;
1da177e4
LT
6940
6941 stop_all_queued();
6942 for (; k; k--)
87c715dc 6943 sdebug_do_remove_host(true);
52ab9768 6944 free_all_queued();
1da177e4
LT
6945 driver_unregister(&sdebug_driverfs_driver);
6946 bus_unregister(&pseudo_lld_bus);
9b906779 6947 root_device_unregister(pseudo_primary);
1da177e4 6948
87c715dc
DG
6949 sdebug_erase_all_stores(false);
6950 xa_destroy(per_store_ap);
f852c596 6951 kfree(sdebug_q_arr);
1da177e4
LT
6952}
6953
6954device_initcall(scsi_debug_init);
6955module_exit(scsi_debug_exit);
6956
91d4c752 6957static void sdebug_release_adapter(struct device *dev)
1da177e4 6958{
9a051019 6959 struct sdebug_host_info *sdbg_host;
1da177e4
LT
6960
6961 sdbg_host = to_sdebug_host(dev);
9a051019 6962 kfree(sdbg_host);
1da177e4
LT
6963}
6964
87c715dc
DG
 6965/* idx must be valid; if sip is NULL then it will be obtained using idx */
6966static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
1da177e4 6967{
87c715dc
DG
6968 if (idx < 0)
6969 return;
6970 if (!sip) {
6971 if (xa_empty(per_store_ap))
6972 return;
6973 sip = xa_load(per_store_ap, idx);
6974 if (!sip)
6975 return;
6976 }
6977 vfree(sip->map_storep);
6978 vfree(sip->dif_storep);
6979 vfree(sip->storep);
6980 xa_erase(per_store_ap, idx);
6981 kfree(sip);
6982}
6983
6984/* Assume apart_from_first==false only in shutdown case. */
6985static void sdebug_erase_all_stores(bool apart_from_first)
6986{
6987 unsigned long idx;
6988 struct sdeb_store_info *sip = NULL;
6989
6990 xa_for_each(per_store_ap, idx, sip) {
6991 if (apart_from_first)
6992 apart_from_first = false;
6993 else
6994 sdebug_erase_store(idx, sip);
6995 }
6996 if (apart_from_first)
6997 sdeb_most_recent_idx = sdeb_first_idx;
6998}
6999
7000/*
7001 * Returns store xarray new element index (idx) if >=0 else negated errno.
7002 * Limit the number of stores to 65536.
7003 */
7004static int sdebug_add_store(void)
7005{
7006 int res;
7007 u32 n_idx;
7008 unsigned long iflags;
7009 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7010 struct sdeb_store_info *sip = NULL;
7011 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7012
7013 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7014 if (!sip)
7015 return -ENOMEM;
7016
7017 xa_lock_irqsave(per_store_ap, iflags);
7018 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7019 if (unlikely(res < 0)) {
7020 xa_unlock_irqrestore(per_store_ap, iflags);
7021 kfree(sip);
7022 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7023 return res;
7024 }
7025 sdeb_most_recent_idx = n_idx;
7026 if (sdeb_first_idx < 0)
7027 sdeb_first_idx = n_idx;
7028 xa_unlock_irqrestore(per_store_ap, iflags);
7029
7030 res = -ENOMEM;
7031 sip->storep = vzalloc(sz);
7032 if (!sip->storep) {
7033 pr_err("user data oom\n");
7034 goto err;
7035 }
7036 if (sdebug_num_parts > 0)
7037 sdebug_build_parts(sip->storep, sz);
7038
7039 /* DIF/DIX: what T10 calls Protection Information (PI) */
7040 if (sdebug_dix) {
7041 int dif_size;
7042
7043 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7044 sip->dif_storep = vmalloc(dif_size);
7045
7046 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7047 sip->dif_storep);
7048
7049 if (!sip->dif_storep) {
7050 pr_err("DIX oom\n");
7051 goto err;
7052 }
7053 memset(sip->dif_storep, 0xff, dif_size);
7054 }
7055 /* Logical Block Provisioning */
7056 if (scsi_debug_lbp()) {
7057 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7058 sip->map_storep = vmalloc(array_size(sizeof(long),
7059 BITS_TO_LONGS(map_size)));
7060
7061 pr_info("%lu provisioning blocks\n", map_size);
7062
7063 if (!sip->map_storep) {
7064 pr_err("LBP map oom\n");
7065 goto err;
7066 }
7067
7068 bitmap_zero(sip->map_storep, map_size);
7069
7070 /* Map first 1KB for partition table */
7071 if (sdebug_num_parts)
7072 map_region(sip, 0, 2);
7073 }
7074
7075 rwlock_init(&sip->macc_lck);
7076 return (int)n_idx;
7077err:
7078 sdebug_erase_store((int)n_idx, sip);
7079 pr_warn("%s: failed, errno=%d\n", __func__, -res);
7080 return res;
7081}
7082
7083static int sdebug_add_host_helper(int per_host_idx)
7084{
7085 int k, devs_per_host, idx;
7086 int error = -ENOMEM;
9a051019 7087 struct sdebug_host_info *sdbg_host;
8b40228f 7088 struct sdebug_dev_info *sdbg_devinfo, *tmp;
1da177e4 7089
9a051019 7090 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
87c715dc 7091 if (!sdbg_host)
9a051019 7092 return -ENOMEM;
87c715dc
DG
7093 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7094 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7095 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7096 sdbg_host->si_idx = idx;
1da177e4 7097
9a051019 7098 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
1da177e4 7099
773642d9 7100 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
9a051019 7101 for (k = 0; k < devs_per_host; k++) {
5cb2fc06 7102 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
87c715dc 7103 if (!sdbg_devinfo)
1da177e4 7104 goto clean;
9a051019 7105 }
1da177e4 7106
9a051019
DG
7107 spin_lock(&sdebug_host_list_lock);
7108 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7109 spin_unlock(&sdebug_host_list_lock);
1da177e4 7110
9a051019
DG
7111 sdbg_host->dev.bus = &pseudo_lld_bus;
7112 sdbg_host->dev.parent = pseudo_primary;
7113 sdbg_host->dev.release = &sdebug_release_adapter;
87c715dc 7114 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
1da177e4 7115
9a051019 7116 error = device_register(&sdbg_host->dev);
9a051019 7117 if (error)
1da177e4
LT
7118 goto clean;
7119
87c715dc
DG
7120 ++sdebug_num_hosts;
7121 return 0;
1da177e4
LT
7122
7123clean:
8b40228f
FT
7124 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7125 dev_list) {
1da177e4 7126 list_del(&sdbg_devinfo->dev_list);
f0d1cf93 7127 kfree(sdbg_devinfo->zstate);
1da177e4
LT
7128 kfree(sdbg_devinfo);
7129 }
1da177e4 7130 kfree(sdbg_host);
87c715dc 7131 pr_warn("%s: failed, errno=%d\n", __func__, -error);
9a051019 7132 return error;
1da177e4
LT
7133}
7134
87c715dc 7135static int sdebug_do_add_host(bool mk_new_store)
1da177e4 7136{
87c715dc
DG
7137 int ph_idx = sdeb_most_recent_idx;
7138
7139 if (mk_new_store) {
7140 ph_idx = sdebug_add_store();
7141 if (ph_idx < 0)
7142 return ph_idx;
7143 }
7144 return sdebug_add_host_helper(ph_idx);
7145}
7146
7147static void sdebug_do_remove_host(bool the_end)
7148{
7149 int idx = -1;
9a051019 7150 struct sdebug_host_info *sdbg_host = NULL;
87c715dc 7151 struct sdebug_host_info *sdbg_host2;
1da177e4 7152
9a051019
DG
7153 spin_lock(&sdebug_host_list_lock);
7154 if (!list_empty(&sdebug_host_list)) {
7155 sdbg_host = list_entry(sdebug_host_list.prev,
7156 struct sdebug_host_info, host_list);
87c715dc 7157 idx = sdbg_host->si_idx;
1da177e4 7158 }
87c715dc
DG
7159 if (!the_end && idx >= 0) {
7160 bool unique = true;
7161
7162 list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7163 if (sdbg_host2 == sdbg_host)
7164 continue;
7165 if (idx == sdbg_host2->si_idx) {
7166 unique = false;
7167 break;
7168 }
7169 }
7170 if (unique) {
7171 xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7172 if (idx == sdeb_most_recent_idx)
7173 --sdeb_most_recent_idx;
7174 }
7175 }
7176 if (sdbg_host)
7177 list_del(&sdbg_host->host_list);
9a051019 7178 spin_unlock(&sdebug_host_list_lock);
1da177e4
LT
7179
7180 if (!sdbg_host)
7181 return;
7182
773642d9 7183 device_unregister(&sdbg_host->dev);
87c715dc 7184 --sdebug_num_hosts;
1da177e4
LT
7185}
7186
fd32119b 7187static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
cbf67842
DG
7188{
7189 int num_in_q = 0;
cbf67842
DG
7190 struct sdebug_dev_info *devip;
7191
c4837394 7192 block_unblock_all_queues(true);
cbf67842
DG
7193 devip = (struct sdebug_dev_info *)sdev->hostdata;
7194 if (NULL == devip) {
c4837394 7195 block_unblock_all_queues(false);
cbf67842
DG
7196 return -ENODEV;
7197 }
7198 num_in_q = atomic_read(&devip->num_in_q);
c40ecc12 7199
fc09acb7
DG
7200 if (qdepth > SDEBUG_CANQUEUE) {
7201 qdepth = SDEBUG_CANQUEUE;
7202 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7203 qdepth, SDEBUG_CANQUEUE);
7204 }
c40ecc12
CH
7205 if (qdepth < 1)
7206 qdepth = 1;
fc09acb7
DG
7207 if (qdepth != sdev->queue_depth)
7208 scsi_change_queue_depth(sdev, qdepth);
c40ecc12 7209
773642d9 7210 if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
c4837394 7211 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
c40ecc12 7212 __func__, qdepth, num_in_q);
cbf67842 7213 }
c4837394 7214 block_unblock_all_queues(false);
cbf67842
DG
7215 return sdev->queue_depth;
7216}
7217
c4837394 7218static bool fake_timeout(struct scsi_cmnd *scp)
817fd66b 7219{
c4837394 7220 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
773642d9
DG
7221 if (sdebug_every_nth < -1)
7222 sdebug_every_nth = -1;
7223 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
c4837394 7224 return true; /* ignore command causing timeout */
773642d9 7225 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
817fd66b 7226 scsi_medium_access_command(scp))
c4837394 7227 return true; /* time out reads and writes */
817fd66b 7228 }
c4837394 7229 return false;
817fd66b
DG
7230}
7231
fc13638a
DG
7232/* Response to TUR or media access command when device stopped */
7233static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7234{
7235 int stopped_state;
7236 u64 diff_ns = 0;
7237 ktime_t now_ts = ktime_get_boottime();
7238 struct scsi_device *sdp = scp->device;
7239
7240 stopped_state = atomic_read(&devip->stopped);
7241 if (stopped_state == 2) {
7242 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7243 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7244 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7245 /* tur_ms_to_ready timer extinguished */
7246 atomic_set(&devip->stopped, 0);
7247 return 0;
7248 }
7249 }
7250 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7251 if (sdebug_verbose)
7252 sdev_printk(KERN_INFO, sdp,
7253 "%s: Not ready: in process of becoming ready\n", my_name);
7254 if (scp->cmnd[0] == TEST_UNIT_READY) {
7255 u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7256
7257 if (diff_ns <= tur_nanosecs_to_ready)
7258 diff_ns = tur_nanosecs_to_ready - diff_ns;
7259 else
7260 diff_ns = tur_nanosecs_to_ready;
7261 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7262 do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
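			/*
			 * Worked example (hypothetical numbers): with
			 * tur_ms_to_ready=5000 and about 2 seconds elapsed
			 * since device creation, diff_ns was reduced above to
			 * roughly 3e9 and becomes ~3000 here, i.e. about
			 * 3000 ms of remaining "not ready" time is reported
			 * in the sense INFORMATION field below.
			 */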
7263 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7264 diff_ns);
7265 return check_condition_result;
7266 }
7267 }
7268 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7269 if (sdebug_verbose)
7270 sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7271 my_name);
7272 return check_condition_result;
7273}
7274
c4b57d89
KD
7275static int sdebug_map_queues(struct Scsi_Host *shost)
7276{
7277 int i, qoff;
7278
7279 if (shost->nr_hw_queues == 1)
7280 return 0;
7281
7282 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7283 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7284
7285 map->nr_queues = 0;
7286
7287 if (i == HCTX_TYPE_DEFAULT)
7288 map->nr_queues = submit_queues - poll_queues;
7289 else if (i == HCTX_TYPE_POLL)
7290 map->nr_queues = poll_queues;
7291
7292 if (!map->nr_queues) {
7293 BUG_ON(i == HCTX_TYPE_DEFAULT);
7294 continue;
7295 }
7296
7297 map->queue_offset = qoff;
7298 blk_mq_map_queues(map);
7299
7300 qoff += map->nr_queues;
7301 }
7302
7303 return 0;
7304
7305}
7306
7307static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7308{
4a0c6f43
DG
7309 bool first;
7310 bool retiring = false;
7311 int num_entries = 0;
7312 unsigned int qc_idx = 0;
c4b57d89 7313 unsigned long iflags;
4a0c6f43 7314 ktime_t kt_from_boot = ktime_get_boottime();
c4b57d89
KD
7315 struct sdebug_queue *sqp;
7316 struct sdebug_queued_cmd *sqcp;
7317 struct scsi_cmnd *scp;
7318 struct sdebug_dev_info *devip;
4a0c6f43 7319 struct sdebug_defer *sd_dp;
c4b57d89
KD
7320
7321 sqp = sdebug_q_arr + queue_num;
4a0c6f43 7322 spin_lock_irqsave(&sqp->qc_lock, iflags);
c4b57d89 7323
4a0c6f43
DG
7324 for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
7325 if (first) {
7326 qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7327 first = false;
7328 } else {
7329 qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7330 }
7331 if (unlikely(qc_idx >= sdebug_max_queue))
7332 break;
c4b57d89
KD
7333
7334 sqcp = &sqp->qc_arr[qc_idx];
4a0c6f43
DG
7335 sd_dp = sqcp->sd_dp;
7336 if (unlikely(!sd_dp))
7337 continue;
c4b57d89
KD
7338 scp = sqcp->a_cmnd;
7339 if (unlikely(scp == NULL)) {
4a0c6f43 7340 pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
c4b57d89 7341 queue_num, qc_idx, __func__);
4a0c6f43 7342 break;
c4b57d89 7343 }
4a0c6f43
DG
7344 if (sd_dp->defer_t == SDEB_DEFER_POLL) {
7345 if (kt_from_boot < sd_dp->cmpl_ts)
7346 continue;
7347
 7348		} else	/* ignoring non-REQ_HIPRI requests */
7349 continue;
c4b57d89
KD
7350 devip = (struct sdebug_dev_info *)scp->device->hostdata;
7351 if (likely(devip))
7352 atomic_dec(&devip->num_in_q);
7353 else
7354 pr_err("devip=NULL from %s\n", __func__);
7355 if (unlikely(atomic_read(&retired_max_queue) > 0))
4a0c6f43 7356 retiring = true;
c4b57d89
KD
7357
7358 sqcp->a_cmnd = NULL;
7359 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4a0c6f43 7360 pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
c4b57d89 7361 sqp, queue_num, qc_idx, __func__);
4a0c6f43 7362 break;
c4b57d89 7363 }
c4b57d89
KD
7364 if (unlikely(retiring)) { /* user has reduced max_queue */
7365 int k, retval;
7366
7367 retval = atomic_read(&retired_max_queue);
7368 if (qc_idx >= retval) {
7369 pr_err("index %d too large\n", retval);
4a0c6f43 7370 break;
c4b57d89
KD
7371 }
7372 k = find_last_bit(sqp->in_use_bm, retval);
7373 if ((k < sdebug_max_queue) || (k == retval))
7374 atomic_set(&retired_max_queue, 0);
7375 else
7376 atomic_set(&retired_max_queue, k + 1);
7377 }
4a0c6f43 7378 sd_dp->defer_t = SDEB_DEFER_NONE;
c4b57d89
KD
7379 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7380 scp->scsi_done(scp); /* callback to mid level */
4a0c6f43 7381 spin_lock_irqsave(&sqp->qc_lock, iflags);
c4b57d89 7382 num_entries++;
4a0c6f43 7383 }
c4b57d89 7384 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4a0c6f43
DG
7385 if (num_entries > 0)
7386 atomic_add(num_entries, &sdeb_mq_poll_count);
c4b57d89
KD
7387 return num_entries;
7388}
7389
fd32119b
DG
7390static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7391 struct scsi_cmnd *scp)
c2248fc9
DG
7392{
7393 u8 sdeb_i;
7394 struct scsi_device *sdp = scp->device;
7395 const struct opcode_info_t *oip;
7396 const struct opcode_info_t *r_oip;
7397 struct sdebug_dev_info *devip;
7398 u8 *cmd = scp->cmnd;
7399 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
f66b8517 7400 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
c2248fc9
DG
7401 int k, na;
7402 int errsts = 0;
ad0c7775 7403 u64 lun_index = sdp->lun & 0x3FFF;
c2248fc9
DG
7404 u32 flags;
7405 u16 sa;
7406 u8 opcode = cmd[0];
7407 bool has_wlun_rl;
3a90a63d 7408 bool inject_now;
c2248fc9
DG
7409
7410 scsi_set_resid(scp, 0);
3a90a63d 7411 if (sdebug_statistics) {
c4837394 7412 atomic_inc(&sdebug_cmnd_count);
3a90a63d
DG
7413 inject_now = inject_on_this_cmd();
7414 } else {
7415 inject_now = false;
7416 }
f46eb0e9
DG
7417 if (unlikely(sdebug_verbose &&
7418 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
c2248fc9
DG
7419 char b[120];
7420 int n, len, sb;
7421
7422 len = scp->cmd_len;
7423 sb = (int)sizeof(b);
7424 if (len > 32)
7425 strcpy(b, "too long, over 32 bytes");
7426 else {
7427 for (k = 0, n = 0; k < len && n < sb; ++k)
7428 n += scnprintf(b + n, sb - n, "%02x ",
7429 (u32)cmd[k]);
7430 }
458df78b 7431 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
a6e76e6f 7432 blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
c2248fc9 7433 }
3a90a63d 7434 if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7ee6d1b4 7435 return SCSI_MLQUEUE_HOST_BUSY;
34d55434 7436 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
ad0c7775 7437 if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
f46eb0e9 7438 goto err_out;
c2248fc9
DG
7439
7440 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
7441 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
7442 devip = (struct sdebug_dev_info *)sdp->hostdata;
f46eb0e9
DG
7443 if (unlikely(!devip)) {
7444 devip = find_build_dev_info(sdp);
c2248fc9 7445 if (NULL == devip)
f46eb0e9 7446 goto err_out;
c2248fc9 7447 }
3a90a63d
DG
7448 if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7449 atomic_set(&sdeb_inject_pending, 1);
7450
c2248fc9
DG
7451 na = oip->num_attached;
7452 r_pfp = oip->pfp;
7453 if (na) { /* multiple commands with this opcode */
7454 r_oip = oip;
7455 if (FF_SA & r_oip->flags) {
7456 if (F_SA_LOW & oip->flags)
7457 sa = 0x1f & cmd[1];
7458 else
7459 sa = get_unaligned_be16(cmd + 8);
7460 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7461 if (opcode == oip->opcode && sa == oip->sa)
7462 break;
7463 }
7464 } else { /* since no service action only check opcode */
7465 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7466 if (opcode == oip->opcode)
7467 break;
7468 }
7469 }
7470 if (k > na) {
7471 if (F_SA_LOW & r_oip->flags)
7472 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7473 else if (F_SA_HIGH & r_oip->flags)
7474 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7475 else
7476 mk_sense_invalid_opcode(scp);
7477 goto check_cond;
7478 }
7479 } /* else (when na==0) we assume the oip is a match */
7480 flags = oip->flags;
f46eb0e9 7481 if (unlikely(F_INV_OP & flags)) {
c2248fc9
DG
7482 mk_sense_invalid_opcode(scp);
7483 goto check_cond;
7484 }
f46eb0e9 7485 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
773642d9
DG
7486 if (sdebug_verbose)
7487 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7488 my_name, opcode, " supported for wlun");
c2248fc9
DG
7489 mk_sense_invalid_opcode(scp);
7490 goto check_cond;
7491 }
f46eb0e9 7492 if (unlikely(sdebug_strict)) { /* check cdb against mask */
c2248fc9
DG
7493 u8 rem;
7494 int j;
7495
7496 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7497 rem = ~oip->len_mask[k] & cmd[k];
7498 if (rem) {
7499 for (j = 7; j >= 0; --j, rem <<= 1) {
7500 if (0x80 & rem)
7501 break;
7502 }
7503 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7504 goto check_cond;
7505 }
7506 }
7507 }
f46eb0e9 7508 if (unlikely(!(F_SKIP_UA & flags) &&
b01f6f83
DG
7509 find_first_bit(devip->uas_bm,
7510 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
f46eb0e9 7511 errsts = make_ua(scp, devip);
c2248fc9
DG
7512 if (errsts)
7513 goto check_cond;
7514 }
fc13638a
DG
7515 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7516 atomic_read(&devip->stopped))) {
7517 errsts = resp_not_ready(scp, devip);
7518 if (errsts)
7519 goto fini;
c2248fc9 7520 }
773642d9 7521 if (sdebug_fake_rw && (F_FAKE_RW & flags))
c2248fc9 7522 goto fini;
f46eb0e9 7523 if (unlikely(sdebug_every_nth)) {
c4837394 7524 if (fake_timeout(scp))
c2248fc9
DG
7525 return 0; /* ignore command: make trouble */
7526 }
f46eb0e9 7527 if (likely(oip->pfp))
f66b8517
MW
7528 pfp = oip->pfp; /* calls a resp_* function */
7529 else
7530 pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
c2248fc9
DG
7531
7532fini:
67da413f 7533 if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
f66b8517 7534 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
75aa3209
DG
7535 else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7536 sdebug_ndelay > 10000)) {
80c49563 7537 /*
75aa3209
DG
7538 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7539 * for Start Stop Unit (SSU) want at least 1 second delay and
7540 * if sdebug_jdelay>1 want a long delay of that many seconds.
7541 * For Synchronize Cache want 1/20 of SSU's delay.
80c49563
DG
7542 */
7543 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
4f2c8bf6 7544 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
80c49563 7545
4f2c8bf6 7546 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
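		/*
		 * Worked example (assumed values, for illustration only):
		 * with HZ=250, USER_HZ=100, sdebug_jdelay=2 and a command
		 * flagged F_SYNC_DELAY (denom=20), the line above computes
		 * (100*2)*250 / (20*100) = 25 jiffies, i.e. 0.1 s, which is
		 * 1/20 of the 2 second SSU delay described in the comment.
		 */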
f66b8517 7547 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
80c49563 7548 } else
f66b8517 7549 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
10bde980 7550 sdebug_ndelay);
c2248fc9 7551check_cond:
f66b8517 7552 return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
f46eb0e9 7553err_out:
f66b8517 7554 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
c2248fc9
DG
7555}
7556
9e603ca0 7557static struct scsi_host_template sdebug_driver_template = {
c8ed555a
AV
7558 .show_info = scsi_debug_show_info,
7559 .write_info = scsi_debug_write_info,
9e603ca0
FT
7560 .proc_name = sdebug_proc_name,
7561 .name = "SCSI DEBUG",
7562 .info = scsi_debug_info,
7563 .slave_alloc = scsi_debug_slave_alloc,
7564 .slave_configure = scsi_debug_slave_configure,
7565 .slave_destroy = scsi_debug_slave_destroy,
7566 .ioctl = scsi_debug_ioctl,
185dd232 7567 .queuecommand = scsi_debug_queuecommand,
cbf67842 7568 .change_queue_depth = sdebug_change_qdepth,
c4b57d89
KD
7569 .map_queues = sdebug_map_queues,
7570 .mq_poll = sdebug_blk_mq_poll,
9e603ca0 7571 .eh_abort_handler = scsi_debug_abort,
9e603ca0 7572 .eh_device_reset_handler = scsi_debug_device_reset,
cbf67842
DG
7573 .eh_target_reset_handler = scsi_debug_target_reset,
7574 .eh_bus_reset_handler = scsi_debug_bus_reset,
9e603ca0 7575 .eh_host_reset_handler = scsi_debug_host_reset,
c4837394 7576 .can_queue = SDEBUG_CANQUEUE,
9e603ca0 7577 .this_id = 7,
65e8617f 7578 .sg_tablesize = SG_MAX_SEGMENTS,
cbf67842 7579 .cmd_per_lun = DEF_CMD_PER_LUN,
6bb5e6e7 7580 .max_sectors = -1U,
50c2e910 7581 .max_segment_size = -1U,
9e603ca0 7582 .module = THIS_MODULE,
c40ecc12 7583 .track_queue_depth = 1,
9e603ca0
FT
7584};
7585
91d4c752 7586static int sdebug_driver_probe(struct device *dev)
1da177e4 7587{
22017ed2
DG
7588 int error = 0;
7589 struct sdebug_host_info *sdbg_host;
7590 struct Scsi_Host *hpnt;
f46eb0e9 7591 int hprot;
1da177e4
LT
7592
7593 sdbg_host = to_sdebug_host(dev);
7594
f7c4cdc7 7595 sdebug_driver_template.can_queue = sdebug_max_queue;
fc09acb7 7596 sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
2a3d4eb8 7597 if (!sdebug_clustering)
4af14d11
CH
7598 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7599
78d4e5a0
DG
7600 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7601 if (NULL == hpnt) {
c1287970 7602 pr_err("scsi_host_alloc failed\n");
78d4e5a0 7603 error = -ENODEV;
1da177e4 7604 return error;
78d4e5a0 7605 }
c4837394 7606 if (submit_queues > nr_cpu_ids) {
9b130ad5 7607 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
c4837394
DG
7608 my_name, submit_queues, nr_cpu_ids);
7609 submit_queues = nr_cpu_ids;
7610 }
c10fa55f
JG
7611 /*
7612 * Decide whether to tell scsi subsystem that we want mq. The
f7c4cdc7 7613 * following should give the same answer for each host.
c10fa55f 7614 */
f7c4cdc7
JG
7615 hpnt->nr_hw_queues = submit_queues;
7616 if (sdebug_host_max_queue)
7617 hpnt->host_tagset = 1;
1da177e4 7618
c4b57d89
KD
7619 /* poll queues are possible for nr_hw_queues > 1 */
7620 if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7621 pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7622 my_name, poll_queues, hpnt->nr_hw_queues);
7623 poll_queues = 0;
7624 }
7625
7626 /*
7627 * Poll queues don't need interrupts, but we need at least one I/O queue
7628 * left over for non-polled I/O.
7629 * If condition not met, trim poll_queues to 1 (just for simplicity).
7630 */
7631 if (poll_queues >= submit_queues) {
fc09acb7
DG
7632 if (submit_queues < 3)
7633 pr_warn("%s: trim poll_queues to 1\n", my_name);
7634 else
7635 pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7636 my_name, submit_queues - 1);
c4b57d89
KD
7637 poll_queues = 1;
7638 }
7639 if (poll_queues)
7640 hpnt->nr_maps = 3;
7641
9a051019 7642 sdbg_host->shost = hpnt;
1da177e4 7643 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
773642d9
DG
7644 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7645 hpnt->max_id = sdebug_num_tgts + 1;
1da177e4 7646 else
773642d9
DG
7647 hpnt->max_id = sdebug_num_tgts;
7648 /* = sdebug_max_luns; */
f2d3fd29 7649 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
1da177e4 7650
f46eb0e9 7651 hprot = 0;
c6a44287 7652
773642d9 7653 switch (sdebug_dif) {
c6a44287 7654
8475c811 7655 case T10_PI_TYPE1_PROTECTION:
f46eb0e9 7656 hprot = SHOST_DIF_TYPE1_PROTECTION;
773642d9 7657 if (sdebug_dix)
f46eb0e9 7658 hprot |= SHOST_DIX_TYPE1_PROTECTION;
c6a44287
MP
7659 break;
7660
8475c811 7661 case T10_PI_TYPE2_PROTECTION:
f46eb0e9 7662 hprot = SHOST_DIF_TYPE2_PROTECTION;
773642d9 7663 if (sdebug_dix)
f46eb0e9 7664 hprot |= SHOST_DIX_TYPE2_PROTECTION;
c6a44287
MP
7665 break;
7666
8475c811 7667 case T10_PI_TYPE3_PROTECTION:
f46eb0e9 7668 hprot = SHOST_DIF_TYPE3_PROTECTION;
773642d9 7669 if (sdebug_dix)
f46eb0e9 7670 hprot |= SHOST_DIX_TYPE3_PROTECTION;
c6a44287
MP
7671 break;
7672
7673 default:
773642d9 7674 if (sdebug_dix)
f46eb0e9 7675 hprot |= SHOST_DIX_TYPE0_PROTECTION;
c6a44287
MP
7676 break;
7677 }
7678
f46eb0e9 7679 scsi_host_set_prot(hpnt, hprot);
c6a44287 7680
f46eb0e9
DG
7681 if (have_dif_prot || sdebug_dix)
7682 pr_info("host protection%s%s%s%s%s%s%s\n",
7683 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7684 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7685 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7686 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7687 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7688 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7689 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
c6a44287 7690
773642d9 7691 if (sdebug_guard == 1)
c6a44287
MP
7692 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7693 else
7694 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7695
773642d9
DG
7696 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7697 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
c4837394
DG
7698 if (sdebug_every_nth) /* need stats counters for every_nth */
7699 sdebug_statistics = true;
9a051019
DG
7700 error = scsi_add_host(hpnt, &sdbg_host->dev);
7701 if (error) {
c1287970 7702 pr_err("scsi_add_host failed\n");
9a051019 7703 error = -ENODEV;
1da177e4 7704 scsi_host_put(hpnt);
87c715dc 7705 } else {
1da177e4 7706 scsi_scan_host(hpnt);
87c715dc 7707 }
1da177e4 7708
cbf67842 7709 return error;
1da177e4
LT
7710}
7711
fc7a6209 7712static void sdebug_driver_remove(struct device *dev)
1da177e4 7713{
9a051019 7714 struct sdebug_host_info *sdbg_host;
8b40228f 7715 struct sdebug_dev_info *sdbg_devinfo, *tmp;
1da177e4
LT
7716
7717 sdbg_host = to_sdebug_host(dev);
7718
9a051019 7719 scsi_remove_host(sdbg_host->shost);
1da177e4 7720
8b40228f
FT
7721 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7722 dev_list) {
9a051019 7723 list_del(&sdbg_devinfo->dev_list);
f0d1cf93 7724 kfree(sdbg_devinfo->zstate);
9a051019
DG
7725 kfree(sdbg_devinfo);
7726 }
1da177e4 7727
9a051019 7728 scsi_host_put(sdbg_host->shost);
1da177e4
LT
7729}
7730
8dea0d02
FT
7731static int pseudo_lld_bus_match(struct device *dev,
7732 struct device_driver *dev_driver)
1da177e4 7733{
8dea0d02 7734 return 1;
1da177e4 7735}
8dea0d02
FT
7736
7737static struct bus_type pseudo_lld_bus = {
7738 .name = "pseudo",
7739 .match = pseudo_lld_bus_match,
7740 .probe = sdebug_driver_probe,
7741 .remove = sdebug_driver_remove,
82069379 7742 .drv_groups = sdebug_drv_groups,
8dea0d02 7743};