/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_alua.h"

static sense_reason_t
sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);

static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 8);
	return 0;
}
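
/*
 * Editor's note: an illustrative alternative (a sketch, not part of this
 * file). The open-coded shifts above build the big-endian READ CAPACITY
 * (10) payload; the <asm/unaligned.h> helpers already included here could
 * produce the same eight bytes:
 *
 *	put_unaligned_be32(blocks, &buf[0]);
 *	put_unaligned_be32(dev->dev_attrib.block_size, &buf[4]);
 */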

static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;
	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set P_TYPE and PROT_EN bits for DIF support
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type)
			buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
	}

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

	if (dev->transport->get_alignment_offset_lbas) {
		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
		buf[14] = (lalba >> 8) & 0x3f;
		buf[15] = lalba & 0xff;
	}

	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] |= 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 32);
	return 0;
}
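
/*
 * Editor's note: an illustrative decode of READ CAPACITY (16) byte 12 as
 * an initiator would see it (a sketch, not part of this file). PROT_EN is
 * bit 0 and P_TYPE is bits 3:1, so pi_prot_type N is encoded above as
 * ((N - 1) << 1) | 0x1:
 *
 *	bool prot_en = buf[12] & 0x1;
 *	unsigned int p_type = (buf[12] >> 1) & 0x7;	(pi_prot_type - 1)
 */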

sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when a non-zero value is supplied, otherwise
	 * calculate the remaining range based on ->get_blocks() - starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);
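
/*
 * Editor's note: a worked example (illustrative, not part of this file).
 * For a WRITE_SAME_16 CDB with NUMBER OF LOGICAL BLOCKS == 0, a starting
 * LBA of 1000, and ->get_blocks() returning 1999 (last LBA, i.e. 2000
 * blocks total), the helper returns 1999 - 1000 + 1 = 1000: the command
 * covers LBA 1000 through the end of the device.
 */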

static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value. SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written. Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
	       (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
	       (cdb[30] << 8) + cdb[31];
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
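
/*
 * Editor's note: an illustrative equivalence (a sketch, not part of this
 * file). Each helper above assembles a big-endian CDB field by hand, so
 * for example transport_lba_64() could equally be written with the
 * <asm/unaligned.h> helper used elsewhere in this file:
 *
 *	return get_unaligned_be64(&cdb[2]);
 *
 * and transport_get_sectors_10() as get_unaligned_be16(&cdb[7]).
 */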

static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
	unsigned int sectors = sbc_get_write_same_sectors(cmd);
	sense_reason_t ret;

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Sanity check for LBA wrap and request past end of device.
	 */
	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
	    ((cmd->t_task_lba + sectors) > end_lba)) {
		pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
		       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
		return TCM_ADDRESS_OUT_OF_RANGE;
	}

	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (!dev->dev_attrib.emulate_tpws) {
			pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
			       " has emulate_tpws disabled\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
	if (ret)
		return ret;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}
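
/*
 * Editor's note: the bit masks tested above map onto byte 1 of the
 * WRITE SAME (10/16) CDB as follows (an illustrative summary, not part
 * of this file):
 *
 *	0x02  LBDATA  - rejected, not supported
 *	0x04  PBDATA  - rejected, not supported
 *	0x08  UNMAP   - routed to ops->execute_write_same_unmap
 *	0x10  ANCHOR  - rejected, since ANC_SUP == 0
 */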

static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 * the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 * blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */
	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return TCM_NO_SENSE;
}

static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet.
	 */
	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
		return TCM_NO_SENSE;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	sg_init_table(write_sg, cmd->t_data_nents);
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload.
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE.
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    0);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = TCM_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg.
	 */
	rc = down_interruptible(&dev->caw_sem);
	if ((rc != 0) || signal_pending(current)) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_done() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}
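
/*
 * Editor's note: an illustrative summary of the two-phase COMPARE AND
 * WRITE flow implemented above (a sketch, not part of this file):
 *
 *	sbc_compare_and_write()            down(&dev->caw_sem), submit READ
 *	  -> compare_and_write_callback()  memcmp() read vs. verify payload
 *	       miscompare: up(&dev->caw_sem), return TCM_MISCOMPARE_VERIFY
 *	       match:      resubmit as WRITE via sbc_execute_rw()
 *	  -> compare_and_write_post()      up(&dev->caw_sem) after the WRITE
 */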

static int
sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
		       bool is_write, struct se_cmd *cmd)
{
	if (is_write) {
		cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
					 TARGET_PROT_DOUT_INSERT;
		switch (protect) {
		case 0x0:
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	} else {
		cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
					 TARGET_PROT_DIN_STRIP;
		switch (protect) {
		case 0x0:
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	}

	return 0;
}
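
/*
 * Editor's note (illustrative, not part of this file): the "protect"
 * argument is the 3-bit RDPROTECT/WRPROTECT field from CDB byte 1, i.e.
 *
 *	u8 protect = cdb[1] >> 5;
 *
 * as extracted in sbc_check_prot() below; values 0x0-0x5 select which of
 * the guard and reference tag checks are performed on the transfer.
 */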

static sense_reason_t
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
	       u32 sectors, bool is_write)
{
	u8 protect = cdb[1] >> 5;

	if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
		if (protect && !dev->dev_attrib.pi_prot_type) {
			pr_err("CDB contains protect bit, but device does not"
			       " advertise PROTECT=1 feature bit\n");
			return TCM_INVALID_CDB_FIELD;
		}
		if (cmd->prot_pto)
			return TCM_NO_SENSE;
	}

	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		cmd->reftag_seed = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		if (protect)
			return TCM_INVALID_CDB_FIELD;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE1_PROT:
		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE0_PROT:
	default:
		return TCM_NO_SENSE;
	}

	if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
				   is_write, cmd))
		return TCM_INVALID_CDB_FIELD;

	cmd->prot_type = dev->dev_attrib.pi_prot_type;
	cmd->prot_length = dev->prot_length * sectors;

	/*
	 * In case protection information exists over the wire,
	 * we modify the command data length to describe pure data.
	 * The actual transfer length is data length + protection
	 * length.
	 */
	if (protect)
		cmd->data_length = sectors * dev->dev_attrib.block_size;

	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
		 "prot_op=%d prot_checks=%d\n",
		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
		 cmd->prot_op, cmd->prot_checks);

	return TCM_NO_SENSE;
}

static int
sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
{
	if (cdb[1] & 0x10) {
		if (!dev->dev_attrib.emulate_dpo) {
			pr_err("Got CDB: 0x%02x with DPO bit set, but device"
			       " does not advertise support for DPO\n", cdb[0]);
			return -EINVAL;
		}
	}
	if (cdb[1] & 0x8) {
		if (!dev->dev_attrib.emulate_fua_write ||
		    !dev->dev_attrib.emulate_write_cache) {
			pr_err("Got CDB: 0x%02x with FUA bit set, but device"
			       " does not advertise support for FUA write\n",
			       cdb[0]);
			return -EINVAL;
		}
		cmd->se_cmd_flags |= SCF_FUA;
	}
	return 0;
}
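
/*
 * Editor's note (an illustrative summary, not part of this file): in the
 * READ/WRITE 10/12/16 CDBs, byte 1 carries the RDPROTECT/WRPROTECT field
 * in bits 7:5, DPO in bit 4 (0x10) and FUA in bit 3 (0x08), which is why
 * this helper tests cdb[1] against 0x10 and 0x8.
 */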

sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			if (sbc_check_dpofua(dev, cmd, cdb))
				return TCM_INVALID_CDB_FIELD;
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
			       " 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error.
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN_16:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
			       cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (ops->execute_sync_cache) {
			cmd->execute_cmd = ops->execute_sync_cache;
			goto check_lba;
		}
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of the original reserved field.
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->execute_cmd = sbc_emulate_noop;
		goto check_lba;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		if (sectors > dev->dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds fabric_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.fabric_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sectors > dev->dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds backend hw_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.hw_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
check_lba:
		end_lba = dev->transport->get_blocks(dev) + 1;
		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
		    ((cmd->t_task_lba + sectors) > end_lba)) {
			pr_err("cmd exceeds last lba %llu "
			       "(lba %llu, sectors %u)\n",
			       end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);
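
/*
 * Editor's note: an illustrative sketch of how a backend wires itself up
 * to sbc_parse_cdb(); the my_* names are hypothetical and not part of
 * this file or the target core API.
 *
 *	static struct sbc_ops my_sbc_ops = {
 *		.execute_rw		= my_execute_rw,
 *		.execute_sync_cache	= my_execute_sync_cache,
 *		.execute_write_same	= my_execute_write_same,
 *		.execute_unmap		= my_execute_unmap,
 *	};
 *
 *	static sense_reason_t my_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return sbc_parse_cdb(cmd, &my_sbc_ops);
 *	}
 */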

u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);
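
/*
 * Editor's note: the wire format parsed above, for reference (an
 * illustrative summary, not part of this file):
 *
 *	bytes 0-1   UNMAP DATA LENGTH (dl, big-endian)
 *	bytes 2-3   UNMAP BLOCK DESCRIPTOR DATA LENGTH (bd_dl)
 *	bytes 4-7   reserved
 *	bytes 8+    16-byte descriptors: 8-byte LBA, 4-byte block count,
 *	            4 reserved bytes (all big-endian)
 */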

void
sbc_dif_generate(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = cmd->t_task_lba;
	void *daddr, *paddr;
	int i, j, offset = 0;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;
			sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
						dev->dev_attrib.block_size));
			if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
			sdt->app_tag = 0;

			pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			sector++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
}
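
/*
 * Editor's note: the 8-byte T10 DIF tuple written above, one per data
 * block (an illustrative summary, not part of this file):
 *
 *	__be16 guard_tag;	CRC16 (crc_t10dif) of the block contents
 *	__be16 app_tag;		application tag, 0 here (0xffff = escape)
 *	__be32 ref_tag;		low 32 bits of the LBA for Type 1
 */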

static sense_reason_t
sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
		  const void *p, sector_t sector, unsigned int ei_lba)
{
	int block_size = dev->dev_attrib.block_size;
	__be16 csum;

	csum = cpu_to_be16(crc_t10dif(p, block_size));

	if (sdt->guard_tag != csum) {
		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
		       " csum 0x%04x\n", (unsigned long long)sector,
		       be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), ei_lba);
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	return 0;
}

static void
sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		  struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left;
	unsigned int offset = sg_off;

	left = sectors * dev->prot_length;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		unsigned int psg_len, copied = 0;

		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		psg_len = min(left, psg->length);
		while (psg_len) {
			len = min(psg_len, sg->length - offset);
			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

			if (read)
				memcpy(paddr + copied, addr, len);
			else
				memcpy(addr, paddr + copied, len);

			left -= len;
			offset += len;
			copied += len;
			psg_len -= len;

			if (offset >= sg->length) {
				sg = sg_next(sg);
				offset = 0;
			}
			kunmap_atomic(addr);
		}
		kunmap_atomic(paddr);
	}
}

sense_reason_t
sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = 0;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
	sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_write);

static sense_reason_t
__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		      unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = sg_off;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + sg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			if (sdt->app_tag == cpu_to_be16(0xffff)) {
				sector++;
				offset += sizeof(struct se_dif_v1_tuple);
				continue;
			}

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}

	return 0;
}

sense_reason_t
sbc_dif_read_strip(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors = cmd->prot_length / dev->prot_length;

	return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
				     cmd->t_prot_sg, 0);
}

sense_reason_t
sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	sense_reason_t rc;

	rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
	if (rc)
		return rc;

	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_read);