/*
 * SCSI Primary Commands (SPC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"

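/*
 * Fill the SCCS and TPGS bits of the standard INQUIRY data based on the
 * ALUA target port group configuration of the port the command arrived on.
 */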
static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;

	/*
	 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
	 */
	buf[5] = 0x80;

	/*
	 * Set TPGS field for explicit and/or implicit ALUA access type
	 * and operation.
	 *
	 * See spc4r17 section 6.4.2 Table 135
	 */
	if (!port)
		return;
	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem)
		return;

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp)
		buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
}

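/*
 * Build the 36 bytes of standard INQUIRY data returned for EVPD=0.
 */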
sense_reason_t
spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_lun *lun = cmd->se_lun;
	struct se_device *dev = cmd->se_dev;

	/* Set RMB (removable media) for tape devices */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		buf[1] = 0x80;

	buf[2] = 0x05; /* SPC-3 */

	/*
	 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
	 *
	 * SPC4 says:
	 *   A RESPONSE DATA FORMAT field set to 2h indicates that the
	 *   standard INQUIRY data is in the format defined in this
	 *   standard. Response data format values less than 2h are
	 *   obsolete. Response data format values greater than 2h are
	 *   reserved.
	 */
	buf[3] = 2;

	/*
	 * Enable SCCS and TPGS fields for Emulated ALUA
	 */
	spc_fill_alua_data(lun->lun_sep, buf);

	/*
	 * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
	 */
	if (dev->dev_attrib.emulate_3pc)
		buf[5] |= 0x8;

	buf[7] = 0x2; /* CmdQue=1 */

	memcpy(&buf[8], "LIO-ORG ", 8);
	memset(&buf[16], 0x20, 16);
	memcpy(&buf[16], dev->t10_wwn.model,
	       min_t(size_t, strlen(dev->t10_wwn.model), 16));
	memcpy(&buf[32], dev->t10_wwn.revision,
	       min_t(size_t, strlen(dev->t10_wwn.revision), 4));
	buf[4] = 31; /* Set additional length to 31 */

	return 0;
}
EXPORT_SYMBOL(spc_emulate_inquiry_std);

/* unit serial number */
static sense_reason_t
spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	u16 len = 0;

	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		u32 unit_serial_len;

		unit_serial_len = strlen(dev->t10_wwn.unit_serial);
		unit_serial_len++; /* For NULL Terminator */

		len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
		len++; /* Extra Byte for NULL Terminator */
		buf[3] = len;
	}
	return 0;
}

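/*
 * Encode the configured unit serial number as hex nibbles into the
 * VENDOR SPECIFIC IDENTIFIER (and EXTENSION) portion of an NAA IEEE
 * Registered Extended designator.
 */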
void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
				      unsigned char *buf)
{
	unsigned char *p = &dev->t10_wwn.unit_serial[0];
	int cnt;
	bool next = true;

	/*
	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
	 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
	 * to complete the payload.  These are based on the VPD=0x80 PRODUCT
	 * SERIAL NUMBER set via vpd_unit_serial in target_core_configfs.c to
	 * ensure per device uniqueness.
	 */
	for (cnt = 0; *p && cnt < 13; p++) {
		int val = hex_to_bin(*p);

		if (val < 0)
			continue;

		if (next) {
			next = false;
			buf[cnt++] |= val;
		} else {
			next = true;
			buf[cnt] = val << 4;
		}
	}
}

/*
 * Device identification VPD, for a complete list of
 * DESIGNATOR TYPEs see spc4r17 Table 459.
 */
sense_reason_t
spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_lun *lun = cmd->se_lun;
	struct se_port *port = NULL;
	struct se_portal_group *tpg = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned char *prod = &dev->t10_wwn.model[0];
	u32 prod_len;
	u32 unit_serial_len, off = 0;
	u16 len = 0, id_len;

	off = 4;

	/*
	 * NAA IEEE Registered Extended Assigned designator format, see
	 * spc4r17 section 7.7.3.6.5
	 *
	 * We depend upon a target_core_mod/ConfigFS provided
	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
	 * value in order to return the NAA id.
	 */
	if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
		goto check_t10_vend_desc;

	/* CODE SET == Binary */
	buf[off++] = 0x1;

	/* Set ASSOCIATION == addressed logical unit: 00b */
	buf[off] = 0x00;

	/* Identifier/Designator type == NAA identifier */
	buf[off++] |= 0x3;
	off++;

	/* Identifier/Designator length */
	buf[off++] = 0x10;

	/*
	 * Start NAA IEEE Registered Extended Identifier/Designator
	 */
	buf[off++] = (0x6 << 4);

	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	buf[off++] = 0x01;
	buf[off++] = 0x40;
	buf[off] = (0x5 << 4);

	/*
	 * Return ConfigFS Unit Serial Number information for
	 * VENDOR_SPECIFIC_IDENTIFIER and
	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
	 */
	spc_parse_naa_6h_vendor_specific(dev, &buf[off]);

	len = 20;
	off = (len + 4);

check_t10_vend_desc:
	/*
	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
	 */
	id_len = 8; /* For Vendor field */
	prod_len = 4; /* For VPD Header */
	prod_len += 8; /* For Vendor field */
	prod_len += strlen(prod);
	prod_len++; /* For : */

	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
		unit_serial_len++; /* For NULL Terminator */

		id_len += sprintf(&buf[off+12], "%s:%s", prod,
				&dev->t10_wwn.unit_serial[0]);
	}
	buf[off] = 0x2; /* ASCII */
	buf[off+1] = 0x1; /* T10 Vendor ID */
	buf[off+2] = 0x0;
	memcpy(&buf[off+4], "LIO-ORG", 8);
	/* Extra Byte for NULL Terminator */
	id_len++;
	/* Identifier Length */
	buf[off+3] = id_len;
	/* Header size for Designation descriptor */
	len += (id_len + 4);
	off += (id_len + 4);
	/*
	 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
	 */
	port = lun->lun_sep;
	if (port) {
		struct t10_alua_lu_gp *lu_gp;
		u32 padding, scsi_name_len;
		u16 lu_gp_id = 0;
		u16 tg_pt_gp_id = 0;
		u16 tpgt;

		tpg = port->sep_tpg;
		/*
		 * Relative target port identifier, see spc4r17
		 * section 7.7.3.7
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Relative target port identifier */
		buf[off++] |= 0x4;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		/* Skip over Obsolete field in RTPI payload
		 * in Table 472 */
		off += 2;
		buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
		buf[off++] = (port->sep_rtpi & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Target port group identifier, see spc4r17
		 * section 7.7.3.8
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
		if (!tg_pt_gp_mem)
			goto check_lu_gp;

		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
		if (!tg_pt_gp) {
			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
			goto check_lu_gp;
		}
		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Target port group identifier */
		buf[off++] |= 0x5;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
		buf[off++] = (tg_pt_gp_id & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Logical Unit Group identifier, see spc4r17
		 * section 7.7.3.8
		 */
check_lu_gp:
		lu_gp_mem = dev->dev_alua_lu_gp_mem;
		if (!lu_gp_mem)
			goto check_scsi_name;

		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		lu_gp = lu_gp_mem->lu_gp;
		if (!lu_gp) {
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
			goto check_scsi_name;
		}
		lu_gp_id = lu_gp->lu_gp_id;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		buf[off++] |= 0x1; /* CODE SET == Binary */
		/* DESIGNATOR TYPE == Logical Unit Group identifier */
		buf[off++] |= 0x6;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		buf[off++] = ((lu_gp_id >> 8) & 0xff);
		buf[off++] = (lu_gp_id & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * SCSI name string designator, see spc4r17
		 * section 7.7.3.11
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
check_scsi_name:
		scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
		/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
		scsi_name_len += 10;
		/* Check for 4-byte padding */
		padding = ((-scsi_name_len) & 3);
		if (padding != 0)
			scsi_name_len += padding;
		/* Header size + Designation descriptor */
		scsi_name_len += 4;

		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing $FABRIC_MOD
		 * dependent information.  For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
		 * UTF-8 encoding.
		 */
		tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
					tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
		scsi_name_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		if (padding)
			scsi_name_len += padding;

		buf[off-1] = scsi_name_len;
		off += scsi_name_len;
		/* Header size + Designation descriptor */
		len += (scsi_name_len + 4);
	}
	buf[2] = ((len >> 8) & 0xff);
	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
	return 0;
}
EXPORT_SYMBOL(spc_emulate_evpd_83);

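/*
 * Report whether write caching is enabled for this device: prefer the
 * backend's get_write_cache() method, otherwise fall back to the
 * emulate_write_cache device attribute.
 */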
static bool
spc_check_dev_wce(struct se_device *dev)
{
	bool wce = false;

	if (dev->transport->get_write_cache)
		wce = dev->transport->get_write_cache(dev);
	else if (dev->dev_attrib.emulate_write_cache > 0)
		wce = true;

	return wce;
}

/* Extended INQUIRY Data VPD Page */
static sense_reason_t
spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	buf[3] = 0x3c;
	/* Set HEADSUP, ORDSUP, SIMPSUP */
	buf[5] = 0x07;

	/* If WriteCache emulation is enabled, set V_SUP */
	if (spc_check_dev_wce(dev))
		buf[6] = 0x01;
	return 0;
}

/* Block Limits VPD page */
static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	u32 max_sectors;
	int have_tp = 0;
	int opt, min;

	/*
	 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
	 * emulate_tpu=1 or emulate_tpws=1 we expect a different page
	 * length for Thin Provisioning.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		have_tp = 1;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = have_tp ? 0x3c : 0x10;

	/* Set WSNZ to 1 */
	buf[4] = 0x01;
	/*
	 * Set MAXIMUM COMPARE AND WRITE LENGTH
	 */
	if (dev->dev_attrib.emulate_caw)
		buf[5] = 0x01;

	/*
	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
	 */
	if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
		put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
	else
		put_unaligned_be16(1, &buf[6]);

	/*
	 * Set MAXIMUM TRANSFER LENGTH
	 */
	max_sectors = min(dev->dev_attrib.fabric_max_sectors,
			  dev->dev_attrib.hw_max_sectors);
	put_unaligned_be32(max_sectors, &buf[8]);

	/*
	 * Set OPTIMAL TRANSFER LENGTH
	 */
	if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
		put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
	else
		put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);

	/*
	 * Exit now if we don't support TP.
	 */
	if (!have_tp)
		goto max_write_same;

	/*
	 * Set MAXIMUM UNMAP LBA COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);

	/*
	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
			   &buf[24]);

	/*
	 * Set OPTIMAL UNMAP GRANULARITY
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);

	/*
	 * UNMAP GRANULARITY ALIGNMENT
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
			   &buf[32]);
	if (dev->dev_attrib.unmap_granularity_alignment != 0)
		buf[32] |= 0x80; /* Set the UGAVALID bit */

	/*
	 * MAXIMUM WRITE SAME LENGTH
	 */
max_write_same:
	put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);

	return 0;
}

/* Block Device Characteristics VPD page */
static sense_reason_t
spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = 0x3c;
	/* MEDIUM ROTATION RATE == 0001h: non-rotating (solid state) medium */
	buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;

	return 0;
}

/* Thin Provisioning VPD */
static sense_reason_t
spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
	 *
	 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
	 * zero, then the page length shall be set to 0004h.  If the DP bit
	 * is set to one, then the page length shall be set to the value
	 * defined in table 162.
	 */
	buf[0] = dev->transport->get_device_type(dev);

	/*
	 * Set Hardcoded length mentioned above for DP=0
	 */
	put_unaligned_be16(0x0004, &buf[2]);

	/*
	 * The THRESHOLD EXPONENT field indicates the threshold set size in
	 * LBAs as a power of 2 (i.e., the threshold set size is equal to
	 * 2^(threshold exponent)).
	 *
	 * Note that this is currently set to 0x00 as mkp says it will be
	 * changing again. We can enable this once it has settled in T10
	 * and is actually used by Linux/SCSI ML code.
	 */
	buf[4] = 0x00;

	/*
	 * A TPU bit set to one indicates that the device server supports
	 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
	 * that the device server does not support the UNMAP command.
	 */
	if (dev->dev_attrib.emulate_tpu != 0)
		buf[5] = 0x80;

	/*
	 * A TPWS bit set to one indicates that the device server supports
	 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
	 * A TPWS bit set to zero indicates that the device server does not
	 * support the use of the WRITE SAME (16) command to unmap LBAs.
	 */
	if (dev->dev_attrib.emulate_tpws != 0)
		buf[5] |= 0x40;

	return 0;
}

static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);

static struct {
	uint8_t page;
	sense_reason_t (*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = {
	{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
	{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
	{ .page = 0x83, .emulate = spc_emulate_evpd_83 },
	{ .page = 0x86, .emulate = spc_emulate_evpd_86 },
	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
};

/* supported vital product data pages */
static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
{
	int p;

	/*
	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
	 * Registered Extended LUN WWN has been set via ConfigFS
	 * during device creation/restart.
	 */
	if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		buf[3] = ARRAY_SIZE(evpd_handlers);
		for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
			buf[p + 4] = evpd_handlers[p].page;
	}

	return 0;
}

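/*
 * INQUIRY handler: build the standard INQUIRY data for EVPD=0, or
 * dispatch to the matching entry in evpd_handlers[] for EVPD=1, then
 * copy the result into the command's data buffer.
 */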
static sense_reason_t
spc_emulate_inquiry(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
	unsigned char *rbuf;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char buf[SE_INQUIRY_BUF];
	sense_reason_t ret;
	int p;

	memset(buf, 0, SE_INQUIRY_BUF);

	if (dev == tpg->tpg_virt_lun0.lun_se_dev)
		buf[0] = 0x3f; /* Not connected */
	else
		buf[0] = dev->transport->get_device_type(dev);

	if (!(cdb[1] & 0x1)) {
		if (cdb[2]) {
			pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
			       cdb[2]);
			ret = TCM_INVALID_CDB_FIELD;
			goto out;
		}

		ret = spc_emulate_inquiry_std(cmd, buf);
		goto out;
	}

	for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
		if (cdb[2] == evpd_handlers[p].page) {
			buf[1] = cdb[2];
			ret = evpd_handlers[p].emulate(cmd, buf);
			goto out;
		}
	}

	pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
	ret = TCM_INVALID_CDB_FIELD;

out:
	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}

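/*
 * MODE SENSE page builders.  Each handler fills in a single mode page at
 * 'p' and returns the number of bytes it occupies; a page control (pc)
 * value of 1 requests changeable values, which are all reported as zero
 * for now.
 */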
static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p)
{
	p[0] = 0x01;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

out:
	return 12;
}

static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
{
	p[0] = 0x0a;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

	p[2] = 2;
	/*
	 * From spc4r23, 7.4.7 Control mode page
	 *
	 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
	 * restrictions on the algorithm used for reordering commands
	 * having the SIMPLE task attribute (see SAM-4).
	 *
	 * Table 368 -- QUEUE ALGORITHM MODIFIER field
	 * Code      Description
	 *  0h       Restricted reordering
	 *  1h       Unrestricted reordering allowed
	 *  2h to 7h Reserved
	 *  8h to Fh Vendor specific
	 *
	 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
	 * the device server shall order the processing sequence of commands
	 * having the SIMPLE task attribute such that data integrity is maintained
	 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
	 * requests is halted at any time, the final value of all data observable
	 * on the medium shall be the same as if all the commands had been processed
	 * with the ORDERED task attribute).
	 *
	 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
	 * device server may reorder the processing sequence of commands having the
	 * SIMPLE task attribute in any manner. Any data integrity exposures related to
	 * command sequence order shall be explicitly handled by the application client
	 * through the selection of appropriate commands and task attributes.
	 */
	p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
	 *
	 * 00b: The logical unit shall clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when a com-
	 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
	 * status.
	 *
	 * 10b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when
	 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
	 * CONFLICT status.
	 *
	 * 11b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall establish a unit attention condition for the
	 * initiator port associated with the I_T nexus on which the BUSY,
	 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
	 * Depending on the status, the additional sense code shall be set to
	 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
	 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
	 * command, a unit attention condition shall be established only once
	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
	 * of the number of commands completed with one of those status codes.
	 */
	p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
	       (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Task Aborted Status (TAS) bit set to zero.
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client. A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
	p[8] = 0xff;
	p[9] = 0xff;
	p[11] = 30;

out:
	return 12;
}

static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
{
	p[0] = 0x08;
	p[1] = 0x12;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

	if (spc_check_dev_wce(dev))
		p[2] = 0x04; /* Write Cache Enable */
	p[12] = 0x20; /* Disabled Read Ahead */

out:
	return 20;
}

static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc, unsigned char *p)
{
	p[0] = 0x1c;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

out:
	return 12;
}

static struct {
	uint8_t page;
	uint8_t subpage;
	int (*emulate)(struct se_device *, u8, unsigned char *);
} modesense_handlers[] = {
	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
	{ .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
	{ .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
};

static void spc_modesense_write_protect(unsigned char *buf, int type)
{
	/*
	 * I believe that the WP bit (bit 7) in the mode header is the same for
	 * all device types..
	 */
	switch (type) {
	case TYPE_DISK:
	case TYPE_TAPE:
	default:
		buf[0] |= 0x80; /* WP bit */
		break;
	}
}

static void spc_modesense_dpofua(unsigned char *buf, int type)
{
	switch (type) {
	case TYPE_DISK:
		buf[0] |= 0x10; /* DPOFUA bit */
		break;
	default:
		break;
	}
}

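/*
 * MODE SENSE block descriptor helpers: fill in the BLOCK DESCRIPTOR
 * LENGTH field plus the block descriptor itself (short form, or the
 * LONGLBA long form when the block count does not fit in 32 bits) and
 * return the number of bytes to add to the accumulated length.
 */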
static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
	*buf++ = 8;
	put_unaligned_be32(min(blocks, 0xffffffffull), buf);
	buf += 4;
	put_unaligned_be32(block_size, buf);
	return 9;
}

static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
	if (blocks <= 0xffffffff)
		return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;

	*buf++ = 1;		/* LONGLBA */
	buf += 2;
	*buf++ = 16;
	put_unaligned_be64(blocks, buf);
	buf += 12;
	put_unaligned_be32(block_size, buf);

	return 17;
}

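/*
 * MODE SENSE (6/10) handler: build the mode parameter header, an
 * optional block descriptor, and the requested page(s) from
 * modesense_handlers[], then copy the result into the data buffer.
 */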
static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	char *cdb = cmd->t_task_cdb;
	unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
	int type = dev->transport->get_device_type(dev);
	int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
	bool dbd = !!(cdb[1] & 0x08);
	bool llba = ten ? !!(cdb[1] & 0x10) : false;
	u8 pc = cdb[2] >> 6;
	u8 page = cdb[2] & 0x3f;
	u8 subpage = cdb[3];
	int length = 0;
	int ret;
	int i;

	memset(buf, 0, SE_MODE_PAGE_BUF);

	/*
	 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
	 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
	 */
	length = ten ? 3 : 2;

	/* DEVICE-SPECIFIC PARAMETER */
	if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
	    (cmd->se_deve &&
	     (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
		spc_modesense_write_protect(&buf[length], type);

	if ((spc_check_dev_wce(dev)) &&
	    (dev->dev_attrib.emulate_fua_write > 0))
		spc_modesense_dpofua(&buf[length], type);

	++length;

	/* BLOCK DESCRIPTOR */

	/*
	 * For now we only include a block descriptor for disk (SBC)
	 * devices; other command sets use a slightly different format.
	 */
	if (!dbd && type == TYPE_DISK) {
		u64 blocks = dev->transport->get_blocks(dev);
		u32 block_size = dev->dev_attrib.block_size;

		if (ten) {
			if (llba) {
				length += spc_modesense_long_blockdesc(&buf[length],
						blocks, block_size);
			} else {
				length += 3;
				length += spc_modesense_blockdesc(&buf[length],
						blocks, block_size);
			}
		} else {
			length += spc_modesense_blockdesc(&buf[length], blocks,
					block_size);
		}
	} else {
		if (ten)
			length += 4;
		else
			length += 1;
	}

	if (page == 0x3f) {
		if (subpage != 0x00 && subpage != 0xff) {
			pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
			return TCM_INVALID_CDB_FIELD;
		}

		for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
			/*
			 * Tricky way to say all subpage 00h for
			 * subpage==0, all subpages for subpage==0xff
			 * (and we just checked above that those are
			 * the only two possibilities).
			 */
			if ((modesense_handlers[i].subpage & ~subpage) == 0) {
				ret = modesense_handlers[i].emulate(dev, pc, &buf[length]);
				if (!ten && length + ret >= 255)
					break;
				length += ret;
			}
		}

		goto set_length;
	}

	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			length += modesense_handlers[i].emulate(dev, pc, &buf[length]);
			goto set_length;
		}

	/*
	 * We don't intend to implement:
	 *  - obsolete page 03h "format parameters" (checked by Solaris)
	 */
	if (page != 0x03)
		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
		       page, subpage);

	return TCM_UNKNOWN_MODE_PAGE;

set_length:
	if (ten)
		put_unaligned_be16(length - 2, buf);
	else
		buf[0] = length - 1;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

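/*
 * MODE SELECT (6/10) handler: since no mode page values are changeable,
 * only accept parameter data that matches the current values generated
 * by the corresponding modesense_handlers[] entry.
 */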
static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	char *cdb = cmd->t_task_cdb;
	bool ten = cdb[0] == MODE_SELECT_10;
	int off = ten ? 8 : 4;
	bool pf = !!(cdb[1] & 0x10);
	u8 page, subpage;
	unsigned char *buf;
	unsigned char tbuf[SE_MODE_PAGE_BUF];
	int length;
	int ret = 0;
	int i;

	if (!cmd->data_length) {
		target_complete_cmd(cmd, GOOD);
		return 0;
	}

	if (cmd->data_length < off + 2)
		return TCM_PARAMETER_LIST_LENGTH_ERROR;

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (!pf) {
		ret = TCM_INVALID_CDB_FIELD;
		goto out;
	}

	page = buf[off] & 0x3f;
	subpage = buf[off] & 0x40 ? buf[off + 1] : 0;

	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			memset(tbuf, 0, SE_MODE_PAGE_BUF);
			length = modesense_handlers[i].emulate(dev, 0, tbuf);
			goto check_contents;
		}

	ret = TCM_UNKNOWN_MODE_PAGE;
	goto out;

check_contents:
	if (cmd->data_length < off + length) {
		ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
		goto out;
	}

	if (memcmp(buf + off, tbuf, length))
		ret = TCM_INVALID_PARAMETER_LIST;

out:
	transport_kunmap_data_sg(cmd);

	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}

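/*
 * REQUEST SENSE handler: report and clear any pending unit attention
 * condition for this I_T nexus, or return NO SENSE fixed-format sense
 * data when nothing is pending.
 */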
static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
{
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *rbuf;
	u8 ua_asc = 0, ua_ascq = 0;
	unsigned char buf[SE_SENSE_BUF];

	memset(buf, 0, SE_SENSE_BUF);

	if (cdb[1] & 0x01) {
		pr_err("REQUEST_SENSE description emulation not"
		       " supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	rbuf = transport_kmap_data_sg(cmd);
	if (!rbuf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
		/*
		 * CURRENT ERROR, UNIT ATTENTION
		 */
		buf[0] = 0x70;
		buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;

		/*
		 * The Additional Sense Code (ASC) from the UNIT ATTENTION
		 */
		buf[SPC_ASC_KEY_OFFSET] = ua_asc;
		buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
		buf[7] = 0x0A;
	} else {
		/*
		 * CURRENT ERROR, NO SENSE
		 */
		buf[0] = 0x70;
		buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;

		/*
		 * NO ADDITIONAL SENSE INFORMATION
		 */
		buf[SPC_ASC_KEY_OFFSET] = 0x00;
		buf[7] = 0x0A;
	}

	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}

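/*
 * REPORT LUNS handler: walk the session's device list and return one
 * 8-byte LUN entry per mapped LUN, reporting the full LUN LIST LENGTH
 * even when the allocation length truncates the list.
 */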
sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
{
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	unsigned char *buf;
	u32 lun_count = 0, offset = 8, i;

	if (cmd->data_length < 16) {
		pr_warn("REPORT LUNS allocation length %u too small\n",
			cmd->data_length);
		return TCM_INVALID_CDB_FIELD;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
	 */
	if (!sess) {
		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&sess->se_node_acl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = sess->se_node_acl->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if ((offset + 8) > cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
		offset += 8;
	}
	spin_unlock_irq(&sess->se_node_acl->device_list_lock);

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);
	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}
EXPORT_SYMBOL(spc_emulate_report_luns);

static sense_reason_t
spc_emulate_testunitready(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

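/*
 * Parse the SPC opcodes shared by all device types: set the expected
 * transfer length in *size and, where the command is emulated locally,
 * point cmd->execute_cmd at the matching handler.
 */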
sense_reason_t
spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;

	switch (cdb[0]) {
	case MODE_SELECT:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SELECT_10:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case MODE_SENSE_10:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case LOG_SELECT:
	case LOG_SENSE:
		*size = (cdb[7] << 8) + cdb[8];
		break;
	case PERSISTENT_RESERVE_IN:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = target_scsi3_emulate_pr_in;
		break;
	case PERSISTENT_RESERVE_OUT:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = target_scsi3_emulate_pr_out;
		break;
	case RELEASE:
	case RELEASE_10:
		if (cdb[0] == RELEASE_10)
			*size = (cdb[7] << 8) | cdb[8];
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_release;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			*size = (cdb[7] << 8) | cdb[8];
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_reserve;
		break;
	case REQUEST_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_request_sense;
		break;
	case INQUIRY:
		*size = (cdb[3] << 8) + cdb[4];

		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->execute_cmd = spc_emulate_inquiry;
		break;
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		break;
	case EXTENDED_COPY:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_xcopy;
		break;
	case RECEIVE_COPY_RESULTS:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_receive_copy_results;
		break;
	case READ_ATTRIBUTE:
	case WRITE_ATTRIBUTE:
		*size = (cdb[10] << 24) | (cdb[11] << 16) |
			(cdb[12] << 8) | cdb[13];
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		*size = (cdb[3] << 8) | cdb[4];
		break;
	case WRITE_BUFFER:
		*size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		break;
	case REPORT_LUNS:
		cmd->execute_cmd = spc_emulate_report_luns;
		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = MSG_HEAD_TAG;
		break;
	case TEST_UNIT_READY:
		cmd->execute_cmd = spc_emulate_testunitready;
		*size = 0;
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_IN from SCC-2
			 * Check for emulated MI_REPORT_TARGET_PGS
			 */
			if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_report_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_OUT from SCC-2
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_set_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	default:
		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
			" 0x%02x, sending CHECK_CONDITION.\n",
			cmd->se_tfo->get_fabric_name(), cdb[0]);
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	return 0;
}
EXPORT_SYMBOL(spc_parse_cdb);