1 /*
2 * osd_initiator - Main body of the osd initiator library.
3 *
4 * Note: The file does not contain the advanced security functionality which
5 * is only needed by the security_manager's initiators.
6 *
7 * Copyright (C) 2008 Panasas Inc. All rights reserved.
8 *
9 * Authors:
10 * Boaz Harrosh <ooo@electrozaur.com>
11 * Benny Halevy <bhalevy@panasas.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 *
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 * 3. Neither the name of the Panasas company nor the names of its
26 * contributors may be used to endorse or promote products derived
27 * from this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
30 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
31 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
32 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
37 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
38 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
39 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 */
41
42 #include <linux/slab.h>
43 #include <linux/module.h>
44
45 #include <scsi/osd_initiator.h>
46 #include <scsi/osd_sec.h>
47 #include <scsi/osd_attributes.h>
48 #include <scsi/osd_sense.h>
49
50 #include <scsi/scsi_device.h>
51
52 #include "osd_debug.h"
53
54 #ifndef __unused
55 # define __unused __attribute__((unused))
56 #endif
57
58 enum { OSD_REQ_RETRIES = 1 };
59
60 MODULE_AUTHOR("Boaz Harrosh <ooo@electrozaur.com>");
61 MODULE_DESCRIPTION("open-osd initiator library libosd.ko");
62 MODULE_LICENSE("GPL");
63
64 static inline void build_test(void)
65 {
66 /* structures were not packed */
67 BUILD_BUG_ON(sizeof(struct osd_capability) != OSD_CAP_LEN);
68 BUILD_BUG_ON(sizeof(struct osdv2_cdb) != OSD_TOTAL_CDB_LEN);
69 BUILD_BUG_ON(sizeof(struct osdv1_cdb) != OSDv1_TOTAL_CDB_LEN);
70 }
71
72 static const char *_osd_ver_desc(struct osd_request *or)
73 {
74 return osd_req_is_ver1(or) ? "OSD1" : "OSD2";
75 }
76
77 #define ATTR_DEF_RI(id, len) ATTR_DEF(OSD_APAGE_ROOT_INFORMATION, id, len)
78
79 static int _osd_get_print_system_info(struct osd_dev *od,
80 void *caps, struct osd_dev_info *odi)
81 {
82 struct osd_request *or;
83 struct osd_attr get_attrs[] = {
84 ATTR_DEF_RI(OSD_ATTR_RI_VENDOR_IDENTIFICATION, 8),
85 ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_IDENTIFICATION, 16),
86 ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_MODEL, 32),
87 ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_REVISION_LEVEL, 4),
88 ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER, 64 /*variable*/),
89 ATTR_DEF_RI(OSD_ATTR_RI_OSD_NAME, 64 /*variable*/),
90 ATTR_DEF_RI(OSD_ATTR_RI_TOTAL_CAPACITY, 8),
91 ATTR_DEF_RI(OSD_ATTR_RI_USED_CAPACITY, 8),
92 ATTR_DEF_RI(OSD_ATTR_RI_NUMBER_OF_PARTITIONS, 8),
93 ATTR_DEF_RI(OSD_ATTR_RI_CLOCK, 6),
94 /* IBM-OSD-SIM has a bug with this one; put it last */
95 ATTR_DEF_RI(OSD_ATTR_RI_OSD_SYSTEM_ID, 20),
96 };
97 void *iter = NULL, *pFirst;
98 int nelem = ARRAY_SIZE(get_attrs), a = 0;
99 int ret;
100
101 or = osd_start_request(od, GFP_KERNEL);
102 if (!or)
103 return -ENOMEM;
104
105 /* get attrs */
106 osd_req_get_attributes(or, &osd_root_object);
107 osd_req_add_get_attr_list(or, get_attrs, ARRAY_SIZE(get_attrs));
108
109 ret = osd_finalize_request(or, 0, caps, NULL);
110 if (ret)
111 goto out;
112
113 ret = osd_execute_request(or);
114 if (ret) {
115 OSD_ERR("Failed to detect %s => %d\n", _osd_ver_desc(or), ret);
116 goto out;
117 }
118
119 osd_req_decode_get_attr_list(or, get_attrs, &nelem, &iter);
120
121 OSD_INFO("Detected %s device\n",
122 _osd_ver_desc(or));
123
124 pFirst = get_attrs[a++].val_ptr;
125 OSD_INFO("VENDOR_IDENTIFICATION [%s]\n",
126 (char *)pFirst);
127
128 pFirst = get_attrs[a++].val_ptr;
129 OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n",
130 (char *)pFirst);
131
132 pFirst = get_attrs[a++].val_ptr;
133 OSD_INFO("PRODUCT_MODEL [%s]\n",
134 (char *)pFirst);
135
136 pFirst = get_attrs[a++].val_ptr;
137 OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n",
138 pFirst ? get_unaligned_be32(pFirst) : ~0U);
139
140 pFirst = get_attrs[a++].val_ptr;
141 OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n",
142 (char *)pFirst);
143
144 odi->osdname_len = get_attrs[a].len;
145 /* Avoid NULL for memcmp optimization; 0-length is good enough */
146 odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
147 if (!odi->osdname) {
148 ret = -ENOMEM;
149 goto out;
150 }
151 if (odi->osdname_len)
152 memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
153 OSD_INFO("OSD_NAME [%s]\n", odi->osdname);
154 a++;
155
156 pFirst = get_attrs[a++].val_ptr;
157 OSD_INFO("TOTAL_CAPACITY [0x%llx]\n",
158 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
159
160 pFirst = get_attrs[a++].val_ptr;
161 OSD_INFO("USED_CAPACITY [0x%llx]\n",
162 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
163
164 pFirst = get_attrs[a++].val_ptr;
165 OSD_INFO("NUMBER_OF_PARTITIONS [%llu]\n",
166 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
167
168 if (a >= nelem)
169 goto out;
170
171 /* FIXME: Where are the time utilities */
172 pFirst = get_attrs[a++].val_ptr;
173 OSD_INFO("CLOCK [0x%6phN]\n", pFirst);
174
175 if (a < nelem) { /* IBM-OSD-SIM bug, Might not have it */
176 unsigned len = get_attrs[a].len;
177 char sid_dump[32*4 + 2]; /* 2nibbles+space+ASCII */
178
179 hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
180 sid_dump, sizeof(sid_dump), true);
181 OSD_INFO("OSD_SYSTEM_ID(%d)\n"
182 " [%s]\n", len, sid_dump);
183
184 if (unlikely(len > sizeof(odi->systemid))) {
185 OSD_ERR("OSD Target error: OSD_SYSTEM_ID too long(%d). "
186 "device identification might not work\n", len);
187 len = sizeof(odi->systemid);
188 }
189 odi->systemid_len = len;
190 memcpy(odi->systemid, get_attrs[a].val_ptr, len);
191 a++;
192 }
193 out:
194 osd_end_request(or);
195 return ret;
196 }
197
198 int osd_auto_detect_ver(struct osd_dev *od,
199 void *caps, struct osd_dev_info *odi)
200 {
201 int ret;
202
203 /* Auto-detect the osd version */
204 ret = _osd_get_print_system_info(od, caps, odi);
205 if (ret) {
206 osd_dev_set_ver(od, OSD_VER1);
207 OSD_DEBUG("converting to OSD1\n");
208 ret = _osd_get_print_system_info(od, caps, odi);
209 }
210
211 return ret;
212 }
213 EXPORT_SYMBOL(osd_auto_detect_ver);
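
/*
 * Illustrative only, kept out of the build: a minimal sketch of how a
 * caller might probe a device with osd_auto_detect_ver(). The helper name
 * example_detect_osd() is hypothetical; real users add their own
 * reference counting, locking and cleanup around this.
 */
#if 0
static int example_detect_osd(struct osd_dev *od, struct scsi_device *sdev)
{
	u8 caps[OSD_CAP_LEN];
	struct osd_dev_info odi = {};
	int ret;

	osd_dev_init(od, sdev);

	/* no-security, allow-everything capability for the root object */
	osd_sec_init_nosec_doall_caps(caps, &osd_root_object, false, true);

	ret = osd_auto_detect_ver(od, caps, &odi);
	if (ret)
		return ret;

	/* odi.osdname was allocated by the library; the caller frees it */
	kfree(odi.osdname);
	return 0;
}
#endif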
214
215 static unsigned _osd_req_cdb_len(struct osd_request *or)
216 {
217 return osd_req_is_ver1(or) ? OSDv1_TOTAL_CDB_LEN : OSD_TOTAL_CDB_LEN;
218 }
219
220 static unsigned _osd_req_alist_elem_size(struct osd_request *or, unsigned len)
221 {
222 return osd_req_is_ver1(or) ?
223 osdv1_attr_list_elem_size(len) :
224 osdv2_attr_list_elem_size(len);
225 }
226
227 static void _osd_req_alist_elem_encode(struct osd_request *or,
228 void *attr_last, const struct osd_attr *oa)
229 {
230 if (osd_req_is_ver1(or)) {
231 struct osdv1_attributes_list_element *attr = attr_last;
232
233 attr->attr_page = cpu_to_be32(oa->attr_page);
234 attr->attr_id = cpu_to_be32(oa->attr_id);
235 attr->attr_bytes = cpu_to_be16(oa->len);
236 memcpy(attr->attr_val, oa->val_ptr, oa->len);
237 } else {
238 struct osdv2_attributes_list_element *attr = attr_last;
239
240 attr->attr_page = cpu_to_be32(oa->attr_page);
241 attr->attr_id = cpu_to_be32(oa->attr_id);
242 attr->attr_bytes = cpu_to_be16(oa->len);
243 memcpy(attr->attr_val, oa->val_ptr, oa->len);
244 }
245 }
246
247 static int _osd_req_alist_elem_decode(struct osd_request *or,
248 void *cur_p, struct osd_attr *oa, unsigned max_bytes)
249 {
250 unsigned inc;
251 if (osd_req_is_ver1(or)) {
252 struct osdv1_attributes_list_element *attr = cur_p;
253
254 if (max_bytes < sizeof(*attr))
255 return -1;
256
257 oa->len = be16_to_cpu(attr->attr_bytes);
258 inc = _osd_req_alist_elem_size(or, oa->len);
259 if (inc > max_bytes)
260 return -1;
261
262 oa->attr_page = be32_to_cpu(attr->attr_page);
263 oa->attr_id = be32_to_cpu(attr->attr_id);
264
265 /* OSD1: On empty attributes we return a pointer to 2 bytes
266 * of zeros. This keeps the behaviour similar to OSD2.
267 * (See below)
268 */
269 oa->val_ptr = likely(oa->len) ? attr->attr_val :
270 (u8 *)&attr->attr_bytes;
271 } else {
272 struct osdv2_attributes_list_element *attr = cur_p;
273
274 if (max_bytes < sizeof(*attr))
275 return -1;
276
277 oa->len = be16_to_cpu(attr->attr_bytes);
278 inc = _osd_req_alist_elem_size(or, oa->len);
279 if (inc > max_bytes)
280 return -1;
281
282 oa->attr_page = be32_to_cpu(attr->attr_page);
283 oa->attr_id = be32_to_cpu(attr->attr_id);
284
285 /* OSD2: For convenience, on empty attributes, we return 8 bytes
286 * of zeros here. This keeps the same behaviour as OSD2r04,
287 * and plays nicely with NULL-terminated ASCII fields.
288 * oa->val_ptr == NULL marks the end-of-list, or error.
289 */
290 oa->val_ptr = likely(oa->len) ? attr->attr_val : attr->reserved;
291 }
292 return inc;
293 }
294
295 static unsigned _osd_req_alist_size(struct osd_request *or, void *list_head)
296 {
297 return osd_req_is_ver1(or) ?
298 osdv1_list_size(list_head) :
299 osdv2_list_size(list_head);
300 }
301
302 static unsigned _osd_req_sizeof_alist_header(struct osd_request *or)
303 {
304 return osd_req_is_ver1(or) ?
305 sizeof(struct osdv1_attributes_list_header) :
306 sizeof(struct osdv2_attributes_list_header);
307 }
308
309 static void _osd_req_set_alist_type(struct osd_request *or,
310 void *list, int list_type)
311 {
312 if (osd_req_is_ver1(or)) {
313 struct osdv1_attributes_list_header *attr_list = list;
314
315 memset(attr_list, 0, sizeof(*attr_list));
316 attr_list->type = list_type;
317 } else {
318 struct osdv2_attributes_list_header *attr_list = list;
319
320 memset(attr_list, 0, sizeof(*attr_list));
321 attr_list->type = list_type;
322 }
323 }
324
325 static bool _osd_req_is_alist_type(struct osd_request *or,
326 void *list, int list_type)
327 {
328 if (!list)
329 return false;
330
331 if (osd_req_is_ver1(or)) {
332 struct osdv1_attributes_list_header *attr_list = list;
333
334 return attr_list->type == list_type;
335 } else {
336 struct osdv2_attributes_list_header *attr_list = list;
337
338 return attr_list->type == list_type;
339 }
340 }
341
342 /* This is for List-objects not Attributes-Lists */
343 static void _osd_req_encode_olist(struct osd_request *or,
344 struct osd_obj_id_list *list)
345 {
346 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
347
348 if (osd_req_is_ver1(or)) {
349 cdbh->v1.list_identifier = list->list_identifier;
350 cdbh->v1.start_address = list->continuation_id;
351 } else {
352 cdbh->v2.list_identifier = list->list_identifier;
353 cdbh->v2.start_address = list->continuation_id;
354 }
355 }
356
357 static osd_cdb_offset osd_req_encode_offset(struct osd_request *or,
358 u64 offset, unsigned *padding)
359 {
360 return __osd_encode_offset(offset, padding,
361 osd_req_is_ver1(or) ?
362 OSDv1_OFFSET_MIN_SHIFT : OSD_OFFSET_MIN_SHIFT,
363 OSD_OFFSET_MAX_SHIFT);
364 }
365
366 static struct osd_security_parameters *
367 _osd_req_sec_params(struct osd_request *or)
368 {
369 struct osd_cdb *ocdb = &or->cdb;
370
371 if (osd_req_is_ver1(or))
372 return (struct osd_security_parameters *)&ocdb->v1.sec_params;
373 else
374 return (struct osd_security_parameters *)&ocdb->v2.sec_params;
375 }
376
377 void osd_dev_init(struct osd_dev *osdd, struct scsi_device *scsi_device)
378 {
379 memset(osdd, 0, sizeof(*osdd));
380 osdd->scsi_device = scsi_device;
381 osdd->def_timeout = BLK_DEFAULT_SG_TIMEOUT;
382 #ifdef OSD_VER1_SUPPORT
383 osdd->version = OSD_VER2;
384 #endif
385 /* TODO: Allocate pools for osd_request attributes ... */
386 }
387 EXPORT_SYMBOL(osd_dev_init);
388
389 void osd_dev_fini(struct osd_dev *osdd)
390 {
391 /* TODO: De-allocate pools */
392
393 osdd->scsi_device = NULL;
394 }
395 EXPORT_SYMBOL(osd_dev_fini);
396
397 static struct osd_request *_osd_request_alloc(gfp_t gfp)
398 {
399 struct osd_request *or;
400
401 /* TODO: Use mempool with one saved request */
402 or = kzalloc(sizeof(*or), gfp);
403 return or;
404 }
405
406 static void _osd_request_free(struct osd_request *or)
407 {
408 kfree(or);
409 }
410
411 struct osd_request *osd_start_request(struct osd_dev *dev, gfp_t gfp)
412 {
413 struct osd_request *or;
414
415 or = _osd_request_alloc(gfp);
416 if (!or)
417 return NULL;
418
419 or->osd_dev = dev;
420 or->alloc_flags = gfp;
421 or->timeout = dev->def_timeout;
422 or->retries = OSD_REQ_RETRIES;
423
424 return or;
425 }
426 EXPORT_SYMBOL(osd_start_request);
427
428 static void _osd_free_seg(struct osd_request *or __unused,
429 struct _osd_req_data_segment *seg)
430 {
431 if (!seg->buff || !seg->alloc_size)
432 return;
433
434 kfree(seg->buff);
435 seg->buff = NULL;
436 seg->alloc_size = 0;
437 }
438
439 static void _put_request(struct request *rq)
440 {
441 /*
442 * If osd_finalize_request() was called but the request was not
443 * executed through the block layer, then we must release BIOs.
444 * TODO: Keep error code in or->async_error. Need to audit all
445 * code paths.
446 */
447 if (unlikely(rq->bio))
448 blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
449 else
450 blk_put_request(rq);
451 }
452
453 void osd_end_request(struct osd_request *or)
454 {
455 struct request *rq = or->request;
456
457 if (rq) {
458 if (rq->next_rq) {
459 _put_request(rq->next_rq);
460 rq->next_rq = NULL;
461 }
462
463 _put_request(rq);
464 }
465
466 _osd_free_seg(or, &or->get_attr);
467 _osd_free_seg(or, &or->enc_get_attr);
468 _osd_free_seg(or, &or->set_attr);
469 _osd_free_seg(or, &or->cdb_cont);
470
471 _osd_request_free(or);
472 }
473 EXPORT_SYMBOL(osd_end_request);
474
475 static void _set_error_resid(struct osd_request *or, struct request *req,
476 int error)
477 {
478 or->async_error = error;
479 or->req_errors = req->errors ? : error;
480 or->sense_len = req->sense_len;
481 if (or->out.req)
482 or->out.residual = or->out.req->resid_len;
483 if (or->in.req)
484 or->in.residual = or->in.req->resid_len;
485 }
486
487 int osd_execute_request(struct osd_request *or)
488 {
489 int error = blk_execute_rq(or->request->q, NULL, or->request, 0);
490
491 _set_error_resid(or, or->request, error);
492 return error;
493 }
494 EXPORT_SYMBOL(osd_execute_request);
495
496 static void osd_request_async_done(struct request *req, int error)
497 {
498 struct osd_request *or = req->end_io_data;
499
500 _set_error_resid(or, req, error);
501 if (req->next_rq) {
502 __blk_put_request(req->q, req->next_rq);
503 req->next_rq = NULL;
504 }
505
506 __blk_put_request(req->q, req);
507 or->request = NULL;
508 or->in.req = NULL;
509 or->out.req = NULL;
510
511 if (or->async_done)
512 or->async_done(or, or->async_private);
513 else
514 osd_end_request(or);
515 }
516
517 int osd_execute_request_async(struct osd_request *or,
518 osd_req_done_fn *done, void *private)
519 {
520 or->request->end_io_data = or;
521 or->async_private = private;
522 or->async_done = done;
523
524 blk_execute_rq_nowait(or->request->q, NULL, or->request, 0,
525 osd_request_async_done);
526 return 0;
527 }
528 EXPORT_SYMBOL(osd_execute_request_async);
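
/*
 * Illustrative only, kept out of the build: a minimal completion callback
 * for osd_execute_request_async(). It would be passed as the @done
 * argument, e.g. osd_execute_request_async(or, example_async_done, NULL);
 * the name example_async_done() is hypothetical. A real caller would
 * typically decode sense and hand the result to its own state machine
 * before releasing the request.
 */
#if 0
static void example_async_done(struct osd_request *or, void *private)
{
	/* runs from the block layer's completion path */
	OSD_DEBUG("async done: errors=0x%x\n", or->req_errors);

	/* since we take ownership here, release everything ourselves */
	osd_end_request(or);
}
#endif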
529
530 u8 sg_out_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
531 u8 sg_in_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
532
533 static int _osd_realloc_seg(struct osd_request *or,
534 struct _osd_req_data_segment *seg, unsigned max_bytes)
535 {
536 void *buff;
537
538 if (seg->alloc_size >= max_bytes)
539 return 0;
540
541 buff = krealloc(seg->buff, max_bytes, or->alloc_flags);
542 if (!buff) {
543 OSD_ERR("Failed to Realloc %d-bytes was-%d\n", max_bytes,
544 seg->alloc_size);
545 return -ENOMEM;
546 }
547
548 memset(buff + seg->alloc_size, 0, max_bytes - seg->alloc_size);
549 seg->buff = buff;
550 seg->alloc_size = max_bytes;
551 return 0;
552 }
553
554 static int _alloc_cdb_cont(struct osd_request *or, unsigned total_bytes)
555 {
556 OSD_DEBUG("total_bytes=%d\n", total_bytes);
557 return _osd_realloc_seg(or, &or->cdb_cont, total_bytes);
558 }
559
560 static int _alloc_set_attr_list(struct osd_request *or,
561 const struct osd_attr *oa, unsigned nelem, unsigned add_bytes)
562 {
563 unsigned total_bytes = add_bytes;
564
565 for (; nelem; --nelem, ++oa)
566 total_bytes += _osd_req_alist_elem_size(or, oa->len);
567
568 OSD_DEBUG("total_bytes=%d\n", total_bytes);
569 return _osd_realloc_seg(or, &or->set_attr, total_bytes);
570 }
571
572 static int _alloc_get_attr_desc(struct osd_request *or, unsigned max_bytes)
573 {
574 OSD_DEBUG("total_bytes=%d\n", max_bytes);
575 return _osd_realloc_seg(or, &or->enc_get_attr, max_bytes);
576 }
577
578 static int _alloc_get_attr_list(struct osd_request *or)
579 {
580 OSD_DEBUG("total_bytes=%d\n", or->get_attr.total_bytes);
581 return _osd_realloc_seg(or, &or->get_attr, or->get_attr.total_bytes);
582 }
583
584 /*
585 * Common to all OSD commands
586 */
587
588 static void _osdv1_req_encode_common(struct osd_request *or,
589 __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
590 {
591 struct osdv1_cdb *ocdb = &or->cdb.v1;
592
593 /*
594 * For speed, the commands
595 * OSD_ACT_PERFORM_SCSI_COMMAND , V1 0x8F7E, V2 0x8F7C
596 * OSD_ACT_SCSI_TASK_MANAGEMENT , V1 0x8F7F, V2 0x8F7D
597 * are not supported here. Callers should pass zero and set the action after the call.
598 */
599 act &= cpu_to_be16(~0x0080); /* V1 action code */
600
601 OSD_DEBUG("OSDv1 execute opcode 0x%x\n", be16_to_cpu(act));
602
603 ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
604 ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
605 ocdb->h.varlen_cdb.service_action = act;
606
607 ocdb->h.partition = cpu_to_be64(obj->partition);
608 ocdb->h.object = cpu_to_be64(obj->id);
609 ocdb->h.v1.length = cpu_to_be64(len);
610 ocdb->h.v1.start_address = cpu_to_be64(offset);
611 }
612
613 static void _osdv2_req_encode_common(struct osd_request *or,
614 __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
615 {
616 struct osdv2_cdb *ocdb = &or->cdb.v2;
617
618 OSD_DEBUG("OSDv2 execute opcode 0x%x\n", be16_to_cpu(act));
619
620 ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
621 ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
622 ocdb->h.varlen_cdb.service_action = act;
623
624 ocdb->h.partition = cpu_to_be64(obj->partition);
625 ocdb->h.object = cpu_to_be64(obj->id);
626 ocdb->h.v2.length = cpu_to_be64(len);
627 ocdb->h.v2.start_address = cpu_to_be64(offset);
628 }
629
630 static void _osd_req_encode_common(struct osd_request *or,
631 __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
632 {
633 if (osd_req_is_ver1(or))
634 _osdv1_req_encode_common(or, act, obj, offset, len);
635 else
636 _osdv2_req_encode_common(or, act, obj, offset, len);
637 }
638
639 /*
640 * Device commands
641 */
642 /*TODO: void osd_req_set_master_seed_xchg(struct osd_request *, ...); */
643 /*TODO: void osd_req_set_master_key(struct osd_request *, ...); */
644
645 void osd_req_format(struct osd_request *or, u64 tot_capacity)
646 {
647 _osd_req_encode_common(or, OSD_ACT_FORMAT_OSD, &osd_root_object, 0,
648 tot_capacity);
649 }
650 EXPORT_SYMBOL(osd_req_format);
651
652 int osd_req_list_dev_partitions(struct osd_request *or,
653 osd_id initial_id, struct osd_obj_id_list *list, unsigned nelem)
654 {
655 return osd_req_list_partition_objects(or, 0, initial_id, list, nelem);
656 }
657 EXPORT_SYMBOL(osd_req_list_dev_partitions);
658
659 static void _osd_req_encode_flush(struct osd_request *or,
660 enum osd_options_flush_scope_values op)
661 {
662 struct osd_cdb_head *ocdb = osd_cdb_head(&or->cdb);
663
664 ocdb->command_specific_options = op;
665 }
666
667 void osd_req_flush_obsd(struct osd_request *or,
668 enum osd_options_flush_scope_values op)
669 {
670 _osd_req_encode_common(or, OSD_ACT_FLUSH_OSD, &osd_root_object, 0, 0);
671 _osd_req_encode_flush(or, op);
672 }
673 EXPORT_SYMBOL(osd_req_flush_obsd);
674
675 /*TODO: void osd_req_perform_scsi_command(struct osd_request *,
676 const u8 *cdb, ...); */
677 /*TODO: void osd_req_task_management(struct osd_request *, ...); */
678
679 /*
680 * Partition commands
681 */
682 static void _osd_req_encode_partition(struct osd_request *or,
683 __be16 act, osd_id partition)
684 {
685 struct osd_obj_id par = {
686 .partition = partition,
687 .id = 0,
688 };
689
690 _osd_req_encode_common(or, act, &par, 0, 0);
691 }
692
693 void osd_req_create_partition(struct osd_request *or, osd_id partition)
694 {
695 _osd_req_encode_partition(or, OSD_ACT_CREATE_PARTITION, partition);
696 }
697 EXPORT_SYMBOL(osd_req_create_partition);
698
699 void osd_req_remove_partition(struct osd_request *or, osd_id partition)
700 {
701 _osd_req_encode_partition(or, OSD_ACT_REMOVE_PARTITION, partition);
702 }
703 EXPORT_SYMBOL(osd_req_remove_partition);
704
705 /*TODO: void osd_req_set_partition_key(struct osd_request *,
706 osd_id partition, u8 new_key_id[OSD_CRYPTO_KEYID_SIZE],
707 u8 seed[OSD_CRYPTO_SEED_SIZE]); */
708
709 static int _osd_req_list_objects(struct osd_request *or,
710 __be16 action, const struct osd_obj_id *obj, osd_id initial_id,
711 struct osd_obj_id_list *list, unsigned nelem)
712 {
713 struct request_queue *q = osd_request_queue(or->osd_dev);
714 u64 len = nelem * sizeof(osd_id) + sizeof(*list);
715 struct bio *bio;
716
717 _osd_req_encode_common(or, action, obj, (u64)initial_id, len);
718
719 if (list->list_identifier)
720 _osd_req_encode_olist(or, list);
721
722 WARN_ON(or->in.bio);
723 bio = bio_map_kern(q, list, len, or->alloc_flags);
724 if (IS_ERR(bio)) {
725 OSD_ERR("!!! Failed to allocate list_objects BIO\n");
726 return PTR_ERR(bio);
727 }
728
729 bio->bi_rw &= ~REQ_WRITE;
730 or->in.bio = bio;
731 or->in.total_bytes = bio->bi_iter.bi_size;
732 return 0;
733 }
734
735 int osd_req_list_partition_collections(struct osd_request *or,
736 osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
737 unsigned nelem)
738 {
739 struct osd_obj_id par = {
740 .partition = partition,
741 .id = 0,
742 };
743
744 return osd_req_list_collection_objects(or, &par, initial_id, list,
745 nelem);
746 }
747 EXPORT_SYMBOL(osd_req_list_partition_collections);
748
749 int osd_req_list_partition_objects(struct osd_request *or,
750 osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
751 unsigned nelem)
752 {
753 struct osd_obj_id par = {
754 .partition = partition,
755 .id = 0,
756 };
757
758 return _osd_req_list_objects(or, OSD_ACT_LIST, &par, initial_id, list,
759 nelem);
760 }
761 EXPORT_SYMBOL(osd_req_list_partition_objects);
762
763 void osd_req_flush_partition(struct osd_request *or,
764 osd_id partition, enum osd_options_flush_scope_values op)
765 {
766 _osd_req_encode_partition(or, OSD_ACT_FLUSH_PARTITION, partition);
767 _osd_req_encode_flush(or, op);
768 }
769 EXPORT_SYMBOL(osd_req_flush_partition);
770
771 /*
772 * Collection commands
773 */
774 /*TODO: void osd_req_create_collection(struct osd_request *,
775 const struct osd_obj_id *); */
776 /*TODO: void osd_req_remove_collection(struct osd_request *,
777 const struct osd_obj_id *); */
778
779 int osd_req_list_collection_objects(struct osd_request *or,
780 const struct osd_obj_id *obj, osd_id initial_id,
781 struct osd_obj_id_list *list, unsigned nelem)
782 {
783 return _osd_req_list_objects(or, OSD_ACT_LIST_COLLECTION, obj,
784 initial_id, list, nelem);
785 }
786 EXPORT_SYMBOL(osd_req_list_collection_objects);
787
788 /*TODO: void query(struct osd_request *, ...); V2 */
789
790 void osd_req_flush_collection(struct osd_request *or,
791 const struct osd_obj_id *obj, enum osd_options_flush_scope_values op)
792 {
793 _osd_req_encode_common(or, OSD_ACT_FLUSH_PARTITION, obj, 0, 0);
794 _osd_req_encode_flush(or, op);
795 }
796 EXPORT_SYMBOL(osd_req_flush_collection);
797
798 /*TODO: void get_member_attrs(struct osd_request *, ...); V2 */
799 /*TODO: void set_member_attrs(struct osd_request *, ...); V2 */
800
801 /*
802 * Object commands
803 */
804 void osd_req_create_object(struct osd_request *or, struct osd_obj_id *obj)
805 {
806 _osd_req_encode_common(or, OSD_ACT_CREATE, obj, 0, 0);
807 }
808 EXPORT_SYMBOL(osd_req_create_object);
809
810 void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *obj)
811 {
812 _osd_req_encode_common(or, OSD_ACT_REMOVE, obj, 0, 0);
813 }
814 EXPORT_SYMBOL(osd_req_remove_object);
815
816
817 /*TODO: void osd_req_create_multi(struct osd_request *or,
818 struct osd_obj_id *first, struct osd_obj_id_list *list, unsigned nelem);
819 */
820
821 void osd_req_write(struct osd_request *or,
822 const struct osd_obj_id *obj, u64 offset,
823 struct bio *bio, u64 len)
824 {
825 _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
826 WARN_ON(or->out.bio || or->out.total_bytes);
827 WARN_ON(0 == (bio->bi_rw & REQ_WRITE));
828 or->out.bio = bio;
829 or->out.total_bytes = len;
830 }
831 EXPORT_SYMBOL(osd_req_write);
832
833 int osd_req_write_kern(struct osd_request *or,
834 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
835 {
836 struct request_queue *req_q = osd_request_queue(or->osd_dev);
837 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
838
839 if (IS_ERR(bio))
840 return PTR_ERR(bio);
841
842 bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */
843 osd_req_write(or, obj, offset, bio, len);
844 return 0;
845 }
846 EXPORT_SYMBOL(osd_req_write_kern);
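
/*
 * Illustrative only, kept out of the build: a minimal sketch of the
 * synchronous life cycle around osd_req_write_kern() -- start, encode,
 * finalize, execute, decode sense, end. The helper name
 * example_sync_write() is hypothetical, and a real caller would normally
 * reuse pre-built capabilities instead of rebuilding them per command.
 */
#if 0
static int example_sync_write(struct osd_dev *od, const struct osd_obj_id *obj,
			      u64 offset, void *buff, u64 len)
{
	u8 caps[OSD_CAP_LEN];
	struct osd_request *or;
	int ret;

	or = osd_start_request(od, GFP_KERNEL);
	if (unlikely(!or))
		return -ENOMEM;

	ret = osd_req_write_kern(or, obj, offset, buff, len);
	if (ret)
		goto out;

	/* NOSEC capability that permits everything on @obj */
	osd_sec_init_nosec_doall_caps(caps, obj, false, osd_req_is_ver1(or));

	ret = osd_finalize_request(or, 0, caps, NULL);
	if (ret)
		goto out;

	ret = osd_execute_request(or);
	if (ret) {
		struct osd_sense_info osi;

		ret = osd_req_decode_sense_full(or, &osi, true,
						NULL, 0, NULL, 0);
	}
out:
	osd_end_request(or);
	return ret;
}
#endif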
847
848 /*TODO: void osd_req_append(struct osd_request *,
849 const struct osd_obj_id *, struct bio *data_out); */
850 /*TODO: void osd_req_create_write(struct osd_request *,
851 const struct osd_obj_id *, struct bio *data_out, u64 offset); */
852 /*TODO: void osd_req_clear(struct osd_request *,
853 const struct osd_obj_id *, u64 offset, u64 len); */
854 /*TODO: void osd_req_punch(struct osd_request *,
855 const struct osd_obj_id *, u64 offset, u64 len); V2 */
856
857 void osd_req_flush_object(struct osd_request *or,
858 const struct osd_obj_id *obj, enum osd_options_flush_scope_values op,
859 /*V2*/ u64 offset, /*V2*/ u64 len)
860 {
861 if (unlikely(osd_req_is_ver1(or) && (offset || len))) {
862 OSD_DEBUG("OSD Ver1 flush on specific range ignored\n");
863 offset = 0;
864 len = 0;
865 }
866
867 _osd_req_encode_common(or, OSD_ACT_FLUSH, obj, offset, len);
868 _osd_req_encode_flush(or, op);
869 }
870 EXPORT_SYMBOL(osd_req_flush_object);
871
872 void osd_req_read(struct osd_request *or,
873 const struct osd_obj_id *obj, u64 offset,
874 struct bio *bio, u64 len)
875 {
876 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
877 WARN_ON(or->in.bio || or->in.total_bytes);
878 WARN_ON(bio->bi_rw & REQ_WRITE);
879 or->in.bio = bio;
880 or->in.total_bytes = len;
881 }
882 EXPORT_SYMBOL(osd_req_read);
883
884 int osd_req_read_kern(struct osd_request *or,
885 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
886 {
887 struct request_queue *req_q = osd_request_queue(or->osd_dev);
888 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
889
890 if (IS_ERR(bio))
891 return PTR_ERR(bio);
892
893 osd_req_read(or, obj, offset, bio, len);
894 return 0;
895 }
896 EXPORT_SYMBOL(osd_req_read_kern);
897
898 static int _add_sg_continuation_descriptor(struct osd_request *or,
899 const struct osd_sg_entry *sglist, unsigned numentries, u64 *len)
900 {
901 struct osd_sg_continuation_descriptor *oscd;
902 u32 oscd_size;
903 unsigned i;
904 int ret;
905
906 oscd_size = sizeof(*oscd) + numentries * sizeof(oscd->entries[0]);
907
908 if (!or->cdb_cont.total_bytes) {
909 /* First time, jump over the header, we will write to:
910 * cdb_cont.buff + cdb_cont.total_bytes
911 */
912 or->cdb_cont.total_bytes =
913 sizeof(struct osd_continuation_segment_header);
914 }
915
916 ret = _alloc_cdb_cont(or, or->cdb_cont.total_bytes + oscd_size);
917 if (unlikely(ret))
918 return ret;
919
920 oscd = or->cdb_cont.buff + or->cdb_cont.total_bytes;
921 oscd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST);
922 oscd->hdr.pad_length = 0;
923 oscd->hdr.length = cpu_to_be32(oscd_size - sizeof(*oscd));
924
925 *len = 0;
926 /* copy the sg entries and convert to network byte order */
927 for (i = 0; i < numentries; i++) {
928 oscd->entries[i].offset = cpu_to_be64(sglist[i].offset);
929 oscd->entries[i].len = cpu_to_be64(sglist[i].len);
930 *len += sglist[i].len;
931 }
932
933 or->cdb_cont.total_bytes += oscd_size;
934 OSD_DEBUG("total_bytes=%d oscd_size=%d numentries=%d\n",
935 or->cdb_cont.total_bytes, oscd_size, numentries);
936 return 0;
937 }
938
939 static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
940 {
941 struct request_queue *req_q = osd_request_queue(or->osd_dev);
942 struct bio *bio;
943 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
944 struct osd_continuation_segment_header *cont_seg_hdr;
945
946 if (!or->cdb_cont.total_bytes)
947 return 0;
948
949 cont_seg_hdr = or->cdb_cont.buff;
950 cont_seg_hdr->format = CDB_CONTINUATION_FORMAT_V2;
951 cont_seg_hdr->service_action = cdbh->varlen_cdb.service_action;
952
953 /* create a bio for continuation segment */
954 bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes,
955 GFP_KERNEL);
956 if (IS_ERR(bio))
957 return PTR_ERR(bio);
958
959 bio->bi_rw |= REQ_WRITE;
960
961 /* Sign the continuation before its bio is linked with the other
962 * data segments, since the continuation's integrity check is kept
963 * separate from theirs.
964 */
965 osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key);
966
967 cdbh->v2.cdb_continuation_length = cpu_to_be32(or->cdb_cont.total_bytes);
968
969 /* we can't use _req_append_segment, because we need to link in the
970 * continuation bio to the head of the bio list - the
971 * continuation segment (if it exists) is always the first segment in
972 * the out data buffer.
973 */
974 bio->bi_next = or->out.bio;
975 or->out.bio = bio;
976 or->out.total_bytes += or->cdb_cont.total_bytes;
977
978 return 0;
979 }
980
981 /* osd_req_write_sg: Takes a @bio that points to the data out buffer and an
982 * @sglist that has the scatter gather entries. Scatter-gather enables a write
983 * of multiple non-contiguous areas of an object in a single call. The extents
984 * may overlap and/or be in any order. The only constraint is that:
985 * total_bytes(sglist) >= total_bytes(bio)
986 */
987 int osd_req_write_sg(struct osd_request *or,
988 const struct osd_obj_id *obj, struct bio *bio,
989 const struct osd_sg_entry *sglist, unsigned numentries)
990 {
991 u64 len;
992 int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
993
994 if (ret)
995 return ret;
996 osd_req_write(or, obj, 0, bio, len);
997
998 return 0;
999 }
1000 EXPORT_SYMBOL(osd_req_write_sg);
1001
1002 /* osd_req_read_sg: Read multiple extents of an object into @bio
1003 * See osd_req_write_sg
1004 */
1005 int osd_req_read_sg(struct osd_request *or,
1006 const struct osd_obj_id *obj, struct bio *bio,
1007 const struct osd_sg_entry *sglist, unsigned numentries)
1008 {
1009 u64 len;
1010 u64 off;
1011 int ret;
1012
1013 if (numentries > 1) {
1014 off = 0;
1015 ret = _add_sg_continuation_descriptor(or, sglist, numentries,
1016 &len);
1017 if (ret)
1018 return ret;
1019 } else {
1020 /* Optimize the case of single segment, read_sg is a
1021 * bidi operation.
1022 */
1023 len = sglist->len;
1024 off = sglist->offset;
1025 }
1026 osd_req_read(or, obj, off, bio, len);
1027
1028 return 0;
1029 }
1030 EXPORT_SYMBOL(osd_req_read_sg);
1031
1032 /* SG-list write/read Kern API
1033 *
1034 * osd_req_{write,read}_sg_kern takes an array of @buff pointers and an array
1035 * of sg_entries. @numentries indicates how many pointers and sg_entries there
1036 * are. By requiring an array of buff pointers, this allows a caller to do a
1037 * single write/read and scatter into multiple buffers.
1038 * NOTE: Each buffer + len should not cross a page boundary.
1039 */
1040 static struct bio *_create_sg_bios(struct osd_request *or,
1041 void **buff, const struct osd_sg_entry *sglist, unsigned numentries)
1042 {
1043 struct request_queue *q = osd_request_queue(or->osd_dev);
1044 struct bio *bio;
1045 unsigned i;
1046
1047 bio = bio_kmalloc(GFP_KERNEL, numentries);
1048 if (unlikely(!bio)) {
1049 OSD_DEBUG("Failed to allocate BIO size=%u\n", numentries);
1050 return ERR_PTR(-ENOMEM);
1051 }
1052
1053 for (i = 0; i < numentries; i++) {
1054 unsigned offset = offset_in_page(buff[i]);
1055 struct page *page = virt_to_page(buff[i]);
1056 unsigned len = sglist[i].len;
1057 unsigned added_len;
1058
1059 BUG_ON(offset + len > PAGE_SIZE);
1060 added_len = bio_add_pc_page(q, bio, page, len, offset);
1061 if (unlikely(len != added_len)) {
1062 OSD_DEBUG("bio_add_pc_page len(%d) != added_len(%d)\n",
1063 len, added_len);
1064 bio_put(bio);
1065 return ERR_PTR(-ENOMEM);
1066 }
1067 }
1068
1069 return bio;
1070 }
1071
1072 int osd_req_write_sg_kern(struct osd_request *or,
1073 const struct osd_obj_id *obj, void **buff,
1074 const struct osd_sg_entry *sglist, unsigned numentries)
1075 {
1076 struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
1077 if (IS_ERR(bio))
1078 return PTR_ERR(bio);
1079
1080 bio->bi_rw |= REQ_WRITE;
1081 osd_req_write_sg(or, obj, bio, sglist, numentries);
1082
1083 return 0;
1084 }
1085 EXPORT_SYMBOL(osd_req_write_sg_kern);
1086
1087 int osd_req_read_sg_kern(struct osd_request *or,
1088 const struct osd_obj_id *obj, void **buff,
1089 const struct osd_sg_entry *sglist, unsigned numentries)
1090 {
1091 struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
1092 if (IS_ERR(bio))
1093 return PTR_ERR(bio);
1094
1095 osd_req_read_sg(or, obj, bio, sglist, numentries);
1096
1097 return 0;
1098 }
1099 EXPORT_SYMBOL(osd_req_read_sg_kern);
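
/*
 * Illustrative only, kept out of the build: a sketch of the sg_kern
 * interface above. Two buffers, each contained in a single page, are
 * written to two distant extents of one object in a single command. The
 * helper name example_sg_write_kern() and the extent offsets are
 * hypothetical.
 */
#if 0
static int example_sg_write_kern(struct osd_request *or,
				 const struct osd_obj_id *obj,
				 void *buf0, void *buf1, unsigned len)
{
	struct osd_sg_entry sglist[] = {
		{ .offset = 0,       .len = len },
		{ .offset = 1 << 20, .len = len },
	};
	void *buff[] = { buf0, buf1 };

	/* each buffer + len must not cross a page boundary (see above) */
	return osd_req_write_sg_kern(or, obj, buff, sglist,
				     ARRAY_SIZE(sglist));
}
#endif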
1100
1101
1102
1103 void osd_req_get_attributes(struct osd_request *or,
1104 const struct osd_obj_id *obj)
1105 {
1106 _osd_req_encode_common(or, OSD_ACT_GET_ATTRIBUTES, obj, 0, 0);
1107 }
1108 EXPORT_SYMBOL(osd_req_get_attributes);
1109
1110 void osd_req_set_attributes(struct osd_request *or,
1111 const struct osd_obj_id *obj)
1112 {
1113 _osd_req_encode_common(or, OSD_ACT_SET_ATTRIBUTES, obj, 0, 0);
1114 }
1115 EXPORT_SYMBOL(osd_req_set_attributes);
1116
1117 /*
1118 * Attributes List-mode
1119 */
1120
1121 int osd_req_add_set_attr_list(struct osd_request *or,
1122 const struct osd_attr *oa, unsigned nelem)
1123 {
1124 unsigned total_bytes = or->set_attr.total_bytes;
1125 void *attr_last;
1126 int ret;
1127
1128 if (or->attributes_mode &&
1129 or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
1130 WARN_ON(1);
1131 return -EINVAL;
1132 }
1133 or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
1134
1135 if (!total_bytes) { /* first-time: allocate and put list header */
1136 total_bytes = _osd_req_sizeof_alist_header(or);
1137 ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
1138 if (ret)
1139 return ret;
1140 _osd_req_set_alist_type(or, or->set_attr.buff,
1141 OSD_ATTR_LIST_SET_RETRIEVE);
1142 }
1143 attr_last = or->set_attr.buff + total_bytes;
1144
1145 for (; nelem; --nelem) {
1146 unsigned elem_size = _osd_req_alist_elem_size(or, oa->len);
1147
1148 total_bytes += elem_size;
1149 if (unlikely(or->set_attr.alloc_size < total_bytes)) {
1150 or->set_attr.total_bytes = total_bytes - elem_size;
1151 ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
1152 if (ret)
1153 return ret;
1154 attr_last =
1155 or->set_attr.buff + or->set_attr.total_bytes;
1156 }
1157
1158 _osd_req_alist_elem_encode(or, attr_last, oa);
1159
1160 attr_last += elem_size;
1161 ++oa;
1162 }
1163
1164 or->set_attr.total_bytes = total_bytes;
1165 return 0;
1166 }
1167 EXPORT_SYMBOL(osd_req_add_set_attr_list);
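
/*
 * Illustrative only, kept out of the build: attaching a single List-mode
 * set-attribute to a SET ATTRIBUTES request, here the root OSD_NAME
 * attribute already referenced in this file. The helper name
 * example_set_osd_name() is hypothetical; finalize/execute follow as
 * usual.
 */
#if 0
static int example_set_osd_name(struct osd_request *or,
				void *name, unsigned name_len)
{
	struct osd_attr attr = {
		.attr_page = OSD_APAGE_ROOT_INFORMATION,
		.attr_id   = OSD_ATTR_RI_OSD_NAME,
		.len       = name_len,
		.val_ptr   = name,
	};

	osd_req_set_attributes(or, &osd_root_object);
	return osd_req_add_set_attr_list(or, &attr, 1);
}
#endif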
1168
1169 static int _req_append_segment(struct osd_request *or,
1170 unsigned padding, struct _osd_req_data_segment *seg,
1171 struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
1172 {
1173 void *pad_buff;
1174 int ret;
1175
1176 if (padding) {
1177 /* check if we can just add it to last buffer */
1178 if (last_seg &&
1179 (padding <= last_seg->alloc_size - last_seg->total_bytes))
1180 pad_buff = last_seg->buff + last_seg->total_bytes;
1181 else
1182 pad_buff = io->pad_buff;
1183
1184 ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
1185 or->alloc_flags);
1186 if (ret)
1187 return ret;
1188 io->total_bytes += padding;
1189 }
1190
1191 ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
1192 or->alloc_flags);
1193 if (ret)
1194 return ret;
1195
1196 io->total_bytes += seg->total_bytes;
1197 OSD_DEBUG("padding=%d buff=%p total_bytes=%d\n", padding, seg->buff,
1198 seg->total_bytes);
1199 return 0;
1200 }
1201
1202 static int _osd_req_finalize_set_attr_list(struct osd_request *or)
1203 {
1204 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1205 unsigned padding;
1206 int ret;
1207
1208 if (!or->set_attr.total_bytes) {
1209 cdbh->attrs_list.set_attr_offset = OSD_OFFSET_UNUSED;
1210 return 0;
1211 }
1212
1213 cdbh->attrs_list.set_attr_bytes = cpu_to_be32(or->set_attr.total_bytes);
1214 cdbh->attrs_list.set_attr_offset =
1215 osd_req_encode_offset(or, or->out.total_bytes, &padding);
1216
1217 ret = _req_append_segment(or, padding, &or->set_attr,
1218 or->out.last_seg, &or->out);
1219 if (ret)
1220 return ret;
1221
1222 or->out.last_seg = &or->set_attr;
1223 return 0;
1224 }
1225
1226 int osd_req_add_get_attr_list(struct osd_request *or,
1227 const struct osd_attr *oa, unsigned nelem)
1228 {
1229 unsigned total_bytes = or->enc_get_attr.total_bytes;
1230 void *attr_last;
1231 int ret;
1232
1233 if (or->attributes_mode &&
1234 or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
1235 WARN_ON(1);
1236 return -EINVAL;
1237 }
1238 or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
1239
1240 /* first time calc data-in list header size */
1241 if (!or->get_attr.total_bytes)
1242 or->get_attr.total_bytes = _osd_req_sizeof_alist_header(or);
1243
1244 /* calc data-out info */
1245 if (!total_bytes) { /* first-time: allocate and put list header */
1246 unsigned max_bytes;
1247
1248 total_bytes = _osd_req_sizeof_alist_header(or);
1249 max_bytes = total_bytes +
1250 nelem * sizeof(struct osd_attributes_list_attrid);
1251 ret = _alloc_get_attr_desc(or, max_bytes);
1252 if (ret)
1253 return ret;
1254
1255 _osd_req_set_alist_type(or, or->enc_get_attr.buff,
1256 OSD_ATTR_LIST_GET);
1257 }
1258 attr_last = or->enc_get_attr.buff + total_bytes;
1259
1260 for (; nelem; --nelem) {
1261 struct osd_attributes_list_attrid *attrid;
1262 const unsigned cur_size = sizeof(*attrid);
1263
1264 total_bytes += cur_size;
1265 if (unlikely(or->enc_get_attr.alloc_size < total_bytes)) {
1266 or->enc_get_attr.total_bytes = total_bytes - cur_size;
1267 ret = _alloc_get_attr_desc(or,
1268 total_bytes + nelem * sizeof(*attrid));
1269 if (ret)
1270 return ret;
1271 attr_last = or->enc_get_attr.buff +
1272 or->enc_get_attr.total_bytes;
1273 }
1274
1275 attrid = attr_last;
1276 attrid->attr_page = cpu_to_be32(oa->attr_page);
1277 attrid->attr_id = cpu_to_be32(oa->attr_id);
1278
1279 attr_last += cur_size;
1280
1281 /* calc data-in size */
1282 or->get_attr.total_bytes +=
1283 _osd_req_alist_elem_size(or, oa->len);
1284 ++oa;
1285 }
1286
1287 or->enc_get_attr.total_bytes = total_bytes;
1288
1289 OSD_DEBUG(
1290 "get_attr.total_bytes=%u(%u) enc_get_attr.total_bytes=%u(%Zu)\n",
1291 or->get_attr.total_bytes,
1292 or->get_attr.total_bytes - _osd_req_sizeof_alist_header(or),
1293 or->enc_get_attr.total_bytes,
1294 (or->enc_get_attr.total_bytes - _osd_req_sizeof_alist_header(or))
1295 / sizeof(struct osd_attributes_list_attrid));
1296
1297 return 0;
1298 }
1299 EXPORT_SYMBOL(osd_req_add_get_attr_list);
1300
1301 static int _osd_req_finalize_get_attr_list(struct osd_request *or)
1302 {
1303 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1304 unsigned out_padding;
1305 unsigned in_padding;
1306 int ret;
1307
1308 if (!or->enc_get_attr.total_bytes) {
1309 cdbh->attrs_list.get_attr_desc_offset = OSD_OFFSET_UNUSED;
1310 cdbh->attrs_list.get_attr_offset = OSD_OFFSET_UNUSED;
1311 return 0;
1312 }
1313
1314 ret = _alloc_get_attr_list(or);
1315 if (ret)
1316 return ret;
1317
1318 /* The out-going buffer info update */
1319 OSD_DEBUG("out-going\n");
1320 cdbh->attrs_list.get_attr_desc_bytes =
1321 cpu_to_be32(or->enc_get_attr.total_bytes);
1322
1323 cdbh->attrs_list.get_attr_desc_offset =
1324 osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
1325
1326 ret = _req_append_segment(or, out_padding, &or->enc_get_attr,
1327 or->out.last_seg, &or->out);
1328 if (ret)
1329 return ret;
1330 or->out.last_seg = &or->enc_get_attr;
1331
1332 /* The incoming buffer info update */
1333 OSD_DEBUG("in-coming\n");
1334 cdbh->attrs_list.get_attr_alloc_length =
1335 cpu_to_be32(or->get_attr.total_bytes);
1336
1337 cdbh->attrs_list.get_attr_offset =
1338 osd_req_encode_offset(or, or->in.total_bytes, &in_padding);
1339
1340 ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
1341 &or->in);
1342 if (ret)
1343 return ret;
1344 or->in.last_seg = &or->get_attr;
1345
1346 return 0;
1347 }
1348
1349 int osd_req_decode_get_attr_list(struct osd_request *or,
1350 struct osd_attr *oa, int *nelem, void **iterator)
1351 {
1352 unsigned cur_bytes, returned_bytes;
1353 int n;
1354 const unsigned sizeof_attr_list = _osd_req_sizeof_alist_header(or);
1355 void *cur_p;
1356
1357 if (!_osd_req_is_alist_type(or, or->get_attr.buff,
1358 OSD_ATTR_LIST_SET_RETRIEVE)) {
1359 oa->attr_page = 0;
1360 oa->attr_id = 0;
1361 oa->val_ptr = NULL;
1362 oa->len = 0;
1363 *iterator = NULL;
1364 return 0;
1365 }
1366
1367 if (*iterator) {
1368 BUG_ON((*iterator < or->get_attr.buff) ||
1369 (or->get_attr.buff + or->get_attr.alloc_size < *iterator));
1370 cur_p = *iterator;
1371 cur_bytes = (*iterator - or->get_attr.buff) - sizeof_attr_list;
1372 returned_bytes = or->get_attr.total_bytes;
1373 } else { /* first time decode the list header */
1374 cur_bytes = sizeof_attr_list;
1375 returned_bytes = _osd_req_alist_size(or, or->get_attr.buff) +
1376 sizeof_attr_list;
1377
1378 cur_p = or->get_attr.buff + sizeof_attr_list;
1379
1380 if (returned_bytes > or->get_attr.alloc_size) {
1381 OSD_DEBUG("target report: space was not big enough! "
1382 "Allocate=%u Needed=%u\n",
1383 or->get_attr.alloc_size,
1384 returned_bytes + sizeof_attr_list);
1385
1386 returned_bytes =
1387 or->get_attr.alloc_size - sizeof_attr_list;
1388 }
1389 or->get_attr.total_bytes = returned_bytes;
1390 }
1391
1392 for (n = 0; (n < *nelem) && (cur_bytes < returned_bytes); ++n) {
1393 int inc = _osd_req_alist_elem_decode(or, cur_p, oa,
1394 returned_bytes - cur_bytes);
1395
1396 if (inc < 0) {
1397 OSD_ERR("BAD FOOD from target. list not valid!"
1398 "c=%d r=%d n=%d\n",
1399 cur_bytes, returned_bytes, n);
1400 oa->val_ptr = NULL;
1401 cur_bytes = returned_bytes; /* break the caller loop */
1402 break;
1403 }
1404
1405 cur_bytes += inc;
1406 cur_p += inc;
1407 ++oa;
1408 }
1409
1410 *iterator = (returned_bytes - cur_bytes) ? cur_p : NULL;
1411 *nelem = n;
1412 return returned_bytes - cur_bytes;
1413 }
1414 EXPORT_SYMBOL(osd_req_decode_get_attr_list);
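
/*
 * Illustrative only, kept out of the build: draining a returned attribute
 * list in fixed-size chunks through the iterator form of
 * osd_req_decode_get_attr_list() (compare _osd_get_print_system_info()
 * above, which decodes its whole list in one call).
 */
#if 0
static void example_drain_get_attrs(struct osd_request *or)
{
	struct osd_attr attrs[8];
	void *iter = NULL;
	int nelem;

	do {
		nelem = ARRAY_SIZE(attrs);
		osd_req_decode_get_attr_list(or, attrs, &nelem, &iter);
		/* attrs[0..nelem-1].val_ptr now point into the reply buffer */
	} while (iter);
}
#endif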
1415
1416 /*
1417 * Attributes Page-mode
1418 */
1419
1420 int osd_req_add_get_attr_page(struct osd_request *or,
1421 u32 page_id, void *attar_page, unsigned max_page_len,
1422 const struct osd_attr *set_one_attr)
1423 {
1424 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1425
1426 if (or->attributes_mode &&
1427 or->attributes_mode != OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
1428 WARN_ON(1);
1429 return -EINVAL;
1430 }
1431 or->attributes_mode = OSD_CDB_GET_ATTR_PAGE_SET_ONE;
1432
1433 or->get_attr.buff = attar_page;
1434 or->get_attr.total_bytes = max_page_len;
1435
1436 cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id);
1437 cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len);
1438
1439 if (!set_one_attr || !set_one_attr->attr_page)
1440 return 0; /* The set is optional */
1441
1442 or->set_attr.buff = set_one_attr->val_ptr;
1443 or->set_attr.total_bytes = set_one_attr->len;
1444
1445 cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page);
1446 cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id);
1447 cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len);
1448 return 0;
1449 }
1450 EXPORT_SYMBOL(osd_req_add_get_attr_page);
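
/*
 * Illustrative only, kept out of the build: fetching a whole attributes
 * page in Page-mode, without the optional set-one-attribute part. The
 * helper name example_get_root_info_page() and the caller-supplied buffer
 * are hypothetical.
 */
#if 0
static int example_get_root_info_page(struct osd_request *or,
				      void *page_buf, unsigned page_len)
{
	osd_req_get_attributes(or, &osd_root_object);
	return osd_req_add_get_attr_page(or, OSD_APAGE_ROOT_INFORMATION,
					 page_buf, page_len, NULL);
}
#endif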
1451
1452 static int _osd_req_finalize_attr_page(struct osd_request *or)
1453 {
1454 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1455 unsigned in_padding, out_padding;
1456 int ret;
1457
1458 /* returned page */
1459 cdbh->attrs_page.get_attr_offset =
1460 osd_req_encode_offset(or, or->in.total_bytes, &in_padding);
1461
1462 ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
1463 &or->in);
1464 if (ret)
1465 return ret;
1466
1467 if (or->set_attr.total_bytes == 0)
1468 return 0;
1469
1470 /* set one value */
1471 cdbh->attrs_page.set_attr_offset =
1472 osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
1473
1474 ret = _req_append_segment(or, out_padding, &or->set_attr, NULL,
1475 &or->out);
1476 return ret;
1477 }
1478
1479 static inline void osd_sec_parms_set_out_offset(bool is_v1,
1480 struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
1481 {
1482 if (is_v1)
1483 sec_parms->v1.data_out_integrity_check_offset = offset;
1484 else
1485 sec_parms->v2.data_out_integrity_check_offset = offset;
1486 }
1487
1488 static inline void osd_sec_parms_set_in_offset(bool is_v1,
1489 struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
1490 {
1491 if (is_v1)
1492 sec_parms->v1.data_in_integrity_check_offset = offset;
1493 else
1494 sec_parms->v2.data_in_integrity_check_offset = offset;
1495 }
1496
1497 static int _osd_req_finalize_data_integrity(struct osd_request *or,
1498 bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes,
1499 const u8 *cap_key)
1500 {
1501 struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
1502 int ret;
1503
1504 if (!osd_is_sec_alldata(sec_parms))
1505 return 0;
1506
1507 if (has_out) {
1508 struct _osd_req_data_segment seg = {
1509 .buff = &or->out_data_integ,
1510 .total_bytes = sizeof(or->out_data_integ),
1511 };
1512 unsigned pad;
1513
1514 or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes);
1515 or->out_data_integ.set_attributes_bytes = cpu_to_be64(
1516 or->set_attr.total_bytes);
1517 or->out_data_integ.get_attributes_bytes = cpu_to_be64(
1518 or->enc_get_attr.total_bytes);
1519
1520 osd_sec_parms_set_out_offset(osd_req_is_ver1(or), sec_parms,
1521 osd_req_encode_offset(or, or->out.total_bytes, &pad));
1522
1523 ret = _req_append_segment(or, pad, &seg, or->out.last_seg,
1524 &or->out);
1525 if (ret)
1526 return ret;
1527 or->out.last_seg = NULL;
1528
1529 /* they are now all chained to the request; sign them all together */
1530 osd_sec_sign_data(&or->out_data_integ, out_data_bio,
1531 cap_key);
1532 }
1533
1534 if (has_in) {
1535 struct _osd_req_data_segment seg = {
1536 .buff = &or->in_data_integ,
1537 .total_bytes = sizeof(or->in_data_integ),
1538 };
1539 unsigned pad;
1540
1541 osd_sec_parms_set_in_offset(osd_req_is_ver1(or), sec_parms,
1542 osd_req_encode_offset(or, or->in.total_bytes, &pad));
1543
1544 ret = _req_append_segment(or, pad, &seg, or->in.last_seg,
1545 &or->in);
1546 if (ret)
1547 return ret;
1548
1549 or->in.last_seg = NULL;
1550 }
1551
1552 return 0;
1553 }
1554
1555 /*
1556 * osd_finalize_request and helpers
1557 */
1558 static struct request *_make_request(struct request_queue *q, bool has_write,
1559 struct _osd_io_info *oii, gfp_t flags)
1560 {
1561 if (oii->bio)
1562 return blk_make_request(q, oii->bio, flags);
1563 else {
1564 struct request *req;
1565
1566 req = blk_get_request(q, has_write ? WRITE : READ, flags);
1567 if (IS_ERR(req))
1568 return req;
1569
1570 blk_rq_set_block_pc(req);
1571 return req;
1572 }
1573 }
1574
1575 static int _init_blk_request(struct osd_request *or,
1576 bool has_in, bool has_out)
1577 {
1578 gfp_t flags = or->alloc_flags;
1579 struct scsi_device *scsi_device = or->osd_dev->scsi_device;
1580 struct request_queue *q = scsi_device->request_queue;
1581 struct request *req;
1582 int ret;
1583
1584 req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
1585 if (IS_ERR(req)) {
1586 ret = PTR_ERR(req);
1587 goto out;
1588 }
1589
1590 or->request = req;
1591 req->cmd_flags |= REQ_QUIET;
1592
1593 req->timeout = or->timeout;
1594 req->retries = or->retries;
1595 req->sense = or->sense;
1596 req->sense_len = 0;
1597
1598 if (has_out) {
1599 or->out.req = req;
1600 if (has_in) {
1601 /* allocate bidi request */
1602 req = _make_request(q, false, &or->in, flags);
1603 if (IS_ERR(req)) {
1604 OSD_DEBUG("blk_get_request for bidi failed\n");
1605 ret = PTR_ERR(req);
1606 goto out;
1607 }
1608 blk_rq_set_block_pc(req);
1609 or->in.req = or->request->next_rq = req;
1610 }
1611 } else if (has_in)
1612 or->in.req = req;
1613
1614 ret = 0;
1615 out:
1616 OSD_DEBUG("or=%p has_in=%d has_out=%d => %d, %p\n",
1617 or, has_in, has_out, ret, or->request);
1618 return ret;
1619 }
1620
1621 int osd_finalize_request(struct osd_request *or,
1622 u8 options, const void *cap, const u8 *cap_key)
1623 {
1624 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1625 bool has_in, has_out;
1626 /* Save for data_integrity without the cdb_continuation */
1627 struct bio *out_data_bio = or->out.bio;
1628 u64 out_data_bytes = or->out.total_bytes;
1629 int ret;
1630
1631 if (options & OSD_REQ_FUA)
1632 cdbh->options |= OSD_CDB_FUA;
1633
1634 if (options & OSD_REQ_DPO)
1635 cdbh->options |= OSD_CDB_DPO;
1636
1637 if (options & OSD_REQ_BYPASS_TIMESTAMPS)
1638 cdbh->timestamp_control = OSD_CDB_BYPASS_TIMESTAMPS;
1639
1640 osd_set_caps(&or->cdb, cap);
1641
1642 has_in = or->in.bio || or->get_attr.total_bytes;
1643 has_out = or->out.bio || or->cdb_cont.total_bytes ||
1644 or->set_attr.total_bytes || or->enc_get_attr.total_bytes;
1645
1646 ret = _osd_req_finalize_cdb_cont(or, cap_key);
1647 if (ret) {
1648 OSD_DEBUG("_osd_req_finalize_cdb_cont failed\n");
1649 return ret;
1650 }
1651 ret = _init_blk_request(or, has_in, has_out);
1652 if (ret) {
1653 OSD_DEBUG("_init_blk_request failed\n");
1654 return ret;
1655 }
1656
1657 or->out.pad_buff = sg_out_pad_buffer;
1658 or->in.pad_buff = sg_in_pad_buffer;
1659
1660 if (!or->attributes_mode)
1661 or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
1662 cdbh->command_specific_options |= or->attributes_mode;
1663 if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
1664 ret = _osd_req_finalize_attr_page(or);
1665 if (ret) {
1666 OSD_DEBUG("_osd_req_finalize_attr_page failed\n");
1667 return ret;
1668 }
1669 } else {
1670 /* TODO: I think that for the GET_ATTR command these 2 should
1671 * be reversed to keep them in execution order (for embedded
1672 * targets with low memory footprint)
1673 */
1674 ret = _osd_req_finalize_set_attr_list(or);
1675 if (ret) {
1676 OSD_DEBUG("_osd_req_finalize_set_attr_list failed\n");
1677 return ret;
1678 }
1679
1680 ret = _osd_req_finalize_get_attr_list(or);
1681 if (ret) {
1682 OSD_DEBUG("_osd_req_finalize_get_attr_list failed\n");
1683 return ret;
1684 }
1685 }
1686
1687 ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
1688 out_data_bio, out_data_bytes,
1689 cap_key);
1690 if (ret)
1691 return ret;
1692
1693 osd_sec_sign_cdb(&or->cdb, cap_key);
1694
1695 or->request->cmd = or->cdb.buff;
1696 or->request->cmd_len = _osd_req_cdb_len(or);
1697
1698 return 0;
1699 }
1700 EXPORT_SYMBOL(osd_finalize_request);
1701
1702 static bool _is_osd_security_code(int code)
1703 {
1704 return (code == osd_security_audit_value_frozen) ||
1705 (code == osd_security_working_key_frozen) ||
1706 (code == osd_nonce_not_unique) ||
1707 (code == osd_nonce_timestamp_out_of_range) ||
1708 (code == osd_invalid_dataout_buffer_integrity_check_value);
1709 }
1710
1711 #define OSD_SENSE_PRINT1(fmt, a...) \
1712 do { \
1713 if (__cur_sense_need_output) \
1714 OSD_ERR(fmt, ##a); \
1715 } while (0)
1716
1717 #define OSD_SENSE_PRINT2(fmt, a...) OSD_SENSE_PRINT1(" " fmt, ##a)
1718
1719 int osd_req_decode_sense_full(struct osd_request *or,
1720 struct osd_sense_info *osi, bool silent,
1721 struct osd_obj_id *bad_obj_list __unused, int max_obj __unused,
1722 struct osd_attr *bad_attr_list, int max_attr)
1723 {
1724 int sense_len, original_sense_len;
1725 struct osd_sense_info local_osi;
1726 struct scsi_sense_descriptor_based *ssdb;
1727 void *cur_descriptor;
1728 #if (CONFIG_SCSI_OSD_DPRINT_SENSE == 0)
1729 const bool __cur_sense_need_output = false;
1730 #else
1731 bool __cur_sense_need_output = !silent;
1732 #endif
1733 int ret;
1734
1735 if (likely(!or->req_errors))
1736 return 0;
1737
1738 osi = osi ? : &local_osi;
1739 memset(osi, 0, sizeof(*osi));
1740
1741 ssdb = (typeof(ssdb))or->sense;
1742 sense_len = or->sense_len;
1743 if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) {
1744 OSD_ERR("Block-layer returned error(0x%x) but "
1745 "sense_len(%u) || key(%d) is empty\n",
1746 or->req_errors, sense_len, ssdb->sense_key);
1747 goto analyze;
1748 }
1749
1750 if ((ssdb->response_code != 0x72) && (ssdb->response_code != 0x73)) {
1751 OSD_ERR("Unrecognized scsi sense: rcode=%x length=%d\n",
1752 ssdb->response_code, sense_len);
1753 goto analyze;
1754 }
1755
1756 osi->key = ssdb->sense_key;
1757 osi->additional_code = be16_to_cpu(ssdb->additional_sense_code);
1758 original_sense_len = ssdb->additional_sense_length + 8;
1759
1760 #if (CONFIG_SCSI_OSD_DPRINT_SENSE == 1)
1761 if (__cur_sense_need_output)
1762 __cur_sense_need_output = (osi->key > scsi_sk_recovered_error);
1763 #endif
1764 OSD_SENSE_PRINT1("Main Sense information key=0x%x length(%d, %d) "
1765 "additional_code=0x%x async_error=%d errors=0x%x\n",
1766 osi->key, original_sense_len, sense_len,
1767 osi->additional_code, or->async_error,
1768 or->req_errors);
1769
1770 if (original_sense_len < sense_len)
1771 sense_len = original_sense_len;
1772
1773 cur_descriptor = ssdb->ssd;
1774 sense_len -= sizeof(*ssdb);
1775 while (sense_len > 0) {
1776 struct scsi_sense_descriptor *ssd = cur_descriptor;
1777 int cur_len = ssd->additional_length + 2;
1778
1779 sense_len -= cur_len;
1780
1781 if (sense_len < 0)
1782 break; /* sense was truncated */
1783
1784 switch (ssd->descriptor_type) {
1785 case scsi_sense_information:
1786 case scsi_sense_command_specific_information:
1787 {
1788 struct scsi_sense_command_specific_data_descriptor
1789 *sscd = cur_descriptor;
1790
1791 osi->command_info =
1792 get_unaligned_be64(&sscd->information) ;
1793 OSD_SENSE_PRINT2(
1794 "command_specific_information 0x%llx \n",
1795 _LLU(osi->command_info));
1796 break;
1797 }
1798 case scsi_sense_key_specific:
1799 {
1800 struct scsi_sense_key_specific_data_descriptor
1801 *ssks = cur_descriptor;
1802
1803 osi->sense_info = get_unaligned_be16(&ssks->value);
1804 OSD_SENSE_PRINT2(
1805 "sense_key_specific_information %u"
1806 "sksv_cd_bpv_bp (0x%x)\n",
1807 osi->sense_info, ssks->sksv_cd_bpv_bp);
1808 break;
1809 }
1810 case osd_sense_object_identification:
1811 { /*FIXME: Keep first not last, Store in array*/
1812 struct osd_sense_identification_data_descriptor
1813 *osidd = cur_descriptor;
1814
1815 osi->not_initiated_command_functions =
1816 le32_to_cpu(osidd->not_initiated_functions);
1817 osi->completed_command_functions =
1818 le32_to_cpu(osidd->completed_functions);
1819 osi->obj.partition = be64_to_cpu(osidd->partition_id);
1820 osi->obj.id = be64_to_cpu(osidd->object_id);
1821 OSD_SENSE_PRINT2(
1822 "object_identification pid=0x%llx oid=0x%llx\n",
1823 _LLU(osi->obj.partition), _LLU(osi->obj.id));
1824 OSD_SENSE_PRINT2(
1825 "not_initiated_bits(%x) "
1826 "completed_command_bits(%x)\n",
1827 osi->not_initiated_command_functions,
1828 osi->completed_command_functions);
1829 break;
1830 }
1831 case osd_sense_response_integrity_check:
1832 {
1833 struct osd_sense_response_integrity_check_descriptor
1834 *osricd = cur_descriptor;
1835 const unsigned len =
1836 sizeof(osricd->integrity_check_value);
1837 char key_dump[len*4 + 2]; /* 2nibbles+space+ASCII */
1838
1839 hex_dump_to_buffer(osricd->integrity_check_value, len,
1840 32, 1, key_dump, sizeof(key_dump), true);
1841 OSD_SENSE_PRINT2("response_integrity [%s]\n", key_dump);
1842 }
1843 case osd_sense_attribute_identification:
1844 {
1845 struct osd_sense_attributes_data_descriptor
1846 *osadd = cur_descriptor;
1847 unsigned len = min(cur_len, sense_len);
1848 struct osd_sense_attr *pattr = osadd->sense_attrs;
1849
1850 while (len >= sizeof(*pattr)) {
1851 u32 attr_page = be32_to_cpu(pattr->attr_page);
1852 u32 attr_id = be32_to_cpu(pattr->attr_id);
1853
1854 if (!osi->attr.attr_page) {
1855 osi->attr.attr_page = attr_page;
1856 osi->attr.attr_id = attr_id;
1857 }
1858
1859 if (bad_attr_list && max_attr) {
1860 bad_attr_list->attr_page = attr_page;
1861 bad_attr_list->attr_id = attr_id;
1862 bad_attr_list++;
1863 max_attr--;
1864 }
1865
1866 len -= sizeof(*pattr);
1867 OSD_SENSE_PRINT2(
1868 "osd_sense_attribute_identification"
1869 "attr_page=0x%x attr_id=0x%x\n",
1870 attr_page, attr_id);
1871 }
1872 }
1873 /*These are not legal for OSD*/
1874 case scsi_sense_field_replaceable_unit:
1875 OSD_SENSE_PRINT2("scsi_sense_field_replaceable_unit\n");
1876 break;
1877 case scsi_sense_stream_commands:
1878 OSD_SENSE_PRINT2("scsi_sense_stream_commands\n");
1879 break;
1880 case scsi_sense_block_commands:
1881 OSD_SENSE_PRINT2("scsi_sense_block_commands\n");
1882 break;
1883 case scsi_sense_ata_return:
1884 OSD_SENSE_PRINT2("scsi_sense_ata_return\n");
1885 break;
1886 default:
1887 if (ssd->descriptor_type <= scsi_sense_Reserved_last)
1888 OSD_SENSE_PRINT2(
1889 "scsi_sense Reserved descriptor (0x%x)",
1890 ssd->descriptor_type);
1891 else
1892 OSD_SENSE_PRINT2(
1893 "scsi_sense Vendor descriptor (0x%x)",
1894 ssd->descriptor_type);
1895 }
1896
1897 cur_descriptor += cur_len;
1898 }
1899
1900 analyze:
1901 if (!osi->key) {
1902 /* The SCSI sense is empty; the request was never issued to the target.
1903 * The Linux return code might tell us what happened.
1904 */
1905 if (or->async_error == -ENOMEM)
1906 osi->osd_err_pri = OSD_ERR_PRI_RESOURCE;
1907 else
1908 osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE;
1909 ret = or->async_error;
1910 } else if (osi->key <= scsi_sk_recovered_error) {
1911 osi->osd_err_pri = 0;
1912 ret = 0;
1913 } else if (osi->additional_code == scsi_invalid_field_in_cdb) {
1914 if (osi->cdb_field_offset == OSD_CFO_STARTING_BYTE) {
1915 osi->osd_err_pri = OSD_ERR_PRI_CLEAR_PAGES;
1916 ret = -EFAULT; /* caller should recover from this */
1917 } else if (osi->cdb_field_offset == OSD_CFO_OBJECT_ID) {
1918 osi->osd_err_pri = OSD_ERR_PRI_NOT_FOUND;
1919 ret = -ENOENT;
1920 } else if (osi->cdb_field_offset == OSD_CFO_PERMISSIONS) {
1921 osi->osd_err_pri = OSD_ERR_PRI_NO_ACCESS;
1922 ret = -EACCES;
1923 } else {
1924 osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
1925 ret = -EINVAL;
1926 }
1927 } else if (osi->additional_code == osd_quota_error) {
1928 osi->osd_err_pri = OSD_ERR_PRI_NO_SPACE;
1929 ret = -ENOSPC;
1930 } else if (_is_osd_security_code(osi->additional_code)) {
1931 osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
1932 ret = -EINVAL;
1933 } else {
1934 osi->osd_err_pri = OSD_ERR_PRI_EIO;
1935 ret = -EIO;
1936 }
1937
1938 if (!or->out.residual)
1939 or->out.residual = or->out.total_bytes;
1940 if (!or->in.residual)
1941 or->in.residual = or->in.total_bytes;
1942
1943 return ret;
1944 }
1945 EXPORT_SYMBOL(osd_req_decode_sense_full);
1946
1947 /*
1948 * Implementation of osd_sec.h API
1949 * TODO: Move to a separate osd_sec.c file at a later stage.
1950 */
1951
1952 enum { OSD_SEC_CAP_V1_ALL_CAPS =
1953 OSD_SEC_CAP_APPEND | OSD_SEC_CAP_OBJ_MGMT | OSD_SEC_CAP_REMOVE |
1954 OSD_SEC_CAP_CREATE | OSD_SEC_CAP_SET_ATTR | OSD_SEC_CAP_GET_ATTR |
1955 OSD_SEC_CAP_WRITE | OSD_SEC_CAP_READ | OSD_SEC_CAP_POL_SEC |
1956 OSD_SEC_CAP_GLOBAL | OSD_SEC_CAP_DEV_MGMT
1957 };
1958
1959 enum { OSD_SEC_CAP_V2_ALL_CAPS =
1960 OSD_SEC_CAP_V1_ALL_CAPS | OSD_SEC_CAP_QUERY | OSD_SEC_CAP_M_OBJECT
1961 };
1962
1963 void osd_sec_init_nosec_doall_caps(void *caps,
1964 const struct osd_obj_id *obj, bool is_collection, const bool is_v1)
1965 {
1966 struct osd_capability *cap = caps;
1967 u8 type;
1968 u8 descriptor_type;
1969
1970 if (likely(obj->id)) {
1971 if (unlikely(is_collection)) {
1972 type = OSD_SEC_OBJ_COLLECTION;
1973 descriptor_type = is_v1 ? OSD_SEC_OBJ_DESC_OBJ :
1974 OSD_SEC_OBJ_DESC_COL;
1975 } else {
1976 type = OSD_SEC_OBJ_USER;
1977 descriptor_type = OSD_SEC_OBJ_DESC_OBJ;
1978 }
1979 WARN_ON(!obj->partition);
1980 } else {
1981 type = obj->partition ? OSD_SEC_OBJ_PARTITION :
1982 OSD_SEC_OBJ_ROOT;
1983 descriptor_type = OSD_SEC_OBJ_DESC_PAR;
1984 }
1985
1986 memset(cap, 0, sizeof(*cap));
1987
1988 cap->h.format = OSD_SEC_CAP_FORMAT_VER1;
1989 cap->h.integrity_algorithm__key_version = 0; /* MAKE_BYTE(0, 0); */
1990 cap->h.security_method = OSD_SEC_NOSEC;
1991 /* cap->expiration_time;
1992 cap->AUDIT[30-10];
1993 cap->discriminator[42-30];
1994 cap->object_created_time; */
1995 cap->h.object_type = type;
1996 osd_sec_set_caps(&cap->h, OSD_SEC_CAP_V1_ALL_CAPS);
1997 cap->h.object_descriptor_type = descriptor_type;
1998 cap->od.obj_desc.policy_access_tag = 0;
1999 cap->od.obj_desc.allowed_partition_id = cpu_to_be64(obj->partition);
2000 cap->od.obj_desc.allowed_object_id = cpu_to_be64(obj->id);
2001 }
2002 EXPORT_SYMBOL(osd_sec_init_nosec_doall_caps);
2003
2004 /* FIXME: Extract version from caps pointer.
2005 * Also Pete's target only supports caps from OSDv1 for now
2006 */
2007 void osd_set_caps(struct osd_cdb *cdb, const void *caps)
2008 {
2009 /* NOTE: They start at same address */
2010 memcpy(&cdb->v1.caps, caps, OSDv1_CAP_LEN);
2011 }
2012
2013 bool osd_is_sec_alldata(struct osd_security_parameters *sec_parms __unused)
2014 {
2015 return false;
2016 }
2017
2018 void osd_sec_sign_cdb(struct osd_cdb *ocdb __unused, const u8 *cap_key __unused)
2019 {
2020 }
2021
2022 void osd_sec_sign_data(void *data_integ __unused,
2023 struct bio *bio __unused, const u8 *cap_key __unused)
2024 {
2025 }
2026
2027 /*
2028 * Declared in osd_protocol.h
2029 * 4.12.5 Data-In and Data-Out buffer offsets
2030 * byte offset = mantissa * (2^(exponent+8))
2031 * Returns the smallest allowed encoded offset that contains the given @offset.
2032 * The byte offset actually represented is @offset + *@padding.
2033 */
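/*
 * Worked example of this encoding, assuming the OSDv1 minimum shift of 8:
 * for offset = 74565 the loop picks shift = 8, since 74565 >> 8 = 291 is
 * below 1 << OSD_OFFSET_MAX_BITS. 74565 % 256 = 69, so
 * *padding = 256 - 69 = 187 and the mantissa is rounded up to 292; the
 * encoded value therefore stands for 292 * 2^8 = 74752 = 74565 + 187,
 * with the exponent nibble (8 - 8) = 0 placed in the top four bits.
 */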
2034 osd_cdb_offset __osd_encode_offset(
2035 u64 offset, unsigned *padding, int min_shift, int max_shift)
2036 {
2037 u64 try_offset = -1, mod, align;
2038 osd_cdb_offset be32_offset;
2039 int shift;
2040
2041 *padding = 0;
2042 if (!offset)
2043 return 0;
2044
2045 for (shift = min_shift; shift < max_shift; ++shift) {
2046 try_offset = offset >> shift;
2047 if (try_offset < (1 << OSD_OFFSET_MAX_BITS))
2048 break;
2049 }
2050
2051 BUG_ON(shift == max_shift);
2052
2053 align = 1 << shift;
2054 mod = offset & (align - 1);
2055 if (mod) {
2056 *padding = align - mod;
2057 try_offset += 1;
2058 }
2059
2060 try_offset |= ((shift - 8) & 0xf) << 28;
2061 be32_offset = cpu_to_be32((u32)try_offset);
2062
2063 OSD_DEBUG("offset=%llu mantissa=%llu exp=%d encoded=%x pad=%d\n",
2064 _LLU(offset), _LLU(try_offset & 0x0FFFFFFF), shift,
2065 be32_offset, *padding);
2066 return be32_offset;
2067 }