/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>	/* vzalloc()/vfree() for the ring */
#include <linux/highmem.h>	/* kmap_atomic()/kunmap_atomic() */
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <linux/uio_driver.h>
#include <net/genetlink.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>

#include <linux/target_core_user.h>

/*
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This is to allow backends that
 * are too complex for in-kernel support to be possible.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap()ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */

#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

#define CMDR_SIZE (16 * 4096)
#define DATA_SIZE (257 * 4096)

#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)

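/*
 * For reference: CMDR_SIZE is 16 pages (64 KiB), the first
 * sizeof(struct tcmu_mailbox) bytes of which hold the mailbox, and
 * DATA_SIZE is 257 pages (~1 MiB), so the whole mmap()ed region
 * (TCMU_RING_SIZE) is 273 pages.
 */
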
static struct device *tcmu_root_device;

struct tcmu_hba {
	u32 host_id;
};

/* User wants all cmds or just some */
enum passthru_level {
	TCMU_PASS_ALL = 0,
	TCMU_PASS_IO,
	TCMU_PASS_INVALID,
};

#define TCMU_CONFIG_LEN 256

struct tcmu_dev {
	struct se_device se_dev;

	char *name;
	struct se_hba *hba;

#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
	unsigned long flags;

	enum passthru_level pass_level;

	struct uio_info uio_info;

	struct tcmu_mailbox *mb_addr;
	size_t dev_size;
	u32 cmdr_size;
	u32 cmdr_last_cleaned;
	/* Offset of data ring from start of mb */
	size_t data_off;
	size_t data_size;
	/* Ring head + tail values. */
	/* Must add data_off and mb_addr to get the address */
	size_t data_head;
	size_t data_tail;

	wait_queue_head_t wait_cmdr;
	/* TODO should this be a mutex? */
	spinlock_t cmdr_lock;

	struct idr commands;
	spinlock_t commands_lock;

	struct timer_list timeout;

	char dev_config[TCMU_CONFIG_LEN];
};

#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)

#define CMDR_OFF sizeof(struct tcmu_mailbox)

struct tcmu_cmd {
	struct se_cmd *se_cmd;
	struct tcmu_dev *tcmu_dev;

	uint16_t cmd_id;

	/* Can't use se_cmd->data_length when cleaning up expired cmds, because if
	   cmd has been completed then accessing se_cmd is off limits */
	size_t data_length;

	unsigned long deadline;

#define TCMU_CMD_BIT_EXPIRED 0
	unsigned long flags;
};

static struct kmem_cache *tcmu_cmd_cache;

/* multicast group */
enum tcmu_multicast_groups {
	TCMU_MCGRP_CONFIG,
};

static const struct genl_multicast_group tcmu_mcgrps[] = {
	[TCMU_MCGRP_CONFIG] = { .name = "config", },
};

/* Our generic netlink family */
static struct genl_family tcmu_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = "TCM-USER",
	.version = 1,
	.maxattr = TCMU_ATTR_MAX,
	.mcgrps = tcmu_mcgrps,
	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
};

static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	int cmd_id;

	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
	if (!tcmu_cmd)
		return NULL;

	tcmu_cmd->se_cmd = se_cmd;
	tcmu_cmd->tcmu_dev = udev;
	tcmu_cmd->data_length = se_cmd->data_length;

	tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);

	idr_preload(GFP_KERNEL);
	spin_lock_irq(&udev->commands_lock);
	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
		USHRT_MAX, GFP_NOWAIT);
	spin_unlock_irq(&udev->commands_lock);
	idr_preload_end();

	if (cmd_id < 0) {
		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
		return NULL;
	}
	tcmu_cmd->cmd_id = cmd_id;

	return tcmu_cmd;
}

static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
	unsigned long offset = (unsigned long) vaddr & ~PAGE_MASK;

	size = round_up(size+offset, PAGE_SIZE);
	vaddr -= offset;

	while (size) {
		flush_dcache_page(virt_to_page(vaddr));
		size -= PAGE_SIZE;
		vaddr += PAGE_SIZE;	/* advance, or we flush the same page repeatedly */
	}
}

/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
	int diff = head - tail;

	if (diff >= 0)
		return diff;
	else
		return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* Keep 1 byte unused or we can't tell full from empty */
	return (size - spc_used(head, tail, size) - 1);
}

static inline size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)

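/*
 * Worked example of the macro above, with size = 100: head = 97 and
 * used = 5 gives (97 + 5) % 100 = 2, i.e. the head wraps. The
 * smp_store_release() gives the store release semantics, so preceding
 * writes to the ring payload are ordered before the new head value is
 * published to the other side.
 */
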
/*
 * We can't queue a command until we have space available on the cmd ring
 * *and* space available on the data ring.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed)
{
	struct tcmu_mailbox *mb = udev->mb_addr;
	size_t space;
	u32 cmd_head;
	size_t cmd_needed;

	tcmu_flush_dcache_range(mb, sizeof(*mb));

	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/*
	 * If cmd end-of-ring space is too small then we need space for a PAD
	 * plus the original cmd - cmds are internally contiguous.
	 */
	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
	if (space < cmd_needed) {
		pr_debug("no cmd space: %u %u %u\n", cmd_head,
			 udev->cmdr_last_cleaned, udev->cmdr_size);
		return false;
	}

	space = spc_free(udev->data_head, udev->data_tail, udev->data_size);
	if (space < data_needed) {
		pr_debug("no data space: %zu %zu %zu\n", udev->data_head,
			 udev->data_tail, udev->data_size);
		return false;
	}

	return true;
}

static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t base_command_size, command_size;
	struct tcmu_mailbox *mb;
	struct tcmu_cmd_entry *entry;
	int i;
	struct scatterlist *sg;
	struct iovec *iov;
	int iov_cnt = 0;
	uint32_t cmd_head;
	uint64_t cdb_off;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
		return -EINVAL;

	/*
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 *
	 * iovs = sgl_nents+1, for end-of-ring case, plus another 1
	 * b/c size == offsetof one-past-element.
	 */
	base_command_size = max(offsetof(struct tcmu_cmd_entry,
					 req.iov[se_cmd->t_data_nents + 2]),
				sizeof(struct tcmu_cmd_entry));
	command_size = base_command_size
		+ round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);

	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));

	spin_lock_irq(&udev->cmdr_lock);

	mb = udev->mb_addr;
	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	if ((command_size > (udev->cmdr_size / 2))
	    || tcmu_cmd->data_length > (udev->data_size - 1))
		pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
			"cmd/data ring buffers\n", command_size, tcmu_cmd->data_length,
			udev->cmdr_size, udev->data_size);

	while (!is_ring_space_avail(udev, command_size, tcmu_cmd->data_length)) {
		int ret;
		DEFINE_WAIT(__wait);

		prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);

		pr_debug("sleeping for ring space\n");
		spin_unlock_irq(&udev->cmdr_lock);
		ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
		finish_wait(&udev->wait_cmdr, &__wait);
		if (!ret) {
			pr_warn("tcmu: command timed out\n");
			return -ETIMEDOUT;
		}

		spin_lock_irq(&udev->cmdr_lock);

		/* We dropped cmdr_lock, cmd_head is stale */
		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	}

	/* Insert a PAD if end-of-ring space is too small */
	if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

		entry = (void *) mb + CMDR_OFF + cmd_head;
		tcmu_flush_dcache_range(entry, sizeof(*entry));
		tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
		tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
		entry->hdr.cmd_id = 0; /* not used for PAD */
		entry->hdr.kflags = 0;
		entry->hdr.uflags = 0;

		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);

		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
		WARN_ON(cmd_head != 0);
	}

	entry = (void *) mb + CMDR_OFF + cmd_head;
	tcmu_flush_dcache_range(entry, sizeof(*entry));
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
	entry->hdr.cmd_id = tcmu_cmd->cmd_id;
	entry->hdr.kflags = 0;
	entry->hdr.uflags = 0;

	/*
	 * Fix up iovecs, and handle if allocation in data ring wrapped.
	 */
	iov = &entry->req.iov[0];
	for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
		size_t copy_bytes = min((size_t)sg->length,
					head_to_end(udev->data_head, udev->data_size));
		void *from = kmap_atomic(sg_page(sg)) + sg->offset;
		void *to = (void *) mb + udev->data_off + udev->data_head;

		/* Copy data from the sgl into the data area when writing to the device */
		if (tcmu_cmd->se_cmd->data_direction == DMA_TO_DEVICE) {
			memcpy(to, from, copy_bytes);
			tcmu_flush_dcache_range(to, copy_bytes);
		}

		/* Even iov_base is relative to mb_addr */
		iov->iov_len = copy_bytes;
		iov->iov_base = (void __user *) udev->data_off +
						udev->data_head;
		iov_cnt++;
		iov++;

		UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);

		/* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
		if (sg->length != copy_bytes) {
			from += copy_bytes;
			copy_bytes = sg->length - copy_bytes;

			iov->iov_len = copy_bytes;
			iov->iov_base = (void __user *) udev->data_off +
							udev->data_head;

			if (se_cmd->data_direction == DMA_TO_DEVICE) {
				to = (void *) mb + udev->data_off + udev->data_head;
				memcpy(to, from, copy_bytes);
				tcmu_flush_dcache_range(to, copy_bytes);
			}

			iov_cnt++;
			iov++;

			UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
		}

		kunmap_atomic(from);
	}

	entry->req.iov_cnt = iov_cnt;
	entry->req.iov_bidi_cnt = 0;
	entry->req.iov_dif_cnt = 0;

	/* All offsets relative to mb_addr, not start of entry! */
	cdb_off = CMDR_OFF + cmd_head + base_command_size;
	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
	entry->req.cdb_off = cdb_off;
	tcmu_flush_dcache_range(entry, sizeof(*entry));

	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	spin_unlock_irq(&udev->cmdr_lock);

	/* TODO: only if FLUSH and FUA? */
	uio_event_notify(&udev->uio_info);

	mod_timer(&udev->timeout,
		  round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));

	return 0;
}

static int tcmu_queue_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	int ret;

	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
	if (!tcmu_cmd)
		return -ENOMEM;

	ret = tcmu_queue_cmd_ring(tcmu_cmd);
	if (ret < 0) {
		pr_err("TCMU: Could not queue command\n");
		spin_lock_irq(&udev->commands_lock);
		idr_remove(&udev->commands, tcmu_cmd->cmd_id);
		spin_unlock_irq(&udev->commands_lock);

		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
	}

	return ret;
}

static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	struct tcmu_dev *udev = cmd->tcmu_dev;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		/* cmd has been completed already from timeout, just reclaim data
		   ring space */
		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
		return;
	}

	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
			cmd->se_cmd);
		transport_generic_request_failure(cmd->se_cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
		cmd->se_cmd = NULL;
		kmem_cache_free(tcmu_cmd_cache, cmd);
		return;
	}

	if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
		memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
		       se_cmd->scsi_sense_length);

		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
	}
	else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		struct scatterlist *sg;
		int i;

		/* It'd be easier to look at entry's iovec again, but UAM */
		for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
			size_t copy_bytes;
			void *to;
			void *from;

			copy_bytes = min((size_t)sg->length,
					 head_to_end(udev->data_tail, udev->data_size));

			to = kmap_atomic(sg_page(sg)) + sg->offset;
			WARN_ON(sg->length + sg->offset > PAGE_SIZE);
			from = (void *) udev->mb_addr + udev->data_off + udev->data_tail;
			tcmu_flush_dcache_range(from, copy_bytes);
			memcpy(to, from, copy_bytes);

			UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);

			/* Uh oh, wrapped the data buffer for this sg's data */
			if (sg->length != copy_bytes) {
				from = (void *) udev->mb_addr + udev->data_off + udev->data_tail;
				WARN_ON(udev->data_tail);
				to += copy_bytes;
				copy_bytes = sg->length - copy_bytes;
				tcmu_flush_dcache_range(from, copy_bytes);
				memcpy(to, from, copy_bytes);

				UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
			}

			kunmap_atomic(to);
		}

	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
	} else {
		pr_warn("TCMU: data direction was %d!\n", se_cmd->data_direction);
	}

	target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
	cmd->se_cmd = NULL;

	kmem_cache_free(tcmu_cmd_cache, cmd);
}

static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
	struct tcmu_mailbox *mb;
	unsigned long flags;
	int handled = 0;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		pr_err("ring broken, not handling completions\n");
		return 0;
	}

	spin_lock_irqsave(&udev->cmdr_lock, flags);

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {

		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
		struct tcmu_cmd *cmd;

		tcmu_flush_dcache_range(entry, sizeof(*entry));

		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
			UPDATE_HEAD(udev->cmdr_last_cleaned,
				    tcmu_hdr_get_len(entry->hdr.len_op),
				    udev->cmdr_size);
			continue;
		}
		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

		spin_lock(&udev->commands_lock);
		cmd = idr_find(&udev->commands, entry->hdr.cmd_id);
		if (cmd)
			idr_remove(&udev->commands, cmd->cmd_id);
		spin_unlock(&udev->commands_lock);

		if (!cmd) {
			pr_err("cmd_id not found, ring is broken\n");
			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
			break;
		}

		tcmu_handle_completion(cmd, entry);

		UPDATE_HEAD(udev->cmdr_last_cleaned,
			    tcmu_hdr_get_len(entry->hdr.len_op),
			    udev->cmdr_size);

		handled++;
	}

	if (mb->cmd_tail == mb->cmd_head)
		del_timer(&udev->timeout); /* no more pending cmds */

	spin_unlock_irqrestore(&udev->cmdr_lock, flags);

	wake_up(&udev->wait_cmdr);

	return handled;
}

static int tcmu_check_expired_cmd(int id, void *p, void *data)
{
	struct tcmu_cmd *cmd = p;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		return 0;

	/* Deadline still in the future: nothing to do yet */
	if (time_after(cmd->deadline, jiffies))
		return 0;

	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
	target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
	cmd->se_cmd = NULL;

	kmem_cache_free(tcmu_cmd_cache, cmd);

	return 0;
}

static void tcmu_device_timedout(unsigned long data)
{
	struct tcmu_dev *udev = (struct tcmu_dev *)data;
	unsigned long flags;
	int handled;

	handled = tcmu_handle_completions(udev);

	pr_warn("%d completions handled from timeout\n", handled);

	spin_lock_irqsave(&udev->commands_lock, flags);
	idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
	spin_unlock_irqrestore(&udev->commands_lock, flags);

	/*
	 * We don't need to wakeup threads on wait_cmdr since they have their
	 * own timeout.
	 */
}

static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct tcmu_hba *tcmu_hba;

	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
	if (!tcmu_hba)
		return -ENOMEM;

	tcmu_hba->host_id = host_id;
	hba->hba_ptr = tcmu_hba;

	return 0;
}

static void tcmu_detach_hba(struct se_hba *hba)
{
	kfree(hba->hba_ptr);
	hba->hba_ptr = NULL;
}

static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
	struct tcmu_dev *udev;

	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
	if (!udev)
		return NULL;

	udev->name = kstrdup(name, GFP_KERNEL);
	if (!udev->name) {
		kfree(udev);
		return NULL;
	}

	udev->hba = hba;

	init_waitqueue_head(&udev->wait_cmdr);
	spin_lock_init(&udev->cmdr_lock);

	idr_init(&udev->commands);
	spin_lock_init(&udev->commands_lock);

	setup_timer(&udev->timeout, tcmu_device_timedout,
		    (unsigned long)udev);

	udev->pass_level = TCMU_PASS_ALL;

	return &udev->se_dev;
}

static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
	struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info);

	tcmu_handle_completions(tcmu_dev);

	return 0;
}

/*
 * mmap code from uio.c. Copied here because we want to hook mmap()
 * and this stuff must come along.
 */
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}

static int tcmu_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct tcmu_dev *udev = vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;
	struct page *page;
	unsigned long offset;
	void *addr;

	int mi = tcmu_find_mem_index(vma);
	if (mi < 0)
		return VM_FAULT_SIGBUS;

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	addr = (void *)(unsigned long)info->mem[mi].addr + offset;
	if (info->mem[mi].memtype == UIO_MEM_LOGICAL)
		page = virt_to_page(addr);
	else
		page = vmalloc_to_page(addr);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct tcmu_vm_ops = {
	.fault = tcmu_vma_fault,
};

static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &tcmu_vm_ops;

	vma->vm_private_data = udev;

	/* Ensure the mmap is exactly the right size */
	if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT))
		return -EINVAL;

	return 0;
}

*info
, struct inode
*inode
)
767 struct tcmu_dev
*udev
= container_of(info
, struct tcmu_dev
, uio_info
);
769 /* O_EXCL not supported for char devs, so fake it? */
770 if (test_and_set_bit(TCMU_DEV_BIT_OPEN
, &udev
->flags
))
778 static int tcmu_release(struct uio_info
*info
, struct inode
*inode
)
780 struct tcmu_dev
*udev
= container_of(info
, struct tcmu_dev
, uio_info
);
782 clear_bit(TCMU_DEV_BIT_OPEN
, &udev
->flags
);
static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor)
{
	struct sk_buff *skb;
	void *msg_header;
	int ret = -ENOMEM;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return ret;

	msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
	if (!msg_header)
		goto free_skb;

	ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor);
	if (ret < 0)
		goto free_skb;

	genlmsg_end(skb, msg_header);

	ret = genlmsg_multicast(&tcmu_genl_family, skb, 0,
				TCMU_MCGRP_CONFIG, GFP_KERNEL);

	/* We don't care if no one is listening */
	if (ret == -ESRCH)
		ret = 0;

	return ret;
free_skb:
	nlmsg_free(skb);
	return ret;
}

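/*
 * A userspace daemon can subscribe to these notifications by resolving
 * the family and joining its "config" multicast group, e.g. with
 * libnl-genl (sketch only; error handling and setup details elided):
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int grp = genl_ctrl_resolve_grp(sk, "TCM-USER", "config");
 *	nl_socket_add_memberships(sk, grp, 0);
 *	// nl_recvmsgs() then delivers TCMU_ATTR_DEVICE/TCMU_ATTR_MINOR
 */
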
static int tcmu_configure_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	struct tcmu_hba *hba = udev->hba->hba_ptr;
	struct uio_info *info;
	struct tcmu_mailbox *mb;
	size_t size;
	size_t used;
	int ret = 0;
	char *str;

	info = &udev->uio_info;

	size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
			udev->dev_config);
	size += 1; /* for \0 */
	str = kmalloc(size, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);

	if (udev->dev_config[0])
		snprintf(str + used, size - used, "/%s", udev->dev_config);

	info->name = str;

	udev->mb_addr = vzalloc(TCMU_RING_SIZE);
	if (!udev->mb_addr) {
		ret = -ENOMEM;
		goto err_vzalloc;
	}

	/* mailbox fits in first part of CMDR space */
	udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
	udev->data_off = CMDR_SIZE;
	udev->data_size = TCMU_RING_SIZE - CMDR_SIZE;

	mb = udev->mb_addr;
	mb->version = TCMU_MAILBOX_VERSION;
	mb->cmdr_off = CMDR_OFF;
	mb->cmdr_size = udev->cmdr_size;

	WARN_ON(!PAGE_ALIGNED(udev->data_off));
	WARN_ON(udev->data_size % PAGE_SIZE);

	info->version = __stringify(TCMU_MAILBOX_VERSION);

	info->mem[0].name = "tcm-user command & data buffer";
	info->mem[0].addr = (phys_addr_t) udev->mb_addr;
	info->mem[0].size = TCMU_RING_SIZE;
	info->mem[0].memtype = UIO_MEM_VIRTUAL;

	info->irqcontrol = tcmu_irqcontrol;
	info->irq = UIO_IRQ_CUSTOM;

	info->mmap = tcmu_mmap;
	info->open = tcmu_open;
	info->release = tcmu_release;

	ret = uio_register_device(tcmu_root_device, info);
	if (ret)
		goto err_register;

	/* Other attributes can be configured in userspace */
	dev->dev_attrib.hw_block_size = 512;
	dev->dev_attrib.hw_max_sectors = 128;
	dev->dev_attrib.hw_queue_depth = 128;

	ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
				 udev->uio_info.uio_dev->minor);
	if (ret)
		goto err_netlink;

	return 0;

err_netlink:
	uio_unregister_device(&udev->uio_info);
err_register:
	vfree(udev->mb_addr);
err_vzalloc:
	kfree(info->name);

	return ret;
}

static int tcmu_check_pending_cmd(int id, void *p, void *data)
{
	struct tcmu_cmd *cmd = p;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		return 0;
	return -EINVAL;
}

static void tcmu_free_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	int i;

	del_timer_sync(&udev->timeout);

	vfree(udev->mb_addr);

	/* Upper layer should drain all requests before calling this */
	spin_lock_irq(&udev->commands_lock);
	i = idr_for_each(&udev->commands, tcmu_check_pending_cmd, NULL);
	idr_destroy(&udev->commands);
	spin_unlock_irq(&udev->commands_lock);
	WARN_ON(i);

	/* Device was configured */
	if (udev->uio_info.uio_dev) {
		tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
				   udev->uio_info.uio_dev->minor);

		uio_unregister_device(&udev->uio_info);
		kfree(udev->uio_info.name);
	}

	kfree(udev->name);
	kfree(udev);
}

enum {
	Opt_dev_config, Opt_dev_size, Opt_err, Opt_pass_level,
};

static match_table_t tokens = {
	{Opt_dev_config, "dev_config=%s"},
	{Opt_dev_size, "dev_size=%u"},
	{Opt_pass_level, "pass_level=%u"},
	{Opt_err, NULL}
};

static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	char *orig, *ptr, *opts, *arg_p;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	int arg;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_dev_config:
			if (match_strlcpy(udev->dev_config, &args[0],
					  TCMU_CONFIG_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
			break;
		case Opt_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
			kfree(arg_p);
			if (ret < 0)
				pr_err("kstrtoul() failed for dev_size=\n");
			break;
		case Opt_pass_level:
			match_int(args, &arg);
			if (arg >= TCMU_PASS_INVALID) {
				pr_warn("TCMU: Invalid pass_level: %d\n", arg);
				break;
			}

			pr_debug("TCMU: Setting pass_level to %d\n", arg);
			udev->pass_level = arg;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "Config: %s ",
		     udev->dev_config[0] ? udev->dev_config : "NULL");
	bl += sprintf(b + bl, "Size: %zu PassLevel: %u\n",
		      udev->dev_size, udev->pass_level);

	return bl;
}

static sector_t tcmu_get_blocks(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	return div_u64(udev->dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}

static sense_reason_t
tcmu_execute_rw(struct se_cmd *se_cmd, struct scatterlist *sgl, u32 sgl_nents,
		enum dma_data_direction data_direction)
{
	int ret;

	ret = tcmu_queue_cmd(se_cmd);

	if (ret != 0)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	else
		return TCM_NO_SENSE;
}

static sense_reason_t
tcmu_pass_op(struct se_cmd *se_cmd)
{
	int ret = tcmu_queue_cmd(se_cmd);

	if (ret != 0)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	else
		return TCM_NO_SENSE;
}

static struct sbc_ops tcmu_sbc_ops = {
	.execute_rw		= tcmu_execute_rw,
	.execute_sync_cache	= tcmu_pass_op,
	.execute_write_same	= tcmu_pass_op,
	.execute_write_same_unmap = tcmu_pass_op,
	.execute_unmap		= tcmu_pass_op,
};

static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct tcmu_dev *udev = TCMU_DEV(cmd->se_dev);
	sense_reason_t ret;

	switch (udev->pass_level) {
	case TCMU_PASS_ALL:
		/* We're just like pscsi, then */
		/*
		 * For REPORT LUNS we always need to emulate the response,
		 * for everything else, pass it up.
		 */
		switch (cdb[0]) {
		case REPORT_LUNS:
			cmd->execute_cmd = spc_emulate_report_luns;
			break;
		case READ_6:
		case READ_10:
		case READ_12:
		case READ_16:
		case WRITE_6:
		case WRITE_10:
		case WRITE_12:
		case WRITE_16:
		case WRITE_VERIFY:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			/* FALLTHROUGH */
		default:
			cmd->execute_cmd = tcmu_pass_op;
		}
		ret = TCM_NO_SENSE;
		break;
	case TCMU_PASS_IO:
		ret = sbc_parse_cdb(cmd, &tcmu_sbc_ops);
		break;
	default:
		pr_err("Unknown tcm-user pass level %d\n", udev->pass_level);
		ret = TCM_CHECK_CONDITION_ABORT_CMD;
	}

	return ret;
}

DEF_TB_DEFAULT_ATTRIBS(tcmu);

static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
	&tcmu_dev_attrib_emulate_model_alias.attr,
	&tcmu_dev_attrib_emulate_dpo.attr,
	&tcmu_dev_attrib_emulate_fua_write.attr,
	&tcmu_dev_attrib_emulate_fua_read.attr,
	&tcmu_dev_attrib_emulate_write_cache.attr,
	&tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr,
	&tcmu_dev_attrib_emulate_tas.attr,
	&tcmu_dev_attrib_emulate_tpu.attr,
	&tcmu_dev_attrib_emulate_tpws.attr,
	&tcmu_dev_attrib_emulate_caw.attr,
	&tcmu_dev_attrib_emulate_3pc.attr,
	&tcmu_dev_attrib_pi_prot_type.attr,
	&tcmu_dev_attrib_hw_pi_prot_type.attr,
	&tcmu_dev_attrib_pi_prot_format.attr,
	&tcmu_dev_attrib_enforce_pr_isids.attr,
	&tcmu_dev_attrib_is_nonrot.attr,
	&tcmu_dev_attrib_emulate_rest_reord.attr,
	&tcmu_dev_attrib_force_pr_aptpl.attr,
	&tcmu_dev_attrib_hw_block_size.attr,
	&tcmu_dev_attrib_block_size.attr,
	&tcmu_dev_attrib_hw_max_sectors.attr,
	&tcmu_dev_attrib_optimal_sectors.attr,
	&tcmu_dev_attrib_hw_queue_depth.attr,
	&tcmu_dev_attrib_queue_depth.attr,
	&tcmu_dev_attrib_max_unmap_lba_count.attr,
	&tcmu_dev_attrib_max_unmap_block_desc_count.attr,
	&tcmu_dev_attrib_unmap_granularity.attr,
	&tcmu_dev_attrib_unmap_granularity_alignment.attr,
	&tcmu_dev_attrib_max_write_same_len.attr,
	NULL,
};

static struct se_subsystem_api tcmu_template = {
	.name			= "user",
	.inquiry_prod		= "USER",
	.inquiry_rev		= TCMU_VERSION,
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.attach_hba		= tcmu_attach_hba,
	.detach_hba		= tcmu_detach_hba,
	.alloc_device		= tcmu_alloc_device,
	.configure_device	= tcmu_configure_device,
	.free_device		= tcmu_free_device,
	.parse_cdb		= tcmu_parse_cdb,
	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= tcmu_get_blocks,
};

static int __init tcmu_module_init(void)
{
	struct target_backend_cits *tbc = &tcmu_template.tb_cits;
	int ret;

	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);

	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
				sizeof(struct tcmu_cmd),
				__alignof__(struct tcmu_cmd),
				0, NULL);
	if (!tcmu_cmd_cache)
		return -ENOMEM;

	tcmu_root_device = root_device_register("tcm_user");
	if (IS_ERR(tcmu_root_device)) {
		ret = PTR_ERR(tcmu_root_device);
		goto out_free_cache;
	}

	ret = genl_register_family(&tcmu_genl_family);
	if (ret < 0)
		goto out_unreg_device;

	target_core_setup_sub_cits(&tcmu_template);
	tbc->tb_dev_attrib_cit.ct_attrs = tcmu_backend_dev_attrs;

	ret = transport_subsystem_register(&tcmu_template);
	if (ret)
		goto out_unreg_genl;

	return 0;

out_unreg_genl:
	genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
	root_device_unregister(tcmu_root_device);
out_free_cache:
	kmem_cache_destroy(tcmu_cmd_cache);

	return ret;
}

static void __exit tcmu_module_exit(void)
{
	transport_subsystem_release(&tcmu_template);
	genl_unregister_family(&tcmu_genl_family);
	root_device_unregister(tcmu_root_device);
	kmem_cache_destroy(tcmu_cmd_cache);
}

MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
MODULE_LICENSE("GPL");

module_init(tcmu_module_init);
module_exit(tcmu_module_exit);