/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 * Copyright (C) 2017 Chinamobile, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/radix-tree.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>
/*
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This allows backends that are too
 * complex for in-kernel support to be implemented in userspace.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap()ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */
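/*
 * For orientation only: a minimal sketch of the userspace side of this
 * protocol, assuming the layout described in target_core_user.h (mailbox
 * at offset 0 of the mmap()ed region, command ring at mb->cmdr_off).
 * handle_scsi_cmd() is a hypothetical user-defined handler; error
 * handling and the data-area iovecs are omitted. This is not part of
 * the driver.
 *
 *	struct tcmu_mailbox *mb = map;
 *	for (;;) {
 *		// block until the kernel's uio_event_notify()
 *		read(uio_fd, &event, sizeof(event));
 *		while (mb->cmd_tail != mb->cmd_head) {
 *			struct tcmu_cmd_entry *ent =
 *				map + mb->cmdr_off + mb->cmd_tail;
 *			if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) {
 *				handle_scsi_cmd(ent);	// user-defined
 *				ent->rsp.scsi_status = SAM_STAT_GOOD;
 *			}
 *			// PAD entries are consumed by just advancing
 *			mb->cmd_tail = (mb->cmd_tail +
 *				tcmu_hdr_get_len(ent->hdr.len_op)) %
 *				mb->cmdr_size;
 *		}
 *		// kick the kernel to reap completions
 *		write(uio_fd, &event, sizeof(event));
 *	}
 */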
#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

/* For the cmd area, the size is fixed at 8MB */
#define CMDR_SIZE (8 * 1024 * 1024)

/*
 * For the data area, the block size is PAGE_SIZE and
 * the total size is 256K * PAGE_SIZE.
 */
#define DATA_BLOCK_SIZE PAGE_SIZE
#define DATA_BLOCK_BITS (256 * 1024)
#define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)
#define DATA_BLOCK_INIT_BITS 128

/* The total size of the ring is 8M + 256K * PAGE_SIZE */
#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)

/* Default maximum of the global data blocks (512K * PAGE_SIZE) */
#define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024)
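/*
 * Worked example (assuming PAGE_SIZE == 4096): DATA_SIZE is
 * 256K * 4KB = 1GB of sparsely populated data area per device, so
 * TCMU_RING_SIZE is 8MB + 1GB of address space, while
 * TCMU_GLOBAL_MAX_BLOCKS caps actually-allocated data pages at
 * 512K * 4KB = 2GB across all devices.
 */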
static u8 tcmu_kern_cmd_reply_supported;
static u8 tcmu_netlink_blocked;

static struct device *tcmu_root_device;

struct tcmu_hba {
	u32 host_id;
};

#define TCMU_CONFIG_LEN 256

static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
static LIST_HEAD(tcmu_nl_cmd_list);

struct tcmu_dev;

struct tcmu_nl_cmd {
	/* wake up thread waiting for reply */
	struct completion complete;
	struct list_head nl_list;
	struct tcmu_dev *udev;
	int cmd;
	int status;
};

struct tcmu_dev {
	struct list_head node;
	struct kref kref;

	struct se_device se_dev;

	char *name;
	struct se_hba *hba;

#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
	unsigned long flags;

	struct uio_info uio_info;

	struct inode *inode;

	struct tcmu_mailbox *mb_addr;
	size_t dev_size;
	u32 cmdr_size;
	u32 cmdr_last_cleaned;
	/* Offset of data area from start of mb */
	/* Must add data_off and mb_addr to get the address */
	size_t data_off;
	size_t data_size;

	wait_queue_head_t wait_cmdr;
	struct mutex cmdr_lock;

	bool waiting_global;
	uint32_t dbi_max;
	uint32_t dbi_thresh;
	DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);
	struct radix_tree_root data_blocks;

	struct idr commands;
	spinlock_t commands_lock;

	struct timer_list timeout;
	unsigned int cmd_time_out;

	struct tcmu_nl_cmd curr_nl_cmd;

	char dev_config[TCMU_CONFIG_LEN];

	int nl_reply_supported;
};
#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)

#define CMDR_OFF sizeof(struct tcmu_mailbox)
struct tcmu_cmd {
	struct se_cmd *se_cmd;
	struct tcmu_dev *tcmu_dev;

	uint16_t cmd_id;

	/* Can't use se_cmd when cleaning up expired cmds, because if
	   cmd has been completed then accessing se_cmd is off limits */
	uint32_t dbi_cnt;
	uint32_t dbi_cur;
	uint32_t *dbi;

	unsigned long deadline;

#define TCMU_CMD_BIT_EXPIRED 0
	unsigned long flags;
};
static struct task_struct *unmap_thread;
static wait_queue_head_t unmap_wait;
static DEFINE_MUTEX(root_udev_mutex);
static LIST_HEAD(root_udev);

static atomic_t global_db_count = ATOMIC_INIT(0);

static struct kmem_cache *tcmu_cmd_cache;
static int tcmu_get_block_netlink(char *buffer,
				  const struct kernel_param *kp)
{
	return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
		       "blocked" : "unblocked");
}

static int tcmu_set_block_netlink(const char *str,
				  const struct kernel_param *kp)
{
	int ret;
	u8 val;

	ret = kstrtou8(str, 0, &val);
	if (ret < 0)
		return ret;

	if (val > 1) {
		pr_err("Invalid block netlink value %u\n", val);
		return -EINVAL;
	}

	tcmu_netlink_blocked = val;
	return 0;
}

static const struct kernel_param_ops tcmu_block_netlink_op = {
	.set = tcmu_set_block_netlink,
	.get = tcmu_get_block_netlink,
};

module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");
static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
{
	struct tcmu_dev *udev = nl_cmd->udev;

	if (!tcmu_netlink_blocked) {
		pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
		return -EBUSY;
	}

	if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
		pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
		nl_cmd->status = -EINTR;
		list_del(&nl_cmd->nl_list);
		complete(&nl_cmd->complete);
	}

	return 0;
}
static int tcmu_set_reset_netlink(const char *str,
				  const struct kernel_param *kp)
{
	struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
	int ret;
	u8 val;

	ret = kstrtou8(str, 0, &val);
	if (ret < 0)
		return ret;

	if (val != 1) {
		pr_err("Invalid reset netlink value %u\n", val);
		return -EINVAL;
	}

	mutex_lock(&tcmu_nl_cmd_mutex);
	list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
		ret = tcmu_fail_netlink_cmd(nl_cmd);
		if (ret)
			break;
	}
	mutex_unlock(&tcmu_nl_cmd_mutex);

	return ret;
}

static const struct kernel_param_ops tcmu_reset_netlink_op = {
	.set = tcmu_set_reset_netlink,
};

module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");
/* multicast group */
enum tcmu_multicast_groups {
	TCMU_MCGRP_CONFIG,
};

static const struct genl_multicast_group tcmu_mcgrps[] = {
	[TCMU_MCGRP_CONFIG] = { .name = "config", },
};

static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
	[TCMU_ATTR_DEVICE]	= { .type = NLA_STRING },
	[TCMU_ATTR_MINOR]	= { .type = NLA_U32 },
	[TCMU_ATTR_CMD_STATUS]	= { .type = NLA_S32 },
	[TCMU_ATTR_DEVICE_ID]	= { .type = NLA_U32 },
	[TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
};
static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
{
	struct tcmu_dev *udev = NULL;
	struct tcmu_nl_cmd *nl_cmd;
	int dev_id, rc, ret = 0;

	if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
	    !info->attrs[TCMU_ATTR_DEVICE_ID]) {
		printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
		return -EINVAL;
	}

	dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
	rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);

	mutex_lock(&tcmu_nl_cmd_mutex);
	list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
		if (nl_cmd->udev->se_dev.dev_index == dev_id) {
			udev = nl_cmd->udev;
			break;
		}
	}

	if (!udev) {
		pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
		       completed_cmd, rc, dev_id);
		ret = -ENODEV;
		goto unlock;
	}
	list_del(&nl_cmd->nl_list);

	pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
		 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
		 nl_cmd->status);

	if (nl_cmd->cmd != completed_cmd) {
		pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
		       udev->name, completed_cmd, nl_cmd->cmd);
		ret = -EINVAL;
		goto unlock;
	}

	nl_cmd->status = rc;
	complete(&nl_cmd->complete);
unlock:
	mutex_unlock(&tcmu_nl_cmd_mutex);
	return ret;
}
static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
}

static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
}

static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
				       struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
}
static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
		tcmu_kern_cmd_reply_supported =
			nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
		printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
		       tcmu_kern_cmd_reply_supported);
	}

	return 0;
}
static const struct genl_ops tcmu_genl_ops[] = {
	{
		.cmd	= TCMU_CMD_SET_FEATURES,
		.flags	= GENL_ADMIN_PERM,
		.policy	= tcmu_attr_policy,
		.doit	= tcmu_genl_set_features,
	},
	{
		.cmd	= TCMU_CMD_ADDED_DEVICE_DONE,
		.flags	= GENL_ADMIN_PERM,
		.policy	= tcmu_attr_policy,
		.doit	= tcmu_genl_add_dev_done,
	},
	{
		.cmd	= TCMU_CMD_REMOVED_DEVICE_DONE,
		.flags	= GENL_ADMIN_PERM,
		.policy	= tcmu_attr_policy,
		.doit	= tcmu_genl_rm_dev_done,
	},
	{
		.cmd	= TCMU_CMD_RECONFIG_DEVICE_DONE,
		.flags	= GENL_ADMIN_PERM,
		.policy	= tcmu_attr_policy,
		.doit	= tcmu_genl_reconfig_dev_done,
	},
};

/* Our generic netlink family */
static struct genl_family tcmu_genl_family __ro_after_init = {
	.module = THIS_MODULE,
	.hdrsize = 0,
	.name = "TCM-USER",
	.version = 2,
	.maxattr = TCMU_ATTR_MAX,
	.mcgrps = tcmu_mcgrps,
	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
	.netnsok = true,
	.ops = tcmu_genl_ops,
	.n_ops = ARRAY_SIZE(tcmu_genl_ops),
};
#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
#define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
#define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
#define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])
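/*
 * Illustration of the cursor macros above (not driver code): filling a
 * command's data-block index (dbi) array and replaying it later.
 *
 *	tcmu_cmd_reset_dbi_cur(cmd);	// dbi_cur = 0
 *	tcmu_cmd_set_dbi(cmd, 7);	// dbi[0] = 7,  dbi_cur = 1
 *	tcmu_cmd_set_dbi(cmd, 42);	// dbi[1] = 42, dbi_cur = 2
 *	tcmu_cmd_reset_dbi_cur(cmd);	// rewind for readback
 *	first = tcmu_cmd_get_dbi(cmd);	// 7, dbi_cur = 1 again
 */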
static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	uint32_t i;

	for (i = 0; i < len; i++)
		clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
}
static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
					struct tcmu_cmd *tcmu_cmd)
{
	struct page *page;
	int ret, dbi;

	dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
	if (dbi == udev->dbi_thresh)
		return false;

	page = radix_tree_lookup(&udev->data_blocks, dbi);
	if (!page) {
		if (atomic_add_return(1, &global_db_count) >
					TCMU_GLOBAL_MAX_BLOCKS) {
			atomic_dec(&global_db_count);
			return false;
		}

		/* try to get new page from the mm */
		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto err_alloc;

		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
		if (ret)
			goto err_insert;
	}

	if (dbi > udev->dbi_max)
		udev->dbi_max = dbi;

	set_bit(dbi, udev->data_bitmap);
	tcmu_cmd_set_dbi(tcmu_cmd, dbi);

	return true;
err_insert:
	__free_page(page);
err_alloc:
	atomic_dec(&global_db_count);
	return false;
}
static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
				  struct tcmu_cmd *tcmu_cmd)
{
	int i;

	udev->waiting_global = false;

	for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
		if (!tcmu_get_empty_block(udev, tcmu_cmd))
			goto err;
	}
	return true;

err:
	udev->waiting_global = true;
	/* Try to wake up the unmap thread */
	wake_up(&unmap_wait);
	return false;
}
static inline struct page *
tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
	return radix_tree_lookup(&udev->data_blocks, dbi);
}
static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
	kfree(tcmu_cmd->dbi);
	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}
static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);

	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
		data_length += round_up(se_cmd->t_bidi_data_sg->length,
				DATA_BLOCK_SIZE);
	}

	return data_length;
}
static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
{
	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);

	return data_length / DATA_BLOCK_SIZE;
}
static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;

	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
	if (!tcmu_cmd)
		return NULL;

	tcmu_cmd->se_cmd = se_cmd;
	tcmu_cmd->tcmu_dev = udev;

	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
	tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
				GFP_KERNEL);
	if (!tcmu_cmd->dbi) {
		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
		return NULL;
	}

	return tcmu_cmd;
}
static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
	unsigned long offset = offset_in_page(vaddr);

	size = round_up(size+offset, PAGE_SIZE);
	size -= offset;

	while (size) {
		flush_dcache_page(virt_to_page(vaddr));
		size -= PAGE_SIZE;
	}
}
/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
	int diff = head - tail;

	if (diff >= 0)
		return diff;
	else
		return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* Keep 1 byte unused or we can't tell full from empty */
	return (size - spc_used(head, tail, size) - 1);
}

static inline size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}
static inline void new_iov(struct iovec **iov, int *iov_cnt,
			   struct tcmu_dev *udev)
{
	struct iovec *iovec;

	if (*iov_cnt != 0)
		(*iov)++;
	(*iov_cnt)++;

	iovec = *iov;
	memset(iovec, 0, sizeof(struct iovec));
}

#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
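/*
 * Worked example of the ring accounting above (not driver code),
 * assuming a 100-byte ring: with head == 10 and tail == 90,
 * spc_used() == (10 - 90) + 100 == 20 and spc_free() == 79, since one
 * byte always stays unused to distinguish full from empty. Likewise
 * UPDATE_HEAD(head, 15, 100) with head == 90 wraps it to 5.
 */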
/* offset is relative to mb_addr */
static inline size_t get_block_offset_user(struct tcmu_dev *dev,
		int dbi, int remaining)
{
	return dev->data_off + dbi * DATA_BLOCK_SIZE +
		DATA_BLOCK_SIZE - remaining;
}

static inline size_t iov_tail(struct iovec *iov)
{
	return (size_t)iov->iov_base + iov->iov_len;
}
static int scatter_data_area(struct tcmu_dev *udev,
	struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
	unsigned int data_nents, struct iovec **iov,
	int *iov_cnt, bool copy_data)
{
	int i, dbi;
	int block_remaining = 0;
	void *from, *to = NULL;
	size_t copy_bytes, to_offset, offset;
	struct scatterlist *sg;
	struct page *page;

	for_each_sg(data_sg, sg, data_nents, i) {
		int sg_remaining = sg->length;
		from = kmap_atomic(sg_page(sg)) + sg->offset;
		while (sg_remaining > 0) {
			if (block_remaining == 0) {
				if (to)
					kunmap_atomic(to);

				block_remaining = DATA_BLOCK_SIZE;
				dbi = tcmu_cmd_get_dbi(tcmu_cmd);
				page = tcmu_get_block_page(udev, dbi);
				to = kmap_atomic(page);
			}

			copy_bytes = min_t(size_t, sg_remaining,
					block_remaining);
			to_offset = get_block_offset_user(udev, dbi,
					block_remaining);

			if (*iov_cnt != 0 &&
			    to_offset == iov_tail(*iov)) {
				(*iov)->iov_len += copy_bytes;
			} else {
				new_iov(iov, iov_cnt, udev);
				(*iov)->iov_base = (void __user *)to_offset;
				(*iov)->iov_len = copy_bytes;
			}
			if (copy_data) {
				offset = DATA_BLOCK_SIZE - block_remaining;
				memcpy(to + offset,
				       from + sg->length - sg_remaining,
				       copy_bytes);
				tcmu_flush_dcache_range(to, copy_bytes);
			}
			sg_remaining -= copy_bytes;
			block_remaining -= copy_bytes;
		}
		kunmap_atomic(from - sg->offset);
	}
	if (to)
		kunmap_atomic(to);

	return 0;
}
static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
			     bool bidi)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	int i, dbi;
	int block_remaining = 0;
	void *from = NULL, *to;
	size_t copy_bytes, offset;
	struct scatterlist *sg, *data_sg;
	struct page *page;
	unsigned int data_nents;
	uint32_t count = 0;

	if (!bidi) {
		data_sg = se_cmd->t_data_sg;
		data_nents = se_cmd->t_data_nents;
	} else {
		/*
		 * For bidi case, the first count blocks are for Data-Out
		 * buffer blocks, and before gathering the Data-In buffer
		 * the Data-Out buffer blocks should be discarded.
		 */
		count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);

		data_sg = se_cmd->t_bidi_data_sg;
		data_nents = se_cmd->t_bidi_data_nents;
	}

	tcmu_cmd_set_dbi_cur(cmd, count);

	for_each_sg(data_sg, sg, data_nents, i) {
		int sg_remaining = sg->length;
		to = kmap_atomic(sg_page(sg)) + sg->offset;
		while (sg_remaining > 0) {
			if (block_remaining == 0) {
				if (from)
					kunmap_atomic(from);

				block_remaining = DATA_BLOCK_SIZE;
				dbi = tcmu_cmd_get_dbi(cmd);
				page = tcmu_get_block_page(udev, dbi);
				from = kmap_atomic(page);
			}
			copy_bytes = min_t(size_t, sg_remaining,
					block_remaining);
			offset = DATA_BLOCK_SIZE - block_remaining;
			tcmu_flush_dcache_range(from, copy_bytes);
			memcpy(to + sg->length - sg_remaining, from + offset,
					copy_bytes);

			sg_remaining -= copy_bytes;
			block_remaining -= copy_bytes;
		}
		kunmap_atomic(to - sg->offset);
	}
	if (from)
		kunmap_atomic(from);
}
static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
{
	return DATA_BLOCK_SIZE * (thresh - bitmap_weight(bitmap, thresh));
}
/*
 * We can't queue a command until we have space available on the cmd ring *and*
 * space available on the data area.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
		size_t cmd_size, size_t data_needed)
{
	struct tcmu_mailbox *mb = udev->mb_addr;
	uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
				/ DATA_BLOCK_SIZE;
	size_t space, cmd_needed;
	u32 cmd_head;

	tcmu_flush_dcache_range(mb, sizeof(*mb));

	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/*
	 * If cmd end-of-ring space is too small then we need space for a NOP plus
	 * original cmd - cmds are internally contiguous.
	 */
	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
	if (space < cmd_needed) {
		pr_debug("no cmd space: %u %u %u\n", cmd_head,
		       udev->cmdr_last_cleaned, udev->cmdr_size);
		return false;
	}

	/* try to check and get the data blocks as needed */
	space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
	if (space < data_needed) {
		unsigned long blocks_left = DATA_BLOCK_BITS - udev->dbi_thresh;
		unsigned long grow;

		if (blocks_left < blocks_needed) {
			pr_debug("no data space: only %lu available, but ask for %zu\n",
					blocks_left * DATA_BLOCK_SIZE,
					data_needed);
			return false;
		}

		/* Try to expand the thresh */
		if (!udev->dbi_thresh) {
			/* From idle state */
			uint32_t init_thresh = DATA_BLOCK_INIT_BITS;

			udev->dbi_thresh = max(blocks_needed, init_thresh);
		} else {
			/*
			 * Grow the data area by max(blocks needed,
			 * dbi_thresh / 2), but limited to the max
			 * DATA_BLOCK_BITS size.
			 */
			grow = max(blocks_needed, udev->dbi_thresh / 2);
			udev->dbi_thresh += grow;
			if (udev->dbi_thresh > DATA_BLOCK_BITS)
				udev->dbi_thresh = DATA_BLOCK_BITS;
		}
	}

	return tcmu_get_empty_blocks(udev, cmd);
}
static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
{
	return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
			sizeof(struct tcmu_cmd_entry));
}

static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
					   size_t base_command_size)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t command_size;

	command_size = base_command_size +
		round_up(scsi_command_size(se_cmd->t_task_cdb),
				TCMU_OP_ALIGN_SIZE);

	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));

	return command_size;
}
static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	unsigned long tmo = udev->cmd_time_out;
	int cmd_id;

	if (tcmu_cmd->cmd_id)
		return 0;

	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
	if (cmd_id < 0) {
		pr_err("tcmu: Could not allocate cmd id.\n");
		return cmd_id;
	}
	tcmu_cmd->cmd_id = cmd_id;

	if (!tmo)
		return 0;

	tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
	mod_timer(&udev->timeout, tcmu_cmd->deadline);
	return 0;
}
static sense_reason_t
tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t base_command_size, command_size;
	struct tcmu_mailbox *mb;
	struct tcmu_cmd_entry *entry;
	struct iovec *iov;
	int iov_cnt, ret;
	uint32_t cmd_head;
	uint64_t cdb_off;
	bool copy_to_data_area;
	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 *
	 * We prepare as many iovs as possible for potential uses here,
	 * because it's expensive to tell how many regions are freed in
	 * the bitmap & global data pool, as the size calculated here
	 * will only be used to do the checks.
	 *
	 * The size will be recalculated later as actually needed to save
	 * cmd area memories.
	 */
	base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

	mutex_lock(&udev->cmdr_lock);

	mb = udev->mb_addr;
	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	if ((command_size > (udev->cmdr_size / 2)) ||
	    data_length > udev->data_size) {
		pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
			"cmd ring/data area\n", command_size, data_length,
			udev->cmdr_size, udev->data_size);
		mutex_unlock(&udev->cmdr_lock);
		return TCM_INVALID_CDB_FIELD;
	}

	while (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
		int ret;
		DEFINE_WAIT(__wait);

		/*
		 * Don't leave commands partially setup because the unmap
		 * thread might need the blocks to make forward progress.
		 */
		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
		tcmu_cmd_reset_dbi_cur(tcmu_cmd);

		prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);

		pr_debug("sleeping for ring space\n");
		mutex_unlock(&udev->cmdr_lock);
		if (udev->cmd_time_out)
			ret = schedule_timeout(
					msecs_to_jiffies(udev->cmd_time_out));
		else
			ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
		finish_wait(&udev->wait_cmdr, &__wait);
		if (!ret) {
			pr_warn("tcmu: command timed out\n");
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		mutex_lock(&udev->cmdr_lock);

		/* We dropped cmdr_lock, cmd_head is stale */
		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	}

	/* Insert a PAD if end-of-ring space is too small */
	if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

		entry = (void *) mb + CMDR_OFF + cmd_head;
		tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
		tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
		entry->hdr.cmd_id = 0; /* not used for PAD */
		entry->hdr.kflags = 0;
		entry->hdr.uflags = 0;
		tcmu_flush_dcache_range(entry, sizeof(*entry));

		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
		tcmu_flush_dcache_range(mb, sizeof(*mb));

		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
		WARN_ON(cmd_head != 0);
	}

	entry = (void *) mb + CMDR_OFF + cmd_head;
	memset(entry, 0, command_size);
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);

	/* Handle allocating space from the data area */
	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	iov = &entry->req.iov[0];
	iov_cnt = 0;
	copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
		|| se_cmd->se_cmd_flags & SCF_BIDI);
	ret = scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
				se_cmd->t_data_nents, &iov, &iov_cnt,
				copy_to_data_area);
	if (ret) {
		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
		mutex_unlock(&udev->cmdr_lock);

		pr_err("tcmu: alloc and scatter data failed\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	entry->req.iov_cnt = iov_cnt;

	/* Handle BIDI commands */
	iov_cnt = 0;
	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		iov++;
		ret = scatter_data_area(udev, tcmu_cmd,
					se_cmd->t_bidi_data_sg,
					se_cmd->t_bidi_data_nents,
					&iov, &iov_cnt, false);
		if (ret) {
			tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
			mutex_unlock(&udev->cmdr_lock);

			pr_err("tcmu: alloc and scatter bidi data failed\n");
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	}
	entry->req.iov_bidi_cnt = iov_cnt;

	ret = tcmu_setup_cmd_timer(tcmu_cmd);
	if (ret) {
		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
		mutex_unlock(&udev->cmdr_lock);
		return TCM_OUT_OF_RESOURCES;
	}
	entry->hdr.cmd_id = tcmu_cmd->cmd_id;

	/*
	 * Recalculate the command's base size and size according
	 * to the actual needs
	 */
	base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
						       entry->req.iov_bidi_cnt);
	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);

	/* All offsets relative to mb_addr, not start of entry! */
	cdb_off = CMDR_OFF + cmd_head + base_command_size;
	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
	entry->req.cdb_off = cdb_off;
	tcmu_flush_dcache_range(entry, sizeof(*entry));

	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));
	mutex_unlock(&udev->cmdr_lock);

	/* TODO: only if FLUSH and FUA? */
	uio_event_notify(&udev->uio_info);

	if (udev->cmd_time_out)
		mod_timer(&udev->timeout, round_jiffies_up(jiffies +
			  msecs_to_jiffies(udev->cmd_time_out)));

	return TCM_NO_SENSE;
}
static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
	struct tcmu_cmd *tcmu_cmd;
	sense_reason_t ret;

	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
	if (!tcmu_cmd)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	ret = tcmu_queue_cmd_ring(tcmu_cmd);
	if (ret != TCM_NO_SENSE) {
		pr_err("TCMU: Could not queue command\n");

		tcmu_free_cmd(tcmu_cmd);
	}

	return ret;
}
static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	struct tcmu_dev *udev = cmd->tcmu_dev;

	/*
	 * cmd has been completed already from timeout, just reclaim
	 * data area space and free cmd
	 */
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		goto out;

	tcmu_cmd_reset_dbi_cur(cmd);

	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
			cmd->se_cmd);
		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
	} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
		transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
		/* Get Data-In buffer before clean up */
		gather_data_area(udev, cmd, true);
	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		gather_data_area(udev, cmd, false);
	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
		/* TODO: */
	} else if (se_cmd->data_direction != DMA_NONE) {
		pr_warn("TCMU: data direction was %d!\n",
			se_cmd->data_direction);
	}

	target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);

out:
	cmd->se_cmd = NULL;
	tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
	tcmu_free_cmd(cmd);
}
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
	struct tcmu_mailbox *mb;
	int handled = 0;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		pr_err("ring broken, not handling completions\n");
		return 0;
	}

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {

		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
		struct tcmu_cmd *cmd;

		tcmu_flush_dcache_range(entry, sizeof(*entry));

		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
			UPDATE_HEAD(udev->cmdr_last_cleaned,
				    tcmu_hdr_get_len(entry->hdr.len_op),
				    udev->cmdr_size);
			continue;
		}
		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

		spin_lock(&udev->commands_lock);
		cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
		spin_unlock(&udev->commands_lock);
		if (!cmd) {
			pr_err("cmd_id not found, ring is broken\n");
			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
			break;
		}

		tcmu_handle_completion(cmd, entry);

		UPDATE_HEAD(udev->cmdr_last_cleaned,
			    tcmu_hdr_get_len(entry->hdr.len_op),
			    udev->cmdr_size);

		handled++;
	}

	if (mb->cmd_tail == mb->cmd_head)
		del_timer(&udev->timeout); /* no more pending cmds */

	wake_up(&udev->wait_cmdr);

	return handled;
}
static int tcmu_check_expired_cmd(int id, void *p, void *data)
{
	struct tcmu_cmd *cmd = p;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		return 0;

	if (!time_after(jiffies, cmd->deadline))
		return 0;

	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
	target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
	cmd->se_cmd = NULL;

	return 0;
}
static void tcmu_device_timedout(struct timer_list *t)
{
	struct tcmu_dev *udev = from_timer(udev, t, timeout);
	unsigned long flags;

	spin_lock_irqsave(&udev->commands_lock, flags);
	idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
	spin_unlock_irqrestore(&udev->commands_lock, flags);

	/* Try to wake up the unmap thread */
	wake_up(&unmap_wait);

	/*
	 * We don't need to wakeup threads on wait_cmdr since they have their
	 * own timeout.
	 */
}
static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct tcmu_hba *tcmu_hba;

	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
	if (!tcmu_hba)
		return -ENOMEM;

	tcmu_hba->host_id = host_id;
	hba->hba_ptr = tcmu_hba;

	return 0;
}

static void tcmu_detach_hba(struct se_hba *hba)
{
	kfree(hba->hba_ptr);
	hba->hba_ptr = NULL;
}
static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
	struct tcmu_dev *udev;

	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
	if (!udev)
		return NULL;
	kref_init(&udev->kref);

	udev->name = kstrdup(name, GFP_KERNEL);
	if (!udev->name) {
		kfree(udev);
		return NULL;
	}

	udev->hba = hba;
	udev->cmd_time_out = TCMU_TIME_OUT;

	init_waitqueue_head(&udev->wait_cmdr);
	mutex_init(&udev->cmdr_lock);

	idr_init(&udev->commands);
	spin_lock_init(&udev->commands_lock);

	timer_setup(&udev->timeout, tcmu_device_timedout, 0);

	INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);

	return &udev->se_dev;
}
static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
	struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info);

	mutex_lock(&tcmu_dev->cmdr_lock);
	tcmu_handle_completions(tcmu_dev);
	mutex_unlock(&tcmu_dev->cmdr_lock);

	return 0;
}
/*
 * mmap code from uio.c. Copied here because we want to hook mmap()
 * and this stuff must come along.
 */
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}
static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
	struct page *page;
	int ret;

	mutex_lock(&udev->cmdr_lock);
	page = tcmu_get_block_page(udev, dbi);
	if (likely(page)) {
		mutex_unlock(&udev->cmdr_lock);
		return page;
	}

	/*
	 * Normally we should not get here: userspace has touched blocks
	 * that are outside the tcmu_cmd's data iov[]. Return a zeroed
	 * page for such an access.
	 */
	pr_warn("Block(%u) out of cmd's iov[] has been touched!\n", dbi);
	pr_warn("Mostly it will be a bug of userspace, please have a check!\n");

	if (dbi >= udev->dbi_thresh) {
		/* Extend udev->dbi_thresh to dbi + 1 */
		udev->dbi_thresh = dbi + 1;
		udev->dbi_max = dbi;
	}

	page = radix_tree_lookup(&udev->data_blocks, dbi);
	if (!page) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page) {
			mutex_unlock(&udev->cmdr_lock);
			return NULL;
		}

		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
		if (ret) {
			mutex_unlock(&udev->cmdr_lock);
			__free_page(page);
			return NULL;
		}

		/*
		 * Since this case is rare in page fault routine, here we
		 * will allow the global_db_count >= TCMU_GLOBAL_MAX_BLOCKS
		 * to reduce possible page fault call trace.
		 */
		atomic_inc(&global_db_count);
	}
	mutex_unlock(&udev->cmdr_lock);

	return page;
}
static int tcmu_vma_fault(struct vm_fault *vmf)
{
	struct tcmu_dev *udev = vmf->vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;
	struct page *page;
	unsigned long offset;
	void *addr;

	int mi = tcmu_find_mem_index(vmf->vma);
	if (mi < 0)
		return VM_FAULT_SIGBUS;

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	if (offset < udev->data_off) {
		/* For the vmalloc()ed cmd area pages */
		addr = (void *)(unsigned long)info->mem[mi].addr + offset;
		page = vmalloc_to_page(addr);
	} else {
		uint32_t dbi;

		/* For the dynamically growing data area pages */
		dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
		page = tcmu_try_get_block_page(udev, dbi);
		if (!page)
			return VM_FAULT_NOPAGE;
	}

	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct tcmu_vm_ops = {
	.fault = tcmu_vma_fault,
};
static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &tcmu_vm_ops;

	vma->vm_private_data = udev;

	/* Ensure the mmap is exactly the right size */
	if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT))
		return -EINVAL;

	return 0;
}
static int tcmu_open(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	/* O_EXCL not supported for char devs, so fake it? */
	if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
		return -EBUSY;

	udev->inode = inode;
	kref_get(&udev->kref);

	pr_debug("open\n");

	return 0;
}
static void tcmu_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct tcmu_dev *udev = TCMU_DEV(dev);

	kfree(udev->uio_info.name);
	kfree(udev->name);
	kfree(udev);
}

static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		kmem_cache_free(tcmu_cmd_cache, cmd);
		return 0;
	}
	return -EINVAL;
}
static void tcmu_blocks_release(struct tcmu_dev *udev)
{
	int i;
	struct page *page;

	/* Try to release all block pages */
	mutex_lock(&udev->cmdr_lock);
	for (i = 0; i <= udev->dbi_max; i++) {
		page = radix_tree_delete(&udev->data_blocks, i);
		if (page) {
			__free_page(page);
			atomic_dec(&global_db_count);
		}
	}
	mutex_unlock(&udev->cmdr_lock);
}
static void tcmu_dev_kref_release(struct kref *kref)
{
	struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
	struct se_device *dev = &udev->se_dev;
	struct tcmu_cmd *cmd;
	bool all_expired = true;
	int i;

	vfree(udev->mb_addr);
	udev->mb_addr = NULL;

	/* Upper layer should drain all requests before calling this */
	spin_lock_irq(&udev->commands_lock);
	idr_for_each_entry(&udev->commands, cmd, i) {
		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
			all_expired = false;
	}
	idr_destroy(&udev->commands);
	spin_unlock_irq(&udev->commands_lock);
	WARN_ON(!all_expired);

	tcmu_blocks_release(udev);

	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}
static int tcmu_release(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);

	pr_debug("close\n");
	/* release ref from open */
	kref_put(&udev->kref, tcmu_dev_kref_release);
	return 0;
}
static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
{
	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;

	if (!tcmu_kern_cmd_reply_supported)
		return 0;

	if (udev->nl_reply_supported <= 0)
		return 0;

	mutex_lock(&tcmu_nl_cmd_mutex);

	if (tcmu_netlink_blocked) {
		mutex_unlock(&tcmu_nl_cmd_mutex);
		pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
			udev->name);
		return -EAGAIN;
	}

	if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
		mutex_unlock(&tcmu_nl_cmd_mutex);
		pr_warn("netlink cmd %d already executing on %s\n",
			nl_cmd->cmd, udev->name);
		return -EBUSY;
	}

	memset(nl_cmd, 0, sizeof(*nl_cmd));
	nl_cmd->cmd = cmd;
	nl_cmd->udev = udev;
	init_completion(&nl_cmd->complete);
	INIT_LIST_HEAD(&nl_cmd->nl_list);

	list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);

	mutex_unlock(&tcmu_nl_cmd_mutex);
	return 0;
}
static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
{
	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
	int ret;

	if (!tcmu_kern_cmd_reply_supported)
		return 0;

	if (udev->nl_reply_supported <= 0)
		return 0;

	pr_debug("sleeping for nl reply\n");
	wait_for_completion(&nl_cmd->complete);

	mutex_lock(&tcmu_nl_cmd_mutex);
	nl_cmd->cmd = TCMU_CMD_UNSPEC;
	ret = nl_cmd->status;
	mutex_unlock(&tcmu_nl_cmd_mutex);

	return ret;
}
static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd,
			      int reconfig_attr, const void *reconfig_data)
{
	struct sk_buff *skb;
	void *msg_header;
	int ret = -ENOMEM;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return ret;

	msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
	if (!msg_header)
		goto free_skb;

	ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
	if (ret < 0)
		goto free_skb;

	if (cmd == TCMU_CMD_RECONFIG_DEVICE) {
		switch (reconfig_attr) {
		case TCMU_ATTR_DEV_CFG:
			ret = nla_put_string(skb, reconfig_attr, reconfig_data);
			break;
		case TCMU_ATTR_DEV_SIZE:
			ret = nla_put_u64_64bit(skb, reconfig_attr,
						*((u64 *)reconfig_data),
						TCMU_ATTR_PAD);
			break;
		case TCMU_ATTR_WRITECACHE:
			ret = nla_put_u8(skb, reconfig_attr,
					 *((u8 *)reconfig_data));
			break;
		default:
			BUG();
		}

		if (ret < 0)
			goto free_skb;
	}

	genlmsg_end(skb, msg_header);

	ret = tcmu_init_genl_cmd_reply(udev, cmd);
	if (ret) {
		nlmsg_free(skb);
		return ret;
	}

	ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
				TCMU_MCGRP_CONFIG, GFP_KERNEL);
	/* We don't care if no one is listening */
	if (ret == -ESRCH)
		ret = 0;
	if (!ret)
		ret = tcmu_wait_genl_cmd_reply(udev);

	return ret;
free_skb:
	nlmsg_free(skb);
	return ret;
}
static int tcmu_update_uio_info(struct tcmu_dev *udev)
{
	struct tcmu_hba *hba = udev->hba->hba_ptr;
	struct uio_info *info;
	size_t size, used;
	char *str;

	info = &udev->uio_info;
	size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
			udev->dev_config);
	size += 1; /* for \0 */
	str = kmalloc(size, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);
	if (udev->dev_config[0])
		snprintf(str + used, size - used, "/%s", udev->dev_config);

	/* If the old string exists, free it */
	kfree(info->name);
	info->name = str;

	return 0;
}
static int tcmu_configure_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	struct uio_info *info;
	struct tcmu_mailbox *mb;
	int ret = 0;

	ret = tcmu_update_uio_info(udev);
	if (ret)
		return ret;

	info = &udev->uio_info;

	udev->mb_addr = vzalloc(CMDR_SIZE);
	if (!udev->mb_addr) {
		ret = -ENOMEM;
		goto err_vzalloc;
	}

	/* mailbox fits in first part of CMDR space */
	udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
	udev->data_off = CMDR_SIZE;
	udev->data_size = DATA_SIZE;
	udev->dbi_thresh = 0; /* Default in Idle state */
	udev->waiting_global = false;

	/* Initialise the mailbox of the ring buffer */
	mb = udev->mb_addr;
	mb->version = TCMU_MAILBOX_VERSION;
	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
	mb->cmdr_off = CMDR_OFF;
	mb->cmdr_size = udev->cmdr_size;

	WARN_ON(!PAGE_ALIGNED(udev->data_off));
	WARN_ON(udev->data_size % PAGE_SIZE);
	WARN_ON(udev->data_size % DATA_BLOCK_SIZE);

	info->version = __stringify(TCMU_MAILBOX_VERSION);

	info->mem[0].name = "tcm-user command & data buffer";
	info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
	info->mem[0].size = TCMU_RING_SIZE;
	info->mem[0].memtype = UIO_MEM_NONE;

	info->irqcontrol = tcmu_irqcontrol;
	info->irq = UIO_IRQ_CUSTOM;

	info->mmap = tcmu_mmap;
	info->open = tcmu_open;
	info->release = tcmu_release;

	ret = uio_register_device(tcmu_root_device, info);
	if (ret)
		goto err_register;

	/* User can set hw_block_size before enabling the device */
	if (dev->dev_attrib.hw_block_size == 0)
		dev->dev_attrib.hw_block_size = 512;
	/* Other attributes can be configured in userspace */
	if (!dev->dev_attrib.hw_max_sectors)
		dev->dev_attrib.hw_max_sectors = 128;
	if (!dev->dev_attrib.emulate_write_cache)
		dev->dev_attrib.emulate_write_cache = 0;
	dev->dev_attrib.hw_queue_depth = 128;

	/* If user didn't explicitly disable netlink reply support, use
	 * module scope setting.
	 */
	if (udev->nl_reply_supported >= 0)
		udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;

	/*
	 * Get a ref in case userspace does a close on the uio device before
	 * LIO has initiated tcmu_free_device.
	 */
	kref_get(&udev->kref);

	ret = tcmu_netlink_event(udev, TCMU_CMD_ADDED_DEVICE, 0, NULL);
	if (ret)
		goto err_netlink;

	mutex_lock(&root_udev_mutex);
	list_add(&udev->node, &root_udev);
	mutex_unlock(&root_udev_mutex);

	return 0;

err_netlink:
	kref_put(&udev->kref, tcmu_dev_kref_release);
	uio_unregister_device(&udev->uio_info);
err_register:
	vfree(udev->mb_addr);
	udev->mb_addr = NULL;
err_vzalloc:
	kfree(info->name);
	info->name = NULL;

	return ret;
}
static bool tcmu_dev_configured(struct tcmu_dev *udev)
{
	return udev->uio_info.uio_dev ? true : false;
}

static void tcmu_free_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	/* release ref from init */
	kref_put(&udev->kref, tcmu_dev_kref_release);
}

static void tcmu_destroy_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	del_timer_sync(&udev->timeout);

	mutex_lock(&root_udev_mutex);
	list_del(&udev->node);
	mutex_unlock(&root_udev_mutex);

	tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);

	uio_unregister_device(&udev->uio_info);

	/* release ref from configure */
	kref_put(&udev->kref, tcmu_dev_kref_release);
}
enum {
	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
	Opt_nl_reply_supported, Opt_err,
};

static match_table_t tokens = {
	{Opt_dev_config, "dev_config=%s"},
	{Opt_dev_size, "dev_size=%u"},
	{Opt_hw_block_size, "hw_block_size=%u"},
	{Opt_hw_max_sectors, "hw_max_sectors=%u"},
	{Opt_nl_reply_supported, "nl_reply_supported=%d"},
	{Opt_err, NULL}
};
static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
{
	unsigned long tmp_ul;
	char *arg_p;
	int ret;

	arg_p = match_strdup(arg);
	if (!arg_p)
		return -ENOMEM;

	ret = kstrtoul(arg_p, 0, &tmp_ul);
	kfree(arg_p);
	if (ret < 0) {
		pr_err("kstrtoul() failed for dev attrib\n");
		return ret;
	}
	if (!tmp_ul) {
		pr_err("dev attrib must be nonzero\n");
		return -EINVAL;
	}
	*dev_attrib = tmp_ul;
	return 0;
}
static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	char *orig, *ptr, *opts, *arg_p;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_dev_config:
			if (match_strlcpy(udev->dev_config, &args[0],
					  TCMU_CONFIG_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
			break;
		case Opt_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
			kfree(arg_p);
			if (ret < 0)
				pr_err("kstrtoul() failed for dev_size=\n");
			break;
		case Opt_hw_block_size:
			ret = tcmu_set_dev_attrib(&args[0],
					&(dev->dev_attrib.hw_block_size));
			break;
		case Opt_hw_max_sectors:
			ret = tcmu_set_dev_attrib(&args[0],
					&(dev->dev_attrib.hw_max_sectors));
			break;
		case Opt_nl_reply_supported:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
			kfree(arg_p);
			if (ret < 0)
				pr_err("kstrtoint() failed for nl_reply_supported=\n");
			break;
		default:
			break;
		}

		if (ret)
			break;
	}

	kfree(orig);
	return (!ret) ? count : ret;
}
static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "Config: %s ",
		     udev->dev_config[0] ? udev->dev_config : "NULL");
	bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);

	return bl;
}
static sector_t tcmu_get_blocks(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	return div_u64(udev->dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}

static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
	return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
}
static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
}

static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
				       size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = container_of(da->da_dev,
					struct tcmu_dev, se_dev);
	u32 val;
	int ret;

	if (da->da_dev->export_count) {
		pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
		return -EINVAL;
	}

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	udev->cmd_time_out = val * MSEC_PER_SEC;
	return count;
}
CONFIGFS_ATTR(tcmu_, cmd_time_out);
static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
}

static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
				     size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	int ret, len;

	len = strcspn(page, "\n");
	if (!len || len > TCMU_CONFIG_LEN - 1)
		return -EINVAL;

	/* Check if device has been configured before */
	if (tcmu_dev_configured(udev)) {
		ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
					 TCMU_ATTR_DEV_CFG, page);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
		strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);

		ret = tcmu_update_uio_info(udev);
		if (ret)
			return ret;
		return count;
	}
	strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);

	return count;
}
CONFIGFS_ATTR(tcmu_, dev_config);
static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size);
}

static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
				   size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u64 val;
	int ret;

	ret = kstrtou64(page, 0, &val);
	if (ret < 0)
		return ret;

	/* Check if device has been configured before */
	if (tcmu_dev_configured(udev)) {
		ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
					 TCMU_ATTR_DEV_SIZE, &val);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
	}
	udev->dev_size = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, dev_size);
static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
					    char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
}

static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	s8 val;
	int ret;

	ret = kstrtos8(page, 0, &val);
	if (ret < 0)
		return ret;

	udev->nl_reply_supported = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, nl_reply_supported);
static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
					     char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);

	return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
}

static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
					      const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u8 val;
	int ret;

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	/* Check if device has been configured before */
	if (tcmu_dev_configured(udev)) {
		ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
					 TCMU_ATTR_WRITECACHE, &val);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
	}

	da->emulate_write_cache = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, emulate_write_cache);
static struct configfs_attribute *tcmu_attrib_attrs[] = {
	&tcmu_attr_cmd_time_out,
	&tcmu_attr_dev_config,
	&tcmu_attr_dev_size,
	&tcmu_attr_emulate_write_cache,
	&tcmu_attr_nl_reply_supported,
	NULL,
};

static struct configfs_attribute **tcmu_attrs;
static struct target_backend_ops tcmu_ops = {
	.name			= "user",
	.owner			= THIS_MODULE,
	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
	.attach_hba		= tcmu_attach_hba,
	.detach_hba		= tcmu_detach_hba,
	.alloc_device		= tcmu_alloc_device,
	.configure_device	= tcmu_configure_device,
	.destroy_device		= tcmu_destroy_device,
	.free_device		= tcmu_free_device,
	.parse_cdb		= tcmu_parse_cdb,
	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= tcmu_get_blocks,
	.tb_dev_attrib_attrs	= NULL,
};
static int unmap_thread_fn(void *data)
{
	struct tcmu_dev *udev;
	loff_t off;
	uint32_t start, end, block;
	struct page *page;
	int i;

	while (!kthread_should_stop()) {
		DEFINE_WAIT(__wait);

		prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE);
		schedule();
		finish_wait(&unmap_wait, &__wait);

		if (kthread_should_stop())
			break;

		mutex_lock(&root_udev_mutex);
		list_for_each_entry(udev, &root_udev, node) {
			mutex_lock(&udev->cmdr_lock);

			/* Try to complete the finished commands first */
			tcmu_handle_completions(udev);

			/* Skip the udevs waiting the global pool or in idle */
			if (udev->waiting_global || !udev->dbi_thresh) {
				mutex_unlock(&udev->cmdr_lock);
				continue;
			}

			end = udev->dbi_max + 1;
			block = find_last_bit(udev->data_bitmap, end);
			if (block == udev->dbi_max) {
				/*
				 * The last bit is dbi_max, so there is
				 * no need to shrink any blocks.
				 */
				mutex_unlock(&udev->cmdr_lock);
				continue;
			} else if (block == end) {
				/* The current udev will goto idle state */
				udev->dbi_thresh = start = 0;
				udev->dbi_max = 0;
			} else {
				udev->dbi_thresh = start = block + 1;
				udev->dbi_max = block;
			}

			/* Here will truncate the data area from off */
			off = udev->data_off + start * DATA_BLOCK_SIZE;
			unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);

			/* Release the block pages */
			for (i = start; i < end; i++) {
				page = radix_tree_delete(&udev->data_blocks, i);
				if (page) {
					__free_page(page);
					atomic_dec(&global_db_count);
				}
			}
			mutex_unlock(&udev->cmdr_lock);
		}

		/*
		 * Try to wake up the udevs who are waiting
		 * for the global data pool.
		 */
		list_for_each_entry(udev, &root_udev, node) {
			if (udev->waiting_global)
				wake_up(&udev->wait_cmdr);
		}
		mutex_unlock(&root_udev_mutex);
	}

	return 0;
}
static int __init tcmu_module_init(void)
{
	int ret, i, k, len = 0;

	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);

	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
				sizeof(struct tcmu_cmd),
				__alignof__(struct tcmu_cmd),
				0, NULL);
	if (!tcmu_cmd_cache)
		return -ENOMEM;

	tcmu_root_device = root_device_register("tcm_user");
	if (IS_ERR(tcmu_root_device)) {
		ret = PTR_ERR(tcmu_root_device);
		goto out_free_cache;
	}

	ret = genl_register_family(&tcmu_genl_family);
	if (ret < 0)
		goto out_unreg_device;

	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
		len += sizeof(struct configfs_attribute *);
	}
	for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) {
		len += sizeof(struct configfs_attribute *);
	}
	len += sizeof(struct configfs_attribute *);

	tcmu_attrs = kzalloc(len, GFP_KERNEL);
	if (!tcmu_attrs) {
		ret = -ENOMEM;
		goto out_unreg_genl;
	}

	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
		tcmu_attrs[i] = passthrough_attrib_attrs[i];
	}
	for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) {
		tcmu_attrs[i] = tcmu_attrib_attrs[k];
		i++;
	}
	tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;

	ret = transport_backend_register(&tcmu_ops);
	if (ret)
		goto out_attrs;

	init_waitqueue_head(&unmap_wait);
	unmap_thread = kthread_run(unmap_thread_fn, NULL, "tcmu_unmap");
	if (IS_ERR(unmap_thread)) {
		ret = PTR_ERR(unmap_thread);
		goto out_unreg_transport;
	}

	return 0;

out_unreg_transport:
	target_backend_unregister(&tcmu_ops);
out_attrs:
	kfree(tcmu_attrs);
out_unreg_genl:
	genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
	root_device_unregister(tcmu_root_device);
out_free_cache:
	kmem_cache_destroy(tcmu_cmd_cache);

	return ret;
}
static void __exit tcmu_module_exit(void)
{
	kthread_stop(unmap_thread);
	target_backend_unregister(&tcmu_ops);
	kfree(tcmu_attrs);
	genl_unregister_family(&tcmu_genl_family);
	root_device_unregister(tcmu_root_device);
	kmem_cache_destroy(tcmu_cmd_cache);
}

MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
MODULE_LICENSE("GPL");

module_init(tcmu_module_init);
module_exit(tcmu_module_exit);