/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 * Copyright (C) 2017 Chinamobile, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/radix-tree.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>
/*
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This is to allow backends that
 * are too complex for in-kernel support to be possible.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap-ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */
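/*
 * For orientation only: a rough sketch (not compiled, not part of this
 * driver) of how a userspace handler might consume the ring after
 * mmap()ing the UIO device. Structure and helper names come from
 * include/uapi/linux/target_core_user.h; error handling, the data area
 * and the SCSI emulation itself are omitted.
 */
#if 0
static void example_drain_ring(struct tcmu_mailbox *mb, int uio_fd)
{
	char *base = (char *)mb;
	uint32_t done = 1;	/* any 4-byte write kicks the kernel */

	while (mb->cmd_tail != mb->cmd_head) {
		struct tcmu_cmd_entry *ent =
			(void *)(base + mb->cmdr_off + mb->cmd_tail);

		if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) {
			/*
			 * The CDB is at base + ent->req.cdb_off and the data
			 * is described by ent->req.iov[]. Execute the command,
			 * then fill in the response:
			 */
			ent->rsp.scsi_status = 0;	/* SAM_STAT_GOOD */
		}
		/* TCMU_OP_PAD entries are skipped over unchanged. */

		mb->cmd_tail = (mb->cmd_tail +
				tcmu_hdr_get_len(ent->hdr.len_op)) %
			       mb->cmdr_size;
	}

	write(uio_fd, &done, sizeof(done));
}
#endif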
#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

/* For cmd area, the size is fixed 8MB */
#define CMDR_SIZE (8 * 1024 * 1024)

/*
 * For data area, the block size is PAGE_SIZE and
 * the total size is 256K * PAGE_SIZE.
 */
#define DATA_BLOCK_SIZE PAGE_SIZE
#define DATA_BLOCK_SHIFT PAGE_SHIFT
#define DATA_BLOCK_BITS_DEF (256 * 1024)

#define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
#define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))
/*
 * Default number of global data blocks (512K * PAGE_SIZE) above which
 * the unmap (block reclaim) work is scheduled.
 */
#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)
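/*
 * Worked example, assuming 4K pages: one block is PAGE_SIZE = 4K, so
 * TCMU_MBS_TO_BLOCKS(1) == 256. The per-device default of
 * DATA_BLOCK_BITS_DEF (256 * 1024) blocks is therefore a 1 GiB data area,
 * and TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024) starts background reclaim
 * once roughly 2 GiB of data pages are in use across all devices.
 */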
static u8 tcmu_kern_cmd_reply_supported;

static struct device *tcmu_root_device;

#define TCMU_CONFIG_LEN 256

static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
static LIST_HEAD(tcmu_nl_cmd_list);
struct tcmu_dev;

struct tcmu_nl_cmd {
	/* wake up thread waiting for reply */
	struct completion complete;
	struct list_head nl_list;
	struct tcmu_dev *udev;
	int cmd;
	int status;
};

struct tcmu_dev {
	struct list_head node;
	struct kref kref;

	struct se_device se_dev;
	char *name;
	struct se_hba *hba;

#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
#define TCMU_DEV_BIT_BLOCKED 2
	unsigned long flags;

	struct uio_info uio_info;

	struct inode *inode;

	struct tcmu_mailbox *mb_addr;
	size_t dev_size;
	u32 cmdr_size;
	u32 cmdr_last_cleaned;
	/* Offset of data area from start of mb */
	/* Must add data_off and mb_addr to get the address */
	size_t data_off;
	size_t data_size;
	uint32_t max_blocks;
	size_t ring_size;

	struct mutex cmdr_lock;
	struct list_head cmdr_queue;

	uint32_t dbi_max;
	uint32_t dbi_thresh;
	unsigned long *data_bitmap;
	struct radix_tree_root data_blocks;

	struct idr commands;

	struct timer_list cmd_timer;
	unsigned int cmd_time_out;

	struct timer_list qfull_timer;
	int qfull_time_out;

	struct list_head timedout_entry;

	struct tcmu_nl_cmd curr_nl_cmd;

	char dev_config[TCMU_CONFIG_LEN];

	int nl_reply_supported;
};
#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)

#define CMDR_OFF sizeof(struct tcmu_mailbox)
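/*
 * Rough layout of the mmap()ed region as set up in tcmu_configure_device()
 * (a sketch; data area pages are only populated on demand):
 *
 *   0          CMDR_OFF                 CMDR_SIZE                   ring_size
 *   +----------+------------------------+----------------------------+
 *   | mailbox  | command ring           | data area                  |
 *   |          | (cmdr_size)            | (max_blocks * BLOCK_SIZE)  |
 *   +----------+------------------------+----------------------------+
 *
 * where data_off == CMDR_SIZE and offsets in command entries are relative
 * to the start of the mailbox.
 */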
struct tcmu_cmd {
	struct se_cmd *se_cmd;
	struct tcmu_dev *tcmu_dev;
	struct list_head cmdr_queue_entry;

	uint16_t cmd_id;

	/* Can't use se_cmd when cleaning up expired cmds, because if
	   cmd has been completed then accessing se_cmd is off limits */
	uint32_t dbi_cnt;
	uint32_t dbi_cur;
	uint32_t *dbi;

	unsigned long deadline;

#define TCMU_CMD_BIT_EXPIRED 0
	unsigned long flags;
};
/*
 * To avoid deadlock the mutex lock order should always be:
 *
 * mutex_lock(&root_udev_mutex);
 * ...
 * mutex_lock(&tcmu_dev->cmdr_lock);
 * mutex_unlock(&tcmu_dev->cmdr_lock);
 * ...
 * mutex_unlock(&root_udev_mutex);
 */
static DEFINE_MUTEX(root_udev_mutex);
static LIST_HEAD(root_udev);

static DEFINE_SPINLOCK(timed_out_udevs_lock);
static LIST_HEAD(timed_out_udevs);
static struct kmem_cache *tcmu_cmd_cache;

static atomic_t global_db_count = ATOMIC_INIT(0);
static struct delayed_work tcmu_unmap_work;
static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF;
static int tcmu_set_global_max_data_area(const char *str,
					 const struct kernel_param *kp)
{
	int ret, max_area_mb;

	ret = kstrtoint(str, 10, &max_area_mb);
	if (ret)
		return -EINVAL;

	if (max_area_mb <= 0) {
		pr_err("global_max_data_area must be larger than 0.\n");
		return -EINVAL;
	}

	tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb);
	if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
		schedule_delayed_work(&tcmu_unmap_work, 0);
	else
		cancel_delayed_work_sync(&tcmu_unmap_work);

	return 0;
}
static int tcmu_get_global_max_data_area(char *buffer,
					 const struct kernel_param *kp)
{
	return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
}

static const struct kernel_param_ops tcmu_global_max_data_area_op = {
	.set = tcmu_set_global_max_data_area,
	.get = tcmu_get_global_max_data_area,
};

module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
		S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(global_max_data_area_mb,
		 "Max MBs allowed to be allocated to all the tcmu device's "
		 "data areas.");
/* multicast group */
enum tcmu_multicast_groups {
	TCMU_MCGRP_CONFIG,
};

static const struct genl_multicast_group tcmu_mcgrps[] = {
	[TCMU_MCGRP_CONFIG] = { .name = "config", },
};

static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
	[TCMU_ATTR_DEVICE]	= { .type = NLA_STRING },
	[TCMU_ATTR_MINOR]	= { .type = NLA_U32 },
	[TCMU_ATTR_CMD_STATUS]	= { .type = NLA_S32 },
	[TCMU_ATTR_DEVICE_ID]	= { .type = NLA_U32 },
	[TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
};
275 static int tcmu_genl_cmd_done(struct genl_info
*info
, int completed_cmd
)
277 struct tcmu_dev
*udev
= NULL
;
278 struct tcmu_nl_cmd
*nl_cmd
;
279 int dev_id
, rc
, ret
= 0;
281 if (!info
->attrs
[TCMU_ATTR_CMD_STATUS
] ||
282 !info
->attrs
[TCMU_ATTR_DEVICE_ID
]) {
283 printk(KERN_ERR
"TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
287 dev_id
= nla_get_u32(info
->attrs
[TCMU_ATTR_DEVICE_ID
]);
288 rc
= nla_get_s32(info
->attrs
[TCMU_ATTR_CMD_STATUS
]);
290 mutex_lock(&tcmu_nl_cmd_mutex
);
291 list_for_each_entry(nl_cmd
, &tcmu_nl_cmd_list
, nl_list
) {
292 if (nl_cmd
->udev
->se_dev
.dev_index
== dev_id
) {
		pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
		       completed_cmd, rc, dev_id);
304 list_del(&nl_cmd
->nl_list
);
306 pr_debug("%s genl cmd done got id %d curr %d done %d rc %d\n",
307 udev
->name
, dev_id
, nl_cmd
->cmd
, completed_cmd
, rc
);
309 if (nl_cmd
->cmd
!= completed_cmd
) {
310 pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
311 udev
->name
, completed_cmd
, nl_cmd
->cmd
);
317 complete(&nl_cmd
->complete
);
319 mutex_unlock(&tcmu_nl_cmd_mutex
);
323 static int tcmu_genl_rm_dev_done(struct sk_buff
*skb
, struct genl_info
*info
)
325 return tcmu_genl_cmd_done(info
, TCMU_CMD_REMOVED_DEVICE
);
328 static int tcmu_genl_add_dev_done(struct sk_buff
*skb
, struct genl_info
*info
)
330 return tcmu_genl_cmd_done(info
, TCMU_CMD_ADDED_DEVICE
);
333 static int tcmu_genl_reconfig_dev_done(struct sk_buff
*skb
,
334 struct genl_info
*info
)
336 return tcmu_genl_cmd_done(info
, TCMU_CMD_RECONFIG_DEVICE
);
339 static int tcmu_genl_set_features(struct sk_buff
*skb
, struct genl_info
*info
)
341 if (info
->attrs
[TCMU_ATTR_SUPP_KERN_CMD_REPLY
]) {
342 tcmu_kern_cmd_reply_supported
=
343 nla_get_u8(info
->attrs
[TCMU_ATTR_SUPP_KERN_CMD_REPLY
]);
344 printk(KERN_INFO
"tcmu daemon: command reply support %u.\n",
345 tcmu_kern_cmd_reply_supported
);
351 static const struct genl_ops tcmu_genl_ops
[] = {
353 .cmd
= TCMU_CMD_SET_FEATURES
,
354 .flags
= GENL_ADMIN_PERM
,
355 .policy
= tcmu_attr_policy
,
356 .doit
= tcmu_genl_set_features
,
359 .cmd
= TCMU_CMD_ADDED_DEVICE_DONE
,
360 .flags
= GENL_ADMIN_PERM
,
361 .policy
= tcmu_attr_policy
,
362 .doit
= tcmu_genl_add_dev_done
,
365 .cmd
= TCMU_CMD_REMOVED_DEVICE_DONE
,
366 .flags
= GENL_ADMIN_PERM
,
367 .policy
= tcmu_attr_policy
,
368 .doit
= tcmu_genl_rm_dev_done
,
371 .cmd
= TCMU_CMD_RECONFIG_DEVICE_DONE
,
372 .flags
= GENL_ADMIN_PERM
,
373 .policy
= tcmu_attr_policy
,
374 .doit
= tcmu_genl_reconfig_dev_done
,
378 /* Our generic netlink family */
379 static struct genl_family tcmu_genl_family __ro_after_init
= {
380 .module
= THIS_MODULE
,
384 .maxattr
= TCMU_ATTR_MAX
,
385 .mcgrps
= tcmu_mcgrps
,
386 .n_mcgrps
= ARRAY_SIZE(tcmu_mcgrps
),
388 .ops
= tcmu_genl_ops
,
389 .n_ops
= ARRAY_SIZE(tcmu_genl_ops
),
#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
#define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
#define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
#define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])
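/*
 * Illustrative only: how the dbi[] array and the dbi_cur cursor cooperate.
 * A command spanning three data blocks records each block index as space is
 * allocated, then rewinds and replays the same indices when data is copied
 * in or out (the index values below are made up).
 */
#if 0
	tcmu_cmd_reset_dbi_cur(cmd);
	tcmu_cmd_set_dbi(cmd, 7);	/* dbi[0] = 7 */
	tcmu_cmd_set_dbi(cmd, 8);	/* dbi[1] = 8 */
	tcmu_cmd_set_dbi(cmd, 12);	/* dbi[2] = 12; blocks need not be contiguous */

	tcmu_cmd_reset_dbi_cur(cmd);	/* rewind before scatter/gather */
	first = tcmu_cmd_get_dbi(cmd);	/* 7, then 8, then 12 on later calls */
#endif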
397 static void tcmu_cmd_free_data(struct tcmu_cmd
*tcmu_cmd
, uint32_t len
)
399 struct tcmu_dev
*udev
= tcmu_cmd
->tcmu_dev
;
402 for (i
= 0; i
< len
; i
++)
403 clear_bit(tcmu_cmd
->dbi
[i
], udev
->data_bitmap
);
406 static inline bool tcmu_get_empty_block(struct tcmu_dev
*udev
,
407 struct tcmu_cmd
*tcmu_cmd
)
412 dbi
= find_first_zero_bit(udev
->data_bitmap
, udev
->dbi_thresh
);
413 if (dbi
== udev
->dbi_thresh
)
416 page
= radix_tree_lookup(&udev
->data_blocks
, dbi
);
418 if (atomic_add_return(1, &global_db_count
) >
419 tcmu_global_max_blocks
)
420 schedule_delayed_work(&tcmu_unmap_work
, 0);
422 /* try to get new page from the mm */
423 page
= alloc_page(GFP_KERNEL
);
427 ret
= radix_tree_insert(&udev
->data_blocks
, dbi
, page
);
432 if (dbi
> udev
->dbi_max
)
435 set_bit(dbi
, udev
->data_bitmap
);
436 tcmu_cmd_set_dbi(tcmu_cmd
, dbi
);
442 atomic_dec(&global_db_count
);
446 static bool tcmu_get_empty_blocks(struct tcmu_dev
*udev
,
447 struct tcmu_cmd
*tcmu_cmd
)
451 for (i
= tcmu_cmd
->dbi_cur
; i
< tcmu_cmd
->dbi_cnt
; i
++) {
452 if (!tcmu_get_empty_block(udev
, tcmu_cmd
))
458 static inline struct page
*
459 tcmu_get_block_page(struct tcmu_dev
*udev
, uint32_t dbi
)
461 return radix_tree_lookup(&udev
->data_blocks
, dbi
);
464 static inline void tcmu_free_cmd(struct tcmu_cmd
*tcmu_cmd
)
466 kfree(tcmu_cmd
->dbi
);
467 kmem_cache_free(tcmu_cmd_cache
, tcmu_cmd
);
470 static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd
*tcmu_cmd
)
472 struct se_cmd
*se_cmd
= tcmu_cmd
->se_cmd
;
473 size_t data_length
= round_up(se_cmd
->data_length
, DATA_BLOCK_SIZE
);
475 if (se_cmd
->se_cmd_flags
& SCF_BIDI
) {
476 BUG_ON(!(se_cmd
->t_bidi_data_sg
&& se_cmd
->t_bidi_data_nents
));
477 data_length
+= round_up(se_cmd
->t_bidi_data_sg
->length
,
484 static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd
*tcmu_cmd
)
486 size_t data_length
= tcmu_cmd_get_data_length(tcmu_cmd
);
488 return data_length
/ DATA_BLOCK_SIZE
;
491 static struct tcmu_cmd
*tcmu_alloc_cmd(struct se_cmd
*se_cmd
)
493 struct se_device
*se_dev
= se_cmd
->se_dev
;
494 struct tcmu_dev
*udev
= TCMU_DEV(se_dev
);
495 struct tcmu_cmd
*tcmu_cmd
;
497 tcmu_cmd
= kmem_cache_zalloc(tcmu_cmd_cache
, GFP_KERNEL
);
501 INIT_LIST_HEAD(&tcmu_cmd
->cmdr_queue_entry
);
502 tcmu_cmd
->se_cmd
= se_cmd
;
503 tcmu_cmd
->tcmu_dev
= udev
;
505 tcmu_cmd_reset_dbi_cur(tcmu_cmd
);
506 tcmu_cmd
->dbi_cnt
= tcmu_cmd_get_block_cnt(tcmu_cmd
);
507 tcmu_cmd
->dbi
= kcalloc(tcmu_cmd
->dbi_cnt
, sizeof(uint32_t),
509 if (!tcmu_cmd
->dbi
) {
510 kmem_cache_free(tcmu_cmd_cache
, tcmu_cmd
);
517 static inline void tcmu_flush_dcache_range(void *vaddr
, size_t size
)
519 unsigned long offset
= offset_in_page(vaddr
);
520 void *start
= vaddr
- offset
;
522 size
= round_up(size
+offset
, PAGE_SIZE
);
525 flush_dcache_page(virt_to_page(start
));
/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
	int diff = head - tail;

	if (diff >= 0)
		return diff;
	else
		return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* Keep 1 byte unused or we can't tell full from empty */
	return (size - spc_used(head, tail, size) - 1);
}

static inline size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

static inline void new_iov(struct iovec **iov, int *iov_cnt)
{
	struct iovec *iovec;

	if (*iov_cnt != 0)
		(*iov)++;
	(*iov_cnt)++;

	iovec = *iov;
	memset(iovec, 0, sizeof(struct iovec));
}

#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
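/*
 * Worked example for UPDATE_HEAD(): suppose, for illustration, size == 65536,
 * head == 65024 and a 512-byte PAD entry was just written. The new head is
 * ((65024 % 65536) + 512) % 65536 == 0, i.e. the ring wraps back to the
 * start. smp_store_release() ensures the entry contents are visible before
 * the updated head is.
 */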
570 /* offset is relative to mb_addr */
571 static inline size_t get_block_offset_user(struct tcmu_dev
*dev
,
572 int dbi
, int remaining
)
574 return dev
->data_off
+ dbi
* DATA_BLOCK_SIZE
+
575 DATA_BLOCK_SIZE
- remaining
;
578 static inline size_t iov_tail(struct iovec
*iov
)
580 return (size_t)iov
->iov_base
+ iov
->iov_len
;
583 static void scatter_data_area(struct tcmu_dev
*udev
,
584 struct tcmu_cmd
*tcmu_cmd
, struct scatterlist
*data_sg
,
585 unsigned int data_nents
, struct iovec
**iov
,
586 int *iov_cnt
, bool copy_data
)
589 int block_remaining
= 0;
590 void *from
, *to
= NULL
;
591 size_t copy_bytes
, to_offset
, offset
;
592 struct scatterlist
*sg
;
595 for_each_sg(data_sg
, sg
, data_nents
, i
) {
596 int sg_remaining
= sg
->length
;
597 from
= kmap_atomic(sg_page(sg
)) + sg
->offset
;
598 while (sg_remaining
> 0) {
599 if (block_remaining
== 0) {
603 block_remaining
= DATA_BLOCK_SIZE
;
604 dbi
= tcmu_cmd_get_dbi(tcmu_cmd
);
605 page
= tcmu_get_block_page(udev
, dbi
);
606 to
= kmap_atomic(page
);
			 * Convert to virtual offset of the ring data area.
612 to_offset
= get_block_offset_user(udev
, dbi
,
616 * The following code will gather and map the blocks
617 * to the same iovec when the blocks are all next to
620 copy_bytes
= min_t(size_t, sg_remaining
,
623 to_offset
== iov_tail(*iov
)) {
625 * Will append to the current iovec, because
626 * the current block page is next to the
629 (*iov
)->iov_len
+= copy_bytes
;
632 * Will allocate a new iovec because we are
633 * first time here or the current block page
634 * is not next to the previous one.
636 new_iov(iov
, iov_cnt
);
637 (*iov
)->iov_base
= (void __user
*)to_offset
;
638 (*iov
)->iov_len
= copy_bytes
;
642 offset
= DATA_BLOCK_SIZE
- block_remaining
;
644 from
+ sg
->length
- sg_remaining
,
646 tcmu_flush_dcache_range(to
, copy_bytes
);
649 sg_remaining
-= copy_bytes
;
650 block_remaining
-= copy_bytes
;
652 kunmap_atomic(from
- sg
->offset
);
659 static void gather_data_area(struct tcmu_dev
*udev
, struct tcmu_cmd
*cmd
,
662 struct se_cmd
*se_cmd
= cmd
->se_cmd
;
664 int block_remaining
= 0;
665 void *from
= NULL
, *to
;
666 size_t copy_bytes
, offset
;
667 struct scatterlist
*sg
, *data_sg
;
669 unsigned int data_nents
;
673 data_sg
= se_cmd
->t_data_sg
;
674 data_nents
= se_cmd
->t_data_nents
;
	 * For bidi commands the first count blocks hold the Data-Out
	 * buffer, so those blocks must be skipped before gathering
	 * the Data-In buffer.
682 count
= DIV_ROUND_UP(se_cmd
->data_length
, DATA_BLOCK_SIZE
);
684 data_sg
= se_cmd
->t_bidi_data_sg
;
685 data_nents
= se_cmd
->t_bidi_data_nents
;
688 tcmu_cmd_set_dbi_cur(cmd
, count
);
690 for_each_sg(data_sg
, sg
, data_nents
, i
) {
691 int sg_remaining
= sg
->length
;
692 to
= kmap_atomic(sg_page(sg
)) + sg
->offset
;
693 while (sg_remaining
> 0) {
694 if (block_remaining
== 0) {
698 block_remaining
= DATA_BLOCK_SIZE
;
699 dbi
= tcmu_cmd_get_dbi(cmd
);
700 page
= tcmu_get_block_page(udev
, dbi
);
701 from
= kmap_atomic(page
);
703 copy_bytes
= min_t(size_t, sg_remaining
,
705 offset
= DATA_BLOCK_SIZE
- block_remaining
;
706 tcmu_flush_dcache_range(from
, copy_bytes
);
707 memcpy(to
+ sg
->length
- sg_remaining
, from
+ offset
,
710 sg_remaining
-= copy_bytes
;
711 block_remaining
-= copy_bytes
;
713 kunmap_atomic(to
- sg
->offset
);
719 static inline size_t spc_bitmap_free(unsigned long *bitmap
, uint32_t thresh
)
721 return thresh
- bitmap_weight(bitmap
, thresh
);
725 * We can't queue a command until we have space available on the cmd ring *and*
726 * space available on the data area.
728 * Called with ring lock held.
730 static bool is_ring_space_avail(struct tcmu_dev
*udev
, struct tcmu_cmd
*cmd
,
731 size_t cmd_size
, size_t data_needed
)
733 struct tcmu_mailbox
*mb
= udev
->mb_addr
;
734 uint32_t blocks_needed
= (data_needed
+ DATA_BLOCK_SIZE
- 1)
736 size_t space
, cmd_needed
;
739 tcmu_flush_dcache_range(mb
, sizeof(*mb
));
741 cmd_head
= mb
->cmd_head
% udev
->cmdr_size
; /* UAM */
744 * If cmd end-of-ring space is too small then we need space for a NOP plus
745 * original cmd - cmds are internally contiguous.
747 if (head_to_end(cmd_head
, udev
->cmdr_size
) >= cmd_size
)
748 cmd_needed
= cmd_size
;
750 cmd_needed
= cmd_size
+ head_to_end(cmd_head
, udev
->cmdr_size
);
752 space
= spc_free(cmd_head
, udev
->cmdr_last_cleaned
, udev
->cmdr_size
);
753 if (space
< cmd_needed
) {
754 pr_debug("no cmd space: %u %u %u\n", cmd_head
,
755 udev
->cmdr_last_cleaned
, udev
->cmdr_size
);
759 /* try to check and get the data blocks as needed */
760 space
= spc_bitmap_free(udev
->data_bitmap
, udev
->dbi_thresh
);
761 if ((space
* DATA_BLOCK_SIZE
) < data_needed
) {
762 unsigned long blocks_left
=
763 (udev
->max_blocks
- udev
->dbi_thresh
) + space
;
765 if (blocks_left
< blocks_needed
) {
766 pr_debug("no data space: only %lu available, but ask for %zu\n",
767 blocks_left
* DATA_BLOCK_SIZE
,
772 udev
->dbi_thresh
+= blocks_needed
;
773 if (udev
->dbi_thresh
> udev
->max_blocks
)
774 udev
->dbi_thresh
= udev
->max_blocks
;
777 return tcmu_get_empty_blocks(udev
, cmd
);
780 static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt
)
782 return max(offsetof(struct tcmu_cmd_entry
, req
.iov
[iov_cnt
]),
783 sizeof(struct tcmu_cmd_entry
));
786 static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd
*tcmu_cmd
,
787 size_t base_command_size
)
789 struct se_cmd
*se_cmd
= tcmu_cmd
->se_cmd
;
792 command_size
= base_command_size
+
793 round_up(scsi_command_size(se_cmd
->t_task_cdb
),
796 WARN_ON(command_size
& (TCMU_OP_ALIGN_SIZE
-1));
801 static int tcmu_setup_cmd_timer(struct tcmu_cmd
*tcmu_cmd
, unsigned int tmo
,
802 struct timer_list
*timer
)
804 struct tcmu_dev
*udev
= tcmu_cmd
->tcmu_dev
;
807 if (tcmu_cmd
->cmd_id
)
810 cmd_id
= idr_alloc(&udev
->commands
, tcmu_cmd
, 1, USHRT_MAX
, GFP_NOWAIT
);
812 pr_err("tcmu: Could not allocate cmd id.\n");
815 tcmu_cmd
->cmd_id
= cmd_id
;
817 pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd
->cmd_id
,
818 udev
->name
, tmo
/ MSEC_PER_SEC
);
824 tcmu_cmd
->deadline
= round_jiffies_up(jiffies
+ msecs_to_jiffies(tmo
));
825 mod_timer(timer
, tcmu_cmd
->deadline
);
829 static int add_to_cmdr_queue(struct tcmu_cmd
*tcmu_cmd
)
831 struct tcmu_dev
*udev
= tcmu_cmd
->tcmu_dev
;
836 * For backwards compat if qfull_time_out is not set use
837 * cmd_time_out and if that's not set use the default time out.
839 if (!udev
->qfull_time_out
)
841 else if (udev
->qfull_time_out
> 0)
842 tmo
= udev
->qfull_time_out
;
843 else if (udev
->cmd_time_out
)
844 tmo
= udev
->cmd_time_out
;
848 ret
= tcmu_setup_cmd_timer(tcmu_cmd
, tmo
, &udev
->qfull_timer
);
852 list_add_tail(&tcmu_cmd
->cmdr_queue_entry
, &udev
->cmdr_queue
);
853 pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
854 tcmu_cmd
->cmd_id
, udev
->name
);
/**
 * queue_cmd_ring - queue cmd to ring or internally
 * @tcmu_cmd: cmd to queue
 * @scsi_err: TCM error code if failure (-1) returned.
 *
 * Returns:
 * -1 we cannot queue internally or to the ring.
 *  0 success
 *  1 internally queued to wait for ring memory to free.
 */
static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
870 struct tcmu_dev
*udev
= tcmu_cmd
->tcmu_dev
;
871 struct se_cmd
*se_cmd
= tcmu_cmd
->se_cmd
;
872 size_t base_command_size
, command_size
;
873 struct tcmu_mailbox
*mb
;
874 struct tcmu_cmd_entry
*entry
;
879 bool copy_to_data_area
;
880 size_t data_length
= tcmu_cmd_get_data_length(tcmu_cmd
);
882 *scsi_err
= TCM_NO_SENSE
;
884 if (test_bit(TCMU_DEV_BIT_BLOCKED
, &udev
->flags
)) {
885 *scsi_err
= TCM_LUN_BUSY
;
889 if (test_bit(TCMU_DEV_BIT_BROKEN
, &udev
->flags
)) {
890 *scsi_err
= TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE
;
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 *
	 * We prepare as many iovs as possible for potential uses here,
	 * because it's expensive to tell how many regions are freed in
	 * the bitmap & global data pool, as the size calculated here
	 * will only be used to do the checks.
	 *
	 * The size will be recalculated later as actually needed to save
	 * cmd area memory.
906 base_command_size
= tcmu_cmd_get_base_cmd_size(tcmu_cmd
->dbi_cnt
);
907 command_size
= tcmu_cmd_get_cmd_size(tcmu_cmd
, base_command_size
);
909 if (!list_empty(&udev
->cmdr_queue
))
913 cmd_head
= mb
->cmd_head
% udev
->cmdr_size
; /* UAM */
914 if ((command_size
> (udev
->cmdr_size
/ 2)) ||
915 data_length
> udev
->data_size
) {
916 pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
917 "cmd ring/data area\n", command_size
, data_length
,
918 udev
->cmdr_size
, udev
->data_size
);
919 *scsi_err
= TCM_INVALID_CDB_FIELD
;
923 if (!is_ring_space_avail(udev
, tcmu_cmd
, command_size
, data_length
)) {
925 * Don't leave commands partially setup because the unmap
926 * thread might need the blocks to make forward progress.
928 tcmu_cmd_free_data(tcmu_cmd
, tcmu_cmd
->dbi_cur
);
929 tcmu_cmd_reset_dbi_cur(tcmu_cmd
);
933 /* Insert a PAD if end-of-ring space is too small */
934 if (head_to_end(cmd_head
, udev
->cmdr_size
) < command_size
) {
935 size_t pad_size
= head_to_end(cmd_head
, udev
->cmdr_size
);
937 entry
= (void *) mb
+ CMDR_OFF
+ cmd_head
;
938 tcmu_hdr_set_op(&entry
->hdr
.len_op
, TCMU_OP_PAD
);
939 tcmu_hdr_set_len(&entry
->hdr
.len_op
, pad_size
);
940 entry
->hdr
.cmd_id
= 0; /* not used for PAD */
941 entry
->hdr
.kflags
= 0;
942 entry
->hdr
.uflags
= 0;
943 tcmu_flush_dcache_range(entry
, sizeof(*entry
));
945 UPDATE_HEAD(mb
->cmd_head
, pad_size
, udev
->cmdr_size
);
946 tcmu_flush_dcache_range(mb
, sizeof(*mb
));
948 cmd_head
= mb
->cmd_head
% udev
->cmdr_size
; /* UAM */
949 WARN_ON(cmd_head
!= 0);
952 entry
= (void *) mb
+ CMDR_OFF
+ cmd_head
;
953 memset(entry
, 0, command_size
);
954 tcmu_hdr_set_op(&entry
->hdr
.len_op
, TCMU_OP_CMD
);
956 /* Handle allocating space from the data area */
957 tcmu_cmd_reset_dbi_cur(tcmu_cmd
);
958 iov
= &entry
->req
.iov
[0];
960 copy_to_data_area
= (se_cmd
->data_direction
== DMA_TO_DEVICE
961 || se_cmd
->se_cmd_flags
& SCF_BIDI
);
962 scatter_data_area(udev
, tcmu_cmd
, se_cmd
->t_data_sg
,
963 se_cmd
->t_data_nents
, &iov
, &iov_cnt
,
965 entry
->req
.iov_cnt
= iov_cnt
;
967 /* Handle BIDI commands */
969 if (se_cmd
->se_cmd_flags
& SCF_BIDI
) {
971 scatter_data_area(udev
, tcmu_cmd
, se_cmd
->t_bidi_data_sg
,
972 se_cmd
->t_bidi_data_nents
, &iov
, &iov_cnt
,
975 entry
->req
.iov_bidi_cnt
= iov_cnt
;
977 ret
= tcmu_setup_cmd_timer(tcmu_cmd
, udev
->cmd_time_out
,
980 tcmu_cmd_free_data(tcmu_cmd
, tcmu_cmd
->dbi_cnt
);
981 mutex_unlock(&udev
->cmdr_lock
);
983 *scsi_err
= TCM_OUT_OF_RESOURCES
;
986 entry
->hdr
.cmd_id
= tcmu_cmd
->cmd_id
;
	 * Recalculate the command's base size and size according
	 * to the actual needs
992 base_command_size
= tcmu_cmd_get_base_cmd_size(entry
->req
.iov_cnt
+
993 entry
->req
.iov_bidi_cnt
);
994 command_size
= tcmu_cmd_get_cmd_size(tcmu_cmd
, base_command_size
);
996 tcmu_hdr_set_len(&entry
->hdr
.len_op
, command_size
);
998 /* All offsets relative to mb_addr, not start of entry! */
999 cdb_off
= CMDR_OFF
+ cmd_head
+ base_command_size
;
1000 memcpy((void *) mb
+ cdb_off
, se_cmd
->t_task_cdb
, scsi_command_size(se_cmd
->t_task_cdb
));
1001 entry
->req
.cdb_off
= cdb_off
;
1002 tcmu_flush_dcache_range(entry
, sizeof(*entry
));
1004 UPDATE_HEAD(mb
->cmd_head
, command_size
, udev
->cmdr_size
);
1005 tcmu_flush_dcache_range(mb
, sizeof(*mb
));
1007 /* TODO: only if FLUSH and FUA? */
1008 uio_event_notify(&udev
->uio_info
);
1013 if (add_to_cmdr_queue(tcmu_cmd
)) {
1014 *scsi_err
= TCM_OUT_OF_RESOURCES
;
1021 static sense_reason_t
1022 tcmu_queue_cmd(struct se_cmd
*se_cmd
)
1024 struct se_device
*se_dev
= se_cmd
->se_dev
;
1025 struct tcmu_dev
*udev
= TCMU_DEV(se_dev
);
1026 struct tcmu_cmd
*tcmu_cmd
;
1027 sense_reason_t scsi_ret
;
1030 tcmu_cmd
= tcmu_alloc_cmd(se_cmd
);
1032 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE
;
1034 mutex_lock(&udev
->cmdr_lock
);
1035 ret
= queue_cmd_ring(tcmu_cmd
, &scsi_ret
);
1036 mutex_unlock(&udev
->cmdr_lock
);
1038 tcmu_free_cmd(tcmu_cmd
);
1042 static void tcmu_handle_completion(struct tcmu_cmd
*cmd
, struct tcmu_cmd_entry
*entry
)
1044 struct se_cmd
*se_cmd
= cmd
->se_cmd
;
1045 struct tcmu_dev
*udev
= cmd
->tcmu_dev
;
1048 * cmd has been completed already from timeout, just reclaim
1049 * data area space and free cmd
1051 if (test_bit(TCMU_CMD_BIT_EXPIRED
, &cmd
->flags
))
1054 tcmu_cmd_reset_dbi_cur(cmd
);
1056 if (entry
->hdr
.uflags
& TCMU_UFLAG_UNKNOWN_OP
) {
1057 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
1059 entry
->rsp
.scsi_status
= SAM_STAT_CHECK_CONDITION
;
1060 } else if (entry
->rsp
.scsi_status
== SAM_STAT_CHECK_CONDITION
) {
1061 transport_copy_sense_to_cmd(se_cmd
, entry
->rsp
.sense_buffer
);
1062 } else if (se_cmd
->se_cmd_flags
& SCF_BIDI
) {
1063 /* Get Data-In buffer before clean up */
1064 gather_data_area(udev
, cmd
, true);
1065 } else if (se_cmd
->data_direction
== DMA_FROM_DEVICE
) {
1066 gather_data_area(udev
, cmd
, false);
1067 } else if (se_cmd
->data_direction
== DMA_TO_DEVICE
) {
1069 } else if (se_cmd
->data_direction
!= DMA_NONE
) {
1070 pr_warn("TCMU: data direction was %d!\n",
1071 se_cmd
->data_direction
);
1074 target_complete_cmd(cmd
->se_cmd
, entry
->rsp
.scsi_status
);
1078 tcmu_cmd_free_data(cmd
, cmd
->dbi_cnt
);
1082 static unsigned int tcmu_handle_completions(struct tcmu_dev
*udev
)
1084 struct tcmu_mailbox
*mb
;
1087 if (test_bit(TCMU_DEV_BIT_BROKEN
, &udev
->flags
)) {
1088 pr_err("ring broken, not handling completions\n");
1093 tcmu_flush_dcache_range(mb
, sizeof(*mb
));
1095 while (udev
->cmdr_last_cleaned
!= READ_ONCE(mb
->cmd_tail
)) {
1097 struct tcmu_cmd_entry
*entry
= (void *) mb
+ CMDR_OFF
+ udev
->cmdr_last_cleaned
;
1098 struct tcmu_cmd
*cmd
;
1100 tcmu_flush_dcache_range(entry
, sizeof(*entry
));
1102 if (tcmu_hdr_get_op(entry
->hdr
.len_op
) == TCMU_OP_PAD
) {
1103 UPDATE_HEAD(udev
->cmdr_last_cleaned
,
1104 tcmu_hdr_get_len(entry
->hdr
.len_op
),
1108 WARN_ON(tcmu_hdr_get_op(entry
->hdr
.len_op
) != TCMU_OP_CMD
);
1110 cmd
= idr_remove(&udev
->commands
, entry
->hdr
.cmd_id
);
1112 pr_err("cmd_id %u not found, ring is broken\n",
1114 set_bit(TCMU_DEV_BIT_BROKEN
, &udev
->flags
);
1118 tcmu_handle_completion(cmd
, entry
);
1120 UPDATE_HEAD(udev
->cmdr_last_cleaned
,
1121 tcmu_hdr_get_len(entry
->hdr
.len_op
),
1127 if (mb
->cmd_tail
== mb
->cmd_head
) {
1128 /* no more pending commands */
1129 del_timer(&udev
->cmd_timer
);
1131 if (list_empty(&udev
->cmdr_queue
)) {
1133 * no more pending or waiting commands so try to
1134 * reclaim blocks if needed.
1136 if (atomic_read(&global_db_count
) >
1137 tcmu_global_max_blocks
)
1138 schedule_delayed_work(&tcmu_unmap_work
, 0);
1145 static int tcmu_check_expired_cmd(int id
, void *p
, void *data
)
1147 struct tcmu_cmd
*cmd
= p
;
1148 struct tcmu_dev
*udev
= cmd
->tcmu_dev
;
1150 struct se_cmd
*se_cmd
;
1153 if (test_bit(TCMU_CMD_BIT_EXPIRED
, &cmd
->flags
))
1156 if (!time_after(jiffies
, cmd
->deadline
))
1159 is_running
= list_empty(&cmd
->cmdr_queue_entry
);
1160 se_cmd
= cmd
->se_cmd
;
1164 * If cmd_time_out is disabled but qfull is set deadline
1165 * will only reflect the qfull timeout. Ignore it.
1167 if (!udev
->cmd_time_out
)
1170 set_bit(TCMU_CMD_BIT_EXPIRED
, &cmd
->flags
);
1172 * target_complete_cmd will translate this to LUN COMM FAILURE
1174 scsi_status
= SAM_STAT_CHECK_CONDITION
;
1176 list_del_init(&cmd
->cmdr_queue_entry
);
1178 idr_remove(&udev
->commands
, id
);
1180 scsi_status
= SAM_STAT_TASK_SET_FULL
;
1183 pr_debug("Timing out cmd %u on dev %s that is %s.\n",
1184 id
, udev
->name
, is_running
? "inflight" : "queued");
1186 target_complete_cmd(se_cmd
, scsi_status
);
1190 static void tcmu_device_timedout(struct tcmu_dev
*udev
)
1192 spin_lock(&timed_out_udevs_lock
);
1193 if (list_empty(&udev
->timedout_entry
))
1194 list_add_tail(&udev
->timedout_entry
, &timed_out_udevs
);
1195 spin_unlock(&timed_out_udevs_lock
);
1197 schedule_delayed_work(&tcmu_unmap_work
, 0);
1200 static void tcmu_cmd_timedout(struct timer_list
*t
)
1202 struct tcmu_dev
*udev
= from_timer(udev
, t
, cmd_timer
);
1204 pr_debug("%s cmd timeout has expired\n", udev
->name
);
1205 tcmu_device_timedout(udev
);
1208 static void tcmu_qfull_timedout(struct timer_list
*t
)
1210 struct tcmu_dev
*udev
= from_timer(udev
, t
, qfull_timer
);
1212 pr_debug("%s qfull timeout has expired\n", udev
->name
);
1213 tcmu_device_timedout(udev
);
1216 static int tcmu_attach_hba(struct se_hba
*hba
, u32 host_id
)
1218 struct tcmu_hba
*tcmu_hba
;
1220 tcmu_hba
= kzalloc(sizeof(struct tcmu_hba
), GFP_KERNEL
);
1224 tcmu_hba
->host_id
= host_id
;
1225 hba
->hba_ptr
= tcmu_hba
;
1230 static void tcmu_detach_hba(struct se_hba
*hba
)
1232 kfree(hba
->hba_ptr
);
1233 hba
->hba_ptr
= NULL
;
1236 static struct se_device
*tcmu_alloc_device(struct se_hba
*hba
, const char *name
)
1238 struct tcmu_dev
*udev
;
1240 udev
= kzalloc(sizeof(struct tcmu_dev
), GFP_KERNEL
);
1243 kref_init(&udev
->kref
);
1245 udev
->name
= kstrdup(name
, GFP_KERNEL
);
1252 udev
->cmd_time_out
= TCMU_TIME_OUT
;
1253 udev
->qfull_time_out
= -1;
1255 udev
->max_blocks
= DATA_BLOCK_BITS_DEF
;
1256 mutex_init(&udev
->cmdr_lock
);
1258 INIT_LIST_HEAD(&udev
->timedout_entry
);
1259 INIT_LIST_HEAD(&udev
->cmdr_queue
);
1260 idr_init(&udev
->commands
);
1262 timer_setup(&udev
->qfull_timer
, tcmu_qfull_timedout
, 0);
1263 timer_setup(&udev
->cmd_timer
, tcmu_cmd_timedout
, 0);
1265 INIT_RADIX_TREE(&udev
->data_blocks
, GFP_KERNEL
);
1267 return &udev
->se_dev
;
1270 static bool run_cmdr_queue(struct tcmu_dev
*udev
, bool fail
)
1272 struct tcmu_cmd
*tcmu_cmd
, *tmp_cmd
;
1274 bool drained
= true;
1275 sense_reason_t scsi_ret
;
1278 if (list_empty(&udev
->cmdr_queue
))
1281 pr_debug("running %s's cmdr queue forcefail %d\n", udev
->name
, fail
);
1283 list_splice_init(&udev
->cmdr_queue
, &cmds
);
1285 list_for_each_entry_safe(tcmu_cmd
, tmp_cmd
, &cmds
, cmdr_queue_entry
) {
1286 list_del_init(&tcmu_cmd
->cmdr_queue_entry
);
1288 pr_debug("removing cmd %u on dev %s from queue\n",
1289 tcmu_cmd
->cmd_id
, udev
->name
);
1292 idr_remove(&udev
->commands
, tcmu_cmd
->cmd_id
);
1294 * We were not able to even start the command, so
1295 * fail with busy to allow a retry in case runner
1296 * was only temporarily down. If the device is being
1297 * removed then LIO core will do the right thing and
1300 target_complete_cmd(tcmu_cmd
->se_cmd
, SAM_STAT_BUSY
);
1301 tcmu_free_cmd(tcmu_cmd
);
1305 ret
= queue_cmd_ring(tcmu_cmd
, &scsi_ret
);
1307 pr_debug("cmd %u on dev %s failed with %u\n",
1308 tcmu_cmd
->cmd_id
, udev
->name
, scsi_ret
);
1310 idr_remove(&udev
->commands
, tcmu_cmd
->cmd_id
);
1312 * Ignore scsi_ret for now. target_complete_cmd
1315 target_complete_cmd(tcmu_cmd
->se_cmd
,
1316 SAM_STAT_CHECK_CONDITION
);
1317 tcmu_free_cmd(tcmu_cmd
);
1318 } else if (ret
> 0) {
1319 pr_debug("ran out of space during cmdr queue run\n");
1321 * cmd was requeued, so just put all cmds back in
1324 list_splice_tail(&cmds
, &udev
->cmdr_queue
);
1329 if (list_empty(&udev
->cmdr_queue
))
1330 del_timer(&udev
->qfull_timer
);
1335 static int tcmu_irqcontrol(struct uio_info
*info
, s32 irq_on
)
1337 struct tcmu_dev
*udev
= container_of(info
, struct tcmu_dev
, uio_info
);
1339 mutex_lock(&udev
->cmdr_lock
);
1340 tcmu_handle_completions(udev
);
1341 run_cmdr_queue(udev
, false);
1342 mutex_unlock(&udev
->cmdr_lock
);
1348 * mmap code from uio.c. Copied here because we want to hook mmap()
1349 * and this stuff must come along.
1351 static int tcmu_find_mem_index(struct vm_area_struct
*vma
)
1353 struct tcmu_dev
*udev
= vma
->vm_private_data
;
1354 struct uio_info
*info
= &udev
->uio_info
;
1356 if (vma
->vm_pgoff
< MAX_UIO_MAPS
) {
1357 if (info
->mem
[vma
->vm_pgoff
].size
== 0)
1359 return (int)vma
->vm_pgoff
;
1364 static struct page
*tcmu_try_get_block_page(struct tcmu_dev
*udev
, uint32_t dbi
)
1368 mutex_lock(&udev
->cmdr_lock
);
1369 page
= tcmu_get_block_page(udev
, dbi
);
1371 mutex_unlock(&udev
->cmdr_lock
);
		 * Userspace messed up and passed in an address not in the
		 * data iov passed to it.
1379 pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n",
1382 mutex_unlock(&udev
->cmdr_lock
);
1387 static vm_fault_t
tcmu_vma_fault(struct vm_fault
*vmf
)
1389 struct tcmu_dev
*udev
= vmf
->vma
->vm_private_data
;
1390 struct uio_info
*info
= &udev
->uio_info
;
1392 unsigned long offset
;
1395 int mi
= tcmu_find_mem_index(vmf
->vma
);
1397 return VM_FAULT_SIGBUS
;
1400 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
1403 offset
= (vmf
->pgoff
- mi
) << PAGE_SHIFT
;
1405 if (offset
< udev
->data_off
) {
1406 /* For the vmalloc()ed cmd area pages */
1407 addr
= (void *)(unsigned long)info
->mem
[mi
].addr
+ offset
;
1408 page
= vmalloc_to_page(addr
);
1412 /* For the dynamically growing data area pages */
1413 dbi
= (offset
- udev
->data_off
) / DATA_BLOCK_SIZE
;
1414 page
= tcmu_try_get_block_page(udev
, dbi
);
1416 return VM_FAULT_SIGBUS
;
1424 static const struct vm_operations_struct tcmu_vm_ops
= {
1425 .fault
= tcmu_vma_fault
,
1428 static int tcmu_mmap(struct uio_info
*info
, struct vm_area_struct
*vma
)
1430 struct tcmu_dev
*udev
= container_of(info
, struct tcmu_dev
, uio_info
);
1432 vma
->vm_flags
|= VM_DONTEXPAND
| VM_DONTDUMP
;
1433 vma
->vm_ops
= &tcmu_vm_ops
;
1435 vma
->vm_private_data
= udev
;
1437 /* Ensure the mmap is exactly the right size */
1438 if (vma_pages(vma
) != (udev
->ring_size
>> PAGE_SHIFT
))
1444 static int tcmu_open(struct uio_info
*info
, struct inode
*inode
)
1446 struct tcmu_dev
*udev
= container_of(info
, struct tcmu_dev
, uio_info
);
1448 /* O_EXCL not supported for char devs, so fake it? */
1449 if (test_and_set_bit(TCMU_DEV_BIT_OPEN
, &udev
->flags
))
1452 udev
->inode
= inode
;
1453 kref_get(&udev
->kref
);
1460 static void tcmu_dev_call_rcu(struct rcu_head
*p
)
1462 struct se_device
*dev
= container_of(p
, struct se_device
, rcu_head
);
1463 struct tcmu_dev
*udev
= TCMU_DEV(dev
);
1465 kfree(udev
->uio_info
.name
);
1470 static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd
*cmd
)
1472 if (test_bit(TCMU_CMD_BIT_EXPIRED
, &cmd
->flags
)) {
1473 kmem_cache_free(tcmu_cmd_cache
, cmd
);
1479 static void tcmu_blocks_release(struct radix_tree_root
*blocks
,
1485 for (i
= start
; i
< end
; i
++) {
1486 page
= radix_tree_delete(blocks
, i
);
1489 atomic_dec(&global_db_count
);
1494 static void tcmu_dev_kref_release(struct kref
*kref
)
1496 struct tcmu_dev
*udev
= container_of(kref
, struct tcmu_dev
, kref
);
1497 struct se_device
*dev
= &udev
->se_dev
;
1498 struct tcmu_cmd
*cmd
;
1499 bool all_expired
= true;
1502 vfree(udev
->mb_addr
);
1503 udev
->mb_addr
= NULL
;
1505 spin_lock_bh(&timed_out_udevs_lock
);
1506 if (!list_empty(&udev
->timedout_entry
))
1507 list_del(&udev
->timedout_entry
);
1508 spin_unlock_bh(&timed_out_udevs_lock
);
1510 /* Upper layer should drain all requests before calling this */
1511 mutex_lock(&udev
->cmdr_lock
);
1512 idr_for_each_entry(&udev
->commands
, cmd
, i
) {
1513 if (tcmu_check_and_free_pending_cmd(cmd
) != 0)
1514 all_expired
= false;
1516 idr_destroy(&udev
->commands
);
1517 WARN_ON(!all_expired
);
1519 tcmu_blocks_release(&udev
->data_blocks
, 0, udev
->dbi_max
+ 1);
1520 kfree(udev
->data_bitmap
);
1521 mutex_unlock(&udev
->cmdr_lock
);
1523 call_rcu(&dev
->rcu_head
, tcmu_dev_call_rcu
);
1526 static int tcmu_release(struct uio_info
*info
, struct inode
*inode
)
1528 struct tcmu_dev
*udev
= container_of(info
, struct tcmu_dev
, uio_info
);
1530 clear_bit(TCMU_DEV_BIT_OPEN
, &udev
->flags
);
1532 pr_debug("close\n");
1533 /* release ref from open */
1534 kref_put(&udev
->kref
, tcmu_dev_kref_release
);
1538 static int tcmu_init_genl_cmd_reply(struct tcmu_dev
*udev
, int cmd
)
1540 struct tcmu_nl_cmd
*nl_cmd
= &udev
->curr_nl_cmd
;
1542 if (!tcmu_kern_cmd_reply_supported
)
1545 if (udev
->nl_reply_supported
<= 0)
1548 mutex_lock(&tcmu_nl_cmd_mutex
);
1550 if (nl_cmd
->cmd
!= TCMU_CMD_UNSPEC
) {
1551 mutex_unlock(&tcmu_nl_cmd_mutex
);
1552 pr_warn("netlink cmd %d already executing on %s\n",
1553 nl_cmd
->cmd
, udev
->name
);
1557 memset(nl_cmd
, 0, sizeof(*nl_cmd
));
1559 nl_cmd
->udev
= udev
;
1560 init_completion(&nl_cmd
->complete
);
1561 INIT_LIST_HEAD(&nl_cmd
->nl_list
);
1563 list_add_tail(&nl_cmd
->nl_list
, &tcmu_nl_cmd_list
);
1565 mutex_unlock(&tcmu_nl_cmd_mutex
);
1569 static int tcmu_wait_genl_cmd_reply(struct tcmu_dev
*udev
)
1571 struct tcmu_nl_cmd
*nl_cmd
= &udev
->curr_nl_cmd
;
1574 if (!tcmu_kern_cmd_reply_supported
)
1577 if (udev
->nl_reply_supported
<= 0)
1580 pr_debug("sleeping for nl reply\n");
1581 wait_for_completion(&nl_cmd
->complete
);
1583 mutex_lock(&tcmu_nl_cmd_mutex
);
1584 nl_cmd
->cmd
= TCMU_CMD_UNSPEC
;
1585 ret
= nl_cmd
->status
;
1587 mutex_unlock(&tcmu_nl_cmd_mutex
);
1592 static int tcmu_netlink_event_init(struct tcmu_dev
*udev
,
1593 enum tcmu_genl_cmd cmd
,
1594 struct sk_buff
**buf
, void **hdr
)
1596 struct sk_buff
*skb
;
1600 skb
= genlmsg_new(NLMSG_GOODSIZE
, GFP_KERNEL
);
1604 msg_header
= genlmsg_put(skb
, 0, 0, &tcmu_genl_family
, 0, cmd
);
1608 ret
= nla_put_string(skb
, TCMU_ATTR_DEVICE
, udev
->uio_info
.name
);
1612 ret
= nla_put_u32(skb
, TCMU_ATTR_MINOR
, udev
->uio_info
.uio_dev
->minor
);
1616 ret
= nla_put_u32(skb
, TCMU_ATTR_DEVICE_ID
, udev
->se_dev
.dev_index
);
1629 static int tcmu_netlink_event_send(struct tcmu_dev
*udev
,
1630 enum tcmu_genl_cmd cmd
,
1631 struct sk_buff
*skb
, void *msg_header
)
1635 genlmsg_end(skb
, msg_header
);
1637 ret
= tcmu_init_genl_cmd_reply(udev
, cmd
);
1643 ret
= genlmsg_multicast_allns(&tcmu_genl_family
, skb
, 0,
1644 TCMU_MCGRP_CONFIG
, GFP_KERNEL
);
1645 /* We don't care if no one is listening */
1649 ret
= tcmu_wait_genl_cmd_reply(udev
);
1653 static int tcmu_send_dev_add_event(struct tcmu_dev
*udev
)
1655 struct sk_buff
*skb
= NULL
;
1656 void *msg_header
= NULL
;
1659 ret
= tcmu_netlink_event_init(udev
, TCMU_CMD_ADDED_DEVICE
, &skb
,
1663 return tcmu_netlink_event_send(udev
, TCMU_CMD_ADDED_DEVICE
, skb
,
1667 static int tcmu_send_dev_remove_event(struct tcmu_dev
*udev
)
1669 struct sk_buff
*skb
= NULL
;
1670 void *msg_header
= NULL
;
1673 ret
= tcmu_netlink_event_init(udev
, TCMU_CMD_REMOVED_DEVICE
,
1677 return tcmu_netlink_event_send(udev
, TCMU_CMD_REMOVED_DEVICE
,
1681 static int tcmu_update_uio_info(struct tcmu_dev
*udev
)
1683 struct tcmu_hba
*hba
= udev
->hba
->hba_ptr
;
1684 struct uio_info
*info
;
1688 info
= &udev
->uio_info
;
1689 size
= snprintf(NULL
, 0, "tcm-user/%u/%s/%s", hba
->host_id
, udev
->name
,
1691 size
+= 1; /* for \0 */
1692 str
= kmalloc(size
, GFP_KERNEL
);
1696 used
= snprintf(str
, size
, "tcm-user/%u/%s", hba
->host_id
, udev
->name
);
1697 if (udev
->dev_config
[0])
1698 snprintf(str
+ used
, size
- used
, "/%s", udev
->dev_config
);
1700 /* If the old string exists, free it */
1707 static int tcmu_configure_device(struct se_device
*dev
)
1709 struct tcmu_dev
*udev
= TCMU_DEV(dev
);
1710 struct uio_info
*info
;
1711 struct tcmu_mailbox
*mb
;
1714 ret
= tcmu_update_uio_info(udev
);
1718 info
= &udev
->uio_info
;
1720 udev
->data_bitmap
= kcalloc(BITS_TO_LONGS(udev
->max_blocks
),
1721 sizeof(unsigned long),
1723 if (!udev
->data_bitmap
) {
1725 goto err_bitmap_alloc
;
1728 udev
->mb_addr
= vzalloc(CMDR_SIZE
);
1729 if (!udev
->mb_addr
) {
1734 /* mailbox fits in first part of CMDR space */
1735 udev
->cmdr_size
= CMDR_SIZE
- CMDR_OFF
;
1736 udev
->data_off
= CMDR_SIZE
;
1737 udev
->data_size
= udev
->max_blocks
* DATA_BLOCK_SIZE
;
1738 udev
->dbi_thresh
= 0; /* Default in Idle state */
1740 /* Initialise the mailbox of the ring buffer */
1742 mb
->version
= TCMU_MAILBOX_VERSION
;
1743 mb
->flags
= TCMU_MAILBOX_FLAG_CAP_OOOC
;
1744 mb
->cmdr_off
= CMDR_OFF
;
1745 mb
->cmdr_size
= udev
->cmdr_size
;
1747 WARN_ON(!PAGE_ALIGNED(udev
->data_off
));
1748 WARN_ON(udev
->data_size
% PAGE_SIZE
);
1749 WARN_ON(udev
->data_size
% DATA_BLOCK_SIZE
);
1751 info
->version
= __stringify(TCMU_MAILBOX_VERSION
);
1753 info
->mem
[0].name
= "tcm-user command & data buffer";
1754 info
->mem
[0].addr
= (phys_addr_t
)(uintptr_t)udev
->mb_addr
;
1755 info
->mem
[0].size
= udev
->ring_size
= udev
->data_size
+ CMDR_SIZE
;
1756 info
->mem
[0].memtype
= UIO_MEM_NONE
;
1758 info
->irqcontrol
= tcmu_irqcontrol
;
1759 info
->irq
= UIO_IRQ_CUSTOM
;
1761 info
->mmap
= tcmu_mmap
;
1762 info
->open
= tcmu_open
;
1763 info
->release
= tcmu_release
;
1765 ret
= uio_register_device(tcmu_root_device
, info
);
	/* User can set hw_block_size before enabling the device */
1770 if (dev
->dev_attrib
.hw_block_size
== 0)
1771 dev
->dev_attrib
.hw_block_size
= 512;
1772 /* Other attributes can be configured in userspace */
1773 if (!dev
->dev_attrib
.hw_max_sectors
)
1774 dev
->dev_attrib
.hw_max_sectors
= 128;
1775 if (!dev
->dev_attrib
.emulate_write_cache
)
1776 dev
->dev_attrib
.emulate_write_cache
= 0;
1777 dev
->dev_attrib
.hw_queue_depth
= 128;
1779 /* If user didn't explicitly disable netlink reply support, use
1780 * module scope setting.
1782 if (udev
->nl_reply_supported
>= 0)
1783 udev
->nl_reply_supported
= tcmu_kern_cmd_reply_supported
;
	 * Get a ref in case userspace does a close on the uio device before
	 * LIO has initiated tcmu_free_device.
1789 kref_get(&udev
->kref
);
1791 ret
= tcmu_send_dev_add_event(udev
);
1795 mutex_lock(&root_udev_mutex
);
1796 list_add(&udev
->node
, &root_udev
);
1797 mutex_unlock(&root_udev_mutex
);
1802 kref_put(&udev
->kref
, tcmu_dev_kref_release
);
1803 uio_unregister_device(&udev
->uio_info
);
1805 vfree(udev
->mb_addr
);
1806 udev
->mb_addr
= NULL
;
1808 kfree(udev
->data_bitmap
);
1809 udev
->data_bitmap
= NULL
;
1817 static bool tcmu_dev_configured(struct tcmu_dev
*udev
)
1819 return udev
->uio_info
.uio_dev
? true : false;
1822 static void tcmu_free_device(struct se_device
*dev
)
1824 struct tcmu_dev
*udev
= TCMU_DEV(dev
);
1826 /* release ref from init */
1827 kref_put(&udev
->kref
, tcmu_dev_kref_release
);
1830 static void tcmu_destroy_device(struct se_device
*dev
)
1832 struct tcmu_dev
*udev
= TCMU_DEV(dev
);
1834 del_timer_sync(&udev
->cmd_timer
);
1835 del_timer_sync(&udev
->qfull_timer
);
1837 mutex_lock(&root_udev_mutex
);
1838 list_del(&udev
->node
);
1839 mutex_unlock(&root_udev_mutex
);
1841 tcmu_send_dev_remove_event(udev
);
1843 uio_unregister_device(&udev
->uio_info
);
1845 /* release ref from configure */
1846 kref_put(&udev
->kref
, tcmu_dev_kref_release
);
1849 static void tcmu_unblock_dev(struct tcmu_dev
*udev
)
1851 mutex_lock(&udev
->cmdr_lock
);
1852 clear_bit(TCMU_DEV_BIT_BLOCKED
, &udev
->flags
);
1853 mutex_unlock(&udev
->cmdr_lock
);
1856 static void tcmu_block_dev(struct tcmu_dev
*udev
)
1858 mutex_lock(&udev
->cmdr_lock
);
1860 if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED
, &udev
->flags
))
1863 /* complete IO that has executed successfully */
1864 tcmu_handle_completions(udev
);
1865 /* fail IO waiting to be queued */
1866 run_cmdr_queue(udev
, true);
1869 mutex_unlock(&udev
->cmdr_lock
);
1872 static void tcmu_reset_ring(struct tcmu_dev
*udev
, u8 err_level
)
1874 struct tcmu_mailbox
*mb
;
1875 struct tcmu_cmd
*cmd
;
1878 mutex_lock(&udev
->cmdr_lock
);
1880 idr_for_each_entry(&udev
->commands
, cmd
, i
) {
1881 if (!list_empty(&cmd
->cmdr_queue_entry
))
1884 pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
1885 cmd
->cmd_id
, udev
->name
,
1886 test_bit(TCMU_CMD_BIT_EXPIRED
, &cmd
->flags
));
1888 idr_remove(&udev
->commands
, i
);
1889 if (!test_bit(TCMU_CMD_BIT_EXPIRED
, &cmd
->flags
)) {
1890 if (err_level
== 1) {
1892 * Userspace was not able to start the
1893 * command or it is retryable.
1895 target_complete_cmd(cmd
->se_cmd
, SAM_STAT_BUSY
);
1898 target_complete_cmd(cmd
->se_cmd
,
1899 SAM_STAT_CHECK_CONDITION
);
1902 tcmu_cmd_free_data(cmd
, cmd
->dbi_cnt
);
1907 tcmu_flush_dcache_range(mb
, sizeof(*mb
));
1908 pr_debug("mb last %u head %u tail %u\n", udev
->cmdr_last_cleaned
,
1909 mb
->cmd_tail
, mb
->cmd_head
);
1911 udev
->cmdr_last_cleaned
= 0;
1914 tcmu_flush_dcache_range(mb
, sizeof(*mb
));
1916 del_timer(&udev
->cmd_timer
);
1918 mutex_unlock(&udev
->cmdr_lock
);
1922 Opt_dev_config
, Opt_dev_size
, Opt_hw_block_size
, Opt_hw_max_sectors
,
1923 Opt_nl_reply_supported
, Opt_max_data_area_mb
, Opt_err
,
1926 static match_table_t tokens
= {
1927 {Opt_dev_config
, "dev_config=%s"},
1928 {Opt_dev_size
, "dev_size=%u"},
1929 {Opt_hw_block_size
, "hw_block_size=%u"},
1930 {Opt_hw_max_sectors
, "hw_max_sectors=%u"},
1931 {Opt_nl_reply_supported
, "nl_reply_supported=%d"},
1932 {Opt_max_data_area_mb
, "max_data_area_mb=%u"},
1936 static int tcmu_set_dev_attrib(substring_t
*arg
, u32
*dev_attrib
)
1938 unsigned long tmp_ul
;
1942 arg_p
= match_strdup(arg
);
1946 ret
= kstrtoul(arg_p
, 0, &tmp_ul
);
1949 pr_err("kstrtoul() failed for dev attrib\n");
1953 pr_err("dev attrib must be nonzero\n");
1956 *dev_attrib
= tmp_ul
;
1960 static ssize_t
tcmu_set_configfs_dev_params(struct se_device
*dev
,
1961 const char *page
, ssize_t count
)
1963 struct tcmu_dev
*udev
= TCMU_DEV(dev
);
1964 char *orig
, *ptr
, *opts
, *arg_p
;
1965 substring_t args
[MAX_OPT_ARGS
];
1966 int ret
= 0, token
, tmpval
;
1968 opts
= kstrdup(page
, GFP_KERNEL
);
1974 while ((ptr
= strsep(&opts
, ",\n")) != NULL
) {
1978 token
= match_token(ptr
, tokens
, args
);
1980 case Opt_dev_config
:
1981 if (match_strlcpy(udev
->dev_config
, &args
[0],
1982 TCMU_CONFIG_LEN
) == 0) {
1986 pr_debug("TCMU: Referencing Path: %s\n", udev
->dev_config
);
1989 arg_p
= match_strdup(&args
[0]);
1994 ret
= kstrtoul(arg_p
, 0, (unsigned long *) &udev
->dev_size
);
1997 pr_err("kstrtoul() failed for dev_size=\n");
1999 case Opt_hw_block_size
:
2000 ret
= tcmu_set_dev_attrib(&args
[0],
2001 &(dev
->dev_attrib
.hw_block_size
));
2003 case Opt_hw_max_sectors
:
2004 ret
= tcmu_set_dev_attrib(&args
[0],
2005 &(dev
->dev_attrib
.hw_max_sectors
));
2007 case Opt_nl_reply_supported
:
2008 arg_p
= match_strdup(&args
[0]);
2013 ret
= kstrtoint(arg_p
, 0, &udev
->nl_reply_supported
);
2016 pr_err("kstrtoint() failed for nl_reply_supported=\n");
2018 case Opt_max_data_area_mb
:
2019 if (dev
->export_count
) {
2020 pr_err("Unable to set max_data_area_mb while exports exist\n");
2025 arg_p
= match_strdup(&args
[0]);
2030 ret
= kstrtoint(arg_p
, 0, &tmpval
);
2033 pr_err("kstrtoint() failed for max_data_area_mb=\n");
2038 pr_err("Invalid max_data_area %d\n", tmpval
);
2043 udev
->max_blocks
= TCMU_MBS_TO_BLOCKS(tmpval
);
2044 if (udev
->max_blocks
> tcmu_global_max_blocks
) {
2045 pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
2047 TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks
));
2048 udev
->max_blocks
= tcmu_global_max_blocks
;
2060 return (!ret
) ? count
: ret
;
2063 static ssize_t
tcmu_show_configfs_dev_params(struct se_device
*dev
, char *b
)
2065 struct tcmu_dev
*udev
= TCMU_DEV(dev
);
2068 bl
= sprintf(b
+ bl
, "Config: %s ",
2069 udev
->dev_config
[0] ? udev
->dev_config
: "NULL");
2070 bl
+= sprintf(b
+ bl
, "Size: %zu ", udev
->dev_size
);
2071 bl
+= sprintf(b
+ bl
, "MaxDataAreaMB: %u\n",
2072 TCMU_BLOCKS_TO_MBS(udev
->max_blocks
));
2077 static sector_t
tcmu_get_blocks(struct se_device
*dev
)
2079 struct tcmu_dev
*udev
= TCMU_DEV(dev
);
2081 return div_u64(udev
->dev_size
- dev
->dev_attrib
.block_size
,
2082 dev
->dev_attrib
.block_size
);
2085 static sense_reason_t
2086 tcmu_parse_cdb(struct se_cmd
*cmd
)
2088 return passthrough_parse_cdb(cmd
, tcmu_queue_cmd
);
2091 static ssize_t
tcmu_cmd_time_out_show(struct config_item
*item
, char *page
)
2093 struct se_dev_attrib
*da
= container_of(to_config_group(item
),
2094 struct se_dev_attrib
, da_group
);
2095 struct tcmu_dev
*udev
= TCMU_DEV(da
->da_dev
);
2097 return snprintf(page
, PAGE_SIZE
, "%lu\n", udev
->cmd_time_out
/ MSEC_PER_SEC
);
2100 static ssize_t
tcmu_cmd_time_out_store(struct config_item
*item
, const char *page
,
2103 struct se_dev_attrib
*da
= container_of(to_config_group(item
),
2104 struct se_dev_attrib
, da_group
);
2105 struct tcmu_dev
*udev
= container_of(da
->da_dev
,
2106 struct tcmu_dev
, se_dev
);
2110 if (da
->da_dev
->export_count
) {
2111 pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
2115 ret
= kstrtou32(page
, 0, &val
);
2119 udev
->cmd_time_out
= val
* MSEC_PER_SEC
;
2122 CONFIGFS_ATTR(tcmu_
, cmd_time_out
);
2124 static ssize_t
tcmu_qfull_time_out_show(struct config_item
*item
, char *page
)
2126 struct se_dev_attrib
*da
= container_of(to_config_group(item
),
2127 struct se_dev_attrib
, da_group
);
2128 struct tcmu_dev
*udev
= TCMU_DEV(da
->da_dev
);
2130 return snprintf(page
, PAGE_SIZE
, "%ld\n", udev
->qfull_time_out
<= 0 ?
2131 udev
->qfull_time_out
:
2132 udev
->qfull_time_out
/ MSEC_PER_SEC
);
2135 static ssize_t
tcmu_qfull_time_out_store(struct config_item
*item
,
2136 const char *page
, size_t count
)
2138 struct se_dev_attrib
*da
= container_of(to_config_group(item
),
2139 struct se_dev_attrib
, da_group
);
2140 struct tcmu_dev
*udev
= TCMU_DEV(da
->da_dev
);
2144 ret
= kstrtos32(page
, 0, &val
);
2149 udev
->qfull_time_out
= val
* MSEC_PER_SEC
;
2150 } else if (val
== -1) {
2151 udev
->qfull_time_out
= val
;
2153 printk(KERN_ERR
"Invalid qfull timeout value %d\n", val
);
2158 CONFIGFS_ATTR(tcmu_
, qfull_time_out
);
2160 static ssize_t
tcmu_max_data_area_mb_show(struct config_item
*item
, char *page
)
2162 struct se_dev_attrib
*da
= container_of(to_config_group(item
),
2163 struct se_dev_attrib
, da_group
);
2164 struct tcmu_dev
*udev
= TCMU_DEV(da
->da_dev
);
2166 return snprintf(page
, PAGE_SIZE
, "%u\n",
2167 TCMU_BLOCKS_TO_MBS(udev
->max_blocks
));
2169 CONFIGFS_ATTR_RO(tcmu_
, max_data_area_mb
);
2171 static ssize_t
tcmu_dev_config_show(struct config_item
*item
, char *page
)
2173 struct se_dev_attrib
*da
= container_of(to_config_group(item
),
2174 struct se_dev_attrib
, da_group
);
2175 struct tcmu_dev
*udev
= TCMU_DEV(da
->da_dev
);
2177 return snprintf(page
, PAGE_SIZE
, "%s\n", udev
->dev_config
);
2180 static int tcmu_send_dev_config_event(struct tcmu_dev
*udev
,
2181 const char *reconfig_data
)
2183 struct sk_buff
*skb
= NULL
;
2184 void *msg_header
= NULL
;
2187 ret
= tcmu_netlink_event_init(udev
, TCMU_CMD_RECONFIG_DEVICE
,
2191 ret
= nla_put_string(skb
, TCMU_ATTR_DEV_CFG
, reconfig_data
);
2196 return tcmu_netlink_event_send(udev
, TCMU_CMD_RECONFIG_DEVICE
,
2201 static ssize_t
tcmu_dev_config_store(struct config_item
*item
, const char *page
,
2204 struct se_dev_attrib
*da
= container_of(to_config_group(item
),
2205 struct se_dev_attrib
, da_group
);
2206 struct tcmu_dev
*udev
= TCMU_DEV(da
->da_dev
);
2210 if (!len
|| len
> TCMU_CONFIG_LEN
- 1)
2213 /* Check if device has been configured before */
2214 if (tcmu_dev_configured(udev
)) {
2215 ret
= tcmu_send_dev_config_event(udev
, page
);
2217 pr_err("Unable to reconfigure device\n");
2220 strlcpy(udev
->dev_config
, page
, TCMU_CONFIG_LEN
);
2222 ret
= tcmu_update_uio_info(udev
);
2227 strlcpy(udev
->dev_config
, page
, TCMU_CONFIG_LEN
);
2231 CONFIGFS_ATTR(tcmu_
, dev_config
);
2233 static ssize_t
tcmu_dev_size_show(struct config_item
*item
, char *page
)
2235 struct se_dev_attrib
*da
= container_of(to_config_group(item
),
2236 struct se_dev_attrib
, da_group
);
2237 struct tcmu_dev
*udev
= TCMU_DEV(da
->da_dev
);
2239 return snprintf(page
, PAGE_SIZE
, "%zu\n", udev
->dev_size
);
2242 static int tcmu_send_dev_size_event(struct tcmu_dev
*udev
, u64 size
)
2244 struct sk_buff
*skb
= NULL
;
2245 void *msg_header
= NULL
;
2248 ret
= tcmu_netlink_event_init(udev
, TCMU_CMD_RECONFIG_DEVICE
,
2252 ret
= nla_put_u64_64bit(skb
, TCMU_ATTR_DEV_SIZE
,
2253 size
, TCMU_ATTR_PAD
);
2258 return tcmu_netlink_event_send(udev
, TCMU_CMD_RECONFIG_DEVICE
,
2262 static ssize_t
tcmu_dev_size_store(struct config_item
*item
, const char *page
,
2265 struct se_dev_attrib
*da
= container_of(to_config_group(item
),
2266 struct se_dev_attrib
, da_group
);
2267 struct tcmu_dev
*udev
= TCMU_DEV(da
->da_dev
);
2271 ret
= kstrtou64(page
, 0, &val
);
2275 /* Check if device has been configured before */
2276 if (tcmu_dev_configured(udev
)) {
2277 ret
= tcmu_send_dev_size_event(udev
, val
);
2279 pr_err("Unable to reconfigure device\n");
2283 udev
->dev_size
= val
;
2286 CONFIGFS_ATTR(tcmu_
, dev_size
);
2288 static ssize_t
tcmu_nl_reply_supported_show(struct config_item
*item
,
2291 struct se_dev_attrib
*da
= container_of(to_config_group(item
),
2292 struct se_dev_attrib
, da_group
);
2293 struct tcmu_dev
*udev
= TCMU_DEV(da
->da_dev
);
2295 return snprintf(page
, PAGE_SIZE
, "%d\n", udev
->nl_reply_supported
);
2298 static ssize_t
tcmu_nl_reply_supported_store(struct config_item
*item
,
2299 const char *page
, size_t count
)
2301 struct se_dev_attrib
*da
= container_of(to_config_group(item
),
2302 struct se_dev_attrib
, da_group
);
2303 struct tcmu_dev
*udev
= TCMU_DEV(da
->da_dev
);
2307 ret
= kstrtos8(page
, 0, &val
);
2311 udev
->nl_reply_supported
= val
;
2314 CONFIGFS_ATTR(tcmu_
, nl_reply_supported
);
2316 static ssize_t
tcmu_emulate_write_cache_show(struct config_item
*item
,
2319 struct se_dev_attrib
*da
= container_of(to_config_group(item
),
2320 struct se_dev_attrib
, da_group
);
2322 return snprintf(page
, PAGE_SIZE
, "%i\n", da
->emulate_write_cache
);
2325 static int tcmu_send_emulate_write_cache(struct tcmu_dev
*udev
, u8 val
)
2327 struct sk_buff
*skb
= NULL
;
2328 void *msg_header
= NULL
;
2331 ret
= tcmu_netlink_event_init(udev
, TCMU_CMD_RECONFIG_DEVICE
,
2335 ret
= nla_put_u8(skb
, TCMU_ATTR_WRITECACHE
, val
);
2340 return tcmu_netlink_event_send(udev
, TCMU_CMD_RECONFIG_DEVICE
,
2344 static ssize_t
tcmu_emulate_write_cache_store(struct config_item
*item
,
2345 const char *page
, size_t count
)
2347 struct se_dev_attrib
*da
= container_of(to_config_group(item
),
2348 struct se_dev_attrib
, da_group
);
2349 struct tcmu_dev
*udev
= TCMU_DEV(da
->da_dev
);
2353 ret
= kstrtou8(page
, 0, &val
);
2357 /* Check if device has been configured before */
2358 if (tcmu_dev_configured(udev
)) {
2359 ret
= tcmu_send_emulate_write_cache(udev
, val
);
2361 pr_err("Unable to reconfigure device\n");
2366 da
->emulate_write_cache
= val
;
2369 CONFIGFS_ATTR(tcmu_
, emulate_write_cache
);
2371 static ssize_t
tcmu_block_dev_show(struct config_item
*item
, char *page
)
2373 struct se_device
*se_dev
= container_of(to_config_group(item
),
2376 struct tcmu_dev
*udev
= TCMU_DEV(se_dev
);
2378 if (test_bit(TCMU_DEV_BIT_BLOCKED
, &udev
->flags
))
2379 return snprintf(page
, PAGE_SIZE
, "%s\n", "blocked");
2381 return snprintf(page
, PAGE_SIZE
, "%s\n", "unblocked");
2384 static ssize_t
tcmu_block_dev_store(struct config_item
*item
, const char *page
,
2387 struct se_device
*se_dev
= container_of(to_config_group(item
),
2390 struct tcmu_dev
*udev
= TCMU_DEV(se_dev
);
2394 ret
= kstrtou8(page
, 0, &val
);
2399 pr_err("Invalid block value %d\n", val
);
2404 tcmu_unblock_dev(udev
);
2406 tcmu_block_dev(udev
);
2409 CONFIGFS_ATTR(tcmu_
, block_dev
);
2411 static ssize_t
tcmu_reset_ring_store(struct config_item
*item
, const char *page
,
2414 struct se_device
*se_dev
= container_of(to_config_group(item
),
2417 struct tcmu_dev
*udev
= TCMU_DEV(se_dev
);
2421 ret
= kstrtou8(page
, 0, &val
);
2425 if (val
!= 1 && val
!= 2) {
2426 pr_err("Invalid reset ring value %d\n", val
);
2430 tcmu_reset_ring(udev
, val
);
2433 CONFIGFS_ATTR_WO(tcmu_
, reset_ring
);
2435 static struct configfs_attribute
*tcmu_attrib_attrs
[] = {
2436 &tcmu_attr_cmd_time_out
,
2437 &tcmu_attr_qfull_time_out
,
2438 &tcmu_attr_max_data_area_mb
,
2439 &tcmu_attr_dev_config
,
2440 &tcmu_attr_dev_size
,
2441 &tcmu_attr_emulate_write_cache
,
2442 &tcmu_attr_nl_reply_supported
,
2446 static struct configfs_attribute
**tcmu_attrs
;
2448 static struct configfs_attribute
*tcmu_action_attrs
[] = {
2449 &tcmu_attr_block_dev
,
2450 &tcmu_attr_reset_ring
,
2454 static struct target_backend_ops tcmu_ops
= {
2456 .owner
= THIS_MODULE
,
2457 .transport_flags
= TRANSPORT_FLAG_PASSTHROUGH
,
2458 .attach_hba
= tcmu_attach_hba
,
2459 .detach_hba
= tcmu_detach_hba
,
2460 .alloc_device
= tcmu_alloc_device
,
2461 .configure_device
= tcmu_configure_device
,
2462 .destroy_device
= tcmu_destroy_device
,
2463 .free_device
= tcmu_free_device
,
2464 .parse_cdb
= tcmu_parse_cdb
,
2465 .set_configfs_dev_params
= tcmu_set_configfs_dev_params
,
2466 .show_configfs_dev_params
= tcmu_show_configfs_dev_params
,
2467 .get_device_type
= sbc_get_device_type
,
2468 .get_blocks
= tcmu_get_blocks
,
2469 .tb_dev_action_attrs
= tcmu_action_attrs
,
2472 static void find_free_blocks(void)
2474 struct tcmu_dev
*udev
;
2476 u32 start
, end
, block
, total_freed
= 0;
2478 if (atomic_read(&global_db_count
) <= tcmu_global_max_blocks
)
2481 mutex_lock(&root_udev_mutex
);
2482 list_for_each_entry(udev
, &root_udev
, node
) {
2483 mutex_lock(&udev
->cmdr_lock
);
2485 /* Try to complete the finished commands first */
2486 tcmu_handle_completions(udev
);
2488 /* Skip the udevs in idle */
2489 if (!udev
->dbi_thresh
) {
2490 mutex_unlock(&udev
->cmdr_lock
);
2494 end
= udev
->dbi_max
+ 1;
2495 block
= find_last_bit(udev
->data_bitmap
, end
);
2496 if (block
== udev
->dbi_max
) {
			 * The last bit is dbi_max, so it is not possible to
			 * reclaim any blocks.
2501 mutex_unlock(&udev
->cmdr_lock
);
2503 } else if (block
== end
) {
2504 /* The current udev will goto idle state */
2505 udev
->dbi_thresh
= start
= 0;
2508 udev
->dbi_thresh
= start
= block
+ 1;
2509 udev
->dbi_max
= block
;
2512 /* Here will truncate the data area from off */
2513 off
= udev
->data_off
+ start
* DATA_BLOCK_SIZE
;
2514 unmap_mapping_range(udev
->inode
->i_mapping
, off
, 0, 1);
2516 /* Release the block pages */
2517 tcmu_blocks_release(&udev
->data_blocks
, start
, end
);
2518 mutex_unlock(&udev
->cmdr_lock
);
2520 total_freed
+= end
- start
;
2521 pr_debug("Freed %u blocks (total %u) from %s.\n", end
- start
,
2522 total_freed
, udev
->name
);
2524 mutex_unlock(&root_udev_mutex
);
2526 if (atomic_read(&global_db_count
) > tcmu_global_max_blocks
)
2527 schedule_delayed_work(&tcmu_unmap_work
, msecs_to_jiffies(5000));
2530 static void check_timedout_devices(void)
2532 struct tcmu_dev
*udev
, *tmp_dev
;
2535 spin_lock_bh(&timed_out_udevs_lock
);
2536 list_splice_init(&timed_out_udevs
, &devs
);
2538 list_for_each_entry_safe(udev
, tmp_dev
, &devs
, timedout_entry
) {
2539 list_del_init(&udev
->timedout_entry
);
2540 spin_unlock_bh(&timed_out_udevs_lock
);
2542 mutex_lock(&udev
->cmdr_lock
);
2543 idr_for_each(&udev
->commands
, tcmu_check_expired_cmd
, NULL
);
2544 mutex_unlock(&udev
->cmdr_lock
);
2546 spin_lock_bh(&timed_out_udevs_lock
);
2549 spin_unlock_bh(&timed_out_udevs_lock
);
2552 static void tcmu_unmap_work_fn(struct work_struct
*work
)
2554 check_timedout_devices();
2558 static int __init
tcmu_module_init(void)
2560 int ret
, i
, k
, len
= 0;
2562 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry
) % TCMU_OP_ALIGN_SIZE
) != 0);
2564 INIT_DELAYED_WORK(&tcmu_unmap_work
, tcmu_unmap_work_fn
);
2566 tcmu_cmd_cache
= kmem_cache_create("tcmu_cmd_cache",
2567 sizeof(struct tcmu_cmd
),
2568 __alignof__(struct tcmu_cmd
),
2570 if (!tcmu_cmd_cache
)
2573 tcmu_root_device
= root_device_register("tcm_user");
2574 if (IS_ERR(tcmu_root_device
)) {
2575 ret
= PTR_ERR(tcmu_root_device
);
2576 goto out_free_cache
;
2579 ret
= genl_register_family(&tcmu_genl_family
);
2581 goto out_unreg_device
;
2584 for (i
= 0; passthrough_attrib_attrs
[i
] != NULL
; i
++) {
2585 len
+= sizeof(struct configfs_attribute
*);
2587 for (i
= 0; tcmu_attrib_attrs
[i
] != NULL
; i
++) {
2588 len
+= sizeof(struct configfs_attribute
*);
2590 len
+= sizeof(struct configfs_attribute
*);
2592 tcmu_attrs
= kzalloc(len
, GFP_KERNEL
);
2595 goto out_unreg_genl
;
2598 for (i
= 0; passthrough_attrib_attrs
[i
] != NULL
; i
++) {
2599 tcmu_attrs
[i
] = passthrough_attrib_attrs
[i
];
2601 for (k
= 0; tcmu_attrib_attrs
[k
] != NULL
; k
++) {
2602 tcmu_attrs
[i
] = tcmu_attrib_attrs
[k
];
2605 tcmu_ops
.tb_dev_attrib_attrs
= tcmu_attrs
;
2607 ret
= transport_backend_register(&tcmu_ops
);
2616 genl_unregister_family(&tcmu_genl_family
);
2618 root_device_unregister(tcmu_root_device
);
2620 kmem_cache_destroy(tcmu_cmd_cache
);
2625 static void __exit
tcmu_module_exit(void)
2627 cancel_delayed_work_sync(&tcmu_unmap_work
);
2628 target_backend_unregister(&tcmu_ops
);
2630 genl_unregister_family(&tcmu_genl_family
);
2631 root_device_unregister(tcmu_root_device
);
2632 kmem_cache_destroy(tcmu_cmd_cache
);
MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
MODULE_LICENSE("GPL");

module_init(tcmu_module_init);
module_exit(tcmu_module_exit);