Commit | Line | Data |
---|---|---|
7c9e7a6f AG |
1 | /* |
2 | * Copyright (C) 2013 Shaohua Li <shli@kernel.org> | |
3 | * Copyright (C) 2014 Red Hat, Inc. | |
f97ec7db | 4 | * Copyright (C) 2015 Arrikto, Inc. |
141685a3 | 5 | * Copyright (C) 2017 Chinamobile, Inc. |
7c9e7a6f AG |
6 | * |
7 | * This program is free software; you can redistribute it and/or modify it | |
8 | * under the terms and conditions of the GNU General Public License, | |
9 | * version 2, as published by the Free Software Foundation. | |
10 | * | |
11 | * This program is distributed in the hope it will be useful, but WITHOUT | |
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
14 | * more details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License along with | |
17 | * this program; if not, write to the Free Software Foundation, Inc., | |
18 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | |
19 | */ | |
20 | ||
21 | #include <linux/spinlock.h> | |
22 | #include <linux/module.h> | |
23 | #include <linux/idr.h> | |
ba929992 | 24 | #include <linux/kernel.h> |
7c9e7a6f AG |
25 | #include <linux/timer.h> |
26 | #include <linux/parser.h> | |
5538d294 | 27 | #include <linux/vmalloc.h> |
7c9e7a6f | 28 | #include <linux/uio_driver.h> |
141685a3 | 29 | #include <linux/radix-tree.h> |
ac64a2ce | 30 | #include <linux/stringify.h> |
26418649 | 31 | #include <linux/bitops.h> |
f5045724 | 32 | #include <linux/highmem.h> |
7d7a7435 | 33 | #include <linux/configfs.h> |
b6df4b79 XL |
34 | #include <linux/mutex.h> |
35 | #include <linux/kthread.h> | |
7c9e7a6f | 36 | #include <net/genetlink.h> |
ba929992 BVA |
37 | #include <scsi/scsi_common.h> |
38 | #include <scsi/scsi_proto.h> | |
7c9e7a6f AG |
39 | #include <target/target_core_base.h> |
40 | #include <target/target_core_fabric.h> | |
41 | #include <target/target_core_backend.h> | |
e9f720d6 | 42 | |
7c9e7a6f AG |
43 | #include <linux/target_core_user.h> |
44 | ||
45 | /* | |
46 | * Define a shared-memory interface for LIO to pass SCSI commands and | |
47 | * data to userspace for processing. This allows backends that are |
48 | * too complex for in-kernel support to be implemented in userspace. |
49 | * | |
50 | * It uses the UIO framework to do a lot of the device-creation and | |
51 | * introspection work for us. | |
52 | * | |
53 | * See the .h file for how the ring is laid out. Note that while the | |
54 | * command ring is defined, the particulars of the data area are | |
55 | * not. Offset values in the command entry point to other locations | |
56 | * internal to the mmap()ed area. There is separate space outside the | |
57 | * command ring for data buffers. This leaves maximum flexibility for | |
58 | * moving buffer allocations, or even page flipping or other | |
59 | * allocation techniques, without altering the command ring layout. | |
60 | * | |
61 | * SECURITY: | |
62 | * The user process must be assumed to be malicious. There's no way to | |
63 | * prevent it breaking the command ring protocol if it wants, but in | |
64 | * order to prevent other issues we must only ever read *data* from | |
65 | * the shared memory area, not offsets or sizes. This applies to | |
66 | * command ring entries as well as the mailbox. Extra code needed for | |
67 | * this may have a 'UAM' comment. | |
68 | */ | |
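For orientation, here is a minimal sketch of the userspace side of this protocol. It is not part of this driver: it assumes the UIO device has already been mmap()ed, `handle_cmd()` is a hypothetical user-supplied handler, and it only uses the structures and helpers exported by the uapi header `<linux/target_core_user.h>`.

```c
/* Minimal userspace consumer sketch (assumes an mmap()ed UIO device and a
 * hypothetical handle_cmd() helper). Not part of this kernel driver. */
#include <stdint.h>
#include <sys/uio.h>
#include <linux/target_core_user.h>

extern int handle_cmd(void *map, uint8_t *cdb, struct iovec *iov, uint32_t iov_cnt);

static void process_ring(char *map)        /* map = base of the mmap()ed region */
{
	struct tcmu_mailbox *mb = (struct tcmu_mailbox *)map;
	char *cmdr = map + mb->cmdr_off;

	while (mb->cmd_tail != mb->cmd_head) {
		struct tcmu_cmd_entry *ent =
			(struct tcmu_cmd_entry *)(cmdr + mb->cmd_tail);
		uint32_t len = tcmu_hdr_get_len(ent->hdr.len_op);

		if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) {
			/* cdb_off and the iov_base values are offsets from map */
			uint8_t *cdb = (uint8_t *)(map + ent->req.cdb_off);

			ent->rsp.scsi_status = handle_cmd(map, cdb,
							  ent->req.iov,
							  ent->req.iov_cnt);
		}
		/* TCMU_OP_PAD entries carry no command and are simply skipped */
		mb->cmd_tail = (mb->cmd_tail + len) % mb->cmdr_size;
	}
	/* After consuming entries, write a 4-byte value to the UIO fd so the
	 * kernel (tcmu_irqcontrol below) reaps the completions. */
}
```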
69 | ||
7c9e7a6f AG |
70 | #define TCMU_TIME_OUT (30 * MSEC_PER_SEC) |
71 | ||
b6df4b79 XL |
72 | /* For the cmd area, the size is fixed at 8MB */
73 | #define CMDR_SIZE (8 * 1024 * 1024) | |
26418649 | 74 | |
b6df4b79 XL |
75 | /* |
76 | * For data area, the block size is PAGE_SIZE and | |
77 | * the total size is 256K * PAGE_SIZE. | |
78 | */ | |
79 | #define DATA_BLOCK_SIZE PAGE_SIZE | |
80 | #define DATA_BLOCK_BITS (256 * 1024) | |
26418649 | 81 | #define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE) |
b6df4b79 | 82 | #define DATA_BLOCK_INIT_BITS 128 |
7c9e7a6f | 83 | |
b6df4b79 | 84 | /* The total size of the ring is 8M + 256K * PAGE_SIZE */ |
7c9e7a6f AG |
85 | #define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE) |
86 | ||
b6df4b79 XL |
87 | /* Default maximum of the global data blocks (512K * PAGE_SIZE) */
88 | #define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024) | |
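For reference, with the common 4 KiB PAGE_SIZE these constants work out to: DATA_SIZE = 256K blocks * 4 KiB = 1 GiB of data-area address space per device, so TCMU_RING_SIZE is 8 MiB of command ring plus 1 GiB of data area in the mmap()ed region. Pages are only allocated on demand, and TCMU_GLOBAL_MAX_BLOCKS caps actual allocations at 512K pages, i.e. 2 GiB shared across all tcmu devices.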
89 | ||
b3af66e2 | 90 | static u8 tcmu_kern_cmd_reply_supported; |
c2a3c5cf | 91 | static u8 tcmu_netlink_blocked; |
b3af66e2 | 92 | |
7c9e7a6f AG |
93 | static struct device *tcmu_root_device; |
94 | ||
95 | struct tcmu_hba { | |
96 | u32 host_id; | |
97 | }; | |
98 | ||
7c9e7a6f AG |
99 | #define TCMU_CONFIG_LEN 256 |
100 | ||
dc8cca4b MC |
101 | static DEFINE_MUTEX(tcmu_nl_cmd_mutex); |
102 | static LIST_HEAD(tcmu_nl_cmd_list); | |
103 | ||
104 | struct tcmu_dev; | |
105 | ||
b3af66e2 MC |
106 | struct tcmu_nl_cmd { |
107 | /* wake up thread waiting for reply */ | |
108 | struct completion complete; | |
dc8cca4b MC |
109 | struct list_head nl_list; |
110 | struct tcmu_dev *udev; | |
b3af66e2 MC |
111 | int cmd; |
112 | int status; | |
113 | }; | |
114 | ||
7c9e7a6f | 115 | struct tcmu_dev { |
b6df4b79 | 116 | struct list_head node; |
f3cdbe39 | 117 | struct kref kref; |
7c9e7a6f AG |
118 | struct se_device se_dev; |
119 | ||
120 | char *name; | |
121 | struct se_hba *hba; | |
122 | ||
123 | #define TCMU_DEV_BIT_OPEN 0 | |
124 | #define TCMU_DEV_BIT_BROKEN 1 | |
125 | unsigned long flags; | |
7c9e7a6f AG |
126 | |
127 | struct uio_info uio_info; | |
128 | ||
b6df4b79 XL |
129 | struct inode *inode; |
130 | ||
7c9e7a6f AG |
131 | struct tcmu_mailbox *mb_addr; |
132 | size_t dev_size; | |
133 | u32 cmdr_size; | |
134 | u32 cmdr_last_cleaned; | |
3d9b9555 | 135 | /* Offset of data area from start of mb */ |
26418649 | 136 | /* Must add data_off and mb_addr to get the address */ |
7c9e7a6f AG |
137 | size_t data_off; |
138 | size_t data_size; | |
26418649 | 139 | |
7c9e7a6f | 140 | wait_queue_head_t wait_cmdr; |
b6df4b79 | 141 | struct mutex cmdr_lock; |
7c9e7a6f | 142 | |
b6df4b79 | 143 | bool waiting_global; |
141685a3 | 144 | uint32_t dbi_max; |
b6df4b79 | 145 | uint32_t dbi_thresh; |
141685a3 XL |
146 | DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS); |
147 | struct radix_tree_root data_blocks; | |
148 | ||
7c9e7a6f AG |
149 | struct idr commands; |
150 | spinlock_t commands_lock; | |
151 | ||
152 | struct timer_list timeout; | |
af980e46 | 153 | unsigned int cmd_time_out; |
7c9e7a6f | 154 | |
b3af66e2 | 155 | struct tcmu_nl_cmd curr_nl_cmd; |
b3af66e2 | 156 | |
7c9e7a6f | 157 | char dev_config[TCMU_CONFIG_LEN]; |
b849b456 KN |
158 | |
159 | int nl_reply_supported; | |
7c9e7a6f AG |
160 | }; |
161 | ||
162 | #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev) | |
163 | ||
164 | #define CMDR_OFF sizeof(struct tcmu_mailbox) | |
165 | ||
166 | struct tcmu_cmd { | |
167 | struct se_cmd *se_cmd; | |
168 | struct tcmu_dev *tcmu_dev; | |
169 | ||
170 | uint16_t cmd_id; | |
171 | ||
26418649 | 172 | /* Can't use se_cmd when cleaning up expired cmds, because if |
7c9e7a6f | 173 | cmd has been completed then accessing se_cmd is off limits */ |
141685a3 XL |
174 | uint32_t dbi_cnt; |
175 | uint32_t dbi_cur; | |
176 | uint32_t *dbi; | |
7c9e7a6f AG |
177 | |
178 | unsigned long deadline; | |
179 | ||
180 | #define TCMU_CMD_BIT_EXPIRED 0 | |
181 | unsigned long flags; | |
182 | }; | |
183 | ||
b6df4b79 XL |
184 | static struct task_struct *unmap_thread; |
185 | static wait_queue_head_t unmap_wait; | |
186 | static DEFINE_MUTEX(root_udev_mutex); | |
187 | static LIST_HEAD(root_udev); | |
188 | ||
189 | static atomic_t global_db_count = ATOMIC_INIT(0); | |
190 | ||
7c9e7a6f AG |
191 | static struct kmem_cache *tcmu_cmd_cache; |
192 | ||
c2a3c5cf MC |
193 | static int tcmu_get_block_netlink(char *buffer, |
194 | const struct kernel_param *kp) | |
195 | { | |
196 | return sprintf(buffer, "%s\n", tcmu_netlink_blocked ? | |
197 | "blocked" : "unblocked"); | |
198 | } | |
199 | ||
200 | static int tcmu_set_block_netlink(const char *str, | |
201 | const struct kernel_param *kp) | |
202 | { | |
203 | int ret; | |
204 | u8 val; | |
205 | ||
206 | ret = kstrtou8(str, 0, &val); | |
207 | if (ret < 0) | |
208 | return ret; | |
209 | ||
210 | if (val > 1) { | |
211 | pr_err("Invalid block netlink value %u\n", val); | |
212 | return -EINVAL; | |
213 | } | |
214 | ||
215 | tcmu_netlink_blocked = val; | |
216 | return 0; | |
217 | } | |
218 | ||
219 | static const struct kernel_param_ops tcmu_block_netlink_op = { | |
220 | .set = tcmu_set_block_netlink, | |
221 | .get = tcmu_get_block_netlink, | |
222 | }; | |
223 | ||
224 | module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO); | |
225 | MODULE_PARM_DESC(block_netlink, "Block new netlink commands."); | |
226 | ||
227 | static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd) | |
228 | { | |
229 | struct tcmu_dev *udev = nl_cmd->udev; | |
230 | ||
231 | if (!tcmu_netlink_blocked) { | |
232 | pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n"); | |
233 | return -EBUSY; | |
234 | } | |
235 | ||
236 | if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { | |
237 | pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name); | |
238 | nl_cmd->status = -EINTR; | |
239 | list_del(&nl_cmd->nl_list); | |
240 | complete(&nl_cmd->complete); | |
241 | } | |
242 | return 0; | |
243 | } | |
244 | ||
245 | static int tcmu_set_reset_netlink(const char *str, | |
246 | const struct kernel_param *kp) | |
247 | { | |
248 | struct tcmu_nl_cmd *nl_cmd, *tmp_cmd; | |
249 | int ret; | |
250 | u8 val; | |
251 | ||
252 | ret = kstrtou8(str, 0, &val); | |
253 | if (ret < 0) | |
254 | return ret; | |
255 | ||
256 | if (val != 1) { | |
257 | pr_err("Invalid reset netlink value %u\n", val); | |
258 | return -EINVAL; | |
259 | } | |
260 | ||
261 | mutex_lock(&tcmu_nl_cmd_mutex); | |
262 | list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) { | |
263 | ret = tcmu_fail_netlink_cmd(nl_cmd); | |
264 | if (ret) | |
265 | break; | |
266 | } | |
267 | mutex_unlock(&tcmu_nl_cmd_mutex); | |
268 | ||
269 | return ret; | |
270 | } | |
271 | ||
272 | static const struct kernel_param_ops tcmu_reset_netlink_op = { | |
273 | .set = tcmu_set_reset_netlink, | |
274 | }; | |
275 | ||
276 | module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR); | |
277 | MODULE_PARM_DESC(reset_netlink, "Reset netlink commands."); | |
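Both knobs are exposed as ordinary module parameters, so with the module loaded they appear under /sys/module/target_core_user/parameters/. A typical recovery sequence when the userspace daemon has died is to write 1 to block_netlink to stop new netlink commands, then write 1 to reset_netlink to abort the commands already waiting for a reply.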
278 | ||
7c9e7a6f AG |
279 | /* multicast group */ |
280 | enum tcmu_multicast_groups { | |
281 | TCMU_MCGRP_CONFIG, | |
282 | }; | |
283 | ||
284 | static const struct genl_multicast_group tcmu_mcgrps[] = { | |
285 | [TCMU_MCGRP_CONFIG] = { .name = "config", }, | |
286 | }; | |
287 | ||
b3af66e2 MC |
288 | static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = { |
289 | [TCMU_ATTR_DEVICE] = { .type = NLA_STRING }, | |
290 | [TCMU_ATTR_MINOR] = { .type = NLA_U32 }, | |
291 | [TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 }, | |
292 | [TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 }, | |
293 | [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 }, | |
294 | }; | |
295 | ||
296 | static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd) | |
297 | { | |
dc8cca4b | 298 | struct tcmu_dev *udev = NULL; |
b3af66e2 MC |
299 | struct tcmu_nl_cmd *nl_cmd; |
300 | int dev_id, rc, ret = 0; | |
b3af66e2 MC |
301 | |
302 | if (!info->attrs[TCMU_ATTR_CMD_STATUS] || | |
303 | !info->attrs[TCMU_ATTR_DEVICE_ID]) { | |
304 | printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n"); | |
305 | return -EINVAL; | |
306 | } | |
307 | ||
308 | dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]); | |
309 | rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]); | |
310 | ||
dc8cca4b MC |
311 | mutex_lock(&tcmu_nl_cmd_mutex); |
312 | list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) { | |
313 | if (nl_cmd->udev->se_dev.dev_index == dev_id) { | |
314 | udev = nl_cmd->udev; | |
315 | break; | |
316 | } | |
b3af66e2 | 317 | } |
b3af66e2 | 318 | |
dc8cca4b MC |
319 | if (!udev) { |
320 | pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n", |
321 | completed_cmd, rc, dev_id); | |
322 | ret = -ENODEV; | |
323 | goto unlock; | |
324 | } | |
325 | list_del(&nl_cmd->nl_list); | |
b3af66e2 | 326 | |
c2a3c5cf MC |
327 | pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n", |
328 | udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc, | |
329 | nl_cmd->status); | |
b3af66e2 MC |
330 | |
331 | if (nl_cmd->cmd != completed_cmd) { | |
dc8cca4b MC |
332 | pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n", |
333 | udev->name, completed_cmd, nl_cmd->cmd); | |
b3af66e2 | 334 | ret = -EINVAL; |
dc8cca4b | 335 | goto unlock; |
b3af66e2 MC |
336 | } |
337 | ||
dc8cca4b MC |
338 | nl_cmd->status = rc; |
339 | complete(&nl_cmd->complete); | |
340 | unlock: | |
341 | mutex_unlock(&tcmu_nl_cmd_mutex); | |
b3af66e2 MC |
342 | return ret; |
343 | } | |
344 | ||
345 | static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info) | |
346 | { | |
347 | return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE); | |
348 | } | |
349 | ||
350 | static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info) | |
351 | { | |
352 | return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE); | |
353 | } | |
354 | ||
355 | static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb, | |
356 | struct genl_info *info) | |
357 | { | |
358 | return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE); | |
359 | } | |
360 | ||
361 | static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info) | |
362 | { | |
363 | if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) { | |
364 | tcmu_kern_cmd_reply_supported = | |
365 | nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]); | |
366 | printk(KERN_INFO "tcmu daemon: command reply support %u.\n", | |
367 | tcmu_kern_cmd_reply_supported); | |
368 | } | |
369 | ||
370 | return 0; | |
371 | } | |
372 | ||
373 | static const struct genl_ops tcmu_genl_ops[] = { | |
374 | { | |
375 | .cmd = TCMU_CMD_SET_FEATURES, | |
376 | .flags = GENL_ADMIN_PERM, | |
377 | .policy = tcmu_attr_policy, | |
378 | .doit = tcmu_genl_set_features, | |
379 | }, | |
380 | { | |
381 | .cmd = TCMU_CMD_ADDED_DEVICE_DONE, | |
382 | .flags = GENL_ADMIN_PERM, | |
383 | .policy = tcmu_attr_policy, | |
384 | .doit = tcmu_genl_add_dev_done, | |
385 | }, | |
386 | { | |
387 | .cmd = TCMU_CMD_REMOVED_DEVICE_DONE, | |
388 | .flags = GENL_ADMIN_PERM, | |
389 | .policy = tcmu_attr_policy, | |
390 | .doit = tcmu_genl_rm_dev_done, | |
391 | }, | |
392 | { | |
393 | .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE, | |
394 | .flags = GENL_ADMIN_PERM, | |
395 | .policy = tcmu_attr_policy, | |
396 | .doit = tcmu_genl_reconfig_dev_done, | |
397 | }, | |
398 | }; | |
399 | ||
7c9e7a6f | 400 | /* Our generic netlink family */ |
56989f6d | 401 | static struct genl_family tcmu_genl_family __ro_after_init = { |
489111e5 | 402 | .module = THIS_MODULE, |
7c9e7a6f AG |
403 | .hdrsize = 0, |
404 | .name = "TCM-USER", | |
b3af66e2 | 405 | .version = 2, |
7c9e7a6f AG |
406 | .maxattr = TCMU_ATTR_MAX, |
407 | .mcgrps = tcmu_mcgrps, | |
408 | .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps), | |
20c08b36 | 409 | .netnsok = true, |
b3af66e2 MC |
410 | .ops = tcmu_genl_ops, |
411 | .n_ops = ARRAY_SIZE(tcmu_genl_ops), | |
7c9e7a6f AG |
412 | }; |
413 | ||
141685a3 XL |
414 | #define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index)) |
415 | #define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0) | |
416 | #define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index)) | |
417 | #define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++]) | |
418 | ||
b6df4b79 | 419 | static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len) |
141685a3 XL |
420 | { |
421 | struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; | |
422 | uint32_t i; | |
423 | ||
b6df4b79 | 424 | for (i = 0; i < len; i++) |
141685a3 XL |
425 | clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap); |
426 | } | |
427 | ||
b6df4b79 XL |
428 | static inline bool tcmu_get_empty_block(struct tcmu_dev *udev, |
429 | struct tcmu_cmd *tcmu_cmd) | |
141685a3 | 430 | { |
b6df4b79 XL |
431 | struct page *page; |
432 | int ret, dbi; | |
141685a3 | 433 | |
b6df4b79 XL |
434 | dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh); |
435 | if (dbi == udev->dbi_thresh) | |
436 | return false; | |
141685a3 | 437 | |
b6df4b79 XL |
438 | page = radix_tree_lookup(&udev->data_blocks, dbi); |
439 | if (!page) { | |
b6df4b79 XL |
440 | if (atomic_add_return(1, &global_db_count) > |
441 | TCMU_GLOBAL_MAX_BLOCKS) { | |
442 | atomic_dec(&global_db_count); | |
443 | return false; | |
141685a3 XL |
444 | } |
445 | ||
b6df4b79 XL |
446 | /* try to get new page from the mm */ |
447 | page = alloc_page(GFP_KERNEL); | |
448 | if (!page) | |
daf78c30 | 449 | goto err_alloc; |
b6df4b79 XL |
450 | |
451 | ret = radix_tree_insert(&udev->data_blocks, dbi, page); | |
daf78c30 XL |
452 | if (ret) |
453 | goto err_insert; | |
141685a3 XL |
454 | } |
455 | ||
b6df4b79 XL |
456 | if (dbi > udev->dbi_max) |
457 | udev->dbi_max = dbi; | |
458 | ||
459 | set_bit(dbi, udev->data_bitmap); | |
460 | tcmu_cmd_set_dbi(tcmu_cmd, dbi); | |
461 | ||
462 | return true; | |
daf78c30 XL |
463 | err_insert: |
464 | __free_page(page); | |
465 | err_alloc: | |
466 | atomic_dec(&global_db_count); | |
467 | return false; | |
141685a3 XL |
468 | } |
469 | ||
b6df4b79 XL |
470 | static bool tcmu_get_empty_blocks(struct tcmu_dev *udev, |
471 | struct tcmu_cmd *tcmu_cmd) | |
472 | { | |
473 | int i; | |
474 | ||
475 | udev->waiting_global = false; | |
476 | ||
477 | for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) { | |
478 | if (!tcmu_get_empty_block(udev, tcmu_cmd)) | |
479 | goto err; | |
480 | } | |
481 | return true; | |
482 | ||
483 | err: | |
484 | udev->waiting_global = true; | |
485 | /* Try to wake up the unmap thread */ | |
486 | wake_up(&unmap_wait); | |
487 | return false; | |
488 | } | |
489 | ||
490 | static inline struct page * | |
491 | tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi) | |
141685a3 XL |
492 | { |
493 | return radix_tree_lookup(&udev->data_blocks, dbi); | |
494 | } | |
495 | ||
496 | static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd) | |
497 | { | |
498 | kfree(tcmu_cmd->dbi); | |
499 | kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); | |
500 | } | |
501 | ||
502 | static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd) | |
503 | { | |
504 | struct se_cmd *se_cmd = tcmu_cmd->se_cmd; | |
505 | size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE); | |
506 | ||
507 | if (se_cmd->se_cmd_flags & SCF_BIDI) { | |
508 | BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents)); | |
509 | data_length += round_up(se_cmd->t_bidi_data_sg->length, | |
510 | DATA_BLOCK_SIZE); | |
511 | } | |
512 | ||
513 | return data_length; | |
514 | } | |
515 | ||
516 | static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd) | |
517 | { | |
518 | size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd); | |
519 | ||
520 | return data_length / DATA_BLOCK_SIZE; | |
521 | } | |
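As a worked example (assuming a 4 KiB DATA_BLOCK_SIZE): a non-BIDI command with data_length = 10 KiB rounds up to 12 KiB, so tcmu_cmd_get_block_cnt() returns dbi_cnt = 3. A BIDI command additionally rounds up the length of its first t_bidi_data_sg segment and adds it, matching how the Data-Out and Data-In blocks are laid out back to back in the data area.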
522 | ||
7c9e7a6f AG |
523 | static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) |
524 | { | |
525 | struct se_device *se_dev = se_cmd->se_dev; | |
526 | struct tcmu_dev *udev = TCMU_DEV(se_dev); | |
527 | struct tcmu_cmd *tcmu_cmd; | |
7c9e7a6f AG |
528 | |
529 | tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL); | |
530 | if (!tcmu_cmd) | |
531 | return NULL; | |
532 | ||
533 | tcmu_cmd->se_cmd = se_cmd; | |
534 | tcmu_cmd->tcmu_dev = udev; | |
7c9e7a6f | 535 | |
141685a3 XL |
536 | tcmu_cmd_reset_dbi_cur(tcmu_cmd); |
537 | tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd); | |
538 | tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t), | |
539 | GFP_KERNEL); | |
540 | if (!tcmu_cmd->dbi) { | |
541 | kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); | |
542 | return NULL; | |
543 | } | |
544 | ||
7c9e7a6f AG |
545 | return tcmu_cmd; |
546 | } | |
547 | ||
548 | static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) | |
549 | { | |
b75d8063 | 550 | unsigned long offset = offset_in_page(vaddr); |
7c9e7a6f AG |
551 | |
552 | size = round_up(size+offset, PAGE_SIZE); | |
553 | vaddr -= offset; | |
554 | ||
555 | while (size) { | |
556 | flush_dcache_page(virt_to_page(vaddr)); | |
557 | size -= PAGE_SIZE; | |
558 | } | |
559 | } | |
560 | ||
561 | /* | |
562 | * Some ring helper functions. We don't assume size is a power of 2 so | |
563 | * we can't use circ_buf.h. | |
564 | */ | |
565 | static inline size_t spc_used(size_t head, size_t tail, size_t size) | |
566 | { | |
567 | int diff = head - tail; | |
568 | ||
569 | if (diff >= 0) | |
570 | return diff; | |
571 | else | |
572 | return size + diff; | |
573 | } | |
574 | ||
575 | static inline size_t spc_free(size_t head, size_t tail, size_t size) | |
576 | { | |
577 | /* Keep 1 byte unused or we can't tell full from empty */ | |
578 | return (size - spc_used(head, tail, size) - 1); | |
579 | } | |
580 | ||
581 | static inline size_t head_to_end(size_t head, size_t size) | |
582 | { | |
583 | return size - head; | |
584 | } | |
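As a worked example of the wraparound arithmetic: with size = 1000, head = 100 and tail = 900 (the head has wrapped past the end), spc_used() returns 1000 + (100 - 900) = 200 bytes in use and spc_free() returns 1000 - 200 - 1 = 799 bytes available; the one reserved byte is what keeps a completely full ring distinguishable from an empty one.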
585 | ||
f1dbd087 SY |
586 | static inline void new_iov(struct iovec **iov, int *iov_cnt, |
587 | struct tcmu_dev *udev) | |
588 | { | |
589 | struct iovec *iovec; | |
590 | ||
591 | if (*iov_cnt != 0) | |
592 | (*iov)++; | |
593 | (*iov_cnt)++; | |
594 | ||
595 | iovec = *iov; | |
596 | memset(iovec, 0, sizeof(struct iovec)); | |
f1dbd087 SY |
597 | } |
598 | ||
7c9e7a6f AG |
599 | #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size) |
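UPDATE_HEAD uses smp_store_release() so that all prior writes to a command (or PAD) entry are ordered before the advanced head value becomes visible; userspace is expected to pair that with an acquire-style read of cmd_head, and the kernel likewise reads the userspace-updated mb->cmd_tail with READ_ONCE() in tcmu_handle_completions().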
600 | ||
26418649 | 601 | /* offset is relative to mb_addr */ |
141685a3 XL |
602 | static inline size_t get_block_offset_user(struct tcmu_dev *dev, |
603 | int dbi, int remaining) | |
26418649 | 604 | { |
141685a3 | 605 | return dev->data_off + dbi * DATA_BLOCK_SIZE + |
26418649 SY |
606 | DATA_BLOCK_SIZE - remaining; |
607 | } | |
608 | ||
daf78c30 | 609 | static inline size_t iov_tail(struct iovec *iov) |
26418649 SY |
610 | { |
611 | return (size_t)iov->iov_base + iov->iov_len; | |
612 | } | |
613 | ||
b6df4b79 | 614 | static int scatter_data_area(struct tcmu_dev *udev, |
141685a3 XL |
615 | struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg, |
616 | unsigned int data_nents, struct iovec **iov, | |
617 | int *iov_cnt, bool copy_data) | |
f97ec7db | 618 | { |
141685a3 | 619 | int i, dbi; |
26418649 | 620 | int block_remaining = 0; |
141685a3 XL |
621 | void *from, *to = NULL; |
622 | size_t copy_bytes, to_offset, offset; | |
f97ec7db | 623 | struct scatterlist *sg; |
b6df4b79 | 624 | struct page *page; |
f97ec7db IT |
625 | |
626 | for_each_sg(data_sg, sg, data_nents, i) { | |
26418649 | 627 | int sg_remaining = sg->length; |
f97ec7db | 628 | from = kmap_atomic(sg_page(sg)) + sg->offset; |
26418649 SY |
629 | while (sg_remaining > 0) { |
630 | if (block_remaining == 0) { | |
b6df4b79 XL |
631 | if (to) |
632 | kunmap_atomic(to); | |
633 | ||
26418649 | 634 | block_remaining = DATA_BLOCK_SIZE; |
b6df4b79 XL |
635 | dbi = tcmu_cmd_get_dbi(tcmu_cmd); |
636 | page = tcmu_get_block_page(udev, dbi); | |
637 | to = kmap_atomic(page); | |
26418649 | 638 | } |
141685a3 | 639 | |
26418649 SY |
640 | copy_bytes = min_t(size_t, sg_remaining, |
641 | block_remaining); | |
141685a3 | 642 | to_offset = get_block_offset_user(udev, dbi, |
26418649 | 643 | block_remaining); |
141685a3 | 644 | |
26418649 | 645 | if (*iov_cnt != 0 && |
daf78c30 | 646 | to_offset == iov_tail(*iov)) { |
26418649 SY |
647 | (*iov)->iov_len += copy_bytes; |
648 | } else { | |
649 | new_iov(iov, iov_cnt, udev); | |
141685a3 | 650 | (*iov)->iov_base = (void __user *)to_offset; |
26418649 SY |
651 | (*iov)->iov_len = copy_bytes; |
652 | } | |
f97ec7db | 653 | if (copy_data) { |
c542942c XL |
654 | offset = DATA_BLOCK_SIZE - block_remaining; |
655 | memcpy(to + offset, | |
656 | from + sg->length - sg_remaining, | |
657 | copy_bytes); | |
f97ec7db IT |
658 | tcmu_flush_dcache_range(to, copy_bytes); |
659 | } | |
26418649 SY |
660 | sg_remaining -= copy_bytes; |
661 | block_remaining -= copy_bytes; | |
f97ec7db | 662 | } |
e2e21bd8 | 663 | kunmap_atomic(from - sg->offset); |
f97ec7db | 664 | } |
b6df4b79 XL |
665 | if (to) |
666 | kunmap_atomic(to); | |
f97ec7db | 667 | |
141685a3 | 668 | return 0; |
0c28481f SY |
669 | } |
670 | ||
a5d68ba8 XL |
671 | static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, |
672 | bool bidi) | |
f97ec7db | 673 | { |
a5d68ba8 | 674 | struct se_cmd *se_cmd = cmd->se_cmd; |
141685a3 | 675 | int i, dbi; |
26418649 | 676 | int block_remaining = 0; |
b6df4b79 | 677 | void *from = NULL, *to; |
141685a3 | 678 | size_t copy_bytes, offset; |
a5d68ba8 | 679 | struct scatterlist *sg, *data_sg; |
b6df4b79 | 680 | struct page *page; |
a5d68ba8 | 681 | unsigned int data_nents; |
141685a3 | 682 | uint32_t count = 0; |
a5d68ba8 XL |
683 | |
684 | if (!bidi) { | |
685 | data_sg = se_cmd->t_data_sg; | |
686 | data_nents = se_cmd->t_data_nents; | |
687 | } else { | |
a5d68ba8 XL |
688 | |
689 | /* | |
690 | * In the bidi case, the first count blocks hold the Data-Out |
691 | * buffer, so they must be skipped before gathering the |
692 | * Data-In buffer. |
693 | */ | |
694 | count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE); | |
a5d68ba8 XL |
695 | |
696 | data_sg = se_cmd->t_bidi_data_sg; | |
697 | data_nents = se_cmd->t_bidi_data_nents; | |
698 | } | |
f97ec7db | 699 | |
141685a3 XL |
700 | tcmu_cmd_set_dbi_cur(cmd, count); |
701 | ||
f97ec7db | 702 | for_each_sg(data_sg, sg, data_nents, i) { |
26418649 | 703 | int sg_remaining = sg->length; |
f97ec7db | 704 | to = kmap_atomic(sg_page(sg)) + sg->offset; |
26418649 SY |
705 | while (sg_remaining > 0) { |
706 | if (block_remaining == 0) { | |
b6df4b79 XL |
707 | if (from) |
708 | kunmap_atomic(from); | |
709 | ||
26418649 | 710 | block_remaining = DATA_BLOCK_SIZE; |
141685a3 | 711 | dbi = tcmu_cmd_get_dbi(cmd); |
b6df4b79 XL |
712 | page = tcmu_get_block_page(udev, dbi); |
713 | from = kmap_atomic(page); | |
26418649 SY |
714 | } |
715 | copy_bytes = min_t(size_t, sg_remaining, | |
716 | block_remaining); | |
141685a3 | 717 | offset = DATA_BLOCK_SIZE - block_remaining; |
f97ec7db | 718 | tcmu_flush_dcache_range(from, copy_bytes); |
c542942c | 719 | memcpy(to + sg->length - sg_remaining, from + offset, |
26418649 | 720 | copy_bytes); |
f97ec7db | 721 | |
26418649 SY |
722 | sg_remaining -= copy_bytes; |
723 | block_remaining -= copy_bytes; | |
f97ec7db | 724 | } |
e2e21bd8 | 725 | kunmap_atomic(to - sg->offset); |
f97ec7db | 726 | } |
b6df4b79 XL |
727 | if (from) |
728 | kunmap_atomic(from); | |
f97ec7db IT |
729 | } |
730 | ||
b6df4b79 | 731 | static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh) |
26418649 | 732 | { |
b6df4b79 | 733 | return DATA_BLOCK_SIZE * (thresh - bitmap_weight(bitmap, thresh)); |
26418649 SY |
734 | } |
735 | ||
7c9e7a6f | 736 | /* |
f97ec7db | 737 | * We can't queue a command until we have space available on the cmd ring *and* |
3d9b9555 | 738 | * space available in the data area. |
7c9e7a6f AG |
739 | * |
740 | * Called with ring lock held. | |
741 | */ | |
b6df4b79 XL |
742 | static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd, |
743 | size_t cmd_size, size_t data_needed) | |
7c9e7a6f AG |
744 | { |
745 | struct tcmu_mailbox *mb = udev->mb_addr; | |
b6df4b79 XL |
746 | uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1) |
747 | / DATA_BLOCK_SIZE; | |
0241fd39 | 748 | size_t space, cmd_needed; |
7c9e7a6f AG |
749 | u32 cmd_head; |
750 | ||
751 | tcmu_flush_dcache_range(mb, sizeof(*mb)); | |
752 | ||
753 | cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ | |
754 | ||
f56574a2 AG |
755 | /* |
756 | * If cmd end-of-ring space is too small then we need space for a PAD plus |
757 | * original cmd - cmds are internally contiguous. | |
758 | */ | |
759 | if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size) | |
760 | cmd_needed = cmd_size; | |
761 | else | |
762 | cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size); | |
763 | ||
7c9e7a6f AG |
764 | space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size); |
765 | if (space < cmd_needed) { | |
766 | pr_debug("no cmd space: %u %u %u\n", cmd_head, | |
767 | udev->cmdr_last_cleaned, udev->cmdr_size); | |
768 | return false; | |
769 | } | |
770 | ||
b6df4b79 XL |
771 | /* try to check and get the data blocks as needed */ |
772 | space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh); | |
7c9e7a6f | 773 | if (space < data_needed) { |
b6df4b79 XL |
774 | unsigned long blocks_left = DATA_BLOCK_BITS - udev->dbi_thresh; |
775 | unsigned long grow; | |
776 | ||
777 | if (blocks_left < blocks_needed) { | |
778 | pr_debug("no data space: only %lu available, but ask for %zu\n", | |
779 | blocks_left * DATA_BLOCK_SIZE, | |
780 | data_needed); | |
781 | return false; | |
782 | } | |
783 | ||
784 | /* Try to expand the thresh */ | |
785 | if (!udev->dbi_thresh) { | |
786 | /* From idle state */ | |
787 | uint32_t init_thresh = DATA_BLOCK_INIT_BITS; | |
788 | ||
789 | udev->dbi_thresh = max(blocks_needed, init_thresh); | |
790 | } else { | |
791 | /* | |
792 | * Grow the data area by max(blocks needed, | |
793 | * dbi_thresh / 2), but limited to the max | |
794 | * DATA_BLOCK_BITS size. | |
795 | */ | |
796 | grow = max(blocks_needed, udev->dbi_thresh / 2); | |
797 | udev->dbi_thresh += grow; | |
798 | if (udev->dbi_thresh > DATA_BLOCK_BITS) | |
799 | udev->dbi_thresh = DATA_BLOCK_BITS; | |
800 | } | |
7c9e7a6f AG |
801 | } |
802 | ||
daf78c30 | 803 | return tcmu_get_empty_blocks(udev, cmd); |
7c9e7a6f AG |
804 | } |
805 | ||
fe25cc34 XL |
806 | static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt) |
807 | { | |
808 | return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]), | |
809 | sizeof(struct tcmu_cmd_entry)); | |
810 | } | |
811 | ||
812 | static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd, | |
813 | size_t base_command_size) | |
814 | { | |
815 | struct se_cmd *se_cmd = tcmu_cmd->se_cmd; | |
816 | size_t command_size; | |
817 | ||
818 | command_size = base_command_size + | |
819 | round_up(scsi_command_size(se_cmd->t_task_cdb), | |
820 | TCMU_OP_ALIGN_SIZE); | |
821 | ||
822 | WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1)); | |
823 | ||
824 | return command_size; | |
825 | } | |
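As a rough sizing example (assuming the usual 8-byte TCMU_OP_ALIGN_SIZE): for a command that ends up needing four iovecs and carries a 10-byte CDB, base_command_size is the larger of the header-plus-four-iovec layout and sizeof(struct tcmu_cmd_entry) (the entry must always be big enough to hold the response and sense data), and the CDB then adds round_up(10, 8) = 16 bytes on top of that.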
826 | ||
0d44374c MC |
827 | static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd) |
828 | { | |
829 | struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; | |
830 | unsigned long tmo = udev->cmd_time_out; | |
831 | int cmd_id; | |
832 | ||
833 | if (tcmu_cmd->cmd_id) | |
834 | return 0; | |
835 | ||
836 | cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT); | |
837 | if (cmd_id < 0) { | |
838 | pr_err("tcmu: Could not allocate cmd id.\n"); | |
839 | return cmd_id; | |
840 | } | |
841 | tcmu_cmd->cmd_id = cmd_id; | |
842 | ||
843 | if (!tmo) | |
844 | return 0; | |
845 | ||
846 | tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); | |
847 | mod_timer(&udev->timeout, tcmu_cmd->deadline); | |
848 | return 0; | |
849 | } | |
850 | ||
02eb924f AG |
851 | static sense_reason_t |
852 | tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) | |
7c9e7a6f AG |
853 | { |
854 | struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; | |
855 | struct se_cmd *se_cmd = tcmu_cmd->se_cmd; | |
856 | size_t base_command_size, command_size; | |
7c9e7a6f | 857 | struct tcmu_mailbox *mb; |
7c9e7a6f | 858 | struct tcmu_cmd_entry *entry; |
7c9e7a6f | 859 | struct iovec *iov; |
141685a3 | 860 | int iov_cnt, ret; |
7c9e7a6f AG |
861 | uint32_t cmd_head; |
862 | uint64_t cdb_off; | |
f97ec7db | 863 | bool copy_to_data_area; |
ab22d260 | 864 | size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd); |
7c9e7a6f AG |
865 | |
866 | if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) | |
02eb924f | 867 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
7c9e7a6f AG |
868 | |
869 | /* | |
870 | * Must be a certain minimum size for response sense info, but | |
871 | * also may be larger if the iov array is large. | |
872 | * | |
fe25cc34 XL |
873 | * We prepare as many iovs as possible for potential uses here, |
874 | * because it's expensive to tell how many regions are freed in | |
875 | * the bitmap & global data pool, as the size calculated here | |
876 | * will only be used to do the checks. | |
877 | * | |
878 | * The size will be recalculated later as actually needed to save | |
879 | * cmd area memory. |
880 | */ | |
881 | base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); | |
882 | command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); | |
7c9e7a6f | 883 | |
b6df4b79 | 884 | mutex_lock(&udev->cmdr_lock); |
7c9e7a6f AG |
885 | |
886 | mb = udev->mb_addr; | |
887 | cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ | |
554617b2 AG |
888 | if ((command_size > (udev->cmdr_size / 2)) || |
889 | data_length > udev->data_size) { | |
890 | pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu " | |
3d9b9555 | 891 | "cmd ring/data area\n", command_size, data_length, |
7c9e7a6f | 892 | udev->cmdr_size, udev->data_size); |
b6df4b79 | 893 | mutex_unlock(&udev->cmdr_lock); |
554617b2 AG |
894 | return TCM_INVALID_CDB_FIELD; |
895 | } | |
7c9e7a6f | 896 | |
b6df4b79 | 897 | while (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) { |
7c9e7a6f AG |
898 | int ret; |
899 | DEFINE_WAIT(__wait); | |
900 | ||
6bc46be4 MC |
901 | /* |
902 | * Don't leave commands partially set up because the unmap |
903 | * thread might need the blocks to make forward progress. | |
904 | */ | |
905 | tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur); | |
906 | tcmu_cmd_reset_dbi_cur(tcmu_cmd); | |
907 | ||
7c9e7a6f AG |
908 | prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE); |
909 | ||
910 | pr_debug("sleeping for ring space\n"); | |
b6df4b79 | 911 | mutex_unlock(&udev->cmdr_lock); |
af980e46 MC |
912 | if (udev->cmd_time_out) |
913 | ret = schedule_timeout( | |
914 | msecs_to_jiffies(udev->cmd_time_out)); | |
915 | else | |
916 | ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); | |
7c9e7a6f AG |
917 | finish_wait(&udev->wait_cmdr, &__wait); |
918 | if (!ret) { | |
919 | pr_warn("tcmu: command timed out\n"); | |
02eb924f | 920 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
7c9e7a6f AG |
921 | } |
922 | ||
b6df4b79 | 923 | mutex_lock(&udev->cmdr_lock); |
7c9e7a6f AG |
924 | |
925 | /* We dropped cmdr_lock, cmd_head is stale */ | |
926 | cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ | |
927 | } | |
928 | ||
f56574a2 AG |
929 | /* Insert a PAD if end-of-ring space is too small */ |
930 | if (head_to_end(cmd_head, udev->cmdr_size) < command_size) { | |
931 | size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); | |
932 | ||
7c9e7a6f | 933 | entry = (void *) mb + CMDR_OFF + cmd_head; |
0ad46af8 AG |
934 | tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD); |
935 | tcmu_hdr_set_len(&entry->hdr.len_op, pad_size); | |
936 | entry->hdr.cmd_id = 0; /* not used for PAD */ | |
937 | entry->hdr.kflags = 0; | |
938 | entry->hdr.uflags = 0; | |
9d62bc0e | 939 | tcmu_flush_dcache_range(entry, sizeof(*entry)); |
7c9e7a6f AG |
940 | |
941 | UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); | |
9d62bc0e | 942 | tcmu_flush_dcache_range(mb, sizeof(*mb)); |
7c9e7a6f AG |
943 | |
944 | cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ | |
945 | WARN_ON(cmd_head != 0); | |
946 | } | |
947 | ||
948 | entry = (void *) mb + CMDR_OFF + cmd_head; | |
b3743c71 | 949 | memset(entry, 0, command_size); |
0ad46af8 | 950 | tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); |
7c9e7a6f | 951 | |
3d9b9555 | 952 | /* Handle allocating space from the data area */ |
b6df4b79 | 953 | tcmu_cmd_reset_dbi_cur(tcmu_cmd); |
7c9e7a6f | 954 | iov = &entry->req.iov[0]; |
f97ec7db | 955 | iov_cnt = 0; |
e4648b01 IT |
956 | copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE |
957 | || se_cmd->se_cmd_flags & SCF_BIDI); | |
b6df4b79 XL |
958 | ret = scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg, |
959 | se_cmd->t_data_nents, &iov, &iov_cnt, | |
960 | copy_to_data_area); | |
141685a3 | 961 | if (ret) { |
b6df4b79 XL |
962 | tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); |
963 | mutex_unlock(&udev->cmdr_lock); | |
964 | ||
141685a3 XL |
965 | pr_err("tcmu: alloc and scatter data failed\n"); |
966 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
967 | } | |
7c9e7a6f AG |
968 | entry->req.iov_cnt = iov_cnt; |
969 | ||
e4648b01 | 970 | /* Handle BIDI commands */ |
b3743c71 | 971 | iov_cnt = 0; |
ab22d260 | 972 | if (se_cmd->se_cmd_flags & SCF_BIDI) { |
ab22d260 | 973 | iov++; |
b6df4b79 | 974 | ret = scatter_data_area(udev, tcmu_cmd, |
141685a3 XL |
975 | se_cmd->t_bidi_data_sg, |
976 | se_cmd->t_bidi_data_nents, | |
977 | &iov, &iov_cnt, false); | |
978 | if (ret) { | |
b6df4b79 XL |
979 | tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); |
980 | mutex_unlock(&udev->cmdr_lock); | |
981 | ||
141685a3 XL |
982 | pr_err("tcmu: alloc and scatter bidi data failed\n"); |
983 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
984 | } | |
ab22d260 | 985 | } |
b3743c71 | 986 | entry->req.iov_bidi_cnt = iov_cnt; |
26418649 | 987 | |
0d44374c MC |
988 | ret = tcmu_setup_cmd_timer(tcmu_cmd); |
989 | if (ret) { | |
990 | tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); | |
97488c73 | 991 | mutex_unlock(&udev->cmdr_lock); |
0d44374c MC |
992 | return TCM_OUT_OF_RESOURCES; |
993 | } | |
994 | entry->hdr.cmd_id = tcmu_cmd->cmd_id; | |
995 | ||
fe25cc34 XL |
996 | /* |
997 | * Recalculate the command's base size and size according |
998 | * to the actual needs | |
999 | */ | |
1000 | base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt + | |
1001 | entry->req.iov_bidi_cnt); | |
1002 | command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); | |
1003 | ||
1004 | tcmu_hdr_set_len(&entry->hdr.len_op, command_size); | |
1005 | ||
7c9e7a6f AG |
1006 | /* All offsets relative to mb_addr, not start of entry! */ |
1007 | cdb_off = CMDR_OFF + cmd_head + base_command_size; | |
1008 | memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); | |
1009 | entry->req.cdb_off = cdb_off; | |
1010 | tcmu_flush_dcache_range(entry, sizeof(*entry)); | |
1011 | ||
1012 | UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); | |
1013 | tcmu_flush_dcache_range(mb, sizeof(*mb)); | |
b6df4b79 | 1014 | mutex_unlock(&udev->cmdr_lock); |
7c9e7a6f AG |
1015 | |
1016 | /* TODO: only if FLUSH and FUA? */ | |
1017 | uio_event_notify(&udev->uio_info); | |
1018 | ||
af980e46 MC |
1019 | if (udev->cmd_time_out) |
1020 | mod_timer(&udev->timeout, round_jiffies_up(jiffies + | |
1021 | msecs_to_jiffies(udev->cmd_time_out))); | |
7c9e7a6f | 1022 | |
02eb924f | 1023 | return TCM_NO_SENSE; |
7c9e7a6f AG |
1024 | } |
1025 | ||
02eb924f AG |
1026 | static sense_reason_t |
1027 | tcmu_queue_cmd(struct se_cmd *se_cmd) | |
7c9e7a6f | 1028 | { |
7c9e7a6f | 1029 | struct tcmu_cmd *tcmu_cmd; |
ecaf597b | 1030 | sense_reason_t ret; |
7c9e7a6f AG |
1031 | |
1032 | tcmu_cmd = tcmu_alloc_cmd(se_cmd); | |
1033 | if (!tcmu_cmd) | |
02eb924f | 1034 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
7c9e7a6f AG |
1035 | |
1036 | ret = tcmu_queue_cmd_ring(tcmu_cmd); | |
02eb924f | 1037 | if (ret != TCM_NO_SENSE) { |
7c9e7a6f | 1038 | pr_err("TCMU: Could not queue command\n"); |
7c9e7a6f | 1039 | |
141685a3 | 1040 | tcmu_free_cmd(tcmu_cmd); |
7c9e7a6f AG |
1041 | } |
1042 | ||
1043 | return ret; | |
1044 | } | |
1045 | ||
1046 | static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry) | |
1047 | { | |
1048 | struct se_cmd *se_cmd = cmd->se_cmd; | |
1049 | struct tcmu_dev *udev = cmd->tcmu_dev; | |
1050 | ||
141685a3 XL |
1051 | /* |
1052 | * cmd has been completed already from timeout, just reclaim | |
1053 | * data area space and free cmd | |
1054 | */ | |
1055 | if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) | |
1056 | goto out; | |
b25c7863 | 1057 | |
141685a3 | 1058 | tcmu_cmd_reset_dbi_cur(cmd); |
7c9e7a6f | 1059 | |
0ad46af8 | 1060 | if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { |
0ad46af8 AG |
1061 | pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n", |
1062 | cmd->se_cmd); | |
ed97d0cd AG |
1063 | entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION; |
1064 | } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { | |
406f74c2 | 1065 | transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer); |
e4648b01 | 1066 | } else if (se_cmd->se_cmd_flags & SCF_BIDI) { |
26418649 | 1067 | /* Get Data-In buffer before clean up */ |
a5d68ba8 | 1068 | gather_data_area(udev, cmd, true); |
e4648b01 | 1069 | } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { |
a5d68ba8 | 1070 | gather_data_area(udev, cmd, false); |
7c9e7a6f | 1071 | } else if (se_cmd->data_direction == DMA_TO_DEVICE) { |
141685a3 | 1072 | /* TODO: */ |
2bc396a2 IT |
1073 | } else if (se_cmd->data_direction != DMA_NONE) { |
1074 | pr_warn("TCMU: data direction was %d!\n", | |
1075 | se_cmd->data_direction); | |
7c9e7a6f AG |
1076 | } |
1077 | ||
1078 | target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); | |
7c9e7a6f | 1079 | |
141685a3 XL |
1080 | out: |
1081 | cmd->se_cmd = NULL; | |
b6df4b79 | 1082 | tcmu_cmd_free_data(cmd, cmd->dbi_cnt); |
141685a3 | 1083 | tcmu_free_cmd(cmd); |
7c9e7a6f AG |
1084 | } |
1085 | ||
1086 | static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | |
1087 | { | |
1088 | struct tcmu_mailbox *mb; | |
7c9e7a6f AG |
1089 | int handled = 0; |
1090 | ||
1091 | if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { | |
1092 | pr_err("ring broken, not handling completions\n"); | |
1093 | return 0; | |
1094 | } | |
1095 | ||
7c9e7a6f AG |
1096 | mb = udev->mb_addr; |
1097 | tcmu_flush_dcache_range(mb, sizeof(*mb)); | |
1098 | ||
6aa7de05 | 1099 | while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { |
7c9e7a6f AG |
1100 | |
1101 | struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; | |
1102 | struct tcmu_cmd *cmd; | |
1103 | ||
1104 | tcmu_flush_dcache_range(entry, sizeof(*entry)); | |
1105 | ||
0ad46af8 AG |
1106 | if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) { |
1107 | UPDATE_HEAD(udev->cmdr_last_cleaned, | |
1108 | tcmu_hdr_get_len(entry->hdr.len_op), | |
1109 | udev->cmdr_size); | |
7c9e7a6f AG |
1110 | continue; |
1111 | } | |
0ad46af8 | 1112 | WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); |
7c9e7a6f AG |
1113 | |
1114 | spin_lock(&udev->commands_lock); | |
d3e709e6 | 1115 | cmd = idr_remove(&udev->commands, entry->hdr.cmd_id); |
7c9e7a6f AG |
1116 | spin_unlock(&udev->commands_lock); |
1117 | ||
1118 | if (!cmd) { | |
1119 | pr_err("cmd_id not found, ring is broken\n"); | |
1120 | set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); | |
1121 | break; | |
1122 | } | |
1123 | ||
1124 | tcmu_handle_completion(cmd, entry); | |
1125 | ||
0ad46af8 AG |
1126 | UPDATE_HEAD(udev->cmdr_last_cleaned, |
1127 | tcmu_hdr_get_len(entry->hdr.len_op), | |
1128 | udev->cmdr_size); | |
7c9e7a6f AG |
1129 | |
1130 | handled++; | |
1131 | } | |
1132 | ||
1133 | if (mb->cmd_tail == mb->cmd_head) | |
1134 | del_timer(&udev->timeout); /* no more pending cmds */ | |
1135 | ||
7c9e7a6f AG |
1136 | wake_up(&udev->wait_cmdr); |
1137 | ||
1138 | return handled; | |
1139 | } | |
1140 | ||
1141 | static int tcmu_check_expired_cmd(int id, void *p, void *data) | |
1142 | { | |
1143 | struct tcmu_cmd *cmd = p; | |
1144 | ||
1145 | if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) | |
1146 | return 0; | |
1147 | ||
611e2267 | 1148 | if (!time_after(jiffies, cmd->deadline)) |
7c9e7a6f AG |
1149 | return 0; |
1150 | ||
1151 | set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); | |
1152 | target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION); | |
1153 | cmd->se_cmd = NULL; | |
1154 | ||
7c9e7a6f AG |
1155 | return 0; |
1156 | } | |
1157 | ||
e99e88a9 | 1158 | static void tcmu_device_timedout(struct timer_list *t) |
7c9e7a6f | 1159 | { |
e99e88a9 | 1160 | struct tcmu_dev *udev = from_timer(udev, t, timeout); |
7c9e7a6f | 1161 | unsigned long flags; |
7c9e7a6f AG |
1162 | |
1163 | spin_lock_irqsave(&udev->commands_lock, flags); | |
1164 | idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); | |
1165 | spin_unlock_irqrestore(&udev->commands_lock, flags); | |
1166 | ||
b6df4b79 XL |
1167 | /* Try to wake up the unmap thread */
1168 | wake_up(&unmap_wait); | |
1169 | ||
7c9e7a6f AG |
1170 | /* |
1171 | * We don't need to wakeup threads on wait_cmdr since they have their | |
1172 | * own timeout. | |
1173 | */ | |
1174 | } | |
1175 | ||
1176 | static int tcmu_attach_hba(struct se_hba *hba, u32 host_id) | |
1177 | { | |
1178 | struct tcmu_hba *tcmu_hba; | |
1179 | ||
1180 | tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL); | |
1181 | if (!tcmu_hba) | |
1182 | return -ENOMEM; | |
1183 | ||
1184 | tcmu_hba->host_id = host_id; | |
1185 | hba->hba_ptr = tcmu_hba; | |
1186 | ||
1187 | return 0; | |
1188 | } | |
1189 | ||
1190 | static void tcmu_detach_hba(struct se_hba *hba) | |
1191 | { | |
1192 | kfree(hba->hba_ptr); | |
1193 | hba->hba_ptr = NULL; | |
1194 | } | |
1195 | ||
1196 | static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) | |
1197 | { | |
1198 | struct tcmu_dev *udev; | |
1199 | ||
1200 | udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); | |
1201 | if (!udev) | |
1202 | return NULL; | |
f3cdbe39 | 1203 | kref_init(&udev->kref); |
7c9e7a6f AG |
1204 | |
1205 | udev->name = kstrdup(name, GFP_KERNEL); | |
1206 | if (!udev->name) { | |
1207 | kfree(udev); | |
1208 | return NULL; | |
1209 | } | |
1210 | ||
1211 | udev->hba = hba; | |
af980e46 | 1212 | udev->cmd_time_out = TCMU_TIME_OUT; |
7c9e7a6f AG |
1213 | |
1214 | init_waitqueue_head(&udev->wait_cmdr); | |
b6df4b79 | 1215 | mutex_init(&udev->cmdr_lock); |
7c9e7a6f AG |
1216 | |
1217 | idr_init(&udev->commands); | |
1218 | spin_lock_init(&udev->commands_lock); | |
1219 | ||
e99e88a9 | 1220 | timer_setup(&udev->timeout, tcmu_device_timedout, 0); |
7c9e7a6f | 1221 | |
c22adc0b XL |
1222 | INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL); |
1223 | ||
7c9e7a6f AG |
1224 | return &udev->se_dev; |
1225 | } | |
1226 | ||
1227 | static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) | |
1228 | { | |
1229 | struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info); | |
1230 | ||
b6df4b79 | 1231 | mutex_lock(&tcmu_dev->cmdr_lock); |
7c9e7a6f | 1232 | tcmu_handle_completions(tcmu_dev); |
b6df4b79 | 1233 | mutex_unlock(&tcmu_dev->cmdr_lock); |
7c9e7a6f AG |
1234 | |
1235 | return 0; | |
1236 | } | |
1237 | ||
1238 | /* | |
1239 | * mmap code from uio.c. Copied here because we want to hook mmap() | |
1240 | * and this stuff must come along. | |
1241 | */ | |
1242 | static int tcmu_find_mem_index(struct vm_area_struct *vma) | |
1243 | { | |
1244 | struct tcmu_dev *udev = vma->vm_private_data; | |
1245 | struct uio_info *info = &udev->uio_info; | |
1246 | ||
1247 | if (vma->vm_pgoff < MAX_UIO_MAPS) { | |
1248 | if (info->mem[vma->vm_pgoff].size == 0) | |
1249 | return -1; | |
1250 | return (int)vma->vm_pgoff; | |
1251 | } | |
1252 | return -1; | |
1253 | } | |
1254 | ||
b6df4b79 XL |
1255 | static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi) |
1256 | { | |
1257 | struct page *page; | |
1258 | int ret; | |
1259 | ||
1260 | mutex_lock(&udev->cmdr_lock); | |
1261 | page = tcmu_get_block_page(udev, dbi); | |
1262 | if (likely(page)) { | |
1263 | mutex_unlock(&udev->cmdr_lock); | |
1264 | return page; | |
1265 | } | |
1266 | ||
1267 | /* | |
1268 | * Normally we should not get here: this happens only when |
1269 | * userspace has touched blocks that are outside the |
1270 | * tcmu_cmd's data iov[], in which case we return a |
1271 | * zeroed page. |
1272 | */ | |
1273 | pr_warn("Block(%u) out of cmd's iov[] has been touched!\n", dbi); | |
1274 | pr_warn("Mostly it will be a bug of userspace, please have a check!\n"); | |
1275 | ||
1276 | if (dbi >= udev->dbi_thresh) { | |
1277 | /* Extend the udev->dbi_thresh to dbi + 1 */ |
1278 | udev->dbi_thresh = dbi + 1; | |
1279 | udev->dbi_max = dbi; | |
1280 | } | |
1281 | ||
1282 | page = radix_tree_lookup(&udev->data_blocks, dbi); | |
1283 | if (!page) { | |
1284 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); | |
1285 | if (!page) { | |
1286 | mutex_unlock(&udev->cmdr_lock); | |
1287 | return NULL; | |
1288 | } | |
1289 | ||
1290 | ret = radix_tree_insert(&udev->data_blocks, dbi, page); | |
1291 | if (ret) { | |
1292 | mutex_unlock(&udev->cmdr_lock); | |
1293 | __free_page(page); | |
1294 | return NULL; | |
1295 | } | |
1296 | ||
1297 | /* | |
1298 | * Since this case is rare in the page fault path, we allow |
1299 | * global_db_count to exceed TCMU_GLOBAL_MAX_BLOCKS here |
1300 | * rather than failing the fault. |
1301 | */ | |
1302 | atomic_inc(&global_db_count); | |
1303 | } | |
1304 | mutex_unlock(&udev->cmdr_lock); | |
1305 | ||
1306 | return page; | |
1307 | } | |
1308 | ||
11bac800 | 1309 | static int tcmu_vma_fault(struct vm_fault *vmf) |
7c9e7a6f | 1310 | { |
11bac800 | 1311 | struct tcmu_dev *udev = vmf->vma->vm_private_data; |
7c9e7a6f AG |
1312 | struct uio_info *info = &udev->uio_info; |
1313 | struct page *page; | |
1314 | unsigned long offset; | |
1315 | void *addr; | |
1316 | ||
11bac800 | 1317 | int mi = tcmu_find_mem_index(vmf->vma); |
7c9e7a6f AG |
1318 | if (mi < 0) |
1319 | return VM_FAULT_SIGBUS; | |
1320 | ||
1321 | /* | |
1322 | * We need to subtract mi because userspace uses offset = N*PAGE_SIZE | |
1323 | * to use mem[N]. | |
1324 | */ | |
1325 | offset = (vmf->pgoff - mi) << PAGE_SHIFT; | |
1326 | ||
141685a3 XL |
1327 | if (offset < udev->data_off) { |
1328 | /* For the vmalloc()ed cmd area pages */ | |
1329 | addr = (void *)(unsigned long)info->mem[mi].addr + offset; | |
7c9e7a6f | 1330 | page = vmalloc_to_page(addr); |
141685a3 | 1331 | } else { |
141685a3 XL |
1332 | uint32_t dbi; |
1333 | ||
b6df4b79 | 1334 | /* For the dynamically growing data area pages */ |
141685a3 | 1335 | dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE; |
b6df4b79 XL |
1336 | page = tcmu_try_get_block_page(udev, dbi); |
1337 | if (!page) | |
141685a3 | 1338 | return VM_FAULT_NOPAGE; |
141685a3 XL |
1339 | } |
1340 | ||
7c9e7a6f AG |
1341 | get_page(page); |
1342 | vmf->page = page; | |
1343 | return 0; | |
1344 | } | |
1345 | ||
1346 | static const struct vm_operations_struct tcmu_vm_ops = { | |
1347 | .fault = tcmu_vma_fault, | |
1348 | }; | |
1349 | ||
1350 | static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) | |
1351 | { | |
1352 | struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); | |
1353 | ||
1354 | vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; | |
1355 | vma->vm_ops = &tcmu_vm_ops; | |
1356 | ||
1357 | vma->vm_private_data = udev; | |
1358 | ||
1359 | /* Ensure the mmap is exactly the right size */ | |
1360 | if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT)) | |
1361 | return -EINVAL; | |
1362 | ||
1363 | return 0; | |
1364 | } | |
1365 | ||
1366 | static int tcmu_open(struct uio_info *info, struct inode *inode) | |
1367 | { | |
1368 | struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); | |
1369 | ||
1370 | /* O_EXCL not supported for char devs, so fake it? */ | |
1371 | if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags)) | |
1372 | return -EBUSY; | |
1373 | ||
b6df4b79 | 1374 | udev->inode = inode; |
9260695d | 1375 | kref_get(&udev->kref); |
b6df4b79 | 1376 | |
7c9e7a6f AG |
1377 | pr_debug("open\n"); |
1378 | ||
1379 | return 0; | |
1380 | } | |
1381 | ||
f3cdbe39 MC |
1382 | static void tcmu_dev_call_rcu(struct rcu_head *p) |
1383 | { | |
1384 | struct se_device *dev = container_of(p, struct se_device, rcu_head); | |
1385 | struct tcmu_dev *udev = TCMU_DEV(dev); | |
1386 | ||
1387 | kfree(udev->uio_info.name); | |
1388 | kfree(udev->name); | |
1389 | kfree(udev); | |
1390 | } | |
1391 | ||
c22adc0b XL |
1392 | static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd) |
1393 | { | |
1394 | if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { | |
1395 | kmem_cache_free(tcmu_cmd_cache, cmd); | |
1396 | return 0; | |
1397 | } | |
1398 | return -EINVAL; | |
1399 | } | |
1400 | ||
1401 | static void tcmu_blocks_release(struct tcmu_dev *udev) | |
1402 | { | |
1403 | int i; | |
1404 | struct page *page; | |
1405 | ||
1406 | /* Try to release all block pages */ | |
1407 | mutex_lock(&udev->cmdr_lock); | |
1408 | for (i = 0; i <= udev->dbi_max; i++) { | |
1409 | page = radix_tree_delete(&udev->data_blocks, i); | |
1410 | if (page) { | |
1411 | __free_page(page); | |
1412 | atomic_dec(&global_db_count); | |
1413 | } | |
1414 | } | |
1415 | mutex_unlock(&udev->cmdr_lock); | |
1416 | } | |
1417 | ||
f3cdbe39 MC |
1418 | static void tcmu_dev_kref_release(struct kref *kref) |
1419 | { | |
1420 | struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); | |
1421 | struct se_device *dev = &udev->se_dev; | |
c22adc0b XL |
1422 | struct tcmu_cmd *cmd; |
1423 | bool all_expired = true; | |
1424 | int i; | |
1425 | ||
1426 | vfree(udev->mb_addr); | |
1427 | udev->mb_addr = NULL; | |
1428 | ||
1429 | /* Upper layer should drain all requests before calling this */ | |
1430 | spin_lock_irq(&udev->commands_lock); | |
1431 | idr_for_each_entry(&udev->commands, cmd, i) { | |
1432 | if (tcmu_check_and_free_pending_cmd(cmd) != 0) | |
1433 | all_expired = false; | |
1434 | } | |
1435 | idr_destroy(&udev->commands); | |
1436 | spin_unlock_irq(&udev->commands_lock); | |
1437 | WARN_ON(!all_expired); | |
1438 | ||
1439 | tcmu_blocks_release(udev); | |
f3cdbe39 MC |
1440 | |
1441 | call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); | |
1442 | } | |
1443 | ||
7c9e7a6f AG |
1444 | static int tcmu_release(struct uio_info *info, struct inode *inode) |
1445 | { | |
1446 | struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); | |
1447 | ||
1448 | clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); | |
1449 | ||
1450 | pr_debug("close\n"); | |
9260695d | 1451 | /* release ref from open */ |
f3cdbe39 | 1452 | kref_put(&udev->kref, tcmu_dev_kref_release); |
7c9e7a6f AG |
1453 | return 0; |
1454 | } | |
1455 | ||
11406be9 | 1456 | static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) |
b3af66e2 MC |
1457 | { |
1458 | struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; | |
1459 | ||
1460 | if (!tcmu_kern_cmd_reply_supported) | |
11406be9 | 1461 | return 0; |
b849b456 KN |
1462 | |
1463 | if (udev->nl_reply_supported <= 0) | |
11406be9 | 1464 | return 0; |
b849b456 | 1465 | |
dc8cca4b | 1466 | mutex_lock(&tcmu_nl_cmd_mutex); |
b3af66e2 | 1467 | |
c2a3c5cf MC |
1468 | if (tcmu_netlink_blocked) { |
1469 | mutex_unlock(&tcmu_nl_cmd_mutex); | |
1470 | pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd, | |
1471 | udev->name); | |
1472 | return -EAGAIN; | |
1473 | } | |
1474 | ||
b3af66e2 | 1475 | if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { |
dc8cca4b | 1476 | mutex_unlock(&tcmu_nl_cmd_mutex); |
11406be9 MC |
1477 | pr_warn("netlink cmd %d already executing on %s\n", |
1478 | nl_cmd->cmd, udev->name); | |
1479 | return -EBUSY; | |
b3af66e2 MC |
1480 | } |
1481 | ||
1482 | memset(nl_cmd, 0, sizeof(*nl_cmd)); | |
1483 | nl_cmd->cmd = cmd; | |
dc8cca4b | 1484 | nl_cmd->udev = udev; |
b3af66e2 | 1485 | init_completion(&nl_cmd->complete); |
dc8cca4b MC |
1486 | INIT_LIST_HEAD(&nl_cmd->nl_list); |
1487 | ||
1488 | list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list); | |
b3af66e2 | 1489 | |
dc8cca4b | 1490 | mutex_unlock(&tcmu_nl_cmd_mutex); |
11406be9 | 1491 | return 0; |
b3af66e2 MC |
1492 | } |
1493 | ||
1494 | static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) | |
1495 | { | |
1496 | struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; | |
1497 | int ret; | |
b3af66e2 MC |
1498 | |
1499 | if (!tcmu_kern_cmd_reply_supported) | |
1500 | return 0; | |
1501 | ||
b849b456 KN |
1502 | if (udev->nl_reply_supported <= 0) |
1503 | return 0; | |
1504 | ||
b3af66e2 MC |
1505 | pr_debug("sleeping for nl reply\n"); |
1506 | wait_for_completion(&nl_cmd->complete); | |
1507 | ||
dc8cca4b | 1508 | mutex_lock(&tcmu_nl_cmd_mutex); |
b3af66e2 MC |
1509 | nl_cmd->cmd = TCMU_CMD_UNSPEC; |
1510 | ret = nl_cmd->status; | |
dc8cca4b | 1511 | mutex_unlock(&tcmu_nl_cmd_mutex); |
b3af66e2 | 1512 | |
11406be9 | 1513 | return ret; |
b3af66e2 MC |
1514 | } |
1515 | ||
1516 | static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd, | |
1517 | int reconfig_attr, const void *reconfig_data) | |
7c9e7a6f AG |
1518 | { |
1519 | struct sk_buff *skb; | |
1520 | void *msg_header; | |
6e14eab9 | 1521 | int ret = -ENOMEM; |
7c9e7a6f AG |
1522 | |
1523 | skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | |
1524 | if (!skb) | |
6e14eab9 | 1525 | return ret; |
7c9e7a6f AG |
1526 | |
1527 | msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd); | |
6e14eab9 NB |
1528 | if (!msg_header) |
1529 | goto free_skb; | |
7c9e7a6f | 1530 | |
b3af66e2 | 1531 | ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name); |
6e14eab9 NB |
1532 | if (ret < 0) |
1533 | goto free_skb; | |
7c9e7a6f | 1534 | |
b3af66e2 MC |
1535 | ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor); |
1536 | if (ret < 0) | |
1537 | goto free_skb; | |
1538 | ||
1539 | ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index); | |
6e14eab9 NB |
1540 | if (ret < 0) |
1541 | goto free_skb; | |
7c9e7a6f | 1542 | |
2d76443e MC |
1543 | if (cmd == TCMU_CMD_RECONFIG_DEVICE) { |
1544 | switch (reconfig_attr) { | |
1545 | case TCMU_ATTR_DEV_CFG: | |
1546 | ret = nla_put_string(skb, reconfig_attr, reconfig_data); | |
1547 | break; | |
1548 | case TCMU_ATTR_DEV_SIZE: | |
1549 | ret = nla_put_u64_64bit(skb, reconfig_attr, | |
1550 | *((u64 *)reconfig_data), | |
1551 | TCMU_ATTR_PAD); | |
1552 | break; | |
1553 | case TCMU_ATTR_WRITECACHE: | |
1554 | ret = nla_put_u8(skb, reconfig_attr, | |
1555 | *((u8 *)reconfig_data)); | |
1556 | break; | |
1557 | default: | |
1558 | BUG(); | |
1559 | } | |
1560 | ||
1561 | if (ret < 0) | |
1562 | goto free_skb; | |
1563 | } | |
8a45885c | 1564 | |
053c095a | 1565 | genlmsg_end(skb, msg_header); |
7c9e7a6f | 1566 | |
11406be9 MC |
1567 | ret = tcmu_init_genl_cmd_reply(udev, cmd); |
1568 | if (ret) { | |
1569 | nlmsg_free(skb); | |
1570 | return ret; | |
1571 | } | |
b3af66e2 | 1572 | |
20c08b36 | 1573 | ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0, |
7c9e7a6f | 1574 | TCMU_MCGRP_CONFIG, GFP_KERNEL); |
7c9e7a6f AG |
1575 | /* We don't care if no one is listening */ |
1576 | if (ret == -ESRCH) | |
1577 | ret = 0; | |
b3af66e2 MC |
1578 | if (!ret) |
1579 | ret = tcmu_wait_genl_cmd_reply(udev); | |
7c9e7a6f AG |
1580 | |
1581 | return ret; | |
6e14eab9 NB |
1582 | free_skb: |
1583 | nlmsg_free(skb); | |
1584 | return ret; | |
7c9e7a6f AG |
1585 | } |
1586 | ||
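/* Rebuild info->name as "tcm-user/<host_id>/<name>[/<dev_config>]". */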
de8c5221 | 1587 | static int tcmu_update_uio_info(struct tcmu_dev *udev) |
7c9e7a6f | 1588 | { |
7c9e7a6f AG |
1589 | struct tcmu_hba *hba = udev->hba->hba_ptr; |
1590 | struct uio_info *info; | |
de8c5221 | 1591 | size_t size, used; |
7c9e7a6f AG |
1592 | char *str; |
1593 | ||
1594 | info = &udev->uio_info; | |
7c9e7a6f AG |
1595 | size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name, |
1596 | udev->dev_config); | |
1597 | size += 1; /* for \0 */ | |
1598 | str = kmalloc(size, GFP_KERNEL); | |
1599 | if (!str) | |
1600 | return -ENOMEM; | |
1601 | ||
1602 | used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name); | |
7c9e7a6f AG |
1603 | if (udev->dev_config[0]) |
1604 | snprintf(str + used, size - used, "/%s", udev->dev_config); | |
1605 | ||
ededd039 BL |
1606 | /* If the old string exists, free it */ |
1607 | kfree(info->name); | |
7c9e7a6f AG |
1608 | info->name = str; |
1609 | ||
de8c5221 BL |
1610 | return 0; |
1611 | } | |
1612 | ||
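/*
 * Allocate the mailbox and command ring, fill in the uio_info
 * callbacks, register the UIO device and announce it to userspace
 * with a TCMU_CMD_ADDED_DEVICE netlink event.
 */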
1613 | static int tcmu_configure_device(struct se_device *dev) | |
1614 | { | |
1615 | struct tcmu_dev *udev = TCMU_DEV(dev); | |
1616 | struct uio_info *info; | |
1617 | struct tcmu_mailbox *mb; | |
1618 | int ret = 0; | |
1619 | ||
1620 | ret = tcmu_update_uio_info(udev); | |
1621 | if (ret) | |
1622 | return ret; | |
1623 | ||
1624 | info = &udev->uio_info; | |
1625 | ||
141685a3 | 1626 | udev->mb_addr = vzalloc(CMDR_SIZE); |
7c9e7a6f AG |
1627 | if (!udev->mb_addr) { |
1628 | ret = -ENOMEM; | |
1629 | goto err_vzalloc; | |
1630 | } | |
1631 | ||
1632 | /* mailbox fits in first part of CMDR space */ | |
1633 | udev->cmdr_size = CMDR_SIZE - CMDR_OFF; | |
1634 | udev->data_off = CMDR_SIZE; | |
141685a3 | 1635 | udev->data_size = DATA_SIZE; |
b6df4b79 XL |
1636 | udev->dbi_thresh = 0; /* Device starts in the idle state */ | |
1637 | udev->waiting_global = false; | |
7c9e7a6f | 1638 | |
141685a3 | 1639 | /* Initialise the mailbox of the ring buffer */ |
7c9e7a6f | 1640 | mb = udev->mb_addr; |
0ad46af8 | 1641 | mb->version = TCMU_MAILBOX_VERSION; |
32c76de3 | 1642 | mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC; |
7c9e7a6f AG |
1643 | mb->cmdr_off = CMDR_OFF; |
1644 | mb->cmdr_size = udev->cmdr_size; | |
1645 | ||
1646 | WARN_ON(!PAGE_ALIGNED(udev->data_off)); | |
1647 | WARN_ON(udev->data_size % PAGE_SIZE); | |
26418649 | 1648 | WARN_ON(udev->data_size % DATA_BLOCK_SIZE); |
7c9e7a6f | 1649 | |
ac64a2ce | 1650 | info->version = __stringify(TCMU_MAILBOX_VERSION); |
7c9e7a6f AG |
1651 | |
1652 | info->mem[0].name = "tcm-user command & data buffer"; | |
0633e123 | 1653 | info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr; |
7c9e7a6f | 1654 | info->mem[0].size = TCMU_RING_SIZE; |
141685a3 | 1655 | info->mem[0].memtype = UIO_MEM_NONE; |
7c9e7a6f AG |
1656 | |
1657 | info->irqcontrol = tcmu_irqcontrol; | |
1658 | info->irq = UIO_IRQ_CUSTOM; | |
1659 | ||
1660 | info->mmap = tcmu_mmap; | |
1661 | info->open = tcmu_open; | |
1662 | info->release = tcmu_release; | |
1663 | ||
1664 | ret = uio_register_device(tcmu_root_device, info); | |
1665 | if (ret) | |
1666 | goto err_register; | |
1667 | ||
81ee28de SY |
1668 | /* User can set hw_block_size before enabling the device */
1669 | if (dev->dev_attrib.hw_block_size == 0) | |
1670 | dev->dev_attrib.hw_block_size = 512; | |
7c9e7a6f | 1671 | /* Other attributes can be configured in userspace */ |
3abaa2bf MC |
1672 | if (!dev->dev_attrib.hw_max_sectors) |
1673 | dev->dev_attrib.hw_max_sectors = 128; | |
9a8bb606 BL |
1674 | if (!dev->dev_attrib.emulate_write_cache) |
1675 | dev->dev_attrib.emulate_write_cache = 0; | |
7c9e7a6f AG |
1676 | dev->dev_attrib.hw_queue_depth = 128; |
1677 | ||
b849b456 KN |
1678 | /* If user didn't explicitly disable netlink reply support, use |
1679 | * module scope setting. | |
1680 | */ | |
1681 | if (udev->nl_reply_supported >= 0) | |
1682 | udev->nl_reply_supported = tcmu_kern_cmd_reply_supported; | |
1683 | ||
f3cdbe39 MC |
1684 | /* |
1685 | * Get a ref in case userspace does a close on the uio device before
1686 | * LIO has initiated tcmu_free_device. | |
1687 | */ | |
1688 | kref_get(&udev->kref); | |
1689 | ||
b3af66e2 | 1690 | ret = tcmu_netlink_event(udev, TCMU_CMD_ADDED_DEVICE, 0, NULL); |
7c9e7a6f AG |
1691 | if (ret) |
1692 | goto err_netlink; | |
1693 | ||
b6df4b79 XL |
1694 | mutex_lock(&root_udev_mutex); |
1695 | list_add(&udev->node, &root_udev); | |
1696 | mutex_unlock(&root_udev_mutex); | |
1697 | ||
7c9e7a6f AG |
1698 | return 0; |
1699 | ||
1700 | err_netlink: | |
f3cdbe39 | 1701 | kref_put(&udev->kref, tcmu_dev_kref_release); |
7c9e7a6f AG |
1702 | uio_unregister_device(&udev->uio_info); |
1703 | err_register: | |
1704 | vfree(udev->mb_addr); | |
c22adc0b | 1705 | udev->mb_addr = NULL; |
7c9e7a6f AG |
1706 | err_vzalloc: |
1707 | kfree(info->name); | |
f3cdbe39 | 1708 | info->name = NULL; |
7c9e7a6f AG |
1709 | |
1710 | return ret; | |
1711 | } | |
1712 | ||
972c7f16 MC |
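/* A device counts as configured once its UIO device has been registered. */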
1713 | static bool tcmu_dev_configured(struct tcmu_dev *udev) |
1714 | { | |
1715 | return udev->uio_info.uio_dev != NULL; | |
1716 | } | |
1717 | ||
7c9e7a6f | 1718 | static void tcmu_free_device(struct se_device *dev) |
92634706 MC |
1719 | { |
1720 | struct tcmu_dev *udev = TCMU_DEV(dev); | |
1721 | ||
1722 | /* release ref from init */ | |
1723 | kref_put(&udev->kref, tcmu_dev_kref_release); | |
1724 | } | |
1725 | ||
1726 | static void tcmu_destroy_device(struct se_device *dev) | |
7c9e7a6f AG |
1727 | { |
1728 | struct tcmu_dev *udev = TCMU_DEV(dev); | |
7c9e7a6f AG |
1729 | |
1730 | del_timer_sync(&udev->timeout); | |
1731 | ||
b6df4b79 XL |
1732 | mutex_lock(&root_udev_mutex); |
1733 | list_del(&udev->node); | |
1734 | mutex_unlock(&root_udev_mutex); | |
1735 | ||
531283ff | 1736 | tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL); |
7c9e7a6f | 1737 | |
531283ff | 1738 | uio_unregister_device(&udev->uio_info); |
9260695d MC |
1739 | |
1740 | /* release ref from configure */ | |
1741 | kref_put(&udev->kref, tcmu_dev_kref_release); | |
7c9e7a6f AG |
1742 | } |
1743 | ||
1744 | enum { | |
3abaa2bf | 1745 | Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors, |
b849b456 | 1746 | Opt_nl_reply_supported, Opt_err, |
7c9e7a6f AG |
1747 | }; |
1748 | ||
1749 | static match_table_t tokens = { | |
1750 | {Opt_dev_config, "dev_config=%s"}, | |
1751 | {Opt_dev_size, "dev_size=%u"}, | |
9c1cd1b6 | 1752 | {Opt_hw_block_size, "hw_block_size=%u"}, |
3abaa2bf | 1753 | {Opt_hw_max_sectors, "hw_max_sectors=%u"}, |
b849b456 | 1754 | {Opt_nl_reply_supported, "nl_reply_supported=%d"}, |
7c9e7a6f AG |
1755 | {Opt_err, NULL} |
1756 | }; | |
1757 | ||
3abaa2bf MC |
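/* Parse a nonzero integer value from a configfs option token into a u32 attribute. */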
1758 | static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib) |
1759 | { | |
1760 | unsigned long tmp_ul; | |
1761 | char *arg_p; | |
1762 | int ret; | |
1763 | ||
1764 | arg_p = match_strdup(arg); | |
1765 | if (!arg_p) | |
1766 | return -ENOMEM; | |
1767 | ||
1768 | ret = kstrtoul(arg_p, 0, &tmp_ul); | |
1769 | kfree(arg_p); | |
1770 | if (ret < 0) { | |
1771 | pr_err("kstrtoul() failed for dev attrib\n"); | |
1772 | return ret; | |
1773 | } | |
1774 | if (!tmp_ul) { | |
1775 | pr_err("dev attrib must be nonzero\n"); | |
1776 | return -EINVAL; | |
1777 | } | |
1778 | *dev_attrib = tmp_ul; | |
1779 | return 0; | |
1780 | } | |
1781 | ||
7c9e7a6f AG |
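/*
 * Parse the comma- or newline-separated dev_config, dev_size,
 * hw_block_size, hw_max_sectors and nl_reply_supported options
 * written through configfs.
 */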
1782 | static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, |
1783 | const char *page, ssize_t count) | |
1784 | { | |
1785 | struct tcmu_dev *udev = TCMU_DEV(dev); | |
1786 | char *orig, *ptr, *opts, *arg_p; | |
1787 | substring_t args[MAX_OPT_ARGS]; | |
1788 | int ret = 0, token; | |
7c9e7a6f AG |
1789 | |
1790 | opts = kstrdup(page, GFP_KERNEL); | |
1791 | if (!opts) | |
1792 | return -ENOMEM; | |
1793 | ||
1794 | orig = opts; | |
1795 | ||
1796 | while ((ptr = strsep(&opts, ",\n")) != NULL) { | |
1797 | if (!*ptr) | |
1798 | continue; | |
1799 | ||
1800 | token = match_token(ptr, tokens, args); | |
1801 | switch (token) { | |
1802 | case Opt_dev_config: | |
1803 | if (match_strlcpy(udev->dev_config, &args[0], | |
1804 | TCMU_CONFIG_LEN) == 0) { | |
1805 | ret = -EINVAL; | |
1806 | break; | |
1807 | } | |
1808 | pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config); | |
1809 | break; | |
1810 | case Opt_dev_size: | |
1811 | arg_p = match_strdup(&args[0]); | |
1812 | if (!arg_p) { | |
1813 | ret = -ENOMEM; | |
1814 | break; | |
1815 | } | |
1816 | ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size); | |
1817 | kfree(arg_p); | |
1818 | if (ret < 0) | |
1819 | pr_err("kstrtoul() failed for dev_size=\n"); | |
1820 | break; | |
9c1cd1b6 | 1821 | case Opt_hw_block_size: |
3abaa2bf MC |
1822 | ret = tcmu_set_dev_attrib(&args[0], |
1823 | &(dev->dev_attrib.hw_block_size)); | |
1824 | break; | |
1825 | case Opt_hw_max_sectors: | |
1826 | ret = tcmu_set_dev_attrib(&args[0], | |
1827 | &(dev->dev_attrib.hw_max_sectors)); | |
9c1cd1b6 | 1828 | break; |
b849b456 KN |
1829 | case Opt_nl_reply_supported: |
1830 | arg_p = match_strdup(&args[0]); | |
1831 | if (!arg_p) { | |
1832 | ret = -ENOMEM; | |
1833 | break; | |
1834 | } | |
16b93277 | 1835 | ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported); |
b849b456 KN |
1836 | kfree(arg_p); |
1837 | if (ret < 0) | |
16b93277 | 1838 | pr_err("kstrtoint() failed for nl_reply_supported=\n"); |
b849b456 | 1839 | break; |
7c9e7a6f AG |
1840 | default: |
1841 | break; | |
1842 | } | |
2579325c MC |
1843 | |
1844 | if (ret) | |
1845 | break; | |
7c9e7a6f AG |
1846 | } |
1847 | ||
1848 | kfree(orig); | |
1849 | return (!ret) ? count : ret; | |
1850 | } | |
1851 | ||
1852 | static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) | |
1853 | { | |
1854 | struct tcmu_dev *udev = TCMU_DEV(dev); | |
1855 | ssize_t bl = 0; | |
1856 | ||
1857 | bl = sprintf(b + bl, "Config: %s ", | |
1858 | udev->dev_config[0] ? udev->dev_config : "NULL"); | |
7d7a7435 | 1859 | bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size); |
7c9e7a6f AG |
1860 | |
1861 | return bl; | |
1862 | } | |
1863 | ||
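/*
 * Subtracting one block before dividing yields the last addressable
 * LBA, which is what the target core expects from get_blocks().
 */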
1864 | static sector_t tcmu_get_blocks(struct se_device *dev) | |
1865 | { | |
1866 | struct tcmu_dev *udev = TCMU_DEV(dev); | |
1867 | ||
1868 | return div_u64(udev->dev_size - dev->dev_attrib.block_size, | |
1869 | dev->dev_attrib.block_size); | |
1870 | } | |
1871 | ||
7c9e7a6f | 1872 | static sense_reason_t |
9c1cd1b6 | 1873 | tcmu_parse_cdb(struct se_cmd *cmd) |
7c9e7a6f | 1874 | { |
02eb924f | 1875 | return passthrough_parse_cdb(cmd, tcmu_queue_cmd); |
7c9e7a6f AG |
1876 | } |
1877 | ||
7d7a7435 NB |
1878 | static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page) |
1879 | { | |
1880 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
1881 | struct se_dev_attrib, da_group); | |
b5ab697c | 1882 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); |
7d7a7435 NB |
1883 | |
1884 | return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); | |
1885 | } | |
1886 | ||
1887 | static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page, | |
1888 | size_t count) | |
1889 | { | |
1890 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
1891 | struct se_dev_attrib, da_group); | |
1892 | struct tcmu_dev *udev = container_of(da->da_dev, | |
1893 | struct tcmu_dev, se_dev); | |
1894 | u32 val; | |
1895 | int ret; | |
1896 | ||
1897 | if (da->da_dev->export_count) { | |
1898 | pr_err("Unable to set tcmu cmd_time_out while exports exist\n"); | |
1899 | return -EINVAL; | |
1900 | } | |
1901 | ||
1902 | ret = kstrtou32(page, 0, &val); | |
1903 | if (ret < 0) | |
1904 | return ret; | |
1905 | ||
7d7a7435 NB |
1906 | udev->cmd_time_out = val * MSEC_PER_SEC; |
1907 | return count; | |
1908 | } | |
1909 | CONFIGFS_ATTR(tcmu_, cmd_time_out); | |
1910 | ||
2d76443e | 1911 | static ssize_t tcmu_dev_config_show(struct config_item *item, char *page) |
ee018252 BL |
1912 | { |
1913 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
1914 | struct se_dev_attrib, da_group); | |
1915 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); | |
1916 | ||
1917 | return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config); | |
1918 | } | |
1919 | ||
2d76443e MC |
1920 | static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page, |
1921 | size_t count) | |
ee018252 BL |
1922 | { |
1923 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
1924 | struct se_dev_attrib, da_group); | |
1925 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); | |
2d76443e | 1926 | int ret, len; |
ee018252 | 1927 | |
2d76443e MC |
1928 | len = strlen(page); |
1929 | if (!len || len > TCMU_CONFIG_LEN - 1) | |
ee018252 | 1930 | return -EINVAL; |
ee018252 BL |
1931 | |
1932 | /* Check if device has been configured before */ | |
1933 | if (tcmu_dev_configured(udev)) { | |
b3af66e2 | 1934 | ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE, |
2d76443e | 1935 | TCMU_ATTR_DEV_CFG, page); |
ee018252 BL |
1936 | if (ret) { |
1937 | pr_err("Unable to reconfigure device\n"); | |
1938 | return ret; | |
1939 | } | |
de8c5221 BL |
1940 | strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); |
1941 | ||
1942 | ret = tcmu_update_uio_info(udev); | |
1943 | if (ret) | |
1944 | return ret; | |
1945 | return count; | |
ee018252 | 1946 | } |
2d76443e | 1947 | strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); |
ee018252 BL |
1948 | |
1949 | return count; | |
1950 | } | |
2d76443e | 1951 | CONFIGFS_ATTR(tcmu_, dev_config); |
ee018252 | 1952 | |
801fc54d BL |
1953 | static ssize_t tcmu_dev_size_show(struct config_item *item, char *page) |
1954 | { | |
1955 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
1956 | struct se_dev_attrib, da_group); | |
1957 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); | |
1958 | ||
1959 | return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size); | |
1960 | } | |
1961 | ||
1962 | static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page, | |
1963 | size_t count) | |
1964 | { | |
1965 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
1966 | struct se_dev_attrib, da_group); | |
1967 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); | |
2d76443e | 1968 | u64 val; |
801fc54d BL |
1969 | int ret; |
1970 | ||
2d76443e | 1971 | ret = kstrtou64(page, 0, &val); |
801fc54d BL |
1972 | if (ret < 0) |
1973 | return ret; | |
801fc54d BL |
1974 | |
1975 | /* Check if device has been configured before */ | |
1976 | if (tcmu_dev_configured(udev)) { | |
b3af66e2 | 1977 | ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE, |
2d76443e | 1978 | TCMU_ATTR_DEV_SIZE, &val); |
801fc54d BL |
1979 | if (ret) { |
1980 | pr_err("Unable to reconfigure device\n"); | |
1981 | return ret; | |
1982 | } | |
1983 | } | |
2d76443e | 1984 | udev->dev_size = val; |
801fc54d BL |
1985 | return count; |
1986 | } | |
1987 | CONFIGFS_ATTR(tcmu_, dev_size); | |
1988 | ||
b849b456 KN |
1989 | static ssize_t tcmu_nl_reply_supported_show(struct config_item *item, |
1990 | char *page) | |
1991 | { | |
1992 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
1993 | struct se_dev_attrib, da_group); | |
1994 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); | |
1995 | ||
1996 | return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported); | |
1997 | } | |
1998 | ||
1999 | static ssize_t tcmu_nl_reply_supported_store(struct config_item *item, | |
2000 | const char *page, size_t count) | |
2001 | { | |
2002 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
2003 | struct se_dev_attrib, da_group); | |
2004 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); | |
2005 | s8 val; | |
2006 | int ret; | |
2007 | ||
2008 | ret = kstrtos8(page, 0, &val); | |
2009 | if (ret < 0) | |
2010 | return ret; | |
2011 | ||
2012 | udev->nl_reply_supported = val; | |
2013 | return count; | |
2014 | } | |
2015 | CONFIGFS_ATTR(tcmu_, nl_reply_supported); | |
2016 | ||
9a8bb606 BL |
2017 | static ssize_t tcmu_emulate_write_cache_show(struct config_item *item, |
2018 | char *page) | |
2019 | { | |
2020 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
2021 | struct se_dev_attrib, da_group); | |
2022 | ||
2023 | return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache); | |
2024 | } | |
2025 | ||
2026 | static ssize_t tcmu_emulate_write_cache_store(struct config_item *item, | |
2027 | const char *page, size_t count) | |
2028 | { | |
2029 | struct se_dev_attrib *da = container_of(to_config_group(item), | |
2030 | struct se_dev_attrib, da_group); | |
1068be7b | 2031 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); |
2d76443e | 2032 | u8 val; |
9a8bb606 BL |
2033 | int ret; |
2034 | ||
2d76443e | 2035 | ret = kstrtou8(page, 0, &val); |
9a8bb606 BL |
2036 | if (ret < 0) |
2037 | return ret; | |
2038 | ||
1068be7b BL |
2039 | /* Check if device has been configured before */ |
2040 | if (tcmu_dev_configured(udev)) { | |
b3af66e2 | 2041 | ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE, |
2d76443e | 2042 | TCMU_ATTR_WRITECACHE, &val); |
1068be7b BL |
2043 | if (ret) { |
2044 | pr_err("Unable to reconfigure device\n"); | |
2045 | return ret; | |
2046 | } | |
2047 | } | |
2d76443e MC |
2048 | |
2049 | da->emulate_write_cache = val; | |
9a8bb606 BL |
2050 | return count; |
2051 | } | |
2052 | CONFIGFS_ATTR(tcmu_, emulate_write_cache); | |
2053 | ||
5821783b | 2054 | static struct configfs_attribute *tcmu_attrib_attrs[] = { |
801fc54d | 2055 | &tcmu_attr_cmd_time_out, |
2d76443e | 2056 | &tcmu_attr_dev_config, |
801fc54d BL |
2057 | &tcmu_attr_dev_size, |
2058 | &tcmu_attr_emulate_write_cache, | |
b849b456 | 2059 | &tcmu_attr_nl_reply_supported, |
801fc54d BL |
2060 | NULL, |
2061 | }; | |
2062 | ||
7d7a7435 NB |
2063 | static struct configfs_attribute **tcmu_attrs; |
2064 | ||
2065 | static struct target_backend_ops tcmu_ops = { | |
7c9e7a6f | 2066 | .name = "user", |
7c9e7a6f | 2067 | .owner = THIS_MODULE, |
a3541703 | 2068 | .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, |
7c9e7a6f AG |
2069 | .attach_hba = tcmu_attach_hba, |
2070 | .detach_hba = tcmu_detach_hba, | |
2071 | .alloc_device = tcmu_alloc_device, | |
2072 | .configure_device = tcmu_configure_device, | |
92634706 | 2073 | .destroy_device = tcmu_destroy_device, |
7c9e7a6f AG |
2074 | .free_device = tcmu_free_device, |
2075 | .parse_cdb = tcmu_parse_cdb, | |
2076 | .set_configfs_dev_params = tcmu_set_configfs_dev_params, | |
2077 | .show_configfs_dev_params = tcmu_show_configfs_dev_params, | |
2078 | .get_device_type = sbc_get_device_type, | |
2079 | .get_blocks = tcmu_get_blocks, | |
7d7a7435 | 2080 | .tb_dev_attrib_attrs = NULL, |
7c9e7a6f AG |
2081 | }; |
2082 | ||
b6df4b79 XL |
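/*
 * Background reclaim thread: when woken it completes finished commands,
 * shrinks each device's data area down to the highest block still in
 * use, frees the released pages and wakes up devices waiting on the
 * global data pool.
 */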
2083 | static int unmap_thread_fn(void *data) |
2084 | { | |
2085 | struct tcmu_dev *udev; | |
2086 | loff_t off; | |
2087 | uint32_t start, end, block; | |
2088 | struct page *page; | |
2089 | int i; | |
2090 | ||
07932a02 | 2091 | while (!kthread_should_stop()) { |
b6df4b79 XL |
2092 | DEFINE_WAIT(__wait); |
2093 | ||
2094 | prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE); | |
2095 | schedule(); | |
2096 | finish_wait(&unmap_wait, &__wait); | |
2097 | ||
d906d8af MC |
2098 | if (kthread_should_stop()) |
2099 | break; | |
2100 | ||
b6df4b79 XL |
2101 | mutex_lock(&root_udev_mutex); |
2102 | list_for_each_entry(udev, &root_udev, node) { | |
2103 | mutex_lock(&udev->cmdr_lock); | |
2104 | ||
2105 | /* Try to complete the finished commands first */ | |
2106 | tcmu_handle_completions(udev); | |
2107 | ||
2108 | /* Skip udevs that are waiting for the global pool or are idle */ | |
2109 | if (udev->waiting_global || !udev->dbi_thresh) { | |
2110 | mutex_unlock(&udev->cmdr_lock); | |
2111 | continue; | |
2112 | } | |
2113 | ||
2114 | end = udev->dbi_max + 1; | |
2115 | block = find_last_bit(udev->data_bitmap, end); | |
2116 | if (block == udev->dbi_max) { | |
2117 | /* | |
2118 | * The last bit is dbi_max, so there is | |
2119 | * no need to shrink any blocks. | |
2120 | */ | |
2121 | mutex_unlock(&udev->cmdr_lock); | |
2122 | continue; | |
2123 | } else if (block == end) { | |
2124 | /* The current udev will go to the idle state */ | |
2125 | udev->dbi_thresh = start = 0; | |
2126 | udev->dbi_max = 0; | |
2127 | } else { | |
2128 | udev->dbi_thresh = start = block + 1; | |
2129 | udev->dbi_max = block; | |
2130 | } | |
2131 | ||
2132 | /* Truncate the data area starting at offset off */ | |
2133 | off = udev->data_off + start * DATA_BLOCK_SIZE; | |
2134 | unmap_mapping_range(udev->inode->i_mapping, off, 0, 1); | |
2135 | ||
2136 | /* Release the block pages */ | |
2137 | for (i = start; i < end; i++) { | |
2138 | page = radix_tree_delete(&udev->data_blocks, i); | |
2139 | if (page) { | |
2140 | __free_page(page); | |
2141 | atomic_dec(&global_db_count); | |
2142 | } | |
2143 | } | |
2144 | mutex_unlock(&udev->cmdr_lock); | |
2145 | } | |
2146 | ||
2147 | /* | |
2148 | * Try to wake up the udevs that are waiting | |
2149 | * for the global data pool. | |
2150 | */ | |
2151 | list_for_each_entry(udev, &root_udev, node) { | |
2152 | if (udev->waiting_global) | |
2153 | wake_up(&udev->wait_cmdr); | |
2154 | } | |
2155 | mutex_unlock(&root_udev_mutex); | |
2156 | } | |
2157 | ||
2158 | return 0; | |
2159 | } | |
2160 | ||
7c9e7a6f AG |
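/*
 * Module init: create the command cache, register the root device and
 * netlink family, merge the passthrough and tcmu configfs attributes,
 * register the backend and start the unmap thread.
 */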
2161 | static int __init tcmu_module_init(void) |
2162 | { | |
801fc54d | 2163 | int ret, i, k, len = 0; |
7c9e7a6f AG |
2164 | |
2165 | BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); | |
2166 | ||
2167 | tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache", | |
2168 | sizeof(struct tcmu_cmd), | |
2169 | __alignof__(struct tcmu_cmd), | |
2170 | 0, NULL); | |
2171 | if (!tcmu_cmd_cache) | |
2172 | return -ENOMEM; | |
2173 | ||
2174 | tcmu_root_device = root_device_register("tcm_user"); | |
2175 | if (IS_ERR(tcmu_root_device)) { | |
2176 | ret = PTR_ERR(tcmu_root_device); | |
2177 | goto out_free_cache; | |
2178 | } | |
2179 | ||
2180 | ret = genl_register_family(&tcmu_genl_family); | |
2181 | if (ret < 0) { | |
2182 | goto out_unreg_device; | |
2183 | } | |
2184 | ||
7d7a7435 NB |
2185 | for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { |
2186 | len += sizeof(struct configfs_attribute *); | |
2187 | } | |
801fc54d BL |
2188 | for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) { |
2189 | len += sizeof(struct configfs_attribute *); | |
2190 | } | |
2191 | len += sizeof(struct configfs_attribute *); | |
7d7a7435 NB |
2192 | |
2193 | tcmu_attrs = kzalloc(len, GFP_KERNEL); | |
2194 | if (!tcmu_attrs) { | |
2195 | ret = -ENOMEM; | |
2196 | goto out_unreg_genl; | |
2197 | } | |
2198 | ||
2199 | for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { | |
2200 | tcmu_attrs[i] = passthrough_attrib_attrs[i]; | |
2201 | } | |
801fc54d BL |
2202 | for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) { |
2203 | tcmu_attrs[i] = tcmu_attrib_attrs[k]; | |
2204 | i++; | |
2205 | } | |
7d7a7435 NB |
2206 | tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs; |
2207 | ||
0a06d430 | 2208 | ret = transport_backend_register(&tcmu_ops); |
7c9e7a6f | 2209 | if (ret) |
7d7a7435 | 2210 | goto out_attrs; |
7c9e7a6f | 2211 | |
b6df4b79 XL |
2212 | init_waitqueue_head(&unmap_wait); |
2213 | unmap_thread = kthread_run(unmap_thread_fn, NULL, "tcmu_unmap"); | |
2214 | if (IS_ERR(unmap_thread)) { | |
2215 | ret = PTR_ERR(unmap_thread); | |
2216 | goto out_unreg_transport; | |
2217 | } | |
2218 | ||
7c9e7a6f AG |
2219 | return 0; |
2220 | ||
b6df4b79 XL |
2221 | out_unreg_transport: |
2222 | target_backend_unregister(&tcmu_ops); | |
7d7a7435 NB |
2223 | out_attrs: |
2224 | kfree(tcmu_attrs); | |
7c9e7a6f AG |
2225 | out_unreg_genl: |
2226 | genl_unregister_family(&tcmu_genl_family); | |
2227 | out_unreg_device: | |
2228 | root_device_unregister(tcmu_root_device); | |
2229 | out_free_cache: | |
2230 | kmem_cache_destroy(tcmu_cmd_cache); | |
2231 | ||
2232 | return ret; | |
2233 | } | |
2234 | ||
2235 | static void __exit tcmu_module_exit(void) | |
2236 | { | |
b6df4b79 | 2237 | kthread_stop(unmap_thread); |
0a06d430 | 2238 | target_backend_unregister(&tcmu_ops); |
7d7a7435 | 2239 | kfree(tcmu_attrs); |
7c9e7a6f AG |
2240 | genl_unregister_family(&tcmu_genl_family); |
2241 | root_device_unregister(tcmu_root_device); | |
2242 | kmem_cache_destroy(tcmu_cmd_cache); | |
2243 | } | |
2244 | ||
2245 | MODULE_DESCRIPTION("TCM USER subsystem plugin"); | |
2246 | MODULE_AUTHOR("Shaohua Li <shli@kernel.org>"); | |
2247 | MODULE_AUTHOR("Andy Grover <agrover@redhat.com>"); | |
2248 | MODULE_LICENSE("GPL"); | |
2249 | ||
2250 | module_init(tcmu_module_init); | |
2251 | module_exit(tcmu_module_exit); |