/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <linux/uio_driver.h>
#include <net/genetlink.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>

#include <linux/target_core_user.h>

/*
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This makes it possible to support
 * backends that are too complex to implement in the kernel.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap()ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */

#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

#define CMDR_SIZE (16 * 4096)
#define DATA_SIZE (257 * 4096)

#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
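
/*
 * Summary of the layout established in tcmu_configure_device() below
 * (a sketch, not ABI -- the authoritative definitions are in the uapi
 * header):
 *
 *   offset 0        CMDR_OFF             CMDR_SIZE        TCMU_RING_SIZE
 *     +--------------+---------------------+-------------------+
 *     | tcmu_mailbox | command ring        | data area         |
 *     |              | (cmdr_size bytes)   | (DATA_SIZE bytes) |
 *     +--------------+---------------------+-------------------+
 *
 * so cmdr_size == CMDR_SIZE - CMDR_OFF and data_size == DATA_SIZE.
 */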

static struct device *tcmu_root_device;

struct tcmu_hba {
	u32 host_id;
};

#define TCMU_CONFIG_LEN 256

struct tcmu_dev {
	struct se_device se_dev;

	char *name;
	struct se_hba *hba;

#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
	unsigned long flags;

	struct uio_info uio_info;

	struct tcmu_mailbox *mb_addr;
	size_t dev_size;
	u32 cmdr_size;
	u32 cmdr_last_cleaned;
	/* Offset of data ring from start of mb */
	size_t data_off;
	size_t data_size;
	/* Ring head + tail values. */
	/* Must add data_off and mb_addr to get the address */
	size_t data_head;
	size_t data_tail;

	wait_queue_head_t wait_cmdr;
	/* TODO should this be a mutex? */
	spinlock_t cmdr_lock;

	struct idr commands;
	spinlock_t commands_lock;

	struct timer_list timeout;

	char dev_config[TCMU_CONFIG_LEN];
};

#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)

#define CMDR_OFF sizeof(struct tcmu_mailbox)

struct tcmu_cmd {
	struct se_cmd *se_cmd;
	struct tcmu_dev *tcmu_dev;

	uint16_t cmd_id;

	/* Can't use se_cmd->data_length when cleaning up expired cmds, because if
	   cmd has been completed then accessing se_cmd is off limits */
	size_t data_length;

	unsigned long deadline;

#define TCMU_CMD_BIT_EXPIRED 0
	unsigned long flags;
};

/* Stringify helpers, needed by info->version below */
#define xstr(s) str(s)
#define str(s) #s

static struct kmem_cache *tcmu_cmd_cache;

/* multicast group */
enum tcmu_multicast_groups {
	TCMU_MCGRP_CONFIG,
};

static const struct genl_multicast_group tcmu_mcgrps[] = {
	[TCMU_MCGRP_CONFIG] = { .name = "config", },
};

/* Our generic netlink family */
static struct genl_family tcmu_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = "TCM-USER",
	.version = 1,
	.maxattr = TCMU_ATTR_MAX,
	.mcgrps = tcmu_mcgrps,
	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
};
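
/*
 * Userspace handlers can subscribe to the "config" multicast group of
 * the "TCM-USER" netlink family to be notified when devices are added
 * and removed; see tcmu_netlink_event() below for the messages sent.
 */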

static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	int cmd_id;

	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
	if (!tcmu_cmd)
		return NULL;

	tcmu_cmd->se_cmd = se_cmd;
	tcmu_cmd->tcmu_dev = udev;
	tcmu_cmd->data_length = se_cmd->data_length;

	tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);

	idr_preload(GFP_KERNEL);
	spin_lock_irq(&udev->commands_lock);
	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
			   USHRT_MAX, GFP_NOWAIT);
	spin_unlock_irq(&udev->commands_lock);
	idr_preload_end();

	if (cmd_id < 0) {
		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
		return NULL;
	}
	tcmu_cmd->cmd_id = cmd_id;

	return tcmu_cmd;
}

static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
	unsigned long offset = (unsigned long) vaddr & ~PAGE_MASK;

	size = round_up(size+offset, PAGE_SIZE);
	vaddr -= offset;

	while (size) {
		flush_dcache_page(virt_to_page(vaddr));
		size -= PAGE_SIZE;
		/* advance, or we would flush the same page over and over */
		vaddr += PAGE_SIZE;
	}
}

/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
	int diff = head - tail;

	if (diff >= 0)
		return diff;
	else
		return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* Keep 1 byte unused or we can't tell full from empty */
	return (size - spc_used(head, tail, size) - 1);
}

static inline size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
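
/*
 * Example: with size == 16, head == 14 and used == 4, UPDATE_HEAD
 * stores ((14 % 16) + 4) % 16 == 2, i.e. the head wraps around.
 * smp_store_release() makes the preceding payload writes visible
 * before the new head value is published to the other side.
 */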

/*
 * We can't queue a command until we have space available on the cmd ring
 * *and* space available on the data ring.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed)
{
	struct tcmu_mailbox *mb = udev->mb_addr;
	size_t space;
	u32 cmd_head;
	size_t cmd_needed;

	tcmu_flush_dcache_range(mb, sizeof(*mb));

	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/*
	 * If cmd end-of-ring space is too small then we need space for a NOP plus
	 * original cmd - cmds are internally contiguous.
	 */
	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
	if (space < cmd_needed) {
		pr_debug("no cmd space: %u %u %u\n", cmd_head,
			 udev->cmdr_last_cleaned, udev->cmdr_size);
		return false;
	}

	space = spc_free(udev->data_head, udev->data_tail, udev->data_size);
	if (space < data_needed) {
		pr_debug("no data space: %zu %zu %zu\n", udev->data_head,
			 udev->data_tail, udev->data_size);
		return false;
	}

	return true;
}

static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t base_command_size, command_size;
	struct tcmu_mailbox *mb;
	struct tcmu_cmd_entry *entry;
	int i;
	struct scatterlist *sg;
	struct iovec *iov;
	int iov_cnt = 0;
	uint32_t cmd_head;
	uint64_t cdb_off;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
		return -EINVAL;

	/*
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 *
	 * iovs = sgl_nents+1, for end-of-ring case, plus another 1
	 * b/c size == offsetof one-past-element.
	 */
	base_command_size = max(offsetof(struct tcmu_cmd_entry,
					 req.iov[se_cmd->t_data_nents + 2]),
				sizeof(struct tcmu_cmd_entry));
	command_size = base_command_size
		+ round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);

	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));
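
	/*
	 * Sizing example (illustrative numbers only): with t_data_nents
	 * == 4, offsetof(..., req.iov[6]) above reserves room for six
	 * iovecs -- four for the sg entries, one for a possible
	 * end-of-ring split, and one more because the size is taken as
	 * the offset of one-past-the-last element. A 10-byte CDB is
	 * then appended, rounded up to a multiple of TCMU_OP_ALIGN_SIZE.
	 */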

	spin_lock_irq(&udev->cmdr_lock);

	mb = udev->mb_addr;
	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	if ((command_size > (udev->cmdr_size / 2))
	    || tcmu_cmd->data_length > (udev->data_size - 1))
		pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
			"cmd/data ring buffers\n", command_size, tcmu_cmd->data_length,
			udev->cmdr_size, udev->data_size);

	while (!is_ring_space_avail(udev, command_size, tcmu_cmd->data_length)) {
		int ret;
		DEFINE_WAIT(__wait);

		prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);

		pr_debug("sleeping for ring space\n");
		spin_unlock_irq(&udev->cmdr_lock);
		ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
		finish_wait(&udev->wait_cmdr, &__wait);
		if (!ret) {
			pr_warn("tcmu: command timed out\n");
			return -ETIMEDOUT;
		}

		spin_lock_irq(&udev->cmdr_lock);

		/* We dropped cmdr_lock, cmd_head is stale */
		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	}

	/* Insert a PAD if end-of-ring space is too small */
	if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

		entry = (void *) mb + CMDR_OFF + cmd_head;
		tcmu_flush_dcache_range(entry, sizeof(*entry));
		tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
		tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
		entry->hdr.cmd_id = 0; /* not used for PAD */
		entry->hdr.kflags = 0;
		entry->hdr.uflags = 0;

		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);

		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
		WARN_ON(cmd_head != 0);
	}

	entry = (void *) mb + CMDR_OFF + cmd_head;
	tcmu_flush_dcache_range(entry, sizeof(*entry));
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
	entry->hdr.cmd_id = tcmu_cmd->cmd_id;
	entry->hdr.kflags = 0;
	entry->hdr.uflags = 0;

	/*
	 * Fix up iovecs, and handle if allocation in data ring wrapped.
	 */
	iov = &entry->req.iov[0];
	for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
		size_t copy_bytes = min((size_t)sg->length,
					head_to_end(udev->data_head, udev->data_size));
		void *from = kmap_atomic(sg_page(sg)) + sg->offset;
		void *to = (void *) mb + udev->data_off + udev->data_head;

		if (tcmu_cmd->se_cmd->data_direction == DMA_TO_DEVICE) {
			memcpy(to, from, copy_bytes);
			tcmu_flush_dcache_range(to, copy_bytes);
		}

		/* Even iov_base is relative to mb_addr */
		iov->iov_len = copy_bytes;
		iov->iov_base = (void __user *) udev->data_off +
			udev->data_head;
		iov_cnt++;
		iov++;

		UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);

		/* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
		if (sg->length != copy_bytes) {
			from += copy_bytes;
			copy_bytes = sg->length - copy_bytes;

			iov->iov_len = copy_bytes;
			iov->iov_base = (void __user *) udev->data_off +
				udev->data_head;

			if (se_cmd->data_direction == DMA_TO_DEVICE) {
				to = (void *) mb + udev->data_off + udev->data_head;
				memcpy(to, from, copy_bytes);
				tcmu_flush_dcache_range(to, copy_bytes);
			}

			iov_cnt++;
			iov++;

			UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
		}

		kunmap_atomic(from);
	}
	entry->req.iov_cnt = iov_cnt;
	entry->req.iov_bidi_cnt = 0;
	entry->req.iov_dif_cnt = 0;

	/* All offsets relative to mb_addr, not start of entry! */
	cdb_off = CMDR_OFF + cmd_head + base_command_size;
	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
	entry->req.cdb_off = cdb_off;
	tcmu_flush_dcache_range(entry, sizeof(*entry));

	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	spin_unlock_irq(&udev->cmdr_lock);

	/* TODO: only if FLUSH and FUA? */
	uio_event_notify(&udev->uio_info);

	mod_timer(&udev->timeout,
		  round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));

	return 0;
}

static int tcmu_queue_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	int ret;

	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
	if (!tcmu_cmd)
		return -ENOMEM;

	ret = tcmu_queue_cmd_ring(tcmu_cmd);
	if (ret < 0) {
		pr_err("TCMU: Could not queue command\n");
		spin_lock_irq(&udev->commands_lock);
		idr_remove(&udev->commands, tcmu_cmd->cmd_id);
		spin_unlock_irq(&udev->commands_lock);

		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
	}

	return ret;
}

static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	struct tcmu_dev *udev = cmd->tcmu_dev;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		/* cmd has been completed already from timeout, just reclaim
		   data ring space and free the cmd (it was kept alive in the
		   idr until now, see tcmu_check_expired_cmd()) */
		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);

		kmem_cache_free(tcmu_cmd_cache, cmd);
		return;
	}

	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
			cmd->se_cmd);
		transport_generic_request_failure(cmd->se_cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
		cmd->se_cmd = NULL;
		kmem_cache_free(tcmu_cmd_cache, cmd);
		return;
	}

	if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
		memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
		       se_cmd->scsi_sense_length);

		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		struct scatterlist *sg;
		int i;

		/* It'd be easier to look at entry's iovec again, but UAM */
		for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
			size_t copy_bytes;
			void *to;
			void *from;

			copy_bytes = min((size_t)sg->length,
					 head_to_end(udev->data_tail, udev->data_size));

			to = kmap_atomic(sg_page(sg)) + sg->offset;
			WARN_ON(sg->length + sg->offset > PAGE_SIZE);
			from = (void *) udev->mb_addr + udev->data_off + udev->data_tail;
			tcmu_flush_dcache_range(from, copy_bytes);
			memcpy(to, from, copy_bytes);

			UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);

			/* Uh oh, wrapped the data buffer for this sg's data */
			if (sg->length != copy_bytes) {
				from = (void *) udev->mb_addr + udev->data_off + udev->data_tail;
				WARN_ON(udev->data_tail);
				to += copy_bytes;
				copy_bytes = sg->length - copy_bytes;
				tcmu_flush_dcache_range(from, copy_bytes);
				memcpy(to, from, copy_bytes);

				UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
			}

			kunmap_atomic(to);
		}

	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
	} else if (se_cmd->data_direction != DMA_NONE) {
		pr_warn("TCMU: data direction was %d!\n",
			se_cmd->data_direction);
	}

	target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
	cmd->se_cmd = NULL;

	kmem_cache_free(tcmu_cmd_cache, cmd);
}

static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
	struct tcmu_mailbox *mb;
	LIST_HEAD(cpl_cmds);
	unsigned long flags;
	int handled = 0;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		pr_err("ring broken, not handling completions\n");
		return 0;
	}

	spin_lock_irqsave(&udev->cmdr_lock, flags);

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {

		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
		struct tcmu_cmd *cmd;

		tcmu_flush_dcache_range(entry, sizeof(*entry));

		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
			UPDATE_HEAD(udev->cmdr_last_cleaned,
				    tcmu_hdr_get_len(entry->hdr.len_op),
				    udev->cmdr_size);
			continue;
		}
		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

		spin_lock(&udev->commands_lock);
		cmd = idr_find(&udev->commands, entry->hdr.cmd_id);
		if (cmd)
			idr_remove(&udev->commands, cmd->cmd_id);
		spin_unlock(&udev->commands_lock);

		if (!cmd) {
			pr_err("cmd_id not found, ring is broken\n");
			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
			break;
		}

		tcmu_handle_completion(cmd, entry);

		UPDATE_HEAD(udev->cmdr_last_cleaned,
			    tcmu_hdr_get_len(entry->hdr.len_op),
			    udev->cmdr_size);

		handled++;
	}

	if (mb->cmd_tail == mb->cmd_head)
		del_timer(&udev->timeout); /* no more pending cmds */

	spin_unlock_irqrestore(&udev->cmdr_lock, flags);

	wake_up(&udev->wait_cmdr);

	return handled;
}

static int tcmu_check_expired_cmd(int id, void *p, void *data)
{
	struct tcmu_cmd *cmd = p;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		return 0;

	/* Not yet past its deadline? Leave it alone. */
	if (!time_after(jiffies, cmd->deadline))
		return 0;

	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
	target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
	cmd->se_cmd = NULL;

	/*
	 * Don't free the cmd here: it stays in the idr until userspace
	 * posts a completion for it, at which point tcmu_handle_completion()
	 * reclaims its data ring space and frees it. Freeing it here would
	 * leave a dangling pointer in the idr.
	 */
	return 0;
}

static void tcmu_device_timedout(unsigned long data)
{
	struct tcmu_dev *udev = (struct tcmu_dev *)data;
	unsigned long flags;
	int handled;

	handled = tcmu_handle_completions(udev);

	pr_warn("%d completions handled from timeout\n", handled);

	spin_lock_irqsave(&udev->commands_lock, flags);
	idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
	spin_unlock_irqrestore(&udev->commands_lock, flags);

	/*
	 * We don't need to wakeup threads on wait_cmdr since they have their
	 * own timeout.
	 */
}

static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct tcmu_hba *tcmu_hba;

	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
	if (!tcmu_hba)
		return -ENOMEM;

	tcmu_hba->host_id = host_id;
	hba->hba_ptr = tcmu_hba;

	return 0;
}

static void tcmu_detach_hba(struct se_hba *hba)
{
	kfree(hba->hba_ptr);
	hba->hba_ptr = NULL;
}

static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
	struct tcmu_dev *udev;

	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
	if (!udev)
		return NULL;

	udev->name = kstrdup(name, GFP_KERNEL);
	if (!udev->name) {
		kfree(udev);
		return NULL;
	}

	udev->hba = hba;

	init_waitqueue_head(&udev->wait_cmdr);
	spin_lock_init(&udev->cmdr_lock);

	idr_init(&udev->commands);
	spin_lock_init(&udev->commands_lock);

	setup_timer(&udev->timeout, tcmu_device_timedout,
		    (unsigned long)udev);

	return &udev->se_dev;
}

static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
	struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info);

	tcmu_handle_completions(tcmu_dev);

	return 0;
}

/*
 * mmap code from uio.c. Copied here because we want to hook mmap()
 * and this stuff must come along.
 */
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}

static int tcmu_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct tcmu_dev *udev = vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;
	struct page *page;
	unsigned long offset;
	void *addr;

	int mi = tcmu_find_mem_index(vma);
	if (mi < 0)
		return VM_FAULT_SIGBUS;

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	addr = (void *)(unsigned long)info->mem[mi].addr + offset;
	if (info->mem[mi].memtype == UIO_MEM_LOGICAL)
		page = virt_to_page(addr);
	else
		page = vmalloc_to_page(addr);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct tcmu_vm_ops = {
	.fault = tcmu_vma_fault,
};

static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &tcmu_vm_ops;

	vma->vm_private_data = udev;

	/* Ensure the mmap is exactly the right size */
	if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT))
		return -EINVAL;

	return 0;
}

static int tcmu_open(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	/* O_EXCL not supported for char devs, so fake it? */
	if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
		return -EBUSY;

	pr_debug("open\n");

	return 0;
}

static int tcmu_release(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);

	pr_debug("close\n");

	return 0;
}

static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor)
{
	struct sk_buff *skb;
	void *msg_header;
	int ret = -ENOMEM;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return ret;

	msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
	if (!msg_header)
		goto free_skb;

	ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor);
	if (ret < 0)
		goto free_skb;

	genlmsg_end(skb, msg_header);

	ret = genlmsg_multicast(&tcmu_genl_family, skb, 0,
				TCMU_MCGRP_CONFIG, GFP_KERNEL);

	/* We don't care if no one is listening */
	if (ret == -ESRCH)
		ret = 0;

	return ret;
free_skb:
	nlmsg_free(skb);
	return ret;
}

static int tcmu_configure_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	struct tcmu_hba *hba = udev->hba->hba_ptr;
	struct uio_info *info;
	struct tcmu_mailbox *mb;
	size_t size;
	size_t used;
	int ret = 0;
	char *str;

	info = &udev->uio_info;

	size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
			udev->dev_config);
	size += 1; /* for \0 */
	str = kmalloc(size, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);

	if (udev->dev_config[0])
		snprintf(str + used, size - used, "/%s", udev->dev_config);

	info->name = str;

	udev->mb_addr = vzalloc(TCMU_RING_SIZE);
	if (!udev->mb_addr) {
		ret = -ENOMEM;
		goto err_vzalloc;
	}

	/* mailbox fits in first part of CMDR space */
	udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
	udev->data_off = CMDR_SIZE;
	udev->data_size = TCMU_RING_SIZE - CMDR_SIZE;

	mb = udev->mb_addr;
	mb->version = TCMU_MAILBOX_VERSION;
	mb->cmdr_off = CMDR_OFF;
	mb->cmdr_size = udev->cmdr_size;

	WARN_ON(!PAGE_ALIGNED(udev->data_off));
	WARN_ON(udev->data_size % PAGE_SIZE);

	info->version = xstr(TCMU_MAILBOX_VERSION);

	info->mem[0].name = "tcm-user command & data buffer";
	info->mem[0].addr = (phys_addr_t) udev->mb_addr;
	info->mem[0].size = TCMU_RING_SIZE;
	info->mem[0].memtype = UIO_MEM_VIRTUAL;

	info->irqcontrol = tcmu_irqcontrol;
	info->irq = UIO_IRQ_CUSTOM;

	info->mmap = tcmu_mmap;
	info->open = tcmu_open;
	info->release = tcmu_release;

	ret = uio_register_device(tcmu_root_device, info);
	if (ret)
		goto err_register;

	/* Other attributes can be configured in userspace */
	dev->dev_attrib.hw_block_size = 512;
	dev->dev_attrib.hw_max_sectors = 128;
	dev->dev_attrib.hw_queue_depth = 128;

	ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
				 udev->uio_info.uio_dev->minor);
	if (ret)
		goto err_netlink;

	return 0;

err_netlink:
	uio_unregister_device(&udev->uio_info);
err_register:
	vfree(udev->mb_addr);
err_vzalloc:
	kfree(info->name);

	return ret;
}

static int tcmu_check_pending_cmd(int id, void *p, void *data)
{
	struct tcmu_cmd *cmd = p;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		return 0;
	return -EINVAL;
}

static void tcmu_free_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	int i;

	del_timer_sync(&udev->timeout);

	vfree(udev->mb_addr);

	/* Upper layer should drain all requests before calling this */
	spin_lock_irq(&udev->commands_lock);
	i = idr_for_each(&udev->commands, tcmu_check_pending_cmd, NULL);
	idr_destroy(&udev->commands);
	spin_unlock_irq(&udev->commands_lock);
	WARN_ON(i);

	/* Device was configured */
	if (udev->uio_info.uio_dev) {
		tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
				   udev->uio_info.uio_dev->minor);

		uio_unregister_device(&udev->uio_info);
		kfree(udev->uio_info.name);
		kfree(udev->name);
	}

	kfree(udev);
}

enum {
	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
};

static match_table_t tokens = {
	{Opt_dev_config, "dev_config=%s"},
	{Opt_dev_size, "dev_size=%u"},
	{Opt_hw_block_size, "hw_block_size=%u"},
	{Opt_err, NULL}
};

static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	char *orig, *ptr, *opts, *arg_p;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_ul;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_dev_config:
			if (match_strlcpy(udev->dev_config, &args[0],
					  TCMU_CONFIG_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
			break;
		case Opt_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
			kfree(arg_p);
			if (ret < 0)
				pr_err("kstrtoul() failed for dev_size=\n");
			break;
		case Opt_hw_block_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_ul);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for hw_block_size=\n");
				break;
			}
			if (!tmp_ul) {
				pr_err("hw_block_size must be nonzero\n");
				break;
			}
			dev->dev_attrib.hw_block_size = tmp_ul;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "Config: %s ",
		     udev->dev_config[0] ? udev->dev_config : "NULL");
	bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);

	return bl;
}

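/*
 * The target core treats the value returned by get_blocks() as the last
 * addressable LBA, hence dev_size is reduced by one block before the
 * division below.
 */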
static sector_t tcmu_get_blocks(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	return div_u64(udev->dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}

static sense_reason_t
tcmu_pass_op(struct se_cmd *se_cmd)
{
	int ret = tcmu_queue_cmd(se_cmd);

	if (ret != 0)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	else
		return TCM_NO_SENSE;
}

static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
	return passthrough_parse_cdb(cmd, tcmu_pass_op);
}

DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type);
TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type);

DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size);
TB_DEV_ATTR_RO(tcmu, hw_block_size);

DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors);
TB_DEV_ATTR_RO(tcmu, hw_max_sectors);

DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth);
TB_DEV_ATTR_RO(tcmu, hw_queue_depth);

static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
	&tcmu_dev_attrib_hw_pi_prot_type.attr,
	&tcmu_dev_attrib_hw_block_size.attr,
	&tcmu_dev_attrib_hw_max_sectors.attr,
	&tcmu_dev_attrib_hw_queue_depth.attr,
	NULL,
};

static struct se_subsystem_api tcmu_template = {
	.name			= "user",
	.inquiry_prod		= "USER",
	.inquiry_rev		= TCMU_VERSION,
	.owner			= THIS_MODULE,
	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
	.attach_hba		= tcmu_attach_hba,
	.detach_hba		= tcmu_detach_hba,
	.alloc_device		= tcmu_alloc_device,
	.configure_device	= tcmu_configure_device,
	.free_device		= tcmu_free_device,
	.parse_cdb		= tcmu_parse_cdb,
	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= tcmu_get_blocks,
};

static int __init tcmu_module_init(void)
{
	struct target_backend_cits *tbc = &tcmu_template.tb_cits;
	int ret;

	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);

	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
				sizeof(struct tcmu_cmd),
				__alignof__(struct tcmu_cmd),
				0, NULL);
	if (!tcmu_cmd_cache)
		return -ENOMEM;

	tcmu_root_device = root_device_register("tcm_user");
	if (IS_ERR(tcmu_root_device)) {
		ret = PTR_ERR(tcmu_root_device);
		goto out_free_cache;
	}

	ret = genl_register_family(&tcmu_genl_family);
	if (ret < 0)
		goto out_unreg_device;

	target_core_setup_sub_cits(&tcmu_template);
	tbc->tb_dev_attrib_cit.ct_attrs = tcmu_backend_dev_attrs;

	ret = transport_subsystem_register(&tcmu_template);
	if (ret)
		goto out_unreg_genl;

	return 0;

out_unreg_genl:
	genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
	root_device_unregister(tcmu_root_device);
out_free_cache:
	kmem_cache_destroy(tcmu_cmd_cache);

	return ret;
}

static void __exit tcmu_module_exit(void)
{
	transport_subsystem_release(&tcmu_template);
	genl_unregister_family(&tcmu_genl_family);
	root_device_unregister(tcmu_root_device);
	kmem_cache_destroy(tcmu_cmd_cache);
}

MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
MODULE_LICENSE("GPL");

module_init(tcmu_module_init);
module_exit(tcmu_module_exit);
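
/*
 * For reference, a rough sketch of the userspace side of the protocol
 * (illustrative only -- the device path, PAD handling, response filling
 * and error handling are elided; see the tcmu-design documentation for
 * the authoritative description):
 *
 *	int fd = open("/dev/uio0", O_RDWR);
 *	void *map = mmap(NULL, TCMU_RING_SIZE, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	struct tcmu_mailbox *mb = map;
 *	uint32_t event;
 *
 *	for (;;) {
 *		read(fd, &event, 4);	// blocks until uio_event_notify()
 *		while (mb->cmd_tail != mb->cmd_head) {
 *			struct tcmu_cmd_entry *ent =
 *				map + mb->cmdr_off + mb->cmd_tail;
 *
 *			// handle PAD/CMD ops, fill in ent->rsp ...
 *			mb->cmd_tail = (mb->cmd_tail +
 *					tcmu_hdr_get_len(ent->hdr.len_op)) %
 *				       mb->cmdr_size;
 *		}
 *		write(fd, &event, 4);	// kicks tcmu_irqcontrol()
 *	}
 */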