/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
25 | ||
26 | #include <linux/net.h> | |
27 | #include <linux/string.h> | |
28 | #include <linux/timer.h> | |
29 | #include <linux/slab.h> | |
30 | #include <linux/spinlock.h> | |
31 | #include <linux/in.h> | |
32 | #include <linux/export.h> | |
33 | #include <net/sock.h> | |
34 | #include <net/tcp.h> | |
35 | #include <scsi/scsi.h> | |
36 | #include <scsi/scsi_cmnd.h> | |
37 | ||
38 | #include <target/target_core_base.h> | |
39 | #include <target/target_core_backend.h> | |
40 | #include <target/target_core_fabric.h> | |
41 | ||
42 | #include "target_core_internal.h" | |
43 | ||
44 | extern struct se_device *g_lun0_dev; | |
45 | ||
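/* Protects the global list of registered portal groups below. */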
static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/* core_clear_initiator_node_from_tpg():
 *
 * Clear all demo-mode MappedLUN access for @nacl from the active LUNs
 * of @tpg.
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
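		/*
		 * Drop device_list_lock before disabling the entry;
		 * core_disable_device_list_for_node() takes it again itself.
		 */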
		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/* __core_tpg_get_initiator_node_acl():
 *
 * tpg->acl_node_lock must be held when calling.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/* core_tpg_get_initiator_node_acl():
 *
 * Locked lookup of a node ACL by initiator name.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	spin_unlock_irq(&tpg->acl_node_lock);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

/* core_tpg_add_node_to_devs():
 *
 * Create demo-mode MappedLUNs for every active LUN in the TPG for a
 * dynamically generated node ACL.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

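		/*
		 * Drop the LUN list lock while setting up per-node access for
		 * this LUN; it is re-taken before the next loop iteration.
		 */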
		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}

/* core_set_queue_depth_for_node():
 *
 * Ensure the node ACL has a usable (non-zero) queue depth.
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0, "
			"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

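/* Free an array of pointers previously allocated by array_zalloc(). */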
void array_free(void *array, int n)
{
	void **a = array;
	int i;

	for (i = 0; i < n; i++)
		kfree(a[i]);
	kfree(a);
}

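/*
 * Allocate a table of n pointers plus n zeroed objects of the given size;
 * on any allocation failure everything allocated so far is freed.
 */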
static void *array_zalloc(int n, size_t size, gfp_t flags)
{
	void **a;
	int i;

	a = kzalloc(n * sizeof(void*), flags);
	if (!a)
		return NULL;
	for (i = 0; i < n; i++) {
		a[i] = kzalloc(size, flags);
		if (!a[i]) {
			array_free(a, n);
			return NULL;
		}
	}
	return a;
}

/* core_create_device_list_for_node():
 *
 * Allocate and initialize the per-node device (MappedLUN) entry table.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_dev_entry), GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}

/* core_tpg_check_initiator_node_acl()
 *
 * Look up an explicit node ACL for initiatorname, or generate a dynamic
 * one when the fabric allows demo mode.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

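	/*
	 * No explicit ACL exists; only proceed if the fabric allows
	 * demo-mode (dynamically generated) node ACLs.
	 */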
	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

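/*
 * Busy-wait until all SPC-3 persistent reservation references on this
 * node ACL have been dropped.
 */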
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/* core_tpg_add_initiator_node_acl():
 *
 * Add an explicit node ACL, converting an existing dynamic (demo-mode)
 * ACL in place if one is present.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() is
			 * returning a pointer to the existing demo mode
			 * node ACL instead.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
							se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);

/* core_tpg_del_initiator_node_acl():
 *
 * Remove a node ACL and shut down any active sessions referencing it.
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	LIST_HEAD(sess_list);
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
	}
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	acl->acl_stop = 1;

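	/*
	 * Move any sessions still referencing this ACL onto a local list
	 * under nacl_sess_lock; they are shut down below without the lock.
	 */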
	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;

		target_get_session(sess);
		list_move(&sess->sess_acl_list, &sess_list);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
		list_del(&sess->sess_acl_list);

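		/*
		 * Drop the session reference taken in the loop above; when
		 * shutdown_session() reports it initiated the teardown, an
		 * additional reference is dropped here as well.
		 */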
		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		target_put_session(sess);
		if (!rc)
			continue;
		target_put_session(sess);
	}
	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);

/* core_tpg_set_initiator_node_queue_depth():
 *
 * Change the queue depth for an initiator node ACL, forcing session
 * reinstatement when force=1 is given.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	unsigned long flags;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

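	/*
	 * Scan the TPG's active sessions for this ACL; without force=1 an
	 * operational session blocks the queue depth change.
	 */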
	spin_lock_irqsave(&tpg->session_lock, flags);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational. To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_irqrestore(&tpg->session_lock, flags);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to sanity check the requested
	 * queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_irqrestore(&tpg->session_lock, flags);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&tpg->session_lock, flags);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

/* core_tpg_set_initiator_node_tag():
 *
 * Initiator nodeacl tags are not used internally, but may be used by
 * userspace to emulate aliases or groups.
 * Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_ref_comp);
}

static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_sep_lock);
	init_completion(&lun->lun_ref_comp);

	ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}

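/*
 * Called by a fabric module to register a new TPG with target core.
 * A typical call from a fabric's TPG creation path looks roughly like
 * the following (sketch only, names illustrative):
 *
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &my_tpg->se_tpg,
 *				my_tpg, TRANSPORT_TPG_TYPE_NORMAL);
 */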
int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_lun), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_link_magic = SE_LUN_LINK_MAGIC;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_sep_lock);
		init_completion(&lun->lun_ref_comp);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

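	/*
	 * Normal (non-discovery) TPGs also get a virtual LUN 0 backed by the
	 * global g_lun0_dev, so a LUN 0 exists for REPORT LUNS even before
	 * any real LUNs have been mapped.
	 */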
	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			array_free(se_tpg->tpg_lun_list,
				   TRANSPORT_MAX_LUNS_PER_TPG);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		se_tpg->se_tpg_tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

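	/* Wait for any outstanding persistent reservation references to drop. */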
	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	struct se_device *dev)
{
	int ret;

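	/*
	 * Set up the per-cpu reference that gates I/O to this LUN, then
	 * export the backing device through the fabric port.
	 */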
	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
	if (ret < 0)
		return ret;

	ret = core_dev_export(dev, tpg, lun);
	if (ret < 0) {
		percpu_ref_cancel_init(&lun->lun_ref);
		return ret;
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}

struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
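	/*
	 * Tear down MappedLUN ACLs referencing this LUN, wait for outstanding
	 * percpu references to drain, then unexport the backing device.
	 */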
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_ref(lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}