/*
 * SBP2 target driver (SCSI over IEEE1394 in target mode)
 *
 * Copyright (C) 2011 Chris Boot <bootc@bootc.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define KMSG_COMPONENT "sbp_target"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <asm/unaligned.h>

#include "sbp_target.h"

/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
	.start	= CSR_REGISTER_BASE + 0x10000,
	.end	= 0x1000000000000ULL,
};

static const u32 sbp_unit_directory_template[] = {
	0x1200609e, /* unit_specifier_id: NCITS/T10 */
	0x13010483, /* unit_sw_version: 1155D Rev 4 */
	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
	0x390104d8, /* command_set: SPC-2 */
	0x3b000000, /* command_set_revision: 0 */
	0x3c000001, /* firmware_revision: 1 */
};

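/* Period of the per-session maintenance work: one second. */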
#define SESSION_MAINTENANCE_INTERVAL	HZ

static atomic_t login_id = ATOMIC_INIT(0);

static void session_maintenance_work(struct work_struct *);
static int sbp_run_transaction(struct fw_card *, int, int, int, int,
		unsigned long long, void *, size_t);

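/*
 * Read the peer's EUI-64 out of its config ROM: the GUID is held in the
 * third and fourth quadlets of the bus information block (offsets 0x0c
 * and 0x10 from CSR_CONFIG_ROM), high half first.
 */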
static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
{
	int ret;
	__be32 high, low;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
			&high, sizeof(high));
	if (ret != RCODE_COMPLETE)
		return ret;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
			&low, sizeof(low));
	if (ret != RCODE_COMPLETE)
		return ret;

	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);

	return RCODE_COMPLETE;
}

static struct sbp_session *sbp_session_find_by_guid(
		struct sbp_tpg *tpg, u64 guid)
{
	struct se_session *se_sess;
	struct sbp_session *sess, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;
		if (sess->guid == guid)
			found = sess;
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static struct sbp_login_descriptor *sbp_login_find_by_lun(
		struct sbp_session *session, u32 unpacked_lun)
{
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&session->lock);
	list_for_each_entry(login, &session->login_list, link) {
		if (login->login_lun == unpacked_lun)
			found = login;
	}
	spin_unlock_bh(&session->lock);

	return found;
}

static int sbp_login_count_all_by_lun(
		struct sbp_tpg *tpg,
		u32 unpacked_lun,
		int exclusive)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	int count = 0;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_lun != unpacked_lun)
				continue;

			if (!exclusive || login->exclusive)
				count++;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return count;
}

static struct sbp_login_descriptor *sbp_login_find_by_id(
		struct sbp_tpg *tpg, int login_id)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_id == login_id)
				found = login;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
{
	struct se_portal_group *se_tpg = &tpg->se_tpg;
	struct se_lun *se_lun;

	rcu_read_lock();
	hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
		if (se_lun->unpacked_lun == login_lun) {
			rcu_read_unlock();
			*err = 0;
			return login_lun;
		}
	}
	rcu_read_unlock();

	*err = -ENODEV;
	return login_lun;
}

static struct sbp_session *sbp_session_create(
		struct sbp_tpg *tpg,
		u64 guid)
{
	struct sbp_session *sess;
	int ret;
	char guid_str[17];

	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&sess->lock);
	INIT_LIST_HEAD(&sess->login_list);
	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
	sess->guid = guid;

	sess->se_sess = target_setup_session(&tpg->se_tpg, 128,
			sizeof(struct sbp_target_request),
			TARGET_PROT_NORMAL, guid_str,
			sess, NULL);
	if (IS_ERR(sess->se_sess)) {
		pr_err("failed to init se_session\n");
		ret = PTR_ERR(sess->se_sess);
		kfree(sess);
		return ERR_PTR(ret);
	}

	return sess;
}

static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
{
	spin_lock_bh(&sess->lock);
	if (!list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (cancel_work)
		cancel_delayed_work_sync(&sess->maint_work);

	target_remove_session(sess->se_sess);

	if (sess->card)
		fw_card_put(sess->card);

	kfree(sess);
}

static void sbp_target_agent_unregister(struct sbp_target_agent *);

static void sbp_login_release(struct sbp_login_descriptor *login,
		bool cancel_work)
{
	struct sbp_session *sess = login->sess;

	/* FIXME: abort/wait on tasks */

	sbp_target_agent_unregister(login->tgt_agt);

	if (sess) {
		spin_lock_bh(&sess->lock);
		list_del(&login->link);
		spin_unlock_bh(&sess->lock);

		sbp_session_release(sess, cancel_work);
	}

	kfree(login);
}

static struct sbp_target_agent *sbp_target_agent_register(
		struct sbp_login_descriptor *);

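/*
 * Handle a LOGIN management ORB: validate the requested LUN, read the
 * initiator's GUID, enforce the exclusive-login rules and the per-LUN
 * login limit, then create or reuse a session, register a command block
 * agent for the login, and write the login response block back to the
 * address given in the ORB.
 */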
static void sbp_management_request_login(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	struct sbp_login_response_block *response;
	u64 guid;
	u32 unpacked_lun;
	int login_response_len, ret;

	unpacked_lun = sbp_get_lun_from_tpg(tpg,
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
	if (ret) {
		pr_notice("login to unknown LUN: %d\n",
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
		return;
	}

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
		unpacked_lun, guid);

	sess = sbp_session_find_by_guid(tpg, guid);
	if (sess) {
		login = sbp_login_find_by_lun(sess, unpacked_lun);
		if (login) {
			pr_notice("initiator already logged-in\n");

			/*
			 * SBP-2 R4 says we should return access denied, but
			 * that can confuse initiators. Instead we need to
			 * treat this like a reconnect, but send the login
			 * response block like a fresh login.
			 *
			 * This is required particularly in the case of Apple
			 * devices booting off the FireWire target, where
			 * the firmware has an active login to the target. When
			 * the OS takes control of the session it issues its own
			 * LOGIN rather than a RECONNECT. To avoid the machine
			 * waiting until the reconnect_hold expires, we can skip
			 * the ACCESS_DENIED errors to speed things up.
			 */

			goto already_logged_in;
		}
	}

	/*
	 * check exclusive bit in login request
	 * reject with access_denied if any logins present
	 */
	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
	    sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
		pr_warn("refusing exclusive login with other active logins\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check exclusive bit in any existing login descriptor
	 * reject with access_denied if any exclusive logins present
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
		pr_warn("refusing login while another exclusive login present\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check we haven't exceeded the number of allowed logins
	 * reject with resources_unavailable if we have
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
			tport->max_logins_per_lun) {
		pr_warn("max number of logins reached\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	if (!sess) {
		sess = sbp_session_create(tpg, guid);
		if (IS_ERR(sess)) {
			switch (PTR_ERR(sess)) {
			case -EPERM:
				ret = SBP_STATUS_ACCESS_DENIED;
				break;
			default:
				ret = SBP_STATUS_RESOURCES_UNAVAIL;
				break;
			}

			req->status.status = cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_SBP_STATUS(ret));
			return;
		}

		sess->node_id = req->node_addr;
		sess->card = fw_card_get(req->card);
		sess->generation = req->generation;
		sess->speed = req->speed;

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	}

	/* only take the latest reconnect_hold into account */
	sess->reconnect_hold = min(
			1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
			tport->max_reconnect_timeout) - 1;

	login = kmalloc(sizeof(*login), GFP_KERNEL);
	if (!login) {
		pr_err("failed to allocate login descriptor\n");

		sbp_session_release(sess, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login->sess = sess;
	login->login_lun = unpacked_lun;
	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
	login->login_id = atomic_inc_return(&login_id);

	login->tgt_agt = sbp_target_agent_register(login);
	if (IS_ERR(login->tgt_agt)) {
		ret = PTR_ERR(login->tgt_agt);
		pr_err("failed to map command block handler: %d\n", ret);

		sbp_session_release(sess, true);
		kfree(login);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	spin_lock_bh(&sess->lock);
	list_add_tail(&login->link, &sess->login_list);
	spin_unlock_bh(&sess->lock);

already_logged_in:
	response = kzalloc(sizeof(*response), GFP_KERNEL);
	if (!response) {
		pr_err("failed to allocate login response block\n");

		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login_response_len = clamp_val(
			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
			12, sizeof(*response));
	response->misc = cpu_to_be32(
		((login_response_len & 0xffff) << 16) |
		(login->login_id & 0xffff));
	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
		&response->command_block_agent);

	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
		sess->node_id, sess->generation, sess->speed,
		sbp2_pointer_to_addr(&req->orb.ptr2), response,
		login_response_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("failed to write login response block: %x\n", ret);

		kfree(response);
		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	kfree(response);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void sbp_management_request_query_logins(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	pr_notice("QUERY LOGINS not implemented\n");
	/* FIXME: implement */

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
}

static void sbp_management_request_reconnect(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int ret;
	u64 guid;
	struct sbp_login_descriptor *login;

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);

	login = sbp_login_find_by_id(tpg,
		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));

	if (!login) {
		pr_err("mgt_agent RECONNECT unknown login ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	if (login->sess->guid != guid) {
		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	spin_lock_bh(&login->sess->lock);
	if (login->sess->card)
		fw_card_put(login->sess->card);

	/* update the node details */
	login->sess->generation = req->generation;
	login->sess->node_id = req->node_addr;
	login->sess->card = fw_card_get(req->card);
	login->sess->speed = req->speed;
	spin_unlock_bh(&login->sess->lock);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void sbp_management_request_logout(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int id;
	struct sbp_login_descriptor *login;

	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));

	login = sbp_login_find_by_id(tpg, id);
	if (!login) {
		pr_warn("cannot find login: %d\n", id);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
		return;
	}

	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
		login->login_lun, login->login_id);

	if (req->node_addr != login->sess->node_id) {
		pr_warn("logout from different node ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	sbp_login_release(login, true);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

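/*
 * Detect a bus reset or card removal from the maintenance work: if the
 * card has gone away or its generation no longer matches the session's,
 * invalidate the node ID and arm the reconnect timer for
 * reconnect_hold + 1 seconds.
 */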
static void session_check_for_reset(struct sbp_session *sess)
{
	bool card_valid = false;

	spin_lock_bh(&sess->lock);

	if (sess->card) {
		spin_lock_irq(&sess->card->lock);
		card_valid = (sess->card->local_node != NULL);
		spin_unlock_irq(&sess->card->lock);

		if (!card_valid) {
			fw_card_put(sess->card);
			sess->card = NULL;
		}
	}

	if (!card_valid || (sess->generation != sess->card->generation)) {
		pr_info("Waiting for reconnect from node: %016llx\n",
				sess->guid);

		sess->node_id = -1;
		sess->reconnect_expires = get_jiffies_64() +
			((sess->reconnect_hold + 1) * HZ);
	}

	spin_unlock_bh(&sess->lock);
}

static void session_reconnect_expired(struct sbp_session *sess)
{
	struct sbp_login_descriptor *login, *temp;
	LIST_HEAD(login_list);

	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);

	spin_lock_bh(&sess->lock);
	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
		login->sess = NULL;
		list_move_tail(&login->link, &login_list);
	}
	spin_unlock_bh(&sess->lock);

	list_for_each_entry_safe(login, temp, &login_list, link) {
		list_del(&login->link);
		sbp_login_release(login, false);
	}

	sbp_session_release(sess, false);
}

static void session_maintenance_work(struct work_struct *work)
{
	struct sbp_session *sess = container_of(work, struct sbp_session,
			maint_work.work);

	/* could be called while tearing down the session */
	spin_lock_bh(&sess->lock);
	if (list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (sess->node_id != -1) {
		/* check for bus reset and make node_id invalid */
		session_check_for_reset(sess);

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
		/* still waiting for reconnect */
		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else {
		/* reconnect timeout has expired */
		session_reconnect_expired(sess);
	}
}

static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	int state;

	switch (tcode) {
	case TCODE_READ_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_STATE READ\n");

		spin_lock_bh(&agent->lock);
		state = agent->state;
		spin_unlock_bh(&agent->lock);

		*(__be32 *)data = cpu_to_be32(state);

		return RCODE_COMPLETE;

	case TCODE_WRITE_QUADLET_REQUEST:
		/* ignored */
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_RESET\n");
		spin_lock_bh(&agent->lock);
		agent->state = AGENT_STATE_RESET;
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	struct sbp2_pointer *ptr = data;

	switch (tcode) {
	case TCODE_WRITE_BLOCK_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED &&
		    agent->state != AGENT_STATE_RESET) {
			spin_unlock_bh(&agent->lock);
			pr_notice("Ignoring ORB_POINTER write while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
		agent->doorbell = false;

		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
				agent->orb_pointer);

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_BLOCK_REQUEST:
		pr_debug("tgt_agent ORB_POINTER READ\n");
		spin_lock_bh(&agent->lock);
		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED) {
			spin_unlock_bh(&agent->lock);
			pr_debug("Ignoring DOORBELL while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->doorbell = true;

		pr_debug("tgt_agent DOORBELL\n");

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
		int tcode, void *data, struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
		/* ignored as we don't send unsolicited status */
		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

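/*
 * Dispatcher for the per-login command block agent CSRs. Register layout
 * within the handler's 0x20-byte range:
 *
 *	0x00  AGENT_STATE			(quadlet read)
 *	0x04  AGENT_RESET			(quadlet write)
 *	0x08  ORB_POINTER			(two quadlets, read/write)
 *	0x10  DOORBELL				(quadlet write)
 *	0x14  UNSOLICITED_STATUS_ENABLE		(quadlet write)
 *
 * Requests arriving from the wrong node or with a stale bus generation
 * are refused before the offset is decoded.
 */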
static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *data, size_t length,
		void *callback_data)
{
	struct sbp_target_agent *agent = callback_data;
	struct sbp_session *sess = agent->login->sess;
	int sess_gen, sess_node, rcode;

	spin_lock_bh(&sess->lock);
	sess_gen = sess->generation;
	sess_node = sess->node_id;
	spin_unlock_bh(&sess->lock);

	if (generation != sess_gen) {
		pr_notice("ignoring request with wrong generation\n");
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	if (source != sess_node) {
		pr_notice("ignoring request from foreign node (%x != %x)\n",
				source, sess_node);
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	/* turn offset into the offset from the start of the block */
	offset -= agent->handler.offset;

	if (offset == 0x00 && length == 4) {
		/* AGENT_STATE */
		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
	} else if (offset == 0x04 && length == 4) {
		/* AGENT_RESET */
		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
	} else if (offset == 0x08 && length == 8) {
		/* ORB_POINTER */
		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
	} else if (offset == 0x10 && length == 4) {
		/* DOORBELL */
		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
	} else if (offset == 0x14 && length == 4) {
		/* UNSOLICITED_STATUS_ENABLE */
		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
				data, agent);
	} else {
		rcode = RCODE_ADDRESS_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static void sbp_handle_command(struct sbp_target_request *);
static int sbp_send_status(struct sbp_target_request *);
static void sbp_free_request(struct sbp_target_request *);

static void tgt_agent_process_work(struct work_struct *work)
{
	struct sbp_target_request *req =
		container_of(work, struct sbp_target_request, work);

	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
			req->orb_pointer,
			sbp2_pointer_to_addr(&req->orb.next_orb),
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			be32_to_cpu(req->orb.misc));

	if (req->orb_pointer >> 32)
		pr_debug("ORB with high bits set\n");

	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
	case 0: /* Format specified by this standard */
		sbp_handle_command(req);
		return;
	case 1: /* Reserved for future standardization */
	case 2: /* Vendor-dependent */
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_REQ_TYPE_NOTSUPP));
		sbp_send_status(req);
		return;
	case 3: /* Dummy ORB */
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_DUMMY_ORB_COMPLETE));
		sbp_send_status(req);
		return;
	default:
		BUG();
	}
}

/* used to double-check we haven't been issued an AGENT_RESET */
static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
{
	bool active;

	spin_lock_bh(&agent->lock);
	active = (agent->state == AGENT_STATE_ACTIVE);
	spin_unlock_bh(&agent->lock);

	return active;
}

static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
	struct fw_card *card, u64 next_orb)
{
	struct se_session *se_sess = sess->se_sess;
	struct sbp_target_request *req;
	int tag, cpu;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0)
		return ERR_PTR(-ENOMEM);

	req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
	memset(req, 0, sizeof(*req));
	req->se_cmd.map_tag = tag;
	req->se_cmd.map_cpu = cpu;
	req->se_cmd.tag = next_orb;

	return req;
}

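/*
 * Walk the initiator's list of ORBs: fetch each ORB with a block read,
 * hand it to tgt_agent_process_work, and follow next_ORB until the list
 * terminates (high bit of next_ORB set) or the agent is reset. After a
 * DOORBELL the current ORB has already been handled, so it is re-fetched
 * only to pick up its next_ORB field and then freed.
 */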
static void tgt_agent_fetch_work(struct work_struct *work)
{
	struct sbp_target_agent *agent =
		container_of(work, struct sbp_target_agent, work);
	struct sbp_session *sess = agent->login->sess;
	struct sbp_target_request *req;
	int ret;
	bool doorbell = agent->doorbell;
	u64 next_orb = agent->orb_pointer;

	while (next_orb && tgt_agent_check_active(agent)) {
		req = sbp_mgt_get_req(sess, sess->card, next_orb);
		if (IS_ERR(req)) {
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);
			return;
		}

		req->login = agent->login;
		req->orb_pointer = next_orb;

		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
					req->orb_pointer >> 32));
		req->status.orb_low = cpu_to_be32(
				req->orb_pointer & 0xfffffffc);

		/* read in the ORB */
		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
				sess->node_id, sess->generation, sess->speed,
				req->orb_pointer, &req->orb, sizeof(req->orb));
		if (ret != RCODE_COMPLETE) {
			pr_debug("tgt_orb fetch failed: %x\n", ret);
			req->status.status |= cpu_to_be32(
					STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED) |
					STATUS_BLOCK_RESP(
						STATUS_RESP_TRANSPORT_FAILURE) |
					STATUS_BLOCK_DEAD(1) |
					STATUS_BLOCK_LEN(1) |
					STATUS_BLOCK_SBP_STATUS(
						SBP_STATUS_UNSPECIFIED_ERROR));
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);

			sbp_send_status(req);
			return;
		}

		/* check the next_ORB field */
		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
			next_orb = 0;
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED));
		} else {
			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_CONTINUING));
		}

		if (tgt_agent_check_active(agent) && !doorbell) {
			INIT_WORK(&req->work, tgt_agent_process_work);
			queue_work(system_unbound_wq, &req->work);
		} else {
			/* don't process this request, just check next_ORB */
			sbp_free_request(req);
		}

		spin_lock_bh(&agent->lock);
		doorbell = agent->doorbell = false;

		/* check if we should carry on processing */
		if (next_orb)
			agent->orb_pointer = next_orb;
		else
			agent->state = AGENT_STATE_SUSPENDED;

		spin_unlock_bh(&agent->lock);
	}
}

static struct sbp_target_agent *sbp_target_agent_register(
		struct sbp_login_descriptor *login)
{
	struct sbp_target_agent *agent;
	int ret;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);

	agent->handler.length = 0x20;
	agent->handler.address_callback = tgt_agent_rw;
	agent->handler.callback_data = agent;

	agent->login = login;
	agent->state = AGENT_STATE_RESET;
	INIT_WORK(&agent->work, tgt_agent_fetch_work);
	agent->orb_pointer = 0;
	agent->doorbell = false;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

/*
 * Simple wrapper around fw_run_transaction that retries the transaction
 * up to five times on transient failures, backing off quadratically
 * (5 * attempt^2 microseconds). Definitive rcodes (completion, type,
 * address and generation errors) are returned immediately.
 */
static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
		int generation, int speed, unsigned long long offset,
		void *payload, size_t length)
{
	int attempt, ret, delay;

	for (attempt = 1; attempt <= 5; attempt++) {
		ret = fw_run_transaction(card, tcode, destination_id,
				generation, speed, offset, payload, length);

		switch (ret) {
		case RCODE_COMPLETE:
		case RCODE_TYPE_ERROR:
		case RCODE_ADDRESS_ERROR:
		case RCODE_GENERATION:
			return ret;

		default:
			delay = 5 * attempt * attempt;
			usleep_range(delay, delay * 2);
		}
	}

	return ret;
}

/*
 * Wrapper around sbp_run_transaction that gets the card, destination,
 * generation and speed out of the request's session.
 */
static int sbp_run_request_transaction(struct sbp_target_request *req,
		int tcode, unsigned long long offset, void *payload,
		size_t length)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	struct fw_card *card;
	int node_id, generation, speed, ret;

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	speed = sess->speed;
	spin_unlock_bh(&sess->lock);

	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
			offset, payload, length);

	fw_card_put(card);

	return ret;
}

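/*
 * Copy the CDB out of the ORB. If the command is longer than the
 * command_block field, the remainder sits in the initiator's memory
 * directly after the ORB and is fetched with an additional block read.
 */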
static int sbp_fetch_command(struct sbp_target_request *req)
{
	int ret, cmd_len, copy_len;

	cmd_len = scsi_command_size(req->orb.command_block);

	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
	if (!req->cmd_buf)
		return -ENOMEM;

	memcpy(req->cmd_buf, req->orb.command_block,
		min_t(int, cmd_len, sizeof(req->orb.command_block)));

	if (cmd_len > sizeof(req->orb.command_block)) {
		pr_debug("sbp_fetch_command: filling in long command\n");
		copy_len = cmd_len - sizeof(req->orb.command_block);

		ret = sbp_run_request_transaction(req,
				TCODE_READ_BLOCK_REQUEST,
				req->orb_pointer + sizeof(req->orb),
				req->cmd_buf + sizeof(req->orb.command_block),
				copy_len);
		if (ret != RCODE_COMPLETE)
			return -EIO;
	}

	return 0;
}

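/*
 * If the ORB's page-table-present bit is set, data_descriptor points at
 * a scatter/gather table of data_size entries rather than a contiguous
 * buffer; fetch the table so the data mover can walk its segments.
 */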
static int sbp_fetch_page_table(struct sbp_target_request *req)
{
	int pg_tbl_sz, ret;
	struct sbp_page_table_entry *pg_tbl;

	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
		return 0;

	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
		sizeof(struct sbp_page_table_entry);

	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
	if (!pg_tbl)
		return -ENOMEM;

	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			pg_tbl, pg_tbl_sz);
	if (ret != RCODE_COMPLETE) {
		kfree(pg_tbl);
		return -EIO;
	}

	req->pg_tbl = pg_tbl;
	return 0;
}

static void sbp_calc_data_length_direction(struct sbp_target_request *req,
	u32 *data_len, enum dma_data_direction *data_dir)
{
	int data_size, direction, idx;

	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));

	if (!data_size) {
		*data_len = 0;
		*data_dir = DMA_NONE;
		return;
	}

	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->pg_tbl) {
		*data_len = 0;
		for (idx = 0; idx < data_size; idx++) {
			*data_len += be16_to_cpu(
					req->pg_tbl[idx].segment_length);
		}
	} else {
		*data_len = data_size;
	}
}

static void sbp_handle_command(struct sbp_target_request *req)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	int ret, unpacked_lun;
	u32 data_length;
	enum dma_data_direction data_dir;

	ret = sbp_fetch_command(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
		goto err;
	}

	ret = sbp_fetch_page_table(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
			ret);
		goto err;
	}

	unpacked_lun = req->login->login_lun;
	sbp_calc_data_length_direction(req, &data_length, &data_dir);

	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
			req->orb_pointer, unpacked_lun, data_length, data_dir);

	/* only used for printk until we do TMRs */
	req->se_cmd.tag = req->orb_pointer;
	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
			      req->sense_buf, unpacked_lun, data_length,
			      TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF))
		goto err;

	return;

err:
	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(1) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
	sbp_send_status(req);
}

/*
 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
 */
static int sbp_rw_data(struct sbp_target_request *req)
{
	struct sbp_session *sess = req->login->sess;
	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
		generation, num_pte, length, tfr_length,
		rcode = RCODE_COMPLETE;
	struct sbp_page_table_entry *pte;
	unsigned long long offset;
	struct fw_card *card;
	struct sg_mapping_iter iter;

	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
		tcode = TCODE_WRITE_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_FROM_SG;
	} else {
		tcode = TCODE_READ_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_TO_SG;
	}

	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));

	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
	if (pg_size) {
		pr_err("sbp_rw_data: page size ignored\n");
		pg_size = 0x100 << pg_size;
	}

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	spin_unlock_bh(&sess->lock);

	if (req->pg_tbl) {
		pte = req->pg_tbl;
		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));

		offset = 0;
		length = 0;
	} else {
		pte = NULL;
		num_pte = 0;

		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
		length = req->se_cmd.data_length;
	}

	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
		sg_miter_flags);

	while (length || num_pte) {
		if (!length) {
			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
				be32_to_cpu(pte->segment_base_lo);
			length = be16_to_cpu(pte->segment_length);

			pte++;
			num_pte--;
		}

		sg_miter_next(&iter);

		tfr_length = min3(length, max_payload, (int)iter.length);

		/* FIXME: take page_size into account */

		rcode = sbp_run_transaction(card, tcode, node_id,
				generation, speed,
				offset, iter.addr, tfr_length);

		if (rcode != RCODE_COMPLETE)
			break;

		length -= tfr_length;
		offset += tfr_length;
		iter.consumed = tfr_length;
	}

	sg_miter_stop(&iter);
	fw_card_put(card);

	if (rcode == RCODE_COMPLETE) {
		WARN_ON(length != 0);
		return 0;
	} else {
		return -EIO;
	}
}

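/*
 * Write the status block to the login's status FIFO. Only the quadlets
 * covered by the len field of the first status quadlet (STATUS_BLOCK_LEN)
 * are transferred.
 */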
static int sbp_send_status(struct sbp_target_request *req)
{
	int rc, ret = 0, length;
	struct sbp_login_descriptor *login = req->login;

	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;

	rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
			login->status_fifo_addr, &req->status, length);
	if (rc != RCODE_COMPLETE) {
		pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
		ret = -EIO;
		goto put_ref;
	}

	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
			req->orb_pointer);
	/*
	 * Drop the extra ACK_KREF reference taken by target_submit_cmd()
	 * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
	 * final se_cmd->cmd_kref put.
	 */
put_ref:
	target_put_sess_cmd(&req->se_cmd);
	return ret;
}

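/*
 * Repack fixed-format SCSI sense data into the SBP-2 status block
 * layout: SCSI status, sense key, ASC/ASCQ, the information and
 * command-specific fields, the FRU code and the sense-key-specific
 * bytes. Descriptor-format sense (0x72/0x73) is not translated and
 * aborts the request instead.
 */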
1370
1371 static void sbp_sense_mangle(struct sbp_target_request *req)
1372 {
1373 struct se_cmd *se_cmd = &req->se_cmd;
1374 u8 *sense = req->sense_buf;
1375 u8 *status = req->status.data;
1376
1377 WARN_ON(se_cmd->scsi_sense_length < 18);
1378
1379 switch (sense[0] & 0x7f) { /* sfmt */
1380 case 0x70: /* current, fixed */
1381 status[0] = 0 << 6;
1382 break;
1383 case 0x71: /* deferred, fixed */
1384 status[0] = 1 << 6;
1385 break;
1386 case 0x72: /* current, descriptor */
1387 case 0x73: /* deferred, descriptor */
1388 default:
1389 /*
1390 * TODO: SBP-3 specifies what we should do with descriptor
1391 * format sense data
1392 */
1393 pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
1394 sense[0]);
1395 req->status.status |= cpu_to_be32(
1396 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1397 STATUS_BLOCK_DEAD(0) |
1398 STATUS_BLOCK_LEN(1) |
1399 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
1400 return;
1401 }
1402
1403 status[0] |= se_cmd->scsi_status & 0x3f;/* status */
1404 status[1] =
1405 (sense[0] & 0x80) | /* valid */
1406 ((sense[2] & 0xe0) >> 1) | /* mark, eom, ili */
1407 (sense[2] & 0x0f); /* sense_key */
1408 status[2] = se_cmd->scsi_asc; /* sense_code */
1409 status[3] = se_cmd->scsi_ascq; /* sense_qualifier */
1410
1411 /* information */
1412 status[4] = sense[3];
1413 status[5] = sense[4];
1414 status[6] = sense[5];
1415 status[7] = sense[6];
1416
1417 /* CDB-dependent */
1418 status[8] = sense[8];
1419 status[9] = sense[9];
1420 status[10] = sense[10];
1421 status[11] = sense[11];
1422
1423 /* fru */
1424 status[12] = sense[14];
1425
1426 /* sense_key-dependent */
1427 status[13] = sense[15];
1428 status[14] = sense[16];
1429 status[15] = sense[17];
1430
1431 req->status.status |= cpu_to_be32(
1432 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1433 STATUS_BLOCK_DEAD(0) |
1434 STATUS_BLOCK_LEN(5) |
1435 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1436 }
1437
1438 static int sbp_send_sense(struct sbp_target_request *req)
1439 {
1440 struct se_cmd *se_cmd = &req->se_cmd;
1441
1442 if (se_cmd->scsi_sense_length) {
1443 sbp_sense_mangle(req);
1444 } else {
1445 req->status.status |= cpu_to_be32(
1446 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1447 STATUS_BLOCK_DEAD(0) |
1448 STATUS_BLOCK_LEN(1) |
1449 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1450 }
1451
1452 return sbp_send_status(req);
1453 }
1454
1455 static void sbp_free_request(struct sbp_target_request *req)
1456 {
1457 struct se_cmd *se_cmd = &req->se_cmd;
1458 struct se_session *se_sess = se_cmd->se_sess;
1459
1460 kfree(req->pg_tbl);
1461 kfree(req->cmd_buf);
1462
1463 target_free_tag(se_sess, se_cmd);
1464 }
1465
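/*
 * Work item for the management agent: fetch the management ORB from the
 * initiator, dispatch on its function code, then write the resulting
 * status block to the ORB's status FIFO and return the agent to idle.
 */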
static void sbp_mgt_agent_process(struct work_struct *work)
{
	struct sbp_management_agent *agent =
		container_of(work, struct sbp_management_agent, work);
	struct sbp_management_request *req = agent->request;
	int ret;
	int status_data_len = 0;

	/* fetch the ORB from the initiator */
	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		agent->orb_offset, &req->orb, sizeof(req->orb));
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb fetch failed: %x\n", ret);
		goto out;
	}

	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
		sbp2_pointer_to_addr(&req->orb.ptr1),
		sbp2_pointer_to_addr(&req->orb.ptr2),
		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
		sbp2_pointer_to_addr(&req->orb.status_fifo));

	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
		pr_err("mgt_orb bad request\n");
		goto out;
	}

	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
	case MANAGEMENT_ORB_FUNCTION_LOGIN:
		sbp_management_request_login(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
		sbp_management_request_query_logins(agent, req,
				&status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
		sbp_management_request_reconnect(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
		pr_notice("SET PASSWORD not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
		sbp_management_request_logout(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
		pr_notice("ABORT TASK not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
		pr_notice("ABORT TASK SET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
		pr_notice("LOGICAL UNIT RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
		pr_notice("TARGET RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	default:
		pr_notice("unknown management function 0x%x\n",
			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;
	}

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
	req->status.orb_low = cpu_to_be32(agent->orb_offset);

	/* write the status block back to the initiator */
	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		sbp2_pointer_to_addr(&req->orb.status_fifo),
		&req->status, 8 + status_data_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb status write failed: %x\n", ret);
		goto out;
	}

out:
	fw_card_put(req->card);
	kfree(req);

	spin_lock_bh(&agent->lock);
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	spin_unlock_bh(&agent->lock);
}

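/*
 * CSR handler for the MANAGEMENT_AGENT register: an 8-byte block write
 * of an ORB pointer kicks off sbp_mgt_agent_process; a second request
 * arriving while the agent is busy is refused with RCODE_CONFLICT_ERROR.
 */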
static void sbp_mgt_agent_rw(struct fw_card *card,
	struct fw_request *request, int tcode, int destination, int source,
	int generation, unsigned long long offset, void *data, size_t length,
	void *callback_data)
{
	struct sbp_management_agent *agent = callback_data;
	struct sbp2_pointer *ptr = data;
	int rcode = RCODE_ADDRESS_ERROR;

	if (!agent->tport->enable)
		goto out;

	if ((offset != agent->handler.offset) || (length != 8))
		goto out;

	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
		struct sbp_management_request *req;
		int prev_state;

		spin_lock_bh(&agent->lock);
		prev_state = agent->state;
		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
		spin_unlock_bh(&agent->lock);

		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
			pr_notice("ignoring management request while busy\n");
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (!req) {
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req->card = fw_card_get(card);
		req->generation = generation;
		req->node_addr = source;
		req->speed = fw_get_request_speed(request);

		agent->orb_offset = sbp2_pointer_to_addr(ptr);
		agent->request = req;

		queue_work(system_unbound_wq, &agent->work);
		rcode = RCODE_COMPLETE;
	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
		addr_to_sbp2_pointer(agent->orb_offset, ptr);
		rcode = RCODE_COMPLETE;
	} else {
		rcode = RCODE_TYPE_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static struct sbp_management_agent *sbp_management_agent_register(
		struct sbp_tport *tport)
{
	int ret;
	struct sbp_management_agent *agent;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);
	agent->tport = tport;
	agent->handler.length = 0x08;
	agent->handler.address_callback = sbp_mgt_agent_rw;
	agent->handler.callback_data = agent;
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	INIT_WORK(&agent->work, sbp_mgt_agent_process);
	agent->orb_offset = 0;
	agent->request = NULL;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

static int sbp_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int sbp_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 sbp_get_tag(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void sbp_release_cmd(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	sbp_free_request(req);
}

static u32 sbp_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int sbp_write_pending(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	target_execute_cmd(se_cmd);
	return 0;
}

static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static int sbp_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int sbp_queue_data_in(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	return sbp_send_sense(req);
}

/*
 * Called after command (no data transfer) or after the write (to device)
 * operation is completed
 */
static int sbp_queue_status(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return sbp_send_sense(req);
}

static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}

static void sbp_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return transport_generic_free_cmd(&req->se_cmd, 0);
}

static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
	struct se_lun *lun;
	int count = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
		count++;
	rcu_read_unlock();

	return count;
}

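/*
 * Rebuild the unit directory advertised in the local config ROM: the
 * SBP-2 template entries plus the management agent offset (key 0x54),
 * unit_characteristics (0x3a), reconnect_timeout (0x3d), a pointer to
 * the unit unique ID leaf (0x8d) and one logical_unit_number entry
 * (0x14) per LUN, followed by the EUI-64 leaf itself.
 */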
static int sbp_update_unit_directory(struct sbp_tport *tport)
{
	struct se_lun *lun;
	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
	u32 *data;

	if (tport->unit_directory.data) {
		fw_core_remove_descriptor(&tport->unit_directory);
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	if (!tport->enable || !tport->tpg)
		return 0;

	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);

	/*
	 * Number of entries in the final unit directory:
	 *  - all of those in the template
	 *  - management_agent
	 *  - unit_characteristics
	 *  - reconnect_timeout
	 *  - unit unique ID
	 *  - one for each LUN
	 *
	 * MUST NOT include leaf or sub-directory entries
	 */
	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;

	if (tport->directory_id != -1)
		num_entries++;

	/* allocate num_entries + 4 for the header and unique ID leaf */
	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* directory_length */
	data[idx++] = num_entries << 16;

	/* directory_id */
	if (tport->directory_id != -1)
		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;

	/* unit directory template */
	memcpy(&data[idx], sbp_unit_directory_template,
			sizeof(sbp_unit_directory_template));
	idx += ARRAY_SIZE(sbp_unit_directory_template);

	/* management_agent */
	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);

	/* unit_characteristics */
	data[idx++] = 0x3a000000 |
		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
		SBP_ORB_FETCH_SIZE;

	/* reconnect_timeout */
	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);

	/* unit unique ID (leaf is just after LUNs) */
	data[idx++] = 0x8d000000 | (num_luns + 1);

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
		struct se_device *dev;
		int type;
		/*
		 * rcu_dereference_raw protected by se_lun->lun_group symlink
		 * reference to se_device->dev_group.
		 */
		dev = rcu_dereference_raw(lun->lun_se_dev);
		type = dev->transport->get_device_type(dev);

		/* logical_unit_number */
		data[idx++] = 0x14000000 |
			((type << 16) & 0x1f0000) |
			(lun->unpacked_lun & 0xffff);
	}
	rcu_read_unlock();

	/* unit unique ID leaf */
	data[idx++] = 2 << 16;
	data[idx++] = tport->guid >> 32;
	data[idx++] = tport->guid;

	tport->unit_directory.length = idx;
	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
	tport->unit_directory.data = data;

	ret = fw_core_add_descriptor(&tport->unit_directory);
	if (ret < 0) {
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	return ret;
}

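/*
 * Parse a 16-digit hex WWN (EUI-64) from a configfs name, tolerating a
 * trailing newline. Returns the number of characters consumed, or -1
 * after logging a diagnostic on malformed input.
 */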
1925 static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
1926 {
1927 const char *cp;
1928 char c, nibble;
1929 int pos = 0, err;
1930
1931 *wwn = 0;
1932 for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
1933 c = *cp;
1934 if (c == '\n' && cp[1] == '\0')
1935 continue;
1936 if (c == '\0') {
1937 err = 2;
1938 if (pos != 16)
1939 goto fail;
1940 return cp - name;
1941 }
1942 err = 3;
1943 if (isdigit(c))
1944 nibble = c - '0';
1945 else if (isxdigit(c))
1946 nibble = tolower(c) - 'a' + 10;
1947 else
1948 goto fail;
1949 *wwn = (*wwn << 4) | nibble;
1950 pos++;
1951 }
1952 err = 4;
1953 fail:
1954 printk(KERN_INFO "err %u len %zu pos %u\n",
1955 err, cp - name, pos);
1956 return -1;
1957 }

static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
{
	return snprintf(buf, len, "%016llx", wwn);
}
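
/*
 * Round-trip sketch (illustrative only): sbp_parse_wwn("0001020304050607")
 * returns 16 and yields 0x0001020304050607 (a single trailing '\n' is
 * tolerated); sbp_format_wwn() prints it back as the same 16 hex digits.
 */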

static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	u64 guid = 0;

	/* Only validate the name here; the core keeps the ACL name itself. */
	if (sbp_parse_wwn(name, &guid) < 0)
		return -EINVAL;
	return 0;
}
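
/*
 * Node ACLs are created from userspace with the initiator's EUI-64 as the
 * directory name, e.g. (hypothetical GUID, standard LIO configfs layout
 * assumed):
 *
 *   mkdir /sys/kernel/config/target/sbp/<target-guid>/tpgt_1/acls/0001020304050607
 */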

static int sbp_post_link_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);

	return sbp_update_unit_directory(tpg->tport);
}

static void sbp_pre_unlink_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	int ret;

	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
		tport->enable = 0;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		pr_err("unlink LUN: failed to update unit directory\n");
}
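
/*
 * These callbacks run when userspace links or unlinks a backstore into a
 * LUN, e.g. (illustrative paths, standard LIO layout assumed):
 *
 *   mkdir <tpg>/lun/lun_0
 *   ln -s /sys/kernel/config/target/core/iblock_0/disk0 <tpg>/lun/lun_0/disk0
 *
 * so the advertised unit directory always matches the current set of LUNs.
 */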

static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn,
		const char *name)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	struct sbp_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	if (tport->tpg) {
		pr_err("Only one TPG per Unit is possible.\n");
		return ERR_PTR(-EBUSY);
	}

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg)
		return ERR_PTR(-ENOMEM);

	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;
	tport->tpg = tpg;

	/* default attribute values */
	tport->enable = 0;
	tport->directory_id = -1;
	tport->mgt_orb_timeout = 15;
	tport->max_reconnect_timeout = 5;
	tport->max_logins_per_lun = 1;

	tport->mgt_agt = sbp_management_agent_register(tport);
	if (IS_ERR(tport->mgt_agt)) {
		ret = PTR_ERR(tport->mgt_agt);
		goto out_free_tpg;
	}

	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
	if (ret < 0)
		goto out_unreg_mgt_agt;

	return &tpg->se_tpg;

out_unreg_mgt_agt:
	sbp_management_agent_unregister(tport->mgt_agt);
out_free_tpg:
	tport->tpg = NULL;
	kfree(tpg);
	return ERR_PTR(ret);
}

static void sbp_drop_tpg(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	core_tpg_deregister(se_tpg);
	sbp_management_agent_unregister(tport->mgt_agt);
	tport->tpg = NULL;
	kfree(tpg);
}

static struct se_wwn *sbp_make_tport(
		struct target_fabric_configfs *tf,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport;
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return ERR_PTR(-EINVAL);

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport)
		return ERR_PTR(-ENOMEM);

	tport->guid = guid;
	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);

	return &tport->tport_wwn;
}
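
/*
 * The WWN and TPG directories follow the usual configfs hierarchy, e.g.
 * (illustrative GUID):
 *
 *   mkdir /sys/kernel/config/target/sbp/0001020304050607
 *   mkdir /sys/kernel/config/target/sbp/0001020304050607/tpgt_1
 *
 * The first mkdir invokes sbp_make_tport(), the second sbp_make_tpg().
 */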

static void sbp_drop_tport(struct se_wwn *wwn)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	kfree(tport);
}

static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
}

CONFIGFS_ATTR_RO(sbp_wwn_, version);

static struct configfs_attribute *sbp_wwn_attrs[] = {
	&sbp_wwn_attr_version,
	NULL,
};

static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	if (tport->directory_id == -1)
		return sprintf(page, "implicit\n");
	else
		return sprintf(page, "%06x\n", tport->directory_id);
}

static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (tport->enable) {
		pr_err("Cannot change the directory_id on an active target.\n");
		return -EBUSY;
	}

	if (strstr(page, "implicit") == page) {
		tport->directory_id = -1;
	} else {
		if (kstrtoul(page, 16, &val) < 0)
			return -EINVAL;
		if (val > 0xffffff)
			return -EINVAL;

		tport->directory_id = val;
	}

	return count;
}

static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->enable);
}

static ssize_t sbp_tpg_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val != 0) && (val != 1))
		return -EINVAL;

	if (tport->enable == val)
		return count;

	if (val) {
		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
			pr_err("Cannot enable a target with no LUNs!\n");
			return -EINVAL;
		}
	} else {
		/* XXX: force-shutdown sessions instead? */
		spin_lock_bh(&se_tpg->session_lock);
		if (!list_empty(&se_tpg->tpg_sess_list)) {
			spin_unlock_bh(&se_tpg->session_lock);
			return -EBUSY;
		}
		spin_unlock_bh(&se_tpg->session_lock);
	}

	tport->enable = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0) {
		pr_err("Could not update Config ROM\n");
		return ret;
	}

	return count;
}
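
/*
 * Enabling is the final step once at least one LUN is linked, e.g.
 * (illustrative path):
 *
 *   echo 1 > /sys/kernel/config/target/sbp/0001020304050607/tpgt_1/enable
 *
 * Writing 0 is refused with -EBUSY while initiators remain logged in.
 */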

CONFIGFS_ATTR(sbp_tpg_, directory_id);
CONFIGFS_ATTR(sbp_tpg_, enable);

static struct configfs_attribute *sbp_tpg_base_attrs[] = {
	&sbp_tpg_attr_directory_id,
	&sbp_tpg_attr_enable,
	NULL,
};

static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
}

static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	if (tport->mgt_orb_timeout == val)
		return count;

	tport->mgt_orb_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
}

static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 32767))
		return -EINVAL;

	if (tport->max_reconnect_timeout == val)
		return count;

	tport->max_reconnect_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->max_logins_per_lun);
}

static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	/* XXX: also check against current count? */

	tport->max_logins_per_lun = val;

	return count;
}

CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);

static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
	&sbp_tpg_attrib_attr_mgt_orb_timeout,
	&sbp_tpg_attrib_attr_max_reconnect_timeout,
	&sbp_tpg_attrib_attr_max_logins_per_lun,
	NULL,
};
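
/*
 * The attrib/ knobs can be tuned before the TPG is enabled, e.g.
 * (illustrative values, within the ranges enforced above):
 *
 *   echo 30 > <tpg>/attrib/mgt_orb_timeout
 *   echo 10 > <tpg>/attrib/max_reconnect_timeout
 *   echo 2 > <tpg>/attrib/max_logins_per_lun
 */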

static const struct target_core_fabric_ops sbp_ops = {
	.module = THIS_MODULE,
	.fabric_name = "sbp",
	.tpg_get_wwn = sbp_get_fabric_wwn,
	.tpg_get_tag = sbp_get_tag,
	.tpg_check_demo_mode = sbp_check_true,
	.tpg_check_demo_mode_cache = sbp_check_true,
	.tpg_check_demo_mode_write_protect = sbp_check_false,
	.tpg_check_prod_mode_write_protect = sbp_check_false,
	.tpg_get_inst_index = sbp_tpg_get_inst_index,
	.release_cmd = sbp_release_cmd,
	.sess_get_index = sbp_sess_get_index,
	.write_pending = sbp_write_pending,
	.set_default_node_attributes = sbp_set_default_node_attrs,
	.get_cmd_state = sbp_get_cmd_state,
	.queue_data_in = sbp_queue_data_in,
	.queue_status = sbp_queue_status,
	.queue_tm_rsp = sbp_queue_tm_rsp,
	.aborted_task = sbp_aborted_task,
	.check_stop_free = sbp_check_stop_free,

	.fabric_make_wwn = sbp_make_tport,
	.fabric_drop_wwn = sbp_drop_tport,
	.fabric_make_tpg = sbp_make_tpg,
	.fabric_drop_tpg = sbp_drop_tpg,
	.fabric_post_link = sbp_post_link_lun,
	.fabric_pre_unlink = sbp_pre_unlink_lun,
	.fabric_make_np = NULL,
	.fabric_drop_np = NULL,
	.fabric_init_nodeacl = sbp_init_nodeacl,

	.tfc_wwn_attrs = sbp_wwn_attrs,
	.tfc_tpg_base_attrs = sbp_tpg_base_attrs,
	.tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs,
};

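/*
 * Registering the template should make /sys/kernel/config/target/sbp/
 * appear, the root of the hierarchy used in the examples above.
 */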
static int __init sbp_init(void)
{
	return target_register_template(&sbp_ops);
}

static void __exit sbp_exit(void)
{
	target_unregister_template(&sbp_ops);
}

MODULE_DESCRIPTION("FireWire SBP fabric driver");
MODULE_LICENSE("GPL");
module_init(sbp_init);
module_exit(sbp_exit);