/*
 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * Target Discovery
 *
 * This block discovers all FC-4 remote ports, including FCP initiators. It
 * also handles RSCN events and re-discovery if necessary.
 */

/*
 * DISC LOCKING
 *
 * The disc mutex can be locked when acquiring rport locks, but may not
 * be held when acquiring the lport lock. Refer to fc_lport.c for more
 * details.
 */
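
/*
 * Illustrative nesting only (field names per fc_lport.c/libfc.h; this is a
 * sketch, not code taken from any caller): a path that needs both locks
 * takes the lport lock first, and rport locks may then be taken while the
 * disc mutex is held:
 *
 *	mutex_lock(&lport->lp_mutex);
 *	mutex_lock(&lport->disc.disc_mutex);
 *		... rport locks may be acquired here ...
 *	mutex_unlock(&lport->disc.disc_mutex);
 *	mutex_unlock(&lport->lp_mutex);
 */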

#include <linux/timer.h>
#include <linux/err.h>
#include <asm/unaligned.h>

#include <scsi/fc/fc_gs.h>

#include <scsi/libfc.h>

#define FC_DISC_RETRY_LIMIT	3	/* max retries */
#define FC_DISC_RETRY_DELAY	500UL	/* (msecs) delay */

#define FC_DISC_DELAY		3

static int fc_disc_debug;

#define FC_DEBUG_DISC(fmt...)			\
	do {					\
		if (fc_disc_debug)		\
			FC_DBG(fmt);		\
	} while (0)

static void fc_disc_gpn_ft_req(struct fc_disc *);
static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
			      struct fc_rport_identifiers *);
static void fc_disc_del_target(struct fc_disc *, struct fc_rport *);
static void fc_disc_done(struct fc_disc *);
static void fc_disc_timeout(struct work_struct *);
static void fc_disc_single(struct fc_disc *, struct fc_disc_port *);
static void fc_disc_restart(struct fc_disc *);

/**
 * fc_disc_lookup_rport - lookup a remote port by port_id
 * @lport: Fibre Channel host port instance
 * @port_id: remote port port_id to match
 */
struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
				      u32 port_id)
{
	const struct fc_disc *disc = &lport->disc;
	struct fc_rport *rport, *found = NULL;
	struct fc_rport_libfc_priv *rdata;
	int disc_found = 0;

	list_for_each_entry(rdata, &disc->rports, peers) {
		rport = PRIV_TO_RPORT(rdata);
		if (rport->port_id == port_id) {
			disc_found = 1;
			found = rport;
			break;
		}
	}

	if (!disc_found)
		found = NULL;

	return found;
}

/**
 * fc_disc_stop_rports - delete all the remote ports associated with the lport
 * @disc: The discovery job to stop rports on
 *
 * Locking Note: This function expects that the lport mutex is locked before
 * calling it.
 */
void fc_disc_stop_rports(struct fc_disc *disc)
{
	struct fc_lport *lport;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rdata, *next;

	lport = disc->lport;

	mutex_lock(&disc->disc_mutex);
	list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
		rport = PRIV_TO_RPORT(rdata);
		list_del(&rdata->peers);
		lport->tt.rport_logoff(rport);
	}

	mutex_unlock(&disc->disc_mutex);
}
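
/*
 * Note: lport->tt.rport_logoff(), as used above and in fc_disc_restart(),
 * tears rports down asynchronously through the rport work queue (see
 * fc_rport.c), so the rports are not necessarily gone when it returns.
 * Callers that must wait follow up with rport_flush_queue(), as
 * fc_disc_stop_final() does at the end of this file.
 */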

/**
 * fc_disc_rport_callback - Event handler for rport events
 * @lport: The lport which is receiving the event
 * @rport: The rport which the event has occurred on
 * @event: The event that occurred
 *
 * Locking Note: The rport lock should not be held when calling
 *		 this function.
 */
static void fc_disc_rport_callback(struct fc_lport *lport,
				   struct fc_rport *rport,
				   enum fc_rport_event event)
{
	struct fc_rport_libfc_priv *rdata = rport->dd_data;
	struct fc_disc *disc = &lport->disc;
	int found = 0;

	FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event,
		      rport->port_id);

	if (event == RPORT_EV_CREATED) {
		if (disc) {
			found = 1;
			mutex_lock(&disc->disc_mutex);
			list_add_tail(&rdata->peers, &disc->rports);
			mutex_unlock(&disc->disc_mutex);
		}
	}

	if (!found)
		FC_DEBUG_DISC("The rport (%6x) is not maintained "
			      "by the discovery layer\n", rport->port_id);
}

/**
 * fc_disc_recv_rscn_req - Handle Registered State Change Notification (RSCN)
 * @sp: Current sequence of the RSCN exchange
 * @fp: RSCN Frame
 * @disc: FC discovery context
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *		 before it is called.
 */
static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
				  struct fc_disc *disc)
{
	struct fc_lport *lport;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rdata;
	struct fc_els_rscn *rp;
	struct fc_els_rscn_page *pp;
	struct fc_seq_els_data rjt_data;
	unsigned int len;
	int redisc = 0;
	enum fc_els_rscn_ev_qual ev_qual;
	enum fc_els_rscn_addr_fmt fmt;
	LIST_HEAD(disc_ports);
	struct fc_disc_port *dp, *next;

	lport = disc->lport;

	FC_DEBUG_DISC("Received an RSCN event on port (%6x)\n",
		      fc_host_port_id(lport->host));

	/* make sure the frame contains an RSCN message */
	rp = fc_frame_payload_get(fp, sizeof(*rp));
	if (!rp)
		goto reject;
	/* make sure the page length is as expected (4 bytes) */
	if (rp->rscn_page_len != sizeof(*pp))
		goto reject;
	/* get the RSCN payload length */
	len = ntohs(rp->rscn_plen);
	if (len < sizeof(*rp))
		goto reject;
	/* make sure the frame contains the expected payload */
	rp = fc_frame_payload_get(fp, len);
	if (!rp)
		goto reject;
	/* payload must be a multiple of the RSCN page size */
	len -= sizeof(*rp);
	if (len % sizeof(*pp))
		goto reject;

	for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
		ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
		ev_qual &= ELS_RSCN_EV_QUAL_MASK;
		fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
		fmt &= ELS_RSCN_ADDR_FMT_MASK;
		/*
		 * if we get an address format other than port
		 * (area, domain, fabric), then do a full discovery
		 */
		switch (fmt) {
		case ELS_ADDR_FMT_PORT:
			FC_DEBUG_DISC("Port address format for port (%6x)\n",
				      ntoh24(pp->rscn_fid));
			dp = kzalloc(sizeof(*dp), GFP_KERNEL);
			if (!dp) {
				redisc = 1;
				break;
			}
			dp->lp = lport;
			dp->ids.port_id = ntoh24(pp->rscn_fid);
			dp->ids.port_name = -1;
			dp->ids.node_name = -1;
			dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
			list_add_tail(&dp->peers, &disc_ports);
			break;
		case ELS_ADDR_FMT_AREA:
		case ELS_ADDR_FMT_DOM:
		case ELS_ADDR_FMT_FAB:
		default:
			FC_DEBUG_DISC("Address format is (%d)\n", fmt);
			redisc = 1;
			break;
		}
	}
	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
	if (redisc) {
		FC_DEBUG_DISC("RSCN received: rediscovering\n");
		fc_disc_restart(disc);
	} else {
		FC_DEBUG_DISC("RSCN received: not rediscovering. "
			      "redisc %d state %d in_prog %d\n",
			      redisc, lport->state, disc->pending);
		list_for_each_entry_safe(dp, next, &disc_ports, peers) {
			list_del(&dp->peers);
			rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
			if (rport) {
				rdata = RPORT_TO_PRIV(rport);
				list_del(&rdata->peers);
				lport->tt.rport_logoff(rport);
			}
			fc_disc_single(disc, dp);
		}
	}
	fc_frame_free(fp);
	return;
reject:
	FC_DEBUG_DISC("Received a bad RSCN frame\n");
	rjt_data.fp = NULL;
	rjt_data.reason = ELS_RJT_LOGIC;
	rjt_data.explan = ELS_EXPL_NONE;
	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	fc_frame_free(fp);
}

/**
 * fc_disc_recv_req - Handle incoming requests
 * @sp: Current sequence of the request exchange
 * @fp: The frame
 * @lport: The FC local port
 *
 * Locking Note: This function is called from the EM and will lock
 *		 the disc_mutex before calling the handler for the
 *		 request.
 */
static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
			     struct fc_lport *lport)
{
	u8 op;
	struct fc_disc *disc = &lport->disc;

	op = fc_frame_payload_op(fp);
	switch (op) {
	case ELS_RSCN:
		mutex_lock(&disc->disc_mutex);
		fc_disc_recv_rscn_req(sp, fp, disc);
		mutex_unlock(&disc->disc_mutex);
		break;
	default:
		FC_DBG("Received an unsupported request. opcode (%x)\n", op);
		break;
	}
}

/**
 * fc_disc_restart - Restart discovery
 * @disc: FC discovery context
 *
 * Locking Note: This function expects that the disc mutex
 *		 is already locked.
 */
static void fc_disc_restart(struct fc_disc *disc)
{
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rdata, *next;
	struct fc_lport *lport = disc->lport;

	FC_DEBUG_DISC("Restarting discovery for port (%6x)\n",
		      fc_host_port_id(lport->host));

	list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
		rport = PRIV_TO_RPORT(rdata);
		FC_DEBUG_DISC("list_del(%6x)\n", rport->port_id);
		list_del(&rdata->peers);
		lport->tt.rport_logoff(rport);
	}

	disc->requested = 1;
	if (!disc->pending)
		fc_disc_gpn_ft_req(disc);
}

/**
 * fc_disc_start - Start Fibre Channel target discovery
 * @disc_callback: Callback to invoke when discovery completes
 * @lport: FC local port
 */
static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
						enum fc_disc_event),
			  struct fc_lport *lport)
{
	struct fc_rport *rport;
	struct fc_rport_identifiers ids;
	struct fc_disc *disc = &lport->disc;

	/*
	 * At this point we may have a new disc job or an existing
	 * one. Either way, let's lock when we make changes to it
	 * and send the GPN_FT request.
	 */
	mutex_lock(&disc->disc_mutex);

	disc->disc_callback = disc_callback;

	/*
	 * If not ready, or already running discovery, just set request flag.
	 */
	disc->requested = 1;

	if (disc->pending) {
		mutex_unlock(&disc->disc_mutex);
		return;
	}

	/*
	 * Handle point-to-point mode as a simple discovery
	 * of the remote port. Yucky, yucky, yuck, yuck!
	 */
	rport = disc->lport->ptp_rp;
	if (rport) {
		ids.port_id = rport->port_id;
		ids.port_name = rport->port_name;
		ids.node_name = rport->node_name;
		ids.roles = FC_RPORT_ROLE_UNKNOWN;
		get_device(&rport->dev);

		if (!fc_disc_new_target(disc, rport, &ids)) {
			disc->event = DISC_EV_SUCCESS;
			fc_disc_done(disc);
		}
		put_device(&rport->dev);
	} else {
		fc_disc_gpn_ft_req(disc);	/* get ports by FC-4 type */
	}

	mutex_unlock(&disc->disc_mutex);
}

static struct fc_rport_operations fc_disc_rport_ops = {
	.event_callback = fc_disc_rport_callback,
};

/**
 * fc_disc_new_target - Handle new target found by discovery
 * @disc: FC discovery context
 * @rport: The previous FC remote port (NULL if new remote port)
 * @ids: Identifiers for the new FC remote port
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *		 before it is called.
 */
static int fc_disc_new_target(struct fc_disc *disc,
			      struct fc_rport *rport,
			      struct fc_rport_identifiers *ids)
{
	struct fc_lport *lport = disc->lport;
	struct fc_rport_libfc_priv *rp;
	int error = 0;

	if (rport && ids->port_name) {
		if (rport->port_name == -1) {
			/*
			 * Set WWN and fall through to notify of create.
			 */
			fc_rport_set_name(rport, ids->port_name,
					  rport->node_name);
		} else if (rport->port_name != ids->port_name) {
			/*
			 * This is a new port with the same FCID as
			 * a previously-discovered port.  Presumably the old
			 * port logged out and a new port logged in and was
			 * assigned the same FCID.  This should be rare.
			 * Delete the old one and fall thru to re-create.
			 */
			fc_disc_del_target(disc, rport);
			rport = NULL;
		}
	}
	if (((ids->port_name != -1) || (ids->port_id != -1)) &&
	    ids->port_id != fc_host_port_id(lport->host) &&
	    ids->port_name != lport->wwpn) {
		if (!rport) {
			rport = lport->tt.rport_lookup(lport, ids->port_id);
			if (!rport) {
				struct fc_disc_port dp;
				dp.lp = lport;
				dp.ids.port_id = ids->port_id;
				dp.ids.port_name = ids->port_name;
				dp.ids.node_name = ids->node_name;
				dp.ids.roles = ids->roles;
				rport = fc_rport_rogue_create(&dp);
			}
			if (!rport)
				error = -ENOMEM;
		}
		if (rport) {
			rp = rport->dd_data;
			rp->ops = &fc_disc_rport_ops;
			rp->rp_state = RPORT_ST_INIT;
			lport->tt.rport_login(rport);
		}
	}
	return error;
}

/**
 * fc_disc_del_target - Delete a target
 * @disc: FC discovery context
 * @rport: The remote port to be removed
 */
static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport)
{
	struct fc_lport *lport = disc->lport;
	struct fc_rport_libfc_priv *rdata = RPORT_TO_PRIV(rport);
	list_del(&rdata->peers);
	lport->tt.rport_logoff(rport);
}

/**
 * fc_disc_done - Discovery has been completed
 * @disc: FC discovery context
 */
static void fc_disc_done(struct fc_disc *disc)
{
	struct fc_lport *lport = disc->lport;

	FC_DEBUG_DISC("Discovery complete for port (%6x)\n",
		      fc_host_port_id(lport->host));

	disc->disc_callback(lport, disc->event);
	disc->event = DISC_EV_NONE;

	if (disc->requested)
		fc_disc_gpn_ft_req(disc);
	else
		disc->pending = 0;
}
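
/*
 * A note on the disc->requested and disc->pending flags as used above:
 * "pending" means a GPN_FT pass is currently in flight, while "requested"
 * records that another pass was asked for (fc_disc_start/fc_disc_restart)
 * in the meantime.  fc_disc_done() therefore re-issues fc_disc_gpn_ft_req()
 * instead of clearing "pending" when a request arrived mid-pass, and
 * fc_disc_timeout() does the same after a retry delay.
 */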

/**
 * fc_disc_error - Handle error on dNS request
 * @disc: FC discovery context
 * @fp: The frame pointer
 */
static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
{
	struct fc_lport *lport = disc->lport;
	unsigned long delay = 0;
	if (fc_disc_debug)
		FC_DBG("Error %ld, retries %d/%d\n",
		       PTR_ERR(fp), disc->retry_count,
		       FC_DISC_RETRY_LIMIT);

	if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
		/*
		 * Memory allocation failure, or the exchange timed out,
		 * retry after delay.
		 */
		if (disc->retry_count < FC_DISC_RETRY_LIMIT) {
			/* go ahead and retry */
			if (!fp)
				delay = msecs_to_jiffies(FC_DISC_RETRY_DELAY);
			else {
				delay = msecs_to_jiffies(lport->e_d_tov);

				/* timeout faster first time */
				if (!disc->retry_count)
					delay /= 4;
			}
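			/*
			 * For example (numbers illustrative, assuming the
			 * common 2 second e_d_tov): an allocation failure
			 * retries after FC_DISC_RETRY_DELAY (500 ms); a
			 * timeout retries after e_d_tov/4 (~500 ms) the
			 * first time and after the full e_d_tov on later
			 * attempts, up to FC_DISC_RETRY_LIMIT tries.
			 */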
			disc->retry_count++;
			schedule_delayed_work(&disc->disc_work, delay);
		} else {
			/* exceeded retries */
			disc->event = DISC_EV_FAILED;
			fc_disc_done(disc);
		}
	}
}

/**
 * fc_disc_gpn_ft_req - Send Get Port Names by FC-4 type (GPN_FT) request
 * @disc: FC discovery context
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *		 before it is called.
 */
static void fc_disc_gpn_ft_req(struct fc_disc *disc)
{
	struct fc_frame *fp;
	struct fc_lport *lport = disc->lport;

	WARN_ON(!fc_lport_test_ready(lport));

	disc->pending = 1;
	disc->requested = 0;

	disc->buf_len = 0;
	disc->seq_count = 0;
	fp = fc_frame_alloc(lport,
			    sizeof(struct fc_ct_hdr) +
			    sizeof(struct fc_ns_gid_ft));
	if (!fp)
		goto err;

	if (lport->tt.elsct_send(lport, NULL, fp,
				 FC_NS_GPN_FT,
				 fc_disc_gpn_ft_resp,
				 disc, lport->e_d_tov))
		return;
err:
	fc_disc_error(disc, fp);
}

/**
 * fc_disc_gpn_ft_parse - Parse the list of IDs and names resulting from a request
 * @disc: FC discovery context
 * @buf: GPN_FT response buffer
 * @len: size of response buffer
 */
static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
{
	struct fc_lport *lport;
	struct fc_gpn_ft_resp *np;
	char *bp;
	size_t plen;
	size_t tlen;
	int error = 0;
	struct fc_disc_port dp;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rdata;

	lport = disc->lport;

	/*
	 * Handle partial name record left over from previous call.
	 */
	bp = buf;
	plen = len;
	np = (struct fc_gpn_ft_resp *)bp;
	tlen = disc->buf_len;
	if (tlen) {
		WARN_ON(tlen >= sizeof(*np));
		plen = sizeof(*np) - tlen;
		WARN_ON(plen <= 0);
		WARN_ON(plen >= sizeof(*np));
		if (plen > len)
			plen = len;
		np = &disc->partial_buf;
		memcpy((char *)np + tlen, bp, plen);

		/*
		 * Set bp so that the loop below will advance it to the
		 * first valid full name element.
		 */
		bp -= tlen;
		len += tlen;
		plen += tlen;
		disc->buf_len = (unsigned char) plen;
		if (plen == sizeof(*np))
			disc->buf_len = 0;
	}

	/*
	 * Handle full name records, including the one filled from above.
	 * Normally, np == bp and plen == len, but from the partial case above,
	 * bp, len describe the overall buffer, and np, plen describe the
	 * partial buffer, which would usually be full now.
	 * After the first time through the loop, things return to "normal".
	 */
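	/*
	 * Worked example (illustrative): if the previous frame ended with the
	 * first 6 bytes of a name record, disc->buf_len is 6 on entry.  The
	 * remaining sizeof(*np) - 6 bytes are copied from the start of this
	 * buffer into disc->partial_buf, the completed record is handled on
	 * the first pass of the loop below, and bp/len are biased so that the
	 * second pass starts at the first full record in this buffer.
	 */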
	while (plen >= sizeof(*np)) {
		dp.lp = lport;
		dp.ids.port_id = ntoh24(np->fp_fid);
		dp.ids.port_name = ntohll(np->fp_wwpn);
		dp.ids.node_name = -1;
		dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;

		if ((dp.ids.port_id != fc_host_port_id(lport->host)) &&
		    (dp.ids.port_name != lport->wwpn)) {
			rport = fc_rport_rogue_create(&dp);
			if (rport) {
				rdata = rport->dd_data;
				rdata->ops = &fc_disc_rport_ops;
				rdata->local_port = lport;
				lport->tt.rport_login(rport);
			} else
				FC_DBG("Failed to allocate memory for "
				       "the newly discovered port (%6x)\n",
				       dp.ids.port_id);
		}

		if (np->fp_flags & FC_NS_FID_LAST) {
			disc->event = DISC_EV_SUCCESS;
			fc_disc_done(disc);
			len = 0;
			break;
		}
		len -= sizeof(*np);
		bp += sizeof(*np);
		np = (struct fc_gpn_ft_resp *)bp;
		plen = len;
	}

	/*
	 * Save any partial record at the end of the buffer for next time.
	 */
	if (error == 0 && len > 0 && len < sizeof(*np)) {
		if (np != &disc->partial_buf) {
			FC_DEBUG_DISC("Partial buffer remains "
				      "for discovery by (%6x)\n",
				      fc_host_port_id(lport->host));
			memcpy(&disc->partial_buf, np, len);
		}
		disc->buf_len = (unsigned char) len;
	} else {
		disc->buf_len = 0;
	}
	return error;
}

/*
 * fc_disc_timeout - handle a delayed retry of discovery.
 *
 * Scheduled by fc_disc_error() when a GPN_FT request could not be sent or
 * timed out; re-issues the request if discovery is still wanted and no
 * other pass has started in the meantime.
 */
static void fc_disc_timeout(struct work_struct *work)
{
	struct fc_disc *disc = container_of(work,
					    struct fc_disc,
					    disc_work.work);
	mutex_lock(&disc->disc_mutex);
	if (disc->requested && !disc->pending)
		fc_disc_gpn_ft_req(disc);
	mutex_unlock(&disc->disc_mutex);
}

/**
 * fc_disc_gpn_ft_resp - Handle a response frame from Get Port Names (GPN_FT)
 * @sp: Current sequence of GPN_FT exchange
 * @fp: response frame
 * @disc_arg: FC discovery context
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *		 before it is called.
 */
static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *disc_arg)
{
	struct fc_disc *disc = disc_arg;
	struct fc_ct_hdr *cp;
	struct fc_frame_header *fh;
	unsigned int seq_cnt;
	void *buf = NULL;
	unsigned int len;
	int error;

	FC_DEBUG_DISC("Received a GPN_FT response on port (%6x)\n",
		      fc_host_port_id(disc->lport->host));

	if (IS_ERR(fp)) {
		fc_disc_error(disc, fp);
		return;
	}

	WARN_ON(!fc_frame_is_linear(fp));	/* buffer must be contiguous */
	fh = fc_frame_header_get(fp);
	len = fr_len(fp) - sizeof(*fh);
	seq_cnt = ntohs(fh->fh_seq_cnt);
	if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
	    disc->seq_count == 0) {
		cp = fc_frame_payload_get(fp, sizeof(*cp));
		if (!cp) {
			FC_DBG("GPN_FT response too short, len %d\n",
			       fr_len(fp));
		} else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {

			/*
			 * Accepted.  Parse response.
			 */
			buf = cp + 1;
			len -= sizeof(*cp);
		} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
			FC_DBG("GPN_FT rejected reason %x exp %x "
			       "(check zoning)\n", cp->ct_reason,
			       cp->ct_explan);
			disc->event = DISC_EV_FAILED;
			fc_disc_done(disc);
		} else {
			FC_DBG("GPN_FT unexpected response code %x\n",
			       ntohs(cp->ct_cmd));
		}
	} else if (fr_sof(fp) == FC_SOF_N3 &&
		   seq_cnt == disc->seq_count) {
		buf = fh + 1;
	} else {
		FC_DBG("GPN_FT unexpected frame - out of sequence? "
		       "seq_cnt %x expected %x sof %x eof %x\n",
		       seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
	}
	if (buf) {
		error = fc_disc_gpn_ft_parse(disc, buf, len);
		if (error)
			fc_disc_error(disc, fp);
		else
			disc->seq_count++;
	}
	fc_frame_free(fp);
}

/**
 * fc_disc_single - Discover the directory information for a single target
 * @disc: FC discovery context
 * @dp: The port to rediscover
 *
 * Locking Note: This function expects that the disc_mutex is locked
 *		 before it is called.
 */
static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
{
	struct fc_lport *lport;
	struct fc_rport *rport;
	struct fc_rport *new_rport;
	struct fc_rport_libfc_priv *rdata;

	lport = disc->lport;

	if (dp->ids.port_id == fc_host_port_id(lport->host))
		goto out;

	rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
	if (rport)
		fc_disc_del_target(disc, rport);

	new_rport = fc_rport_rogue_create(dp);
	if (new_rport) {
		rdata = new_rport->dd_data;
		rdata->ops = &fc_disc_rport_ops;
		kfree(dp);
		lport->tt.rport_login(new_rport);
	}
	return;
out:
	kfree(dp);
}

/**
 * fc_disc_stop - Stop discovery for a given lport
 * @lport: The lport that discovery should stop for
 */
void fc_disc_stop(struct fc_lport *lport)
{
	struct fc_disc *disc = &lport->disc;

	if (disc) {
		cancel_delayed_work_sync(&disc->disc_work);
		fc_disc_stop_rports(disc);
	}
}

/**
 * fc_disc_stop_final - Stop discovery for a given lport
 * @lport: The lport that discovery should stop for
 *
 * This function will block until discovery has been
 * completely stopped and all rports have been deleted.
 */
void fc_disc_stop_final(struct fc_lport *lport)
{
	fc_disc_stop(lport);
	lport->tt.rport_flush_queue();
}

/**
 * fc_disc_init - Initialize the discovery block
 * @lport: FC local port
 */
int fc_disc_init(struct fc_lport *lport)
{
	struct fc_disc *disc;

	if (!lport->tt.disc_start)
		lport->tt.disc_start = fc_disc_start;

	if (!lport->tt.disc_stop)
		lport->tt.disc_stop = fc_disc_stop;

	if (!lport->tt.disc_stop_final)
		lport->tt.disc_stop_final = fc_disc_stop_final;

	if (!lport->tt.disc_recv_req)
		lport->tt.disc_recv_req = fc_disc_recv_req;

	if (!lport->tt.rport_lookup)
		lport->tt.rport_lookup = fc_disc_lookup_rport;

	disc = &lport->disc;
	INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
	mutex_init(&disc->disc_mutex);
	INIT_LIST_HEAD(&disc->rports);

	disc->lport = lport;
	disc->delay = FC_DISC_DELAY;
	disc->event = DISC_EV_NONE;

	return 0;
}
EXPORT_SYMBOL(fc_disc_init);
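
/*
 * Usage sketch (illustrative only; "my_lld" and its callback are made-up
 * names, not part of libfc): a lower-level driver calls fc_disc_init()
 * while setting up its lport, and discovery is later driven through the
 * transport template once the local port is ready:
 *
 *	static void my_lld_disc_done(struct fc_lport *lport,
 *				     enum fc_disc_event event)
 *	{
 *		if (event == DISC_EV_FAILED)
 *			printk(KERN_WARNING "discovery failed\n");
 *	}
 *
 *	// during lport setup, after fc_lport_init(lport):
 *	fc_disc_init(lport);
 *
 *	// normally fc_lport.c invokes this itself when the port becomes
 *	// ready; an LLD would not usually call it directly:
 *	lport->tt.disc_start(my_lld_disc_done, lport);
 */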