]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/scsi/libfc/fc_lport.c
[SCSI] libfc: Register Symbolic Port Name (RSPN_ID)
[mirror_ubuntu-artful-kernel.git] / drivers / scsi / libfc / fc_lport.c
CommitLineData
42e9a92f
RL
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * PORT LOCKING NOTES
22 *
23 * These comments only apply to the 'port code' which consists of the lport,
24 * disc and rport blocks.
25 *
26 * MOTIVATION
27 *
28 * The lport, disc and rport blocks all have mutexes that are used to protect
29 * those objects. The main motivation for these locks is to prevent from
30 * having an lport reset just before we send a frame. In that scenario the
31 * lport's FID would get set to zero and then we'd send a frame with an
32 * invalid SID. We also need to ensure that states don't change unexpectedly
33 * while processing another state.
34 *
 35 * HIERARCHY
 36 *
 37 * The following hierarchy defines the locking rules. A greater lock
 38 * may be held before acquiring a lesser lock, but a lesser lock should never
 39 * be held while attempting to acquire a greater lock. Here is the hierarchy-
40 *
41 * lport > disc, lport > rport, disc > rport
42 *
43 * CALLBACKS
44 *
45 * The callbacks cause complications with this scheme. There is a callback
46 * from the rport (to either lport or disc) and a callback from disc
47 * (to the lport).
48 *
49 * As rports exit the rport state machine a callback is made to the owner of
50 * the rport to notify success or failure. Since the callback is likely to
51 * cause the lport or disc to grab its lock we cannot hold the rport lock
52 * while making the callback. To ensure that the rport is not free'd while
53 * processing the callback the rport callbacks are serialized through a
54 * single-threaded workqueue. An rport would never be free'd while in a
 55 * callback handler because no other rport work in this queue can be executed
56 * at the same time.
57 *
58 * When discovery succeeds or fails a callback is made to the lport as
 59 * notification. Currently, successful discovery causes the lport to take no
60 * action. A failure will cause the lport to reset. There is likely a circular
61 * locking problem with this implementation.
62 */
63
64/*
65 * LPORT LOCKING
66 *
67 * The critical sections protected by the lport's mutex are quite broad and
68 * may be improved upon in the future. The lport code and its locking doesn't
69 * influence the I/O path, so excessive locking doesn't penalize I/O
70 * performance.
71 *
72 * The strategy is to lock whenever processing a request or response. Note
73 * that every _enter_* function corresponds to a state change. They generally
74 * change the lports state and then send a request out on the wire. We lock
75 * before calling any of these functions to protect that state change. This
76 * means that the entry points into the lport block manage the locks while
77 * the state machine can transition between states (i.e. _enter_* functions)
78 * while always staying protected.
79 *
80 * When handling responses we also hold the lport mutex broadly. When the
81 * lport receives the response frame it locks the mutex and then calls the
82 * appropriate handler for the particuar response. Generally a response will
83 * trigger a state change and so the lock must already be held.
84 *
85 * Retries also have to consider the locking. The retries occur from a work
86 * context and the work function will lock the lport and then retry the state
87 * (i.e. _enter_* function).
88 */
89
90#include <linux/timer.h>
91#include <asm/unaligned.h>
92
93#include <scsi/fc/fc_gs.h>
94
95#include <scsi/libfc.h>
96#include <scsi/fc_encode.h>
97
8866a5d9
RL
98#include "fc_libfc.h"
99
42e9a92f
RL
100/* Fabric IDs to use for point-to-point mode, chosen on whims. */
101#define FC_LOCAL_PTP_FID_LO 0x010101
102#define FC_LOCAL_PTP_FID_HI 0x010102
103
104#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
105
42e9a92f
RL
106static void fc_lport_error(struct fc_lport *, struct fc_frame *);
107
108static void fc_lport_enter_reset(struct fc_lport *);
109static void fc_lport_enter_flogi(struct fc_lport *);
110static void fc_lport_enter_dns(struct fc_lport *);
c9c7bd7a 111static void fc_lport_enter_rnn_id(struct fc_lport *);
5baa17c3 112static void fc_lport_enter_rsnn_nn(struct fc_lport *);
c9866a54 113static void fc_lport_enter_rspn_id(struct fc_lport *);
42e9a92f
RL
114static void fc_lport_enter_rft_id(struct fc_lport *);
115static void fc_lport_enter_scr(struct fc_lport *);
116static void fc_lport_enter_ready(struct fc_lport *);
117static void fc_lport_enter_logo(struct fc_lport *);
118
/* Human-readable state names, indexed by enum fc_lport_state. */
static const char *fc_lport_state_names[] = {
	[LPORT_ST_DISABLED] = "disabled",
	[LPORT_ST_FLOGI] =    "FLOGI",
	[LPORT_ST_DNS] =      "dNS",
	[LPORT_ST_RNN_ID] =   "RNN_ID",
	[LPORT_ST_RSNN_NN] =  "RSNN_NN",
	[LPORT_ST_RSPN_ID] =  "RSPN_ID",
	[LPORT_ST_RFT_ID] =   "RFT_ID",
	[LPORT_ST_SCR] =      "SCR",
	[LPORT_ST_READY] =    "Ready",
	[LPORT_ST_LOGO] =     "LOGO",
	[LPORT_ST_RESET] =    "reset",
};
132
/*
 * Frame-send handler installed while the lport is being destroyed:
 * silently frees every outgoing frame and reports success.
 */
static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
{
	fc_frame_free(fp);
	return 0;
}
138
/**
 * fc_lport_rport_callback() - Event handler for rport events
 * @lport: The lport which is receiving the event
 * @rdata: private remote port data
 * @event: The event that occurred
 *
 * Only the directory-server rport reports back to the lport here: on
 * READY (while still in the DNS state) the name-server registration
 * sequence is started; on LOGO/FAILED/STOP the cached dns_rp is dropped.
 *
 * Locking Note: The rport lock should not be held when calling
 * this function.
 */
static void fc_lport_rport_callback(struct fc_lport *lport,
				    struct fc_rport_priv *rdata,
				    enum fc_rport_event event)
{
	FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event,
		     rdata->ids.port_id);

	mutex_lock(&lport->lp_mutex);
	switch (event) {
	case RPORT_EV_READY:
		if (lport->state == LPORT_ST_DNS) {
			lport->dns_rp = rdata;
			/* dNS login complete; begin name-server registration */
			fc_lport_enter_rnn_id(lport);
		} else {
			/* READY arrived too late (lport moved on); log off */
			FC_LPORT_DBG(lport, "Received an READY event "
				     "on port (%6x) for the directory "
				     "server, but the lport is not "
				     "in the DNS state, it's in the "
				     "%d state", rdata->ids.port_id,
				     lport->state);
			lport->tt.rport_logoff(rdata);
		}
		break;
	case RPORT_EV_LOGO:
	case RPORT_EV_FAILED:
	case RPORT_EV_STOP:
		lport->dns_rp = NULL;
		break;
	case RPORT_EV_NONE:
		break;
	}
	mutex_unlock(&lport->lp_mutex);
}
181
182/**
34f42a07 183 * fc_lport_state() - Return a string which represents the lport's state
42e9a92f
RL
184 * @lport: The lport whose state is to converted to a string
185 */
186static const char *fc_lport_state(struct fc_lport *lport)
187{
188 const char *cp;
189
190 cp = fc_lport_state_names[lport->state];
191 if (!cp)
192 cp = "unknown";
193 return cp;
194}
195
/**
 * fc_lport_ptp_setup() - Create an rport for point-to-point mode
 * @lport: The lport to attach the ptp rport to
 * @remote_fid: The FID of the ptp rport
 * @remote_wwpn: The WWPN of the ptp rport
 * @remote_wwnn: The WWNN of the ptp rport
 *
 * Replaces any existing point-to-point rport, logs in to the new one,
 * and moves the lport to the READY state.
 */
static void fc_lport_ptp_setup(struct fc_lport *lport,
			       u32 remote_fid, u64 remote_wwpn,
			       u64 remote_wwnn)
{
	mutex_lock(&lport->disc.disc_mutex);
	if (lport->ptp_rp)
		lport->tt.rport_logoff(lport->ptp_rp);
	/* NOTE(review): rport_create() return is not checked before deref */
	lport->ptp_rp = lport->tt.rport_create(lport, remote_fid);
	lport->ptp_rp->ids.port_name = remote_wwpn;
	lport->ptp_rp->ids.node_name = remote_wwnn;
	mutex_unlock(&lport->disc.disc_mutex);

	lport->tt.rport_login(lport->ptp_rp);

	fc_lport_enter_ready(lport);
}
219
/* scsi_transport_fc attribute getter: report the host's FC port type. */
void fc_get_host_port_type(struct Scsi_Host *shost)
{
	/* TODO - currently just NPORT */
	fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
}
EXPORT_SYMBOL(fc_get_host_port_type);
226
/*
 * scsi_transport_fc attribute getter: map the lport's link and state
 * to an FC_PORTSTATE_* value under the lport mutex.
 */
void fc_get_host_port_state(struct Scsi_Host *shost)
{
	struct fc_lport *lp = shost_priv(shost);

	mutex_lock(&lp->lp_mutex);
	if (!lp->link_up)
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
	else
		switch (lp->state) {
		case LPORT_ST_READY:
			fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
			break;
		default:
			/* link is up but login/registration not complete */
			fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
		}
	mutex_unlock(&lp->lp_mutex);
}
EXPORT_SYMBOL(fc_get_host_port_state);
245
/* scsi_transport_fc attribute getter: report the lport's link speed. */
void fc_get_host_speed(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	fc_host_speed(shost) = lport->link_speed;
}
EXPORT_SYMBOL(fc_get_host_speed);
253
/*
 * scsi_transport_fc statistics getter: sum the per-CPU fcoe_dev_stats
 * counters into the host's fc_host_statistics. Counters libfc does not
 * track are reported as -1 (unsupported).
 */
struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
{
	struct fc_host_statistics *fcoe_stats;
	struct fc_lport *lp = shost_priv(shost);
	struct timespec v0, v1;
	unsigned int cpu;

	fcoe_stats = &lp->host_stats;
	memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));

	jiffies_to_timespec(jiffies, &v0);
	jiffies_to_timespec(lp->boot_time, &v1);
	fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);

	/* accumulate over every possible CPU's private counters */
	for_each_possible_cpu(cpu) {
		struct fcoe_dev_stats *stats;

		stats = per_cpu_ptr(lp->dev_stats, cpu);

		fcoe_stats->tx_frames += stats->TxFrames;
		fcoe_stats->tx_words += stats->TxWords;
		fcoe_stats->rx_frames += stats->RxFrames;
		fcoe_stats->rx_words += stats->RxWords;
		fcoe_stats->error_frames += stats->ErrorFrames;
		fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
		fcoe_stats->fcp_input_requests += stats->InputRequests;
		fcoe_stats->fcp_output_requests += stats->OutputRequests;
		fcoe_stats->fcp_control_requests += stats->ControlRequests;
		fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
		fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
		fcoe_stats->link_failure_count += stats->LinkFailureCount;
	}
	/* -1 == statistic not maintained by libfc */
	fcoe_stats->lip_count = -1;
	fcoe_stats->nos_count = -1;
	fcoe_stats->loss_of_sync_count = -1;
	fcoe_stats->loss_of_signal_count = -1;
	fcoe_stats->prim_seq_protocol_err_count = -1;
	fcoe_stats->dumped_frames = -1;
	return fcoe_stats;
}
EXPORT_SYMBOL(fc_get_host_stats);
295
/*
 * Fill in FLOGI command for request.
 *
 * Populates the common service parameters (WWPN/WWNN, FC-PH version,
 * buffer-to-buffer credit/data field size) and the class-3 parameters.
 * For non-FLOGI ops (e.g. PLOGI reuse of this template) additional
 * N_Port parameters are filled in as well.
 */
static void
fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi,
		    unsigned int op)
{
	struct fc_els_csp *sp;
	struct fc_els_cssp *cp;

	memset(flogi, 0, sizeof(*flogi));
	flogi->fl_cmd = (u8) op;
	put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
	put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
	sp = &flogi->fl_csp;
	sp->sp_hi_ver = 0x20;
	sp->sp_lo_ver = 0x20;
	sp->sp_bb_cred = htons(10);	/* this gets set by gateway */
	sp->sp_bb_data = htons((u16) lport->mfs);
	cp = &flogi->fl_cssp[3 - 1];	/* class 3 parameters */
	cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
	if (op != ELS_FLOGI) {
		sp->sp_features = htons(FC_SP_FT_CIRO);
		sp->sp_tot_seq = htons(255);	/* seq. we accept */
		sp->sp_rel_off = htons(0x1f);
		sp->sp_e_d_tov = htonl(lport->e_d_tov);

		cp->cp_rdfs = htons((u16) lport->mfs);
		cp->cp_con_seq = htons(255);
		cp->cp_open_seq = 1;
	}
}
328
329/*
330 * Add a supported FC-4 type.
331 */
332static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
333{
334 __be32 *mp;
335
336 mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
337 *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
338}
339
/**
 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
 * @sp: current sequence in the RLIR exchange
 * @fp: RLIR request frame
 * @lport: Fibre Channel local port receiving the RLIR
 *
 * The report itself is not processed; we simply accept it (LS_ACC).
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
				   struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
		     fc_lport_state(lport));

	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
	fc_frame_free(fp);
}
358
/**
 * fc_lport_recv_echo_req() - Handle received ECHO request
 * @sp: current sequence in the ECHO exchange
 * @in_fp: ECHO request frame
 * @lport: Fibre Channel local port receiving the ECHO
 *
 * Echoes the request payload back with the first word replaced by the
 * LS_ACC command code. On allocation failure the request is silently
 * dropped; the originator is expected to retry.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
				   struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_exch *ep = fc_seq_exch(sp);
	unsigned int len;
	void *pp;
	void *dp;
	u32 f_ctl;

	FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
		     fc_lport_state(lport));

	len = fr_len(in_fp) - sizeof(struct fc_frame_header);
	pp = fc_frame_payload_get(in_fp, len);

	/* response must hold at least the 32-bit LS_ACC command word */
	if (len < sizeof(__be32))
		len = sizeof(__be32);

	fp = fc_frame_alloc(lport, len);
	if (fp) {
		dp = fc_frame_payload_get(fp, len);
		memcpy(dp, pp, len);
		*((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
		sp = lport->tt.seq_start_next(sp);
		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
			       FC_TYPE_ELS, f_ctl, 0);
		lport->tt.seq_send(lport, sp, fp);
	}
	fc_frame_free(in_fp);
}
400
401/**
1b69bc06
JE
402 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
403 * @sp: The sequence in the RNID exchange
404 * @fp: The RNID request frame
405 * @lport: The local port recieving the RNID
42e9a92f 406 *
1b69bc06 407 * Locking Note: The lport lock is expected to be held before calling
42e9a92f
RL
408 * this function.
409 */
410static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
411 struct fc_lport *lport)
412{
413 struct fc_frame *fp;
414 struct fc_exch *ep = fc_seq_exch(sp);
415 struct fc_els_rnid *req;
416 struct {
417 struct fc_els_rnid_resp rnid;
418 struct fc_els_rnid_cid cid;
419 struct fc_els_rnid_gen gen;
420 } *rp;
421 struct fc_seq_els_data rjt_data;
422 u8 fmt;
423 size_t len;
424 u32 f_ctl;
425
7414705e
RL
426 FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
427 fc_lport_state(lport));
42e9a92f
RL
428
429 req = fc_frame_payload_get(in_fp, sizeof(*req));
430 if (!req) {
431 rjt_data.fp = NULL;
432 rjt_data.reason = ELS_RJT_LOGIC;
433 rjt_data.explan = ELS_EXPL_NONE;
434 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
435 } else {
436 fmt = req->rnid_fmt;
437 len = sizeof(*rp);
438 if (fmt != ELS_RNIDF_GEN ||
439 ntohl(lport->rnid_gen.rnid_atype) == 0) {
440 fmt = ELS_RNIDF_NONE; /* nothing to provide */
441 len -= sizeof(rp->gen);
442 }
443 fp = fc_frame_alloc(lport, len);
444 if (fp) {
445 rp = fc_frame_payload_get(fp, len);
446 memset(rp, 0, len);
447 rp->rnid.rnid_cmd = ELS_LS_ACC;
448 rp->rnid.rnid_fmt = fmt;
449 rp->rnid.rnid_cid_len = sizeof(rp->cid);
450 rp->cid.rnid_wwpn = htonll(lport->wwpn);
451 rp->cid.rnid_wwnn = htonll(lport->wwnn);
452 if (fmt == ELS_RNIDF_GEN) {
453 rp->rnid.rnid_sid_len = sizeof(rp->gen);
454 memcpy(&rp->gen, &lport->rnid_gen,
455 sizeof(rp->gen));
456 }
457 sp = lport->tt.seq_start_next(sp);
458 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
459 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
460 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
461 FC_TYPE_ELS, f_ctl, 0);
462 lport->tt.seq_send(lport, sp, fp);
463 }
464 }
465 fc_frame_free(in_fp);
466}
467
42e9a92f 468/**
34f42a07 469 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
42e9a92f
RL
470 * @lport: Fibre Channel local port recieving the LOGO
471 * @sp: current sequence in the LOGO exchange
472 * @fp: LOGO request frame
473 *
474 * Locking Note: The lport lock is exected to be held before calling
475 * this function.
476 */
477static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
478 struct fc_lport *lport)
479{
480 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
481 fc_lport_enter_reset(lport);
482 fc_frame_free(fp);
483}
484
/**
 * fc_fabric_login() - Start the lport state machine
 * @lport: The lport that should log into the fabric
 *
 * Only valid from the DISABLED state; any other state returns failure.
 *
 * Return value:
 *	0 for success, -1 if the lport was not in the DISABLED state
 *
 * Locking Note: This function should not be called
 * with the lport lock held.
 */
int fc_fabric_login(struct fc_lport *lport)
{
	int rc = -1;

	mutex_lock(&lport->lp_mutex);
	if (lport->state == LPORT_ST_DISABLED) {
		fc_lport_enter_reset(lport);
		rc = 0;
	}
	mutex_unlock(&lport->lp_mutex);

	return rc;
}
EXPORT_SYMBOL(fc_fabric_login);
506
507/**
8faecddb 508 * __fc_linkup() - Handler for transport linkup events
42e9a92f 509 * @lport: The lport whose link is up
8faecddb
CL
510 *
511 * Locking: must be called with the lp_mutex held
42e9a92f 512 */
8faecddb 513void __fc_linkup(struct fc_lport *lport)
42e9a92f 514{
bc0e17f6
VD
515 if (!lport->link_up) {
516 lport->link_up = 1;
42e9a92f
RL
517
518 if (lport->state == LPORT_ST_RESET)
519 fc_lport_enter_flogi(lport);
520 }
8faecddb
CL
521}
522
/**
 * fc_linkup() - Handler for transport linkup events
 * @lport: The lport whose link is up
 *
 * Locking wrapper around __fc_linkup(); takes the lp_mutex itself.
 */
void fc_linkup(struct fc_lport *lport)
{
	printk(KERN_INFO "libfc: Link up on port (%6x)\n",
	       fc_host_port_id(lport->host));

	mutex_lock(&lport->lp_mutex);
	__fc_linkup(lport);
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_linkup);
537
538/**
8faecddb 539 * __fc_linkdown() - Handler for transport linkdown events
42e9a92f 540 * @lport: The lport whose link is down
8faecddb
CL
541 *
542 * Locking: must be called with the lp_mutex held
42e9a92f 543 */
8faecddb 544void __fc_linkdown(struct fc_lport *lport)
42e9a92f 545{
bc0e17f6
VD
546 if (lport->link_up) {
547 lport->link_up = 0;
42e9a92f
RL
548 fc_lport_enter_reset(lport);
549 lport->tt.fcp_cleanup(lport);
550 }
8faecddb
CL
551}
552
/**
 * fc_linkdown() - Handler for transport linkdown events
 * @lport: The lport whose link is down
 *
 * Locking wrapper around __fc_linkdown(); takes the lp_mutex itself.
 */
void fc_linkdown(struct fc_lport *lport)
{
	printk(KERN_INFO "libfc: Link down on port (%6x)\n",
	       fc_host_port_id(lport->host));

	mutex_lock(&lport->lp_mutex);
	__fc_linkdown(lport);
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_linkdown);
567
42e9a92f 568/**
34f42a07 569 * fc_fabric_logoff() - Logout of the fabric
42e9a92f
RL
570 * @lport: fc_lport pointer to logoff the fabric
571 *
572 * Return value:
573 * 0 for success, -1 for failure
34f42a07 574 */
42e9a92f
RL
575int fc_fabric_logoff(struct fc_lport *lport)
576{
577 lport->tt.disc_stop_final(lport);
578 mutex_lock(&lport->lp_mutex);
a0fd2e49
AJ
579 if (lport->dns_rp)
580 lport->tt.rport_logoff(lport->dns_rp);
581 mutex_unlock(&lport->lp_mutex);
582 lport->tt.rport_flush_queue();
583 mutex_lock(&lport->lp_mutex);
42e9a92f
RL
584 fc_lport_enter_logo(lport);
585 mutex_unlock(&lport->lp_mutex);
f7db2c15 586 cancel_delayed_work_sync(&lport->retry_work);
42e9a92f
RL
587 return 0;
588}
589EXPORT_SYMBOL(fc_fabric_logoff);
590
591/**
34f42a07 592 * fc_lport_destroy() - unregister a fc_lport
42e9a92f
RL
593 * @lport: fc_lport pointer to unregister
594 *
595 * Return value:
596 * None
597 * Note:
598 * exit routine for fc_lport instance
599 * clean-up all the allocated memory
600 * and free up other system resources.
601 *
34f42a07 602 */
42e9a92f
RL
603int fc_lport_destroy(struct fc_lport *lport)
604{
bbf15669 605 mutex_lock(&lport->lp_mutex);
b1d9fd55 606 lport->state = LPORT_ST_DISABLED;
bbf15669 607 lport->link_up = 0;
42e9a92f 608 lport->tt.frame_send = fc_frame_drop;
bbf15669
AJ
609 mutex_unlock(&lport->lp_mutex);
610
42e9a92f 611 lport->tt.fcp_abort_io(lport);
e9ba8b42 612 lport->tt.disc_stop_final(lport);
1f6ff364 613 lport->tt.exch_mgr_reset(lport, 0, 0);
42e9a92f
RL
614 return 0;
615}
616EXPORT_SYMBOL(fc_lport_destroy);
617
618/**
34f42a07 619 * fc_set_mfs() - sets up the mfs for the corresponding fc_lport
42e9a92f
RL
620 * @lport: fc_lport pointer to unregister
621 * @mfs: the new mfs for fc_lport
622 *
623 * Set mfs for the given fc_lport to the new mfs.
624 *
625 * Return: 0 for success
34f42a07 626 */
42e9a92f
RL
627int fc_set_mfs(struct fc_lport *lport, u32 mfs)
628{
629 unsigned int old_mfs;
630 int rc = -EINVAL;
631
632 mutex_lock(&lport->lp_mutex);
633
634 old_mfs = lport->mfs;
635
636 if (mfs >= FC_MIN_MAX_FRAME) {
637 mfs &= ~3;
638 if (mfs > FC_MAX_FRAME)
639 mfs = FC_MAX_FRAME;
640 mfs -= sizeof(struct fc_frame_header);
641 lport->mfs = mfs;
642 rc = 0;
643 }
644
645 if (!rc && mfs < old_mfs)
646 fc_lport_enter_reset(lport);
647
648 mutex_unlock(&lport->lp_mutex);
649
650 return rc;
651}
652EXPORT_SYMBOL(fc_set_mfs);
653
654/**
34f42a07 655 * fc_lport_disc_callback() - Callback for discovery events
42e9a92f
RL
656 * @lport: FC local port
657 * @event: The discovery event
658 */
659void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
660{
661 switch (event) {
662 case DISC_EV_SUCCESS:
7414705e 663 FC_LPORT_DBG(lport, "Discovery succeeded\n");
42e9a92f
RL
664 break;
665 case DISC_EV_FAILED:
7414705e
RL
666 printk(KERN_ERR "libfc: Discovery failed for port (%6x)\n",
667 fc_host_port_id(lport->host));
42e9a92f
RL
668 mutex_lock(&lport->lp_mutex);
669 fc_lport_enter_reset(lport);
670 mutex_unlock(&lport->lp_mutex);
671 break;
672 case DISC_EV_NONE:
673 WARN_ON(1);
674 break;
675 }
676}
677
678/**
34f42a07 679 * fc_rport_enter_ready() - Enter the ready state and start discovery
42e9a92f
RL
680 * @lport: Fibre Channel local port that is ready
681 *
682 * Locking Note: The lport lock is expected to be held before calling
683 * this routine.
684 */
685static void fc_lport_enter_ready(struct fc_lport *lport)
686{
7414705e
RL
687 FC_LPORT_DBG(lport, "Entered READY from state %s\n",
688 fc_lport_state(lport));
42e9a92f
RL
689
690 fc_lport_state_enter(lport, LPORT_ST_READY);
8faecddb
CL
691 if (lport->vport)
692 fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
693 fc_vports_linkchange(lport);
42e9a92f 694
29d898e9
JE
695 if (!lport->ptp_rp)
696 lport->tt.disc_start(fc_lport_disc_callback, lport);
42e9a92f
RL
697}
698
699/**
34f42a07 700 * fc_lport_recv_flogi_req() - Receive a FLOGI request
42e9a92f
RL
701 * @sp_in: The sequence the FLOGI is on
702 * @rx_fp: The frame the FLOGI is in
703 * @lport: The lport that recieved the request
704 *
705 * A received FLOGI request indicates a point-to-point connection.
706 * Accept it with the common service parameters indicating our N port.
707 * Set up to do a PLOGI if we have the higher-number WWPN.
708 *
1b69bc06 709 * Locking Note: The lport lock is expected to be held before calling
42e9a92f
RL
710 * this function.
711 */
712static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
713 struct fc_frame *rx_fp,
714 struct fc_lport *lport)
715{
716 struct fc_frame *fp;
717 struct fc_frame_header *fh;
718 struct fc_seq *sp;
719 struct fc_exch *ep;
720 struct fc_els_flogi *flp;
721 struct fc_els_flogi *new_flp;
722 u64 remote_wwpn;
723 u32 remote_fid;
724 u32 local_fid;
725 u32 f_ctl;
726
7414705e
RL
727 FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
728 fc_lport_state(lport));
42e9a92f
RL
729
730 fh = fc_frame_header_get(rx_fp);
731 remote_fid = ntoh24(fh->fh_s_id);
732 flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
733 if (!flp)
734 goto out;
735 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
736 if (remote_wwpn == lport->wwpn) {
7414705e
RL
737 printk(KERN_WARNING "libfc: Received FLOGI from port "
738 "with same WWPN %llx\n", remote_wwpn);
42e9a92f
RL
739 goto out;
740 }
7414705e 741 FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn);
42e9a92f
RL
742
743 /*
744 * XXX what is the right thing to do for FIDs?
745 * The originator might expect our S_ID to be 0xfffffe.
746 * But if so, both of us could end up with the same FID.
747 */
748 local_fid = FC_LOCAL_PTP_FID_LO;
749 if (remote_wwpn < lport->wwpn) {
750 local_fid = FC_LOCAL_PTP_FID_HI;
751 if (!remote_fid || remote_fid == local_fid)
752 remote_fid = FC_LOCAL_PTP_FID_LO;
753 } else if (!remote_fid) {
754 remote_fid = FC_LOCAL_PTP_FID_HI;
755 }
756
757 fc_host_port_id(lport->host) = local_fid;
758
759 fp = fc_frame_alloc(lport, sizeof(*flp));
760 if (fp) {
761 sp = lport->tt.seq_start_next(fr_seq(rx_fp));
762 new_flp = fc_frame_payload_get(fp, sizeof(*flp));
763 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
764 new_flp->fl_cmd = (u8) ELS_LS_ACC;
765
766 /*
767 * Send the response. If this fails, the originator should
768 * repeat the sequence.
769 */
770 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
771 ep = fc_seq_exch(sp);
772 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
773 FC_TYPE_ELS, f_ctl, 0);
774 lport->tt.seq_send(lport, sp, fp);
775
776 } else {
777 fc_lport_error(lport, fp);
778 }
779 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
780 get_unaligned_be64(&flp->fl_wwnn));
781
42e9a92f
RL
782out:
783 sp = fr_seq(rx_fp);
784 fc_frame_free(rx_fp);
785}
786
787/**
34f42a07 788 * fc_lport_recv_req() - The generic lport request handler
42e9a92f
RL
789 * @lport: The lport that received the request
790 * @sp: The sequence the request is on
791 * @fp: The frame the request is in
792 *
793 * This function will see if the lport handles the request or
794 * if an rport should handle the request.
795 *
796 * Locking Note: This function should not be called with the lport
797 * lock held becuase it will grab the lock.
798 */
799static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
800 struct fc_frame *fp)
801{
802 struct fc_frame_header *fh = fc_frame_header_get(fp);
803 void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
42e9a92f
RL
804
805 mutex_lock(&lport->lp_mutex);
806
807 /*
808 * Handle special ELS cases like FLOGI, LOGO, and
809 * RSCN here. These don't require a session.
810 * Even if we had a session, it might not be ready.
811 */
e9ba8b42
JE
812 if (!lport->link_up)
813 fc_frame_free(fp);
814 else if (fh->fh_type == FC_TYPE_ELS &&
815 fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
42e9a92f
RL
816 /*
817 * Check opcode.
818 */
131203a1 819 recv = lport->tt.rport_recv_req;
42e9a92f
RL
820 switch (fc_frame_payload_op(fp)) {
821 case ELS_FLOGI:
822 recv = fc_lport_recv_flogi_req;
823 break;
824 case ELS_LOGO:
825 fh = fc_frame_header_get(fp);
826 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
827 recv = fc_lport_recv_logo_req;
828 break;
829 case ELS_RSCN:
830 recv = lport->tt.disc_recv_req;
831 break;
832 case ELS_ECHO:
833 recv = fc_lport_recv_echo_req;
834 break;
835 case ELS_RLIR:
836 recv = fc_lport_recv_rlir_req;
837 break;
838 case ELS_RNID:
839 recv = fc_lport_recv_rnid_req;
840 break;
42e9a92f
RL
841 }
842
131203a1 843 recv(sp, fp, lport);
42e9a92f 844 } else {
7414705e
RL
845 FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
846 fr_eof(fp));
42e9a92f
RL
847 fc_frame_free(fp);
848 }
849 mutex_unlock(&lport->lp_mutex);
850
851 /*
852 * The common exch_done for all request may not be good
853 * if any request requires longer hold on exhange. XXX
854 */
855 lport->tt.exch_done(sp);
856}
857
858/**
34f42a07 859 * fc_lport_reset() - Reset an lport
42e9a92f
RL
860 * @lport: The lport which should be reset
861 *
862 * Locking Note: This functions should not be called with the
863 * lport lock held.
864 */
865int fc_lport_reset(struct fc_lport *lport)
866{
f7db2c15 867 cancel_delayed_work_sync(&lport->retry_work);
42e9a92f
RL
868 mutex_lock(&lport->lp_mutex);
869 fc_lport_enter_reset(lport);
870 mutex_unlock(&lport->lp_mutex);
871 return 0;
872}
873EXPORT_SYMBOL(fc_lport_reset);
874
875/**
1190d925 876 * fc_lport_reset_locked() - Reset the local port
42e9a92f
RL
877 * @lport: Fibre Channel local port to be reset
878 *
879 * Locking Note: The lport lock is expected to be held before calling
880 * this routine.
881 */
1190d925 882static void fc_lport_reset_locked(struct fc_lport *lport)
42e9a92f 883{
42e9a92f
RL
884 if (lport->dns_rp)
885 lport->tt.rport_logoff(lport->dns_rp);
886
48f00902 887 lport->ptp_rp = NULL;
42e9a92f
RL
888
889 lport->tt.disc_stop(lport);
890
1f6ff364 891 lport->tt.exch_mgr_reset(lport, 0, 0);
42e9a92f
RL
892 fc_host_fabric_name(lport->host) = 0;
893 fc_host_port_id(lport->host) = 0;
1190d925 894}
42e9a92f 895
1190d925
JE
/**
 * fc_lport_enter_reset() - Reset the local port
 * @lport: Fibre Channel local port to be reset
 *
 * After the common teardown, FLOGI is restarted immediately if the
 * link is still up.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_reset(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
		     fc_lport_state(lport));

	if (lport->vport) {
		if (lport->link_up)
			fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
		else
			fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
	}
	fc_lport_state_enter(lport, LPORT_ST_RESET);
	fc_vports_linkchange(lport);
	fc_lport_reset_locked(lport);
	if (lport->link_up)
		fc_lport_enter_flogi(lport);
}
920
1190d925
JE
/**
 * fc_lport_enter_disabled() - disable the local port
 * @lport: Fibre Channel local port to be reset
 *
 * Like fc_lport_enter_reset() but parks the lport in DISABLED and does
 * not restart FLOGI.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_disabled(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_DISABLED);
	fc_vports_linkchange(lport);
	fc_lport_reset_locked(lport);
}
937
42e9a92f 938/**
34f42a07 939 * fc_lport_error() - Handler for any errors
42e9a92f
RL
940 * @lport: The fc_lport object
941 * @fp: The frame pointer
942 *
943 * If the error was caused by a resource allocation failure
944 * then wait for half a second and retry, otherwise retry
945 * after the e_d_tov time.
946 */
947static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
948{
949 unsigned long delay = 0;
7414705e
RL
950 FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
951 PTR_ERR(fp), fc_lport_state(lport),
952 lport->retry_count);
42e9a92f
RL
953
954 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
955 /*
956 * Memory allocation failure, or the exchange timed out.
957 * Retry after delay
958 */
959 if (lport->retry_count < lport->max_retry_count) {
960 lport->retry_count++;
961 if (!fp)
962 delay = msecs_to_jiffies(500);
963 else
964 delay = msecs_to_jiffies(lport->e_d_tov);
965
966 schedule_delayed_work(&lport->retry_work, delay);
967 } else {
968 switch (lport->state) {
b1d9fd55 969 case LPORT_ST_DISABLED:
42e9a92f
RL
970 case LPORT_ST_READY:
971 case LPORT_ST_RESET:
c9c7bd7a 972 case LPORT_ST_RNN_ID:
5baa17c3 973 case LPORT_ST_RSNN_NN:
c9866a54 974 case LPORT_ST_RSPN_ID:
42e9a92f
RL
975 case LPORT_ST_RFT_ID:
976 case LPORT_ST_SCR:
977 case LPORT_ST_DNS:
978 case LPORT_ST_FLOGI:
979 case LPORT_ST_LOGO:
980 fc_lport_enter_reset(lport);
981 break;
982 }
983 }
984 }
985}
986
/**
 * fc_lport_rft_id_resp() - Handle response to Register Fibre
 *			    Channel Types by ID (RFT_ID) request
 * @sp: current sequence in RFT_ID exchange
 * @fp: response frame
 * @lp_arg: Fibre Channel host port instance
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error
 * and then unlock the lport.
 */
static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
				 void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_frame_header *fh;
	struct fc_ct_hdr *ct;

	FC_LPORT_DBG(lport, "Received a RFT_ID %s\n", fc_els_resp_type(fp));

	/* Exchange was closed out from under us; nothing to free or do */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_RFT_ID) {
		FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state "
			     "%s\n", fc_lport_state(lport));
		/* error pointers must not reach fc_frame_free() */
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	/* A CT accept from the directory server means the FC-4 types were
	 * registered; move on to state change registration (SCR).
	 */
	fh = fc_frame_header_get(fp);
	ct = fc_frame_payload_get(fp, sizeof(*ct));

	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
	    ct->ct_fs_type == FC_FST_DIR &&
	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
	    ntohs(ct->ct_cmd) == FC_FS_ACC)
		fc_lport_enter_scr(lport);
	else
		fc_lport_error(lport, fp);
out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
1040
c9866a54
CL
1041/**
1042 * fc_lport_rspn_id_resp() - Handle response to Register Symbolic Port Name
1043 * by ID (RSPN_ID) request
1044 * @sp: current sequence in RSPN_ID exchange
1045 * @fp: response frame
1046 * @lp_arg: Fibre Channel host port instance
1047 *
1048 * Locking Note: This function will be called without the lport lock
1049 * held, but it will lock, call an _enter_* function or fc_lport_error
1050 * and then unlock the lport.
1051 */
1052static void fc_lport_rspn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1053 void *lp_arg)
1054{
1055 struct fc_lport *lport = lp_arg;
1056 struct fc_frame_header *fh;
1057 struct fc_ct_hdr *ct;
1058
1059 FC_LPORT_DBG(lport, "Received a RSPN_ID %s\n", fc_els_resp_type(fp));
1060
1061 if (fp == ERR_PTR(-FC_EX_CLOSED))
1062 return;
1063
1064 mutex_lock(&lport->lp_mutex);
1065
1066 if (lport->state != LPORT_ST_RSPN_ID) {
1067 FC_LPORT_DBG(lport, "Received a RSPN_ID response, but in state "
1068 "%s\n", fc_lport_state(lport));
1069 if (IS_ERR(fp))
1070 goto err;
1071 goto out;
1072 }
1073
1074 if (IS_ERR(fp)) {
1075 fc_lport_error(lport, fp);
1076 goto err;
1077 }
1078
1079 fh = fc_frame_header_get(fp);
1080 ct = fc_frame_payload_get(fp, sizeof(*ct));
1081 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1082 ct->ct_fs_type == FC_FST_DIR &&
1083 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1084 ntohs(ct->ct_cmd) == FC_FS_ACC)
1085 fc_lport_enter_rspn_id(lport);
1086 else
1087 fc_lport_error(lport, fp);
1088
1089out:
1090 fc_frame_free(fp);
1091err:
1092 mutex_unlock(&lport->lp_mutex);
1093}
5baa17c3
CL
1094/**
1095 * fc_lport_rsnn_nn_resp() - Handle response to Register Symbolic Node Name
1096 * by Node Name (RSNN_NN) request
1097 * @sp: current sequence in RSNN_NN exchange
1098 * @fp: response frame
1099 * @lp_arg: Fibre Channel host port instance
1100 *
1101 * Locking Note: This function will be called without the lport lock
1102 * held, but it will lock, call an _enter_* function or fc_lport_error
1103 * and then unlock the lport.
1104 */
1105static void fc_lport_rsnn_nn_resp(struct fc_seq *sp, struct fc_frame *fp,
1106 void *lp_arg)
1107{
1108 struct fc_lport *lport = lp_arg;
1109 struct fc_frame_header *fh;
1110 struct fc_ct_hdr *ct;
1111
1112 FC_LPORT_DBG(lport, "Received a RSNN_NN %s\n", fc_els_resp_type(fp));
1113
1114 if (fp == ERR_PTR(-FC_EX_CLOSED))
1115 return;
1116
1117 mutex_lock(&lport->lp_mutex);
1118
1119 if (lport->state != LPORT_ST_RSNN_NN) {
1120 FC_LPORT_DBG(lport, "Received a RSNN_NN response, but in state "
1121 "%s\n", fc_lport_state(lport));
1122 if (IS_ERR(fp))
1123 goto err;
1124 goto out;
1125 }
1126
1127 if (IS_ERR(fp)) {
1128 fc_lport_error(lport, fp);
1129 goto err;
1130 }
1131
1132 fh = fc_frame_header_get(fp);
1133 ct = fc_frame_payload_get(fp, sizeof(*ct));
1134 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1135 ct->ct_fs_type == FC_FST_DIR &&
1136 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1137 ntohs(ct->ct_cmd) == FC_FS_ACC)
1138 fc_lport_enter_rsnn_nn(lport);
1139 else
1140 fc_lport_error(lport, fp);
1141
1142out:
1143 fc_frame_free(fp);
1144err:
1145 mutex_unlock(&lport->lp_mutex);
1146}
1147
c9c7bd7a
CL
1148/**
1149 * fc_lport_rnn_id_resp() - Handle response to Register Node
1150 * Name by ID (RNN_ID) request
1151 * @sp: current sequence in RNN_ID exchange
1152 * @fp: response frame
1153 * @lp_arg: Fibre Channel host port instance
1154 *
1155 * Locking Note: This function will be called without the lport lock
1156 * held, but it will lock, call an _enter_* function or fc_lport_error
1157 * and then unlock the lport.
1158 */
1159static void fc_lport_rnn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1160 void *lp_arg)
1161{
1162 struct fc_lport *lport = lp_arg;
1163 struct fc_frame_header *fh;
1164 struct fc_ct_hdr *ct;
1165
1166 FC_LPORT_DBG(lport, "Received a RNN_ID %s\n", fc_els_resp_type(fp));
1167
1168 if (fp == ERR_PTR(-FC_EX_CLOSED))
1169 return;
1170
1171 mutex_lock(&lport->lp_mutex);
1172
1173 if (lport->state != LPORT_ST_RNN_ID) {
1174 FC_LPORT_DBG(lport, "Received a RNN_ID response, but in state "
1175 "%s\n", fc_lport_state(lport));
1176 if (IS_ERR(fp))
1177 goto err;
1178 goto out;
1179 }
1180
1181 if (IS_ERR(fp)) {
1182 fc_lport_error(lport, fp);
1183 goto err;
1184 }
1185
1186 fh = fc_frame_header_get(fp);
1187 ct = fc_frame_payload_get(fp, sizeof(*ct));
1188 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1189 ct->ct_fs_type == FC_FST_DIR &&
1190 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1191 ntohs(ct->ct_cmd) == FC_FS_ACC)
1192 fc_lport_enter_rft_id(lport);
1193 else
1194 fc_lport_error(lport, fp);
1195
1196out:
1197 fc_frame_free(fp);
1198err:
1199 mutex_unlock(&lport->lp_mutex);
1200}
1201
/**
 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
 * @sp: current sequence in SCR exchange
 * @fp: response frame
 * @lp_arg: Fibre Channel lport port instance that sent the registration request
 *
 * SCR is the final registration step: an ELS accept moves the lport to
 * the READY state.
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error
 * and then unlock the lport.
 */
static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	u8 op;

	FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));

	/* Exchange was closed out from under us; nothing to free or do */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_SCR) {
		FC_LPORT_DBG(lport, "Received a SCR response, but in state "
			     "%s\n", fc_lport_state(lport));
		/* error pointers must not reach fc_frame_free() */
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC)
		fc_lport_enter_ready(lport);
	else
		fc_lport_error(lport, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
1249
1250/**
34f42a07 1251 * fc_lport_enter_scr() - Send a State Change Register (SCR) request
42e9a92f
RL
1252 * @lport: Fibre Channel local port to register for state changes
1253 *
1254 * Locking Note: The lport lock is expected to be held before calling
1255 * this routine.
1256 */
1257static void fc_lport_enter_scr(struct fc_lport *lport)
1258{
1259 struct fc_frame *fp;
1260
7414705e
RL
1261 FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
1262 fc_lport_state(lport));
42e9a92f
RL
1263
1264 fc_lport_state_enter(lport, LPORT_ST_SCR);
1265
1266 fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
1267 if (!fp) {
1268 fc_lport_error(lport, fp);
1269 return;
1270 }
1271
a46f327a 1272 if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
42e9a92f 1273 fc_lport_scr_resp, lport, lport->e_d_tov))
8f550f93 1274 fc_lport_error(lport, NULL);
42e9a92f
RL
1275}
1276
1277/**
34f42a07 1278 * fc_lport_enter_rft_id() - Register FC4-types with the name server
42e9a92f
RL
1279 * @lport: Fibre Channel local port to register
1280 *
1281 * Locking Note: The lport lock is expected to be held before calling
1282 * this routine.
1283 */
1284static void fc_lport_enter_rft_id(struct fc_lport *lport)
1285{
1286 struct fc_frame *fp;
1287 struct fc_ns_fts *lps;
1288 int i;
1289
7414705e
RL
1290 FC_LPORT_DBG(lport, "Entered RFT_ID state from %s state\n",
1291 fc_lport_state(lport));
42e9a92f
RL
1292
1293 fc_lport_state_enter(lport, LPORT_ST_RFT_ID);
1294
1295 lps = &lport->fcts;
1296 i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]);
1297 while (--i >= 0)
1298 if (ntohl(lps->ff_type_map[i]) != 0)
1299 break;
1300 if (i < 0) {
1301 /* nothing to register, move on to SCR */
1302 fc_lport_enter_scr(lport);
1303 return;
1304 }
1305
1306 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
1307 sizeof(struct fc_ns_rft));
1308 if (!fp) {
1309 fc_lport_error(lport, fp);
1310 return;
1311 }
1312
a46f327a 1313 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RFT_ID,
42e9a92f
RL
1314 fc_lport_rft_id_resp,
1315 lport, lport->e_d_tov))
1316 fc_lport_error(lport, fp);
1317}
1318
/**
 * fc_lport_enter_rspn_id() - Register symbolic port name with the name server
 * @lport: Fibre Channel local port to register
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_rspn_id(struct fc_lport *lport)
{
	struct fc_frame *fp;
	size_t len;

	FC_LPORT_DBG(lport, "Entered RSPN_ID state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_RSPN_ID);

	/* The symbolic name is capped at 255 bytes; the frame is sized
	 * for the CT header plus the RSPN payload plus the name.
	 */
	len = strnlen(fc_host_symbolic_name(lport->host), 255);
	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
			    sizeof(struct fc_ns_rspn) + len);
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID,
				  fc_lport_rspn_id_resp,
				  lport, lport->e_d_tov))
		fc_lport_error(lport, fp);
}
1349
/**
 * fc_lport_enter_rsnn_nn() - Register symbolic node name with the name server
 * @lport: Fibre Channel local port to register
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_rsnn_nn(struct fc_lport *lport)
{
	struct fc_frame *fp;
	size_t len;

	FC_LPORT_DBG(lport, "Entered RSNN_NN state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_RSNN_NN);

	/* The symbolic name is capped at 255 bytes; the frame is sized
	 * for the CT header plus the RSNN payload plus the name.
	 */
	len = strnlen(fc_host_symbolic_name(lport->host), 255);
	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
			    sizeof(struct fc_ns_rsnn) + len);
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSNN_NN,
				  fc_lport_rsnn_nn_resp,
				  lport, lport->e_d_tov))
		fc_lport_error(lport, fp);
}
1380
/**
 * fc_lport_enter_rnn_id() - Register node name with the name server
 * @lport: Fibre Channel local port to register
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_rnn_id(struct fc_lport *lport)
{
	struct fc_frame *fp;

	FC_LPORT_DBG(lport, "Entered RNN_ID state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_RNN_ID);

	/* Frame holds the CT header plus the RN_ID payload. */
	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
			    sizeof(struct fc_ns_rn_id));
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RNN_ID,
				  fc_lport_rnn_id_resp,
				  lport, lport->e_d_tov))
		fc_lport_error(lport, fp);
}
1409
/* rport event notifications for lport-owned rports (e.g. the name
 * server) are delivered to fc_lport_rport_callback.
 */
static struct fc_rport_operations fc_lport_rport_ops = {
	.event_callback = fc_lport_rport_callback,
};
1413
/**
 * fc_lport_enter_dns() - Create a rport to the name server
 * @lport: Fibre Channel local port requesting a rport for the name server
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_dns(struct fc_lport *lport)
{
	struct fc_rport_priv *rdata;

	FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_DNS);

	/* rport creation is serialized by the discovery mutex, which may
	 * be taken while holding the lport lock (lport > disc > rport).
	 */
	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
	mutex_unlock(&lport->disc.disc_mutex);
	if (!rdata)
		goto err;

	/* Log in to the directory server; completion events are
	 * delivered via fc_lport_rport_ops (fc_lport_rport_callback).
	 */
	rdata->ops = &fc_lport_rport_ops;
	lport->tt.rport_login(rdata);
	return;

err:
	/* NULL frame: treat as allocation failure and schedule a retry */
	fc_lport_error(lport, NULL);
}
1443
/**
 * fc_lport_timeout() - Handler for the retry_work timer.
 * @work: The work struct of the fc_lport
 *
 * Re-enters the lport's current state to retry the request that timed
 * out or could not be allocated (see fc_lport_error()).  READY and
 * DISABLED have no outstanding request, so firing there is a bug
 * (WARN_ON); RESET is deliberately a no-op.
 */
static void fc_lport_timeout(struct work_struct *work)
{
	struct fc_lport *lport =
		container_of(work, struct fc_lport,
			     retry_work.work);

	mutex_lock(&lport->lp_mutex);

	switch (lport->state) {
	case LPORT_ST_DISABLED:
		WARN_ON(1);
		break;
	case LPORT_ST_READY:
		WARN_ON(1);
		break;
	case LPORT_ST_RESET:
		break;
	case LPORT_ST_FLOGI:
		fc_lport_enter_flogi(lport);
		break;
	case LPORT_ST_DNS:
		fc_lport_enter_dns(lport);
		break;
	case LPORT_ST_RNN_ID:
		fc_lport_enter_rnn_id(lport);
		break;
	case LPORT_ST_RSNN_NN:
		fc_lport_enter_rsnn_nn(lport);
		break;
	case LPORT_ST_RSPN_ID:
		fc_lport_enter_rspn_id(lport);
		break;
	case LPORT_ST_RFT_ID:
		fc_lport_enter_rft_id(lport);
		break;
	case LPORT_ST_SCR:
		fc_lport_enter_scr(lport);
		break;
	case LPORT_ST_LOGO:
		fc_lport_enter_logo(lport);
		break;
	}

	mutex_unlock(&lport->lp_mutex);
}
1493
/**
 * fc_lport_logo_resp() - Handle response to LOGO request
 * @sp: current sequence in LOGO exchange
 * @fp: response frame
 * @lp_arg: Fibre Channel lport port instance that sent the LOGO request
 *
 * An ELS accept completes the logout and moves the lport to the
 * DISABLED state.
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error
 * and then unlock the lport.
 */
void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	u8 op;

	FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));

	/* Exchange was closed out from under us; nothing to free or do */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_LOGO) {
		FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
			     "%s\n", fc_lport_state(lport));
		/* error pointers must not reach fc_frame_free() */
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC)
		fc_lport_enter_disabled(lport);
	else
		fc_lport_error(lport, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_logo_resp);
42e9a92f
RL
1542
1543/**
34f42a07 1544 * fc_rport_enter_logo() - Logout of the fabric
42e9a92f
RL
1545 * @lport: Fibre Channel local port to be logged out
1546 *
1547 * Locking Note: The lport lock is expected to be held before calling
1548 * this routine.
1549 */
1550static void fc_lport_enter_logo(struct fc_lport *lport)
1551{
1552 struct fc_frame *fp;
1553 struct fc_els_logo *logo;
1554
7414705e
RL
1555 FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
1556 fc_lport_state(lport));
42e9a92f
RL
1557
1558 fc_lport_state_enter(lport, LPORT_ST_LOGO);
8faecddb 1559 fc_vports_linkchange(lport);
42e9a92f 1560
42e9a92f
RL
1561 fp = fc_frame_alloc(lport, sizeof(*logo));
1562 if (!fp) {
1563 fc_lport_error(lport, fp);
1564 return;
1565 }
1566
a46f327a
JE
1567 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
1568 fc_lport_logo_resp, lport, lport->e_d_tov))
8f550f93 1569 fc_lport_error(lport, NULL);
42e9a92f
RL
1570}
1571
/**
 * fc_lport_flogi_resp() - Handle response to FLOGI request
 * @sp: current sequence in FLOGI exchange
 * @fp: response frame
 * @lp_arg: Fibre Channel lport port instance that sent the FLOGI request
 *
 * On LS_ACC with a non-zero assigned FID, the common service parameters
 * are parsed to clamp the max frame size and set the timeout values.
 * If the responder is not an F_Port the link is configured as
 * point-to-point; otherwise the fabric name is recorded and name-server
 * login (DNS state) begins.
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error
 * and then unlock the lport.
 */
void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
			 void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_frame_header *fh;
	struct fc_els_flogi *flp;
	u32 did;
	u16 csp_flags;
	unsigned int r_a_tov;
	unsigned int e_d_tov;
	u16 mfs;

	FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));

	/* Exchange was closed out from under us; nothing to free or do */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_FLOGI) {
		FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
			     "%s\n", fc_lport_state(lport));
		/* error pointers must not reach fc_frame_free() */
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	fh = fc_frame_header_get(fp);
	did = ntoh24(fh->fh_d_id);
	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {

		printk(KERN_INFO "libfc: Assigned FID (%6x) in FLOGI response\n",
		       did);
		fc_host_port_id(lport->host) = did;

		flp = fc_frame_payload_get(fp, sizeof(*flp));
		if (flp) {
			/* Clamp our max frame size to the responder's
			 * buffer-to-buffer data field size, if smaller.
			 */
			mfs = ntohs(flp->fl_csp.sp_bb_data) &
				FC_SP_BB_DATA_MASK;
			if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
			    mfs < lport->mfs)
				lport->mfs = mfs;
			csp_flags = ntohs(flp->fl_csp.sp_features);
			r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
			e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
			/* FC_SP_FT_EDTR appears to indicate E_D_TOV is
			 * in a finer resolution (ns) — scaled to ms here;
			 * confirm against the FC-LS CSP definition.
			 */
			if (csp_flags & FC_SP_FT_EDTR)
				e_d_tov /= 1000000;

			lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);

			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
				/* Responder is an N_Port: point-to-point.
				 * Derive R_A_TOV since none is negotiated.
				 */
				if (e_d_tov > lport->e_d_tov)
					lport->e_d_tov = e_d_tov;
				lport->r_a_tov = 2 * e_d_tov;
				printk(KERN_INFO "libfc: Port (%6x) entered "
				       "point to point mode\n", did);
				fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
						   get_unaligned_be64(
							   &flp->fl_wwpn),
						   get_unaligned_be64(
							   &flp->fl_wwnn));
			} else {
				/* Fabric login: adopt the fabric's timeout
				 * values and continue with name-server login.
				 */
				lport->e_d_tov = e_d_tov;
				lport->r_a_tov = r_a_tov;
				fc_host_fabric_name(lport->host) =
					get_unaligned_be64(&flp->fl_wwnn);
				fc_lport_enter_dns(lport);
			}
		}
	} else {
		FC_LPORT_DBG(lport, "Bad FLOGI response\n");
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_flogi_resp);
42e9a92f
RL
1666
1667/**
34f42a07 1668 * fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager
42e9a92f
RL
1669 * @lport: Fibre Channel local port to be logged in to the fabric
1670 *
1671 * Locking Note: The lport lock is expected to be held before calling
1672 * this routine.
1673 */
1674void fc_lport_enter_flogi(struct fc_lport *lport)
1675{
1676 struct fc_frame *fp;
1677
7414705e
RL
1678 FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
1679 fc_lport_state(lport));
42e9a92f
RL
1680
1681 fc_lport_state_enter(lport, LPORT_ST_FLOGI);
1682
1683 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
1684 if (!fp)
1685 return fc_lport_error(lport, fp);
1686
db36c06c
CL
1687 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
1688 lport->vport ? ELS_FDISC : ELS_FLOGI,
42e9a92f 1689 fc_lport_flogi_resp, lport, lport->e_d_tov))
8f550f93 1690 fc_lport_error(lport, NULL);
42e9a92f
RL
1691}
1692
/* Configure a fc_lport: set up the retry timer, the lport state lock,
 * the initial (DISABLED) state, and the default FC-4 types (FCP, CT).
 */
int fc_lport_config(struct fc_lport *lport)
{
	INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
	mutex_init(&lport->lp_mutex);

	fc_lport_state_enter(lport, LPORT_ST_DISABLED);

	fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
	fc_lport_add_fc4_type(lport, FC_TYPE_CT);

	return 0;
}
1707
/* Initialize a fc_lport: install default transport-template handlers
 * where the LLD did not supply its own, and publish the port's
 * attributes to the FC transport class.
 */
int fc_lport_init(struct fc_lport *lport)
{
	if (!lport->tt.lport_recv)
		lport->tt.lport_recv = fc_lport_recv_req;

	if (!lport->tt.lport_reset)
		lport->tt.lport_reset = fc_lport_reset;

	fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
	fc_host_node_name(lport->host) = lport->wwnn;
	fc_host_port_name(lport->host) = lport->wwpn;
	fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
	/* NOTE(review): indices 2 and 7 of the FC-4 bitmaps are set —
	 * presumably corresponding to the FCP and CT types added in
	 * fc_lport_config(); confirm against the transport class's
	 * fc4s bitmap layout.
	 */
	memset(fc_host_supported_fc4s(lport->host), 0,
	       sizeof(fc_host_supported_fc4s(lport->host)));
	fc_host_supported_fc4s(lport->host)[2] = 1;
	fc_host_supported_fc4s(lport->host)[7] = 1;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(lport->host), 0,
	       sizeof(fc_host_active_fc4s(lport->host)));
	fc_host_active_fc4s(lport->host)[2] = 1;
	fc_host_active_fc4s(lport->host)[7] = 1;
	fc_host_maxframe_size(lport->host) = lport->mfs;
	/* Advertise only the speeds the underlying link supports. */
	fc_host_supported_speeds(lport->host) = 0;
	if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
	if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;

	return 0;
}