drivers/scsi/libfc/fc_lport.c
1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * PORT LOCKING NOTES
22 *
23 * These comments only apply to the 'port code' which consists of the lport,
24 * disc and rport blocks.
25 *
26 * MOTIVATION
27 *
28 * The lport, disc and rport blocks all have mutexes that are used to protect
29 * those objects. The main motivation for these locks is to prevent an
30 * lport reset just before we send a frame. In that scenario the
31 * lport's FID would get set to zero and then we'd send a frame with an
32 * invalid SID. We also need to ensure that states don't change unexpectedly
33 * while processing another state.
34 *
35 * HIERARCHY
36 *
37 * The following hierarchy defines the locking rules. A greater lock
38 * may be held before acquiring a lesser lock, but a lesser lock should never
39 * be held while attempting to acquire a greater lock. Here is the hierarchy:
40 *
41 * lport > disc, lport > rport, disc > rport
42 *
43 * CALLBACKS
44 *
45 * The callbacks cause complications with this scheme. There is a callback
46 * from the rport (to either lport or disc) and a callback from disc
47 * (to the lport).
48 *
49 * As rports exit the rport state machine a callback is made to the owner of
50 * the rport to notify success or failure. Since the callback is likely to
51 * cause the lport or disc to grab its lock we cannot hold the rport lock
52 * while making the callback. To ensure that the rport is not free'd while
53 * processing the callback the rport callbacks are serialized through a
54 * single-threaded workqueue. An rport would never be free'd while in a
55 * callback handler because no other rport work in this queue can be executed
56 * at the same time.
57 *
58 * When discovery succeeds or fails a callback is made to the lport as
59 * notification. Currently, successful discovery causes the lport to take no
60 * action. A failure will cause the lport to reset. There is likely a circular
61 * locking problem with this implementation.
62 */
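/*
 * A minimal sketch of the hierarchy above (illustrative only, not code from
 * this file): a caller that needs more than one of these locks takes the
 * greater lock first and releases in the reverse order.  lp_mutex and
 * disc.disc_mutex are fields used below; rp_mutex is assumed to be the
 * per-rport mutex owned by the rport block.
 *
 *	mutex_lock(&lport->lp_mutex);		lport  (greatest)
 *	mutex_lock(&lport->disc.disc_mutex);	disc
 *	mutex_lock(&rdata->rp_mutex);		rport  (least)
 *	...
 *	mutex_unlock(&rdata->rp_mutex);
 *	mutex_unlock(&lport->disc.disc_mutex);
 *	mutex_unlock(&lport->lp_mutex);
 */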
63
64/*
65 * LPORT LOCKING
66 *
67 * The critical sections protected by the lport's mutex are quite broad and
68 * may be improved upon in the future. The lport code and its locking don't
69 * influence the I/O path, so excessive locking doesn't penalize I/O
70 * performance.
71 *
72 * The strategy is to lock whenever processing a request or response. Note
73 * that every _enter_* function corresponds to a state change. They generally
74 * change the lport's state and then send a request out on the wire. We lock
75 * before calling any of these functions to protect that state change. This
76 * means that the entry points into the lport block manage the locks while
77 * the state machine transitions between states (i.e. _enter_* functions),
78 * always staying protected.
79 *
80 * When handling responses we also hold the lport mutex broadly. When the
81 * lport receives the response frame it locks the mutex and then calls the
82 * appropriate handler for the particular response. Generally a response will
83 * trigger a state change and so the lock must already be held.
84 *
85 * Retries also have to consider the locking. The retries occur from a work
86 * context and the work function will lock the lport and then retry the state
87 * (i.e. _enter_* function).
88 */
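/*
 * Sketch of the pattern described above (the entry point name here is made
 * up for illustration; the helpers are real functions from this file): the
 * entry point takes lp_mutex, and the _enter_* routine changes state and
 * sends its request while the mutex is still held.
 *
 *	static void fc_lport_example_entry(struct fc_lport *lport,
 *					   struct fc_frame *fp)
 *	{
 *		mutex_lock(&lport->lp_mutex);
 *		fc_lport_enter_flogi(lport);	state change + send, locked
 *		mutex_unlock(&lport->lp_mutex);
 *		fc_frame_free(fp);
 *	}
 */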
89
90#include <linux/timer.h>
91#include <linux/delay.h>
92#include <linux/module.h>
93#include <linux/slab.h>
94#include <asm/unaligned.h>
95
96#include <scsi/fc/fc_gs.h>
97
98#include <scsi/libfc.h>
99#include <scsi/fc_encode.h>
100#include <linux/scatterlist.h>
101
102#include "fc_libfc.h"
103
104/* Fabric IDs to use for point-to-point mode, chosen on whims. */
105#define FC_LOCAL_PTP_FID_LO 0x010101
106#define FC_LOCAL_PTP_FID_HI 0x010102
107
108#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds) */
109
110static void fc_lport_error(struct fc_lport *, struct fc_frame *);
111
112static void fc_lport_enter_reset(struct fc_lport *);
113static void fc_lport_enter_flogi(struct fc_lport *);
114static void fc_lport_enter_dns(struct fc_lport *);
115static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
116static void fc_lport_enter_scr(struct fc_lport *);
117static void fc_lport_enter_ready(struct fc_lport *);
118static void fc_lport_enter_logo(struct fc_lport *);
119static void fc_lport_enter_fdmi(struct fc_lport *lport);
120static void fc_lport_enter_ms(struct fc_lport *, enum fc_lport_state);
121
122static const char *fc_lport_state_names[] = {
123 [LPORT_ST_DISABLED] = "disabled",
124 [LPORT_ST_FLOGI] = "FLOGI",
125 [LPORT_ST_DNS] = "dNS",
126 [LPORT_ST_RNN_ID] = "RNN_ID",
127 [LPORT_ST_RSNN_NN] = "RSNN_NN",
128 [LPORT_ST_RSPN_ID] = "RSPN_ID",
129 [LPORT_ST_RFT_ID] = "RFT_ID",
130 [LPORT_ST_RFF_ID] = "RFF_ID",
131 [LPORT_ST_FDMI] = "FDMI",
132 [LPORT_ST_RHBA] = "RHBA",
133 [LPORT_ST_RPA] = "RPA",
134 [LPORT_ST_DHBA] = "DHBA",
135 [LPORT_ST_DPRT] = "DPRT",
136 [LPORT_ST_SCR] = "SCR",
137 [LPORT_ST_READY] = "Ready",
138 [LPORT_ST_LOGO] = "LOGO",
139 [LPORT_ST_RESET] = "reset",
140};
141
142/**
143 * struct fc_bsg_info - FC Passthrough management structure
144 * @job: The passthrough job
145 * @lport: The local port to pass through a command
146 * @rsp_code: The expected response code
147 * @sg: job->reply_payload.sg_list
148 * @nents: job->reply_payload.sg_cnt
149 * @offset: The offset into the response data
150 */
151struct fc_bsg_info {
152 struct bsg_job *job;
153 struct fc_lport *lport;
154 u16 rsp_code;
155 struct scatterlist *sg;
156 u32 nents;
157 size_t offset;
158};
159
160/**
161 * fc_frame_drop() - Dummy frame handler
162 * @lport: The local port the frame was received on
163 * @fp: The received frame
164 */
165static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
166{
167 fc_frame_free(fp);
168 return 0;
169}
170
171/**
172 * fc_lport_rport_callback() - Event handler for rport events
173 * @lport: The lport which is receiving the event
174 * @rdata: private remote port data
175 * @event: The event that occurred
176 *
177 * Locking Note: The rport lock should not be held when calling
178 * this function.
179 */
180static void fc_lport_rport_callback(struct fc_lport *lport,
181 struct fc_rport_priv *rdata,
182 enum fc_rport_event event)
183{
184 FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
185 rdata->ids.port_id);
186
187 mutex_lock(&lport->lp_mutex);
188 switch (event) {
189 case RPORT_EV_READY:
190 if (lport->state == LPORT_ST_DNS) {
191 lport->dns_rdata = rdata;
192 fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
193 } else if (lport->state == LPORT_ST_FDMI) {
194 lport->ms_rdata = rdata;
195 fc_lport_enter_ms(lport, LPORT_ST_DHBA);
196 } else {
197 FC_LPORT_DBG(lport, "Received a READY event "
198 "on port (%6.6x) for the directory "
199 "server, but the lport is not "
200 "in the DNS or FDMI state, it's in the "
201 "%d state", rdata->ids.port_id,
202 lport->state);
203 fc_rport_logoff(rdata);
204 }
205 break;
206 case RPORT_EV_LOGO:
207 case RPORT_EV_FAILED:
208 case RPORT_EV_STOP:
209 if (rdata->ids.port_id == FC_FID_DIR_SERV)
210 lport->dns_rdata = NULL;
211 else if (rdata->ids.port_id == FC_FID_MGMT_SERV)
212 lport->ms_rdata = NULL;
213 break;
214 case RPORT_EV_NONE:
215 break;
216 }
217 mutex_unlock(&lport->lp_mutex);
218}
219
220/**
221 * fc_lport_state() - Return a string which represents the lport's state
222 * @lport: The lport whose state is to be converted to a string
223 */
224static const char *fc_lport_state(struct fc_lport *lport)
225{
226 const char *cp;
227
228 cp = fc_lport_state_names[lport->state];
229 if (!cp)
230 cp = "unknown";
231 return cp;
232}
233
234/**
235 * fc_lport_ptp_setup() - Create an rport for point-to-point mode
236 * @lport: The lport to attach the ptp rport to
237 * @remote_fid: The FID of the ptp rport
238 * @remote_wwpn: The WWPN of the ptp rport
239 * @remote_wwnn: The WWNN of the ptp rport
240 *
241 * Locking Note: The lport lock is expected to be held before calling
242 * this routine.
243 */
244static void fc_lport_ptp_setup(struct fc_lport *lport,
245 u32 remote_fid, u64 remote_wwpn,
246 u64 remote_wwnn)
247{
248 if (lport->ptp_rdata) {
249 fc_rport_logoff(lport->ptp_rdata);
250 kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
251 }
252 mutex_lock(&lport->disc.disc_mutex);
253 lport->ptp_rdata = fc_rport_create(lport, remote_fid);
254 kref_get(&lport->ptp_rdata->kref);
255 lport->ptp_rdata->ids.port_name = remote_wwpn;
256 lport->ptp_rdata->ids.node_name = remote_wwnn;
257 mutex_unlock(&lport->disc.disc_mutex);
258
259 fc_rport_login(lport->ptp_rdata);
260
261 fc_lport_enter_ready(lport);
262}
263
264/**
265 * fc_get_host_port_state() - Return the port state of the given Scsi_Host
266 * @shost: The SCSI host whose port state is to be determined
267 */
268void fc_get_host_port_state(struct Scsi_Host *shost)
269{
270 struct fc_lport *lport = shost_priv(shost);
271
272 mutex_lock(&lport->lp_mutex);
273 if (!lport->link_up)
274 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
275 else
276 switch (lport->state) {
277 case LPORT_ST_READY:
278 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
279 break;
280 default:
281 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
282 }
283 mutex_unlock(&lport->lp_mutex);
284}
285EXPORT_SYMBOL(fc_get_host_port_state);
286
287/**
288 * fc_get_host_speed() - Return the speed of the given Scsi_Host
289 * @shost: The SCSI host whose port speed is to be determined
290 */
291void fc_get_host_speed(struct Scsi_Host *shost)
292{
293 struct fc_lport *lport = shost_priv(shost);
294
295 fc_host_speed(shost) = lport->link_speed;
296}
297EXPORT_SYMBOL(fc_get_host_speed);
298
299/**
300 * fc_get_host_stats() - Return the Scsi_Host's statistics
301 * @shost: The SCSI host whose statistics are to be returned
302 */
303struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
304{
305 struct fc_host_statistics *fc_stats;
306 struct fc_lport *lport = shost_priv(shost);
307 unsigned int cpu;
308 u64 fcp_in_bytes = 0;
309 u64 fcp_out_bytes = 0;
310
311 fc_stats = &lport->host_stats;
312 memset(fc_stats, 0, sizeof(struct fc_host_statistics));
313
314 fc_stats->seconds_since_last_reset = (jiffies - lport->boot_time) / HZ;
315
316 for_each_possible_cpu(cpu) {
317 struct fc_stats *stats;
318
319 stats = per_cpu_ptr(lport->stats, cpu);
320
321 fc_stats->tx_frames += stats->TxFrames;
322 fc_stats->tx_words += stats->TxWords;
323 fc_stats->rx_frames += stats->RxFrames;
324 fc_stats->rx_words += stats->RxWords;
325 fc_stats->error_frames += stats->ErrorFrames;
326 fc_stats->invalid_crc_count += stats->InvalidCRCCount;
327 fc_stats->fcp_input_requests += stats->InputRequests;
328 fc_stats->fcp_output_requests += stats->OutputRequests;
329 fc_stats->fcp_control_requests += stats->ControlRequests;
330 fcp_in_bytes += stats->InputBytes;
331 fcp_out_bytes += stats->OutputBytes;
332 fc_stats->fcp_packet_alloc_failures += stats->FcpPktAllocFails;
333 fc_stats->fcp_packet_aborts += stats->FcpPktAborts;
334 fc_stats->fcp_frame_alloc_failures += stats->FcpFrameAllocFails;
335 fc_stats->link_failure_count += stats->LinkFailureCount;
336 }
337 fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
338 fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
339 fc_stats->lip_count = -1;
340 fc_stats->nos_count = -1;
341 fc_stats->loss_of_sync_count = -1;
342 fc_stats->loss_of_signal_count = -1;
343 fc_stats->prim_seq_protocol_err_count = -1;
344 fc_stats->dumped_frames = -1;
345
346 /* update exchange stats */
347 fc_exch_update_stats(lport);
348
349 return fc_stats;
350}
351EXPORT_SYMBOL(fc_get_host_stats);
352
353/**
354 * fc_lport_flogi_fill() - Fill in FLOGI command for request
355 * @lport: The local port the FLOGI is for
356 * @flogi: The FLOGI command
357 * @op: The opcode
358 */
359static void fc_lport_flogi_fill(struct fc_lport *lport,
360 struct fc_els_flogi *flogi,
361 unsigned int op)
362{
363 struct fc_els_csp *sp;
364 struct fc_els_cssp *cp;
365
366 memset(flogi, 0, sizeof(*flogi));
367 flogi->fl_cmd = (u8) op;
368 put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
369 put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
370 sp = &flogi->fl_csp;
371 sp->sp_hi_ver = 0x20;
372 sp->sp_lo_ver = 0x20;
373 sp->sp_bb_cred = htons(10); /* this gets set by gateway */
374 sp->sp_bb_data = htons((u16) lport->mfs);
375 cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
376 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
377 if (op != ELS_FLOGI) {
378 sp->sp_features = htons(FC_SP_FT_CIRO);
379 sp->sp_tot_seq = htons(255); /* seq. we accept */
380 sp->sp_rel_off = htons(0x1f);
381 sp->sp_e_d_tov = htonl(lport->e_d_tov);
382
383 cp->cp_rdfs = htons((u16) lport->mfs);
384 cp->cp_con_seq = htons(255);
385 cp->cp_open_seq = 1;
386 }
387}
388
389/**
390 * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port
391 * @lport: The local port to add a new FC-4 type to
392 * @type: The new FC-4 type
393 */
394static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
395{
396 __be32 *mp;
397
398 mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
399 *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
400}
401
402/**
403 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
404 * @lport: Fibre Channel local port receiving the RLIR
405 * @fp: The RLIR request frame
406 *
407 * Locking Note: The lport lock is expected to be held before calling
408 * this function.
409 */
410static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
411{
412 FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
413 fc_lport_state(lport));
414
415 fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
416 fc_frame_free(fp);
417}
418
419/**
420 * fc_lport_recv_echo_req() - Handle received ECHO request
421 * @lport: The local port receiving the ECHO
422 * @fp: ECHO request frame
423 *
424 * Locking Note: The lport lock is expected to be held before calling
425 * this function.
426 */
427static void fc_lport_recv_echo_req(struct fc_lport *lport,
428 struct fc_frame *in_fp)
429{
430 struct fc_frame *fp;
431 unsigned int len;
432 void *pp;
433 void *dp;
434
435 FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
436 fc_lport_state(lport));
437
438 len = fr_len(in_fp) - sizeof(struct fc_frame_header);
439 pp = fc_frame_payload_get(in_fp, len);
440
441 if (len < sizeof(__be32))
442 len = sizeof(__be32);
443
444 fp = fc_frame_alloc(lport, len);
445 if (fp) {
446 dp = fc_frame_payload_get(fp, len);
447 memcpy(dp, pp, len);
448 *((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
449 fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
450 lport->tt.frame_send(lport, fp);
451 }
452 fc_frame_free(in_fp);
453}
454
455/**
456 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
457 * @lport: The local port receiving the RNID
458 * @fp: The RNID request frame
459 *
460 * Locking Note: The lport lock is expected to be held before calling
461 * this function.
462 */
463static void fc_lport_recv_rnid_req(struct fc_lport *lport,
464 struct fc_frame *in_fp)
465{
466 struct fc_frame *fp;
467 struct fc_els_rnid *req;
468 struct {
469 struct fc_els_rnid_resp rnid;
470 struct fc_els_rnid_cid cid;
471 struct fc_els_rnid_gen gen;
472 } *rp;
473 struct fc_seq_els_data rjt_data;
474 u8 fmt;
475 size_t len;
476
477 FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
478 fc_lport_state(lport));
479
480 req = fc_frame_payload_get(in_fp, sizeof(*req));
481 if (!req) {
482 rjt_data.reason = ELS_RJT_LOGIC;
483 rjt_data.explan = ELS_EXPL_NONE;
484 fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
485 } else {
486 fmt = req->rnid_fmt;
487 len = sizeof(*rp);
488 if (fmt != ELS_RNIDF_GEN ||
489 ntohl(lport->rnid_gen.rnid_atype) == 0) {
490 fmt = ELS_RNIDF_NONE; /* nothing to provide */
491 len -= sizeof(rp->gen);
492 }
493 fp = fc_frame_alloc(lport, len);
494 if (fp) {
495 rp = fc_frame_payload_get(fp, len);
496 memset(rp, 0, len);
497 rp->rnid.rnid_cmd = ELS_LS_ACC;
498 rp->rnid.rnid_fmt = fmt;
499 rp->rnid.rnid_cid_len = sizeof(rp->cid);
500 rp->cid.rnid_wwpn = htonll(lport->wwpn);
501 rp->cid.rnid_wwnn = htonll(lport->wwnn);
502 if (fmt == ELS_RNIDF_GEN) {
503 rp->rnid.rnid_sid_len = sizeof(rp->gen);
504 memcpy(&rp->gen, &lport->rnid_gen,
505 sizeof(rp->gen));
506 }
507 fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
508 lport->tt.frame_send(lport, fp);
509 }
510 }
511 fc_frame_free(in_fp);
512}
513
514/**
515 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
516 * @lport: The local port receiving the LOGO
517 * @fp: The LOGO request frame
518 *
519 * Locking Note: The lport lock is expected to be held before calling
520 * this function.
521 */
522static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
523{
524 fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
525 fc_lport_enter_reset(lport);
526 fc_frame_free(fp);
527}
528
529/**
530 * fc_fabric_login() - Start the lport state machine
531 * @lport: The local port that should log into the fabric
532 *
533 * Locking Note: This function should not be called
534 * with the lport lock held.
535 */
536int fc_fabric_login(struct fc_lport *lport)
537{
538 int rc = -1;
539
540 mutex_lock(&lport->lp_mutex);
541 if (lport->state == LPORT_ST_DISABLED ||
542 lport->state == LPORT_ST_LOGO) {
543 fc_lport_state_enter(lport, LPORT_ST_RESET);
544 fc_lport_enter_reset(lport);
545 rc = 0;
546 }
547 mutex_unlock(&lport->lp_mutex);
548
549 return rc;
550}
551EXPORT_SYMBOL(fc_fabric_login);
552
553/**
554 * __fc_linkup() - Handler for transport linkup events
555 * @lport: The lport whose link is up
556 *
557 * Locking: must be called with the lp_mutex held
558 */
559void __fc_linkup(struct fc_lport *lport)
560{
561 if (!lport->link_up) {
562 lport->link_up = 1;
563
564 if (lport->state == LPORT_ST_RESET)
565 fc_lport_enter_flogi(lport);
566 }
567}
568
569/**
570 * fc_linkup() - Handler for transport linkup events
571 * @lport: The local port whose link is up
572 */
573void fc_linkup(struct fc_lport *lport)
574{
575 printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
576 lport->host->host_no, lport->port_id);
577
578 mutex_lock(&lport->lp_mutex);
579 __fc_linkup(lport);
580 mutex_unlock(&lport->lp_mutex);
581}
582EXPORT_SYMBOL(fc_linkup);
583
584/**
585 * __fc_linkdown() - Handler for transport linkdown events
586 * @lport: The lport whose link is down
587 *
588 * Locking: must be called with the lp_mutex held
589 */
590void __fc_linkdown(struct fc_lport *lport)
591{
592 if (lport->link_up) {
593 lport->link_up = 0;
594 fc_lport_enter_reset(lport);
595 lport->tt.fcp_cleanup(lport);
596 }
597}
598
599/**
600 * fc_linkdown() - Handler for transport linkdown events
601 * @lport: The local port whose link is down
602 */
603void fc_linkdown(struct fc_lport *lport)
604{
605 printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
606 lport->host->host_no, lport->port_id);
607
608 mutex_lock(&lport->lp_mutex);
609 __fc_linkdown(lport);
610 mutex_unlock(&lport->lp_mutex);
611}
612EXPORT_SYMBOL(fc_linkdown);
613
614/**
615 * fc_fabric_logoff() - Logout of the fabric
616 * @lport: The local port to logoff the fabric
617 *
618 * Return value:
619 * 0 for success, -1 for failure
620 */
621int fc_fabric_logoff(struct fc_lport *lport)
622{
623 lport->tt.disc_stop_final(lport);
624 mutex_lock(&lport->lp_mutex);
625 if (lport->dns_rdata)
626 fc_rport_logoff(lport->dns_rdata);
627 mutex_unlock(&lport->lp_mutex);
628 fc_rport_flush_queue();
629 mutex_lock(&lport->lp_mutex);
630 fc_lport_enter_logo(lport);
631 mutex_unlock(&lport->lp_mutex);
632 cancel_delayed_work_sync(&lport->retry_work);
633 return 0;
634}
635EXPORT_SYMBOL(fc_fabric_logoff);
636
637/**
638 * fc_lport_destroy() - Unregister a fc_lport
639 * @lport: The local port to unregister
640 *
641 * Note:
642 * exit routine for fc_lport instance
643 * clean-up all the allocated memory
644 * and free up other system resources.
645 *
646 */
647int fc_lport_destroy(struct fc_lport *lport)
648{
649 mutex_lock(&lport->lp_mutex);
650 lport->state = LPORT_ST_DISABLED;
651 lport->link_up = 0;
652 lport->tt.frame_send = fc_frame_drop;
653 mutex_unlock(&lport->lp_mutex);
654
655 lport->tt.fcp_abort_io(lport);
656 lport->tt.disc_stop_final(lport);
657 lport->tt.exch_mgr_reset(lport, 0, 0);
658 cancel_delayed_work_sync(&lport->retry_work);
659 fc_fc4_del_lport(lport);
660 return 0;
661}
662EXPORT_SYMBOL(fc_lport_destroy);
663
664/**
665 * fc_set_mfs() - Set the maximum frame size for a local port
666 * @lport: The local port to set the MFS for
667 * @mfs: The new MFS
668 */
669int fc_set_mfs(struct fc_lport *lport, u32 mfs)
670{
671 unsigned int old_mfs;
672 int rc = -EINVAL;
673
674 mutex_lock(&lport->lp_mutex);
675
676 old_mfs = lport->mfs;
677
678 if (mfs >= FC_MIN_MAX_FRAME) {
679 mfs &= ~3;
680 if (mfs > FC_MAX_FRAME)
681 mfs = FC_MAX_FRAME;
682 mfs -= sizeof(struct fc_frame_header);
683 lport->mfs = mfs;
684 rc = 0;
685 }
686
687 if (!rc && mfs < old_mfs)
688 fc_lport_enter_reset(lport);
689
690 mutex_unlock(&lport->lp_mutex);
691
692 return rc;
693}
694EXPORT_SYMBOL(fc_set_mfs);
695
696/**
697 * fc_lport_disc_callback() - Callback for discovery events
698 * @lport: The local port receiving the event
699 * @event: The discovery event
700 */
701static void fc_lport_disc_callback(struct fc_lport *lport,
702 enum fc_disc_event event)
703{
704 switch (event) {
705 case DISC_EV_SUCCESS:
706 FC_LPORT_DBG(lport, "Discovery succeeded\n");
707 break;
708 case DISC_EV_FAILED:
709 printk(KERN_ERR "host%d: libfc: "
710 "Discovery failed for port (%6.6x)\n",
711 lport->host->host_no, lport->port_id);
712 mutex_lock(&lport->lp_mutex);
713 fc_lport_enter_reset(lport);
714 mutex_unlock(&lport->lp_mutex);
715 break;
716 case DISC_EV_NONE:
717 WARN_ON(1);
718 break;
719 }
720}
721
722/**
723 * fc_lport_enter_ready() - Enter the ready state and start discovery
724 * @lport: The local port that is ready
725 *
726 * Locking Note: The lport lock is expected to be held before calling
727 * this routine.
728 */
729static void fc_lport_enter_ready(struct fc_lport *lport)
730{
731 FC_LPORT_DBG(lport, "Entered READY from state %s\n",
732 fc_lport_state(lport));
733
734 fc_lport_state_enter(lport, LPORT_ST_READY);
735 if (lport->vport)
736 fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
737 fc_vports_linkchange(lport);
738
739 if (!lport->ptp_rdata)
740 lport->tt.disc_start(fc_lport_disc_callback, lport);
741}
742
743/**
744 * fc_lport_set_port_id() - set the local port Port ID
745 * @lport: The local port which will have its Port ID set.
746 * @port_id: The new port ID.
747 * @fp: The frame containing the incoming request, or NULL.
748 *
749 * Locking Note: The lport lock is expected to be held before calling
750 * this function.
751 */
752static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
753 struct fc_frame *fp)
754{
755 if (port_id)
756 printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
757 lport->host->host_no, port_id);
758
759 lport->port_id = port_id;
760
761 /* Update the fc_host */
762 fc_host_port_id(lport->host) = port_id;
763
764 if (lport->tt.lport_set_port_id)
765 lport->tt.lport_set_port_id(lport, port_id, fp);
766}
767
768/**
769 * fc_lport_set_local_id() - set the local port Port ID for point-to-multipoint
770 * @lport: The local port which will have its Port ID set.
771 * @port_id: The new port ID.
772 *
773 * Called by the lower-level driver when transport sets the local port_id.
774 * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and
775 * discovery to be skipped.
776 */
777void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id)
778{
779 mutex_lock(&lport->lp_mutex);
780
781 fc_lport_set_port_id(lport, port_id, NULL);
782
783 switch (lport->state) {
784 case LPORT_ST_RESET:
785 case LPORT_ST_FLOGI:
786 if (port_id)
787 fc_lport_enter_ready(lport);
788 break;
789 default:
790 break;
791 }
792 mutex_unlock(&lport->lp_mutex);
793}
794EXPORT_SYMBOL(fc_lport_set_local_id);
795
796/**
797 * fc_lport_recv_flogi_req() - Receive a FLOGI request
798 * @lport: The local port that received the request
799 * @rx_fp: The FLOGI frame
800 *
801 * A received FLOGI request indicates a point-to-point connection.
802 * Accept it with the common service parameters indicating our N port.
803 * Set up to do a PLOGI if we have the higher-number WWPN.
804 *
805 * Locking Note: The lport lock is expected to be held before calling
806 * this function.
807 */
808static void fc_lport_recv_flogi_req(struct fc_lport *lport,
809 struct fc_frame *rx_fp)
810{
811 struct fc_frame *fp;
812 struct fc_frame_header *fh;
813 struct fc_els_flogi *flp;
814 struct fc_els_flogi *new_flp;
815 u64 remote_wwpn;
816 u32 remote_fid;
817 u32 local_fid;
818
819 FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
820 fc_lport_state(lport));
821
822 remote_fid = fc_frame_sid(rx_fp);
823 flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
824 if (!flp)
825 goto out;
826 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
827 if (remote_wwpn == lport->wwpn) {
828 printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
829 "with same WWPN %16.16llx\n",
830 lport->host->host_no, remote_wwpn);
831 goto out;
832 }
833 FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);
834
835 /*
836 * XXX what is the right thing to do for FIDs?
837 * The originator might expect our S_ID to be 0xfffffe.
838 * But if so, both of us could end up with the same FID.
839 */
840 local_fid = FC_LOCAL_PTP_FID_LO;
841 if (remote_wwpn < lport->wwpn) {
842 local_fid = FC_LOCAL_PTP_FID_HI;
843 if (!remote_fid || remote_fid == local_fid)
844 remote_fid = FC_LOCAL_PTP_FID_LO;
845 } else if (!remote_fid) {
846 remote_fid = FC_LOCAL_PTP_FID_HI;
847 }
848
849 fc_lport_set_port_id(lport, local_fid, rx_fp);
850
851 fp = fc_frame_alloc(lport, sizeof(*flp));
852 if (fp) {
853 new_flp = fc_frame_payload_get(fp, sizeof(*flp));
854 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
855 new_flp->fl_cmd = (u8) ELS_LS_ACC;
856
857 /*
858 * Send the response. If this fails, the originator should
859 * repeat the sequence.
860 */
861 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
862 fh = fc_frame_header_get(fp);
863 hton24(fh->fh_s_id, local_fid);
864 hton24(fh->fh_d_id, remote_fid);
865 lport->tt.frame_send(lport, fp);
866
867 } else {
868 fc_lport_error(lport, fp);
869 }
870 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
871 get_unaligned_be64(&flp->fl_wwnn));
872out:
873 fc_frame_free(rx_fp);
874}
875
876/**
877 * fc_lport_recv_els_req() - The generic lport ELS request handler
878 * @lport: The local port that received the request
879 * @fp: The request frame
880 *
881 * This function will see if the lport handles the request or
882 * if an rport should handle the request.
883 *
884 * Locking Note: This function should not be called with the lport
885 * lock held because it will grab the lock.
886 */
887static void fc_lport_recv_els_req(struct fc_lport *lport,
888 struct fc_frame *fp)
889{
890 void (*recv)(struct fc_lport *, struct fc_frame *);
891
892 mutex_lock(&lport->lp_mutex);
893
894 /*
895 * Handle special ELS cases like FLOGI, LOGO, and
896 * RSCN here. These don't require a session.
897 * Even if we had a session, it might not be ready.
898 */
899 if (!lport->link_up)
900 fc_frame_free(fp);
901 else {
902 /*
903 * Check opcode.
904 */
905 recv = fc_rport_recv_req;
906 switch (fc_frame_payload_op(fp)) {
907 case ELS_FLOGI:
908 if (!lport->point_to_multipoint)
909 recv = fc_lport_recv_flogi_req;
910 break;
911 case ELS_LOGO:
912 if (fc_frame_sid(fp) == FC_FID_FLOGI)
913 recv = fc_lport_recv_logo_req;
914 break;
915 case ELS_RSCN:
916 recv = lport->tt.disc_recv_req;
917 break;
918 case ELS_ECHO:
919 recv = fc_lport_recv_echo_req;
920 break;
921 case ELS_RLIR:
922 recv = fc_lport_recv_rlir_req;
923 break;
924 case ELS_RNID:
925 recv = fc_lport_recv_rnid_req;
926 break;
927 }
928
929 recv(lport, fp);
930 }
931 mutex_unlock(&lport->lp_mutex);
932}
933
934static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len,
935 const struct fc_els_spp *spp_in,
936 struct fc_els_spp *spp_out)
937{
938 return FC_SPP_RESP_INVL;
939}
940
941struct fc4_prov fc_lport_els_prov = {
942 .prli = fc_lport_els_prli,
943 .recv = fc_lport_recv_els_req,
944};
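/*
 * For context (a sketch; the registration itself happens elsewhere in libfc,
 * not in this file): a passive FC-4 provider such as fc_lport_els_prov is
 * assumed to be hooked into fc_passive_prov[] with something like
 *
 *	error = fc_fc4_register_provider(FC_TYPE_ELS, &fc_lport_els_prov);
 *
 * which is what lets fc_lport_recv() below look up a handler by fh_type
 * under rcu_read_lock().
 */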
945
946/**
947 * fc_lport_recv() - The generic lport request handler
948 * @lport: The lport that received the request
949 * @fp: The frame the request is in
950 *
951 * Locking Note: This function should not be called with the lport
952 * lock held because it may grab the lock.
953 */
954void fc_lport_recv(struct fc_lport *lport, struct fc_frame *fp)
955{
956 struct fc_frame_header *fh = fc_frame_header_get(fp);
957 struct fc_seq *sp = fr_seq(fp);
958 struct fc4_prov *prov;
959
960 /*
961 * Use RCU read lock and module_lock to be sure module doesn't
962 * deregister and get unloaded while we're calling it.
963 * try_module_get() is inlined and accepts a NULL parameter.
964 * Only ELSes and FCP target ops should come through here.
965 * The locking is unfortunate, and a better scheme is being sought.
966 */
967
968 rcu_read_lock();
969 if (fh->fh_type >= FC_FC4_PROV_SIZE)
970 goto drop;
971 prov = rcu_dereference(fc_passive_prov[fh->fh_type]);
972 if (!prov || !try_module_get(prov->module))
973 goto drop;
974 rcu_read_unlock();
975 prov->recv(lport, fp);
976 module_put(prov->module);
977 return;
978drop:
979 rcu_read_unlock();
980 FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
981 fc_frame_free(fp);
982 if (sp)
983 fc_exch_done(sp);
984}
985EXPORT_SYMBOL(fc_lport_recv);
986
987/**
988 * fc_lport_reset() - Reset a local port
989 * @lport: The local port which should be reset
990 *
991 * Locking Note: This function should not be called with the
992 * lport lock held.
993 */
994int fc_lport_reset(struct fc_lport *lport)
995{
996 cancel_delayed_work_sync(&lport->retry_work);
997 mutex_lock(&lport->lp_mutex);
998 fc_lport_enter_reset(lport);
999 mutex_unlock(&lport->lp_mutex);
1000 return 0;
1001}
1002EXPORT_SYMBOL(fc_lport_reset);
1003
1004/**
1005 * fc_lport_reset_locked() - Reset the local port w/ the lport lock held
1006 * @lport: The local port to be reset
1007 *
1008 * Locking Note: The lport lock is expected to be held before calling
1009 * this routine.
1010 */
1190d925 1011static void fc_lport_reset_locked(struct fc_lport *lport)
1012{
1013 if (lport->dns_rdata) {
1014 fc_rport_logoff(lport->dns_rdata);
1015 lport->dns_rdata = NULL;
1016 }
1017
1018 if (lport->ptp_rdata) {
1019 fc_rport_logoff(lport->ptp_rdata);
1020 kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
1021 lport->ptp_rdata = NULL;
1022 }
1023
1024 lport->tt.disc_stop(lport);
1025
1026 lport->tt.exch_mgr_reset(lport, 0, 0);
1027 fc_host_fabric_name(lport->host) = 0;
1028
1029 if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up))
1030 fc_lport_set_port_id(lport, 0, NULL);
1031}
1032
1033/**
1034 * fc_lport_enter_reset() - Reset the local port
1035 * @lport: The local port to be reset
1036 *
1037 * Locking Note: The lport lock is expected to be held before calling
1038 * this routine.
1039 */
1040static void fc_lport_enter_reset(struct fc_lport *lport)
1041{
1042 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
1043 fc_lport_state(lport));
1044
55a66d3c
VD
1045 if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
1046 return;
1047
8faecddb
CL
1048 if (lport->vport) {
1049 if (lport->link_up)
1050 fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
1051 else
1052 fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
1053 }
1054 fc_lport_state_enter(lport, LPORT_ST_RESET);
1055 fc_host_post_event(lport->host, fc_get_event_number(),
1056 FCH_EVT_LIPRESET, 0);
1057 fc_vports_linkchange(lport);
1058 fc_lport_reset_locked(lport);
1059 if (lport->link_up)
1060 fc_lport_enter_flogi(lport);
1061}
1062
1063/**
1064 * fc_lport_enter_disabled() - Disable the local port
1065 * @lport: The local port to be reset
1066 *
1067 * Locking Note: The lport lock is expected to be held before calling
1068 * this routine.
1069 */
1070static void fc_lport_enter_disabled(struct fc_lport *lport)
1071{
1072 FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
1073 fc_lport_state(lport));
1074
1075 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1076 fc_vports_linkchange(lport);
1077 fc_lport_reset_locked(lport);
1078}
1079
1080/**
1081 * fc_lport_error() - Handler for any errors
1082 * @lport: The local port that the error was on
1083 * @fp: The error code encoded in a frame pointer
1084 *
1085 * If the error was caused by a resource allocation failure
1086 * then wait for half a second and retry, otherwise retry
1087 * after the e_d_tov time.
1088 */
1089static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
1090{
1091 unsigned long delay = 0;
1092 FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
1093 IS_ERR(fp) ? -PTR_ERR(fp) : 0, fc_lport_state(lport),
1094 lport->retry_count);
1095
1096 if (PTR_ERR(fp) == -FC_EX_CLOSED)
1097 return;
1098
1099 /*
1100 * Memory allocation failure, or the exchange timed out
1101 * or we received LS_RJT.
1102 * Retry after delay
1103 */
1104 if (lport->retry_count < lport->max_retry_count) {
1105 lport->retry_count++;
1106 if (!fp)
1107 delay = msecs_to_jiffies(500);
1108 else
1109 delay = msecs_to_jiffies(lport->e_d_tov);
1110
1111 schedule_delayed_work(&lport->retry_work, delay);
1112 } else
1113 fc_lport_enter_reset(lport);
1114}
1115
1116/**
1117 * fc_lport_ns_resp() - Handle response to a name server
1118 * registration exchange
1119 * @sp: current sequence in exchange
1120 * @fp: response frame
1121 * @lp_arg: Fibre Channel host port instance
1122 *
1123 * Locking Note: This function will be called without the lport lock
1124 * held, but it will lock, call an _enter_* function or fc_lport_error()
1125 * and then unlock the lport.
1126 */
1127static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
1128 void *lp_arg)
1129{
1130 struct fc_lport *lport = lp_arg;
1131 struct fc_frame_header *fh;
1132 struct fc_ct_hdr *ct;
1133
1134 FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));
1135
1136 if (fp == ERR_PTR(-FC_EX_CLOSED))
1137 return;
1138
1139 mutex_lock(&lport->lp_mutex);
1140
1141 if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
1142 FC_LPORT_DBG(lport, "Received a name server response, "
1143 "but in state %s\n", fc_lport_state(lport));
1144 if (IS_ERR(fp))
1145 goto err;
1146 goto out;
1147 }
1148
1149 if (IS_ERR(fp)) {
1150 fc_lport_error(lport, fp);
1151 goto err;
1152 }
1153
1154 fh = fc_frame_header_get(fp);
1155 ct = fc_frame_payload_get(fp, sizeof(*ct));
1156
1157 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1158 ct->ct_fs_type == FC_FST_DIR &&
1159 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1160 ntohs(ct->ct_cmd) == FC_FS_ACC)
1161 switch (lport->state) {
1162 case LPORT_ST_RNN_ID:
1163 fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
1164 break;
1165 case LPORT_ST_RSNN_NN:
1166 fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
1167 break;
1168 case LPORT_ST_RSPN_ID:
1169 fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1170 break;
1171 case LPORT_ST_RFT_ID:
1172 fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
1173 break;
1174 case LPORT_ST_RFF_ID:
1175 if (lport->fdmi_enabled)
1176 fc_lport_enter_fdmi(lport);
1177 else
1178 fc_lport_enter_scr(lport);
1179 break;
1180 default:
1181 /* should have already been caught by state checks */
1182 break;
1183 }
1184 else
1185 fc_lport_error(lport, fp);
1186out:
1187 fc_frame_free(fp);
1188err:
1189 mutex_unlock(&lport->lp_mutex);
1190}
1191
1192/**
1193 * fc_lport_ms_resp() - Handle response to a management server
1194 * exchange
1195 * @sp: current sequence in exchange
1196 * @fp: response frame
1197 * @lp_arg: Fibre Channel host port instance
1198 *
1199 * Locking Note: This function will be called without the lport lock
1200 * held, but it will lock, call an _enter_* function or fc_lport_error()
1201 * and then unlock the lport.
1202 */
1203static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp,
1204 void *lp_arg)
1205{
1206 struct fc_lport *lport = lp_arg;
1207 struct fc_frame_header *fh;
1208 struct fc_ct_hdr *ct;
1209
1210 FC_LPORT_DBG(lport, "Received a ms %s\n", fc_els_resp_type(fp));
1211
1212 if (fp == ERR_PTR(-FC_EX_CLOSED))
1213 return;
1214
1215 mutex_lock(&lport->lp_mutex);
1216
1217 if (lport->state < LPORT_ST_RHBA || lport->state > LPORT_ST_DPRT) {
1218 FC_LPORT_DBG(lport, "Received a management server response, "
1219 "but in state %s\n", fc_lport_state(lport));
1220 if (IS_ERR(fp))
1221 goto err;
1222 goto out;
1223 }
1224
1225 if (IS_ERR(fp)) {
1226 fc_lport_error(lport, fp);
1227 goto err;
1228 }
1229
1230 fh = fc_frame_header_get(fp);
1231 ct = fc_frame_payload_get(fp, sizeof(*ct));
1232
1233 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1234 ct->ct_fs_type == FC_FST_MGMT &&
1235 ct->ct_fs_subtype == FC_FDMI_SUBTYPE) {
1236 FC_LPORT_DBG(lport, "Received a management server response, "
1237 "reason=%d explain=%d\n",
1238 ct->ct_reason,
1239 ct->ct_explan);
1240
1241 switch (lport->state) {
1242 case LPORT_ST_RHBA:
1243 if (ntohs(ct->ct_cmd) == FC_FS_ACC)
1244 fc_lport_enter_ms(lport, LPORT_ST_RPA);
1245 else /* Error Skip RPA */
1246 fc_lport_enter_scr(lport);
1247 break;
1248 case LPORT_ST_RPA:
1249 fc_lport_enter_scr(lport);
1250 break;
1251 case LPORT_ST_DPRT:
1252 fc_lport_enter_ms(lport, LPORT_ST_RHBA);
1253 break;
1254 case LPORT_ST_DHBA:
1255 fc_lport_enter_ms(lport, LPORT_ST_DPRT);
1256 break;
1257 default:
1258 /* should have already been caught by state checks */
1259 break;
1260 }
1261 } else {
1262 /* Invalid Frame? */
1263 fc_lport_error(lport, fp);
1264 }
1265out:
1266 fc_frame_free(fp);
1267err:
1268 mutex_unlock(&lport->lp_mutex);
1269}
1270
1271/**
1272 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
1273 * @sp: current sequence in SCR exchange
1274 * @fp: response frame
1275 * @lp_arg: Fibre Channel lport port instance that sent the registration request
1276 *
1277 * Locking Note: This function will be called without the lport lock
1278 * held, but it will lock, call an _enter_* function or fc_lport_error
1279 * and then unlock the lport.
1280 */
1281static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
1282 void *lp_arg)
1283{
1284 struct fc_lport *lport = lp_arg;
1285 u8 op;
1286
1287 FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));
1288
1289 if (fp == ERR_PTR(-FC_EX_CLOSED))
1290 return;
1291
1292 mutex_lock(&lport->lp_mutex);
1293
1294 if (lport->state != LPORT_ST_SCR) {
1295 FC_LPORT_DBG(lport, "Received a SCR response, but in state "
1296 "%s\n", fc_lport_state(lport));
1297 if (IS_ERR(fp))
1298 goto err;
1299 goto out;
1300 }
1301
1302 if (IS_ERR(fp)) {
1303 fc_lport_error(lport, fp);
1304 goto err;
1305 }
1306
1307 op = fc_frame_payload_op(fp);
1308 if (op == ELS_LS_ACC)
1309 fc_lport_enter_ready(lport);
1310 else
1311 fc_lport_error(lport, fp);
1312
1313out:
1314 fc_frame_free(fp);
1315err:
1316 mutex_unlock(&lport->lp_mutex);
1317}
1318
1319/**
1320 * fc_lport_enter_scr() - Send a SCR (State Change Register) request
1321 * @lport: The local port to register for state changes
1322 *
1323 * Locking Note: The lport lock is expected to be held before calling
1324 * this routine.
1325 */
1326static void fc_lport_enter_scr(struct fc_lport *lport)
1327{
1328 struct fc_frame *fp;
1329
1330 FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
1331 fc_lport_state(lport));
1332
1333 fc_lport_state_enter(lport, LPORT_ST_SCR);
1334
1335 fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
1336 if (!fp) {
1337 fc_lport_error(lport, fp);
1338 return;
1339 }
1340
1341 if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
1342 fc_lport_scr_resp, lport,
1343 2 * lport->r_a_tov))
1344 fc_lport_error(lport, NULL);
1345}
1346
1347/**
1348 * fc_lport_enter_ns() - register some object with the name server
1349 * @lport: Fibre Channel local port to register
1350 *
1351 * Locking Note: The lport lock is expected to be held before calling
1352 * this routine.
1353 */
1354static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
1355{
1356 struct fc_frame *fp;
1357 enum fc_ns_req cmd;
1358 int size = sizeof(struct fc_ct_hdr);
1359 size_t len;
1360
1361 FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
1362 fc_lport_state_names[state],
1363 fc_lport_state(lport));
1364
1365 fc_lport_state_enter(lport, state);
1366
1367 switch (state) {
1368 case LPORT_ST_RNN_ID:
1369 cmd = FC_NS_RNN_ID;
1370 size += sizeof(struct fc_ns_rn_id);
1371 break;
1372 case LPORT_ST_RSNN_NN:
1373 len = strnlen(fc_host_symbolic_name(lport->host), 255);
1374 /* if there is no symbolic name, skip to RFT_ID */
1375 if (!len)
1376 return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1377 cmd = FC_NS_RSNN_NN;
1378 size += sizeof(struct fc_ns_rsnn) + len;
1379 break;
1380 case LPORT_ST_RSPN_ID:
1381 len = strnlen(fc_host_symbolic_name(lport->host), 255);
1382 /* if there is no symbolic name, skip to RFT_ID */
1383 if (!len)
1384 return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1385 cmd = FC_NS_RSPN_ID;
1386 size += sizeof(struct fc_ns_rspn) + len;
1387 break;
1388 case LPORT_ST_RFT_ID:
1389 cmd = FC_NS_RFT_ID;
1390 size += sizeof(struct fc_ns_rft);
1391 break;
1392 case LPORT_ST_RFF_ID:
1393 cmd = FC_NS_RFF_ID;
1394 size += sizeof(struct fc_ns_rff_id);
1395 break;
1396 default:
1397 fc_lport_error(lport, NULL);
1398 return;
1399 }
1400
1401 fp = fc_frame_alloc(lport, size);
1402 if (!fp) {
1403 fc_lport_error(lport, fp);
1404 return;
1405 }
1406
1407 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
1408 fc_lport_ns_resp,
1409 lport, 3 * lport->r_a_tov))
1410 fc_lport_error(lport, fp);
1411}
1412
1413static struct fc_rport_operations fc_lport_rport_ops = {
1414 .event_callback = fc_lport_rport_callback,
1415};
1416
1417/**
1418 * fc_lport_enter_dns() - Create a fc_rport for the name server
1419 * @lport: The local port requesting a remote port for the name server
1420 *
1421 * Locking Note: The lport lock is expected to be held before calling
1422 * this routine.
1423 */
1424static void fc_lport_enter_dns(struct fc_lport *lport)
1425{
1426 struct fc_rport_priv *rdata;
1427
1428 FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
1429 fc_lport_state(lport));
1430
1431 fc_lport_state_enter(lport, LPORT_ST_DNS);
1432
1433 mutex_lock(&lport->disc.disc_mutex);
1434 rdata = fc_rport_create(lport, FC_FID_DIR_SERV);
1435 mutex_unlock(&lport->disc.disc_mutex);
1436 if (!rdata)
1437 goto err;
1438
1439 rdata->ops = &fc_lport_rport_ops;
1440 fc_rport_login(rdata);
1441 return;
1442
1443err:
1444 fc_lport_error(lport, NULL);
1445}
1446
1447/**
1448 * fc_lport_enter_ms() - management server commands
1449 * @lport: Fibre Channel local port to register
1450 *
1451 * Locking Note: The lport lock is expected to be held before calling
1452 * this routine.
1453 */
1454static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
1455{
1456 struct fc_frame *fp;
1457 enum fc_fdmi_req cmd;
1458 int size = sizeof(struct fc_ct_hdr);
1459 size_t len;
1460 int numattrs;
1461
1462 FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
1463 fc_lport_state_names[state],
1464 fc_lport_state(lport));
1465
1466 fc_lport_state_enter(lport, state);
1467
1468 switch (state) {
1469 case LPORT_ST_RHBA:
1470 cmd = FC_FDMI_RHBA;
1471 /* Number of HBA Attributes */
1472 numattrs = 10;
1473 len = sizeof(struct fc_fdmi_rhba);
1474 len -= sizeof(struct fc_fdmi_attr_entry);
1475 len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
1476 len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
1477 len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
1478 len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
1479 len += FC_FDMI_HBA_ATTR_MODEL_LEN;
1480 len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
1481 len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
1482 len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
1483 len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
1484 len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
1485 len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
1486
1487 size += len;
1488 break;
1489 case LPORT_ST_RPA:
1490 cmd = FC_FDMI_RPA;
1491 /* Number of Port Attributes */
1492 numattrs = 6;
1493 len = sizeof(struct fc_fdmi_rpa);
1494 len -= sizeof(struct fc_fdmi_attr_entry);
1495 len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
1496 len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
1497 len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
1498 len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
1499 len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
1500 len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
1501 len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
1502
1503 size += len;
1504 break;
1505 case LPORT_ST_DPRT:
1506 cmd = FC_FDMI_DPRT;
1507 len = sizeof(struct fc_fdmi_dprt);
1508 size += len;
1509 break;
1510 case LPORT_ST_DHBA:
1511 cmd = FC_FDMI_DHBA;
1512 len = sizeof(struct fc_fdmi_dhba);
1513 size += len;
1514 break;
1515 default:
1516 fc_lport_error(lport, NULL);
1517 return;
1518 }
1519
1520 FC_LPORT_DBG(lport, "Cmd=0x%x Len %d size %d\n",
1521 cmd, (int)len, size);
1522 fp = fc_frame_alloc(lport, size);
1523 if (!fp) {
1524 fc_lport_error(lport, fp);
1525 return;
1526 }
1527
1528 if (!lport->tt.elsct_send(lport, FC_FID_MGMT_SERV, fp, cmd,
1529 fc_lport_ms_resp,
1530 lport, 3 * lport->r_a_tov))
1531 fc_lport_error(lport, fp);
1532}
1533
1534/**
1535 * fc_lport_enter_fdmi() - Create a fc_rport for the management server
1536 * @lport: The local port requesting a remote port for the management server
1537 *
1538 * Locking Note: The lport lock is expected to be held before calling
1539 * this routine.
1540 */
1541static void fc_lport_enter_fdmi(struct fc_lport *lport)
1542{
1543 struct fc_rport_priv *rdata;
1544
1545 FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n",
1546 fc_lport_state(lport));
1547
1548 fc_lport_state_enter(lport, LPORT_ST_FDMI);
1549
1550 mutex_lock(&lport->disc.disc_mutex);
1551 rdata = fc_rport_create(lport, FC_FID_MGMT_SERV);
1552 mutex_unlock(&lport->disc.disc_mutex);
1553 if (!rdata)
1554 goto err;
1555
1556 rdata->ops = &fc_lport_rport_ops;
1557 fc_rport_login(rdata);
1558 return;
1559
1560err:
1561 fc_lport_error(lport, NULL);
1562}
1563
1564/**
1565 * fc_lport_timeout() - Handler for the retry_work timer
1566 * @work: The work struct of the local port
1567 */
1568static void fc_lport_timeout(struct work_struct *work)
1569{
1570 struct fc_lport *lport =
1571 container_of(work, struct fc_lport,
1572 retry_work.work);
1573
1574 mutex_lock(&lport->lp_mutex);
1575
1576 switch (lport->state) {
1577 case LPORT_ST_DISABLED:
1578 break;
1579 case LPORT_ST_READY:
1580 break;
1581 case LPORT_ST_RESET:
1582 break;
1583 case LPORT_ST_FLOGI:
1584 fc_lport_enter_flogi(lport);
1585 break;
1586 case LPORT_ST_DNS:
1587 fc_lport_enter_dns(lport);
1588 break;
1589 case LPORT_ST_RNN_ID:
1590 case LPORT_ST_RSNN_NN:
1591 case LPORT_ST_RSPN_ID:
1592 case LPORT_ST_RFT_ID:
1593 case LPORT_ST_RFF_ID:
1594 fc_lport_enter_ns(lport, lport->state);
1595 break;
1596 case LPORT_ST_FDMI:
1597 fc_lport_enter_fdmi(lport);
1598 break;
1599 case LPORT_ST_RHBA:
1600 case LPORT_ST_RPA:
1601 case LPORT_ST_DHBA:
1602 case LPORT_ST_DPRT:
1603 FC_LPORT_DBG(lport, "Skipping lport state %s to SCR\n",
1604 fc_lport_state(lport));
1605 /* fall thru */
1606 case LPORT_ST_SCR:
1607 fc_lport_enter_scr(lport);
1608 break;
1609 case LPORT_ST_LOGO:
1610 fc_lport_enter_logo(lport);
1611 break;
1612 }
1613
1614 mutex_unlock(&lport->lp_mutex);
1615}
1616
1617/**
1618 * fc_lport_logo_resp() - Handle response to LOGO request
1619 * @sp: The sequence that the LOGO was on
1620 * @fp: The LOGO frame
1621 * @lp_arg: The lport port that received the LOGO request
1622 *
1623 * Locking Note: This function will be called without the lport lock
1624 * held, but it will lock, call an _enter_* function or fc_lport_error()
1625 * and then unlock the lport.
1626 */
1627void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1628 void *lp_arg)
1629{
1630 struct fc_lport *lport = lp_arg;
1631 u8 op;
1632
1633 FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));
1634
1635 if (fp == ERR_PTR(-FC_EX_CLOSED))
1636 return;
1637
1638 mutex_lock(&lport->lp_mutex);
1639
1640 if (lport->state != LPORT_ST_LOGO) {
1641 FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
1642 "%s\n", fc_lport_state(lport));
1643 if (IS_ERR(fp))
1644 goto err;
1645 goto out;
1646 }
1647
1648 if (IS_ERR(fp)) {
1649 fc_lport_error(lport, fp);
1650 goto err;
1651 }
1652
1653 op = fc_frame_payload_op(fp);
1654 if (op == ELS_LS_ACC)
1655 fc_lport_enter_disabled(lport);
1656 else
1657 fc_lport_error(lport, fp);
1658
1659out:
1660 fc_frame_free(fp);
1661err:
1662 mutex_unlock(&lport->lp_mutex);
1663}
11b56188 1664EXPORT_SYMBOL(fc_lport_logo_resp);
1665
1666/**
1667 * fc_lport_enter_logo() - Logout of the fabric
1668 * @lport: The local port to be logged out
1669 *
1670 * Locking Note: The lport lock is expected to be held before calling
1671 * this routine.
1672 */
1673static void fc_lport_enter_logo(struct fc_lport *lport)
1674{
1675 struct fc_frame *fp;
1676 struct fc_els_logo *logo;
1677
1678 FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
1679 fc_lport_state(lport));
1680
1681 fc_lport_state_enter(lport, LPORT_ST_LOGO);
1682 fc_vports_linkchange(lport);
1683
1684 fp = fc_frame_alloc(lport, sizeof(*logo));
1685 if (!fp) {
1686 fc_lport_error(lport, fp);
1687 return;
1688 }
1689
1690 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
1691 fc_lport_logo_resp, lport,
1692 2 * lport->r_a_tov))
1693 fc_lport_error(lport, NULL);
1694}
1695
1696/**
1697 * fc_lport_flogi_resp() - Handle response to FLOGI request
1698 * @sp: The sequence that the FLOGI was on
1699 * @fp: The FLOGI response frame
1700 * @lp_arg: The lport port that received the FLOGI response
1701 *
1702 * Locking Note: This function will be called without the lport lock
1703 * held, but it will lock, call an _enter_* function or fc_lport_error()
1704 * and then unlock the lport.
1705 */
1706void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1707 void *lp_arg)
1708{
1709 struct fc_lport *lport = lp_arg;
1710 struct fc_frame_header *fh;
1711 struct fc_els_flogi *flp;
1712 u32 did;
1713 u16 csp_flags;
1714 unsigned int r_a_tov;
1715 unsigned int e_d_tov;
1716 u16 mfs;
1717
1718 FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));
1719
1720 if (fp == ERR_PTR(-FC_EX_CLOSED))
1721 return;
1722
1723 mutex_lock(&lport->lp_mutex);
1724
42e9a92f 1725 if (lport->state != LPORT_ST_FLOGI) {
7414705e
RL
1726 FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
1727 "%s\n", fc_lport_state(lport));
76f6804e
AJ
1728 if (IS_ERR(fp))
1729 goto err;
42e9a92f
RL
1730 goto out;
1731 }
1732
76f6804e
AJ
1733 if (IS_ERR(fp)) {
1734 fc_lport_error(lport, fp);
1735 goto err;
1736 }
1737
907c07d4 1738 fh = fc_frame_header_get(fp);
251748a9 1739 did = fc_frame_did(fp);
907c07d4
VD
1740 if (fh->fh_r_ctl != FC_RCTL_ELS_REP || did == 0 ||
1741 fc_frame_payload_op(fp) != ELS_LS_ACC) {
1742 FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
7f985231 1743 fc_lport_error(lport, fp);
907c07d4
VD
1744 goto err;
1745 }
1746
1747 flp = fc_frame_payload_get(fp, sizeof(*flp));
1748 if (!flp) {
1749 FC_LPORT_DBG(lport, "FLOGI bad response\n");
1750 fc_lport_error(lport, fp);
1751 goto err;
1752 }
1753
1754 mfs = ntohs(flp->fl_csp.sp_bb_data) &
1755 FC_SP_BB_DATA_MASK;
93f90e51
VD
1756
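	/*
	 * Reject an out-of-range receive data field size before possibly
	 * lowering our own MFS to match the fabric's value below.
	 */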
1757 if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
73d67aa4
VD
1758 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
1759 "lport->mfs:%hu\n", mfs, lport->mfs);
1760 fc_lport_error(lport, fp);
1761 goto err;
1762 }
1763
93f90e51
VD
1764 if (mfs <= lport->mfs) {
1765 lport->mfs = mfs;
1766 fc_host_maxframe_size(lport->host) = mfs;
1767 }
1768
907c07d4
VD
1769 csp_flags = ntohs(flp->fl_csp.sp_features);
1770 r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
1771 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
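	/*
	 * FC_SP_FT_EDTR means E_D_TOV was reported in nanoseconds;
	 * convert it to the millisecond units libfc uses for its timers.
	 */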
1772 if (csp_flags & FC_SP_FT_EDTR)
1773 e_d_tov /= 1000000;
1774
1775 lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
1776
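	/*
	 * If the responder did not set the F_Port bit we are connected
	 * directly to another N_Port: derive R_A_TOV from E_D_TOV and set
	 * up a point-to-point session.  Otherwise a fabric answered, so
	 * record its timeouts and fabric name and continue with the
	 * directory server (dNS) login.
	 */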
1777 if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1778 if (e_d_tov > lport->e_d_tov)
1779 lport->e_d_tov = e_d_tov;
76e72ad1 1780 lport->r_a_tov = 2 * lport->e_d_tov;
907c07d4
VD
1781 fc_lport_set_port_id(lport, did, fp);
1782 printk(KERN_INFO "host%d: libfc: "
1783 "Port (%6.6x) entered "
1784 "point-to-point mode\n",
1785 lport->host->host_no, did);
1786 fc_lport_ptp_setup(lport, fc_frame_sid(fp),
1787 get_unaligned_be64(
1788 &flp->fl_wwpn),
1789 get_unaligned_be64(
1790 &flp->fl_wwnn));
1791 } else {
76e72ad1
HR
1792 if (e_d_tov > lport->e_d_tov)
1793 lport->e_d_tov = e_d_tov;
1794 if (r_a_tov > lport->r_a_tov)
1795 lport->r_a_tov = r_a_tov;
907c07d4
VD
1796 fc_host_fabric_name(lport->host) =
1797 get_unaligned_be64(&flp->fl_wwnn);
1798 fc_lport_set_port_id(lport, did, fp);
1799 fc_lport_enter_dns(lport);
60a3c4df 1800 }
42e9a92f
RL
1801
1802out:
1803 fc_frame_free(fp);
1804err:
1805 mutex_unlock(&lport->lp_mutex);
1806}
11b56188 1807EXPORT_SYMBOL(fc_lport_flogi_resp);
42e9a92f
RL
1808
1809/**
34f42a07 1810 * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
42e9a92f
RL
1811 * @lport: Fibre Channel local port to be logged in to the fabric
1812 *
1813 * Locking Note: The lport lock is expected to be held before calling
1814 * this routine.
1815 */
c6b21c93 1816static void fc_lport_enter_flogi(struct fc_lport *lport)
42e9a92f
RL
1817{
1818 struct fc_frame *fp;
1819
7414705e
RL
1820 FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
1821 fc_lport_state(lport));
42e9a92f
RL
1822
1823 fc_lport_state_enter(lport, LPORT_ST_FLOGI);
1824
3726f358
JE
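	/*
	 * In point-to-multipoint mode there is no fabric to log in to;
	 * the port is ready as soon as it has been assigned a port ID.
	 */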
1825 if (lport->point_to_multipoint) {
1826 if (lport->port_id)
1827 fc_lport_enter_ready(lport);
1828 return;
1829 }
1830
42e9a92f
RL
1831 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
1832 if (!fp)
1833 return fc_lport_error(lport, fp);
1834
db36c06c
CL
1835 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
1836 lport->vport ? ELS_FDISC : ELS_FLOGI,
b94f8951
JE
1837 fc_lport_flogi_resp, lport,
1838 lport->vport ? 2 * lport->r_a_tov :
1839 lport->e_d_tov))
8f550f93 1840 fc_lport_error(lport, NULL);
42e9a92f
RL
1841}
1842
3a3b42bf
RL
1843/**
1844 * fc_lport_config() - Configure a fc_lport
1845 * @lport: The local port to be configured
1846 */
42e9a92f
RL
1847int fc_lport_config(struct fc_lport *lport)
1848{
1849 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
1850 mutex_init(&lport->lp_mutex);
1851
b1d9fd55 1852 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
42e9a92f
RL
1853
1854 fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
1855 fc_lport_add_fc4_type(lport, FC_TYPE_CT);
acc1a921 1856 fc_fc4_conf_lport_params(lport, FC_TYPE_FCP);
42e9a92f
RL
1857
1858 return 0;
1859}
1860EXPORT_SYMBOL(fc_lport_config);
1861
3a3b42bf
RL
1862/**
1863 * fc_lport_init() - Initialize the lport layer for a local port
1864 * @lport: The local port to initialize the exchange layer for
1865 */
42e9a92f
RL
1866int fc_lport_init(struct fc_lport *lport)
1867{
42e9a92f
RL
1868 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1869 fc_host_node_name(lport->host) = lport->wwnn;
1870 fc_host_port_name(lport->host) = lport->wwpn;
1871 fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
1872 memset(fc_host_supported_fc4s(lport->host), 0,
1873 sizeof(fc_host_supported_fc4s(lport->host)));
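	/*
	 * FC-4 TYPEs bitmap, big-endian words: bit 0 of byte 2 is
	 * type 8 (FCP), bit 0 of byte 7 is type 0x20 (CT).
	 */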
1874 fc_host_supported_fc4s(lport->host)[2] = 1;
1875 fc_host_supported_fc4s(lport->host)[7] = 1;
1876
1877 /* This value is also unchanging */
1878 memset(fc_host_active_fc4s(lport->host), 0,
1879 sizeof(fc_host_active_fc4s(lport->host)));
1880 fc_host_active_fc4s(lport->host)[2] = 1;
1881 fc_host_active_fc4s(lport->host)[7] = 1;
1882 fc_host_maxframe_size(lport->host) = lport->mfs;
1883 fc_host_supported_speeds(lport->host) = 0;
1884 if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
1885 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
1886 if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
1887 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
70d53b04 1888 fc_fc4_add_lport(lport);
42e9a92f
RL
1889
1890 return 0;
1891}
1892EXPORT_SYMBOL(fc_lport_init);
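/*
 * A minimal sketch, not taken from any in-tree LLD, of how a hypothetical
 * driver might use the two helpers above: fc_lport_config() sets up the
 * mutex, retry work and FC-4 defaults, the WWNs and MFS are filled in, and
 * fc_lport_init() then publishes them through the fc_host attributes.  The
 * example_* name and the exact call order are assumptions; fc_set_wwnn(),
 * fc_set_wwpn() and fc_set_mfs() are the usual <scsi/libfc.h> helpers.
 */
#if 0
static int example_lld_lport_setup(struct fc_lport *lport,
				   u64 wwnn, u64 wwpn, u32 mfs)
{
	int rc;

	rc = fc_lport_config(lport);	/* lp_mutex, retry_work, FC-4 types */
	if (rc)
		return rc;

	fc_set_wwnn(lport, wwnn);	/* node name read by fc_lport_init() */
	fc_set_wwpn(lport, wwpn);	/* port name read by fc_lport_init() */
	if (fc_set_mfs(lport, mfs))	/* rejects out-of-range frame sizes */
		return -EINVAL;

	return fc_lport_init(lport);	/* fc_host attrs + FC-4 registration */
}
#endif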
a51ab396
SM
1893
1894/**
3a3b42bf
RL
1895 * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
1896 * @sp: The sequence for the FC Passthrough response
1897 * @fp: The response frame
1898 * @info_arg: The BSG info that the response is for
a51ab396
SM
1899 */
1900static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
1901 void *info_arg)
1902{
1903 struct fc_bsg_info *info = info_arg;
75cc8cfc 1904 struct bsg_job *job = info->job;
01e0e15c 1905 struct fc_bsg_reply *bsg_reply = job->reply;
a51ab396
SM
1906 struct fc_lport *lport = info->lport;
1907 struct fc_frame_header *fh;
1908 size_t len;
1909 void *buf;
1910
1911 if (IS_ERR(fp)) {
01e0e15c 1912 bsg_reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
a51ab396
SM
1913 -ECONNABORTED : -ETIMEDOUT;
1914 job->reply_len = sizeof(uint32_t);
06548160 1915 bsg_job_done(job, bsg_reply->result,
1abaede7 1916 bsg_reply->reply_payload_rcv_len);
a51ab396
SM
1917 kfree(info);
1918 return;
1919 }
1920
1921 mutex_lock(&lport->lp_mutex);
1922 fh = fc_frame_header_get(fp);
1923 len = fr_len(fp) - sizeof(*fh);
1924 buf = fc_frame_payload_get(fp, 0);
1925
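	/*
	 * A CT or ELS response may span several frames; only the first
	 * one (SOFi3, sequence count zero) carries the command code that
	 * is compared against the expected accept value.
	 */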
1926 if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
1927 /* Get the response code from the first frame payload */
1928 unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
1929 ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
1930 (unsigned short)fc_frame_payload_op(fp);
1931
1932 /* Save the reply status of the job */
01e0e15c 1933 bsg_reply->reply_data.ctels_reply.status =
a51ab396
SM
1934 (cmd == info->rsp_code) ?
1935 FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
1936 }
1937
01e0e15c 1938 bsg_reply->reply_payload_rcv_len +=
a51ab396 1939 fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
77dfce07 1940 &info->offset, NULL);
a51ab396
SM
1941
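	/*
	 * Complete the BSG job only on the final frame of the sequence:
	 * EOFt with both last-sequence and end-of-sequence F_CTL bits set.
	 */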
1942 if (fr_eof(fp) == FC_EOF_T &&
1943 (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1944 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
01e0e15c 1945 if (bsg_reply->reply_payload_rcv_len >
a51ab396 1946 job->reply_payload.payload_len)
01e0e15c 1947 bsg_reply->reply_payload_rcv_len =
a51ab396 1948 job->reply_payload.payload_len;
01e0e15c 1949 bsg_reply->result = 0;
06548160 1950 bsg_job_done(job, bsg_reply->result,
1abaede7 1951 bsg_reply->reply_payload_rcv_len);
a51ab396
SM
1952 kfree(info);
1953 }
1954 fc_frame_free(fp);
1955 mutex_unlock(&lport->lp_mutex);
1956}
1957
1958/**
3a3b42bf
RL
1959 * fc_lport_els_request() - Send ELS passthrough request
1960 * @job: The BSG Passthrough job
a51ab396 1961 * @lport: The local port sending the request
3a3b42bf 1962 * @did: The destination port ID
 * @tov: The timeout period to wait for the response
a51ab396
SM
1963 *
1964 * Locking Note: The lport lock is expected to be held before calling
1965 * this routine.
1966 */
75cc8cfc 1967static int fc_lport_els_request(struct bsg_job *job,
a51ab396
SM
1968 struct fc_lport *lport,
1969 u32 did, u32 tov)
1970{
1971 struct fc_bsg_info *info;
1972 struct fc_frame *fp;
1973 struct fc_frame_header *fh;
1974 char *pp;
1975 int len;
1976
70d919fb 1977 fp = fc_frame_alloc(lport, job->request_payload.payload_len);
a51ab396
SM
1978 if (!fp)
1979 return -ENOMEM;
1980
1981 len = job->request_payload.payload_len;
1982 pp = fc_frame_payload_get(fp, len);
1983
1984 sg_copy_to_buffer(job->request_payload.sg_list,
1985 job->request_payload.sg_cnt,
1986 pp, len);
1987
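	/*
	 * Build the FC header by hand: an ELS request from our port ID to
	 * the caller-supplied D_ID with the standard request F_CTL bits.
	 */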
1988 fh = fc_frame_header_get(fp);
1989 fh->fh_r_ctl = FC_RCTL_ELS_REQ;
1990 hton24(fh->fh_d_id, did);
7b2787ec 1991 hton24(fh->fh_s_id, lport->port_id);
a51ab396 1992 fh->fh_type = FC_TYPE_ELS;
24f089e2 1993 hton24(fh->fh_f_ctl, FC_FCTL_REQ);
a51ab396
SM
1994 fh->fh_cs_ctl = 0;
1995 fh->fh_df_ctl = 0;
1996 fh->fh_parm_offset = 0;
1997
1998 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
1999 if (!info) {
2000 fc_frame_free(fp);
2001 return -ENOMEM;
2002 }
2003
2004 info->job = job;
2005 info->lport = lport;
2006 info->rsp_code = ELS_LS_ACC;
2007 info->nents = job->reply_payload.sg_cnt;
2008 info->sg = job->reply_payload.sg_list;
2009
3afd2d15
HR
2010 if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
2011 NULL, info, tov)) {
72e0daad 2012 kfree(info);
a51ab396 2013 return -ECOMM;
72e0daad 2014 }
a51ab396
SM
2015 return 0;
2016}
2017
2018/**
3a3b42bf
RL
2019 * fc_lport_ct_request() - Send CT Passthrough request
2020 * @job: The BSG Passthrough job
a51ab396
SM
2021 * @lport: The local port sending the request
2022 * @did: The destination FC-ID
3a3b42bf 2023 * @tov: The timeout period to wait for the response
a51ab396
SM
2024 *
2025 * Locking Note: The lport lock is expected to be held before calling
2026 * this routine.
2027 */
75cc8cfc 2028static int fc_lport_ct_request(struct bsg_job *job,
a51ab396
SM
2029 struct fc_lport *lport, u32 did, u32 tov)
2030{
2031 struct fc_bsg_info *info;
2032 struct fc_frame *fp;
2033 struct fc_frame_header *fh;
2034 struct fc_ct_req *ct;
2035 size_t len;
2036
2037 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
2038 job->request_payload.payload_len);
2039 if (!fp)
2040 return -ENOMEM;
2041
2042 len = job->request_payload.payload_len;
2043 ct = fc_frame_payload_get(fp, len);
2044
2045 sg_copy_to_buffer(job->request_payload.sg_list,
2046 job->request_payload.sg_cnt,
2047 ct, len);
2048
2049 fh = fc_frame_header_get(fp);
2050 fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
2051 hton24(fh->fh_d_id, did);
7b2787ec 2052 hton24(fh->fh_s_id, lport->port_id);
a51ab396 2053 fh->fh_type = FC_TYPE_CT;
24f089e2 2054 hton24(fh->fh_f_ctl, FC_FCTL_REQ);
a51ab396
SM
2055 fh->fh_cs_ctl = 0;
2056 fh->fh_df_ctl = 0;
2057 fh->fh_parm_offset = 0;
2058
2059 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
2060 if (!info) {
2061 fc_frame_free(fp);
2062 return -ENOMEM;
2063 }
2064
2065 info->job = job;
2066 info->lport = lport;
2067 info->rsp_code = FC_FS_ACC;
2068 info->nents = job->reply_payload.sg_cnt;
2069 info->sg = job->reply_payload.sg_list;
2070
3afd2d15
HR
2071 if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
2072 NULL, info, tov)) {
2d6dfb00 2073 kfree(info);
a51ab396 2074 return -ECOMM;
2d6dfb00 2075 }
a51ab396
SM
2076 return 0;
2077}
2078
2079/**
2080 * fc_lport_bsg_request() - The common entry point for sending
3a3b42bf
RL
2081 * FC Passthrough requests
2082 * @job: The BSG passthrough job
a51ab396 2083 */
75cc8cfc 2084int fc_lport_bsg_request(struct bsg_job *job)
a51ab396 2085{
01e0e15c
JT
2086 struct fc_bsg_request *bsg_request = job->request;
2087 struct fc_bsg_reply *bsg_reply = job->reply;
a51ab396 2088 struct request *rsp = job->req->next_rq;
cd21c605 2089 struct Scsi_Host *shost = fc_bsg_to_shost(job);
a51ab396
SM
2090 struct fc_lport *lport = shost_priv(shost);
2091 struct fc_rport *rport;
2092 struct fc_rport_priv *rdata;
2093 int rc = -EINVAL;
baa6719f 2094 u32 did, tov;
a51ab396 2095
01e0e15c 2096 bsg_reply->reply_payload_rcv_len = 0;
b248df30
HD
2097 if (rsp)
2098 rsp->resid_len = job->reply_payload.payload_len;
a51ab396
SM
2099
2100 mutex_lock(&lport->lp_mutex);
2101
01e0e15c 2102 switch (bsg_request->msgcode) {
a51ab396 2103 case FC_BSG_RPT_ELS:
1d69b122 2104 rport = fc_bsg_to_rport(job);
a51ab396
SM
2105 if (!rport)
2106 break;
2107
2108 rdata = rport->dd_data;
2109 rc = fc_lport_els_request(job, lport, rport->port_id,
2110 rdata->e_d_tov);
2111 break;
2112
2113 case FC_BSG_RPT_CT:
1d69b122 2114 rport = fc_bsg_to_rport(job);
a51ab396
SM
2115 if (!rport)
2116 break;
2117
2118 rdata = rport->dd_data;
2119 rc = fc_lport_ct_request(job, lport, rport->port_id,
2120 rdata->e_d_tov);
2121 break;
2122
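	/*
	 * Host-directed CT: use the directory server session's E_D_TOV
	 * when addressing the dNS, otherwise look up the remote port for
	 * its timeout and drop the temporary reference before sending.
	 */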
2123 case FC_BSG_HST_CT:
01e0e15c 2124 did = ntoh24(bsg_request->rqst_data.h_ct.port_id);
baa6719f 2125 if (did == FC_FID_DIR_SERV) {
3a3b42bf 2126 rdata = lport->dns_rdata;
baa6719f
HR
2127 if (!rdata)
2128 break;
2129 tov = rdata->e_d_tov;
2130 } else {
e87b7777 2131 rdata = fc_rport_lookup(lport, did);
baa6719f
HR
2132 if (!rdata)
2133 break;
2134 tov = rdata->e_d_tov;
944ef968 2135 kref_put(&rdata->kref, fc_rport_destroy);
baa6719f 2136 }
a51ab396 2137
baa6719f 2138 rc = fc_lport_ct_request(job, lport, did, tov);
a51ab396
SM
2139 break;
2140
2141 case FC_BSG_HST_ELS_NOLOGIN:
01e0e15c 2142 did = ntoh24(bsg_request->rqst_data.h_els.port_id);
a51ab396
SM
2143 rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
2144 break;
2145 }
2146
2147 mutex_unlock(&lport->lp_mutex);
2148 return rc;
2149}
2150EXPORT_SYMBOL(fc_lport_bsg_request);