drivers/scsi/libfc/fc_lport.c
1 /*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20 /*
21 * PORT LOCKING NOTES
22 *
23 * These comments only apply to the 'port code' which consists of the lport,
24 * disc and rport blocks.
25 *
26 * MOTIVATION
27 *
28 * The lport, disc and rport blocks all have mutexes that are used to protect
29 * those objects. The main motivation for these locks is to prevent the
30 * lport from being reset just before we send a frame. In that scenario the
31 * lport's FID would get set to zero and then we'd send a frame with an
32 * invalid SID. We also need to ensure that states don't change unexpectedly
33 * while processing another state.
34 *
35 * HIERARCHY
36 *
37 * The following hierarchy defines the locking rules. A greater lock
38 * may be held before acquiring a lesser lock, but a lesser lock should never
39 * be held while attempting to acquire a greater lock. Here is the hierarchy-
40 *
41 * lport > disc, lport > rport, disc > rport
42 *
43 * CALLBACKS
44 *
45 * The callbacks cause complications with this scheme. There is a callback
46 * from the rport (to either lport or disc) and a callback from disc
47 * (to the lport).
48 *
49 * As rports exit the rport state machine a callback is made to the owner of
50 * the rport to notify success or failure. Since the callback is likely to
51 * cause the lport or disc to grab its lock we cannot hold the rport lock
52 * while making the callback. To ensure that the rport is not free'd while
53 * processing the callback the rport callbacks are serialized through a
54 * single-threaded workqueue. An rport would never be free'd while in a
55 * callback handler because no other rport work in this queue can be executed
56 * at the same time.
57 *
58 * When discovery succeeds or fails a callback is made to the lport as
59 * notification. Currently, successful discovery causes the lport to take no
60 * action. A failure will cause the lport to reset. There is likely a circular
61 * locking problem with this implementation.
62 */
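63
/*
 * Illustrative sketch only (not part of the driver): the hierarchy above
 * means that code needing both the lport and disc mutexes must take the
 * lport mutex first, e.g.:
 *
 *	mutex_lock(&lport->lp_mutex);		(greater lock first)
 *	mutex_lock(&lport->disc.disc_mutex);	(then the lesser lock)
 *	...					(touch lport and disc state)
 *	mutex_unlock(&lport->disc.disc_mutex);
 *	mutex_unlock(&lport->lp_mutex);
 *
 * Taking disc_mutex (or an rport mutex) first and then lp_mutex would
 * invert the hierarchy and risk deadlock.
 */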
63
64 /*
65 * LPORT LOCKING
66 *
67 * The critical sections protected by the lport's mutex are quite broad and
68 * may be improved upon in the future. The lport code and its locking don't
69 * influence the I/O path, so excessive locking doesn't penalize I/O
70 * performance.
71 *
72 * The strategy is to lock whenever processing a request or response. Note
73 * that every _enter_* function corresponds to a state change. They generally
74 * change the lport's state and then send a request out on the wire. We lock
75 * before calling any of these functions to protect that state change. This
76 * means that the entry points into the lport block manage the locks, so the
77 * state machine can transition between states (i.e. the _enter_* functions)
78 * while always staying protected.
79 *
80 * When handling responses we also hold the lport mutex broadly. When the
81 * lport receives the response frame it locks the mutex and then calls the
82 * appropriate handler for the particular response. Generally a response will
83 * trigger a state change and so the lock must already be held.
84 *
85 * Retries also have to consider the locking. The retries occur from a work
86 * context and the work function will lock the lport and then retry the state
87 * (i.e. _enter_* function).
88 */
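89
/*
 * Illustrative sketch only: the "lock, then transition" strategy above is
 * the shape every entry point in this file follows.  A hypothetical
 * handler (example_handle_request is not a real function) would look
 * roughly like:
 *
 *	static void example_handle_request(struct fc_lport *lport,
 *					   struct fc_frame *fp)
 *	{
 *		mutex_lock(&lport->lp_mutex);	(entry point takes the lock)
 *		fc_lport_enter_flogi(lport);	(_enter_* changes state and
 *						 sends a frame, lock held)
 *		mutex_unlock(&lport->lp_mutex);
 *		fc_frame_free(fp);
 *	}
 *
 * The real entry points (fc_lport_recv_els_req(), fc_lport_timeout() and
 * the response handlers) all follow this pattern.
 */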
89
90 #include <linux/timer.h>
91 #include <linux/delay.h>
92 #include <linux/module.h>
93 #include <linux/slab.h>
94 #include <asm/unaligned.h>
95
96 #include <scsi/fc/fc_gs.h>
97
98 #include <scsi/libfc.h>
99 #include <scsi/fc_encode.h>
100 #include <linux/scatterlist.h>
101
102 #include "fc_libfc.h"
103
104 /* Fabric IDs to use for point-to-point mode, chosen on whims. */
105 #define FC_LOCAL_PTP_FID_LO 0x010101
106 #define FC_LOCAL_PTP_FID_HI 0x010102
107
108 #define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
109
110 static void fc_lport_error(struct fc_lport *, struct fc_frame *);
111
112 static void fc_lport_enter_reset(struct fc_lport *);
113 static void fc_lport_enter_flogi(struct fc_lport *);
114 static void fc_lport_enter_dns(struct fc_lport *);
115 static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
116 static void fc_lport_enter_scr(struct fc_lport *);
117 static void fc_lport_enter_ready(struct fc_lport *);
118 static void fc_lport_enter_logo(struct fc_lport *);
119 static void fc_lport_enter_fdmi(struct fc_lport *lport);
120 static void fc_lport_enter_ms(struct fc_lport *, enum fc_lport_state);
121
122 static const char *fc_lport_state_names[] = {
123 [LPORT_ST_DISABLED] = "disabled",
124 [LPORT_ST_FLOGI] = "FLOGI",
125 [LPORT_ST_DNS] = "dNS",
126 [LPORT_ST_RNN_ID] = "RNN_ID",
127 [LPORT_ST_RSNN_NN] = "RSNN_NN",
128 [LPORT_ST_RSPN_ID] = "RSPN_ID",
129 [LPORT_ST_RFT_ID] = "RFT_ID",
130 [LPORT_ST_RFF_ID] = "RFF_ID",
131 [LPORT_ST_FDMI] = "FDMI",
132 [LPORT_ST_RHBA] = "RHBA",
133 [LPORT_ST_RPA] = "RPA",
134 [LPORT_ST_DHBA] = "DHBA",
135 [LPORT_ST_DPRT] = "DPRT",
136 [LPORT_ST_SCR] = "SCR",
137 [LPORT_ST_READY] = "Ready",
138 [LPORT_ST_LOGO] = "LOGO",
139 [LPORT_ST_RESET] = "reset",
140 };
141
142 /**
143 * struct fc_bsg_info - FC Passthrough management structure
144 * @job: The passthrough job
145 * @lport: The local port to pass through a command
146 * @rsp_code: The expected response code
147 * @sg: job->reply_payload.sg_list
148 * @nents: job->reply_payload.sg_cnt
149 * @offset: The offset into the response data
150 */
151 struct fc_bsg_info {
152 struct bsg_job *job;
153 struct fc_lport *lport;
154 u16 rsp_code;
155 struct scatterlist *sg;
156 u32 nents;
157 size_t offset;
158 };
159
160 /**
161 * fc_frame_drop() - Dummy frame handler
162 * @lport: The local port the frame was received on
163 * @fp: The received frame
164 */
165 static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
166 {
167 fc_frame_free(fp);
168 return 0;
169 }
170
171 /**
172 * fc_lport_rport_callback() - Event handler for rport events
173 * @lport: The lport which is receiving the event
174 * @rdata: private remote port data
175 * @event: The event that occurred
176 *
177 * Locking Note: The rport lock should not be held when calling
178 * this function.
179 */
180 static void fc_lport_rport_callback(struct fc_lport *lport,
181 struct fc_rport_priv *rdata,
182 enum fc_rport_event event)
183 {
184 FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
185 rdata->ids.port_id);
186
187 mutex_lock(&lport->lp_mutex);
188 switch (event) {
189 case RPORT_EV_READY:
190 if (lport->state == LPORT_ST_DNS) {
191 lport->dns_rdata = rdata;
192 fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
193 } else if (lport->state == LPORT_ST_FDMI) {
194 lport->ms_rdata = rdata;
195 fc_lport_enter_ms(lport, LPORT_ST_DHBA);
196 } else {
197 FC_LPORT_DBG(lport, "Received a READY event "
198 "on port (%6.6x) for the directory "
199 "server, but the lport is not "
200 "in the DNS or FDMI state, it's in the "
201 "%d state", rdata->ids.port_id,
202 lport->state);
203 fc_rport_logoff(rdata);
204 }
205 break;
206 case RPORT_EV_LOGO:
207 case RPORT_EV_FAILED:
208 case RPORT_EV_STOP:
209 if (rdata->ids.port_id == FC_FID_DIR_SERV)
210 lport->dns_rdata = NULL;
211 else if (rdata->ids.port_id == FC_FID_MGMT_SERV)
212 lport->ms_rdata = NULL;
213 break;
214 case RPORT_EV_NONE:
215 break;
216 }
217 mutex_unlock(&lport->lp_mutex);
218 }
219
220 /**
221 * fc_lport_state() - Return a string which represents the lport's state
222 * @lport: The lport whose state is to be converted to a string
223 */
224 static const char *fc_lport_state(struct fc_lport *lport)
225 {
226 const char *cp;
227
228 cp = fc_lport_state_names[lport->state];
229 if (!cp)
230 cp = "unknown";
231 return cp;
232 }
233
234 /**
235 * fc_lport_ptp_setup() - Create an rport for point-to-point mode
236 * @lport: The lport to attach the ptp rport to
237 * @remote_fid: The FID of the ptp rport
238 * @remote_wwpn: The WWPN of the ptp rport
239 * @remote_wwnn: The WWNN of the ptp rport
240 *
241 * Locking Note: The lport lock is expected to be held before calling
242 * this routine.
243 */
244 static void fc_lport_ptp_setup(struct fc_lport *lport,
245 u32 remote_fid, u64 remote_wwpn,
246 u64 remote_wwnn)
247 {
248 if (lport->ptp_rdata) {
249 fc_rport_logoff(lport->ptp_rdata);
250 kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
251 }
252 mutex_lock(&lport->disc.disc_mutex);
253 lport->ptp_rdata = fc_rport_create(lport, remote_fid);
254 kref_get(&lport->ptp_rdata->kref);
255 lport->ptp_rdata->ids.port_name = remote_wwpn;
256 lport->ptp_rdata->ids.node_name = remote_wwnn;
257 mutex_unlock(&lport->disc.disc_mutex);
258
259 fc_rport_login(lport->ptp_rdata);
260
261 fc_lport_enter_ready(lport);
262 }
263
264 /**
265 * fc_get_host_port_state() - Return the port state of the given Scsi_Host
266 * @shost: The SCSI host whose port state is to be determined
267 */
268 void fc_get_host_port_state(struct Scsi_Host *shost)
269 {
270 struct fc_lport *lport = shost_priv(shost);
271
272 mutex_lock(&lport->lp_mutex);
273 if (!lport->link_up)
274 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
275 else
276 switch (lport->state) {
277 case LPORT_ST_READY:
278 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
279 break;
280 default:
281 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
282 }
283 mutex_unlock(&lport->lp_mutex);
284 }
285 EXPORT_SYMBOL(fc_get_host_port_state);
286
287 /**
288 * fc_get_host_speed() - Return the speed of the given Scsi_Host
289 * @shost: The SCSI host whose port speed is to be determined
290 */
291 void fc_get_host_speed(struct Scsi_Host *shost)
292 {
293 struct fc_lport *lport = shost_priv(shost);
294
295 fc_host_speed(shost) = lport->link_speed;
296 }
297 EXPORT_SYMBOL(fc_get_host_speed);
298
299 /**
300 * fc_get_host_stats() - Return the Scsi_Host's statistics
301 * @shost: The SCSI host whose statistics are to be returned
302 */
303 struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
304 {
305 struct fc_host_statistics *fc_stats;
306 struct fc_lport *lport = shost_priv(shost);
307 unsigned int cpu;
308 u64 fcp_in_bytes = 0;
309 u64 fcp_out_bytes = 0;
310
311 fc_stats = &lport->host_stats;
312 memset(fc_stats, 0, sizeof(struct fc_host_statistics));
313
314 fc_stats->seconds_since_last_reset = (jiffies - lport->boot_time) / HZ;
315
316 for_each_possible_cpu(cpu) {
317 struct fc_stats *stats;
318
319 stats = per_cpu_ptr(lport->stats, cpu);
320
321 fc_stats->tx_frames += stats->TxFrames;
322 fc_stats->tx_words += stats->TxWords;
323 fc_stats->rx_frames += stats->RxFrames;
324 fc_stats->rx_words += stats->RxWords;
325 fc_stats->error_frames += stats->ErrorFrames;
326 fc_stats->invalid_crc_count += stats->InvalidCRCCount;
327 fc_stats->fcp_input_requests += stats->InputRequests;
328 fc_stats->fcp_output_requests += stats->OutputRequests;
329 fc_stats->fcp_control_requests += stats->ControlRequests;
330 fcp_in_bytes += stats->InputBytes;
331 fcp_out_bytes += stats->OutputBytes;
332 fc_stats->fcp_packet_alloc_failures += stats->FcpPktAllocFails;
333 fc_stats->fcp_packet_aborts += stats->FcpPktAborts;
334 fc_stats->fcp_frame_alloc_failures += stats->FcpFrameAllocFails;
335 fc_stats->link_failure_count += stats->LinkFailureCount;
336 }
337 fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
338 fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
339 fc_stats->lip_count = -1;
340 fc_stats->nos_count = -1;
341 fc_stats->loss_of_sync_count = -1;
342 fc_stats->loss_of_signal_count = -1;
343 fc_stats->prim_seq_protocol_err_count = -1;
344 fc_stats->dumped_frames = -1;
345
346 /* update exchange manager stats */
347 fc_exch_update_stats(lport);
348
349 return fc_stats;
350 }
351 EXPORT_SYMBOL(fc_get_host_stats);
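/*
 * Illustrative sketch only: fc_get_host_port_state(), fc_get_host_speed()
 * and fc_get_host_stats() are meant to be wired into an LLD's
 * struct fc_function_template for the FC transport class.  Field names
 * below are from scsi_transport_fc.h; the rest of the template is omitted:
 *
 *	static struct fc_function_template example_fc_functions = {
 *		.get_host_port_state	= fc_get_host_port_state,
 *		.show_host_port_state	= 1,
 *		.get_host_speed		= fc_get_host_speed,
 *		.show_host_speed	= 1,
 *		.get_fc_host_stats	= fc_get_host_stats,
 *		...
 *	};
 *
 * See the fcoe and fnic drivers for complete templates.
 */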
352
353 /**
354 * fc_lport_flogi_fill() - Fill in FLOGI command for request
355 * @lport: The local port the FLOGI is for
356 * @flogi: The FLOGI command
357 * @op: The opcode
358 */
359 static void fc_lport_flogi_fill(struct fc_lport *lport,
360 struct fc_els_flogi *flogi,
361 unsigned int op)
362 {
363 struct fc_els_csp *sp;
364 struct fc_els_cssp *cp;
365
366 memset(flogi, 0, sizeof(*flogi));
367 flogi->fl_cmd = (u8) op;
368 put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
369 put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
370 sp = &flogi->fl_csp;
371 sp->sp_hi_ver = 0x20;
372 sp->sp_lo_ver = 0x20;
373 sp->sp_bb_cred = htons(10); /* this gets set by gateway */
374 sp->sp_bb_data = htons((u16) lport->mfs);
375 cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
376 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
377 if (op != ELS_FLOGI) {
378 sp->sp_features = htons(FC_SP_FT_CIRO);
379 sp->sp_tot_seq = htons(255); /* seq. we accept */
380 sp->sp_rel_off = htons(0x1f);
381 sp->sp_e_d_tov = htonl(lport->e_d_tov);
382
383 cp->cp_rdfs = htons((u16) lport->mfs);
384 cp->cp_con_seq = htons(255);
385 cp->cp_open_seq = 1;
386 }
387 }
388
389 /**
390 * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port
391 * @lport: The local port to add a new FC-4 type to
392 * @type: The new FC-4 type
393 */
394 static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
395 {
396 __be32 *mp;
397
398 mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
399 *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
400 }
401
402 /**
403 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
404 * @lport: Fibre Channel local port receiving the RLIR
405 * @fp: The RLIR request frame
406 *
407 * Locking Note: The lport lock is expected to be held before calling
408 * this function.
409 */
410 static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
411 {
412 FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
413 fc_lport_state(lport));
414
415 fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
416 fc_frame_free(fp);
417 }
418
419 /**
420 * fc_lport_recv_echo_req() - Handle received ECHO request
421 * @lport: The local port receiving the ECHO
422 * @in_fp: The ECHO request frame
423 *
424 * Locking Note: The lport lock is expected to be held before calling
425 * this function.
426 */
427 static void fc_lport_recv_echo_req(struct fc_lport *lport,
428 struct fc_frame *in_fp)
429 {
430 struct fc_frame *fp;
431 unsigned int len;
432 void *pp;
433 void *dp;
434
435 FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
436 fc_lport_state(lport));
437
438 len = fr_len(in_fp) - sizeof(struct fc_frame_header);
439 pp = fc_frame_payload_get(in_fp, len);
440
441 if (len < sizeof(__be32))
442 len = sizeof(__be32);
443
444 fp = fc_frame_alloc(lport, len);
445 if (fp) {
446 dp = fc_frame_payload_get(fp, len);
447 memcpy(dp, pp, len);
448 *((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
449 fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
450 lport->tt.frame_send(lport, fp);
451 }
452 fc_frame_free(in_fp);
453 }
454
455 /**
456 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
457 * @lport: The local port receiving the RNID
458 * @in_fp: The RNID request frame
459 *
460 * Locking Note: The lport lock is expected to be held before calling
461 * this function.
462 */
463 static void fc_lport_recv_rnid_req(struct fc_lport *lport,
464 struct fc_frame *in_fp)
465 {
466 struct fc_frame *fp;
467 struct fc_els_rnid *req;
468 struct {
469 struct fc_els_rnid_resp rnid;
470 struct fc_els_rnid_cid cid;
471 struct fc_els_rnid_gen gen;
472 } *rp;
473 struct fc_seq_els_data rjt_data;
474 u8 fmt;
475 size_t len;
476
477 FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
478 fc_lport_state(lport));
479
480 req = fc_frame_payload_get(in_fp, sizeof(*req));
481 if (!req) {
482 rjt_data.reason = ELS_RJT_LOGIC;
483 rjt_data.explan = ELS_EXPL_NONE;
484 fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
485 } else {
486 fmt = req->rnid_fmt;
487 len = sizeof(*rp);
488 if (fmt != ELS_RNIDF_GEN ||
489 ntohl(lport->rnid_gen.rnid_atype) == 0) {
490 fmt = ELS_RNIDF_NONE; /* nothing to provide */
491 len -= sizeof(rp->gen);
492 }
493 fp = fc_frame_alloc(lport, len);
494 if (fp) {
495 rp = fc_frame_payload_get(fp, len);
496 memset(rp, 0, len);
497 rp->rnid.rnid_cmd = ELS_LS_ACC;
498 rp->rnid.rnid_fmt = fmt;
499 rp->rnid.rnid_cid_len = sizeof(rp->cid);
500 rp->cid.rnid_wwpn = htonll(lport->wwpn);
501 rp->cid.rnid_wwnn = htonll(lport->wwnn);
502 if (fmt == ELS_RNIDF_GEN) {
503 rp->rnid.rnid_sid_len = sizeof(rp->gen);
504 memcpy(&rp->gen, &lport->rnid_gen,
505 sizeof(rp->gen));
506 }
507 fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
508 lport->tt.frame_send(lport, fp);
509 }
510 }
511 fc_frame_free(in_fp);
512 }
513
514 /**
515 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
516 * @lport: The local port receiving the LOGO
517 * @fp: The LOGO request frame
518 *
519 * Locking Note: The lport lock is expected to be held before calling
520 * this function.
521 */
522 static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
523 {
524 fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
525 fc_lport_enter_reset(lport);
526 fc_frame_free(fp);
527 }
528
529 /**
530 * fc_fabric_login() - Start the lport state machine
531 * @lport: The local port that should log into the fabric
532 *
533 * Locking Note: This function should not be called
534 * with the lport lock held.
535 */
536 int fc_fabric_login(struct fc_lport *lport)
537 {
538 int rc = -1;
539
540 mutex_lock(&lport->lp_mutex);
541 if (lport->state == LPORT_ST_DISABLED ||
542 lport->state == LPORT_ST_LOGO) {
543 fc_lport_state_enter(lport, LPORT_ST_RESET);
544 fc_lport_enter_reset(lport);
545 rc = 0;
546 }
547 mutex_unlock(&lport->lp_mutex);
548
549 return rc;
550 }
551 EXPORT_SYMBOL(fc_fabric_login);
552
553 /**
554 * __fc_linkup() - Handler for transport linkup events
555 * @lport: The lport whose link is up
556 *
557 * Locking: must be called with the lp_mutex held
558 */
559 void __fc_linkup(struct fc_lport *lport)
560 {
561 if (!lport->link_up) {
562 lport->link_up = 1;
563
564 if (lport->state == LPORT_ST_RESET)
565 fc_lport_enter_flogi(lport);
566 }
567 }
568
569 /**
570 * fc_linkup() - Handler for transport linkup events
571 * @lport: The local port whose link is up
572 */
573 void fc_linkup(struct fc_lport *lport)
574 {
575 printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
576 lport->host->host_no, lport->port_id);
577
578 mutex_lock(&lport->lp_mutex);
579 __fc_linkup(lport);
580 mutex_unlock(&lport->lp_mutex);
581 }
582 EXPORT_SYMBOL(fc_linkup);
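/*
 * Illustrative sketch only: a low-level driver normally starts the lport
 * state machine with fc_fabric_login() once the lport is configured, and
 * then reports link state with fc_linkup()/fc_linkdown():
 *
 *	fc_fabric_login(lport);		(DISABLED/LOGO -> RESET, and on to
 *					 FLOGI if the link is already up)
 *	fc_linkup(lport);		(link up: RESET -> FLOGI)
 *	...
 *	fc_linkdown(lport);		(link loss resets the lport)
 *
 * The exact call sites are LLD specific; see the fcoe and fnic drivers for
 * real examples.
 */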
583
584 /**
585 * __fc_linkdown() - Handler for transport linkdown events
586 * @lport: The lport whose link is down
587 *
588 * Locking: must be called with the lp_mutex held
589 */
590 void __fc_linkdown(struct fc_lport *lport)
591 {
592 if (lport->link_up) {
593 lport->link_up = 0;
594 fc_lport_enter_reset(lport);
595 lport->tt.fcp_cleanup(lport);
596 }
597 }
598
599 /**
600 * fc_linkdown() - Handler for transport linkdown events
601 * @lport: The local port whose link is down
602 */
603 void fc_linkdown(struct fc_lport *lport)
604 {
605 printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
606 lport->host->host_no, lport->port_id);
607
608 mutex_lock(&lport->lp_mutex);
609 __fc_linkdown(lport);
610 mutex_unlock(&lport->lp_mutex);
611 }
612 EXPORT_SYMBOL(fc_linkdown);
613
614 /**
615 * fc_fabric_logoff() - Logout of the fabric
616 * @lport: The local port to logoff the fabric
617 *
618 * Return value:
619 * 0 for success, -1 for failure
620 */
621 int fc_fabric_logoff(struct fc_lport *lport)
622 {
623 lport->tt.disc_stop_final(lport);
624 mutex_lock(&lport->lp_mutex);
625 if (lport->dns_rdata)
626 fc_rport_logoff(lport->dns_rdata);
627 mutex_unlock(&lport->lp_mutex);
628 fc_rport_flush_queue();
629 mutex_lock(&lport->lp_mutex);
630 fc_lport_enter_logo(lport);
631 mutex_unlock(&lport->lp_mutex);
632 cancel_delayed_work_sync(&lport->retry_work);
633 return 0;
634 }
635 EXPORT_SYMBOL(fc_fabric_logoff);
636
637 /**
638 * fc_lport_destroy() - Unregister a fc_lport
639 * @lport: The local port to unregister
640 *
641 * Note:
642 * exit routine for fc_lport instance
643 * clean-up all the allocated memory
644 * and free up other system resources.
645 *
646 */
647 int fc_lport_destroy(struct fc_lport *lport)
648 {
649 mutex_lock(&lport->lp_mutex);
650 lport->state = LPORT_ST_DISABLED;
651 lport->link_up = 0;
652 lport->tt.frame_send = fc_frame_drop;
653 mutex_unlock(&lport->lp_mutex);
654
655 lport->tt.fcp_abort_io(lport);
656 lport->tt.disc_stop_final(lport);
657 lport->tt.exch_mgr_reset(lport, 0, 0);
658 cancel_delayed_work_sync(&lport->retry_work);
659 fc_fc4_del_lport(lport);
660 return 0;
661 }
662 EXPORT_SYMBOL(fc_lport_destroy);
663
664 /**
665 * fc_set_mfs() - Set the maximum frame size for a local port
666 * @lport: The local port to set the MFS for
667 * @mfs: The new MFS
668 */
669 int fc_set_mfs(struct fc_lport *lport, u32 mfs)
670 {
671 unsigned int old_mfs;
672 int rc = -EINVAL;
673
674 mutex_lock(&lport->lp_mutex);
675
676 old_mfs = lport->mfs;
677
678 if (mfs >= FC_MIN_MAX_FRAME) {
679 mfs &= ~3;
680 if (mfs > FC_MAX_FRAME)
681 mfs = FC_MAX_FRAME;
682 mfs -= sizeof(struct fc_frame_header);
683 lport->mfs = mfs;
684 rc = 0;
685 }
686
687 if (!rc && mfs < old_mfs)
688 fc_lport_enter_reset(lport);
689
690 mutex_unlock(&lport->lp_mutex);
691
692 return rc;
693 }
694 EXPORT_SYMBOL(fc_set_mfs);
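/*
 * Illustrative sketch only: an FCoE-style LLD typically derives the MFS
 * from the underlying netdev MTU before calling fc_set_mfs().  The exact
 * overhead accounting is driver specific; roughly:
 *
 *	u32 mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
 *				 sizeof(struct fcoe_crc_eof));
 *
 *	if (fc_set_mfs(lport, mfs))
 *		return -EINVAL;
 */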
695
696 /**
697 * fc_lport_disc_callback() - Callback for discovery events
698 * @lport: The local port receiving the event
699 * @event: The discovery event
700 */
701 static void fc_lport_disc_callback(struct fc_lport *lport,
702 enum fc_disc_event event)
703 {
704 switch (event) {
705 case DISC_EV_SUCCESS:
706 FC_LPORT_DBG(lport, "Discovery succeeded\n");
707 break;
708 case DISC_EV_FAILED:
709 printk(KERN_ERR "host%d: libfc: "
710 "Discovery failed for port (%6.6x)\n",
711 lport->host->host_no, lport->port_id);
712 mutex_lock(&lport->lp_mutex);
713 fc_lport_enter_reset(lport);
714 mutex_unlock(&lport->lp_mutex);
715 break;
716 case DISC_EV_NONE:
717 WARN_ON(1);
718 break;
719 }
720 }
721
722 /**
723 * fc_lport_enter_ready() - Enter the ready state and start discovery
724 * @lport: The local port that is ready
725 *
726 * Locking Note: The lport lock is expected to be held before calling
727 * this routine.
728 */
729 static void fc_lport_enter_ready(struct fc_lport *lport)
730 {
731 FC_LPORT_DBG(lport, "Entered READY from state %s\n",
732 fc_lport_state(lport));
733
734 fc_lport_state_enter(lport, LPORT_ST_READY);
735 if (lport->vport)
736 fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
737 fc_vports_linkchange(lport);
738
739 if (!lport->ptp_rdata)
740 lport->tt.disc_start(fc_lport_disc_callback, lport);
741 }
742
743 /**
744 * fc_lport_set_port_id() - set the local port Port ID
745 * @lport: The local port which will have its Port ID set.
746 * @port_id: The new port ID.
747 * @fp: The frame containing the incoming request, or NULL.
748 *
749 * Locking Note: The lport lock is expected to be held before calling
750 * this function.
751 */
752 static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
753 struct fc_frame *fp)
754 {
755 if (port_id)
756 printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
757 lport->host->host_no, port_id);
758
759 lport->port_id = port_id;
760
761 /* Update the fc_host */
762 fc_host_port_id(lport->host) = port_id;
763
764 if (lport->tt.lport_set_port_id)
765 lport->tt.lport_set_port_id(lport, port_id, fp);
766 }
767
768 /**
769 * fc_lport_set_local_id() - set the local port Port ID for point-to-multipoint
770 * @lport: The local port which will have its Port ID set.
771 * @port_id: The new port ID.
772 *
773 * Called by the lower-level driver when transport sets the local port_id.
774 * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and
775 * discovery to be skipped.
776 */
777 void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id)
778 {
779 mutex_lock(&lport->lp_mutex);
780
781 fc_lport_set_port_id(lport, port_id, NULL);
782
783 switch (lport->state) {
784 case LPORT_ST_RESET:
785 case LPORT_ST_FLOGI:
786 if (port_id)
787 fc_lport_enter_ready(lport);
788 break;
789 default:
790 break;
791 }
792 mutex_unlock(&lport->lp_mutex);
793 }
794 EXPORT_SYMBOL(fc_lport_set_local_id);
795
796 /**
797 * fc_lport_recv_flogi_req() - Receive a FLOGI request
798 * @lport: The local port that received the request
799 * @rx_fp: The FLOGI frame
800 *
801 * A received FLOGI request indicates a point-to-point connection.
802 * Accept it with the common service parameters indicating our N port.
803 * Set up to do a PLOGI if we have the higher-number WWPN.
804 *
805 * Locking Note: The lport lock is expected to be held before calling
806 * this function.
807 */
808 static void fc_lport_recv_flogi_req(struct fc_lport *lport,
809 struct fc_frame *rx_fp)
810 {
811 struct fc_frame *fp;
812 struct fc_frame_header *fh;
813 struct fc_els_flogi *flp;
814 struct fc_els_flogi *new_flp;
815 u64 remote_wwpn;
816 u32 remote_fid;
817 u32 local_fid;
818
819 FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
820 fc_lport_state(lport));
821
822 remote_fid = fc_frame_sid(rx_fp);
823 flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
824 if (!flp)
825 goto out;
826 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
827 if (remote_wwpn == lport->wwpn) {
828 printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
829 "with same WWPN %16.16llx\n",
830 lport->host->host_no, remote_wwpn);
831 goto out;
832 }
833 FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);
834
835 /*
836 * XXX what is the right thing to do for FIDs?
837 * The originator might expect our S_ID to be 0xfffffe.
838 * But if so, both of us could end up with the same FID.
839 */
840 local_fid = FC_LOCAL_PTP_FID_LO;
841 if (remote_wwpn < lport->wwpn) {
842 local_fid = FC_LOCAL_PTP_FID_HI;
843 if (!remote_fid || remote_fid == local_fid)
844 remote_fid = FC_LOCAL_PTP_FID_LO;
845 } else if (!remote_fid) {
846 remote_fid = FC_LOCAL_PTP_FID_HI;
847 }
848
849 fc_lport_set_port_id(lport, local_fid, rx_fp);
850
851 fp = fc_frame_alloc(lport, sizeof(*flp));
852 if (fp) {
853 new_flp = fc_frame_payload_get(fp, sizeof(*flp));
854 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
855 new_flp->fl_cmd = (u8) ELS_LS_ACC;
856
857 /*
858 * Send the response. If this fails, the originator should
859 * repeat the sequence.
860 */
861 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
862 fh = fc_frame_header_get(fp);
863 hton24(fh->fh_s_id, local_fid);
864 hton24(fh->fh_d_id, remote_fid);
865 lport->tt.frame_send(lport, fp);
866
867 } else {
868 fc_lport_error(lport, fp);
869 }
870 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
871 get_unaligned_be64(&flp->fl_wwnn));
872 out:
873 fc_frame_free(rx_fp);
874 }
875
876 /**
877 * fc_lport_recv_els_req() - The generic lport ELS request handler
878 * @lport: The local port that received the request
879 * @fp: The request frame
880 *
881 * This function will see if the lport handles the request or
882 * if an rport should handle the request.
883 *
884 * Locking Note: This function should not be called with the lport
885 * lock held because it will grab the lock.
886 */
887 static void fc_lport_recv_els_req(struct fc_lport *lport,
888 struct fc_frame *fp)
889 {
890 void (*recv)(struct fc_lport *, struct fc_frame *);
891
892 mutex_lock(&lport->lp_mutex);
893
894 /*
895 * Handle special ELS cases like FLOGI, LOGO, and
896 * RSCN here. These don't require a session.
897 * Even if we had a session, it might not be ready.
898 */
899 if (!lport->link_up)
900 fc_frame_free(fp);
901 else {
902 /*
903 * Check opcode.
904 */
905 recv = fc_rport_recv_req;
906 switch (fc_frame_payload_op(fp)) {
907 case ELS_FLOGI:
908 if (!lport->point_to_multipoint)
909 recv = fc_lport_recv_flogi_req;
910 break;
911 case ELS_LOGO:
912 if (fc_frame_sid(fp) == FC_FID_FLOGI)
913 recv = fc_lport_recv_logo_req;
914 break;
915 case ELS_RSCN:
916 recv = lport->tt.disc_recv_req;
917 break;
918 case ELS_ECHO:
919 recv = fc_lport_recv_echo_req;
920 break;
921 case ELS_RLIR:
922 recv = fc_lport_recv_rlir_req;
923 break;
924 case ELS_RNID:
925 recv = fc_lport_recv_rnid_req;
926 break;
927 }
928
929 recv(lport, fp);
930 }
931 mutex_unlock(&lport->lp_mutex);
932 }
933
934 static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len,
935 const struct fc_els_spp *spp_in,
936 struct fc_els_spp *spp_out)
937 {
938 return FC_SPP_RESP_INVL;
939 }
940
941 struct fc4_prov fc_lport_els_prov = {
942 .prli = fc_lport_els_prli,
943 .recv = fc_lport_recv_els_req,
944 };
945
946 /**
947 * fc_lport_recv() - The generic lport request handler
948 * @lport: The lport that received the request
949 * @fp: The frame the request is in
950 *
951 * Locking Note: This function should not be called with the lport
952 * lock held because it may grab the lock.
953 */
954 void fc_lport_recv(struct fc_lport *lport, struct fc_frame *fp)
955 {
956 struct fc_frame_header *fh = fc_frame_header_get(fp);
957 struct fc_seq *sp = fr_seq(fp);
958 struct fc4_prov *prov;
959
960 /*
961 * Use RCU read lock and module_lock to be sure module doesn't
962 * deregister and get unloaded while we're calling it.
963 * try_module_get() is inlined and accepts a NULL parameter.
964 * Only ELSes and FCP target ops should come through here.
965 * The locking is unfortunate, and a better scheme is being sought.
966 */
967
968 rcu_read_lock();
969 if (fh->fh_type >= FC_FC4_PROV_SIZE)
970 goto drop;
971 prov = rcu_dereference(fc_passive_prov[fh->fh_type]);
972 if (!prov || !try_module_get(prov->module))
973 goto drop;
974 rcu_read_unlock();
975 prov->recv(lport, fp);
976 module_put(prov->module);
977 return;
978 drop:
979 rcu_read_unlock();
980 FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
981 fc_frame_free(fp);
982 if (sp)
983 fc_exch_done(sp);
984 }
985 EXPORT_SYMBOL(fc_lport_recv);
986
987 /**
988 * fc_lport_reset() - Reset a local port
989 * @lport: The local port which should be reset
990 *
991 * Locking Note: This function should not be called with the
992 * lport lock held.
993 */
994 int fc_lport_reset(struct fc_lport *lport)
995 {
996 cancel_delayed_work_sync(&lport->retry_work);
997 mutex_lock(&lport->lp_mutex);
998 fc_lport_enter_reset(lport);
999 mutex_unlock(&lport->lp_mutex);
1000 return 0;
1001 }
1002 EXPORT_SYMBOL(fc_lport_reset);
1003
1004 /**
1005 * fc_lport_reset_locked() - Reset the local port w/ the lport lock held
1006 * @lport: The local port to be reset
1007 *
1008 * Locking Note: The lport lock is expected to be held before calling
1009 * this routine.
1010 */
1011 static void fc_lport_reset_locked(struct fc_lport *lport)
1012 {
1013 if (lport->dns_rdata) {
1014 fc_rport_logoff(lport->dns_rdata);
1015 lport->dns_rdata = NULL;
1016 }
1017
1018 if (lport->ptp_rdata) {
1019 fc_rport_logoff(lport->ptp_rdata);
1020 kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
1021 lport->ptp_rdata = NULL;
1022 }
1023
1024 lport->tt.disc_stop(lport);
1025
1026 lport->tt.exch_mgr_reset(lport, 0, 0);
1027 fc_host_fabric_name(lport->host) = 0;
1028
1029 if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up))
1030 fc_lport_set_port_id(lport, 0, NULL);
1031 }
1032
1033 /**
1034 * fc_lport_enter_reset() - Reset the local port
1035 * @lport: The local port to be reset
1036 *
1037 * Locking Note: The lport lock is expected to be held before calling
1038 * this routine.
1039 */
1040 static void fc_lport_enter_reset(struct fc_lport *lport)
1041 {
1042 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
1043 fc_lport_state(lport));
1044
1045 if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
1046 return;
1047
1048 if (lport->vport) {
1049 if (lport->link_up)
1050 fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
1051 else
1052 fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
1053 }
1054 fc_lport_state_enter(lport, LPORT_ST_RESET);
1055 fc_host_post_event(lport->host, fc_get_event_number(),
1056 FCH_EVT_LIPRESET, 0);
1057 fc_vports_linkchange(lport);
1058 fc_lport_reset_locked(lport);
1059 if (lport->link_up)
1060 fc_lport_enter_flogi(lport);
1061 }
1062
1063 /**
1064 * fc_lport_enter_disabled() - Disable the local port
1065 * @lport: The local port to be reset
1066 *
1067 * Locking Note: The lport lock is expected to be held before calling
1068 * this routine.
1069 */
1070 static void fc_lport_enter_disabled(struct fc_lport *lport)
1071 {
1072 FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
1073 fc_lport_state(lport));
1074
1075 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1076 fc_vports_linkchange(lport);
1077 fc_lport_reset_locked(lport);
1078 }
1079
1080 /**
1081 * fc_lport_error() - Handler for any errors
1082 * @lport: The local port that the error was on
1083 * @fp: The error code encoded in a frame pointer
1084 *
1085 * If the error was caused by a resource allocation failure
1086 * then wait for half a second and retry, otherwise retry
1087 * after the e_d_tov time.
1088 */
1089 static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
1090 {
1091 unsigned long delay = 0;
1092 FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
1093 IS_ERR(fp) ? -PTR_ERR(fp) : 0, fc_lport_state(lport),
1094 lport->retry_count);
1095
1096 if (PTR_ERR(fp) == -FC_EX_CLOSED)
1097 return;
1098
1099 /*
1100 * Memory allocation failure, or the exchange timed out
1101 * or we received LS_RJT.
1102 * Retry after delay
1103 */
1104 if (lport->retry_count < lport->max_retry_count) {
1105 lport->retry_count++;
1106 if (!fp)
1107 delay = msecs_to_jiffies(500);
1108 else
1109 delay = msecs_to_jiffies(lport->e_d_tov);
1110
1111 schedule_delayed_work(&lport->retry_work, delay);
1112 } else
1113 fc_lport_enter_reset(lport);
1114 }
1115
1116 /**
1117 * fc_lport_ns_resp() - Handle response to a name server
1118 * registration exchange
1119 * @sp: current sequence in exchange
1120 * @fp: response frame
1121 * @lp_arg: Fibre Channel host port instance
1122 *
1123 * Locking Note: This function will be called without the lport lock
1124 * held, but it will lock, call an _enter_* function or fc_lport_error()
1125 * and then unlock the lport.
1126 */
1127 static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
1128 void *lp_arg)
1129 {
1130 struct fc_lport *lport = lp_arg;
1131 struct fc_frame_header *fh;
1132 struct fc_ct_hdr *ct;
1133
1134 FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));
1135
1136 if (fp == ERR_PTR(-FC_EX_CLOSED))
1137 return;
1138
1139 mutex_lock(&lport->lp_mutex);
1140
1141 if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
1142 FC_LPORT_DBG(lport, "Received a name server response, "
1143 "but in state %s\n", fc_lport_state(lport));
1144 if (IS_ERR(fp))
1145 goto err;
1146 goto out;
1147 }
1148
1149 if (IS_ERR(fp)) {
1150 fc_lport_error(lport, fp);
1151 goto err;
1152 }
1153
1154 fh = fc_frame_header_get(fp);
1155 ct = fc_frame_payload_get(fp, sizeof(*ct));
1156
1157 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1158 ct->ct_fs_type == FC_FST_DIR &&
1159 ct->ct_fs_subtype == FC_NS_SUBTYPE &&
1160 ntohs(ct->ct_cmd) == FC_FS_ACC)
1161 switch (lport->state) {
1162 case LPORT_ST_RNN_ID:
1163 fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
1164 break;
1165 case LPORT_ST_RSNN_NN:
1166 fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
1167 break;
1168 case LPORT_ST_RSPN_ID:
1169 fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1170 break;
1171 case LPORT_ST_RFT_ID:
1172 fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
1173 break;
1174 case LPORT_ST_RFF_ID:
1175 if (lport->fdmi_enabled)
1176 fc_lport_enter_fdmi(lport);
1177 else
1178 fc_lport_enter_scr(lport);
1179 break;
1180 default:
1181 /* should have already been caught by state checks */
1182 break;
1183 }
1184 else
1185 fc_lport_error(lport, fp);
1186 out:
1187 fc_frame_free(fp);
1188 err:
1189 mutex_unlock(&lport->lp_mutex);
1190 }
1191
1192 /**
1193 * fc_lport_ms_resp() - Handle response to a management server
1194 * exchange
1195 * @sp: current sequence in exchange
1196 * @fp: response frame
1197 * @lp_arg: Fibre Channel host port instance
1198 *
1199 * Locking Note: This function will be called without the lport lock
1200 * held, but it will lock, call an _enter_* function or fc_lport_error()
1201 * and then unlock the lport.
1202 */
1203 static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp,
1204 void *lp_arg)
1205 {
1206 struct fc_lport *lport = lp_arg;
1207 struct fc_frame_header *fh;
1208 struct fc_ct_hdr *ct;
1209
1210 FC_LPORT_DBG(lport, "Received a ms %s\n", fc_els_resp_type(fp));
1211
1212 if (fp == ERR_PTR(-FC_EX_CLOSED))
1213 return;
1214
1215 mutex_lock(&lport->lp_mutex);
1216
1217 if (lport->state < LPORT_ST_RHBA || lport->state > LPORT_ST_DPRT) {
1218 FC_LPORT_DBG(lport, "Received a management server response, "
1219 "but in state %s\n", fc_lport_state(lport));
1220 if (IS_ERR(fp))
1221 goto err;
1222 goto out;
1223 }
1224
1225 if (IS_ERR(fp)) {
1226 fc_lport_error(lport, fp);
1227 goto err;
1228 }
1229
1230 fh = fc_frame_header_get(fp);
1231 ct = fc_frame_payload_get(fp, sizeof(*ct));
1232
1233 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1234 ct->ct_fs_type == FC_FST_MGMT &&
1235 ct->ct_fs_subtype == FC_FDMI_SUBTYPE) {
1236 FC_LPORT_DBG(lport, "Received a management server response, "
1237 "reason=%d explain=%d\n",
1238 ct->ct_reason,
1239 ct->ct_explan);
1240
1241 switch (lport->state) {
1242 case LPORT_ST_RHBA:
1243 if (ntohs(ct->ct_cmd) == FC_FS_ACC)
1244 fc_lport_enter_ms(lport, LPORT_ST_RPA);
1245 else /* Error Skip RPA */
1246 fc_lport_enter_scr(lport);
1247 break;
1248 case LPORT_ST_RPA:
1249 fc_lport_enter_scr(lport);
1250 break;
1251 case LPORT_ST_DPRT:
1252 fc_lport_enter_ms(lport, LPORT_ST_RHBA);
1253 break;
1254 case LPORT_ST_DHBA:
1255 fc_lport_enter_ms(lport, LPORT_ST_DPRT);
1256 break;
1257 default:
1258 /* should have already been caught by state checks */
1259 break;
1260 }
1261 } else {
1262 /* Invalid Frame? */
1263 fc_lport_error(lport, fp);
1264 }
1265 out:
1266 fc_frame_free(fp);
1267 err:
1268 mutex_unlock(&lport->lp_mutex);
1269 }
1270
1271 /**
1272 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
1273 * @sp: current sequence in SCR exchange
1274 * @fp: response frame
1275 * @lp_arg: Fibre Channel local port instance that sent the registration request
1276 *
1277 * Locking Note: This function will be called without the lport lock
1278 * held, but it will lock, call an _enter_* function or fc_lport_error
1279 * and then unlock the lport.
1280 */
1281 static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
1282 void *lp_arg)
1283 {
1284 struct fc_lport *lport = lp_arg;
1285 u8 op;
1286
1287 FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));
1288
1289 if (fp == ERR_PTR(-FC_EX_CLOSED))
1290 return;
1291
1292 mutex_lock(&lport->lp_mutex);
1293
1294 if (lport->state != LPORT_ST_SCR) {
1295 FC_LPORT_DBG(lport, "Received a SCR response, but in state "
1296 "%s\n", fc_lport_state(lport));
1297 if (IS_ERR(fp))
1298 goto err;
1299 goto out;
1300 }
1301
1302 if (IS_ERR(fp)) {
1303 fc_lport_error(lport, fp);
1304 goto err;
1305 }
1306
1307 op = fc_frame_payload_op(fp);
1308 if (op == ELS_LS_ACC)
1309 fc_lport_enter_ready(lport);
1310 else
1311 fc_lport_error(lport, fp);
1312
1313 out:
1314 fc_frame_free(fp);
1315 err:
1316 mutex_unlock(&lport->lp_mutex);
1317 }
1318
1319 /**
1320 * fc_lport_enter_scr() - Send a SCR (State Change Register) request
1321 * @lport: The local port to register for state changes
1322 *
1323 * Locking Note: The lport lock is expected to be held before calling
1324 * this routine.
1325 */
1326 static void fc_lport_enter_scr(struct fc_lport *lport)
1327 {
1328 struct fc_frame *fp;
1329
1330 FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
1331 fc_lport_state(lport));
1332
1333 fc_lport_state_enter(lport, LPORT_ST_SCR);
1334
1335 fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
1336 if (!fp) {
1337 fc_lport_error(lport, fp);
1338 return;
1339 }
1340
1341 if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
1342 fc_lport_scr_resp, lport,
1343 2 * lport->r_a_tov))
1344 fc_lport_error(lport, NULL);
1345 }
1346
1347 /**
1348 * fc_lport_enter_ns() - Send a name server registration request
1349 * @lport: Fibre Channel local port to register
1350 *
1351 * Locking Note: The lport lock is expected to be held before calling
1352 * this routine.
1353 */
1354 static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
1355 {
1356 struct fc_frame *fp;
1357 enum fc_ns_req cmd;
1358 int size = sizeof(struct fc_ct_hdr);
1359 size_t len;
1360
1361 FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
1362 fc_lport_state_names[state],
1363 fc_lport_state(lport));
1364
1365 fc_lport_state_enter(lport, state);
1366
1367 switch (state) {
1368 case LPORT_ST_RNN_ID:
1369 cmd = FC_NS_RNN_ID;
1370 size += sizeof(struct fc_ns_rn_id);
1371 break;
1372 case LPORT_ST_RSNN_NN:
1373 len = strnlen(fc_host_symbolic_name(lport->host), 255);
1374 /* if there is no symbolic name, skip to RFT_ID */
1375 if (!len)
1376 return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1377 cmd = FC_NS_RSNN_NN;
1378 size += sizeof(struct fc_ns_rsnn) + len;
1379 break;
1380 case LPORT_ST_RSPN_ID:
1381 len = strnlen(fc_host_symbolic_name(lport->host), 255);
1382 /* if there is no symbolic name, skip to RFT_ID */
1383 if (!len)
1384 return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
1385 cmd = FC_NS_RSPN_ID;
1386 size += sizeof(struct fc_ns_rspn) + len;
1387 break;
1388 case LPORT_ST_RFT_ID:
1389 cmd = FC_NS_RFT_ID;
1390 size += sizeof(struct fc_ns_rft);
1391 break;
1392 case LPORT_ST_RFF_ID:
1393 cmd = FC_NS_RFF_ID;
1394 size += sizeof(struct fc_ns_rff_id);
1395 break;
1396 default:
1397 fc_lport_error(lport, NULL);
1398 return;
1399 }
1400
1401 fp = fc_frame_alloc(lport, size);
1402 if (!fp) {
1403 fc_lport_error(lport, fp);
1404 return;
1405 }
1406
1407 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
1408 fc_lport_ns_resp,
1409 lport, 3 * lport->r_a_tov))
1410 fc_lport_error(lport, fp);
1411 }
1412
1413 static struct fc_rport_operations fc_lport_rport_ops = {
1414 .event_callback = fc_lport_rport_callback,
1415 };
1416
1417 /**
1418 * fc_lport_enter_dns() - Create a fc_rport for the name server
1419 * @lport: The local port requesting a remote port for the name server
1420 *
1421 * Locking Note: The lport lock is expected to be held before calling
1422 * this routine.
1423 */
1424 static void fc_lport_enter_dns(struct fc_lport *lport)
1425 {
1426 struct fc_rport_priv *rdata;
1427
1428 FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
1429 fc_lport_state(lport));
1430
1431 fc_lport_state_enter(lport, LPORT_ST_DNS);
1432
1433 mutex_lock(&lport->disc.disc_mutex);
1434 rdata = fc_rport_create(lport, FC_FID_DIR_SERV);
1435 mutex_unlock(&lport->disc.disc_mutex);
1436 if (!rdata)
1437 goto err;
1438
1439 rdata->ops = &fc_lport_rport_ops;
1440 fc_rport_login(rdata);
1441 return;
1442
1443 err:
1444 fc_lport_error(lport, NULL);
1445 }
1446
1447 /**
1448 * fc_lport_enter_ms() - Send a management server (FDMI) request
1449 * @lport: Fibre Channel local port to register
1450 *
1451 * Locking Note: The lport lock is expected to be held before calling
1452 * this routine.
1453 */
1454 static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
1455 {
1456 struct fc_frame *fp;
1457 enum fc_fdmi_req cmd;
1458 int size = sizeof(struct fc_ct_hdr);
1459 size_t len;
1460 int numattrs;
1461
1462 FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
1463 fc_lport_state_names[state],
1464 fc_lport_state(lport));
1465
1466 fc_lport_state_enter(lport, state);
1467
1468 switch (state) {
1469 case LPORT_ST_RHBA:
1470 cmd = FC_FDMI_RHBA;
1471 /* Number of HBA Attributes */
1472 numattrs = 10;
1473 len = sizeof(struct fc_fdmi_rhba);
1474 len -= sizeof(struct fc_fdmi_attr_entry);
1475 len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
1476 len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
1477 len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
1478 len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
1479 len += FC_FDMI_HBA_ATTR_MODEL_LEN;
1480 len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
1481 len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
1482 len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
1483 len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
1484 len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
1485 len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
1486
1487 size += len;
1488 break;
1489 case LPORT_ST_RPA:
1490 cmd = FC_FDMI_RPA;
1491 /* Number of Port Attributes */
1492 numattrs = 6;
1493 len = sizeof(struct fc_fdmi_rpa);
1494 len -= sizeof(struct fc_fdmi_attr_entry);
1495 len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
1496 len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
1497 len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
1498 len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
1499 len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
1500 len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
1501 len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
1502
1503 size += len;
1504 break;
1505 case LPORT_ST_DPRT:
1506 cmd = FC_FDMI_DPRT;
1507 len = sizeof(struct fc_fdmi_dprt);
1508 size += len;
1509 break;
1510 case LPORT_ST_DHBA:
1511 cmd = FC_FDMI_DHBA;
1512 len = sizeof(struct fc_fdmi_dhba);
1513 size += len;
1514 break;
1515 default:
1516 fc_lport_error(lport, NULL);
1517 return;
1518 }
1519
1520 FC_LPORT_DBG(lport, "Cmd=0x%x Len %d size %d\n",
1521 cmd, (int)len, size);
1522 fp = fc_frame_alloc(lport, size);
1523 if (!fp) {
1524 fc_lport_error(lport, fp);
1525 return;
1526 }
1527
1528 if (!lport->tt.elsct_send(lport, FC_FID_MGMT_SERV, fp, cmd,
1529 fc_lport_ms_resp,
1530 lport, 3 * lport->r_a_tov))
1531 fc_lport_error(lport, fp);
1532 }
1533
1534 /**
1535 * fc_lport_enter_fdmi() - Create a fc_rport for the management server
1536 * @lport: The local port requesting a remote port for the management server
1537 *
1538 * Locking Note: The lport lock is expected to be held before calling
1539 * this routine.
1540 */
1541 static void fc_lport_enter_fdmi(struct fc_lport *lport)
1542 {
1543 struct fc_rport_priv *rdata;
1544
1545 FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n",
1546 fc_lport_state(lport));
1547
1548 fc_lport_state_enter(lport, LPORT_ST_FDMI);
1549
1550 mutex_lock(&lport->disc.disc_mutex);
1551 rdata = fc_rport_create(lport, FC_FID_MGMT_SERV);
1552 mutex_unlock(&lport->disc.disc_mutex);
1553 if (!rdata)
1554 goto err;
1555
1556 rdata->ops = &fc_lport_rport_ops;
1557 fc_rport_login(rdata);
1558 return;
1559
1560 err:
1561 fc_lport_error(lport, NULL);
1562 }
1563
1564 /**
1565 * fc_lport_timeout() - Handler for the retry_work timer
1566 * @work: The work struct of the local port
1567 */
1568 static void fc_lport_timeout(struct work_struct *work)
1569 {
1570 struct fc_lport *lport =
1571 container_of(work, struct fc_lport,
1572 retry_work.work);
1573
1574 mutex_lock(&lport->lp_mutex);
1575
1576 switch (lport->state) {
1577 case LPORT_ST_DISABLED:
1578 break;
1579 case LPORT_ST_READY:
1580 break;
1581 case LPORT_ST_RESET:
1582 break;
1583 case LPORT_ST_FLOGI:
1584 fc_lport_enter_flogi(lport);
1585 break;
1586 case LPORT_ST_DNS:
1587 fc_lport_enter_dns(lport);
1588 break;
1589 case LPORT_ST_RNN_ID:
1590 case LPORT_ST_RSNN_NN:
1591 case LPORT_ST_RSPN_ID:
1592 case LPORT_ST_RFT_ID:
1593 case LPORT_ST_RFF_ID:
1594 fc_lport_enter_ns(lport, lport->state);
1595 break;
1596 case LPORT_ST_FDMI:
1597 fc_lport_enter_fdmi(lport);
1598 break;
1599 case LPORT_ST_RHBA:
1600 case LPORT_ST_RPA:
1601 case LPORT_ST_DHBA:
1602 case LPORT_ST_DPRT:
1603 FC_LPORT_DBG(lport, "Skipping lport state %s to SCR\n",
1604 fc_lport_state(lport));
1605 /* fall thru */
1606 case LPORT_ST_SCR:
1607 fc_lport_enter_scr(lport);
1608 break;
1609 case LPORT_ST_LOGO:
1610 fc_lport_enter_logo(lport);
1611 break;
1612 }
1613
1614 mutex_unlock(&lport->lp_mutex);
1615 }
1616
1617 /**
1618 * fc_lport_logo_resp() - Handle response to LOGO request
1619 * @sp: The sequence that the LOGO was on
1620 * @fp: The LOGO response frame
1621 * @lp_arg: The local port that sent the LOGO request
1622 *
1623 * Locking Note: This function will be called without the lport lock
1624 * held, but it will lock, call an _enter_* function or fc_lport_error()
1625 * and then unlock the lport.
1626 */
1627 void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
1628 void *lp_arg)
1629 {
1630 struct fc_lport *lport = lp_arg;
1631 u8 op;
1632
1633 FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));
1634
1635 if (fp == ERR_PTR(-FC_EX_CLOSED))
1636 return;
1637
1638 mutex_lock(&lport->lp_mutex);
1639
1640 if (lport->state != LPORT_ST_LOGO) {
1641 FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
1642 "%s\n", fc_lport_state(lport));
1643 if (IS_ERR(fp))
1644 goto err;
1645 goto out;
1646 }
1647
1648 if (IS_ERR(fp)) {
1649 fc_lport_error(lport, fp);
1650 goto err;
1651 }
1652
1653 op = fc_frame_payload_op(fp);
1654 if (op == ELS_LS_ACC)
1655 fc_lport_enter_disabled(lport);
1656 else
1657 fc_lport_error(lport, fp);
1658
1659 out:
1660 fc_frame_free(fp);
1661 err:
1662 mutex_unlock(&lport->lp_mutex);
1663 }
1664 EXPORT_SYMBOL(fc_lport_logo_resp);
1665
1666 /**
1667 * fc_lport_enter_logo() - Logout of the fabric
1668 * @lport: The local port to be logged out
1669 *
1670 * Locking Note: The lport lock is expected to be held before calling
1671 * this routine.
1672 */
1673 static void fc_lport_enter_logo(struct fc_lport *lport)
1674 {
1675 struct fc_frame *fp;
1676 struct fc_els_logo *logo;
1677
1678 FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
1679 fc_lport_state(lport));
1680
1681 fc_lport_state_enter(lport, LPORT_ST_LOGO);
1682 fc_vports_linkchange(lport);
1683
1684 fp = fc_frame_alloc(lport, sizeof(*logo));
1685 if (!fp) {
1686 fc_lport_error(lport, fp);
1687 return;
1688 }
1689
1690 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
1691 fc_lport_logo_resp, lport,
1692 2 * lport->r_a_tov))
1693 fc_lport_error(lport, NULL);
1694 }
1695
1696 /**
1697 * fc_lport_flogi_resp() - Handle response to FLOGI request
1698 * @sp: The sequence that the FLOGI was on
1699 * @fp: The FLOGI response frame
1700 * @lp_arg: The local port that received the FLOGI response
1701 *
1702 * Locking Note: This function will be called without the lport lock
1703 * held, but it will lock, call an _enter_* function or fc_lport_error()
1704 * and then unlock the lport.
1705 */
1706 void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1707 void *lp_arg)
1708 {
1709 struct fc_lport *lport = lp_arg;
1710 struct fc_frame_header *fh;
1711 struct fc_els_flogi *flp;
1712 u32 did;
1713 u16 csp_flags;
1714 unsigned int r_a_tov;
1715 unsigned int e_d_tov;
1716 u16 mfs;
1717
1718 FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));
1719
1720 if (fp == ERR_PTR(-FC_EX_CLOSED))
1721 return;
1722
1723 mutex_lock(&lport->lp_mutex);
1724
1725 if (lport->state != LPORT_ST_FLOGI) {
1726 FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
1727 "%s\n", fc_lport_state(lport));
1728 if (IS_ERR(fp))
1729 goto err;
1730 goto out;
1731 }
1732
1733 if (IS_ERR(fp)) {
1734 fc_lport_error(lport, fp);
1735 goto err;
1736 }
1737
1738 fh = fc_frame_header_get(fp);
1739 did = fc_frame_did(fp);
1740 if (fh->fh_r_ctl != FC_RCTL_ELS_REP || did == 0 ||
1741 fc_frame_payload_op(fp) != ELS_LS_ACC) {
1742 FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
1743 fc_lport_error(lport, fp);
1744 goto err;
1745 }
1746
1747 flp = fc_frame_payload_get(fp, sizeof(*flp));
1748 if (!flp) {
1749 FC_LPORT_DBG(lport, "FLOGI bad response\n");
1750 fc_lport_error(lport, fp);
1751 goto err;
1752 }
1753
1754 mfs = ntohs(flp->fl_csp.sp_bb_data) &
1755 FC_SP_BB_DATA_MASK;
1756
1757 if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
1758 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
1759 "lport->mfs:%hu\n", mfs, lport->mfs);
1760 fc_lport_error(lport, fp);
1761 goto err;
1762 }
1763
1764 if (mfs <= lport->mfs) {
1765 lport->mfs = mfs;
1766 fc_host_maxframe_size(lport->host) = mfs;
1767 }
1768
1769 csp_flags = ntohs(flp->fl_csp.sp_features);
1770 r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
1771 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
1772 if (csp_flags & FC_SP_FT_EDTR)
1773 e_d_tov /= 1000000;
1774
1775 lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
1776
1777 if ((csp_flags & FC_SP_FT_FPORT) == 0) {
1778 if (e_d_tov > lport->e_d_tov)
1779 lport->e_d_tov = e_d_tov;
1780 lport->r_a_tov = 2 * lport->e_d_tov;
1781 fc_lport_set_port_id(lport, did, fp);
1782 printk(KERN_INFO "host%d: libfc: "
1783 "Port (%6.6x) entered "
1784 "point-to-point mode\n",
1785 lport->host->host_no, did);
1786 fc_lport_ptp_setup(lport, fc_frame_sid(fp),
1787 get_unaligned_be64(
1788 &flp->fl_wwpn),
1789 get_unaligned_be64(
1790 &flp->fl_wwnn));
1791 } else {
1792 if (e_d_tov > lport->e_d_tov)
1793 lport->e_d_tov = e_d_tov;
1794 if (r_a_tov > lport->r_a_tov)
1795 lport->r_a_tov = r_a_tov;
1796 fc_host_fabric_name(lport->host) =
1797 get_unaligned_be64(&flp->fl_wwnn);
1798 fc_lport_set_port_id(lport, did, fp);
1799 fc_lport_enter_dns(lport);
1800 }
1801
1802 out:
1803 fc_frame_free(fp);
1804 err:
1805 mutex_unlock(&lport->lp_mutex);
1806 }
1807 EXPORT_SYMBOL(fc_lport_flogi_resp);
1808
1809 /**
1810 * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
1811 * @lport: Fibre Channel local port to be logged in to the fabric
1812 *
1813 * Locking Note: The lport lock is expected to be held before calling
1814 * this routine.
1815 */
1816 static void fc_lport_enter_flogi(struct fc_lport *lport)
1817 {
1818 struct fc_frame *fp;
1819
1820 FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
1821 fc_lport_state(lport));
1822
1823 fc_lport_state_enter(lport, LPORT_ST_FLOGI);
1824
1825 if (lport->point_to_multipoint) {
1826 if (lport->port_id)
1827 fc_lport_enter_ready(lport);
1828 return;
1829 }
1830
1831 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
1832 if (!fp)
1833 return fc_lport_error(lport, fp);
1834
1835 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
1836 lport->vport ? ELS_FDISC : ELS_FLOGI,
1837 fc_lport_flogi_resp, lport,
1838 lport->vport ? 2 * lport->r_a_tov :
1839 lport->e_d_tov))
1840 fc_lport_error(lport, NULL);
1841 }
1842
1843 /**
1844 * fc_lport_config() - Configure a fc_lport
1845 * @lport: The local port to be configured
1846 */
1847 int fc_lport_config(struct fc_lport *lport)
1848 {
1849 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
1850 mutex_init(&lport->lp_mutex);
1851
1852 fc_lport_state_enter(lport, LPORT_ST_DISABLED);
1853
1854 fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
1855 fc_lport_add_fc4_type(lport, FC_TYPE_CT);
1856 fc_fc4_conf_lport_params(lport, FC_TYPE_FCP);
1857
1858 return 0;
1859 }
1860 EXPORT_SYMBOL(fc_lport_config);
1861
1862 /**
1863 * fc_lport_init() - Initialize the lport layer for a local port
1864 * @lport: The local port to initialize the exchange layer for
1865 */
1866 int fc_lport_init(struct fc_lport *lport)
1867 {
1868 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1869 fc_host_node_name(lport->host) = lport->wwnn;
1870 fc_host_port_name(lport->host) = lport->wwpn;
1871 fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
1872 memset(fc_host_supported_fc4s(lport->host), 0,
1873 sizeof(fc_host_supported_fc4s(lport->host)));
1874 fc_host_supported_fc4s(lport->host)[2] = 1;
1875 fc_host_supported_fc4s(lport->host)[7] = 1;
1876
1877 /* This value is also unchanging */
1878 memset(fc_host_active_fc4s(lport->host), 0,
1879 sizeof(fc_host_active_fc4s(lport->host)));
1880 fc_host_active_fc4s(lport->host)[2] = 1;
1881 fc_host_active_fc4s(lport->host)[7] = 1;
1882 fc_host_maxframe_size(lport->host) = lport->mfs;
1883 fc_host_supported_speeds(lport->host) = 0;
1884 if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
1885 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
1886 if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
1887 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
1888 fc_fc4_add_lport(lport);
1889
1890 return 0;
1891 }
1892 EXPORT_SYMBOL(fc_lport_init);
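/*
 * Usage sketch (illustrative only): fc_lport_init() publishes lport->wwnn,
 * lport->wwpn and lport->mfs through the fc_host attributes, so those fields
 * should be filled in first and the Scsi_Host must already be registered so
 * that the FC transport attribute area exists.  A rough bring-up order,
 * loosely modeled on the in-tree FCoE drivers (details and extra steps vary
 * per driver):
 *
 *	fc_lport_config(lport);
 *	fc_set_wwnn(lport, wwnn);
 *	fc_set_wwpn(lport, wwpn);
 *	fc_set_mfs(lport, mfs);
 *	scsi_add_host(lport->host, dev);
 *	fc_exch_mgr_alloc(lport, FC_CLASS_3, min_xid, max_xid, NULL);
 *	fc_exch_init(lport);
 *	fc_elsct_init(lport);
 *	fc_fcp_init(lport);
 *	fc_disc_init(lport);
 *	fc_lport_init(lport);
 *	fc_fabric_login(lport);
 *
 * wwnn, wwpn, mfs, min_xid, max_xid and dev are driver-supplied values, not
 * symbols defined here.
 */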
1893
1894 /**
1895 * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
1896 * @sp: The sequence for the FC Passthrough response
1897 * @fp: The response frame
1898 * @info_arg: The BSG info that the response is for
1899 */
1900 static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
1901 void *info_arg)
1902 {
1903 struct fc_bsg_info *info = info_arg;
1904 struct bsg_job *job = info->job;
1905 struct fc_bsg_reply *bsg_reply = job->reply;
1906 struct fc_lport *lport = info->lport;
1907 struct fc_frame_header *fh;
1908 size_t len;
1909 void *buf;
1910
1911 if (IS_ERR(fp)) {
1912 bsg_reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
1913 -ECONNABORTED : -ETIMEDOUT;
1914 job->reply_len = sizeof(uint32_t);
1915 bsg_job_done(job, bsg_reply->result,
1916 bsg_reply->reply_payload_rcv_len);
1917 kfree(info);
1918 return;
1919 }
1920
1921 mutex_lock(&lport->lp_mutex);
1922 fh = fc_frame_header_get(fp);
1923 len = fr_len(fp) - sizeof(*fh);
1924 buf = fc_frame_payload_get(fp, 0);
1925
1926 if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
1927 /* Get the response code from the first frame payload */
1928 unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
1929 ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
1930 (unsigned short)fc_frame_payload_op(fp);
1931
1932 /* Save the reply status of the job */
1933 bsg_reply->reply_data.ctels_reply.status =
1934 (cmd == info->rsp_code) ?
1935 FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
1936 }
1937
1938 bsg_reply->reply_payload_rcv_len +=
1939 fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
1940 &info->offset, NULL);
1941
1942 if (fr_eof(fp) == FC_EOF_T &&
1943 (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1944 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1945 if (bsg_reply->reply_payload_rcv_len >
1946 job->reply_payload.payload_len)
1947 bsg_reply->reply_payload_rcv_len =
1948 job->reply_payload.payload_len;
1949 bsg_reply->result = 0;
1950 bsg_job_done(job, bsg_reply->result,
1951 bsg_reply->reply_payload_rcv_len);
1952 kfree(info);
1953 }
1954 fc_frame_free(fp);
1955 mutex_unlock(&lport->lp_mutex);
1956 }
1957
1958 /**
1959 * fc_lport_els_request() - Send ELS passthrough request
1960 * @job: The BSG Passthrough job
1961 * @lport: The local port sending the request
1962 * @did: The destination port id
 * @tov: The timeout period to wait for the response
1963 *
1964 * Locking Note: The lport lock is expected to be held before calling
1965 * this routine.
1966 */
1967 static int fc_lport_els_request(struct bsg_job *job,
1968 struct fc_lport *lport,
1969 u32 did, u32 tov)
1970 {
1971 struct fc_bsg_info *info;
1972 struct fc_frame *fp;
1973 struct fc_frame_header *fh;
1974 char *pp;
1975 int len;
1976
1977 fp = fc_frame_alloc(lport, job->request_payload.payload_len);
1978 if (!fp)
1979 return -ENOMEM;
1980
1981 len = job->request_payload.payload_len;
1982 pp = fc_frame_payload_get(fp, len);
1983
1984 sg_copy_to_buffer(job->request_payload.sg_list,
1985 job->request_payload.sg_cnt,
1986 pp, len);
1987
1988 fh = fc_frame_header_get(fp);
1989 fh->fh_r_ctl = FC_RCTL_ELS_REQ;
1990 hton24(fh->fh_d_id, did);
1991 hton24(fh->fh_s_id, lport->port_id);
1992 fh->fh_type = FC_TYPE_ELS;
1993 hton24(fh->fh_f_ctl, FC_FCTL_REQ);
1994 fh->fh_cs_ctl = 0;
1995 fh->fh_df_ctl = 0;
1996 fh->fh_parm_offset = 0;
1997
1998 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
1999 if (!info) {
2000 fc_frame_free(fp);
2001 return -ENOMEM;
2002 }
2003
2004 info->job = job;
2005 info->lport = lport;
2006 info->rsp_code = ELS_LS_ACC;
2007 info->nents = job->reply_payload.sg_cnt;
2008 info->sg = job->reply_payload.sg_list;
2009
2010 if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
2011 NULL, info, tov)) {
2012 kfree(info);
2013 return -ECOMM;
2014 }
2015 return 0;
2016 }
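/*
 * Usage sketch (illustrative only): this routine only builds the FC frame
 * header; the bsg request payload is copied verbatim, so it must already be
 * a complete ELS payload beginning with the ELS opcode.  An ECHO request
 * (opcode 0x10), for example, could be laid out by the requester as:
 *
 *	u8 echo[8] = {
 *		0x10, 0, 0, 0,		// ELS_ECHO opcode plus reserved bytes
 *		'l', 'i', 'b', 'f',	// arbitrary data echoed back by the peer
 *	};
 *
 * The array name and the data bytes are made up for illustration.
 */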
2017
2018 /**
2019 * fc_lport_ct_request() - Send CT Passthrough request
2020 * @job: The BSG Passthrough job
2021 * @lport: The local port sending the request
2022 * @did: The destination FC-ID
2023 * @tov: The timeout period to wait for the response
2024 *
2025 * Locking Note: The lport lock is expected to be held before calling
2026 * this routine.
2027 */
2028 static int fc_lport_ct_request(struct bsg_job *job,
2029 struct fc_lport *lport, u32 did, u32 tov)
2030 {
2031 struct fc_bsg_info *info;
2032 struct fc_frame *fp;
2033 struct fc_frame_header *fh;
2034 struct fc_ct_req *ct;
2035 size_t len;
2036
2037 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
2038 job->request_payload.payload_len);
2039 if (!fp)
2040 return -ENOMEM;
2041
2042 len = job->request_payload.payload_len;
2043 ct = fc_frame_payload_get(fp, len);
2044
2045 sg_copy_to_buffer(job->request_payload.sg_list,
2046 job->request_payload.sg_cnt,
2047 ct, len);
2048
2049 fh = fc_frame_header_get(fp);
2050 fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
2051 hton24(fh->fh_d_id, did);
2052 hton24(fh->fh_s_id, lport->port_id);
2053 fh->fh_type = FC_TYPE_CT;
2054 hton24(fh->fh_f_ctl, FC_FCTL_REQ);
2055 fh->fh_cs_ctl = 0;
2056 fh->fh_df_ctl = 0;
2057 fh->fh_parm_offset = 0;
2058
2059 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
2060 if (!info) {
2061 fc_frame_free(fp);
2062 return -ENOMEM;
2063 }
2064
2065 info->job = job;
2066 info->lport = lport;
2067 info->rsp_code = FC_FS_ACC;
2068 info->nents = job->reply_payload.sg_cnt;
2069 info->sg = job->reply_payload.sg_list;
2070
2071 if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
2072 NULL, info, tov)) {
2073 kfree(info);
2074 return -ECOMM;
2075 }
2076 return 0;
2077 }
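/*
 * Usage sketch (illustrative only): as in the ELS case, the bsg request
 * payload is copied verbatim, so it must be a complete CT IU, i.e. the
 * 16-byte CT preamble followed by the command-specific payload.  A GPN_ID
 * name-server query, for instance, would begin with a preamble along these
 * lines (fc_lport_bsg_resp() later compares the returned ct_cmd against
 * info->rsp_code, FC_FS_ACC):
 *
 *	0x01,			// revision
 *	0x00, 0x00, 0x00,	// IN_ID
 *	0xfc, 0x02,		// GS type: directory service, subtype: name server
 *	0x00, 0x00,		// options, reserved
 *	0x01, 0x12,		// command code: GPN_ID (0x0112)
 *	0x00, 0x00,		// maximum/residual size
 *	0x00, 0x00, 0x00, 0x00,	// reserved, reason, explanation, vendor
 *
 * followed by a 4-byte port ID field naming the N_Port being queried.
 */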
2078
2079 /**
2080 * fc_lport_bsg_request() - The common entry point for sending
2081 * FC Passthrough requests
2082 * @job: The BSG passthrough job
2083 */
2084 int fc_lport_bsg_request(struct bsg_job *job)
2085 {
2086 struct fc_bsg_request *bsg_request = job->request;
2087 struct fc_bsg_reply *bsg_reply = job->reply;
2088 struct request *rsp = job->req->next_rq;
2089 struct Scsi_Host *shost = fc_bsg_to_shost(job);
2090 struct fc_lport *lport = shost_priv(shost);
2091 struct fc_rport *rport;
2092 struct fc_rport_priv *rdata;
2093 int rc = -EINVAL;
2094 u32 did, tov;
2095
2096 bsg_reply->reply_payload_rcv_len = 0;
2097 if (rsp)
2098 scsi_req(rsp)->resid_len = job->reply_payload.payload_len;
2099
2100 mutex_lock(&lport->lp_mutex);
2101
2102 switch (bsg_request->msgcode) {
2103 case FC_BSG_RPT_ELS:
2104 rport = fc_bsg_to_rport(job);
2105 if (!rport)
2106 break;
2107
2108 rdata = rport->dd_data;
2109 rc = fc_lport_els_request(job, lport, rport->port_id,
2110 rdata->e_d_tov);
2111 break;
2112
2113 case FC_BSG_RPT_CT:
2114 rport = fc_bsg_to_rport(job);
2115 if (!rport)
2116 break;
2117
2118 rdata = rport->dd_data;
2119 rc = fc_lport_ct_request(job, lport, rport->port_id,
2120 rdata->e_d_tov);
2121 break;
2122
2123 case FC_BSG_HST_CT:
2124 did = ntoh24(bsg_request->rqst_data.h_ct.port_id);
2125 if (did == FC_FID_DIR_SERV) {
2126 rdata = lport->dns_rdata;
2127 if (!rdata)
2128 break;
2129 tov = rdata->e_d_tov;
2130 } else {
2131 rdata = fc_rport_lookup(lport, did);
2132 if (!rdata)
2133 break;
2134 tov = rdata->e_d_tov;
2135 kref_put(&rdata->kref, fc_rport_destroy);
2136 }
2137
2138 rc = fc_lport_ct_request(job, lport, did, tov);
2139 break;
2140
2141 case FC_BSG_HST_ELS_NOLOGIN:
2142 did = ntoh24(bsg_request->rqst_data.h_els.port_id);
2143 rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
2144 break;
2145 }
2146
2147 mutex_unlock(&lport->lp_mutex);
2148 return rc;
2149 }
2150 EXPORT_SYMBOL(fc_lport_bsg_request);
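/*
 * Usage sketch (illustrative only): these passthrough requests arrive through
 * the FC transport's bsg node, typically /dev/bsg/fc_host<N> for the
 * FC_BSG_HST_* message codes (the node name and the queried FC-ID below are
 * assumptions for the example).  A rough userspace sketch of a GPN_ID query
 * via FC_BSG_HST_CT, assuming the lport is already logged into the fabric so
 * that lport->dns_rdata exists; consult <linux/bsg.h> and
 * <scsi/scsi_bsg_fc.h> for the authoritative structure definitions:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>		// SG_IO
 *	#include <linux/bsg.h>		// struct sg_io_v4
 *	#include <scsi/scsi_bsg_fc.h>	// struct fc_bsg_request / fc_bsg_reply
 *
 *	struct fc_bsg_request req = { .msgcode = FC_BSG_HST_CT };
 *	struct fc_bsg_reply rsp = { 0 };
 *	unsigned char ct_req[20] = {
 *		0x01, 0, 0, 0, 0xfc, 0x02, 0, 0,	// CT preamble:
 *		0x01, 0x12, 0, 0, 0, 0, 0, 0,		//   GPN_ID (0x0112)
 *		0x00, 0x01, 0x02, 0x03,			// FC-ID queried (example)
 *	};
 *	unsigned char ct_rsp[64];			// preamble + 8-byte WWPN
 *	struct sg_io_v4 sgio = { .guard = 'Q' };
 *	int fd = open("/dev/bsg/fc_host0", O_RDWR);	// assumed node name
 *
 *	// Address the directory server; the kernel then uses lport->dns_rdata.
 *	req.rqst_data.h_ct.port_id[0] = 0xff;
 *	req.rqst_data.h_ct.port_id[1] = 0xff;
 *	req.rqst_data.h_ct.port_id[2] = 0xfc;
 *
 *	sgio.protocol = BSG_PROTOCOL_SCSI;
 *	sgio.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *	sgio.request_len = sizeof(req);
 *	sgio.request = (unsigned long)&req;
 *	sgio.dout_xfer_len = sizeof(ct_req);
 *	sgio.dout_xferp = (unsigned long)ct_req;
 *	sgio.din_xfer_len = sizeof(ct_rsp);
 *	sgio.din_xferp = (unsigned long)ct_rsp;
 *	sgio.max_response_len = sizeof(rsp);
 *	sgio.response = (unsigned long)&rsp;
 *	sgio.timeout = 5000;				// milliseconds
 *
 *	if (ioctl(fd, SG_IO, &sgio) == 0 && rsp.result == 0) {
 *		// ct_rsp holds the CT accept (preamble + 8-byte WWPN) and
 *		// rsp.reply_payload_rcv_len gives the valid length, both
 *		// filled in by fc_lport_bsg_resp() above.
 *	}
 */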