1 /*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20 /*
21 * RPORT GENERAL INFO
22 *
23 * This file contains all processing regarding fc_rports. It contains the
24 * rport state machine and does all rport interaction with the transport class.
25 * There should be no other places in libfc that interact directly with the
26 * transport class in regards to adding and deleting rports.
27 *
28 * fc_rports represent N_Ports within the fabric.
29 */
30
31 /*
32 * RPORT LOCKING
33 *
34 * The rport should never hold the rport mutex and then attempt to acquire
35 * either the lport or disc mutexes. The rport's mutex is considered lesser
36 * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
37 * more comments on the hierarchy.
38 *
39 * The locking strategy is similar to the lport's strategy. The lock protects
40 * the rport's states and is held and released by the entry points to the rport
41 * block. All _enter_* functions correspond to rport states and expect the rport
42 * mutex to be locked before calling them. This means that rports only handle
43 * one request or response at a time; since they are not critical to the I/O
44 * path, this potential over-use of the mutex is acceptable.
45 */
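/*
 * Illustrative sketch of the entry-point convention described above
 * (see fc_rport_login() below for an actual example):
 *
 *	mutex_lock(&rdata->rp_mutex);
 *	fc_rport_enter_plogi(rdata);	(an _enter_* routine, lock held)
 *	mutex_unlock(&rdata->rp_mutex);
 */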
46
47 #include <linux/kernel.h>
48 #include <linux/spinlock.h>
49 #include <linux/interrupt.h>
50 #include <linux/rcupdate.h>
51 #include <linux/timer.h>
52 #include <linux/workqueue.h>
53 #include <asm/unaligned.h>
54
55 #include <scsi/libfc.h>
56 #include <scsi/fc_encode.h>
57
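/*
 * Single-threaded workqueue (allocated in fc_setup_rport()) on which all
 * rport event work items are run; see fc_rport_work() below.
 */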
58 struct workqueue_struct *rport_event_queue;
59
60 static void fc_rport_enter_plogi(struct fc_rport_priv *);
61 static void fc_rport_enter_prli(struct fc_rport_priv *);
62 static void fc_rport_enter_rtv(struct fc_rport_priv *);
63 static void fc_rport_enter_ready(struct fc_rport_priv *);
64 static void fc_rport_enter_logo(struct fc_rport_priv *);
65 static void fc_rport_enter_adisc(struct fc_rport_priv *);
66
67 static void fc_rport_recv_plogi_req(struct fc_lport *,
68 struct fc_seq *, struct fc_frame *);
69 static void fc_rport_recv_prli_req(struct fc_rport_priv *,
70 struct fc_seq *, struct fc_frame *);
71 static void fc_rport_recv_prlo_req(struct fc_rport_priv *,
72 struct fc_seq *, struct fc_frame *);
73 static void fc_rport_recv_logo_req(struct fc_lport *,
74 struct fc_seq *, struct fc_frame *);
75 static void fc_rport_timeout(struct work_struct *);
76 static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
77 static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
78 static void fc_rport_work(struct work_struct *);
79
80 static const char *fc_rport_state_names[] = {
81 [RPORT_ST_INIT] = "Init",
82 [RPORT_ST_PLOGI] = "PLOGI",
83 [RPORT_ST_PRLI] = "PRLI",
84 [RPORT_ST_RTV] = "RTV",
85 [RPORT_ST_READY] = "Ready",
86 [RPORT_ST_LOGO] = "LOGO",
87 [RPORT_ST_ADISC] = "ADISC",
88 [RPORT_ST_DELETE] = "Delete",
89 };
90
91 /**
92 * fc_rport_lookup() - lookup a remote port by port_id
93 * @lport: Fibre Channel host port instance
94 * @port_id: remote port port_id to match
95 */
96 static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
97 u32 port_id)
98 {
99 struct fc_rport_priv *rdata;
100
101 list_for_each_entry(rdata, &lport->disc.rports, peers)
102 if (rdata->ids.port_id == port_id &&
103 rdata->rp_state != RPORT_ST_DELETE)
104 return rdata;
105 return NULL;
106 }
107
108 /**
109 * fc_rport_create() - Create a new remote port
110 * @lport: The local port that the new remote port is for
111 * @port_id: The port ID for the new remote port
112 *
113 * Locking note: must be called with the disc_mutex held.
114 */
115 static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
116 u32 port_id)
117 {
118 struct fc_rport_priv *rdata;
119
120 rdata = lport->tt.rport_lookup(lport, port_id);
121 if (rdata)
122 return rdata;
123
124 rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
125 if (!rdata)
126 return NULL;
127
128 rdata->ids.node_name = -1;
129 rdata->ids.port_name = -1;
130 rdata->ids.port_id = port_id;
131 rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
132
133 kref_init(&rdata->kref);
134 mutex_init(&rdata->rp_mutex);
135 rdata->local_port = lport;
136 rdata->rp_state = RPORT_ST_INIT;
137 rdata->event = RPORT_EV_NONE;
138 rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
139 rdata->e_d_tov = lport->e_d_tov;
140 rdata->r_a_tov = lport->r_a_tov;
141 rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
142 INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
143 INIT_WORK(&rdata->event_work, fc_rport_work);
144 if (port_id != FC_FID_DIR_SERV)
145 list_add(&rdata->peers, &lport->disc.rports);
146 return rdata;
147 }
148
149 /**
150 * fc_rport_destroy() - free a remote port after last reference is released.
151 * @kref: pointer to kref inside struct fc_rport_priv
152 */
153 static void fc_rport_destroy(struct kref *kref)
154 {
155 struct fc_rport_priv *rdata;
156
157 rdata = container_of(kref, struct fc_rport_priv, kref);
158 kfree(rdata);
159 }
160
161 /**
162 * fc_rport_state() - return a string for the state the rport is in
163 * @rdata: remote port private data
164 */
165 static const char *fc_rport_state(struct fc_rport_priv *rdata)
166 {
167 const char *cp;
168
169 cp = fc_rport_state_names[rdata->rp_state];
170 if (!cp)
171 cp = "Unknown";
172 return cp;
173 }
174
175 /**
176 * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
177 * @rport: Pointer to Fibre Channel remote port structure
178 * @timeout: timeout in seconds
179 */
180 void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
181 {
182 if (timeout)
183 rport->dev_loss_tmo = timeout + 5;
184 else
185 rport->dev_loss_tmo = 30;
186 }
187 EXPORT_SYMBOL(fc_set_rport_loss_tmo);
188
189 /**
190 * fc_plogi_get_maxframe() - Get max payload from the common service parameters
191 * @flp: FLOGI payload structure
192 * @maxval: upper limit, may be less than what is in the service parameters
193 */
194 static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
195 unsigned int maxval)
196 {
197 unsigned int mfs;
198
199 /*
200 * Get max payload from the common service parameters and the
201 * class 3 receive data field size.
202 */
203 mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
204 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
205 maxval = mfs;
206 mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
207 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
208 maxval = mfs;
209 return maxval;
210 }
211
212 /**
213 * fc_rport_state_enter() - Change the rport's state
214 * @rdata: The rport whose state should change
215 * @new: The new state of the rport
216 *
217 * Locking Note: Called with the rport lock held
218 */
219 static void fc_rport_state_enter(struct fc_rport_priv *rdata,
220 enum fc_rport_state new)
221 {
222 if (rdata->rp_state != new)
223 rdata->retries = 0;
224 rdata->rp_state = new;
225 }
226
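/**
 * fc_rport_work() - Handler for remote port events on the rport_event_queue
 * @work: The work struct of the fc_rport_priv
 *
 * Locking Note: Called without the rport lock held. The lock is taken and
 * dropped around the event handling; fc_remote_port_add(),
 * fc_remote_port_delete() and the event callback run with it released.
 */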
227 static void fc_rport_work(struct work_struct *work)
228 {
229 u32 port_id;
230 struct fc_rport_priv *rdata =
231 container_of(work, struct fc_rport_priv, event_work);
232 struct fc_rport_libfc_priv *rp;
233 enum fc_rport_event event;
234 struct fc_lport *lport = rdata->local_port;
235 struct fc_rport_operations *rport_ops;
236 struct fc_rport_identifiers ids;
237 struct fc_rport *rport;
238
239 mutex_lock(&rdata->rp_mutex);
240 event = rdata->event;
241 rport_ops = rdata->ops;
242 rport = rdata->rport;
243
244 FC_RPORT_DBG(rdata, "work event %u\n", event);
245
246 switch (event) {
247 case RPORT_EV_READY:
248 ids = rdata->ids;
249 rdata->event = RPORT_EV_NONE;
250 kref_get(&rdata->kref);
251 mutex_unlock(&rdata->rp_mutex);
252
253 if (!rport)
254 rport = fc_remote_port_add(lport->host, 0, &ids);
255 if (!rport) {
256 FC_RPORT_DBG(rdata, "Failed to add the rport\n");
257 lport->tt.rport_logoff(rdata);
258 kref_put(&rdata->kref, lport->tt.rport_destroy);
259 return;
260 }
261 mutex_lock(&rdata->rp_mutex);
262 if (rdata->rport)
263 FC_RPORT_DBG(rdata, "rport already allocated\n");
264 rdata->rport = rport;
265 rport->maxframe_size = rdata->maxframe_size;
266 rport->supported_classes = rdata->supported_classes;
267
268 rp = rport->dd_data;
269 rp->local_port = lport;
270 rp->rp_state = rdata->rp_state;
271 rp->flags = rdata->flags;
272 rp->e_d_tov = rdata->e_d_tov;
273 rp->r_a_tov = rdata->r_a_tov;
274 mutex_unlock(&rdata->rp_mutex);
275
276 if (rport_ops && rport_ops->event_callback) {
277 FC_RPORT_DBG(rdata, "callback ev %d\n", event);
278 rport_ops->event_callback(lport, rdata, event);
279 }
280 kref_put(&rdata->kref, lport->tt.rport_destroy);
281 break;
282
283 case RPORT_EV_FAILED:
284 case RPORT_EV_LOGO:
285 case RPORT_EV_STOP:
286 port_id = rdata->ids.port_id;
287 mutex_unlock(&rdata->rp_mutex);
288
289 if (port_id != FC_FID_DIR_SERV) {
290 mutex_lock(&lport->disc.disc_mutex);
291 list_del(&rdata->peers);
292 mutex_unlock(&lport->disc.disc_mutex);
293 }
294
295 if (rport_ops && rport_ops->event_callback) {
296 FC_RPORT_DBG(rdata, "callback ev %d\n", event);
297 rport_ops->event_callback(lport, rdata, event);
298 }
299 cancel_delayed_work_sync(&rdata->retry_work);
300
301 /*
302 * Reset any outstanding exchanges before freeing rport.
303 */
304 lport->tt.exch_mgr_reset(lport, 0, port_id);
305 lport->tt.exch_mgr_reset(lport, port_id, 0);
306
307 if (rport) {
308 rp = rport->dd_data;
309 rp->rp_state = RPORT_ST_DELETE;
310 mutex_lock(&rdata->rp_mutex);
311 rdata->rport = NULL;
312 mutex_unlock(&rdata->rp_mutex);
313 fc_remote_port_delete(rport);
314 }
315 kref_put(&rdata->kref, lport->tt.rport_destroy);
316 break;
317
318 default:
319 mutex_unlock(&rdata->rp_mutex);
320 break;
321 }
322 }
323
324 /**
325 * fc_rport_login() - Start the remote port login state machine
326 * @rdata: private remote port
327 *
328 * Locking Note: Called without the rport lock held. This
329 * function will hold the rport lock, call an _enter_*
330 * function and then unlock the rport.
331 *
332 * This indicates the intent to be logged into the remote port.
333 * If it appears we are already logged in, ADISC is used to verify
334 * the setup.
335 */
336 int fc_rport_login(struct fc_rport_priv *rdata)
337 {
338 mutex_lock(&rdata->rp_mutex);
339
340 switch (rdata->rp_state) {
341 case RPORT_ST_READY:
342 FC_RPORT_DBG(rdata, "ADISC port\n");
343 fc_rport_enter_adisc(rdata);
344 break;
345 default:
346 FC_RPORT_DBG(rdata, "Login to port\n");
347 fc_rport_enter_plogi(rdata);
348 break;
349 }
350 mutex_unlock(&rdata->rp_mutex);
351
352 return 0;
353 }
354
355 /**
356 * fc_rport_enter_delete() - schedule a remote port to be deleted.
357 * @rdata: private remote port
358 * @event: event to report as the reason for deletion
359 *
360 * Locking Note: Called with the rport lock held.
361 *
362 * Allow state change into DELETE only once.
363 *
364 * Call queue_work only if there's no event already pending.
365 * Set the new event so that the old pending event will not occur.
366 * Since we have the mutex, even if fc_rport_work() is already started,
367 * it'll see the new event.
368 */
369 static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
370 enum fc_rport_event event)
371 {
372 if (rdata->rp_state == RPORT_ST_DELETE)
373 return;
374
375 FC_RPORT_DBG(rdata, "Delete port\n");
376
377 fc_rport_state_enter(rdata, RPORT_ST_DELETE);
378
379 if (rdata->event == RPORT_EV_NONE)
380 queue_work(rport_event_queue, &rdata->event_work);
381 rdata->event = event;
382 }
383
384 /**
385 * fc_rport_logoff() - Logoff and remove an rport
386 * @rdata: private remote port
387 *
388 * Locking Note: Called without the rport lock held. This
389 * function will hold the rport lock, call an _enter_*
390 * function and then unlock the rport.
391 */
392 int fc_rport_logoff(struct fc_rport_priv *rdata)
393 {
394 mutex_lock(&rdata->rp_mutex);
395
396 FC_RPORT_DBG(rdata, "Remove port\n");
397
398 if (rdata->rp_state == RPORT_ST_DELETE) {
399 FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
400 mutex_unlock(&rdata->rp_mutex);
401 goto out;
402 }
403
404 fc_rport_enter_logo(rdata);
405
406 /*
407 * Change the state to Delete so that we discard
408 * the response.
409 */
410 fc_rport_enter_delete(rdata, RPORT_EV_STOP);
411 mutex_unlock(&rdata->rp_mutex);
412
413 out:
414 return 0;
415 }
416
417 /**
418 * fc_rport_enter_ready() - The rport is ready
419 * @rdata: private remote port
420 *
421 * Locking Note: The rport lock is expected to be held before calling
422 * this routine.
423 */
424 static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
425 {
426 fc_rport_state_enter(rdata, RPORT_ST_READY);
427
428 FC_RPORT_DBG(rdata, "Port is Ready\n");
429
430 if (rdata->event == RPORT_EV_NONE)
431 queue_work(rport_event_queue, &rdata->event_work);
432 rdata->event = RPORT_EV_READY;
433 }
434
435 /**
436 * fc_rport_timeout() - Handler for the retry_work timer.
437 * @work: The work struct of the fc_rport_priv
438 *
439 * Locking Note: Called without the rport lock held. This
440 * function will hold the rport lock, call an _enter_*
441 * function and then unlock the rport.
442 */
443 static void fc_rport_timeout(struct work_struct *work)
444 {
445 struct fc_rport_priv *rdata =
446 container_of(work, struct fc_rport_priv, retry_work.work);
447
448 mutex_lock(&rdata->rp_mutex);
449
450 switch (rdata->rp_state) {
451 case RPORT_ST_PLOGI:
452 fc_rport_enter_plogi(rdata);
453 break;
454 case RPORT_ST_PRLI:
455 fc_rport_enter_prli(rdata);
456 break;
457 case RPORT_ST_RTV:
458 fc_rport_enter_rtv(rdata);
459 break;
460 case RPORT_ST_LOGO:
461 fc_rport_enter_logo(rdata);
462 break;
463 case RPORT_ST_ADISC:
464 fc_rport_enter_adisc(rdata);
465 break;
466 case RPORT_ST_READY:
467 case RPORT_ST_INIT:
468 case RPORT_ST_DELETE:
469 break;
470 }
471
472 mutex_unlock(&rdata->rp_mutex);
473 }
474
475 /**
476 * fc_rport_error() - Error handler, called once retries have been exhausted
477 * @rdata: private remote port
478 * @fp: The frame pointer
479 *
480 * Locking Note: The rport lock is expected to be held before
481 * calling this routine
482 */
483 static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
484 {
485 FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
486 IS_ERR(fp) ? -PTR_ERR(fp) : 0,
487 fc_rport_state(rdata), rdata->retries);
488
489 switch (rdata->rp_state) {
490 case RPORT_ST_PLOGI:
491 case RPORT_ST_LOGO:
492 fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
493 break;
494 case RPORT_ST_RTV:
495 fc_rport_enter_ready(rdata);
496 break;
497 case RPORT_ST_PRLI:
498 case RPORT_ST_ADISC:
499 fc_rport_enter_logo(rdata);
500 break;
501 case RPORT_ST_DELETE:
502 case RPORT_ST_READY:
503 case RPORT_ST_INIT:
504 break;
505 }
506 }
507
508 /**
509 * fc_rport_error_retry() - Error handler when retries are desired
510 * @rdata: private remote port data
511 * @fp: The frame pointer
512 *
513 * If the error was an exchange timeout retry immediately,
514 * otherwise wait for E_D_TOV.
515 *
516 * Locking Note: The rport lock is expected to be held before
517 * calling this routine
518 */
519 static void fc_rport_error_retry(struct fc_rport_priv *rdata,
520 struct fc_frame *fp)
521 {
522 unsigned long delay = FC_DEF_E_D_TOV;
523
524 /* make sure this isn't an FC_EX_CLOSED error, never retry those */
525 if (PTR_ERR(fp) == -FC_EX_CLOSED)
526 return fc_rport_error(rdata, fp);
527
528 if (rdata->retries < rdata->local_port->max_rport_retry_count) {
529 FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
530 PTR_ERR(fp), fc_rport_state(rdata));
531 rdata->retries++;
532 /* no additional delay on exchange timeouts */
533 if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
534 delay = 0;
535 schedule_delayed_work(&rdata->retry_work, delay);
536 return;
537 }
538
539 return fc_rport_error(rdata, fp);
540 }
541
542 /**
543 * fc_rport_plogi_resp() - Handle incoming ELS PLOGI response
544 * @sp: current sequence in the PLOGI exchange
545 * @fp: response frame
546 * @rdata_arg: private remote port data
547 *
548 * Locking Note: This function will be called without the rport lock
549 * held, but it will lock, call an _enter_* function or fc_rport_error
550 * and then unlock the rport.
551 */
552 static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
553 void *rdata_arg)
554 {
555 struct fc_rport_priv *rdata = rdata_arg;
556 struct fc_lport *lport = rdata->local_port;
557 struct fc_els_flogi *plp = NULL;
558 unsigned int tov;
559 u16 csp_seq;
560 u16 cssp_seq;
561 u8 op;
562
563 mutex_lock(&rdata->rp_mutex);
564
565 FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));
566
567 if (rdata->rp_state != RPORT_ST_PLOGI) {
568 FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
569 "%s\n", fc_rport_state(rdata));
570 if (IS_ERR(fp))
571 goto err;
572 goto out;
573 }
574
575 if (IS_ERR(fp)) {
576 fc_rport_error_retry(rdata, fp);
577 goto err;
578 }
579
580 op = fc_frame_payload_op(fp);
581 if (op == ELS_LS_ACC &&
582 (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
583 rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
584 rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);
585
586 tov = ntohl(plp->fl_csp.sp_e_d_tov);
587 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
588 tov /= 1000;
589 if (tov > rdata->e_d_tov)
590 rdata->e_d_tov = tov;
591 csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
592 cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
593 if (cssp_seq < csp_seq)
594 csp_seq = cssp_seq;
595 rdata->max_seq = csp_seq;
596 rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
597 fc_rport_enter_prli(rdata);
598 } else
599 fc_rport_error_retry(rdata, fp);
600
601 out:
602 fc_frame_free(fp);
603 err:
604 mutex_unlock(&rdata->rp_mutex);
605 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
606 }
607
608 /**
609 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
610 * @rdata: private remote port data
611 *
612 * Locking Note: The rport lock is expected to be held before calling
613 * this routine.
614 */
615 static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
616 {
617 struct fc_lport *lport = rdata->local_port;
618 struct fc_frame *fp;
619
620 FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
621 fc_rport_state(rdata));
622
623 fc_rport_state_enter(rdata, RPORT_ST_PLOGI);
624
625 rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
626 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
627 if (!fp) {
628 fc_rport_error_retry(rdata, fp);
629 return;
630 }
631 rdata->e_d_tov = lport->e_d_tov;
632
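/*
 * If the PLOGI is sent, take a reference for the pending response;
 * fc_rport_plogi_resp() drops it with kref_put() once the exchange
 * completes or fails.
 */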
633 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
634 fc_rport_plogi_resp, rdata, lport->e_d_tov))
635 fc_rport_error_retry(rdata, NULL);
636 else
637 kref_get(&rdata->kref);
638 }
639
640 /**
641 * fc_rport_prli_resp() - Process Login (PRLI) response handler
642 * @sp: current sequence in the PRLI exchange
643 * @fp: response frame
644 * @rdata_arg: private remote port data
645 *
646 * Locking Note: This function will be called without the rport lock
647 * held, but it will lock, call an _enter_* function or fc_rport_error
648 * and then unlock the rport.
649 */
650 static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
651 void *rdata_arg)
652 {
653 struct fc_rport_priv *rdata = rdata_arg;
654 struct {
655 struct fc_els_prli prli;
656 struct fc_els_spp spp;
657 } *pp;
658 u32 roles = FC_RPORT_ROLE_UNKNOWN;
659 u32 fcp_parm = 0;
660 u8 op;
661
662 mutex_lock(&rdata->rp_mutex);
663
664 FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));
665
666 if (rdata->rp_state != RPORT_ST_PRLI) {
667 FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
668 "%s\n", fc_rport_state(rdata));
669 if (IS_ERR(fp))
670 goto err;
671 goto out;
672 }
673
674 if (IS_ERR(fp)) {
675 fc_rport_error_retry(rdata, fp);
676 goto err;
677 }
678
679 /* reinitialize remote port roles */
680 rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
681
682 op = fc_frame_payload_op(fp);
683 if (op == ELS_LS_ACC) {
684 pp = fc_frame_payload_get(fp, sizeof(*pp));
685 if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
686 fcp_parm = ntohl(pp->spp.spp_params);
687 if (fcp_parm & FCP_SPPF_RETRY)
688 rdata->flags |= FC_RP_FLAGS_RETRY;
689 }
690
691 rdata->supported_classes = FC_COS_CLASS3;
692 if (fcp_parm & FCP_SPPF_INIT_FCN)
693 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
694 if (fcp_parm & FCP_SPPF_TARG_FCN)
695 roles |= FC_RPORT_ROLE_FCP_TARGET;
696
697 rdata->ids.roles = roles;
698 fc_rport_enter_rtv(rdata);
699
700 } else {
701 FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
702 fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
703 }
704
705 out:
706 fc_frame_free(fp);
707 err:
708 mutex_unlock(&rdata->rp_mutex);
709 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
710 }
711
712 /**
713 * fc_rport_logo_resp() - Logout (LOGO) response handler
714 * @sp: current sequence in the LOGO exchange
715 * @fp: response frame
716 * @rdata_arg: private remote port data
717 *
718 * Locking Note: This function will be called without the rport lock
719 * held, but it will lock, call an _enter_* function or fc_rport_error
720 * and then unlock the rport.
721 */
722 static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
723 void *rdata_arg)
724 {
725 struct fc_rport_priv *rdata = rdata_arg;
726 u8 op;
727
728 mutex_lock(&rdata->rp_mutex);
729
730 FC_RPORT_DBG(rdata, "Received a LOGO %s\n", fc_els_resp_type(fp));
731
732 if (rdata->rp_state != RPORT_ST_LOGO) {
733 FC_RPORT_DBG(rdata, "Received a LOGO response, but in state "
734 "%s\n", fc_rport_state(rdata));
735 if (IS_ERR(fp))
736 goto err;
737 goto out;
738 }
739
740 if (IS_ERR(fp)) {
741 fc_rport_error_retry(rdata, fp);
742 goto err;
743 }
744
745 op = fc_frame_payload_op(fp);
746 if (op != ELS_LS_ACC)
747 FC_RPORT_DBG(rdata, "Bad ELS response op %x for LOGO command\n",
748 op);
749 fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
750
751 out:
752 fc_frame_free(fp);
753 err:
754 mutex_unlock(&rdata->rp_mutex);
755 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
756 }
757
758 /**
759 * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
760 * @rdata: private remote port data
761 *
762 * Locking Note: The rport lock is expected to be held before calling
763 * this routine.
764 */
765 static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
766 {
767 struct fc_lport *lport = rdata->local_port;
768 struct {
769 struct fc_els_prli prli;
770 struct fc_els_spp spp;
771 } *pp;
772 struct fc_frame *fp;
773
774 /*
775 * If the rport is one of the well known addresses
776 * we skip PRLI and RTV and go straight to READY.
777 */
778 if (rdata->ids.port_id >= FC_FID_DOM_MGR) {
779 fc_rport_enter_ready(rdata);
780 return;
781 }
782
783 FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
784 fc_rport_state(rdata));
785
786 fc_rport_state_enter(rdata, RPORT_ST_PRLI);
787
788 fp = fc_frame_alloc(lport, sizeof(*pp));
789 if (!fp) {
790 fc_rport_error_retry(rdata, fp);
791 return;
792 }
793
794 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
795 fc_rport_prli_resp, rdata, lport->e_d_tov))
796 fc_rport_error_retry(rdata, NULL);
797 else
798 kref_get(&rdata->kref);
799 }
800
801 /**
802 * fc_rport_rtv_resp() - Request Timeout Value (RTV) response handler
803 * @sp: current sequence in the RTV exchange
804 * @fp: response frame
805 * @rdata_arg: private remote port data
806 *
807 * Many targets don't seem to support this.
808 *
809 * Locking Note: This function will be called without the rport lock
810 * held, but it will lock, call an _enter_* function or fc_rport_error
811 * and then unlock the rport.
812 */
813 static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
814 void *rdata_arg)
815 {
816 struct fc_rport_priv *rdata = rdata_arg;
817 u8 op;
818
819 mutex_lock(&rdata->rp_mutex);
820
821 FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));
822
823 if (rdata->rp_state != RPORT_ST_RTV) {
824 FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
825 "%s\n", fc_rport_state(rdata));
826 if (IS_ERR(fp))
827 goto err;
828 goto out;
829 }
830
831 if (IS_ERR(fp)) {
832 fc_rport_error(rdata, fp);
833 goto err;
834 }
835
836 op = fc_frame_payload_op(fp);
837 if (op == ELS_LS_ACC) {
838 struct fc_els_rtv_acc *rtv;
839 u32 toq;
840 u32 tov;
841
842 rtv = fc_frame_payload_get(fp, sizeof(*rtv));
843 if (rtv) {
844 toq = ntohl(rtv->rtv_toq);
845 tov = ntohl(rtv->rtv_r_a_tov);
846 if (tov == 0)
847 tov = 1;
848 rdata->r_a_tov = tov;
849 tov = ntohl(rtv->rtv_e_d_tov);
850 if (toq & FC_ELS_RTV_EDRES)
851 tov /= 1000000;
852 if (tov == 0)
853 tov = 1;
854 rdata->e_d_tov = tov;
855 }
856 }
857
858 fc_rport_enter_ready(rdata);
859
860 out:
861 fc_frame_free(fp);
862 err:
863 mutex_unlock(&rdata->rp_mutex);
864 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
865 }
866
867 /**
868 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
869 * @rdata: private remote port data
870 *
871 * Locking Note: The rport lock is expected to be held before calling
872 * this routine.
873 */
874 static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
875 {
876 struct fc_frame *fp;
877 struct fc_lport *lport = rdata->local_port;
878
879 FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
880 fc_rport_state(rdata));
881
882 fc_rport_state_enter(rdata, RPORT_ST_RTV);
883
884 fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
885 if (!fp) {
886 fc_rport_error_retry(rdata, fp);
887 return;
888 }
889
890 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
891 fc_rport_rtv_resp, rdata, lport->e_d_tov))
892 fc_rport_error_retry(rdata, NULL);
893 else
894 kref_get(&rdata->kref);
895 }
896
897 /**
898 * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
899 * @rdata: private remote port data
900 *
901 * Locking Note: The rport lock is expected to be held before calling
902 * this routine.
903 */
904 static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
905 {
906 struct fc_lport *lport = rdata->local_port;
907 struct fc_frame *fp;
908
909 FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n",
910 fc_rport_state(rdata));
911
912 fc_rport_state_enter(rdata, RPORT_ST_LOGO);
913
914 fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
915 if (!fp) {
916 fc_rport_error_retry(rdata, fp);
917 return;
918 }
919
920 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
921 fc_rport_logo_resp, rdata, lport->e_d_tov))
922 fc_rport_error_retry(rdata, NULL);
923 else
924 kref_get(&rdata->kref);
925 }
926
927 /**
928 * fc_rport_adisc_resp() - Address Discovery (ADISC) response handler
929 * @sp: current sequence in the ADISC exchange
930 * @fp: response frame
931 * @rdata_arg: remote port private.
932 *
933 * Locking Note: This function will be called without the rport lock
934 * held, but it will lock, call an _enter_* function or fc_rport_error
935 * and then unlock the rport.
936 */
937 static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
938 void *rdata_arg)
939 {
940 struct fc_rport_priv *rdata = rdata_arg;
941 struct fc_els_adisc *adisc;
942 u8 op;
943
944 mutex_lock(&rdata->rp_mutex);
945
946 FC_RPORT_DBG(rdata, "Received a ADISC response\n");
947
948 if (rdata->rp_state != RPORT_ST_ADISC) {
949 FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n",
950 fc_rport_state(rdata));
951 if (IS_ERR(fp))
952 goto err;
953 goto out;
954 }
955
956 if (IS_ERR(fp)) {
957 fc_rport_error(rdata, fp);
958 goto err;
959 }
960
961 /*
962 * If address verification failed, consider us logged out of the rport.
963 * Since the rport is still in discovery, we want to be logged in, so
964 * go back to the PLOGI state. Otherwise, return to READY.
965 */
966 op = fc_frame_payload_op(fp);
967 adisc = fc_frame_payload_get(fp, sizeof(*adisc));
968 if (op != ELS_LS_ACC || !adisc ||
969 ntoh24(adisc->adisc_port_id) != rdata->ids.port_id ||
970 get_unaligned_be64(&adisc->adisc_wwpn) != rdata->ids.port_name ||
971 get_unaligned_be64(&adisc->adisc_wwnn) != rdata->ids.node_name) {
972 FC_RPORT_DBG(rdata, "ADISC error or mismatch\n");
973 fc_rport_enter_plogi(rdata);
974 } else {
975 FC_RPORT_DBG(rdata, "ADISC OK\n");
976 fc_rport_enter_ready(rdata);
977 }
978 out:
979 fc_frame_free(fp);
980 err:
981 mutex_unlock(&rdata->rp_mutex);
982 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
983 }
984
985 /**
986 * fc_rport_enter_adisc() - Send Address Discovery (ADISC) request to peer
987 * @rdata: remote port private data
988 *
989 * Locking Note: The rport lock is expected to be held before calling
990 * this routine.
991 */
992 static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
993 {
994 struct fc_lport *lport = rdata->local_port;
995 struct fc_frame *fp;
996
997 FC_RPORT_DBG(rdata, "sending ADISC from %s state\n",
998 fc_rport_state(rdata));
999
1000 fc_rport_state_enter(rdata, RPORT_ST_ADISC);
1001
1002 fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc));
1003 if (!fp) {
1004 fc_rport_error_retry(rdata, fp);
1005 return;
1006 }
1007 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
1008 fc_rport_adisc_resp, rdata, lport->e_d_tov))
1009 fc_rport_error_retry(rdata, NULL);
1010 else
1011 kref_get(&rdata->kref);
1012 }
1013
1014 /**
1015 * fc_rport_recv_adisc_req() - Handle incoming Address Discovery (ADISC) Request
1016 * @rdata: remote port private
1017 * @sp: current sequence in the ADISC exchange
1018 * @in_fp: ADISC request frame
1019 *
1020 * Locking Note: Called with the lport and rport locks held.
1021 */
1022 static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
1023 struct fc_seq *sp, struct fc_frame *in_fp)
1024 {
1025 struct fc_lport *lport = rdata->local_port;
1026 struct fc_frame *fp;
1027 struct fc_exch *ep = fc_seq_exch(sp);
1028 struct fc_els_adisc *adisc;
1029 struct fc_seq_els_data rjt_data;
1030 u32 f_ctl;
1031
1032 FC_RPORT_DBG(rdata, "Received ADISC request\n");
1033
1034 adisc = fc_frame_payload_get(in_fp, sizeof(*adisc));
1035 if (!adisc) {
1036 rjt_data.fp = NULL;
1037 rjt_data.reason = ELS_RJT_PROT;
1038 rjt_data.explan = ELS_EXPL_INV_LEN;
1039 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1040 goto drop;
1041 }
1042
1043 fp = fc_frame_alloc(lport, sizeof(*adisc));
1044 if (!fp)
1045 goto drop;
1046 fc_adisc_fill(lport, fp);
1047 adisc = fc_frame_payload_get(fp, sizeof(*adisc));
1048 adisc->adisc_cmd = ELS_LS_ACC;
1049 sp = lport->tt.seq_start_next(sp);
1050 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1051 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1052 FC_TYPE_ELS, f_ctl, 0);
1053 lport->tt.seq_send(lport, sp, fp);
1054 drop:
1055 fc_frame_free(in_fp);
1056 }
1057
1058 /**
1059 * fc_rport_recv_els_req() - handle a validated ELS request.
1060 * @lport: Fibre Channel local port
1061 * @sp: current sequence in the incoming request exchange
1062 * @fp: ELS request frame
1063 *
1064 * Handle incoming ELS requests that require port login.
1065 * The ELS opcode has already been validated by the caller.
1066 *
1067 * Locking Note: Called with the lport lock held.
1068 */
1069 static void fc_rport_recv_els_req(struct fc_lport *lport,
1070 struct fc_seq *sp, struct fc_frame *fp)
1071 {
1072 struct fc_rport_priv *rdata;
1073 struct fc_frame_header *fh;
1074 struct fc_seq_els_data els_data;
1075
1076 els_data.fp = NULL;
1077 els_data.reason = ELS_RJT_UNAB;
1078 els_data.explan = ELS_EXPL_PLOGI_REQD;
1079
1080 fh = fc_frame_header_get(fp);
1081
1082 mutex_lock(&lport->disc.disc_mutex);
1083 rdata = lport->tt.rport_lookup(lport, ntoh24(fh->fh_s_id));
1084 if (!rdata) {
1085 mutex_unlock(&lport->disc.disc_mutex);
1086 goto reject;
1087 }
1088 mutex_lock(&rdata->rp_mutex);
1089 mutex_unlock(&lport->disc.disc_mutex);
1090
1091 switch (rdata->rp_state) {
1092 case RPORT_ST_PRLI:
1093 case RPORT_ST_RTV:
1094 case RPORT_ST_READY:
1095 case RPORT_ST_ADISC:
1096 break;
1097 default:
1098 mutex_unlock(&rdata->rp_mutex);
1099 goto reject;
1100 }
1101
1102 switch (fc_frame_payload_op(fp)) {
1103 case ELS_PRLI:
1104 fc_rport_recv_prli_req(rdata, sp, fp);
1105 break;
1106 case ELS_PRLO:
1107 fc_rport_recv_prlo_req(rdata, sp, fp);
1108 break;
1109 case ELS_ADISC:
1110 fc_rport_recv_adisc_req(rdata, sp, fp);
1111 break;
1112 case ELS_RRQ:
1113 els_data.fp = fp;
1114 lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
1115 break;
1116 case ELS_REC:
1117 els_data.fp = fp;
1118 lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
1119 break;
1120 default:
1121 fc_frame_free(fp); /* can't happen */
1122 break;
1123 }
1124
1125 mutex_unlock(&rdata->rp_mutex);
1126 return;
1127
1128 reject:
1129 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
1130 fc_frame_free(fp);
1131 }
1132
1133 /**
1134 * fc_rport_recv_req() - Handle a received ELS request from a rport
1135 * @sp: current sequence in the incoming request exchange
1136 * @fp: ELS request frame
1137 * @lport: Fibre Channel local port
1138 *
1139 * Locking Note: Called with the lport lock held.
1140 */
1141 void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
1142 struct fc_lport *lport)
1143 {
1144 struct fc_seq_els_data els_data;
1145
1146 /*
1147 * Handle PLOGI and LOGO requests separately, since they
1148 * don't require prior login.
1149 * Check for unsupported opcodes first and reject them.
1150 * For some ops, it would be incorrect to reject with "PLOGI required".
1151 */
1152 switch (fc_frame_payload_op(fp)) {
1153 case ELS_PLOGI:
1154 fc_rport_recv_plogi_req(lport, sp, fp);
1155 break;
1156 case ELS_LOGO:
1157 fc_rport_recv_logo_req(lport, sp, fp);
1158 break;
1159 case ELS_PRLI:
1160 case ELS_PRLO:
1161 case ELS_ADISC:
1162 case ELS_RRQ:
1163 case ELS_REC:
1164 fc_rport_recv_els_req(lport, sp, fp);
1165 break;
1166 default:
1167 fc_frame_free(fp);
1168 els_data.fp = NULL;
1169 els_data.reason = ELS_RJT_UNSUP;
1170 els_data.explan = ELS_EXPL_NONE;
1171 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
1172 break;
1173 }
1174 }
1175
1176 /**
1177 * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
1178 * @lport: local port
1179 * @sp: current sequence in the PLOGI exchange
1180 * @rx_fp: PLOGI request frame
1181 *
1182 * Locking Note: Called with the lport lock held.
1183 */
1184 static void fc_rport_recv_plogi_req(struct fc_lport *lport,
1185 struct fc_seq *sp, struct fc_frame *rx_fp)
1186 {
1187 struct fc_disc *disc;
1188 struct fc_rport_priv *rdata;
1189 struct fc_frame *fp = rx_fp;
1190 struct fc_exch *ep;
1191 struct fc_frame_header *fh;
1192 struct fc_els_flogi *pl;
1193 struct fc_seq_els_data rjt_data;
1194 u32 sid, f_ctl;
1195
1196 rjt_data.fp = NULL;
1197 fh = fc_frame_header_get(fp);
1198 sid = ntoh24(fh->fh_s_id);
1199
1200 FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");
1201
1202 pl = fc_frame_payload_get(fp, sizeof(*pl));
1203 if (!pl) {
1204 FC_RPORT_ID_DBG(lport, sid, "Received PLOGI too short\n");
1205 rjt_data.reason = ELS_RJT_PROT;
1206 rjt_data.explan = ELS_EXPL_INV_LEN;
1207 goto reject;
1208 }
1209
1210 disc = &lport->disc;
1211 mutex_lock(&disc->disc_mutex);
1212 rdata = lport->tt.rport_create(lport, sid);
1213 if (!rdata) {
1214 mutex_unlock(&disc->disc_mutex);
1215 rjt_data.reason = ELS_RJT_UNAB;
1216 rjt_data.explan = ELS_EXPL_INSUF_RES;
1217 goto reject;
1218 }
1219
1220 mutex_lock(&rdata->rp_mutex);
1221 mutex_unlock(&disc->disc_mutex);
1222
1223 rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn);
1224 rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn);
1225
1226 /*
1227 * If the rport was just created, possibly due to the incoming PLOGI,
1228 * set the state appropriately and accept the PLOGI.
1229 *
1230 * If we had also sent a PLOGI, and if the received PLOGI is from a
1231 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
1232 * "command already in progress".
1233 *
1234 * XXX TBD: If the session was ready before, the PLOGI should result in
1235 * all outstanding exchanges being reset.
1236 */
1237 switch (rdata->rp_state) {
1238 case RPORT_ST_INIT:
1239 FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n");
1240 break;
1241 case RPORT_ST_PLOGI:
1242 FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n");
1243 if (rdata->ids.port_name < lport->wwpn) {
1244 mutex_unlock(&rdata->rp_mutex);
1245 rjt_data.reason = ELS_RJT_INPROG;
1246 rjt_data.explan = ELS_EXPL_NONE;
1247 goto reject;
1248 }
1249 break;
1250 case RPORT_ST_PRLI:
1251 case RPORT_ST_READY:
1252 case RPORT_ST_ADISC:
1253 FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
1254 "- ignored for now\n", rdata->rp_state);
1255 /* XXX TBD - should reset */
1256 break;
1257 case RPORT_ST_DELETE:
1258 default:
1259 FC_RPORT_DBG(rdata, "Received PLOGI in unexpected state %d\n",
1260 rdata->rp_state);
1261 fc_frame_free(rx_fp);
1262 goto out;
1263 }
1264
1265 /*
1266 * Get session payload size from incoming PLOGI.
1267 */
1268 rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs);
1269 fc_frame_free(rx_fp);
1270
1271 /*
1272 * Send LS_ACC. If this fails, the originator should retry.
1273 */
1274 sp = lport->tt.seq_start_next(sp);
1275 if (!sp)
1276 goto out;
1277 fp = fc_frame_alloc(lport, sizeof(*pl));
1278 if (!fp)
1279 goto out;
1280
1281 fc_plogi_fill(lport, fp, ELS_LS_ACC);
1282 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1283 ep = fc_seq_exch(sp);
1284 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1285 FC_TYPE_ELS, f_ctl, 0);
1286 lport->tt.seq_send(lport, sp, fp);
1287 fc_rport_enter_prli(rdata);
1288 out:
1289 mutex_unlock(&rdata->rp_mutex);
1290 return;
1291
1292 reject:
1293 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1294 fc_frame_free(fp);
1295 }
1296
1297 /**
1298 * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
1299 * @rdata: private remote port data
1300 * @sp: current sequence in the PRLI exchange
1301 * @rx_fp: PRLI request frame
1302 *
1303 * Locking Note: The rport lock is expected to be held before calling
1304 * this function.
1305 */
1306 static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
1307 struct fc_seq *sp, struct fc_frame *rx_fp)
1308 {
1309 struct fc_lport *lport = rdata->local_port;
1310 struct fc_exch *ep;
1311 struct fc_frame *fp;
1312 struct fc_frame_header *fh;
1313 struct {
1314 struct fc_els_prli prli;
1315 struct fc_els_spp spp;
1316 } *pp;
1317 struct fc_els_spp *rspp; /* request service param page */
1318 struct fc_els_spp *spp; /* response spp */
1319 unsigned int len;
1320 unsigned int plen;
1321 enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
1322 enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
1323 enum fc_els_spp_resp resp;
1324 struct fc_seq_els_data rjt_data;
1325 u32 f_ctl;
1326 u32 fcp_parm;
1327 u32 roles = FC_RPORT_ROLE_UNKNOWN;
1328 rjt_data.fp = NULL;
1329
1330 fh = fc_frame_header_get(rx_fp);
1331
1332 FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
1333 fc_rport_state(rdata));
1334
1335 switch (rdata->rp_state) {
1336 case RPORT_ST_PRLI:
1337 case RPORT_ST_RTV:
1338 case RPORT_ST_READY:
1339 case RPORT_ST_ADISC:
1340 reason = ELS_RJT_NONE;
1341 break;
1342 default:
1343 fc_frame_free(rx_fp);
1344 return;
1345 break;
1346 }
1347 len = fr_len(rx_fp) - sizeof(*fh);
1348 pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1349 if (pp == NULL) {
1350 reason = ELS_RJT_PROT;
1351 explan = ELS_EXPL_INV_LEN;
1352 } else {
1353 plen = ntohs(pp->prli.prli_len);
1354 if ((plen % 4) != 0 || plen > len) {
1355 reason = ELS_RJT_PROT;
1356 explan = ELS_EXPL_INV_LEN;
1357 } else if (plen < len) {
1358 len = plen;
1359 }
1360 plen = pp->prli.prli_spp_len;
1361 if ((plen % 4) != 0 || plen < sizeof(*spp) ||
1362 plen > len || len < sizeof(*pp)) {
1363 reason = ELS_RJT_PROT;
1364 explan = ELS_EXPL_INV_LEN;
1365 }
1366 rspp = &pp->spp;
1367 }
1368 if (reason != ELS_RJT_NONE ||
1369 (fp = fc_frame_alloc(lport, len)) == NULL) {
1370 rjt_data.reason = reason;
1371 rjt_data.explan = explan;
1372 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1373 } else {
1374 sp = lport->tt.seq_start_next(sp);
1375 WARN_ON(!sp);
1376 pp = fc_frame_payload_get(fp, len);
1377 WARN_ON(!pp);
1378 memset(pp, 0, len);
1379 pp->prli.prli_cmd = ELS_LS_ACC;
1380 pp->prli.prli_spp_len = plen;
1381 pp->prli.prli_len = htons(len);
1382 len -= sizeof(struct fc_els_prli);
1383
1384 /* reinitialize remote port roles */
1385 rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
1386
1387 /*
1388 * Go through all the service parameter pages and build
1389 * response. If plen indicates longer SPP than standard,
1390 * use that. The entire response has been pre-cleared above.
1391 */
1392 spp = &pp->spp;
1393 while (len >= plen) {
1394 spp->spp_type = rspp->spp_type;
1395 spp->spp_type_ext = rspp->spp_type_ext;
1396 spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1397 resp = FC_SPP_RESP_ACK;
1398 if (rspp->spp_flags & FC_SPP_RPA_VAL)
1399 resp = FC_SPP_RESP_NO_PA;
1400 switch (rspp->spp_type) {
1401 case 0: /* common to all FC-4 types */
1402 break;
1403 case FC_TYPE_FCP:
1404 fcp_parm = ntohl(rspp->spp_params);
1405 if (fcp_parm & FCP_SPPF_RETRY)
1406 rdata->flags |= FC_RP_FLAGS_RETRY;
1407 rdata->supported_classes = FC_COS_CLASS3;
1408 if (fcp_parm & FCP_SPPF_INIT_FCN)
1409 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1410 if (fcp_parm & FCP_SPPF_TARG_FCN)
1411 roles |= FC_RPORT_ROLE_FCP_TARGET;
1412 rdata->ids.roles = roles;
1413
1414 spp->spp_params =
1415 htonl(lport->service_params);
1416 break;
1417 default:
1418 resp = FC_SPP_RESP_INVL;
1419 break;
1420 }
1421 spp->spp_flags |= resp;
1422 len -= plen;
1423 rspp = (struct fc_els_spp *)((char *)rspp + plen);
1424 spp = (struct fc_els_spp *)((char *)spp + plen);
1425 }
1426
1427 /*
1428 * Send LS_ACC. If this fails, the originator should retry.
1429 */
1430 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1431 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1432 ep = fc_seq_exch(sp);
1433 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1434 FC_TYPE_ELS, f_ctl, 0);
1435 lport->tt.seq_send(lport, sp, fp);
1436
1437 /*
1438 * Get lock and re-check state.
1439 */
1440 switch (rdata->rp_state) {
1441 case RPORT_ST_PRLI:
1442 fc_rport_enter_ready(rdata);
1443 break;
1444 case RPORT_ST_READY:
1445 case RPORT_ST_ADISC:
1446 break;
1447 default:
1448 break;
1449 }
1450 }
1451 fc_frame_free(rx_fp);
1452 }
1453
1454 /**
1455 * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
1456 * @rdata: private remote port data
1457 * @sp: current sequence in the PRLO exchange
1458 * @fp: PRLO request frame
1459 *
1460 * Locking Note: The rport lock is expected to be held before calling
1461 * this function.
1462 */
1463 static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
1464 struct fc_seq *sp,
1465 struct fc_frame *fp)
1466 {
1467 struct fc_lport *lport = rdata->local_port;
1468
1469 struct fc_frame_header *fh;
1470 struct fc_seq_els_data rjt_data;
1471
1472 fh = fc_frame_header_get(fp);
1473
1474 FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
1475 fc_rport_state(rdata));
1476
1477 rjt_data.fp = NULL;
1478 rjt_data.reason = ELS_RJT_UNAB;
1479 rjt_data.explan = ELS_EXPL_NONE;
1480 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1481 fc_frame_free(fp);
1482 }
1483
1484 /**
1485 * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
1486 * @lport: local port.
1487 * @sp: current sequence in the LOGO exchange
1488 * @fp: LOGO request frame
1489 *
1490 * Locking Note: Called with the lport lock held; the disc mutex and
1491 * the rport lock are taken internally.
1492 */
1493 static void fc_rport_recv_logo_req(struct fc_lport *lport,
1494 struct fc_seq *sp,
1495 struct fc_frame *fp)
1496 {
1497 struct fc_frame_header *fh;
1498 struct fc_rport_priv *rdata;
1499 u32 sid;
1500
1501 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
1502
1503 fh = fc_frame_header_get(fp);
1504 sid = ntoh24(fh->fh_s_id);
1505
1506 mutex_lock(&lport->disc.disc_mutex);
1507 rdata = lport->tt.rport_lookup(lport, sid);
1508 if (rdata) {
1509 mutex_lock(&rdata->rp_mutex);
1510 FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
1511 fc_rport_state(rdata));
1512
1513 /*
1514 * If the remote port was created due to discovery,
1515 * log back in. It may have seen a stale RSCN about us.
1516 */
1517 if (rdata->rp_state != RPORT_ST_DELETE && rdata->disc_id)
1518 fc_rport_enter_plogi(rdata);
1519 else
1520 fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
1521 mutex_unlock(&rdata->rp_mutex);
1522 } else
1523 FC_RPORT_ID_DBG(lport, sid,
1524 "Received LOGO from non-logged-in port\n");
1525 mutex_unlock(&lport->disc.disc_mutex);
1526 fc_frame_free(fp);
1527 }
1528
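/**
 * fc_rport_flush_queue() - Flush the rport_event_queue
 *
 * Waits until all pending rport event work has run to completion.
 */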
1529 static void fc_rport_flush_queue(void)
1530 {
1531 flush_workqueue(rport_event_queue);
1532 }
1533
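/**
 * fc_rport_init() - Fill in the rport entries of a libfc function template
 * @lport: local port to be initialized
 *
 * Only entries that the LLD has not already set are filled in with the
 * default handlers from this file.
 */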
1534 int fc_rport_init(struct fc_lport *lport)
1535 {
1536 if (!lport->tt.rport_lookup)
1537 lport->tt.rport_lookup = fc_rport_lookup;
1538
1539 if (!lport->tt.rport_create)
1540 lport->tt.rport_create = fc_rport_create;
1541
1542 if (!lport->tt.rport_login)
1543 lport->tt.rport_login = fc_rport_login;
1544
1545 if (!lport->tt.rport_logoff)
1546 lport->tt.rport_logoff = fc_rport_logoff;
1547
1548 if (!lport->tt.rport_recv_req)
1549 lport->tt.rport_recv_req = fc_rport_recv_req;
1550
1551 if (!lport->tt.rport_flush_queue)
1552 lport->tt.rport_flush_queue = fc_rport_flush_queue;
1553
1554 if (!lport->tt.rport_destroy)
1555 lport->tt.rport_destroy = fc_rport_destroy;
1556
1557 return 0;
1558 }
1559 EXPORT_SYMBOL(fc_rport_init);
1560
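/**
 * fc_setup_rport() - Allocate the global rport_event_queue workqueue
 *
 * Returns 0 on success or -ENOMEM if the workqueue could not be created.
 */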
1561 int fc_setup_rport(void)
1562 {
1563 rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1564 if (!rport_event_queue)
1565 return -ENOMEM;
1566 return 0;
1567 }
1568
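/**
 * fc_destroy_rport() - Tear down the global rport_event_queue workqueue
 */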
1569 void fc_destroy_rport(void)
1570 {
1571 destroy_workqueue(rport_event_queue);
1572 }
1573
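/**
 * fc_rport_terminate_io() - Stop all outstanding I/O for a remote port
 * @rport: Fibre Channel remote port
 *
 * Resets the exchanges to and from the remote port ID in both directions.
 */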
1574 void fc_rport_terminate_io(struct fc_rport *rport)
1575 {
1576 struct fc_rport_libfc_priv *rp = rport->dd_data;
1577 struct fc_lport *lport = rp->local_port;
1578
1579 lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
1580 lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
1581 }
1582 EXPORT_SYMBOL(fc_rport_terminate_io);