]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/scsi/libfc/fc_rport.c
[SCSI] libfc: make rport module maintain the rport list
[mirror_ubuntu-bionic-kernel.git] / drivers / scsi / libfc / fc_rport.c
CommitLineData
42e9a92f
RL
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * RPORT GENERAL INFO
22 *
23 * This file contains all processing regarding fc_rports. It contains the
24 * rport state machine and does all rport interaction with the transport class.
25 * There should be no other places in libfc that interact directly with the
26 * transport class in regards to adding and deleting rports.
27 *
28 * fc_rport's represent N_Port's within the fabric.
29 */
30
31/*
32 * RPORT LOCKING
33 *
34 * The rport should never hold the rport mutex and then attempt to acquire
35 * either the lport or disc mutexes. The rport's mutex is considered lesser
36 * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
37 * more comments on the heirarchy.
38 *
39 * The locking strategy is similar to the lport's strategy. The lock protects
40 * the rport's states and is held and released by the entry points to the rport
41 * block. All _enter_* functions correspond to rport states and expect the rport
42 * mutex to be locked before calling them. This means that rports only handle
43 * one request or response at a time, since they're not critical for the I/O
44 * path this potential over-use of the mutex is acceptable.
45 */
46
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
57
42e9a92f
RL
/* Single workqueue on which all rport events (fc_rport_work) are run. */
struct workqueue_struct *rport_event_queue;

/* State-entry routines; all expect rp_mutex held by the caller. */
static void fc_rport_enter_plogi(struct fc_rport_priv *);
static void fc_rport_enter_prli(struct fc_rport_priv *);
static void fc_rport_enter_rtv(struct fc_rport_priv *);
static void fc_rport_enter_ready(struct fc_rport_priv *);
static void fc_rport_enter_logo(struct fc_rport_priv *);

/* Incoming ELS request handlers, dispatched from fc_rport_recv_req(). */
static void fc_rport_recv_plogi_req(struct fc_rport_priv *,
				    struct fc_seq *, struct fc_frame *);
static void fc_rport_recv_prli_req(struct fc_rport_priv *,
				   struct fc_seq *, struct fc_frame *);
static void fc_rport_recv_prlo_req(struct fc_rport_priv *,
				   struct fc_seq *, struct fc_frame *);
static void fc_rport_recv_logo_req(struct fc_rport_priv *,
				   struct fc_seq *, struct fc_frame *);
static void fc_rport_timeout(struct work_struct *);
static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_work(struct work_struct *);

/* Human-readable names for enum fc_rport_state, indexed by state value. */
static const char *fc_rport_state_names[] = {
	[RPORT_ST_INIT] = "Init",
	[RPORT_ST_PLOGI] = "PLOGI",
	[RPORT_ST_PRLI] = "PRLI",
	[RPORT_ST_RTV] = "RTV",
	[RPORT_ST_READY] = "Ready",
	[RPORT_ST_LOGO] = "LOGO",
	[RPORT_ST_DELETE] = "Delete",
};
88
9e9d0452
JE
89/**
90 * fc_rport_create() - create remote port in INIT state.
91 * @lport: local port.
92 * @ids: remote port identifiers.
93 *
48f00902 94 * Locking note: must be called with the disc_mutex held.
9e9d0452
JE
95 */
96static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
97 struct fc_rport_identifiers *ids)
42e9a92f 98{
ab28f1fd 99 struct fc_rport_priv *rdata;
42e9a92f 100
9e9d0452
JE
101 rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
102 if (!rdata)
42e9a92f
RL
103 return NULL;
104
f211fa51
JE
105 rdata->ids = *ids;
106 kref_init(&rdata->kref);
42e9a92f 107 mutex_init(&rdata->rp_mutex);
795d86f5 108 rdata->local_port = lport;
42e9a92f
RL
109 rdata->rp_state = RPORT_ST_INIT;
110 rdata->event = RPORT_EV_NONE;
111 rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
795d86f5
JE
112 rdata->e_d_tov = lport->e_d_tov;
113 rdata->r_a_tov = lport->r_a_tov;
f211fa51 114 rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
42e9a92f
RL
115 INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
116 INIT_WORK(&rdata->event_work, fc_rport_work);
48f00902
JE
117 if (ids->port_id != FC_FID_DIR_SERV)
118 list_add(&rdata->peers, &lport->disc.rports);
9fb9d328 119 return rdata;
42e9a92f
RL
120}
121
f211fa51
JE
122/**
123 * fc_rport_destroy() - free a remote port after last reference is released.
124 * @kref: pointer to kref inside struct fc_rport_priv
125 */
126static void fc_rport_destroy(struct kref *kref)
127{
128 struct fc_rport_priv *rdata;
f211fa51
JE
129
130 rdata = container_of(kref, struct fc_rport_priv, kref);
9e9d0452 131 kfree(rdata);
f211fa51
JE
132}
133
42e9a92f 134/**
34f42a07 135 * fc_rport_state() - return a string for the state the rport is in
9fb9d328 136 * @rdata: remote port private data
42e9a92f 137 */
9fb9d328 138static const char *fc_rport_state(struct fc_rport_priv *rdata)
42e9a92f
RL
139{
140 const char *cp;
42e9a92f
RL
141
142 cp = fc_rport_state_names[rdata->rp_state];
143 if (!cp)
144 cp = "Unknown";
145 return cp;
146}
147
148/**
34f42a07 149 * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
42e9a92f
RL
150 * @rport: Pointer to Fibre Channel remote port structure
151 * @timeout: timeout in seconds
152 */
153void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
154{
155 if (timeout)
156 rport->dev_loss_tmo = timeout + 5;
157 else
158 rport->dev_loss_tmo = 30;
159}
160EXPORT_SYMBOL(fc_set_rport_loss_tmo);
161
162/**
34f42a07 163 * fc_plogi_get_maxframe() - Get max payload from the common service parameters
42e9a92f
RL
164 * @flp: FLOGI payload structure
165 * @maxval: upper limit, may be less than what is in the service parameters
166 */
b2ab99c9
RL
167static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
168 unsigned int maxval)
42e9a92f
RL
169{
170 unsigned int mfs;
171
172 /*
173 * Get max payload from the common service parameters and the
174 * class 3 receive data field size.
175 */
176 mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
177 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
178 maxval = mfs;
179 mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
180 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
181 maxval = mfs;
182 return maxval;
183}
184
/**
 * fc_rport_state_enter() - Change the rport's state
 * @rdata: The rport whose state should change
 * @new: The new state of the rport
 *
 * Locking Note: Called with the rport lock held
 */
static void fc_rport_state_enter(struct fc_rport_priv *rdata,
				 enum fc_rport_state new)
{
	/* the retry counter is per-state; reset it on a real transition */
	if (rdata->rp_state != new)
		rdata->retries = 0;
	rdata->rp_state = new;
}
199
/**
 * fc_rport_work() - Deliver a pending rport event to the transport layer
 * @work: the event_work member of a struct fc_rport_priv
 *
 * Runs on rport_event_queue.  READY events register (or re-use) the
 * fc_rport with the FC transport class and invoke the owner's callback;
 * FAILED/LOGO/STOP events unlink the rport from discovery, reset its
 * exchanges and remove it from the transport class.
 *
 * Locking Note: takes and releases rp_mutex around all rdata accesses;
 * the mutex is dropped before calling into the transport class or the
 * event callback.
 */
static void fc_rport_work(struct work_struct *work)
{
	u32 port_id;
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, event_work);
	struct fc_rport_libfc_priv *rp;
	enum fc_rport_event event;
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport_operations *rport_ops;
	struct fc_rport_identifiers ids;
	struct fc_rport *rport;

	mutex_lock(&rdata->rp_mutex);
	event = rdata->event;
	rport_ops = rdata->ops;
	rport = rdata->rport;

	FC_RPORT_DBG(rdata, "work event %u\n", event);

	switch (event) {
	case RPORT_EV_READY:
		/* snapshot ids under the mutex; used unlocked below */
		ids = rdata->ids;
		rdata->event = RPORT_EV_NONE;
		kref_get(&rdata->kref);
		mutex_unlock(&rdata->rp_mutex);

		if (!rport)
			rport = fc_remote_port_add(lport->host, 0, &ids);
		if (!rport) {
			FC_RPORT_DBG(rdata, "Failed to add the rport\n");
			lport->tt.rport_logoff(rdata);
			kref_put(&rdata->kref, lport->tt.rport_destroy);
			return;
		}
		/* re-acquire to publish the transport rport and copy state */
		mutex_lock(&rdata->rp_mutex);
		if (rdata->rport)
			FC_RPORT_DBG(rdata, "rport already allocated\n");
		rdata->rport = rport;
		rport->maxframe_size = rdata->maxframe_size;
		rport->supported_classes = rdata->supported_classes;

		rp = rport->dd_data;
		rp->local_port = lport;
		rp->rp_state = rdata->rp_state;
		rp->flags = rdata->flags;
		rp->e_d_tov = rdata->e_d_tov;
		rp->r_a_tov = rdata->r_a_tov;
		mutex_unlock(&rdata->rp_mutex);

		if (rport_ops->event_callback) {
			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
			rport_ops->event_callback(lport, rdata, event);
		}
		kref_put(&rdata->kref, lport->tt.rport_destroy);
		break;

	case RPORT_EV_FAILED:
	case RPORT_EV_LOGO:
	case RPORT_EV_STOP:
		port_id = rdata->ids.port_id;
		mutex_unlock(&rdata->rp_mutex);

		/* dir-server rports were never put on the disc list */
		if (port_id != FC_FID_DIR_SERV) {
			mutex_lock(&lport->disc.disc_mutex);
			list_del(&rdata->peers);
			mutex_unlock(&lport->disc.disc_mutex);
		}

		if (rport_ops->event_callback) {
			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
			rport_ops->event_callback(lport, rdata, event);
		}
		cancel_delayed_work_sync(&rdata->retry_work);

		/*
		 * Reset any outstanding exchanges before freeing rport.
		 */
		lport->tt.exch_mgr_reset(lport, 0, port_id);
		lport->tt.exch_mgr_reset(lport, port_id, 0);

		if (rport) {
			rp = rport->dd_data;
			rp->rp_state = RPORT_ST_DELETE;
			mutex_lock(&rdata->rp_mutex);
			rdata->rport = NULL;
			mutex_unlock(&rdata->rp_mutex);
			fc_remote_port_delete(rport);
		}
		/* drop the reference taken when the event was queued */
		kref_put(&rdata->kref, lport->tt.rport_destroy);
		break;

	default:
		mutex_unlock(&rdata->rp_mutex);
		break;
	}
}
296
297/**
34f42a07 298 * fc_rport_login() - Start the remote port login state machine
9fb9d328 299 * @rdata: private remote port
42e9a92f
RL
300 *
301 * Locking Note: Called without the rport lock held. This
302 * function will hold the rport lock, call an _enter_*
303 * function and then unlock the rport.
304 */
9fb9d328 305int fc_rport_login(struct fc_rport_priv *rdata)
42e9a92f 306{
42e9a92f
RL
307 mutex_lock(&rdata->rp_mutex);
308
9fb9d328 309 FC_RPORT_DBG(rdata, "Login to port\n");
42e9a92f 310
9fb9d328 311 fc_rport_enter_plogi(rdata);
42e9a92f
RL
312
313 mutex_unlock(&rdata->rp_mutex);
314
315 return 0;
316}
317
5f7ea3b7
JE
/**
 * fc_rport_enter_delete() - schedule a remote port to be deleted.
 * @rdata: private remote port
 * @event: event to report as the reason for deletion
 *
 * Locking Note: Called with the rport lock held.
 *
 * Allow state change into DELETE only once.
 *
 * Call queue_work only if there's no event already pending.
 * Set the new event so that the old pending event will not occur.
 * Since we have the mutex, even if fc_rport_work() is already started,
 * it'll see the new event.
 */
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
				  enum fc_rport_event event)
{
	/* DELETE is terminal; never re-enter it */
	if (rdata->rp_state == RPORT_ST_DELETE)
		return;

	FC_RPORT_DBG(rdata, "Delete port\n");

	fc_rport_state_enter(rdata, RPORT_ST_DELETE);

	/* queue first, then overwrite the event (ordering documented above) */
	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = event;
}
346
42e9a92f 347/**
34f42a07 348 * fc_rport_logoff() - Logoff and remove an rport
9fb9d328 349 * @rdata: private remote port
42e9a92f
RL
350 *
351 * Locking Note: Called without the rport lock held. This
352 * function will hold the rport lock, call an _enter_*
353 * function and then unlock the rport.
354 */
9fb9d328 355int fc_rport_logoff(struct fc_rport_priv *rdata)
42e9a92f 356{
42e9a92f
RL
357 mutex_lock(&rdata->rp_mutex);
358
9fb9d328 359 FC_RPORT_DBG(rdata, "Remove port\n");
42e9a92f 360
14194054 361 if (rdata->rp_state == RPORT_ST_DELETE) {
9fb9d328 362 FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
b4c6f546
AJ
363 mutex_unlock(&rdata->rp_mutex);
364 goto out;
365 }
366
9fb9d328 367 fc_rport_enter_logo(rdata);
42e9a92f
RL
368
369 /*
14194054 370 * Change the state to Delete so that we discard
42e9a92f
RL
371 * the response.
372 */
9fb9d328 373 fc_rport_enter_delete(rdata, RPORT_EV_STOP);
42e9a92f
RL
374 mutex_unlock(&rdata->rp_mutex);
375
b4c6f546 376out:
42e9a92f
RL
377 return 0;
378}
379
/**
 * fc_rport_enter_ready() - The rport is ready
 * @rdata: private remote port
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
	fc_rport_state_enter(rdata, RPORT_ST_READY);

	FC_RPORT_DBG(rdata, "Port is Ready\n");

	/*
	 * Queue only when no event is pending; the event field is then
	 * overwritten so fc_rport_work() sees the newest event (it reads
	 * rdata->event under rp_mutex).
	 */
	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = RPORT_EV_READY;
}
397
/**
 * fc_rport_timeout() - Handler for the retry_work timer.
 * @work: The work struct of the fc_rport_priv
 *
 * Re-runs the _enter_ routine for whatever login state the retry was
 * scheduled in; idle and terminal states do nothing.
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
static void fc_rport_timeout(struct work_struct *work)
{
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, retry_work.work);

	mutex_lock(&rdata->rp_mutex);

	switch (rdata->rp_state) {
	case RPORT_ST_PLOGI:
		fc_rport_enter_plogi(rdata);
		break;
	case RPORT_ST_PRLI:
		fc_rport_enter_prli(rdata);
		break;
	case RPORT_ST_RTV:
		fc_rport_enter_rtv(rdata);
		break;
	case RPORT_ST_LOGO:
		fc_rport_enter_logo(rdata);
		break;
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
	case RPORT_ST_DELETE:
		/* nothing to retry in these states */
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
}
434
/**
 * fc_rport_error() - Error handler, called once retries have been exhausted
 * @rdata: private remote port
 * @fp: The frame pointer
 *
 * Locking Note: The rport lock is expected to be held before
 * calling this routine
 */
static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
{
	FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
		     IS_ERR(fp) ? -PTR_ERR(fp) : 0,
		     fc_rport_state(rdata), rdata->retries);

	switch (rdata->rp_state) {
	case RPORT_ST_PLOGI:
	case RPORT_ST_PRLI:
	case RPORT_ST_LOGO:
		/* login/logout failed outright -- tear the rport down */
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
		break;
	case RPORT_ST_RTV:
		/* RTV is optional; many targets reject it, so go READY */
		fc_rport_enter_ready(rdata);
		break;
	case RPORT_ST_DELETE:
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
		break;
	}
}
464
6755db1c 465/**
34f42a07 466 * fc_rport_error_retry() - Error handler when retries are desired
9fb9d328 467 * @rdata: private remote port data
6755db1c
CL
468 * @fp: The frame pointer
469 *
470 * If the error was an exchange timeout retry immediately,
471 * otherwise wait for E_D_TOV.
472 *
473 * Locking Note: The rport lock is expected to be held before
474 * calling this routine
475 */
9fb9d328
JE
476static void fc_rport_error_retry(struct fc_rport_priv *rdata,
477 struct fc_frame *fp)
6755db1c 478{
6755db1c
CL
479 unsigned long delay = FC_DEF_E_D_TOV;
480
481 /* make sure this isn't an FC_EX_CLOSED error, never retry those */
482 if (PTR_ERR(fp) == -FC_EX_CLOSED)
9fb9d328 483 return fc_rport_error(rdata, fp);
6755db1c 484
a3666955 485 if (rdata->retries < rdata->local_port->max_rport_retry_count) {
9fb9d328
JE
486 FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
487 PTR_ERR(fp), fc_rport_state(rdata));
6755db1c
CL
488 rdata->retries++;
489 /* no additional delay on exchange timeouts */
490 if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
491 delay = 0;
6755db1c
CL
492 schedule_delayed_work(&rdata->retry_work, delay);
493 return;
494 }
495
9fb9d328 496 return fc_rport_error(rdata, fp);
6755db1c
CL
497}
498
/**
 * fc_rport_plogi_resp() - Handle incoming ELS PLOGI response
 * @sp: current sequence in the PLOGI exchange
 * @fp: response frame
 * @rdata_arg: private remote port data
 *
 * On LS_ACC, records the peer's names and service parameters and
 * advances to PRLI (or straight to READY for well-known addresses);
 * anything else retries.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_lport *lport = rdata->local_port;
	struct fc_els_flogi *plp = NULL;
	unsigned int tov;
	u16 csp_seq;
	u16 cssp_seq;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PLOGI response\n");

	/* stale response (state already moved on): discard it */
	if (rdata->rp_state != RPORT_ST_PLOGI) {
		FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC &&
	    (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
		rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
		rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);

		tov = ntohl(plp->fl_csp.sp_e_d_tov);
		/* EDTR flag: value is in finer resolution, scale down by
		 * 1000 -- presumably ns vs ms, confirm against FC-LS */
		if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
			tov /= 1000;
		if (tov > rdata->e_d_tov)
			rdata->e_d_tov = tov;
		/* max concurrent sequences: lesser of total and class 3 */
		csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
		cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
		if (cssp_seq < csp_seq)
			csp_seq = cssp_seq;
		rdata->max_seq = csp_seq;
		rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);

		/*
		 * If the rport is one of the well known addresses
		 * we skip PRLI and RTV and go straight to READY.
		 */
		if (rdata->ids.port_id >= FC_FID_DOM_MGR)
			fc_rport_enter_ready(rdata);
		else
			fc_rport_enter_prli(rdata);
	} else
		fc_rport_error_retry(rdata, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
572
573/**
34f42a07 574 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
9fb9d328 575 * @rdata: private remote port data
42e9a92f
RL
576 *
577 * Locking Note: The rport lock is expected to be held before calling
578 * this routine.
579 */
9fb9d328 580static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
42e9a92f 581{
42e9a92f
RL
582 struct fc_lport *lport = rdata->local_port;
583 struct fc_frame *fp;
584
9fb9d328
JE
585 FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
586 fc_rport_state(rdata));
42e9a92f 587
9fb9d328 588 fc_rport_state_enter(rdata, RPORT_ST_PLOGI);
42e9a92f 589
f211fa51 590 rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
42e9a92f
RL
591 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
592 if (!fp) {
9fb9d328 593 fc_rport_error_retry(rdata, fp);
42e9a92f
RL
594 return;
595 }
596 rdata->e_d_tov = lport->e_d_tov;
597
f211fa51 598 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
9fb9d328
JE
599 fc_rport_plogi_resp, rdata, lport->e_d_tov))
600 fc_rport_error_retry(rdata, fp);
42e9a92f 601 else
f211fa51 602 kref_get(&rdata->kref);
42e9a92f
RL
603}
604
/**
 * fc_rport_prli_resp() - Process Login (PRLI) response handler
 * @sp: current sequence in the PRLI exchange
 * @fp: response frame
 * @rdata_arg: private remote port data
 *
 * On LS_ACC, extracts the FCP service-parameter page to learn the
 * peer's roles (initiator/target) and retry capability, then moves
 * on to RTV.  A rejected PRLI deletes the rport.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	u32 roles = FC_RPORT_ROLE_UNKNOWN;
	u32 fcp_parm = 0;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PRLI response\n");

	/* stale response (state already moved on): discard it */
	if (rdata->rp_state != RPORT_ST_PRLI) {
		FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		pp = fc_frame_payload_get(fp, sizeof(*pp));
		if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
			fcp_parm = ntohl(pp->spp.spp_params);
			if (fcp_parm & FCP_SPPF_RETRY)
				rdata->flags |= FC_RP_FLAGS_RETRY;
		}

		rdata->supported_classes = FC_COS_CLASS3;
		if (fcp_parm & FCP_SPPF_INIT_FCN)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (fcp_parm & FCP_SPPF_TARG_FCN)
			roles |= FC_RPORT_ROLE_FCP_TARGET;

		rdata->ids.roles = roles;
		fc_rport_enter_rtv(rdata);

	} else {
		FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
673
/**
 * fc_rport_logo_resp() - Logout (LOGO) response handler
 * @sp: current sequence in the LOGO exchange
 * @fp: response frame
 * @rdata_arg: private remote port data
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a LOGO response\n");

	/*
	 * fc_rport_logoff() moves the rport to DELETE immediately after
	 * sending the LOGO, so on the normal logoff path this check
	 * discards the response.
	 */
	if (rdata->rp_state != RPORT_ST_LOGO) {
		FC_RPORT_DBG(rdata, "Received a LOGO response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		/*
		 * NOTE(review): entering RTV after an *accepted* LOGO looks
		 * suspicious -- verify whether this branch is reachable and
		 * intended given the state check above.
		 */
		fc_rport_enter_rtv(rdata);
	} else {
		FC_RPORT_DBG(rdata, "Bad ELS response for LOGO command\n");
		fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
721
722/**
34f42a07 723 * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
9fb9d328 724 * @rdata: private remote port data
42e9a92f
RL
725 *
726 * Locking Note: The rport lock is expected to be held before calling
727 * this routine.
728 */
9fb9d328 729static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
42e9a92f 730{
42e9a92f
RL
731 struct fc_lport *lport = rdata->local_port;
732 struct {
733 struct fc_els_prli prli;
734 struct fc_els_spp spp;
735 } *pp;
736 struct fc_frame *fp;
737
9fb9d328
JE
738 FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
739 fc_rport_state(rdata));
42e9a92f 740
9fb9d328 741 fc_rport_state_enter(rdata, RPORT_ST_PRLI);
42e9a92f
RL
742
743 fp = fc_frame_alloc(lport, sizeof(*pp));
744 if (!fp) {
9fb9d328 745 fc_rport_error_retry(rdata, fp);
42e9a92f
RL
746 return;
747 }
748
f211fa51 749 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
9fb9d328
JE
750 fc_rport_prli_resp, rdata, lport->e_d_tov))
751 fc_rport_error_retry(rdata, fp);
42e9a92f 752 else
f211fa51 753 kref_get(&rdata->kref);
42e9a92f
RL
754}
755
/**
 * fc_rport_rtv_resp() - Request Timeout Value response handler
 * @sp: current sequence in the RTV exchange
 * @fp: response frame
 * @rdata_arg: private remote port data
 *
 * Many targets don't seem to support this.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a RTV response\n");

	/* stale response (state already moved on): discard it */
	if (rdata->rp_state != RPORT_ST_RTV) {
		FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		struct fc_els_rtv_acc *rtv;
		u32 toq;
		u32 tov;

		rtv = fc_frame_payload_get(fp, sizeof(*rtv));
		if (rtv) {
			toq = ntohl(rtv->rtv_toq);
			tov = ntohl(rtv->rtv_r_a_tov);
			/* clamp timeouts to at least 1 to avoid zero TOVs */
			if (tov == 0)
				tov = 1;
			rdata->r_a_tov = tov;
			tov = ntohl(rtv->rtv_e_d_tov);
			/* EDRES: E_D_TOV given in finer resolution, scale
			 * down by 1e6 -- presumably ns to ms, confirm vs
			 * FC-LS */
			if (toq & FC_ELS_RTV_EDRES)
				tov /= 1000000;
			if (tov == 0)
				tov = 1;
			rdata->e_d_tov = tov;
		}
	}

	/* RTV is best-effort: enter READY whether or not it was accepted */
	fc_rport_enter_ready(rdata);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
821
822/**
34f42a07 823 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
9fb9d328 824 * @rdata: private remote port data
42e9a92f
RL
825 *
826 * Locking Note: The rport lock is expected to be held before calling
827 * this routine.
828 */
9fb9d328 829static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
42e9a92f
RL
830{
831 struct fc_frame *fp;
42e9a92f
RL
832 struct fc_lport *lport = rdata->local_port;
833
9fb9d328
JE
834 FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
835 fc_rport_state(rdata));
42e9a92f 836
9fb9d328 837 fc_rport_state_enter(rdata, RPORT_ST_RTV);
42e9a92f
RL
838
839 fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
840 if (!fp) {
9fb9d328 841 fc_rport_error_retry(rdata, fp);
42e9a92f
RL
842 return;
843 }
844
f211fa51 845 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
9fb9d328
JE
846 fc_rport_rtv_resp, rdata, lport->e_d_tov))
847 fc_rport_error_retry(rdata, fp);
42e9a92f 848 else
f211fa51 849 kref_get(&rdata->kref);
42e9a92f
RL
850}
851
852/**
34f42a07 853 * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
9fb9d328 854 * @rdata: private remote port data
42e9a92f
RL
855 *
856 * Locking Note: The rport lock is expected to be held before calling
857 * this routine.
858 */
9fb9d328 859static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
42e9a92f 860{
42e9a92f
RL
861 struct fc_lport *lport = rdata->local_port;
862 struct fc_frame *fp;
863
9fb9d328
JE
864 FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n",
865 fc_rport_state(rdata));
42e9a92f 866
9fb9d328 867 fc_rport_state_enter(rdata, RPORT_ST_LOGO);
42e9a92f
RL
868
869 fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
870 if (!fp) {
9fb9d328 871 fc_rport_error_retry(rdata, fp);
42e9a92f
RL
872 return;
873 }
874
f211fa51 875 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
9fb9d328
JE
876 fc_rport_logo_resp, rdata, lport->e_d_tov))
877 fc_rport_error_retry(rdata, fp);
42e9a92f 878 else
f211fa51 879 kref_get(&rdata->kref);
42e9a92f
RL
880}
881
882
/**
 * fc_rport_recv_req() - Receive a request from a rport
 * @sp: current sequence in the PLOGI exchange
 * @fp: response frame
 * @rdata: private remote port data
 *
 * Dispatches incoming ELS requests to the per-command handlers;
 * RRQ/REC are acknowledged directly and anything unrecognized is
 * rejected with ELS_RJT_UNSUP.
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
		       struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;

	struct fc_frame_header *fh;
	struct fc_seq_els_data els_data;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	els_data.fp = NULL;
	els_data.explan = ELS_EXPL_NONE;
	els_data.reason = ELS_RJT_NONE;

	fh = fc_frame_header_get(fp);

	/* only extended link service requests are handled here */
	if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) {
		op = fc_frame_payload_op(fp);
		switch (op) {
		case ELS_PLOGI:
			fc_rport_recv_plogi_req(rdata, sp, fp);
			break;
		case ELS_PRLI:
			fc_rport_recv_prli_req(rdata, sp, fp);
			break;
		case ELS_PRLO:
			fc_rport_recv_prlo_req(rdata, sp, fp);
			break;
		case ELS_LOGO:
			fc_rport_recv_logo_req(rdata, sp, fp);
			break;
		case ELS_RRQ:
			els_data.fp = fp;
			lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
			break;
		case ELS_REC:
			els_data.fp = fp;
			lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
			break;
		default:
			/* reject anything we do not support */
			els_data.reason = ELS_RJT_UNSUP;
			lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
			break;
		}
	}

	mutex_unlock(&rdata->rp_mutex);
}
942
943/**
34f42a07 944 * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
9fb9d328 945 * @rdata: private remote port data
42e9a92f
RL
946 * @sp: current sequence in the PLOGI exchange
947 * @fp: PLOGI request frame
948 *
949 * Locking Note: The rport lock is exected to be held before calling
950 * this function.
951 */
9fb9d328 952static void fc_rport_recv_plogi_req(struct fc_rport_priv *rdata,
42e9a92f
RL
953 struct fc_seq *sp, struct fc_frame *rx_fp)
954{
42e9a92f
RL
955 struct fc_lport *lport = rdata->local_port;
956 struct fc_frame *fp = rx_fp;
957 struct fc_exch *ep;
958 struct fc_frame_header *fh;
959 struct fc_els_flogi *pl;
960 struct fc_seq_els_data rjt_data;
961 u32 sid;
962 u64 wwpn;
963 u64 wwnn;
964 enum fc_els_rjt_reason reject = 0;
965 u32 f_ctl;
966 rjt_data.fp = NULL;
967
968 fh = fc_frame_header_get(fp);
969
9fb9d328
JE
970 FC_RPORT_DBG(rdata, "Received PLOGI request while in state %s\n",
971 fc_rport_state(rdata));
42e9a92f
RL
972
973 sid = ntoh24(fh->fh_s_id);
974 pl = fc_frame_payload_get(fp, sizeof(*pl));
975 if (!pl) {
9fb9d328 976 FC_RPORT_DBG(rdata, "Received PLOGI too short\n");
42e9a92f
RL
977 WARN_ON(1);
978 /* XXX TBD: send reject? */
979 fc_frame_free(fp);
980 return;
981 }
982 wwpn = get_unaligned_be64(&pl->fl_wwpn);
983 wwnn = get_unaligned_be64(&pl->fl_wwnn);
984
985 /*
986 * If the session was just created, possibly due to the incoming PLOGI,
987 * set the state appropriately and accept the PLOGI.
988 *
989 * If we had also sent a PLOGI, and if the received PLOGI is from a
990 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
991 * "command already in progress".
992 *
993 * XXX TBD: If the session was ready before, the PLOGI should result in
994 * all outstanding exchanges being reset.
995 */
996 switch (rdata->rp_state) {
997 case RPORT_ST_INIT:
9fb9d328 998 FC_RPORT_DBG(rdata, "Received PLOGI, wwpn %llx state INIT "
7414705e 999 "- reject\n", (unsigned long long)wwpn);
42e9a92f
RL
1000 reject = ELS_RJT_UNSUP;
1001 break;
1002 case RPORT_ST_PLOGI:
9fb9d328 1003 FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state %d\n",
7414705e 1004 rdata->rp_state);
42e9a92f
RL
1005 if (wwpn < lport->wwpn)
1006 reject = ELS_RJT_INPROG;
1007 break;
1008 case RPORT_ST_PRLI:
1009 case RPORT_ST_READY:
9fb9d328 1010 FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
7414705e 1011 "- ignored for now\n", rdata->rp_state);
42e9a92f
RL
1012 /* XXX TBD - should reset */
1013 break;
14194054 1014 case RPORT_ST_DELETE:
42e9a92f 1015 default:
9fb9d328 1016 FC_RPORT_DBG(rdata, "Received PLOGI in unexpected "
7414705e 1017 "state %d\n", rdata->rp_state);
b4c6f546
AJ
1018 fc_frame_free(fp);
1019 return;
42e9a92f
RL
1020 break;
1021 }
1022
1023 if (reject) {
1024 rjt_data.reason = reject;
1025 rjt_data.explan = ELS_EXPL_NONE;
1026 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1027 fc_frame_free(fp);
1028 } else {
1029 fp = fc_frame_alloc(lport, sizeof(*pl));
1030 if (fp == NULL) {
1031 fp = rx_fp;
1032 rjt_data.reason = ELS_RJT_UNAB;
1033 rjt_data.explan = ELS_EXPL_NONE;
1034 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1035 fc_frame_free(fp);
1036 } else {
1037 sp = lport->tt.seq_start_next(sp);
1038 WARN_ON(!sp);
f211fa51
JE
1039 rdata->ids.port_name = wwpn;
1040 rdata->ids.node_name = wwnn;
42e9a92f
RL
1041
1042 /*
1043 * Get session payload size from incoming PLOGI.
1044 */
f211fa51 1045 rdata->maxframe_size =
42e9a92f
RL
1046 fc_plogi_get_maxframe(pl, lport->mfs);
1047 fc_frame_free(rx_fp);
1048 fc_plogi_fill(lport, fp, ELS_LS_ACC);
1049
1050 /*
1051 * Send LS_ACC. If this fails,
1052 * the originator should retry.
1053 */
1054 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1055 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1056 ep = fc_seq_exch(sp);
1057 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1058 FC_TYPE_ELS, f_ctl, 0);
1059 lport->tt.seq_send(lport, sp, fp);
1060 if (rdata->rp_state == RPORT_ST_PLOGI)
9fb9d328 1061 fc_rport_enter_prli(rdata);
42e9a92f
RL
1062 }
1063 }
1064}
1065
1066/**
34f42a07 1067 * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
9fb9d328 1068 * @rdata: private remote port data
42e9a92f
RL
1069 * @sp: current sequence in the PRLI exchange
1070 * @fp: PRLI request frame
1071 *
1072 * Locking Note: The rport lock is exected to be held before calling
1073 * this function.
1074 */
9fb9d328 1075static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
42e9a92f
RL
1076 struct fc_seq *sp, struct fc_frame *rx_fp)
1077{
42e9a92f
RL
1078 struct fc_lport *lport = rdata->local_port;
1079 struct fc_exch *ep;
1080 struct fc_frame *fp;
1081 struct fc_frame_header *fh;
1082 struct {
1083 struct fc_els_prli prli;
1084 struct fc_els_spp spp;
1085 } *pp;
1086 struct fc_els_spp *rspp; /* request service param page */
1087 struct fc_els_spp *spp; /* response spp */
1088 unsigned int len;
1089 unsigned int plen;
1090 enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
1091 enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
1092 enum fc_els_spp_resp resp;
1093 struct fc_seq_els_data rjt_data;
1094 u32 f_ctl;
1095 u32 fcp_parm;
1096 u32 roles = FC_RPORT_ROLE_UNKNOWN;
1097 rjt_data.fp = NULL;
1098
1099 fh = fc_frame_header_get(rx_fp);
1100
9fb9d328
JE
1101 FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
1102 fc_rport_state(rdata));
42e9a92f
RL
1103
1104 switch (rdata->rp_state) {
1105 case RPORT_ST_PRLI:
1106 case RPORT_ST_READY:
1107 reason = ELS_RJT_NONE;
1108 break;
1109 default:
b4c6f546
AJ
1110 fc_frame_free(rx_fp);
1111 return;
42e9a92f
RL
1112 break;
1113 }
1114 len = fr_len(rx_fp) - sizeof(*fh);
1115 pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1116 if (pp == NULL) {
1117 reason = ELS_RJT_PROT;
1118 explan = ELS_EXPL_INV_LEN;
1119 } else {
1120 plen = ntohs(pp->prli.prli_len);
1121 if ((plen % 4) != 0 || plen > len) {
1122 reason = ELS_RJT_PROT;
1123 explan = ELS_EXPL_INV_LEN;
1124 } else if (plen < len) {
1125 len = plen;
1126 }
1127 plen = pp->prli.prli_spp_len;
1128 if ((plen % 4) != 0 || plen < sizeof(*spp) ||
1129 plen > len || len < sizeof(*pp)) {
1130 reason = ELS_RJT_PROT;
1131 explan = ELS_EXPL_INV_LEN;
1132 }
1133 rspp = &pp->spp;
1134 }
1135 if (reason != ELS_RJT_NONE ||
1136 (fp = fc_frame_alloc(lport, len)) == NULL) {
1137 rjt_data.reason = reason;
1138 rjt_data.explan = explan;
1139 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1140 } else {
1141 sp = lport->tt.seq_start_next(sp);
1142 WARN_ON(!sp);
1143 pp = fc_frame_payload_get(fp, len);
1144 WARN_ON(!pp);
1145 memset(pp, 0, len);
1146 pp->prli.prli_cmd = ELS_LS_ACC;
1147 pp->prli.prli_spp_len = plen;
1148 pp->prli.prli_len = htons(len);
1149 len -= sizeof(struct fc_els_prli);
1150
1151 /*
1152 * Go through all the service parameter pages and build
1153 * response. If plen indicates longer SPP than standard,
1154 * use that. The entire response has been pre-cleared above.
1155 */
1156 spp = &pp->spp;
1157 while (len >= plen) {
1158 spp->spp_type = rspp->spp_type;
1159 spp->spp_type_ext = rspp->spp_type_ext;
1160 spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1161 resp = FC_SPP_RESP_ACK;
1162 if (rspp->spp_flags & FC_SPP_RPA_VAL)
1163 resp = FC_SPP_RESP_NO_PA;
1164 switch (rspp->spp_type) {
1165 case 0: /* common to all FC-4 types */
1166 break;
1167 case FC_TYPE_FCP:
1168 fcp_parm = ntohl(rspp->spp_params);
1169 if (fcp_parm * FCP_SPPF_RETRY)
1170 rdata->flags |= FC_RP_FLAGS_RETRY;
f211fa51 1171 rdata->supported_classes = FC_COS_CLASS3;
42e9a92f
RL
1172 if (fcp_parm & FCP_SPPF_INIT_FCN)
1173 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1174 if (fcp_parm & FCP_SPPF_TARG_FCN)
1175 roles |= FC_RPORT_ROLE_FCP_TARGET;
f211fa51 1176 rdata->ids.roles = roles;
42e9a92f
RL
1177
1178 spp->spp_params =
1179 htonl(lport->service_params);
1180 break;
1181 default:
1182 resp = FC_SPP_RESP_INVL;
1183 break;
1184 }
1185 spp->spp_flags |= resp;
1186 len -= plen;
1187 rspp = (struct fc_els_spp *)((char *)rspp + plen);
1188 spp = (struct fc_els_spp *)((char *)spp + plen);
1189 }
1190
1191 /*
1192 * Send LS_ACC. If this fails, the originator should retry.
1193 */
1194 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1195 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1196 ep = fc_seq_exch(sp);
1197 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1198 FC_TYPE_ELS, f_ctl, 0);
1199 lport->tt.seq_send(lport, sp, fp);
1200
1201 /*
1202 * Get lock and re-check state.
1203 */
1204 switch (rdata->rp_state) {
1205 case RPORT_ST_PRLI:
9fb9d328 1206 fc_rport_enter_ready(rdata);
42e9a92f
RL
1207 break;
1208 case RPORT_ST_READY:
1209 break;
1210 default:
1211 break;
1212 }
1213 }
1214 fc_frame_free(rx_fp);
1215}
1216
1217/**
34f42a07 1218 * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
9fb9d328 1219 * @rdata: private remote port data
42e9a92f
RL
1220 * @sp: current sequence in the PRLO exchange
1221 * @fp: PRLO request frame
1222 *
1223 * Locking Note: The rport lock is exected to be held before calling
1224 * this function.
1225 */
9fb9d328
JE
1226static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
1227 struct fc_seq *sp,
42e9a92f
RL
1228 struct fc_frame *fp)
1229{
42e9a92f
RL
1230 struct fc_lport *lport = rdata->local_port;
1231
1232 struct fc_frame_header *fh;
1233 struct fc_seq_els_data rjt_data;
1234
1235 fh = fc_frame_header_get(fp);
1236
9fb9d328
JE
1237 FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
1238 fc_rport_state(rdata));
42e9a92f 1239
14194054 1240 if (rdata->rp_state == RPORT_ST_DELETE) {
b4c6f546
AJ
1241 fc_frame_free(fp);
1242 return;
1243 }
1244
42e9a92f
RL
1245 rjt_data.fp = NULL;
1246 rjt_data.reason = ELS_RJT_UNAB;
1247 rjt_data.explan = ELS_EXPL_NONE;
1248 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1249 fc_frame_free(fp);
1250}
1251
1252/**
34f42a07 1253 * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
9fb9d328 1254 * @rdata: private remote port data
42e9a92f
RL
1255 * @sp: current sequence in the LOGO exchange
1256 * @fp: LOGO request frame
1257 *
1258 * Locking Note: The rport lock is exected to be held before calling
1259 * this function.
1260 */
9fb9d328
JE
1261static void fc_rport_recv_logo_req(struct fc_rport_priv *rdata,
1262 struct fc_seq *sp,
42e9a92f
RL
1263 struct fc_frame *fp)
1264{
1265 struct fc_frame_header *fh;
42e9a92f
RL
1266 struct fc_lport *lport = rdata->local_port;
1267
1268 fh = fc_frame_header_get(fp);
1269
9fb9d328
JE
1270 FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
1271 fc_rport_state(rdata));
42e9a92f 1272
14194054 1273 if (rdata->rp_state == RPORT_ST_DELETE) {
b4c6f546
AJ
1274 fc_frame_free(fp);
1275 return;
1276 }
1277
00fea930 1278 fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
42e9a92f
RL
1279
1280 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
1281 fc_frame_free(fp);
1282}
1283
/**
 * fc_rport_flush_queue() - Flush the rport event workqueue
 *
 * Blocks until all work queued on rport_event_queue so far has run.
 */
static void fc_rport_flush_queue(void)
{
	flush_workqueue(rport_event_queue);
}
1288
42e9a92f
RL
/**
 * fc_rport_init() - Install default rport template handlers
 * @lport: local port to fill in
 *
 * For each rport operation in the lport's transport template that the
 * caller has not already provided, install the libfc default
 * implementation.  Existing (non-NULL) handlers are left untouched.
 *
 * Returns 0 (cannot fail).
 */
int fc_rport_init(struct fc_lport *lport)
{
	if (!lport->tt.rport_create)
		lport->tt.rport_create = fc_rport_create;

	if (!lport->tt.rport_login)
		lport->tt.rport_login = fc_rport_login;

	if (!lport->tt.rport_logoff)
		lport->tt.rport_logoff = fc_rport_logoff;

	if (!lport->tt.rport_recv_req)
		lport->tt.rport_recv_req = fc_rport_recv_req;

	if (!lport->tt.rport_flush_queue)
		lport->tt.rport_flush_queue = fc_rport_flush_queue;

	if (!lport->tt.rport_destroy)
		lport->tt.rport_destroy = fc_rport_destroy;

	return 0;
}
EXPORT_SYMBOL(fc_rport_init);
1312
b0d428ad 1313int fc_setup_rport(void)
42e9a92f
RL
1314{
1315 rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1316 if (!rport_event_queue)
1317 return -ENOMEM;
1318 return 0;
1319}
1320EXPORT_SYMBOL(fc_setup_rport);
1321
/**
 * fc_destroy_rport() - Destroy the rport event workqueue
 *
 * Counterpart to fc_setup_rport(); tears down rport_event_queue.
 */
void fc_destroy_rport(void)
{
	destroy_workqueue(rport_event_queue);
}
EXPORT_SYMBOL(fc_destroy_rport);
1327
/**
 * fc_rport_terminate_io() - Terminate I/O associated with a remote port
 * @rport: remote port whose exchanges are to be reset
 *
 * Resets the local port's exchange manager twice, once with the
 * rport's port ID as the destination and once with it as the source,
 * so exchanges in both directions are covered.
 */
void fc_rport_terminate_io(struct fc_rport *rport)
{
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct fc_lport *lport = rp->local_port;

	lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
	lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
}
EXPORT_SYMBOL(fc_rport_terminate_io);