]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/scsi/libfc/fc_rport.c
[SCSI] libfc: Remove fc_fcp_complete
[mirror_ubuntu-bionic-kernel.git] / drivers / scsi / libfc / fc_rport.c
CommitLineData
42e9a92f
RL
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * RPORT GENERAL INFO
22 *
23 * This file contains all processing regarding fc_rports. It contains the
24 * rport state machine and does all rport interaction with the transport class.
25 * There should be no other places in libfc that interact directly with the
26 * transport class in regards to adding and deleting rports.
27 *
28 * fc_rport's represent N_Port's within the fabric.
29 */
30
31/*
32 * RPORT LOCKING
33 *
34 * The rport should never hold the rport mutex and then attempt to acquire
35 * either the lport or disc mutexes. The rport's mutex is considered lesser
36 * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
37 * more comments on the hierarchy.
38 *
39 * The locking strategy is similar to the lport's strategy. The lock protects
40 * the rport's states and is held and released by the entry points to the rport
41 * block. All _enter_* functions correspond to rport states and expect the rport
42 * mutex to be locked before calling them. This means that rports only handle
43 * one request or response at a time, since they're not critical for the I/O
44 * path this potential over-use of the mutex is acceptable.
45 */
46
47#include <linux/kernel.h>
48#include <linux/spinlock.h>
49#include <linux/interrupt.h>
50#include <linux/rcupdate.h>
51#include <linux/timer.h>
52#include <linux/workqueue.h>
53#include <asm/unaligned.h>
54
55#include <scsi/libfc.h>
56#include <scsi/fc_encode.h>
57
42e9a92f
RL
58struct workqueue_struct *rport_event_queue;
59
9fb9d328
JE
60static void fc_rport_enter_plogi(struct fc_rport_priv *);
61static void fc_rport_enter_prli(struct fc_rport_priv *);
62static void fc_rport_enter_rtv(struct fc_rport_priv *);
63static void fc_rport_enter_ready(struct fc_rport_priv *);
64static void fc_rport_enter_logo(struct fc_rport_priv *);
370c3bd0 65static void fc_rport_enter_adisc(struct fc_rport_priv *);
42e9a92f 66
3ac6f98f 67static void fc_rport_recv_plogi_req(struct fc_lport *,
42e9a92f 68 struct fc_seq *, struct fc_frame *);
9fb9d328 69static void fc_rport_recv_prli_req(struct fc_rport_priv *,
42e9a92f 70 struct fc_seq *, struct fc_frame *);
9fb9d328 71static void fc_rport_recv_prlo_req(struct fc_rport_priv *,
42e9a92f 72 struct fc_seq *, struct fc_frame *);
83fe6a93 73static void fc_rport_recv_logo_req(struct fc_lport *,
42e9a92f
RL
74 struct fc_seq *, struct fc_frame *);
75static void fc_rport_timeout(struct work_struct *);
9fb9d328
JE
76static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
77static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
42e9a92f
RL
78static void fc_rport_work(struct work_struct *);
79
80static const char *fc_rport_state_names[] = {
42e9a92f
RL
81 [RPORT_ST_INIT] = "Init",
82 [RPORT_ST_PLOGI] = "PLOGI",
83 [RPORT_ST_PRLI] = "PRLI",
84 [RPORT_ST_RTV] = "RTV",
85 [RPORT_ST_READY] = "Ready",
86 [RPORT_ST_LOGO] = "LOGO",
370c3bd0 87 [RPORT_ST_ADISC] = "ADISC",
14194054 88 [RPORT_ST_DELETE] = "Delete",
b4a9c7ed 89 [RPORT_ST_RESTART] = "Restart",
42e9a92f
RL
90};
91
8025b5db
JE
92/**
93 * fc_rport_lookup() - lookup a remote port by port_id
94 * @lport: Fibre Channel host port instance
95 * @port_id: remote port port_id to match
96 */
97static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
98 u32 port_id)
99{
100 struct fc_rport_priv *rdata;
101
102 list_for_each_entry(rdata, &lport->disc.rports, peers)
b4a9c7ed 103 if (rdata->ids.port_id == port_id)
8025b5db
JE
104 return rdata;
105 return NULL;
106}
107
/**
 * fc_rport_create() - Create a new remote port
 * @lport: The local port that the new remote port is for
 * @port_id: The port ID for the new remote port
 *
 * Returns an existing rport if one is already known for @port_id;
 * otherwise allocates and initializes a new one in the INIT state.
 * Returns NULL on allocation failure.
 *
 * Locking note: must be called with the disc_mutex held.
 */
static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
					     u32 port_id)
{
	struct fc_rport_priv *rdata;

	/* Reuse an existing rport for this ID if there is one. */
	rdata = lport->tt.rport_lookup(lport, port_id);
	if (rdata)
		return rdata;

	rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
	if (!rdata)
		return NULL;

	/* WWNs are unknown until PLOGI completes; -1 marks them invalid. */
	rdata->ids.node_name = -1;
	rdata->ids.port_name = -1;
	rdata->ids.port_id = port_id;
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

	kref_init(&rdata->kref);
	mutex_init(&rdata->rp_mutex);
	rdata->local_port = lport;
	rdata->rp_state = RPORT_ST_INIT;
	rdata->event = RPORT_EV_NONE;
	rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
	/* Inherit timeout values from the local port until RTV refines them. */
	rdata->e_d_tov = lport->e_d_tov;
	rdata->r_a_tov = lport->r_a_tov;
	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
	INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
	INIT_WORK(&rdata->event_work, fc_rport_work);
	/* The directory server rport is not tracked on the discovery list. */
	if (port_id != FC_FID_DIR_SERV)
		list_add(&rdata->peers, &lport->disc.rports);
	return rdata;
}
148
f211fa51
JE
149/**
150 * fc_rport_destroy() - free a remote port after last reference is released.
151 * @kref: pointer to kref inside struct fc_rport_priv
152 */
153static void fc_rport_destroy(struct kref *kref)
154{
155 struct fc_rport_priv *rdata;
f211fa51
JE
156
157 rdata = container_of(kref, struct fc_rport_priv, kref);
9e9d0452 158 kfree(rdata);
f211fa51
JE
159}
160
42e9a92f 161/**
34f42a07 162 * fc_rport_state() - return a string for the state the rport is in
9fb9d328 163 * @rdata: remote port private data
42e9a92f 164 */
9fb9d328 165static const char *fc_rport_state(struct fc_rport_priv *rdata)
42e9a92f
RL
166{
167 const char *cp;
42e9a92f
RL
168
169 cp = fc_rport_state_names[rdata->rp_state];
170 if (!cp)
171 cp = "Unknown";
172 return cp;
173}
174
175/**
34f42a07 176 * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
42e9a92f
RL
177 * @rport: Pointer to Fibre Channel remote port structure
178 * @timeout: timeout in seconds
179 */
180void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
181{
182 if (timeout)
183 rport->dev_loss_tmo = timeout + 5;
184 else
185 rport->dev_loss_tmo = 30;
186}
187EXPORT_SYMBOL(fc_set_rport_loss_tmo);
188
189/**
34f42a07 190 * fc_plogi_get_maxframe() - Get max payload from the common service parameters
42e9a92f
RL
191 * @flp: FLOGI payload structure
192 * @maxval: upper limit, may be less than what is in the service parameters
193 */
b2ab99c9
RL
194static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
195 unsigned int maxval)
42e9a92f
RL
196{
197 unsigned int mfs;
198
199 /*
200 * Get max payload from the common service parameters and the
201 * class 3 receive data field size.
202 */
203 mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
204 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
205 maxval = mfs;
206 mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
207 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
208 maxval = mfs;
209 return maxval;
210}
211
212/**
34f42a07 213 * fc_rport_state_enter() - Change the rport's state
9fb9d328 214 * @rdata: The rport whose state should change
42e9a92f
RL
215 * @new: The new state of the rport
216 *
217 * Locking Note: Called with the rport lock held
218 */
9fb9d328 219static void fc_rport_state_enter(struct fc_rport_priv *rdata,
42e9a92f
RL
220 enum fc_rport_state new)
221{
42e9a92f
RL
222 if (rdata->rp_state != new)
223 rdata->retries = 0;
224 rdata->rp_state = new;
225}
226
/**
 * fc_rport_work() - Handler for rport events in the rport_event_queue
 * @work: The work struct of the fc_rport_priv
 *
 * Delivers the pending rport event (READY / FAILED / LOGO / STOP) to the
 * fc_transport class and to the upper-layer event_callback, outside the
 * rport mutex.  On READY the transport rport is created/refreshed; on the
 * teardown events the rport is removed from the discovery list (unless a
 * RESTART is pending), exchanges are reset, and the transport rport is
 * deleted.
 */
static void fc_rport_work(struct work_struct *work)
{
	u32 port_id;
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, event_work);
	struct fc_rport_libfc_priv *rp;
	enum fc_rport_event event;
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport_operations *rport_ops;
	struct fc_rport_identifiers ids;
	struct fc_rport *rport;
	int restart = 0;

	mutex_lock(&rdata->rp_mutex);
	event = rdata->event;
	rport_ops = rdata->ops;
	rport = rdata->rport;

	FC_RPORT_DBG(rdata, "work event %u\n", event);

	switch (event) {
	case RPORT_EV_READY:
		/* Snapshot ids under the mutex; used unlocked below. */
		ids = rdata->ids;
		rdata->event = RPORT_EV_NONE;
		/* Hold a reference across the unlocked section. */
		kref_get(&rdata->kref);
		mutex_unlock(&rdata->rp_mutex);

		if (!rport)
			rport = fc_remote_port_add(lport->host, 0, &ids);
		if (!rport) {
			FC_RPORT_DBG(rdata, "Failed to add the rport\n");
			lport->tt.rport_logoff(rdata);
			kref_put(&rdata->kref, lport->tt.rport_destroy);
			return;
		}
		mutex_lock(&rdata->rp_mutex);
		if (rdata->rport)
			FC_RPORT_DBG(rdata, "rport already allocated\n");
		rdata->rport = rport;
		rport->maxframe_size = rdata->maxframe_size;
		rport->supported_classes = rdata->supported_classes;

		/* Mirror the negotiated parameters into the transport rport. */
		rp = rport->dd_data;
		rp->local_port = lport;
		rp->rp_state = rdata->rp_state;
		rp->flags = rdata->flags;
		rp->e_d_tov = rdata->e_d_tov;
		rp->r_a_tov = rdata->r_a_tov;
		mutex_unlock(&rdata->rp_mutex);

		if (rport_ops && rport_ops->event_callback) {
			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
			rport_ops->event_callback(lport, rdata, event);
		}
		kref_put(&rdata->kref, lport->tt.rport_destroy);
		break;

	case RPORT_EV_FAILED:
	case RPORT_EV_LOGO:
	case RPORT_EV_STOP:
		port_id = rdata->ids.port_id;
		mutex_unlock(&rdata->rp_mutex);

		if (port_id != FC_FID_DIR_SERV) {
			/*
			 * We must drop rp_mutex before taking disc_mutex.
			 * Re-evaluate state to allow for restart.
			 * A transition to RESTART state must only happen
			 * while disc_mutex is held and rdata is on the list.
			 */
			mutex_lock(&lport->disc.disc_mutex);
			mutex_lock(&rdata->rp_mutex);
			if (rdata->rp_state == RPORT_ST_RESTART)
				restart = 1;
			else
				list_del(&rdata->peers);
			mutex_unlock(&rdata->rp_mutex);
			mutex_unlock(&lport->disc.disc_mutex);
		}

		if (rport_ops && rport_ops->event_callback) {
			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
			rport_ops->event_callback(lport, rdata, event);
		}
		cancel_delayed_work_sync(&rdata->retry_work);

		/*
		 * Reset any outstanding exchanges before freeing rport.
		 */
		lport->tt.exch_mgr_reset(lport, 0, port_id);
		lport->tt.exch_mgr_reset(lport, port_id, 0);

		if (rport) {
			rp = rport->dd_data;
			rp->rp_state = RPORT_ST_DELETE;
			mutex_lock(&rdata->rp_mutex);
			rdata->rport = NULL;
			mutex_unlock(&rdata->rp_mutex);
			fc_remote_port_delete(rport);
		}
		if (restart) {
			/* Re-login instead of dropping the final reference. */
			mutex_lock(&rdata->rp_mutex);
			FC_RPORT_DBG(rdata, "work restart\n");
			fc_rport_enter_plogi(rdata);
			mutex_unlock(&rdata->rp_mutex);
		} else
			kref_put(&rdata->kref, lport->tt.rport_destroy);
		break;

	default:
		mutex_unlock(&rdata->rp_mutex);
		break;
	}
}
341
342/**
34f42a07 343 * fc_rport_login() - Start the remote port login state machine
9fb9d328 344 * @rdata: private remote port
42e9a92f
RL
345 *
346 * Locking Note: Called without the rport lock held. This
347 * function will hold the rport lock, call an _enter_*
348 * function and then unlock the rport.
370c3bd0
JE
349 *
350 * This indicates the intent to be logged into the remote port.
351 * If it appears we are already logged in, ADISC is used to verify
352 * the setup.
42e9a92f 353 */
9fb9d328 354int fc_rport_login(struct fc_rport_priv *rdata)
42e9a92f 355{
42e9a92f
RL
356 mutex_lock(&rdata->rp_mutex);
357
370c3bd0
JE
358 switch (rdata->rp_state) {
359 case RPORT_ST_READY:
360 FC_RPORT_DBG(rdata, "ADISC port\n");
361 fc_rport_enter_adisc(rdata);
362 break;
b4a9c7ed
JE
363 case RPORT_ST_RESTART:
364 break;
365 case RPORT_ST_DELETE:
366 FC_RPORT_DBG(rdata, "Restart deleted port\n");
367 fc_rport_state_enter(rdata, RPORT_ST_RESTART);
368 break;
370c3bd0
JE
369 default:
370 FC_RPORT_DBG(rdata, "Login to port\n");
371 fc_rport_enter_plogi(rdata);
372 break;
373 }
42e9a92f
RL
374 mutex_unlock(&rdata->rp_mutex);
375
376 return 0;
377}
378
/**
 * fc_rport_enter_delete() - schedule a remote port to be deleted.
 * @rdata: private remote port
 * @event: event to report as the reason for deletion
 *
 * Locking Note: Called with the rport lock held.
 *
 * Allow state change into DELETE only once.
 *
 * Call queue_work only if there's no event already pending.
 * Set the new event so that the old pending event will not occur.
 * Since we have the mutex, even if fc_rport_work() is already started,
 * it'll see the new event.
 */
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
				  enum fc_rport_event event)
{
	/* Already on the way out: don't re-queue or overwrite the event. */
	if (rdata->rp_state == RPORT_ST_DELETE)
		return;

	FC_RPORT_DBG(rdata, "Delete port\n");

	fc_rport_state_enter(rdata, RPORT_ST_DELETE);

	/* Queue first, then set the event — see the note above on ordering. */
	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = event;
}
407
/**
 * fc_rport_logoff() - Logoff and remove an rport
 * @rdata: private remote port
 *
 * Sends a LOGO to the remote port (unless the port is already being
 * deleted or restarted) and schedules the rport for deletion with the
 * RPORT_EV_STOP event.  Always returns 0.
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
int fc_rport_logoff(struct fc_rport_priv *rdata)
{
	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Remove port\n");

	if (rdata->rp_state == RPORT_ST_DELETE) {
		FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
		goto out;
	}

	/* A port in RESTART is already torn down; no LOGO is sent for it. */
	if (rdata->rp_state == RPORT_ST_RESTART)
		FC_RPORT_DBG(rdata, "Port in Restart state, deleting\n");
	else
		fc_rport_enter_logo(rdata);

	/*
	 * Change the state to Delete so that we discard
	 * the response.
	 */
	fc_rport_enter_delete(rdata, RPORT_EV_STOP);
out:
	mutex_unlock(&rdata->rp_mutex);
	return 0;
}
441
/**
 * fc_rport_enter_ready() - The rport is ready
 * @rdata: private remote port
 *
 * Transitions to READY and posts an RPORT_EV_READY event for
 * fc_rport_work() to deliver to the transport and upper layers.
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
	fc_rport_state_enter(rdata, RPORT_ST_READY);

	FC_RPORT_DBG(rdata, "Port is Ready\n");

	/*
	 * Queue the work only when no event is already pending; the event
	 * field is updated either way so the worker sees the latest event.
	 */
	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = RPORT_EV_READY;
}
459
/**
 * fc_rport_timeout() - Handler for the retry_work timer.
 * @work: The work struct of the fc_rport_priv
 *
 * Re-runs the _enter_ routine for whatever state the rport is in when
 * the retry fires, which re-sends the corresponding ELS request.
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
static void fc_rport_timeout(struct work_struct *work)
{
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, retry_work.work);

	mutex_lock(&rdata->rp_mutex);

	switch (rdata->rp_state) {
	case RPORT_ST_PLOGI:
		fc_rport_enter_plogi(rdata);
		break;
	case RPORT_ST_PRLI:
		fc_rport_enter_prli(rdata);
		break;
	case RPORT_ST_RTV:
		fc_rport_enter_rtv(rdata);
		break;
	case RPORT_ST_LOGO:
		fc_rport_enter_logo(rdata);
		break;
	case RPORT_ST_ADISC:
		fc_rport_enter_adisc(rdata);
		break;
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
	case RPORT_ST_DELETE:
	case RPORT_ST_RESTART:
		/* No request outstanding in these states; nothing to retry. */
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
}
500
501/**
34f42a07 502 * fc_rport_error() - Error handler, called once retries have been exhausted
9fb9d328 503 * @rdata: private remote port
42e9a92f
RL
504 * @fp: The frame pointer
505 *
42e9a92f
RL
506 * Locking Note: The rport lock is expected to be held before
507 * calling this routine
508 */
9fb9d328 509static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
42e9a92f 510{
9fb9d328 511 FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
cdbe6dfe
JE
512 IS_ERR(fp) ? -PTR_ERR(fp) : 0,
513 fc_rport_state(rdata), rdata->retries);
42e9a92f 514
6755db1c
CL
515 switch (rdata->rp_state) {
516 case RPORT_ST_PLOGI:
6755db1c 517 case RPORT_ST_LOGO:
9fb9d328 518 fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
6755db1c
CL
519 break;
520 case RPORT_ST_RTV:
9fb9d328 521 fc_rport_enter_ready(rdata);
6755db1c 522 break;
370c3bd0
JE
523 case RPORT_ST_PRLI:
524 case RPORT_ST_ADISC:
525 fc_rport_enter_logo(rdata);
526 break;
14194054 527 case RPORT_ST_DELETE:
b4a9c7ed 528 case RPORT_ST_RESTART:
6755db1c
CL
529 case RPORT_ST_READY:
530 case RPORT_ST_INIT:
531 break;
42e9a92f
RL
532 }
533}
534
6755db1c 535/**
34f42a07 536 * fc_rport_error_retry() - Error handler when retries are desired
9fb9d328 537 * @rdata: private remote port data
6755db1c
CL
538 * @fp: The frame pointer
539 *
540 * If the error was an exchange timeout retry immediately,
541 * otherwise wait for E_D_TOV.
542 *
543 * Locking Note: The rport lock is expected to be held before
544 * calling this routine
545 */
9fb9d328
JE
546static void fc_rport_error_retry(struct fc_rport_priv *rdata,
547 struct fc_frame *fp)
6755db1c 548{
6755db1c
CL
549 unsigned long delay = FC_DEF_E_D_TOV;
550
551 /* make sure this isn't an FC_EX_CLOSED error, never retry those */
552 if (PTR_ERR(fp) == -FC_EX_CLOSED)
9fb9d328 553 return fc_rport_error(rdata, fp);
6755db1c 554
a3666955 555 if (rdata->retries < rdata->local_port->max_rport_retry_count) {
9fb9d328
JE
556 FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
557 PTR_ERR(fp), fc_rport_state(rdata));
6755db1c
CL
558 rdata->retries++;
559 /* no additional delay on exchange timeouts */
560 if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
561 delay = 0;
6755db1c
CL
562 schedule_delayed_work(&rdata->retry_work, delay);
563 return;
564 }
565
9fb9d328 566 return fc_rport_error(rdata, fp);
6755db1c
CL
567}
568
/**
 * fc_rport_plogi_resp() - Handle incoming ELS PLOGI response
 * @sp: current sequence in the PLOGI exchange
 * @fp: response frame, or an IS_ERR() error pointer
 * @rdata_arg: private remote port data
 *
 * On LS_ACC, records the peer's WWPN/WWNN, timeout and sequence limits
 * from the PLOGI service parameters and advances to PRLI.  Anything else
 * goes through the retry path.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_lport *lport = rdata->local_port;
	struct fc_els_flogi *plp = NULL;
	unsigned int tov;
	u16 csp_seq;
	u16 cssp_seq;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));

	/* A stale response (state already moved on) is only freed. */
	if (rdata->rp_state != RPORT_ST_PLOGI) {
		FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC &&
	    (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
		rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
		rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);

		/* E_D_TOV may be in ns when the EDTR feature bit is set. */
		tov = ntohl(plp->fl_csp.sp_e_d_tov);
		if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
			tov /= 1000;
		if (tov > rdata->e_d_tov)
			rdata->e_d_tov = tov;
		/* Concurrent sequences: minimum of total and class 3 limits. */
		csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
		cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
		if (cssp_seq < csp_seq)
			csp_seq = cssp_seq;
		rdata->max_seq = csp_seq;
		rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
		fc_rport_enter_prli(rdata);
	} else
		fc_rport_error_retry(rdata, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* Drop the reference taken when the PLOGI was sent. */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
634
635/**
34f42a07 636 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
9fb9d328 637 * @rdata: private remote port data
42e9a92f
RL
638 *
639 * Locking Note: The rport lock is expected to be held before calling
640 * this routine.
641 */
9fb9d328 642static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
42e9a92f 643{
42e9a92f
RL
644 struct fc_lport *lport = rdata->local_port;
645 struct fc_frame *fp;
646
9fb9d328
JE
647 FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
648 fc_rport_state(rdata));
42e9a92f 649
9fb9d328 650 fc_rport_state_enter(rdata, RPORT_ST_PLOGI);
42e9a92f 651
f211fa51 652 rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
42e9a92f
RL
653 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
654 if (!fp) {
9fb9d328 655 fc_rport_error_retry(rdata, fp);
42e9a92f
RL
656 return;
657 }
658 rdata->e_d_tov = lport->e_d_tov;
659
f211fa51 660 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
9fb9d328 661 fc_rport_plogi_resp, rdata, lport->e_d_tov))
8f550f93 662 fc_rport_error_retry(rdata, NULL);
42e9a92f 663 else
f211fa51 664 kref_get(&rdata->kref);
42e9a92f
RL
665}
666
/**
 * fc_rport_prli_resp() - Process Login (PRLI) response handler
 * @sp: current sequence in the PRLI exchange
 * @fp: response frame, or an IS_ERR() error pointer
 * @rdata_arg: private remote port data
 *
 * On LS_ACC, derives the remote port's FCP roles and retry capability
 * from the service-parameter page and advances to RTV.  A rejected PRLI
 * schedules deletion with RPORT_EV_FAILED.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	u32 roles = FC_RPORT_ROLE_UNKNOWN;
	u32 fcp_parm = 0;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));

	/* A stale response (state already moved on) is only freed. */
	if (rdata->rp_state != RPORT_ST_PRLI) {
		FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	/* reinitialize remote port roles */
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		pp = fc_frame_payload_get(fp, sizeof(*pp));
		if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
			fcp_parm = ntohl(pp->spp.spp_params);
			if (fcp_parm & FCP_SPPF_RETRY)
				rdata->flags |= FC_RP_FLAGS_RETRY;
		}

		rdata->supported_classes = FC_COS_CLASS3;
		/* fcp_parm stays 0 if the payload was short; roles stay unknown. */
		if (fcp_parm & FCP_SPPF_INIT_FCN)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (fcp_parm & FCP_SPPF_TARG_FCN)
			roles |= FC_RPORT_ROLE_FCP_TARGET;

		rdata->ids.roles = roles;
		fc_rport_enter_rtv(rdata);

	} else {
		FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* Drop the reference taken when the PRLI was sent. */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
738
/**
 * fc_rport_logo_resp() - Logout (LOGO) response handler
 * @sp: current sequence in the LOGO exchange
 * @fp: response frame, or an IS_ERR() error pointer
 * @rdata_arg: private remote port data
 *
 * The rport is scheduled for deletion regardless of whether the peer
 * accepted the LOGO; a bad response is only logged.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a LOGO %s\n", fc_els_resp_type(fp));

	/* A stale response (state already moved on) is only freed. */
	if (rdata->rp_state != RPORT_ST_LOGO) {
		FC_RPORT_DBG(rdata, "Received a LOGO response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op != ELS_LS_ACC)
		FC_RPORT_DBG(rdata, "Bad ELS response op %x for LOGO command\n",
			     op);
	fc_rport_enter_delete(rdata, RPORT_EV_LOGO);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* Drop the reference taken when the LOGO was sent. */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
784
/**
 * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
 * @rdata: private remote port data
 *
 * Well-known-address rports (port IDs at or above FC_FID_DOM_MGR) skip
 * PRLI and RTV entirely and go straight to READY.
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	struct fc_frame *fp;

	/*
	 * If the rport is one of the well known addresses
	 * we skip PRLI and RTV and go straight to READY.
	 */
	if (rdata->ids.port_id >= FC_FID_DOM_MGR) {
		fc_rport_enter_ready(rdata);
		return;
	}

	FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_PRLI);

	fp = fc_frame_alloc(lport, sizeof(*pp));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}

	/* On success, hold a reference for the response handler to drop. */
	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
				  fc_rport_prli_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		kref_get(&rdata->kref);
}
827
/**
 * fc_rport_rtv_resp() - Request Timeout Value response handler
 * @sp: current sequence in the RTV exchange
 * @fp: response frame, or an IS_ERR() error pointer
 * @rdata_arg: private remote port data
 *
 * Many targets don't seem to support this.  On LS_ACC the peer's
 * R_A_TOV and E_D_TOV values replace the locally inherited ones; any
 * failure simply proceeds to READY via fc_rport_error().
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));

	/* A stale response (state already moved on) is only freed. */
	if (rdata->rp_state != RPORT_ST_RTV) {
		FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		/* RTV errors are not retried; fc_rport_error() goes READY. */
		fc_rport_error(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		struct fc_els_rtv_acc *rtv;
		u32 toq;
		u32 tov;

		rtv = fc_frame_payload_get(fp, sizeof(*rtv));
		if (rtv) {
			toq = ntohl(rtv->rtv_toq);
			tov = ntohl(rtv->rtv_r_a_tov);
			/* Never store a zero timeout; clamp to 1. */
			if (tov == 0)
				tov = 1;
			rdata->r_a_tov = tov;
			tov = ntohl(rtv->rtv_e_d_tov);
			/* EDRES set: E_D_TOV is in ns resolution — convert. */
			if (toq & FC_ELS_RTV_EDRES)
				tov /= 1000000;
			if (tov == 0)
				tov = 1;
			rdata->e_d_tov = tov;
		}
	}

	fc_rport_enter_ready(rdata);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* Drop the reference taken when the RTV was sent. */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
893
894/**
34f42a07 895 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
9fb9d328 896 * @rdata: private remote port data
42e9a92f
RL
897 *
898 * Locking Note: The rport lock is expected to be held before calling
899 * this routine.
900 */
9fb9d328 901static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
42e9a92f
RL
902{
903 struct fc_frame *fp;
42e9a92f
RL
904 struct fc_lport *lport = rdata->local_port;
905
9fb9d328
JE
906 FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
907 fc_rport_state(rdata));
42e9a92f 908
9fb9d328 909 fc_rport_state_enter(rdata, RPORT_ST_RTV);
42e9a92f
RL
910
911 fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
912 if (!fp) {
9fb9d328 913 fc_rport_error_retry(rdata, fp);
42e9a92f
RL
914 return;
915 }
916
f211fa51 917 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
9fb9d328 918 fc_rport_rtv_resp, rdata, lport->e_d_tov))
8f550f93 919 fc_rport_error_retry(rdata, NULL);
42e9a92f 920 else
f211fa51 921 kref_get(&rdata->kref);
42e9a92f
RL
922}
923
/**
 * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
 * @rdata: private remote port data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_LOGO);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
	if (!fp) {
		/* allocation failed; retry later via the error path */
		fc_rport_error_retry(rdata, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
				  fc_rport_logo_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		/* reference for the response callback; dropped there */
		kref_get(&rdata->kref);
}
953
/**
 * fc_rport_adisc_resp() - Address Discovery response handler
 * @sp: current sequence in the ADISC exchange
 * @fp: response frame, or an error pointer (IS_ERR) on exchange failure
 * @rdata_arg: remote port private.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_els_adisc *adisc;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a ADISC response\n");

	/* A late response after the state has moved on is just dropped. */
	if (rdata->rp_state != RPORT_ST_ADISC) {
		FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n",
			     fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;	/* error pointer: no frame to free */
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error(rdata, fp);
		goto err;
	}

	/*
	 * If address verification failed. Consider us logged out of the rport.
	 * Since the rport is still in discovery, we want to be
	 * logged in, so go to PLOGI state. Otherwise, go back to READY.
	 */
	op = fc_frame_payload_op(fp);
	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	if (op != ELS_LS_ACC || !adisc ||
	    ntoh24(adisc->adisc_port_id) != rdata->ids.port_id ||
	    get_unaligned_be64(&adisc->adisc_wwpn) != rdata->ids.port_name ||
	    get_unaligned_be64(&adisc->adisc_wwnn) != rdata->ids.node_name) {
		FC_RPORT_DBG(rdata, "ADISC error or mismatch\n");
		fc_rport_enter_plogi(rdata);
	} else {
		FC_RPORT_DBG(rdata, "ADISC OK\n");
		fc_rport_enter_ready(rdata);
	}
out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* drop the reference taken when the ADISC request was sent */
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
1011
/**
 * fc_rport_enter_adisc() - Send Address Discover (ADISC) request to peer
 * @rdata: remote port private data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "sending ADISC from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_ADISC);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc));
	if (!fp) {
		/* allocation failed; retry later via the error path */
		fc_rport_error_retry(rdata, fp);
		return;
	}
	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
				  fc_rport_adisc_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		/* reference for fc_rport_adisc_resp(); dropped there */
		kref_get(&rdata->kref);
}
1040
/**
 * fc_rport_recv_adisc_req() - Handle incoming Address Discovery (ADISC) Request
 * @rdata: remote port private
 * @sp: current sequence in the ADISC exchange
 * @in_fp: ADISC request frame
 *
 * Sends an LS_ACC carrying our own address identifiers, or an LS_RJT if
 * the request payload is too short.  The request frame is always freed.
 *
 * Locking Note: Called with the lport and rport locks held.
 */
static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
				    struct fc_seq *sp, struct fc_frame *in_fp)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;
	struct fc_exch *ep = fc_seq_exch(sp);
	struct fc_els_adisc *adisc;
	struct fc_seq_els_data rjt_data;
	u32 f_ctl;

	FC_RPORT_DBG(rdata, "Received ADISC request\n");

	adisc = fc_frame_payload_get(in_fp, sizeof(*adisc));
	if (!adisc) {
		/* payload shorter than an ADISC: protocol error, invalid length */
		rjt_data.fp = NULL;
		rjt_data.reason = ELS_RJT_PROT;
		rjt_data.explan = ELS_EXPL_INV_LEN;
		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
		goto drop;
	}

	fp = fc_frame_alloc(lport, sizeof(*adisc));
	if (!fp)
		goto drop;	/* no memory for the ACC; originator will retry */
	fc_adisc_fill(lport, fp);
	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	adisc->adisc_cmd = ELS_LS_ACC;
	sp = lport->tt.seq_start_next(sp);
	f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
		       FC_TYPE_ELS, f_ctl, 0);
	lport->tt.seq_send(lport, sp, fp);
drop:
	fc_frame_free(in_fp);
}
1084
/**
 * fc_rport_recv_els_req() - handle a validated ELS request.
 * @lport: Fibre Channel local port
 * @sp: current sequence in the PLOGI exchange
 * @fp: response frame
 *
 * Handle incoming ELS requests that require port login.
 * The ELS opcode has already been validated by the caller.
 *
 * Rejects with "PLOGI required" if the remote port is unknown or is not
 * in a logged-in state.
 *
 * Locking Note: Called with the lport lock held.
 */
static void fc_rport_recv_els_req(struct fc_lport *lport,
				  struct fc_seq *sp, struct fc_frame *fp)
{
	struct fc_rport_priv *rdata;
	struct fc_frame_header *fh;
	struct fc_seq_els_data els_data;

	els_data.fp = NULL;
	els_data.reason = ELS_RJT_UNAB;
	els_data.explan = ELS_EXPL_PLOGI_REQD;

	fh = fc_frame_header_get(fp);

	/*
	 * Look up the remote port under the disc mutex, then hand the
	 * reference off to the rport mutex before releasing the disc mutex.
	 */
	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_lookup(lport, ntoh24(fh->fh_s_id));
	if (!rdata) {
		mutex_unlock(&lport->disc.disc_mutex);
		goto reject;
	}
	mutex_lock(&rdata->rp_mutex);
	mutex_unlock(&lport->disc.disc_mutex);

	/* only logged-in states may receive these ELS requests */
	switch (rdata->rp_state) {
	case RPORT_ST_PRLI:
	case RPORT_ST_RTV:
	case RPORT_ST_READY:
	case RPORT_ST_ADISC:
		break;
	default:
		mutex_unlock(&rdata->rp_mutex);
		goto reject;
	}

	switch (fc_frame_payload_op(fp)) {
	case ELS_PRLI:
		fc_rport_recv_prli_req(rdata, sp, fp);
		break;
	case ELS_PRLO:
		fc_rport_recv_prlo_req(rdata, sp, fp);
		break;
	case ELS_ADISC:
		fc_rport_recv_adisc_req(rdata, sp, fp);
		break;
	case ELS_RRQ:
		els_data.fp = fp;
		lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
		break;
	case ELS_REC:
		els_data.fp = fp;
		lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
		break;
	default:
		fc_frame_free(fp);	/* can't happen */
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
	return;

reject:
	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
	fc_frame_free(fp);
}
1159
/**
 * fc_rport_recv_req() - Handle a received ELS request from a rport
 * @sp: current sequence in the PLOGI exchange
 * @fp: response frame
 * @lport: Fibre Channel local port
 *
 * Locking Note: Called with the lport lock held.
 */
void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
		       struct fc_lport *lport)
{
	struct fc_seq_els_data els_data;

	/*
	 * Handle PLOGI and LOGO requests separately, since they
	 * don't require prior login.
	 * Check for unsupported opcodes first and reject them.
	 * For some ops, it would be incorrect to reject with "PLOGI required".
	 */
	switch (fc_frame_payload_op(fp)) {
	case ELS_PLOGI:
		fc_rport_recv_plogi_req(lport, sp, fp);
		break;
	case ELS_LOGO:
		fc_rport_recv_logo_req(lport, sp, fp);
		break;
	case ELS_PRLI:
	case ELS_PRLO:
	case ELS_ADISC:
	case ELS_RRQ:
	case ELS_REC:
		/* these require an existing login; validated further there */
		fc_rport_recv_els_req(lport, sp, fp);
		break;
	default:
		/* unsupported opcode: reject without "PLOGI required" */
		fc_frame_free(fp);
		els_data.fp = NULL;
		els_data.reason = ELS_RJT_UNSUP;
		els_data.explan = ELS_EXPL_NONE;
		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
		break;
	}
}
1202
/**
 * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
 * @lport: local port
 * @sp: current sequence in the PLOGI exchange
 * @rx_fp: PLOGI request frame
 *
 * Locking Note: NOTE(review) - despite the historical comment, the rport
 * lock is not held on entry: this function takes the disc mutex and the
 * (possibly newly created) rport's rp_mutex itself.  Called from the
 * ELS receive path with the lport lock held - confirm against callers.
 */
static void fc_rport_recv_plogi_req(struct fc_lport *lport,
				    struct fc_seq *sp, struct fc_frame *rx_fp)
{
	struct fc_disc *disc;
	struct fc_rport_priv *rdata;
	struct fc_frame *fp = rx_fp;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	struct fc_els_flogi *pl;
	struct fc_seq_els_data rjt_data;
	u32 sid, f_ctl;

	rjt_data.fp = NULL;
	fh = fc_frame_header_get(fp);
	sid = ntoh24(fh->fh_s_id);

	FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");

	pl = fc_frame_payload_get(fp, sizeof(*pl));
	if (!pl) {
		FC_RPORT_ID_DBG(lport, sid, "Received PLOGI too short\n");
		rjt_data.reason = ELS_RJT_PROT;
		rjt_data.explan = ELS_EXPL_INV_LEN;
		goto reject;
	}

	/*
	 * Create (or find) the remote port under the disc mutex, then take
	 * its rp_mutex before dropping the disc mutex.
	 */
	disc = &lport->disc;
	mutex_lock(&disc->disc_mutex);
	rdata = lport->tt.rport_create(lport, sid);
	if (!rdata) {
		mutex_unlock(&disc->disc_mutex);
		rjt_data.reason = ELS_RJT_UNAB;
		rjt_data.explan = ELS_EXPL_INSUF_RES;
		goto reject;
	}

	mutex_lock(&rdata->rp_mutex);
	mutex_unlock(&disc->disc_mutex);

	rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn);
	rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn);

	/*
	 * If the rport was just created, possibly due to the incoming PLOGI,
	 * set the state appropriately and accept the PLOGI.
	 *
	 * If we had also sent a PLOGI, and if the received PLOGI is from a
	 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
	 * "command already in progress".
	 *
	 * XXX TBD: If the session was ready before, the PLOGI should result in
	 * all outstanding exchanges being reset.
	 */
	switch (rdata->rp_state) {
	case RPORT_ST_INIT:
		FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n");
		break;
	case RPORT_ST_PLOGI:
		FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n");
		/* WWPN tie-break: the lower WWPN side's PLOGI is rejected */
		if (rdata->ids.port_name < lport->wwpn) {
			mutex_unlock(&rdata->rp_mutex);
			rjt_data.reason = ELS_RJT_INPROG;
			rjt_data.explan = ELS_EXPL_NONE;
			goto reject;
		}
		break;
	case RPORT_ST_PRLI:
	case RPORT_ST_RTV:
	case RPORT_ST_READY:
	case RPORT_ST_ADISC:
		FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
			     "- ignored for now\n", rdata->rp_state);
		/* XXX TBD - should reset */
		break;
	case RPORT_ST_DELETE:
	case RPORT_ST_LOGO:
	case RPORT_ST_RESTART:
		FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n",
			     fc_rport_state(rdata));
		mutex_unlock(&rdata->rp_mutex);
		rjt_data.reason = ELS_RJT_BUSY;
		rjt_data.explan = ELS_EXPL_NONE;
		goto reject;
	}

	/*
	 * Get session payload size from incoming PLOGI.
	 */
	rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs);
	fc_frame_free(rx_fp);

	/*
	 * Send LS_ACC. If this fails, the originator should retry.
	 */
	sp = lport->tt.seq_start_next(sp);
	if (!sp)
		goto out;
	fp = fc_frame_alloc(lport, sizeof(*pl));
	if (!fp)
		goto out;

	fc_plogi_fill(lport, fp, ELS_LS_ACC);
	f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
	ep = fc_seq_exch(sp);
	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
		       FC_TYPE_ELS, f_ctl, 0);
	lport->tt.seq_send(lport, sp, fp);
	/* move the session forward: request process login next */
	fc_rport_enter_prli(rdata);
out:
	mutex_unlock(&rdata->rp_mutex);
	return;

reject:
	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	fc_frame_free(fp);
}
1327
1328/**
34f42a07 1329 * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
9fb9d328 1330 * @rdata: private remote port data
42e9a92f
RL
1331 * @sp: current sequence in the PRLI exchange
1332 * @fp: PRLI request frame
1333 *
1334 * Locking Note: The rport lock is exected to be held before calling
1335 * this function.
1336 */
9fb9d328 1337static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
42e9a92f
RL
1338 struct fc_seq *sp, struct fc_frame *rx_fp)
1339{
42e9a92f
RL
1340 struct fc_lport *lport = rdata->local_port;
1341 struct fc_exch *ep;
1342 struct fc_frame *fp;
1343 struct fc_frame_header *fh;
1344 struct {
1345 struct fc_els_prli prli;
1346 struct fc_els_spp spp;
1347 } *pp;
1348 struct fc_els_spp *rspp; /* request service param page */
1349 struct fc_els_spp *spp; /* response spp */
1350 unsigned int len;
1351 unsigned int plen;
1352 enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
1353 enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
1354 enum fc_els_spp_resp resp;
1355 struct fc_seq_els_data rjt_data;
1356 u32 f_ctl;
1357 u32 fcp_parm;
1358 u32 roles = FC_RPORT_ROLE_UNKNOWN;
1359 rjt_data.fp = NULL;
1360
1361 fh = fc_frame_header_get(rx_fp);
1362
9fb9d328
JE
1363 FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
1364 fc_rport_state(rdata));
42e9a92f
RL
1365
1366 switch (rdata->rp_state) {
1367 case RPORT_ST_PRLI:
3ac6f98f 1368 case RPORT_ST_RTV:
42e9a92f 1369 case RPORT_ST_READY:
370c3bd0 1370 case RPORT_ST_ADISC:
42e9a92f
RL
1371 reason = ELS_RJT_NONE;
1372 break;
1373 default:
b4c6f546
AJ
1374 fc_frame_free(rx_fp);
1375 return;
42e9a92f
RL
1376 break;
1377 }
1378 len = fr_len(rx_fp) - sizeof(*fh);
1379 pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1380 if (pp == NULL) {
1381 reason = ELS_RJT_PROT;
1382 explan = ELS_EXPL_INV_LEN;
1383 } else {
1384 plen = ntohs(pp->prli.prli_len);
1385 if ((plen % 4) != 0 || plen > len) {
1386 reason = ELS_RJT_PROT;
1387 explan = ELS_EXPL_INV_LEN;
1388 } else if (plen < len) {
1389 len = plen;
1390 }
1391 plen = pp->prli.prli_spp_len;
1392 if ((plen % 4) != 0 || plen < sizeof(*spp) ||
1393 plen > len || len < sizeof(*pp)) {
1394 reason = ELS_RJT_PROT;
1395 explan = ELS_EXPL_INV_LEN;
1396 }
1397 rspp = &pp->spp;
1398 }
1399 if (reason != ELS_RJT_NONE ||
1400 (fp = fc_frame_alloc(lport, len)) == NULL) {
1401 rjt_data.reason = reason;
1402 rjt_data.explan = explan;
1403 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1404 } else {
1405 sp = lport->tt.seq_start_next(sp);
1406 WARN_ON(!sp);
1407 pp = fc_frame_payload_get(fp, len);
1408 WARN_ON(!pp);
1409 memset(pp, 0, len);
1410 pp->prli.prli_cmd = ELS_LS_ACC;
1411 pp->prli.prli_spp_len = plen;
1412 pp->prli.prli_len = htons(len);
1413 len -= sizeof(struct fc_els_prli);
1414
6bd054cb
RL
1415 /* reinitialize remote port roles */
1416 rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
1417
42e9a92f
RL
1418 /*
1419 * Go through all the service parameter pages and build
1420 * response. If plen indicates longer SPP than standard,
1421 * use that. The entire response has been pre-cleared above.
1422 */
1423 spp = &pp->spp;
1424 while (len >= plen) {
1425 spp->spp_type = rspp->spp_type;
1426 spp->spp_type_ext = rspp->spp_type_ext;
1427 spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1428 resp = FC_SPP_RESP_ACK;
1429 if (rspp->spp_flags & FC_SPP_RPA_VAL)
1430 resp = FC_SPP_RESP_NO_PA;
1431 switch (rspp->spp_type) {
1432 case 0: /* common to all FC-4 types */
1433 break;
1434 case FC_TYPE_FCP:
1435 fcp_parm = ntohl(rspp->spp_params);
85b5893c 1436 if (fcp_parm & FCP_SPPF_RETRY)
42e9a92f 1437 rdata->flags |= FC_RP_FLAGS_RETRY;
f211fa51 1438 rdata->supported_classes = FC_COS_CLASS3;
42e9a92f
RL
1439 if (fcp_parm & FCP_SPPF_INIT_FCN)
1440 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1441 if (fcp_parm & FCP_SPPF_TARG_FCN)
1442 roles |= FC_RPORT_ROLE_FCP_TARGET;
f211fa51 1443 rdata->ids.roles = roles;
42e9a92f
RL
1444
1445 spp->spp_params =
1446 htonl(lport->service_params);
1447 break;
1448 default:
1449 resp = FC_SPP_RESP_INVL;
1450 break;
1451 }
1452 spp->spp_flags |= resp;
1453 len -= plen;
1454 rspp = (struct fc_els_spp *)((char *)rspp + plen);
1455 spp = (struct fc_els_spp *)((char *)spp + plen);
1456 }
1457
1458 /*
1459 * Send LS_ACC. If this fails, the originator should retry.
1460 */
1461 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1462 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1463 ep = fc_seq_exch(sp);
1464 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1465 FC_TYPE_ELS, f_ctl, 0);
1466 lport->tt.seq_send(lport, sp, fp);
1467
1468 /*
1469 * Get lock and re-check state.
1470 */
1471 switch (rdata->rp_state) {
1472 case RPORT_ST_PRLI:
9fb9d328 1473 fc_rport_enter_ready(rdata);
42e9a92f
RL
1474 break;
1475 case RPORT_ST_READY:
370c3bd0 1476 case RPORT_ST_ADISC:
42e9a92f
RL
1477 break;
1478 default:
1479 break;
1480 }
1481 }
1482 fc_frame_free(rx_fp);
1483}
1484
/**
 * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
 * @rdata: private remote port data
 * @sp: current sequence in the PRLO exchange
 * @fp: PRLO request frame
 *
 * PRLO is currently not supported: the request is always rejected with
 * LS_RJT (unable to perform) and the frame is freed.
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
				   struct fc_seq *sp,
				   struct fc_frame *fp)
{
	struct fc_lport *lport = rdata->local_port;

	struct fc_frame_header *fh;
	struct fc_seq_els_data rjt_data;

	fh = fc_frame_header_get(fp);

	FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
		     fc_rport_state(rdata));

	rjt_data.fp = NULL;
	rjt_data.reason = ELS_RJT_UNAB;
	rjt_data.explan = ELS_EXPL_NONE;
	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	fc_frame_free(fp);
}
1514
/**
 * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
 * @lport: local port.
 * @sp: current sequence in the LOGO exchange
 * @fp: LOGO request frame
 *
 * Accepts the LOGO, then looks up the remote port and schedules its
 * deletion (or a restart/re-login if it came from discovery).
 *
 * Locking Note: NOTE(review) - the rport lock is NOT held on entry here;
 * this function takes the disc mutex and the rport's rp_mutex itself.
 * Called from the ELS receive path with the lport lock held - confirm
 * against callers.
 */
static void fc_rport_recv_logo_req(struct fc_lport *lport,
				   struct fc_seq *sp,
				   struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fc_rport_priv *rdata;
	u32 sid;

	/* always accept the LOGO, whether or not we know the port */
	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);

	fh = fc_frame_header_get(fp);
	sid = ntoh24(fh->fh_s_id);

	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_lookup(lport, sid);
	if (rdata) {
		mutex_lock(&rdata->rp_mutex);
		FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
			     fc_rport_state(rdata));

		fc_rport_enter_delete(rdata, RPORT_EV_LOGO);

		/*
		 * If the remote port was created due to discovery, set state
		 * to log back in. It may have seen a stale RSCN about us.
		 */
		if (rdata->disc_id)
			fc_rport_state_enter(rdata, RPORT_ST_RESTART);
		mutex_unlock(&rdata->rp_mutex);
	} else
		FC_RPORT_ID_DBG(lport, sid,
				"Received LOGO from non-logged-in port\n");
	mutex_unlock(&lport->disc.disc_mutex);
	fc_frame_free(fp);
}
1559
/**
 * fc_rport_flush_queue() - Flush the rport_event_queue
 *
 * Waits until all pending rport work items have completed.
 */
static void fc_rport_flush_queue(void)
{
	flush_workqueue(rport_event_queue);
}
1564
/**
 * fc_rport_init() - Initialize the remote port layer for a local port
 * @lport: Fibre Channel local port
 *
 * Fills in any rport-related template (tt) function pointers that the
 * LLD has not already overridden with the libfc defaults.
 *
 * Returns: 0 always.
 */
int fc_rport_init(struct fc_lport *lport)
{
	if (!lport->tt.rport_lookup)
		lport->tt.rport_lookup = fc_rport_lookup;

	if (!lport->tt.rport_create)
		lport->tt.rport_create = fc_rport_create;

	if (!lport->tt.rport_login)
		lport->tt.rport_login = fc_rport_login;

	if (!lport->tt.rport_logoff)
		lport->tt.rport_logoff = fc_rport_logoff;

	if (!lport->tt.rport_recv_req)
		lport->tt.rport_recv_req = fc_rport_recv_req;

	if (!lport->tt.rport_flush_queue)
		lport->tt.rport_flush_queue = fc_rport_flush_queue;

	if (!lport->tt.rport_destroy)
		lport->tt.rport_destroy = fc_rport_destroy;

	return 0;
}
EXPORT_SYMBOL(fc_rport_init);
1591
b0d428ad 1592int fc_setup_rport(void)
42e9a92f
RL
1593{
1594 rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1595 if (!rport_event_queue)
1596 return -ENOMEM;
1597 return 0;
1598}
42e9a92f 1599
/**
 * fc_destroy_rport() - Tear down the rport event workqueue
 *
 * Counterpart of fc_setup_rport(); flushes and destroys the queue.
 */
void fc_destroy_rport(void)
{
	destroy_workqueue(rport_event_queue);
}
42e9a92f
RL
1604
/**
 * fc_rport_terminate_io() - Stop all outstanding I/O on a remote port
 * @rport: the fc_transport remote port
 *
 * Resets the exchange managers in both directions: exchanges where the
 * remote port is the destination, then those where it is the source.
 */
void fc_rport_terminate_io(struct fc_rport *rport)
{
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct fc_lport *lport = rp->local_port;

	lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
	lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
}
EXPORT_SYMBOL(fc_rport_terminate_io);