1/*
2 * This file is part of the Chelsio FCoE driver for Linux.
3 *
4 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/kernel.h>
36#include <linux/delay.h>
37#include <linux/slab.h>
38#include <linux/utsname.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_transport_fc.h>
41#include <asm/unaligned.h>
42#include <scsi/fc/fc_els.h>
43#include <scsi/fc/fc_fs.h>
44#include <scsi/fc/fc_gs.h>
45#include <scsi/fc/fc_ms.h>
46
47#include "csio_hw.h"
48#include "csio_mb.h"
49#include "csio_lnode.h"
50#include "csio_rnode.h"
51
52int csio_fcoe_rnodes = 1024;
53int csio_fdmi_enable = 1;
54
55#define PORT_ID_PTR(_x) ((uint8_t *)(&_x) + 1)
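/*
 * Note: a 24-bit FC port ID stored with htonl() occupies bytes 1..3 of the
 * 32-bit word, so PORT_ID_PTR() points past the zero MSB; callers copy
 * three bytes from here into the WR l_id/r_id fields (see csio_ln_prep_ecwr).
 */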
56
57/* Lnode SM declarations */
58static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev);
59static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev);
60static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev);
61static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev);
62
63static int csio_ln_mgmt_submit_req(struct csio_ioreq *,
64 void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
65 enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t);
66
67/* LN event mapping */
68static enum csio_ln_ev fwevt_to_lnevt[] = {
69 CSIO_LNE_NONE, /* None */
70 CSIO_LNE_NONE, /* PLOGI_ACC_RCVD */
71 CSIO_LNE_NONE, /* PLOGI_RJT_RCVD */
72 CSIO_LNE_NONE, /* PLOGI_RCVD */
73 CSIO_LNE_NONE, /* PLOGO_RCVD */
74 CSIO_LNE_NONE, /* PRLI_ACC_RCVD */
75 CSIO_LNE_NONE, /* PRLI_RJT_RCVD */
76 CSIO_LNE_NONE, /* PRLI_RCVD */
77 CSIO_LNE_NONE, /* PRLO_RCVD */
78 CSIO_LNE_NONE, /* NPORT_ID_CHGD */
79 CSIO_LNE_LOGO, /* FLOGO_RCVD */
80 CSIO_LNE_LOGO, /* CLR_VIRT_LNK_RCVD */
81 CSIO_LNE_FAB_INIT_DONE,/* FLOGI_ACC_RCVD */
82 CSIO_LNE_NONE, /* FLOGI_RJT_RCVD */
83 CSIO_LNE_FAB_INIT_DONE,/* FDISC_ACC_RCVD */
84 CSIO_LNE_NONE, /* FDISC_RJT_RCVD */
85 CSIO_LNE_NONE, /* FLOGI_TMO_MAX_RETRY */
86 CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_ACC */
87 CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_RJT */
88 CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_CNFLT */
89 CSIO_LNE_NONE, /* PRLI_TMO */
90 CSIO_LNE_NONE, /* ADISC_TMO */
91 CSIO_LNE_NONE, /* RSCN_DEV_LOST */
92 CSIO_LNE_NONE, /* SCR_ACC_RCVD */
93 CSIO_LNE_NONE, /* ADISC_RJT_RCVD */
94 CSIO_LNE_NONE, /* LOGO_SNT */
95 CSIO_LNE_NONE, /* PROTO_ERR_IMPL_LOGO */
96};
97
98#define CSIO_FWE_TO_LNE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \
99 CSIO_LNE_NONE : \
100 fwevt_to_lnevt[_evt])
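/*
 * fwevt_to_lnevt[] is indexed by the FW rdev event cause; causes beyond
 * PROTO_ERR_IMPL_LOGO, or causes mapped to CSIO_LNE_NONE, are simply not
 * forwarded to the lnode state machine (see csio_fcoe_fwevt_handler).
 */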
101
102#define csio_ct_rsp(cp) (((struct fc_ct_hdr *)cp)->ct_cmd)
103#define csio_ct_reason(cp) (((struct fc_ct_hdr *)cp)->ct_reason)
104#define csio_ct_expl(cp) (((struct fc_ct_hdr *)cp)->ct_explan)
105#define csio_ct_get_pld(cp) ((void *)(((uint8_t *)cp) + FC_CT_HDR_LEN))
106
107/*
108 * csio_ln_lookup_by_portid - Lookup lnode using given portid.
109 * @hw: HW module
110 * @portid: port-id.
111 *
112 * If found, returns lnode matching given portid otherwise returns NULL.
113 */
114static struct csio_lnode *
115csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)
116{
117 struct csio_lnode *ln = hw->rln;
118 struct list_head *tmp;
119
120 /* Match siblings lnode with portid */
121 list_for_each(tmp, &hw->sln_head) {
122 ln = (struct csio_lnode *) tmp;
123 if (ln->portid == portid)
124 return ln;
125 }
126
127 return NULL;
128}
129
130/*
131 * csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id.
132 * @hw - HW module
133 * @vnpi - vnp index.
134 * Returns - If found, returns lnode matching given vnp id
135 * otherwise returns NULL.
136 */
137static struct csio_lnode *
138csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id)
139{
140 struct list_head *tmp1, *tmp2;
141 struct csio_lnode *sln = NULL, *cln = NULL;
142
143 if (list_empty(&hw->sln_head)) {
144 CSIO_INC_STATS(hw, n_lnlkup_miss);
145 return NULL;
146 }
147 /* Traverse sibling lnodes */
148 list_for_each(tmp1, &hw->sln_head) {
149 sln = (struct csio_lnode *) tmp1;
150
151 /* Match sibling lnode */
152 if (sln->vnp_flowid == vnp_id)
153 return sln;
154
155 if (list_empty(&sln->cln_head))
156 continue;
157
158 /* Traverse children lnodes */
159 list_for_each(tmp2, &sln->cln_head) {
160 cln = (struct csio_lnode *) tmp2;
161
162 if (cln->vnp_flowid == vnp_id)
163 return cln;
164 }
165 }
166 CSIO_INC_STATS(hw, n_lnlkup_miss);
167 return NULL;
168}
169
170/**
171 * csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn.
172 * @hw: HW module.
173 * @wwpn: WWPN.
174 *
175 * If found, returns lnode matching given wwpn, returns NULL otherwise.
176 */
177struct csio_lnode *
178csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn)
179{
180 struct list_head *tmp1, *tmp2;
181 struct csio_lnode *sln = NULL, *cln = NULL;
182
183 if (list_empty(&hw->sln_head)) {
184 CSIO_INC_STATS(hw, n_lnlkup_miss);
185 return NULL;
186 }
187 /* Traverse sibling lnodes */
188 list_for_each(tmp1, &hw->sln_head) {
189 sln = (struct csio_lnode *) tmp1;
190
191 /* Match sibling lnode */
192 if (!memcmp(csio_ln_wwpn(sln), wwpn, 8))
193 return sln;
194
195 if (list_empty(&sln->cln_head))
196 continue;
197
198 /* Traverse children lnodes */
199 list_for_each(tmp2, &sln->cln_head) {
200 cln = (struct csio_lnode *) tmp2;
201
202 if (!memcmp(csio_ln_wwpn(cln), wwpn, 8))
203 return cln;
204 }
205 }
206 return NULL;
207}
208
209/* FDMI */
210static void
211csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op)
212{
213 struct fc_ct_hdr *cmd = (struct fc_ct_hdr *)buf;
214 cmd->ct_rev = FC_CT_REV;
215 cmd->ct_fs_type = type;
216 cmd->ct_fs_subtype = sub_type;
217	cmd->ct_cmd = htons(op);
218}
219
220static int
221csio_hostname(uint8_t *buf, size_t buf_len)
222{
223 if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0)
224 return 0;
225 return -1;
226}
227
228static int
229csio_osname(uint8_t *buf, size_t buf_len)
230{
231 if (snprintf(buf, buf_len, "%s %s %s",
232 init_utsname()->sysname,
233 init_utsname()->release,
234 init_utsname()->version) > 0)
235 return 0;
236
237 return -1;
238}
239
240static inline void
241csio_append_attrib(uint8_t **ptr, uint16_t type, void *val, size_t val_len)
242{
243	uint16_t len;
244	struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr;
245
246	if (WARN_ON(val_len > U16_MAX))
247		return;
248
249	len = val_len;
250
251	ae->type = htons(type);
252	len += 4;		/* includes attribute type and length */
253	len = (len + 3) & ~3;	/* should be multiple of 4 bytes */
254	ae->len = htons(len);
255	memcpy(ae->value, val, val_len);
256	if (len > val_len)
257		memset(ae->value + val_len, 0, len - val_len);
258	*ptr += len;
259}
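/*
 * Worked example (illustrative): for a 6-byte value, len becomes 6 + 4 = 10
 * and is rounded up to 12; ae->len is set to 12 (header included), the bytes
 * following the copied value are zeroed, and *ptr advances by 12 to the next
 * attribute slot.
 */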
260
261/*
262 * csio_ln_fdmi_done - FDMI registration completion
263 * @hw: HW context
264 * @fdmi_req: fdmi request
265 */
266static void
267csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
268{
269 void *cmd;
270 struct csio_lnode *ln = fdmi_req->lnode;
271
272 if (fdmi_req->wr_status != FW_SUCCESS) {
273 csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n",
274 fdmi_req->wr_status);
275 CSIO_INC_STATS(ln, n_fdmi_err);
276 }
277
278 cmd = fdmi_req->dma_buf.vaddr;
279 if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
280 csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n",
281 csio_ct_reason(cmd), csio_ct_expl(cmd));
282 }
283}
284
285/*
286 * csio_ln_fdmi_rhba_cbfn - RHBA completion
287 * @hw: HW context
288 * @fdmi_req: fdmi request
289 */
290static void
291csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
292{
293 void *cmd;
294 uint8_t *pld;
295 uint32_t len = 0;
296	__be32 val;
297	__be16 mfs;
298	uint32_t numattrs = 0;
299	struct csio_lnode *ln = fdmi_req->lnode;
300	struct fs_fdmi_attrs *attrib_blk;
301	struct fc_fdmi_port_name *port_name;
302	uint8_t buf[64];
303	uint8_t *fc4_type;
304	unsigned long flags;
305
306 if (fdmi_req->wr_status != FW_SUCCESS) {
307 csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
308 fdmi_req->wr_status);
309 CSIO_INC_STATS(ln, n_fdmi_err);
310 }
311
312 cmd = fdmi_req->dma_buf.vaddr;
313 if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
314 csio_ln_dbg(ln, "fdmi rhba cmd rejected reason %x expl %x\n",
315 csio_ct_reason(cmd), csio_ct_expl(cmd));
316 }
317
318 if (!csio_is_rnode_ready(fdmi_req->rnode)) {
319 CSIO_INC_STATS(ln, n_fdmi_err);
320 return;
321 }
322
323 /* Prepare CT hdr for RPA cmd */
324 memset(cmd, 0, FC_CT_HDR_LEN);
325	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RPA);
326
327 /* Prepare RPA payload */
328 pld = (uint8_t *)csio_ct_get_pld(cmd);
329 port_name = (struct fc_fdmi_port_name *)pld;
330 memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
331 pld += sizeof(*port_name);
332
333 /* Start appending Port attributes */
334 attrib_blk = (struct fs_fdmi_attrs *)pld;
335 attrib_blk->numattrs = 0;
336 len += sizeof(attrib_blk->numattrs);
337 pld += sizeof(attrib_blk->numattrs);
338
339 fc4_type = &buf[0];
340 memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
341 fc4_type[2] = 1;
342 fc4_type[7] = 1;
343 csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES,
344 fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
345	numattrs++;
346	val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
347	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
348			   &val,
349			   FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
350	numattrs++;
351
352 if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G)
353 val = htonl(FC_PORTSPEED_1GBIT);
354 else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G)
355 val = htonl(FC_PORTSPEED_10GBIT);
356 else
357 val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
358 csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
359			   &val, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
360	numattrs++;
361
362	mfs = ln->ln_sparm.csp.sp_bb_data;
363	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
364			   &mfs, sizeof(mfs));
365	numattrs++;
366
367 strcpy(buf, "csiostor");
368 csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf,
369			   strlen(buf));
370	numattrs++;
371
372 if (!csio_hostname(buf, sizeof(buf))) {
373 csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME,
374				   buf, strlen(buf));
375		numattrs++;
376	}
377	attrib_blk->numattrs = htonl(numattrs);
378 len = (uint32_t)(pld - (uint8_t *)cmd);
379
380 /* Submit FDMI RPA request */
381	spin_lock_irqsave(&hw->lock, flags);
382 if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
383 FCOE_CT, &fdmi_req->dma_buf, len)) {
384 CSIO_INC_STATS(ln, n_fdmi_err);
385 csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
386 }
387	spin_unlock_irqrestore(&hw->lock, flags);
388}
389
390/*
391 * csio_ln_fdmi_dprt_cbfn - DPRT completion
392 * @hw: HW context
393 * @fdmi_req: fdmi request
394 */
395static void
396csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
397{
398 void *cmd;
399 uint8_t *pld;
400 uint32_t len = 0;
401	uint32_t numattrs = 0;
402	__be32 maxpayload = htonl(65536);
403 struct fc_fdmi_hba_identifier *hbaid;
404 struct csio_lnode *ln = fdmi_req->lnode;
405 struct fc_fdmi_rpl *reg_pl;
406 struct fs_fdmi_attrs *attrib_blk;
407 uint8_t buf[64];
408	unsigned long flags;
409
410 if (fdmi_req->wr_status != FW_SUCCESS) {
411 csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
412 fdmi_req->wr_status);
413 CSIO_INC_STATS(ln, n_fdmi_err);
414 }
415
416 if (!csio_is_rnode_ready(fdmi_req->rnode)) {
417 CSIO_INC_STATS(ln, n_fdmi_err);
418 return;
419 }
420 cmd = fdmi_req->dma_buf.vaddr;
421 if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
422 csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n",
423 csio_ct_reason(cmd), csio_ct_expl(cmd));
424 }
425
426 /* Prepare CT hdr for RHBA cmd */
427 memset(cmd, 0, FC_CT_HDR_LEN);
428	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RHBA);
429 len = FC_CT_HDR_LEN;
430
431 /* Prepare RHBA payload */
432 pld = (uint8_t *)csio_ct_get_pld(cmd);
433 hbaid = (struct fc_fdmi_hba_identifier *)pld;
434	memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifier */
435 pld += sizeof(*hbaid);
436
437 /* Register one port per hba */
438 reg_pl = (struct fc_fdmi_rpl *)pld;
439	reg_pl->numport = htonl(1);
440 memcpy(&reg_pl->port[0].portname, csio_ln_wwpn(ln), 8);
441 pld += sizeof(*reg_pl);
442
443 /* Start appending HBA attributes hba */
444 attrib_blk = (struct fs_fdmi_attrs *)pld;
445 attrib_blk->numattrs = 0;
446 len += sizeof(attrib_blk->numattrs);
447 pld += sizeof(attrib_blk->numattrs);
448
449 csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln),
450 FC_FDMI_HBA_ATTR_NODENAME_LEN);
451	numattrs++;
452
453 memset(buf, 0, sizeof(buf));
454
455 strcpy(buf, "Chelsio Communications");
456 csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
457			   strlen(buf));
458	numattrs++;
459	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
460			   hw->vpd.sn, sizeof(hw->vpd.sn));
461	numattrs++;
462	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
463			   sizeof(hw->vpd.id));
464	numattrs++;
465	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
466			   hw->model_desc, strlen(hw->model_desc));
467	numattrs++;
468	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
469			   hw->hw_ver, sizeof(hw->hw_ver));
470	numattrs++;
471	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
472			   hw->fwrev_str, strlen(hw->fwrev_str));
473	numattrs++;
474
475 if (!csio_osname(buf, sizeof(buf))) {
476 csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
477				   buf, strlen(buf));
478		numattrs++;
479 }
480
481 csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
482			   &maxpayload, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
483	len = (uint32_t)(pld - (uint8_t *)cmd);
484	numattrs++;
485	attrib_blk->numattrs = htonl(numattrs);
486
487 /* Submit FDMI RHBA request */
488	spin_lock_irqsave(&hw->lock, flags);
489 if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
490 FCOE_CT, &fdmi_req->dma_buf, len)) {
491 CSIO_INC_STATS(ln, n_fdmi_err);
492 csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
493 }
494	spin_unlock_irqrestore(&hw->lock, flags);
495}
496
497/*
498 * csio_ln_fdmi_dhba_cbfn - DHBA completion
499 * @hw: HW context
500 * @fdmi_req: fdmi request
501 */
502static void
503csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
504{
505 struct csio_lnode *ln = fdmi_req->lnode;
506 void *cmd;
507 struct fc_fdmi_port_name *port_name;
508 uint32_t len;
509	unsigned long flags;
510
511 if (fdmi_req->wr_status != FW_SUCCESS) {
512 csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
513 fdmi_req->wr_status);
514 CSIO_INC_STATS(ln, n_fdmi_err);
515 }
516
517 if (!csio_is_rnode_ready(fdmi_req->rnode)) {
518 CSIO_INC_STATS(ln, n_fdmi_err);
519 return;
520 }
521 cmd = fdmi_req->dma_buf.vaddr;
522 if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
523 csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n",
524 csio_ct_reason(cmd), csio_ct_expl(cmd));
525 }
526
527 /* Send FDMI cmd to de-register any Port attributes if registered
528 * before
529 */
530
531 /* Prepare FDMI DPRT cmd */
532 memset(cmd, 0, FC_CT_HDR_LEN);
533	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DPRT);
534 len = FC_CT_HDR_LEN;
535 port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd);
536 memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
537 len += sizeof(*port_name);
538
539 /* Submit FDMI request */
540	spin_lock_irqsave(&hw->lock, flags);
541 if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
542 FCOE_CT, &fdmi_req->dma_buf, len)) {
543 CSIO_INC_STATS(ln, n_fdmi_err);
544 csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
545 }
546	spin_unlock_irqrestore(&hw->lock, flags);
547}
548
549/**
550 * csio_ln_fdmi_start - Start an FDMI request.
551 * @ln: lnode
552 * @context: session context
553 *
554 * Issued with lock held.
555 */
556int
557csio_ln_fdmi_start(struct csio_lnode *ln, void *context)
558{
559 struct csio_ioreq *fdmi_req;
560 struct csio_rnode *fdmi_rn = (struct csio_rnode *)context;
561 void *cmd;
562 struct fc_fdmi_hba_identifier *hbaid;
563 uint32_t len;
564
565 if (!(ln->flags & CSIO_LNF_FDMI_ENABLE))
566 return -EPROTONOSUPPORT;
567
568 if (!csio_is_rnode_ready(fdmi_rn))
569 CSIO_INC_STATS(ln, n_fdmi_err);
570
571 /* Send FDMI cmd to de-register any HBA attributes if registered
572 * before
573 */
574
575 fdmi_req = ln->mgmt_req;
576 fdmi_req->lnode = ln;
577 fdmi_req->rnode = fdmi_rn;
578
579 /* Prepare FDMI DHBA cmd */
580 cmd = fdmi_req->dma_buf.vaddr;
581 memset(cmd, 0, FC_CT_HDR_LEN);
582	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DHBA);
583 len = FC_CT_HDR_LEN;
584
585 hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd);
586 memcpy(&hbaid->id, csio_ln_wwpn(ln), 8);
587 len += sizeof(*hbaid);
588
589 /* Submit FDMI request */
590 if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn,
591 FCOE_CT, &fdmi_req->dma_buf, len)) {
592 CSIO_INC_STATS(ln, n_fdmi_err);
593 csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n");
594 }
595
596 return 0;
597}
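/*
 * Note: FDMI registration runs as a callback chain on the single
 * ln->mgmt_req allocated in csio_ln_fdmi_init(): DHBA (issued above)
 * -> DPRT -> RHBA -> RPA, each completion reusing the same DMA buffer
 * to build the next CT request.
 */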
598
599/*
600 * csio_ln_vnp_read_cbfn - vnp read completion handler.
601 * @hw: HW module
602 * @mbp: Mailbox holding the VNP read response.
603 *
604 * Reads vnp response and updates ln parameters.
605 */
606static void
607csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
608{
609 struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv);
610 struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
611 struct fc_els_csp *csp;
612 struct fc_els_cssp *clsp;
613 enum fw_retval retval;
614	__be32 nport_id;
615
616	retval = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
617 if (retval != FW_SUCCESS) {
618 csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval);
619 mempool_free(mbp, hw->mb_mempool);
620 return;
621 }
622
623 spin_lock_irq(&hw->lock);
624
625 memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac));
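	/*
	 * The low three bytes of the VN-Port MAC carry the assigned FC_ID
	 * (FPMA-style addressing), so the copy and shift below recover the
	 * 24-bit N_Port ID from the MAC returned by firmware.
	 */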
626	memcpy(&nport_id, &rsp->vnport_mac[3], sizeof(uint8_t)*3);
627	ln->nport_id = ntohl(nport_id);
628	ln->nport_id = ln->nport_id >> 8;
629
630 /* Update WWNs */
631 /*
632 * This may look like a duplication of what csio_fcoe_enable_link()
633 * does, but is absolutely necessary if the vnpi changes between
634 * a FCOE LINK UP and FCOE LINK DOWN.
635 */
636 memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
637 memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);
638
639 /* Copy common sparam */
640 csp = (struct fc_els_csp *)rsp->cmn_srv_parms;
641 ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver;
642 ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver;
643	ln->ln_sparm.csp.sp_bb_cred = csp->sp_bb_cred;
644	ln->ln_sparm.csp.sp_features = csp->sp_features;
645	ln->ln_sparm.csp.sp_bb_data = csp->sp_bb_data;
646	ln->ln_sparm.csp.sp_r_a_tov = csp->sp_r_a_tov;
647	ln->ln_sparm.csp.sp_e_d_tov = csp->sp_e_d_tov;
648
649 /* Copy word 0 & word 1 of class sparam */
650 clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1;
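	/*
	 * clsp[2] is assumed to be the Class 3 service-parameter slot
	 * (classes 1-3 at indexes 0-2), the class used for FCP traffic.
	 */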
651	ln->ln_sparm.clsp[2].cp_class = clsp->cp_class;
652	ln->ln_sparm.clsp[2].cp_init = clsp->cp_init;
653	ln->ln_sparm.clsp[2].cp_recip = clsp->cp_recip;
654	ln->ln_sparm.clsp[2].cp_rdfs = clsp->cp_rdfs;
655
656 spin_unlock_irq(&hw->lock);
657
658 mempool_free(mbp, hw->mb_mempool);
659
660 /* Send an event to update local attribs */
661 csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE);
662}
663
664/*
665 * csio_ln_vnp_read - Read vnp params.
666 * @ln: lnode
667 * @cbfn: Completion handler.
668 *
669 * Issued with lock held.
670 */
671static int
672csio_ln_vnp_read(struct csio_lnode *ln,
673 void (*cbfn) (struct csio_hw *, struct csio_mb *))
674{
675 struct csio_hw *hw = ln->hwp;
676 struct csio_mb *mbp;
677
678 /* Allocate Mbox request */
679 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
680 if (!mbp) {
681 CSIO_INC_STATS(hw, n_err_nomem);
682 return -ENOMEM;
683 }
684
685 /* Prepare VNP Command */
686 csio_fcoe_vnp_read_init_mb(ln, mbp,
687 CSIO_MB_DEFAULT_TMO,
688 ln->fcf_flowid,
689 ln->vnp_flowid,
690 cbfn);
691
692 /* Issue MBOX cmd */
693 if (csio_mb_issue(hw, mbp)) {
694 csio_err(hw, "Failed to issue mbox FCoE VNP command\n");
695 mempool_free(mbp, hw->mb_mempool);
696 return -EINVAL;
697 }
698
699 return 0;
700}
701
702/*
703 * csio_fcoe_enable_link - Enable fcoe link.
704 * @ln: lnode
705 * @enable: enable/disable
706 * Issued with lock held.
707 * Issues mbox cmd to bring up FCOE link on port associated with given ln.
708 */
709static int
710csio_fcoe_enable_link(struct csio_lnode *ln, bool enable)
711{
712 struct csio_hw *hw = ln->hwp;
713 struct csio_mb *mbp;
714 enum fw_retval retval;
715 uint8_t portid;
716 uint8_t sub_op;
717 struct fw_fcoe_link_cmd *lcmd;
718 int i;
719
720 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
721 if (!mbp) {
722 CSIO_INC_STATS(hw, n_err_nomem);
723 return -ENOMEM;
724 }
725
726 portid = ln->portid;
727 sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN;
728
729 csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n",
730 sub_op ? "UP" : "DOWN", portid);
731
732 csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
733 portid, sub_op, 0, 0, 0, NULL);
734
735 if (csio_mb_issue(hw, mbp)) {
736 csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n",
737 portid);
738 mempool_free(mbp, hw->mb_mempool);
739 return -EINVAL;
740 }
741
742 retval = csio_mb_fw_retval(mbp);
743 if (retval != FW_SUCCESS) {
744 csio_err(hw,
745 "FCOE LINK %s cmd on port[%d] failed with "
746 "ret:x%x\n", sub_op ? "UP" : "DOWN", portid, retval);
747 mempool_free(mbp, hw->mb_mempool);
748 return -EINVAL;
749 }
750
751 if (!enable)
752 goto out;
753
754 lcmd = (struct fw_fcoe_link_cmd *)mbp->mb;
755
756 memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8);
757 memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8);
758
759 for (i = 0; i < CSIO_MAX_PPORTS; i++)
760 if (hw->pport[i].portid == portid)
761 memcpy(hw->pport[i].mac, lcmd->phy_mac, 6);
762
763out:
764 mempool_free(mbp, hw->mb_mempool);
765 return 0;
766}
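/*
 * Note: the FCOE LINK mailbox above is issued without a completion callback,
 * so csio_mb_issue() appears to complete it synchronously and the firmware
 * return value can be checked immediately with csio_mb_fw_retval().
 */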
767
768/*
769 * csio_ln_read_fcf_cbfn - Read fcf parameters
770 * @ln: lnode
771 *
772 * Reads the FCF response and updates ln FCF information.
773 */
774static void
775csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
776{
777 struct csio_lnode *ln = (struct csio_lnode *)mbp->priv;
778 struct csio_fcf_info *fcf_info;
779 struct fw_fcoe_fcf_cmd *rsp =
780 (struct fw_fcoe_fcf_cmd *)(mbp->mb);
781 enum fw_retval retval;
782
783	retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
784 if (retval != FW_SUCCESS) {
785 csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n",
786 retval);
787 mempool_free(mbp, hw->mb_mempool);
788 return;
789 }
790
791 spin_lock_irq(&hw->lock);
792 fcf_info = ln->fcfinfo;
793 fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET(
794 ntohs(rsp->priority_pkd));
795 fcf_info->vf_id = ntohs(rsp->vf_id);
796 fcf_info->vlan_id = rsp->vlan_id;
797 fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size);
798 fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv);
799 fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi));
800 fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid);
801 fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid);
802 fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid);
803 fcf_info->portid = FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid);
804 memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map));
805 memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac));
806 memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id));
807 memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric));
808 memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac));
809
810 spin_unlock_irq(&hw->lock);
811
812 mempool_free(mbp, hw->mb_mempool);
813}
814
815/*
816 * csio_ln_read_fcf_entry - Read fcf entry.
817 * @ln: lnode
818 * @cbfn: Completion handler.
819 *
820 * Issued with lock held.
821 */
822static int
823csio_ln_read_fcf_entry(struct csio_lnode *ln,
824 void (*cbfn) (struct csio_hw *, struct csio_mb *))
825{
826 struct csio_hw *hw = ln->hwp;
827 struct csio_mb *mbp;
828
829 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
830 if (!mbp) {
831 CSIO_INC_STATS(hw, n_err_nomem);
832 return -ENOMEM;
833 }
834
835 /* Get FCoE FCF information */
836 csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
837 ln->portid, ln->fcf_flowid, cbfn);
838
839 if (csio_mb_issue(hw, mbp)) {
840 csio_err(hw, "failed to issue FCOE FCF cmd\n");
841 mempool_free(mbp, hw->mb_mempool);
842 return -EINVAL;
843 }
844
845 return 0;
846}
847
848/*
849 * csio_handle_link_up - Logical Linkup event.
850 * @hw - HW module.
851 * @portid - Physical port number
852 * @fcfi - FCF index.
853 * @vnpi - VNP index.
854 * Returns - none.
855 *
856 * This event is received from FW, when virtual link is established between
857 * Physical port [ENode] and FCF. If it is a new vnpi, then a local node object is
858 * created on this FCF and set to [ONLINE] state.
859 * Lnode waits for FW_RDEV_CMD event to be received indicating that
860 * Fabric login is completed and lnode moves to [READY] state.
861 *
862 * This is called with the hw lock held.
863 */
864static void
865csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
866 uint32_t vnpi)
867{
868 struct csio_lnode *ln = NULL;
869
870 /* Lookup lnode based on vnpi */
871 ln = csio_ln_lookup_by_vnpi(hw, vnpi);
872 if (!ln) {
873 /* Pick lnode based on portid */
874 ln = csio_ln_lookup_by_portid(hw, portid);
875 if (!ln) {
876 csio_err(hw, "failed to lookup fcoe lnode on port:%d\n",
877 portid);
878 CSIO_DB_ASSERT(0);
879 return;
880 }
881
882 /* Check if lnode has valid vnp flowid */
883 if (ln->vnp_flowid != CSIO_INVALID_IDX) {
884 /* New VN-Port */
885 spin_unlock_irq(&hw->lock);
886			ln = csio_lnode_alloc(hw);
887 spin_lock_irq(&hw->lock);
888 if (!ln) {
889 csio_err(hw,
890 "failed to allocate fcoe lnode"
891 "for port:%d vnpi:x%x\n",
892 portid, vnpi);
893 CSIO_DB_ASSERT(0);
894 return;
895 }
896 ln->portid = portid;
897 }
898 ln->vnp_flowid = vnpi;
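		/* The low 16 bits of dev_num track the vnpi of this lnode */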
899 ln->dev_num &= ~0xFFFF;
900 ln->dev_num |= vnpi;
901 }
902
903 /*Initialize fcfi */
904 ln->fcf_flowid = fcfi;
905
906 csio_info(hw, "Port:%d - FCOE LINK UP\n", portid);
907
908 CSIO_INC_STATS(ln, n_link_up);
909
910 /* Send LINKUP event to SM */
911 csio_post_event(&ln->sm, CSIO_LNE_LINKUP);
912}
913
914/*
915 * csio_post_event_rns
916 * @ln - FCOE lnode
917 * @evt - Given rnode event
918 * Returns - none
919 *
920 * Posts given rnode event to all FCOE rnodes connected with given Lnode.
921 * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
922 * event.
923 *
924 * This is called with the hw lock held.
925 */
926static void
927csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt)
928{
929 struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
930 struct list_head *tmp, *next;
931 struct csio_rnode *rn;
932
933 list_for_each_safe(tmp, next, &rnhead->sm.sm_list) {
934 rn = (struct csio_rnode *) tmp;
935 csio_post_event(&rn->sm, evt);
936 }
937}
938
939/*
940 * csio_cleanup_rns
941 * @ln - FCOE lnode
942 * Returns - none
943 *
944 * Frees all FCOE rnodes connected with given Lnode.
945 *
946 * This is called with the hw lock held.
947 */
948static void
949csio_cleanup_rns(struct csio_lnode *ln)
950{
951 struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
952 struct list_head *tmp, *next_rn;
953 struct csio_rnode *rn;
954
955 list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) {
956 rn = (struct csio_rnode *) tmp;
957 csio_put_rnode(ln, rn);
958 }
959
960}
961
962/*
963 * csio_post_event_lns
964 * @ln - FCOE lnode
965 * @evt - Given lnode event
966 * Returns - none
967 *
968 * Posts given lnode event to all FCOE lnodes connected with given Lnode.
969 * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
970 * event.
971 *
972 * This is called with the hw lock held.
973 */
974static void
975csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt)
976{
977 struct list_head *tmp;
978 struct csio_lnode *cln, *sln;
979
980 /* If NPIV lnode, send evt only to that and return */
981 if (csio_is_npiv_ln(ln)) {
982 csio_post_event(&ln->sm, evt);
983 return;
984 }
985
986 sln = ln;
987 /* Traverse children lnodes list and send evt */
988 list_for_each(tmp, &sln->cln_head) {
989 cln = (struct csio_lnode *) tmp;
990 csio_post_event(&cln->sm, evt);
991 }
992
993 /* Send evt to parent lnode */
994 csio_post_event(&ln->sm, evt);
995}
996
997/*
998 * csio_ln_down - Local nport is down
999 * @ln - FCOE Lnode
1000 * Returns - none
1001 *
1002 * Sends LINK_DOWN events to the Lnode and its associated NPIV lnodes.
1003 *
1004 * This is called with the hw lock held.
1005 */
1006static void
1007csio_ln_down(struct csio_lnode *ln)
1008{
1009 csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN);
1010}
1011
1012/*
1013 * csio_handle_link_down - Logical Linkdown event.
1014 * @hw - HW module.
1015 * @portid - Physical port number
1016 * @fcfi - FCF index.
1017 * @vnpi - VNP index.
1018 * Returns - none
1019 *
1020 * This event is received from FW, when virtual link goes down between
1021 * Physical port[ENode] and FCF. Lnode and its associated NPIVs lnode hosted on
1022 * this vnpi[VN-Port] will be de-instantiated.
1023 *
1024 * This is called with the hw lock held.
1025 */
1026static void
1027csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
1028 uint32_t vnpi)
1029{
1030 struct csio_fcf_info *fp;
1031 struct csio_lnode *ln;
1032
1033 /* Lookup lnode based on vnpi */
1034 ln = csio_ln_lookup_by_vnpi(hw, vnpi);
1035 if (ln) {
1036 fp = ln->fcfinfo;
1037 CSIO_INC_STATS(ln, n_link_down);
1038
1039 /*Warn if linkdown received if lnode is not in ready state */
1040 if (!csio_is_lnode_ready(ln)) {
1041 csio_ln_warn(ln,
1042				     "warn: FCOE link is already offline. "
1043				     "Ignoring FCoE linkdown event on portid %d\n",
1044 portid);
1045 CSIO_INC_STATS(ln, n_evt_drop);
1046 return;
1047 }
1048
1049 /* Verify portid */
1050 if (fp->portid != portid) {
1051 csio_ln_warn(ln,
1052 "warn: FCOE linkdown recv with "
1053 "invalid port %d\n", portid);
1054 CSIO_INC_STATS(ln, n_evt_drop);
1055 return;
1056 }
1057
1058 /* verify fcfi */
1059 if (ln->fcf_flowid != fcfi) {
1060 csio_ln_warn(ln,
1061 "warn: FCOE linkdown recv with "
1062 "invalid fcfi x%x\n", fcfi);
1063 CSIO_INC_STATS(ln, n_evt_drop);
1064 return;
1065 }
1066
1067 csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid);
1068
1069 /* Send LINK_DOWN event to lnode s/m */
1070 csio_ln_down(ln);
1071
1072 return;
1073 } else {
1074 csio_warn(hw,
1075 "warn: FCOE linkdown recv with invalid vnpi x%x\n",
1076 vnpi);
1077 CSIO_INC_STATS(hw, n_evt_drop);
1078 }
1079}
1080
1081/*
1082 * csio_is_lnode_ready - Checks FCOE lnode is in ready state.
1083 * @ln: Lnode module
1084 *
1085 * Returns True if FCOE lnode is in ready state.
1086 */
1087int
1088csio_is_lnode_ready(struct csio_lnode *ln)
1089{
1090 return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
1091}
1092
1093/*****************************************************************************/
1094/* START: Lnode SM */
1095/*****************************************************************************/
1096/*
1097 * csio_lns_uninit - The request in uninit state.
1098 * @ln - FCOE lnode.
1099 * @evt - Event to be processed.
1100 *
1101 * Process the given lnode event which is currently in "uninit" state.
1102 * Invoked with HW lock held.
1103 * Return - none.
1104 */
1105static void
1106csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt)
1107{
1108 struct csio_hw *hw = csio_lnode_to_hw(ln);
1109 struct csio_lnode *rln = hw->rln;
1110 int rv;
1111
1112 CSIO_INC_STATS(ln, n_evt_sm[evt]);
1113 switch (evt) {
1114 case CSIO_LNE_LINKUP:
1115 csio_set_state(&ln->sm, csio_lns_online);
1116 /* Read FCF only for physical lnode */
1117 if (csio_is_phys_ln(ln)) {
1118 rv = csio_ln_read_fcf_entry(ln,
1119 csio_ln_read_fcf_cbfn);
1120 if (rv != 0) {
1121 /* TODO: Send HW RESET event */
1122 CSIO_INC_STATS(ln, n_err);
1123 break;
1124 }
1125
1126 /* Add FCF record */
1127 list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
1128 }
1129
1130 rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
1131 if (rv != 0) {
1132 /* TODO: Send HW RESET event */
1133 CSIO_INC_STATS(ln, n_err);
1134 }
1135 break;
1136
1137 case CSIO_LNE_DOWN_LINK:
1138 break;
1139
1140 default:
1141 csio_ln_dbg(ln,
1142 "unexp ln event %d recv from did:x%x in "
1143 "ln state[uninit].\n", evt, ln->nport_id);
1144 CSIO_INC_STATS(ln, n_evt_unexp);
1145 break;
1146 } /* switch event */
1147}
1148
1149/*
1150 * csio_lns_online - The request in online state.
1151 * @ln - FCOE lnode.
1152 * @evt - Event to be processed.
1153 *
1154 * Process the given lnode event which is currently in "online" state.
1155 * Invoked with HW lock held.
1156 * Return - none.
1157 */
1158static void
1159csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt)
1160{
1161 struct csio_hw *hw = csio_lnode_to_hw(ln);
1162
1163 CSIO_INC_STATS(ln, n_evt_sm[evt]);
1164 switch (evt) {
1165 case CSIO_LNE_LINKUP:
1166 csio_ln_warn(ln,
1167 "warn: FCOE link is up already "
1168 "Ignoring linkup on port:%d\n", ln->portid);
1169 CSIO_INC_STATS(ln, n_evt_drop);
1170 break;
1171
1172 case CSIO_LNE_FAB_INIT_DONE:
1173 csio_set_state(&ln->sm, csio_lns_ready);
1174
1175 spin_unlock_irq(&hw->lock);
1176 csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP);
1177 spin_lock_irq(&hw->lock);
1178
1179 break;
1180
1181 case CSIO_LNE_LINK_DOWN:
1182 /* Fall through */
1183 case CSIO_LNE_DOWN_LINK:
1184 csio_set_state(&ln->sm, csio_lns_uninit);
1185 if (csio_is_phys_ln(ln)) {
1186 /* Remove FCF entry */
1187 list_del_init(&ln->fcfinfo->list);
1188 }
1189 break;
1190
1191 default:
1192 csio_ln_dbg(ln,
1193 "unexp ln event %d recv from did:x%x in "
1194			    "ln state[online].\n", evt, ln->nport_id);
1195 CSIO_INC_STATS(ln, n_evt_unexp);
1196
1197 break;
1198 } /* switch event */
1199}
1200
1201/*
1202 * csio_lns_ready - The request in ready state.
1203 * @ln - FCOE lnode.
1204 * @evt - Event to be processed.
1205 *
1206 * Process the given lnode event which is currently in "ready" state.
1207 * Invoked with HW lock held.
1208 * Return - none.
1209 */
1210static void
1211csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt)
1212{
1213 struct csio_hw *hw = csio_lnode_to_hw(ln);
1214
1215 CSIO_INC_STATS(ln, n_evt_sm[evt]);
1216 switch (evt) {
1217 case CSIO_LNE_FAB_INIT_DONE:
1218 csio_ln_dbg(ln,
1219			    "ignoring event %d recv from did x%x "
1220 "in ln state[ready].\n", evt, ln->nport_id);
1221 CSIO_INC_STATS(ln, n_evt_drop);
1222 break;
1223
1224 case CSIO_LNE_LINK_DOWN:
1225 csio_set_state(&ln->sm, csio_lns_offline);
1226 csio_post_event_rns(ln, CSIO_RNFE_DOWN);
1227
1228 spin_unlock_irq(&hw->lock);
1229 csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
1230 spin_lock_irq(&hw->lock);
1231
1232 if (csio_is_phys_ln(ln)) {
1233 /* Remove FCF entry */
1234 list_del_init(&ln->fcfinfo->list);
1235 }
1236 break;
1237
1238 case CSIO_LNE_DOWN_LINK:
1239 csio_set_state(&ln->sm, csio_lns_offline);
1240 csio_post_event_rns(ln, CSIO_RNFE_DOWN);
1241
1242 /* Host need to issue aborts in case if FW has not returned
1243 * WRs with status "ABORTED"
1244 */
1245 spin_unlock_irq(&hw->lock);
1246 csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
1247 spin_lock_irq(&hw->lock);
1248
1249 if (csio_is_phys_ln(ln)) {
1250 /* Remove FCF entry */
1251 list_del_init(&ln->fcfinfo->list);
1252 }
1253 break;
1254
1255 case CSIO_LNE_CLOSE:
1256 csio_set_state(&ln->sm, csio_lns_uninit);
1257 csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
1258 break;
1259
1260 case CSIO_LNE_LOGO:
1261 csio_set_state(&ln->sm, csio_lns_offline);
1262 csio_post_event_rns(ln, CSIO_RNFE_DOWN);
1263 break;
1264
1265 default:
1266 csio_ln_dbg(ln,
1267 "unexp ln event %d recv from did:x%x in "
1268			    "ln state[ready].\n", evt, ln->nport_id);
1269 CSIO_INC_STATS(ln, n_evt_unexp);
1270 CSIO_DB_ASSERT(0);
1271 break;
1272 } /* switch event */
1273}
1274
1275/*
1276 * csio_lns_offline - The request in offline state.
1277 * @ln - FCOE lnode.
1278 * @evt - Event to be processed.
1279 *
1280 * Process the given lnode event which is currently in "offline" state.
1281 * Invoked with HW lock held.
1282 * Return - none.
1283 */
1284static void
1285csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt)
1286{
1287 struct csio_hw *hw = csio_lnode_to_hw(ln);
1288 struct csio_lnode *rln = hw->rln;
1289 int rv;
1290
1291 CSIO_INC_STATS(ln, n_evt_sm[evt]);
1292 switch (evt) {
1293 case CSIO_LNE_LINKUP:
1294 csio_set_state(&ln->sm, csio_lns_online);
1295 /* Read FCF only for physical lnode */
1296 if (csio_is_phys_ln(ln)) {
1297 rv = csio_ln_read_fcf_entry(ln,
1298 csio_ln_read_fcf_cbfn);
1299 if (rv != 0) {
1300 /* TODO: Send HW RESET event */
1301 CSIO_INC_STATS(ln, n_err);
1302 break;
1303 }
1304
1305 /* Add FCF record */
1306 list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
1307 }
1308
1309 rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
1310 if (rv != 0) {
1311 /* TODO: Send HW RESET event */
1312 CSIO_INC_STATS(ln, n_err);
1313 }
1314 break;
1315
1316 case CSIO_LNE_LINK_DOWN:
1317 case CSIO_LNE_DOWN_LINK:
1318 case CSIO_LNE_LOGO:
1319 csio_ln_dbg(ln,
1320			    "ignoring event %d recv from did x%x "
1321 "in ln state[offline].\n", evt, ln->nport_id);
1322 CSIO_INC_STATS(ln, n_evt_drop);
1323 break;
1324
1325 case CSIO_LNE_CLOSE:
1326 csio_set_state(&ln->sm, csio_lns_uninit);
1327 csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
1328 break;
1329
1330 default:
1331 csio_ln_dbg(ln,
1332 "unexp ln event %d recv from did:x%x in "
1333 "ln state[offline]\n", evt, ln->nport_id);
1334 CSIO_INC_STATS(ln, n_evt_unexp);
1335 CSIO_DB_ASSERT(0);
1336 break;
1337 } /* switch event */
1338}
1339
1340/*****************************************************************************/
1341/* END: Lnode SM */
1342/*****************************************************************************/
1343
1344static void
1345csio_free_fcfinfo(struct kref *kref)
1346{
1347 struct csio_fcf_info *fcfinfo = container_of(kref,
1348 struct csio_fcf_info, kref);
1349 kfree(fcfinfo);
1350}
1351
1352/* Helper routines for attributes */
1353/*
1354 * csio_lnode_state_to_str - Get current state of FCOE lnode.
1355 * @ln - lnode
1356 * @str - state of lnode.
1357 *
1358 */
1359void
1360csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
1361{
1362 if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {
1363 strcpy(str, "UNINIT");
1364 return;
1365 }
1366 if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {
1367 strcpy(str, "READY");
1368 return;
1369 }
1370 if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {
1371 strcpy(str, "OFFLINE");
1372 return;
1373 }
1374 strcpy(str, "UNKNOWN");
1375} /* csio_lnode_state_to_str */
1376
1377
1378int
1379csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid,
1380 struct fw_fcoe_port_stats *port_stats)
1381{
1382 struct csio_mb *mbp;
1383 struct fw_fcoe_port_cmd_params portparams;
1384 enum fw_retval retval;
1385 int idx;
1386
1387 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1388 if (!mbp) {
1389 csio_err(hw, "FCoE FCF PARAMS command out of memory!\n");
1390 return -EINVAL;
1391 }
1392 portparams.portid = portid;
1393
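	/*
	 * The firmware hands back the per-port counters in chunks: three
	 * mailbox reads of 6, 6 and 4 stats respectively collect all 16
	 * counters into *port_stats.
	 */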
1394 for (idx = 1; idx <= 3; idx++) {
1395 portparams.idx = (idx-1)*6 + 1;
1396 portparams.nstats = 6;
1397 if (idx == 3)
1398 portparams.nstats = 4;
1399 csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO,
1400 &portparams, NULL);
1401 if (csio_mb_issue(hw, mbp)) {
1402 csio_err(hw, "Issue of FCoE port params failed!\n");
1403 mempool_free(mbp, hw->mb_mempool);
1404 return -EINVAL;
1405 }
1406 csio_mb_process_portparams_rsp(hw, mbp, &retval,
1407 &portparams, port_stats);
1408 }
1409
1410 mempool_free(mbp, hw->mb_mempool);
1411 return 0;
1412}
1413
1414/*
1415 * csio_ln_mgmt_wr_handler -Mgmt Work Request handler.
1416 * @wr - WR.
1417 * @len - WR len.
1418 * This handler is invoked when an outstanding mgmt WR is completed.
1419 * Its invoked in the context of FW event worker thread for every
1420 * mgmt event received.
1421 * Return - none.
1422 */
1423
1424static void
1425csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len)
1426{
1427 struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
1428 struct csio_ioreq *io_req = NULL;
1429 struct fw_fcoe_els_ct_wr *wr_cmd;
1430
1431
1432 wr_cmd = (struct fw_fcoe_els_ct_wr *) wr;
1433
1434 if (len < sizeof(struct fw_fcoe_els_ct_wr)) {
1435 csio_err(mgmtm->hw,
1436 "Invalid ELS CT WR length recvd, len:%x\n", len);
1437 mgmtm->stats.n_err++;
1438 return;
1439 }
1440
1441 io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie);
1442 io_req->wr_status = csio_wr_status(wr_cmd);
1443
1444 /* lookup ioreq exists in our active Q */
1445 spin_lock_irq(&hw->lock);
1446 if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) {
1447 csio_err(mgmtm->hw,
1448 "Error- Invalid IO handle recv in WR. handle: %p\n",
1449 io_req);
1450 mgmtm->stats.n_err++;
1451 spin_unlock_irq(&hw->lock);
1452 return;
1453 }
1454
1455 mgmtm = csio_hw_to_mgmtm(hw);
1456
1457 /* Dequeue from active queue */
1458 list_del_init(&io_req->sm.sm_list);
1459 mgmtm->stats.n_active--;
1460 spin_unlock_irq(&hw->lock);
1461
1462 /* io_req will be freed by completion handler */
1463 if (io_req->io_cbfn)
1464 io_req->io_cbfn(hw, io_req);
1465}
1466
1467/**
1468 * csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events.
1469 * @hw: HW module
1470 * @cpl_op: CPL opcode
1471 * @cmd: FW cmd/WR.
1472 *
1473 * Process received FCoE cmd/WR event from FW.
1474 */
1475void
1476csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd)
1477{
1478 struct csio_lnode *ln;
1479 struct csio_rnode *rn;
1480 uint8_t portid, opcode = *(uint8_t *)cmd;
1481 struct fw_fcoe_link_cmd *lcmd;
1482 struct fw_wr_hdr *wr;
1483 struct fw_rdev_wr *rdev_wr;
1484 enum fw_fcoe_link_status lstatus;
1485 uint32_t fcfi, rdev_flowid, vnpi;
1486 enum csio_ln_ev evt;
1487
1488 if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) {
1489
1490 lcmd = (struct fw_fcoe_link_cmd *)cmd;
1491 lstatus = lcmd->lstatus;
1492 portid = FW_FCOE_LINK_CMD_PORTID_GET(
1493 ntohl(lcmd->op_to_portid));
1494 fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi));
1495 vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd));
1496
1497 if (lstatus == FCOE_LINKUP) {
1498
1499 /* HW lock here */
1500 spin_lock_irq(&hw->lock);
1501 csio_handle_link_up(hw, portid, fcfi, vnpi);
1502 spin_unlock_irq(&hw->lock);
1503 /* HW un lock here */
1504
1505 } else if (lstatus == FCOE_LINKDOWN) {
1506
1507 /* HW lock here */
1508 spin_lock_irq(&hw->lock);
1509 csio_handle_link_down(hw, portid, fcfi, vnpi);
1510 spin_unlock_irq(&hw->lock);
1511 /* HW un lock here */
1512 } else {
1513 csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n",
1514				  lcmd->lstatus);
1515 CSIO_INC_STATS(hw, n_cpl_unexp);
1516 }
1517 } else if (cpl_op == CPL_FW6_PLD) {
1518 wr = (struct fw_wr_hdr *) (cmd + 4);
1519		if (FW_WR_OP_G(be32_to_cpu(wr->hi))
1520 == FW_RDEV_WR) {
1521
1522 rdev_wr = (struct fw_rdev_wr *) (cmd + 4);
1523
1524 rdev_flowid = FW_RDEV_WR_FLOWID_GET(
1525 ntohl(rdev_wr->alloc_to_len16));
1526 vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET(
1527 ntohl(rdev_wr->flags_to_assoc_flowid));
1528
1529 csio_dbg(hw,
1530 "FW_RDEV_WR: flowid:x%x ev_cause:x%x "
1531 "vnpi:0x%x\n", rdev_flowid,
1532 rdev_wr->event_cause, vnpi);
1533
1534 if (rdev_wr->protocol != PROT_FCOE) {
1535 csio_err(hw,
1536 "FW_RDEV_WR: invalid proto:x%x "
1537 "received with flowid:x%x\n",
1538 rdev_wr->protocol,
1539 rdev_flowid);
1540 CSIO_INC_STATS(hw, n_evt_drop);
1541 return;
1542 }
1543
1544 /* HW lock here */
1545 spin_lock_irq(&hw->lock);
1546 ln = csio_ln_lookup_by_vnpi(hw, vnpi);
1547 if (!ln) {
1548 csio_err(hw,
1549 "FW_DEV_WR: invalid vnpi:x%x received "
1550 "with flowid:x%x\n", vnpi, rdev_flowid);
1551 CSIO_INC_STATS(hw, n_evt_drop);
1552 goto out_pld;
1553 }
1554
1555 rn = csio_confirm_rnode(ln, rdev_flowid,
1556 &rdev_wr->u.fcoe_rdev);
1557 if (!rn) {
1558 csio_ln_dbg(ln,
1559 "Failed to confirm rnode "
1560 "for flowid:x%x\n", rdev_flowid);
1561 CSIO_INC_STATS(hw, n_evt_drop);
1562 goto out_pld;
1563 }
1564
1565 /* save previous event for debugging */
1566 ln->prev_evt = ln->cur_evt;
1567 ln->cur_evt = rdev_wr->event_cause;
1568 CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]);
1569
1570 /* Translate all the fabric events to lnode SM events */
1571 evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause);
1572 if (evt) {
1573 csio_ln_dbg(ln,
1574 "Posting event to lnode event:%d "
1575 "cause:%d flowid:x%x\n", evt,
1576 rdev_wr->event_cause, rdev_flowid);
1577 csio_post_event(&ln->sm, evt);
1578 }
1579
1580 /* Handover event to rn SM here. */
1581 csio_rnode_fwevt_handler(rn, rdev_wr->event_cause);
1582out_pld:
1583 spin_unlock_irq(&hw->lock);
1584 return;
1585 } else {
1586 csio_warn(hw, "unexpected WR op(0x%x) recv\n",
1587				  FW_WR_OP_G(be32_to_cpu((wr->hi))));
1588 CSIO_INC_STATS(hw, n_cpl_unexp);
1589 }
1590 } else if (cpl_op == CPL_FW6_MSG) {
1591 wr = (struct fw_wr_hdr *) (cmd);
1592		if (FW_WR_OP_G(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) {
1593 csio_ln_mgmt_wr_handler(hw, wr,
1594 sizeof(struct fw_fcoe_els_ct_wr));
1595 } else {
1596 csio_warn(hw, "unexpected WR op(0x%x) recv\n",
1597				  FW_WR_OP_G(be32_to_cpu((wr->hi))));
1598 CSIO_INC_STATS(hw, n_cpl_unexp);
1599 }
1600 } else {
1601 csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode);
1602 CSIO_INC_STATS(hw, n_cpl_unexp);
1603 }
1604}
1605
1606/**
1607 * csio_lnode_start - Kickstart lnode discovery.
1608 * @ln: lnode
1609 *
1610 * This routine kickstarts the discovery by issuing an FCOE_LINK (up) command.
1611 */
1612int
1613csio_lnode_start(struct csio_lnode *ln)
1614{
1615 int rv = 0;
1616 if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) {
1617 rv = csio_fcoe_enable_link(ln, 1);
1618 ln->flags |= CSIO_LNF_LINK_ENABLE;
1619 }
1620
1621 return rv;
1622}
1623
1624/**
1625 * csio_lnode_stop - Stop the lnode.
1626 * @ln: lnode
1627 *
1628 * This routine is invoked by HW module to stop lnode and its associated NPIV
1629 * lnodes.
1630 */
1631void
1632csio_lnode_stop(struct csio_lnode *ln)
1633{
1634 csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK);
1635 if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) {
1636 csio_fcoe_enable_link(ln, 0);
1637 ln->flags &= ~CSIO_LNF_LINK_ENABLE;
1638 }
1639 csio_ln_dbg(ln, "stopping ln :%p\n", ln);
1640}
1641
1642/**
1643 * csio_lnode_close - Close an lnode.
1644 * @ln: lnode
1645 *
1646 * This routine is invoked by HW module to close an lnode and its
1647 * associated NPIV lnodes. Lnode and its associated NPIV lnodes are
1648 * set to uninitialized state.
1649 */
1650void
1651csio_lnode_close(struct csio_lnode *ln)
1652{
1653 csio_post_event_lns(ln, CSIO_LNE_CLOSE);
1654 if (csio_is_phys_ln(ln))
1655 ln->vnp_flowid = CSIO_INVALID_IDX;
1656
1657 csio_ln_dbg(ln, "closed ln :%p\n", ln);
1658}
1659
1660/*
1661 * csio_ln_prep_ecwr - Prepare ELS/CT WR.
1662 * @io_req - IO request.
1663 * @wr_len - WR len
1664 * @immd_len - WR immediate data
1665 * @sub_op - Sub opcode
1666 * @sid - source portid.
1667 * @did - destination portid
1668 * @flow_id - flowid
1669 * @fw_wr - ELS/CT WR to be prepared.
1670 * Returns: 0 - on success
1671 */
1672static int
1673csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len,
1674 uint32_t immd_len, uint8_t sub_op, uint32_t sid,
1675 uint32_t did, uint32_t flow_id, uint8_t *fw_wr)
1676{
1677 struct fw_fcoe_els_ct_wr *wr;
1678	__be32 port_id;
1679
1680 wr = (struct fw_fcoe_els_ct_wr *)fw_wr;
1681	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_FCOE_ELS_CT_WR) |
1682 FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len));
1683
1684 wr_len = DIV_ROUND_UP(wr_len, 16);
1685 wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(flow_id) |
1686 FW_WR_LEN16_V(wr_len));
1687 wr->els_ct_type = sub_op;
1688 wr->ctl_pri = 0;
1689 wr->cp_en_class = 0;
1690 wr->cookie = io_req->fw_handle;
1691 wr->iqid = cpu_to_be16(csio_q_physiqid(
1692 io_req->lnode->hwp, io_req->iq_idx));
1693 wr->fl_to_sp = FW_FCOE_ELS_CT_WR_SP(1);
1694 wr->tmo_val = (uint8_t) io_req->tmo;
1695 port_id = htonl(sid);
1696 memcpy(wr->l_id, PORT_ID_PTR(port_id), 3);
1697 port_id = htonl(did);
1698 memcpy(wr->r_id, PORT_ID_PTR(port_id), 3);
1699
1700 /* Prepare RSP SGL */
1701 wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len);
1702 wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr);
1703 return 0;
1704}
1705
1706/*
1707 * csio_ln_mgmt_submit_wr - Post elsct work request.
1708 * @mgmtm - mgmtm
1709 * @io_req - io request.
1710 * @sub_op - ELS or CT request type
1711 * @pld - Dma Payload buffer
1712 * @pld_len - Payload len
1713 * Prepares an ELS/CT work request and sends it to FW.
1714 * Returns: 0 - on success
1715 */
1716static int
1717csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
1718 uint8_t sub_op, struct csio_dma_buf *pld,
1719 uint32_t pld_len)
1720{
1721 struct csio_wr_pair wrp;
1722 struct csio_lnode *ln = io_req->lnode;
1723 struct csio_rnode *rn = io_req->rnode;
1724 struct csio_hw *hw = mgmtm->hw;
1725 uint8_t fw_wr[64];
1726 struct ulptx_sgl dsgl;
1727 uint32_t wr_size = 0;
1728 uint8_t im_len = 0;
1729 uint32_t wr_off = 0;
1730
1731 int ret = 0;
1732
1733 /* Calculate WR Size for this ELS REQ */
1734 wr_size = sizeof(struct fw_fcoe_els_ct_wr);
1735
1736 /* Send as immediate data if pld < 256 */
1737 if (pld_len < 256) {
1738 wr_size += ALIGN(pld_len, 8);
1739 im_len = (uint8_t)pld_len;
1740 } else
1741 wr_size += sizeof(struct ulptx_sgl);
1742
1743 /* Roundup WR size in units of 16 bytes */
1744 wr_size = ALIGN(wr_size, 16);
1745
1746 /* Get WR to send ELS REQ */
1747 ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp);
1748 if (ret != 0) {
1749 csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n",
1750 io_req, ret);
1751 return ret;
1752 }
1753
1754 /* Prepare Generic WR used by all ELS/CT cmd */
1755 csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op,
1756 ln->nport_id, rn->nport_id,
1757 csio_rn_flowid(rn),
1758 &fw_wr[0]);
1759
1760 /* Copy ELS/CT WR CMD */
1761 csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off,
1762 sizeof(struct fw_fcoe_els_ct_wr));
1763 wr_off += sizeof(struct fw_fcoe_els_ct_wr);
1764
1765 /* Copy payload to Immediate section of WR */
1766 if (im_len)
1767 csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len);
1768 else {
1769 /* Program DSGL to dma payload */
1770		dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
1771					ULPTX_MORE_F | ULPTX_NSGE_V(1));
1772 dsgl.len0 = cpu_to_be32(pld_len);
1773 dsgl.addr0 = cpu_to_be64(pld->paddr);
1774 csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
1775 sizeof(struct ulptx_sgl));
1776 }
1777
1778 /* Issue work request to xmit ELS/CT req to FW */
1779 csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false);
1780 return ret;
1781}
1782
1783/*
1784 * csio_ln_mgmt_submit_req - Submit FCOE Mgmt request.
1785 * @io_req - IO Request
1786 * @io_cbfn - Completion handler.
1787 * @req_type - ELS or CT request type
1788 * @pld - Dma Payload buffer
1789 * @pld_len - Payload len
1790 *
1791 *
1792 * This API is used to submit a management ELS/CT request.
1793 * This is called with the hw lock held.
1794 * Returns: 0 - on success
1795 * -ENOMEM - on error.
1796 */
1797static int
1798csio_ln_mgmt_submit_req(struct csio_ioreq *io_req,
1799 void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
1800 enum fcoe_cmn_type req_type, struct csio_dma_buf *pld,
1801 uint32_t pld_len)
1802{
1803 struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode);
1804 struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
1805 int rv;
1806
1807	BUG_ON(pld_len > pld->len);
1808
1809 io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */
1810 io_req->fw_handle = (uintptr_t) (io_req);
1811 io_req->eq_idx = mgmtm->eq_idx;
1812 io_req->iq_idx = mgmtm->iq_idx;
1813
1814 rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len);
1815 if (rv == 0) {
1816 list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q);
1817 mgmtm->stats.n_active++;
1818 }
1819 return rv;
1820}
1821
1822/*
1823 * csio_ln_fdmi_init - FDMI Init entry point.
1824 * @ln: lnode
1825 */
1826static int
1827csio_ln_fdmi_init(struct csio_lnode *ln)
1828{
1829 struct csio_hw *hw = csio_lnode_to_hw(ln);
1830 struct csio_dma_buf *dma_buf;
1831
1832 /* Allocate MGMT request required for FDMI */
1833 ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
1834 if (!ln->mgmt_req) {
1835 csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n");
1836 CSIO_INC_STATS(hw, n_err_nomem);
1837 return -ENOMEM;
1838 }
1839
1840 /* Allocate Dma buffers for FDMI response Payload */
1841 dma_buf = &ln->mgmt_req->dma_buf;
1842 dma_buf->len = 2048;
1843 dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len,
1844 &dma_buf->paddr);
1845 if (!dma_buf->vaddr) {
1846 csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
1847 kfree(ln->mgmt_req);
1848 ln->mgmt_req = NULL;
1849 return -ENOMEM;
1850 }
1851
1852 ln->flags |= CSIO_LNF_FDMI_ENABLE;
1853 return 0;
1854}
1855
1856/*
1857 * csio_ln_fdmi_exit - FDMI exit entry point.
1858 * @ln: lnode
1859 */
1860static int
1861csio_ln_fdmi_exit(struct csio_lnode *ln)
1862{
1863 struct csio_dma_buf *dma_buf;
1864 struct csio_hw *hw = csio_lnode_to_hw(ln);
1865
1866 if (!ln->mgmt_req)
1867 return 0;
1868
1869 dma_buf = &ln->mgmt_req->dma_buf;
1870 if (dma_buf->vaddr)
1871 pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr,
1872 dma_buf->paddr);
1873
1874 kfree(ln->mgmt_req);
1875 return 0;
1876}
1877
1878int
1879csio_scan_done(struct csio_lnode *ln, unsigned long ticks,
1880 unsigned long time, unsigned long max_scan_ticks,
1881 unsigned long delta_scan_ticks)
1882{
1883 int rv = 0;
1884
1885 if (time >= max_scan_ticks)
1886 return 1;
1887
1888 if (!ln->tgt_scan_tick)
1889 ln->tgt_scan_tick = ticks;
1890
1891 if (((ticks - ln->tgt_scan_tick) >= delta_scan_ticks)) {
1892 if (!ln->last_scan_ntgts)
1893 ln->last_scan_ntgts = ln->n_scsi_tgts;
1894 else {
1895 if (ln->last_scan_ntgts == ln->n_scsi_tgts)
1896 return 1;
1897
1898 ln->last_scan_ntgts = ln->n_scsi_tgts;
1899 }
1900 ln->tgt_scan_tick = ticks;
1901 }
1902 return rv;
1903}
1904
1905/*
1906 * csio_notify_lnodes:
1907 * @hw: HW module
1908 * @note: Notification
1909 *
1910 * Called from the HW SM to fan out notifications to the
1911 * Lnode SM. Since the HW SM is entered with lock held,
1912 * there is no need to hold locks here.
1913 *
1914 */
1915void
1916csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note)
1917{
1918 struct list_head *tmp;
1919 struct csio_lnode *ln;
1920
1921 csio_dbg(hw, "Notifying all nodes of event %d\n", note);
1922
1923 /* Traverse children lnodes list and send evt */
1924 list_for_each(tmp, &hw->sln_head) {
1925 ln = (struct csio_lnode *) tmp;
1926
1927 switch (note) {
1928 case CSIO_LN_NOTIFY_HWREADY:
1929 csio_lnode_start(ln);
1930 break;
1931
1932 case CSIO_LN_NOTIFY_HWRESET:
1933 case CSIO_LN_NOTIFY_HWREMOVE:
1934 csio_lnode_close(ln);
1935 break;
1936
1937 case CSIO_LN_NOTIFY_HWSTOP:
1938 csio_lnode_stop(ln);
1939 break;
1940
1941 default:
1942 break;
1943
1944 }
1945 }
1946}
1947
1948/*
1949 * csio_disable_lnodes:
1950 * @hw: HW module
1951 * @portid:port id
1952 * @disable: disable/enable flag.
1953 * If disable=1, disables all lnodes hosted on the given physical port,
1954 * otherwise enables all the lnodes on the given physical port.
1955 * This routine needs to be called with the hw lock held.
1956 */
1957void
1958csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable)
1959{
1960 struct list_head *tmp;
1961 struct csio_lnode *ln;
1962
1963 csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid);
1964
1965 /* Traverse sibling lnodes list and send evt */
1966 list_for_each(tmp, &hw->sln_head) {
1967 ln = (struct csio_lnode *) tmp;
1968 if (ln->portid != portid)
1969 continue;
1970
1971 if (disable)
1972 csio_lnode_stop(ln);
1973 else
1974 csio_lnode_start(ln);
1975 }
1976}
1977
1978/*
1979 * csio_ln_init - Initialize an lnode.
1980 * @ln: lnode
1981 *
1982 */
1983static int
1984csio_ln_init(struct csio_lnode *ln)
1985{
1986 int rv = -EINVAL;
1987 struct csio_lnode *rln, *pln;
1988 struct csio_hw *hw = csio_lnode_to_hw(ln);
1989
1990 csio_init_state(&ln->sm, csio_lns_uninit);
1991 ln->vnp_flowid = CSIO_INVALID_IDX;
1992 ln->fcf_flowid = CSIO_INVALID_IDX;
1993
1994 if (csio_is_root_ln(ln)) {
1995
1996 /* This is the lnode used during initialization */
1997
1998 ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL);
1999 if (!ln->fcfinfo) {
2000 csio_ln_err(ln, "Failed to alloc FCF record\n");
2001 CSIO_INC_STATS(hw, n_err_nomem);
2002 goto err;
2003 }
2004
2005 INIT_LIST_HEAD(&ln->fcf_lsthead);
2006 kref_init(&ln->fcfinfo->kref);
2007
2008 if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
2009 goto err;
2010
2011 } else { /* Either a non-root physical or a virtual lnode */
2012
2013 /*
2014 * The rest is common for non-root physical and NPIV lnodes.
2015 * Just get references to all other modules
2016 */
2017 rln = csio_root_lnode(ln);
2018
2019 if (csio_is_npiv_ln(ln)) {
2020 /* NPIV */
2021 pln = csio_parent_lnode(ln);
2022 kref_get(&pln->fcfinfo->kref);
2023 ln->fcfinfo = pln->fcfinfo;
2024 } else {
2025 /* Another non-root physical lnode (FCF) */
2026 ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info),
2027 GFP_KERNEL);
2028 if (!ln->fcfinfo) {
2029 csio_ln_err(ln, "Failed to alloc FCF info\n");
2030 CSIO_INC_STATS(hw, n_err_nomem);
2031 goto err;
2032 }
2033
2034 kref_init(&ln->fcfinfo->kref);
2035
2036 if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
2037 goto err;
2038 }
2039
2040 } /* if (!csio_is_root_ln(ln)) */
2041
2042 return 0;
2043err:
2044 return rv;
2045}
2046
2047static void
2048csio_ln_exit(struct csio_lnode *ln)
2049{
2050 struct csio_lnode *pln;
2051
2052 csio_cleanup_rns(ln);
2053 if (csio_is_npiv_ln(ln)) {
2054 pln = csio_parent_lnode(ln);
2055 kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo);
2056 } else {
2057 kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo);
2058 if (csio_fdmi_enable)
2059 csio_ln_fdmi_exit(ln);
2060 }
2061 ln->fcfinfo = NULL;
2062}
2063
2064/**
2065 * csio_lnode_init - Initialize the members of an lnode.
2066 * @ln: lnode
2067 *
2068 */
2069int
2070csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw,
2071 struct csio_lnode *pln)
2072{
2073 int rv = -EINVAL;
2074
2075 /* Link this lnode to hw */
2076 csio_lnode_to_hw(ln) = hw;
2077
2078 /* Link child to parent if child lnode */
2079 if (pln)
2080 ln->pln = pln;
2081 else
2082 ln->pln = NULL;
2083
2084 /* Initialize scsi_tgt and timers to zero */
2085 ln->n_scsi_tgts = 0;
2086 ln->last_scan_ntgts = 0;
2087 ln->tgt_scan_tick = 0;
2088
2089 /* Initialize rnode list */
2090 INIT_LIST_HEAD(&ln->rnhead);
2091 INIT_LIST_HEAD(&ln->cln_head);
2092
2093 /* Initialize log level for debug */
2094 ln->params.log_level = hw->params.log_level;
2095
2096 if (csio_ln_init(ln))
2097 goto err;
2098
2099 /* Add lnode to list of sibling or children lnodes */
2100 spin_lock_irq(&hw->lock);
2101 list_add_tail(&ln->sm.sm_list, pln ? &pln->cln_head : &hw->sln_head);
2102 if (pln)
2103 pln->num_vports++;
2104 spin_unlock_irq(&hw->lock);
2105
2106 hw->num_lns++;
2107
2108 return 0;
2109err:
2110 csio_lnode_to_hw(ln) = NULL;
2111 return rv;
2112}
2113
2114/**
2115 * csio_lnode_exit - De-instantiate an lnode.
2116 * @ln: lnode
2117 *
2118 */
2119void
2120csio_lnode_exit(struct csio_lnode *ln)
2121{
2122 struct csio_hw *hw = csio_lnode_to_hw(ln);
2123
2124 csio_ln_exit(ln);
2125
2126 /* Remove this lnode from hw->sln_head */
2127 spin_lock_irq(&hw->lock);
2128
2129 list_del_init(&ln->sm.sm_list);
2130
2131 /* If it is children lnode, decrement the
2132 * counter in its parent lnode
2133 */
2134 if (ln->pln)
2135 ln->pln->num_vports--;
2136
2137 /* Update root lnode pointer */
2138 if (list_empty(&hw->sln_head))
2139 hw->rln = NULL;
2140 else
2141 hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head);
2142
2143 spin_unlock_irq(&hw->lock);
2144
2145 csio_lnode_to_hw(ln) = NULL;
2146 hw->num_lns--;
2147}