]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/scsi/bfa/bfa_svc.c
[SCSI] bfa: IOC auto recovery fix.
[mirror_ubuntu-artful-kernel.git] / drivers / scsi / bfa / bfa_svc.c
CommitLineData
a36c61f9
KG
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
f16a1750 18#include "bfad_drv.h"
a36c61f9
KG
19#include "bfa_plog.h"
20#include "bfa_cs.h"
21#include "bfa_modules.h"
a36c61f9
KG
22
23BFA_TRC_FILE(HAL, FCXP);
24BFA_MODULE(fcxp);
25BFA_MODULE(sgpg);
26BFA_MODULE(lps);
27BFA_MODULE(fcport);
28BFA_MODULE(rport);
29BFA_MODULE(uf);
30
5fbe25c7 31/*
a36c61f9
KG
32 * LPS related definitions
33 */
34#define BFA_LPS_MIN_LPORTS (1)
35#define BFA_LPS_MAX_LPORTS (256)
36
37/*
38 * Maximum Vports supported per physical port or vf.
39 */
40#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
41#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
42
a36c61f9 43
5fbe25c7 44/*
a36c61f9
KG
45 * FC PORT related definitions
46 */
47/*
48 * The port is considered disabled if corresponding physical port or IOC are
49 * disabled explicitly
50 */
51#define BFA_PORT_IS_DISABLED(bfa) \
52 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
53 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
54
5fbe25c7 55/*
a36c61f9
KG
56 * BFA port state machine events
57 */
58enum bfa_fcport_sm_event {
59 BFA_FCPORT_SM_START = 1, /* start port state machine */
60 BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
61 BFA_FCPORT_SM_ENABLE = 3, /* enable port */
62 BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
63 BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
64 BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
65 BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */
66 BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
67 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
68};
69
5fbe25c7 70/*
a36c61f9
KG
71 * BFA port link notification state machine events
72 */
73
74enum bfa_fcport_ln_sm_event {
75 BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
76 BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
77 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
78};
79
5fbe25c7 80/*
a36c61f9
KG
81 * RPORT related definitions
82 */
83#define bfa_rport_offline_cb(__rp) do { \
84 if ((__rp)->bfa->fcs) \
85 bfa_cb_rport_offline((__rp)->rport_drv); \
86 else { \
87 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
88 __bfa_cb_rport_offline, (__rp)); \
89 } \
90} while (0)
91
92#define bfa_rport_online_cb(__rp) do { \
93 if ((__rp)->bfa->fcs) \
94 bfa_cb_rport_online((__rp)->rport_drv); \
95 else { \
96 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
97 __bfa_cb_rport_online, (__rp)); \
98 } \
99} while (0)
100
5fbe25c7 101/*
a36c61f9
KG
102 * forward declarations FCXP related functions
103 */
104static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
105static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
106 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
107static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
108 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
109static void bfa_fcxp_qresume(void *cbarg);
110static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
111 struct bfi_fcxp_send_req_s *send_req);
112
5fbe25c7 113/*
a36c61f9
KG
114 * forward declarations for LPS functions
115 */
116static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
117 u32 *dm_len);
118static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
119 struct bfa_iocfc_cfg_s *cfg,
120 struct bfa_meminfo_s *meminfo,
121 struct bfa_pcidev_s *pcidev);
122static void bfa_lps_detach(struct bfa_s *bfa);
123static void bfa_lps_start(struct bfa_s *bfa);
124static void bfa_lps_stop(struct bfa_s *bfa);
125static void bfa_lps_iocdisable(struct bfa_s *bfa);
126static void bfa_lps_login_rsp(struct bfa_s *bfa,
127 struct bfi_lps_login_rsp_s *rsp);
128static void bfa_lps_logout_rsp(struct bfa_s *bfa,
129 struct bfi_lps_logout_rsp_s *rsp);
130static void bfa_lps_reqq_resume(void *lps_arg);
131static void bfa_lps_free(struct bfa_lps_s *lps);
132static void bfa_lps_send_login(struct bfa_lps_s *lps);
133static void bfa_lps_send_logout(struct bfa_lps_s *lps);
134static void bfa_lps_login_comp(struct bfa_lps_s *lps);
135static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
136static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
137
5fbe25c7 138/*
a36c61f9
KG
139 * forward declaration for LPS state machine
140 */
141static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
142static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
143static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
144 event);
145static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
146static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
147static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
148 event);
149
5fbe25c7 150/*
a36c61f9
KG
151 * forward declaration for FC Port functions
152 */
153static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
154static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
155static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
156static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
157static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
158static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
159static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
160 enum bfa_port_linkstate event, bfa_boolean_t trunk);
161static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
162 enum bfa_port_linkstate event);
163static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
164static void bfa_fcport_stats_get_timeout(void *cbarg);
165static void bfa_fcport_stats_clr_timeout(void *cbarg);
166static void bfa_trunk_iocdisable(struct bfa_s *bfa);
167
5fbe25c7 168/*
a36c61f9
KG
169 * forward declaration for FC PORT state machine
170 */
171static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
172 enum bfa_fcport_sm_event event);
173static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
174 enum bfa_fcport_sm_event event);
175static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
176 enum bfa_fcport_sm_event event);
177static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
178 enum bfa_fcport_sm_event event);
179static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
180 enum bfa_fcport_sm_event event);
181static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
182 enum bfa_fcport_sm_event event);
183static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
184 enum bfa_fcport_sm_event event);
185static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
186 enum bfa_fcport_sm_event event);
187static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
188 enum bfa_fcport_sm_event event);
189static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
190 enum bfa_fcport_sm_event event);
191static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
192 enum bfa_fcport_sm_event event);
193static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
194 enum bfa_fcport_sm_event event);
195
196static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
197 enum bfa_fcport_ln_sm_event event);
198static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
199 enum bfa_fcport_ln_sm_event event);
200static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
201 enum bfa_fcport_ln_sm_event event);
202static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
203 enum bfa_fcport_ln_sm_event event);
204static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
205 enum bfa_fcport_ln_sm_event event);
206static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
207 enum bfa_fcport_ln_sm_event event);
208static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
209 enum bfa_fcport_ln_sm_event event);
210
211static struct bfa_sm_table_s hal_port_sm_table[] = {
212 {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
213 {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
214 {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
215 {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
216 {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
217 {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
218 {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
219 {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
220 {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
221 {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
222 {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
223 {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
224};
225
226
5fbe25c7 227/*
a36c61f9
KG
228 * forward declaration for RPORT related functions
229 */
230static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
231static void bfa_rport_free(struct bfa_rport_s *rport);
232static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
233static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
234static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
235static void __bfa_cb_rport_online(void *cbarg,
236 bfa_boolean_t complete);
237static void __bfa_cb_rport_offline(void *cbarg,
238 bfa_boolean_t complete);
239
5fbe25c7 240/*
a36c61f9
KG
241 * forward declaration for RPORT state machine
242 */
243static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
244 enum bfa_rport_event event);
245static void bfa_rport_sm_created(struct bfa_rport_s *rp,
246 enum bfa_rport_event event);
247static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
248 enum bfa_rport_event event);
249static void bfa_rport_sm_online(struct bfa_rport_s *rp,
250 enum bfa_rport_event event);
251static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
252 enum bfa_rport_event event);
253static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
254 enum bfa_rport_event event);
255static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
256 enum bfa_rport_event event);
257static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
258 enum bfa_rport_event event);
259static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
260 enum bfa_rport_event event);
261static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
262 enum bfa_rport_event event);
263static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
264 enum bfa_rport_event event);
265static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
266 enum bfa_rport_event event);
267static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
268 enum bfa_rport_event event);
269
5fbe25c7 270/*
a36c61f9
KG
271 * PLOG related definitions
272 */
273static int
274plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
275{
276 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
277 (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
278 return 1;
279
280 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
281 (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
282 return 1;
283
284 return 0;
285}
286
f16a1750
MZ
287static u64
288bfa_get_log_time(void)
289{
290 u64 system_time = 0;
291 struct timeval tv;
292 do_gettimeofday(&tv);
293
294 /* We are interested in seconds only. */
295 system_time = tv.tv_sec;
296 return system_time;
297}
298
a36c61f9
KG
/*
 * Append one record to the circular port log.  The record is dropped when
 * logging is disabled, and rejected (with an assert) when it fails
 * validation.
 */
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
	u16 tail;
	struct bfa_plog_rec_s *pl_recp;

	if (plog->plog_enabled == 0)
		return;

	if (plkd_validate_logrec(pl_rec)) {
		bfa_assert(0);
		return;
	}

	tail = plog->tail;

	pl_recp = &(plog->plog_recs[tail]);

	memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

	/* Timestamp is filled in here, not by the caller. */
	pl_recp->tv = bfa_get_log_time();
	BFA_PL_LOG_REC_INCR(plog->tail);

	/* Ring full: advance head too, overwriting the oldest record. */
	if (plog->head == plog->tail)
		BFA_PL_LOG_REC_INCR(plog->head);
}
325
326void
327bfa_plog_init(struct bfa_plog_s *plog)
328{
6a18b167 329 memset((char *)plog, 0, sizeof(struct bfa_plog_s));
a36c61f9 330
6a18b167 331 memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
a36c61f9
KG
332 plog->head = plog->tail = 0;
333 plog->plog_enabled = 1;
334}
335
336void
337bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
338 enum bfa_plog_eid event,
339 u16 misc, char *log_str)
340{
341 struct bfa_plog_rec_s lp;
342
343 if (plog->plog_enabled) {
6a18b167 344 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
a36c61f9
KG
345 lp.mid = mid;
346 lp.eid = event;
347 lp.log_type = BFA_PL_LOG_TYPE_STRING;
348 lp.misc = misc;
349 strncpy(lp.log_entry.string_log, log_str,
350 BFA_PL_STRING_LOG_SZ - 1);
351 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
352 bfa_plog_add(plog, &lp);
353 }
354}
355
356void
357bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
358 enum bfa_plog_eid event,
359 u16 misc, u32 *intarr, u32 num_ints)
360{
361 struct bfa_plog_rec_s lp;
362 u32 i;
363
364 if (num_ints > BFA_PL_INT_LOG_SZ)
365 num_ints = BFA_PL_INT_LOG_SZ;
366
367 if (plog->plog_enabled) {
6a18b167 368 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
a36c61f9
KG
369 lp.mid = mid;
370 lp.eid = event;
371 lp.log_type = BFA_PL_LOG_TYPE_INT;
372 lp.misc = misc;
373
374 for (i = 0; i < num_ints; i++)
6a18b167 375 lp.log_entry.int_log[i] = intarr[i];
a36c61f9
KG
376
377 lp.log_num_ints = (u8) num_ints;
378
379 bfa_plog_add(plog, &lp);
380 }
381}
382
383void
384bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
385 enum bfa_plog_eid event,
386 u16 misc, struct fchs_s *fchdr)
387{
388 struct bfa_plog_rec_s lp;
389 u32 *tmp_int = (u32 *) fchdr;
390 u32 ints[BFA_PL_INT_LOG_SZ];
391
392 if (plog->plog_enabled) {
6a18b167 393 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
a36c61f9
KG
394
395 ints[0] = tmp_int[0];
396 ints[1] = tmp_int[1];
397 ints[2] = tmp_int[4];
398
399 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
400 }
401}
402
403void
404bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
405 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
406 u32 pld_w0)
407{
408 struct bfa_plog_rec_s lp;
409 u32 *tmp_int = (u32 *) fchdr;
410 u32 ints[BFA_PL_INT_LOG_SZ];
411
412 if (plog->plog_enabled) {
6a18b167 413 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
a36c61f9
KG
414
415 ints[0] = tmp_int[0];
416 ints[1] = tmp_int[1];
417 ints[2] = tmp_int[4];
418 ints[3] = pld_w0;
419
420 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
421 }
422}
423
a36c61f9 424
5fbe25c7 425/*
a36c61f9
KG
426 * fcxp_pvt BFA FCXP private functions
427 */
428
/*
 * Carve the request and response payload buffer pools for all fcxps out
 * of the DMA-able region tracked by the meminfo cursors, zero them, and
 * advance the cursors past the claimed memory.
 */
static void
claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
{
	u8 *dm_kva = NULL;
	u64 dm_pa;
	u32 buf_pool_sz;

	dm_kva = bfa_meminfo_dma_virt(mi);
	dm_pa = bfa_meminfo_dma_phys(mi);

	buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;

	/*
	 * Initialize the fcxp req payload list
	 */
	mod->req_pld_list_kva = dm_kva;
	mod->req_pld_list_pa = dm_pa;
	dm_kva += buf_pool_sz;
	dm_pa += buf_pool_sz;
	memset(mod->req_pld_list_kva, 0, buf_pool_sz);

	/*
	 * Initialize the fcxp rsp payload list
	 */
	buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
	mod->rsp_pld_list_kva = dm_kva;
	mod->rsp_pld_list_pa = dm_pa;
	dm_kva += buf_pool_sz;
	dm_pa += buf_pool_sz;
	memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);

	/* Hand the advanced cursors back to the allocator. */
	bfa_meminfo_dma_virt(mi) = dm_kva;
	bfa_meminfo_dma_phys(mi) = dm_pa;
}
463
464static void
465claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
466{
467 u16 i;
468 struct bfa_fcxp_s *fcxp;
469
470 fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
6a18b167 471 memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
a36c61f9
KG
472
473 INIT_LIST_HEAD(&mod->fcxp_free_q);
474 INIT_LIST_HEAD(&mod->fcxp_active_q);
475
476 mod->fcxp_list = fcxp;
477
478 for (i = 0; i < mod->num_fcxps; i++) {
479 fcxp->fcxp_mod = mod;
480 fcxp->fcxp_tag = i;
481
482 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
483 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
484 fcxp->reqq_waiting = BFA_FALSE;
485
486 fcxp = fcxp + 1;
487 }
488
489 bfa_meminfo_kva(mi) = (void *)fcxp;
490}
491
492static void
493bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
494 u32 *dm_len)
495{
496 u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
497
498 if (num_fcxp_reqs == 0)
499 return;
500
501 /*
502 * Account for req/rsp payload
503 */
504 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
505 if (cfg->drvcfg.min_cfg)
506 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
507 else
508 *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
509
510 /*
511 * Account for fcxp structs
512 */
513 *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
514}
515
/*
 * Module attach: initialize FCXP module state and carve the descriptor
 * and payload pools out of the memory sized earlier by bfa_fcxp_meminfo().
 */
static void
bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
	mod->bfa = bfa;
	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;

	/*
	 * Initialize FCXP request and response payload sizes.
	 */
	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
	if (!cfg->drvcfg.min_cfg)
		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;

	INIT_LIST_HEAD(&mod->wait_q);

	claim_fcxp_req_rsp_mem(mod, meminfo);
	claim_fcxps_mem(mod, meminfo);
}
538
/* Module detach hook — intentionally empty; FCXP needs no teardown here. */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
543
/* Module start hook — intentionally empty; FCXP needs no start action. */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
548
/* Module stop hook — intentionally empty; FCXP needs no stop action. */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
553
/*
 * IOC failure: complete every active fcxp with BFA_STATUS_IOC_FAILURE.
 * Requests issued without a caller context are completed and freed
 * inline; the rest are completed via the callback queue so the owner
 * cleans up in its own context.
 */
static void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
		fcxp = (struct bfa_fcxp_s *) qe;
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
			bfa_fcxp_free(fcxp);
		} else {
			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
				     __bfa_fcxp_send_cbfn, fcxp);
		}
	}
}
574
575static struct bfa_fcxp_s *
576bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
577{
578 struct bfa_fcxp_s *fcxp;
579
580 bfa_q_deq(&fm->fcxp_free_q, &fcxp);
581
582 if (fcxp)
583 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
584
585 return fcxp;
586}
587
588static void
589bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
590 struct bfa_s *bfa,
591 u8 *use_ibuf,
592 u32 *nr_sgles,
593 bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
594 bfa_fcxp_get_sglen_t *r_sglen_cbfn,
595 struct list_head *r_sgpg_q,
596 int n_sgles,
597 bfa_fcxp_get_sgaddr_t sga_cbfn,
598 bfa_fcxp_get_sglen_t sglen_cbfn)
599{
600
601 bfa_assert(bfa != NULL);
602
603 bfa_trc(bfa, fcxp->fcxp_tag);
604
605 if (n_sgles == 0) {
606 *use_ibuf = 1;
607 } else {
608 bfa_assert(*sga_cbfn != NULL);
609 bfa_assert(*sglen_cbfn != NULL);
610
611 *use_ibuf = 0;
612 *r_sga_cbfn = sga_cbfn;
613 *r_sglen_cbfn = sglen_cbfn;
614
615 *nr_sgles = n_sgles;
616
617 /*
618 * alloc required sgpgs
619 */
620 if (n_sgles > BFI_SGE_INLINE)
621 bfa_assert(0);
622 }
623
624}
625
626static void
627bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
628 void *caller, struct bfa_s *bfa, int nreq_sgles,
629 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
630 bfa_fcxp_get_sglen_t req_sglen_cbfn,
631 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
632 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
633{
634
635 bfa_assert(bfa != NULL);
636
637 bfa_trc(bfa, fcxp->fcxp_tag);
638
639 fcxp->caller = caller;
640
641 bfa_fcxp_init_reqrsp(fcxp, bfa,
642 &fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
643 &fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
644 nreq_sgles, req_sga_cbfn, req_sglen_cbfn);
645
646 bfa_fcxp_init_reqrsp(fcxp, bfa,
647 &fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
648 &fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
649 nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
650
651}
652
/*
 * Return an fcxp to the module.  If an allocation waiter is queued, the
 * fcxp is re-initialized with the waiter's parameters and handed to it
 * directly instead of going back to the free list.
 */
static void
bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	struct bfa_fcxp_wqe_s *wqe;

	/* First waiter, if any, gets the fcxp. */
	bfa_q_deq(&mod->wait_q, &wqe);
	if (wqe) {
		bfa_trc(mod->bfa, fcxp->fcxp_tag);

		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
			      wqe->nrsp_sgles, wqe->req_sga_cbfn,
			      wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
			      wqe->rsp_sglen_cbfn);

		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
		return;
	}

	/* No waiter: move from the active to the free list. */
	bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
	list_del(&fcxp->qe);
	list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
}
676
/*
 * Completion sink installed when the sender passed a NULL callback:
 * the response for a discarded fcxp is intentionally ignored.
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
684
685static void
686__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
687{
688 struct bfa_fcxp_s *fcxp = cbarg;
689
690 if (complete) {
691 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
692 fcxp->rsp_status, fcxp->rsp_len,
693 fcxp->residue_len, &fcxp->rsp_fchs);
694 } else {
695 bfa_fcxp_free(fcxp);
696 }
697}
698
/*
 * Firmware completion of an FCXP send: byte-swap the response fields,
 * locate the owning fcxp by tag, and deliver the completion to its owner
 * (inline when there is no caller context, via the callback queue
 * otherwise).
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	u16 fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 * is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	bfa_assert(fcxp->send_cbfn != NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			/* No caller context: complete and free inline. */
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			/* Stash results; completion runs from the cb queue. */
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
				     __bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}
748
749static void
750hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
751{
752 union bfi_addr_u sga_zero = { {0} };
753
754 sge->sg_len = reqlen;
755 sge->flags = BFI_SGE_DATA_LAST;
756 bfa_dma_addr_set(sge[0].sga, req_pa);
757 bfa_sge_to_be(sge);
758 sge++;
759
760 sge->sga = sga_zero;
761 sge->sg_len = reqlen;
762 sge->flags = BFI_SGE_PGDLEN;
763 bfa_sge_to_be(sge);
764}
765
766static void
767hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
768 struct fchs_s *fchs)
769{
770 /*
771 * TODO: TX ox_id
772 */
773 if (reqlen > 0) {
774 if (fcxp->use_ireqbuf) {
775 u32 pld_w0 =
776 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
777
778 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
779 BFA_PL_EID_TX,
780 reqlen + sizeof(struct fchs_s), fchs,
781 pld_w0);
782 } else {
783 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
784 BFA_PL_EID_TX,
785 reqlen + sizeof(struct fchs_s),
786 fchs);
787 }
788 } else {
789 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
790 reqlen + sizeof(struct fchs_s), fchs);
791 }
792}
793
794static void
795hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
796 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
797{
798 if (fcxp_rsp->rsp_len > 0) {
799 if (fcxp->use_irspbuf) {
800 u32 pld_w0 =
801 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
802
803 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
804 BFA_PL_EID_RX,
805 (u16) fcxp_rsp->rsp_len,
806 &fcxp_rsp->fchs, pld_w0);
807 } else {
808 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
809 BFA_PL_EID_RX,
810 (u16) fcxp_rsp->rsp_len,
811 &fcxp_rsp->fchs);
812 }
813 } else {
814 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
815 (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
816 }
817}
818
5fbe25c7 819/*
a36c61f9
KG
 * Handler to resume sending fcxp when space is available in the CPE queue.
821 */
/*
 * Request-queue resume callback: re-issue a send that was parked because
 * the CPE queue was full.
 */
static void
bfa_fcxp_qresume(void *cbarg)
{
	struct bfa_fcxp_s *fcxp = cbarg;
	struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
	struct bfi_fcxp_send_req_s *send_req;

	fcxp->reqq_waiting = BFA_FALSE;
	/*
	 * NOTE(review): bfa_reqq_next() is assumed non-NULL here, since this
	 * callback only fires once queue space became available — confirm.
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	bfa_fcxp_queue(fcxp, send_req);
}
833
5fbe25c7 834/*
a36c61f9
KG
 * Queue fcxp send request to firmware.
836 */
837static void
838bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
839{
840 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
841 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
842 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
843 struct bfa_rport_s *rport = reqi->bfa_rport;
844
845 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
846 bfa_lpuid(bfa));
847
ba816ea8 848 send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
a36c61f9
KG
849 if (rport) {
850 send_req->rport_fw_hndl = rport->fw_handle;
ba816ea8 851 send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
a36c61f9 852 if (send_req->max_frmsz == 0)
ba816ea8 853 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
a36c61f9
KG
854 } else {
855 send_req->rport_fw_hndl = 0;
ba816ea8 856 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
a36c61f9
KG
857 }
858
ba816ea8 859 send_req->vf_id = cpu_to_be16(reqi->vf_id);
a36c61f9
KG
860 send_req->lp_tag = reqi->lp_tag;
861 send_req->class = reqi->class;
862 send_req->rsp_timeout = rspi->rsp_timeout;
863 send_req->cts = reqi->cts;
864 send_req->fchs = reqi->fchs;
865
ba816ea8
JH
866 send_req->req_len = cpu_to_be32(reqi->req_tot_len);
867 send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
a36c61f9
KG
868
869 /*
870 * setup req sgles
871 */
872 if (fcxp->use_ireqbuf == 1) {
873 hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
874 BFA_FCXP_REQ_PLD_PA(fcxp));
875 } else {
876 if (fcxp->nreq_sgles > 0) {
877 bfa_assert(fcxp->nreq_sgles == 1);
878 hal_fcxp_set_local_sges(send_req->req_sge,
879 reqi->req_tot_len,
880 fcxp->req_sga_cbfn(fcxp->caller,
881 0));
882 } else {
883 bfa_assert(reqi->req_tot_len == 0);
884 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
885 }
886 }
887
888 /*
889 * setup rsp sgles
890 */
891 if (fcxp->use_irspbuf == 1) {
892 bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);
893
894 hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
895 BFA_FCXP_RSP_PLD_PA(fcxp));
896
897 } else {
898 if (fcxp->nrsp_sgles > 0) {
899 bfa_assert(fcxp->nrsp_sgles == 1);
900 hal_fcxp_set_local_sges(send_req->rsp_sge,
901 rspi->rsp_maxlen,
902 fcxp->rsp_sga_cbfn(fcxp->caller,
903 0));
904 } else {
905 bfa_assert(rspi->rsp_maxlen == 0);
906 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
907 }
908 }
909
910 hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
911
912 bfa_reqq_produce(bfa, BFA_REQQ_FCXP);
913
914 bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
915 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
916}
917
5fbe25c7 918/*
a36c61f9
KG
919 * Allocate an FCXP instance to send a response or to send a request
920 * that has a response. Request/response buffers are allocated by caller.
921 *
922 * @param[in] bfa BFA bfa instance
923 * @param[in] nreq_sgles Number of SG elements required for request
924 * buffer. 0, if fcxp internal buffers are used.
925 * Use bfa_fcxp_get_reqbuf() to get the
926 * internal req buffer.
927 * @param[in] req_sgles SG elements describing request buffer. Will be
928 * copied in by BFA and hence can be freed on
929 * return from this function.
930 * @param[in] get_req_sga function ptr to be called to get a request SG
931 * Address (given the sge index).
932 * @param[in] get_req_sglen function ptr to be called to get a request SG
933 * len (given the sge index).
934 * @param[in] get_rsp_sga function ptr to be called to get a response SG
935 * Address (given the sge index).
936 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
937 * len (given the sge index).
938 *
939 * @return FCXP instance. NULL on failure.
940 */
941struct bfa_fcxp_s *
942bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
943 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
944 bfa_fcxp_get_sglen_t req_sglen_cbfn,
945 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
946 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
947{
948 struct bfa_fcxp_s *fcxp = NULL;
949
950 bfa_assert(bfa != NULL);
951
952 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
953 if (fcxp == NULL)
954 return NULL;
955
956 bfa_trc(bfa, fcxp->fcxp_tag);
957
958 bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
959 req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
960
961 return fcxp;
962}
963
5fbe25c7 964/*
a36c61f9
KG
965 * Get the internal request buffer pointer
966 *
967 * @param[in] fcxp BFA fcxp pointer
968 *
969 * @return pointer to the internal request buffer
970 */
971void *
972bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
973{
974 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
975 void *reqbuf;
976
977 bfa_assert(fcxp->use_ireqbuf == 1);
978 reqbuf = ((u8 *)mod->req_pld_list_kva) +
979 fcxp->fcxp_tag * mod->req_pld_sz;
980 return reqbuf;
981}
982
983u32
984bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
985{
986 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
987
988 return mod->req_pld_sz;
989}
990
5fbe25c7 991/*
a36c61f9
KG
992 * Get the internal response buffer pointer
993 *
994 * @param[in] fcxp BFA fcxp pointer
995 *
996 * @return pointer to the internal request buffer
997 */
998void *
999bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
1000{
1001 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1002 void *rspbuf;
1003
1004 bfa_assert(fcxp->use_irspbuf == 1);
1005
1006 rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
1007 fcxp->fcxp_tag * mod->rsp_pld_sz;
1008 return rspbuf;
1009}
1010
5fbe25c7 1011/*
da99dcc9 1012 * Free the BFA FCXP
a36c61f9
KG
1013 *
1014 * @param[in] fcxp BFA fcxp pointer
1015 *
1016 * @return void
1017 */
1018void
1019bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1020{
1021 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1022
1023 bfa_assert(fcxp != NULL);
1024 bfa_trc(mod->bfa, fcxp->fcxp_tag);
1025 bfa_fcxp_put(fcxp);
1026}
1027
5fbe25c7 1028/*
a36c61f9
KG
1029 * Send a FCXP request
1030 *
1031 * @param[in] fcxp BFA fcxp pointer
1032 * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
1033 * @param[in] vf_id virtual Fabric ID
1034 * @param[in] lp_tag lport tag
 * @param[in]	cts	use Continuous sequence
1036 * @param[in] cos fc Class of Service
1037 * @param[in] reqlen request length, does not include FCHS length
1038 * @param[in] fchs fc Header Pointer. The header content will be copied
1039 * in by BFA.
1040 *
1041 * @param[in] cbfn call back function to be called on receiving
1042 * the response
1043 * @param[in] cbarg arg for cbfn
1044 * @param[in] rsp_timeout
1045 * response timeout
1046 *
 * @return void
1048 */
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
	u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
	u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
	void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
	struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
	struct bfi_fcxp_send_req_s *send_req;

	bfa_trc(bfa, fcxp->fcxp_tag);

	/*
	 * setup request/response info
	 */
	reqi->bfa_rport = rport;
	reqi->vf_id = vf_id;
	reqi->lp_tag = lp_tag;
	reqi->class = cos;
	rspi->rsp_timeout = rsp_timeout;
	reqi->cts = cts;
	reqi->fchs = *fchs;
	reqi->req_tot_len = reqlen;
	rspi->rsp_maxlen = rsp_maxlen;
	/* a NULL callback is replaced with a no-op completion handler */
	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
	fcxp->send_cbarg = cbarg;

	/*
	 * If no room in CPE queue, wait for space in request queue;
	 * the send is retried when the queue drains (reqq_waiting flag
	 * is checked by bfa_fcxp_discard()).
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	if (!send_req) {
		bfa_trc(bfa, fcxp->fcxp_tag);
		fcxp->reqq_waiting = BFA_TRUE;
		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
		return;
	}

	bfa_fcxp_queue(fcxp, send_req);
}
1090
5fbe25c7 1091/*
a36c61f9
KG
1092 * Abort a BFA FCXP
1093 *
1094 * @param[in] fcxp BFA fcxp pointer
1095 *
1096 * @return void
1097 */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	/* Abort is not implemented; any caller reaching here is a bug. */
	bfa_assert(0);
	return BFA_STATUS_OK;
}
1105
1106void
1107bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1108 bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1109 void *caller, int nreq_sgles,
1110 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1111 bfa_fcxp_get_sglen_t req_sglen_cbfn,
1112 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1113 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
1114{
1115 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1116
1117 bfa_assert(list_empty(&mod->fcxp_free_q));
1118
1119 wqe->alloc_cbfn = alloc_cbfn;
1120 wqe->alloc_cbarg = alloc_cbarg;
1121 wqe->caller = caller;
1122 wqe->bfa = bfa;
1123 wqe->nreq_sgles = nreq_sgles;
1124 wqe->nrsp_sgles = nrsp_sgles;
1125 wqe->req_sga_cbfn = req_sga_cbfn;
1126 wqe->req_sglen_cbfn = req_sglen_cbfn;
1127 wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1128 wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1129
1130 list_add_tail(&wqe->qe, &mod->wait_q);
1131}
1132
1133void
1134bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1135{
1136 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1137
1138 bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
1139 list_del(&wqe->qe);
1140}
1141
void
bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
{
	/*
	 * If waiting for room in request queue, cancel reqq wait
	 * and free fcxp.
	 */
	if (fcxp->reqq_waiting) {
		fcxp->reqq_waiting = BFA_FALSE;
		bfa_reqq_wcancel(&fcxp->reqq_wqe);
		bfa_fcxp_free(fcxp);
		return;
	}

	/*
	 * Otherwise the fcxp stays allocated; the caller's completion
	 * is suppressed by redirecting it to the null handler.
	 */
	fcxp->send_cbfn = bfa_fcxp_null_comp;
}
1158
a36c61f9
KG
void
bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	/* Demultiplex FCXP-class messages arriving from firmware. */
	switch (msg->mhdr.msg_id) {
	case BFI_FCXP_I2H_SEND_RSP:
		hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
		break;

	default:
		/* Unexpected message id from firmware -- trace and assert. */
		bfa_trc(bfa, msg->mhdr.msg_id);
		bfa_assert(0);
	}
}
1172
1173u32
1174bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1175{
1176 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1177
1178 return mod->rsp_pld_sz;
1179}
1180
1181
5fbe25c7 1182/*
a36c61f9
KG
1183 * BFA LPS state machine functions
1184 */
1185
5fbe25c7 1186/*
a36c61f9
KG
1187 * Init state -- no login
1188 */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		/* Send the login now, or wait for request-queue space. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* Not logged in: complete the logout immediately. */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		/* Nothing outstanding in init state -- ignore. */
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when fabric detects loopback and discards
		 * the lps request. Fw will eventually send out the timeout.
		 * Just ignore.
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1237
5fbe25c7 1238/*
a36c61f9
KG
1239 * login is in progress -- awaiting response from firmware
1240 */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		if (lps->status == BFA_STATUS_OK) {
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
		} else {
			/* Login failed: drop back to init state. */
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		/* Notify the consumer of the login outcome. */
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1279
5fbe25c7 1280/*
a36c61f9
KG
1281 * login pending - awaiting space in request queue
1282 */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space available; the wait-queue callback sends. */
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1311
5fbe25c7 1312/*
a36c61f9
KG
1313 * login complete
1314 */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		/* Send the logout now, or wait for request-queue space. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1352
5fbe25c7 1353/*
a36c61f9
KG
1354 * logout in progress - awaiting firmware response
1355 */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		/* Firmware acked the logout: notify the consumer. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1376
5fbe25c7 1377/*
a36c61f9
KG
1378 * logout pending -- awaiting space in request queue
1379 */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space freed up: send the deferred logout. */
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1401
1402
1403
5fbe25c7 1404/*
a36c61f9
KG
1405 * lps_pvt BFA LPS private functions
1406 */
1407
5fbe25c7 1408/*
a36c61f9
KG
1409 * return memory requirement
1410 */
1411static void
1412bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
1413 u32 *dm_len)
1414{
1415 if (cfg->drvcfg.min_cfg)
1416 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
1417 else
1418 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
1419}
1420
5fbe25c7 1421/*
a36c61f9
KG
1422 * bfa module attach at initialization time
1423 */
1424static void
1425bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1426 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
1427{
1428 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1429 struct bfa_lps_s *lps;
1430 int i;
1431
6a18b167 1432 memset(mod, 0, sizeof(struct bfa_lps_mod_s));
a36c61f9
KG
1433 mod->num_lps = BFA_LPS_MAX_LPORTS;
1434 if (cfg->drvcfg.min_cfg)
1435 mod->num_lps = BFA_LPS_MIN_LPORTS;
1436 else
1437 mod->num_lps = BFA_LPS_MAX_LPORTS;
1438 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
1439
1440 bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
1441
1442 INIT_LIST_HEAD(&mod->lps_free_q);
1443 INIT_LIST_HEAD(&mod->lps_active_q);
1444
1445 for (i = 0; i < mod->num_lps; i++, lps++) {
1446 lps->bfa = bfa;
1447 lps->lp_tag = (u8) i;
1448 lps->reqq = BFA_REQQ_LPS;
1449 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1450 list_add_tail(&lps->qe, &mod->lps_free_q);
1451 }
1452}
1453
/* Module detach hook -- nothing to tear down for LPS. */
static void
bfa_lps_detach(struct bfa_s *bfa)
{
}
1458
/* Module start hook -- LPS needs no start-time work. */
static void
bfa_lps_start(struct bfa_s *bfa)
{
}
1463
/* Module stop hook -- LPS needs no stop-time work. */
static void
bfa_lps_stop(struct bfa_s *bfa)
{
}
1468
5fbe25c7 1469/*
a36c61f9
KG
1470 * IOC in disabled state -- consider all lps offline
1471 */
1472static void
1473bfa_lps_iocdisable(struct bfa_s *bfa)
1474{
1475 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1476 struct bfa_lps_s *lps;
1477 struct list_head *qe, *qen;
1478
1479 list_for_each_safe(qe, qen, &mod->lps_active_q) {
1480 lps = (struct bfa_lps_s *) qe;
1481 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1482 }
1483}
1484
5fbe25c7 1485/*
a36c61f9
KG
1486 * Firmware login response
1487 */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s *lps;

	/* The tag comes from firmware; it must index a valid lport. */
	bfa_assert(rsp->lp_tag < mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);

	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		/* Cache the fabric parameters returned by firmware. */
		lps->fport = rsp->f_port;
		lps->npiv_en = rsp->npiv_en;
		lps->lp_pid = rsp->lp_pid;
		/* bb_credit arrives big-endian on the wire. */
		lps->pr_bbcred = be16_to_cpu(rsp->bb_credit);
		lps->pr_pwwn = rsp->port_name;
		lps->pr_nwwn = rsp->node_name;
		lps->auth_req = rsp->auth_req;
		lps->lp_mac = rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac = rsp->fcf_mac;

		break;

	case BFA_STATUS_FABRIC_RJT:
		/* Record LS_RJT reason/explanation for diagnostics. */
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;

		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;

		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
1531
5fbe25c7 1532/*
a36c61f9
KG
1533 * Firmware logout response
1534 */
1535static void
1536bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1537{
1538 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1539 struct bfa_lps_s *lps;
1540
1541 bfa_assert(rsp->lp_tag < mod->num_lps);
1542 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
1543
1544 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1545}
1546
5fbe25c7 1547/*
a36c61f9
KG
1548 * Firmware received a Clear virtual link request (for FCoE)
1549 */
1550static void
1551bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1552{
1553 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1554 struct bfa_lps_s *lps;
1555
1556 lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
1557
1558 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1559}
1560
5fbe25c7 1561/*
a36c61f9
KG
1562 * Space is available in request queue, resume queueing request to firmware.
1563 */
1564static void
1565bfa_lps_reqq_resume(void *lps_arg)
1566{
1567 struct bfa_lps_s *lps = lps_arg;
1568
1569 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1570}
1571
5fbe25c7 1572/*
a36c61f9
KG
1573 * lps is freed -- triggered by vport delete
1574 */
1575static void
1576bfa_lps_free(struct bfa_lps_s *lps)
1577{
1578 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
1579
1580 lps->lp_pid = 0;
1581 list_del(&lps->qe);
1582 list_add_tail(&lps->qe, &mod->lps_free_q);
1583}
1584
5fbe25c7 1585/*
a36c61f9
KG
1586 * send login request to firmware
1587 */
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
	struct bfi_lps_login_req_s *m;

	/* State machine only enters here when queue space is available. */
	m = bfa_reqq_next(lps->bfa, lps->reqq);
	bfa_assert(m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
		bfa_lpuid(lps->bfa));

	m->lp_tag = lps->lp_tag;
	m->alpa = lps->alpa;
	/* pdu_size goes out big-endian on the wire. */
	m->pdu_size = cpu_to_be16(lps->pdusz);
	m->pwwn = lps->pwwn;
	m->nwwn = lps->nwwn;
	m->fdisc = lps->fdisc;
	m->auth_en = lps->auth_en;

	bfa_reqq_produce(lps->bfa, lps->reqq);
}
1609
5fbe25c7 1610/*
a36c61f9
KG
1611 * send logout request to firmware
1612 */
1613static void
1614bfa_lps_send_logout(struct bfa_lps_s *lps)
1615{
1616 struct bfi_lps_logout_req_s *m;
1617
1618 m = bfa_reqq_next(lps->bfa, lps->reqq);
1619 bfa_assert(m);
1620
1621 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1622 bfa_lpuid(lps->bfa));
1623
1624 m->lp_tag = lps->lp_tag;
1625 m->port_name = lps->pwwn;
1626 bfa_reqq_produce(lps->bfa, lps->reqq);
1627}
1628
5fbe25c7 1629/*
a36c61f9
KG
1630 * Indirect login completion handler for non-fcs
1631 */
1632static void
1633bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1634{
1635 struct bfa_lps_s *lps = arg;
1636
1637 if (!complete)
1638 return;
1639
1640 if (lps->fdisc)
1641 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1642 else
1643 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1644}
1645
5fbe25c7 1646/*
a36c61f9
KG
1647 * Login completion handler -- direct call for fcs, queue for others
1648 */
1649static void
1650bfa_lps_login_comp(struct bfa_lps_s *lps)
1651{
1652 if (!lps->bfa->fcs) {
1653 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1654 lps);
1655 return;
1656 }
1657
1658 if (lps->fdisc)
1659 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1660 else
1661 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1662}
1663
5fbe25c7 1664/*
a36c61f9
KG
1665 * Indirect logout completion handler for non-fcs
1666 */
1667static void
1668bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1669{
1670 struct bfa_lps_s *lps = arg;
1671
1672 if (!complete)
1673 return;
1674
1675 if (lps->fdisc)
1676 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1677}
1678
5fbe25c7 1679/*
a36c61f9
KG
1680 * Logout completion handler -- direct call for fcs, queue for others
1681 */
1682static void
1683bfa_lps_logout_comp(struct bfa_lps_s *lps)
1684{
1685 if (!lps->bfa->fcs) {
1686 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1687 lps);
1688 return;
1689 }
1690 if (lps->fdisc)
1691 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1692}
1693
5fbe25c7 1694/*
a36c61f9
KG
1695 * Clear virtual link completion handler for non-fcs
1696 */
1697static void
1698bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1699{
1700 struct bfa_lps_s *lps = arg;
1701
1702 if (!complete)
1703 return;
1704
1705 /* Clear virtual link to base port will result in link down */
1706 if (lps->fdisc)
1707 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1708}
1709
5fbe25c7 1710/*
a36c61f9
KG
1711 * Received Clear virtual link event --direct call for fcs,
1712 * queue for others
1713 */
1714static void
1715bfa_lps_cvl_event(struct bfa_lps_s *lps)
1716{
1717 if (!lps->bfa->fcs) {
1718 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1719 lps);
1720 return;
1721 }
1722
1723 /* Clear virtual link to base port will result in link down */
1724 if (lps->fdisc)
1725 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1726}
1727
1728
1729
5fbe25c7 1730/*
a36c61f9
KG
1731 * lps_public BFA LPS public functions
1732 */
1733
1734u32
1735bfa_lps_get_max_vport(struct bfa_s *bfa)
1736{
1737 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1738 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1739 else
1740 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1741}
1742
5fbe25c7 1743/*
a36c61f9
KG
 * Allocate a lport service tag.
1745 */
1746struct bfa_lps_s *
1747bfa_lps_alloc(struct bfa_s *bfa)
1748{
1749 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1750 struct bfa_lps_s *lps = NULL;
1751
1752 bfa_q_deq(&mod->lps_free_q, &lps);
1753
1754 if (lps == NULL)
1755 return NULL;
1756
1757 list_add_tail(&lps->qe, &mod->lps_active_q);
1758
1759 bfa_sm_set_state(lps, bfa_lps_sm_init);
1760 return lps;
1761}
1762
5fbe25c7 1763/*
a36c61f9
KG
1764 * Free lport service tag. This can be called anytime after an alloc.
1765 * No need to wait for any pending login/logout completions.
1766 */
1767void
1768bfa_lps_delete(struct bfa_lps_s *lps)
1769{
1770 bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1771}
1772
5fbe25c7 1773/*
a36c61f9
KG
1774 * Initiate a lport login.
1775 */
1776void
1777bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1778 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
1779{
1780 lps->uarg = uarg;
1781 lps->alpa = alpa;
1782 lps->pdusz = pdusz;
1783 lps->pwwn = pwwn;
1784 lps->nwwn = nwwn;
1785 lps->fdisc = BFA_FALSE;
1786 lps->auth_en = auth_en;
1787 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1788}
1789
5fbe25c7 1790/*
a36c61f9
KG
1791 * Initiate a lport fdisc login.
1792 */
1793void
1794bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1795 wwn_t nwwn)
1796{
1797 lps->uarg = uarg;
1798 lps->alpa = 0;
1799 lps->pdusz = pdusz;
1800 lps->pwwn = pwwn;
1801 lps->nwwn = nwwn;
1802 lps->fdisc = BFA_TRUE;
1803 lps->auth_en = BFA_FALSE;
1804 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1805}
1806
a36c61f9 1807
5fbe25c7 1808/*
a36c61f9
KG
 * Initiate a lport FDISC logout.
1810 */
1811void
1812bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1813{
1814 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1815}
1816
a36c61f9 1817
5fbe25c7 1818/*
a36c61f9
KG
1819 * Return lport services tag given the pid
1820 */
1821u8
1822bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1823{
1824 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1825 struct bfa_lps_s *lps;
1826 int i;
1827
1828 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1829 if (lps->lp_pid == pid)
1830 return lps->lp_tag;
1831 }
1832
1833 /* Return base port tag anyway */
1834 return 0;
1835}
1836
a36c61f9 1837
5fbe25c7 1838/*
a36c61f9
KG
1839 * return port id assigned to the base lport
1840 */
1841u32
1842bfa_lps_get_base_pid(struct bfa_s *bfa)
1843{
1844 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1845
1846 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1847}
1848
5fbe25c7 1849/*
a36c61f9
KG
1850 * LPS firmware message class handler.
1851 */
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_lps_i2h_msg_u msg;

	bfa_trc(bfa, m->mhdr.msg_id);
	msg.msg = m;

	/* Demultiplex LPS-class messages arriving from firmware. */
	switch (m->mhdr.msg_id) {
	case BFI_LPS_H2I_LOGIN_RSP:
		bfa_lps_login_rsp(bfa, msg.login_rsp);
		break;

	case BFI_LPS_H2I_LOGOUT_RSP:
		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
		break;

	case BFI_LPS_H2I_CVL_EVENT:
		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
		break;

	default:
		/* Unexpected message id from firmware -- trace and assert. */
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
1878
5fbe25c7 1879/*
a36c61f9
KG
1880 * FC PORT state machine functions
1881 */
/*
 * Uninitialized state -- port not yet started.
 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		/* NOTE(review): presumably requests the persisted (flash)
		 * port configuration on first enable -- confirm. */
		fcport->use_flash_cfg = BFA_TRUE;

		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			/* No request-queue space: wait for it. */
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will be a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
1929
/*
 * Port enable requested, waiting for request-queue space.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space freed up: send the deferred enable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
1986
/*
 * Port enable sent to firmware, awaiting response.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		/* Enable acked (or link still down): wait for link up. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		bfa_assert(fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2041
/*
 * Port enabled, link down -- awaiting link up.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		bfa_assert(fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {

			/* FCoE mode: record FIP FCF discovery outcome. */
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2120
/*
 * Port enabled and link up.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		/* Disabling an online port: report offline first. */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* Unexpected link loss is an error; admin disable is not. */
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2202
/*
 * Port disable requested, waiting for request-queue space.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space freed up: send the deferred disable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable arrived while disable pending: toggle instead. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2247
2248static void
2249bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
2250 enum bfa_fcport_sm_event event)
2251{
2252 bfa_trc(fcport->bfa, event);
2253
2254 switch (event) {
2255 case BFA_FCPORT_SM_QRESUME:
2256 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2257 bfa_fcport_send_disable(fcport);
2258 if (bfa_fcport_send_enable(fcport))
2259 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2260 else
2261 bfa_sm_set_state(fcport,
2262 bfa_fcport_sm_enabling_qwait);
2263 break;
2264
2265 case BFA_FCPORT_SM_STOP:
2266 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2267 bfa_reqq_wcancel(&fcport->reqq_wait);
2268 break;
2269
2270 case BFA_FCPORT_SM_ENABLE:
2271 break;
2272
2273 case BFA_FCPORT_SM_DISABLE:
2274 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
2275 break;
2276
2277 case BFA_FCPORT_SM_LINKUP:
2278 case BFA_FCPORT_SM_LINKDOWN:
5fbe25c7 2279 /*
a36c61f9
KG
2280 * Possible to get link events when doing back-to-back
2281 * enable/disables.
2282 */
2283 break;
2284
2285 case BFA_FCPORT_SM_HWFAIL:
2286 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2287 bfa_reqq_wcancel(&fcport->reqq_wait);
2288 break;
2289
2290 default:
2291 bfa_sm_fault(fcport->bfa, event);
2292 }
2293}
2294
2295static void
2296bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
2297 enum bfa_fcport_sm_event event)
2298{
2299 char pwwn_buf[BFA_STRING_32];
2300 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2301 bfa_trc(fcport->bfa, event);
2302
2303 switch (event) {
2304 case BFA_FCPORT_SM_FWRSP:
2305 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2306 break;
2307
2308 case BFA_FCPORT_SM_DISABLE:
5fbe25c7 2309 /*
a36c61f9
KG
2310 * Already being disabled.
2311 */
2312 break;
2313
2314 case BFA_FCPORT_SM_ENABLE:
2315 if (bfa_fcport_send_enable(fcport))
2316 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2317 else
2318 bfa_sm_set_state(fcport,
2319 bfa_fcport_sm_enabling_qwait);
2320
2321 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2322 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2323 wwn2str(pwwn_buf, fcport->pwwn);
88166242 2324 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
a36c61f9
KG
2325 "Base port enabled: WWN = %s\n", pwwn_buf);
2326 break;
2327
2328 case BFA_FCPORT_SM_STOP:
2329 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2330 break;
2331
2332 case BFA_FCPORT_SM_LINKUP:
2333 case BFA_FCPORT_SM_LINKDOWN:
5fbe25c7 2334 /*
a36c61f9
KG
2335 * Possible to get link events when doing back-to-back
2336 * enable/disables.
2337 */
2338 break;
2339
2340 case BFA_FCPORT_SM_HWFAIL:
2341 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2342 break;
2343
2344 default:
2345 bfa_sm_fault(fcport->bfa, event);
2346 }
2347}
2348
2349static void
2350bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2351 enum bfa_fcport_sm_event event)
2352{
2353 char pwwn_buf[BFA_STRING_32];
2354 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2355 bfa_trc(fcport->bfa, event);
2356
2357 switch (event) {
2358 case BFA_FCPORT_SM_START:
5fbe25c7 2359 /*
a36c61f9
KG
2360 * Ignore start event for a port that is disabled.
2361 */
2362 break;
2363
2364 case BFA_FCPORT_SM_STOP:
2365 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2366 break;
2367
2368 case BFA_FCPORT_SM_ENABLE:
2369 if (bfa_fcport_send_enable(fcport))
2370 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2371 else
2372 bfa_sm_set_state(fcport,
2373 bfa_fcport_sm_enabling_qwait);
2374
2375 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2376 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2377 wwn2str(pwwn_buf, fcport->pwwn);
88166242 2378 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
a36c61f9
KG
2379 "Base port enabled: WWN = %s\n", pwwn_buf);
2380 break;
2381
2382 case BFA_FCPORT_SM_DISABLE:
5fbe25c7 2383 /*
a36c61f9
KG
2384 * Already disabled.
2385 */
2386 break;
2387
2388 case BFA_FCPORT_SM_HWFAIL:
2389 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2390 break;
2391
2392 default:
2393 bfa_sm_fault(fcport->bfa, event);
2394 }
2395}
2396
2397static void
2398bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2399 enum bfa_fcport_sm_event event)
2400{
2401 bfa_trc(fcport->bfa, event);
2402
2403 switch (event) {
2404 case BFA_FCPORT_SM_START:
2405 if (bfa_fcport_send_enable(fcport))
2406 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2407 else
2408 bfa_sm_set_state(fcport,
2409 bfa_fcport_sm_enabling_qwait);
2410 break;
2411
2412 default:
5fbe25c7 2413 /*
a36c61f9
KG
2414 * Ignore all other events.
2415 */
2416 ;
2417 }
2418}
2419
5fbe25c7 2420/*
a36c61f9
KG
2421 * Port is enabled. IOC is down/failed.
2422 */
2423static void
2424bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2425 enum bfa_fcport_sm_event event)
2426{
2427 bfa_trc(fcport->bfa, event);
2428
2429 switch (event) {
2430 case BFA_FCPORT_SM_START:
2431 if (bfa_fcport_send_enable(fcport))
2432 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2433 else
2434 bfa_sm_set_state(fcport,
2435 bfa_fcport_sm_enabling_qwait);
2436 break;
2437
2438 default:
5fbe25c7 2439 /*
a36c61f9
KG
2440 * Ignore all events.
2441 */
2442 ;
2443 }
2444}
2445
5fbe25c7 2446/*
a36c61f9
KG
2447 * Port is disabled. IOC is down/failed.
2448 */
2449static void
2450bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2451 enum bfa_fcport_sm_event event)
2452{
2453 bfa_trc(fcport->bfa, event);
2454
2455 switch (event) {
2456 case BFA_FCPORT_SM_START:
2457 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2458 break;
2459
2460 case BFA_FCPORT_SM_ENABLE:
2461 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2462 break;
2463
2464 default:
5fbe25c7 2465 /*
a36c61f9
KG
2466 * Ignore all events.
2467 */
2468 ;
2469 }
2470}
2471
5fbe25c7 2472/*
a36c61f9
KG
2473 * Link state is down
2474 */
2475static void
2476bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2477 enum bfa_fcport_ln_sm_event event)
2478{
2479 bfa_trc(ln->fcport->bfa, event);
2480
2481 switch (event) {
2482 case BFA_FCPORT_LN_SM_LINKUP:
2483 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2484 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2485 break;
2486
2487 default:
2488 bfa_sm_fault(ln->fcport->bfa, event);
2489 }
2490}
2491
5fbe25c7 2492/*
a36c61f9
KG
2493 * Link state is waiting for down notification
2494 */
2495static void
2496bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2497 enum bfa_fcport_ln_sm_event event)
2498{
2499 bfa_trc(ln->fcport->bfa, event);
2500
2501 switch (event) {
2502 case BFA_FCPORT_LN_SM_LINKUP:
2503 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2504 break;
2505
2506 case BFA_FCPORT_LN_SM_NOTIFICATION:
2507 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2508 break;
2509
2510 default:
2511 bfa_sm_fault(ln->fcport->bfa, event);
2512 }
2513}
2514
5fbe25c7 2515/*
a36c61f9
KG
2516 * Link state is waiting for down notification and there is a pending up
2517 */
2518static void
2519bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2520 enum bfa_fcport_ln_sm_event event)
2521{
2522 bfa_trc(ln->fcport->bfa, event);
2523
2524 switch (event) {
2525 case BFA_FCPORT_LN_SM_LINKDOWN:
2526 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2527 break;
2528
2529 case BFA_FCPORT_LN_SM_NOTIFICATION:
2530 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2531 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2532 break;
2533
2534 default:
2535 bfa_sm_fault(ln->fcport->bfa, event);
2536 }
2537}
2538
5fbe25c7 2539/*
a36c61f9
KG
2540 * Link state is up
2541 */
2542static void
2543bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2544 enum bfa_fcport_ln_sm_event event)
2545{
2546 bfa_trc(ln->fcport->bfa, event);
2547
2548 switch (event) {
2549 case BFA_FCPORT_LN_SM_LINKDOWN:
2550 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2551 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2552 break;
2553
2554 default:
2555 bfa_sm_fault(ln->fcport->bfa, event);
2556 }
2557}
2558
5fbe25c7 2559/*
a36c61f9
KG
2560 * Link state is waiting for up notification
2561 */
2562static void
2563bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2564 enum bfa_fcport_ln_sm_event event)
2565{
2566 bfa_trc(ln->fcport->bfa, event);
2567
2568 switch (event) {
2569 case BFA_FCPORT_LN_SM_LINKDOWN:
2570 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2571 break;
2572
2573 case BFA_FCPORT_LN_SM_NOTIFICATION:
2574 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2575 break;
2576
2577 default:
2578 bfa_sm_fault(ln->fcport->bfa, event);
2579 }
2580}
2581
5fbe25c7 2582/*
a36c61f9
KG
2583 * Link state is waiting for up notification and there is a pending down
2584 */
2585static void
2586bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2587 enum bfa_fcport_ln_sm_event event)
2588{
2589 bfa_trc(ln->fcport->bfa, event);
2590
2591 switch (event) {
2592 case BFA_FCPORT_LN_SM_LINKUP:
2593 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2594 break;
2595
2596 case BFA_FCPORT_LN_SM_NOTIFICATION:
2597 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2598 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2599 break;
2600
2601 default:
2602 bfa_sm_fault(ln->fcport->bfa, event);
2603 }
2604}
2605
5fbe25c7 2606/*
a36c61f9
KG
2607 * Link state is waiting for up notification and there are pending down and up
2608 */
2609static void
2610bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2611 enum bfa_fcport_ln_sm_event event)
2612{
2613 bfa_trc(ln->fcport->bfa, event);
2614
2615 switch (event) {
2616 case BFA_FCPORT_LN_SM_LINKDOWN:
2617 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2618 break;
2619
2620 case BFA_FCPORT_LN_SM_NOTIFICATION:
2621 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2622 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2623 break;
2624
2625 default:
2626 bfa_sm_fault(ln->fcport->bfa, event);
2627 }
2628}
2629
a36c61f9
KG
2630static void
2631__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2632{
2633 struct bfa_fcport_ln_s *ln = cbarg;
2634
2635 if (complete)
2636 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2637 else
2638 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2639}
2640
5fbe25c7 2641/*
a36c61f9
KG
2642 * Send SCN notification to upper layers.
2643 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2644 */
2645static void
2646bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2647 bfa_boolean_t trunk)
2648{
2649 if (fcport->cfg.trunked && !trunk)
2650 return;
2651
2652 switch (event) {
2653 case BFA_PORT_LINKUP:
2654 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2655 break;
2656 case BFA_PORT_LINKDOWN:
2657 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2658 break;
2659 default:
2660 bfa_assert(0);
2661 }
2662}
2663
2664static void
2665bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2666{
2667 struct bfa_fcport_s *fcport = ln->fcport;
2668
2669 if (fcport->bfa->fcs) {
2670 fcport->event_cbfn(fcport->event_cbarg, event);
2671 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2672 } else {
2673 ln->ln_event = event;
2674 bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2675 __bfa_cb_fcport_event, ln);
2676 }
2677}
2678
2679#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
2680 BFA_CACHELINE_SZ))
2681
2682static void
2683bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
2684 u32 *dm_len)
2685{
2686 *dm_len += FCPORT_STATS_DMA_SZ;
2687}
2688
2689static void
2690bfa_fcport_qresume(void *cbarg)
2691{
2692 struct bfa_fcport_s *fcport = cbarg;
2693
2694 bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
2695}
2696
2697static void
2698bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
2699{
2700 u8 *dm_kva;
2701 u64 dm_pa;
2702
2703 dm_kva = bfa_meminfo_dma_virt(meminfo);
2704 dm_pa = bfa_meminfo_dma_phys(meminfo);
2705
2706 fcport->stats_kva = dm_kva;
2707 fcport->stats_pa = dm_pa;
2708 fcport->stats = (union bfa_fcport_stats_u *) dm_kva;
2709
2710 dm_kva += FCPORT_STATS_DMA_SZ;
2711 dm_pa += FCPORT_STATS_DMA_SZ;
2712
2713 bfa_meminfo_dma_virt(meminfo) = dm_kva;
2714 bfa_meminfo_dma_phys(meminfo) = dm_pa;
2715}
2716
5fbe25c7 2717/*
a36c61f9
KG
2718 * Memory initialization.
2719 */
2720static void
2721bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2722 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
2723{
2724 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2725 struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
2726 struct bfa_fcport_ln_s *ln = &fcport->ln;
f16a1750 2727 struct timeval tv;
a36c61f9 2728
6a18b167 2729 memset(fcport, 0, sizeof(struct bfa_fcport_s));
a36c61f9
KG
2730 fcport->bfa = bfa;
2731 ln->fcport = fcport;
2732
2733 bfa_fcport_mem_claim(fcport, meminfo);
2734
2735 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
2736 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2737
5fbe25c7 2738 /*
a36c61f9
KG
2739 * initialize time stamp for stats reset
2740 */
f16a1750 2741 do_gettimeofday(&tv);
a36c61f9
KG
2742 fcport->stats_reset_time = tv.tv_sec;
2743
5fbe25c7 2744 /*
a36c61f9
KG
2745 * initialize and set default configuration
2746 */
2747 port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
2748 port_cfg->speed = BFA_PORT_SPEED_AUTO;
2749 port_cfg->trunked = BFA_FALSE;
2750 port_cfg->maxfrsize = 0;
2751
2752 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
2753
2754 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
2755}
2756
/*
 * Module detach: nothing to undo here.
 */
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
}
2761
5fbe25c7 2762/*
a36c61f9
KG
2763 * Called when IOC is ready.
2764 */
2765static void
2766bfa_fcport_start(struct bfa_s *bfa)
2767{
2768 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
2769}
2770
5fbe25c7 2771/*
a36c61f9
KG
2772 * Called before IOC is stopped.
2773 */
2774static void
2775bfa_fcport_stop(struct bfa_s *bfa)
2776{
2777 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
2778 bfa_trunk_iocdisable(bfa);
2779}
2780
5fbe25c7 2781/*
a36c61f9
KG
2782 * Called when IOC failure is detected.
2783 */
2784static void
2785bfa_fcport_iocdisable(struct bfa_s *bfa)
2786{
2787 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2788
2789 bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
2790 bfa_trunk_iocdisable(bfa);
2791}
2792
2793static void
2794bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
2795{
2796 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2797 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2798
2799 fcport->speed = pevent->link_state.speed;
2800 fcport->topology = pevent->link_state.topology;
2801
2802 if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
2803 fcport->myalpa = 0;
2804
2805 /* QoS Details */
6a18b167
JH
2806 fcport->qos_attr = pevent->link_state.qos_attr;
2807 fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;
a36c61f9 2808
5fbe25c7 2809 /*
a36c61f9
KG
2810 * update trunk state if applicable
2811 */
2812 if (!fcport->cfg.trunked)
2813 trunk->attr.state = BFA_TRUNK_DISABLED;
2814
2815 /* update FCoE specific */
ba816ea8 2816 fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);
a36c61f9
KG
2817
2818 bfa_trc(fcport->bfa, fcport->speed);
2819 bfa_trc(fcport->bfa, fcport->topology);
2820}
2821
2822static void
2823bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
2824{
2825 fcport->speed = BFA_PORT_SPEED_UNKNOWN;
2826 fcport->topology = BFA_PORT_TOPOLOGY_NONE;
2827}
2828
5fbe25c7 2829/*
a36c61f9
KG
2830 * Send port enable message to firmware.
2831 */
2832static bfa_boolean_t
2833bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
2834{
2835 struct bfi_fcport_enable_req_s *m;
2836
5fbe25c7 2837 /*
a36c61f9
KG
2838 * Increment message tag before queue check, so that responses to old
2839 * requests are discarded.
2840 */
2841 fcport->msgtag++;
2842
5fbe25c7 2843 /*
a36c61f9
KG
2844 * check for room in queue to send request now
2845 */
2846 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
2847 if (!m) {
2848 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
2849 &fcport->reqq_wait);
2850 return BFA_FALSE;
2851 }
2852
2853 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
2854 bfa_lpuid(fcport->bfa));
2855 m->nwwn = fcport->nwwn;
2856 m->pwwn = fcport->pwwn;
2857 m->port_cfg = fcport->cfg;
2858 m->msgtag = fcport->msgtag;
ba816ea8 2859 m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
f3a060ca 2860 m->use_flash_cfg = fcport->use_flash_cfg;
a36c61f9
KG
2861 bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
2862 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
2863 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
2864
5fbe25c7 2865 /*
a36c61f9
KG
2866 * queue I/O message to firmware
2867 */
2868 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
2869 return BFA_TRUE;
2870}
2871
5fbe25c7 2872/*
a36c61f9
KG
2873 * Send port disable message to firmware.
2874 */
2875static bfa_boolean_t
2876bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
2877{
2878 struct bfi_fcport_req_s *m;
2879
5fbe25c7 2880 /*
a36c61f9
KG
2881 * Increment message tag before queue check, so that responses to old
2882 * requests are discarded.
2883 */
2884 fcport->msgtag++;
2885
5fbe25c7 2886 /*
a36c61f9
KG
2887 * check for room in queue to send request now
2888 */
2889 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
2890 if (!m) {
2891 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
2892 &fcport->reqq_wait);
2893 return BFA_FALSE;
2894 }
2895
2896 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
2897 bfa_lpuid(fcport->bfa));
2898 m->msgtag = fcport->msgtag;
2899
5fbe25c7 2900 /*
a36c61f9
KG
2901 * queue I/O message to firmware
2902 */
2903 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
2904
2905 return BFA_TRUE;
2906}
2907
2908static void
2909bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
2910{
f7f73812
MZ
2911 fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
2912 fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
a36c61f9
KG
2913
2914 bfa_trc(fcport->bfa, fcport->pwwn);
2915 bfa_trc(fcport->bfa, fcport->nwwn);
2916}
2917
2918static void
2919bfa_fcport_send_txcredit(void *port_cbarg)
2920{
2921
2922 struct bfa_fcport_s *fcport = port_cbarg;
2923 struct bfi_fcport_set_svc_params_req_s *m;
2924
5fbe25c7 2925 /*
a36c61f9
KG
2926 * check for room in queue to send request now
2927 */
2928 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
2929 if (!m) {
2930 bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
2931 return;
2932 }
2933
2934 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
2935 bfa_lpuid(fcport->bfa));
ba816ea8 2936 m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
a36c61f9 2937
5fbe25c7 2938 /*
a36c61f9
KG
2939 * queue I/O message to firmware
2940 */
2941 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
2942}
2943
2944static void
2945bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
2946 struct bfa_qos_stats_s *s)
2947{
2948 u32 *dip = (u32 *) d;
50444a34 2949 __be32 *sip = (__be32 *) s;
a36c61f9
KG
2950 int i;
2951
2952 /* Now swap the 32 bit fields */
2953 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
ba816ea8 2954 dip[i] = be32_to_cpu(sip[i]);
a36c61f9
KG
2955}
2956
2957static void
2958bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
2959 struct bfa_fcoe_stats_s *s)
2960{
2961 u32 *dip = (u32 *) d;
50444a34 2962 __be32 *sip = (__be32 *) s;
a36c61f9
KG
2963 int i;
2964
2965 for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
2966 i = i + 2) {
f16a1750 2967#ifdef __BIG_ENDIAN
ba816ea8
JH
2968 dip[i] = be32_to_cpu(sip[i]);
2969 dip[i + 1] = be32_to_cpu(sip[i + 1]);
a36c61f9 2970#else
ba816ea8
JH
2971 dip[i] = be32_to_cpu(sip[i + 1]);
2972 dip[i + 1] = be32_to_cpu(sip[i]);
a36c61f9
KG
2973#endif
2974 }
2975}
2976
2977static void
2978__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
2979{
2980 struct bfa_fcport_s *fcport = cbarg;
2981
2982 if (complete) {
2983 if (fcport->stats_status == BFA_STATUS_OK) {
f16a1750 2984 struct timeval tv;
a36c61f9
KG
2985
2986 /* Swap FC QoS or FCoE stats */
2987 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
2988 bfa_fcport_qos_stats_swap(
2989 &fcport->stats_ret->fcqos,
2990 &fcport->stats->fcqos);
2991 } else {
2992 bfa_fcport_fcoe_stats_swap(
2993 &fcport->stats_ret->fcoe,
2994 &fcport->stats->fcoe);
2995
f16a1750 2996 do_gettimeofday(&tv);
a36c61f9
KG
2997 fcport->stats_ret->fcoe.secs_reset =
2998 tv.tv_sec - fcport->stats_reset_time;
2999 }
3000 }
3001 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
3002 } else {
3003 fcport->stats_busy = BFA_FALSE;
3004 fcport->stats_status = BFA_STATUS_OK;
3005 }
3006}
3007
3008static void
3009bfa_fcport_stats_get_timeout(void *cbarg)
3010{
3011 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3012
3013 bfa_trc(fcport->bfa, fcport->stats_qfull);
3014
3015 if (fcport->stats_qfull) {
3016 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3017 fcport->stats_qfull = BFA_FALSE;
3018 }
3019
3020 fcport->stats_status = BFA_STATUS_ETIMER;
3021 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
3022 fcport);
3023}
3024
3025static void
3026bfa_fcport_send_stats_get(void *cbarg)
3027{
3028 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3029 struct bfi_fcport_req_s *msg;
3030
3031 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3032
3033 if (!msg) {
3034 fcport->stats_qfull = BFA_TRUE;
3035 bfa_reqq_winit(&fcport->stats_reqq_wait,
3036 bfa_fcport_send_stats_get, fcport);
3037 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3038 &fcport->stats_reqq_wait);
3039 return;
3040 }
3041 fcport->stats_qfull = BFA_FALSE;
3042
6a18b167 3043 memset(msg, 0, sizeof(struct bfi_fcport_req_s));
a36c61f9
KG
3044 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
3045 bfa_lpuid(fcport->bfa));
3046 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3047}
3048
3049static void
3050__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3051{
3052 struct bfa_fcport_s *fcport = cbarg;
3053
3054 if (complete) {
f16a1750 3055 struct timeval tv;
a36c61f9 3056
5fbe25c7 3057 /*
a36c61f9
KG
3058 * re-initialize time stamp for stats reset
3059 */
f16a1750 3060 do_gettimeofday(&tv);
a36c61f9
KG
3061 fcport->stats_reset_time = tv.tv_sec;
3062
3063 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
3064 } else {
3065 fcport->stats_busy = BFA_FALSE;
3066 fcport->stats_status = BFA_STATUS_OK;
3067 }
3068}
3069
3070static void
3071bfa_fcport_stats_clr_timeout(void *cbarg)
3072{
3073 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3074
3075 bfa_trc(fcport->bfa, fcport->stats_qfull);
3076
3077 if (fcport->stats_qfull) {
3078 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3079 fcport->stats_qfull = BFA_FALSE;
3080 }
3081
3082 fcport->stats_status = BFA_STATUS_ETIMER;
3083 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3084 __bfa_cb_fcport_stats_clr, fcport);
3085}
3086
3087static void
3088bfa_fcport_send_stats_clear(void *cbarg)
3089{
3090 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3091 struct bfi_fcport_req_s *msg;
3092
3093 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3094
3095 if (!msg) {
3096 fcport->stats_qfull = BFA_TRUE;
3097 bfa_reqq_winit(&fcport->stats_reqq_wait,
3098 bfa_fcport_send_stats_clear, fcport);
3099 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3100 &fcport->stats_reqq_wait);
3101 return;
3102 }
3103 fcport->stats_qfull = BFA_FALSE;
3104
6a18b167 3105 memset(msg, 0, sizeof(struct bfi_fcport_req_s));
a36c61f9
KG
3106 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
3107 bfa_lpuid(fcport->bfa));
3108 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3109}
3110
5fbe25c7 3111/*
a36c61f9
KG
3112 * Handle trunk SCN event from firmware.
3113 */
3114static void
3115bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3116{
3117 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
3118 struct bfi_fcport_trunk_link_s *tlink;
3119 struct bfa_trunk_link_attr_s *lattr;
3120 enum bfa_trunk_state state_prev;
3121 int i;
3122 int link_bm = 0;
3123
3124 bfa_trc(fcport->bfa, fcport->cfg.trunked);
3125 bfa_assert(scn->trunk_state == BFA_TRUNK_ONLINE ||
3126 scn->trunk_state == BFA_TRUNK_OFFLINE);
3127
3128 bfa_trc(fcport->bfa, trunk->attr.state);
3129 bfa_trc(fcport->bfa, scn->trunk_state);
3130 bfa_trc(fcport->bfa, scn->trunk_speed);
3131
5fbe25c7 3132 /*
a36c61f9
KG
3133 * Save off new state for trunk attribute query
3134 */
3135 state_prev = trunk->attr.state;
3136 if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
3137 trunk->attr.state = scn->trunk_state;
3138 trunk->attr.speed = scn->trunk_speed;
3139 for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3140 lattr = &trunk->attr.link_attr[i];
3141 tlink = &scn->tlink[i];
3142
3143 lattr->link_state = tlink->state;
3144 lattr->trunk_wwn = tlink->trunk_wwn;
3145 lattr->fctl = tlink->fctl;
3146 lattr->speed = tlink->speed;
ba816ea8 3147 lattr->deskew = be32_to_cpu(tlink->deskew);
a36c61f9
KG
3148
3149 if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
3150 fcport->speed = tlink->speed;
3151 fcport->topology = BFA_PORT_TOPOLOGY_P2P;
3152 link_bm |= 1 << i;
3153 }
3154
3155 bfa_trc(fcport->bfa, lattr->link_state);
3156 bfa_trc(fcport->bfa, lattr->trunk_wwn);
3157 bfa_trc(fcport->bfa, lattr->fctl);
3158 bfa_trc(fcport->bfa, lattr->speed);
3159 bfa_trc(fcport->bfa, lattr->deskew);
3160 }
3161
3162 switch (link_bm) {
3163 case 3:
3164 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3165 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
3166 break;
3167 case 2:
3168 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3169 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
3170 break;
3171 case 1:
3172 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3173 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
3174 break;
3175 default:
3176 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3177 BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
3178 }
3179
5fbe25c7 3180 /*
a36c61f9
KG
3181 * Notify upper layers if trunk state changed.
3182 */
3183 if ((state_prev != trunk->attr.state) ||
3184 (scn->trunk_state == BFA_TRUNK_OFFLINE)) {
3185 bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
3186 BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
3187 }
3188}
3189
3190static void
3191bfa_trunk_iocdisable(struct bfa_s *bfa)
3192{
3193 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3194 int i = 0;
3195
5fbe25c7 3196 /*
a36c61f9
KG
3197 * In trunked mode, notify upper layers that link is down
3198 */
3199 if (fcport->cfg.trunked) {
3200 if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3201 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3202
3203 fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3204 fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3205 for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3206 fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3207 fcport->trunk.attr.link_attr[i].fctl =
3208 BFA_TRUNK_LINK_FCTL_NORMAL;
3209 fcport->trunk.attr.link_attr[i].link_state =
3210 BFA_TRUNK_LINK_STATE_DN_LINKDN;
3211 fcport->trunk.attr.link_attr[i].speed =
3212 BFA_PORT_SPEED_UNKNOWN;
3213 fcport->trunk.attr.link_attr[i].deskew = 0;
3214 }
3215 }
3216}
3217
5fbe25c7 3218/*
a36c61f9
KG
3219 * Called to initialize port attributes
3220 */
3221void
3222bfa_fcport_init(struct bfa_s *bfa)
3223{
3224 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3225
5fbe25c7 3226 /*
a36c61f9
KG
3227 * Initialize port attributes from IOC hardware data.
3228 */
3229 bfa_fcport_set_wwns(fcport);
3230 if (fcport->cfg.maxfrsize == 0)
3231 fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
3232 fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3233 fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3234
3235 bfa_assert(fcport->cfg.maxfrsize);
3236 bfa_assert(fcport->cfg.rx_bbcredit);
3237 bfa_assert(fcport->speed_sup);
3238}
3239
5fbe25c7 3240/*
a36c61f9
KG
3241 * Firmware message handler.
3242 */
3243void
3244bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3245{
3246 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3247 union bfi_fcport_i2h_msg_u i2hmsg;
3248
3249 i2hmsg.msg = msg;
3250 fcport->event_arg.i2hmsg = i2hmsg;
3251
3252 bfa_trc(bfa, msg->mhdr.msg_id);
3253 bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
3254
3255 switch (msg->mhdr.msg_id) {
3256 case BFI_FCPORT_I2H_ENABLE_RSP:
f3a060ca
KG
3257 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
3258
3259 if (fcport->use_flash_cfg) {
3260 fcport->cfg = i2hmsg.penable_rsp->port_cfg;
3261 fcport->cfg.maxfrsize =
3262 cpu_to_be16(fcport->cfg.maxfrsize);
3263 fcport->cfg.path_tov =
3264 cpu_to_be16(fcport->cfg.path_tov);
3265 fcport->cfg.q_depth =
3266 cpu_to_be16(fcport->cfg.q_depth);
3267
3268 if (fcport->cfg.trunked)
3269 fcport->trunk.attr.state =
3270 BFA_TRUNK_OFFLINE;
3271 else
3272 fcport->trunk.attr.state =
3273 BFA_TRUNK_DISABLED;
3274 fcport->use_flash_cfg = BFA_FALSE;
3275 }
3276
a36c61f9 3277 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
f3a060ca 3278 }
a36c61f9
KG
3279 break;
3280
3281 case BFI_FCPORT_I2H_DISABLE_RSP:
3282 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3283 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3284 break;
3285
3286 case BFI_FCPORT_I2H_EVENT:
3287 if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3288 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3289 else
3290 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
3291 break;
3292
3293 case BFI_FCPORT_I2H_TRUNK_SCN:
3294 bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
3295 break;
3296
3297 case BFI_FCPORT_I2H_STATS_GET_RSP:
3298 /*
3299 * check for timer pop before processing the rsp
3300 */
3301 if (fcport->stats_busy == BFA_FALSE ||
3302 fcport->stats_status == BFA_STATUS_ETIMER)
3303 break;
3304
3305 bfa_timer_stop(&fcport->timer);
3306 fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3307 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3308 __bfa_cb_fcport_stats_get, fcport);
3309 break;
3310
3311 case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3312 /*
3313 * check for timer pop before processing the rsp
3314 */
3315 if (fcport->stats_busy == BFA_FALSE ||
3316 fcport->stats_status == BFA_STATUS_ETIMER)
3317 break;
3318
3319 bfa_timer_stop(&fcport->timer);
3320 fcport->stats_status = BFA_STATUS_OK;
3321 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3322 __bfa_cb_fcport_stats_clr, fcport);
3323 break;
3324
3325 case BFI_FCPORT_I2H_ENABLE_AEN:
3326 bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
3327 break;
3328
3329 case BFI_FCPORT_I2H_DISABLE_AEN:
3330 bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
3331 break;
3332
3333 default:
3334 bfa_assert(0);
3335 break;
3336 }
3337}
3338
5fbe25c7 3339/*
a36c61f9
KG
3340 * Registered callback for port events.
3341 */
3342void
3343bfa_fcport_event_register(struct bfa_s *bfa,
3344 void (*cbfn) (void *cbarg,
3345 enum bfa_port_linkstate event),
3346 void *cbarg)
3347{
3348 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3349
3350 fcport->event_cbfn = cbfn;
3351 fcport->event_cbarg = cbarg;
3352}
3353
3354bfa_status_t
3355bfa_fcport_enable(struct bfa_s *bfa)
3356{
3357 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3358
3359 if (bfa_ioc_is_disabled(&bfa->ioc))
3360 return BFA_STATUS_IOC_DISABLED;
3361
3362 if (fcport->diag_busy)
3363 return BFA_STATUS_DIAG_BUSY;
3364
3365 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3366 return BFA_STATUS_OK;
3367}
3368
3369bfa_status_t
3370bfa_fcport_disable(struct bfa_s *bfa)
3371{
3372
3373 if (bfa_ioc_is_disabled(&bfa->ioc))
3374 return BFA_STATUS_IOC_DISABLED;
3375
3376 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3377 return BFA_STATUS_OK;
3378}
3379
5fbe25c7 3380/*
a36c61f9
KG
3381 * Configure port speed.
3382 */
3383bfa_status_t
3384bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3385{
3386 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3387
3388 bfa_trc(bfa, speed);
3389
3390 if (fcport->cfg.trunked == BFA_TRUE)
3391 return BFA_STATUS_TRUNK_ENABLED;
3392 if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3393 bfa_trc(bfa, fcport->speed_sup);
3394 return BFA_STATUS_UNSUPP_SPEED;
3395 }
3396
3397 fcport->cfg.speed = speed;
3398
3399 return BFA_STATUS_OK;
3400}
3401
5fbe25c7 3402/*
a36c61f9
KG
3403 * Get current speed.
3404 */
3405enum bfa_port_speed
3406bfa_fcport_get_speed(struct bfa_s *bfa)
3407{
3408 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3409
3410 return fcport->speed;
3411}
3412
5fbe25c7 3413/*
a36c61f9
KG
3414 * Configure port topology.
3415 */
3416bfa_status_t
3417bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3418{
3419 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3420
3421 bfa_trc(bfa, topology);
3422 bfa_trc(bfa, fcport->cfg.topology);
3423
3424 switch (topology) {
3425 case BFA_PORT_TOPOLOGY_P2P:
3426 case BFA_PORT_TOPOLOGY_LOOP:
3427 case BFA_PORT_TOPOLOGY_AUTO:
3428 break;
3429
3430 default:
3431 return BFA_STATUS_EINVAL;
3432 }
3433
3434 fcport->cfg.topology = topology;
3435 return BFA_STATUS_OK;
3436}
3437
5fbe25c7 3438/*
a36c61f9
KG
3439 * Get current topology.
3440 */
3441enum bfa_port_topology
3442bfa_fcport_get_topology(struct bfa_s *bfa)
3443{
3444 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3445
3446 return fcport->topology;
3447}
3448
3449bfa_status_t
3450bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3451{
3452 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3453
3454 bfa_trc(bfa, alpa);
3455 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3456 bfa_trc(bfa, fcport->cfg.hardalpa);
3457
3458 fcport->cfg.cfg_hardalpa = BFA_TRUE;
3459 fcport->cfg.hardalpa = alpa;
3460
3461 return BFA_STATUS_OK;
3462}
3463
3464bfa_status_t
3465bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3466{
3467 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3468
3469 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3470 bfa_trc(bfa, fcport->cfg.hardalpa);
3471
3472 fcport->cfg.cfg_hardalpa = BFA_FALSE;
3473 return BFA_STATUS_OK;
3474}
3475
3476bfa_boolean_t
3477bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3478{
3479 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3480
3481 *alpa = fcport->cfg.hardalpa;
3482 return fcport->cfg.cfg_hardalpa;
3483}
3484
3485u8
3486bfa_fcport_get_myalpa(struct bfa_s *bfa)
3487{
3488 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3489
3490 return fcport->myalpa;
3491}
3492
3493bfa_status_t
3494bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3495{
3496 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3497
3498 bfa_trc(bfa, maxfrsize);
3499 bfa_trc(bfa, fcport->cfg.maxfrsize);
3500
3501 /* with in range */
3502 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3503 return BFA_STATUS_INVLD_DFSZ;
3504
3505 /* power of 2, if not the max frame size of 2112 */
3506 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3507 return BFA_STATUS_INVLD_DFSZ;
3508
3509 fcport->cfg.maxfrsize = maxfrsize;
3510 return BFA_STATUS_OK;
3511}
3512
3513u16
3514bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3515{
3516 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3517
3518 return fcport->cfg.maxfrsize;
3519}
3520
3521u8
3522bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3523{
3524 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3525
3526 return fcport->cfg.rx_bbcredit;
3527}
3528
/*
 * Set the transmit buffer-to-buffer credit and push it to firmware.
 * Note: the u16 argument is truncated to the u8 config field; values
 * above 255 will wrap — callers are expected to stay in range.
 */
void
bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
	bfa_fcport_send_txcredit(fcport);
}
3537
5fbe25c7 3538/*
a36c61f9
KG
3539 * Get port attributes.
3540 */
3541
3542wwn_t
3543bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3544{
3545 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3546 if (node)
3547 return fcport->nwwn;
3548 else
3549 return fcport->pwwn;
3550}
3551
/*
 * Fill @attr with a snapshot of the port's current attributes:
 * WWNs, configuration copy, speed, topology, beacon and FCoE state.
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	/* factory-programmed WWNs come from the IOC attributes */
	attr->factorypwwn = bfa->ioc.attr->mfg_pwwn;
	attr->factorynwwn = bfa->ioc.attr->mfg_nwwn;

	memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));
	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;
	attr->plog_enabled = (bfa_boolean_t)fcport->bfa->plog->plog_enabled;
	attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);

	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
	/*
	 * IOC-level conditions override the state-machine-derived port
	 * state: a disabled IOC or a firmware mismatch wins.
	 */
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
	if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_IOCDIS;
	else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_FWMISMATCH;

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
3595
3596#define BFA_FCPORT_STATS_TOV 1000
3597
5fbe25c7 3598/*
a36c61f9
KG
3599 * Fetch port statistics (FCQoS or FCoE).
3600 */
/*
 * Fetch port statistics (FCQoS or FCoE) asynchronously.
 * Only one statistics operation may be in flight: stats_busy serializes
 * get and clear requests. On success @cbfn is invoked later with the
 * result; a timer guards against a lost firmware response.
 */
bfa_status_t
bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
	bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	/* claim the stats channel before issuing the request */
	fcport->stats_busy = BFA_TRUE;
	fcport->stats_ret = stats;
	fcport->stats_cbfn = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_get(fcport);

	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
		fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
3623
5fbe25c7 3624/*
a36c61f9
KG
3625 * Reset port statistics (FCQoS or FCoE).
3626 */
/*
 * Reset port statistics (FCQoS or FCoE) asynchronously.
 * Shares the stats_busy flag with bfa_fcport_get_stats(), so only one
 * stats operation can run at a time; completion is reported via @cbfn
 * and bounded by the same timeout timer.
 */
bfa_status_t
bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fcport->stats_busy = BFA_TRUE;
	fcport->stats_cbfn = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_clear(fcport);

	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
		fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
3647
a36c61f9 3648
5fbe25c7 3649/*
a36c61f9
KG
3650 * Fetch port attributes.
3651 */
3652bfa_boolean_t
3653bfa_fcport_is_disabled(struct bfa_s *bfa)
3654{
3655 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3656
3657 return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
3658 BFA_PORT_ST_DISABLED;
3659
3660}
3661
3662bfa_boolean_t
3663bfa_fcport_is_ratelim(struct bfa_s *bfa)
3664{
3665 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3666
3667 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3668
3669}
3670
5fbe25c7 3671/*
a36c61f9
KG
3672 * Get default minimum ratelim speed
3673 */
3674enum bfa_port_speed
3675bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3676{
3677 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3678
3679 bfa_trc(bfa, fcport->cfg.trl_def_speed);
3680 return fcport->cfg.trl_def_speed;
3681
3682}
a36c61f9
KG
3683
3684bfa_boolean_t
3685bfa_fcport_is_linkup(struct bfa_s *bfa)
3686{
3687 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3688
3689 return (!fcport->cfg.trunked &&
3690 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
3691 (fcport->cfg.trunked &&
3692 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
3693}
3694
3695bfa_boolean_t
3696bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
3697{
3698 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3699
3700 return fcport->cfg.qos_enabled;
3701}
3702
5fbe25c7 3703/*
a36c61f9
KG
3704 * Rport State machine functions
3705 */
5fbe25c7 3706/*
a36c61f9
KG
3707 * Beginning state, only online event expected.
3708 */
/*
 * Beginning state: only CREATE is a legal event; everything else is a
 * state-machine fault.
 */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		bfa_stats(rp, sm_un_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3726
/*
 * Created state: waiting for the rport to be brought online. ONLINE
 * triggers the firmware create request (falling to the qfull variant
 * when the request queue is exhausted); DELETE frees the rport without
 * any firmware interaction since none was started.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3758
5fbe25c7 3759/*
a36c61f9
KG
3760 * Waiting for rport create response from firmware.
3761 */
/*
 * Waiting for the rport create response from firmware. DELETE/OFFLINE
 * arriving before the response are deferred via the *_pending states so
 * the create/delete exchange with firmware stays strictly ordered.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3795
5fbe25c7 3796/*
a36c61f9
KG
3797 * Request queue is full, awaiting queue resume to send create request.
3798 */
/*
 * Request queue was full; waiting for queue-resume to send the create
 * request. Any exit from this state must cancel the request-queue wait
 * element (reqq_wait), otherwise a stale resume callback would fire.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3836
5fbe25c7 3837/*
a36c61f9
KG
3838 * Online state - normal parking state.
3839 */
/*
 * Online state - normal parking state. Handles teardown (OFFLINE /
 * DELETE via firmware delete), speed updates, and QoS state-change
 * notifications pushed by firmware.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		/* flow ids arrive in wire (big-endian) order */
		qos_scn->old_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		/* notify the driver only about attributes that changed */
		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3904
5fbe25c7 3905/*
a36c61f9
KG
3906 * Firmware rport is being deleted - awaiting f/w response.
3907 */
/*
 * Firmware rport is being deleted - awaiting the f/w response. A DELETE
 * arriving here upgrades the teardown to a full delete (the rport will
 * be freed once the response comes back, via the deleting state).
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3937
/*
 * Delete request could not be queued (request queue full); waiting for
 * queue-resume to send it. HWFAIL must cancel the queue-wait element
 * before reporting the rport offline.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3967
5fbe25c7 3968/*
a36c61f9
KG
3969 * Offline state.
3970 */
/*
 * Offline state: rport exists but has no firmware instance. DELETE
 * frees it locally; ONLINE re-issues the firmware create.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4002
5fbe25c7 4003/*
a36c61f9
KG
4004 * Rport is deleted, waiting for firmware response to delete.
4005 */
/*
 * Rport is deleted; waiting for the firmware delete response before
 * freeing. HWFAIL frees immediately — no response will come.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4029
/*
 * Delete is pending but the firmware delete request is still waiting
 * for request-queue space. QRESUME sends it; HWFAIL cancels the queue
 * wait and frees the rport directly.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4054
5fbe25c7 4055/*
a36c61f9
KG
4056 * Waiting for rport create response from firmware. A delete is pending.
4057 */
/*
 * Waiting for the rport create response from firmware while a delete
 * is already pending: once the create is acknowledged, immediately
 * issue the firmware delete.
 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4085
5fbe25c7 4086/*
a36c61f9
KG
4087 * Waiting for rport create response from firmware. Rport offline is pending.
4088 */
/*
 * Waiting for the rport create response while an offline is pending:
 * when the create is acknowledged, tear the firmware rport back down.
 * A DELETE arriving here escalates to the delete-pending state.
 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
			     enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4120
5fbe25c7 4121/*
a36c61f9
KG
4122 * IOC h/w failed.
4123 */
/*
 * IOC h/w failed. No firmware interaction is possible: OFFLINE and
 * DELETE complete locally, ONLINE attempts a fresh firmware create
 * (used during IOC recovery), and a repeated HWFAIL is ignored.
 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4158
4159
4160
5fbe25c7 4161/*
a36c61f9
KG
4162 * bfa_rport_private BFA rport private functions
4163 */
4164
4165static void
4166__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4167{
4168 struct bfa_rport_s *rp = cbarg;
4169
4170 if (complete)
4171 bfa_cb_rport_online(rp->rport_drv);
4172}
4173
4174static void
4175__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4176{
4177 struct bfa_rport_s *rp = cbarg;
4178
4179 if (complete)
4180 bfa_cb_rport_offline(rp->rport_drv);
4181}
4182
/*
 * Request-queue resume callback: space is available again, so kick the
 * state machine to retry the queued firmware request.
 */
static void
bfa_rport_qresume(void *cbarg)
{
	struct bfa_rport_s *rp = cbarg;

	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
}
4190
/*
 * Report kernel-memory needs for the rport module: one bfa_rport_s per
 * configured rport (clamped up to BFA_RPORT_MIN). No DMA memory is
 * required, so *dm_len is deliberately untouched.
 */
static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
		cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
}
4200
/*
 * Module attach: carve the rport array out of the pre-sized KVA region,
 * initialize each rport and place it on the free queue. num_rports must
 * be a non-zero power of two (firmware requirement asserted below).
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);

	rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	bfa_assert(mod->num_rports &&
		   !(mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 * Tag 0 is reserved/unused: it is initialized but never
		 * put on the free queue.
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory: advance the KVA cursor past the rport array
	 */
	bfa_meminfo_kva(meminfo) = (u8 *) rp;
}
4239
/* Module detach: intentionally empty — nothing to release. */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
4244
/* Module start: intentionally empty — no start-time work needed. */
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
4249
/* Module stop: intentionally empty — no stop-time work needed. */
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
4254
/*
 * IOC disable: push HWFAIL into every active rport's state machine.
 * The _safe iterator is required because HWFAIL handling may free the
 * rport and move it off the active queue.
 */
static void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rport;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &mod->rp_active_q) {
		rport = (struct bfa_rport_s *) qe;
		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
	}
}
4267
4268static struct bfa_rport_s *
4269bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4270{
4271 struct bfa_rport_s *rport;
4272
4273 bfa_q_deq(&mod->rp_free_q, &rport);
4274 if (rport)
4275 list_add_tail(&rport->qe, &mod->rp_active_q);
4276
4277 return rport;
4278}
4279
/*
 * Return an rport to the free queue. The rport must currently be on
 * the active queue (asserted) — double-free would corrupt the lists.
 */
static void
bfa_rport_free(struct bfa_rport_s *rport)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);

	bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
	list_del(&rport->qe);
	list_add_tail(&rport->qe, &mod->rp_free_q);
}
4289
/*
 * Build and queue the firmware rport-create request. Returns BFA_FALSE
 * (after arming the request-queue wait) when no request slot is
 * available; the state machine retries on QRESUME.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_lpuid(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	/* wire format is big-endian */
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_tag = rp->rport_info.lp_tag;
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4322
/*
 * Build and queue the firmware rport-delete request. Returns BFA_FALSE
 * (after arming the request-queue wait) when no slot is available.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4347
/*
 * Build and queue the firmware set-speed request. Unlike create/delete,
 * a full request queue is not retried — the update is simply dropped
 * (only a trace is emitted) and BFA_FALSE returned.
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4373
4374
4375
5fbe25c7 4376/*
a36c61f9
KG
4377 * bfa_rport_public
4378 */
4379
5fbe25c7 4380/*
a36c61f9
KG
4381 * Rport interrupt processing.
4382 */
/*
 * Rport interrupt processing: demultiplex firmware-to-host rport
 * messages by message id, look up the owning rport from the handle
 * carried in the message, and feed the corresponding event into its
 * state machine.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		/* remember the firmware-side handle for later requests */
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		/* stash the raw message; the SM decodes it in-place */
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
4419
4420
4421
5fbe25c7 4422/*
a36c61f9
KG
4423 * bfa_rport_api
4424 */
4425
4426struct bfa_rport_s *
4427bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
4428{
4429 struct bfa_rport_s *rp;
4430
4431 rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
4432
4433 if (rp == NULL)
4434 return NULL;
4435
4436 rp->bfa = bfa;
4437 rp->rport_drv = rport_drv;
f7f73812 4438 memset(&rp->stats, 0, sizeof(rp->stats));
a36c61f9
KG
4439
4440 bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
4441 bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
4442
4443 return rp;
4444}
4445
a36c61f9
KG
4446void
4447bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
4448{
4449 bfa_assert(rport_info->max_frmsz != 0);
4450
5fbe25c7 4451 /*
a36c61f9
KG
4452 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
4453 * responses. Default to minimum size.
4454 */
4455 if (rport_info->max_frmsz == 0) {
4456 bfa_trc(rport->bfa, rport->rport_tag);
4457 rport_info->max_frmsz = FC_MIN_PDUSZ;
4458 }
4459
6a18b167 4460 rport->rport_info = *rport_info;
a36c61f9
KG
4461 bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
4462}
4463
a36c61f9
KG
/*
 * Record a new operating speed for the rport and tell the state
 * machine to propagate it to firmware. A concrete speed is required —
 * zero and AUTO are caller errors (asserted).
 */
void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
{
	bfa_assert(speed != 0);
	bfa_assert(speed != BFA_PORT_SPEED_AUTO);

	rport->rport_info.speed = speed;
	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
4473
a36c61f9 4474
5fbe25c7 4475/*
a36c61f9
KG
4476 * SGPG related functions
4477 */
4478
5fbe25c7 4479/*
a36c61f9
KG
4480 * Compute and return memory needed by FCP(im) module.
4481 */
4482static void
4483bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
4484 u32 *dm_len)
4485{
4486 if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
4487 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
4488
4489 *km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
4490 *dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
4491}
4492
4493
/*
 * Module attach: align the DMA region to the SG-page size, carve out
 * the host descriptor array (KVA) and firmware SG-page array (DMA),
 * pair them up one-to-one, and queue every page on the free list.
 * The extra page reserved by bfa_sgpg_meminfo() absorbs the alignment
 * offset applied here.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	int i;
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;

	/* union lets the physical address be split into the bfi form */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
	mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
	align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
	mod->sgpg_arr_pa += align_len;
	mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
						align_len);
	mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
						align_len);

	hsgpg = mod->hsgpg_arr;
	sgpg = mod->sgpg_arr;
	sgpg_pa.pa = mod->sgpg_arr_pa;
	mod->free_sgpgs = mod->num_sgpgs;

	/* base must be SG-page aligned after the adjustment above */
	bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));

	for (i = 0; i < mod->num_sgpgs; i++) {
		memset(hsgpg, 0, sizeof(*hsgpg));
		memset(sgpg, 0, sizeof(*sgpg));

		hsgpg->sgpg = sgpg;
		/* firmware consumes the address in little-endian form */
		sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
		hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
		list_add_tail(&hsgpg->qe, &mod->sgpg_q);

		hsgpg++;
		sgpg++;
		sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
	}

	/* advance the meminfo cursors past what was consumed */
	bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
	bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
	bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
}
4548
/* Module detach: intentionally empty — nothing to release. */
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}
4553
/* Module start: intentionally empty — no start-time work needed. */
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}
4558
/* Module stop: intentionally empty — no stop-time work needed. */
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}
4563
/* IOC disable: intentionally empty — SG pages hold no firmware state. */
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
4568
a36c61f9
KG
4569bfa_status_t
4570bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
4571{
4572 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4573 struct bfa_sgpg_s *hsgpg;
4574 int i;
4575
4576 bfa_trc_fp(bfa, nsgpgs);
4577
4578 if (mod->free_sgpgs < nsgpgs)
4579 return BFA_STATUS_ENOMEM;
4580
4581 for (i = 0; i < nsgpgs; i++) {
4582 bfa_q_deq(&mod->sgpg_q, &hsgpg);
4583 bfa_assert(hsgpg);
4584 list_add_tail(&hsgpg->qe, sgpg_q);
4585 }
4586
4587 mod->free_sgpgs -= nsgpgs;
4588 return BFA_STATUS_OK;
4589}
4590
/*
 * Return @nsgpg SG pages from @sgpg_q to the free pool, then service as
 * many waiters as the replenished pool allows. A waiter may be granted
 * fewer pages than it asked for; it stays queued (with its remaining
 * count) until fully satisfied, at which point its callback fires.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	bfa_trc_fp(bfa, nsgpg);

	mod->free_sgpgs += nsgpg;
	bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);

	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/*
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		/* grant the smaller of what is free and what is wanted */
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
4624
/*
 * Queue a wait element for 'nsgpg' SG pages.  Whatever is currently free
 * is handed over immediately; the element then waits on sgpg_wait_q and
 * its cbfn is invoked (from bfa_sgpg_mfree()) once the remainder has been
 * supplied.  Callers must only wait when demand exceeds the free count.
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	bfa_assert(nsgpg > 0);
	bfa_assert(nsgpg > mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/*
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/*
		 * no one else is waiting for SGPG
		 */
		bfa_assert(list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
4650
4651void
4652bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
4653{
4654 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4655
4656 bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
4657 list_del(&wqe->qe);
4658
4659 if (wqe->nsgpg_total != wqe->nsgpg)
4660 bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
4661 wqe->nsgpg_total - wqe->nsgpg);
4662}
4663
4664void
4665bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
4666 void *cbarg)
4667{
4668 INIT_LIST_HEAD(&wqe->sgpg_q);
4669 wqe->cbfn = cbfn;
4670 wqe->cbarg = cbarg;
4671}
4672
5fbe25c7 4673/*
a36c61f9
KG
4674 * UF related functions
4675 */
4676/*
4677 *****************************************************************************
4678 * Internal functions
4679 *****************************************************************************
4680 */
4681static void
4682__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
4683{
4684 struct bfa_uf_s *uf = cbarg;
4685 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
4686
4687 if (complete)
4688 ufm->ufrecv(ufm->cbarg, uf);
4689}
4690
/*
 * Carve out the DMA-able region for the UF posted buffers and zero it.
 * Records both the kernel virtual and the physical base address, then
 * advances the meminfo cursors past the claimed (alignment-rounded) size.
 */
static void
claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	u32 uf_pb_tot_sz;

	ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
	ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
	/* round up so the next claimant stays DMA-aligned */
	uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
							BFA_DMA_ALIGN_SZ);

	bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
	bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;

	memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
}
4706
/*
 * Pre-build one BFI_UF_H2I_BUF_POST firmware message per UF buffer so
 * that posting a buffer later (bfa_uf_post()) is a plain memcpy into the
 * request queue.  Each message carries two SGEs: a data SGE addressing
 * the buffer's DMA location and a zero-address pagelen SGE.
 */
static void
claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	struct bfi_uf_buf_post_s *uf_bp_msg;
	struct bfi_sge_s *sge;
	union bfi_addr_u sga_zero = { {0} };
	u16 i;
	u16 buf_len;

	ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
	uf_bp_msg = ufm->uf_buf_posts;

	for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
	     i++, uf_bp_msg++) {
		memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));

		uf_bp_msg->buf_tag = i;
		buf_len = sizeof(struct bfa_uf_buf_s);
		/* firmware expects lengths in big-endian */
		uf_bp_msg->buf_len = cpu_to_be16(buf_len);
		bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
			    bfa_lpuid(ufm->bfa));

		/* SGE 0: the receive buffer itself, last data SGE */
		sge = uf_bp_msg->sge;
		sge[0].sg_len = buf_len;
		sge[0].flags = BFI_SGE_DATA_LAST;
		bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
		bfa_sge_to_be(sge);

		/* SGE 1: pagelen terminator with a zero address */
		sge[1].sg_len = buf_len;
		sge[1].flags = BFI_SGE_PGDLEN;
		sge[1].sga = sga_zero;
		bfa_sge_to_be(&sge[1]);
	}

	/*
	 * advance pointer beyond consumed memory
	 */
	bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
}
4746
/*
 * Claim kernel virtual memory for the UF descriptor array and seed the
 * free queue.  Descriptor i is bound by tag to posted buffer i in the
 * DMA region previously claimed by claim_uf_pbs().
 */
static void
claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	u16 i;
	struct bfa_uf_s *uf;

	/*
	 * Claim block of memory for UF list
	 */
	ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);

	/*
	 * Initialize UFs and queue it in UF free queue
	 */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		uf->pb_len = sizeof(struct bfa_uf_buf_s);
		/* kva/pa of the matching posted buffer, indexed by tag */
		uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/*
	 * advance memory pointer
	 */
	bfa_meminfo_kva(mi) = (u8 *) uf;
}
4776
/*
 * Claim all memory needed by the UF module.  Order matters: the DMA
 * region must be claimed first because claim_ufs() and
 * claim_uf_post_msgs() derive per-buffer addresses from uf_pbs_kva/pa.
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	claim_uf_pbs(ufm, mi);
	claim_ufs(ufm, mi);
	claim_uf_post_msgs(ufm, mi);
}
4784
4785static void
4786bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
4787{
4788 u32 num_ufs = cfg->fwcfg.num_uf_bufs;
4789
4790 /*
4791 * dma-able memory for UF posted bufs
4792 */
4793 *dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
4794 BFA_DMA_ALIGN_SZ);
4795
4796 /*
4797 * kernel Virtual memory for UFs and UF buf post msg copies
4798 */
4799 *ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
4800 *ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
4801}
4802
/*
 * Attach-time initialization of the UF module: zero the module state,
 * record the configured number of UF buffers, set up the free and posted
 * queues, and claim the memory sized by bfa_uf_meminfo().
 */
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	      struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
	ufm->bfa = bfa;
	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
	INIT_LIST_HEAD(&ufm->uf_free_q);
	INIT_LIST_HEAD(&ufm->uf_posted_q);

	uf_mem_claim(ufm, meminfo);
}
4817
/* Module detach hook for the UF module; no cleanup required. */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
4822
4823static struct bfa_uf_s *
4824bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
4825{
4826 struct bfa_uf_s *uf;
4827
4828 bfa_q_deq(&uf_mod->uf_free_q, &uf);
4829 return uf;
4830}
4831
/* Return a UF descriptor to the tail of the free queue. */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
4837
/*
 * Post one UF receive buffer to firmware by copying the pre-built
 * BUF_POST message for this buffer's tag into the FCXP request queue.
 * On success the UF moves onto the posted queue; returns
 * BFA_STATUS_FAILED when no request queue element is available.
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
		      sizeof(struct bfi_uf_buf_post_s));
	/* hand the filled element to firmware */
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);

	bfa_trc(ufm->bfa, uf->uf_tag);

	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}
4856
4857static void
4858bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
4859{
4860 struct bfa_uf_s *uf;
4861
4862 while ((uf = bfa_uf_get(uf_mod)) != NULL) {
4863 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
4864 break;
4865 }
4866}
4867
4868static void
4869uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
4870{
4871 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4872 u16 uf_tag = m->buf_tag;
4873 struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
4874 struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
4875 u8 *buf = &uf_buf->d[0];
4876 struct fchs_s *fchs;
4877
ba816ea8
JH
4878 m->frm_len = be16_to_cpu(m->frm_len);
4879 m->xfr_len = be16_to_cpu(m->xfr_len);
a36c61f9
KG
4880
4881 fchs = (struct fchs_s *)uf_buf;
4882
4883 list_del(&uf->qe); /* dequeue from posted queue */
4884
4885 uf->data_ptr = buf;
4886 uf->data_len = m->xfr_len;
4887
4888 bfa_assert(uf->data_len >= sizeof(struct fchs_s));
4889
4890 if (uf->data_len == sizeof(struct fchs_s)) {
4891 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
4892 uf->data_len, (struct fchs_s *)buf);
4893 } else {
4894 u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
4895 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
4896 BFA_PL_EID_RX, uf->data_len,
4897 (struct fchs_s *)buf, pld_w0);
4898 }
4899
4900 if (bfa->fcs)
4901 __bfa_cb_uf_recv(uf, BFA_TRUE);
4902 else
4903 bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
4904}
4905
/* Module stop hook for the UF module; nothing to do at stop. */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
4910
/*
 * IOC is going down: reclaim every UF that was posted to firmware back
 * onto the free queue, since no frames will be delivered for them.
 * Uses the _safe iterator because entries are removed while walking.
 */
static void
bfa_uf_iocdisable(struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_uf_s *uf;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
		uf = (struct bfa_uf_s *) qe;
		list_del(&uf->qe);
		bfa_uf_put(ufm, uf);
	}
}
4924
/* Module start hook: post all free UF receive buffers to firmware. */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
4930
/*
 * Register handler for all unsolicited receive frames.
 *
 * @param[in]	bfa		BFA instance
 * @param[in]	ufrecv	receive handler function
 * @param[in]	cbarg	receive handler arg
 */
4938void
4939bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
4940{
4941 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4942
4943 ufm->ufrecv = ufrecv;
4944 ufm->cbarg = cbarg;
4945}
4946
5fbe25c7 4947/*
a36c61f9
KG
4948 * Free an unsolicited frame back to BFA.
4949 *
4950 * @param[in] uf unsolicited frame to be freed
4951 *
4952 * @return None
4953 */
4954void
4955bfa_uf_free(struct bfa_uf_s *uf)
4956{
4957 bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
4958 bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
4959}
4960
4961
4962
5fbe25c7 4963/*
a36c61f9
KG
4964 * uf_pub BFA uf module public functions
4965 */
4966void
4967bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
4968{
4969 bfa_trc(bfa, msg->mhdr.msg_id);
4970
4971 switch (msg->mhdr.msg_id) {
4972 case BFI_UF_I2H_FRM_RCVD:
4973 uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
4974 break;
4975
4976 default:
4977 bfa_trc(bfa, msg->mhdr.msg_id);
4978 bfa_assert(0);
4979 }
4980}
4981
4982