/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2012-2019 Solarflare Communications Inc.
 */

#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_MON_STATS
#include "mcdi_mon.h"
#endif

#if EFX_OPTS_EF10()

/*
 * A non-interrupting event queue requires an interrupting event queue to
 * refer to for wake-up events, even if wake-ups are never used.
 * It may even be a non-allocated event queue.
 */
#define EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX (0)

static __checkReturn boolean_t
ef10_ev_rx(
	__in efx_evq_t *eep,
	__in efx_qword_t *eqp,
	__in const efx_ev_callbacks_t *eecp,
	__in_opt void *arg);

static __checkReturn boolean_t
ef10_ev_tx(
	__in efx_evq_t *eep,
	__in efx_qword_t *eqp,
	__in const efx_ev_callbacks_t *eecp,
	__in_opt void *arg);

static __checkReturn boolean_t
ef10_ev_driver(
	__in efx_evq_t *eep,
	__in efx_qword_t *eqp,
	__in const efx_ev_callbacks_t *eecp,
	__in_opt void *arg);

static __checkReturn boolean_t
ef10_ev_drv_gen(
	__in efx_evq_t *eep,
	__in efx_qword_t *eqp,
	__in const efx_ev_callbacks_t *eecp,
	__in_opt void *arg);

static __checkReturn boolean_t
ef10_ev_mcdi(
	__in efx_evq_t *eep,
	__in efx_qword_t *eqp,
	__in const efx_ev_callbacks_t *eecp,
	__in_opt void *arg);


static __checkReturn efx_rc_t
efx_mcdi_set_evq_tmr(
	__in efx_nic_t *enp,
	__in uint32_t instance,
	__in uint32_t mode,
	__in uint32_t timer_ns)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_EVQ_TMR_IN_LEN,
		MC_CMD_SET_EVQ_TMR_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_SET_EVQ_TMR;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;

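	/*
	 * Note: the load and reload values are programmed with the same
	 * period, so the timer re-arms with an unchanged interval after
	 * each expiry.
	 */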
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static __checkReturn efx_rc_t
efx_mcdi_init_evq(
	__in efx_nic_t *enp,
	__in unsigned int instance,
	__in efsys_mem_t *esmp,
	__in size_t nevs,
	__in uint32_t irq,
	__in uint32_t us,
	__in uint32_t flags,
	__in boolean_t low_latency)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload,
		MC_CMD_INIT_EVQ_IN_LEN(EF10_EVQ_MAXNBUFS),
		MC_CMD_INIT_EVQ_OUT_LEN);
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	boolean_t interrupting;
	int ev_cut_through;
	efx_rc_t rc;

	npages = efx_evq_nbufs(enp, nevs);
	if (npages > EF10_EVQ_MAXNBUFS) {
		rc = EINVAL;
		goto fail1;
	}

	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	/*
	 * On Huntington, RX and TX event batching can only be requested
	 * together (even if the datapath firmware doesn't actually support
	 * RX batching). If event cut through is enabled, no RX batching
	 * will occur.
	 *
	 * So always enable RX and TX event batching, and enable event cut
	 * through if we want low latency operation.
	 */
	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		ev_cut_through = low_latency ? 1 : 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		ev_cut_through = 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		ev_cut_through = 1;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
	    INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
	    INIT_EVQ_IN_FLAG_RX_MERGE, 1,
	    INIT_EVQ_IN_FLAG_TX_MERGE, 1);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

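	/*
	 * Populate the DMA address table: one 64-bit bus address per
	 * EFX_BUF_SIZE buffer backing the queue, split into low and high
	 * 32-bit words as the MCDI payload expects.
	 */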
	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


static __checkReturn efx_rc_t
efx_mcdi_init_evq_v2(
	__in efx_nic_t *enp,
	__in unsigned int instance,
	__in efsys_mem_t *esmp,
	__in size_t nevs,
	__in uint32_t irq,
	__in uint32_t us,
	__in uint32_t flags)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload,
		MC_CMD_INIT_EVQ_V2_IN_LEN(EF10_EVQ_MAXNBUFS),
		MC_CMD_INIT_EVQ_V2_OUT_LEN);
	boolean_t interrupting;
	unsigned int evq_type;
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	npages = efx_evq_nbufs(enp, nevs);
	if (npages > EF10_EVQ_MAXNBUFS) {
		rc = EINVAL;
		goto fail1;
	}

	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
	    INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

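	/*
	 * The V2 response reports the flags the firmware actually chose,
	 * which is informative when the AUTO type lets the firmware pick
	 * the settings; record them with a probe for diagnostics.
	 */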
	EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
	    MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static __checkReturn efx_rc_t
efx_mcdi_fini_evq(
	__in efx_nic_t *enp,
	__in uint32_t instance)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN,
		MC_CMD_FINI_EVQ_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_FINI_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);

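	/*
	 * Use the quiet MCDI variant here since an EALREADY response is
	 * expected after an MC reboot (see below) and should not be logged
	 * as an error.
	 */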
	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	/*
	 * EALREADY is not an error, but indicates that the MC has rebooted
	 * and that the EVQ has already been destroyed.
	 */
	if (rc != EALREADY)
		EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


	__checkReturn efx_rc_t
ef10_ev_init(
	__in efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

	void
ef10_ev_fini(
	__in efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn efx_rc_t
ef10_ev_qcreate(
	__in efx_nic_t *enp,
	__in unsigned int index,
	__in efsys_mem_t *esmp,
	__in size_t ndescs,
	__in uint32_t id,
	__in uint32_t us,
	__in uint32_t flags,
	__in efx_evq_t *eep)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t irq;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */

	if (index >= encp->enc_evq_limit) {
		rc = EINVAL;
		goto fail1;
	}

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail2;
	}

	/*
	 * NO_CONT_EV mode is only requested of the firmware when creating
	 * receive queues, but here it needs to be specified at event queue
	 * creation, as the event handler needs to know which format is in
	 * use.
	 *
	 * If EFX_EVQ_FLAGS_NO_CONT_EV is specified, all receive queues for
	 * this event queue will be created in NO_CONT_EV mode.
	 *
	 * See SF-109306-TC 5.11 "Events for RXQs in NO_CONT_EV mode".
	 */
	if (flags & EFX_EVQ_FLAGS_NO_CONT_EV) {
		if (enp->en_nic_cfg.enc_no_cont_ev_mode_supported == B_FALSE) {
			rc = EINVAL;
			goto fail3;
		}
	}

	/* Set up the handler table */
	eep->ee_rx = ef10_ev_rx;
	eep->ee_tx = ef10_ev_tx;
	eep->ee_driver = ef10_ev_driver;
	eep->ee_drv_gen = ef10_ev_drv_gen;
	eep->ee_mcdi = ef10_ev_mcdi;

	/* Set up the event queue */
	/* INIT_EVQ expects function-relative vector number */
	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
		irq = index;
	} else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
		irq = index;
		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else {
		irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
	}
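	/*
	 * Note: a non-interrupting queue still needs a valid wake-up vector,
	 * so it refers to the always-interrupting event queue (see the
	 * comment for EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX above).
	 */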

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */

	if (encp->enc_init_evq_v2_supported) {
		/*
		 * On Medford the low latency license is required to enable RX
		 * and event cut through and to disable RX batching. If the
		 * event queue type in flags is auto, we let the firmware
		 * decide the settings to use. If the adapter has a low
		 * latency license, it will choose the best settings for low
		 * latency, otherwise it will choose the best settings for
		 * throughput.
		 */
		rc = efx_mcdi_init_evq_v2(enp, index, esmp, ndescs, irq, us,
		    flags);
		if (rc != 0)
			goto fail4;
	} else {
		/*
		 * On Huntington we need to specify the settings to use.
		 * If the event queue type in flags is auto, we favour
		 * throughput if the adapter is running virtualization-
		 * supporting firmware (i.e. the full-featured firmware
		 * variant) and latency otherwise. The Ethernet Virtual
		 * Bridging capability is used to make this decision. (Note
		 * though that the low latency firmware variant is also best
		 * for throughput, and the corresponding type should be
		 * specified to choose it.)
		 */
		boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
		rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us,
		    flags, low_latency);
		if (rc != 0)
			goto fail5;
	}

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	void
ef10_ev_qdestroy(
	__in efx_evq_t *eep)
{
	efx_nic_t *enp = eep->ee_enp;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));

	(void) efx_mcdi_fini_evq(enp, eep->ee_index);
}

	__checkReturn efx_rc_t
ef10_ev_qprime(
	__in efx_evq_t *eep,
	__in unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	rptr = count & eep->ee_mask;

	if (enp->en_nic_cfg.enc_bug35388_workaround) {
		EFX_STATIC_ASSERT(EF10_EVQ_MINNEVS >
		    (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_STATIC_ASSERT(EF10_EVQ_MAXNEVS <
		    (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

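		/*
		 * The workaround writes the read pointer in two halves via
		 * the indirect register: the high bits first, then the low
		 * bits.
		 */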
		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
		    ERF_DD_EVQ_IND_RPTR,
		    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
		    ERF_DD_EVQ_IND_RPTR,
		    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);
	} else {
		EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
		EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
		    &dword, B_FALSE);
	}

	return (0);
}

static __checkReturn efx_rc_t
efx_mcdi_driver_event(
	__in efx_nic_t *enp,
	__in uint32_t evq,
	__in efx_qword_t data)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRIVER_EVENT_IN_LEN,
		MC_CMD_DRIVER_EVENT_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_DRIVER_EVENT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
	    EFX_QWORD_FIELD(data, EFX_DWORD_0));
	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
	    EFX_QWORD_FIELD(data, EFX_DWORD_1));

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	void
ef10_ev_qpost(
	__in efx_evq_t *eep,
	__in uint16_t data)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_qword_t event;

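	/*
	 * A software event is injected via the MCDI DRIVER_EVENT command
	 * rather than a doorbell write; only 16 bits of user data are
	 * carried (see the range check in ef10_ev_drv_gen()).
	 */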
	EFX_POPULATE_QWORD_3(event,
	    ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
	    ESF_DZ_DRV_SUB_CODE, 0,
	    ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);

	(void) efx_mcdi_driver_event(enp, eep->ee_index, event);
}

	__checkReturn efx_rc_t
ef10_ev_qmoderate(
	__in efx_evq_t *eep,
	__in unsigned int us)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_dword_t dword;
	uint32_t mode;
	efx_rc_t rc;

	/* Check that hardware and MCDI use the same timer MODE values */
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail1;
	}

	/* If the value is zero then disable the timer */
	if (us == 0) {
		mode = FFE_CZ_TIMER_MODE_DIS;
	} else {
		mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
	}

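	/*
	 * With the bug61265 workaround the timer must be programmed via an
	 * MCDI request (which takes the period in nanoseconds); otherwise
	 * it is written directly to the event queue timer register.
	 */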
	if (encp->enc_bug61265_workaround) {
		uint32_t ns = us * 1000;

		rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
		if (rc != 0)
			goto fail2;
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		if (encp->enc_bug35388_workaround) {
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DD_EVQ_IND_TIMER_FLAGS,
			    EFE_DD_EVQ_IND_TIMER_FLAGS,
			    ERF_DD_EVQ_IND_TIMER_MODE, mode,
			    ERF_DD_EVQ_IND_TIMER_VAL, ticks);
			EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT,
			    eep->ee_index, &dword, 0);
		} else {
			/*
			 * NOTE: The TMR_REL field introduced in Medford2 is
			 * ignored on earlier EF10 controllers. See bug66418
			 * comment 9 for details.
			 */
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DZ_TC_TIMER_MODE, mode,
			    ERF_DZ_TC_TIMER_VAL, ticks,
			    ERF_FZ_TC_TMR_REL_VAL, ticks);
			EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG,
			    eep->ee_index, &dword, 0);
		}
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


#if EFSYS_OPT_QSTATS
	void
ef10_ev_qstats_update(
	__in efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < EV_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
		eep->ee_stat[id] = 0;
	}
}
#endif /* EFSYS_OPT_QSTATS */

#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER

static __checkReturn boolean_t
ef10_ev_rx_packed_stream(
	__in efx_evq_t *eep,
	__in efx_qword_t *eqp,
	__in const efx_ev_callbacks_t *eecp,
	__in_opt void *arg)
{
	uint32_t label;
	uint32_t pkt_count_lbits;
	uint16_t flags;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int pkt_count;
	unsigned int current_id;
	boolean_t new_buffer;

	pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);

	flags = 0;

	eersp = &eep->ee_rxq_state[label];

	/*
	 * RX_DSC_PTR_LBITS has the least significant bits of the global
	 * (not per-buffer) packet counter. It is guaranteed that the
	 * maximum number of completed packets fits in the lbits mask.
	 * So, modulo lbits-mask arithmetic should be used to calculate
	 * the packet counter increment.
	 */
	pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_stream_npackets += pkt_count;

	if (new_buffer) {
		flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
#if EFSYS_OPT_RX_PACKED_STREAM
		/*
		 * If both packed stream and equal stride super-buffer
		 * modes are compiled in, in theory credits should be
		 * maintained for packed stream only, but right now
		 * these modes are not distinguished in the event queue
		 * Rx queue state and it is OK to increment the counter
		 * regardless (it might even be cheaper than branching
		 * since neighbouring structure members are updated as
		 * well).
		 */
		eersp->eers_rx_packed_stream_credits++;
#endif
		eersp->eers_rx_read_ptr++;
	}
	current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
		/* RX frame truncated */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE);
		flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;
		goto deliver;
	}

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
		EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
		EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
	should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,
	    flags);

	return (should_abort);
}

#endif /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */

static __checkReturn boolean_t
ef10_ev_rx(
	__in efx_evq_t *eep,
	__in efx_qword_t *eqp,
	__in const efx_ev_callbacks_t *eecp,
	__in_opt void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t size;
	uint32_t label;
	uint32_t mac_class;
	uint32_t eth_tag_class;
	uint32_t l3_class;
	uint32_t l4_class;
	uint32_t next_read_lbits;
	uint16_t flags;
	boolean_t cont;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int desc_count;
	unsigned int last_used_id;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors, or hardware not available */
	if (enp->en_reset_flags &
	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
		return (B_FALSE);

	/* Basic packet information */
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	eersp = &eep->ee_rxq_state[label];

#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	/*
	 * Packed stream events are very different,
	 * so handle them separately
	 */
	if (eersp->eers_rx_packed_stream)
		return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));
#endif

	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);

	/*
	 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
	 * 2 bits wide on Medford2. Check it is safe to use the Medford2 field
	 * and values for all EF10 controllers.
	 */
	EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN);
	EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
	EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
	EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN);

	l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}
	flags = 0;

	if (cont != 0) {
		/*
		 * This may be part of a scattered frame, or it may be a
		 * truncated frame if scatter is disabled on this RXQ.
		 * Overlength frames can be received if e.g. a VF is configured
		 * for 1500 MTU but connected to a port set to 9000 MTU
		 * (see bug56567).
		 * FIXME: There is not yet any driver that supports scatter on
		 * Huntington. Scatter support is required for OSX.
		 */
		flags |= EFX_PKT_CONT;
	}

	if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
		flags |= EFX_PKT_UNICAST;

	/*
	 * Increment the count of descriptors read.
	 *
	 * In NO_CONT_EV mode, RX_DSC_PTR_LBITS is actually a packet count,
	 * but when scatter is disabled, there is only one descriptor per
	 * packet and so it can be treated the same.
	 *
	 * TODO: Support scatter in NO_CONT_EV mode.
	 */
	desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_read_ptr += desc_count;

	/* Calculate the index of the last descriptor consumed */
	last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;

	if (eep->ee_flags & EFX_EVQ_FLAGS_NO_CONT_EV) {
		if (desc_count > 1)
			EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);

		/* Always read the length from the prefix in NO_CONT_EV mode. */
		flags |= EFX_PKT_PREFIX_LEN;

		/*
		 * Check for an aborted scatter, signalled by the ABORT bit in
		 * NO_CONT_EV mode. The ABORT bit was not used before
		 * NO_CONT_EV mode was added, as it was broken in Huntington
		 * silicon.
		 */
		if (EFX_QWORD_FIELD(*eqp, ESF_EZ_RX_ABORT) != 0) {
			flags |= EFX_DISCARD;
			goto deliver;
		}
	} else if (desc_count > 1) {
		/*
		 * FIXME: add error checking to make sure this is a batched
		 * event. This could also be an aborted scatter, see Bug36629.
		 */
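		/*
		 * With merged (batched) events the event's byte count does
		 * not describe each individual packet, so take the lengths
		 * from the RX packet prefix instead.
		 */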
		EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
		flags |= EFX_PKT_PREFIX_LEN;
	}

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
		/* RX frame truncated */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		/*
		 * Hardware parse failed due to malformed headers
		 * or headers that are too long for the parser.
		 * Headers and checksums must be validated by the host.
		 */
		EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE);
		goto deliver;
	}

	if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
	    (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
		flags |= EFX_PKT_VLAN_TAGGED;
	}

	switch (l3_class) {
	case ESE_DZ_L3_CLASS_IP4:
	case ESE_DZ_L3_CLASS_IP4_FRAG:
		flags |= EFX_PKT_IPV4;
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_IPV4;
		}

		/*
		 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
		 * only 2 bits wide on Medford2. Check it is safe to use the
		 * Medford2 field and values for all EF10 controllers.
		 */
		EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
		    ESF_DE_RX_L4_CLASS_LBN);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
		    ESE_DE_L4_CLASS_UNKNOWN);

		if (l4_class == ESE_FZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
		}
		break;

	case ESE_DZ_L3_CLASS_IP6:
	case ESE_DZ_L3_CLASS_IP6_FRAG:
		flags |= EFX_PKT_IPV6;

		/*
		 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
		 * only 2 bits wide on Medford2. Check it is safe to use the
		 * Medford2 field and values for all EF10 controllers.
		 */
		EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
		    ESF_DE_RX_L4_CLASS_LBN);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
		    ESE_DE_L4_CLASS_UNKNOWN);

		if (l4_class == ESE_FZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
		}
		break;

	default:
		EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
		break;
	}

	if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_TCPUDP;
		}
	}

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx != NULL);
	should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);

	return (should_abort);
}

static __checkReturn boolean_t
ef10_ev_tx(
	__in efx_evq_t *eep,
	__in efx_qword_t *eqp,
	__in const efx_ev_callbacks_t *eecp,
	__in_opt void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t id;
	uint32_t label;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_TX);

	/* Discard events after RXQ/TXQ errors, or hardware not available */
	if (enp->en_reset_flags &
	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
		return (B_FALSE);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}

	/* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
	id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);

	EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);

	EFSYS_ASSERT(eecp->eec_tx != NULL);
	should_abort = eecp->eec_tx(arg, label, id);

	return (should_abort);
}

static __checkReturn boolean_t
ef10_ev_driver(
	__in efx_evq_t *eep,
	__in efx_qword_t *eqp,
	__in const efx_ev_callbacks_t *eecp,
	__in_opt void *arg)
{
	unsigned int code;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
	should_abort = B_FALSE;

	code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
	switch (code) {
	case ESE_DZ_DRV_TIMER_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);

		EFSYS_ASSERT(eecp->eec_timer != NULL);
		should_abort = eecp->eec_timer(arg, id);
		break;
	}

	case ESE_DZ_DRV_WAKE_UP_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);

		EFSYS_ASSERT(eecp->eec_wake_up != NULL);
		should_abort = eecp->eec_wake_up(arg, id);
		break;
	}

	case ESE_DZ_DRV_START_UP_EV:
		EFSYS_ASSERT(eecp->eec_initialized != NULL);
		should_abort = eecp->eec_initialized(arg);
		break;

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

static __checkReturn boolean_t
ef10_ev_drv_gen(
	__in efx_evq_t *eep,
	__in efx_qword_t *eqp,
	__in const efx_ev_callbacks_t *eecp,
	__in_opt void *arg)
{
	uint32_t data;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
	should_abort = B_FALSE;

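	/*
	 * Only 16 bits of user data are posted by ef10_ev_qpost(); a larger
	 * value indicates a corrupt event, so abort event processing.
	 */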
	data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
	if (data >= ((uint32_t)1 << 16)) {
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		return (B_TRUE);
	}

	EFSYS_ASSERT(eecp->eec_software != NULL);
	should_abort = eecp->eec_software(arg, (uint16_t)data);

	return (should_abort);
}

static __checkReturn boolean_t
ef10_ev_mcdi(
	__in efx_evq_t *eep,
	__in efx_qword_t *eqp,
	__in const efx_ev_callbacks_t *eecp,
	__in_opt void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	unsigned int code;
	boolean_t should_abort = B_FALSE;

	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		efx_mcdi_ev_death(enp, EINTR);
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(enp,
		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
		break;

#if EFSYS_OPT_MCDI_PROXY_AUTH
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		/*
		 * This event notifies a function that an authorization request
		 * has been processed. If the request was authorized then the
		 * function can now re-send the original MCDI request.
		 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
		 */
		efx_mcdi_ev_proxy_response(enp,
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER
	case MCDI_EVENT_CODE_PROXY_REQUEST:
		efx_mcdi_ev_proxy_request(enp,
		    MCDI_EV_FIELD(eqp, PROXY_REQUEST_BUFF_INDEX));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */

	case MCDI_EVENT_CODE_LINKCHANGE: {
		efx_link_mode_t link_mode;

		ef10_phy_link_ev(enp, eqp, &link_mode);
		should_abort = eecp->eec_link_change(arg, link_mode);
		break;
	}

	case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
		efx_mon_stat_t id;
		efx_mon_stat_value_t value;
		efx_rc_t rc;

		/* Decode monitor stat for MCDI sensor (if supported) */
		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
			/* Report monitor stat change */
			should_abort = eecp->eec_monitor(arg, id, value);
		} else if (rc == ENOTSUP) {
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_UNKNOWN_SENSOREVT,
			    MCDI_EV_FIELD(eqp, DATA));
		} else {
			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
		}
#endif
		break;
	}

	case MCDI_EVENT_CODE_SCHEDERR:
		/* Informational only */
		break;

	case MCDI_EVENT_CODE_REBOOT:
		/* Falcon/Siena only (should not be seen with Huntington). */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MC_REBOOT:
		/* MC_REBOOT event is used for Huntington (EF10) and later. */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
		if (eecp->eec_mac_stats != NULL) {
			eecp->eec_mac_stats(arg,
			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
		}
#endif
		break;

	case MCDI_EVENT_CODE_FWALERT: {
		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_FWALERT_SRAM,
			    MCDI_EV_FIELD(eqp, FWALERT_DATA));
		else
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_UNKNOWN_FWALERT,
			    MCDI_EV_FIELD(eqp, DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_ERR: {
		/*
		 * After a TXQ error is detected, firmware sends a TX_ERR event.
		 * This may be followed by TX completions (which we discard),
		 * and then finally by a TX_FLUSH event. Firmware destroys the
		 * TXQ automatically after sending the TX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_TXQ_ERR;

		EFSYS_PROBE2(tx_descq_err,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
		    MCDI_EV_FIELD(eqp, TX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_FLUSH: {
		uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);

		/*
		 * EF10 firmware sends two TX_FLUSH events: one to the txq's
		 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with TX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
		break;
	}

	case MCDI_EVENT_CODE_RX_ERR: {
		/*
		 * After an RXQ error is detected, firmware sends an RX_ERR
		 * event. This may be followed by RX events (which we discard),
		 * and then finally by an RX_FLUSH event. Firmware destroys the
		 * RXQ automatically after sending the RX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_RXQ_ERR;

		EFSYS_PROBE2(rx_descq_err,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
		    MCDI_EV_FIELD(eqp, RX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_RX_FLUSH: {
		uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);

		/*
		 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
		 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with RX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
		should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
		break;
	}

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

	void
ef10_ev_rxlabel_init(
	__in efx_evq_t *eep,
	__in efx_rxq_t *erp,
	__in unsigned int label,
	__in efx_rxq_type_t type)
{
	efx_evq_rxq_state_t *eersp;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM);
	boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER);
#endif

	_NOTE(ARGUNUSED(type))
	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);

#if EFSYS_OPT_RX_PACKED_STREAM
	/*
	 * For packed stream modes, the very first event will
	 * have the new buffer flag set, so the read pointer will be
	 * incremented, yielding the correct pointer. That results in
	 * simpler code than trying to detect a start-of-the-world
	 * condition in the event handler.
	 */
	eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
#else
	eersp->eers_rx_read_ptr = 0;
#endif
	eersp->eers_rx_mask = erp->er_mask;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	eersp->eers_rx_stream_npackets = 0;
	eersp->eers_rx_packed_stream = packed_stream || es_super_buffer;
#endif
#if EFSYS_OPT_RX_PACKED_STREAM
	if (packed_stream) {
		eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
		    EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
		    EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
		/*
		 * A single credit is allocated to the queue when it is
		 * started. It is immediately spent by the first packet,
		 * which has the NEW BUFFER flag set, but it must still be
		 * taken into account so as not to accidentally wrap around
		 * the maximum number of credits.
		 */
		eersp->eers_rx_packed_stream_credits--;
		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
		    EFX_RX_PACKED_STREAM_MAX_CREDITS);
	}
#endif
}

	void
ef10_ev_rxlabel_fini(
	__in efx_evq_t *eep,
	__in unsigned int label)
{
	efx_evq_rxq_state_t *eersp;

	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);

	eersp->eers_rx_read_ptr = 0;
	eersp->eers_rx_mask = 0;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	eersp->eers_rx_stream_npackets = 0;
	eersp->eers_rx_packed_stream = B_FALSE;
#endif
#if EFSYS_OPT_RX_PACKED_STREAM
	eersp->eers_rx_packed_stream_credits = 0;
#endif
}

#endif /* EFX_OPTS_EF10() */