/*
 * Copyright (c) 2012-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_MON_STATS
#include "mcdi_mon.h"
#endif

#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD

#if EFSYS_OPT_QSTATS
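/*
 * Note: the do { ... } while (B_FALSE) wrapper below makes the macro expand
 * to a single statement (so it composes safely with if/else), and the
 * _NOTE(CONSTANTCONDITION) annotation silences lint warnings about the
 * constant loop condition.
 */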
#define	EFX_EV_QSTAT_INCR(_eep, _stat)					\
	do {								\
		(_eep)->ee_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_EV_QSTAT_INCR(_eep, _stat)
#endif

/*
 * A non-interrupting event queue requires an interrupting event queue to
 * refer to for wake-up events, even if wake-ups are never used.
 * It may even be a non-allocated event queue.
 */
#define	EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX	(0)

static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

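/*
 * The MCDI helpers below all follow the same request pattern: a single
 * payload buffer sized to MAX(IN_LEN, OUT_LEN) is shared between request
 * and response, the inputs are populated with the MCDI_IN_* macros, the
 * command runs synchronously via efx_mcdi_execute(), and emr_rc plus
 * emr_out_length_used are checked before any output is trusted.
 */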
static	__checkReturn	efx_rc_t
efx_mcdi_set_evq_tmr(
	__in		efx_nic_t *enp,
	__in		uint32_t instance,
	__in		uint32_t mode,
	__in		uint32_t timer_ns)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,
			    MC_CMD_SET_EVQ_TMR_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_SET_EVQ_TMR;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;

	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_init_evq(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		boolean_t low_latency)
{
	efx_mcdi_req_t req;
	uint8_t payload[
	    MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	boolean_t interrupting;
	int ev_cut_through;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	/*
	 * On Huntington, RX and TX event batching can only be requested
	 * together (even if the datapath firmware doesn't actually support
	 * RX batching). If event cut-through is enabled, no RX batching will
	 * occur.
	 *
	 * So always enable RX and TX event batching, and enable event
	 * cut-through if we want low-latency operation.
	 */
	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		ev_cut_through = low_latency ? 1 : 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		ev_cut_through = 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		ev_cut_through = 1;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
	    INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
	    INIT_EVQ_IN_FLAG_RX_MERGE, 1,
	    INIT_EVQ_IN_FLAG_TX_MERGE, 1);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

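	/*
	 * The event queue backing memory is described to the MC as a table
	 * of buffer addresses, one qword per EFX_BUF_SIZE page, with each
	 * 64-bit address split into low and high dwords (see the loop
	 * below).
	 */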
	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

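/*
 * The v2 variant of INIT_EVQ passes the queue type through to the firmware
 * (rather than the driver choosing the cut-through and merge settings
 * itself), and the flags the firmware actually chose are reported back in
 * INIT_EVQ_V2_OUT_FLAGS, which is probed below for debugging.
 */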
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq_v2(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags)
{
	efx_mcdi_req_t req;
	uint8_t payload[
	    MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_V2_OUT_LEN)];
	boolean_t interrupting;
	unsigned int evq_type;
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
	    INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
	    MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_fini_evq(
	__in		efx_nic_t *enp,
	__in		uint32_t instance)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
			    MC_CMD_FINI_EVQ_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_FINI_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

			void
ef10_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn	efx_rc_t
ef10_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t n,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		efx_evq_t *eep)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t irq;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));

	if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
		rc = EINVAL;
		goto fail1;
	}

	if (index >= encp->enc_evq_limit) {
		rc = EINVAL;
		goto fail2;
	}

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail3;
	}

	/* Set up the handler table */
	eep->ee_rx = ef10_ev_rx;
	eep->ee_tx = ef10_ev_tx;
	eep->ee_driver = ef10_ev_driver;
	eep->ee_drv_gen = ef10_ev_drv_gen;
	eep->ee_mcdi = ef10_ev_mcdi;

	/* Set up the event queue */
	/* INIT_EVQ expects function-relative vector number */
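	/*
	 * Note: a non-interrupting queue has its wake-up events directed at
	 * EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX (queue 0), so queue 0
	 * itself is forced to be interrupting below.
	 */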
	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
		irq = index;
	} else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
		irq = index;
		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else {
		irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
	}

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */

	if (encp->enc_init_evq_v2_supported) {
		/*
		 * On Medford the low-latency license is required to enable
		 * RX and event cut-through and to disable RX batching. If
		 * the event queue type in flags is auto, we let the firmware
		 * decide the settings to use. If the adapter has a
		 * low-latency license, it will choose the best settings for
		 * low latency, otherwise it will choose the best settings
		 * for throughput.
		 */
		rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us, flags);
		if (rc != 0)
			goto fail4;
	} else {
		/*
		 * On Huntington we need to specify the settings to use.
		 * If the event queue type in flags is auto, we favour
		 * throughput if the adapter is running virtualization-
		 * supporting firmware (i.e. the full-featured firmware
		 * variant), and latency otherwise. The Ethernet Virtual
		 * Bridging capability is used to make this decision. (Note,
		 * though, that the low-latency firmware variant is also best
		 * for throughput, and the corresponding type should be
		 * specified to choose it.)
		 */
		boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
		rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, flags,
		    low_latency);
		if (rc != 0)
			goto fail5;
	}

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_ev_qdestroy(
	__in		efx_evq_t *eep)
{
	efx_nic_t *enp = eep->ee_enp;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
	    enp->en_family == EFX_FAMILY_MEDFORD);

	(void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index);
}

	__checkReturn	efx_rc_t
ef10_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	rptr = count & eep->ee_mask;

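	/*
	 * With the bug35388 workaround the read pointer is wider than the
	 * ERF_DD_EVQ_IND_RPTR field, so it is written in two halves (high
	 * bits first, then low bits) through the indirect register table
	 * instead of with a single write to the EVQ RPTR register.
	 */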
	if (enp->en_nic_cfg.enc_bug35388_workaround) {
		EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
		    (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
		    (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
		    ERF_DD_EVQ_IND_RPTR,
		    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
		    ERF_DD_EVQ_IND_RPTR,
		    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);
	} else {
		EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
		EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
		    &dword, B_FALSE);
	}

	return (0);
}

static	__checkReturn	efx_rc_t
efx_mcdi_driver_event(
	__in		efx_nic_t *enp,
	__in		uint32_t evq,
	__in		efx_qword_t data)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
			    MC_CMD_DRIVER_EVENT_OUT_LEN)];
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_DRIVER_EVENT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
	    EFX_QWORD_FIELD(data, EFX_DWORD_0));
	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
	    EFX_QWORD_FIELD(data, EFX_DWORD_1));

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

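/*
 * ef10_ev_qpost() posts software ("driver generated") events through the
 * MC_CMD_DRIVER_EVENT call above, rather than writing to the event queue
 * directly.
 */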
			void
ef10_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_qword_t event;

	EFX_POPULATE_QWORD_3(event,
	    ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
	    ESF_DZ_DRV_SUB_CODE, 0,
	    ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);

	(void) efx_mcdi_driver_event(enp, eep->ee_index, event);
}

	__checkReturn	efx_rc_t
ef10_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_dword_t dword;
	uint32_t mode;
	efx_rc_t rc;

	/* Check that hardware and MCDI use the same timer MODE values */
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail1;
	}

	/* If the value is zero then disable the timer */
	if (us == 0) {
		mode = FFE_CZ_TIMER_MODE_DIS;
	} else {
		mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
	}

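	/*
	 * With the bug61265 workaround the timer is reprogrammed through
	 * the SET_EVQ_TMR MCDI call (which takes nanoseconds); otherwise
	 * the timer register is written directly, via the indirect register
	 * table when the bug35388 workaround is also in effect.
	 */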
	if (encp->enc_bug61265_workaround) {
		uint32_t ns = us * 1000;

		rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
		if (rc != 0)
			goto fail2;
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		if (encp->enc_bug35388_workaround) {
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DD_EVQ_IND_TIMER_FLAGS,
			    EFE_DD_EVQ_IND_TIMER_FLAGS,
			    ERF_DD_EVQ_IND_TIMER_MODE, mode,
			    ERF_DD_EVQ_IND_TIMER_VAL, ticks);
			EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
			    eep->ee_index, &dword, 0);
		} else {
			EFX_POPULATE_DWORD_2(dword,
			    ERF_DZ_TC_TIMER_MODE, mode,
			    ERF_DZ_TC_TIMER_VAL, ticks);
			EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
			    eep->ee_index, &dword, 0);
		}
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

#if EFSYS_OPT_QSTATS
			void
ef10_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < EV_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
		eep->ee_stat[id] = 0;
	}
}
#endif	/* EFSYS_OPT_QSTATS */

#if EFSYS_OPT_RX_PACKED_STREAM

static	__checkReturn	boolean_t
ef10_ev_rx_packed_stream(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint32_t label;
	uint32_t next_read_lbits;
	uint16_t flags;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int pkt_count;
	unsigned int current_id;
	boolean_t new_buffer;

	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);

	flags = 0;

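	/*
	 * ESF_DZ_RX_DSC_PTR_LBITS only carries the low bits of the
	 * descriptor pointer, so the number of packets completed since the
	 * previous event is recovered modulo the field width; adding the
	 * field mask plus one below keeps the subtraction non-negative
	 * across wrap-around.
	 */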
	eersp = &eep->ee_rxq_state[label];
	pkt_count = (EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS) + 1 +
	    next_read_lbits - eersp->eers_rx_stream_npackets) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_stream_npackets += pkt_count;

	if (new_buffer) {
		flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
		if (eersp->eers_rx_packed_stream_credits <
		    EFX_RX_PACKED_STREAM_MAX_CREDITS)
			eersp->eers_rx_packed_stream_credits++;
		eersp->eers_rx_read_ptr++;
	}
	current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
		/* RX frame truncated (error flag is misnamed) */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;
		goto deliver;
	}

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
		EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
		EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
	should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,
	    flags);

	return (should_abort);
}

#endif	/* EFSYS_OPT_RX_PACKED_STREAM */

static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t size;
	uint32_t label;
	uint32_t mac_class;
	uint32_t eth_tag_class;
	uint32_t l3_class;
	uint32_t l4_class;
	uint32_t next_read_lbits;
	uint16_t flags;
	boolean_t cont;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int desc_count;
	unsigned int last_used_id;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	/* Basic packet information */
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	eersp = &eep->ee_rxq_state[label];

#if EFSYS_OPT_RX_PACKED_STREAM
	/*
	 * Packed stream events are very different,
	 * so handle them separately
	 */
	if (eersp->eers_rx_packed_stream)
		return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));
#endif

	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
	l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}
	flags = 0;

	if (cont != 0) {
		/*
		 * This may be part of a scattered frame, or it may be a
		 * truncated frame if scatter is disabled on this RXQ.
		 * Overlength frames can be received if e.g. a VF is configured
		 * for 1500 MTU but connected to a port set to 9000 MTU
		 * (see bug56567).
		 * FIXME: There is not yet any driver that supports scatter on
		 * Huntington. Scatter support is required for OSX.
		 */
		flags |= EFX_PKT_CONT;
	}

	if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
		flags |= EFX_PKT_UNICAST;

	/* Increment the count of descriptors read */
	desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_read_ptr += desc_count;

	/*
	 * FIXME: add error checking to make sure this is a batched event.
	 * This could also be an aborted scatter, see Bug36629.
	 */
	if (desc_count > 1) {
		EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
		flags |= EFX_PKT_PREFIX_LEN;
	}

	/* Calculate the index of the last descriptor consumed */
	last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
		/* RX frame truncated (error flag is misnamed) */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		/*
		 * Hardware parse failed, due to malformed headers
		 * or headers that are too long for the parser.
		 * Headers and checksums must be validated by the host.
		 */
		/* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
		goto deliver;
	}

	if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
	    (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
		flags |= EFX_PKT_VLAN_TAGGED;
	}

	switch (l3_class) {
	case ESE_DZ_L3_CLASS_IP4:
	case ESE_DZ_L3_CLASS_IP4_FRAG:
		flags |= EFX_PKT_IPV4;
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_IPV4;
		}

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
		}
		break;

	case ESE_DZ_L3_CLASS_IP6:
	case ESE_DZ_L3_CLASS_IP6_FRAG:
		flags |= EFX_PKT_IPV6;

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
		}
		break;

	default:
		EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
		break;
	}

	if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_TCPUDP;
		}
	}

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx != NULL);
	should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t id;
	uint32_t label;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_TX);

	/* Discard events after RXQ/TXQ errors */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}

	/* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
	id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);

	EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);

	EFSYS_ASSERT(eecp->eec_tx != NULL);
	should_abort = eecp->eec_tx(arg, label, id);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	unsigned int code;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
	should_abort = B_FALSE;

	code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
	switch (code) {
	case ESE_DZ_DRV_TIMER_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);

		EFSYS_ASSERT(eecp->eec_timer != NULL);
		should_abort = eecp->eec_timer(arg, id);
		break;
	}

	case ESE_DZ_DRV_WAKE_UP_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);

		EFSYS_ASSERT(eecp->eec_wake_up != NULL);
		should_abort = eecp->eec_wake_up(arg, id);
		break;
	}

	case ESE_DZ_DRV_START_UP_EV:
		EFSYS_ASSERT(eecp->eec_initialized != NULL);
		should_abort = eecp->eec_initialized(arg);
		break;

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint32_t data;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
	should_abort = B_FALSE;

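	/*
	 * ef10_ev_qpost() only posts 16 bits of data, so a wider value here
	 * indicates a corrupt event; it is reported via the bad_event probe
	 * and aborts event processing.
	 */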
	data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
	if (data >= ((uint32_t)1 << 16)) {
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		return (B_TRUE);
	}

	EFSYS_ASSERT(eecp->eec_software != NULL);
	should_abort = eecp->eec_software(arg, (uint16_t)data);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	unsigned int code;
	boolean_t should_abort = B_FALSE;

	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		efx_mcdi_ev_death(enp, EINTR);
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(enp,
		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
		break;

#if EFSYS_OPT_MCDI_PROXY_AUTH
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		/*
		 * This event notifies a function that an authorization request
		 * has been processed. If the request was authorized then the
		 * function can now re-send the original MCDI request.
		 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
		 */
		efx_mcdi_ev_proxy_response(enp,
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

	case MCDI_EVENT_CODE_LINKCHANGE: {
		efx_link_mode_t link_mode;

		ef10_phy_link_ev(enp, eqp, &link_mode);
		should_abort = eecp->eec_link_change(arg, link_mode);
		break;
	}

	case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
		efx_mon_stat_t id;
		efx_mon_stat_value_t value;
		efx_rc_t rc;

		/* Decode monitor stat for MCDI sensor (if supported) */
		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
			/* Report monitor stat change */
			should_abort = eecp->eec_monitor(arg, id, value);
		} else if (rc == ENOTSUP) {
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_UNKNOWN_SENSOREVT,
			    MCDI_EV_FIELD(eqp, DATA));
		} else {
			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
		}
#endif
		break;
	}

	case MCDI_EVENT_CODE_SCHEDERR:
		/* Informational only */
		break;

	case MCDI_EVENT_CODE_REBOOT:
		/* Falcon/Siena only (should not be seen with Huntington). */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MC_REBOOT:
		/* MC_REBOOT event is used for Huntington (EF10) and later. */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
		if (eecp->eec_mac_stats != NULL) {
			eecp->eec_mac_stats(arg,
			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
		}
#endif
		break;

	case MCDI_EVENT_CODE_FWALERT: {
		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_FWALERT_SRAM,
			    MCDI_EV_FIELD(eqp, FWALERT_DATA));
		else
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_UNKNOWN_FWALERT,
			    MCDI_EV_FIELD(eqp, DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_ERR: {
		/*
		 * After a TXQ error is detected, firmware sends a TX_ERR event.
		 * This may be followed by TX completions (which we discard),
		 * and then finally by a TX_FLUSH event. Firmware destroys the
		 * TXQ automatically after sending the TX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_TXQ_ERR;

		EFSYS_PROBE2(tx_descq_err,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
		    MCDI_EV_FIELD(eqp, TX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_FLUSH: {
		uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);

		/*
		 * EF10 firmware sends two TX_FLUSH events: one to the txq's
		 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with TX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
		break;
	}

	case MCDI_EVENT_CODE_RX_ERR: {
		/*
		 * After an RXQ error is detected, firmware sends an RX_ERR
		 * event. This may be followed by RX events (which we discard),
		 * and then finally by an RX_FLUSH event. Firmware destroys the
		 * RXQ automatically after sending the RX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_RXQ_ERR;

		EFSYS_PROBE2(rx_descq_err,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
		    MCDI_EV_FIELD(eqp, RX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_RX_FLUSH: {
		uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);

		/*
		 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
		 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with RX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
		should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
		break;
	}

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

		void
ef10_ev_rxlabel_init(
	__in		efx_evq_t *eep,
	__in		efx_rxq_t *erp,
	__in		unsigned int label,
	__in		boolean_t packed_stream)
{
	efx_evq_rxq_state_t *eersp;

	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);

#if EFSYS_OPT_RX_PACKED_STREAM
	/*
	 * For packed stream modes, the very first event will have the new
	 * buffer flag set, so the pointer will be incremented, yielding the
	 * correct value. That results in simpler code than trying to detect
	 * a start-of-the-world condition in the event handler.
	 */
	eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
#else
	eersp->eers_rx_read_ptr = 0;
#endif
	eersp->eers_rx_mask = erp->er_mask;
#if EFSYS_OPT_RX_PACKED_STREAM
	eersp->eers_rx_stream_npackets = 0;
	eersp->eers_rx_packed_stream = packed_stream;
	if (packed_stream) {
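		/*
		 * Size the credit count from the event queue depth
		 * (ee_mask + 1): each credit covers
		 * EFX_RX_PACKED_STREAM_MEM_PER_CREDIT bytes of buffer, and
		 * in the worst case each
		 * EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE bytes can produce
		 * one event, so this presumably bounds the number of
		 * in-flight events so the queue cannot overflow.
		 */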
		eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
		    (EFX_RX_PACKED_STREAM_MEM_PER_CREDIT /
		    EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
		/*
		 * A single credit is allocated to the queue when it is
		 * started. It is immediately spent by the first packet,
		 * which has the NEW BUFFER flag set; it must nevertheless
		 * be accounted for here so that the credit count does not
		 * accidentally wrap past the maximum.
		 */
		eersp->eers_rx_packed_stream_credits--;
		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
		    EFX_RX_PACKED_STREAM_MAX_CREDITS);
	}
#else
	EFSYS_ASSERT(!packed_stream);
#endif
}

		void
ef10_ev_rxlabel_fini(
	__in		efx_evq_t *eep,
	__in		unsigned int label)
{
	efx_evq_rxq_state_t *eersp;

	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);

	eersp->eers_rx_read_ptr = 0;
	eersp->eers_rx_mask = 0;
#if EFSYS_OPT_RX_PACKED_STREAM
	eersp->eers_rx_stream_npackets = 0;
	eersp->eers_rx_packed_stream = B_FALSE;
	eersp->eers_rx_packed_stream_credits = 0;
#endif
}

#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */