]> git.proxmox.com Git - ceph.git/blame - ceph/src/spdk/dpdk/drivers/net/bnx2x/ecore_sp.h
import 15.2.0 Octopus source
[ceph.git] / ceph / src / spdk / dpdk / drivers / net / bnx2x / ecore_sp.h
CommitLineData
11fdf7f2
TL
1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2007-2013 Broadcom Corporation.
7c673cae
FG
3 *
4 * Eric Davis <edavis@broadcom.com>
5 * David Christensen <davidch@broadcom.com>
6 * Gary Zambrano <zambrano@broadcom.com>
7 *
8 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
11fdf7f2 9 * Copyright (c) 2015-2018 Cavium Inc.
7c673cae 10 * All rights reserved.
11fdf7f2 11 * www.cavium.com
7c673cae
FG
12 */
13
14#ifndef ECORE_SP_H
15#define ECORE_SP_H
16
17#include <rte_byteorder.h>
18
19#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
20#ifndef __LITTLE_ENDIAN
21#define __LITTLE_ENDIAN RTE_LITTLE_ENDIAN
22#endif
23#undef __BIG_ENDIAN
24#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
25#ifndef __BIG_ENDIAN
26#define __BIG_ENDIAN RTE_BIG_ENDIAN
27#endif
28#undef __LITTLE_ENDIAN
29#endif
30
31#include "ecore_mfw_req.h"
32#include "ecore_fw_defs.h"
33#include "ecore_hsi.h"
34#include "ecore_reg.h"
35
36struct bnx2x_softc;
11fdf7f2 37typedef rte_iova_t ecore_dma_addr_t; /* expected to be 64 bit wide */
7c673cae
FG
38typedef volatile int ecore_atomic_t;
39
40
41#define ETH_ALEN ETHER_ADDR_LEN /* 6 */
42
43#define ECORE_SWCID_SHIFT 17
44#define ECORE_SWCID_MASK ((0x1 << ECORE_SWCID_SHIFT) - 1)
45
46#define ECORE_MC_HASH_SIZE 8
47#define ECORE_MC_HASH_OFFSET(sc, i) \
48 (BAR_TSTRORM_INTMEM + \
49 TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(FUNC_ID(sc)) + i*4)
50
51#define ECORE_MAX_MULTICAST 64
52#define ECORE_MAX_EMUL_MULTI 1
53
54#define IRO sc->iro_array
55
56typedef rte_spinlock_t ECORE_MUTEX;
57#define ECORE_MUTEX_INIT(_mutex) rte_spinlock_init(_mutex)
58#define ECORE_MUTEX_LOCK(_mutex) rte_spinlock_lock(_mutex)
59#define ECORE_MUTEX_UNLOCK(_mutex) rte_spinlock_unlock(_mutex)
60
61typedef rte_spinlock_t ECORE_MUTEX_SPIN;
62#define ECORE_SPIN_LOCK_INIT(_spin, _sc) rte_spinlock_init(_spin)
63#define ECORE_SPIN_LOCK_BH(_spin) rte_spinlock_lock(_spin) /* bh = bottom-half */
64#define ECORE_SPIN_UNLOCK_BH(_spin) rte_spinlock_unlock(_spin) /* bh = bottom-half */
65
66#define ECORE_SMP_MB_AFTER_CLEAR_BIT() mb()
67#define ECORE_SMP_MB_BEFORE_CLEAR_BIT() mb()
68#define ECORE_SMP_MB() mb()
69#define ECORE_SMP_RMB() rmb()
70#define ECORE_SMP_WMB() wmb()
71#define ECORE_MMIOWB() wmb()
72
73#define ECORE_SET_BIT_NA(bit, var) (*var |= (1 << bit))
74#define ECORE_CLEAR_BIT_NA(bit, var) (*var &= ~(1 << bit))
75
76#define ECORE_TEST_BIT(bit, var) bnx2x_test_bit(bit, var)
77#define ECORE_SET_BIT(bit, var) bnx2x_set_bit(bit, var)
78#define ECORE_CLEAR_BIT(bit, var) bnx2x_clear_bit(bit, var)
79#define ECORE_TEST_AND_CLEAR_BIT(bit, var) bnx2x_test_and_clear_bit(bit, var)
80
81#define atomic_load_acq_int (int)*
82#define atomic_store_rel_int(a, v) (*a = v)
83#define atomic_cmpset_acq_int(a, o, n) ((*a = (o & (n)) | (n)) ^ o)
84
85#define atomic_load_acq_long (long)*
86#define atomic_store_rel_long(a, v) (*a = v)
87#define atomic_set_acq_long(a, v) (*a |= v)
88#define atomic_clear_acq_long(a, v) (*a &= ~v)
89#define atomic_cmpset_acq_long(a, o, n) ((*a = (o & (n)) | (n)) ^ o)
90#define atomic_subtract_acq_long(a, v) (*a -= v)
91#define atomic_add_acq_long(a, v) (*a += v)
92
93#define ECORE_ATOMIC_READ(a) atomic_load_acq_int((volatile int *)a)
94#define ECORE_ATOMIC_SET(a, v) atomic_store_rel_int((volatile int *)a, v)
95#define ECORE_ATOMIC_CMPXCHG(a, o, n) bnx2x_cmpxchg((volatile int *)a, o, n)
96
97#define ECORE_RET_PENDING(pending_bit, pending) \
98 (ECORE_TEST_BIT(pending_bit, pending) ? ECORE_PENDING : ECORE_SUCCESS)
99
100#define ECORE_SET_FLAG(value, mask, flag) \
101 do { \
102 (value) &= ~(mask); \
103 (value) |= ((flag) << (mask##_SHIFT)); \
104 } while (0)
105
/* Extract the field selected by 'mask' from 'value'.
 * Read-only: uses '&' (not '&='), so the caller's 'value' is never
 * modified by what is semantically a getter. The Linux bnx2x driver's
 * BNX2X_GET_FLAG uses the same non-mutating form.
 */
#define ECORE_GET_FLAG(value, mask) \
	(((value) & (mask)) >> (mask##_SHIFT))
108
109#define ECORE_MIGHT_SLEEP()
110
111#define ECORE_FCOE_CID(sc) ((sc)->fp[FCOE_IDX(sc)].cl_id)
112
113#define ECORE_MEMCMP(_a, _b, _s) memcmp(_a, _b, _s)
11fdf7f2 114#define ECORE_MEMCPY(_a, _b, _s) rte_memcpy(_a, _b, _s)
7c673cae
FG
115#define ECORE_MEMSET(_a, _c, _s) memset(_a, _c, _s)
116
117#define ECORE_CPU_TO_LE16(x) htole16(x)
118#define ECORE_CPU_TO_LE32(x) htole32(x)
119
120#define ECORE_WAIT(_s, _t) DELAY(1000)
121#define ECORE_MSLEEP(_t) DELAY((_t) * 1000)
122
123#define ECORE_LIKELY(x) likely(x)
124#define ECORE_UNLIKELY(x) unlikely(x)
125
126#define ECORE_ZALLOC(_size, _flags, _sc) \
127 rte_zmalloc("", _size, RTE_CACHE_LINE_SIZE)
128
129#define ECORE_CALLOC(_len, _size, _flags, _sc) \
130 rte_calloc("", _len, _size, RTE_CACHE_LINE_SIZE)
131
132#define ECORE_FREE(_s, _buf, _size) \
133 rte_free(_buf)
134
135#define SC_ILT(sc) ((sc)->ilt)
136#define ILOG2(x) bnx2x_ilog2(x)
137
138#define ECORE_ILT_ZALLOC(x, y, size, str) \
139 do { \
140 x = rte_malloc("", sizeof(struct bnx2x_dma), RTE_CACHE_LINE_SIZE); \
141 if (x) { \
142 if (bnx2x_dma_alloc((struct bnx2x_softc *)sc, \
143 size, (struct bnx2x_dma *)x, \
144 str, RTE_CACHE_LINE_SIZE) != 0) { \
145 rte_free(x); \
146 x = NULL; \
147 *y = 0; \
148 } else { \
149 *y = ((struct bnx2x_dma *)x)->paddr; \
150 } \
151 } \
152 } while (0)
153
9f95a23c
TL
154#define ECORE_ILT_FREE(x, y, size) \
155 do { \
156 if (x) { \
157 bnx2x_dma_free((struct bnx2x_dma *)x); \
158 rte_free(x); \
159 x = NULL; \
160 y = 0; \
161 } \
162 } while (0)
7c673cae
FG
163
164#define ECORE_IS_VALID_ETHER_ADDR(_mac) TRUE
165
166#define ECORE_IS_MF_SD_MODE IS_MF_SD_MODE
167#define ECORE_IS_MF_SI_MODE IS_MF_SI_MODE
168#define ECORE_IS_MF_AFEX_MODE IS_MF_AFEX_MODE
169
170#define ECORE_SET_CTX_VALIDATION bnx2x_set_ctx_validation
171
172#define ECORE_UPDATE_COALESCE_SB_INDEX bnx2x_update_coalesce_sb_index
173
174#define ECORE_ALIGN(x, a) ((((x) + (a) - 1) / (a)) * (a))
175
176#define ECORE_REG_WR_DMAE_LEN REG_WR_DMAE_LEN
177
178#define ECORE_PATH_ID SC_PATH
179#define ECORE_PORT_ID SC_PORT
180#define ECORE_FUNC_ID SC_FUNC
181#define ECORE_ABS_FUNC_ID SC_ABS_FUNC
182
/* Reflected (little-endian) CRC32 polynomial. */
#define CRCPOLY_LE 0xedb88320

uint32_t ecore_calc_crc32(uint32_t crc, uint8_t const *p,
			  uint32_t len, uint32_t magic);

uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc);


/* Convenience wrapper: fold 'len' bytes at 'mac' into 'seed' using the
 * standard reflected CRC32 polynomial (CRCPOLY_LE).
 */
static inline uint32_t
ECORE_CRC32_LE(uint32_t seed, uint8_t *mac, uint32_t len)
{
	uint32_t crc;

	crc = ecore_calc_crc32(seed, mac, len, CRCPOLY_LE);

	return crc;
}
195
196#define ecore_sp_post(_sc, _a, _b, _c, _d) \
197 bnx2x_sp_post(_sc, _a, _b, U64_HI(_c), U64_LO(_c), _d)
198
199#define ECORE_DBG_BREAK_IF(exp) \
200 do { \
201 if (unlikely(exp)) { \
202 rte_panic("ECORE"); \
203 } \
204 } while (0)
205
/* Unconditional fatal error: panic with the source location.
 * Note: no semicolon after 'while (0)' — the standard do/while(0) macro
 * idiom requires the caller to supply it, otherwise every use expands to
 * an extra null statement and brace-less if/else around ECORE_BUG()
 * fails to compile.
 */
#define ECORE_BUG() \
	do { \
		rte_panic("BUG (%s:%d)", __FILE__, __LINE__); \
	} while (0)
210
211#define ECORE_BUG_ON(exp) \
212 do { \
213 if (likely(exp)) { \
214 rte_panic("BUG_ON (%s:%d)", __FILE__, __LINE__); \
215 } \
216 } while (0)
217
218
9f95a23c
TL
219#define ECORE_MSG(sc, m, ...) \
220 PMD_DRV_LOG(DEBUG, sc, m, ##__VA_ARGS__)
7c673cae
FG
221
222typedef struct _ecore_list_entry_t
223{
224 struct _ecore_list_entry_t *next, *prev;
225} ecore_list_entry_t;
226
227typedef struct ecore_list_t
228{
229 ecore_list_entry_t *head, *tail;
230 unsigned long cnt;
231} ecore_list_t;
232
233/* initialize the list */
234#define ECORE_LIST_INIT(_list) \
235 do { \
236 (_list)->head = NULL; \
237 (_list)->tail = NULL; \
238 (_list)->cnt = 0; \
239 } while (0)
240
241/* return TRUE if the element is the last on the list */
242#define ECORE_LIST_IS_LAST(_elem, _list) \
243 (_elem == (_list)->tail)
244
245/* return TRUE if the list is empty */
246#define ECORE_LIST_IS_EMPTY(_list) \
247 ((_list)->cnt == 0)
248
249/* return the first element */
250#define ECORE_LIST_FIRST_ENTRY(_list, cast, _link) \
251 (cast *)((_list)->head)
252
253/* return the next element */
254#define ECORE_LIST_NEXT(_elem, _link, cast) \
255 (cast *)((&((_elem)->_link))->next)
256
257/* push an element on the head of the list */
258#define ECORE_LIST_PUSH_HEAD(_elem, _list) \
259 do { \
260 (_elem)->prev = (ecore_list_entry_t *)0; \
261 (_elem)->next = (_list)->head; \
262 if ((_list)->tail == (ecore_list_entry_t *)0) { \
263 (_list)->tail = (_elem); \
264 } else { \
265 (_list)->head->prev = (_elem); \
266 } \
267 (_list)->head = (_elem); \
268 (_list)->cnt++; \
269 } while (0)
270
271/* push an element on the tail of the list */
272#define ECORE_LIST_PUSH_TAIL(_elem, _list) \
273 do { \
274 (_elem)->next = (ecore_list_entry_t *)0; \
275 (_elem)->prev = (_list)->tail; \
276 if ((_list)->tail) { \
277 (_list)->tail->next = (_elem); \
278 } else { \
279 (_list)->head = (_elem); \
280 } \
281 (_list)->tail = (_elem); \
282 (_list)->cnt++; \
283 } while (0)
284
285/* push list1 on the head of list2 and return with list1 as empty */
286#define ECORE_LIST_SPLICE_INIT(_list1, _list2) \
287 do { \
288 (_list1)->tail->next = (_list2)->head; \
289 if ((_list2)->head) { \
290 (_list2)->head->prev = (_list1)->tail; \
291 } else { \
292 (_list2)->tail = (_list1)->tail; \
293 } \
294 (_list2)->head = (_list1)->head; \
295 (_list2)->cnt += (_list1)->cnt; \
296 (_list1)->head = NULL; \
297 (_list1)->tail = NULL; \
298 (_list1)->cnt = 0; \
299 } while (0)
300
301/* remove an element from the list */
302#define ECORE_LIST_REMOVE_ENTRY(_elem, _list) \
303 do { \
304 if ((_list)->head == (_elem)) { \
305 if ((_list)->head) { \
306 (_list)->head = (_list)->head->next; \
307 if ((_list)->head) { \
308 (_list)->head->prev = (ecore_list_entry_t *)0; \
309 } else { \
310 (_list)->tail = (ecore_list_entry_t *)0; \
311 } \
312 (_list)->cnt--; \
313 } \
314 } else if ((_list)->tail == (_elem)) { \
315 if ((_list)->tail) { \
316 (_list)->tail = (_list)->tail->prev; \
317 if ((_list)->tail) { \
318 (_list)->tail->next = (ecore_list_entry_t *)0; \
319 } else { \
320 (_list)->head = (ecore_list_entry_t *)0; \
321 } \
322 (_list)->cnt--; \
323 } \
324 } else { \
325 (_elem)->prev->next = (_elem)->next; \
326 (_elem)->next->prev = (_elem)->prev; \
327 (_list)->cnt--; \
328 } \
329 } while (0)
330
331/* walk the list */
332#define ECORE_LIST_FOR_EACH_ENTRY(pos, _list, _link, cast) \
333 for (pos = ECORE_LIST_FIRST_ENTRY(_list, cast, _link); \
334 pos; \
335 pos = ECORE_LIST_NEXT(pos, _link, cast))
336
/* walk the list (safely): 'n' caches the element after 'pos', so the loop
 * body may remove 'pos' from the list without breaking the iteration.
 * (Fixes the '_lint' typo — harmless only because ECORE_LIST_FIRST_ENTRY
 * ignores its third argument.)
 */
#define ECORE_LIST_FOR_EACH_ENTRY_SAFE(pos, n, _list, _link, cast) \
	for (pos = ECORE_LIST_FIRST_ENTRY(_list, cast, _link),  \
	     n = (pos) ? ECORE_LIST_NEXT(pos, _link, cast) : NULL; \
	     pos != NULL;                            \
	     pos = (cast *)n,                        \
	     n = (pos) ? ECORE_LIST_NEXT(pos, _link, cast) : NULL)
344
345
346/* Manipulate a bit vector defined as an array of uint64_t */
347
348/* Number of bits in one sge_mask array element */
349#define BIT_VEC64_ELEM_SZ 64
350#define BIT_VEC64_ELEM_SHIFT 6
351#define BIT_VEC64_ELEM_MASK ((uint64_t)BIT_VEC64_ELEM_SZ - 1)
352
353#define __BIT_VEC64_SET_BIT(el, bit) \
354 do { \
355 el = ((el) | ((uint64_t)0x1 << (bit))); \
356 } while (0)
357
358#define __BIT_VEC64_CLEAR_BIT(el, bit) \
359 do { \
360 el = ((el) & (~((uint64_t)0x1 << (bit)))); \
361 } while (0)
362
363#define BIT_VEC64_SET_BIT(vec64, idx) \
364 __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
365 (idx) & BIT_VEC64_ELEM_MASK)
366
367#define BIT_VEC64_CLEAR_BIT(vec64, idx) \
368 __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
369 (idx) & BIT_VEC64_ELEM_MASK)
370
371#define BIT_VEC64_TEST_BIT(vec64, idx) \
372 (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
373 ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)
374
375/*
376 * Creates a bitmask of all ones in less significant bits.
377 * idx - index of the most significant bit in the created mask
378 */
379#define BIT_VEC64_ONES_MASK(idx) \
380 (((uint64_t)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
381#define BIT_VEC64_ELEM_ONE_MASK ((uint64_t)(~0))
382
383/* fill in a MAC address the way the FW likes it */
384static inline void
385ecore_set_fw_mac_addr(uint16_t *fw_hi,
386 uint16_t *fw_mid,
387 uint16_t *fw_lo,
388 uint8_t *mac)
389{
390 ((uint8_t *)fw_hi)[0] = mac[1];
391 ((uint8_t *)fw_hi)[1] = mac[0];
392 ((uint8_t *)fw_mid)[0] = mac[3];
393 ((uint8_t *)fw_mid)[1] = mac[2];
394 ((uint8_t *)fw_lo)[0] = mac[5];
395 ((uint8_t *)fw_lo)[1] = mac[4];
396}
397
398
399enum ecore_status_t {
400 ECORE_EXISTS = -6,
401 ECORE_IO = -5,
402 ECORE_TIMEOUT = -4,
403 ECORE_INVAL = -3,
404 ECORE_BUSY = -2,
405 ECORE_NOMEM = -1,
406 ECORE_SUCCESS = 0,
407 /* PENDING is not an error and should be positive */
408 ECORE_PENDING = 1,
409};
410
411enum {
412 SWITCH_UPDATE,
413 AFEX_UPDATE,
414};
415
416
417
418
419struct bnx2x_softc;
420struct eth_context;
421
422/* Bits representing general command's configuration */
423enum {
424 RAMROD_TX,
425 RAMROD_RX,
426 /* Wait until all pending commands complete */
427 RAMROD_COMP_WAIT,
428 /* Don't send a ramrod, only update a registry */
429 RAMROD_DRV_CLR_ONLY,
430 /* Configure HW according to the current object state */
431 RAMROD_RESTORE,
432 /* Execute the next command now */
433 RAMROD_EXEC,
 434	/* Don't add a new command and continue execution of postponed
435 * commands. If not set a new command will be added to the
436 * pending commands list.
437 */
438 RAMROD_CONT,
439 /* If there is another pending ramrod, wait until it finishes and
440 * re-try to submit this one. This flag can be set only in sleepable
441 * context, and should not be set from the context that completes the
442 * ramrods as deadlock will occur.
443 */
444 RAMROD_RETRY,
445};
446
447typedef enum {
448 ECORE_OBJ_TYPE_RX,
449 ECORE_OBJ_TYPE_TX,
450 ECORE_OBJ_TYPE_RX_TX,
451} ecore_obj_type;
452
453/* Public slow path states */
454enum {
455 ECORE_FILTER_MAC_PENDING,
456 ECORE_FILTER_VLAN_PENDING,
457 ECORE_FILTER_VLAN_MAC_PENDING,
458 ECORE_FILTER_RX_MODE_PENDING,
459 ECORE_FILTER_RX_MODE_SCHED,
460 ECORE_FILTER_ISCSI_ETH_START_SCHED,
461 ECORE_FILTER_ISCSI_ETH_STOP_SCHED,
462 ECORE_FILTER_FCOE_ETH_START_SCHED,
463 ECORE_FILTER_FCOE_ETH_STOP_SCHED,
464 ECORE_FILTER_MCAST_PENDING,
465 ECORE_FILTER_MCAST_SCHED,
466 ECORE_FILTER_RSS_CONF_PENDING,
467 ECORE_AFEX_FCOE_Q_UPDATE_PENDING,
468 ECORE_AFEX_PENDING_VIFSET_MCP_ACK
469};
470
471struct ecore_raw_obj {
472 uint8_t func_id;
473
474 /* Queue params */
475 uint8_t cl_id;
476 uint32_t cid;
477
478 /* Ramrod data buffer params */
479 void *rdata;
480 ecore_dma_addr_t rdata_mapping;
481
482 /* Ramrod state params */
483 int state; /* "ramrod is pending" state bit */
484 unsigned long *pstate; /* pointer to state buffer */
485
486 ecore_obj_type obj_type;
487
488 int (*wait_comp)(struct bnx2x_softc *sc,
489 struct ecore_raw_obj *o);
490
491 int (*check_pending)(struct ecore_raw_obj *o);
492 void (*clear_pending)(struct ecore_raw_obj *o);
493 void (*set_pending)(struct ecore_raw_obj *o);
494};
495
496/************************* VLAN-MAC commands related parameters ***************/
497struct ecore_mac_ramrod_data {
498 uint8_t mac[ETH_ALEN];
499 uint8_t is_inner_mac;
500};
501
502struct ecore_vlan_ramrod_data {
503 uint16_t vlan;
504};
505
506struct ecore_vlan_mac_ramrod_data {
507 uint8_t mac[ETH_ALEN];
508 uint8_t is_inner_mac;
509 uint16_t vlan;
510};
511
512union ecore_classification_ramrod_data {
513 struct ecore_mac_ramrod_data mac;
514 struct ecore_vlan_ramrod_data vlan;
515 struct ecore_vlan_mac_ramrod_data vlan_mac;
516};
517
518/* VLAN_MAC commands */
519enum ecore_vlan_mac_cmd {
520 ECORE_VLAN_MAC_ADD,
521 ECORE_VLAN_MAC_DEL,
522 ECORE_VLAN_MAC_MOVE,
523};
524
525struct ecore_vlan_mac_data {
526 /* Requested command: ECORE_VLAN_MAC_XX */
527 enum ecore_vlan_mac_cmd cmd;
528 /* used to contain the data related vlan_mac_flags bits from
529 * ramrod parameters.
530 */
531 unsigned long vlan_mac_flags;
532
533 /* Needed for MOVE command */
534 struct ecore_vlan_mac_obj *target_obj;
535
536 union ecore_classification_ramrod_data u;
537};
538
539/*************************** Exe Queue obj ************************************/
540union ecore_exe_queue_cmd_data {
541 struct ecore_vlan_mac_data vlan_mac;
542
543 struct {
544 } mcast;
545};
546
547struct ecore_exeq_elem {
548 ecore_list_entry_t link;
549
550 /* Length of this element in the exe_chunk. */
551 int cmd_len;
552
553 union ecore_exe_queue_cmd_data cmd_data;
554};
555
556union ecore_qable_obj;
557
558union ecore_exeq_comp_elem {
559 union event_ring_elem *elem;
560};
561
562struct ecore_exe_queue_obj;
563
564typedef int (*exe_q_validate)(struct bnx2x_softc *sc,
565 union ecore_qable_obj *o,
566 struct ecore_exeq_elem *elem);
567
568typedef int (*exe_q_remove)(struct bnx2x_softc *sc,
569 union ecore_qable_obj *o,
570 struct ecore_exeq_elem *elem);
571
572/* Return positive if entry was optimized, 0 - if not, negative
573 * in case of an error.
574 */
575typedef int (*exe_q_optimize)(struct bnx2x_softc *sc,
576 union ecore_qable_obj *o,
577 struct ecore_exeq_elem *elem);
578typedef int (*exe_q_execute)(struct bnx2x_softc *sc,
579 union ecore_qable_obj *o,
580 ecore_list_t *exe_chunk,
581 unsigned long *ramrod_flags);
582typedef struct ecore_exeq_elem *
583 (*exe_q_get)(struct ecore_exe_queue_obj *o,
584 struct ecore_exeq_elem *elem);
585
586struct ecore_exe_queue_obj {
587 /* Commands pending for an execution. */
588 ecore_list_t exe_queue;
589
590 /* Commands pending for an completion. */
591 ecore_list_t pending_comp;
592
593 ECORE_MUTEX_SPIN lock;
594
595 /* Maximum length of commands' list for one execution */
596 int exe_chunk_len;
597
598 union ecore_qable_obj *owner;
599
600 /****** Virtual functions ******/
601 /**
602 * Called before commands execution for commands that are really
603 * going to be executed (after 'optimize').
604 *
605 * Must run under exe_queue->lock
606 */
607 exe_q_validate validate;
608
609 /**
610 * Called before removing pending commands, cleaning allocated
611 * resources (e.g., credits from validate)
612 */
613 exe_q_remove remove;
614
615 /**
616 * This will try to cancel the current pending commands list
617 * considering the new command.
618 *
619 * Returns the number of optimized commands or a negative error code
620 *
621 * Must run under exe_queue->lock
622 */
623 exe_q_optimize optimize;
624
625 /**
626 * Run the next commands chunk (owner specific).
627 */
628 exe_q_execute execute;
629
630 /**
631 * Return the exe_queue element containing the specific command
632 * if any. Otherwise return NULL.
633 */
634 exe_q_get get;
635};
636/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
637/*
638 * Element in the VLAN_MAC registry list having all current configured
639 * rules.
640 */
641struct ecore_vlan_mac_registry_elem {
642 ecore_list_entry_t link;
643
644 /* Used to store the cam offset used for the mac/vlan/vlan-mac.
645 * Relevant for 57711 only. VLANs and MACs share the
646 * same CAM for these chips.
647 */
648 int cam_offset;
649
650 /* Needed for DEL and RESTORE flows */
651 unsigned long vlan_mac_flags;
652
653 union ecore_classification_ramrod_data u;
654};
655
656/* Bits representing VLAN_MAC commands specific flags */
657enum {
658 ECORE_UC_LIST_MAC,
659 ECORE_ETH_MAC,
660 ECORE_ISCSI_ETH_MAC,
661 ECORE_NETQ_ETH_MAC,
662 ECORE_DONT_CONSUME_CAM_CREDIT,
663 ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
664};
665
666struct ecore_vlan_mac_ramrod_params {
667 /* Object to run the command from */
668 struct ecore_vlan_mac_obj *vlan_mac_obj;
669
670 /* General command flags: COMP_WAIT, etc. */
671 unsigned long ramrod_flags;
672
673 /* Command specific configuration request */
674 struct ecore_vlan_mac_data user_req;
675};
676
677struct ecore_vlan_mac_obj {
678 struct ecore_raw_obj raw;
679
680 /* Bookkeeping list: will prevent the addition of already existing
681 * entries.
682 */
683 ecore_list_t head;
684 /* Implement a simple reader/writer lock on the head list.
685 * all these fields should only be accessed under the exe_queue lock
686 */
687 uint8_t head_reader; /* Num. of readers accessing head list */
688 int head_exe_request; /* Pending execution request. */
689 unsigned long saved_ramrod_flags; /* Ramrods of pending execution */
690
691 /* Execution queue interface instance */
692 struct ecore_exe_queue_obj exe_queue;
693
694 /* MACs credit pool */
695 struct ecore_credit_pool_obj *macs_pool;
696
697 /* VLANs credit pool */
698 struct ecore_credit_pool_obj *vlans_pool;
699
700 /* RAMROD command to be used */
701 int ramrod_cmd;
702
703 /* copy first n elements onto preallocated buffer
704 *
705 * @param n number of elements to get
706 * @param buf buffer preallocated by caller into which elements
707 * will be copied. Note elements are 4-byte aligned
708 * so buffer size must be able to accommodate the
709 * aligned elements.
710 *
711 * @return number of copied bytes
712 */
713
714 int (*get_n_elements)(struct bnx2x_softc *sc,
715 struct ecore_vlan_mac_obj *o, int n, uint8_t *base,
716 uint8_t stride, uint8_t size);
717
718 /**
719 * Checks if ADD-ramrod with the given params may be performed.
720 *
721 * @return zero if the element may be added
722 */
723
724 int (*check_add)(struct bnx2x_softc *sc,
725 struct ecore_vlan_mac_obj *o,
726 union ecore_classification_ramrod_data *data);
727
728 /**
729 * Checks if DEL-ramrod with the given params may be performed.
730 *
731 * @return TRUE if the element may be deleted
732 */
733 struct ecore_vlan_mac_registry_elem *
734 (*check_del)(struct bnx2x_softc *sc,
735 struct ecore_vlan_mac_obj *o,
736 union ecore_classification_ramrod_data *data);
737
738 /**
 739	 * Checks if MOVE-ramrod with the given params may be performed.
 740	 *
 741	 * @return TRUE if the element may be moved
742 */
743 int (*check_move)(struct bnx2x_softc *sc,
744 struct ecore_vlan_mac_obj *src_o,
745 struct ecore_vlan_mac_obj *dst_o,
746 union ecore_classification_ramrod_data *data);
747
748 /**
749 * Update the relevant credit object(s) (consume/return
750 * correspondingly).
751 */
752 int (*get_credit)(struct ecore_vlan_mac_obj *o);
753 int (*put_credit)(struct ecore_vlan_mac_obj *o);
754 int (*get_cam_offset)(struct ecore_vlan_mac_obj *o, int *offset);
755 int (*put_cam_offset)(struct ecore_vlan_mac_obj *o, int offset);
756
757 /**
758 * Configures one rule in the ramrod data buffer.
759 */
760 void (*set_one_rule)(struct bnx2x_softc *sc,
761 struct ecore_vlan_mac_obj *o,
762 struct ecore_exeq_elem *elem, int rule_idx,
763 int cam_offset);
764
765 /**
766 * Delete all configured elements having the given
767 * vlan_mac_flags specification. Assumes no pending for
 768	 * execution commands. Will schedule all currently
769 * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
770 * specification for deletion and will use the given
771 * ramrod_flags for the last DEL operation.
772 *
773 * @param sc
774 * @param o
775 * @param ramrod_flags RAMROD_XX flags
776 *
777 * @return 0 if the last operation has completed successfully
778 * and there are no more elements left, positive value
779 * if there are pending for completion commands,
780 * negative value in case of failure.
781 */
782 int (*delete_all)(struct bnx2x_softc *sc,
783 struct ecore_vlan_mac_obj *o,
784 unsigned long *vlan_mac_flags,
785 unsigned long *ramrod_flags);
786
787 /**
788 * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
789 * configured elements list.
790 *
791 * @param sc
792 * @param p Command parameters (RAMROD_COMP_WAIT bit in
793 * ramrod_flags is only taken into an account)
794 * @param ppos a pointer to the cookie that should be given back in the
795 * next call to make function handle the next element. If
796 * *ppos is set to NULL it will restart the iterator.
797 * If returned *ppos == NULL this means that the last
798 * element has been handled.
799 *
800 * @return int
801 */
802 int (*restore)(struct bnx2x_softc *sc,
803 struct ecore_vlan_mac_ramrod_params *p,
804 struct ecore_vlan_mac_registry_elem **ppos);
805
806 /**
807 * Should be called on a completion arrival.
808 *
809 * @param sc
810 * @param o
811 * @param cqe Completion element we are handling
812 * @param ramrod_flags if RAMROD_CONT is set the next bulk of
813 * pending commands will be executed.
814 * RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE
815 * may also be set if needed.
816 *
817 * @return 0 if there are neither pending nor waiting for
818 * completion commands. Positive value if there are
819 * pending for execution or for completion commands.
820 * Negative value in case of an error (including an
821 * error in the cqe).
822 */
823 int (*complete)(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *o,
824 union event_ring_elem *cqe,
825 unsigned long *ramrod_flags);
826
827 /**
828 * Wait for completion of all commands. Don't schedule new ones,
829 * just wait. It assumes that the completion code will schedule
830 * for new commands.
831 */
832 int (*wait)(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *o);
833};
834
835enum {
836 ECORE_LLH_CAM_ISCSI_ETH_LINE = 0,
837 ECORE_LLH_CAM_ETH_LINE,
838 ECORE_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
839};
840
841/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
842
843/* RX_MODE ramrod special flags: set in rx_mode_flags field in
844 * a ecore_rx_mode_ramrod_params.
845 */
846enum {
847 ECORE_RX_MODE_FCOE_ETH,
848 ECORE_RX_MODE_ISCSI_ETH,
849};
850
851enum {
852 ECORE_ACCEPT_UNICAST,
853 ECORE_ACCEPT_MULTICAST,
854 ECORE_ACCEPT_ALL_UNICAST,
855 ECORE_ACCEPT_ALL_MULTICAST,
856 ECORE_ACCEPT_BROADCAST,
857 ECORE_ACCEPT_UNMATCHED,
858 ECORE_ACCEPT_ANY_VLAN
859};
860
861struct ecore_rx_mode_ramrod_params {
862 struct ecore_rx_mode_obj *rx_mode_obj;
863 unsigned long *pstate;
864 int state;
865 uint8_t cl_id;
866 uint32_t cid;
867 uint8_t func_id;
868 unsigned long ramrod_flags;
869 unsigned long rx_mode_flags;
870
871 /* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
872 * a tstorm_eth_mac_filter_config (e1x).
873 */
874 void *rdata;
875 ecore_dma_addr_t rdata_mapping;
876
877 /* Rx mode settings */
878 unsigned long rx_accept_flags;
879
880 /* internal switching settings */
881 unsigned long tx_accept_flags;
882};
883
884struct ecore_rx_mode_obj {
885 int (*config_rx_mode)(struct bnx2x_softc *sc,
886 struct ecore_rx_mode_ramrod_params *p);
887
888 int (*wait_comp)(struct bnx2x_softc *sc,
889 struct ecore_rx_mode_ramrod_params *p);
890};
891
892/********************** Set multicast group ***********************************/
893
894struct ecore_mcast_list_elem {
895 ecore_list_entry_t link;
896 uint8_t *mac;
897};
898
899union ecore_mcast_config_data {
900 uint8_t *mac;
901 uint8_t bin; /* used in a RESTORE flow */
902};
903
904struct ecore_mcast_ramrod_params {
905 struct ecore_mcast_obj *mcast_obj;
906
907 /* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */
908 unsigned long ramrod_flags;
909
910 ecore_list_t mcast_list; /* list of struct ecore_mcast_list_elem */
911 int mcast_list_len;
912};
913
914enum ecore_mcast_cmd {
915 ECORE_MCAST_CMD_ADD,
916 ECORE_MCAST_CMD_CONT,
917 ECORE_MCAST_CMD_DEL,
918 ECORE_MCAST_CMD_RESTORE,
919};
920
921struct ecore_mcast_obj {
922 struct ecore_raw_obj raw;
923
924 union {
925 struct {
926 #define ECORE_MCAST_BINS_NUM 256
927 #define ECORE_MCAST_VEC_SZ (ECORE_MCAST_BINS_NUM / 64)
928 uint64_t vec[ECORE_MCAST_VEC_SZ];
929
930 /** Number of BINs to clear. Should be updated
931 * immediately when a command arrives in order to
932 * properly create DEL commands.
933 */
934 int num_bins_set;
935 } aprox_match;
936
937 struct {
938 ecore_list_t macs;
939 int num_macs_set;
940 } exact_match;
941 } registry;
942
943 /* Pending commands */
944 ecore_list_t pending_cmds_head;
945
946 /* A state that is set in raw.pstate, when there are pending commands */
947 int sched_state;
948
949 /* Maximal number of mcast MACs configured in one command */
950 int max_cmd_len;
951
952 /* Total number of currently pending MACs to configure: both
953 * in the pending commands list and in the current command.
954 */
955 int total_pending_num;
956
957 uint8_t engine_id;
958
959 /**
960 * @param cmd command to execute (ECORE_MCAST_CMD_X, see above)
961 */
962 int (*config_mcast)(struct bnx2x_softc *sc,
963 struct ecore_mcast_ramrod_params *p,
964 enum ecore_mcast_cmd cmd);
965
966 /**
967 * Fills the ramrod data during the RESTORE flow.
968 *
969 * @param sc
970 * @param o
971 * @param start_idx Registry index to start from
972 * @param rdata_idx Index in the ramrod data to start from
973 *
974 * @return -1 if we handled the whole registry or index of the last
975 * handled registry element.
976 */
977 int (*hdl_restore)(struct bnx2x_softc *sc, struct ecore_mcast_obj *o,
978 int start_bin, int *rdata_idx);
979
980 int (*enqueue_cmd)(struct bnx2x_softc *sc, struct ecore_mcast_obj *o,
981 struct ecore_mcast_ramrod_params *p,
982 enum ecore_mcast_cmd cmd);
983
984 void (*set_one_rule)(struct bnx2x_softc *sc,
985 struct ecore_mcast_obj *o, int idx,
986 union ecore_mcast_config_data *cfg_data,
987 enum ecore_mcast_cmd cmd);
988
989 /** Checks if there are more mcast MACs to be set or a previous
990 * command is still pending.
991 */
992 int (*check_pending)(struct ecore_mcast_obj *o);
993
994 /**
995 * Set/Clear/Check SCHEDULED state of the object
996 */
997 void (*set_sched)(struct ecore_mcast_obj *o);
998 void (*clear_sched)(struct ecore_mcast_obj *o);
999 int (*check_sched)(struct ecore_mcast_obj *o);
1000
1001 /* Wait until all pending commands complete */
1002 int (*wait_comp)(struct bnx2x_softc *sc, struct ecore_mcast_obj *o);
1003
1004 /**
1005 * Handle the internal object counters needed for proper
1006 * commands handling. Checks that the provided parameters are
1007 * feasible.
1008 */
1009 int (*validate)(struct bnx2x_softc *sc,
1010 struct ecore_mcast_ramrod_params *p,
1011 enum ecore_mcast_cmd cmd);
1012
1013 /**
1014 * Restore the values of internal counters in case of a failure.
1015 */
1016 void (*revert)(struct bnx2x_softc *sc,
1017 struct ecore_mcast_ramrod_params *p,
1018 int old_num_bins);
1019
1020 int (*get_registry_size)(struct ecore_mcast_obj *o);
1021 void (*set_registry_size)(struct ecore_mcast_obj *o, int n);
1022};
1023
1024/*************************** Credit handling **********************************/
1025struct ecore_credit_pool_obj {
1026
1027 /* Current amount of credit in the pool */
1028 ecore_atomic_t credit;
1029
1030 /* Maximum allowed credit. put() will check against it. */
1031 int pool_sz;
1032
1033 /* Allocate a pool table statically.
1034 *
1035 * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272)
1036 *
1037 * The set bit in the table will mean that the entry is available.
1038 */
1039#define ECORE_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64)
1040 uint64_t pool_mirror[ECORE_POOL_VEC_SIZE];
1041
1042 /* Base pool offset (initialized differently */
1043 int base_pool_offset;
1044
1045 /**
1046 * Get the next free pool entry.
1047 *
1048 * @return TRUE if there was a free entry in the pool
1049 */
1050 int (*get_entry)(struct ecore_credit_pool_obj *o, int *entry);
1051
1052 /**
1053 * Return the entry back to the pool.
1054 *
1055 * @return TRUE if entry is legal and has been successfully
1056 * returned to the pool.
1057 */
1058 int (*put_entry)(struct ecore_credit_pool_obj *o, int entry);
1059
1060 /**
1061 * Get the requested amount of credit from the pool.
1062 *
1063 * @param cnt Amount of requested credit
1064 * @return TRUE if the operation is successful
1065 */
1066 int (*get)(struct ecore_credit_pool_obj *o, int cnt);
1067
1068 /**
1069 * Returns the credit to the pool.
1070 *
1071 * @param cnt Amount of credit to return
1072 * @return TRUE if the operation is successful
1073 */
1074 int (*put)(struct ecore_credit_pool_obj *o, int cnt);
1075
1076 /**
1077 * Reads the current amount of credit.
1078 */
1079 int (*check)(struct ecore_credit_pool_obj *o);
1080};
1081
/*************************** RSS configuration ********************************/

/* Bit indices used in ecore_config_rss_params::rss_flags */
enum {
	/* RSS_MODE bits are mutually exclusive */
	ECORE_RSS_MODE_DISABLED,
	ECORE_RSS_MODE_REGULAR,

	ECORE_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */

	/* Per-protocol hashing enable bits */
	ECORE_RSS_IPV4,
	ECORE_RSS_IPV4_TCP,
	ECORE_RSS_IPV4_UDP,
	ECORE_RSS_IPV6,
	ECORE_RSS_IPV6_TCP,
	ECORE_RSS_IPV6_UDP,

	/* Enable tunnel-aware RSS (see tunnel_value/tunnel_mask) */
	ECORE_RSS_TUNNELING,
};
1099
/* Parameters for a single RSS (re)configuration request */
struct ecore_config_rss_params {
	struct ecore_rss_config_obj *rss_obj;

	/* may have RAMROD_COMP_WAIT set only */
	unsigned long ramrod_flags;

	/* ECORE_RSS_X bits */
	unsigned long rss_flags;

	/* Number of hash bits to take into account */
	uint8_t rss_result_mask;

	/* Indirection table */
	uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE];

	/* RSS hash values (320-bit key as ten 32-bit words) */
	uint32_t rss_key[10];

	/* valid only if ECORE_RSS_UPDATE_TOE is set */
	uint16_t toe_rss_bitmap;

	/* valid if ECORE_RSS_TUNNELING is set */
	uint16_t tunnel_value;
	uint16_t tunnel_mask;
};
1125
/* RSS configuration state-machine object */
struct ecore_rss_config_obj {
	struct ecore_raw_obj raw;

	/* RSS engine to use */
	uint8_t engine_id;

	/* Last configured indirection table */
	uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE];

	/* flags for enabling 4-tuple hash on UDP */
	uint8_t udp_rss_v4;
	uint8_t udp_rss_v6;

	/* Sends the RSS configuration ramrod described by @p */
	int (*config_rss)(struct bnx2x_softc *sc,
			  struct ecore_config_rss_params *p);
};
1142
/*********************** Queue state update ***********************************/

/* UPDATE command options.
 * Each option comes in pairs: a value bit and a matching _CHNG bit that
 * marks whether the value bit should actually be applied by this update.
 */
enum {
	ECORE_Q_UPDATE_IN_VLAN_REM,
	ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
	ECORE_Q_UPDATE_OUT_VLAN_REM,
	ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
	ECORE_Q_UPDATE_ANTI_SPOOF,
	ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
	ECORE_Q_UPDATE_ACTIVATE,
	ECORE_Q_UPDATE_ACTIVATE_CHNG,
	ECORE_Q_UPDATE_DEF_VLAN_EN,
	ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
	ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
	ECORE_Q_UPDATE_SILENT_VLAN_REM,
	ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
	ECORE_Q_UPDATE_TX_SWITCHING,
};
1162
/* Allowed Queue states */
enum ecore_q_state {
	ECORE_Q_STATE_RESET,
	ECORE_Q_STATE_INITIALIZED,
	ECORE_Q_STATE_ACTIVE,
	ECORE_Q_STATE_MULTI_COS,
	ECORE_Q_STATE_MCOS_TERMINATED,
	ECORE_Q_STATE_INACTIVE,
	ECORE_Q_STATE_STOPPED,
	ECORE_Q_STATE_TERMINATED,
	ECORE_Q_STATE_FLRED,
	ECORE_Q_STATE_MAX,
};
1176
/* Allowed Queue logical states (coarse view of ecore_q_state) */
enum ecore_q_logical_state {
	ECORE_Q_LOGICAL_STATE_ACTIVE,
	ECORE_Q_LOGICAL_STATE_STOPPED,
};
1182
/* Allowed commands */
enum ecore_queue_cmd {
	ECORE_Q_CMD_INIT,
	ECORE_Q_CMD_SETUP,
	ECORE_Q_CMD_SETUP_TX_ONLY,
	ECORE_Q_CMD_DEACTIVATE,
	ECORE_Q_CMD_ACTIVATE,
	ECORE_Q_CMD_UPDATE,
	ECORE_Q_CMD_UPDATE_TPA,
	ECORE_Q_CMD_HALT,
	ECORE_Q_CMD_CFC_DEL,
	ECORE_Q_CMD_TERMINATE,
	ECORE_Q_CMD_EMPTY,
	ECORE_Q_CMD_MAX,
};
1198
/* queue SETUP + INIT flags */
enum {
	ECORE_Q_FLG_TPA,
	ECORE_Q_FLG_TPA_IPV6,
	ECORE_Q_FLG_TPA_GRO,
	ECORE_Q_FLG_STATS,
	ECORE_Q_FLG_ZERO_STATS,
	ECORE_Q_FLG_ACTIVE,
	ECORE_Q_FLG_OV,
	ECORE_Q_FLG_VLAN,
	ECORE_Q_FLG_COS,
	ECORE_Q_FLG_HC,
	ECORE_Q_FLG_HC_EN,
	ECORE_Q_FLG_DHC,
	ECORE_Q_FLG_OOO,
	ECORE_Q_FLG_FCOE,
	ECORE_Q_FLG_LEADING_RSS,
	ECORE_Q_FLG_MCAST,
	ECORE_Q_FLG_DEF_VLAN,
	ECORE_Q_FLG_TX_SWITCH,
	ECORE_Q_FLG_TX_SEC,
	ECORE_Q_FLG_ANTI_SPOOF,
	ECORE_Q_FLG_SILENT_VLAN_REM,
	ECORE_Q_FLG_FORCE_DEFAULT_PRI,
	ECORE_Q_FLG_REFUSE_OUTBAND_VLAN,
	ECORE_Q_FLG_PCSUM_ON_PKT,
	ECORE_Q_FLG_TUN_INC_INNER_IP_ID
};
1227
/* Queue type options: queue type may be a combination of below. */
enum ecore_q_type {
	ECORE_Q_TYPE_FWD,
	ECORE_Q_TYPE_HAS_RX,
	ECORE_Q_TYPE_HAS_TX,
};
1234
/* Index of the primary (first) connection within a queue object's cids[] */
#define ECORE_PRIMARY_CID_INDEX 0
/* Maximum number of traffic classes (CoS) per chip family */
#define ECORE_MULTI_TX_COS_E1X 3 /* QM only */
#define ECORE_MULTI_TX_COS_E2_E3A0 2
#define ECORE_MULTI_TX_COS_E3B0 3
#define ECORE_MULTI_TX_COS 3 /* Maximum possible */
/* Bytes needed to pad an Ethernet address up to a 32-bit boundary */
#define MAC_PAD (ECORE_ALIGN(ETH_ALEN, sizeof(uint32_t)) - ETH_ALEN)
1241
/* Parameters for the ECORE_Q_CMD_INIT command */
struct ecore_queue_init_params {
	struct {
		unsigned long flags;
		uint16_t hc_rate;	/* host-coalescing interrupt rate */
		uint8_t fw_sb_id;	/* FW status block id */
		uint8_t sb_cq_index;	/* CQ index within the status block */
	} tx;

	struct {
		unsigned long flags;
		uint16_t hc_rate;	/* host-coalescing interrupt rate */
		uint8_t fw_sb_id;	/* FW status block id */
		uint8_t sb_cq_index;	/* CQ index within the status block */
	} rx;

	/* CID context in the host memory */
	struct eth_context *cxts[ECORE_MULTI_TX_COS];

	/* maximum number of cos supported by hardware */
	uint8_t max_cos;
};
1263
/* Parameters for the ECORE_Q_CMD_TERMINATE command */
struct ecore_queue_terminate_params {
	/* index within the tx_only cids of this queue object */
	uint8_t cid_index;
};
1268
/* Parameters for the ECORE_Q_CMD_CFC_DEL command */
struct ecore_queue_cfc_del_params {
	/* index within the tx_only cids of this queue object */
	uint8_t cid_index;
};
1273
/* Parameters for the ECORE_Q_CMD_UPDATE command */
struct ecore_queue_update_params {
	unsigned long update_flags; /* ECORE_Q_UPDATE_XX bits */
	uint16_t def_vlan;
	uint16_t silent_removal_value;
	uint16_t silent_removal_mask;
	/* index within the tx_only cids of this queue object */
	uint8_t cid_index;
};
1282
/* RX queue flow-control (pause) thresholds */
struct rxq_pause_params {
	uint16_t bd_th_lo;	/* BD ring low/high watermarks */
	uint16_t bd_th_hi;
	uint16_t rcq_th_lo;	/* RCQ ring low/high watermarks */
	uint16_t rcq_th_hi;
	uint16_t sge_th_lo;	/* valid if ECORE_Q_FLG_TPA */
	uint16_t sge_th_hi;	/* valid if ECORE_Q_FLG_TPA */
	uint16_t pri_map;
};
1292
/* general */
struct ecore_general_setup_params {
	/* valid if ECORE_Q_FLG_STATS */
	uint8_t stat_id;

	uint8_t spcl_id;
	uint16_t mtu;
	uint8_t cos;
};
1302
/* RX-queue half of the SETUP command parameters */
struct ecore_rxq_setup_params {
	/* dma */
	ecore_dma_addr_t dscr_map;	/* RX BD descriptor ring */
	ecore_dma_addr_t rcq_map;	/* RX completion queue ring */
	ecore_dma_addr_t rcq_np_map;	/* RCQ "next page" ring */

	uint16_t drop_flags;
	uint16_t buf_sz;
	uint8_t fw_sb_id;
	uint8_t cl_qzone_id;

	/* valid if ECORE_Q_FLG_TPA */
	uint16_t tpa_agg_sz;
	uint8_t max_tpa_queues;
	uint8_t rss_engine_id;

	/* valid if ECORE_Q_FLG_MCAST */
	uint8_t mcast_engine_id;

	uint8_t cache_line_log;

	uint8_t sb_cq_index;

	/* valid if ECORE_Q_FLG_SILENT_VLAN_REM */
	uint16_t silent_removal_value;
	uint16_t silent_removal_mask;
};
1330
/* TX-queue half of the SETUP command parameters */
struct ecore_txq_setup_params {
	/* dma */
	ecore_dma_addr_t dscr_map;	/* TX BD descriptor ring */

	uint8_t fw_sb_id;
	uint8_t sb_cq_index;
	uint8_t cos;		/* valid if ECORE_Q_FLG_COS */
	uint16_t traffic_type;
	/* equals to the leading rss client id, used for TX classification*/
	uint8_t tss_leading_cl_id;

	/* valid if ECORE_Q_FLG_DEF_VLAN */
	uint16_t default_vlan;
};
1345
/* Parameters for the ECORE_Q_CMD_SETUP command */
struct ecore_queue_setup_params {
	struct ecore_general_setup_params gen_params;
	struct ecore_txq_setup_params txq_params;
	struct ecore_rxq_setup_params rxq_params;
	struct rxq_pause_params pause_params;
	unsigned long flags;	/* ECORE_Q_FLG_XX bits */
};
1353
/* Parameters for the ECORE_Q_CMD_SETUP_TX_ONLY command */
struct ecore_queue_setup_tx_only_params {
	struct ecore_general_setup_params gen_params;
	struct ecore_txq_setup_params txq_params;
	unsigned long flags;	/* ECORE_Q_FLG_XX bits */
	/* index within the tx_only cids of this queue object */
	uint8_t cid_index;
};
1361
/* Top-level argument for ecore_queue_state_change() */
struct ecore_queue_state_params {
	struct ecore_queue_sp_obj *q_obj;

	/* Current command */
	enum ecore_queue_cmd cmd;

	/* may have RAMROD_COMP_WAIT set only */
	unsigned long ramrod_flags;

	/* Params according to the current command */
	union {
		struct ecore_queue_update_params update;
		struct ecore_queue_setup_params setup;
		struct ecore_queue_init_params init;
		struct ecore_queue_setup_tx_only_params tx_only;
		struct ecore_queue_terminate_params terminate;
		struct ecore_queue_cfc_del_params cfc_del;
	} params;
};
1381
/* Result fields of an AFEX VIF-list ramrod completion */
struct ecore_viflist_params {
	uint8_t echo_res;
	uint8_t func_bit_map_res;
};
1386
/* Queue slow-path state-machine object: tracks the state of one ETH queue
 * (its primary connection plus any tx-only connections) and drives state
 * transitions via ramrods.
 */
struct ecore_queue_sp_obj {
	uint32_t cids[ECORE_MULTI_TX_COS];
	uint8_t cl_id;
	uint8_t func_id;

	/* number of traffic classes supported by queue.
	 * The primary connection of the queue supports the first traffic
	 * class. Any further traffic class is supported by a tx-only
	 * connection.
	 *
	 * Therefore max_cos is also a number of valid entries in the cids
	 * array.
	 */
	uint8_t max_cos;
	uint8_t num_tx_only, next_tx_only;

	enum ecore_q_state state, next_state;

	/* bits from enum ecore_q_type */
	unsigned long type;

	/* ECORE_Q_CMD_XX bits. This object implements "one
	 * pending" paradigm but for debug and tracing purposes it's
	 * more convenient to have different bits for different
	 * commands.
	 */
	unsigned long pending;

	/* Buffer to use as a ramrod data and its mapping */
	void *rdata;
	ecore_dma_addr_t rdata_mapping;

	/**
	 * Performs one state change according to the given parameters.
	 *
	 * @return 0 in case of success and negative value otherwise.
	 */
	int (*send_cmd)(struct bnx2x_softc *sc,
			struct ecore_queue_state_params *params);

	/**
	 * Sets the pending bit according to the requested transition.
	 */
	int (*set_pending)(struct ecore_queue_sp_obj *o,
			   struct ecore_queue_state_params *params);

	/**
	 * Checks that the requested state transition is legal.
	 */
	int (*check_transition)(struct bnx2x_softc *sc,
				struct ecore_queue_sp_obj *o,
				struct ecore_queue_state_params *params);

	/**
	 * Completes the pending command.
	 */
	int (*complete_cmd)(struct bnx2x_softc *sc,
			    struct ecore_queue_sp_obj *o,
			    enum ecore_queue_cmd);

	/**
	 * Waits until the pending command completes.
	 */
	int (*wait_comp)(struct bnx2x_softc *sc,
			 struct ecore_queue_sp_obj *o,
			 enum ecore_queue_cmd cmd);
};
1451
/********************** Function state update *********************************/
/* Allowed Function states */
enum ecore_func_state {
	ECORE_F_STATE_RESET,
	ECORE_F_STATE_INITIALIZED,
	ECORE_F_STATE_STARTED,
	ECORE_F_STATE_TX_STOPPED,
	ECORE_F_STATE_MAX,
};
1461
/* Allowed Function commands */
enum ecore_func_cmd {
	ECORE_F_CMD_HW_INIT,
	ECORE_F_CMD_START,
	ECORE_F_CMD_STOP,
	ECORE_F_CMD_HW_RESET,
	ECORE_F_CMD_AFEX_UPDATE,
	ECORE_F_CMD_AFEX_VIFLISTS,
	ECORE_F_CMD_TX_STOP,
	ECORE_F_CMD_TX_START,
	ECORE_F_CMD_SWITCH_UPDATE,
	ECORE_F_CMD_MAX,
};
1475
/* Parameters for the ECORE_F_CMD_HW_INIT command */
struct ecore_func_hw_init_params {
	/* A load phase returned by MCP.
	 *
	 * May be:
	 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
	 * FW_MSG_CODE_DRV_LOAD_COMMON
	 * FW_MSG_CODE_DRV_LOAD_PORT
	 * FW_MSG_CODE_DRV_LOAD_FUNCTION
	 */
	uint32_t load_phase;
};
1487
/* Parameters for the ECORE_F_CMD_HW_RESET command */
struct ecore_func_hw_reset_params {
	/* A load phase returned by MCP.
	 *
	 * May be:
	 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
	 * FW_MSG_CODE_DRV_LOAD_COMMON
	 * FW_MSG_CODE_DRV_LOAD_PORT
	 * FW_MSG_CODE_DRV_LOAD_FUNCTION
	 */
	uint32_t reset_phase;
};
1499
/* Parameters for the ECORE_F_CMD_START command */
struct ecore_func_start_params {
	/* Multi Function mode:
	 *	- Single Function
	 *	- Switch Dependent
	 *	- Switch Independent
	 */
	uint16_t mf_mode;

	/* Switch Dependent mode outer VLAN tag */
	uint16_t sd_vlan_tag;

	/* Function cos mode */
	uint8_t network_cos_mode;

	/* NVGRE classification enablement */
	uint8_t nvgre_clss_en;

	/* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
	uint8_t gre_tunnel_mode;

	/* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */
	uint8_t gre_tunnel_rss;

};
1524
/* Parameters for the ECORE_F_CMD_SWITCH_UPDATE command */
struct ecore_func_switch_update_params {
	uint8_t suspend;
};
1528
/* Parameters for the ECORE_F_CMD_AFEX_UPDATE command */
struct ecore_func_afex_update_params {
	uint16_t vif_id;
	uint16_t afex_default_vlan;
	uint8_t allowed_priorities;
};
1534
/* Parameters for the ECORE_F_CMD_AFEX_VIFLISTS command */
struct ecore_func_afex_viflists_params {
	uint16_t vif_list_index;
	uint8_t func_bit_map;
	uint8_t afex_vif_list_command;
	uint8_t func_to_clear;
};
/* Parameters for the ECORE_F_CMD_TX_START command (DCB configuration) */
struct ecore_func_tx_start_params {
	struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
	uint8_t dcb_enabled;
	uint8_t dcb_version;
	uint8_t dont_add_pri_0;
};
1547
/* Top-level argument for ecore_func_state_change() */
struct ecore_func_state_params {
	struct ecore_func_sp_obj *f_obj;

	/* Current command */
	enum ecore_func_cmd cmd;

	/* may have RAMROD_COMP_WAIT set only */
	unsigned long ramrod_flags;

	/* Params according to the current command */
	union {
		struct ecore_func_hw_init_params hw_init;
		struct ecore_func_hw_reset_params hw_reset;
		struct ecore_func_start_params start;
		struct ecore_func_switch_update_params switch_update;
		struct ecore_func_afex_update_params afex_update;
		struct ecore_func_afex_viflists_params afex_viflists;
		struct ecore_func_tx_start_params tx_start;
	} params;
};
1568
/* Driver-supplied callbacks used by the function state machine to perform
 * the actual HW/FW work for each transition.
 */
struct ecore_func_sp_drv_ops {
	/* Init tool + runtime initialization:
	 *      - Common Chip
	 *      - Common (per Path)
	 *      - Port
	 *      - Function phases
	 */
	int (*init_hw_cmn_chip)(struct bnx2x_softc *sc);
	int (*init_hw_cmn)(struct bnx2x_softc *sc);
	int (*init_hw_port)(struct bnx2x_softc *sc);
	int (*init_hw_func)(struct bnx2x_softc *sc);

	/* Reset Function HW: Common, Port, Function phases. */
	void (*reset_hw_cmn)(struct bnx2x_softc *sc);
	void (*reset_hw_port)(struct bnx2x_softc *sc);
	void (*reset_hw_func)(struct bnx2x_softc *sc);

	/* Prepare/Release FW resources */
	int (*init_fw)(struct bnx2x_softc *sc);
	void (*release_fw)(struct bnx2x_softc *sc);
};
1590
/* Function slow-path state-machine object: tracks the state of the whole
 * PCI function and drives transitions via ramrods and the driver ops.
 */
struct ecore_func_sp_obj {
	enum ecore_func_state state, next_state;

	/* ECORE_FUNC_CMD_XX bits. This object implements "one
	 * pending" paradigm but for debug and tracing purposes it's
	 * more convenient to have different bits for different
	 * commands.
	 */
	unsigned long pending;

	/* Buffer to use as a ramrod data and its mapping */
	void *rdata;
	ecore_dma_addr_t rdata_mapping;

	/* Buffer to use as an afex ramrod data and its mapping.
	 * This can't be same rdata as above because afex ramrod requests
	 * can arrive to the object in parallel to other ramrod requests.
	 */
	void *afex_rdata;
	ecore_dma_addr_t afex_rdata_mapping;

	/* this mutex validates that when pending flag is taken, the next
	 * ramrod to be sent will be the one set the pending bit
	 */
	ECORE_MUTEX one_pending_mutex;

	/* Driver interface */
	struct ecore_func_sp_drv_ops *drv;

	/**
	 * Performs one state change according to the given parameters.
	 *
	 * @return 0 in case of success and negative value otherwise.
	 */
	int (*send_cmd)(struct bnx2x_softc *sc,
			struct ecore_func_state_params *params);

	/**
	 * Checks that the requested state transition is legal.
	 */
	int (*check_transition)(struct bnx2x_softc *sc,
				struct ecore_func_sp_obj *o,
				struct ecore_func_state_params *params);

	/**
	 * Completes the pending command.
	 */
	int (*complete_cmd)(struct bnx2x_softc *sc,
			    struct ecore_func_sp_obj *o,
			    enum ecore_func_cmd cmd);

	/**
	 * Waits until the pending command completes.
	 */
	int (*wait_comp)(struct bnx2x_softc *sc, struct ecore_func_sp_obj *o,
			 enum ecore_func_cmd cmd);
};
1645
/********************** Interfaces ********************************************/
/* Queueable objects set */
union ecore_qable_obj {
	struct ecore_vlan_mac_obj vlan_mac;
};
/************** Function state update *********/

/* Initializes a function state-machine object (see ecore_func_sp_obj) */
void ecore_init_func_obj(struct bnx2x_softc *sc,
			 struct ecore_func_sp_obj *obj,
			 void *rdata, ecore_dma_addr_t rdata_mapping,
			 void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
			 struct ecore_func_sp_drv_ops *drv_iface);

/* Executes one function state transition described by @params */
int ecore_func_state_change(struct bnx2x_softc *sc,
			    struct ecore_func_state_params *params);

/* Returns the current state of the function object */
enum ecore_func_state ecore_func_get_state(struct bnx2x_softc *sc,
					   struct ecore_func_sp_obj *o);

/******************* Queue State **************/

/* Initializes a queue state-machine object (see ecore_queue_sp_obj) */
void ecore_init_queue_obj(struct bnx2x_softc *sc,
			  struct ecore_queue_sp_obj *obj, uint8_t cl_id, uint32_t *cids,
			  uint8_t cid_cnt, uint8_t func_id, void *rdata,
			  ecore_dma_addr_t rdata_mapping, unsigned long type);

/* Executes one queue state transition described by @params */
int ecore_queue_state_change(struct bnx2x_softc *sc,
			     struct ecore_queue_state_params *params);
1671
/********************* VLAN-MAC ****************/

/* Initializes a MAC classification rules object */
void ecore_init_mac_obj(struct bnx2x_softc *sc,
			struct ecore_vlan_mac_obj *mac_obj,
			uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
			ecore_dma_addr_t rdata_mapping, int state,
			unsigned long *pstate, ecore_obj_type type,
			struct ecore_credit_pool_obj *macs_pool);

/* Reader/writer locking of the vlan_mac rules list */
void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
				  struct ecore_vlan_mac_obj *o);
int ecore_vlan_mac_h_write_lock(struct bnx2x_softc *sc,
				struct ecore_vlan_mac_obj *o);
void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
				   struct ecore_vlan_mac_obj *o);

/* Adds/deletes a classification rule according to @p */
int ecore_config_vlan_mac(struct bnx2x_softc *sc,
			  struct ecore_vlan_mac_ramrod_params *p);

/* Moves a rule from one classification object to @dest_o */
int ecore_vlan_mac_move(struct bnx2x_softc *sc,
			struct ecore_vlan_mac_ramrod_params *p,
			struct ecore_vlan_mac_obj *dest_o);
1692
/********************* RX MODE ****************/

/* Initializes an RX-mode object */
void ecore_init_rx_mode_obj(struct bnx2x_softc *sc,
			    struct ecore_rx_mode_obj *o);

/**
 * ecore_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
 *
 * @p: Command parameters
 *
 * Return: 0 - if operation was successful and there is no pending completions,
 *         positive number - if there are pending completions,
 *         negative - if there were errors
 */
int ecore_config_rx_mode(struct bnx2x_softc *sc,
			 struct ecore_rx_mode_ramrod_params *p);
1709
/****************** MULTICASTS ****************/

/* Initializes a multicast configuration object */
void ecore_init_mcast_obj(struct bnx2x_softc *sc,
			  struct ecore_mcast_obj *mcast_obj,
			  uint8_t mcast_cl_id, uint32_t mcast_cid, uint8_t func_id,
			  uint8_t engine_id, void *rdata, ecore_dma_addr_t rdata_mapping,
			  int state, unsigned long *pstate,
			  ecore_obj_type type);

/**
 * ecore_config_mcast - Configure multicast MACs list.
 *
 * @cmd: command to execute: BNX2X_MCAST_CMD_X
 *
 * May configure a new list
 * provided in p->mcast_list (ECORE_MCAST_CMD_ADD), clean up
 * (ECORE_MCAST_CMD_DEL) or restore (ECORE_MCAST_CMD_RESTORE) a current
 * configuration, continue to execute the pending commands
 * (ECORE_MCAST_CMD_CONT).
 *
 * If previous command is still pending or if number of MACs to
 * configure is more than maximum number of MACs in one command,
 * the current command will be enqueued to the tail of the
 * pending commands list.
 *
 * Return: 0 if operation was successful and there are no pending completions,
 *         negative if there were errors, positive if there are pending
 *         completions.
 */
int ecore_config_mcast(struct bnx2x_softc *sc,
		       struct ecore_mcast_ramrod_params *p,
		       enum ecore_mcast_cmd cmd);
1742
/****************** CREDIT POOL ****************/

/* Initialize the MAC/VLAN credit pools for this function */
void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
				struct ecore_credit_pool_obj *p, uint8_t func_id,
				uint8_t func_num);
void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc,
				 struct ecore_credit_pool_obj *p, uint8_t func_id,
				 uint8_t func_num);
1750
/****************** RSS CONFIGURATION ****************/

/* Initializes an RSS configuration object */
void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj,
			       uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id,
			       void *rdata, ecore_dma_addr_t rdata_mapping,
			       int state, unsigned long *pstate,
			       ecore_obj_type type);

/**
 * ecore_config_rss - Updates RSS configuration according to provided parameters
 *
 * Return: 0 in case of success
 */
int ecore_config_rss(struct bnx2x_softc *sc,
		     struct ecore_config_rss_params *p);


#endif /* ECORE_SP_H */