/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 * All rights reserved.
 */

/* This file should not be included directly. Include common.h instead. */

#ifndef __T4_ADAPTER_H__
#define __T4_ADAPTER_H__

#include <rte_bus_pci.h>
#include <rte_mbuf.h>
#include <rte_io.h>
#include <rte_rwlock.h>
#include <rte_ethdev.h>

#include "../cxgbe_compat.h"
#include "../cxgbe_ofld.h"
#include "t4_regs_values.h"

enum {
        MAX_ETH_QSETS = 64,            /* # of Ethernet Tx/Rx queue sets */
        MAX_CTRL_QUEUES = NCHAN,       /* # of control Tx queues */
};

struct adapter;
struct sge_rspq;

enum {
        PORT_RSS_DONE = (1 << 0),
};

struct port_info {
        struct adapter *adapter;       /* adapter that this port belongs to */
        struct rte_eth_dev *eth_dev;   /* associated rte eth device */
        struct port_stats stats_base;  /* port statistics base */
        struct link_config link_cfg;   /* link configuration info */

        unsigned long flags;           /* port related flags */
        short int xact_addr_filt;      /* index of exact MAC address filter */

        u16 viid;                      /* associated virtual interface id */
        s8 mdio_addr;                  /* address of the PHY */
        u8 port_type;                  /* firmware port type */
        u8 mod_type;                   /* firmware module type */
        u8 port_id;                    /* physical port ID */
        u8 pidx;                       /* port index for this PF */
        u8 tx_chan;                    /* associated channel */

        u8 n_rx_qsets;                 /* # of rx qsets */
        u8 n_tx_qsets;                 /* # of tx qsets */
        u8 first_qset;                 /* index of first qset */

        u16 *rss;                      /* rss table */
        u8 rss_mode;                   /* rss mode */
        u16 rss_size;                  /* size of VI's RSS table slice */
        u64 rss_hf;                    /* RSS Hash Function */

        /* viid subfields, either returned by the firmware or decoded
         * from the viid by the driver.
         */
        u8 vin;
        u8 vivld;
};

/* Enable or disable autonegotiation. When autonegotiation is enabled,
 * any forced link parameters are completely ignored.
 */
#define AUTONEG_DISABLE 0x00
#define AUTONEG_ENABLE  0x01

enum {                                 /* adapter flags */
        FULL_INIT_DONE = (1 << 0),
        USING_MSI = (1 << 1),
        USING_MSIX = (1 << 2),
        FW_QUEUE_BOUND = (1 << 3),
        FW_OK = (1 << 4),
        CFG_QUEUES = (1 << 5),
        MASTER_PF = (1 << 6),
};

struct rx_sw_desc {                    /* SW state per Rx descriptor */
        void *buf;                     /* struct page or mbuf */
        dma_addr_t dma_addr;
};

struct sge_fl {                        /* SGE free-buffer queue state */
        /* RO fields */
        struct rx_sw_desc *sdesc;      /* address of SW Rx descriptor ring */

        dma_addr_t addr;               /* bus address of HW ring start */
        __be64 *desc;                  /* address of HW Rx descriptor ring */

        void __iomem *bar2_addr;       /* address of BAR2 Queue registers */
        unsigned int bar2_qid;         /* Queue ID for BAR2 Queue registers */

        unsigned int cntxt_id;         /* SGE relative QID for the free list */
        unsigned int size;             /* capacity of free list */

        unsigned int avail;            /* # of available Rx buffers */
        unsigned int pend_cred;        /* new buffers since last FL DB ring */
        unsigned int cidx;             /* consumer index */
        unsigned int pidx;             /* producer index */

        unsigned long alloc_failed;    /* # of times buffer allocation failed */
        unsigned long low;             /* # of times momentarily starving */
};

#define MAX_MBUF_FRAGS (16384 / 512 + 2)
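
/* Editorial note on the sizing above: the expression presumably budgets
 * for a 16KB maximum coalesced payload split into worst-case 512-byte
 * free-list fragments (16384 / 512 = 32 slots), plus 2 spare slots for
 * partial fragments at the head and tail of the buffer chain.
 */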

/* A packet gather list */
struct pkt_gl {
        union {
                struct rte_mbuf *mbufs[MAX_MBUF_FRAGS];
        } /* UNNAMED */;
        void *va;                      /* virtual address of first byte */
        unsigned int nfrags;           /* # of fragments */
        unsigned int tot_len;          /* total length of fragments */
        bool usembufs;                 /* use mbufs for fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
                              const struct pkt_gl *gl);

struct sge_rspq {                      /* state for an SGE response queue */
        struct adapter *adapter;       /* adapter that this queue belongs to */
        struct rte_eth_dev *eth_dev;   /* associated rte eth device */
        struct rte_mempool *mb_pool;   /* associated mempool */

        dma_addr_t phys_addr;          /* physical address of the ring */
        __be64 *desc;                  /* address of HW response ring */
        const __be64 *cur_desc;        /* current descriptor in queue */

        void __iomem *bar2_addr;       /* address of BAR2 Queue registers */
        unsigned int bar2_qid;         /* Queue ID for BAR2 Queue registers */
        struct sge_qstat *stat;

        unsigned int cidx;             /* consumer index */
        unsigned int gts_idx;          /* last gts write sent */
        unsigned int iqe_len;          /* entry size */
        unsigned int size;             /* capacity of response queue */
        int offset;                    /* offset into current Rx buffer */

        u8 gen;                        /* current generation bit */
        u8 intr_params;                /* interrupt holdoff parameters */
        u8 next_intr_params;           /* holdoff params for next interrupt */
        u8 pktcnt_idx;                 /* interrupt packet threshold */
        u8 port_id;                    /* associated port-id */
        u8 idx;                        /* queue index within its group */
        u16 cntxt_id;                  /* SGE relative QID for the response Q */
        u16 abs_id;                    /* absolute SGE id for the response q */

        rspq_handler_t handler;        /* associated handler for this response q */
};

struct sge_eth_rx_stats {              /* Ethernet rx queue statistics */
        u64 pkts;                      /* # of ethernet packets */
        u64 rx_bytes;                  /* # of ethernet bytes */
        u64 rx_cso;                    /* # of Rx checksum offloads */
        u64 vlan_ex;                   /* # of Rx VLAN extractions */
        u64 rx_drops;                  /* # of packets dropped due to no mem */
};

struct sge_eth_rxq {                   /* a SW Ethernet Rx queue */
        struct sge_rspq rspq;
        struct sge_fl fl;
        struct sge_eth_rx_stats stats;
        bool usembufs;                 /* one ingress packet per mbuf FL buffer */
} __rte_cache_aligned;

/*
 * Currently there are two types of coalesce WR. Type 0 needs 48 bytes per
 * packet (if one sgl is present) and type 1 needs 32 bytes. This means
 * that type 0 can fit a maximum of 10 packets per WR and type 1 can fit
 * 15 packets. We need to keep track of the mbuf pointers in a coalesce WR
 * to be able to free those mbufs when we get completions back from the FW.
 * Allocating the maximum number of pointers in every tx desc is a waste
 * of memory resources so we only store 2 pointers per tx desc which should
 * be enough since a tx desc can only fit 2 packets in the best case
 * scenario where a packet needs 32 bytes.
 */
#define ETH_COALESCE_PKT_NUM 15
#define ETH_COALESCE_VF_PKT_NUM 7
#define ETH_COALESCE_PKT_PER_DESC 2
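
/* Editorial sketch of the arithmetic above: a tx_desc holds 8 flits of
 * 8 bytes each, i.e. 64 bytes, and both WR types work out to roughly
 * 480 bytes of packet space per coalesce WR (10 * 48 = 15 * 32 = 480).
 * In the best case a 64-byte descriptor therefore carries 64 / 32 = 2
 * packets, which is why 2 mbuf pointers per descriptor suffice.
 */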

struct tx_eth_coal_desc {
        struct rte_mbuf *mbuf[ETH_COALESCE_PKT_PER_DESC];
        struct ulptx_sgl *sgl[ETH_COALESCE_PKT_PER_DESC];
        int idx;
};

struct tx_desc {
        __be64 flit[8];
};

struct tx_sw_desc {                    /* SW state per Tx descriptor */
        struct rte_mbuf *mbuf;
        struct ulptx_sgl *sgl;
        struct tx_eth_coal_desc coalesce;
};

enum {
        EQ_STOPPED = (1 << 0),
};

struct eth_coalesce {
        unsigned char *ptr;
        unsigned char type;
        unsigned int idx;
        unsigned int len;
        unsigned int flits;
        unsigned int max;
        __u8 ethmacdst[ETHER_ADDR_LEN];
        __u8 ethmacsrc[ETHER_ADDR_LEN];
        __be16 ethtype;
        __be16 vlantci;
};

struct sge_txq {
        struct tx_desc *desc;          /* address of HW Tx descriptor ring */
        struct tx_sw_desc *sdesc;      /* address of SW Tx descriptor ring */
        struct sge_qstat *stat;        /* queue status entry */
        struct eth_coalesce coalesce;  /* coalesce info */

        uint64_t phys_addr;            /* physical address of the ring */

        void __iomem *bar2_addr;       /* address of BAR2 Queue registers */
        unsigned int bar2_qid;         /* Queue ID for BAR2 Queue registers */

        unsigned int cntxt_id;         /* SGE relative QID for the Tx Q */
        unsigned int in_use;           /* # of in-use Tx descriptors */
        unsigned int size;             /* # of descriptors */
        unsigned int cidx;             /* SW consumer index */
        unsigned int pidx;             /* producer index */
        unsigned int dbidx;            /* last idx when db ring was done */
        unsigned int equeidx;          /* last sent credit request */
        unsigned int last_pidx;        /* last pidx recorded by tx monitor */
        unsigned int last_coal_idx;    /* last coal-idx recorded by tx monitor */
        unsigned int abs_id;

        int db_disabled;               /* doorbell state */
        unsigned short db_pidx;        /* doorbell producer index */
        unsigned short db_pidx_inc;    /* doorbell producer increment */
};

struct sge_eth_tx_stats {              /* Ethernet tx queue statistics */
        u64 pkts;                      /* # of ethernet packets */
        u64 tx_bytes;                  /* # of ethernet bytes */
        u64 tso;                       /* # of TSO requests */
        u64 tx_cso;                    /* # of Tx checksum offloads */
        u64 vlan_ins;                  /* # of Tx VLAN insertions */
        u64 mapping_err;               /* # of I/O MMU packet mapping errors */
        u64 coal_wr;                   /* # of coalesced wr */
        u64 coal_pkts;                 /* # of coalesced packets */
};

struct sge_eth_txq {                   /* state for an SGE Ethernet Tx queue */
        struct sge_txq q;
        struct rte_eth_dev *eth_dev;   /* port that this queue belongs to */
        struct rte_eth_dev_data *data;
        struct sge_eth_tx_stats stats; /* queue statistics */
        rte_spinlock_t txq_lock;

        unsigned int flags;            /* flags for state of the queue */
} __rte_cache_aligned;

struct sge_ctrl_txq {                  /* state for an SGE control Tx queue */
        struct sge_txq q;              /* txq */
        struct adapter *adapter;       /* adapter associated with this queue */
        rte_spinlock_t ctrlq_lock;     /* control queue lock */
        u8 full;                       /* the Tx ring is full */
        u64 txp;                       /* number of transmits */
        struct rte_mempool *mb_pool;   /* mempool to generate ctrl pkts */
} __rte_cache_aligned;

struct sge {
        struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
        struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
        struct sge_rspq fw_evtq __rte_cache_aligned;
        struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];

        u16 max_ethqsets;              /* # of available Ethernet queue sets */
        u32 stat_len;                  /* length of status page at ring end */
        u32 pktshift;                  /* padding between CPL & packet data */

        /* response queue interrupt parameters */
        u16 timer_val[SGE_NTIMERS];
        u8 counter_val[SGE_NCOUNTERS];

        u32 fl_align;                  /* response queue message alignment */
        u32 fl_pg_order;               /* large page allocation size */
        u32 fl_starve_thres;           /* Free List starvation threshold */
};

#define T4_OS_NEEDS_MBOX_LOCKING 1

/*
 * OS Lock/List primitives for those interfaces in the Common Code which
 * need this.
 */

struct mbox_entry {
        TAILQ_ENTRY(mbox_entry) next;
};

TAILQ_HEAD(mbox_list, mbox_entry);
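
/* Minimal usage sketch (editorial; the helpers referenced here are
 * defined further down in this header). Mailbox access is serialized
 * by enqueueing an entry and proceeding only once it reaches the list
 * head, then dequeueing when done:
 *
 *      struct mbox_entry entry;
 *
 *      t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);
 *      while (t4_os_list_first_entry(&adap->mbox_list) != &entry)
 *              rte_delay_ms(1);       // wait for our turn at the mailbox
 *      // ... program the mailbox registers ...
 *      t4_os_atomic_list_del(&entry, &adap->mbox_list, &adap->mbox_lock);
 */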

struct adapter_devargs {
        bool keep_ovlan;
        bool force_link_up;
        bool tx_mode_latency;
        u32 filtermode;
        u32 filtermask;
};

struct adapter {
        struct rte_pci_device *pdev;   /* associated rte pci device */
        struct rte_eth_dev *eth_dev;   /* first port's rte eth device */
        struct adapter_params params;  /* adapter parameters */
        struct port_info *port[MAX_NPORTS]; /* ports belonging to this adapter */
        struct sge sge;                /* associated SGE */

        /* support for single-threading access to adapter mailbox registers */
        struct mbox_list mbox_list;
        rte_spinlock_t mbox_lock;

        u8 *regs;                      /* pointer to registers region */
        u8 *bar2;                      /* pointer to bar2 region */
        unsigned long flags;           /* adapter flags */
        unsigned int mbox;             /* associated mailbox */
        unsigned int pf;               /* associated physical function id */

        unsigned int vpd_busy;
        unsigned int vpd_flag;

        int use_unpacked_mode;         /* unpacked rx mode state */
        rte_spinlock_t win0_lock;

        rte_spinlock_t flow_lock;      /* Serialize access for rte_flow ops */

        unsigned int clipt_start;      /* CLIP table start */
        unsigned int clipt_end;        /* CLIP table end */
        unsigned int l2t_start;        /* Layer 2 table start */
        unsigned int l2t_end;          /* Layer 2 table end */
        struct clip_tbl *clipt;        /* CLIP table */
        struct l2t_data *l2t;          /* Layer 2 table */
        struct smt_data *smt;          /* Source MAC table */
        struct mpstcam_table *mpstcam;

        struct tid_info tids;          /* Info used to access TID related tables */

        struct adapter_devargs devargs;
};

/**
 * t4_os_rwlock_init - initialize rwlock
 * @lock: the rwlock
 */
static inline void t4_os_rwlock_init(rte_rwlock_t *lock)
{
        rte_rwlock_init(lock);
}

/**
 * t4_os_write_lock - get a write lock
 * @lock: the rwlock
 */
static inline void t4_os_write_lock(rte_rwlock_t *lock)
{
        rte_rwlock_write_lock(lock);
}

/**
 * t4_os_write_unlock - unlock a write lock
 * @lock: the rwlock
 */
static inline void t4_os_write_unlock(rte_rwlock_t *lock)
{
        rte_rwlock_write_unlock(lock);
}

/**
 * ethdev2pinfo - return the port_info structure associated with a rte_eth_dev
 * @dev: the rte_eth_dev
 *
 * Return the struct port_info associated with a rte_eth_dev
 */
static inline struct port_info *ethdev2pinfo(const struct rte_eth_dev *dev)
{
        return dev->data->dev_private;
}

/**
 * adap2pinfo - return the port_info of a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Return the port_info structure for the port of the given index.
 */
static inline struct port_info *adap2pinfo(const struct adapter *adap, int idx)
{
        return adap->port[idx];
}

/**
 * ethdev2adap - return the adapter structure associated with a rte_eth_dev
 * @dev: the rte_eth_dev
 *
 * Return the struct adapter associated with a rte_eth_dev
 */
static inline struct adapter *ethdev2adap(const struct rte_eth_dev *dev)
{
        return ethdev2pinfo(dev)->adapter;
}

#define CXGBE_PCI_REG(reg) rte_read32(reg)

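/* 64-bit MMIO registers are accessed as two 32-bit operations: the low
 * word at the register address, then the high word at address + 4. The
 * two halves are therefore not read or written atomically with respect
 * to concurrent hardware updates of the full 64-bit value.
 */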
static inline uint64_t cxgbe_read_addr64(volatile void *addr)
{
        uint64_t val = CXGBE_PCI_REG(addr);
        uint64_t val2 = CXGBE_PCI_REG(((volatile uint8_t *)(addr) + 4));

        /* Combine the low word with the high word read from addr + 4. */
        return val | (val2 << 32);
}

static inline uint32_t cxgbe_read_addr(volatile void *addr)
{
        return CXGBE_PCI_REG(addr);
}

#define CXGBE_PCI_REG_ADDR(adap, reg) \
        ((volatile uint32_t *)((char *)(adap)->regs + (reg)))

#define CXGBE_READ_REG(adap, reg) \
        cxgbe_read_addr(CXGBE_PCI_REG_ADDR((adap), (reg)))

#define CXGBE_READ_REG64(adap, reg) \
        cxgbe_read_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)))

#define CXGBE_PCI_REG_WRITE(reg, value) rte_write32((value), (reg))

#define CXGBE_PCI_REG_WRITE_RELAXED(reg, value) \
        rte_write32_relaxed((value), (reg))

#define CXGBE_WRITE_REG(adap, reg, value) \
        CXGBE_PCI_REG_WRITE(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))

#define CXGBE_WRITE_REG_RELAXED(adap, reg, value) \
        CXGBE_PCI_REG_WRITE_RELAXED(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))

static inline uint64_t cxgbe_write_addr64(volatile void *addr, uint64_t val)
{
        CXGBE_PCI_REG_WRITE(addr, val);
        CXGBE_PCI_REG_WRITE(((volatile uint8_t *)(addr) + 4), (val >> 32));
        return val;
}

#define CXGBE_WRITE_REG64(adap, reg, value) \
        cxgbe_write_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))

/**
 * t4_read_reg - read a HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 32-bit value of the given HW register.
 */
static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
{
        return CXGBE_READ_REG(adapter, reg_addr);
}

/**
 * t4_write_reg - write a HW register with barrier
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 32-bit value into the given HW register.
 */
static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
        CXGBE_WRITE_REG(adapter, reg_addr, val);
}

/**
 * t4_write_reg_relaxed - write a HW register with no barrier
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 32-bit value into the given HW register.
 */
static inline void t4_write_reg_relaxed(struct adapter *adapter, u32 reg_addr,
                                        u32 val)
{
        CXGBE_WRITE_REG_RELAXED(adapter, reg_addr, val);
}

/**
 * t4_read_reg64 - read a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 64-bit value of the given HW register.
 */
static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
{
        return CXGBE_READ_REG64(adapter, reg_addr);
}

/**
 * t4_write_reg64 - write a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 64-bit value into the given HW register.
 */
static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
                                  u64 val)
{
        CXGBE_WRITE_REG64(adapter, reg_addr, val);
}

#define PCI_STATUS              0x06    /* 16 bits */
#define PCI_STATUS_CAP_LIST     0x10    /* Support Capability List */
#define PCI_CAPABILITY_LIST     0x34    /* Offset of first capability list entry */
#define PCI_CAP_ID_EXP          0x10    /* PCI Express */
#define PCI_CAP_LIST_ID         0       /* Capability ID */
#define PCI_CAP_LIST_NEXT       1       /* Next capability in the list */
#define PCI_EXP_DEVCTL          0x0008  /* Device control */
#define PCI_EXP_DEVCTL2         40      /* Device Control 2 */
#define PCI_EXP_DEVCTL_EXT_TAG  0x0100  /* Extended Tag Field Enable */
#define PCI_EXP_DEVCTL_PAYLOAD  0x00E0  /* Max payload */
#define PCI_CAP_ID_VPD          0x03    /* Vital Product Data */
#define PCI_VPD_ADDR            2       /* Address to access (15 bits!) */
#define PCI_VPD_ADDR_F          0x8000  /* Write 0, 1 indicates completion */
#define PCI_VPD_DATA            4       /* 32-bits of data returned here */

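/* Editorial sketch of the VPD read handshake implied by the two defines
 * above (PCI_VPD_ADDR_F is written as 0 and set by the device when the
 * data is ready), using the config-space helpers defined below; "vpd"
 * and "offset" are hypothetical locals:
 *
 *      u16 addr_reg;
 *      u32 data;
 *      int vpd = t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
 *
 *      t4_os_pci_write_cfg2(adapter, vpd + PCI_VPD_ADDR,
 *                           offset & ~PCI_VPD_ADDR_F);
 *      do {
 *              t4_os_pci_read_cfg2(adapter, vpd + PCI_VPD_ADDR, &addr_reg);
 *      } while (!(addr_reg & PCI_VPD_ADDR_F));
 *      t4_os_pci_read_cfg4(adapter, vpd + PCI_VPD_DATA, &data);
 */
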
/**
 * t4_os_pci_write_cfg4 - 32-bit write to PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: the value to write
 *
 * Write a 32-bit value into the given register in PCI config space.
 */
static inline void t4_os_pci_write_cfg4(struct adapter *adapter, size_t addr,
                                        off_t val)
{
        u32 val32 = val;

        if (rte_pci_write_config(adapter->pdev, &val32, sizeof(val32),
                                 addr) < 0)
                dev_err(adapter, "Can't write to PCI config space\n");
}

/**
 * t4_os_pci_read_cfg4 - read a 32-bit value from PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: where to store the value read
 *
 * Read a 32-bit value from the given register in PCI config space.
 */
static inline void t4_os_pci_read_cfg4(struct adapter *adapter, size_t addr,
                                       u32 *val)
{
        if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
                                addr) < 0)
                dev_err(adapter, "Can't read from PCI config space\n");
}

/**
 * t4_os_pci_write_cfg2 - 16-bit write to PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: the value to write
 *
 * Write a 16-bit value into the given register in PCI config space.
 */
static inline void t4_os_pci_write_cfg2(struct adapter *adapter, size_t addr,
                                        off_t val)
{
        u16 val16 = val;

        if (rte_pci_write_config(adapter->pdev, &val16, sizeof(val16),
                                 addr) < 0)
                dev_err(adapter, "Can't write to PCI config space\n");
}

/**
 * t4_os_pci_read_cfg2 - read a 16-bit value from PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: where to store the value read
 *
 * Read a 16-bit value from the given register in PCI config space.
 */
static inline void t4_os_pci_read_cfg2(struct adapter *adapter, size_t addr,
                                       u16 *val)
{
        if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
                                addr) < 0)
                dev_err(adapter, "Can't read from PCI config space\n");
}

/**
 * t4_os_pci_read_cfg - read an 8-bit value from PCI config space
 * @adapter: the adapter
 * @addr: the register address
 * @val: where to store the value read
 *
 * Read an 8-bit value from the given register in PCI config space.
 */
static inline void t4_os_pci_read_cfg(struct adapter *adapter, size_t addr,
                                      u8 *val)
{
        if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
                                addr) < 0)
                dev_err(adapter, "Can't read from PCI config space\n");
}

/**
 * t4_os_find_pci_capability - lookup a capability in the PCI capability list
 * @adapter: the adapter
 * @cap: the capability
 *
 * Return the address of the given capability within the PCI capability
 * list, 0 if the capability is not present, or -1 if the device does not
 * support a capability list at all.
 */
static inline int t4_os_find_pci_capability(struct adapter *adapter, int cap)
{
        u16 status;
        int ttl = 48;
        u8 pos = 0;
        u8 id = 0;

        t4_os_pci_read_cfg2(adapter, PCI_STATUS, &status);
        if (!(status & PCI_STATUS_CAP_LIST)) {
                dev_err(adapter, "PCI capability list not supported\n");
                return -1;
        }

        t4_os_pci_read_cfg(adapter, PCI_CAPABILITY_LIST, &pos);
        while (ttl-- && pos >= 0x40) {
                pos &= ~3;
                t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_ID), &id);

                if (id == 0xff)
                        break;

                if (id == cap)
                        return (int)pos;

                t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_NEXT), &pos);
        }
        return 0;
}
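
/* Example (editorial): locate the PCI Express capability and read its
 * Device Control register with the helpers above.
 *
 *      u16 devctl = 0;
 *      int pos = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
 *
 *      if (pos > 0)
 *              t4_os_pci_read_cfg2(adapter, pos + PCI_EXP_DEVCTL, &devctl);
 */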

/**
 * t4_os_set_hw_addr - store a port's MAC address in SW
 * @adapter: the adapter
 * @port_idx: the port index
 * @hw_addr: the Ethernet address
 *
 * Store the Ethernet address of the given port in SW. Called by the
 * common code when it retrieves a port's Ethernet address from EEPROM.
 */
static inline void t4_os_set_hw_addr(struct adapter *adapter, int port_idx,
                                     u8 hw_addr[])
{
        struct port_info *pi = adap2pinfo(adapter, port_idx);

        rte_ether_addr_copy((struct rte_ether_addr *)hw_addr,
                            &pi->eth_dev->data->mac_addrs[0]);
}

/**
 * t4_os_lock_init - initialize spinlock
 * @lock: the spinlock
 */
static inline void t4_os_lock_init(rte_spinlock_t *lock)
{
        rte_spinlock_init(lock);
}

/**
 * t4_os_lock - spin until lock is acquired
 * @lock: the spinlock
 */
static inline void t4_os_lock(rte_spinlock_t *lock)
{
        rte_spinlock_lock(lock);
}

/**
 * t4_os_unlock - unlock a spinlock
 * @lock: the spinlock
 */
static inline void t4_os_unlock(rte_spinlock_t *lock)
{
        rte_spinlock_unlock(lock);
}

/**
 * t4_os_trylock - try to get a lock
 * @lock: the spinlock
 */
static inline int t4_os_trylock(rte_spinlock_t *lock)
{
        return rte_spinlock_trylock(lock);
}

/**
 * t4_os_init_list_head - initialize a list head [to empty]
 * @head: the list head to initialize
 */
static inline void t4_os_init_list_head(struct mbox_list *head)
{
        TAILQ_INIT(head);
}

static inline struct mbox_entry *t4_os_list_first_entry(struct mbox_list *head)
{
        return TAILQ_FIRST(head);
}

/**
 * t4_os_atomic_add_tail - Enqueue list element atomically onto list
 * @entry: the entry to be added to the queue
 * @head: current head of the linked list
 * @lock: lock to use to guarantee atomicity
 */
static inline void t4_os_atomic_add_tail(struct mbox_entry *entry,
                                         struct mbox_list *head,
                                         rte_spinlock_t *lock)
{
        t4_os_lock(lock);
        TAILQ_INSERT_TAIL(head, entry, next);
        t4_os_unlock(lock);
}

/**
 * t4_os_atomic_list_del - Dequeue list element atomically from list
 * @entry: the entry to be removed/dequeued from the list
 * @head: current head of the linked list
 * @lock: lock to use to guarantee atomicity
 */
static inline void t4_os_atomic_list_del(struct mbox_entry *entry,
                                         struct mbox_list *head,
                                         rte_spinlock_t *lock)
{
        t4_os_lock(lock);
        TAILQ_REMOVE(head, entry, next);
        t4_os_unlock(lock);
}

/**
 * t4_init_completion - initialize completion
 * @c: the completion context
 */
static inline void t4_init_completion(struct t4_completion *c)
{
        c->done = 0;
        t4_os_lock_init(&c->lock);
}

/**
 * t4_complete - set completion as done
 * @c: the completion context
 */
static inline void t4_complete(struct t4_completion *c)
{
        t4_os_lock(&c->lock);
        c->done = 1;
        t4_os_unlock(&c->lock);
}
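
/* Editorial note: struct t4_completion is presumably defined in the
 * cxgbe headers included above. A waiter typically polls c->done under
 * c->lock until a producer calls t4_complete(); a minimal sketch,
 * assuming a DPDK rte_delay_us() back-off is acceptable at the call
 * site:
 *
 *      t4_os_lock(&c->lock);
 *      while (!c->done) {
 *              t4_os_unlock(&c->lock);
 *              rte_delay_us(10);
 *              t4_os_lock(&c->lock);
 *      }
 *      t4_os_unlock(&c->lock);
 */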

/**
 * cxgbe_port_viid - get the VI id of a port
 * @dev: the device for the port
 *
 * Return the VI id of the given port.
 */
static inline unsigned int cxgbe_port_viid(const struct rte_eth_dev *dev)
{
        return ethdev2pinfo(dev)->viid;
}

void *t4_alloc_mem(size_t size);
void t4_free_mem(void *addr);
#define t4_os_alloc(_size) t4_alloc_mem((_size))
#define t4_os_free(_ptr) t4_free_mem((_ptr))

void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);

void reclaim_completed_tx(struct sge_txq *q);
void t4_free_sge_resources(struct adapter *adap);
void t4_sge_tx_monitor_start(struct adapter *adap);
void t4_sge_tx_monitor_stop(struct adapter *adap);
int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
                uint16_t nb_pkts);
int t4_mgmt_tx(struct sge_ctrl_txq *txq, struct rte_mbuf *mbuf);
int t4_sge_init(struct adapter *adap);
int t4vf_sge_init(struct adapter *adap);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
                         struct rte_eth_dev *eth_dev, uint16_t queue_id,
                         unsigned int iqid, int socket_id);
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
                          struct rte_eth_dev *eth_dev, uint16_t queue_id,
                          unsigned int iqid, int socket_id);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *rspq, bool fwevtq,
                     struct rte_eth_dev *eth_dev, int intr_idx,
                     struct sge_fl *fl, rspq_handler_t handler,
                     int cong, struct rte_mempool *mp, int queue_id,
                     int socket_id);
int t4_sge_eth_txq_start(struct sge_eth_txq *txq);
int t4_sge_eth_txq_stop(struct sge_eth_txq *txq);
void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq);
int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq);
int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq);
void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq);
void t4_sge_eth_clear_queues(struct port_info *pi);
int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
                               unsigned int cnt);
int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
               unsigned int budget, unsigned int *work_done);
int cxgbe_write_rss(const struct port_info *pi, const u16 *queues);
int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t flags);

#endif /* __T4_ADAPTER_H__ */