/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */
5 | ||
6 | #ifndef _VNIC_WQ_H_ | |
7 | #define _VNIC_WQ_H_ | |
8 | ||
9 | ||
10 | #include "vnic_dev.h" | |
11 | #include "vnic_cq.h" | |
12 | #include <rte_memzone.h> | |
13 | ||
/*
 * Work queue control: the WQ's hardware register block (accessed through
 * the memory-mapped pointer in struct vnic_wq).  Trailing comments give
 * each register's byte offset from the start of the block; the padN
 * members keep the 32-bit registers on an 8-byte stride.
 */
struct vnic_wq_ctrl {
	uint64_t ring_base;			/* 0x00 */
	uint32_t ring_size;			/* 0x08 */
	uint32_t pad0;
	uint32_t posted_index;			/* 0x10 */
	uint32_t pad1;
	uint32_t cq_index;			/* 0x18 */
	uint32_t pad2;
	uint32_t enable;			/* 0x20 */
	uint32_t pad3;
	uint32_t running;			/* 0x28 */
	uint32_t pad4;
	uint32_t fetch_index;			/* 0x30 */
	uint32_t pad5;
	uint32_t dca_value;			/* 0x38 */
	uint32_t pad6;
	uint32_t error_interrupt_enable;	/* 0x40 */
	uint32_t pad7;
	uint32_t error_interrupt_offset;	/* 0x48 */
	uint32_t pad8;
	uint32_t error_status;			/* 0x50 */
	uint32_t pad9;
};
38 | ||
7c673cae FG |
/* Per-queue work (transmit) queue state kept by the driver. */
struct vnic_wq {
	unsigned int index;		/* WQ number within the vNIC */
	uint64_t tx_offload_notsup_mask;
	struct vnic_dev *vdev;		/* owning vNIC device */
	struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
	struct vnic_dev_ring ring;	/* descriptor ring backing this WQ */
	/* NOTE(review): presumably one mbuf pointer per ring slot —
	 * confirm against the alloc/post paths in vnic_wq.c / enic_rxtx. */
	struct rte_mbuf **bufs;
	unsigned int head_idx;
	unsigned int cq_pend;
	unsigned int tail_idx;
	unsigned int socket_id;		/* assumes NUMA socket for allocations — TODO confirm */
	const struct rte_memzone *cqmsg_rz;
	uint16_t last_completed_index;
	uint64_t offloads;
};
54 | ||
55 | static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) | |
56 | { | |
57 | /* how many does SW own? */ | |
58 | return wq->ring.desc_avail; | |
59 | } | |
60 | ||
61 | static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) | |
62 | { | |
63 | /* how many does HW own? */ | |
64 | return wq->ring.desc_count - wq->ring.desc_avail - 1; | |
65 | } | |
66 | ||
/*
 * Field layout of the 64-bit "cached posted index" value built by
 * vnic_cached_posted_index(): the descriptor index occupies the low
 * bits, with a prefetch length (in cache lines) and a prefetch address
 * packed above it.
 */
#define PI_LOG2_CACHE_LINE_SIZE 5	/* 32-byte cache lines */
#define PI_INDEX_BITS 12
#define PI_INDEX_MASK ((1U << PI_INDEX_BITS) - 1)
#define PI_PREFETCH_LEN_MASK ((1U << PI_LOG2_CACHE_LINE_SIZE) - 1)
#define PI_PREFETCH_LEN_OFF 16
#define PI_PREFETCH_ADDR_BITS 43
#define PI_PREFETCH_ADDR_MASK ((1ULL << PI_PREFETCH_ADDR_BITS) - 1)
#define PI_PREFETCH_ADDR_OFF 21
75 | ||
76 | /** How many cache lines are touched by buffer (addr, len). */ | |
77 | static inline unsigned int num_cache_lines_touched(dma_addr_t addr, | |
78 | unsigned int len) | |
79 | { | |
80 | const unsigned long mask = PI_PREFETCH_LEN_MASK; | |
81 | const unsigned long laddr = (unsigned long)addr; | |
82 | unsigned long lines, equiv_len; | |
83 | /* A. If addr is aligned, our solution is just to round up len to the | |
84 | next boundary. | |
85 | ||
86 | e.g. addr = 0, len = 48 | |
87 | +--------------------+ | |
88 | |XXXXXXXXXXXXXXXXXXXX| 32-byte cacheline a | |
89 | +--------------------+ | |
90 | |XXXXXXXXXX | cacheline b | |
91 | +--------------------+ | |
92 | ||
93 | B. If addr is not aligned, however, we may use an extra | |
94 | cacheline. e.g. addr = 12, len = 22 | |
95 | ||
96 | +--------------------+ | |
97 | | XXXXXXXXXXXXX| | |
98 | +--------------------+ | |
99 | |XX | | |
100 | +--------------------+ | |
101 | ||
102 | Our solution is to make the problem equivalent to case A | |
103 | above by adding the empty space in the first cacheline to the length: | |
104 | unsigned long len; | |
105 | ||
106 | +--------------------+ | |
107 | |eeeeeeeXXXXXXXXXXXXX| "e" is empty space, which we add to len | |
108 | +--------------------+ | |
109 | |XX | | |
110 | +--------------------+ | |
111 | ||
112 | */ | |
113 | equiv_len = len + (laddr & mask); | |
114 | ||
115 | /* Now we can just round up this len to the next 32-byte boundary. */ | |
116 | lines = (equiv_len + mask) & (~mask); | |
117 | ||
118 | /* Scale bytes -> cachelines. */ | |
119 | return lines >> PI_LOG2_CACHE_LINE_SIZE; | |
120 | } | |
121 | ||
f67539c2 TL |
122 | static inline uint64_t vnic_cached_posted_index(dma_addr_t addr, |
123 | unsigned int len, | |
7c673cae FG |
124 | unsigned int index) |
125 | { | |
126 | unsigned int num_cache_lines = num_cache_lines_touched(addr, len); | |
127 | /* Wish we could avoid a branch here. We could have separate | |
128 | * vnic_wq_post() and vinc_wq_post_inline(), the latter | |
129 | * only supporting < 1k (2^5 * 2^5) sends, I suppose. This would | |
130 | * eliminate the if (eop) branch as well. | |
131 | */ | |
132 | if (num_cache_lines > PI_PREFETCH_LEN_MASK) | |
133 | num_cache_lines = 0; | |
134 | return (index & PI_INDEX_MASK) | | |
135 | ((num_cache_lines & PI_PREFETCH_LEN_MASK) << PI_PREFETCH_LEN_OFF) | | |
136 | (((addr >> PI_LOG2_CACHE_LINE_SIZE) & | |
137 | PI_PREFETCH_ADDR_MASK) << PI_PREFETCH_ADDR_OFF); | |
138 | } | |
139 | ||
140 | static inline uint32_t | |
141 | buf_idx_incr(uint32_t n_descriptors, uint32_t idx) | |
142 | { | |
143 | idx++; | |
144 | if (unlikely(idx == n_descriptors)) | |
145 | idx = 0; | |
146 | return idx; | |
147 | } | |
148 | ||
/*
 * WQ lifecycle and control API.  The implementations are not visible in
 * this header; per-function semantics are inferred from names and
 * signatures — confirm against vnic_wq.c before relying on them.
 */
void vnic_wq_free(struct vnic_wq *wq);
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size);
void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int fetch_index, unsigned int posted_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error);
unsigned int vnic_wq_error_status(struct vnic_wq *wq);
void vnic_wq_enable(struct vnic_wq *wq);
int vnic_wq_disable(struct vnic_wq *wq);
/* buf_clean: callback applied per outstanding buffer — presumably; verify. */
void vnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct rte_mbuf **buf));
#endif /* _VNIC_WQ_H_ */