/*-
 *   BSD LICENSE
 *
 *   Copyright 2012-2017 6WIND S.A.
 *   Copyright 2012-2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef RTE_PMD_MLX4_H_
#define RTE_PMD_MLX4_H_

#include <stddef.h>
#include <stdint.h>
#include <limits.h>

/*
 * Runtime logging through RTE_LOG() is enabled when not in debugging mode
 * (NDEBUG is defined); DEBUG() is used otherwise. Intermediate LOG_*()
 * macros add the required end-of-line characters.
 */
#ifndef NDEBUG
#define INFO(...) DEBUG(__VA_ARGS__)
#define WARN(...) DEBUG(__VA_ARGS__)
#define ERROR(...) DEBUG(__VA_ARGS__)
#else
#define LOG__(level, m, ...) \
        RTE_LOG(level, PMD, MLX4_DRIVER_NAME ": " m "%c", __VA_ARGS__)
#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
#define INFO(...) LOG_(INFO, __VA_ARGS__)
#define WARN(...) LOG_(WARNING, __VA_ARGS__)
#define ERROR(...) LOG_(ERR, __VA_ARGS__)
#endif
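
/*
 * Expansion sketch (illustration only, not part of the driver): with NDEBUG
 * defined, a call such as
 *
 *   ERROR("cannot attach flow (errno %d)", errno);
 *
 * goes through LOG_() and LOG__() and ends up as
 *
 *   RTE_LOG(ERR, PMD,
 *           MLX4_DRIVER_NAME ": cannot attach flow (errno %d)%c",
 *           errno, '\n');
 *
 * The trailing "%c"/'\n' pair appends the end-of-line character and also
 * guarantees the variadic argument list is never empty.
 */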

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

/*
 * Maximum number of simultaneous MAC addresses supported.
 *
 * According to ConnectX's Programmer Reference Manual:
 *   The L2 Address Match is implemented by comparing a MAC/VLAN combination
 *   of 128 MAC addresses and 127 VLAN values, comprising 128x127 possible
 *   L2 addresses.
 */
#define MLX4_MAX_MAC_ADDRESSES 128

/* Maximum number of simultaneous VLAN filters supported. See above. */
#define MLX4_MAX_VLAN_IDS 127

/* Request send completion once in every 64 sends, might be less. */
#define MLX4_PMD_TX_PER_COMP_REQ 64

/* Maximum number of physical ports. */
#define MLX4_PMD_MAX_PHYS_PORTS 2

/* Maximum number of Scatter/Gather Elements per Work Request. */
#ifndef MLX4_PMD_SGE_WR_N
#define MLX4_PMD_SGE_WR_N 4
#endif

/* Maximum size for inline data. */
#ifndef MLX4_PMD_MAX_INLINE
#define MLX4_PMD_MAX_INLINE 0
#endif

/*
 * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP
 * from which buffers are to be transmitted will have to be mapped by this
 * driver to their own Memory Region (MR). This is a slow operation.
 *
 * This value is always 1 for RX queues.
 */
#ifndef MLX4_PMD_TX_MP_CACHE
#define MLX4_PMD_TX_MP_CACHE 8
#endif
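
/*
 * Lookup sketch for this per-queue MP cache (hypothetical helper, the real
 * fast-path code lives in the .c files): scan the mp2mr[] table of the
 * struct txq declared below for the mbuf's mempool and reuse its lkey.
 *
 *   static inline uint32_t
 *   txq_mp2mr_lookup(struct txq *txq, const struct rte_mempool *mp)
 *   {
 *           unsigned int i;
 *
 *           for (i = 0; i != MLX4_PMD_TX_MP_CACHE; ++i)
 *                   if (txq->mp2mr[i].mp == mp)
 *                           return txq->mp2mr[i].lkey;
 *           return (uint32_t)-1;
 *   }
 *
 * A return value of (uint32_t)-1 stands for a cache miss: the caller would
 * have to register the mempool with ibv_reg_mr() and insert the new entry,
 * which is the slow operation mentioned above.
 */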

/*
 * If defined, only use software counters. The PMD will never ask the hardware
 * for these, and many of them won't be available.
 */
#ifndef MLX4_PMD_SOFT_COUNTERS
#define MLX4_PMD_SOFT_COUNTERS 1
#endif

/* Alarm timeout. */
#define MLX4_ALARM_TIMEOUT_US 100000

/* Port parameter. */
#define MLX4_PMD_PORT_KVARG "port"

enum {
        PCI_VENDOR_ID_MELLANOX = 0x15b3,
};

enum {
        PCI_DEVICE_ID_MELLANOX_CONNECTX3 = 0x1003,
        PCI_DEVICE_ID_MELLANOX_CONNECTX3VF = 0x1004,
        PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO = 0x1007,
};

#define MLX4_DRIVER_NAME "net_mlx4"

/* Bit-field manipulation. */
#define BITFIELD_DECLARE(bf, type, size) \
        type bf[(((size_t)(size) / (sizeof(type) * CHAR_BIT)) + \
                 !!((size_t)(size) % (sizeof(type) * CHAR_BIT)))]
#define BITFIELD_DEFINE(bf, type, size) \
        BITFIELD_DECLARE((bf), type, (size)) = { 0 }
#define BITFIELD_SET(bf, b) \
        (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
         (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \
                ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
#define BITFIELD_RESET(bf, b) \
        (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
         (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \
                ~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
#define BITFIELD_ISSET(bf, b) \
        (assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
         !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
             ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))))
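
/*
 * Usage sketch for the bit-field macros above (illustration only): track
 * which MAC indices are configured, as done by the mac_configured fields
 * declared further down.
 *
 *   BITFIELD_DEFINE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
 *
 *   BITFIELD_SET(mac_configured, 3);
 *   if (BITFIELD_ISSET(mac_configured, 3))
 *           BITFIELD_RESET(mac_configured, 3);
 *
 * BITFIELD_DECLARE() rounds storage up to whole words: 128 bits stored in
 * uint32_t words yields a 4-element array, the "!!(size % ...)" term
 * accounting for sizes that are not a multiple of the word width.
 */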

/* Number of elements in array. */
#define elemof(a) (sizeof(a) / sizeof((a)[0]))

/*
 * Convert a pointer p to structure member m into a pointer to its parent
 * structure of type t.
 */
#define containerof(p, t, m) ((t *)((uint8_t *)(p) - offsetof(t, m)))
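
/*
 * Usage sketch (illustration only, identifiers are hypothetical): given a
 * pointer to the stats member of a struct rxq defined below, containerof()
 * recovers the enclosing queue, while elemof() counts the entries of a
 * fixed-size array.
 *
 *   struct mlx4_rxq_stats *stats = ...; // points into some struct rxq
 *   struct rxq *rxq = containerof(stats, struct rxq, stats);
 *   size_t n = elemof(rxq->mac_flow); // MLX4_MAX_MAC_ADDRESSES entries
 */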

/* Branch prediction helpers. */
#ifndef likely
#define likely(c) __builtin_expect(!!(c), 1)
#endif
#ifndef unlikely
#define unlikely(c) __builtin_expect(!!(c), 0)
#endif

/* Debugging */
#ifndef NDEBUG
#include <stdio.h>
#define DEBUG__(m, ...) \
        (fprintf(stderr, "%s:%d: %s(): " m "%c", \
                 __FILE__, __LINE__, __func__, __VA_ARGS__), \
         fflush(stderr), \
         (void)0)
/*
 * Save/restore errno around DEBUG__().
 * XXX somewhat undefined behavior, but works.
 */
#define DEBUG_(...) \
        (errno = ((int []){ \
                *(volatile int *)&errno, \
                (DEBUG__(__VA_ARGS__), 0) \
        })[0])
#define DEBUG(...) DEBUG_(__VA_ARGS__, '\n')
#define claim_zero(...) assert((__VA_ARGS__) == 0)
#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
#define claim_positive(...) assert((__VA_ARGS__) >= 0)
#else /* NDEBUG */
/* No-ops. */
#define DEBUG(...) (void)0
#define claim_zero(...) (__VA_ARGS__)
#define claim_nonzero(...) (__VA_ARGS__)
#define claim_positive(...) (__VA_ARGS__)
#endif /* NDEBUG */
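
/*
 * Usage sketch (illustration only): claim_zero() documents calls that are
 * expected to return 0; it asserts in debug builds and still evaluates the
 * expression (discarding the result) when NDEBUG is defined.
 *
 *   claim_zero(ibv_destroy_qp(qp));
 *   DEBUG("queue %p released", (void *)rxq);
 */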

struct mlx4_rxq_stats {
        unsigned int idx; /**< Mapping index. */
#ifdef MLX4_PMD_SOFT_COUNTERS
        uint64_t ipackets; /**< Total of successfully received packets. */
        uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
        uint64_t idropped; /**< Total of packets dropped when RX ring full. */
        uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};

/* RX element (scattered packets). */
struct rxq_elt_sp {
        struct ibv_recv_wr wr; /* Work Request. */
        struct ibv_sge sges[MLX4_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */
        struct rte_mbuf *bufs[MLX4_PMD_SGE_WR_N]; /* SGEs buffers. */
};

/* RX element. */
struct rxq_elt {
        struct ibv_recv_wr wr; /* Work Request. */
        struct ibv_sge sge; /* Scatter/Gather Element. */
        /* mbuf pointer is derived from WR_ID(wr.wr_id).offset. */
};

/* RX queue descriptor. */
struct rxq {
        struct priv *priv; /* Back pointer to private data. */
        struct rte_mempool *mp; /* Memory Pool for allocations. */
        struct ibv_mr *mr; /* Memory Region (for mp). */
        struct ibv_cq *cq; /* Completion Queue. */
        struct ibv_qp *qp; /* Queue Pair. */
        struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
        struct ibv_exp_cq_family *if_cq; /* CQ interface. */
        /* Each VLAN ID requires a separate flow steering rule. */
        BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
        struct ibv_flow *mac_flow[MLX4_MAX_MAC_ADDRESSES][MLX4_MAX_VLAN_IDS];
        struct ibv_flow *promisc_flow; /* Promiscuous flow. */
        struct ibv_flow *allmulti_flow; /* Multicast flow. */
        unsigned int port_id; /* Port ID for incoming packets. */
        unsigned int elts_n; /* (*elts)[] length. */
        unsigned int elts_head; /* Current index in (*elts)[]. */
        union {
                struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */
                struct rxq_elt (*no_sp)[]; /* RX elements. */
        } elts;
        unsigned int sp:1; /* Use scattered RX elements. */
        unsigned int csum:1; /* Enable checksum offloading. */
        unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
        struct mlx4_rxq_stats stats; /* RX queue counters. */
        unsigned int socket; /* CPU socket ID for allocations. */
        struct ibv_exp_res_domain *rd; /* Resource Domain. */
};

/* TX element. */
struct txq_elt {
        struct rte_mbuf *buf;
};

struct mlx4_txq_stats {
        unsigned int idx; /**< Mapping index. */
#ifdef MLX4_PMD_SOFT_COUNTERS
        uint64_t opackets; /**< Total of successfully sent packets. */
        uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
        uint64_t odropped; /**< Total of packets not sent when TX ring full. */
};

/*
 * Linear buffer type. It is used when transmitting buffers with too many
 * segments that do not fit the hardware queue (see max_send_sge).
 * Extra segments are copied (linearized) in such buffers, replacing the
 * last SGE during TX.
 * The size is arbitrary but large enough to hold a jumbo frame with
 * 8 segments considering mbuf.buf_len is about 2048 bytes.
 */
typedef uint8_t linear_t[16384];
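
/*
 * Linearization sketch (hypothetical helper, not the driver's actual code
 * path): copy the payload of the remaining mbuf segments into a linear_t
 * buffer so that a single SGE can replace them during TX.
 *
 *   static inline size_t
 *   linearize_tail(linear_t dst, struct rte_mbuf *seg)
 *   {
 *           size_t size = 0;
 *
 *           for (; seg != NULL; seg = seg->next) {
 *                   size_t len = rte_pktmbuf_data_len(seg);
 *
 *                   if (size + len > sizeof(linear_t))
 *                           return 0;
 *                   memcpy(&dst[size], rte_pktmbuf_mtod(seg, void *), len);
 *                   size += len;
 *           }
 *           return size;
 *   }
 *
 * A zero return would mean the segments do not fit the 16384-byte buffer
 * and the packet has to be dropped.
 */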

/* TX queue descriptor. */
struct txq {
        struct priv *priv; /* Back pointer to private data. */
        struct {
                const struct rte_mempool *mp; /* Cached Memory Pool. */
                struct ibv_mr *mr; /* Memory Region (for mp). */
                uint32_t lkey; /* mr->lkey */
        } mp2mr[MLX4_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
        struct ibv_cq *cq; /* Completion Queue. */
        struct ibv_qp *qp; /* Queue Pair. */
        struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
        struct ibv_exp_cq_family *if_cq; /* CQ interface. */
#if MLX4_PMD_MAX_INLINE > 0
        uint32_t max_inline; /* Max inline send size <= MLX4_PMD_MAX_INLINE. */
#endif
        unsigned int elts_n; /* (*elts)[] length. */
        struct txq_elt (*elts)[]; /* TX elements. */
        unsigned int elts_head; /* Current index in (*elts)[]. */
        unsigned int elts_tail; /* First element awaiting completion. */
        unsigned int elts_comp; /* Number of completion requests. */
        unsigned int elts_comp_cd; /* Countdown for next completion request. */
        unsigned int elts_comp_cd_init; /* Initial value for countdown. */
        struct mlx4_txq_stats stats; /* TX queue counters. */
        linear_t (*elts_linear)[]; /* Linearized buffers. */
        struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
        unsigned int socket; /* CPU socket ID for allocations. */
        struct ibv_exp_res_domain *rd; /* Resource Domain. */
};

struct rte_flow;

struct priv {
        struct rte_eth_dev *dev; /* Ethernet device. */
        struct ibv_context *ctx; /* Verbs context. */
        struct ibv_device_attr device_attr; /* Device properties. */
        struct ibv_pd *pd; /* Protection Domain. */
        /*
         * MAC addresses array and configuration bit-field.
         * An extra entry that cannot be modified by the DPDK is reserved
         * for broadcast frames (destination MAC address ff:ff:ff:ff:ff:ff).
         */
        struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
        BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
        /* VLAN filters. */
        struct {
                unsigned int enabled:1; /* If enabled. */
                unsigned int id:12; /* VLAN ID (0-4095). */
        } vlan_filter[MLX4_MAX_VLAN_IDS]; /* VLAN filters table. */
        /* Device properties. */
        uint16_t mtu; /* Configured MTU. */
        uint8_t port; /* Physical port number. */
        unsigned int started:1; /* Device started, flows enabled. */
        unsigned int promisc:1; /* Device in promiscuous mode. */
        unsigned int allmulti:1; /* Device receives all multicast packets. */
        unsigned int hw_qpg:1; /* QP groups are supported. */
        unsigned int hw_tss:1; /* TSS is supported. */
        unsigned int hw_rss:1; /* RSS is supported. */
        unsigned int hw_csum:1; /* Checksum offload is supported. */
        unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
        unsigned int rss:1; /* RSS is enabled. */
        unsigned int vf:1; /* This is a VF device. */
        unsigned int pending_alarm:1; /* An alarm is pending. */
#ifdef INLINE_RECV
        unsigned int inl_recv_size; /* Inline receive size. */
#endif
        unsigned int max_rss_tbl_sz; /* Maximum number of RSS queues. */
        /* RX/TX queues. */
        struct rxq rxq_parent; /* Parent queue when RSS is enabled. */
        unsigned int rxqs_n; /* RX queues array size. */
        unsigned int txqs_n; /* TX queues array size. */
        struct rxq *(*rxqs)[]; /* RX queues. */
        struct txq *(*txqs)[]; /* TX queues. */
        struct rte_intr_handle intr_handle; /* Interrupt handler. */
        struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
        LIST_HEAD(mlx4_flows, rte_flow) flows;
        struct rte_intr_conf intr_conf; /* Active interrupt configuration. */
        rte_spinlock_t lock; /* Lock for control functions. */
};

void priv_lock(struct priv *priv);
void priv_unlock(struct priv *priv);
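
/*
 * Usage sketch (illustration only): control-path operations take the
 * private spinlock declared in struct priv before touching shared state.
 *
 *   priv_lock(priv);
 *   ... reconfigure queues, flows or MAC filters ...
 *   priv_unlock(priv);
 */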

#endif /* RTE_PMD_MLX4_H_ */