/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"

/* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */
#define FDIRCTRL_PBALLOC_MASK           0x03

/* For calculating memory required for FDIR filters */
#define PBALLOC_SIZE_SHIFT              15

/* Number of bits used to mask bucket hash for different pballoc sizes */
#define PERFECT_BUCKET_64KB_HASH_MASK   0x07FF  /* 11 bits */
#define PERFECT_BUCKET_128KB_HASH_MASK  0x0FFF  /* 12 bits */
#define PERFECT_BUCKET_256KB_HASH_MASK  0x1FFF  /* 13 bits */
#define SIG_BUCKET_64KB_HASH_MASK       0x1FFF  /* 13 bits */
#define SIG_BUCKET_128KB_HASH_MASK      0x3FFF  /* 14 bits */
#define SIG_BUCKET_256KB_HASH_MASK      0x7FFF  /* 15 bits */
#define IXGBE_DEFAULT_FLEXBYTES_OFFSET  12 /* default flexbytes offset in bytes */
#define IXGBE_FDIR_MAX_FLEX_LEN         2 /* len in bytes of flexbytes */
#define IXGBE_MAX_FLX_SOURCE_OFF        62
#define IXGBE_FDIRCTRL_FLEX_MASK        (0x1F << IXGBE_FDIRCTRL_FLEX_SHIFT)
#define IXGBE_FDIRCMD_CMD_INTERVAL_US   10

#define IXGBE_FDIR_FLOW_TYPES ( \
        (1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
        (1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
        (1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
        (1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
        (1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
        (1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
        (1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
        (1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))

#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
        uint8_t ipv6_addr[16]; \
        uint8_t i; \
        rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr)); \
        (ipv6m) = 0; \
        for (i = 0; i < sizeof(ipv6_addr); i++) { \
                if (ipv6_addr[i] == UINT8_MAX) \
                        (ipv6m) |= 1 << i; \
                else if (ipv6_addr[i] != 0) { \
                        PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \
                        return -EINVAL; \
                } \
        } \
} while (0)

#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \
        uint8_t ipv6_addr[16]; \
        uint8_t i; \
        for (i = 0; i < sizeof(ipv6_addr); i++) { \
                if ((ipv6m) & (1 << i)) \
                        ipv6_addr[i] = UINT8_MAX; \
                else \
                        ipv6_addr[i] = 0; \
        } \
        rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr)); \
} while (0)

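/*
 * Worked example (illustrative only, derived from the macros above): an IPv6
 * mask whose first two bytes are 0xFF and whose remaining bytes are zero is
 * encoded by IPV6_ADDR_TO_MASK() as the 16-bit value 0x0003 (bit i set means
 * byte i of the address is matched). IPV6_MASK_TO_ADDR() performs the reverse
 * mapping, expanding 0x0003 back into ff:ff:00:...:00. Any mask byte other
 * than 0x00 or 0xFF is rejected, since the hardware can only mask whole
 * bytes of an IPv6 address.
 */
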
#define DEFAULT_VXLAN_PORT 4789
#define IXGBE_FDIRIP6M_INNER_MAC_SHIFT 4

static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
static int fdir_set_input_mask(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_masks *input_mask);
static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_masks *input_mask);
static int fdir_set_input_mask_x550(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_masks *input_mask);
static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
static int ixgbe_fdir_filter_to_atr_input(
                const struct rte_eth_fdir_filter *fdir_filter,
                union ixgbe_atr_input *input,
                enum rte_fdir_mode mode);
static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
                uint32_t key);
static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
                enum rte_fdir_pballoc_type pballoc);
static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
                enum rte_fdir_pballoc_type pballoc);
static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
                union ixgbe_atr_input *input, uint8_t queue,
                uint32_t fdircmd, uint32_t fdirhash,
                enum rte_fdir_mode mode);
static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
                union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
                uint32_t fdirhash);
static int ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_filter *fdir_filter,
                bool del,
                bool update);
static int ixgbe_fdir_flush(struct rte_eth_dev *dev);
static void ixgbe_fdir_info_get(struct rte_eth_dev *dev,
                struct rte_eth_fdir_info *fdir_info);
static void ixgbe_fdir_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_fdir_stats *fdir_stats);

/**
 * This function is based on ixgbe_fdir_enable_82599() in base/ixgbe_82599.c.
 * It adds extra configuration of fdirctrl that is common for all filter types.
 *
 * Initialize Flow Director control registers
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register
 **/
static int
fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
{
        int i;

        PMD_INIT_FUNC_TRACE();

        /* Prime the keys for hashing */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

        /*
         * Continue setup of fdirctrl register bits:
         *  Set the maximum length per hash bucket to 0xA filters
         *  Send interrupt when 64 filters are left
         */
        fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
                    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

        /*
         * Poll init-done after we write the register. Estimated times:
         *      10G: PBALLOC = 11b, timing is 60us
         *       1G: PBALLOC = 11b, timing is 600us
         *     100M: PBALLOC = 11b, timing is 6ms
         *
         * Multiply these timings by 4 if under full Rx load
         *
         * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
         * 1 msec per poll time. If we're at line rate and drop to 100M, then
         * this might not finish in our poll time, but we can live with that
         * for now.
         */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
        IXGBE_WRITE_FLUSH(hw);
        for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
                if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
                                IXGBE_FDIRCTRL_INIT_DONE)
                        break;
                msec_delay(1);
        }

        if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
                PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
                return -ETIMEDOUT;
        }
        return 0;
}

/*
 * Set appropriate bits in fdirctrl for: variable reporting levels, moving
 * flexbytes matching field, and drop queue (only for perfect matching mode).
 */
static inline int
configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
{
        *fdirctrl = 0;

        switch (conf->pballoc) {
        case RTE_FDIR_PBALLOC_64K:
                /* 8k - 1 signature filters */
                *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
                break;
        case RTE_FDIR_PBALLOC_128K:
                /* 16k - 1 signature filters */
                *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
                break;
        case RTE_FDIR_PBALLOC_256K:
                /* 32k - 1 signature filters */
                *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
                break;
        default:
                /* bad value */
                PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
                return -EINVAL;
        }

        /* status flags: write hash & swindex in the rx descriptor */
        switch (conf->status) {
        case RTE_FDIR_NO_REPORT_STATUS:
                /* do nothing, default mode */
                break;
        case RTE_FDIR_REPORT_STATUS:
                /* report status when the packet matches a fdir rule */
                *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
                break;
        case RTE_FDIR_REPORT_STATUS_ALWAYS:
                /* always report status */
                *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS;
                break;
        default:
                /* bad value */
                PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
                return -EINVAL;
        }

        *fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
                     IXGBE_FDIRCTRL_FLEX_SHIFT;

        if (conf->mode >= RTE_FDIR_MODE_PERFECT &&
            conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
                *fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
                *fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
                if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
                        *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
                                        << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
                else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
                        *fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD
                                        << IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
        }

        return 0;
}

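/*
 * Illustrative sketch (field values are examples only, not part of the
 * driver): a minimal rte_fdir_conf, as passed in rte_eth_conf.fdir_conf at
 * device configure time, that this function would accept:
 *
 *      static const struct rte_fdir_conf fdir_conf = {
 *              .mode = RTE_FDIR_MODE_PERFECT,
 *              .pballoc = RTE_FDIR_PBALLOC_64K,
 *              .status = RTE_FDIR_REPORT_STATUS,
 *              .drop_queue = 127,
 *      };
 *
 * configure_fdir_flags() would then set IXGBE_FDIRCTRL_PBALLOC_64K,
 * IXGBE_FDIRCTRL_REPORT_STATUS, IXGBE_FDIRCTRL_PERFECT_MATCH, the drop-queue
 * field and the default flexbytes offset in *fdirctrl.
 */
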
/**
 * Reverse the bits in FDIR registers that store 2 x 16 bit masks.
 *
 *  @hi_dword: Bits 31:16 mask to be bit swapped.
 *  @lo_dword: Bits 15:0  mask to be bit swapped.
 *
 *  Flow director uses several registers to store 2 x 16 bit masks with the
 *  bits reversed such as FDIRTCPM, FDIRUDPM. The LS bit of the
 *  mask affects the MS bit/byte of the target. This function reverses the
 *  bits in these masks.
 **/
static inline uint32_t
reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
{
        uint32_t mask = hi_dword << 16;

        mask |= lo_dword;
        mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
        mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
        mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
        return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}

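/*
 * Worked example (illustrative only): each 16-bit half is bit-reversed in
 * place, so reverse_fdir_bitmasks(0x0001, 0x0001) returns 0x80008000 and
 * reverse_fdir_bitmasks(0xFFFF, 0x0000) returns 0xFFFF0000. The four swap
 * stages exchange progressively larger groups (single bits, bit pairs,
 * nibbles, then bytes), which together reverse the bit order inside each
 * half without swapping the halves themselves.
 */
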
/*
 * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
 * but makes use of the rte_fdir_masks structure to see which bits to set.
 */
static int
fdir_set_input_mask_82599(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_masks *input_mask)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_fdir_info *info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        /*
         * Mask VM pool and DIPv6 since they are currently not supported.
         * Mask FLEX byte; it will be set in flex_conf.
         */
        uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
        uint32_t fdirtcpm;  /* TCP source and destination port masks. */
        uint32_t fdiripv6m; /* IPv6 source and destination masks. */
        uint16_t dst_ipv6m = 0;
        uint16_t src_ipv6m = 0;
        volatile uint32_t *reg;

        PMD_INIT_FUNC_TRACE();

        /*
         * Program the relevant mask registers. If src/dst_port or src/dst_addr
         * are zero, then assume a full mask for that field. Also assume that
         * a VLAN of 0 is unspecified, so mask that out as well. L4type
         * cannot be masked out in this implementation.
         */
        if (input_mask->dst_port_mask == 0 && input_mask->src_port_mask == 0)
                /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
                fdirm |= IXGBE_FDIRM_L4P;

        if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
                /* mask VLAN Priority */
                fdirm |= IXGBE_FDIRM_VLANP;
        else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
                /* mask VLAN ID */
                fdirm |= IXGBE_FDIRM_VLANID;
        else if (input_mask->vlan_tci_mask == 0)
                /* mask VLAN ID and Priority */
                fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
        else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
                PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
                return -EINVAL;
        }
        info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;

        IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

        /* store the TCP/UDP port masks, bit reversed from port layout */
        fdirtcpm = reverse_fdir_bitmasks(
                        rte_be_to_cpu_16(input_mask->dst_port_mask),
                        rte_be_to_cpu_16(input_mask->src_port_mask));

        /* write all the same so that UDP, TCP and SCTP use the same mask
         * (little-endian)
         */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
        info->mask.src_port_mask = input_mask->src_port_mask;
        info->mask.dst_port_mask = input_mask->dst_port_mask;

        /* Store source and destination IPv4 masks (big-endian);
         * cannot use IXGBE_WRITE_REG.
         */
        reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M);
        *reg = ~(input_mask->ipv4_mask.src_ip);
        reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
        *reg = ~(input_mask->ipv4_mask.dst_ip);
        info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
        info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;

        if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
                /*
                 * Store source and destination IPv6 masks (bit reversed)
                 */
                IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
                IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
                fdiripv6m = (dst_ipv6m << 16) | src_ipv6m;

                IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
                info->mask.src_ipv6_mask = src_ipv6m;
                info->mask.dst_ipv6_mask = dst_ipv6m;
        }

        return IXGBE_SUCCESS;
}

/*
 * This references ixgbe_fdir_set_input_mask_82599() in base/ixgbe_82599.c,
 * but makes use of the rte_fdir_masks structure to see which bits to set.
 */
static int
fdir_set_input_mask_x550(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_masks *input_mask)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_fdir_info *info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        /* Mask VM pool and DIPv6 since they are currently not supported.
         * Mask FLEX byte; it will be set in flex_conf.
         */
        uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
                         IXGBE_FDIRM_FLEX;
        uint32_t fdiripv6m;
        enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
        uint16_t mac_mask;

        PMD_INIT_FUNC_TRACE();

        /* set the default UDP port for VxLAN */
        if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
                IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, DEFAULT_VXLAN_PORT);

        /* some bits must be set for mac vlan or tunnel mode */
        fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;

        if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
                /* mask VLAN Priority */
                fdirm |= IXGBE_FDIRM_VLANP;
        else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
                /* mask VLAN ID */
                fdirm |= IXGBE_FDIRM_VLANID;
        else if (input_mask->vlan_tci_mask == 0)
                /* mask VLAN ID and Priority */
                fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
        else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
                PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
                return -EINVAL;
        }
        info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;

        IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

        fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
        fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
        if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
                fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
                                IXGBE_FDIRIP6M_TNI_VNI;

        if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
                mac_mask = input_mask->mac_addr_byte_mask;
                fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
                                & IXGBE_FDIRIP6M_INNER_MAC;
                info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;

                switch (input_mask->tunnel_type_mask) {
                case 0:
                        /* Mask tunnel type */
                        fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
                        break;
                case 1:
                        break;
                default:
                        PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
                        return -EINVAL;
                }
                info->mask.tunnel_type_mask =
                        input_mask->tunnel_type_mask;

                switch (rte_be_to_cpu_32(input_mask->tunnel_id_mask)) {
                case 0x0:
                        /* Mask vxlan id */
                        fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
                        break;
                case 0x00FFFFFF:
                        fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
                        break;
                case 0xFFFFFFFF:
                        break;
                default:
                        PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
                        return -EINVAL;
                }
                info->mask.tunnel_id_mask =
                        input_mask->tunnel_id_mask;
        }

        IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);

        return IXGBE_SUCCESS;
}

static int
fdir_set_input_mask(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_masks *input_mask)
{
        enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;

        if (mode >= RTE_FDIR_MODE_SIGNATURE &&
            mode <= RTE_FDIR_MODE_PERFECT)
                return fdir_set_input_mask_82599(dev, input_mask);
        else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
                 mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
                return fdir_set_input_mask_x550(dev, input_mask);

        PMD_DRV_LOG(ERR, "Unsupported fdir mode - %d!", mode);
        return -ENOTSUP;
}

/*
 * ixgbe_set_fdir_flex_conf - check that the flex payload and mask
 * configuration arguments are valid, and program them
 */
static int
ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_fdir_info *info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        const struct rte_eth_flex_payload_cfg *flex_cfg;
        const struct rte_eth_fdir_flex_mask *flex_mask;
        uint32_t fdirm;
        uint16_t flexbytes = 0;
        uint16_t i;

        fdirm = IXGBE_READ_REG(hw, IXGBE_FDIRM);

        if (conf == NULL) {
                PMD_DRV_LOG(ERR, "NULL pointer.");
                return -EINVAL;
        }

        for (i = 0; i < conf->nb_payloads; i++) {
                flex_cfg = &conf->flex_set[i];
                if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
                        PMD_DRV_LOG(ERR, "unsupported payload type.");
                        return -EINVAL;
                }
                if (((flex_cfg->src_offset[0] & 0x1) == 0) &&
                    (flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) &&
                    (flex_cfg->src_offset[0] <= IXGBE_MAX_FLX_SOURCE_OFF)) {
                        *fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
                        *fdirctrl |=
                                (flex_cfg->src_offset[0] / sizeof(uint16_t)) <<
                                        IXGBE_FDIRCTRL_FLEX_SHIFT;
                } else {
                        PMD_DRV_LOG(ERR, "invalid flexbytes arguments.");
                        return -EINVAL;
                }
        }

        for (i = 0; i < conf->nb_flexmasks; i++) {
                flex_mask = &conf->flex_mask[i];
                if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) {
                        PMD_DRV_LOG(ERR, "flexmask should be set globally.");
                        return -EINVAL;
                }
                flexbytes = (uint16_t)(((flex_mask->mask[0] << 8) & 0xFF00) |
                                        ((flex_mask->mask[1]) & 0xFF));
                if (flexbytes == UINT16_MAX)
                        fdirm &= ~IXGBE_FDIRM_FLEX;
                else if (flexbytes != 0) {
                        /* IXGBE_FDIRM_FLEX is set by default when the mask is set */
                        PMD_DRV_LOG(ERR, "invalid flexbytes mask arguments.");
                        return -EINVAL;
                }
        }
        IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
        info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
        info->flex_bytes_offset = (uint8_t)((*fdirctrl &
                                            IXGBE_FDIRCTRL_FLEX_MASK) >>
                                            IXGBE_FDIRCTRL_FLEX_SHIFT);
        return 0;
}

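/*
 * Illustrative sketch (field values are examples only): a flex configuration
 * this function accepts must describe one two-byte field starting at an even
 * offset no greater than IXGBE_MAX_FLX_SOURCE_OFF, with an all-or-nothing
 * mask applied globally:
 *
 *      struct rte_eth_fdir_flex_conf flex_conf = {
 *              .nb_payloads = 1,
 *              .nb_flexmasks = 1,
 *      };
 *      flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
 *      flex_conf.flex_set[0].src_offset[0] = 12;  // even offset
 *      flex_conf.flex_set[0].src_offset[1] = 13;  // adjacent byte
 *      flex_conf.flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN;
 *      flex_conf.flex_mask[0].mask[0] = 0xFF;     // match both bytes,
 *      flex_conf.flex_mask[0].mask[1] = 0xFF;     // or both 0x00 to mask
 */
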
int
ixgbe_fdir_configure(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err;
        uint32_t fdirctrl, pbsize;
        int i;
        enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;

        PMD_INIT_FUNC_TRACE();

        if (hw->mac.type != ixgbe_mac_82599EB &&
            hw->mac.type != ixgbe_mac_X540 &&
            hw->mac.type != ixgbe_mac_X550 &&
            hw->mac.type != ixgbe_mac_X550EM_x &&
            hw->mac.type != ixgbe_mac_X550EM_a)
                return -ENOSYS;

        /* x550 supports mac-vlan and tunnel mode but other NICs do not */
        if (hw->mac.type != ixgbe_mac_X550 &&
            hw->mac.type != ixgbe_mac_X550EM_x &&
            hw->mac.type != ixgbe_mac_X550EM_a &&
            mode != RTE_FDIR_MODE_SIGNATURE &&
            mode != RTE_FDIR_MODE_PERFECT)
                return -ENOSYS;

        err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
        if (err)
                return err;

        /*
         * Before enabling Flow Director, the Rx Packet Buffer size
         * must be reduced. The new value is the current size minus
         * flow director memory usage size.
         */
        pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
        IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
            (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));

        /*
         * The defaults in the HW for RX PB 1-7 are not zero and so should be
         * initialized to zero for non-DCB mode, otherwise the actual total
         * RX PB would be bigger than programmed and filter space would run
         * into the PB 0 region.
         */
        for (i = 1; i < 8; i++)
                IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);

        err = fdir_set_input_mask(dev, &dev->data->dev_conf.fdir_conf.mask);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Error on setting FD mask");
                return err;
        }
        err = ixgbe_set_fdir_flex_conf(dev,
                &dev->data->dev_conf.fdir_conf.flex_conf, &fdirctrl);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Error on setting FD flexible arguments.");
                return err;
        }

        err = fdir_enable_82599(hw, fdirctrl);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Error on enabling FD.");
                return err;
        }
        return 0;
}

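/*
 * Worked example (assuming the PBALLOC encodings 1/2/3 for 64K/128K/256K
 * from base/ixgbe_type.h): with RTE_FDIR_PBALLOC_64K the PBALLOC field of
 * fdirctrl holds 1, so pbsize = 1 << (15 + 1) = 64KB is carved out of Rx
 * packet buffer 0; the 128K and 256K settings deduct 128KB and 256KB
 * respectively.
 */
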
/*
 * Convert the DPDK rte_eth_fdir_filter struct to the ixgbe_atr_input union
 * that is used by the IXGBE driver code.
 */
static int
ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
                union ixgbe_atr_input *input, enum rte_fdir_mode mode)
{
        input->formatted.vlan_id = fdir_filter->input.flow_ext.vlan_tci;
        input->formatted.flex_bytes = (uint16_t)(
                (fdir_filter->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
                (fdir_filter->input.flow_ext.flexbytes[0] & 0xFF));

        switch (fdir_filter->input.flow_type) {
        case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
                input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
                input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
                input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
                input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
                input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV6;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
                input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
                input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV6;
                break;
        case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
                input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV6;
                break;
        default:
                break;
        }

        switch (fdir_filter->input.flow_type) {
        case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
        case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
                input->formatted.src_port =
                        fdir_filter->input.flow.udp4_flow.src_port;
                input->formatted.dst_port =
                        fdir_filter->input.flow.udp4_flow.dst_port;
                /* fall through: for the SCTP flow type, port and verify_tag
                 * are meaningless in ixgbe.
                 */
        case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
        case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
                input->formatted.src_ip[0] =
                        fdir_filter->input.flow.ip4_flow.src_ip;
                input->formatted.dst_ip[0] =
                        fdir_filter->input.flow.ip4_flow.dst_ip;
                break;

        case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
        case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
                input->formatted.src_port =
                        fdir_filter->input.flow.udp6_flow.src_port;
                input->formatted.dst_port =
                        fdir_filter->input.flow.udp6_flow.dst_port;
                /* fall through: for the SCTP flow type, port and verify_tag
                 * are meaningless in ixgbe.
                 */
        case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
        case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
                rte_memcpy(input->formatted.src_ip,
                           fdir_filter->input.flow.ipv6_flow.src_ip,
                           sizeof(input->formatted.src_ip));
                rte_memcpy(input->formatted.dst_ip,
                           fdir_filter->input.flow.ipv6_flow.dst_ip,
                           sizeof(input->formatted.dst_ip));
                break;
        default:
                break;
        }

        if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
                rte_memcpy(
                        input->formatted.inner_mac,
                        fdir_filter->input.flow.mac_vlan_flow.mac_addr.addr_bytes,
                        sizeof(input->formatted.inner_mac));
        } else if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
                rte_memcpy(
                        input->formatted.inner_mac,
                        fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
                        sizeof(input->formatted.inner_mac));
                input->formatted.tunnel_type =
                        fdir_filter->input.flow.tunnel_flow.tunnel_type;
                input->formatted.tni_vni =
                        fdir_filter->input.flow.tunnel_flow.tunnel_id;
        }

        return 0;
}

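/*
 * Illustrative sketch (application side; values are examples only and the
 * IPv4() helper is assumed from rte_ip.h): a filter matching IPv4/UDP
 * traffic to 10.0.0.1:320 from any source, accepted to Rx queue 1, would be
 * described to this function as:
 *
 *      struct rte_eth_fdir_filter f = {0};
 *      f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *      f.input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 1));
 *      f.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(320);
 *      f.action.behavior = RTE_ETH_FDIR_ACCEPT;
 *      f.action.rx_queue = 1;
 *
 * Note that addresses and ports are expected in network byte order.
 */
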
/*
 * The below function is taken from the FreeBSD IXGBE drivers release
 * 2.3.8. The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK
 * before returning, as the signature hash can use 16 bits.
 *
 * The newer driver has optimised functions for calculating bucket and
 * signature hashes. However they don't support IPv6 type packets for signature
 * filters so are not used here.
 *
 * Note that the bkt_hash field in the ixgbe_atr_input structure is also never
 * set.
 *
 * Compute the hashes for SW ATR
 *  @stream: input bitstream to compute the hash on
 *  @key: 32-bit hash key
 **/
static uint32_t
ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
                uint32_t key)
{
        /*
         * The algorithm is as follows:
         *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
         *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
         *    and A[n] x B[n] is bitwise AND between same length strings
         *
         *    K[n] is 16 bits, defined as:
         *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
         *       for n modulo 32 < 15, K[n] =
         *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
         *
         *    S[n] is 16 bits, defined as:
         *       for n >= 15, S[n] = S[n:n - 15]
         *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
         *
         *    To simplify for programming, the algorithm is implemented
         *    in software this way:
         *
         *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
         *
         *    for (i = 0; i < 352; i+=32)
         *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
         *
         *    lo_hash_dword[15:0]  ^= Stream[15:0];
         *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
         *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
         *
         *    hi_hash_dword[31:0]  ^= Stream[351:320];
         *
         *    if (key[0])
         *        hash[15:0] ^= Stream[15:0];
         *
         *    for (i = 0; i < 16; i++) {
         *        if (key[i])
         *            hash[15:0] ^= lo_hash_dword[(i+15):i];
         *        if (key[i + 16])
         *            hash[15:0] ^= hi_hash_dword[(i+15):i];
         *    }
         *
         */
        __be32 common_hash_dword = 0;
        u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
        u32 hash_result = 0;
        u8 i;

        /* record the flow_vm_vlan bits as they are a key part to the hash */
        flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);

        /* generate common hash dword */
        for (i = 1; i <= 13; i++)
                common_hash_dword ^= atr_input->dword_stream[i];

        hi_hash_dword = IXGBE_NTOHL(common_hash_dword);

        /* low dword is word swapped version of common */
        lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

        /* apply flow ID/VM pool/VLAN ID bits to hash words */
        hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

        /* Process bits 0 and 16 */
        if (key & 0x0001)
                hash_result ^= lo_hash_dword;
        if (key & 0x00010000)
                hash_result ^= hi_hash_dword;

        /*
         * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
         * delay this because bit 0 of the stream should not be processed
         * so we do not add the vlan until after bit 0 was processed
         */
        lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

        /* process the remaining 30 bits in the key 2 bits at a time */
        for (i = 15; i; i--) {
                if (key & (0x0001 << i))
                        hash_result ^= lo_hash_dword >> i;
                if (key & (0x00010000 << i))
                        hash_result ^= hi_hash_dword >> i;
        }

        return hash_result;
}

static uint32_t
atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
                enum rte_fdir_pballoc_type pballoc)
{
        if (pballoc == RTE_FDIR_PBALLOC_256K)
                return ixgbe_atr_compute_hash_82599(input,
                                IXGBE_ATR_BUCKET_HASH_KEY) &
                                PERFECT_BUCKET_256KB_HASH_MASK;
        else if (pballoc == RTE_FDIR_PBALLOC_128K)
                return ixgbe_atr_compute_hash_82599(input,
                                IXGBE_ATR_BUCKET_HASH_KEY) &
                                PERFECT_BUCKET_128KB_HASH_MASK;
        else
                return ixgbe_atr_compute_hash_82599(input,
                                IXGBE_ATR_BUCKET_HASH_KEY) &
                                PERFECT_BUCKET_64KB_HASH_MASK;
}

/**
 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
 * @hw: pointer to hardware structure
 */
static inline int
ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
{
        int i;

        for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
                *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
                if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
                        return 0;
                rte_delay_us(IXGBE_FDIRCMD_CMD_INTERVAL_US);
        }

        return -ETIMEDOUT;
}

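/*
 * Note: with IXGBE_FDIRCMD_CMD_INTERVAL_US set to 10 above, the total
 * busy-wait budget of this loop is IXGBE_FDIRCMD_CMD_POLL * 10 microseconds
 * before the command is declared timed out; callers treat -ETIMEDOUT as a
 * filter programming failure and log it.
 */
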
/*
 * Calculate the hash value needed for signature-match filters. In the FreeBSD
 * driver, this is done by the optimised function
 * ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it
 * doesn't support calculating a hash for an IPv6 filter.
 */
static uint32_t
atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
                enum rte_fdir_pballoc_type pballoc)
{
        uint32_t bucket_hash, sig_hash;

        if (pballoc == RTE_FDIR_PBALLOC_256K)
                bucket_hash = ixgbe_atr_compute_hash_82599(input,
                                IXGBE_ATR_BUCKET_HASH_KEY) &
                                SIG_BUCKET_256KB_HASH_MASK;
        else if (pballoc == RTE_FDIR_PBALLOC_128K)
                bucket_hash = ixgbe_atr_compute_hash_82599(input,
                                IXGBE_ATR_BUCKET_HASH_KEY) &
                                SIG_BUCKET_128KB_HASH_MASK;
        else
                bucket_hash = ixgbe_atr_compute_hash_82599(input,
                                IXGBE_ATR_BUCKET_HASH_KEY) &
                                SIG_BUCKET_64KB_HASH_MASK;

        sig_hash = ixgbe_atr_compute_hash_82599(input,
                        IXGBE_ATR_SIGNATURE_HASH_KEY);

        return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash;
}

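/*
 * Layout of the value returned above: the bucket hash occupies the low bits
 * of FDIRHASH (13, 14 or 15 of them depending on pballoc) and the signature
 * hash is shifted into the upper half by IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT.
 * For example, assuming the 16-bit shift, a bucket hash of 0x0123 and a
 * signature hash of 0xBEEF combine to 0xBEEF0123.
 */
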
/*
 * This is based on ixgbe_fdir_write_perfect_filter_82599() in
 * base/ixgbe_82599.c, with the ability to set extra flags in the FDIRCMD
 * register added, and IPv6 support also added. The hash value is also
 * pre-calculated, as the pballoc value is needed to do it.
 */
static int
fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
                union ixgbe_atr_input *input, uint8_t queue,
                uint32_t fdircmd, uint32_t fdirhash,
                enum rte_fdir_mode mode)
{
        uint32_t fdirport, fdirvlan;
        u32 addr_low, addr_high;
        u32 tunnel_type = 0;
        int err = 0;
        volatile uint32_t *reg;

        if (mode == RTE_FDIR_MODE_PERFECT) {
                /* record the IPv4 address (big-endian);
                 * cannot use IXGBE_WRITE_REG.
                 */
                reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPSA);
                *reg = input->formatted.src_ip[0];
                reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRIPDA);
                *reg = input->formatted.dst_ip[0];

                /* record source and destination port (little-endian) */
                fdirport = IXGBE_NTOHS(input->formatted.dst_port);
                fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
                fdirport |= IXGBE_NTOHS(input->formatted.src_port);
                IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
        } else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
                   mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
                /* for mac vlan and tunnel modes */
                addr_low = ((u32)input->formatted.inner_mac[0] |
                            ((u32)input->formatted.inner_mac[1] << 8) |
                            ((u32)input->formatted.inner_mac[2] << 16) |
                            ((u32)input->formatted.inner_mac[3] << 24));
                addr_high = ((u32)input->formatted.inner_mac[4] |
                             ((u32)input->formatted.inner_mac[5] << 8));

                if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
                        IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
                        IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high);
                        IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
                } else {
                        /* tunnel mode */
                        if (input->formatted.tunnel_type !=
                                RTE_FDIR_TUNNEL_TYPE_NVGRE)
                                tunnel_type = 0x80000000;
                        tunnel_type |= addr_high;
                        IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
                        IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), tunnel_type);
                        IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
                                        input->formatted.tni_vni);
                }
        }

        /* record vlan (little-endian) and flex_bytes (big-endian) */
        fdirvlan = input->formatted.flex_bytes;
        fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
        fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

        /* configure FDIRHASH register */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

        /*
         * flush all previous writes to make certain registers are
         * programmed prior to issuing the command
         */
        IXGBE_WRITE_FLUSH(hw);

        /* configure FDIRCMD register */
        fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
                   IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
        fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
        fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
        fdircmd |= (uint32_t)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

        IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

        PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);

        err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
        if (err < 0)
                PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");

        return err;
}

/**
 * This function is based on ixgbe_atr_add_signature_filter_82599() in
 * base/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports
 * setting extra fields in the FDIRCMD register, and removes the code that was
 * verifying the flow_type field. According to the documentation, a flow type
 * of 00 (i.e. not TCP, UDP, or SCTP) is not supported; however, it appears to
 * work ok...
 *
 * Adds a signature hash filter
 * @hw: pointer to hardware structure
 * @input: unique input dword
 * @queue: queue index to direct traffic to
 * @fdircmd: any extra flags to set in fdircmd register
 * @fdirhash: pre-calculated hash value for the filter
 **/
static int
fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
                union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
                uint32_t fdirhash)
{
        int err = 0;

        PMD_INIT_FUNC_TRACE();

        /* configure FDIRCMD register */
        fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
                   IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
        fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
        fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;

        IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

        PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);

        err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
        if (err < 0)
                PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");

        return err;
}

/*
 * This is based on ixgbe_fdir_erase_perfect_filter_82599() in
 * base/ixgbe_82599.c. It is modified to take in the hash as a parameter so
 * that it can be used for removing signature and perfect filters.
 */
static int
fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
{
        uint32_t fdircmd = 0;
        int err = 0;

        IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

        /* flush hash to HW */
        IXGBE_WRITE_FLUSH(hw);

        /* Query if filter is present */
        IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

        err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Timeout querying for flow director filter.");
                return err;
        }

        /* if filter exists in hardware then remove it */
        if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
                IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
                IXGBE_WRITE_FLUSH(hw);
                IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
                                IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
        }
        err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
        if (err < 0)
                PMD_INIT_LOG(ERR, "Timeout erasing flow director filter.");
        return err;
}

/*
 * ixgbe_add_del_fdir_filter - add or remove a flow director filter.
 * @dev: pointer to the structure rte_eth_dev
 * @fdir_filter: fdir filter entry
 * @del: 1 - delete, 0 - add
 * @update: 1 - update
 */
static int
ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_filter *fdir_filter,
                bool del,
                bool update)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t fdircmd_flags;
        uint32_t fdirhash;
        union ixgbe_atr_input input;
        uint8_t queue;
        bool is_perfect = FALSE;
        int err;
        struct ixgbe_hw_fdir_info *info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

        if (fdir_mode == RTE_FDIR_MODE_NONE)
                return -ENOTSUP;

        /*
         * Sanity check for x550.
         * When adding a new filter with flow type set to IPv4-other,
         * the flow director masks should be configured beforehand,
         * with the L4 protocol and ports masked.
         */
        if ((!del) &&
            (hw->mac.type == ixgbe_mac_X550 ||
             hw->mac.type == ixgbe_mac_X550EM_x ||
             hw->mac.type == ixgbe_mac_X550EM_a) &&
            (fdir_filter->input.flow_type ==
             RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
            (info->mask.src_port_mask != 0 ||
             info->mask.dst_port_mask != 0)) {
                PMD_DRV_LOG(ERR, "On this device,"
                            " IPv4-other is not supported without"
                            " the L4 protocol and ports masked!");
                return -ENOTSUP;
        }

        if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
            fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
                is_perfect = TRUE;

        memset(&input, 0, sizeof(input));

        err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input,
                                             fdir_mode);
        if (err)
                return err;

        if (is_perfect) {
                if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
                        PMD_DRV_LOG(ERR, "IPv6 is not supported in"
                                    " perfect mode!");
                        return -ENOTSUP;
                }
                fdirhash = atr_compute_perfect_hash_82599(&input,
                                dev->data->dev_conf.fdir_conf.pballoc);
                fdirhash |= fdir_filter->soft_id <<
                        IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
        } else
                fdirhash = atr_compute_sig_hash_82599(&input,
                                dev->data->dev_conf.fdir_conf.pballoc);

        if (del) {
                err = fdir_erase_filter_82599(hw, fdirhash);
                if (err < 0)
                        PMD_DRV_LOG(ERR, "Failed to delete FDIR filter!");
                else
                        PMD_DRV_LOG(DEBUG, "Succeeded in deleting FDIR filter!");
                return err;
        }
        /* add or update an fdir filter */
        fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
        if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT) {
                if (is_perfect) {
                        queue = dev->data->dev_conf.fdir_conf.drop_queue;
                        fdircmd_flags |= IXGBE_FDIRCMD_DROP;
                } else {
                        PMD_DRV_LOG(ERR, "Drop option is not supported in"
                                    " signature mode.");
                        return -EINVAL;
                }
        } else if (fdir_filter->action.behavior == RTE_ETH_FDIR_ACCEPT &&
                   fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
                queue = (uint8_t)fdir_filter->action.rx_queue;
        else
                return -EINVAL;

        if (is_perfect) {
                err = fdir_write_perfect_filter_82599(hw, &input, queue,
                                                      fdircmd_flags, fdirhash,
                                                      fdir_mode);
        } else {
                err = fdir_add_signature_filter_82599(hw, &input, queue,
                                                      fdircmd_flags, fdirhash);
        }
        if (err < 0)
                PMD_DRV_LOG(ERR, "Failed to add FDIR filter!");
        else
                PMD_DRV_LOG(DEBUG, "Succeeded in adding FDIR filter");

        return err;
}

static int
ixgbe_fdir_flush(struct rte_eth_dev *dev)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_fdir_info *info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        int ret;

        ret = ixgbe_reinit_fdir_tables_82599(hw);
        if (ret < 0) {
                PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
                return ret;
        }

        info->f_add = 0;
        info->f_remove = 0;
        info->add = 0;
        info->remove = 0;

        return ret;
}

#define FDIRENTRIES_NUM_SHIFT 10
static void
ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_fdir_info *info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        uint32_t fdirctrl, max_num;
        uint8_t offset;

        fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
        offset = ((fdirctrl & IXGBE_FDIRCTRL_FLEX_MASK) >>
                        IXGBE_FDIRCTRL_FLEX_SHIFT) * sizeof(uint16_t);

        fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
        max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
                         (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
        if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT &&
            fdir_info->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
                fdir_info->guarant_spc = max_num;
        else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
                fdir_info->guarant_spc = max_num * 4;

        fdir_info->mask.vlan_tci_mask = info->mask.vlan_tci_mask;
        fdir_info->mask.ipv4_mask.src_ip = info->mask.src_ipv4_mask;
        fdir_info->mask.ipv4_mask.dst_ip = info->mask.dst_ipv4_mask;
        IPV6_MASK_TO_ADDR(info->mask.src_ipv6_mask,
                          fdir_info->mask.ipv6_mask.src_ip);
        IPV6_MASK_TO_ADDR(info->mask.dst_ipv6_mask,
                          fdir_info->mask.ipv6_mask.dst_ip);
        fdir_info->mask.src_port_mask = info->mask.src_port_mask;
        fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
        fdir_info->mask.mac_addr_byte_mask = info->mask.mac_addr_byte_mask;
        fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
        fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
        fdir_info->max_flexpayload = IXGBE_FDIR_MAX_FLEX_LEN;

        if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
            fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
                fdir_info->flow_types_mask[0] = 0;
        else
                fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;

        fdir_info->flex_payload_unit = sizeof(uint16_t);
        fdir_info->max_flex_payload_segment_num = 1;
        fdir_info->flex_payload_limit = IXGBE_MAX_FLX_SOURCE_OFF;
        fdir_info->flex_conf.nb_payloads = 1;
        fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
        fdir_info->flex_conf.flex_set[0].src_offset[0] = offset;
        fdir_info->flex_conf.flex_set[0].src_offset[1] = offset + 1;
        fdir_info->flex_conf.nb_flexmasks = 1;
        fdir_info->flex_conf.flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN;
        fdir_info->flex_conf.flex_mask[0].mask[0] =
                (uint8_t)(info->mask.flex_bytes_mask & 0x00FF);
        fdir_info->flex_conf.flex_mask[0].mask[1] =
                (uint8_t)((info->mask.flex_bytes_mask & 0xFF00) >> 8);
}

static void
ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_stats)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_hw_fdir_info *info =
                IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
        uint32_t reg, max_num;
        enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

        /* Get the information from registers */
        reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
        info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
                                     IXGBE_FDIRFREE_COLL_SHIFT);
        info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
                                IXGBE_FDIRFREE_FREE_SHIFT);

        reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
        info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
                                   IXGBE_FDIRLEN_MAXHASH_SHIFT);
        info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
                                 IXGBE_FDIRLEN_MAXLEN_SHIFT);

        reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
        info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
                        IXGBE_FDIRUSTAT_REMOVE_SHIFT;
        info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >>
                     IXGBE_FDIRUSTAT_ADD_SHIFT;

        reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF;
        info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >>
                          IXGBE_FDIRFSTAT_FREMOVE_SHIFT;
        info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >>
                       IXGBE_FDIRFSTAT_FADD_SHIFT;

        /* Copy the new information in the fdir parameter */
        fdir_stats->collision = info->collision;
        fdir_stats->free = info->free;
        fdir_stats->maxhash = info->maxhash;
        fdir_stats->maxlen = info->maxlen;
        fdir_stats->remove = info->remove;
        fdir_stats->add = info->add;
        fdir_stats->f_remove = info->f_remove;
        fdir_stats->f_add = info->f_add;

        reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
        max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
                         (reg & FDIRCTRL_PBALLOC_MASK)));
        if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
            fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
                fdir_stats->guarant_cnt = max_num - fdir_stats->free;
        else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE)
                fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
}

/*
 * ixgbe_fdir_ctrl_func - deal with all operations on flow director.
 * @dev: pointer to the structure rte_eth_dev
 * @filter_op: operation to be taken
 * @arg: a pointer to specific structure corresponding to the filter_op
 */
int
ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
                enum rte_filter_op filter_op, void *arg)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret = 0;

        if (hw->mac.type != ixgbe_mac_82599EB &&
            hw->mac.type != ixgbe_mac_X540 &&
            hw->mac.type != ixgbe_mac_X550 &&
            hw->mac.type != ixgbe_mac_X550EM_x &&
            hw->mac.type != ixgbe_mac_X550EM_a)
                return -ENOTSUP;

        if (filter_op == RTE_ETH_FILTER_NOP)
                return 0;

        if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
                return -EINVAL;

        switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
                ret = ixgbe_add_del_fdir_filter(dev,
                        (struct rte_eth_fdir_filter *)arg, FALSE, FALSE);
                break;
        case RTE_ETH_FILTER_UPDATE:
                ret = ixgbe_add_del_fdir_filter(dev,
                        (struct rte_eth_fdir_filter *)arg, FALSE, TRUE);
                break;
        case RTE_ETH_FILTER_DELETE:
                ret = ixgbe_add_del_fdir_filter(dev,
                        (struct rte_eth_fdir_filter *)arg, TRUE, FALSE);
                break;
        case RTE_ETH_FILTER_FLUSH:
                ret = ixgbe_fdir_flush(dev);
                break;
        case RTE_ETH_FILTER_INFO:
                ixgbe_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
                break;
        case RTE_ETH_FILTER_STATS:
                ixgbe_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
                break;
        default:
                PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
                ret = -EINVAL;
                break;
        }
        return ret;
}
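
/*
 * Usage sketch (application side, not part of the driver): in this DPDK
 * snapshot the entry point above is reached through the generic
 * filter-control API, e.g.
 *
 *      struct rte_eth_fdir_filter f = { ... };
 *      ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *                                    RTE_ETH_FILTER_ADD, &f);
 *
 * which the ixgbe ethdev layer dispatches to ixgbe_fdir_ctrl_func() with
 * filter_op set to RTE_ETH_FILTER_ADD and arg pointing at the filter
 * description.
 */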