/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */
#include <libgen.h>

#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_eth_ctrl.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"

/* Pick the fastest available hash for the FDIR lookup table:
 * hardware CRC32 on x86, Jenkins hash elsewhere.
 */
#ifdef RTE_ARCH_X86
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC rte_jhash
#endif

#define ENICPMD_CLSF_HASH_ENTRIES ENICPMD_FDIR_MAX
41 void enic_fdir_stats_get(struct enic
*enic
, struct rte_eth_fdir_stats
*stats
)
43 *stats
= enic
->fdir
.stats
;
46 void enic_fdir_info_get(struct enic
*enic
, struct rte_eth_fdir_info
*info
)
48 info
->mode
= (enum rte_fdir_mode
)enic
->fdir
.modes
;
49 info
->flow_types_mask
[0] = enic
->fdir
.types_mask
;
52 void enic_fdir_info(struct enic
*enic
)
54 enic
->fdir
.modes
= (u32
)RTE_FDIR_MODE_PERFECT
;
55 enic
->fdir
.types_mask
= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP
|
56 1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP
;
57 if (enic
->adv_filters
) {
58 enic
->fdir
.types_mask
|= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER
|
59 1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP
|
60 1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP
|
61 1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP
|
62 1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP
|
63 1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER
;
64 enic
->fdir
.copy_fltr_fn
= copy_fltr_v2
;
66 enic
->fdir
.copy_fltr_fn
= copy_fltr_v1
;
71 enic_set_layer(struct filter_generic_1
*gp
, unsigned int flag
,
72 enum filter_generic_1_layer layer
, void *mask
, void *val
,
75 gp
->mask_flags
|= flag
;
76 gp
->val_flags
|= gp
->mask_flags
;
77 memcpy(gp
->layer
[layer
].mask
, mask
, len
);
78 memcpy(gp
->layer
[layer
].val
, val
, len
);
81 /* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
82 * without advanced filter support.
85 copy_fltr_v1(struct filter_v2
*fltr
, struct rte_eth_fdir_input
*input
,
86 __rte_unused
struct rte_eth_fdir_masks
*masks
)
88 fltr
->type
= FILTER_IPV4_5TUPLE
;
89 fltr
->u
.ipv4
.src_addr
= rte_be_to_cpu_32(
90 input
->flow
.ip4_flow
.src_ip
);
91 fltr
->u
.ipv4
.dst_addr
= rte_be_to_cpu_32(
92 input
->flow
.ip4_flow
.dst_ip
);
93 fltr
->u
.ipv4
.src_port
= rte_be_to_cpu_16(
94 input
->flow
.udp4_flow
.src_port
);
95 fltr
->u
.ipv4
.dst_port
= rte_be_to_cpu_16(
96 input
->flow
.udp4_flow
.dst_port
);
98 if (input
->flow_type
== RTE_ETH_FLOW_NONFRAG_IPV4_TCP
)
99 fltr
->u
.ipv4
.protocol
= PROTO_TCP
;
101 fltr
->u
.ipv4
.protocol
= PROTO_UDP
;
103 fltr
->u
.ipv4
.flags
= FILTER_FIELDS_IPV4_5TUPLE
;
106 /* Copy Flow Director filter to a VIC generic filter (requires advanced
110 copy_fltr_v2(struct filter_v2
*fltr
, struct rte_eth_fdir_input
*input
,
111 struct rte_eth_fdir_masks
*masks
)
113 struct filter_generic_1
*gp
= &fltr
->u
.generic_1
;
115 fltr
->type
= FILTER_DPDK_1
;
116 memset(gp
, 0, sizeof(*gp
));
118 if (input
->flow_type
== RTE_ETH_FLOW_NONFRAG_IPV4_UDP
) {
119 struct udp_hdr udp_mask
, udp_val
;
120 memset(&udp_mask
, 0, sizeof(udp_mask
));
121 memset(&udp_val
, 0, sizeof(udp_val
));
123 if (input
->flow
.udp4_flow
.src_port
) {
124 udp_mask
.src_port
= masks
->src_port_mask
;
125 udp_val
.src_port
= input
->flow
.udp4_flow
.src_port
;
127 if (input
->flow
.udp4_flow
.dst_port
) {
128 udp_mask
.dst_port
= masks
->dst_port_mask
;
129 udp_val
.dst_port
= input
->flow
.udp4_flow
.dst_port
;
132 enic_set_layer(gp
, FILTER_GENERIC_1_UDP
, FILTER_GENERIC_1_L4
,
133 &udp_mask
, &udp_val
, sizeof(struct udp_hdr
));
134 } else if (input
->flow_type
== RTE_ETH_FLOW_NONFRAG_IPV4_TCP
) {
135 struct tcp_hdr tcp_mask
, tcp_val
;
136 memset(&tcp_mask
, 0, sizeof(tcp_mask
));
137 memset(&tcp_val
, 0, sizeof(tcp_val
));
139 if (input
->flow
.tcp4_flow
.src_port
) {
140 tcp_mask
.src_port
= masks
->src_port_mask
;
141 tcp_val
.src_port
= input
->flow
.tcp4_flow
.src_port
;
143 if (input
->flow
.tcp4_flow
.dst_port
) {
144 tcp_mask
.dst_port
= masks
->dst_port_mask
;
145 tcp_val
.dst_port
= input
->flow
.tcp4_flow
.dst_port
;
148 enic_set_layer(gp
, FILTER_GENERIC_1_TCP
, FILTER_GENERIC_1_L4
,
149 &tcp_mask
, &tcp_val
, sizeof(struct tcp_hdr
));
150 } else if (input
->flow_type
== RTE_ETH_FLOW_NONFRAG_IPV4_SCTP
) {
151 struct sctp_hdr sctp_mask
, sctp_val
;
152 memset(&sctp_mask
, 0, sizeof(sctp_mask
));
153 memset(&sctp_val
, 0, sizeof(sctp_val
));
155 if (input
->flow
.sctp4_flow
.src_port
) {
156 sctp_mask
.src_port
= masks
->src_port_mask
;
157 sctp_val
.src_port
= input
->flow
.sctp4_flow
.src_port
;
159 if (input
->flow
.sctp4_flow
.dst_port
) {
160 sctp_mask
.dst_port
= masks
->dst_port_mask
;
161 sctp_val
.dst_port
= input
->flow
.sctp4_flow
.dst_port
;
163 if (input
->flow
.sctp4_flow
.verify_tag
) {
164 sctp_mask
.tag
= 0xffffffff;
165 sctp_val
.tag
= input
->flow
.sctp4_flow
.verify_tag
;
168 /* v4 proto should be 132, override ip4_flow.proto */
169 input
->flow
.ip4_flow
.proto
= 132;
171 enic_set_layer(gp
, 0, FILTER_GENERIC_1_L4
, &sctp_mask
,
172 &sctp_val
, sizeof(struct sctp_hdr
));
175 if (input
->flow_type
== RTE_ETH_FLOW_NONFRAG_IPV4_UDP
||
176 input
->flow_type
== RTE_ETH_FLOW_NONFRAG_IPV4_TCP
||
177 input
->flow_type
== RTE_ETH_FLOW_NONFRAG_IPV4_SCTP
||
178 input
->flow_type
== RTE_ETH_FLOW_NONFRAG_IPV4_OTHER
) {
179 struct ipv4_hdr ip4_mask
, ip4_val
;
180 memset(&ip4_mask
, 0, sizeof(struct ipv4_hdr
));
181 memset(&ip4_val
, 0, sizeof(struct ipv4_hdr
));
183 if (input
->flow
.ip4_flow
.tos
) {
184 ip4_mask
.type_of_service
= masks
->ipv4_mask
.tos
;
185 ip4_val
.type_of_service
= input
->flow
.ip4_flow
.tos
;
187 if (input
->flow
.ip4_flow
.ttl
) {
188 ip4_mask
.time_to_live
= masks
->ipv4_mask
.ttl
;
189 ip4_val
.time_to_live
= input
->flow
.ip4_flow
.ttl
;
191 if (input
->flow
.ip4_flow
.proto
) {
192 ip4_mask
.next_proto_id
= masks
->ipv4_mask
.proto
;
193 ip4_val
.next_proto_id
= input
->flow
.ip4_flow
.proto
;
195 if (input
->flow
.ip4_flow
.src_ip
) {
196 ip4_mask
.src_addr
= masks
->ipv4_mask
.src_ip
;
197 ip4_val
.src_addr
= input
->flow
.ip4_flow
.src_ip
;
199 if (input
->flow
.ip4_flow
.dst_ip
) {
200 ip4_mask
.dst_addr
= masks
->ipv4_mask
.dst_ip
;
201 ip4_val
.dst_addr
= input
->flow
.ip4_flow
.dst_ip
;
204 enic_set_layer(gp
, FILTER_GENERIC_1_IPV4
, FILTER_GENERIC_1_L3
,
205 &ip4_mask
, &ip4_val
, sizeof(struct ipv4_hdr
));
208 if (input
->flow_type
== RTE_ETH_FLOW_NONFRAG_IPV6_UDP
) {
209 struct udp_hdr udp_mask
, udp_val
;
210 memset(&udp_mask
, 0, sizeof(udp_mask
));
211 memset(&udp_val
, 0, sizeof(udp_val
));
213 if (input
->flow
.udp6_flow
.src_port
) {
214 udp_mask
.src_port
= masks
->src_port_mask
;
215 udp_val
.src_port
= input
->flow
.udp6_flow
.src_port
;
217 if (input
->flow
.udp6_flow
.dst_port
) {
218 udp_mask
.dst_port
= masks
->dst_port_mask
;
219 udp_val
.dst_port
= input
->flow
.udp6_flow
.dst_port
;
221 enic_set_layer(gp
, FILTER_GENERIC_1_UDP
, FILTER_GENERIC_1_L4
,
222 &udp_mask
, &udp_val
, sizeof(struct udp_hdr
));
223 } else if (input
->flow_type
== RTE_ETH_FLOW_NONFRAG_IPV6_TCP
) {
224 struct tcp_hdr tcp_mask
, tcp_val
;
225 memset(&tcp_mask
, 0, sizeof(tcp_mask
));
226 memset(&tcp_val
, 0, sizeof(tcp_val
));
228 if (input
->flow
.tcp6_flow
.src_port
) {
229 tcp_mask
.src_port
= masks
->src_port_mask
;
230 tcp_val
.src_port
= input
->flow
.tcp6_flow
.src_port
;
232 if (input
->flow
.tcp6_flow
.dst_port
) {
233 tcp_mask
.dst_port
= masks
->dst_port_mask
;
234 tcp_val
.dst_port
= input
->flow
.tcp6_flow
.dst_port
;
236 enic_set_layer(gp
, FILTER_GENERIC_1_TCP
, FILTER_GENERIC_1_L4
,
237 &tcp_mask
, &tcp_val
, sizeof(struct tcp_hdr
));
238 } else if (input
->flow_type
== RTE_ETH_FLOW_NONFRAG_IPV6_SCTP
) {
239 struct sctp_hdr sctp_mask
, sctp_val
;
240 memset(&sctp_mask
, 0, sizeof(sctp_mask
));
241 memset(&sctp_val
, 0, sizeof(sctp_val
));
243 if (input
->flow
.sctp6_flow
.src_port
) {
244 sctp_mask
.src_port
= masks
->src_port_mask
;
245 sctp_val
.src_port
= input
->flow
.sctp6_flow
.src_port
;
247 if (input
->flow
.sctp6_flow
.dst_port
) {
248 sctp_mask
.dst_port
= masks
->dst_port_mask
;
249 sctp_val
.dst_port
= input
->flow
.sctp6_flow
.dst_port
;
251 if (input
->flow
.sctp6_flow
.verify_tag
) {
252 sctp_mask
.tag
= 0xffffffff;
253 sctp_val
.tag
= input
->flow
.sctp6_flow
.verify_tag
;
256 /* v4 proto should be 132, override ipv6_flow.proto */
257 input
->flow
.ipv6_flow
.proto
= 132;
259 enic_set_layer(gp
, 0, FILTER_GENERIC_1_L4
, &sctp_mask
,
260 &sctp_val
, sizeof(struct sctp_hdr
));
263 if (input
->flow_type
== RTE_ETH_FLOW_NONFRAG_IPV6_UDP
||
264 input
->flow_type
== RTE_ETH_FLOW_NONFRAG_IPV6_TCP
||
265 input
->flow_type
== RTE_ETH_FLOW_NONFRAG_IPV6_SCTP
||
266 input
->flow_type
== RTE_ETH_FLOW_NONFRAG_IPV6_OTHER
) {
267 struct ipv6_hdr ipv6_mask
, ipv6_val
;
268 memset(&ipv6_mask
, 0, sizeof(struct ipv6_hdr
));
269 memset(&ipv6_val
, 0, sizeof(struct ipv6_hdr
));
271 if (input
->flow
.ipv6_flow
.proto
) {
272 ipv6_mask
.proto
= masks
->ipv6_mask
.proto
;
273 ipv6_val
.proto
= input
->flow
.ipv6_flow
.proto
;
275 memcpy(ipv6_mask
.src_addr
, masks
->ipv6_mask
.src_ip
,
276 sizeof(ipv6_mask
.src_addr
));
277 memcpy(ipv6_val
.src_addr
, input
->flow
.ipv6_flow
.src_ip
,
278 sizeof(ipv6_val
.src_addr
));
279 memcpy(ipv6_mask
.dst_addr
, masks
->ipv6_mask
.dst_ip
,
280 sizeof(ipv6_mask
.dst_addr
));
281 memcpy(ipv6_val
.dst_addr
, input
->flow
.ipv6_flow
.dst_ip
,
282 sizeof(ipv6_val
.dst_addr
));
283 if (input
->flow
.ipv6_flow
.tc
) {
284 ipv6_mask
.vtc_flow
= masks
->ipv6_mask
.tc
<< 12;
285 ipv6_val
.vtc_flow
= input
->flow
.ipv6_flow
.tc
<< 12;
287 if (input
->flow
.ipv6_flow
.hop_limits
) {
288 ipv6_mask
.hop_limits
= masks
->ipv6_mask
.hop_limits
;
289 ipv6_val
.hop_limits
= input
->flow
.ipv6_flow
.hop_limits
;
292 enic_set_layer(gp
, FILTER_GENERIC_1_IPV6
, FILTER_GENERIC_1_L3
,
293 &ipv6_mask
, &ipv6_val
, sizeof(struct ipv6_hdr
));
297 int enic_fdir_del_fltr(struct enic
*enic
, struct rte_eth_fdir_filter
*params
)
300 struct enic_fdir_node
*key
;
301 /* See if the key is in the table */
302 pos
= rte_hash_del_key(enic
->fdir
.hash
, params
);
306 enic
->fdir
.stats
.f_remove
++;
309 /* The entry is present in the table */
310 key
= enic
->fdir
.nodes
[pos
];
312 /* Delete the filter */
313 vnic_dev_classifier(enic
->vdev
, CLSF_DEL
,
314 &key
->fltr_id
, NULL
, NULL
);
316 enic
->fdir
.nodes
[pos
] = NULL
;
317 enic
->fdir
.stats
.free
++;
318 enic
->fdir
.stats
.remove
++;
324 int enic_fdir_add_fltr(struct enic
*enic
, struct rte_eth_fdir_filter
*params
)
326 struct enic_fdir_node
*key
;
327 struct filter_v2 fltr
;
331 u32 flowtype_supported
;
334 struct filter_action_v2 action
;
336 memset(&fltr
, 0, sizeof(fltr
));
337 memset(&action
, 0, sizeof(action
));
338 flowtype_supported
= enic
->fdir
.types_mask
339 & (1 << params
->input
.flow_type
);
341 flex_bytes
= ((params
->input
.flow_ext
.flexbytes
[1] << 8 & 0xFF00) |
342 (params
->input
.flow_ext
.flexbytes
[0] & 0xFF));
344 if (!enic
->fdir
.hash
||
345 (params
->input
.flow_ext
.vlan_tci
& 0xFFF) ||
346 !flowtype_supported
|| flex_bytes
||
347 params
->action
.behavior
/* drop */) {
348 enic
->fdir
.stats
.f_add
++;
352 /* Get the enicpmd RQ from the DPDK Rx queue */
353 queue
= enic_rte_rq_idx_to_sop_idx(params
->action
.rx_queue
);
355 if (!enic
->rq
[queue
].in_use
)
358 /* See if the key is already there in the table */
359 pos
= rte_hash_del_key(enic
->fdir
.hash
, params
);
362 enic
->fdir
.stats
.f_add
++;
365 /* Add a new classifier entry */
366 if (!enic
->fdir
.stats
.free
) {
367 enic
->fdir
.stats
.f_add
++;
370 key
= rte_zmalloc("enic_fdir_node",
371 sizeof(struct enic_fdir_node
), 0);
373 enic
->fdir
.stats
.f_add
++;
378 /* The entry is already present in the table.
379 * Check if there is a change in queue
381 key
= enic
->fdir
.nodes
[pos
];
382 enic
->fdir
.nodes
[pos
] = NULL
;
383 if (unlikely(key
->rq_index
== queue
)) {
384 /* Nothing to be done */
385 enic
->fdir
.stats
.f_add
++;
386 pos
= rte_hash_add_key(enic
->fdir
.hash
, params
);
388 dev_err(enic
, "Add hash key failed\n");
391 enic
->fdir
.nodes
[pos
] = key
;
393 "FDIR rule is already present\n");
397 if (likely(enic
->fdir
.stats
.free
)) {
398 /* Add the filter and then delete the old one.
399 * This is to avoid packets from going into the
400 * default queue during the window between
404 old_fltr_id
= key
->fltr_id
;
406 /* No free slots in the classifier.
407 * Delete the filter and add the modified one later
409 vnic_dev_classifier(enic
->vdev
, CLSF_DEL
,
410 &key
->fltr_id
, NULL
, NULL
);
411 enic
->fdir
.stats
.free
++;
417 key
->filter
= *params
;
418 key
->rq_index
= queue
;
420 enic
->fdir
.copy_fltr_fn(&fltr
, ¶ms
->input
,
421 &enic
->rte_dev
->data
->dev_conf
.fdir_conf
.mask
);
422 action
.type
= FILTER_ACTION_RQ_STEERING
;
423 action
.rq_idx
= queue
;
425 if (!vnic_dev_classifier(enic
->vdev
, CLSF_ADD
, &queue
, &fltr
,
427 key
->fltr_id
= queue
;
429 dev_err(enic
, "Add classifier entry failed\n");
430 enic
->fdir
.stats
.f_add
++;
436 vnic_dev_classifier(enic
->vdev
, CLSF_DEL
, &old_fltr_id
, NULL
,
439 enic
->fdir
.stats
.free
--;
440 enic
->fdir
.stats
.add
++;
443 pos
= rte_hash_add_key(enic
->fdir
.hash
, params
);
445 enic
->fdir
.stats
.f_add
++;
446 dev_err(enic
, "Add hash key failed\n");
450 enic
->fdir
.nodes
[pos
] = key
;
454 void enic_clsf_destroy(struct enic
*enic
)
457 struct enic_fdir_node
*key
;
458 /* delete classifier entries */
459 for (index
= 0; index
< ENICPMD_FDIR_MAX
; index
++) {
460 key
= enic
->fdir
.nodes
[index
];
462 vnic_dev_classifier(enic
->vdev
, CLSF_DEL
,
463 &key
->fltr_id
, NULL
, NULL
);
465 enic
->fdir
.nodes
[index
] = NULL
;
469 if (enic
->fdir
.hash
) {
470 rte_hash_free(enic
->fdir
.hash
);
471 enic
->fdir
.hash
= NULL
;
475 int enic_clsf_init(struct enic
*enic
)
477 char clsf_name
[RTE_HASH_NAMESIZE
];
478 struct rte_hash_parameters hash_params
= {
480 .entries
= ENICPMD_CLSF_HASH_ENTRIES
,
481 .key_len
= sizeof(struct rte_eth_fdir_filter
),
482 .hash_func
= DEFAULT_HASH_FUNC
,
483 .hash_func_init_val
= 0,
484 .socket_id
= SOCKET_ID_ANY
,
486 snprintf(clsf_name
, RTE_HASH_NAMESIZE
, "enic_clsf_%s", enic
->bdf_name
);
487 enic
->fdir
.hash
= rte_hash_create(&hash_params
);
488 memset(&enic
->fdir
.stats
, 0, sizeof(enic
->fdir
.stats
));
489 enic
->fdir
.stats
.free
= ENICPMD_FDIR_MAX
;
490 return NULL
== enic
->fdir
.hash
;