/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include <libgen.h>

#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_eth_ctrl.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"

#ifdef RTE_ARCH_X86
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC rte_jhash
#endif

#define ENICPMD_CLSF_HASH_ENTRIES ENICPMD_FDIR_MAX

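/* Copy the accumulated flow director statistics out to the caller. */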
void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
{
        *stats = enic->fdir.stats;
}

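/* Report the configured flow director mode and the flow types this
 * adapter can match.
 */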
void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info)
{
        info->mode = (enum rte_fdir_mode)enic->fdir.modes;
        info->flow_types_mask[0] = enic->fdir.types_mask;
}

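/* Set up flow director capabilities: perfect-match mode, the supported
 * flow types, and the filter-copy callback. Adapters with advanced filter
 * support use the generic (v2) copy routine and match additional flow
 * types; others fall back to the IPv4 5-tuple (v1) routine.
 */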
void enic_fdir_info(struct enic *enic)
{
        enic->fdir.modes = (u32)RTE_FDIR_MODE_PERFECT;
        enic->fdir.types_mask = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
                                1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
        if (enic->adv_filters) {
                enic->fdir.types_mask |= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER |
                                         1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP |
                                         1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP |
                                         1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP |
                                         1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP |
                                         1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
                enic->fdir.copy_fltr_fn = copy_fltr_v2;
        } else {
                enic->fdir.copy_fltr_fn = copy_fltr_v1;
        }
}

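/* Set the match mask and value for one protocol layer of a generic
 * (FILTER_DPDK_1) filter.
 */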
static void
enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
               enum filter_generic_1_layer layer, void *mask, void *val,
               unsigned int len)
{
        gp->mask_flags |= flag;
        gp->val_flags |= gp->mask_flags;
        memcpy(gp->layer[layer].mask, mask, len);
        memcpy(gp->layer[layer].val, val, len);
}

/* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
 * without advanced filter support).
 */
void
copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
             __rte_unused struct rte_eth_fdir_masks *masks)
{
        fltr->type = FILTER_IPV4_5TUPLE;
        fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
                input->flow.ip4_flow.src_ip);
        fltr->u.ipv4.dst_addr = rte_be_to_cpu_32(
                input->flow.ip4_flow.dst_ip);
        fltr->u.ipv4.src_port = rte_be_to_cpu_16(
                input->flow.udp4_flow.src_port);
        fltr->u.ipv4.dst_port = rte_be_to_cpu_16(
                input->flow.udp4_flow.dst_port);

        if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
                fltr->u.ipv4.protocol = PROTO_TCP;
        else
                fltr->u.ipv4.protocol = PROTO_UDP;

        fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
}

/* Copy Flow Director filter to a VIC generic filter (requires advanced
 * filter support).
 */
void
copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
             struct rte_eth_fdir_masks *masks)
{
        struct filter_generic_1 *gp = &fltr->u.generic_1;

        fltr->type = FILTER_DPDK_1;
        memset(gp, 0, sizeof(*gp));

        if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
                struct udp_hdr udp_mask, udp_val;
                memset(&udp_mask, 0, sizeof(udp_mask));
                memset(&udp_val, 0, sizeof(udp_val));

                if (input->flow.udp4_flow.src_port) {
                        udp_mask.src_port = masks->src_port_mask;
                        udp_val.src_port = input->flow.udp4_flow.src_port;
                }
                if (input->flow.udp4_flow.dst_port) {
                        udp_mask.dst_port = masks->dst_port_mask;
                        udp_val.dst_port = input->flow.udp4_flow.dst_port;
                }

                enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
                               &udp_mask, &udp_val, sizeof(struct udp_hdr));
        } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
                struct tcp_hdr tcp_mask, tcp_val;
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                memset(&tcp_val, 0, sizeof(tcp_val));

                if (input->flow.tcp4_flow.src_port) {
                        tcp_mask.src_port = masks->src_port_mask;
                        tcp_val.src_port = input->flow.tcp4_flow.src_port;
                }
                if (input->flow.tcp4_flow.dst_port) {
                        tcp_mask.dst_port = masks->dst_port_mask;
                        tcp_val.dst_port = input->flow.tcp4_flow.dst_port;
                }

                enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
                               &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
        } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
                struct sctp_hdr sctp_mask, sctp_val;
                memset(&sctp_mask, 0, sizeof(sctp_mask));
                memset(&sctp_val, 0, sizeof(sctp_val));

                if (input->flow.sctp4_flow.src_port) {
                        sctp_mask.src_port = masks->src_port_mask;
                        sctp_val.src_port = input->flow.sctp4_flow.src_port;
                }
                if (input->flow.sctp4_flow.dst_port) {
                        sctp_mask.dst_port = masks->dst_port_mask;
                        sctp_val.dst_port = input->flow.sctp4_flow.dst_port;
                }
                if (input->flow.sctp4_flow.verify_tag) {
                        sctp_mask.tag = 0xffffffff;
                        sctp_val.tag = input->flow.sctp4_flow.verify_tag;
                }

                /* v4 proto should be 132 (SCTP), override ip4_flow.proto */
                input->flow.ip4_flow.proto = 132;

                enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
                               &sctp_val, sizeof(struct sctp_hdr));
        }

        if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
            input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
            input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
            input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
                struct ipv4_hdr ip4_mask, ip4_val;
                memset(&ip4_mask, 0, sizeof(struct ipv4_hdr));
                memset(&ip4_val, 0, sizeof(struct ipv4_hdr));

                if (input->flow.ip4_flow.tos) {
                        ip4_mask.type_of_service = masks->ipv4_mask.tos;
                        ip4_val.type_of_service = input->flow.ip4_flow.tos;
                }
                if (input->flow.ip4_flow.ttl) {
                        ip4_mask.time_to_live = masks->ipv4_mask.ttl;
                        ip4_val.time_to_live = input->flow.ip4_flow.ttl;
                }
                if (input->flow.ip4_flow.proto) {
                        ip4_mask.next_proto_id = masks->ipv4_mask.proto;
                        ip4_val.next_proto_id = input->flow.ip4_flow.proto;
                }
                if (input->flow.ip4_flow.src_ip) {
                        ip4_mask.src_addr = masks->ipv4_mask.src_ip;
                        ip4_val.src_addr = input->flow.ip4_flow.src_ip;
                }
                if (input->flow.ip4_flow.dst_ip) {
                        ip4_mask.dst_addr = masks->ipv4_mask.dst_ip;
                        ip4_val.dst_addr = input->flow.ip4_flow.dst_ip;
                }

                enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
                               &ip4_mask, &ip4_val, sizeof(struct ipv4_hdr));
        }

        if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
                struct udp_hdr udp_mask, udp_val;
                memset(&udp_mask, 0, sizeof(udp_mask));
                memset(&udp_val, 0, sizeof(udp_val));

                if (input->flow.udp6_flow.src_port) {
                        udp_mask.src_port = masks->src_port_mask;
                        udp_val.src_port = input->flow.udp6_flow.src_port;
                }
                if (input->flow.udp6_flow.dst_port) {
                        udp_mask.dst_port = masks->dst_port_mask;
                        udp_val.dst_port = input->flow.udp6_flow.dst_port;
                }
                enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
                               &udp_mask, &udp_val, sizeof(struct udp_hdr));
        } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
                struct tcp_hdr tcp_mask, tcp_val;
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                memset(&tcp_val, 0, sizeof(tcp_val));

                if (input->flow.tcp6_flow.src_port) {
                        tcp_mask.src_port = masks->src_port_mask;
                        tcp_val.src_port = input->flow.tcp6_flow.src_port;
                }
                if (input->flow.tcp6_flow.dst_port) {
                        tcp_mask.dst_port = masks->dst_port_mask;
                        tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
                }
                enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
                               &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
        } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
                struct sctp_hdr sctp_mask, sctp_val;
                memset(&sctp_mask, 0, sizeof(sctp_mask));
                memset(&sctp_val, 0, sizeof(sctp_val));

                if (input->flow.sctp6_flow.src_port) {
                        sctp_mask.src_port = masks->src_port_mask;
                        sctp_val.src_port = input->flow.sctp6_flow.src_port;
                }
                if (input->flow.sctp6_flow.dst_port) {
                        sctp_mask.dst_port = masks->dst_port_mask;
                        sctp_val.dst_port = input->flow.sctp6_flow.dst_port;
                }
                if (input->flow.sctp6_flow.verify_tag) {
                        sctp_mask.tag = 0xffffffff;
                        sctp_val.tag = input->flow.sctp6_flow.verify_tag;
                }

                /* v6 next header should be 132 (SCTP), override
                 * ipv6_flow.proto
                 */
                input->flow.ipv6_flow.proto = 132;

                enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
                               &sctp_val, sizeof(struct sctp_hdr));
        }

        if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
            input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
            input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
            input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
                struct ipv6_hdr ipv6_mask, ipv6_val;
                memset(&ipv6_mask, 0, sizeof(struct ipv6_hdr));
                memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));

                if (input->flow.ipv6_flow.proto) {
                        ipv6_mask.proto = masks->ipv6_mask.proto;
                        ipv6_val.proto = input->flow.ipv6_flow.proto;
                }
                memcpy(ipv6_mask.src_addr, masks->ipv6_mask.src_ip,
                       sizeof(ipv6_mask.src_addr));
                memcpy(ipv6_val.src_addr, input->flow.ipv6_flow.src_ip,
                       sizeof(ipv6_val.src_addr));
                memcpy(ipv6_mask.dst_addr, masks->ipv6_mask.dst_ip,
                       sizeof(ipv6_mask.dst_addr));
                memcpy(ipv6_val.dst_addr, input->flow.ipv6_flow.dst_ip,
                       sizeof(ipv6_val.dst_addr));
                if (input->flow.ipv6_flow.tc) {
                        ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12;
                        ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
                }
                if (input->flow.ipv6_flow.hop_limits) {
                        ipv6_mask.hop_limits = masks->ipv6_mask.hop_limits;
                        ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits;
                }

                enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
                               &ipv6_mask, &ipv6_val, sizeof(struct ipv6_hdr));
        }
}

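/* Remove a flow director filter: look the key up in the hash table,
 * delete the corresponding hardware classifier entry, and free the node.
 */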
int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
        int32_t pos;
        struct enic_fdir_node *key;
        /* See if the key is in the table */
        pos = rte_hash_del_key(enic->fdir.hash, params);
        switch (pos) {
        case -EINVAL:
        case -ENOENT:
                enic->fdir.stats.f_remove++;
                return -EINVAL;
        default:
                /* The entry is present in the table */
                key = enic->fdir.nodes[pos];

                /* Delete the filter */
                vnic_dev_classifier(enic->vdev, CLSF_DEL,
                                    &key->fltr_id, NULL, NULL);
                rte_free(key);
                enic->fdir.nodes[pos] = NULL;
                enic->fdir.stats.free++;
                enic->fdir.stats.remove++;
                break;
        }
        return 0;
}

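/* Add or update a flow director filter. The key is tracked in a hash
 * table; the match fields are translated into a VIC filter via
 * copy_fltr_fn and programmed into the classifier, steering matching
 * packets to the requested receive queue.
 */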
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
        struct enic_fdir_node *key;
        struct filter_v2 fltr;
        int32_t pos;
        u8 do_free = 0;
        u16 old_fltr_id = 0;
        u32 flowtype_supported;
        u16 flex_bytes;
        u16 queue;
        struct filter_action_v2 action;

        memset(&fltr, 0, sizeof(fltr));
        memset(&action, 0, sizeof(action));
        flowtype_supported = enic->fdir.types_mask
                             & (1 << params->input.flow_type);

        flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
                      (params->input.flow_ext.flexbytes[0] & 0xFF));

        if (!enic->fdir.hash ||
            (params->input.flow_ext.vlan_tci & 0xFFF) ||
            !flowtype_supported || flex_bytes ||
            params->action.behavior /* drop */) {
                enic->fdir.stats.f_add++;
                return -ENOTSUP;
        }

        /* Get the enicpmd RQ from the DPDK Rx queue */
        queue = enic_rte_rq_idx_to_sop_idx(params->action.rx_queue);

        if (!enic->rq[queue].in_use)
                return -EINVAL;

        /* See if the key is already there in the table */
        pos = rte_hash_del_key(enic->fdir.hash, params);
        switch (pos) {
        case -EINVAL:
                enic->fdir.stats.f_add++;
                return -EINVAL;
        case -ENOENT:
                /* Add a new classifier entry */
                if (!enic->fdir.stats.free) {
                        enic->fdir.stats.f_add++;
                        return -ENOSPC;
                }
                key = rte_zmalloc("enic_fdir_node",
                                  sizeof(struct enic_fdir_node), 0);
                if (!key) {
                        enic->fdir.stats.f_add++;
                        return -ENOMEM;
                }
                break;
        default:
                /* The entry is already present in the table.
                 * Check if there is a change in queue
                 */
                key = enic->fdir.nodes[pos];
                enic->fdir.nodes[pos] = NULL;
                if (unlikely(key->rq_index == queue)) {
                        /* Nothing to be done */
                        enic->fdir.stats.f_add++;
                        pos = rte_hash_add_key(enic->fdir.hash, params);
                        if (pos < 0) {
                                dev_err(enic, "Add hash key failed\n");
                                return pos;
                        }
                        enic->fdir.nodes[pos] = key;
                        dev_warning(enic,
                                    "FDIR rule is already present\n");
                        return 0;
                }

                if (likely(enic->fdir.stats.free)) {
                        /* Add the filter and then delete the old one.
                         * This is to avoid packets from going into the
                         * default queue during the window between
                         * delete and add
                         */
                        do_free = 1;
                        old_fltr_id = key->fltr_id;
                } else {
                        /* No free slots in the classifier.
                         * Delete the filter and add the modified one later
                         */
                        vnic_dev_classifier(enic->vdev, CLSF_DEL,
                                            &key->fltr_id, NULL, NULL);
                        enic->fdir.stats.free++;
                }

                break;
        }

        key->filter = *params;
        key->rq_index = queue;

        enic->fdir.copy_fltr_fn(&fltr, &params->input,
                                &enic->rte_dev->data->dev_conf.fdir_conf.mask);
        action.type = FILTER_ACTION_RQ_STEERING;
        action.rq_idx = queue;

        if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr,
                                 &action)) {
                key->fltr_id = queue;
        } else {
                dev_err(enic, "Add classifier entry failed\n");
                enic->fdir.stats.f_add++;
                rte_free(key);
                return -1;
        }

        if (do_free)
                vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL,
                                    NULL);
        else {
                enic->fdir.stats.free--;
                enic->fdir.stats.add++;
        }

        pos = rte_hash_add_key(enic->fdir.hash, params);
        if (pos < 0) {
                enic->fdir.stats.f_add++;
                dev_err(enic, "Add hash key failed\n");
                return pos;
        }

        enic->fdir.nodes[pos] = key;
        return 0;
}

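/* Delete all programmed classifier entries and free the filter hash table. */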
void enic_clsf_destroy(struct enic *enic)
{
        u32 index;
        struct enic_fdir_node *key;
        /* delete classifier entries */
        for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
                key = enic->fdir.nodes[index];
                if (key) {
                        vnic_dev_classifier(enic->vdev, CLSF_DEL,
                                            &key->fltr_id, NULL, NULL);
                        rte_free(key);
                        enic->fdir.nodes[index] = NULL;
                }
        }

        if (enic->fdir.hash) {
                rte_hash_free(enic->fdir.hash);
                enic->fdir.hash = NULL;
        }
}

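/* Create the hash table used to track flow director filters and reset the
 * statistics. Returns nonzero if the hash table could not be created.
 */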
int enic_clsf_init(struct enic *enic)
{
        char clsf_name[RTE_HASH_NAMESIZE];
        struct rte_hash_parameters hash_params = {
                .name = clsf_name,
                .entries = ENICPMD_CLSF_HASH_ENTRIES,
                .key_len = sizeof(struct rte_eth_fdir_filter),
                .hash_func = DEFAULT_HASH_FUNC,
                .hash_func_init_val = 0,
                .socket_id = SOCKET_ID_ANY,
        };
        snprintf(clsf_name, RTE_HASH_NAMESIZE, "enic_clsf_%s", enic->bdf_name);
        enic->fdir.hash = rte_hash_create(&hash_params);
        memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats));
        enic->fdir.stats.free = ENICPMD_FDIR_MAX;
        return NULL == enic->fdir.hash;
}