/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */
#include <stdlib.h>
#include <string.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_esp.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_vxlan.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>

#include "rte_table_action.h"

#define rte_htons rte_cpu_to_be_16
#define rte_htonl rte_cpu_to_be_32

#define rte_ntohs rte_be_to_cpu_16
#define rte_ntohl rte_be_to_cpu_32

/**
 * RTE_TABLE_ACTION_FWD
 */
#define fwd_data rte_pipeline_table_entry

static int
fwd_apply(struct fwd_data *data,
	struct rte_table_action_fwd_params *p)
{
	data->action = p->action;

	if (p->action == RTE_PIPELINE_ACTION_PORT)
		data->port_id = p->id;

	if (p->action == RTE_PIPELINE_ACTION_TABLE)
		data->table_id = p->id;

	return 0;
}

/**
 * RTE_TABLE_ACTION_LB
 */
static int
lb_cfg_check(struct rte_table_action_lb_config *cfg)
{
	if ((cfg == NULL) ||
		(cfg->key_size < RTE_TABLE_ACTION_LB_KEY_SIZE_MIN) ||
		(cfg->key_size > RTE_TABLE_ACTION_LB_KEY_SIZE_MAX) ||
		(!rte_is_power_of_2(cfg->key_size)) ||
		(cfg->f_hash == NULL))
		return -1;

	return 0;
}

struct lb_data {
	uint32_t out[RTE_TABLE_ACTION_LB_TABLE_SIZE];
} __rte_packed;

static int
lb_apply(struct lb_data *data,
	struct rte_table_action_lb_params *p)
{
	memcpy(data->out, p->out, sizeof(data->out));

	return 0;
}

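/*
 * Per-packet load balancing: hash the key found at key_offset in the mbuf
 * metadata, use the low bits of the digest to index the LB output table
 * (RTE_TABLE_ACTION_LB_TABLE_SIZE entries, a power of two), and write the
 * selected value at out_offset in the metadata.
 */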
static __rte_always_inline void
pkt_work_lb(struct rte_mbuf *mbuf,
	struct lb_data *data,
	struct rte_table_action_lb_config *cfg)
{
	uint8_t *pkt_key = RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->key_offset);
	uint32_t *out = RTE_MBUF_METADATA_UINT32_PTR(mbuf, cfg->out_offset);
	uint64_t digest, pos;
	uint32_t out_val;

	digest = cfg->f_hash(pkt_key,
		cfg->key_mask,
		cfg->key_size,
		cfg->seed);
	pos = digest & (RTE_TABLE_ACTION_LB_TABLE_SIZE - 1);
	out_val = data->out[pos];

	*out = out_val;
}

/**
 * RTE_TABLE_ACTION_MTR
 */
static int
mtr_cfg_check(struct rte_table_action_mtr_config *mtr)
{
	if ((mtr->alg == RTE_TABLE_ACTION_METER_SRTCM) ||
		((mtr->n_tc != 1) && (mtr->n_tc != 4)) ||
		(mtr->n_bytes_enabled != 0))
		return -ENOTSUP;
	return 0;
}

struct mtr_trtcm_data {
	struct rte_meter_trtcm trtcm;
	uint64_t stats[RTE_COLORS];
} __rte_packed;

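/*
 * The low byte of each 64-bit stats[] word doubles as storage for per-TC
 * meter state:
 *   bits [1:0] (all colors): policer output color;
 *   bit  [2]   (all colors): policer drop flag;
 *   bits [7:3] (green only): meter profile table index (0..31);
 *   bits [63:8]: the actual per-color packet counter.
 */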
#define MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data) \
	(((data)->stats[RTE_COLOR_GREEN] & 0xF8LLU) >> 3)

static void
mtr_trtcm_data_meter_profile_id_set(struct mtr_trtcm_data *data,
	uint32_t profile_id)
{
	data->stats[RTE_COLOR_GREEN] &= ~0xF8LLU;
	data->stats[RTE_COLOR_GREEN] |= (profile_id % 32) << 3;
}

#define MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color)\
	(((data)->stats[(color)] & 4LLU) >> 2)

#define MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color)\
	((enum rte_color)((data)->stats[(color)] & 3LLU))

static void
mtr_trtcm_data_policer_action_set(struct mtr_trtcm_data *data,
	enum rte_color color,
	enum rte_table_action_policer action)
{
	if (action == RTE_TABLE_ACTION_POLICER_DROP) {
		data->stats[color] |= 4LLU;
	} else {
		data->stats[color] &= ~7LLU;
		data->stats[color] |= color & 3LLU;
	}
}

static uint64_t
mtr_trtcm_data_stats_get(struct mtr_trtcm_data *data,
	enum rte_color color)
{
	return data->stats[color] >> 8;
}

static void
mtr_trtcm_data_stats_reset(struct mtr_trtcm_data *data,
	enum rte_color color)
{
	data->stats[color] &= 0xFFLU;
}

#define MTR_TRTCM_DATA_STATS_INC(data, color) \
	((data)->stats[(color)] += (1LLU << 8))

static size_t
mtr_data_size(struct rte_table_action_mtr_config *mtr)
{
	return mtr->n_tc * sizeof(struct mtr_trtcm_data);
}

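/*
 * 64-entry table indexed by the 6-bit DSCP field, mapping each code point
 * to a packet color, traffic class and traffic class queue.
 */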
struct dscp_table_entry_data {
	enum rte_color color;
	uint16_t tc;
	uint16_t tc_queue;
};

struct dscp_table_data {
	struct dscp_table_entry_data entry[64];
};

struct meter_profile_data {
	struct rte_meter_trtcm_profile profile;
	uint32_t profile_id;
	int valid;
};

static struct meter_profile_data *
meter_profile_data_find(struct meter_profile_data *mp,
	uint32_t mp_size,
	uint32_t profile_id)
{
	uint32_t i;

	for (i = 0; i < mp_size; i++) {
		struct meter_profile_data *mp_data = &mp[i];

		if (mp_data->valid && (mp_data->profile_id == profile_id))
			return mp_data;
	}

	return NULL;
}

static struct meter_profile_data *
meter_profile_data_find_unused(struct meter_profile_data *mp,
	uint32_t mp_size)
{
	uint32_t i;

	for (i = 0; i < mp_size; i++) {
		struct meter_profile_data *mp_data = &mp[i];

		if (!mp_data->valid)
			return mp_data;
	}

	return NULL;
}

static int
mtr_apply_check(struct rte_table_action_mtr_params *p,
	struct rte_table_action_mtr_config *cfg,
	struct meter_profile_data *mp,
	uint32_t mp_size)
{
	uint32_t i;

	if (p->tc_mask > RTE_LEN2MASK(cfg->n_tc, uint32_t))
		return -EINVAL;

	for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
		struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
		struct meter_profile_data *mp_data;

		if ((p->tc_mask & (1LLU << i)) == 0)
			continue;

		mp_data = meter_profile_data_find(mp,
			mp_size,
			p_tc->meter_profile_id);
		if (!mp_data)
			return -EINVAL;
	}

	return 0;
}

static int
mtr_apply(struct mtr_trtcm_data *data,
	struct rte_table_action_mtr_params *p,
	struct rte_table_action_mtr_config *cfg,
	struct meter_profile_data *mp,
	uint32_t mp_size)
{
	uint32_t i;
	int status;

	/* Check input arguments */
	status = mtr_apply_check(p, cfg, mp, mp_size);
	if (status)
		return status;

	/* Apply */
	for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
		struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
		struct mtr_trtcm_data *data_tc = &data[i];
		struct meter_profile_data *mp_data;

		if ((p->tc_mask & (1LLU << i)) == 0)
			continue;

		/* Find profile */
		mp_data = meter_profile_data_find(mp,
			mp_size,
			p_tc->meter_profile_id);
		if (!mp_data)
			return -EINVAL;

		memset(data_tc, 0, sizeof(*data_tc));

		/* Meter object */
		status = rte_meter_trtcm_config(&data_tc->trtcm,
			&mp_data->profile);
		if (status)
			return status;

		/* Meter profile */
		mtr_trtcm_data_meter_profile_id_set(data_tc,
			mp_data - mp);

		/* Policer actions */
		mtr_trtcm_data_policer_action_set(data_tc,
			RTE_COLOR_GREEN,
			p_tc->policer[RTE_COLOR_GREEN]);

		mtr_trtcm_data_policer_action_set(data_tc,
			RTE_COLOR_YELLOW,
			p_tc->policer[RTE_COLOR_YELLOW]);

		mtr_trtcm_data_policer_action_set(data_tc,
			RTE_COLOR_RED,
			p_tc->policer[RTE_COLOR_RED]);
	}

	return 0;
}

static __rte_always_inline uint64_t
pkt_work_mtr(struct rte_mbuf *mbuf,
	struct mtr_trtcm_data *data,
	struct dscp_table_data *dscp_table,
	struct meter_profile_data *mp,
	uint64_t time,
	uint32_t dscp,
	uint16_t total_length)
{
	uint64_t drop_mask;
	struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
	enum rte_color color_in, color_meter, color_policer;
	uint32_t tc, mp_id;

	tc = dscp_entry->tc;
	color_in = dscp_entry->color;
	data += tc;
	mp_id = MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data);

	/* Meter */
	color_meter = rte_meter_trtcm_color_aware_check(
		&data->trtcm,
		&mp[mp_id].profile,
		time,
		total_length,
		color_in);

	/* Stats */
	MTR_TRTCM_DATA_STATS_INC(data, color_meter);

	/* Police */
	drop_mask = MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color_meter);
	color_policer =
		MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color_meter);
	rte_mbuf_sched_color_set(mbuf, (uint8_t)color_policer);

	return drop_mask;
}

/**
 * RTE_TABLE_ACTION_TM
 */
static int
tm_cfg_check(struct rte_table_action_tm_config *tm)
{
	if ((tm->n_subports_per_port == 0) ||
		(rte_is_power_of_2(tm->n_subports_per_port) == 0) ||
		(tm->n_subports_per_port > UINT16_MAX) ||
		(tm->n_pipes_per_subport == 0) ||
		(rte_is_power_of_2(tm->n_pipes_per_subport) == 0))
		return -ENOTSUP;

	return 0;
}

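/*
 * The queue_id computed at entry add time packs the subport and pipe IDs
 * above the 4 low bits: subport << (log2(n_pipes_per_subport) + 4) |
 * pipe << 4. The 4 low bits are filled in per packet with the traffic
 * class queue looked up from the DSCP table (see pkt_work_tm() below).
 */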
struct tm_data {
	uint32_t queue_id;
	uint32_t reserved;
} __rte_packed;

static int
tm_apply_check(struct rte_table_action_tm_params *p,
	struct rte_table_action_tm_config *cfg)
{
	if ((p->subport_id >= cfg->n_subports_per_port) ||
		(p->pipe_id >= cfg->n_pipes_per_subport))
		return -EINVAL;

	return 0;
}

static int
tm_apply(struct tm_data *data,
	struct rte_table_action_tm_params *p,
	struct rte_table_action_tm_config *cfg)
{
	int status;

	/* Check input arguments */
	status = tm_apply_check(p, cfg);
	if (status)
		return status;

	/* Apply */
	data->queue_id = p->subport_id <<
		(__builtin_ctz(cfg->n_pipes_per_subport) + 4) |
		p->pipe_id << 4;

	return 0;
}

static __rte_always_inline void
pkt_work_tm(struct rte_mbuf *mbuf,
	struct tm_data *data,
	struct dscp_table_data *dscp_table,
	uint32_t dscp)
{
	struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
	uint32_t queue_id = data->queue_id |
		dscp_entry->tc_queue;
	rte_mbuf_sched_set(mbuf, queue_id, dscp_entry->tc,
		(uint8_t)dscp_entry->color);
}

/**
 * RTE_TABLE_ACTION_ENCAP
 */
static int
encap_valid(enum rte_table_action_encap_type encap)
{
	switch (encap) {
	case RTE_TABLE_ACTION_ENCAP_ETHER:
	case RTE_TABLE_ACTION_ENCAP_VLAN:
	case RTE_TABLE_ACTION_ENCAP_QINQ:
	case RTE_TABLE_ACTION_ENCAP_MPLS:
	case RTE_TABLE_ACTION_ENCAP_PPPOE:
	case RTE_TABLE_ACTION_ENCAP_VXLAN:
	case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
		return 1;
	default:
		return 0;
	}
}

static int
encap_cfg_check(struct rte_table_action_encap_config *encap)
{
	if ((encap->encap_mask == 0) ||
		(__builtin_popcountll(encap->encap_mask) != 1))
		return -ENOTSUP;

	return 0;
}

struct encap_ether_data {
	struct rte_ether_hdr ether;
};

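/* Build the 16-bit VLAN TCI: PCP (3 bits), DEI (1 bit), VID (12 bits). */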
#define VLAN(pcp, dei, vid) \
	((uint16_t)((((uint64_t)(pcp)) & 0x7LLU) << 13) | \
	((((uint64_t)(dei)) & 0x1LLU) << 12) | \
	(((uint64_t)(vid)) & 0xFFFLLU)) \

struct encap_vlan_data {
	struct rte_ether_hdr ether;
	struct rte_vlan_hdr vlan;
};

struct encap_qinq_data {
	struct rte_ether_hdr ether;
	struct rte_vlan_hdr svlan;
	struct rte_vlan_hdr cvlan;
};

#define ETHER_TYPE_MPLS_UNICAST 0x8847

#define ETHER_TYPE_MPLS_MULTICAST 0x8848

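/*
 * Build a 32-bit MPLS label stack entry: label (20 bits), TC (3 bits),
 * bottom-of-stack S flag (1 bit), TTL (8 bits).
 */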
#define MPLS(label, tc, s, ttl) \
	((uint32_t)(((((uint64_t)(label)) & 0xFFFFFLLU) << 12) |\
	((((uint64_t)(tc)) & 0x7LLU) << 9) | \
	((((uint64_t)(s)) & 0x1LLU) << 8) | \
	(((uint64_t)(ttl)) & 0xFFLLU)))

struct encap_mpls_data {
	struct rte_ether_hdr ether;
	uint32_t mpls[RTE_TABLE_ACTION_MPLS_LABELS_MAX];
	uint32_t mpls_count;
} __rte_packed __rte_aligned(2);

#define PPP_PROTOCOL_IP 0x0021

struct pppoe_ppp_hdr {
	uint16_t ver_type_code;
	uint16_t session_id;
	uint16_t length;
	uint16_t protocol;
};

struct encap_pppoe_data {
	struct rte_ether_hdr ether;
	struct pppoe_ppp_hdr pppoe_ppp;
};

#define IP_PROTO_UDP 17

struct encap_vxlan_ipv4_data {
	struct rte_ether_hdr ether;
	struct rte_ipv4_hdr ipv4;
	struct rte_udp_hdr udp;
	struct rte_vxlan_hdr vxlan;
} __rte_packed __rte_aligned(2);

struct encap_vxlan_ipv4_vlan_data {
	struct rte_ether_hdr ether;
	struct rte_vlan_hdr vlan;
	struct rte_ipv4_hdr ipv4;
	struct rte_udp_hdr udp;
	struct rte_vxlan_hdr vxlan;
} __rte_packed __rte_aligned(2);

struct encap_vxlan_ipv6_data {
	struct rte_ether_hdr ether;
	struct rte_ipv6_hdr ipv6;
	struct rte_udp_hdr udp;
	struct rte_vxlan_hdr vxlan;
} __rte_packed __rte_aligned(2);

struct encap_vxlan_ipv6_vlan_data {
	struct rte_ether_hdr ether;
	struct rte_vlan_hdr vlan;
	struct rte_ipv6_hdr ipv6;
	struct rte_udp_hdr udp;
	struct rte_vxlan_hdr vxlan;
} __rte_packed __rte_aligned(2);

struct encap_qinq_pppoe_data {
	struct rte_ether_hdr ether;
	struct rte_vlan_hdr svlan;
	struct rte_vlan_hdr cvlan;
	struct pppoe_ppp_hdr pppoe_ppp;
} __rte_packed __rte_aligned(2);

static size_t
encap_data_size(struct rte_table_action_encap_config *encap)
{
	switch (encap->encap_mask) {
	case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
		return sizeof(struct encap_ether_data);

	case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
		return sizeof(struct encap_vlan_data);

	case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
		return sizeof(struct encap_qinq_data);

	case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
		return sizeof(struct encap_mpls_data);

	case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
		return sizeof(struct encap_pppoe_data);

	case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
		if (encap->vxlan.ip_version)
			if (encap->vxlan.vlan)
				return sizeof(struct encap_vxlan_ipv4_vlan_data);
			else
				return sizeof(struct encap_vxlan_ipv4_data);
		else
			if (encap->vxlan.vlan)
				return sizeof(struct encap_vxlan_ipv6_vlan_data);
			else
				return sizeof(struct encap_vxlan_ipv6_data);

	case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
		return sizeof(struct encap_qinq_pppoe_data);

	default:
		return 0;
	}
}

static int
encap_apply_check(struct rte_table_action_encap_params *p,
	struct rte_table_action_encap_config *cfg)
{
	if ((encap_valid(p->type) == 0) ||
		((cfg->encap_mask & (1LLU << p->type)) == 0))
		return -EINVAL;

	switch (p->type) {
	case RTE_TABLE_ACTION_ENCAP_ETHER:
		return 0;

	case RTE_TABLE_ACTION_ENCAP_VLAN:
		return 0;

	case RTE_TABLE_ACTION_ENCAP_QINQ:
		return 0;

	case RTE_TABLE_ACTION_ENCAP_MPLS:
		if ((p->mpls.mpls_count == 0) ||
			(p->mpls.mpls_count > RTE_TABLE_ACTION_MPLS_LABELS_MAX))
			return -EINVAL;

		return 0;

	case RTE_TABLE_ACTION_ENCAP_PPPOE:
		return 0;

	case RTE_TABLE_ACTION_ENCAP_VXLAN:
		return 0;

	case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
		return 0;

	default:
		return -EINVAL;
	}
}

static int
encap_ether_apply(void *data,
	struct rte_table_action_encap_params *p,
	struct rte_table_action_common_config *common_cfg)
{
	struct encap_ether_data *d = data;
	uint16_t ethertype = (common_cfg->ip_version) ?
		RTE_ETHER_TYPE_IPV4 :
		RTE_ETHER_TYPE_IPV6;

	/* Ethernet */
	rte_ether_addr_copy(&p->ether.ether.da, &d->ether.d_addr);
	rte_ether_addr_copy(&p->ether.ether.sa, &d->ether.s_addr);
	d->ether.ether_type = rte_htons(ethertype);

	return 0;
}

static int
encap_vlan_apply(void *data,
	struct rte_table_action_encap_params *p,
	struct rte_table_action_common_config *common_cfg)
{
	struct encap_vlan_data *d = data;
	uint16_t ethertype = (common_cfg->ip_version) ?
		RTE_ETHER_TYPE_IPV4 :
		RTE_ETHER_TYPE_IPV6;

	/* Ethernet */
	rte_ether_addr_copy(&p->vlan.ether.da, &d->ether.d_addr);
	rte_ether_addr_copy(&p->vlan.ether.sa, &d->ether.s_addr);
	d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);

	/* VLAN */
	d->vlan.vlan_tci = rte_htons(VLAN(p->vlan.vlan.pcp,
		p->vlan.vlan.dei,
		p->vlan.vlan.vid));
	d->vlan.eth_proto = rte_htons(ethertype);

	return 0;
}

static int
encap_qinq_apply(void *data,
	struct rte_table_action_encap_params *p,
	struct rte_table_action_common_config *common_cfg)
{
	struct encap_qinq_data *d = data;
	uint16_t ethertype = (common_cfg->ip_version) ?
		RTE_ETHER_TYPE_IPV4 :
		RTE_ETHER_TYPE_IPV6;

	/* Ethernet */
	rte_ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);
	rte_ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);
	d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_QINQ);

	/* SVLAN */
	d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
		p->qinq.svlan.dei,
		p->qinq.svlan.vid));
	d->svlan.eth_proto = rte_htons(RTE_ETHER_TYPE_VLAN);

	/* CVLAN */
	d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
		p->qinq.cvlan.dei,
		p->qinq.cvlan.vid));
	d->cvlan.eth_proto = rte_htons(ethertype);

	return 0;
}

static int
encap_qinq_pppoe_apply(void *data,
	struct rte_table_action_encap_params *p)
{
	struct encap_qinq_pppoe_data *d = data;

	/* Ethernet */
	rte_ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);
	rte_ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);
	d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);

	/* SVLAN */
	d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
		p->qinq.svlan.dei,
		p->qinq.svlan.vid));
	d->svlan.eth_proto = rte_htons(RTE_ETHER_TYPE_VLAN);

	/* CVLAN */
	d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
		p->qinq.cvlan.dei,
		p->qinq.cvlan.vid));
	d->cvlan.eth_proto = rte_htons(RTE_ETHER_TYPE_PPPOE_SESSION);

	/* PPPoE and PPP */
	d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
	d->pppoe_ppp.session_id = rte_htons(p->qinq_pppoe.pppoe.session_id);
	d->pppoe_ppp.length = 0; /* not pre-computed */
	d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);

	return 0;
}

static int
encap_mpls_apply(void *data,
	struct rte_table_action_encap_params *p)
{
	struct encap_mpls_data *d = data;
	uint16_t ethertype = (p->mpls.unicast) ?
		ETHER_TYPE_MPLS_UNICAST :
		ETHER_TYPE_MPLS_MULTICAST;
	uint32_t i;

	/* Ethernet */
	rte_ether_addr_copy(&p->mpls.ether.da, &d->ether.d_addr);
	rte_ether_addr_copy(&p->mpls.ether.sa, &d->ether.s_addr);
	d->ether.ether_type = rte_htons(ethertype);

	/* MPLS */
	for (i = 0; i < p->mpls.mpls_count - 1; i++)
		d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
			p->mpls.mpls[i].tc,
			0,
			p->mpls.mpls[i].ttl));

	d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
		p->mpls.mpls[i].tc,
		1,
		p->mpls.mpls[i].ttl));

	d->mpls_count = p->mpls.mpls_count;
	return 0;
}

static int
encap_pppoe_apply(void *data,
	struct rte_table_action_encap_params *p)
{
	struct encap_pppoe_data *d = data;

	/* Ethernet */
	rte_ether_addr_copy(&p->pppoe.ether.da, &d->ether.d_addr);
	rte_ether_addr_copy(&p->pppoe.ether.sa, &d->ether.s_addr);
	d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_PPPOE_SESSION);

	/* PPPoE and PPP */
	d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
	d->pppoe_ppp.session_id = rte_htons(p->pppoe.pppoe.session_id);
	d->pppoe_ppp.length = 0; /* not pre-computed */
	d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);

	return 0;
}

static int
encap_vxlan_apply(void *data,
	struct rte_table_action_encap_params *p,
	struct rte_table_action_encap_config *cfg)
{
	if ((p->vxlan.vxlan.vni > 0xFFFFFF) ||
		(cfg->vxlan.ip_version && (p->vxlan.ipv4.dscp > 0x3F)) ||
		(!cfg->vxlan.ip_version && (p->vxlan.ipv6.flow_label > 0xFFFFF)) ||
		(!cfg->vxlan.ip_version && (p->vxlan.ipv6.dscp > 0x3F)) ||
		(cfg->vxlan.vlan && (p->vxlan.vlan.vid > 0xFFF)))
		return -1;

	if (cfg->vxlan.ip_version)
		if (cfg->vxlan.vlan) {
			struct encap_vxlan_ipv4_vlan_data *d = data;

			/* Ethernet */
			rte_ether_addr_copy(&p->vxlan.ether.da,
				&d->ether.d_addr);
			rte_ether_addr_copy(&p->vxlan.ether.sa,
				&d->ether.s_addr);
			d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);

			/* VLAN */
			d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
				p->vxlan.vlan.dei,
				p->vxlan.vlan.vid));
			d->vlan.eth_proto = rte_htons(RTE_ETHER_TYPE_IPV4);

			/* IPv4 */
			d->ipv4.version_ihl = 0x45;
			d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
			d->ipv4.total_length = 0; /* not pre-computed */
			d->ipv4.packet_id = 0;
			d->ipv4.fragment_offset = 0;
			d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
			d->ipv4.next_proto_id = IP_PROTO_UDP;
			d->ipv4.hdr_checksum = 0;
			d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
			d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);

			d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);

			/* UDP */
			d->udp.src_port = rte_htons(p->vxlan.udp.sp);
			d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
			d->udp.dgram_len = 0; /* not pre-computed */
			d->udp.dgram_cksum = 0;

			/* VXLAN */
			d->vxlan.vx_flags = rte_htonl(0x08000000);
			d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);

			return 0;
		} else {
			struct encap_vxlan_ipv4_data *d = data;

			/* Ethernet */
			rte_ether_addr_copy(&p->vxlan.ether.da,
				&d->ether.d_addr);
			rte_ether_addr_copy(&p->vxlan.ether.sa,
				&d->ether.s_addr);
			d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_IPV4);

			/* IPv4 */
			d->ipv4.version_ihl = 0x45;
			d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
			d->ipv4.total_length = 0; /* not pre-computed */
			d->ipv4.packet_id = 0;
			d->ipv4.fragment_offset = 0;
			d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
			d->ipv4.next_proto_id = IP_PROTO_UDP;
			d->ipv4.hdr_checksum = 0;
			d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
			d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);

			d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);

			/* UDP */
			d->udp.src_port = rte_htons(p->vxlan.udp.sp);
			d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
			d->udp.dgram_len = 0; /* not pre-computed */
			d->udp.dgram_cksum = 0;

			/* VXLAN */
			d->vxlan.vx_flags = rte_htonl(0x08000000);
			d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);

			return 0;
		}
	else
		if (cfg->vxlan.vlan) {
			struct encap_vxlan_ipv6_vlan_data *d = data;

			/* Ethernet */
			rte_ether_addr_copy(&p->vxlan.ether.da,
				&d->ether.d_addr);
			rte_ether_addr_copy(&p->vxlan.ether.sa,
				&d->ether.s_addr);
			d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_VLAN);

			/* VLAN */
			d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
				p->vxlan.vlan.dei,
				p->vxlan.vlan.vid));
			d->vlan.eth_proto = rte_htons(RTE_ETHER_TYPE_IPV6);

			/* IPv6 */
			d->ipv6.vtc_flow = rte_htonl((6 << 28) |
				(p->vxlan.ipv6.dscp << 22) |
				p->vxlan.ipv6.flow_label);
			d->ipv6.payload_len = 0; /* not pre-computed */
			d->ipv6.proto = IP_PROTO_UDP;
			d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
			memcpy(d->ipv6.src_addr,
				p->vxlan.ipv6.sa,
				sizeof(p->vxlan.ipv6.sa));
			memcpy(d->ipv6.dst_addr,
				p->vxlan.ipv6.da,
				sizeof(p->vxlan.ipv6.da));

			/* UDP */
			d->udp.src_port = rte_htons(p->vxlan.udp.sp);
			d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
			d->udp.dgram_len = 0; /* not pre-computed */
			d->udp.dgram_cksum = 0;

			/* VXLAN */
			d->vxlan.vx_flags = rte_htonl(0x08000000);
			d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);

			return 0;
		} else {
			struct encap_vxlan_ipv6_data *d = data;

			/* Ethernet */
			rte_ether_addr_copy(&p->vxlan.ether.da,
				&d->ether.d_addr);
			rte_ether_addr_copy(&p->vxlan.ether.sa,
				&d->ether.s_addr);
			d->ether.ether_type = rte_htons(RTE_ETHER_TYPE_IPV6);

			/* IPv6 */
			d->ipv6.vtc_flow = rte_htonl((6 << 28) |
				(p->vxlan.ipv6.dscp << 22) |
				p->vxlan.ipv6.flow_label);
			d->ipv6.payload_len = 0; /* not pre-computed */
			d->ipv6.proto = IP_PROTO_UDP;
			d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
			memcpy(d->ipv6.src_addr,
				p->vxlan.ipv6.sa,
				sizeof(p->vxlan.ipv6.sa));
			memcpy(d->ipv6.dst_addr,
				p->vxlan.ipv6.da,
				sizeof(p->vxlan.ipv6.da));

			/* UDP */
			d->udp.src_port = rte_htons(p->vxlan.udp.sp);
			d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
			d->udp.dgram_len = 0; /* not pre-computed */
			d->udp.dgram_cksum = 0;

			/* VXLAN */
			d->vxlan.vx_flags = rte_htonl(0x08000000);
			d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);

			return 0;
		}
}

static int
encap_apply(void *data,
	struct rte_table_action_encap_params *p,
	struct rte_table_action_encap_config *cfg,
	struct rte_table_action_common_config *common_cfg)
{
	int status;

	/* Check input arguments */
	status = encap_apply_check(p, cfg);
	if (status)
		return status;

	switch (p->type) {
	case RTE_TABLE_ACTION_ENCAP_ETHER:
		return encap_ether_apply(data, p, common_cfg);

	case RTE_TABLE_ACTION_ENCAP_VLAN:
		return encap_vlan_apply(data, p, common_cfg);

	case RTE_TABLE_ACTION_ENCAP_QINQ:
		return encap_qinq_apply(data, p, common_cfg);

	case RTE_TABLE_ACTION_ENCAP_MPLS:
		return encap_mpls_apply(data, p);

	case RTE_TABLE_ACTION_ENCAP_PPPOE:
		return encap_pppoe_apply(data, p);

	case RTE_TABLE_ACTION_ENCAP_VXLAN:
		return encap_vxlan_apply(data, p, cfg);

	case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
		return encap_qinq_pppoe_apply(data, p);

	default:
		return -EINVAL;
	}
}

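/*
 * Incrementally update the pre-computed IPv4 header checksum of the header
 * template (computed above with total_length == 0) when the total length
 * field is filled in, using one's complement arithmetic (RFC 1624 style).
 */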
static __rte_always_inline uint16_t
encap_vxlan_ipv4_checksum_update(uint16_t cksum0,
	uint16_t total_length)
{
	int32_t cksum1;

	cksum1 = cksum0;
	cksum1 = ~cksum1 & 0xFFFF;

	/* Add total length (one's complement logic) */
	cksum1 += total_length;
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);

	return (uint16_t)(~cksum1);
}

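/*
 * Prepend n bytes in front of dst: move the destination pointer back by n
 * bytes and copy the header template there. Returns the new packet start.
 */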
static __rte_always_inline void *
encap(void *dst, const void *src, size_t n)
{
	dst = ((uint8_t *) dst) - n;
	return rte_memcpy(dst, src, n);
}

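/*
 * VXLAN encapsulation work functions: prepend the pre-built header template
 * stored in the table entry, then patch the per-packet fields (IPv4 total
 * length and checksum / IPv6 payload length, UDP datagram length) and
 * adjust the mbuf data offset and lengths accordingly.
 */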
static __rte_always_inline void
pkt_work_encap_vxlan_ipv4(struct rte_mbuf *mbuf,
	struct encap_vxlan_ipv4_data *vxlan_tbl,
	struct rte_table_action_encap_config *cfg)
{
	uint32_t ether_offset = cfg->vxlan.data_offset;
	void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
	struct encap_vxlan_ipv4_data *vxlan_pkt;
	uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;

	ether_length = (uint16_t)mbuf->pkt_len;
	ipv4_total_length = ether_length +
		(sizeof(struct rte_vxlan_hdr) +
		sizeof(struct rte_udp_hdr) +
		sizeof(struct rte_ipv4_hdr));
	ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
		rte_htons(ipv4_total_length));
	udp_length = ether_length +
		(sizeof(struct rte_vxlan_hdr) +
		sizeof(struct rte_udp_hdr));

	vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
	vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
	vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
	vxlan_pkt->udp.dgram_len = rte_htons(udp_length);

	mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
	mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
}

static __rte_always_inline void
pkt_work_encap_vxlan_ipv4_vlan(struct rte_mbuf *mbuf,
	struct encap_vxlan_ipv4_vlan_data *vxlan_tbl,
	struct rte_table_action_encap_config *cfg)
{
	uint32_t ether_offset = cfg->vxlan.data_offset;
	void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
	struct encap_vxlan_ipv4_vlan_data *vxlan_pkt;
	uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;

	ether_length = (uint16_t)mbuf->pkt_len;
	ipv4_total_length = ether_length +
		(sizeof(struct rte_vxlan_hdr) +
		sizeof(struct rte_udp_hdr) +
		sizeof(struct rte_ipv4_hdr));
	ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
		rte_htons(ipv4_total_length));
	udp_length = ether_length +
		(sizeof(struct rte_vxlan_hdr) +
		sizeof(struct rte_udp_hdr));

	vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
	vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
	vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
	vxlan_pkt->udp.dgram_len = rte_htons(udp_length);

	mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
	mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
}

static __rte_always_inline void
pkt_work_encap_vxlan_ipv6(struct rte_mbuf *mbuf,
	struct encap_vxlan_ipv6_data *vxlan_tbl,
	struct rte_table_action_encap_config *cfg)
{
	uint32_t ether_offset = cfg->vxlan.data_offset;
	void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
	struct encap_vxlan_ipv6_data *vxlan_pkt;
	uint16_t ether_length, ipv6_payload_length, udp_length;

	ether_length = (uint16_t)mbuf->pkt_len;
	ipv6_payload_length = ether_length +
		(sizeof(struct rte_vxlan_hdr) +
		sizeof(struct rte_udp_hdr));
	udp_length = ether_length +
		(sizeof(struct rte_vxlan_hdr) +
		sizeof(struct rte_udp_hdr));

	vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
	vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
	vxlan_pkt->udp.dgram_len = rte_htons(udp_length);

	mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
	mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
}

static __rte_always_inline void
pkt_work_encap_vxlan_ipv6_vlan(struct rte_mbuf *mbuf,
	struct encap_vxlan_ipv6_vlan_data *vxlan_tbl,
	struct rte_table_action_encap_config *cfg)
{
	uint32_t ether_offset = cfg->vxlan.data_offset;
	void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
	struct encap_vxlan_ipv6_vlan_data *vxlan_pkt;
	uint16_t ether_length, ipv6_payload_length, udp_length;

	ether_length = (uint16_t)mbuf->pkt_len;
	ipv6_payload_length = ether_length +
		(sizeof(struct rte_vxlan_hdr) +
		sizeof(struct rte_udp_hdr));
	udp_length = ether_length +
		(sizeof(struct rte_vxlan_hdr) +
		sizeof(struct rte_udp_hdr));

	vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
	vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
	vxlan_pkt->udp.dgram_len = rte_htons(udp_length);

	mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
	mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
}

static __rte_always_inline void
pkt_work_encap(struct rte_mbuf *mbuf,
	void *data,
	struct rte_table_action_encap_config *cfg,
	void *ip,
	uint16_t total_length,
	uint32_t ip_offset)
{
	switch (cfg->encap_mask) {
	case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
		encap(ip, data, sizeof(struct encap_ether_data));
		mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
			sizeof(struct encap_ether_data));
		mbuf->pkt_len = mbuf->data_len = total_length +
			sizeof(struct encap_ether_data);
		break;

	case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
		encap(ip, data, sizeof(struct encap_vlan_data));
		mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
			sizeof(struct encap_vlan_data));
		mbuf->pkt_len = mbuf->data_len = total_length +
			sizeof(struct encap_vlan_data);
		break;

	case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
		encap(ip, data, sizeof(struct encap_qinq_data));
		mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
			sizeof(struct encap_qinq_data));
		mbuf->pkt_len = mbuf->data_len = total_length +
			sizeof(struct encap_qinq_data);
		break;

	case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
	{
		struct encap_mpls_data *mpls = data;
		size_t size = sizeof(struct rte_ether_hdr) +
			mpls->mpls_count * 4;

		encap(ip, data, size);
		mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + size);
		mbuf->pkt_len = mbuf->data_len = total_length + size;
		break;
	}

	case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
	{
		struct encap_pppoe_data *pppoe =
			encap(ip, data, sizeof(struct encap_pppoe_data));
		pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
		mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
			sizeof(struct encap_pppoe_data));
		mbuf->pkt_len = mbuf->data_len = total_length +
			sizeof(struct encap_pppoe_data);
		break;
	}

	case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
	{
		struct encap_qinq_pppoe_data *qinq_pppoe =
			encap(ip, data, sizeof(struct encap_qinq_pppoe_data));
		qinq_pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
		mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
			sizeof(struct encap_qinq_pppoe_data));
		mbuf->pkt_len = mbuf->data_len = total_length +
			sizeof(struct encap_qinq_pppoe_data);
		break;
	}

	case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
	{
		if (cfg->vxlan.ip_version)
			if (cfg->vxlan.vlan)
				pkt_work_encap_vxlan_ipv4_vlan(mbuf, data, cfg);
			else
				pkt_work_encap_vxlan_ipv4(mbuf, data, cfg);
		else
			if (cfg->vxlan.vlan)
				pkt_work_encap_vxlan_ipv6_vlan(mbuf, data, cfg);
			else
				pkt_work_encap_vxlan_ipv6(mbuf, data, cfg);
	}

	default:
		break;
	}
}

/**
 * RTE_TABLE_ACTION_NAT
 */
static int
nat_cfg_check(struct rte_table_action_nat_config *nat)
{
	if ((nat->proto != 0x06) &&
		(nat->proto != 0x11))
		return -ENOTSUP;

	return 0;
}

struct nat_ipv4_data {
	uint32_t addr;
	uint16_t port;
} __rte_packed;

struct nat_ipv6_data {
	uint8_t addr[16];
	uint16_t port;
} __rte_packed;

static size_t
nat_data_size(struct rte_table_action_nat_config *nat __rte_unused,
	struct rte_table_action_common_config *common)
{
	int ip_version = common->ip_version;

	return (ip_version) ?
		sizeof(struct nat_ipv4_data) :
		sizeof(struct nat_ipv6_data);
}

static int
nat_apply_check(struct rte_table_action_nat_params *p,
	struct rte_table_action_common_config *cfg)
{
	if ((p->ip_version && (cfg->ip_version == 0)) ||
		((p->ip_version == 0) && cfg->ip_version))
		return -EINVAL;

	return 0;
}

static int
nat_apply(void *data,
	struct rte_table_action_nat_params *p,
	struct rte_table_action_common_config *cfg)
{
	int status;

	/* Check input arguments */
	status = nat_apply_check(p, cfg);
	if (status)
		return status;

	/* Apply */
	if (p->ip_version) {
		struct nat_ipv4_data *d = data;

		d->addr = rte_htonl(p->addr.ipv4);
		d->port = rte_htons(p->port);
	} else {
		struct nat_ipv6_data *d = data;

		memcpy(d->addr, p->addr.ipv6, sizeof(d->addr));
		d->port = rte_htons(p->port);
	}

	return 0;
}

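/*
 * Incremental checksum updates for NAT (RFC 1624 style): fold the old
 * address (and port) out of the one's complement sum and fold the new one
 * in, avoiding a full checksum recomputation per packet.
 */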
static __rte_always_inline uint16_t
nat_ipv4_checksum_update(uint16_t cksum0,
	uint32_t ip0,
	uint32_t ip1)
{
	int32_t cksum1;

	cksum1 = cksum0;
	cksum1 = ~cksum1 & 0xFFFF;

	/* Subtract ip0 (one's complement logic) */
	cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF);
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);

	/* Add ip1 (one's complement logic) */
	cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF);
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);

	return (uint16_t)(~cksum1);
}

static __rte_always_inline uint16_t
nat_ipv4_tcp_udp_checksum_update(uint16_t cksum0,
	uint32_t ip0,
	uint32_t ip1,
	uint16_t port0,
	uint16_t port1)
{
	int32_t cksum1;

	cksum1 = cksum0;
	cksum1 = ~cksum1 & 0xFFFF;

	/* Subtract ip0 and port0 (one's complement logic) */
	cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF) + port0;
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);

	/* Add ip1 and port1 (one's complement logic) */
	cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF) + port1;
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);

	return (uint16_t)(~cksum1);
}

static __rte_always_inline uint16_t
nat_ipv6_tcp_udp_checksum_update(uint16_t cksum0,
	uint16_t *ip0,
	uint16_t *ip1,
	uint16_t port0,
	uint16_t port1)
{
	int32_t cksum1;

	cksum1 = cksum0;
	cksum1 = ~cksum1 & 0xFFFF;

	/* Subtract ip0 and port0 (one's complement logic) */
	cksum1 -= ip0[0] + ip0[1] + ip0[2] + ip0[3] +
		ip0[4] + ip0[5] + ip0[6] + ip0[7] + port0;
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);

	/* Add ip1 and port1 (one's complement logic) */
	cksum1 += ip1[0] + ip1[1] + ip1[2] + ip1[3] +
		ip1[4] + ip1[5] + ip1[6] + ip1[7] + port1;
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
	cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);

	return (uint16_t)(~cksum1);
}

static __rte_always_inline void
pkt_ipv4_work_nat(struct rte_ipv4_hdr *ip,
	struct nat_ipv4_data *data,
	struct rte_table_action_nat_config *cfg)
{
	if (cfg->source_nat) {
		if (cfg->proto == 0x6) {
			struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
			uint16_t ip_cksum, tcp_cksum;

			ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
				ip->src_addr,
				data->addr);

			tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
				ip->src_addr,
				data->addr,
				tcp->src_port,
				data->port);

			ip->src_addr = data->addr;
			ip->hdr_checksum = ip_cksum;
			tcp->src_port = data->port;
			tcp->cksum = tcp_cksum;
		} else {
			struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
			uint16_t ip_cksum, udp_cksum;

			ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
				ip->src_addr,
				data->addr);

			udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
				ip->src_addr,
				data->addr,
				udp->src_port,
				data->port);

			ip->src_addr = data->addr;
			ip->hdr_checksum = ip_cksum;
			udp->src_port = data->port;
			if (udp->dgram_cksum)
				udp->dgram_cksum = udp_cksum;
		}
	} else {
		if (cfg->proto == 0x6) {
			struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
			uint16_t ip_cksum, tcp_cksum;

			ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
				ip->dst_addr,
				data->addr);

			tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
				ip->dst_addr,
				data->addr,
				tcp->dst_port,
				data->port);

			ip->dst_addr = data->addr;
			ip->hdr_checksum = ip_cksum;
			tcp->dst_port = data->port;
			tcp->cksum = tcp_cksum;
		} else {
			struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
			uint16_t ip_cksum, udp_cksum;

			ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
				ip->dst_addr,
				data->addr);

			udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
				ip->dst_addr,
				data->addr,
				udp->dst_port,
				data->port);

			ip->dst_addr = data->addr;
			ip->hdr_checksum = ip_cksum;
			udp->dst_port = data->port;
			if (udp->dgram_cksum)
				udp->dgram_cksum = udp_cksum;
		}
	}
}

static __rte_always_inline void
pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
	struct nat_ipv6_data *data,
	struct rte_table_action_nat_config *cfg)
{
	if (cfg->source_nat) {
		if (cfg->proto == 0x6) {
			struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
			uint16_t tcp_cksum;

			tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
				(uint16_t *)ip->src_addr,
				(uint16_t *)data->addr,
				tcp->src_port,
				data->port);

			rte_memcpy(ip->src_addr, data->addr, 16);
			tcp->src_port = data->port;
			tcp->cksum = tcp_cksum;
		} else {
			struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
			uint16_t udp_cksum;

			udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
				(uint16_t *)ip->src_addr,
				(uint16_t *)data->addr,
				udp->src_port,
				data->port);

			rte_memcpy(ip->src_addr, data->addr, 16);
			udp->src_port = data->port;
			udp->dgram_cksum = udp_cksum;
		}
	} else {
		if (cfg->proto == 0x6) {
			struct rte_tcp_hdr *tcp = (struct rte_tcp_hdr *) &ip[1];
			uint16_t tcp_cksum;

			tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
				(uint16_t *)ip->dst_addr,
				(uint16_t *)data->addr,
				tcp->dst_port,
				data->port);

			rte_memcpy(ip->dst_addr, data->addr, 16);
			tcp->dst_port = data->port;
			tcp->cksum = tcp_cksum;
		} else {
			struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
			uint16_t udp_cksum;

			udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
				(uint16_t *)ip->dst_addr,
				(uint16_t *)data->addr,
				udp->dst_port,
				data->port);

			rte_memcpy(ip->dst_addr, data->addr, 16);
			udp->dst_port = data->port;
			udp->dgram_cksum = udp_cksum;
		}
	}
}

/**
 * RTE_TABLE_ACTION_TTL
 */
static int
ttl_cfg_check(struct rte_table_action_ttl_config *ttl)
{
	if (ttl->drop == 0)
		return -ENOTSUP;

	return 0;
}

struct ttl_data {
	uint32_t n_packets;
} __rte_packed;

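/*
 * The n_packets field packs the TTL decrement enable flag into bit 0;
 * bits [31:1] count the packets whose TTL reached zero (drop candidates).
 */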
#define TTL_INIT(data, decrement) \
	((data)->n_packets = (decrement) ? 1 : 0)

#define TTL_DEC_GET(data) \
	((uint8_t)((data)->n_packets & 1))

#define TTL_STATS_RESET(data) \
	((data)->n_packets = ((data)->n_packets & 1))

#define TTL_STATS_READ(data) \
	((data)->n_packets >> 1)

#define TTL_STATS_ADD(data, value) \
	((data)->n_packets = \
		(((((data)->n_packets >> 1) + (value)) << 1) | \
		((data)->n_packets & 1)))

static int
ttl_apply(void *data,
	struct rte_table_action_ttl_params *p)
{
	struct ttl_data *d = data;

	TTL_INIT(d, p->decrement);

	return 0;
}

static __rte_always_inline uint64_t
pkt_ipv4_work_ttl(struct rte_ipv4_hdr *ip,
	struct ttl_data *data)
{
	uint32_t drop;
	uint16_t cksum = ip->hdr_checksum;
	uint8_t ttl = ip->time_to_live;
	uint8_t ttl_diff = TTL_DEC_GET(data);

	cksum += ttl_diff;
	ttl -= ttl_diff;

	ip->hdr_checksum = cksum;
	ip->time_to_live = ttl;

	drop = (ttl == 0) ? 1 : 0;
	TTL_STATS_ADD(data, drop);

	return drop;
}

static __rte_always_inline uint64_t
pkt_ipv6_work_ttl(struct rte_ipv6_hdr *ip,
	struct ttl_data *data)
{
	uint32_t drop;
	uint8_t ttl = ip->hop_limits;
	uint8_t ttl_diff = TTL_DEC_GET(data);

	ttl -= ttl_diff;

	ip->hop_limits = ttl;

	drop = (ttl == 0) ? 1 : 0;
	TTL_STATS_ADD(data, drop);

	return drop;
}

/**
 * RTE_TABLE_ACTION_STATS
 */
static int
stats_cfg_check(struct rte_table_action_stats_config *stats)
{
	if ((stats->n_packets_enabled == 0) && (stats->n_bytes_enabled == 0))
		return -EINVAL;

	return 0;
}

struct stats_data {
	uint64_t n_packets;
	uint64_t n_bytes;
} __rte_packed;

static int
stats_apply(struct stats_data *data,
	struct rte_table_action_stats_params *p)
{
	data->n_packets = p->n_packets;
	data->n_bytes = p->n_bytes;

	return 0;
}

static __rte_always_inline void
pkt_work_stats(struct stats_data *data,
	uint16_t total_length)
{
	data->n_packets++;
	data->n_bytes += total_length;
}

/**
 * RTE_TABLE_ACTION_TIME
 */
struct time_data {
	uint64_t time;
} __rte_packed;

static int
time_apply(struct time_data *data,
	struct rte_table_action_time_params *p)
{
	data->time = p->time;
	return 0;
}

static __rte_always_inline void
pkt_work_time(struct time_data *data,
	uint64_t time)
{
	data->time = time;
}


/**
 * RTE_TABLE_ACTION_CRYPTO
 */

#define CRYPTO_OP_MASK_CIPHER	0x1
#define CRYPTO_OP_MASK_AUTH	0x2
#define CRYPTO_OP_MASK_AEAD	0x4

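/*
 * Per-packet crypto operation layout, built in the mbuf metadata area at
 * the configured op_offset: the rte_crypto_op and symmetric op are
 * followed by room for the cipher/auth IVs or the AEAD IV and AAD.
 */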
struct crypto_op_sym_iv_aad {
	struct rte_crypto_op op;
	struct rte_crypto_sym_op sym_op;
	union {
		struct {
			uint8_t cipher_iv[
				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
			uint8_t auth_iv[
				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
		} cipher_auth;

		struct {
			uint8_t iv[RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
			uint8_t aad[RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX];
		} aead_iv_aad;

	} iv_aad;
};

struct sym_crypto_data {

	union {
		struct {

			/** Length of cipher iv. */
			uint16_t cipher_iv_len;

			/** Offset from start of IP header to the cipher iv. */
			uint16_t cipher_iv_data_offset;

			/** Length of cipher iv to be updated in the mbuf. */
			uint16_t cipher_iv_update_len;

			/** Offset from start of IP header to the auth iv. */
			uint16_t auth_iv_data_offset;

			/** Length of auth iv in the mbuf. */
			uint16_t auth_iv_len;

			/** Length of auth iv to be updated in the mbuf. */
			uint16_t auth_iv_update_len;

		} cipher_auth;
		struct {

			/** Length of iv. */
			uint16_t iv_len;

			/** Offset from start of IP header to the aead iv. */
			uint16_t iv_data_offset;

			/** Length of iv to be updated in the mbuf. */
			uint16_t iv_update_len;

			/** Length of aad */
			uint16_t aad_len;

			/** Offset from start of IP header to the aad. */
			uint16_t aad_data_offset;

			/** Length of aad to be updated in the mbuf. */
			uint16_t aad_update_len;

		} aead;
	};

	/** Offset from start of IP header to the data. */
	uint16_t data_offset;

	/** Digest length. */
	uint16_t digest_len;

	/** Block size. */
	uint16_t block_size;

	/** Mask of crypto operation. */
	uint16_t op_mask;

	/** Session pointer. */
	struct rte_cryptodev_sym_session *session;

	/** Direction of crypto, encrypt or decrypt. */
	uint16_t direction;

	/** Private data to store cipher iv / aad. */
	uint8_t iv_aad_data[32];

} __rte_packed;

static int
sym_crypto_cfg_check(struct rte_table_action_sym_crypto_config *cfg)
{
	if (!rte_cryptodev_pmd_is_valid_dev(cfg->cryptodev_id))
		return -EINVAL;
	if (cfg->mp_create == NULL || cfg->mp_init == NULL)
		return -EINVAL;

	return 0;
}

static int
get_block_size(const struct rte_crypto_sym_xform *xform, uint8_t cdev_id)
{
	struct rte_cryptodev_info dev_info;
	const struct rte_cryptodev_capabilities *cap;
	uint32_t i;

	rte_cryptodev_info_get(cdev_id, &dev_info);

	for (i = 0; dev_info.capabilities[i].op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
			i++) {
		cap = &dev_info.capabilities[i];

		if (cap->sym.xform_type != xform->type)
			continue;

		if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
			(cap->sym.cipher.algo == xform->cipher.algo))
			return cap->sym.cipher.block_size;

		if ((xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) &&
			(cap->sym.aead.algo == xform->aead.algo))
			return cap->sym.aead.block_size;

		if (xform->type == RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED)
			break;
	}

	return -1;
}

static int
sym_crypto_apply(struct sym_crypto_data *data,
	struct rte_table_action_sym_crypto_config *cfg,
	struct rte_table_action_sym_crypto_params *p)
{
	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
	const struct rte_crypto_auth_xform *auth_xform = NULL;
	const struct rte_crypto_aead_xform *aead_xform = NULL;
	struct rte_crypto_sym_xform *xform = p->xform;
	struct rte_cryptodev_sym_session *session;
	int ret;

	memset(data, 0, sizeof(*data));

	while (xform) {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
			cipher_xform = &xform->cipher;

			if (cipher_xform->iv.length >
				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
				return -ENOMEM;
			if (cipher_xform->iv.offset !=
				RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
				return -EINVAL;

			ret = get_block_size(xform, cfg->cryptodev_id);
			if (ret < 0)
				return -1;
			data->block_size = (uint16_t)ret;
			data->op_mask |= CRYPTO_OP_MASK_CIPHER;

			data->cipher_auth.cipher_iv_len =
				cipher_xform->iv.length;
			data->cipher_auth.cipher_iv_data_offset = (uint16_t)
				p->cipher_auth.cipher_iv_update.offset;
			data->cipher_auth.cipher_iv_update_len = (uint16_t)
				p->cipher_auth.cipher_iv_update.length;

			rte_memcpy(data->iv_aad_data,
				p->cipher_auth.cipher_iv.val,
				p->cipher_auth.cipher_iv.length);

			data->direction = cipher_xform->op;

		} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
			auth_xform = &xform->auth;
			if (auth_xform->iv.length >
				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
				return -ENOMEM;
			data->op_mask |= CRYPTO_OP_MASK_AUTH;

			data->cipher_auth.auth_iv_len = auth_xform->iv.length;
			data->cipher_auth.auth_iv_data_offset = (uint16_t)
				p->cipher_auth.auth_iv_update.offset;
			data->cipher_auth.auth_iv_update_len = (uint16_t)
				p->cipher_auth.auth_iv_update.length;
			data->digest_len = auth_xform->digest_length;

			data->direction = (auth_xform->op ==
				RTE_CRYPTO_AUTH_OP_GENERATE) ?
				RTE_CRYPTO_CIPHER_OP_ENCRYPT :
				RTE_CRYPTO_CIPHER_OP_DECRYPT;

		} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			aead_xform = &xform->aead;

			if ((aead_xform->iv.length >
				RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX) || (
				aead_xform->aad_length >
				RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX))
				return -EINVAL;
			if (aead_xform->iv.offset !=
				RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
				return -EINVAL;

			ret = get_block_size(xform, cfg->cryptodev_id);
			if (ret < 0)
				return -1;
			data->block_size = (uint16_t)ret;
			data->op_mask |= CRYPTO_OP_MASK_AEAD;

			data->digest_len = aead_xform->digest_length;
			data->aead.iv_len = aead_xform->iv.length;
			data->aead.aad_len = aead_xform->aad_length;

			data->aead.iv_data_offset = (uint16_t)
				p->aead.iv_update.offset;
			data->aead.iv_update_len = (uint16_t)
				p->aead.iv_update.length;
			data->aead.aad_data_offset = (uint16_t)
				p->aead.aad_update.offset;
			data->aead.aad_update_len = (uint16_t)
				p->aead.aad_update.length;

			rte_memcpy(data->iv_aad_data,
				p->aead.iv.val,
				p->aead.iv.length);

			rte_memcpy(data->iv_aad_data + p->aead.iv.length,
				p->aead.aad.val,
				p->aead.aad.length);

			data->direction = (aead_xform->op ==
				RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
				RTE_CRYPTO_CIPHER_OP_ENCRYPT :
				RTE_CRYPTO_CIPHER_OP_DECRYPT;
		} else
			return -EINVAL;

		xform = xform->next;
	}

	if (auth_xform && auth_xform->iv.length) {
		if (cipher_xform) {
			if (auth_xform->iv.offset !=
				RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET +
				cipher_xform->iv.length)
				return -EINVAL;

			rte_memcpy(data->iv_aad_data + cipher_xform->iv.length,
				p->cipher_auth.auth_iv.val,
				p->cipher_auth.auth_iv.length);
		} else {
			rte_memcpy(data->iv_aad_data,
				p->cipher_auth.auth_iv.val,
				p->cipher_auth.auth_iv.length);
		}
	}

	session = rte_cryptodev_sym_session_create(cfg->mp_create);
	if (!session)
		return -ENOMEM;

	ret = rte_cryptodev_sym_session_init(cfg->cryptodev_id, session,
		p->xform, cfg->mp_init);
	if (ret < 0) {
		rte_cryptodev_sym_session_free(session);
		return ret;
	}

	data->data_offset = (uint16_t)p->data_offset;
	data->session = session;

	return 0;
}

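/*
 * Build the symmetric crypto operation for one packet: fill in the op and
 * session, pad the payload to the cipher block size (and reserve digest
 * space) for encryption, then populate the cipher/auth or AEAD fields,
 * copying per-packet IV/AAD updates between the packet and the stored
 * iv_aad_data as dictated by the crypto direction.
 */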
static __rte_always_inline uint64_t
pkt_work_sym_crypto(struct rte_mbuf *mbuf, struct sym_crypto_data *data,
	struct rte_table_action_sym_crypto_config *cfg,
	uint16_t ip_offset)
{
	struct crypto_op_sym_iv_aad *crypto_op = (struct crypto_op_sym_iv_aad *)
		RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->op_offset);
	struct rte_crypto_op *op = &crypto_op->op;
	struct rte_crypto_sym_op *sym = op->sym;
	uint32_t pkt_offset = sizeof(*mbuf) + mbuf->data_off;
	uint32_t payload_len = pkt_offset + mbuf->data_len - data->data_offset;

	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
	op->phys_addr = mbuf->buf_iova + cfg->op_offset - sizeof(*mbuf);
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	sym->m_src = mbuf;
	sym->m_dst = NULL;
	sym->session = data->session;

	/** pad the packet */
	if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		uint32_t append_len = RTE_ALIGN_CEIL(payload_len,
			data->block_size) - payload_len;

		if (unlikely(rte_pktmbuf_append(mbuf, append_len +
			data->digest_len) == NULL))
			return 1;

		payload_len += append_len;
	} else
		payload_len -= data->digest_len;

	if (data->op_mask & CRYPTO_OP_MASK_CIPHER) {
		/** prepare cipher op */
		uint8_t *iv = crypto_op->iv_aad.cipher_auth.cipher_iv;

		sym->cipher.data.length = payload_len;
		sym->cipher.data.offset = data->data_offset - pkt_offset;

		if (data->cipher_auth.cipher_iv_update_len) {
			uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
				data->cipher_auth.cipher_iv_data_offset
				+ ip_offset);

			/** For encryption, update the pkt iv field, otherwise
			 * update the iv_aad_field
			 */
			if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
				rte_memcpy(pkt_iv, data->iv_aad_data,
					data->cipher_auth.cipher_iv_update_len);
			else
				rte_memcpy(data->iv_aad_data, pkt_iv,
					data->cipher_auth.cipher_iv_update_len);
		}

		/** write iv */
		rte_memcpy(iv, data->iv_aad_data,
			data->cipher_auth.cipher_iv_len);
	}

	if (data->op_mask & CRYPTO_OP_MASK_AUTH) {
		/** authentication always starts from the IP header. */
		sym->auth.data.offset = ip_offset - pkt_offset;
		sym->auth.data.length = mbuf->data_len - sym->auth.data.offset -
			data->digest_len;
		sym->auth.digest.data = rte_pktmbuf_mtod_offset(mbuf,
			uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
			data->digest_len);
		sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
			rte_pktmbuf_pkt_len(mbuf) - data->digest_len);

		if (data->cipher_auth.auth_iv_update_len) {
			uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
				data->cipher_auth.auth_iv_data_offset
				+ ip_offset);
			uint8_t *data_iv = data->iv_aad_data +
				data->cipher_auth.cipher_iv_len;

			if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
				rte_memcpy(pkt_iv, data_iv,
					data->cipher_auth.auth_iv_update_len);
			else
				rte_memcpy(data_iv, pkt_iv,
					data->cipher_auth.auth_iv_update_len);
		}

		if (data->cipher_auth.auth_iv_len) {
			/** prepare auth iv */
			uint8_t *iv = crypto_op->iv_aad.cipher_auth.auth_iv;

			rte_memcpy(iv, data->iv_aad_data +
				data->cipher_auth.cipher_iv_len,
				data->cipher_auth.auth_iv_len);
		}
	}

	if (data->op_mask & CRYPTO_OP_MASK_AEAD) {
		uint8_t *iv = crypto_op->iv_aad.aead_iv_aad.iv;
		uint8_t *aad = crypto_op->iv_aad.aead_iv_aad.aad;

		sym->aead.aad.data = aad;
		sym->aead.aad.phys_addr = rte_pktmbuf_iova_offset(mbuf,
			aad - rte_pktmbuf_mtod(mbuf, uint8_t *));
		sym->aead.digest.data = rte_pktmbuf_mtod_offset(mbuf,
			uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
			data->digest_len);
		sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
			rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
		sym->aead.data.offset = data->data_offset - pkt_offset;
		sym->aead.data.length = payload_len;

		if (data->aead.iv_update_len) {
			uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
				data->aead.iv_data_offset + ip_offset);
			uint8_t *data_iv = data->iv_aad_data;

			if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
				rte_memcpy(pkt_iv, data_iv,
					data->aead.iv_update_len);
			else
				rte_memcpy(data_iv, pkt_iv,
					data->aead.iv_update_len);
		}

		rte_memcpy(iv, data->iv_aad_data, data->aead.iv_len);

2047 if (data->aead.aad_update_len) {
2048 uint8_t *pkt_aad = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
2049 data->aead.aad_data_offset + ip_offset);
2050 uint8_t *data_aad = data->iv_aad_data +
2051 data->aead.iv_len;
2052
2053 if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
2054 rte_memcpy(pkt_aad, data_aad,
2055 data->aead.aad_update_len);
2056 else
2057 rte_memcpy(data_aad, pkt_aad,
2058 data->aead.aad_update_len);
2059 }
2060
2061 rte_memcpy(aad, data->iv_aad_data + data->aead.iv_len,
2062 data->aead.aad_len);
2063 }
2064
2065 return 0;
2066 }
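
/*
 * Offset arithmetic sketch (figures are hypothetical): both
 * cfg->op_offset and data->data_offset are counted from the start of
 * the mbuf structure. With sizeof(*mbuf) == 128 and data_off == 128,
 * pkt_offset == 256, so a data_offset of 296 yields a crypto data
 * offset of 296 - 256 == 40 bytes into the packet, and payload_len
 * == 256 + data_len - 296 before any padding is applied.
 */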
2067
2068 /**
2069 * RTE_TABLE_ACTION_TAG
2070 */
2071 struct tag_data {
2072 uint32_t tag;
2073 } __rte_packed;
2074
2075 static int
2076 tag_apply(struct tag_data *data,
2077 struct rte_table_action_tag_params *p)
2078 {
2079 data->tag = p->tag;
2080 return 0;
2081 }
2082
2083 static __rte_always_inline void
2084 pkt_work_tag(struct rte_mbuf *mbuf,
2085 struct tag_data *data)
2086 {
2087 mbuf->hash.fdir.hi = data->tag;
2088 mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2089 }
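
/*
 * Consumer-side sketch (assumes a downstream stage that inspects the
 * mbuf after table lookup): the tag travels in the flow director
 * metadata, e.g.:
 *
 *	uint32_t tag = 0;
 *
 *	if (mbuf->ol_flags & PKT_RX_FDIR_ID)
 *		tag = mbuf->hash.fdir.hi;
 */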
2090
2091 static __rte_always_inline void
2092 pkt4_work_tag(struct rte_mbuf *mbuf0,
2093 struct rte_mbuf *mbuf1,
2094 struct rte_mbuf *mbuf2,
2095 struct rte_mbuf *mbuf3,
2096 struct tag_data *data0,
2097 struct tag_data *data1,
2098 struct tag_data *data2,
2099 struct tag_data *data3)
2100 {
2101 mbuf0->hash.fdir.hi = data0->tag;
2102 mbuf1->hash.fdir.hi = data1->tag;
2103 mbuf2->hash.fdir.hi = data2->tag;
2104 mbuf3->hash.fdir.hi = data3->tag;
2105
2106 mbuf0->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2107 mbuf1->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2108 mbuf2->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2109 mbuf3->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
2110 }
2111
2112 /**
2113 * RTE_TABLE_ACTION_DECAP
2114 */
2115 struct decap_data {
2116 uint16_t n;
2117 } __rte_packed;
2118
2119 static int
2120 decap_apply(struct decap_data *data,
2121 struct rte_table_action_decap_params *p)
2122 {
2123 data->n = p->n;
2124 return 0;
2125 }
2126
2127 static __rte_always_inline void
2128 pkt_work_decap(struct rte_mbuf *mbuf,
2129 struct decap_data *data)
2130 {
2131 uint16_t data_off = mbuf->data_off;
2132 uint16_t data_len = mbuf->data_len;
2133 uint32_t pkt_len = mbuf->pkt_len;
2134 uint16_t n = data->n;
2135
2136 mbuf->data_off = data_off + n;
2137 mbuf->data_len = data_len - n;
2138 mbuf->pkt_len = pkt_len - n;
2139 }
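
/*
 * Parameter sketch (assuming the prefix to strip is a plain Ethernet
 * header; any fixed-size prefix works the same way):
 *
 *	struct rte_table_action_decap_params dp = {
 *		.n = sizeof(struct rte_ether_hdr),
 *	};
 *
 * Note that the handler above only adjusts data_off, data_len and
 * pkt_len, so n is assumed to fit within the first mbuf segment.
 */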
2140
2141 static __rte_always_inline void
2142 pkt4_work_decap(struct rte_mbuf *mbuf0,
2143 struct rte_mbuf *mbuf1,
2144 struct rte_mbuf *mbuf2,
2145 struct rte_mbuf *mbuf3,
2146 struct decap_data *data0,
2147 struct decap_data *data1,
2148 struct decap_data *data2,
2149 struct decap_data *data3)
2150 {
2151 uint16_t data_off0 = mbuf0->data_off;
2152 uint16_t data_len0 = mbuf0->data_len;
2153 uint32_t pkt_len0 = mbuf0->pkt_len;
2154
2155 uint16_t data_off1 = mbuf1->data_off;
2156 uint16_t data_len1 = mbuf1->data_len;
2157 uint32_t pkt_len1 = mbuf1->pkt_len;
2158
2159 uint16_t data_off2 = mbuf2->data_off;
2160 uint16_t data_len2 = mbuf2->data_len;
2161 uint32_t pkt_len2 = mbuf2->pkt_len;
2162
2163 uint16_t data_off3 = mbuf3->data_off;
2164 uint16_t data_len3 = mbuf3->data_len;
2165 uint32_t pkt_len3 = mbuf3->pkt_len;
2166
2167 uint16_t n0 = data0->n;
2168 uint16_t n1 = data1->n;
2169 uint16_t n2 = data2->n;
2170 uint16_t n3 = data3->n;
2171
2172 mbuf0->data_off = data_off0 + n0;
2173 mbuf0->data_len = data_len0 - n0;
2174 mbuf0->pkt_len = pkt_len0 - n0;
2175
2176 mbuf1->data_off = data_off1 + n1;
2177 mbuf1->data_len = data_len1 - n1;
2178 mbuf1->pkt_len = pkt_len1 - n1;
2179
2180 mbuf2->data_off = data_off2 + n2;
2181 mbuf2->data_len = data_len2 - n2;
2182 mbuf2->pkt_len = pkt_len2 - n2;
2183
2184 mbuf3->data_off = data_off3 + n3;
2185 mbuf3->data_len = data_len3 - n3;
2186 mbuf3->pkt_len = pkt_len3 - n3;
2187 }
2188
2189 /**
2190 * Action profile
2191 */
2192 static int
2193 action_valid(enum rte_table_action_type action)
2194 {
2195 switch (action) {
2196 case RTE_TABLE_ACTION_FWD:
2197 case RTE_TABLE_ACTION_LB:
2198 case RTE_TABLE_ACTION_MTR:
2199 case RTE_TABLE_ACTION_TM:
2200 case RTE_TABLE_ACTION_ENCAP:
2201 case RTE_TABLE_ACTION_NAT:
2202 case RTE_TABLE_ACTION_TTL:
2203 case RTE_TABLE_ACTION_STATS:
2204 case RTE_TABLE_ACTION_TIME:
2205 case RTE_TABLE_ACTION_SYM_CRYPTO:
2206 case RTE_TABLE_ACTION_TAG:
2207 case RTE_TABLE_ACTION_DECAP:
2208 return 1;
2209 default:
2210 return 0;
2211 }
2212 }
2213
2214
2215 #define RTE_TABLE_ACTION_MAX 64
2216
2217 struct ap_config {
2218 uint64_t action_mask;
2219 struct rte_table_action_common_config common;
2220 struct rte_table_action_lb_config lb;
2221 struct rte_table_action_mtr_config mtr;
2222 struct rte_table_action_tm_config tm;
2223 struct rte_table_action_encap_config encap;
2224 struct rte_table_action_nat_config nat;
2225 struct rte_table_action_ttl_config ttl;
2226 struct rte_table_action_stats_config stats;
2227 struct rte_table_action_sym_crypto_config sym_crypto;
2228 };
2229
2230 static size_t
2231 action_cfg_size(enum rte_table_action_type action)
2232 {
2233 switch (action) {
2234 case RTE_TABLE_ACTION_LB:
2235 return sizeof(struct rte_table_action_lb_config);
2236 case RTE_TABLE_ACTION_MTR:
2237 return sizeof(struct rte_table_action_mtr_config);
2238 case RTE_TABLE_ACTION_TM:
2239 return sizeof(struct rte_table_action_tm_config);
2240 case RTE_TABLE_ACTION_ENCAP:
2241 return sizeof(struct rte_table_action_encap_config);
2242 case RTE_TABLE_ACTION_NAT:
2243 return sizeof(struct rte_table_action_nat_config);
2244 case RTE_TABLE_ACTION_TTL:
2245 return sizeof(struct rte_table_action_ttl_config);
2246 case RTE_TABLE_ACTION_STATS:
2247 return sizeof(struct rte_table_action_stats_config);
2248 case RTE_TABLE_ACTION_SYM_CRYPTO:
2249 return sizeof(struct rte_table_action_sym_crypto_config);
2250 default:
2251 return 0;
2252 }
2253 }
2254
2255 static void *
2256 action_cfg_get(struct ap_config *ap_config,
2257 enum rte_table_action_type type)
2258 {
2259 switch (type) {
2260 case RTE_TABLE_ACTION_LB:
2261 return &ap_config->lb;
2262
2263 case RTE_TABLE_ACTION_MTR:
2264 return &ap_config->mtr;
2265
2266 case RTE_TABLE_ACTION_TM:
2267 return &ap_config->tm;
2268
2269 case RTE_TABLE_ACTION_ENCAP:
2270 return &ap_config->encap;
2271
2272 case RTE_TABLE_ACTION_NAT:
2273 return &ap_config->nat;
2274
2275 case RTE_TABLE_ACTION_TTL:
2276 return &ap_config->ttl;
2277
2278 case RTE_TABLE_ACTION_STATS:
2279 return &ap_config->stats;
2280
2281 case RTE_TABLE_ACTION_SYM_CRYPTO:
2282 return &ap_config->sym_crypto;
2283 default:
2284 return NULL;
2285 }
2286 }
2287
2288 static void
2289 action_cfg_set(struct ap_config *ap_config,
2290 enum rte_table_action_type type,
2291 void *action_cfg)
2292 {
2293 void *dst = action_cfg_get(ap_config, type);
2294
2295 if (dst)
2296 memcpy(dst, action_cfg, action_cfg_size(type));
2297
2298 ap_config->action_mask |= 1LLU << type;
2299 }
2300
2301 struct ap_data {
2302 size_t offset[RTE_TABLE_ACTION_MAX];
2303 size_t total_size;
2304 };
2305
2306 static size_t
2307 action_data_size(enum rte_table_action_type action,
2308 struct ap_config *ap_config)
2309 {
2310 switch (action) {
2311 case RTE_TABLE_ACTION_FWD:
2312 return sizeof(struct fwd_data);
2313
2314 case RTE_TABLE_ACTION_LB:
2315 return sizeof(struct lb_data);
2316
2317 case RTE_TABLE_ACTION_MTR:
2318 return mtr_data_size(&ap_config->mtr);
2319
2320 case RTE_TABLE_ACTION_TM:
2321 return sizeof(struct tm_data);
2322
2323 case RTE_TABLE_ACTION_ENCAP:
2324 return encap_data_size(&ap_config->encap);
2325
2326 case RTE_TABLE_ACTION_NAT:
2327 return nat_data_size(&ap_config->nat,
2328 &ap_config->common);
2329
2330 case RTE_TABLE_ACTION_TTL:
2331 return sizeof(struct ttl_data);
2332
2333 case RTE_TABLE_ACTION_STATS:
2334 return sizeof(struct stats_data);
2335
2336 case RTE_TABLE_ACTION_TIME:
2337 return sizeof(struct time_data);
2338
2339 case RTE_TABLE_ACTION_SYM_CRYPTO:
2340 return sizeof(struct sym_crypto_data);
2341
2342 case RTE_TABLE_ACTION_TAG:
2343 return sizeof(struct tag_data);
2344
2345 case RTE_TABLE_ACTION_DECAP:
2346 return sizeof(struct decap_data);
2347
2348 default:
2349 return 0;
2350 }
2351 }
2352
2353
2354 static void
2355 action_data_offset_set(struct ap_data *ap_data,
2356 struct ap_config *ap_config)
2357 {
2358 uint64_t action_mask = ap_config->action_mask;
2359 size_t offset;
2360 uint32_t action;
2361
2362 memset(ap_data->offset, 0, sizeof(ap_data->offset));
2363
2364 offset = 0;
2365 for (action = 0; action < RTE_TABLE_ACTION_MAX; action++)
2366 if (action_mask & (1LLU << action)) {
2367 ap_data->offset[action] = offset;
2368 offset += action_data_size((enum rte_table_action_type)action,
2369 ap_config);
2370 }
2371
2372 ap_data->total_size = offset;
2373 }
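
/*
 * Layout sketch (hypothetical profile): with FWD, STATS and TIME
 * enabled, per-entry action data is packed in enum order:
 *
 *	offset[FWD]   = 0
 *	offset[STATS] = sizeof(struct fwd_data)
 *	offset[TIME]  = offset[STATS] + sizeof(struct stats_data)
 *	total_size    = offset[TIME] + sizeof(struct time_data)
 */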
2374
2375 struct rte_table_action_profile {
2376 struct ap_config cfg;
2377 struct ap_data data;
2378 int frozen;
2379 };
2380
2381 struct rte_table_action_profile *
2382 rte_table_action_profile_create(struct rte_table_action_common_config *common)
2383 {
2384 struct rte_table_action_profile *ap;
2385
2386 /* Check input arguments */
2387 if (common == NULL)
2388 return NULL;
2389
2390 /* Memory allocation */
2391 ap = calloc(1, sizeof(struct rte_table_action_profile));
2392 if (ap == NULL)
2393 return NULL;
2394
2395 /* Initialization */
2396 memcpy(&ap->cfg.common, common, sizeof(*common));
2397
2398 return ap;
2399 }
2400
2401
2402 int
2403 rte_table_action_profile_action_register(struct rte_table_action_profile *profile,
2404 enum rte_table_action_type type,
2405 void *action_config)
2406 {
2407 int status;
2408
2409 /* Check input arguments */
2410 if ((profile == NULL) ||
2411 profile->frozen ||
2412 (action_valid(type) == 0) ||
2413 (profile->cfg.action_mask & (1LLU << type)) ||
2414 ((action_cfg_size(type) == 0) && action_config) ||
2415 (action_cfg_size(type) && (action_config == NULL)))
2416 return -EINVAL;
2417
2418 switch (type) {
2419 case RTE_TABLE_ACTION_LB:
2420 status = lb_cfg_check(action_config);
2421 break;
2422
2423 case RTE_TABLE_ACTION_MTR:
2424 status = mtr_cfg_check(action_config);
2425 break;
2426
2427 case RTE_TABLE_ACTION_TM:
2428 status = tm_cfg_check(action_config);
2429 break;
2430
2431 case RTE_TABLE_ACTION_ENCAP:
2432 status = encap_cfg_check(action_config);
2433 break;
2434
2435 case RTE_TABLE_ACTION_NAT:
2436 status = nat_cfg_check(action_config);
2437 break;
2438
2439 case RTE_TABLE_ACTION_TTL:
2440 status = ttl_cfg_check(action_config);
2441 break;
2442
2443 case RTE_TABLE_ACTION_STATS:
2444 status = stats_cfg_check(action_config);
2445 break;
2446
2447 case RTE_TABLE_ACTION_SYM_CRYPTO:
2448 status = sym_crypto_cfg_check(action_config);
2449 break;
2450
2451 default:
2452 status = 0;
2453 break;
2454 }
2455
2456 if (status)
2457 return status;
2458
2459 /* Action enable */
2460 action_cfg_set(&profile->cfg, type, action_config);
2461
2462 return 0;
2463 }
2464
2465 int
2466 rte_table_action_profile_freeze(struct rte_table_action_profile *profile)
2467 {
2468 if (profile->frozen)
2469 return -EBUSY;
2470
2471 profile->cfg.action_mask |= 1LLU << RTE_TABLE_ACTION_FWD;
2472 action_data_offset_set(&profile->data, &profile->cfg);
2473 profile->frozen = 1;
2474
2475 return 0;
2476 }
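
/*
 * Profile lifecycle sketch (the ip_offset value is a hypothetical
 * application choice; error handling omitted):
 *
 *	struct rte_table_action_common_config common = {
 *		.ip_version = 1,
 *		.ip_offset = 270,
 *	};
 *	struct rte_table_action_stats_config stats_cfg = {
 *		.n_packets_enabled = 1,
 *		.n_bytes_enabled = 1,
 *	};
 *	struct rte_table_action_profile *ap;
 *
 *	ap = rte_table_action_profile_create(&common);
 *	rte_table_action_profile_action_register(ap,
 *		RTE_TABLE_ACTION_STATS, &stats_cfg);
 *	rte_table_action_profile_freeze(ap);
 *
 * FWD is enabled implicitly at freeze time, so it never needs to be
 * registered explicitly.
 */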
2477
2478 int
2479 rte_table_action_profile_free(struct rte_table_action_profile *profile)
2480 {
2481 if (profile == NULL)
2482 return 0;
2483
2484 free(profile);
2485 return 0;
2486 }
2487
2488 /**
2489 * Action
2490 */
2491 #define METER_PROFILES_MAX 32
2492
2493 struct rte_table_action {
2494 struct ap_config cfg;
2495 struct ap_data data;
2496 struct dscp_table_data dscp_table;
2497 struct meter_profile_data mp[METER_PROFILES_MAX];
2498 };
2499
2500 struct rte_table_action *
2501 rte_table_action_create(struct rte_table_action_profile *profile,
2502 uint32_t socket_id)
2503 {
2504 struct rte_table_action *action;
2505
2506 /* Check input arguments */
2507 if ((profile == NULL) ||
2508 (profile->frozen == 0))
2509 return NULL;
2510
2511 /* Memory allocation */
2512 action = rte_zmalloc_socket(NULL,
2513 sizeof(struct rte_table_action),
2514 RTE_CACHE_LINE_SIZE,
2515 socket_id);
2516 if (action == NULL)
2517 return NULL;
2518
2519 /* Initialization */
2520 memcpy(&action->cfg, &profile->cfg, sizeof(profile->cfg));
2521 memcpy(&action->data, &profile->data, sizeof(profile->data));
2522
2523 return action;
2524 }
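
/*
 * A frozen profile may back several action objects, e.g. one per NUMA
 * socket (sketch, socket 0 assumed):
 *
 *	struct rte_table_action *a = rte_table_action_create(ap, 0);
 */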
2525
2526 static __rte_always_inline void *
2527 action_data_get(void *data,
2528 struct rte_table_action *action,
2529 enum rte_table_action_type type)
2530 {
2531 size_t offset = action->data.offset[type];
2532 uint8_t *data_bytes = data;
2533
2534 return &data_bytes[offset];
2535 }
2536
2537 int
2538 rte_table_action_apply(struct rte_table_action *action,
2539 void *data,
2540 enum rte_table_action_type type,
2541 void *action_params)
2542 {
2543 void *action_data;
2544
2545 /* Check input arguments */
2546 if ((action == NULL) ||
2547 (data == NULL) ||
2548 (action_valid(type) == 0) ||
2549 ((action->cfg.action_mask & (1LLU << type)) == 0) ||
2550 (action_params == NULL))
2551 return -EINVAL;
2552
2553 /* Data update */
2554 action_data = action_data_get(data, action, type);
2555
2556 switch (type) {
2557 case RTE_TABLE_ACTION_FWD:
2558 return fwd_apply(action_data,
2559 action_params);
2560
2561 case RTE_TABLE_ACTION_LB:
2562 return lb_apply(action_data,
2563 action_params);
2564
2565 case RTE_TABLE_ACTION_MTR:
2566 return mtr_apply(action_data,
2567 action_params,
2568 &action->cfg.mtr,
2569 action->mp,
2570 RTE_DIM(action->mp));
2571
2572 case RTE_TABLE_ACTION_TM:
2573 return tm_apply(action_data,
2574 action_params,
2575 &action->cfg.tm);
2576
2577 case RTE_TABLE_ACTION_ENCAP:
2578 return encap_apply(action_data,
2579 action_params,
2580 &action->cfg.encap,
2581 &action->cfg.common);
2582
2583 case RTE_TABLE_ACTION_NAT:
2584 return nat_apply(action_data,
2585 action_params,
2586 &action->cfg.common);
2587
2588 case RTE_TABLE_ACTION_TTL:
2589 return ttl_apply(action_data,
2590 action_params);
2591
2592 case RTE_TABLE_ACTION_STATS:
2593 return stats_apply(action_data,
2594 action_params);
2595
2596 case RTE_TABLE_ACTION_TIME:
2597 return time_apply(action_data,
2598 action_params);
2599
2600 case RTE_TABLE_ACTION_SYM_CRYPTO:
2601 return sym_crypto_apply(action_data,
2602 &action->cfg.sym_crypto,
2603 action_params);
2604
2605 case RTE_TABLE_ACTION_TAG:
2606 return tag_apply(action_data,
2607 action_params);
2608
2609 case RTE_TABLE_ACTION_DECAP:
2610 return decap_apply(action_data,
2611 action_params);
2612
2613 default:
2614 return -EINVAL;
2615 }
2616 }
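
/*
 * Entry-building sketch (the entry buffer size and the final table add
 * are application specific): the same entry buffer is passed once per
 * enabled action, each call filling its own slice of the action data:
 *
 *	uint8_t entry[256];
 *	struct rte_pipeline_table_entry *e =
 *		(struct rte_pipeline_table_entry *)entry;
 *	struct rte_table_action_fwd_params fwd = {
 *		.action = RTE_PIPELINE_ACTION_PORT,
 *		.id = 0,
 *	};
 *
 *	rte_table_action_apply(a, e, RTE_TABLE_ACTION_FWD, &fwd);
 */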
2617
2618 int
2619 rte_table_action_dscp_table_update(struct rte_table_action *action,
2620 uint64_t dscp_mask,
2621 struct rte_table_action_dscp_table *table)
2622 {
2623 uint32_t i;
2624
2625 /* Check input arguments */
2626 if ((action == NULL) ||
2627 ((action->cfg.action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
2628 (1LLU << RTE_TABLE_ACTION_TM))) == 0) ||
2629 (dscp_mask == 0) ||
2630 (table == NULL))
2631 return -EINVAL;
2632
2633 for (i = 0; i < RTE_DIM(table->entry); i++) {
2634 struct dscp_table_entry_data *data =
2635 &action->dscp_table.entry[i];
2636 struct rte_table_action_dscp_table_entry *entry =
2637 &table->entry[i];
2638
2639 if ((dscp_mask & (1LLU << i)) == 0)
2640 continue;
2641
2642 data->color = entry->color;
2643 data->tc = entry->tc_id;
2644 data->tc_queue = entry->tc_queue_id;
2645 }
2646
2647 return 0;
2648 }
2649
2650 int
2651 rte_table_action_meter_profile_add(struct rte_table_action *action,
2652 uint32_t meter_profile_id,
2653 struct rte_table_action_meter_profile *profile)
2654 {
2655 struct meter_profile_data *mp_data;
2656 int status;
2657
2658 /* Check input arguments */
2659 if ((action == NULL) ||
2660 ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
2661 (profile == NULL))
2662 return -EINVAL;
2663
2664 if (profile->alg != RTE_TABLE_ACTION_METER_TRTCM)
2665 return -ENOTSUP;
2666
2667 mp_data = meter_profile_data_find(action->mp,
2668 RTE_DIM(action->mp),
2669 meter_profile_id);
2670 if (mp_data)
2671 return -EEXIST;
2672
2673 mp_data = meter_profile_data_find_unused(action->mp,
2674 RTE_DIM(action->mp));
2675 if (!mp_data)
2676 return -ENOSPC;
2677
2678 /* Install new profile */
2679 status = rte_meter_trtcm_profile_config(&mp_data->profile,
2680 &profile->trtcm);
2681 if (status)
2682 return status;
2683
2684 mp_data->profile_id = meter_profile_id;
2685 mp_data->valid = 1;
2686
2687 return 0;
2688 }
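
/*
 * trTCM profile sketch (rates in bytes per second and burst sizes in
 * bytes; all figures hypothetical):
 *
 *	struct rte_table_action_meter_profile mp = {
 *		.alg = RTE_TABLE_ACTION_METER_TRTCM,
 *		.trtcm = {
 *			.cir = 1250000,
 *			.pir = 2500000,
 *			.cbs = 2048,
 *			.pbs = 4096,
 *		},
 *	};
 *
 *	rte_table_action_meter_profile_add(a, 0, &mp);
 */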
2689
2690 int
2691 rte_table_action_meter_profile_delete(struct rte_table_action *action,
2692 uint32_t meter_profile_id)
2693 {
2694 struct meter_profile_data *mp_data;
2695
2696 /* Check input arguments */
2697 if ((action == NULL) ||
2698 ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0))
2699 return -EINVAL;
2700
2701 mp_data = meter_profile_data_find(action->mp,
2702 RTE_DIM(action->mp),
2703 meter_profile_id);
2704 if (!mp_data)
2705 return 0;
2706
2707 /* Uninstall profile */
2708 mp_data->valid = 0;
2709
2710 return 0;
2711 }
2712
2713 int
2714 rte_table_action_meter_read(struct rte_table_action *action,
2715 void *data,
2716 uint32_t tc_mask,
2717 struct rte_table_action_mtr_counters *stats,
2718 int clear)
2719 {
2720 struct mtr_trtcm_data *mtr_data;
2721 uint32_t i;
2722
2723 /* Check input arguments */
2724 if ((action == NULL) ||
2725 ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
2726 (data == NULL) ||
2727 (tc_mask > RTE_LEN2MASK(action->cfg.mtr.n_tc, uint32_t)))
2728 return -EINVAL;
2729
2730 mtr_data = action_data_get(data, action, RTE_TABLE_ACTION_MTR);
2731
2732 /* Read */
2733 if (stats) {
2734 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
2735 struct rte_table_action_mtr_counters_tc *dst =
2736 &stats->stats[i];
2737 struct mtr_trtcm_data *src = &mtr_data[i];
2738
2739 if ((tc_mask & (1 << i)) == 0)
2740 continue;
2741
2742 dst->n_packets[RTE_COLOR_GREEN] =
2743 mtr_trtcm_data_stats_get(src, RTE_COLOR_GREEN);
2744
2745 dst->n_packets[RTE_COLOR_YELLOW] =
2746 mtr_trtcm_data_stats_get(src, RTE_COLOR_YELLOW);
2747
2748 dst->n_packets[RTE_COLOR_RED] =
2749 mtr_trtcm_data_stats_get(src, RTE_COLOR_RED);
2750
2751 dst->n_packets_valid = 1;
2752 dst->n_bytes_valid = 0;
2753 }
2754
2755 stats->tc_mask = tc_mask;
2756 }
2757
2758 /* Clear */
2759 if (clear)
2760 for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
2761 struct mtr_trtcm_data *src = &mtr_data[i];
2762
2763 if ((tc_mask & (1 << i)) == 0)
2764 continue;
2765
2766 mtr_trtcm_data_stats_reset(src, RTE_COLOR_GREEN);
2767 mtr_trtcm_data_stats_reset(src, RTE_COLOR_YELLOW);
2768 mtr_trtcm_data_stats_reset(src, RTE_COLOR_RED);
2769 }
2770
2771
2772 return 0;
2773 }
2774
2775 int
2776 rte_table_action_ttl_read(struct rte_table_action *action,
2777 void *data,
2778 struct rte_table_action_ttl_counters *stats,
2779 int clear)
2780 {
2781 struct ttl_data *ttl_data;
2782
2783 /* Check input arguments */
2784 if ((action == NULL) ||
2785 ((action->cfg.action_mask &
2786 (1LLU << RTE_TABLE_ACTION_TTL)) == 0) ||
2787 (data == NULL))
2788 return -EINVAL;
2789
2790 ttl_data = action_data_get(data, action, RTE_TABLE_ACTION_TTL);
2791
2792 /* Read */
2793 if (stats)
2794 stats->n_packets = TTL_STATS_READ(ttl_data);
2795
2796 /* Clear */
2797 if (clear)
2798 TTL_STATS_RESET(ttl_data);
2799
2800 return 0;
2801 }
2802
2803 int
2804 rte_table_action_stats_read(struct rte_table_action *action,
2805 void *data,
2806 struct rte_table_action_stats_counters *stats,
2807 int clear)
2808 {
2809 struct stats_data *stats_data;
2810
2811 /* Check input arguments */
2812 if ((action == NULL) ||
2813 ((action->cfg.action_mask &
2814 (1LLU << RTE_TABLE_ACTION_STATS)) == 0) ||
2815 (data == NULL))
2816 return -EINVAL;
2817
2818 stats_data = action_data_get(data, action,
2819 RTE_TABLE_ACTION_STATS);
2820
2821 /* Read */
2822 if (stats) {
2823 stats->n_packets = stats_data->n_packets;
2824 stats->n_bytes = stats_data->n_bytes;
2825 stats->n_packets_valid = 1;
2826 stats->n_bytes_valid = 1;
2827 }
2828
2829 /* Clear */
2830 if (clear) {
2831 stats_data->n_packets = 0;
2832 stats_data->n_bytes = 0;
2833 }
2834
2835 return 0;
2836 }
2837
2838 int
2839 rte_table_action_time_read(struct rte_table_action *action,
2840 void *data,
2841 uint64_t *timestamp)
2842 {
2843 struct time_data *time_data;
2844
2845 /* Check input arguments */
2846 if ((action == NULL) ||
2847 ((action->cfg.action_mask &
2848 (1LLU << RTE_TABLE_ACTION_TIME)) == 0) ||
2849 (data == NULL) ||
2850 (timestamp == NULL))
2851 return -EINVAL;
2852
2853 time_data = action_data_get(data, action, RTE_TABLE_ACTION_TIME);
2854
2855 /* Read */
2856 *timestamp = time_data->time;
2857
2858 return 0;
2859 }
2860
2861 struct rte_cryptodev_sym_session *
2862 rte_table_action_crypto_sym_session_get(struct rte_table_action *action,
2863 void *data)
2864 {
2865 struct sym_crypto_data *sym_crypto_data;
2866
2867 /* Check input arguments */
2868 if ((action == NULL) ||
2869 ((action->cfg.action_mask &
2870 (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) == 0) ||
2871 (data == NULL))
2872 return NULL;
2873
2874 sym_crypto_data = action_data_get(data, action,
2875 RTE_TABLE_ACTION_SYM_CRYPTO);
2876
2877 return sym_crypto_data->session;
2878 }
2879
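/*
 * Per-packet handler. Enabled actions run in the fixed order of the
 * if-chain below: LB, MTR, TM, DECAP, ENCAP, NAT, TTL, STATS, TIME,
 * SYM_CRYPTO, then TAG; FWD itself is executed by the pipeline through
 * the table entry.
 */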
2880 static __rte_always_inline uint64_t
2881 pkt_work(struct rte_mbuf *mbuf,
2882 struct rte_pipeline_table_entry *table_entry,
2883 uint64_t time,
2884 struct rte_table_action *action,
2885 struct ap_config *cfg)
2886 {
2887 uint64_t drop_mask = 0;
2888
2889 uint32_t ip_offset = action->cfg.common.ip_offset;
2890 void *ip = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ip_offset);
2891
2892 uint32_t dscp;
2893 uint16_t total_length;
2894
2895 if (cfg->common.ip_version) {
2896 struct rte_ipv4_hdr *hdr = ip;
2897
2898 dscp = hdr->type_of_service >> 2;
2899 total_length = rte_ntohs(hdr->total_length);
2900 } else {
2901 struct rte_ipv6_hdr *hdr = ip;
2902
2903 dscp = (rte_ntohl(hdr->vtc_flow) & 0x0FC00000) >> 22;
2904 total_length = rte_ntohs(hdr->payload_len) +
2905 sizeof(struct rte_ipv6_hdr);
2906 }
2907
2908 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
2909 void *data =
2910 action_data_get(table_entry, action, RTE_TABLE_ACTION_LB);
2911
2912 pkt_work_lb(mbuf,
2913 data,
2914 &cfg->lb);
2915 }
2916 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
2917 void *data =
2918 action_data_get(table_entry, action, RTE_TABLE_ACTION_MTR);
2919
2920 drop_mask |= pkt_work_mtr(mbuf,
2921 data,
2922 &action->dscp_table,
2923 action->mp,
2924 time,
2925 dscp,
2926 total_length);
2927 }
2928
2929 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
2930 void *data =
2931 action_data_get(table_entry, action, RTE_TABLE_ACTION_TM);
2932
2933 pkt_work_tm(mbuf,
2934 data,
2935 &action->dscp_table,
2936 dscp);
2937 }
2938
2939 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
2940 void *data = action_data_get(table_entry,
2941 action,
2942 RTE_TABLE_ACTION_DECAP);
2943
2944 pkt_work_decap(mbuf, data);
2945 }
2946
2947 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
2948 void *data =
2949 action_data_get(table_entry, action, RTE_TABLE_ACTION_ENCAP);
2950
2951 pkt_work_encap(mbuf,
2952 data,
2953 &cfg->encap,
2954 ip,
2955 total_length,
2956 ip_offset);
2957 }
2958
2959 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
2960 void *data =
2961 action_data_get(table_entry, action, RTE_TABLE_ACTION_NAT);
2962
2963 if (cfg->common.ip_version)
2964 pkt_ipv4_work_nat(ip, data, &cfg->nat);
2965 else
2966 pkt_ipv6_work_nat(ip, data, &cfg->nat);
2967 }
2968
2969 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
2970 void *data =
2971 action_data_get(table_entry, action, RTE_TABLE_ACTION_TTL);
2972
2973 if (cfg->common.ip_version)
2974 drop_mask |= pkt_ipv4_work_ttl(ip, data);
2975 else
2976 drop_mask |= pkt_ipv6_work_ttl(ip, data);
2977 }
2978
2979 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
2980 void *data =
2981 action_data_get(table_entry, action, RTE_TABLE_ACTION_STATS);
2982
2983 pkt_work_stats(data, total_length);
2984 }
2985
2986 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
2987 void *data =
2988 action_data_get(table_entry, action, RTE_TABLE_ACTION_TIME);
2989
2990 pkt_work_time(data, time);
2991 }
2992
2993 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
2994 void *data = action_data_get(table_entry, action,
2995 RTE_TABLE_ACTION_SYM_CRYPTO);
2996
2997 drop_mask |= pkt_work_sym_crypto(mbuf, data, &cfg->sym_crypto,
2998 ip_offset);
2999 }
3000
3001 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
3002 void *data = action_data_get(table_entry,
3003 action,
3004 RTE_TABLE_ACTION_TAG);
3005
3006 pkt_work_tag(mbuf, data);
3007 }
3008
3009 return drop_mask;
3010 }
3011
3012 static __rte_always_inline uint64_t
3013 pkt4_work(struct rte_mbuf **mbufs,
3014 struct rte_pipeline_table_entry **table_entries,
3015 uint64_t time,
3016 struct rte_table_action *action,
3017 struct ap_config *cfg)
3018 {
3019 uint64_t drop_mask0 = 0;
3020 uint64_t drop_mask1 = 0;
3021 uint64_t drop_mask2 = 0;
3022 uint64_t drop_mask3 = 0;
3023
3024 struct rte_mbuf *mbuf0 = mbufs[0];
3025 struct rte_mbuf *mbuf1 = mbufs[1];
3026 struct rte_mbuf *mbuf2 = mbufs[2];
3027 struct rte_mbuf *mbuf3 = mbufs[3];
3028
3029 struct rte_pipeline_table_entry *table_entry0 = table_entries[0];
3030 struct rte_pipeline_table_entry *table_entry1 = table_entries[1];
3031 struct rte_pipeline_table_entry *table_entry2 = table_entries[2];
3032 struct rte_pipeline_table_entry *table_entry3 = table_entries[3];
3033
3034 uint32_t ip_offset = action->cfg.common.ip_offset;
3035 void *ip0 = RTE_MBUF_METADATA_UINT32_PTR(mbuf0, ip_offset);
3036 void *ip1 = RTE_MBUF_METADATA_UINT32_PTR(mbuf1, ip_offset);
3037 void *ip2 = RTE_MBUF_METADATA_UINT32_PTR(mbuf2, ip_offset);
3038 void *ip3 = RTE_MBUF_METADATA_UINT32_PTR(mbuf3, ip_offset);
3039
3040 uint32_t dscp0, dscp1, dscp2, dscp3;
3041 uint16_t total_length0, total_length1, total_length2, total_length3;
3042
3043 if (cfg->common.ip_version) {
3044 struct rte_ipv4_hdr *hdr0 = ip0;
3045 struct rte_ipv4_hdr *hdr1 = ip1;
3046 struct rte_ipv4_hdr *hdr2 = ip2;
3047 struct rte_ipv4_hdr *hdr3 = ip3;
3048
3049 dscp0 = hdr0->type_of_service >> 2;
3050 dscp1 = hdr1->type_of_service >> 2;
3051 dscp2 = hdr2->type_of_service >> 2;
3052 dscp3 = hdr3->type_of_service >> 2;
3053
3054 total_length0 = rte_ntohs(hdr0->total_length);
3055 total_length1 = rte_ntohs(hdr1->total_length);
3056 total_length2 = rte_ntohs(hdr2->total_length);
3057 total_length3 = rte_ntohs(hdr3->total_length);
3058 } else {
3059 struct rte_ipv6_hdr *hdr0 = ip0;
3060 struct rte_ipv6_hdr *hdr1 = ip1;
3061 struct rte_ipv6_hdr *hdr2 = ip2;
3062 struct rte_ipv6_hdr *hdr3 = ip3;
3063
3064 dscp0 = (rte_ntohl(hdr0->vtc_flow) & 0x0FC00000) >> 22;
3065 dscp1 = (rte_ntohl(hdr1->vtc_flow) & 0x0FC00000) >> 22;
3066 dscp2 = (rte_ntohl(hdr2->vtc_flow) & 0x0FC00000) >> 22;
3067 dscp3 = (rte_ntohl(hdr3->vtc_flow) & 0x0FC00000) >> 22;
3068
3069 total_length0 = rte_ntohs(hdr0->payload_len) +
3070 sizeof(struct rte_ipv6_hdr);
3071 total_length1 = rte_ntohs(hdr1->payload_len) +
3072 sizeof(struct rte_ipv6_hdr);
3073 total_length2 = rte_ntohs(hdr2->payload_len) +
3074 sizeof(struct rte_ipv6_hdr);
3075 total_length3 = rte_ntohs(hdr3->payload_len) +
3076 sizeof(struct rte_ipv6_hdr);
3077 }
3078
3079 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
3080 void *data0 =
3081 action_data_get(table_entry0, action, RTE_TABLE_ACTION_LB);
3082 void *data1 =
3083 action_data_get(table_entry1, action, RTE_TABLE_ACTION_LB);
3084 void *data2 =
3085 action_data_get(table_entry2, action, RTE_TABLE_ACTION_LB);
3086 void *data3 =
3087 action_data_get(table_entry3, action, RTE_TABLE_ACTION_LB);
3088
3089 pkt_work_lb(mbuf0,
3090 data0,
3091 &cfg->lb);
3092
3093 pkt_work_lb(mbuf1,
3094 data1,
3095 &cfg->lb);
3096
3097 pkt_work_lb(mbuf2,
3098 data2,
3099 &cfg->lb);
3100
3101 pkt_work_lb(mbuf3,
3102 data3,
3103 &cfg->lb);
3104 }
3105
3106 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
3107 void *data0 =
3108 action_data_get(table_entry0, action, RTE_TABLE_ACTION_MTR);
3109 void *data1 =
3110 action_data_get(table_entry1, action, RTE_TABLE_ACTION_MTR);
3111 void *data2 =
3112 action_data_get(table_entry2, action, RTE_TABLE_ACTION_MTR);
3113 void *data3 =
3114 action_data_get(table_entry3, action, RTE_TABLE_ACTION_MTR);
3115
3116 drop_mask0 |= pkt_work_mtr(mbuf0,
3117 data0,
3118 &action->dscp_table,
3119 action->mp,
3120 time,
3121 dscp0,
3122 total_length0);
3123
3124 drop_mask1 |= pkt_work_mtr(mbuf1,
3125 data1,
3126 &action->dscp_table,
3127 action->mp,
3128 time,
3129 dscp1,
3130 total_length1);
3131
3132 drop_mask2 |= pkt_work_mtr(mbuf2,
3133 data2,
3134 &action->dscp_table,
3135 action->mp,
3136 time,
3137 dscp2,
3138 total_length2);
3139
3140 drop_mask3 |= pkt_work_mtr(mbuf3,
3141 data3,
3142 &action->dscp_table,
3143 action->mp,
3144 time,
3145 dscp3,
3146 total_length3);
3147 }
3148
3149 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
3150 void *data0 =
3151 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TM);
3152 void *data1 =
3153 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TM);
3154 void *data2 =
3155 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TM);
3156 void *data3 =
3157 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TM);
3158
3159 pkt_work_tm(mbuf0,
3160 data0,
3161 &action->dscp_table,
3162 dscp0);
3163
3164 pkt_work_tm(mbuf1,
3165 data1,
3166 &action->dscp_table,
3167 dscp1);
3168
3169 pkt_work_tm(mbuf2,
3170 data2,
3171 &action->dscp_table,
3172 dscp2);
3173
3174 pkt_work_tm(mbuf3,
3175 data3,
3176 &action->dscp_table,
3177 dscp3);
3178 }
3179
3180 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
3181 void *data0 = action_data_get(table_entry0,
3182 action,
3183 RTE_TABLE_ACTION_DECAP);
3184 void *data1 = action_data_get(table_entry1,
3185 action,
3186 RTE_TABLE_ACTION_DECAP);
3187 void *data2 = action_data_get(table_entry2,
3188 action,
3189 RTE_TABLE_ACTION_DECAP);
3190 void *data3 = action_data_get(table_entry3,
3191 action,
3192 RTE_TABLE_ACTION_DECAP);
3193
3194 pkt4_work_decap(mbuf0, mbuf1, mbuf2, mbuf3,
3195 data0, data1, data2, data3);
3196 }
3197
3198 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
3199 void *data0 =
3200 action_data_get(table_entry0, action, RTE_TABLE_ACTION_ENCAP);
3201 void *data1 =
3202 action_data_get(table_entry1, action, RTE_TABLE_ACTION_ENCAP);
3203 void *data2 =
3204 action_data_get(table_entry2, action, RTE_TABLE_ACTION_ENCAP);
3205 void *data3 =
3206 action_data_get(table_entry3, action, RTE_TABLE_ACTION_ENCAP);
3207
3208 pkt_work_encap(mbuf0,
3209 data0,
3210 &cfg->encap,
3211 ip0,
3212 total_length0,
3213 ip_offset);
3214
3215 pkt_work_encap(mbuf1,
3216 data1,
3217 &cfg->encap,
3218 ip1,
3219 total_length1,
3220 ip_offset);
3221
3222 pkt_work_encap(mbuf2,
3223 data2,
3224 &cfg->encap,
3225 ip2,
3226 total_length2,
3227 ip_offset);
3228
3229 pkt_work_encap(mbuf3,
3230 data3,
3231 &cfg->encap,
3232 ip3,
3233 total_length3,
3234 ip_offset);
3235 }
3236
3237 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
3238 void *data0 =
3239 action_data_get(table_entry0, action, RTE_TABLE_ACTION_NAT);
3240 void *data1 =
3241 action_data_get(table_entry1, action, RTE_TABLE_ACTION_NAT);
3242 void *data2 =
3243 action_data_get(table_entry2, action, RTE_TABLE_ACTION_NAT);
3244 void *data3 =
3245 action_data_get(table_entry3, action, RTE_TABLE_ACTION_NAT);
3246
3247 if (cfg->common.ip_version) {
3248 pkt_ipv4_work_nat(ip0, data0, &cfg->nat);
3249 pkt_ipv4_work_nat(ip1, data1, &cfg->nat);
3250 pkt_ipv4_work_nat(ip2, data2, &cfg->nat);
3251 pkt_ipv4_work_nat(ip3, data3, &cfg->nat);
3252 } else {
3253 pkt_ipv6_work_nat(ip0, data0, &cfg->nat);
3254 pkt_ipv6_work_nat(ip1, data1, &cfg->nat);
3255 pkt_ipv6_work_nat(ip2, data2, &cfg->nat);
3256 pkt_ipv6_work_nat(ip3, data3, &cfg->nat);
3257 }
3258 }
3259
3260 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
3261 void *data0 =
3262 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TTL);
3263 void *data1 =
3264 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TTL);
3265 void *data2 =
3266 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TTL);
3267 void *data3 =
3268 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TTL);
3269
3270 if (cfg->common.ip_version) {
3271 drop_mask0 |= pkt_ipv4_work_ttl(ip0, data0);
3272 drop_mask1 |= pkt_ipv4_work_ttl(ip1, data1);
3273 drop_mask2 |= pkt_ipv4_work_ttl(ip2, data2);
3274 drop_mask3 |= pkt_ipv4_work_ttl(ip3, data3);
3275 } else {
3276 drop_mask0 |= pkt_ipv6_work_ttl(ip0, data0);
3277 drop_mask1 |= pkt_ipv6_work_ttl(ip1, data1);
3278 drop_mask2 |= pkt_ipv6_work_ttl(ip2, data2);
3279 drop_mask3 |= pkt_ipv6_work_ttl(ip3, data3);
3280 }
3281 }
3282
3283 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
3284 void *data0 =
3285 action_data_get(table_entry0, action, RTE_TABLE_ACTION_STATS);
3286 void *data1 =
3287 action_data_get(table_entry1, action, RTE_TABLE_ACTION_STATS);
3288 void *data2 =
3289 action_data_get(table_entry2, action, RTE_TABLE_ACTION_STATS);
3290 void *data3 =
3291 action_data_get(table_entry3, action, RTE_TABLE_ACTION_STATS);
3292
3293 pkt_work_stats(data0, total_length0);
3294 pkt_work_stats(data1, total_length1);
3295 pkt_work_stats(data2, total_length2);
3296 pkt_work_stats(data3, total_length3);
3297 }
3298
3299 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
3300 void *data0 =
3301 action_data_get(table_entry0, action, RTE_TABLE_ACTION_TIME);
3302 void *data1 =
3303 action_data_get(table_entry1, action, RTE_TABLE_ACTION_TIME);
3304 void *data2 =
3305 action_data_get(table_entry2, action, RTE_TABLE_ACTION_TIME);
3306 void *data3 =
3307 action_data_get(table_entry3, action, RTE_TABLE_ACTION_TIME);
3308
3309 pkt_work_time(data0, time);
3310 pkt_work_time(data1, time);
3311 pkt_work_time(data2, time);
3312 pkt_work_time(data3, time);
3313 }
3314
3315 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
3316 void *data0 = action_data_get(table_entry0, action,
3317 RTE_TABLE_ACTION_SYM_CRYPTO);
3318 void *data1 = action_data_get(table_entry1, action,
3319 RTE_TABLE_ACTION_SYM_CRYPTO);
3320 void *data2 = action_data_get(table_entry2, action,
3321 RTE_TABLE_ACTION_SYM_CRYPTO);
3322 void *data3 = action_data_get(table_entry3, action,
3323 RTE_TABLE_ACTION_SYM_CRYPTO);
3324
3325 drop_mask0 |= pkt_work_sym_crypto(mbuf0, data0, &cfg->sym_crypto,
3326 ip_offset);
3327 drop_mask1 |= pkt_work_sym_crypto(mbuf1, data1, &cfg->sym_crypto,
3328 ip_offset);
3329 drop_mask2 |= pkt_work_sym_crypto(mbuf2, data2, &cfg->sym_crypto,
3330 ip_offset);
3331 drop_mask3 |= pkt_work_sym_crypto(mbuf3, data3, &cfg->sym_crypto,
3332 ip_offset);
3333 }
3334
3335 if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
3336 void *data0 = action_data_get(table_entry0,
3337 action,
3338 RTE_TABLE_ACTION_TAG);
3339 void *data1 = action_data_get(table_entry1,
3340 action,
3341 RTE_TABLE_ACTION_TAG);
3342 void *data2 = action_data_get(table_entry2,
3343 action,
3344 RTE_TABLE_ACTION_TAG);
3345 void *data3 = action_data_get(table_entry3,
3346 action,
3347 RTE_TABLE_ACTION_TAG);
3348
3349 pkt4_work_tag(mbuf0, mbuf1, mbuf2, mbuf3,
3350 data0, data1, data2, data3);
3351 }
3352
3353 return drop_mask0 |
3354 (drop_mask1 << 1) |
3355 (drop_mask2 << 2) |
3356 (drop_mask3 << 3);
3357 }
3358
3359 static __rte_always_inline int
3360 ah(struct rte_pipeline *p,
3361 struct rte_mbuf **pkts,
3362 uint64_t pkts_mask,
3363 struct rte_pipeline_table_entry **entries,
3364 struct rte_table_action *action,
3365 struct ap_config *cfg)
3366 {
3367 uint64_t pkts_drop_mask = 0;
3368 uint64_t time = 0;
3369
3370 if (cfg->action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
3371 (1LLU << RTE_TABLE_ACTION_TIME)))
3372 time = rte_rdtsc();
3373
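/*
 * pkts_mask + 1 shares no bits with pkts_mask only for masks of the
 * form 2^n - 1, so this test selects the dense case: n contiguous
 * packets starting at bit 0, which can be processed four at a time.
 */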
3374 if ((pkts_mask & (pkts_mask + 1)) == 0) {
3375 uint64_t n_pkts = __builtin_popcountll(pkts_mask);
3376 uint32_t i;
3377
3378 for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
3379 uint64_t drop_mask;
3380
3381 drop_mask = pkt4_work(&pkts[i],
3382 &entries[i],
3383 time,
3384 action,
3385 cfg);
3386
3387 pkts_drop_mask |= drop_mask << i;
3388 }
3389
3390 for ( ; i < n_pkts; i++) {
3391 uint64_t drop_mask;
3392
3393 drop_mask = pkt_work(pkts[i],
3394 entries[i],
3395 time,
3396 action,
3397 cfg);
3398
3399 pkts_drop_mask |= drop_mask << i;
3400 }
3401 } else
3402 for ( ; pkts_mask; ) {
3403 uint32_t pos = __builtin_ctzll(pkts_mask);
3404 uint64_t pkt_mask = 1LLU << pos;
3405 uint64_t drop_mask;
3406
3407 drop_mask = pkt_work(pkts[pos],
3408 entries[pos],
3409 time,
3410 action,
3411 cfg);
3412
3413 pkts_mask &= ~pkt_mask;
3414 pkts_drop_mask |= drop_mask << pos;
3415 }
3416
3417 rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
3418
3419 return 0;
3420 }
3421
3422 static int
3423 ah_default(struct rte_pipeline *p,
3424 struct rte_mbuf **pkts,
3425 uint64_t pkts_mask,
3426 struct rte_pipeline_table_entry **entries,
3427 void *arg)
3428 {
3429 struct rte_table_action *action = arg;
3430
3431 return ah(p,
3432 pkts,
3433 pkts_mask,
3434 entries,
3435 action,
3436 &action->cfg);
3437 }
3438
3439 static rte_pipeline_table_action_handler_hit
3440 ah_selector(struct rte_table_action *action)
3441 {
3442 if (action->cfg.action_mask == (1LLU << RTE_TABLE_ACTION_FWD))
3443 return NULL;
3444
3445 return ah_default;
3446 }
3447
3448 int
3449 rte_table_action_table_params_get(struct rte_table_action *action,
3450 struct rte_pipeline_table_params *params)
3451 {
3452 rte_pipeline_table_action_handler_hit f_action_hit;
3453 uint32_t total_size;
3454
3455 /* Check input arguments */
3456 if ((action == NULL) ||
3457 (params == NULL))
3458 return -EINVAL;
3459
3460 f_action_hit = ah_selector(action);
3461 total_size = rte_align32pow2(action->data.total_size);
3462
3463 /* Fill in params */
3464 params->f_action_hit = f_action_hit;
3465 params->f_action_miss = NULL;
3466 params->arg_ah = (f_action_hit) ? action : NULL;
3467 params->action_data_size = total_size -
3468 sizeof(struct rte_pipeline_table_entry);
3469
3470 return 0;
3471 }
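
/*
 * Wiring sketch (the pipeline p, the table ops and hash_params are
 * hypothetical application state; error handling omitted): the
 * application fills in the table type, then this function supplies the
 * action handler and the per-entry action data size:
 *
 *	struct rte_pipeline_table_params tp;
 *	uint32_t table_id;
 *
 *	memset(&tp, 0, sizeof(tp));
 *	tp.ops = &rte_table_hash_key16_ops;
 *	tp.arg_create = &hash_params;
 *	rte_table_action_table_params_get(a, &tp);
 *	rte_pipeline_table_create(p, &tp, &table_id);
 */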
3472
3473 int
3474 rte_table_action_free(struct rte_table_action *action)
3475 {
3476 if (action == NULL)
3477 return 0;
3478
3479 rte_free(action);
3480
3481 return 0;
3482 }