]>
Commit | Line | Data |
---|---|---|
11fdf7f2 TL |
1 | /* SPDX-License-Identifier: BSD-3-Clause |
2 | * Copyright 2016 6WIND S.A. | |
3 | * Copyright 2016 Mellanox Technologies, Ltd | |
4 | */ | |
5 | ||
6 | #include <errno.h> | |
7 | #include <stddef.h> | |
8 | #include <stdint.h> | |
9 | #include <string.h> | |
10 | ||
11 | #include <rte_common.h> | |
12 | #include <rte_errno.h> | |
13 | #include <rte_branch_prediction.h> | |
9f95a23c | 14 | #include <rte_string_fns.h> |
f67539c2 TL |
15 | #include <rte_mbuf.h> |
16 | #include <rte_mbuf_dyn.h> | |
11fdf7f2 TL |
17 | #include "rte_ethdev.h" |
18 | #include "rte_flow_driver.h" | |
19 | #include "rte_flow.h" | |
20 | ||
f67539c2 TL |
/* Mbuf dynamic field offset for metadata; -1 until registered via
 * rte_flow_dynf_metadata_register().
 */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic flag mask for metadata; 0 until registered. */
uint64_t rte_flow_dynf_metadata_mask;
26 | ||
11fdf7f2 TL |
/**
 * Flow elements description tables.
 *
 * One entry per pattern item or action type, indexed by the type enum;
 * used to look up the size of a specification/configuration object.
 */
struct rte_flow_desc_data {
	const char *name; /**< Element name (stringified enum suffix). */
	size_t size; /**< Size of the associated spec/conf structure. */
};
34 | ||
/** Generate flow_item[] entry (designated initializer keyed by type). */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
41 | ||
/** Information about known flow pattern items, indexed by item type. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	/* GTPC and GTPU deliberately share the generic GTP item layout. */
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	/* GRE_KEY has no dedicated structure; the spec is a bare 32-bit key. */
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	/* PPPoE session and discovery stages share one item layout. */
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
		     sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
};
99 | ||
/** Generate flow_action[] entry (designated initializer keyed by type). */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
106 | ||
/** Information about known flow actions, indexed by action type. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_SET_MPLS_TTL,
		       sizeof(struct rte_flow_action_of_set_mpls_ttl)),
	MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
	MK_FLOW_ACTION(OF_SET_NW_TTL,
		       sizeof(struct rte_flow_action_of_set_nw_ttl)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	/*
	 * NVGRE_ENCAP reuses the VXLAN encap descriptor size; the two
	 * structures are asserted layout-compatible by RTE_BUILD_BUG_ON in
	 * rte_flow_conv_action_conf().
	 */
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	/* SRC/DST variants share one configuration structure per family. */
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	/* TCP seq/ack adjustments carry a bare 32-bit big-endian value. */
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
};
177 | ||
f67539c2 TL |
178 | int |
179 | rte_flow_dynf_metadata_register(void) | |
180 | { | |
181 | int offset; | |
182 | int flag; | |
183 | ||
184 | static const struct rte_mbuf_dynfield desc_offs = { | |
185 | .name = RTE_MBUF_DYNFIELD_METADATA_NAME, | |
186 | .size = sizeof(uint32_t), | |
187 | .align = __alignof__(uint32_t), | |
188 | }; | |
189 | static const struct rte_mbuf_dynflag desc_flag = { | |
190 | .name = RTE_MBUF_DYNFLAG_METADATA_NAME, | |
191 | }; | |
192 | ||
193 | offset = rte_mbuf_dynfield_register(&desc_offs); | |
194 | if (offset < 0) | |
195 | goto error; | |
196 | flag = rte_mbuf_dynflag_register(&desc_flag); | |
197 | if (flag < 0) | |
198 | goto error; | |
199 | rte_flow_dynf_metadata_offs = offset; | |
200 | rte_flow_dynf_metadata_mask = (1ULL << flag); | |
201 | return 0; | |
202 | ||
203 | error: | |
204 | rte_flow_dynf_metadata_offs = -1; | |
205 | rte_flow_dynf_metadata_mask = 0ULL; | |
206 | return -rte_errno; | |
207 | } | |
208 | ||
11fdf7f2 TL |
209 | static int |
210 | flow_err(uint16_t port_id, int ret, struct rte_flow_error *error) | |
211 | { | |
212 | if (ret == 0) | |
213 | return 0; | |
214 | if (rte_eth_dev_is_removed(port_id)) | |
215 | return rte_flow_error_set(error, EIO, | |
216 | RTE_FLOW_ERROR_TYPE_UNSPECIFIED, | |
217 | NULL, rte_strerror(EIO)); | |
218 | return ret; | |
219 | } | |
220 | ||
f67539c2 TL |
221 | static enum rte_flow_item_type |
222 | rte_flow_expand_rss_item_complete(const struct rte_flow_item *item) | |
223 | { | |
224 | enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID; | |
225 | uint16_t ether_type = 0; | |
226 | uint16_t ether_type_m; | |
227 | uint8_t ip_next_proto = 0; | |
228 | uint8_t ip_next_proto_m; | |
229 | ||
230 | if (item == NULL || item->spec == NULL) | |
231 | return ret; | |
232 | switch (item->type) { | |
233 | case RTE_FLOW_ITEM_TYPE_ETH: | |
234 | if (item->mask) | |
235 | ether_type_m = ((const struct rte_flow_item_eth *) | |
236 | (item->mask))->type; | |
237 | else | |
238 | ether_type_m = rte_flow_item_eth_mask.type; | |
239 | if (ether_type_m != RTE_BE16(0xFFFF)) | |
240 | break; | |
241 | ether_type = ((const struct rte_flow_item_eth *) | |
242 | (item->spec))->type; | |
243 | if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4) | |
244 | ret = RTE_FLOW_ITEM_TYPE_IPV4; | |
245 | else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6) | |
246 | ret = RTE_FLOW_ITEM_TYPE_IPV6; | |
247 | else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN) | |
248 | ret = RTE_FLOW_ITEM_TYPE_VLAN; | |
249 | break; | |
250 | case RTE_FLOW_ITEM_TYPE_VLAN: | |
251 | if (item->mask) | |
252 | ether_type_m = ((const struct rte_flow_item_vlan *) | |
253 | (item->mask))->inner_type; | |
254 | else | |
255 | ether_type_m = rte_flow_item_vlan_mask.inner_type; | |
256 | if (ether_type_m != RTE_BE16(0xFFFF)) | |
257 | break; | |
258 | ether_type = ((const struct rte_flow_item_vlan *) | |
259 | (item->spec))->inner_type; | |
260 | if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4) | |
261 | ret = RTE_FLOW_ITEM_TYPE_IPV4; | |
262 | else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6) | |
263 | ret = RTE_FLOW_ITEM_TYPE_IPV6; | |
264 | else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN) | |
265 | ret = RTE_FLOW_ITEM_TYPE_VLAN; | |
266 | break; | |
267 | case RTE_FLOW_ITEM_TYPE_IPV4: | |
268 | if (item->mask) | |
269 | ip_next_proto_m = ((const struct rte_flow_item_ipv4 *) | |
270 | (item->mask))->hdr.next_proto_id; | |
271 | else | |
272 | ip_next_proto_m = | |
273 | rte_flow_item_ipv4_mask.hdr.next_proto_id; | |
274 | if (ip_next_proto_m != 0xFF) | |
275 | break; | |
276 | ip_next_proto = ((const struct rte_flow_item_ipv4 *) | |
277 | (item->spec))->hdr.next_proto_id; | |
278 | if (ip_next_proto == IPPROTO_UDP) | |
279 | ret = RTE_FLOW_ITEM_TYPE_UDP; | |
280 | else if (ip_next_proto == IPPROTO_TCP) | |
281 | ret = RTE_FLOW_ITEM_TYPE_TCP; | |
282 | else if (ip_next_proto == IPPROTO_IP) | |
283 | ret = RTE_FLOW_ITEM_TYPE_IPV4; | |
284 | else if (ip_next_proto == IPPROTO_IPV6) | |
285 | ret = RTE_FLOW_ITEM_TYPE_IPV6; | |
286 | break; | |
287 | case RTE_FLOW_ITEM_TYPE_IPV6: | |
288 | if (item->mask) | |
289 | ip_next_proto_m = ((const struct rte_flow_item_ipv6 *) | |
290 | (item->mask))->hdr.proto; | |
291 | else | |
292 | ip_next_proto_m = | |
293 | rte_flow_item_ipv6_mask.hdr.proto; | |
294 | if (ip_next_proto_m != 0xFF) | |
295 | break; | |
296 | ip_next_proto = ((const struct rte_flow_item_ipv6 *) | |
297 | (item->spec))->hdr.proto; | |
298 | if (ip_next_proto == IPPROTO_UDP) | |
299 | ret = RTE_FLOW_ITEM_TYPE_UDP; | |
300 | else if (ip_next_proto == IPPROTO_TCP) | |
301 | ret = RTE_FLOW_ITEM_TYPE_TCP; | |
302 | else if (ip_next_proto == IPPROTO_IP) | |
303 | ret = RTE_FLOW_ITEM_TYPE_IPV4; | |
304 | else if (ip_next_proto == IPPROTO_IPV6) | |
305 | ret = RTE_FLOW_ITEM_TYPE_IPV6; | |
306 | break; | |
307 | default: | |
308 | ret = RTE_FLOW_ITEM_TYPE_VOID; | |
309 | break; | |
310 | } | |
311 | return ret; | |
312 | } | |
313 | ||
11fdf7f2 TL |
314 | /* Get generic flow operations structure from a port. */ |
315 | const struct rte_flow_ops * | |
316 | rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error) | |
317 | { | |
318 | struct rte_eth_dev *dev = &rte_eth_devices[port_id]; | |
319 | const struct rte_flow_ops *ops; | |
320 | int code; | |
321 | ||
322 | if (unlikely(!rte_eth_dev_is_valid_port(port_id))) | |
323 | code = ENODEV; | |
324 | else if (unlikely(!dev->dev_ops->filter_ctrl || | |
325 | dev->dev_ops->filter_ctrl(dev, | |
326 | RTE_ETH_FILTER_GENERIC, | |
327 | RTE_ETH_FILTER_GET, | |
328 | &ops) || | |
329 | !ops)) | |
330 | code = ENOSYS; | |
331 | else | |
332 | return ops; | |
333 | rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, | |
334 | NULL, rte_strerror(code)); | |
335 | return NULL; | |
336 | } | |
337 | ||
338 | /* Check whether a flow rule can be created on a given port. */ | |
339 | int | |
340 | rte_flow_validate(uint16_t port_id, | |
341 | const struct rte_flow_attr *attr, | |
342 | const struct rte_flow_item pattern[], | |
343 | const struct rte_flow_action actions[], | |
344 | struct rte_flow_error *error) | |
345 | { | |
346 | const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); | |
347 | struct rte_eth_dev *dev = &rte_eth_devices[port_id]; | |
348 | ||
349 | if (unlikely(!ops)) | |
350 | return -rte_errno; | |
351 | if (likely(!!ops->validate)) | |
352 | return flow_err(port_id, ops->validate(dev, attr, pattern, | |
353 | actions, error), error); | |
354 | return rte_flow_error_set(error, ENOSYS, | |
355 | RTE_FLOW_ERROR_TYPE_UNSPECIFIED, | |
356 | NULL, rte_strerror(ENOSYS)); | |
357 | } | |
358 | ||
359 | /* Create a flow rule on a given port. */ | |
360 | struct rte_flow * | |
361 | rte_flow_create(uint16_t port_id, | |
362 | const struct rte_flow_attr *attr, | |
363 | const struct rte_flow_item pattern[], | |
364 | const struct rte_flow_action actions[], | |
365 | struct rte_flow_error *error) | |
366 | { | |
367 | struct rte_eth_dev *dev = &rte_eth_devices[port_id]; | |
368 | struct rte_flow *flow; | |
369 | const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); | |
370 | ||
371 | if (unlikely(!ops)) | |
372 | return NULL; | |
373 | if (likely(!!ops->create)) { | |
374 | flow = ops->create(dev, attr, pattern, actions, error); | |
375 | if (flow == NULL) | |
376 | flow_err(port_id, -rte_errno, error); | |
377 | return flow; | |
378 | } | |
379 | rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, | |
380 | NULL, rte_strerror(ENOSYS)); | |
381 | return NULL; | |
382 | } | |
383 | ||
384 | /* Destroy a flow rule on a given port. */ | |
385 | int | |
386 | rte_flow_destroy(uint16_t port_id, | |
387 | struct rte_flow *flow, | |
388 | struct rte_flow_error *error) | |
389 | { | |
390 | struct rte_eth_dev *dev = &rte_eth_devices[port_id]; | |
391 | const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); | |
392 | ||
393 | if (unlikely(!ops)) | |
394 | return -rte_errno; | |
395 | if (likely(!!ops->destroy)) | |
396 | return flow_err(port_id, ops->destroy(dev, flow, error), | |
397 | error); | |
398 | return rte_flow_error_set(error, ENOSYS, | |
399 | RTE_FLOW_ERROR_TYPE_UNSPECIFIED, | |
400 | NULL, rte_strerror(ENOSYS)); | |
401 | } | |
402 | ||
403 | /* Destroy all flow rules associated with a port. */ | |
404 | int | |
405 | rte_flow_flush(uint16_t port_id, | |
406 | struct rte_flow_error *error) | |
407 | { | |
408 | struct rte_eth_dev *dev = &rte_eth_devices[port_id]; | |
409 | const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); | |
410 | ||
411 | if (unlikely(!ops)) | |
412 | return -rte_errno; | |
413 | if (likely(!!ops->flush)) | |
414 | return flow_err(port_id, ops->flush(dev, error), error); | |
415 | return rte_flow_error_set(error, ENOSYS, | |
416 | RTE_FLOW_ERROR_TYPE_UNSPECIFIED, | |
417 | NULL, rte_strerror(ENOSYS)); | |
418 | } | |
419 | ||
420 | /* Query an existing flow rule. */ | |
421 | int | |
422 | rte_flow_query(uint16_t port_id, | |
423 | struct rte_flow *flow, | |
424 | const struct rte_flow_action *action, | |
425 | void *data, | |
426 | struct rte_flow_error *error) | |
427 | { | |
428 | struct rte_eth_dev *dev = &rte_eth_devices[port_id]; | |
429 | const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); | |
430 | ||
431 | if (!ops) | |
432 | return -rte_errno; | |
433 | if (likely(!!ops->query)) | |
434 | return flow_err(port_id, ops->query(dev, flow, action, data, | |
435 | error), error); | |
436 | return rte_flow_error_set(error, ENOSYS, | |
437 | RTE_FLOW_ERROR_TYPE_UNSPECIFIED, | |
438 | NULL, rte_strerror(ENOSYS)); | |
439 | } | |
440 | ||
441 | /* Restrict ingress traffic to the defined flow rules. */ | |
442 | int | |
443 | rte_flow_isolate(uint16_t port_id, | |
444 | int set, | |
445 | struct rte_flow_error *error) | |
446 | { | |
447 | struct rte_eth_dev *dev = &rte_eth_devices[port_id]; | |
448 | const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); | |
449 | ||
450 | if (!ops) | |
451 | return -rte_errno; | |
452 | if (likely(!!ops->isolate)) | |
453 | return flow_err(port_id, ops->isolate(dev, set, error), error); | |
454 | return rte_flow_error_set(error, ENOSYS, | |
455 | RTE_FLOW_ERROR_TYPE_UNSPECIFIED, | |
456 | NULL, rte_strerror(ENOSYS)); | |
457 | } | |
458 | ||
459 | /* Initialize flow error structure. */ | |
460 | int | |
461 | rte_flow_error_set(struct rte_flow_error *error, | |
462 | int code, | |
463 | enum rte_flow_error_type type, | |
464 | const void *cause, | |
465 | const char *message) | |
466 | { | |
467 | if (error) { | |
468 | *error = (struct rte_flow_error){ | |
469 | .type = type, | |
470 | .cause = cause, | |
471 | .message = message, | |
472 | }; | |
473 | } | |
474 | rte_errno = code; | |
475 | return -code; | |
476 | } | |
477 | ||
/** Pattern item specification types (selector for rte_flow_conv_item_spec). */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC, /**< Copy the item's spec field. */
	RTE_FLOW_CONV_ITEM_LAST, /**< Copy the item's last field. */
	RTE_FLOW_CONV_ITEM_MASK, /**< Copy the item's mask field. */
};
484 | ||
9f95a23c TL |
/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	/* Select which of the three specification fields is being copied. */
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		/* Scratch unions scoped to the switch, used by RAW only. */
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		/* RAW items carry an out-of-line pattern that must be
		 * deep-copied after the fixed-size structure itself.
		 */
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		/* Copy scalar fields only; the pattern pointer is fixed up
		 * below once its destination offset is known.
		 */
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		/* Effective pattern length: for spec (and for a mask that
		 * does not select a longer "last" range), use the masked
		 * spec length; otherwise use the masked "last" length.
		 */
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			/* Only write the pattern (and its pointer) when the
			 * buffer is large enough; @p off is counted anyway
			 * so the caller learns the required size.
			 */
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		/* All other item types are flat structures whose size comes
		 * from the descriptor table; truncate to @p size.
		 */
		off = rte_flow_desc_item[item->type].size;
		rte_memcpy(buf, data, (size > off ? off : size));
		break;
	}
	return off;
}
572 | ||
9f95a23c TL |
/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		/* Scratch unions scoped to the switch for the action types
		 * that need deep copies of out-of-line data.
		 */
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		/* RSS carries two out-of-line arrays (key and queues) that
		 * are appended after the fixed-size structure.
		 */
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			/* Pointer fixed up only when the copy actually fits;
			 * @p off is still advanced to report required size.
			 */
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		/* Both encap types are handled through the VXLAN view;
		 * the build-time assertion proves the layouts match.
		 */
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			/* The embedded pattern is converted recursively and
			 * appended after the fixed-size structure.
			 */
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		/* All other actions are flat structures whose size comes
		 * from the descriptor table; truncate to @p size.
		 */
		off = rte_flow_desc_action[action->type].size;
		rte_memcpy(buf, action->conf, (size > off ? off : size));
		break;
	}
	return off;
}
675 | ||
/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p buf contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	/* First pass: copy the item array itself (types only), determine
	 * the actual item count and validate each type.
	 */
	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		if ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		    !rte_flow_desc_item[src->type].name)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		/* RTE_FLOW_ITEM_TYPE_END is 0: stop after this entry. */
		if (!src->type)
			num = i + 1;
	}
	/* Rewind to the start for the second pass. */
	num = i;
	src -= num;
	dst -= num;
	/* Second pass: deep-copy spec/last/mask after the array and patch
	 * the copied items' pointers, each copy aligned for any type.
	 */
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;

		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}
764 | ||
/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p buf contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	/* First pass: copy the action array (types only), determine the
	 * actual action count and validate each type.
	 */
	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		if ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		    !rte_flow_desc_action[src->type].name)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		/* RTE_FLOW_ACTION_TYPE_END is 0: stop after this entry. */
		if (!src->type)
			num = i + 1;
	}
	/* Rewind to the start for the second pass. */
	num = i;
	src -= num;
	dst -= num;
	/* Second pass: deep-copy each configuration after the array and
	 * patch the copied actions' conf pointers.
	 */
	do {
		if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}
831 | ||
832 | /** | |
833 | * Copy flow rule components. | |
834 | * | |
835 | * This comprises the flow rule descriptor itself, attributes, pattern and | |
836 | * actions list. NULL components in @p src are skipped. | |
837 | * | |
838 | * @param[out] dst | |
839 | * Destination buffer. Can be NULL if @p size is zero. | |
840 | * @param size | |
841 | * Size of @p dst in bytes. | |
842 | * @param[in] src | |
843 | * Source flow rule descriptor. | |
844 | * @param[out] error | |
845 | * Perform verbose error reporting if not NULL. | |
846 | * | |
847 | * @return | |
848 | * A positive value representing the number of bytes needed to store all | |
849 | * components including the descriptor regardless of @p size on success | |
850 | * (@p buf contents are truncated to @p size if not large enough), a | |
851 | * negative errno value otherwise and rte_errno is set. | |
852 | */ | |
853 | static int | |
854 | rte_flow_conv_rule(struct rte_flow_conv_rule *dst, | |
855 | const size_t size, | |
856 | const struct rte_flow_conv_rule *src, | |
857 | struct rte_flow_error *error) | |
858 | { | |
859 | size_t off; | |
860 | int ret; | |
861 | ||
862 | rte_memcpy(dst, | |
863 | (&(struct rte_flow_conv_rule){ | |
864 | .attr = NULL, | |
865 | .pattern = NULL, | |
866 | .actions = NULL, | |
867 | }), | |
868 | size > sizeof(*dst) ? sizeof(*dst) : size); | |
869 | off = sizeof(*dst); | |
870 | if (src->attr_ro) { | |
871 | off = RTE_ALIGN_CEIL(off, sizeof(double)); | |
872 | if (size && size >= off + sizeof(*dst->attr)) | |
873 | dst->attr = rte_memcpy | |
874 | ((void *)((uintptr_t)dst + off), | |
875 | src->attr_ro, sizeof(*dst->attr)); | |
876 | off += sizeof(*dst->attr); | |
877 | } | |
878 | if (src->pattern_ro) { | |
879 | off = RTE_ALIGN_CEIL(off, sizeof(double)); | |
880 | ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off), | |
881 | size > off ? size - off : 0, | |
882 | src->pattern_ro, 0, error); | |
883 | if (ret < 0) | |
884 | return ret; | |
885 | if (size && size >= off + (size_t)ret) | |
886 | dst->pattern = (void *)((uintptr_t)dst + off); | |
887 | off += ret; | |
888 | } | |
889 | if (src->actions_ro) { | |
890 | off = RTE_ALIGN_CEIL(off, sizeof(double)); | |
891 | ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off), | |
892 | size > off ? size - off : 0, | |
893 | src->actions_ro, 0, error); | |
894 | if (ret < 0) | |
895 | return ret; | |
896 | if (size >= off + (size_t)ret) | |
897 | dst->actions = (void *)((uintptr_t)dst + off); | |
898 | off += ret; | |
899 | } | |
900 | return off; | |
901 | } | |
902 | ||
903 | /** | |
904 | * Retrieve the name of a pattern item/action type. | |
905 | * | |
906 | * @param is_action | |
907 | * Nonzero when @p src represents an action type instead of a pattern item | |
908 | * type. | |
909 | * @param is_ptr | |
910 | * Nonzero to write string address instead of contents into @p dst. | |
911 | * @param[out] dst | |
912 | * Destination buffer. Can be NULL if @p size is zero. | |
913 | * @param size | |
914 | * Size of @p dst in bytes. | |
915 | * @param[in] src | |
916 | * Depending on @p is_action, source pattern item or action type cast as a | |
917 | * pointer. | |
918 | * @param[out] error | |
919 | * Perform verbose error reporting if not NULL. | |
920 | * | |
921 | * @return | |
922 | * A positive value representing the number of bytes needed to store the | |
923 | * name or its address regardless of @p size on success (@p buf contents | |
924 | * are truncated to @p size if not large enough), a negative errno value | |
925 | * otherwise and rte_errno is set. | |
926 | */ | |
927 | static int | |
928 | rte_flow_conv_name(int is_action, | |
929 | int is_ptr, | |
930 | char *dst, | |
931 | const size_t size, | |
932 | const void *src, | |
933 | struct rte_flow_error *error) | |
934 | { | |
935 | struct desc_info { | |
936 | const struct rte_flow_desc_data *data; | |
937 | size_t num; | |
938 | }; | |
939 | static const struct desc_info info_rep[2] = { | |
940 | { rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), }, | |
941 | { rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), }, | |
942 | }; | |
943 | const struct desc_info *const info = &info_rep[!!is_action]; | |
944 | unsigned int type = (uintptr_t)src; | |
945 | ||
946 | if (type >= info->num) | |
947 | return rte_flow_error_set | |
948 | (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, | |
949 | "unknown object type to retrieve the name of"); | |
950 | if (!is_ptr) | |
951 | return strlcpy(dst, info->data[type].name, size); | |
952 | if (size >= sizeof(const char **)) | |
953 | *((const char **)dst) = info->data[type].name; | |
954 | return sizeof(const char **); | |
955 | } | |
956 | ||
957 | /** Helper function to convert flow API objects. */ | |
958 | int | |
959 | rte_flow_conv(enum rte_flow_conv_op op, | |
960 | void *dst, | |
961 | size_t size, | |
962 | const void *src, | |
963 | struct rte_flow_error *error) | |
964 | { | |
965 | switch (op) { | |
966 | const struct rte_flow_attr *attr; | |
967 | ||
968 | case RTE_FLOW_CONV_OP_NONE: | |
969 | return 0; | |
970 | case RTE_FLOW_CONV_OP_ATTR: | |
971 | attr = src; | |
972 | if (size > sizeof(*attr)) | |
973 | size = sizeof(*attr); | |
974 | rte_memcpy(dst, attr, size); | |
975 | return sizeof(*attr); | |
976 | case RTE_FLOW_CONV_OP_ITEM: | |
977 | return rte_flow_conv_pattern(dst, size, src, 1, error); | |
978 | case RTE_FLOW_CONV_OP_ACTION: | |
979 | return rte_flow_conv_actions(dst, size, src, 1, error); | |
980 | case RTE_FLOW_CONV_OP_PATTERN: | |
981 | return rte_flow_conv_pattern(dst, size, src, 0, error); | |
982 | case RTE_FLOW_CONV_OP_ACTIONS: | |
983 | return rte_flow_conv_actions(dst, size, src, 0, error); | |
984 | case RTE_FLOW_CONV_OP_RULE: | |
985 | return rte_flow_conv_rule(dst, size, src, error); | |
986 | case RTE_FLOW_CONV_OP_ITEM_NAME: | |
987 | return rte_flow_conv_name(0, 0, dst, size, src, error); | |
988 | case RTE_FLOW_CONV_OP_ACTION_NAME: | |
989 | return rte_flow_conv_name(1, 0, dst, size, src, error); | |
990 | case RTE_FLOW_CONV_OP_ITEM_NAME_PTR: | |
991 | return rte_flow_conv_name(0, 1, dst, size, src, error); | |
992 | case RTE_FLOW_CONV_OP_ACTION_NAME_PTR: | |
993 | return rte_flow_conv_name(1, 1, dst, size, src, error); | |
994 | } | |
995 | return rte_flow_error_set | |
996 | (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, | |
997 | "unknown object conversion operation"); | |
11fdf7f2 TL |
998 | } |
999 | ||
/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv with struct rte_flow_desc in order
	 * to convert the former to the latter without wasting space.
	 *
	 * dst is positioned inside desc so that both structures share their
	 * trailing "actions" member; NULL when len is zero (size query).
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	/* Buffer space left for the conv rule after the overlap offset. */
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	/* attr is copied separately below, hence attr_ro left NULL here. */
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	/* Run-time check that the overlap really aligns the shared fields. */
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	/* Account for the descriptor bytes preceding the conv rule. */
	ret += sizeof(*desc) - sizeof(*dst);
	/*
	 * Write the descriptor header last: dst->pattern/dst->actions were
	 * filled in place by rte_flow_conv() and survive this truncated copy.
	 */
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);
	return ret;
}
1051 | ||
1052 | /** | |
1053 | * Expand RSS flows into several possible flows according to the RSS hash | |
1054 | * fields requested and the driver capabilities. | |
1055 | */ | |
f67539c2 | 1056 | int |
11fdf7f2 TL |
1057 | rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size, |
1058 | const struct rte_flow_item *pattern, uint64_t types, | |
1059 | const struct rte_flow_expand_node graph[], | |
1060 | int graph_root_index) | |
1061 | { | |
1062 | const int elt_n = 8; | |
1063 | const struct rte_flow_item *item; | |
1064 | const struct rte_flow_expand_node *node = &graph[graph_root_index]; | |
1065 | const int *next_node; | |
1066 | const int *stack[elt_n]; | |
1067 | int stack_pos = 0; | |
1068 | struct rte_flow_item flow_items[elt_n]; | |
1069 | unsigned int i; | |
1070 | size_t lsize; | |
1071 | size_t user_pattern_size = 0; | |
1072 | void *addr = NULL; | |
f67539c2 TL |
1073 | const struct rte_flow_expand_node *next = NULL; |
1074 | struct rte_flow_item missed_item; | |
1075 | int missed = 0; | |
1076 | int elt = 0; | |
1077 | const struct rte_flow_item *last_item = NULL; | |
11fdf7f2 | 1078 | |
f67539c2 | 1079 | memset(&missed_item, 0, sizeof(missed_item)); |
11fdf7f2 TL |
1080 | lsize = offsetof(struct rte_flow_expand_rss, entry) + |
1081 | elt_n * sizeof(buf->entry[0]); | |
1082 | if (lsize <= size) { | |
1083 | buf->entry[0].priority = 0; | |
1084 | buf->entry[0].pattern = (void *)&buf->entry[elt_n]; | |
1085 | buf->entries = 0; | |
1086 | addr = buf->entry[0].pattern; | |
1087 | } | |
1088 | for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { | |
f67539c2 TL |
1089 | if (item->type != RTE_FLOW_ITEM_TYPE_VOID) |
1090 | last_item = item; | |
11fdf7f2 TL |
1091 | for (i = 0; node->next && node->next[i]; ++i) { |
1092 | next = &graph[node->next[i]]; | |
1093 | if (next->type == item->type) | |
1094 | break; | |
1095 | } | |
1096 | if (next) | |
1097 | node = next; | |
1098 | user_pattern_size += sizeof(*item); | |
1099 | } | |
1100 | user_pattern_size += sizeof(*item); /* Handle END item. */ | |
1101 | lsize += user_pattern_size; | |
1102 | /* Copy the user pattern in the first entry of the buffer. */ | |
1103 | if (lsize <= size) { | |
1104 | rte_memcpy(addr, pattern, user_pattern_size); | |
1105 | addr = (void *)(((uintptr_t)addr) + user_pattern_size); | |
1106 | buf->entries = 1; | |
1107 | } | |
1108 | /* Start expanding. */ | |
1109 | memset(flow_items, 0, sizeof(flow_items)); | |
1110 | user_pattern_size -= sizeof(*item); | |
f67539c2 TL |
1111 | /* |
1112 | * Check if the last valid item has spec set | |
1113 | * and need complete pattern. | |
1114 | */ | |
1115 | missed_item.type = rte_flow_expand_rss_item_complete(last_item); | |
1116 | if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) { | |
1117 | next = NULL; | |
1118 | missed = 1; | |
1119 | for (i = 0; node->next && node->next[i]; ++i) { | |
1120 | next = &graph[node->next[i]]; | |
1121 | if (next->type == missed_item.type) { | |
1122 | flow_items[0].type = missed_item.type; | |
1123 | flow_items[1].type = RTE_FLOW_ITEM_TYPE_END; | |
1124 | break; | |
1125 | } | |
1126 | next = NULL; | |
1127 | } | |
1128 | } | |
1129 | if (next && missed) { | |
1130 | elt = 2; /* missed item + item end. */ | |
1131 | node = next; | |
1132 | lsize += elt * sizeof(*item) + user_pattern_size; | |
1133 | if ((node->rss_types & types) && lsize <= size) { | |
1134 | buf->entry[buf->entries].priority = 1; | |
1135 | buf->entry[buf->entries].pattern = addr; | |
1136 | buf->entries++; | |
1137 | rte_memcpy(addr, buf->entry[0].pattern, | |
1138 | user_pattern_size); | |
1139 | addr = (void *)(((uintptr_t)addr) + user_pattern_size); | |
1140 | rte_memcpy(addr, flow_items, elt * sizeof(*item)); | |
1141 | addr = (void *)(((uintptr_t)addr) + | |
1142 | elt * sizeof(*item)); | |
1143 | } | |
1144 | } | |
1145 | memset(flow_items, 0, sizeof(flow_items)); | |
11fdf7f2 TL |
1146 | next_node = node->next; |
1147 | stack[stack_pos] = next_node; | |
1148 | node = next_node ? &graph[*next_node] : NULL; | |
1149 | while (node) { | |
1150 | flow_items[stack_pos].type = node->type; | |
1151 | if (node->rss_types & types) { | |
1152 | /* | |
1153 | * compute the number of items to copy from the | |
1154 | * expansion and copy it. | |
1155 | * When the stack_pos is 0, there are 1 element in it, | |
1156 | * plus the addition END item. | |
1157 | */ | |
f67539c2 | 1158 | elt = stack_pos + 2; |
11fdf7f2 TL |
1159 | flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END; |
1160 | lsize += elt * sizeof(*item) + user_pattern_size; | |
1161 | if (lsize <= size) { | |
1162 | size_t n = elt * sizeof(*item); | |
1163 | ||
1164 | buf->entry[buf->entries].priority = | |
f67539c2 | 1165 | stack_pos + 1 + missed; |
11fdf7f2 TL |
1166 | buf->entry[buf->entries].pattern = addr; |
1167 | buf->entries++; | |
1168 | rte_memcpy(addr, buf->entry[0].pattern, | |
1169 | user_pattern_size); | |
1170 | addr = (void *)(((uintptr_t)addr) + | |
1171 | user_pattern_size); | |
f67539c2 TL |
1172 | rte_memcpy(addr, &missed_item, |
1173 | missed * sizeof(*item)); | |
1174 | addr = (void *)(((uintptr_t)addr) + | |
1175 | missed * sizeof(*item)); | |
11fdf7f2 TL |
1176 | rte_memcpy(addr, flow_items, n); |
1177 | addr = (void *)(((uintptr_t)addr) + n); | |
1178 | } | |
1179 | } | |
1180 | /* Go deeper. */ | |
1181 | if (node->next) { | |
1182 | next_node = node->next; | |
1183 | if (stack_pos++ == elt_n) { | |
1184 | rte_errno = E2BIG; | |
1185 | return -rte_errno; | |
1186 | } | |
1187 | stack[stack_pos] = next_node; | |
1188 | } else if (*(next_node + 1)) { | |
1189 | /* Follow up with the next possibility. */ | |
1190 | ++next_node; | |
1191 | } else { | |
1192 | /* Move to the next path. */ | |
1193 | if (stack_pos) | |
1194 | next_node = stack[--stack_pos]; | |
1195 | next_node++; | |
1196 | stack[stack_pos] = next_node; | |
1197 | } | |
1198 | node = *next_node ? &graph[*next_node] : NULL; | |
1199 | }; | |
f67539c2 TL |
1200 | /* no expanded flows but we have missed item, create one rule for it */ |
1201 | if (buf->entries == 1 && missed != 0) { | |
1202 | elt = 2; | |
1203 | lsize += elt * sizeof(*item) + user_pattern_size; | |
1204 | if (lsize <= size) { | |
1205 | buf->entry[buf->entries].priority = 1; | |
1206 | buf->entry[buf->entries].pattern = addr; | |
1207 | buf->entries++; | |
1208 | flow_items[0].type = missed_item.type; | |
1209 | flow_items[1].type = RTE_FLOW_ITEM_TYPE_END; | |
1210 | rte_memcpy(addr, buf->entry[0].pattern, | |
1211 | user_pattern_size); | |
1212 | addr = (void *)(((uintptr_t)addr) + user_pattern_size); | |
1213 | rte_memcpy(addr, flow_items, elt * sizeof(*item)); | |
1214 | addr = (void *)(((uintptr_t)addr) + | |
1215 | elt * sizeof(*item)); | |
1216 | } | |
1217 | } | |
11fdf7f2 TL |
1218 | return lsize; |
1219 | } | |
f67539c2 TL |
1220 | |
1221 | int | |
1222 | rte_flow_dev_dump(uint16_t port_id, FILE *file, struct rte_flow_error *error) | |
1223 | { | |
1224 | struct rte_eth_dev *dev = &rte_eth_devices[port_id]; | |
1225 | const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); | |
1226 | ||
1227 | if (unlikely(!ops)) | |
1228 | return -rte_errno; | |
1229 | if (likely(!!ops->dev_dump)) | |
1230 | return flow_err(port_id, ops->dev_dump(dev, file, error), | |
1231 | error); | |
1232 | return rte_flow_error_set(error, ENOSYS, | |
1233 | RTE_FLOW_ERROR_TYPE_UNSPECIFIED, | |
1234 | NULL, rte_strerror(ENOSYS)); | |
1235 | } | |
1236 | ||
1237 | int | |
1238 | rte_flow_get_aged_flows(uint16_t port_id, void **contexts, | |
1239 | uint32_t nb_contexts, struct rte_flow_error *error) | |
1240 | { | |
1241 | struct rte_eth_dev *dev = &rte_eth_devices[port_id]; | |
1242 | const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); | |
1243 | ||
1244 | if (unlikely(!ops)) | |
1245 | return -rte_errno; | |
1246 | if (likely(!!ops->get_aged_flows)) | |
1247 | return flow_err(port_id, ops->get_aged_flows(dev, contexts, | |
1248 | nb_contexts, error), error); | |
1249 | return rte_flow_error_set(error, ENOTSUP, | |
1250 | RTE_FLOW_ERROR_TYPE_UNSPECIFIED, | |
1251 | NULL, rte_strerror(ENOTSUP)); | |
1252 | } |