]> git.proxmox.com Git - mirror_ovs.git/blob - datapath/flow_netlink.c
compat: Use nla_parse deprecated functions
[mirror_ovs.git] / datapath / flow_netlink.c
1 /*
2 * Copyright (c) 2007-2017 Nicira, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
17 */
18
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/uaccess.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/if_ether.h>
25 #include <linux/if_vlan.h>
26 #include <net/llc_pdu.h>
27 #include <linux/kernel.h>
28 #include <linux/jhash.h>
29 #include <linux/jiffies.h>
30 #include <linux/llc.h>
31 #include <linux/module.h>
32 #include <linux/in.h>
33 #include <linux/rcupdate.h>
34 #include <linux/if_arp.h>
35 #include <linux/ip.h>
36 #include <linux/ipv6.h>
37 #include <linux/sctp.h>
38 #include <linux/tcp.h>
39 #include <linux/udp.h>
40 #include <linux/icmp.h>
41 #include <linux/icmpv6.h>
42 #include <linux/rculist.h>
43 #include <net/geneve.h>
44 #include <net/ip.h>
45 #include <net/ipv6.h>
46 #include <net/ndisc.h>
47 #include <net/mpls.h>
48 #include <net/vxlan.h>
49 #include <net/tun_proto.h>
50 #include <net/erspan.h>
51
52 #include "datapath.h"
53 #include "conntrack.h"
54 #include "flow.h"
55 #include "flow_netlink.h"
56 #include "gso.h"
57
58 struct ovs_len_tbl {
59 int len;
60 const struct ovs_len_tbl *next;
61 };
62
63 #define OVS_ATTR_NESTED -1
64 #define OVS_ATTR_VARIABLE -2
65
66 static bool actions_may_change_flow(const struct nlattr *actions)
67 {
68 struct nlattr *nla;
69 int rem;
70
71 nla_for_each_nested(nla, actions, rem) {
72 u16 action = nla_type(nla);
73
74 switch (action) {
75 case OVS_ACTION_ATTR_OUTPUT:
76 case OVS_ACTION_ATTR_RECIRC:
77 case OVS_ACTION_ATTR_TRUNC:
78 case OVS_ACTION_ATTR_USERSPACE:
79 break;
80
81 case OVS_ACTION_ATTR_CT:
82 case OVS_ACTION_ATTR_CT_CLEAR:
83 case OVS_ACTION_ATTR_HASH:
84 case OVS_ACTION_ATTR_POP_ETH:
85 case OVS_ACTION_ATTR_POP_MPLS:
86 case OVS_ACTION_ATTR_POP_NSH:
87 case OVS_ACTION_ATTR_POP_VLAN:
88 case OVS_ACTION_ATTR_PUSH_ETH:
89 case OVS_ACTION_ATTR_PUSH_MPLS:
90 case OVS_ACTION_ATTR_PUSH_NSH:
91 case OVS_ACTION_ATTR_PUSH_VLAN:
92 case OVS_ACTION_ATTR_SAMPLE:
93 case OVS_ACTION_ATTR_SET:
94 case OVS_ACTION_ATTR_SET_MASKED:
95 case OVS_ACTION_ATTR_METER:
96 case OVS_ACTION_ATTR_CHECK_PKT_LEN:
97 default:
98 return true;
99 }
100 }
101 return false;
102 }
103
104 static void update_range(struct sw_flow_match *match,
105 size_t offset, size_t size, bool is_mask)
106 {
107 struct sw_flow_key_range *range;
108 size_t start = rounddown(offset, sizeof(long));
109 size_t end = roundup(offset + size, sizeof(long));
110
111 if (!is_mask)
112 range = &match->range;
113 else
114 range = &match->mask->range;
115
116 if (range->start == range->end) {
117 range->start = start;
118 range->end = end;
119 return;
120 }
121
122 if (range->start > start)
123 range->start = start;
124
125 if (range->end < end)
126 range->end = end;
127 }
128
129 #define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
130 do { \
131 update_range(match, offsetof(struct sw_flow_key, field), \
132 sizeof((match)->key->field), is_mask); \
133 if (is_mask) \
134 (match)->mask->key.field = value; \
135 else \
136 (match)->key->field = value; \
137 } while (0)
138
139 #define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask) \
140 do { \
141 update_range(match, offset, len, is_mask); \
142 if (is_mask) \
143 memcpy((u8 *)&(match)->mask->key + offset, value_p, len);\
144 else \
145 memcpy((u8 *)(match)->key + offset, value_p, len); \
146 } while (0)
147
148 #define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \
149 SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \
150 value_p, len, is_mask)
151
152 #define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \
153 do { \
154 update_range(match, offsetof(struct sw_flow_key, field), \
155 sizeof((match)->key->field), is_mask); \
156 if (is_mask) \
157 memset((u8 *)&(match)->mask->key.field, value, \
158 sizeof((match)->mask->key.field)); \
159 else \
160 memset((u8 *)&(match)->key->field, value, \
161 sizeof((match)->key->field)); \
162 } while (0)
163
164 static bool match_validate(const struct sw_flow_match *match,
165 u64 key_attrs, u64 mask_attrs, bool log)
166 {
167 u64 key_expected = 0;
168 u64 mask_allowed = key_attrs; /* At most allow all key attributes */
169
170 /* The following mask attributes allowed only if they
171 * pass the validation tests.
172 */
173 mask_allowed &= ~((1ULL << OVS_KEY_ATTR_IPV4)
174 | (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)
175 | (1ULL << OVS_KEY_ATTR_IPV6)
176 | (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)
177 | (1ULL << OVS_KEY_ATTR_TCP)
178 | (1ULL << OVS_KEY_ATTR_TCP_FLAGS)
179 | (1ULL << OVS_KEY_ATTR_UDP)
180 | (1ULL << OVS_KEY_ATTR_SCTP)
181 | (1ULL << OVS_KEY_ATTR_ICMP)
182 | (1ULL << OVS_KEY_ATTR_ICMPV6)
183 | (1ULL << OVS_KEY_ATTR_ARP)
184 | (1ULL << OVS_KEY_ATTR_ND)
185 | (1ULL << OVS_KEY_ATTR_MPLS)
186 | (1ULL << OVS_KEY_ATTR_NSH));
187
188 /* Always allowed mask fields. */
189 mask_allowed |= ((1ULL << OVS_KEY_ATTR_TUNNEL)
190 | (1ULL << OVS_KEY_ATTR_IN_PORT)
191 | (1ULL << OVS_KEY_ATTR_ETHERTYPE));
192
193 /* Check key attributes. */
194 if (match->key->eth.type == htons(ETH_P_ARP)
195 || match->key->eth.type == htons(ETH_P_RARP)) {
196 key_expected |= 1ULL << OVS_KEY_ATTR_ARP;
197 if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
198 mask_allowed |= 1ULL << OVS_KEY_ATTR_ARP;
199 }
200
201 if (eth_p_mpls(match->key->eth.type)) {
202 key_expected |= 1ULL << OVS_KEY_ATTR_MPLS;
203 if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
204 mask_allowed |= 1ULL << OVS_KEY_ATTR_MPLS;
205 }
206
207 if (match->key->eth.type == htons(ETH_P_IP)) {
208 key_expected |= 1ULL << OVS_KEY_ATTR_IPV4;
209 if (match->mask && match->mask->key.eth.type == htons(0xffff)) {
210 mask_allowed |= 1ULL << OVS_KEY_ATTR_IPV4;
211 mask_allowed |= 1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4;
212 }
213
214 if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
215 if (match->key->ip.proto == IPPROTO_UDP) {
216 key_expected |= 1ULL << OVS_KEY_ATTR_UDP;
217 if (match->mask && (match->mask->key.ip.proto == 0xff))
218 mask_allowed |= 1ULL << OVS_KEY_ATTR_UDP;
219 }
220
221 if (match->key->ip.proto == IPPROTO_SCTP) {
222 key_expected |= 1ULL << OVS_KEY_ATTR_SCTP;
223 if (match->mask && (match->mask->key.ip.proto == 0xff))
224 mask_allowed |= 1ULL << OVS_KEY_ATTR_SCTP;
225 }
226
227 if (match->key->ip.proto == IPPROTO_TCP) {
228 key_expected |= 1ULL << OVS_KEY_ATTR_TCP;
229 key_expected |= 1ULL << OVS_KEY_ATTR_TCP_FLAGS;
230 if (match->mask && (match->mask->key.ip.proto == 0xff)) {
231 mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP;
232 mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP_FLAGS;
233 }
234 }
235
236 if (match->key->ip.proto == IPPROTO_ICMP) {
237 key_expected |= 1ULL << OVS_KEY_ATTR_ICMP;
238 if (match->mask && (match->mask->key.ip.proto == 0xff))
239 mask_allowed |= 1ULL << OVS_KEY_ATTR_ICMP;
240 }
241 }
242 }
243
244 if (match->key->eth.type == htons(ETH_P_IPV6)) {
245 key_expected |= 1ULL << OVS_KEY_ATTR_IPV6;
246 if (match->mask && match->mask->key.eth.type == htons(0xffff)) {
247 mask_allowed |= 1ULL << OVS_KEY_ATTR_IPV6;
248 mask_allowed |= 1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6;
249 }
250
251 if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
252 if (match->key->ip.proto == IPPROTO_UDP) {
253 key_expected |= 1ULL << OVS_KEY_ATTR_UDP;
254 if (match->mask && (match->mask->key.ip.proto == 0xff))
255 mask_allowed |= 1ULL << OVS_KEY_ATTR_UDP;
256 }
257
258 if (match->key->ip.proto == IPPROTO_SCTP) {
259 key_expected |= 1ULL << OVS_KEY_ATTR_SCTP;
260 if (match->mask && (match->mask->key.ip.proto == 0xff))
261 mask_allowed |= 1ULL << OVS_KEY_ATTR_SCTP;
262 }
263
264 if (match->key->ip.proto == IPPROTO_TCP) {
265 key_expected |= 1ULL << OVS_KEY_ATTR_TCP;
266 key_expected |= 1ULL << OVS_KEY_ATTR_TCP_FLAGS;
267 if (match->mask && (match->mask->key.ip.proto == 0xff)) {
268 mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP;
269 mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP_FLAGS;
270 }
271 }
272
273 if (match->key->ip.proto == IPPROTO_ICMPV6) {
274 key_expected |= 1ULL << OVS_KEY_ATTR_ICMPV6;
275 if (match->mask && (match->mask->key.ip.proto == 0xff))
276 mask_allowed |= 1ULL << OVS_KEY_ATTR_ICMPV6;
277
278 if (match->key->tp.src ==
279 htons(NDISC_NEIGHBOUR_SOLICITATION) ||
280 match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
281 key_expected |= 1ULL << OVS_KEY_ATTR_ND;
282 /* Original direction conntrack tuple
283 * uses the same space as the ND fields
284 * in the key, so both are not allowed
285 * at the same time.
286 */
287 mask_allowed &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
288 if (match->mask && (match->mask->key.tp.src == htons(0xff)))
289 mask_allowed |= 1ULL << OVS_KEY_ATTR_ND;
290 }
291 }
292 }
293 }
294
295 if (match->key->eth.type == htons(ETH_P_NSH)) {
296 key_expected |= 1 << OVS_KEY_ATTR_NSH;
297 if (match->mask &&
298 match->mask->key.eth.type == htons(0xffff)) {
299 mask_allowed |= 1 << OVS_KEY_ATTR_NSH;
300 }
301 }
302
303 if ((key_attrs & key_expected) != key_expected) {
304 /* Key attributes check failed. */
305 OVS_NLERR(log, "Missing key (keys=%llx, expected=%llx)",
306 (unsigned long long)key_attrs,
307 (unsigned long long)key_expected);
308 return false;
309 }
310
311 if ((mask_attrs & mask_allowed) != mask_attrs) {
312 /* Mask attributes check failed. */
313 OVS_NLERR(log, "Unexpected mask (mask=%llx, allowed=%llx)",
314 (unsigned long long)mask_attrs,
315 (unsigned long long)mask_allowed);
316 return false;
317 }
318
319 return true;
320 }
321
322 size_t ovs_tun_key_attr_size(void)
323 {
324 /* Whenever adding new OVS_TUNNEL_KEY_ FIELDS, we should consider
325 * updating this function.
326 */
327 return nla_total_size_64bit(8) /* OVS_TUNNEL_KEY_ATTR_ID */
328 + nla_total_size(16) /* OVS_TUNNEL_KEY_ATTR_IPV[46]_SRC */
329 + nla_total_size(16) /* OVS_TUNNEL_KEY_ATTR_IPV[46]_DST */
330 + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TOS */
331 + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */
332 + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
333 + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */
334 + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_OAM */
335 + nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
336 /* OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS and
337 * OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS is mutually exclusive with
338 * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it.
339 */
340 + nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
341 + nla_total_size(2); /* OVS_TUNNEL_KEY_ATTR_TP_DST */
342 }
343
344 static size_t ovs_nsh_key_attr_size(void)
345 {
346 /* Whenever adding new OVS_NSH_KEY_ FIELDS, we should consider
347 * updating this function.
348 */
349 return nla_total_size(NSH_BASE_HDR_LEN) /* OVS_NSH_KEY_ATTR_BASE */
350 /* OVS_NSH_KEY_ATTR_MD1 and OVS_NSH_KEY_ATTR_MD2 are
351 * mutually exclusive, so the bigger one can cover
352 * the small one.
353 */
354 + nla_total_size(NSH_CTX_HDRS_MAX_LEN);
355 }
356
357 size_t ovs_key_attr_size(void)
358 {
359 /* Whenever adding new OVS_KEY_ FIELDS, we should consider
360 * updating this function.
361 */
362 BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 29);
363
364 return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */
365 + nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */
366 + ovs_tun_key_attr_size()
367 + nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */
368 + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */
369 + nla_total_size(4) /* OVS_KEY_ATTR_DP_HASH */
370 + nla_total_size(4) /* OVS_KEY_ATTR_RECIRC_ID */
371 + nla_total_size(4) /* OVS_KEY_ATTR_CT_STATE */
372 + nla_total_size(2) /* OVS_KEY_ATTR_CT_ZONE */
373 + nla_total_size(4) /* OVS_KEY_ATTR_CT_MARK */
374 + nla_total_size(16) /* OVS_KEY_ATTR_CT_LABELS */
375 + nla_total_size(40) /* OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6 */
376 + nla_total_size(0) /* OVS_KEY_ATTR_NSH */
377 + ovs_nsh_key_attr_size()
378 + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */
379 + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */
380 + nla_total_size(4) /* OVS_KEY_ATTR_VLAN */
381 + nla_total_size(0) /* OVS_KEY_ATTR_ENCAP */
382 + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */
383 + nla_total_size(40) /* OVS_KEY_ATTR_IPV6 */
384 + nla_total_size(2) /* OVS_KEY_ATTR_ICMPV6 */
385 + nla_total_size(28); /* OVS_KEY_ATTR_ND */
386 }
387
388 static const struct ovs_len_tbl ovs_vxlan_ext_key_lens[OVS_VXLAN_EXT_MAX + 1] = {
389 [OVS_VXLAN_EXT_GBP] = { .len = sizeof(u32) },
390 };
391
392 static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
393 [OVS_TUNNEL_KEY_ATTR_ID] = { .len = sizeof(u64) },
394 [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = sizeof(u32) },
395 [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = { .len = sizeof(u32) },
396 [OVS_TUNNEL_KEY_ATTR_TOS] = { .len = 1 },
397 [OVS_TUNNEL_KEY_ATTR_TTL] = { .len = 1 },
398 [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
399 [OVS_TUNNEL_KEY_ATTR_CSUM] = { .len = 0 },
400 [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = sizeof(u16) },
401 [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = sizeof(u16) },
402 [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 },
403 [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = OVS_ATTR_VARIABLE },
404 [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = OVS_ATTR_NESTED,
405 .next = ovs_vxlan_ext_key_lens },
406 [OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
407 [OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = sizeof(struct in6_addr) },
408 [OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS] = { .len = OVS_ATTR_VARIABLE },
409 };
410
411 static const struct ovs_len_tbl
412 ovs_nsh_key_attr_lens[OVS_NSH_KEY_ATTR_MAX + 1] = {
413 [OVS_NSH_KEY_ATTR_BASE] = { .len = sizeof(struct ovs_nsh_key_base) },
414 [OVS_NSH_KEY_ATTR_MD1] = { .len = sizeof(struct ovs_nsh_key_md1) },
415 [OVS_NSH_KEY_ATTR_MD2] = { .len = OVS_ATTR_VARIABLE },
416 };
417
418 /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
419 static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
420 [OVS_KEY_ATTR_ENCAP] = { .len = OVS_ATTR_NESTED },
421 [OVS_KEY_ATTR_PRIORITY] = { .len = sizeof(u32) },
422 [OVS_KEY_ATTR_IN_PORT] = { .len = sizeof(u32) },
423 [OVS_KEY_ATTR_SKB_MARK] = { .len = sizeof(u32) },
424 [OVS_KEY_ATTR_ETHERNET] = { .len = sizeof(struct ovs_key_ethernet) },
425 [OVS_KEY_ATTR_VLAN] = { .len = sizeof(__be16) },
426 [OVS_KEY_ATTR_ETHERTYPE] = { .len = sizeof(__be16) },
427 [OVS_KEY_ATTR_IPV4] = { .len = sizeof(struct ovs_key_ipv4) },
428 [OVS_KEY_ATTR_IPV6] = { .len = sizeof(struct ovs_key_ipv6) },
429 [OVS_KEY_ATTR_TCP] = { .len = sizeof(struct ovs_key_tcp) },
430 [OVS_KEY_ATTR_TCP_FLAGS] = { .len = sizeof(__be16) },
431 [OVS_KEY_ATTR_UDP] = { .len = sizeof(struct ovs_key_udp) },
432 [OVS_KEY_ATTR_SCTP] = { .len = sizeof(struct ovs_key_sctp) },
433 [OVS_KEY_ATTR_ICMP] = { .len = sizeof(struct ovs_key_icmp) },
434 [OVS_KEY_ATTR_ICMPV6] = { .len = sizeof(struct ovs_key_icmpv6) },
435 [OVS_KEY_ATTR_ARP] = { .len = sizeof(struct ovs_key_arp) },
436 [OVS_KEY_ATTR_ND] = { .len = sizeof(struct ovs_key_nd) },
437 [OVS_KEY_ATTR_RECIRC_ID] = { .len = sizeof(u32) },
438 [OVS_KEY_ATTR_DP_HASH] = { .len = sizeof(u32) },
439 [OVS_KEY_ATTR_TUNNEL] = { .len = OVS_ATTR_NESTED,
440 .next = ovs_tunnel_key_lens, },
441 [OVS_KEY_ATTR_MPLS] = { .len = OVS_ATTR_VARIABLE },
442 [OVS_KEY_ATTR_CT_STATE] = { .len = sizeof(u32) },
443 [OVS_KEY_ATTR_CT_ZONE] = { .len = sizeof(u16) },
444 [OVS_KEY_ATTR_CT_MARK] = { .len = sizeof(u32) },
445 [OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
446 [OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4] = {
447 .len = sizeof(struct ovs_key_ct_tuple_ipv4) },
448 [OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6] = {
449 .len = sizeof(struct ovs_key_ct_tuple_ipv6) },
450 [OVS_KEY_ATTR_NSH] = { .len = OVS_ATTR_NESTED,
451 .next = ovs_nsh_key_attr_lens, },
452 };
453
454 static bool check_attr_len(unsigned int attr_len, unsigned int expected_len)
455 {
456 return expected_len == attr_len ||
457 expected_len == OVS_ATTR_NESTED ||
458 expected_len == OVS_ATTR_VARIABLE;
459 }
460
461 static bool is_all_zero(const u8 *fp, size_t size)
462 {
463 int i;
464
465 if (!fp)
466 return false;
467
468 for (i = 0; i < size; i++)
469 if (fp[i])
470 return false;
471
472 return true;
473 }
474
475 static int __parse_flow_nlattrs(const struct nlattr *attr,
476 const struct nlattr *a[],
477 u64 *attrsp, bool log, bool nz)
478 {
479 const struct nlattr *nla;
480 u64 attrs;
481 int rem;
482
483 attrs = *attrsp;
484 nla_for_each_nested(nla, attr, rem) {
485 u16 type = nla_type(nla);
486 int expected_len;
487
488 if (type > OVS_KEY_ATTR_MAX) {
489 OVS_NLERR(log, "Key type %d is out of range max %d",
490 type, OVS_KEY_ATTR_MAX);
491 return -EINVAL;
492 }
493
494 if (attrs & (1ULL << type)) {
495 OVS_NLERR(log, "Duplicate key (type %d).", type);
496 return -EINVAL;
497 }
498
499 expected_len = ovs_key_lens[type].len;
500 if (!check_attr_len(nla_len(nla), expected_len)) {
501 OVS_NLERR(log, "Key %d has unexpected len %d expected %d",
502 type, nla_len(nla), expected_len);
503 return -EINVAL;
504 }
505
506 if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
507 attrs |= 1ULL << type;
508 a[type] = nla;
509 }
510 }
511 if (rem) {
512 OVS_NLERR(log, "Message has %d unknown bytes.", rem);
513 return -EINVAL;
514 }
515
516 *attrsp = attrs;
517 return 0;
518 }
519
520 static int parse_flow_mask_nlattrs(const struct nlattr *attr,
521 const struct nlattr *a[], u64 *attrsp,
522 bool log)
523 {
524 return __parse_flow_nlattrs(attr, a, attrsp, log, true);
525 }
526
527 int parse_flow_nlattrs(const struct nlattr *attr, const struct nlattr *a[],
528 u64 *attrsp, bool log)
529 {
530 return __parse_flow_nlattrs(attr, a, attrsp, log, false);
531 }
532
533 static int genev_tun_opt_from_nlattr(const struct nlattr *a,
534 struct sw_flow_match *match, bool is_mask,
535 bool log)
536 {
537 unsigned long opt_key_offset;
538
539 if (nla_len(a) > sizeof(match->key->tun_opts)) {
540 OVS_NLERR(log, "Geneve option length err (len %d, max %zu).",
541 nla_len(a), sizeof(match->key->tun_opts));
542 return -EINVAL;
543 }
544
545 if (nla_len(a) % 4 != 0) {
546 OVS_NLERR(log, "Geneve opt len %d is not a multiple of 4.",
547 nla_len(a));
548 return -EINVAL;
549 }
550
551 /* We need to record the length of the options passed
552 * down, otherwise packets with the same format but
553 * additional options will be silently matched.
554 */
555 if (!is_mask) {
556 SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a),
557 false);
558 } else {
559 /* This is somewhat unusual because it looks at
560 * both the key and mask while parsing the
561 * attributes (and by extension assumes the key
562 * is parsed first). Normally, we would verify
563 * that each is the correct length and that the
564 * attributes line up in the validate function.
565 * However, that is difficult because this is
566 * variable length and we won't have the
567 * information later.
568 */
569 if (match->key->tun_opts_len != nla_len(a)) {
570 OVS_NLERR(log, "Geneve option len %d != mask len %d",
571 match->key->tun_opts_len, nla_len(a));
572 return -EINVAL;
573 }
574
575 SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
576 }
577
578 opt_key_offset = TUN_METADATA_OFFSET(nla_len(a));
579 SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
580 nla_len(a), is_mask);
581 return 0;
582 }
583
584 static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
585 struct sw_flow_match *match, bool is_mask,
586 bool log)
587 {
588 struct nlattr *a;
589 int rem;
590 unsigned long opt_key_offset;
591 struct vxlan_metadata opts;
592
593 BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
594
595 memset(&opts, 0, sizeof(opts));
596 nla_for_each_nested(a, attr, rem) {
597 int type = nla_type(a);
598
599 if (type > OVS_VXLAN_EXT_MAX) {
600 OVS_NLERR(log, "VXLAN extension %d out of range max %d",
601 type, OVS_VXLAN_EXT_MAX);
602 return -EINVAL;
603 }
604
605 if (!check_attr_len(nla_len(a),
606 ovs_vxlan_ext_key_lens[type].len)) {
607 OVS_NLERR(log, "VXLAN extension %d has unexpected len %d expected %d",
608 type, nla_len(a),
609 ovs_vxlan_ext_key_lens[type].len);
610 return -EINVAL;
611 }
612
613 switch (type) {
614 case OVS_VXLAN_EXT_GBP:
615 opts.gbp = nla_get_u32(a);
616 break;
617 default:
618 OVS_NLERR(log, "Unknown VXLAN extension attribute %d",
619 type);
620 return -EINVAL;
621 }
622 }
623 if (rem) {
624 OVS_NLERR(log, "VXLAN extension message has %d unknown bytes.",
625 rem);
626 return -EINVAL;
627 }
628
629 if (!is_mask)
630 SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false);
631 else
632 SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
633
634 opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
635 SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
636 is_mask);
637 return 0;
638 }
639
640 static int erspan_tun_opt_from_nlattr(const struct nlattr *a,
641 struct sw_flow_match *match, bool is_mask,
642 bool log)
643 {
644 unsigned long opt_key_offset;
645
646 BUILD_BUG_ON(sizeof(struct erspan_metadata) >
647 sizeof(match->key->tun_opts));
648
649 if (nla_len(a) > sizeof(match->key->tun_opts)) {
650 OVS_NLERR(log, "ERSPAN option length err (len %d, max %zu).",
651 nla_len(a), sizeof(match->key->tun_opts));
652 return -EINVAL;
653 }
654
655 if (!is_mask)
656 SW_FLOW_KEY_PUT(match, tun_opts_len,
657 sizeof(struct erspan_metadata), false);
658 else
659 SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
660
661 opt_key_offset = TUN_METADATA_OFFSET(nla_len(a));
662 SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
663 nla_len(a), is_mask);
664 return 0;
665 }
666
667 static int ip_tun_from_nlattr(const struct nlattr *attr,
668 struct sw_flow_match *match, bool is_mask,
669 bool log)
670 {
671 bool ttl = false, ipv4 = false, ipv6 = false;
672 __be16 tun_flags = 0;
673 int opts_type = 0;
674 struct nlattr *a;
675 int rem;
676
677 nla_for_each_nested(a, attr, rem) {
678 int type = nla_type(a);
679 int err;
680
681 if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
682 OVS_NLERR(log, "Tunnel attr %d out of range max %d",
683 type, OVS_TUNNEL_KEY_ATTR_MAX);
684 return -EINVAL;
685 }
686
687 if (!check_attr_len(nla_len(a),
688 ovs_tunnel_key_lens[type].len)) {
689 OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d",
690 type, nla_len(a), ovs_tunnel_key_lens[type].len);
691 return -EINVAL;
692 }
693
694 switch (type) {
695 case OVS_TUNNEL_KEY_ATTR_ID:
696 SW_FLOW_KEY_PUT(match, tun_key.tun_id,
697 nla_get_be64(a), is_mask);
698 tun_flags |= TUNNEL_KEY;
699 break;
700 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
701 SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
702 nla_get_in_addr(a), is_mask);
703 ipv4 = true;
704 break;
705 case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
706 SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
707 nla_get_in_addr(a), is_mask);
708 ipv4 = true;
709 break;
710 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
711 SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
712 nla_get_in6_addr(a), is_mask);
713 ipv6 = true;
714 break;
715 case OVS_TUNNEL_KEY_ATTR_IPV6_DST:
716 SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
717 nla_get_in6_addr(a), is_mask);
718 ipv6 = true;
719 break;
720 case OVS_TUNNEL_KEY_ATTR_TOS:
721 SW_FLOW_KEY_PUT(match, tun_key.tos,
722 nla_get_u8(a), is_mask);
723 break;
724 case OVS_TUNNEL_KEY_ATTR_TTL:
725 SW_FLOW_KEY_PUT(match, tun_key.ttl,
726 nla_get_u8(a), is_mask);
727 ttl = true;
728 break;
729 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
730 tun_flags |= TUNNEL_DONT_FRAGMENT;
731 break;
732 case OVS_TUNNEL_KEY_ATTR_CSUM:
733 tun_flags |= TUNNEL_CSUM;
734 break;
735 case OVS_TUNNEL_KEY_ATTR_TP_SRC:
736 SW_FLOW_KEY_PUT(match, tun_key.tp_src,
737 nla_get_be16(a), is_mask);
738 break;
739 case OVS_TUNNEL_KEY_ATTR_TP_DST:
740 SW_FLOW_KEY_PUT(match, tun_key.tp_dst,
741 nla_get_be16(a), is_mask);
742 break;
743 case OVS_TUNNEL_KEY_ATTR_OAM:
744 tun_flags |= TUNNEL_OAM;
745 break;
746 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
747 if (opts_type) {
748 OVS_NLERR(log, "Multiple metadata blocks provided");
749 return -EINVAL;
750 }
751
752 err = genev_tun_opt_from_nlattr(a, match, is_mask, log);
753 if (err)
754 return err;
755
756 tun_flags |= TUNNEL_GENEVE_OPT;
757 opts_type = type;
758 break;
759 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
760 if (opts_type) {
761 OVS_NLERR(log, "Multiple metadata blocks provided");
762 return -EINVAL;
763 }
764
765 err = vxlan_tun_opt_from_nlattr(a, match, is_mask, log);
766 if (err)
767 return err;
768
769 tun_flags |= TUNNEL_VXLAN_OPT;
770 opts_type = type;
771 break;
772 case OVS_TUNNEL_KEY_ATTR_PAD:
773 break;
774 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
775 if (opts_type) {
776 OVS_NLERR(log, "Multiple metadata blocks provided");
777 return -EINVAL;
778 }
779
780 err = erspan_tun_opt_from_nlattr(a, match, is_mask,
781 log);
782 if (err)
783 return err;
784
785 tun_flags |= TUNNEL_ERSPAN_OPT;
786 opts_type = type;
787 break;
788 default:
789 OVS_NLERR(log, "Unknown IP tunnel attribute %d",
790 type);
791 return -EINVAL;
792 }
793 }
794
795 SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
796 if (is_mask)
797 SW_FLOW_KEY_MEMSET_FIELD(match, tun_proto, 0xff, true);
798 else
799 SW_FLOW_KEY_PUT(match, tun_proto, ipv6 ? AF_INET6 : AF_INET,
800 false);
801
802 if (rem > 0) {
803 OVS_NLERR(log, "IP tunnel attribute has %d unknown bytes.",
804 rem);
805 return -EINVAL;
806 }
807
808 if (ipv4 && ipv6) {
809 OVS_NLERR(log, "Mixed IPv4 and IPv6 tunnel attributes");
810 return -EINVAL;
811 }
812
813 if (!is_mask) {
814 if (!ipv4 && !ipv6) {
815 OVS_NLERR(log, "IP tunnel dst address not specified");
816 return -EINVAL;
817 }
818 if (ipv4 && !match->key->tun_key.u.ipv4.dst) {
819 OVS_NLERR(log, "IPv4 tunnel dst address is zero");
820 return -EINVAL;
821 }
822 if (ipv6 && ipv6_addr_any(&match->key->tun_key.u.ipv6.dst)) {
823 OVS_NLERR(log, "IPv6 tunnel dst address is zero");
824 return -EINVAL;
825 }
826
827 if (!ttl) {
828 OVS_NLERR(log, "IP tunnel TTL not specified.");
829 return -EINVAL;
830 }
831 }
832
833 return opts_type;
834 }
835
836 static int vxlan_opt_to_nlattr(struct sk_buff *skb,
837 const void *tun_opts, int swkey_tun_opts_len)
838 {
839 const struct vxlan_metadata *opts = tun_opts;
840 struct nlattr *nla;
841
842 nla = nla_nest_start_noflag(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
843 if (!nla)
844 return -EMSGSIZE;
845
846 if (nla_put_u32(skb, OVS_VXLAN_EXT_GBP, opts->gbp) < 0)
847 return -EMSGSIZE;
848
849 nla_nest_end(skb, nla);
850 return 0;
851 }
852
853 static int __ip_tun_to_nlattr(struct sk_buff *skb,
854 const struct ip_tunnel_key *output,
855 const void *tun_opts, int swkey_tun_opts_len,
856 unsigned short tun_proto)
857 {
858 if (output->tun_flags & TUNNEL_KEY &&
859 nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id,
860 OVS_TUNNEL_KEY_ATTR_PAD))
861 return -EMSGSIZE;
862 switch (tun_proto) {
863 case AF_INET:
864 if (output->u.ipv4.src &&
865 nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
866 output->u.ipv4.src))
867 return -EMSGSIZE;
868 if (output->u.ipv4.dst &&
869 nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
870 output->u.ipv4.dst))
871 return -EMSGSIZE;
872 break;
873 case AF_INET6:
874 if (!ipv6_addr_any(&output->u.ipv6.src) &&
875 nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_SRC,
876 &output->u.ipv6.src))
877 return -EMSGSIZE;
878 if (!ipv6_addr_any(&output->u.ipv6.dst) &&
879 nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_DST,
880 &output->u.ipv6.dst))
881 return -EMSGSIZE;
882 break;
883 }
884 if (output->tos &&
885 nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
886 return -EMSGSIZE;
887 if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
888 return -EMSGSIZE;
889 if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
890 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
891 return -EMSGSIZE;
892 if ((output->tun_flags & TUNNEL_CSUM) &&
893 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
894 return -EMSGSIZE;
895 if (output->tp_src &&
896 nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_SRC, output->tp_src))
897 return -EMSGSIZE;
898 if (output->tp_dst &&
899 nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst))
900 return -EMSGSIZE;
901 if ((output->tun_flags & TUNNEL_OAM) &&
902 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
903 return -EMSGSIZE;
904 if (swkey_tun_opts_len) {
905 if (output->tun_flags & TUNNEL_GENEVE_OPT &&
906 nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
907 swkey_tun_opts_len, tun_opts))
908 return -EMSGSIZE;
909 else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
910 vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
911 return -EMSGSIZE;
912 else if (output->tun_flags & TUNNEL_ERSPAN_OPT &&
913 nla_put(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
914 swkey_tun_opts_len, tun_opts))
915 return -EMSGSIZE;
916 }
917
918 return 0;
919 }
920
921 static int ip_tun_to_nlattr(struct sk_buff *skb,
922 const struct ip_tunnel_key *output,
923 const void *tun_opts, int swkey_tun_opts_len,
924 unsigned short tun_proto)
925 {
926 struct nlattr *nla;
927 int err;
928
929 nla = nla_nest_start_noflag(skb, OVS_KEY_ATTR_TUNNEL);
930 if (!nla)
931 return -EMSGSIZE;
932
933 err = __ip_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len,
934 tun_proto);
935 if (err)
936 return err;
937
938 nla_nest_end(skb, nla);
939 return 0;
940 }
941
942 int ovs_nla_put_tunnel_info(struct sk_buff *skb,
943 struct ip_tunnel_info *tun_info)
944 {
945 return __ip_tun_to_nlattr(skb, &tun_info->key,
946 ip_tunnel_info_opts(tun_info),
947 tun_info->options_len,
948 ip_tunnel_info_af(tun_info));
949 }
950
951 static int encode_vlan_from_nlattrs(struct sw_flow_match *match,
952 const struct nlattr *a[],
953 bool is_mask, bool inner)
954 {
955 __be16 tci = 0;
956 __be16 tpid = 0;
957
958 if (a[OVS_KEY_ATTR_VLAN])
959 tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
960
961 if (a[OVS_KEY_ATTR_ETHERTYPE])
962 tpid = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
963
964 if (likely(!inner)) {
965 SW_FLOW_KEY_PUT(match, eth.vlan.tpid, tpid, is_mask);
966 SW_FLOW_KEY_PUT(match, eth.vlan.tci, tci, is_mask);
967 } else {
968 SW_FLOW_KEY_PUT(match, eth.cvlan.tpid, tpid, is_mask);
969 SW_FLOW_KEY_PUT(match, eth.cvlan.tci, tci, is_mask);
970 }
971 return 0;
972 }
973
974 static int validate_vlan_from_nlattrs(const struct sw_flow_match *match,
975 u64 key_attrs, bool inner,
976 const struct nlattr **a, bool log)
977 {
978 __be16 tci = 0;
979
980 if (!((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
981 (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
982 eth_type_vlan(nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE])))) {
983 /* Not a VLAN. */
984 return 0;
985 }
986
987 if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
988 (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
989 OVS_NLERR(log, "Invalid %s frame", (inner) ? "C-VLAN" : "VLAN");
990 return -EINVAL;
991 }
992
993 if (a[OVS_KEY_ATTR_VLAN])
994 tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
995
996 if (!(tci & htons(VLAN_CFI_MASK))) {
997 if (tci) {
998 OVS_NLERR(log, "%s TCI does not have VLAN_CFI_MASK bit set.",
999 (inner) ? "C-VLAN" : "VLAN");
1000 return -EINVAL;
1001 } else if (nla_len(a[OVS_KEY_ATTR_ENCAP])) {
1002 /* Corner case for truncated VLAN header. */
1003 OVS_NLERR(log, "Truncated %s header has non-zero encap attribute.",
1004 (inner) ? "C-VLAN" : "VLAN");
1005 return -EINVAL;
1006 }
1007 }
1008
1009 return 1;
1010 }
1011
1012 static int validate_vlan_mask_from_nlattrs(const struct sw_flow_match *match,
1013 u64 key_attrs, bool inner,
1014 const struct nlattr **a, bool log)
1015 {
1016 __be16 tci = 0;
1017 __be16 tpid = 0;
1018 bool encap_valid = !!(match->key->eth.vlan.tci &
1019 htons(VLAN_CFI_MASK));
1020 bool i_encap_valid = !!(match->key->eth.cvlan.tci &
1021 htons(VLAN_CFI_MASK));
1022
1023 if (!(key_attrs & (1 << OVS_KEY_ATTR_ENCAP))) {
1024 /* Not a VLAN. */
1025 return 0;
1026 }
1027
1028 if ((!inner && !encap_valid) || (inner && !i_encap_valid)) {
1029 OVS_NLERR(log, "Encap mask attribute is set for non-%s frame.",
1030 (inner) ? "C-VLAN" : "VLAN");
1031 return -EINVAL;
1032 }
1033
1034 if (a[OVS_KEY_ATTR_VLAN])
1035 tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
1036
1037 if (a[OVS_KEY_ATTR_ETHERTYPE])
1038 tpid = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
1039
1040 if (tpid != htons(0xffff)) {
1041 OVS_NLERR(log, "Must have an exact match on %s TPID (mask=%x).",
1042 (inner) ? "C-VLAN" : "VLAN", ntohs(tpid));
1043 return -EINVAL;
1044 }
1045 if (!(tci & htons(VLAN_CFI_MASK))) {
1046 OVS_NLERR(log, "%s TCI mask does not have exact match for VLAN_CFI_MASK bit.",
1047 (inner) ? "C-VLAN" : "VLAN");
1048 return -EINVAL;
1049 }
1050
1051 return 1;
1052 }
1053
1054 static int __parse_vlan_from_nlattrs(struct sw_flow_match *match,
1055 u64 *key_attrs, bool inner,
1056 const struct nlattr **a, bool is_mask,
1057 bool log)
1058 {
1059 int err;
1060 const struct nlattr *encap;
1061
1062 if (!is_mask)
1063 err = validate_vlan_from_nlattrs(match, *key_attrs, inner,
1064 a, log);
1065 else
1066 err = validate_vlan_mask_from_nlattrs(match, *key_attrs, inner,
1067 a, log);
1068 if (err <= 0)
1069 return err;
1070
1071 err = encode_vlan_from_nlattrs(match, a, is_mask, inner);
1072 if (err)
1073 return err;
1074
1075 *key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
1076 *key_attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
1077 *key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
1078
1079 encap = a[OVS_KEY_ATTR_ENCAP];
1080
1081 if (!is_mask)
1082 err = parse_flow_nlattrs(encap, a, key_attrs, log);
1083 else
1084 err = parse_flow_mask_nlattrs(encap, a, key_attrs, log);
1085
1086 return err;
1087 }
1088
1089 static int parse_vlan_from_nlattrs(struct sw_flow_match *match,
1090 u64 *key_attrs, const struct nlattr **a,
1091 bool is_mask, bool log)
1092 {
1093 int err;
1094 bool encap_valid = false;
1095
1096 err = __parse_vlan_from_nlattrs(match, key_attrs, false, a,
1097 is_mask, log);
1098 if (err)
1099 return err;
1100
1101 encap_valid = !!(match->key->eth.vlan.tci & htons(VLAN_CFI_MASK));
1102 if (encap_valid) {
1103 err = __parse_vlan_from_nlattrs(match, key_attrs, true, a,
1104 is_mask, log);
1105 if (err)
1106 return err;
1107 }
1108
1109 return 0;
1110 }
1111
1112 static int parse_eth_type_from_nlattrs(struct sw_flow_match *match,
1113 u64 *attrs, const struct nlattr **a,
1114 bool is_mask, bool log)
1115 {
1116 __be16 eth_type;
1117
1118 eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
1119 if (is_mask) {
1120 /* Always exact match EtherType. */
1121 eth_type = htons(0xffff);
1122 } else if (!eth_proto_is_802_3(eth_type)) {
1123 OVS_NLERR(log, "EtherType %x is less than min %x",
1124 ntohs(eth_type), ETH_P_802_3_MIN);
1125 return -EINVAL;
1126 }
1127
1128 SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
1129 *attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
1130 return 0;
1131 }
1132
1133 static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
1134 u64 *attrs, const struct nlattr **a,
1135 bool is_mask, bool log)
1136 {
1137 u8 mac_proto = MAC_PROTO_ETHERNET;
1138
1139 if (*attrs & (1ULL << OVS_KEY_ATTR_DP_HASH)) {
1140 u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]);
1141
1142 SW_FLOW_KEY_PUT(match, ovs_flow_hash, hash_val, is_mask);
1143 *attrs &= ~(1ULL << OVS_KEY_ATTR_DP_HASH);
1144 }
1145
1146 if (*attrs & (1ULL << OVS_KEY_ATTR_RECIRC_ID)) {
1147 u32 recirc_id = nla_get_u32(a[OVS_KEY_ATTR_RECIRC_ID]);
1148
1149 SW_FLOW_KEY_PUT(match, recirc_id, recirc_id, is_mask);
1150 *attrs &= ~(1ULL << OVS_KEY_ATTR_RECIRC_ID);
1151 }
1152
1153 if (*attrs & (1ULL << OVS_KEY_ATTR_PRIORITY)) {
1154 SW_FLOW_KEY_PUT(match, phy.priority,
1155 nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
1156 *attrs &= ~(1ULL << OVS_KEY_ATTR_PRIORITY);
1157 }
1158
1159 if (*attrs & (1ULL << OVS_KEY_ATTR_IN_PORT)) {
1160 u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
1161
1162 if (is_mask) {
1163 in_port = 0xffffffff; /* Always exact match in_port. */
1164 } else if (in_port >= DP_MAX_PORTS) {
1165 OVS_NLERR(log, "Port %d exceeds max allowable %d",
1166 in_port, DP_MAX_PORTS);
1167 return -EINVAL;
1168 }
1169
1170 SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
1171 *attrs &= ~(1ULL << OVS_KEY_ATTR_IN_PORT);
1172 } else if (!is_mask) {
1173 SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
1174 }
1175
1176 if (*attrs & (1ULL << OVS_KEY_ATTR_SKB_MARK)) {
1177 uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
1178
1179 SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
1180 *attrs &= ~(1ULL << OVS_KEY_ATTR_SKB_MARK);
1181 }
1182 if (*attrs & (1ULL << OVS_KEY_ATTR_TUNNEL)) {
1183 if (ip_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
1184 is_mask, log) < 0)
1185 return -EINVAL;
1186 *attrs &= ~(1ULL << OVS_KEY_ATTR_TUNNEL);
1187 }
1188
1189 if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) &&
1190 ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
1191 u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]);
1192
1193 if (ct_state & ~CT_SUPPORTED_MASK) {
1194 OVS_NLERR(log, "ct_state flags %08x unsupported",
1195 ct_state);
1196 return -EINVAL;
1197 }
1198
1199 SW_FLOW_KEY_PUT(match, ct_state, ct_state, is_mask);
1200 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE);
1201 }
1202 if (*attrs & (1 << OVS_KEY_ATTR_CT_ZONE) &&
1203 ovs_ct_verify(net, OVS_KEY_ATTR_CT_ZONE)) {
1204 u16 ct_zone = nla_get_u16(a[OVS_KEY_ATTR_CT_ZONE]);
1205
1206 SW_FLOW_KEY_PUT(match, ct_zone, ct_zone, is_mask);
1207 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ZONE);
1208 }
1209 if (*attrs & (1 << OVS_KEY_ATTR_CT_MARK) &&
1210 ovs_ct_verify(net, OVS_KEY_ATTR_CT_MARK)) {
1211 u32 mark = nla_get_u32(a[OVS_KEY_ATTR_CT_MARK]);
1212
1213 SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask);
1214 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK);
1215 }
1216 if (*attrs & (1 << OVS_KEY_ATTR_CT_LABELS) &&
1217 ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABELS)) {
1218 const struct ovs_key_ct_labels *cl;
1219
1220 cl = nla_data(a[OVS_KEY_ATTR_CT_LABELS]);
1221 SW_FLOW_KEY_MEMCPY(match, ct.labels, cl->ct_labels,
1222 sizeof(*cl), is_mask);
1223 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABELS);
1224 }
1225 if (*attrs & (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)) {
1226 const struct ovs_key_ct_tuple_ipv4 *ct;
1227
1228 ct = nla_data(a[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4]);
1229
1230 SW_FLOW_KEY_PUT(match, ipv4.ct_orig.src, ct->ipv4_src, is_mask);
1231 SW_FLOW_KEY_PUT(match, ipv4.ct_orig.dst, ct->ipv4_dst, is_mask);
1232 SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask);
1233 SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask);
1234 SW_FLOW_KEY_PUT(match, ct_orig_proto, ct->ipv4_proto, is_mask);
1235 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4);
1236 }
1237 if (*attrs & (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)) {
1238 const struct ovs_key_ct_tuple_ipv6 *ct;
1239
1240 ct = nla_data(a[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6]);
1241
1242 SW_FLOW_KEY_MEMCPY(match, ipv6.ct_orig.src, &ct->ipv6_src,
1243 sizeof(match->key->ipv6.ct_orig.src),
1244 is_mask);
1245 SW_FLOW_KEY_MEMCPY(match, ipv6.ct_orig.dst, &ct->ipv6_dst,
1246 sizeof(match->key->ipv6.ct_orig.dst),
1247 is_mask);
1248 SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask);
1249 SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask);
1250 SW_FLOW_KEY_PUT(match, ct_orig_proto, ct->ipv6_proto, is_mask);
1251 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
1252 }
1253
1254 /* For layer 3 packets the Ethernet type is provided
1255 * and treated as metadata but no MAC addresses are provided.
1256 */
1257 if (!(*attrs & (1ULL << OVS_KEY_ATTR_ETHERNET)) &&
1258 (*attrs & (1ULL << OVS_KEY_ATTR_ETHERTYPE)))
1259 mac_proto = MAC_PROTO_NONE;
1260
1261 /* Always exact match mac_proto */
1262 SW_FLOW_KEY_PUT(match, mac_proto, is_mask ? 0xff : mac_proto, is_mask);
1263
1264 if (mac_proto == MAC_PROTO_NONE)
1265 return parse_eth_type_from_nlattrs(match, attrs, a, is_mask,
1266 log);
1267
1268 return 0;
1269 }
1270
1271 int nsh_hdr_from_nlattr(const struct nlattr *attr,
1272 struct nshhdr *nh, size_t size)
1273 {
1274 struct nlattr *a;
1275 int rem;
1276 u8 flags = 0;
1277 u8 ttl = 0;
1278 int mdlen = 0;
1279
1280 /* validate_nsh has check this, so we needn't do duplicate check here
1281 */
1282 if (size < NSH_BASE_HDR_LEN)
1283 return -ENOBUFS;
1284
1285 nla_for_each_nested(a, attr, rem) {
1286 int type = nla_type(a);
1287
1288 switch (type) {
1289 case OVS_NSH_KEY_ATTR_BASE: {
1290 const struct ovs_nsh_key_base *base = nla_data(a);
1291
1292 flags = base->flags;
1293 ttl = base->ttl;
1294 nh->np = base->np;
1295 nh->mdtype = base->mdtype;
1296 nh->path_hdr = base->path_hdr;
1297 break;
1298 }
1299 case OVS_NSH_KEY_ATTR_MD1:
1300 mdlen = nla_len(a);
1301 if (mdlen > size - NSH_BASE_HDR_LEN)
1302 return -ENOBUFS;
1303 memcpy(&nh->md1, nla_data(a), mdlen);
1304 break;
1305
1306 case OVS_NSH_KEY_ATTR_MD2:
1307 mdlen = nla_len(a);
1308 if (mdlen > size - NSH_BASE_HDR_LEN)
1309 return -ENOBUFS;
1310 memcpy(&nh->md2, nla_data(a), mdlen);
1311 break;
1312
1313 default:
1314 return -EINVAL;
1315 }
1316 }
1317
1318 /* nsh header length = NSH_BASE_HDR_LEN + mdlen */
1319 nh->ver_flags_ttl_len = 0;
1320 nsh_set_flags_ttl_len(nh, flags, ttl, NSH_BASE_HDR_LEN + mdlen);
1321
1322 return 0;
1323 }
1324
1325 int nsh_key_from_nlattr(const struct nlattr *attr,
1326 struct ovs_key_nsh *nsh, struct ovs_key_nsh *nsh_mask)
1327 {
1328 struct nlattr *a;
1329 int rem;
1330
1331 /* validate_nsh has check this, so we needn't do duplicate check here
1332 */
1333 nla_for_each_nested(a, attr, rem) {
1334 int type = nla_type(a);
1335
1336 switch (type) {
1337 case OVS_NSH_KEY_ATTR_BASE: {
1338 const struct ovs_nsh_key_base *base = nla_data(a);
1339 const struct ovs_nsh_key_base *base_mask = base + 1;
1340
1341 nsh->base = *base;
1342 nsh_mask->base = *base_mask;
1343 break;
1344 }
1345 case OVS_NSH_KEY_ATTR_MD1: {
1346 const struct ovs_nsh_key_md1 *md1 = nla_data(a);
1347 const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;
1348
1349 memcpy(nsh->context, md1->context, sizeof(*md1));
1350 memcpy(nsh_mask->context, md1_mask->context,
1351 sizeof(*md1_mask));
1352 break;
1353 }
1354 case OVS_NSH_KEY_ATTR_MD2:
1355 /* Not supported yet */
1356 return -ENOTSUPP;
1357 default:
1358 return -EINVAL;
1359 }
1360 }
1361
1362 return 0;
1363 }
1364
1365 static int nsh_key_put_from_nlattr(const struct nlattr *attr,
1366 struct sw_flow_match *match, bool is_mask,
1367 bool is_push_nsh, bool log)
1368 {
1369 struct nlattr *a;
1370 int rem;
1371 bool has_base = false;
1372 bool has_md1 = false;
1373 bool has_md2 = false;
1374 u8 mdtype = 0;
1375 int mdlen = 0;
1376
1377 if (WARN_ON(is_push_nsh && is_mask))
1378 return -EINVAL;
1379
1380 nla_for_each_nested(a, attr, rem) {
1381 int type = nla_type(a);
1382 int i;
1383
1384 if (type > OVS_NSH_KEY_ATTR_MAX) {
1385 OVS_NLERR(log, "nsh attr %d is out of range max %d",
1386 type, OVS_NSH_KEY_ATTR_MAX);
1387 return -EINVAL;
1388 }
1389
1390 if (!check_attr_len(nla_len(a),
1391 ovs_nsh_key_attr_lens[type].len)) {
1392 OVS_NLERR(
1393 log,
1394 "nsh attr %d has unexpected len %d expected %d",
1395 type,
1396 nla_len(a),
1397 ovs_nsh_key_attr_lens[type].len
1398 );
1399 return -EINVAL;
1400 }
1401
1402 switch (type) {
1403 case OVS_NSH_KEY_ATTR_BASE: {
1404 const struct ovs_nsh_key_base *base = nla_data(a);
1405
1406 has_base = true;
1407 mdtype = base->mdtype;
1408 SW_FLOW_KEY_PUT(match, nsh.base.flags,
1409 base->flags, is_mask);
1410 SW_FLOW_KEY_PUT(match, nsh.base.ttl,
1411 base->ttl, is_mask);
1412 SW_FLOW_KEY_PUT(match, nsh.base.mdtype,
1413 base->mdtype, is_mask);
1414 SW_FLOW_KEY_PUT(match, nsh.base.np,
1415 base->np, is_mask);
1416 SW_FLOW_KEY_PUT(match, nsh.base.path_hdr,
1417 base->path_hdr, is_mask);
1418 break;
1419 }
1420 case OVS_NSH_KEY_ATTR_MD1: {
1421 const struct ovs_nsh_key_md1 *md1 = nla_data(a);
1422
1423 has_md1 = true;
1424 for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++)
1425 SW_FLOW_KEY_PUT(match, nsh.context[i],
1426 md1->context[i], is_mask);
1427 break;
1428 }
1429 case OVS_NSH_KEY_ATTR_MD2:
1430 if (!is_push_nsh) /* Not supported MD type 2 yet */
1431 return -ENOTSUPP;
1432
1433 has_md2 = true;
1434 mdlen = nla_len(a);
1435 if (mdlen > NSH_CTX_HDRS_MAX_LEN || mdlen <= 0) {
1436 OVS_NLERR(
1437 log,
1438 "Invalid MD length %d for MD type %d",
1439 mdlen,
1440 mdtype
1441 );
1442 return -EINVAL;
1443 }
1444 break;
1445 default:
1446 OVS_NLERR(log, "Unknown nsh attribute %d",
1447 type);
1448 return -EINVAL;
1449 }
1450 }
1451
1452 if (rem > 0) {
1453 OVS_NLERR(log, "nsh attribute has %d unknown bytes.", rem);
1454 return -EINVAL;
1455 }
1456
1457 if (has_md1 && has_md2) {
1458 OVS_NLERR(
1459 1,
1460 "invalid nsh attribute: md1 and md2 are exclusive."
1461 );
1462 return -EINVAL;
1463 }
1464
1465 if (!is_mask) {
1466 if ((has_md1 && mdtype != NSH_M_TYPE1) ||
1467 (has_md2 && mdtype != NSH_M_TYPE2)) {
1468 OVS_NLERR(1, "nsh attribute has unmatched MD type %d.",
1469 mdtype);
1470 return -EINVAL;
1471 }
1472
1473 if (is_push_nsh &&
1474 (!has_base || (!has_md1 && !has_md2))) {
1475 OVS_NLERR(
1476 1,
1477 "push_nsh: missing base or metadata attributes"
1478 );
1479 return -EINVAL;
1480 }
1481 }
1482
1483 return 0;
1484 }
1485
1486 static int ovs_key_from_nlattrs(struct net *net, struct sw_flow_match *match,
1487 u64 attrs, const struct nlattr **a,
1488 bool is_mask, bool log)
1489 {
1490 int err;
1491
1492 err = metadata_from_nlattrs(net, match, &attrs, a, is_mask, log);
1493 if (err)
1494 return err;
1495
1496 if (attrs & (1ULL << OVS_KEY_ATTR_ETHERNET)) {
1497 const struct ovs_key_ethernet *eth_key;
1498
1499 eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
1500 SW_FLOW_KEY_MEMCPY(match, eth.src,
1501 eth_key->eth_src, ETH_ALEN, is_mask);
1502 SW_FLOW_KEY_MEMCPY(match, eth.dst,
1503 eth_key->eth_dst, ETH_ALEN, is_mask);
1504 attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERNET);
1505
1506 if (attrs & (1ULL << OVS_KEY_ATTR_VLAN)) {
1507 /* VLAN attribute is always parsed before getting here since it
1508 * may occur multiple times.
1509 */
1510 OVS_NLERR(log, "VLAN attribute unexpected.");
1511 return -EINVAL;
1512 }
1513
1514 if (attrs & (1ULL << OVS_KEY_ATTR_ETHERTYPE)) {
1515 err = parse_eth_type_from_nlattrs(match, &attrs, a, is_mask,
1516 log);
1517 if (err)
1518 return err;
1519 } else if (!is_mask) {
1520 SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
1521 }
1522 } else if (!match->key->eth.type) {
1523 OVS_NLERR(log, "Either Ethernet header or EtherType is required.");
1524 return -EINVAL;
1525 }
1526
1527 if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
1528 const struct ovs_key_ipv4 *ipv4_key;
1529
1530 ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
1531 if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
1532 OVS_NLERR(log, "IPv4 frag type %d is out of range max %d",
1533 ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
1534 return -EINVAL;
1535 }
1536 SW_FLOW_KEY_PUT(match, ip.proto,
1537 ipv4_key->ipv4_proto, is_mask);
1538 SW_FLOW_KEY_PUT(match, ip.tos,
1539 ipv4_key->ipv4_tos, is_mask);
1540 SW_FLOW_KEY_PUT(match, ip.ttl,
1541 ipv4_key->ipv4_ttl, is_mask);
1542 SW_FLOW_KEY_PUT(match, ip.frag,
1543 ipv4_key->ipv4_frag, is_mask);
1544 SW_FLOW_KEY_PUT(match, ipv4.addr.src,
1545 ipv4_key->ipv4_src, is_mask);
1546 SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
1547 ipv4_key->ipv4_dst, is_mask);
1548 attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
1549 }
1550
1551 if (attrs & (1ULL << OVS_KEY_ATTR_IPV6)) {
1552 const struct ovs_key_ipv6 *ipv6_key;
1553
1554 ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
1555 if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
1556 OVS_NLERR(log, "IPv6 frag type %d is out of range max %d",
1557 ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
1558 return -EINVAL;
1559 }
1560
1561 if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) {
1562 OVS_NLERR(log, "IPv6 flow label %x is out of range (max=%x)",
1563 ntohl(ipv6_key->ipv6_label), (1 << 20) - 1);
1564 return -EINVAL;
1565 }
1566
1567 SW_FLOW_KEY_PUT(match, ipv6.label,
1568 ipv6_key->ipv6_label, is_mask);
1569 SW_FLOW_KEY_PUT(match, ip.proto,
1570 ipv6_key->ipv6_proto, is_mask);
1571 SW_FLOW_KEY_PUT(match, ip.tos,
1572 ipv6_key->ipv6_tclass, is_mask);
1573 SW_FLOW_KEY_PUT(match, ip.ttl,
1574 ipv6_key->ipv6_hlimit, is_mask);
1575 SW_FLOW_KEY_PUT(match, ip.frag,
1576 ipv6_key->ipv6_frag, is_mask);
1577 SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
1578 ipv6_key->ipv6_src,
1579 sizeof(match->key->ipv6.addr.src),
1580 is_mask);
1581 SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
1582 ipv6_key->ipv6_dst,
1583 sizeof(match->key->ipv6.addr.dst),
1584 is_mask);
1585
1586 attrs &= ~(1ULL << OVS_KEY_ATTR_IPV6);
1587 }
1588
1589 if (attrs & (1ULL << OVS_KEY_ATTR_ARP)) {
1590 const struct ovs_key_arp *arp_key;
1591
1592 arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
1593 if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
1594 OVS_NLERR(log, "Unknown ARP opcode (opcode=%d).",
1595 arp_key->arp_op);
1596 return -EINVAL;
1597 }
1598
1599 SW_FLOW_KEY_PUT(match, ipv4.addr.src,
1600 arp_key->arp_sip, is_mask);
1601 SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
1602 arp_key->arp_tip, is_mask);
1603 SW_FLOW_KEY_PUT(match, ip.proto,
1604 ntohs(arp_key->arp_op), is_mask);
1605 SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
1606 arp_key->arp_sha, ETH_ALEN, is_mask);
1607 SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
1608 arp_key->arp_tha, ETH_ALEN, is_mask);
1609
1610 attrs &= ~(1ULL << OVS_KEY_ATTR_ARP);
1611 }
1612
1613 if (attrs & (1 << OVS_KEY_ATTR_NSH)) {
1614 if (nsh_key_put_from_nlattr(a[OVS_KEY_ATTR_NSH], match,
1615 is_mask, false, log) < 0)
1616 return -EINVAL;
1617 attrs &= ~(1 << OVS_KEY_ATTR_NSH);
1618 }
1619
1620 if (attrs & (1ULL << OVS_KEY_ATTR_MPLS)) {
1621 const struct ovs_key_mpls *mpls_key;
1622 u32 hdr_len;
1623 u32 label_count, label_count_mask, i;
1624
1625
1626 mpls_key = nla_data(a[OVS_KEY_ATTR_MPLS]);
1627 hdr_len = nla_len(a[OVS_KEY_ATTR_MPLS]);
1628 label_count = hdr_len / sizeof(struct ovs_key_mpls);
1629
1630 if (label_count == 0 || label_count > MPLS_LABEL_DEPTH ||
1631 hdr_len % sizeof(struct ovs_key_mpls))
1632 return -EINVAL;
1633
1634 label_count_mask = GENMASK(label_count - 1, 0);
1635
1636 for (i = 0 ; i < label_count; i++)
1637 SW_FLOW_KEY_PUT(match, mpls.lse[i],
1638 mpls_key[i].mpls_lse, is_mask);
1639
1640 SW_FLOW_KEY_PUT(match, mpls.num_labels_mask,
1641 label_count_mask, is_mask);
1642
1643
1644 attrs &= ~(1ULL << OVS_KEY_ATTR_MPLS);
1645 }
1646
1647 if (attrs & (1ULL << OVS_KEY_ATTR_TCP)) {
1648 const struct ovs_key_tcp *tcp_key;
1649
1650 tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
1651 SW_FLOW_KEY_PUT(match, tp.src, tcp_key->tcp_src, is_mask);
1652 SW_FLOW_KEY_PUT(match, tp.dst, tcp_key->tcp_dst, is_mask);
1653 attrs &= ~(1ULL << OVS_KEY_ATTR_TCP);
1654 }
1655
1656 if (attrs & (1ULL << OVS_KEY_ATTR_TCP_FLAGS)) {
1657 SW_FLOW_KEY_PUT(match, tp.flags,
1658 nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
1659 is_mask);
1660 attrs &= ~(1ULL << OVS_KEY_ATTR_TCP_FLAGS);
1661 }
1662
1663 if (attrs & (1ULL << OVS_KEY_ATTR_UDP)) {
1664 const struct ovs_key_udp *udp_key;
1665
1666 udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
1667 SW_FLOW_KEY_PUT(match, tp.src, udp_key->udp_src, is_mask);
1668 SW_FLOW_KEY_PUT(match, tp.dst, udp_key->udp_dst, is_mask);
1669 attrs &= ~(1ULL << OVS_KEY_ATTR_UDP);
1670 }
1671
1672 if (attrs & (1ULL << OVS_KEY_ATTR_SCTP)) {
1673 const struct ovs_key_sctp *sctp_key;
1674
1675 sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
1676 SW_FLOW_KEY_PUT(match, tp.src, sctp_key->sctp_src, is_mask);
1677 SW_FLOW_KEY_PUT(match, tp.dst, sctp_key->sctp_dst, is_mask);
1678 attrs &= ~(1ULL << OVS_KEY_ATTR_SCTP);
1679 }
1680
1681 if (attrs & (1ULL << OVS_KEY_ATTR_ICMP)) {
1682 const struct ovs_key_icmp *icmp_key;
1683
1684 icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
1685 SW_FLOW_KEY_PUT(match, tp.src,
1686 htons(icmp_key->icmp_type), is_mask);
1687 SW_FLOW_KEY_PUT(match, tp.dst,
1688 htons(icmp_key->icmp_code), is_mask);
1689 attrs &= ~(1ULL << OVS_KEY_ATTR_ICMP);
1690 }
1691
1692 if (attrs & (1ULL << OVS_KEY_ATTR_ICMPV6)) {
1693 const struct ovs_key_icmpv6 *icmpv6_key;
1694
1695 icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
1696 SW_FLOW_KEY_PUT(match, tp.src,
1697 htons(icmpv6_key->icmpv6_type), is_mask);
1698 SW_FLOW_KEY_PUT(match, tp.dst,
1699 htons(icmpv6_key->icmpv6_code), is_mask);
1700 attrs &= ~(1ULL << OVS_KEY_ATTR_ICMPV6);
1701 }
1702
1703 if (attrs & (1ULL << OVS_KEY_ATTR_ND)) {
1704 const struct ovs_key_nd *nd_key;
1705
1706 nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
1707 SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
1708 nd_key->nd_target,
1709 sizeof(match->key->ipv6.nd.target),
1710 is_mask);
1711 SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
1712 nd_key->nd_sll, ETH_ALEN, is_mask);
1713 SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
1714 nd_key->nd_tll, ETH_ALEN, is_mask);
1715 attrs &= ~(1ULL << OVS_KEY_ATTR_ND);
1716 }
1717
1718 if (attrs != 0) {
1719 OVS_NLERR(log, "Unknown key attributes %llx",
1720 (unsigned long long)attrs);
1721 return -EINVAL;
1722 }
1723
1724 return 0;
1725 }
1726
1727 static void nlattr_set(struct nlattr *attr, u8 val,
1728 const struct ovs_len_tbl *tbl)
1729 {
1730 struct nlattr *nla;
1731 int rem;
1732
1733 /* The nlattr stream should already have been validated */
1734 nla_for_each_nested(nla, attr, rem) {
1735 if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
1736 nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl);
1737 else
1738 memset(nla_data(nla), val, nla_len(nla));
1739
1740 if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
1741 *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
1742 }
1743 }
1744
1745 static void mask_set_nlattr(struct nlattr *attr, u8 val)
1746 {
1747 nlattr_set(attr, val, ovs_key_lens);
1748 }
1749
1750 /**
1751 * ovs_nla_get_match - parses Netlink attributes into a flow key and
1752 * mask. In case the 'mask' is NULL, the flow is treated as exact match
1753 * flow. Otherwise, it is treated as a wildcarded flow, except the mask
1754 * does not include any don't care bit.
1755 * @net: Used to determine per-namespace field support.
1756 * @match: receives the extracted flow match information.
1757 * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
1758 * sequence. The fields should of the packet that triggered the creation
1759 * of this flow.
1760 * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink
1761 * attribute specifies the mask field of the wildcarded flow.
1762 * @log: Boolean to allow kernel error logging. Normally true, but when
1763 * probing for feature compatibility this should be passed in as false to
1764 * suppress unnecessary error logging.
1765 */
1766 int ovs_nla_get_match(struct net *net, struct sw_flow_match *match,
1767 const struct nlattr *nla_key,
1768 const struct nlattr *nla_mask,
1769 bool log)
1770 {
1771 const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
1772 struct nlattr *newmask = NULL;
1773 u64 key_attrs = 0;
1774 u64 mask_attrs = 0;
1775 int err;
1776
1777 err = parse_flow_nlattrs(nla_key, a, &key_attrs, log);
1778 if (err)
1779 return err;
1780
1781 err = parse_vlan_from_nlattrs(match, &key_attrs, a, false, log);
1782 if (err)
1783 return err;
1784
1785 err = ovs_key_from_nlattrs(net, match, key_attrs, a, false, log);
1786 if (err)
1787 return err;
1788
1789 if (match->mask) {
1790 if (!nla_mask) {
1791 /* Create an exact match mask. We need to set to 0xff
1792 * all the 'match->mask' fields that have been touched
1793 * in 'match->key'. We cannot simply memset
1794 * 'match->mask', because padding bytes and fields not
1795 * specified in 'match->key' should be left to 0.
1796 * Instead, we use a stream of netlink attributes,
1797 * copied from 'key' and set to 0xff.
1798 * ovs_key_from_nlattrs() will take care of filling
1799 * 'match->mask' appropriately.
1800 */
1801 newmask = kmemdup(nla_key,
1802 nla_total_size(nla_len(nla_key)),
1803 GFP_KERNEL);
1804 if (!newmask)
1805 return -ENOMEM;
1806
1807 mask_set_nlattr(newmask, 0xff);
1808
1809 /* The userspace does not send tunnel attributes that
1810 * are 0, but we should not wildcard them nonetheless.
1811 */
1812 if (match->key->tun_proto)
1813 SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
1814 0xff, true);
1815
1816 nla_mask = newmask;
1817 }
1818
1819 err = parse_flow_mask_nlattrs(nla_mask, a, &mask_attrs, log);
1820 if (err)
1821 goto free_newmask;
1822
1823 SW_FLOW_KEY_PUT(match, eth.vlan.tci, htons(0xffff), true);
1824 SW_FLOW_KEY_PUT(match, eth.cvlan.tci, htons(0xffff), true);
1825
1826 err = parse_vlan_from_nlattrs(match, &mask_attrs, a, true, log);
1827 if (err)
1828 goto free_newmask;
1829
1830 err = ovs_key_from_nlattrs(net, match, mask_attrs, a, true,
1831 log);
1832 if (err)
1833 goto free_newmask;
1834 }
1835
1836 if (!match_validate(match, key_attrs, mask_attrs, log))
1837 err = -EINVAL;
1838
1839 free_newmask:
1840 kfree(newmask);
1841 return err;
1842 }
1843
1844 static size_t get_ufid_len(const struct nlattr *attr, bool log)
1845 {
1846 size_t len;
1847
1848 if (!attr)
1849 return 0;
1850
1851 len = nla_len(attr);
1852 if (len < 1 || len > MAX_UFID_LENGTH) {
1853 OVS_NLERR(log, "ufid size %u bytes exceeds the range (1, %d)",
1854 nla_len(attr), MAX_UFID_LENGTH);
1855 return 0;
1856 }
1857
1858 return len;
1859 }
1860
1861 /* Initializes 'flow->ufid', returning true if 'attr' contains a valid UFID,
1862 * or false otherwise.
1863 */
1864 bool ovs_nla_get_ufid(struct sw_flow_id *sfid, const struct nlattr *attr,
1865 bool log)
1866 {
1867 sfid->ufid_len = get_ufid_len(attr, log);
1868 if (sfid->ufid_len)
1869 memcpy(sfid->ufid, nla_data(attr), sfid->ufid_len);
1870
1871 return sfid->ufid_len;
1872 }
1873
1874 int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
1875 const struct sw_flow_key *key, bool log)
1876 {
1877 struct sw_flow_key *new_key;
1878
1879 if (ovs_nla_get_ufid(sfid, ufid, log))
1880 return 0;
1881
1882 /* If UFID was not provided, use unmasked key. */
1883 new_key = kmalloc(sizeof(*new_key), GFP_KERNEL);
1884 if (!new_key)
1885 return -ENOMEM;
1886 memcpy(new_key, key, sizeof(*key));
1887 sfid->unmasked_key = new_key;
1888
1889 return 0;
1890 }
1891
1892 u32 ovs_nla_get_ufid_flags(const struct nlattr *attr)
1893 {
1894 return attr ? nla_get_u32(attr) : 0;
1895 }
1896
1897 /**
1898 * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
1899 * @net: Network namespace.
1900 * @key: Receives extracted in_port, priority, tun_key, skb_mark and conntrack
1901 * metadata.
1902 * @a: Array of netlink attributes holding parsed %OVS_KEY_ATTR_* Netlink
1903 * attributes.
1904 * @attrs: Bit mask for the netlink attributes included in @a.
1905 * @log: Boolean to allow kernel error logging. Normally true, but when
1906 * probing for feature compatibility this should be passed in as false to
1907 * suppress unnecessary error logging.
1908 *
1909 * This parses a series of Netlink attributes that form a flow key, which must
1910 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
1911 * get the metadata, that is, the parts of the flow key that cannot be
1912 * extracted from the packet itself.
1913 *
1914 * This must be called before the packet key fields are filled in 'key'.
1915 */
1916
1917 int ovs_nla_get_flow_metadata(struct net *net,
1918 const struct nlattr *a[OVS_KEY_ATTR_MAX + 1],
1919 u64 attrs, struct sw_flow_key *key, bool log)
1920 {
1921 struct sw_flow_match match;
1922
1923 memset(&match, 0, sizeof(match));
1924 match.key = key;
1925
1926 key->ct_state = 0;
1927 key->ct_zone = 0;
1928 key->ct_orig_proto = 0;
1929 memset(&key->ct, 0, sizeof(key->ct));
1930 memset(&key->ipv4.ct_orig, 0, sizeof(key->ipv4.ct_orig));
1931 memset(&key->ipv6.ct_orig, 0, sizeof(key->ipv6.ct_orig));
1932
1933 key->phy.in_port = DP_MAX_PORTS;
1934
1935 return metadata_from_nlattrs(net, &match, &attrs, a, false, log);
1936 }
1937
1938 static int ovs_nla_put_vlan(struct sk_buff *skb, const struct vlan_head *vh,
1939 bool is_mask)
1940 {
1941 __be16 eth_type = !is_mask ? vh->tpid : htons(0xffff);
1942
1943 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
1944 nla_put_be16(skb, OVS_KEY_ATTR_VLAN, vh->tci))
1945 return -EMSGSIZE;
1946 return 0;
1947 }
1948
1949 static int nsh_key_to_nlattr(const struct ovs_key_nsh *nsh, bool is_mask,
1950 struct sk_buff *skb)
1951 {
1952 struct nlattr *start;
1953
1954 start = nla_nest_start_noflag(skb, OVS_KEY_ATTR_NSH);
1955 if (!start)
1956 return -EMSGSIZE;
1957
1958 if (nla_put(skb, OVS_NSH_KEY_ATTR_BASE, sizeof(nsh->base), &nsh->base))
1959 goto nla_put_failure;
1960
1961 if (is_mask || nsh->base.mdtype == NSH_M_TYPE1) {
1962 if (nla_put(skb, OVS_NSH_KEY_ATTR_MD1,
1963 sizeof(nsh->context), nsh->context))
1964 goto nla_put_failure;
1965 }
1966
1967 /* Don't support MD type 2 yet */
1968
1969 nla_nest_end(skb, start);
1970
1971 return 0;
1972
1973 nla_put_failure:
1974 return -EMSGSIZE;
1975 }
1976
1977 static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
1978 const struct sw_flow_key *output, bool is_mask,
1979 struct sk_buff *skb)
1980 {
1981 struct ovs_key_ethernet *eth_key;
1982 struct nlattr *nla;
1983 struct nlattr *encap = NULL;
1984 struct nlattr *in_encap = NULL;
1985
1986 if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
1987 goto nla_put_failure;
1988
1989 if (nla_put_u32(skb, OVS_KEY_ATTR_DP_HASH, output->ovs_flow_hash))
1990 goto nla_put_failure;
1991
1992 if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
1993 goto nla_put_failure;
1994
1995 if ((swkey->tun_proto || is_mask)) {
1996 const void *opts = NULL;
1997
1998 if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
1999 opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);
2000
2001 if (ip_tun_to_nlattr(skb, &output->tun_key, opts,
2002 swkey->tun_opts_len, swkey->tun_proto))
2003 goto nla_put_failure;
2004 }
2005
2006 if (swkey->phy.in_port == DP_MAX_PORTS) {
2007 if (is_mask && (output->phy.in_port == 0xffff))
2008 if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
2009 goto nla_put_failure;
2010 } else {
2011 u16 upper_u16;
2012 upper_u16 = !is_mask ? 0 : 0xffff;
2013
2014 if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
2015 (upper_u16 << 16) | output->phy.in_port))
2016 goto nla_put_failure;
2017 }
2018
2019 if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
2020 goto nla_put_failure;
2021
2022 if (ovs_ct_put_key(swkey, output, skb))
2023 goto nla_put_failure;
2024
2025 if (ovs_key_mac_proto(swkey) == MAC_PROTO_ETHERNET) {
2026 nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
2027 if (!nla)
2028 goto nla_put_failure;
2029
2030 eth_key = nla_data(nla);
2031 ether_addr_copy(eth_key->eth_src, output->eth.src);
2032 ether_addr_copy(eth_key->eth_dst, output->eth.dst);
2033
2034 if (swkey->eth.vlan.tci || eth_type_vlan(swkey->eth.type)) {
2035 if (ovs_nla_put_vlan(skb, &output->eth.vlan, is_mask))
2036 goto nla_put_failure;
2037 encap = nla_nest_start_noflag(skb, OVS_KEY_ATTR_ENCAP);
2038 if (!swkey->eth.vlan.tci)
2039 goto unencap;
2040
2041 if (swkey->eth.cvlan.tci || eth_type_vlan(swkey->eth.type)) {
2042 if (ovs_nla_put_vlan(skb, &output->eth.cvlan, is_mask))
2043 goto nla_put_failure;
2044 in_encap = nla_nest_start_noflag(skb,
2045 OVS_KEY_ATTR_ENCAP);
2046 if (!swkey->eth.cvlan.tci)
2047 goto unencap;
2048 }
2049 }
2050
2051 if (swkey->eth.type == htons(ETH_P_802_2)) {
2052 /*
2053 * Ethertype 802.2 is represented in the netlink with omitted
2054 * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and
2055 * 0xffff in the mask attribute. Ethertype can also
2056 * be wildcarded.
2057 */
2058 if (is_mask && output->eth.type)
2059 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
2060 output->eth.type))
2061 goto nla_put_failure;
2062 goto unencap;
2063 }
2064 }
2065
2066 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
2067 goto nla_put_failure;
2068
2069 if (eth_type_vlan(swkey->eth.type)) {
2070 /* There are 3 VLAN tags, we don't know anything about the rest
2071 * of the packet, so truncate here.
2072 */
2073 WARN_ON_ONCE(!(encap && in_encap));
2074 goto unencap;
2075 }
2076
2077 if (swkey->eth.type == htons(ETH_P_IP)) {
2078 struct ovs_key_ipv4 *ipv4_key;
2079
2080 nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
2081 if (!nla)
2082 goto nla_put_failure;
2083 ipv4_key = nla_data(nla);
2084 ipv4_key->ipv4_src = output->ipv4.addr.src;
2085 ipv4_key->ipv4_dst = output->ipv4.addr.dst;
2086 ipv4_key->ipv4_proto = output->ip.proto;
2087 ipv4_key->ipv4_tos = output->ip.tos;
2088 ipv4_key->ipv4_ttl = output->ip.ttl;
2089 ipv4_key->ipv4_frag = output->ip.frag;
2090 } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
2091 struct ovs_key_ipv6 *ipv6_key;
2092
2093 nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
2094 if (!nla)
2095 goto nla_put_failure;
2096 ipv6_key = nla_data(nla);
2097 memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
2098 sizeof(ipv6_key->ipv6_src));
2099 memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
2100 sizeof(ipv6_key->ipv6_dst));
2101 ipv6_key->ipv6_label = output->ipv6.label;
2102 ipv6_key->ipv6_proto = output->ip.proto;
2103 ipv6_key->ipv6_tclass = output->ip.tos;
2104 ipv6_key->ipv6_hlimit = output->ip.ttl;
2105 ipv6_key->ipv6_frag = output->ip.frag;
2106 } else if (swkey->eth.type == htons(ETH_P_NSH)) {
2107 if (nsh_key_to_nlattr(&output->nsh, is_mask, skb))
2108 goto nla_put_failure;
2109 } else if (swkey->eth.type == htons(ETH_P_ARP) ||
2110 swkey->eth.type == htons(ETH_P_RARP)) {
2111 struct ovs_key_arp *arp_key;
2112
2113 nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
2114 if (!nla)
2115 goto nla_put_failure;
2116 arp_key = nla_data(nla);
2117 memset(arp_key, 0, sizeof(struct ovs_key_arp));
2118 arp_key->arp_sip = output->ipv4.addr.src;
2119 arp_key->arp_tip = output->ipv4.addr.dst;
2120 arp_key->arp_op = htons(output->ip.proto);
2121 ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
2122 ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
2123 } else if (eth_p_mpls(swkey->eth.type)) {
2124 u8 num_labels, i;
2125 struct ovs_key_mpls *mpls_key;
2126
2127 num_labels = hweight_long(output->mpls.num_labels_mask);
2128 nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS,
2129 num_labels * sizeof(*mpls_key));
2130 if (!nla)
2131 goto nla_put_failure;
2132
2133 mpls_key = nla_data(nla);
2134 for (i = 0; i < num_labels; i++)
2135 mpls_key[i].mpls_lse = output->mpls.lse[i];
2136 }
2137
2138 if ((swkey->eth.type == htons(ETH_P_IP) ||
2139 swkey->eth.type == htons(ETH_P_IPV6)) &&
2140 swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
2141
2142 if (swkey->ip.proto == IPPROTO_TCP) {
2143 struct ovs_key_tcp *tcp_key;
2144
2145 nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
2146 if (!nla)
2147 goto nla_put_failure;
2148 tcp_key = nla_data(nla);
2149 tcp_key->tcp_src = output->tp.src;
2150 tcp_key->tcp_dst = output->tp.dst;
2151 if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
2152 output->tp.flags))
2153 goto nla_put_failure;
2154 } else if (swkey->ip.proto == IPPROTO_UDP) {
2155 struct ovs_key_udp *udp_key;
2156
2157 nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
2158 if (!nla)
2159 goto nla_put_failure;
2160 udp_key = nla_data(nla);
2161 udp_key->udp_src = output->tp.src;
2162 udp_key->udp_dst = output->tp.dst;
2163 } else if (swkey->ip.proto == IPPROTO_SCTP) {
2164 struct ovs_key_sctp *sctp_key;
2165
2166 nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
2167 if (!nla)
2168 goto nla_put_failure;
2169 sctp_key = nla_data(nla);
2170 sctp_key->sctp_src = output->tp.src;
2171 sctp_key->sctp_dst = output->tp.dst;
2172 } else if (swkey->eth.type == htons(ETH_P_IP) &&
2173 swkey->ip.proto == IPPROTO_ICMP) {
2174 struct ovs_key_icmp *icmp_key;
2175
2176 nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
2177 if (!nla)
2178 goto nla_put_failure;
2179 icmp_key = nla_data(nla);
2180 icmp_key->icmp_type = ntohs(output->tp.src);
2181 icmp_key->icmp_code = ntohs(output->tp.dst);
2182 } else if (swkey->eth.type == htons(ETH_P_IPV6) &&
2183 swkey->ip.proto == IPPROTO_ICMPV6) {
2184 struct ovs_key_icmpv6 *icmpv6_key;
2185
2186 nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
2187 sizeof(*icmpv6_key));
2188 if (!nla)
2189 goto nla_put_failure;
2190 icmpv6_key = nla_data(nla);
2191 icmpv6_key->icmpv6_type = ntohs(output->tp.src);
2192 icmpv6_key->icmpv6_code = ntohs(output->tp.dst);
2193
2194 if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
2195 icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
2196 struct ovs_key_nd *nd_key;
2197
2198 nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
2199 if (!nla)
2200 goto nla_put_failure;
2201 nd_key = nla_data(nla);
2202 memcpy(nd_key->nd_target, &output->ipv6.nd.target,
2203 sizeof(nd_key->nd_target));
2204 ether_addr_copy(nd_key->nd_sll, output->ipv6.nd.sll);
2205 ether_addr_copy(nd_key->nd_tll, output->ipv6.nd.tll);
2206 }
2207 }
2208 }
2209
2210 unencap:
2211 if (in_encap)
2212 nla_nest_end(skb, in_encap);
2213 if (encap)
2214 nla_nest_end(skb, encap);
2215
2216 return 0;
2217
2218 nla_put_failure:
2219 return -EMSGSIZE;
2220 }
2221
2222 int ovs_nla_put_key(const struct sw_flow_key *swkey,
2223 const struct sw_flow_key *output, int attr, bool is_mask,
2224 struct sk_buff *skb)
2225 {
2226 int err;
2227 struct nlattr *nla;
2228
2229 nla = nla_nest_start_noflag(skb, attr);
2230 if (!nla)
2231 return -EMSGSIZE;
2232 err = __ovs_nla_put_key(swkey, output, is_mask, skb);
2233 if (err)
2234 return err;
2235 nla_nest_end(skb, nla);
2236
2237 return 0;
2238 }
2239
2240 /* Called with ovs_mutex or RCU read lock. */
2241 int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb)
2242 {
2243 if (ovs_identifier_is_ufid(&flow->id))
2244 return nla_put(skb, OVS_FLOW_ATTR_UFID, flow->id.ufid_len,
2245 flow->id.ufid);
2246
2247 return ovs_nla_put_key(flow->id.unmasked_key, flow->id.unmasked_key,
2248 OVS_FLOW_ATTR_KEY, false, skb);
2249 }
2250
2251 /* Called with ovs_mutex or RCU read lock. */
2252 int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb)
2253 {
2254 return ovs_nla_put_key(&flow->key, &flow->key,
2255 OVS_FLOW_ATTR_KEY, false, skb);
2256 }
2257
2258 /* Called with ovs_mutex or RCU read lock. */
2259 int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
2260 {
2261 return ovs_nla_put_key(&flow->key, &flow->mask->key,
2262 OVS_FLOW_ATTR_MASK, true, skb);
2263 }
2264
2265 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)
2266 #define MAX_ACTIONS_BUFSIZE (16 * 1024)
2267 #else
2268 #define MAX_ACTIONS_BUFSIZE (32 * 1024)
2269 #endif
2270
2271 static struct sw_flow_actions *nla_alloc_flow_actions(int size)
2272 {
2273 struct sw_flow_actions *sfa;
2274
2275 WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
2276
2277 sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
2278 if (!sfa)
2279 return ERR_PTR(-ENOMEM);
2280
2281 sfa->actions_len = 0;
2282 return sfa;
2283 }
2284
2285 static void ovs_nla_free_set_action(const struct nlattr *a)
2286 {
2287 const struct nlattr *ovs_key = nla_data(a);
2288 struct ovs_tunnel_info *ovs_tun;
2289
2290 switch (nla_type(ovs_key)) {
2291 case OVS_KEY_ATTR_TUNNEL_INFO:
2292 ovs_tun = nla_data(ovs_key);
2293 ovs_dst_release((struct dst_entry *)ovs_tun->tun_dst);
2294 break;
2295 }
2296 }
2297
2298 void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
2299 {
2300 const struct nlattr *a;
2301 int rem;
2302
2303 if (!sf_acts)
2304 return;
2305
2306 nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) {
2307 switch (nla_type(a)) {
2308 case OVS_ACTION_ATTR_SET:
2309 ovs_nla_free_set_action(a);
2310 break;
2311 case OVS_ACTION_ATTR_CT:
2312 ovs_ct_free_action(a);
2313 break;
2314 }
2315 }
2316
2317 kfree(sf_acts);
2318 }
2319
2320 static void __ovs_nla_free_flow_actions(struct rcu_head *head)
2321 {
2322 ovs_nla_free_flow_actions(container_of(head, struct sw_flow_actions, rcu));
2323 }
2324
2325 /* Schedules 'sf_acts' to be freed after the next RCU grace period.
2326 * The caller must hold rcu_read_lock for this to be sensible. */
2327 void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *sf_acts)
2328 {
2329 call_rcu(&sf_acts->rcu, __ovs_nla_free_flow_actions);
2330 }
2331
2332 static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
2333 int attr_len, bool log)
2334 {
2335
2336 struct sw_flow_actions *acts;
2337 int new_acts_size;
2338 size_t req_size = NLA_ALIGN(attr_len);
2339 int next_offset = offsetof(struct sw_flow_actions, actions) +
2340 (*sfa)->actions_len;
2341
2342 if (req_size <= (ksize(*sfa) - next_offset))
2343 goto out;
2344
2345 new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
2346
2347 if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
2348 if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
2349 OVS_NLERR(log, "Flow action size exceeds max %u",
2350 MAX_ACTIONS_BUFSIZE);
2351 return ERR_PTR(-EMSGSIZE);
2352 }
2353 new_acts_size = MAX_ACTIONS_BUFSIZE;
2354 }
2355
2356 acts = nla_alloc_flow_actions(new_acts_size);
2357 if (IS_ERR(acts))
2358 return (void *)acts;
2359
2360 memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
2361 acts->actions_len = (*sfa)->actions_len;
2362 acts->orig_len = (*sfa)->orig_len;
2363 kfree(*sfa);
2364 *sfa = acts;
2365
2366 out:
2367 (*sfa)->actions_len += req_size;
2368 return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
2369 }
2370
2371 static struct nlattr *__add_action(struct sw_flow_actions **sfa,
2372 int attrtype, void *data, int len, bool log)
2373 {
2374 struct nlattr *a;
2375
2376 a = reserve_sfa_size(sfa, nla_attr_size(len), log);
2377 if (IS_ERR(a))
2378 return a;
2379
2380 a->nla_type = attrtype;
2381 a->nla_len = nla_attr_size(len);
2382
2383 if (data)
2384 memcpy(nla_data(a), data, len);
2385 memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
2386
2387 return a;
2388 }
2389
2390 int ovs_nla_add_action(struct sw_flow_actions **sfa, int attrtype, void *data,
2391 int len, bool log)
2392 {
2393 struct nlattr *a;
2394
2395 a = __add_action(sfa, attrtype, data, len, log);
2396
2397 return PTR_ERR_OR_ZERO(a);
2398 }
2399
2400 static inline int add_nested_action_start(struct sw_flow_actions **sfa,
2401 int attrtype, bool log)
2402 {
2403 int used = (*sfa)->actions_len;
2404 int err;
2405
2406 err = ovs_nla_add_action(sfa, attrtype, NULL, 0, log);
2407 if (err)
2408 return err;
2409
2410 return used;
2411 }
2412
2413 static inline void add_nested_action_end(struct sw_flow_actions *sfa,
2414 int st_offset)
2415 {
2416 struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions +
2417 st_offset);
2418
2419 a->nla_len = sfa->actions_len - st_offset;
2420 }
2421
2422 static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
2423 const struct sw_flow_key *key,
2424 struct sw_flow_actions **sfa,
2425 __be16 eth_type, __be16 vlan_tci,
2426 u32 mpls_label_count, bool log);
2427
2428 static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
2429 const struct sw_flow_key *key,
2430 struct sw_flow_actions **sfa,
2431 __be16 eth_type, __be16 vlan_tci,
2432 u32 mpls_label_count, bool log, bool last)
2433 {
2434 const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
2435 const struct nlattr *probability, *actions;
2436 const struct nlattr *a;
2437 int rem, start, err;
2438 struct sample_arg arg;
2439
2440 memset(attrs, 0, sizeof(attrs));
2441 nla_for_each_nested(a, attr, rem) {
2442 int type = nla_type(a);
2443 if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
2444 return -EINVAL;
2445 attrs[type] = a;
2446 }
2447 if (rem)
2448 return -EINVAL;
2449
2450 probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
2451 if (!probability || nla_len(probability) != sizeof(u32))
2452 return -EINVAL;
2453
2454 actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
2455 if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
2456 return -EINVAL;
2457
2458 /* validation done, copy sample action. */
2459 start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log);
2460 if (start < 0)
2461 return start;
2462
2463 /* When both skb and flow may be changed, put the sample
2464 * into a deferred fifo. On the other hand, if only skb
2465 * may be modified, the actions can be executed in place.
2466 *
2467 * Do this analysis at the flow installation time.
2468 * Set 'clone_action->exec' to true if the actions can be
2469 * executed without being deferred.
2470 *
2471 * If the sample is the last action, it can always be excuted
2472 * rather than deferred.
2473 */
2474 arg.exec = last || !actions_may_change_flow(actions);
2475 arg.probability = nla_get_u32(probability);
2476
2477 err = ovs_nla_add_action(sfa, OVS_SAMPLE_ATTR_ARG, &arg, sizeof(arg),
2478 log);
2479 if (err)
2480 return err;
2481
2482 err = __ovs_nla_copy_actions(net, actions, key, sfa,
2483 eth_type, vlan_tci, mpls_label_count, log);
2484
2485 if (err)
2486 return err;
2487
2488 add_nested_action_end(*sfa, start);
2489
2490 return 0;
2491 }
2492
2493 static int validate_and_copy_clone(struct net *net,
2494 const struct nlattr *attr,
2495 const struct sw_flow_key *key,
2496 struct sw_flow_actions **sfa,
2497 __be16 eth_type, __be16 vlan_tci,
2498 u32 mpls_label_count, bool log, bool last)
2499 {
2500 int start, err;
2501 u32 exec;
2502
2503 if (nla_len(attr) && nla_len(attr) < NLA_HDRLEN)
2504 return -EINVAL;
2505
2506 start = add_nested_action_start(sfa, OVS_ACTION_ATTR_CLONE, log);
2507 if (start < 0)
2508 return start;
2509
2510 exec = last || !actions_may_change_flow(attr);
2511
2512 err = ovs_nla_add_action(sfa, OVS_CLONE_ATTR_EXEC, &exec,
2513 sizeof(exec), log);
2514 if (err)
2515 return err;
2516
2517 err = __ovs_nla_copy_actions(net, attr, key, sfa,
2518 eth_type, vlan_tci, mpls_label_count, log);
2519 if (err)
2520 return err;
2521
2522 add_nested_action_end(*sfa, start);
2523
2524 return 0;
2525 }
2526
2527 void ovs_match_init(struct sw_flow_match *match,
2528 struct sw_flow_key *key,
2529 bool reset_key,
2530 struct sw_flow_mask *mask)
2531 {
2532 memset(match, 0, sizeof(*match));
2533 match->key = key;
2534 match->mask = mask;
2535
2536 if (reset_key)
2537 memset(key, 0, sizeof(*key));
2538
2539 if (mask) {
2540 memset(&mask->key, 0, sizeof(mask->key));
2541 mask->range.start = mask->range.end = 0;
2542 }
2543 }
2544
2545 static int validate_geneve_opts(struct sw_flow_key *key)
2546 {
2547 struct geneve_opt *option;
2548 int opts_len = key->tun_opts_len;
2549 bool crit_opt = false;
2550
2551 option = (struct geneve_opt *)TUN_METADATA_OPTS(key, key->tun_opts_len);
2552 while (opts_len > 0) {
2553 int len;
2554
2555 if (opts_len < sizeof(*option))
2556 return -EINVAL;
2557
2558 len = sizeof(*option) + option->length * 4;
2559 if (len > opts_len)
2560 return -EINVAL;
2561
2562 crit_opt |= !!(option->type & GENEVE_CRIT_OPT_TYPE);
2563
2564 option = (struct geneve_opt *)((u8 *)option + len);
2565 opts_len -= len;
2566 }
2567
2568 key->tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;
2569
2570 return 0;
2571 }
2572
2573 static int validate_and_copy_set_tun(const struct nlattr *attr,
2574 struct sw_flow_actions **sfa, bool log)
2575 {
2576 struct sw_flow_match match;
2577 struct sw_flow_key key;
2578 struct metadata_dst *tun_dst;
2579 struct ip_tunnel_info *tun_info;
2580 struct ovs_tunnel_info *ovs_tun;
2581 struct nlattr *a;
2582 int err = 0, start, opts_type;
2583 __be16 dst_opt_type;
2584
2585 dst_opt_type = 0;
2586 ovs_match_init(&match, &key, true, NULL);
2587 opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log);
2588 if (opts_type < 0)
2589 return opts_type;
2590
2591 if (key.tun_opts_len) {
2592 switch (opts_type) {
2593 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
2594 err = validate_geneve_opts(&key);
2595 if (err < 0)
2596 return err;
2597 dst_opt_type = TUNNEL_GENEVE_OPT;
2598 break;
2599 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
2600 dst_opt_type = TUNNEL_VXLAN_OPT;
2601 break;
2602 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
2603 dst_opt_type = TUNNEL_ERSPAN_OPT;
2604 break;
2605 }
2606 }
2607
2608 start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET, log);
2609 if (start < 0)
2610 return start;
2611
2612 tun_dst = metadata_dst_alloc(key.tun_opts_len, METADATA_IP_TUNNEL,
2613 GFP_KERNEL);
2614
2615 if (!tun_dst)
2616 return -ENOMEM;
2617
2618 err = dst_cache_init(&tun_dst->u.tun_info.dst_cache, GFP_KERNEL);
2619 if (err) {
2620 dst_release((struct dst_entry *)tun_dst);
2621 return err;
2622 }
2623 a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
2624 sizeof(*ovs_tun), log);
2625 if (IS_ERR(a)) {
2626 ovs_dst_release((struct dst_entry *)tun_dst);
2627 return PTR_ERR(a);
2628 }
2629
2630 ovs_tun = nla_data(a);
2631 ovs_tun->tun_dst = tun_dst;
2632
2633 tun_info = &tun_dst->u.tun_info;
2634 tun_info->mode = IP_TUNNEL_INFO_TX;
2635 if (key.tun_proto == AF_INET6)
2636 tun_info->mode |= IP_TUNNEL_INFO_IPV6;
2637 tun_info->key = key.tun_key;
2638
2639 /* We need to store the options in the action itself since
2640 * everything else will go away after flow setup. We can append
2641 * it to tun_info and then point there.
2642 */
2643 ip_tunnel_info_opts_set(tun_info,
2644 TUN_METADATA_OPTS(&key, key.tun_opts_len),
2645 key.tun_opts_len, dst_opt_type);
2646 add_nested_action_end(*sfa, start);
2647
2648 return err;
2649 }
2650
2651 static bool validate_nsh(const struct nlattr *attr, bool is_mask,
2652 bool is_push_nsh, bool log)
2653 {
2654 struct sw_flow_match match;
2655 struct sw_flow_key key;
2656 int ret = 0;
2657
2658 ovs_match_init(&match, &key, true, NULL);
2659 ret = nsh_key_put_from_nlattr(attr, &match, is_mask,
2660 is_push_nsh, log);
2661 return !ret;
2662 }
2663
2664 /* Return false if there are any non-masked bits set.
2665 * Mask follows data immediately, before any netlink padding.
2666 */
2667 static bool validate_masked(u8 *data, int len)
2668 {
2669 u8 *mask = data + len;
2670
2671 while (len--)
2672 if (*data++ & ~*mask++)
2673 return false;
2674
2675 return true;
2676 }
2677
2678 static int validate_set(const struct nlattr *a,
2679 const struct sw_flow_key *flow_key,
2680 struct sw_flow_actions **sfa, bool *skip_copy,
2681 u8 mac_proto, __be16 eth_type, bool masked, bool log)
2682 {
2683 const struct nlattr *ovs_key = nla_data(a);
2684 int key_type = nla_type(ovs_key);
2685 size_t key_len;
2686
2687 /* There can be only one key in a action */
2688 if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
2689 return -EINVAL;
2690
2691 key_len = nla_len(ovs_key);
2692 if (masked)
2693 key_len /= 2;
2694
2695 if (key_type > OVS_KEY_ATTR_MAX ||
2696 !check_attr_len(key_len, ovs_key_lens[key_type].len))
2697 return -EINVAL;
2698
2699 if (masked && !validate_masked(nla_data(ovs_key), key_len))
2700 return -EINVAL;
2701
2702 switch (key_type) {
2703 const struct ovs_key_ipv4 *ipv4_key;
2704 const struct ovs_key_ipv6 *ipv6_key;
2705 int err;
2706
2707 case OVS_KEY_ATTR_PRIORITY:
2708 case OVS_KEY_ATTR_SKB_MARK:
2709 case OVS_KEY_ATTR_CT_MARK:
2710 case OVS_KEY_ATTR_CT_LABELS:
2711 break;
2712
2713 case OVS_KEY_ATTR_ETHERNET:
2714 if (mac_proto != MAC_PROTO_ETHERNET)
2715 return -EINVAL;
2716 break;
2717
2718 case OVS_KEY_ATTR_TUNNEL:
2719 #ifndef USE_UPSTREAM_TUNNEL
2720 if (eth_p_mpls(eth_type))
2721 return -EINVAL;
2722 #endif
2723 if (masked)
2724 return -EINVAL; /* Masked tunnel set not supported. */
2725
2726 *skip_copy = true;
2727 err = validate_and_copy_set_tun(a, sfa, log);
2728 if (err)
2729 return err;
2730 break;
2731
2732 case OVS_KEY_ATTR_IPV4:
2733 if (eth_type != htons(ETH_P_IP))
2734 return -EINVAL;
2735
2736 ipv4_key = nla_data(ovs_key);
2737
2738 if (masked) {
2739 const struct ovs_key_ipv4 *mask = ipv4_key + 1;
2740
2741 /* Non-writeable fields. */
2742 if (mask->ipv4_proto || mask->ipv4_frag)
2743 return -EINVAL;
2744 } else {
2745 if (ipv4_key->ipv4_proto != flow_key->ip.proto)
2746 return -EINVAL;
2747
2748 if (ipv4_key->ipv4_frag != flow_key->ip.frag)
2749 return -EINVAL;
2750 }
2751 break;
2752
2753 case OVS_KEY_ATTR_IPV6:
2754 if (eth_type != htons(ETH_P_IPV6))
2755 return -EINVAL;
2756
2757 ipv6_key = nla_data(ovs_key);
2758
2759 if (masked) {
2760 const struct ovs_key_ipv6 *mask = ipv6_key + 1;
2761
2762 /* Non-writeable fields. */
2763 if (mask->ipv6_proto || mask->ipv6_frag)
2764 return -EINVAL;
2765
2766 /* Invalid bits in the flow label mask? */
2767 if (ntohl(mask->ipv6_label) & 0xFFF00000)
2768 return -EINVAL;
2769 } else {
2770 if (ipv6_key->ipv6_proto != flow_key->ip.proto)
2771 return -EINVAL;
2772
2773 if (ipv6_key->ipv6_frag != flow_key->ip.frag)
2774 return -EINVAL;
2775 }
2776 if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
2777 return -EINVAL;
2778
2779 break;
2780
2781 case OVS_KEY_ATTR_TCP:
2782 if ((eth_type != htons(ETH_P_IP) &&
2783 eth_type != htons(ETH_P_IPV6)) ||
2784 flow_key->ip.proto != IPPROTO_TCP)
2785 return -EINVAL;
2786
2787 break;
2788
2789 case OVS_KEY_ATTR_UDP:
2790 if ((eth_type != htons(ETH_P_IP) &&
2791 eth_type != htons(ETH_P_IPV6)) ||
2792 flow_key->ip.proto != IPPROTO_UDP)
2793 return -EINVAL;
2794
2795 break;
2796
2797 case OVS_KEY_ATTR_MPLS:
2798 if (!eth_p_mpls(eth_type))
2799 return -EINVAL;
2800 break;
2801
2802 case OVS_KEY_ATTR_SCTP:
2803 if ((eth_type != htons(ETH_P_IP) &&
2804 eth_type != htons(ETH_P_IPV6)) ||
2805 flow_key->ip.proto != IPPROTO_SCTP)
2806 return -EINVAL;
2807
2808 break;
2809
2810 case OVS_KEY_ATTR_NSH:
2811 if (eth_type != htons(ETH_P_NSH))
2812 return -EINVAL;
2813 if (!validate_nsh(nla_data(a), masked, false, log))
2814 return -EINVAL;
2815 break;
2816
2817 default:
2818 return -EINVAL;
2819 }
2820
2821 /* Convert non-masked non-tunnel set actions to masked set actions. */
2822 if (!masked && key_type != OVS_KEY_ATTR_TUNNEL) {
2823 int start, len = key_len * 2;
2824 struct nlattr *at;
2825
2826 *skip_copy = true;
2827
2828 start = add_nested_action_start(sfa,
2829 OVS_ACTION_ATTR_SET_TO_MASKED,
2830 log);
2831 if (start < 0)
2832 return start;
2833
2834 at = __add_action(sfa, key_type, NULL, len, log);
2835 if (IS_ERR(at))
2836 return PTR_ERR(at);
2837
2838 memcpy(nla_data(at), nla_data(ovs_key), key_len); /* Key. */
2839 memset(nla_data(at) + key_len, 0xff, key_len); /* Mask. */
2840 /* Clear non-writeable bits from otherwise writeable fields. */
2841 if (key_type == OVS_KEY_ATTR_IPV6) {
2842 struct ovs_key_ipv6 *mask = nla_data(at) + key_len;
2843
2844 mask->ipv6_label &= htonl(0x000FFFFF);
2845 }
2846 add_nested_action_end(*sfa, start);
2847 }
2848
2849 return 0;
2850 }
2851
2852 static int validate_userspace(const struct nlattr *attr)
2853 {
2854 static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
2855 [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
2856 [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
2857 [OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = {.type = NLA_U32 },
2858 };
2859 struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
2860 int error;
2861
2862 error = nla_parse_nested_deprecated(a, OVS_USERSPACE_ATTR_MAX, attr,
2863 userspace_policy, NULL);
2864 if (error)
2865 return error;
2866
2867 if (!a[OVS_USERSPACE_ATTR_PID] ||
2868 !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
2869 return -EINVAL;
2870
2871 return 0;
2872 }
2873
2874 static const struct nla_policy cpl_policy[OVS_CHECK_PKT_LEN_ATTR_MAX + 1] = {
2875 [OVS_CHECK_PKT_LEN_ATTR_PKT_LEN] = {.type = NLA_U16 },
2876 [OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER] = {.type = NLA_NESTED },
2877 [OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL] = {.type = NLA_NESTED },
2878 };
2879
2880 static int validate_and_copy_check_pkt_len(struct net *net,
2881 const struct nlattr *attr,
2882 const struct sw_flow_key *key,
2883 struct sw_flow_actions **sfa,
2884 __be16 eth_type, __be16 vlan_tci,
2885 u32 mpls_label_count,
2886 bool log, bool last)
2887 {
2888 const struct nlattr *acts_if_greater, *acts_if_lesser_eq;
2889 struct nlattr *a[OVS_CHECK_PKT_LEN_ATTR_MAX + 1];
2890 struct check_pkt_len_arg arg;
2891 int nested_acts_start;
2892 int start, err;
2893
2894 err = nla_parse_deprecated_strict(a, OVS_CHECK_PKT_LEN_ATTR_MAX,
2895 nla_data(attr), nla_len(attr),
2896 cpl_policy, NULL);
2897 if (err)
2898 return err;
2899
2900 if (!a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN] ||
2901 !nla_get_u16(a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN]))
2902 return -EINVAL;
2903
2904 acts_if_lesser_eq = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL];
2905 acts_if_greater = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER];
2906
2907 /* Both the nested action should be present. */
2908 if (!acts_if_greater || !acts_if_lesser_eq)
2909 return -EINVAL;
2910
2911 /* validation done, copy the nested actions. */
2912 start = add_nested_action_start(sfa, OVS_ACTION_ATTR_CHECK_PKT_LEN,
2913 log);
2914 if (start < 0)
2915 return start;
2916
2917 arg.pkt_len = nla_get_u16(a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN]);
2918 arg.exec_for_lesser_equal =
2919 last || !actions_may_change_flow(acts_if_lesser_eq);
2920 arg.exec_for_greater =
2921 last || !actions_may_change_flow(acts_if_greater);
2922
2923 err = ovs_nla_add_action(sfa, OVS_CHECK_PKT_LEN_ATTR_ARG, &arg,
2924 sizeof(arg), log);
2925 if (err)
2926 return err;
2927
2928 nested_acts_start = add_nested_action_start(sfa,
2929 OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL, log);
2930 if (nested_acts_start < 0)
2931 return nested_acts_start;
2932
2933 err = __ovs_nla_copy_actions(net, acts_if_lesser_eq, key, sfa,
2934 eth_type, vlan_tci, mpls_label_count, log);
2935
2936 if (err)
2937 return err;
2938
2939 add_nested_action_end(*sfa, nested_acts_start);
2940
2941 nested_acts_start = add_nested_action_start(sfa,
2942 OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER, log);
2943 if (nested_acts_start < 0)
2944 return nested_acts_start;
2945
2946 err = __ovs_nla_copy_actions(net, acts_if_greater, key, sfa,
2947 eth_type, vlan_tci, mpls_label_count, log);
2948
2949 if (err)
2950 return err;
2951
2952 add_nested_action_end(*sfa, nested_acts_start);
2953 add_nested_action_end(*sfa, start);
2954 return 0;
2955 }
2956
2957 static int copy_action(const struct nlattr *from,
2958 struct sw_flow_actions **sfa, bool log)
2959 {
2960 int totlen = NLA_ALIGN(from->nla_len);
2961 struct nlattr *to;
2962
2963 to = reserve_sfa_size(sfa, from->nla_len, log);
2964 if (IS_ERR(to))
2965 return PTR_ERR(to);
2966
2967 memcpy(to, from, totlen);
2968 return 0;
2969 }
2970
2971 static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
2972 const struct sw_flow_key *key,
2973 struct sw_flow_actions **sfa,
2974 __be16 eth_type, __be16 vlan_tci,
2975 u32 mpls_label_count, bool log)
2976 {
2977 u8 mac_proto = ovs_key_mac_proto(key);
2978 const struct nlattr *a;
2979 int rem, err;
2980
2981 nla_for_each_nested(a, attr, rem) {
2982 /* Expected argument lengths, (u32)-1 for variable length. */
2983 static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
2984 [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
2985 [OVS_ACTION_ATTR_RECIRC] = sizeof(u32),
2986 [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
2987 [OVS_ACTION_ATTR_PUSH_MPLS] = sizeof(struct ovs_action_push_mpls),
2988 [OVS_ACTION_ATTR_POP_MPLS] = sizeof(__be16),
2989 [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
2990 [OVS_ACTION_ATTR_POP_VLAN] = 0,
2991 [OVS_ACTION_ATTR_SET] = (u32)-1,
2992 [OVS_ACTION_ATTR_SET_MASKED] = (u32)-1,
2993 [OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
2994 [OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash),
2995 [OVS_ACTION_ATTR_CT] = (u32)-1,
2996 [OVS_ACTION_ATTR_CT_CLEAR] = 0,
2997 [OVS_ACTION_ATTR_TRUNC] = sizeof(struct ovs_action_trunc),
2998 [OVS_ACTION_ATTR_PUSH_ETH] = sizeof(struct ovs_action_push_eth),
2999 [OVS_ACTION_ATTR_POP_ETH] = 0,
3000 [OVS_ACTION_ATTR_PUSH_NSH] = (u32)-1,
3001 [OVS_ACTION_ATTR_POP_NSH] = 0,
3002 [OVS_ACTION_ATTR_METER] = sizeof(u32),
3003 [OVS_ACTION_ATTR_CLONE] = (u32)-1,
3004 [OVS_ACTION_ATTR_CHECK_PKT_LEN] = (u32)-1,
3005 };
3006 const struct ovs_action_push_vlan *vlan;
3007 int type = nla_type(a);
3008 bool skip_copy;
3009
3010 if (type > OVS_ACTION_ATTR_MAX ||
3011 (action_lens[type] != nla_len(a) &&
3012 action_lens[type] != (u32)-1))
3013 return -EINVAL;
3014
3015 skip_copy = false;
3016 switch (type) {
3017 case OVS_ACTION_ATTR_UNSPEC:
3018 return -EINVAL;
3019
3020 case OVS_ACTION_ATTR_USERSPACE:
3021 err = validate_userspace(a);
3022 if (err)
3023 return err;
3024 break;
3025
3026 case OVS_ACTION_ATTR_OUTPUT:
3027 if (nla_get_u32(a) >= DP_MAX_PORTS)
3028 return -EINVAL;
3029 break;
3030
3031 case OVS_ACTION_ATTR_TRUNC: {
3032 const struct ovs_action_trunc *trunc = nla_data(a);
3033
3034 if (trunc->max_len < ETH_HLEN)
3035 return -EINVAL;
3036 break;
3037 }
3038
3039 case OVS_ACTION_ATTR_HASH: {
3040 const struct ovs_action_hash *act_hash = nla_data(a);
3041
3042 switch (act_hash->hash_alg) {
3043 case OVS_HASH_ALG_L4:
3044 break;
3045 default:
3046 return -EINVAL;
3047 }
3048
3049 break;
3050 }
3051
3052 case OVS_ACTION_ATTR_POP_VLAN:
3053 if (mac_proto != MAC_PROTO_ETHERNET)
3054 return -EINVAL;
3055 vlan_tci = htons(0);
3056 break;
3057
3058 case OVS_ACTION_ATTR_PUSH_VLAN:
3059 if (mac_proto != MAC_PROTO_ETHERNET)
3060 return -EINVAL;
3061 vlan = nla_data(a);
3062 if (!eth_type_vlan(vlan->vlan_tpid))
3063 return -EINVAL;
3064 if (!(vlan->vlan_tci & htons(VLAN_CFI_MASK)))
3065 return -EINVAL;
3066 vlan_tci = vlan->vlan_tci;
3067 break;
3068
3069 case OVS_ACTION_ATTR_RECIRC:
3070 break;
3071
3072 case OVS_ACTION_ATTR_PUSH_MPLS: {
3073 const struct ovs_action_push_mpls *mpls = nla_data(a);
3074
3075 if (!eth_p_mpls(mpls->mpls_ethertype))
3076 return -EINVAL;
3077 /* Prohibit push MPLS other than to a white list
3078 * for packets that have a known tag order.
3079 */
3080 if (vlan_tci & htons(VLAN_CFI_MASK) ||
3081 (eth_type != htons(ETH_P_IP) &&
3082 eth_type != htons(ETH_P_IPV6) &&
3083 eth_type != htons(ETH_P_ARP) &&
3084 eth_type != htons(ETH_P_RARP) &&
3085 !eth_p_mpls(eth_type)))
3086 return -EINVAL;
3087 eth_type = mpls->mpls_ethertype;
3088 mpls_label_count++;
3089 break;
3090 }
3091
3092 case OVS_ACTION_ATTR_POP_MPLS: {
3093 __be16 proto;
3094 if (vlan_tci & htons(VLAN_CFI_MASK) ||
3095 !eth_p_mpls(eth_type))
3096 return -EINVAL;
3097
3098 /* Disallow subsequent L2.5+ set actions and mpls_pop
3099 * actions once the last MPLS label in the packet is
3100 * popped as there is no check here to ensure that
3101 * the new eth type is valid and thus set actions could
3102 * write off the end of the packet or otherwise corrupt
3103 * it.
3104 *
3105 * Support for these actions is planned using packet
3106 * recirculation.
3107 */
3108 proto = nla_get_be16(a);
3109 mpls_label_count--;
3110
3111 if (!eth_p_mpls(proto) || !mpls_label_count)
3112 eth_type = htons(0);
3113 else
3114 eth_type = proto;
3115 break;
3116 }
3117 case OVS_ACTION_ATTR_SET:
3118 err = validate_set(a, key, sfa,
3119 &skip_copy, mac_proto, eth_type,
3120 false, log);
3121 if (err)
3122 return err;
3123 break;
3124
3125 case OVS_ACTION_ATTR_SET_MASKED:
3126 err = validate_set(a, key, sfa,
3127 &skip_copy, mac_proto, eth_type,
3128 true, log);
3129 if (err)
3130 return err;
3131 break;
3132
3133 case OVS_ACTION_ATTR_SAMPLE: {
3134 bool last = nla_is_last(a, rem);
3135
3136 err = validate_and_copy_sample(net, a, key, sfa,
3137 eth_type, vlan_tci,
3138 mpls_label_count,
3139 log, last);
3140 if (err)
3141 return err;
3142 skip_copy = true;
3143 break;
3144 }
3145
3146 case OVS_ACTION_ATTR_CT:
3147 err = ovs_ct_copy_action(net, a, key, sfa, log);
3148 if (err)
3149 return err;
3150 skip_copy = true;
3151 break;
3152
3153 case OVS_ACTION_ATTR_CT_CLEAR:
3154 break;
3155
3156 case OVS_ACTION_ATTR_PUSH_ETH:
3157 /* Disallow pushing an Ethernet header if one
3158 * is already present */
3159 if (mac_proto != MAC_PROTO_NONE)
3160 return -EINVAL;
3161 mac_proto = MAC_PROTO_ETHERNET;
3162 break;
3163
3164 case OVS_ACTION_ATTR_POP_ETH:
3165 if (mac_proto != MAC_PROTO_ETHERNET)
3166 return -EINVAL;
3167 if (vlan_tci & htons(VLAN_CFI_MASK))
3168 return -EINVAL;
3169 mac_proto = MAC_PROTO_NONE;
3170 break;
3171
3172 case OVS_ACTION_ATTR_PUSH_NSH:
3173 if (mac_proto != MAC_PROTO_ETHERNET) {
3174 u8 next_proto;
3175
3176 next_proto = tun_p_from_eth_p(eth_type);
3177 if (!next_proto)
3178 return -EINVAL;
3179 }
3180 mac_proto = MAC_PROTO_NONE;
3181 if (!validate_nsh(nla_data(a), false, true, true))
3182 return -EINVAL;
3183 break;
3184
3185 case OVS_ACTION_ATTR_POP_NSH: {
3186 __be16 inner_proto;
3187
3188 if (eth_type != htons(ETH_P_NSH))
3189 return -EINVAL;
3190 inner_proto = tun_p_to_eth_p(key->nsh.base.np);
3191 if (!inner_proto)
3192 return -EINVAL;
3193 if (key->nsh.base.np == TUN_P_ETHERNET)
3194 mac_proto = MAC_PROTO_ETHERNET;
3195 else
3196 mac_proto = MAC_PROTO_NONE;
3197 break;
3198 }
3199
3200 case OVS_ACTION_ATTR_METER:
3201 /* Non-existent meters are simply ignored. */
3202 break;
3203
3204 case OVS_ACTION_ATTR_CLONE: {
3205 bool last = nla_is_last(a, rem);
3206
3207 err = validate_and_copy_clone(net, a, key, sfa,
3208 eth_type, vlan_tci,
3209 mpls_label_count,
3210 log, last);
3211 if (err)
3212 return err;
3213 skip_copy = true;
3214 break;
3215 }
3216
3217 case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
3218 bool last = nla_is_last(a, rem);
3219
3220 err = validate_and_copy_check_pkt_len(net, a, key, sfa,
3221 eth_type,
3222 vlan_tci, log,
3223 mpls_label_count,
3224 last);
3225 if (err)
3226 return err;
3227 skip_copy = true;
3228 break;
3229 }
3230
3231 default:
3232 OVS_NLERR(log, "Unknown Action type %d", type);
3233 return -EINVAL;
3234 }
3235 if (!skip_copy) {
3236 err = copy_action(a, sfa, log);
3237 if (err)
3238 return err;
3239 }
3240 }
3241
3242 if (rem > 0)
3243 return -EINVAL;
3244
3245 return 0;
3246 }
3247
3248 /* 'key' must be the masked key. */
3249 int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
3250 const struct sw_flow_key *key,
3251 struct sw_flow_actions **sfa, bool log)
3252 {
3253 int err;
3254 u32 mpls_label_count = 0;
3255
3256 *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
3257 if (IS_ERR(*sfa))
3258 return PTR_ERR(*sfa);
3259
3260 if (eth_p_mpls(key->eth.type))
3261 mpls_label_count = hweight_long(key->mpls.num_labels_mask);
3262
3263 (*sfa)->orig_len = nla_len(attr);
3264 err = __ovs_nla_copy_actions(net, attr, key, sfa, key->eth.type,
3265 key->eth.vlan.tci, mpls_label_count, log);
3266 if (err)
3267 ovs_nla_free_flow_actions(*sfa);
3268
3269 return err;
3270 }
3271
3272 static int sample_action_to_attr(const struct nlattr *attr,
3273 struct sk_buff *skb)
3274 {
3275 struct nlattr *start, *ac_start = NULL, *sample_arg;
3276 int err = 0, rem = nla_len(attr);
3277 const struct sample_arg *arg;
3278 struct nlattr *actions;
3279
3280 start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_SAMPLE);
3281 if (!start)
3282 return -EMSGSIZE;
3283
3284 sample_arg = nla_data(attr);
3285 arg = nla_data(sample_arg);
3286 actions = nla_next(sample_arg, &rem);
3287
3288 if (nla_put_u32(skb, OVS_SAMPLE_ATTR_PROBABILITY, arg->probability)) {
3289 err = -EMSGSIZE;
3290 goto out;
3291 }
3292
3293 ac_start = nla_nest_start_noflag(skb, OVS_SAMPLE_ATTR_ACTIONS);
3294 if (!ac_start) {
3295 err = -EMSGSIZE;
3296 goto out;
3297 }
3298
3299 err = ovs_nla_put_actions(actions, rem, skb);
3300
3301 out:
3302 if (err) {
3303 nla_nest_cancel(skb, ac_start);
3304 nla_nest_cancel(skb, start);
3305 } else {
3306 nla_nest_end(skb, ac_start);
3307 nla_nest_end(skb, start);
3308 }
3309
3310 return err;
3311 }
3312
3313 static int clone_action_to_attr(const struct nlattr *attr,
3314 struct sk_buff *skb)
3315 {
3316 struct nlattr *start;
3317 int err = 0, rem = nla_len(attr);
3318
3319 start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CLONE);
3320 if (!start)
3321 return -EMSGSIZE;
3322
3323 err = ovs_nla_put_actions(nla_data(attr), rem, skb);
3324
3325 if (err)
3326 nla_nest_cancel(skb, start);
3327 else
3328 nla_nest_end(skb, start);
3329
3330 return err;
3331 }
3332
3333 static int check_pkt_len_action_to_attr(const struct nlattr *attr,
3334 struct sk_buff *skb)
3335 {
3336 struct nlattr *start, *ac_start = NULL;
3337 const struct check_pkt_len_arg *arg;
3338 const struct nlattr *a, *cpl_arg;
3339 int err = 0, rem = nla_len(attr);
3340
3341 start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CHECK_PKT_LEN);
3342 if (!start)
3343 return -EMSGSIZE;
3344
3345 /* The first nested attribute in 'attr' is always
3346 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
3347 */
3348 cpl_arg = nla_data(attr);
3349 arg = nla_data(cpl_arg);
3350
3351 if (nla_put_u16(skb, OVS_CHECK_PKT_LEN_ATTR_PKT_LEN, arg->pkt_len)) {
3352 err = -EMSGSIZE;
3353 goto out;
3354 }
3355
3356 /* Second nested attribute in 'attr' is always
3357 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
3358 */
3359 a = nla_next(cpl_arg, &rem);
3360 ac_start = nla_nest_start_noflag(skb,
3361 OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL);
3362 if (!ac_start) {
3363 err = -EMSGSIZE;
3364 goto out;
3365 }
3366
3367 err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
3368 if (err) {
3369 nla_nest_cancel(skb, ac_start);
3370 goto out;
3371 } else {
3372 nla_nest_end(skb, ac_start);
3373 }
3374
3375 /* Third nested attribute in 'attr' is always
3376 * OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER.
3377 */
3378 a = nla_next(a, &rem);
3379 ac_start = nla_nest_start_noflag(skb,
3380 OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER);
3381 if (!ac_start) {
3382 err = -EMSGSIZE;
3383 goto out;
3384 }
3385
3386 err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
3387 if (err) {
3388 nla_nest_cancel(skb, ac_start);
3389 goto out;
3390 } else {
3391 nla_nest_end(skb, ac_start);
3392 }
3393
3394 nla_nest_end(skb, start);
3395 return 0;
3396
3397 out:
3398 nla_nest_cancel(skb, start);
3399 return err;
3400 }
3401
3402 static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
3403 {
3404 const struct nlattr *ovs_key = nla_data(a);
3405 int key_type = nla_type(ovs_key);
3406 struct nlattr *start;
3407 int err;
3408
3409 switch (key_type) {
3410 case OVS_KEY_ATTR_TUNNEL_INFO: {
3411 struct ovs_tunnel_info *ovs_tun = nla_data(ovs_key);
3412 struct ip_tunnel_info *tun_info = &ovs_tun->tun_dst->u.tun_info;
3413
3414 start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_SET);
3415 if (!start)
3416 return -EMSGSIZE;
3417
3418 err = ip_tun_to_nlattr(skb, &tun_info->key,
3419 ip_tunnel_info_opts(tun_info),
3420 tun_info->options_len,
3421 ip_tunnel_info_af(tun_info));
3422 if (err)
3423 return err;
3424 nla_nest_end(skb, start);
3425 break;
3426 }
3427 default:
3428 if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
3429 return -EMSGSIZE;
3430 break;
3431 }
3432
3433 return 0;
3434 }
3435
3436 static int masked_set_action_to_set_action_attr(const struct nlattr *a,
3437 struct sk_buff *skb)
3438 {
3439 const struct nlattr *ovs_key = nla_data(a);
3440 struct nlattr *nla;
3441 size_t key_len = nla_len(ovs_key) / 2;
3442
3443 /* Revert the conversion we did from a non-masked set action to
3444 * masked set action.
3445 */
3446 nla = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_SET);
3447 if (!nla)
3448 return -EMSGSIZE;
3449
3450 if (nla_put(skb, nla_type(ovs_key), key_len, nla_data(ovs_key)))
3451 return -EMSGSIZE;
3452
3453 nla_nest_end(skb, nla);
3454 return 0;
3455 }
3456
3457 int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
3458 {
3459 const struct nlattr *a;
3460 int rem, err;
3461
3462 nla_for_each_attr(a, attr, len, rem) {
3463 int type = nla_type(a);
3464
3465 switch (type) {
3466 case OVS_ACTION_ATTR_SET:
3467 err = set_action_to_attr(a, skb);
3468 if (err)
3469 return err;
3470 break;
3471
3472 case OVS_ACTION_ATTR_SET_TO_MASKED:
3473 err = masked_set_action_to_set_action_attr(a, skb);
3474 if (err)
3475 return err;
3476 break;
3477
3478 case OVS_ACTION_ATTR_SAMPLE:
3479 err = sample_action_to_attr(a, skb);
3480 if (err)
3481 return err;
3482 break;
3483
3484 case OVS_ACTION_ATTR_CT:
3485 err = ovs_ct_action_to_attr(nla_data(a), skb);
3486 if (err)
3487 return err;
3488 break;
3489
3490 case OVS_ACTION_ATTR_CLONE:
3491 err = clone_action_to_attr(a, skb);
3492 if (err)
3493 return err;
3494 break;
3495
3496 case OVS_ACTION_ATTR_CHECK_PKT_LEN:
3497 err = check_pkt_len_action_to_attr(a, skb);
3498 if (err)
3499 return err;
3500 break;
3501
3502 default:
3503 if (nla_put(skb, type, nla_len(a), nla_data(a)))
3504 return -EMSGSIZE;
3505 break;
3506 }
3507 }
3508
3509 return 0;
3510 }