/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/geneve.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/mpls.h>
#include <net/vxlan.h>
#include <net/tun_proto.h>
#include <net/erspan.h>

#include "datapath.h"
#include "conntrack.h"
#include "flow.h"
#include "flow_netlink.h"
#include "gso.h"

struct ovs_len_tbl {
	int len;
	const struct ovs_len_tbl *next;
};

#define OVS_ATTR_NESTED -1
#define OVS_ATTR_VARIABLE -2

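/* Returns true if the given action list contains any action that might
 * modify the flow key or the packet (set, push/pop, conntrack, meter, etc.),
 * and false if it only forwards, truncates, recirculates or sends the
 * packet to userspace.
 */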
static bool actions_may_change_flow(const struct nlattr *actions)
{
	struct nlattr *nla;
	int rem;

	nla_for_each_nested(nla, actions, rem) {
		u16 action = nla_type(nla);

		switch (action) {
		case OVS_ACTION_ATTR_OUTPUT:
		case OVS_ACTION_ATTR_RECIRC:
		case OVS_ACTION_ATTR_TRUNC:
		case OVS_ACTION_ATTR_USERSPACE:
			break;

		case OVS_ACTION_ATTR_CT:
		case OVS_ACTION_ATTR_CT_CLEAR:
		case OVS_ACTION_ATTR_HASH:
		case OVS_ACTION_ATTR_POP_ETH:
		case OVS_ACTION_ATTR_POP_MPLS:
		case OVS_ACTION_ATTR_POP_NSH:
		case OVS_ACTION_ATTR_POP_VLAN:
		case OVS_ACTION_ATTR_PUSH_ETH:
		case OVS_ACTION_ATTR_PUSH_MPLS:
		case OVS_ACTION_ATTR_PUSH_NSH:
		case OVS_ACTION_ATTR_PUSH_VLAN:
		case OVS_ACTION_ATTR_SAMPLE:
		case OVS_ACTION_ATTR_SET:
		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_METER:
		default:
			return true;
		}
	}
	return false;
}

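/* Grows the key range (or the mask range) recorded in 'match', rounded out
 * to long-sized boundaries, so that it covers the field at 'offset' of
 * length 'size'.
 */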
static void update_range(struct sw_flow_match *match,
			 size_t offset, size_t size, bool is_mask)
{
	struct sw_flow_key_range *range;
	size_t start = rounddown(offset, sizeof(long));
	size_t end = roundup(offset + size, sizeof(long));

	if (!is_mask)
		range = &match->range;
	else
		range = &match->mask->range;

	if (range->start == range->end) {
		range->start = start;
		range->end = end;
		return;
	}

	if (range->start > start)
		range->start = start;

	if (range->end < end)
		range->end = end;
}

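/* Helpers that store a value into either the key or the mask of 'match'
 * and update the corresponding key range, so that flow comparison only
 * needs to look at the bytes that were actually set.
 */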
#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
	do { \
		update_range(match, offsetof(struct sw_flow_key, field),    \
			     sizeof((match)->key->field), is_mask);	    \
		if (is_mask)						    \
			(match)->mask->key.field = value;		    \
		else							    \
			(match)->key->field = value;			    \
	} while (0)

#define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask)	    \
	do {								    \
		update_range(match, offset, len, is_mask);		    \
		if (is_mask)						    \
			memcpy((u8 *)&(match)->mask->key + offset, value_p, len);\
		else							    \
			memcpy((u8 *)(match)->key + offset, value_p, len);  \
	} while (0)

#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask)		      \
	SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \
				  value_p, len, is_mask)

#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask)		    \
	do {								    \
		update_range(match, offsetof(struct sw_flow_key, field),    \
			     sizeof((match)->key->field), is_mask);	    \
		if (is_mask)						    \
			memset((u8 *)&(match)->mask->key.field, value,	    \
			       sizeof((match)->mask->key.field));	    \
		else							    \
			memset((u8 *)&(match)->key->field, value,	    \
			       sizeof((match)->key->field));		    \
	} while (0)

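/* Checks that the set of key attributes supplied by userspace is complete
 * for the protocols the key claims to match (e.g. an IPv4 key must carry
 * an OVS_KEY_ATTR_IPV4 attribute) and that protocol-dependent mask
 * attributes are only present where the corresponding key field is
 * matched exactly.
 */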
static bool match_validate(const struct sw_flow_match *match,
			   u64 key_attrs, u64 mask_attrs, bool log)
{
	u64 key_expected = 0;
	u64 mask_allowed = key_attrs;  /* At most allow all key attributes */

	/* The following mask attributes are allowed only if they
	 * pass the validation tests.
	 */
	mask_allowed &= ~((1ULL << OVS_KEY_ATTR_IPV4)
			| (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)
			| (1ULL << OVS_KEY_ATTR_IPV6)
			| (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)
			| (1ULL << OVS_KEY_ATTR_TCP)
			| (1ULL << OVS_KEY_ATTR_TCP_FLAGS)
			| (1ULL << OVS_KEY_ATTR_UDP)
			| (1ULL << OVS_KEY_ATTR_SCTP)
			| (1ULL << OVS_KEY_ATTR_ICMP)
			| (1ULL << OVS_KEY_ATTR_ICMPV6)
			| (1ULL << OVS_KEY_ATTR_ARP)
			| (1ULL << OVS_KEY_ATTR_ND)
			| (1ULL << OVS_KEY_ATTR_MPLS)
			| (1ULL << OVS_KEY_ATTR_NSH));

	/* Always allowed mask fields. */
	mask_allowed |= ((1ULL << OVS_KEY_ATTR_TUNNEL)
		       | (1ULL << OVS_KEY_ATTR_IN_PORT)
		       | (1ULL << OVS_KEY_ATTR_ETHERTYPE));

	/* Check key attributes. */
	if (match->key->eth.type == htons(ETH_P_ARP)
			|| match->key->eth.type == htons(ETH_P_RARP)) {
		key_expected |= 1ULL << OVS_KEY_ATTR_ARP;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1ULL << OVS_KEY_ATTR_ARP;
	}

	if (eth_p_mpls(match->key->eth.type)) {
		key_expected |= 1ULL << OVS_KEY_ATTR_MPLS;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1ULL << OVS_KEY_ATTR_MPLS;
	}

	if (match->key->eth.type == htons(ETH_P_IP)) {
		key_expected |= 1ULL << OVS_KEY_ATTR_IPV4;
		if (match->mask && match->mask->key.eth.type == htons(0xffff)) {
			mask_allowed |= 1ULL << OVS_KEY_ATTR_IPV4;
			mask_allowed |= 1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4;
		}

		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
			if (match->key->ip.proto == IPPROTO_UDP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_UDP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1ULL << OVS_KEY_ATTR_UDP;
			}

			if (match->key->ip.proto == IPPROTO_SCTP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_SCTP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1ULL << OVS_KEY_ATTR_SCTP;
			}

			if (match->key->ip.proto == IPPROTO_TCP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_TCP;
				key_expected |= 1ULL << OVS_KEY_ATTR_TCP_FLAGS;
				if (match->mask && (match->mask->key.ip.proto == 0xff)) {
					mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP;
					mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP_FLAGS;
				}
			}

			if (match->key->ip.proto == IPPROTO_ICMP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_ICMP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1ULL << OVS_KEY_ATTR_ICMP;
			}
		}
	}

	if (match->key->eth.type == htons(ETH_P_IPV6)) {
		key_expected |= 1ULL << OVS_KEY_ATTR_IPV6;
		if (match->mask && match->mask->key.eth.type == htons(0xffff)) {
			mask_allowed |= 1ULL << OVS_KEY_ATTR_IPV6;
			mask_allowed |= 1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6;
		}

		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
			if (match->key->ip.proto == IPPROTO_UDP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_UDP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1ULL << OVS_KEY_ATTR_UDP;
			}

			if (match->key->ip.proto == IPPROTO_SCTP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_SCTP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1ULL << OVS_KEY_ATTR_SCTP;
			}

			if (match->key->ip.proto == IPPROTO_TCP) {
				key_expected |= 1ULL << OVS_KEY_ATTR_TCP;
				key_expected |= 1ULL << OVS_KEY_ATTR_TCP_FLAGS;
				if (match->mask && (match->mask->key.ip.proto == 0xff)) {
					mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP;
					mask_allowed |= 1ULL << OVS_KEY_ATTR_TCP_FLAGS;
				}
			}

			if (match->key->ip.proto == IPPROTO_ICMPV6) {
				key_expected |= 1ULL << OVS_KEY_ATTR_ICMPV6;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1ULL << OVS_KEY_ATTR_ICMPV6;

				if (match->key->tp.src ==
						htons(NDISC_NEIGHBOUR_SOLICITATION) ||
				    match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
					key_expected |= 1ULL << OVS_KEY_ATTR_ND;
					/* Original direction conntrack tuple
					 * uses the same space as the ND fields
					 * in the key, so both are not allowed
					 * at the same time.
					 */
					mask_allowed &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
					if (match->mask && (match->mask->key.tp.src == htons(0xff)))
						mask_allowed |= 1ULL << OVS_KEY_ATTR_ND;
				}
			}
		}
	}

	if (match->key->eth.type == htons(ETH_P_NSH)) {
		key_expected |= 1 << OVS_KEY_ATTR_NSH;
		if (match->mask &&
		    match->mask->key.eth.type == htons(0xffff)) {
			mask_allowed |= 1 << OVS_KEY_ATTR_NSH;
		}
	}

	if ((key_attrs & key_expected) != key_expected) {
		/* Key attributes check failed. */
		OVS_NLERR(log, "Missing key (keys=%llx, expected=%llx)",
			  (unsigned long long)key_attrs,
			  (unsigned long long)key_expected);
		return false;
	}

	if ((mask_attrs & mask_allowed) != mask_attrs) {
		/* Mask attributes check failed. */
		OVS_NLERR(log, "Unexpected mask (mask=%llx, allowed=%llx)",
			  (unsigned long long)mask_attrs,
			  (unsigned long long)mask_allowed);
		return false;
	}

	return true;
}

size_t ovs_tun_key_attr_size(void)
{
	/* Whenever adding new OVS_TUNNEL_KEY_ FIELDS, we should consider
	 * updating this function.
	 */
	return    nla_total_size_64bit(8) /* OVS_TUNNEL_KEY_ATTR_ID */
		+ nla_total_size(16)   /* OVS_TUNNEL_KEY_ATTR_IPV[46]_SRC */
		+ nla_total_size(16)   /* OVS_TUNNEL_KEY_ATTR_IPV[46]_DST */
		+ nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TOS */
		+ nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TTL */
		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_CSUM */
		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_OAM */
		+ nla_total_size(256)  /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
		/* OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS and
		 * OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS are mutually exclusive with
		 * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and are covered by it.
		 */
		+ nla_total_size(2)    /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
		+ nla_total_size(2);   /* OVS_TUNNEL_KEY_ATTR_TP_DST */
}

static size_t ovs_nsh_key_attr_size(void)
{
	/* Whenever adding new OVS_NSH_KEY_ FIELDS, we should consider
	 * updating this function.
	 */
	return  nla_total_size(NSH_BASE_HDR_LEN) /* OVS_NSH_KEY_ATTR_BASE */
		/* OVS_NSH_KEY_ATTR_MD1 and OVS_NSH_KEY_ATTR_MD2 are
		 * mutually exclusive, so the bigger one can cover
		 * the smaller one.
		 */
		+ nla_total_size(NSH_CTX_HDRS_MAX_LEN);
}

size_t ovs_key_attr_size(void)
{
	/* Whenever adding new OVS_KEY_ FIELDS, we should consider
	 * updating this function.
	 */
	BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 29);

	return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
		  + ovs_tun_key_attr_size()
		+ nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_DP_HASH */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_RECIRC_ID */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_CT_STATE */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_CT_ZONE */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_CT_MARK */
		+ nla_total_size(16)  /* OVS_KEY_ATTR_CT_LABELS */
		+ nla_total_size(40)  /* OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6 */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_NSH */
		  + ovs_nsh_key_attr_size()
		+ nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_VLAN */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
		+ nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
		+ nla_total_size(28); /* OVS_KEY_ATTR_ND */
}

static const struct ovs_len_tbl ovs_vxlan_ext_key_lens[OVS_VXLAN_EXT_MAX + 1] = {
	[OVS_VXLAN_EXT_GBP]	    = { .len = sizeof(u32) },
};

static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
	[OVS_TUNNEL_KEY_ATTR_ID]	    = { .len = sizeof(u64) },
	[OVS_TUNNEL_KEY_ATTR_IPV4_SRC]	    = { .len = sizeof(u32) },
	[OVS_TUNNEL_KEY_ATTR_IPV4_DST]	    = { .len = sizeof(u32) },
	[OVS_TUNNEL_KEY_ATTR_TOS]	    = { .len = 1 },
	[OVS_TUNNEL_KEY_ATTR_TTL]	    = { .len = 1 },
	[OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
	[OVS_TUNNEL_KEY_ATTR_CSUM]	    = { .len = 0 },
	[OVS_TUNNEL_KEY_ATTR_TP_SRC]	    = { .len = sizeof(u16) },
	[OVS_TUNNEL_KEY_ATTR_TP_DST]	    = { .len = sizeof(u16) },
	[OVS_TUNNEL_KEY_ATTR_OAM]	    = { .len = 0 },
	[OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS]   = { .len = OVS_ATTR_VARIABLE },
	[OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS]    = { .len = OVS_ATTR_NESTED,
						.next = ovs_vxlan_ext_key_lens },
	[OVS_TUNNEL_KEY_ATTR_IPV6_SRC]	    = { .len = sizeof(struct in6_addr) },
	[OVS_TUNNEL_KEY_ATTR_IPV6_DST]	    = { .len = sizeof(struct in6_addr) },
	[OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS]   = { .len = OVS_ATTR_VARIABLE },
};

static const struct ovs_len_tbl
ovs_nsh_key_attr_lens[OVS_NSH_KEY_ATTR_MAX + 1] = {
	[OVS_NSH_KEY_ATTR_BASE] = { .len = sizeof(struct ovs_nsh_key_base) },
	[OVS_NSH_KEY_ATTR_MD1]	= { .len = sizeof(struct ovs_nsh_key_md1) },
	[OVS_NSH_KEY_ATTR_MD2]	= { .len = OVS_ATTR_VARIABLE },
};

/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
	[OVS_KEY_ATTR_ENCAP]	 = { .len = OVS_ATTR_NESTED },
	[OVS_KEY_ATTR_PRIORITY]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_IN_PORT]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_SKB_MARK]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_ETHERNET]	 = { .len = sizeof(struct ovs_key_ethernet) },
	[OVS_KEY_ATTR_VLAN]	 = { .len = sizeof(__be16) },
	[OVS_KEY_ATTR_ETHERTYPE] = { .len = sizeof(__be16) },
	[OVS_KEY_ATTR_IPV4]	 = { .len = sizeof(struct ovs_key_ipv4) },
	[OVS_KEY_ATTR_IPV6]	 = { .len = sizeof(struct ovs_key_ipv6) },
	[OVS_KEY_ATTR_TCP]	 = { .len = sizeof(struct ovs_key_tcp) },
	[OVS_KEY_ATTR_TCP_FLAGS] = { .len = sizeof(__be16) },
	[OVS_KEY_ATTR_UDP]	 = { .len = sizeof(struct ovs_key_udp) },
	[OVS_KEY_ATTR_SCTP]	 = { .len = sizeof(struct ovs_key_sctp) },
	[OVS_KEY_ATTR_ICMP]	 = { .len = sizeof(struct ovs_key_icmp) },
	[OVS_KEY_ATTR_ICMPV6]	 = { .len = sizeof(struct ovs_key_icmpv6) },
	[OVS_KEY_ATTR_ARP]	 = { .len = sizeof(struct ovs_key_arp) },
	[OVS_KEY_ATTR_ND]	 = { .len = sizeof(struct ovs_key_nd) },
	[OVS_KEY_ATTR_RECIRC_ID] = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_DP_HASH]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_TUNNEL]	 = { .len = OVS_ATTR_NESTED,
				     .next = ovs_tunnel_key_lens, },
	[OVS_KEY_ATTR_MPLS]	 = { .len = sizeof(struct ovs_key_mpls) },
	[OVS_KEY_ATTR_CT_STATE]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_CT_ZONE]	 = { .len = sizeof(u16) },
	[OVS_KEY_ATTR_CT_MARK]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
	[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4] = {
		.len = sizeof(struct ovs_key_ct_tuple_ipv4) },
	[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6] = {
		.len = sizeof(struct ovs_key_ct_tuple_ipv6) },
	[OVS_KEY_ATTR_NSH]	 = { .len = OVS_ATTR_NESTED,
				     .next = ovs_nsh_key_attr_lens, },
};

static bool check_attr_len(unsigned int attr_len, unsigned int expected_len)
{
	return expected_len == attr_len ||
	       expected_len == OVS_ATTR_NESTED ||
	       expected_len == OVS_ATTR_VARIABLE;
}

static bool is_all_zero(const u8 *fp, size_t size)
{
	int i;

	if (!fp)
		return false;

	for (i = 0; i < size; i++)
		if (fp[i])
			return false;

	return true;
}

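/* Walks the nested attributes in 'attr', validating each type and length
 * against ovs_key_lens and storing a pointer to each attribute in 'a'.
 * The bits of '*attrsp' record which attribute types were seen.  When 'nz'
 * is true (mask parsing), attributes whose payload is all zeroes are
 * skipped, since an all-zero mask is equivalent to the attribute being
 * absent.
 */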
static int __parse_flow_nlattrs(const struct nlattr *attr,
				const struct nlattr *a[],
				u64 *attrsp, bool log, bool nz)
{
	const struct nlattr *nla;
	u64 attrs;
	int rem;

	attrs = *attrsp;
	nla_for_each_nested(nla, attr, rem) {
		u16 type = nla_type(nla);
		int expected_len;

		if (type > OVS_KEY_ATTR_MAX) {
			OVS_NLERR(log, "Key type %d is out of range max %d",
				  type, OVS_KEY_ATTR_MAX);
			return -EINVAL;
		}

		if (attrs & (1ULL << type)) {
			OVS_NLERR(log, "Duplicate key (type %d).", type);
			return -EINVAL;
		}

		expected_len = ovs_key_lens[type].len;
		if (!check_attr_len(nla_len(nla), expected_len)) {
			OVS_NLERR(log, "Key %d has unexpected len %d expected %d",
				  type, nla_len(nla), expected_len);
			return -EINVAL;
		}

		if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
			attrs |= 1ULL << type;
			a[type] = nla;
		}
	}
	if (rem) {
		OVS_NLERR(log, "Message has %d unknown bytes.", rem);
		return -EINVAL;
	}

	*attrsp = attrs;
	return 0;
}

static int parse_flow_mask_nlattrs(const struct nlattr *attr,
				   const struct nlattr *a[], u64 *attrsp,
				   bool log)
{
	return __parse_flow_nlattrs(attr, a, attrsp, log, true);
}

int parse_flow_nlattrs(const struct nlattr *attr, const struct nlattr *a[],
		       u64 *attrsp, bool log)
{
	return __parse_flow_nlattrs(attr, a, attrsp, log, false);
}

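/* Copies Geneve TLV options from the netlink attribute into the tunnel
 * options area of the flow key (or mask), recording the option length so
 * that packets carrying extra options do not silently match this flow.
 */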
static int genev_tun_opt_from_nlattr(const struct nlattr *a,
				     struct sw_flow_match *match, bool is_mask,
				     bool log)
{
	unsigned long opt_key_offset;

	if (nla_len(a) > sizeof(match->key->tun_opts)) {
		OVS_NLERR(log, "Geneve option length err (len %d, max %zu).",
			  nla_len(a), sizeof(match->key->tun_opts));
		return -EINVAL;
	}

	if (nla_len(a) % 4 != 0) {
		OVS_NLERR(log, "Geneve opt len %d is not a multiple of 4.",
			  nla_len(a));
		return -EINVAL;
	}

	/* We need to record the length of the options passed
	 * down, otherwise packets with the same format but
	 * additional options will be silently matched.
	 */
	if (!is_mask) {
		SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a),
				false);
	} else {
		/* This is somewhat unusual because it looks at
		 * both the key and mask while parsing the
		 * attributes (and by extension assumes the key
		 * is parsed first). Normally, we would verify
		 * that each is the correct length and that the
		 * attributes line up in the validate function.
		 * However, that is difficult because this is
		 * variable length and we won't have the
		 * information later.
		 */
		if (match->key->tun_opts_len != nla_len(a)) {
			OVS_NLERR(log, "Geneve option len %d != mask len %d",
				  match->key->tun_opts_len, nla_len(a));
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
	}

	opt_key_offset = TUN_METADATA_OFFSET(nla_len(a));
	SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
				  nla_len(a), is_mask);
	return 0;
}

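/* Parses the nested OVS_VXLAN_EXT_* attributes (currently only the GBP
 * extension) into a struct vxlan_metadata and stores it in the tunnel
 * options area of the flow key or mask.
 */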
static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
				     struct sw_flow_match *match, bool is_mask,
				     bool log)
{
	struct nlattr *a;
	int rem;
	unsigned long opt_key_offset;
	struct vxlan_metadata opts;

	BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));

	memset(&opts, 0, sizeof(opts));
	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);

		if (type > OVS_VXLAN_EXT_MAX) {
			OVS_NLERR(log, "VXLAN extension %d out of range max %d",
				  type, OVS_VXLAN_EXT_MAX);
			return -EINVAL;
		}

		if (!check_attr_len(nla_len(a),
				    ovs_vxlan_ext_key_lens[type].len)) {
			OVS_NLERR(log, "VXLAN extension %d has unexpected len %d expected %d",
				  type, nla_len(a),
				  ovs_vxlan_ext_key_lens[type].len);
			return -EINVAL;
		}

		switch (type) {
		case OVS_VXLAN_EXT_GBP:
			opts.gbp = nla_get_u32(a);
			break;
		default:
			OVS_NLERR(log, "Unknown VXLAN extension attribute %d",
				  type);
			return -EINVAL;
		}
	}
	if (rem) {
		OVS_NLERR(log, "VXLAN extension message has %d unknown bytes.",
			  rem);
		return -EINVAL;
	}

	if (!is_mask)
		SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false);
	else
		SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);

	opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
	SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
				  is_mask);
	return 0;
}

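/* Copies the variable-length ERSPAN metadata from the netlink attribute
 * into the tunnel options area of the flow key or mask.
 */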
static int erspan_tun_opt_from_nlattr(const struct nlattr *a,
				      struct sw_flow_match *match, bool is_mask,
				      bool log)
{
	unsigned long opt_key_offset;

	BUILD_BUG_ON(sizeof(struct erspan_metadata) >
		     sizeof(match->key->tun_opts));

	if (nla_len(a) > sizeof(match->key->tun_opts)) {
		OVS_NLERR(log, "ERSPAN option length err (len %d, max %zu).",
			  nla_len(a), sizeof(match->key->tun_opts));
		return -EINVAL;
	}

	if (!is_mask)
		SW_FLOW_KEY_PUT(match, tun_opts_len,
				sizeof(struct erspan_metadata), false);
	else
		SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);

	opt_key_offset = TUN_METADATA_OFFSET(nla_len(a));
	SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
				  nla_len(a), is_mask);
	return 0;
}

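/* Parses the nested OVS_TUNNEL_KEY_ATTR_* attributes into the tun_key of
 * the flow key or mask, enforcing that exactly one address family is used
 * and that a destination address and TTL are present in the key.  Returns
 * the attribute type of the tunnel option block that was parsed (Geneve,
 * VXLAN or ERSPAN), 0 if there were no options, or a negative errno.
 */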
static int ip_tun_from_nlattr(const struct nlattr *attr,
			      struct sw_flow_match *match, bool is_mask,
			      bool log)
{
	bool ttl = false, ipv4 = false, ipv6 = false;
	__be16 tun_flags = 0;
	int opts_type = 0;
	struct nlattr *a;
	int rem;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		int err;

		if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
			OVS_NLERR(log, "Tunnel attr %d out of range max %d",
				  type, OVS_TUNNEL_KEY_ATTR_MAX);
			return -EINVAL;
		}

		if (!check_attr_len(nla_len(a),
				    ovs_tunnel_key_lens[type].len)) {
			OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d",
				  type, nla_len(a), ovs_tunnel_key_lens[type].len);
			return -EINVAL;
		}

		switch (type) {
		case OVS_TUNNEL_KEY_ATTR_ID:
			SW_FLOW_KEY_PUT(match, tun_key.tun_id,
					nla_get_be64(a), is_mask);
			tun_flags |= TUNNEL_KEY;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
					nla_get_in_addr(a), is_mask);
			ipv4 = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
			SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
					nla_get_in_addr(a), is_mask);
			ipv4 = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
					nla_get_in6_addr(a), is_mask);
			ipv6 = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV6_DST:
			SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
					nla_get_in6_addr(a), is_mask);
			ipv6 = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_TOS:
			SW_FLOW_KEY_PUT(match, tun_key.tos,
					nla_get_u8(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TTL:
			SW_FLOW_KEY_PUT(match, tun_key.ttl,
					nla_get_u8(a), is_mask);
			ttl = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
			tun_flags |= TUNNEL_DONT_FRAGMENT;
			break;
		case OVS_TUNNEL_KEY_ATTR_CSUM:
			tun_flags |= TUNNEL_CSUM;
			break;
		case OVS_TUNNEL_KEY_ATTR_TP_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.tp_src,
					nla_get_be16(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TP_DST:
			SW_FLOW_KEY_PUT(match, tun_key.tp_dst,
					nla_get_be16(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_OAM:
			tun_flags |= TUNNEL_OAM;
			break;
		case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
			if (opts_type) {
				OVS_NLERR(log, "Multiple metadata blocks provided");
				return -EINVAL;
			}

			err = genev_tun_opt_from_nlattr(a, match, is_mask, log);
			if (err)
				return err;

			tun_flags |= TUNNEL_GENEVE_OPT;
			opts_type = type;
			break;
		case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
			if (opts_type) {
				OVS_NLERR(log, "Multiple metadata blocks provided");
				return -EINVAL;
			}

			err = vxlan_tun_opt_from_nlattr(a, match, is_mask, log);
			if (err)
				return err;

			tun_flags |= TUNNEL_VXLAN_OPT;
			opts_type = type;
			break;
		case OVS_TUNNEL_KEY_ATTR_PAD:
			break;
		case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
			if (opts_type) {
				OVS_NLERR(log, "Multiple metadata blocks provided");
				return -EINVAL;
			}

			err = erspan_tun_opt_from_nlattr(a, match, is_mask,
							 log);
			if (err)
				return err;

			tun_flags |= TUNNEL_ERSPAN_OPT;
			opts_type = type;
			break;
		default:
			OVS_NLERR(log, "Unknown IP tunnel attribute %d",
				  type);
			return -EINVAL;
		}
	}

	SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
	if (is_mask)
		SW_FLOW_KEY_MEMSET_FIELD(match, tun_proto, 0xff, true);
	else
		SW_FLOW_KEY_PUT(match, tun_proto, ipv6 ? AF_INET6 : AF_INET,
				false);

	if (rem > 0) {
		OVS_NLERR(log, "IP tunnel attribute has %d unknown bytes.",
			  rem);
		return -EINVAL;
	}

	if (ipv4 && ipv6) {
		OVS_NLERR(log, "Mixed IPv4 and IPv6 tunnel attributes");
		return -EINVAL;
	}

	if (!is_mask) {
		if (!ipv4 && !ipv6) {
			OVS_NLERR(log, "IP tunnel dst address not specified");
			return -EINVAL;
		}
		if (ipv4 && !match->key->tun_key.u.ipv4.dst) {
			OVS_NLERR(log, "IPv4 tunnel dst address is zero");
			return -EINVAL;
		}
		if (ipv6 && ipv6_addr_any(&match->key->tun_key.u.ipv6.dst)) {
			OVS_NLERR(log, "IPv6 tunnel dst address is zero");
			return -EINVAL;
		}

		if (!ttl) {
			OVS_NLERR(log, "IP tunnel TTL not specified.");
			return -EINVAL;
		}
	}

	return opts_type;
}

static int vxlan_opt_to_nlattr(struct sk_buff *skb,
			       const void *tun_opts, int swkey_tun_opts_len)
{
	const struct vxlan_metadata *opts = tun_opts;
	struct nlattr *nla;

	nla = nla_nest_start(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
	if (!nla)
		return -EMSGSIZE;

	if (nla_put_u32(skb, OVS_VXLAN_EXT_GBP, opts->gbp) < 0)
		return -EMSGSIZE;

	nla_nest_end(skb, nla);
	return 0;
}

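/* Emits the OVS_TUNNEL_KEY_ATTR_* attributes describing 'output' (and any
 * tunnel options) into 'skb'.  Returns 0 on success or -EMSGSIZE if the
 * skb runs out of room.
 */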
static int __ip_tun_to_nlattr(struct sk_buff *skb,
			      const struct ip_tunnel_key *output,
			      const void *tun_opts, int swkey_tun_opts_len,
			      unsigned short tun_proto)
{
	if (output->tun_flags & TUNNEL_KEY &&
	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id,
			 OVS_TUNNEL_KEY_ATTR_PAD))
		return -EMSGSIZE;
	switch (tun_proto) {
	case AF_INET:
		if (output->u.ipv4.src &&
		    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
				    output->u.ipv4.src))
			return -EMSGSIZE;
		if (output->u.ipv4.dst &&
		    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
				    output->u.ipv4.dst))
			return -EMSGSIZE;
		break;
	case AF_INET6:
		if (!ipv6_addr_any(&output->u.ipv6.src) &&
		    nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_SRC,
				     &output->u.ipv6.src))
			return -EMSGSIZE;
		if (!ipv6_addr_any(&output->u.ipv6.dst) &&
		    nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_DST,
				     &output->u.ipv6.dst))
			return -EMSGSIZE;
		break;
	}
	if (output->tos &&
	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
		return -EMSGSIZE;
	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_CSUM) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
		return -EMSGSIZE;
	if (output->tp_src &&
	    nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_SRC, output->tp_src))
		return -EMSGSIZE;
	if (output->tp_dst &&
	    nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_OAM) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
		return -EMSGSIZE;
	if (swkey_tun_opts_len) {
		if (output->tun_flags & TUNNEL_GENEVE_OPT &&
		    nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
			    swkey_tun_opts_len, tun_opts))
			return -EMSGSIZE;
		else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
			 vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
			return -EMSGSIZE;
		else if (output->tun_flags & TUNNEL_ERSPAN_OPT &&
			 nla_put(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
				 swkey_tun_opts_len, tun_opts))
			return -EMSGSIZE;
	}

	return 0;
}

static int ip_tun_to_nlattr(struct sk_buff *skb,
			    const struct ip_tunnel_key *output,
			    const void *tun_opts, int swkey_tun_opts_len,
			    unsigned short tun_proto)
{
	struct nlattr *nla;
	int err;

	nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
	if (!nla)
		return -EMSGSIZE;

	err = __ip_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len,
				 tun_proto);
	if (err)
		return err;

	nla_nest_end(skb, nla);
	return 0;
}

int ovs_nla_put_tunnel_info(struct sk_buff *skb,
			    struct ip_tunnel_info *tun_info)
{
	return __ip_tun_to_nlattr(skb, &tun_info->key,
				  ip_tunnel_info_opts(tun_info),
				  tun_info->options_len,
				  ip_tunnel_info_af(tun_info));
}

static int encode_vlan_from_nlattrs(struct sw_flow_match *match,
				    const struct nlattr *a[],
				    bool is_mask, bool inner)
{
	__be16 tci = 0;
	__be16 tpid = 0;

	if (a[OVS_KEY_ATTR_VLAN])
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);

	if (a[OVS_KEY_ATTR_ETHERTYPE])
		tpid = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);

	if (likely(!inner)) {
		SW_FLOW_KEY_PUT(match, eth.vlan.tpid, tpid, is_mask);
		SW_FLOW_KEY_PUT(match, eth.vlan.tci, tci, is_mask);
	} else {
		SW_FLOW_KEY_PUT(match, eth.cvlan.tpid, tpid, is_mask);
		SW_FLOW_KEY_PUT(match, eth.cvlan.tci, tci, is_mask);
	}
	return 0;
}

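/* Checks that the VLAN-related key attributes are consistent.  Returns 1
 * if the attributes describe a (possibly nested) VLAN frame, 0 if they do
 * not describe a VLAN at all, or a negative errno on malformed input.
 */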
static int validate_vlan_from_nlattrs(const struct sw_flow_match *match,
				      u64 key_attrs, bool inner,
				      const struct nlattr **a, bool log)
{
	__be16 tci = 0;

	if (!((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
	      (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
	      eth_type_vlan(nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE])))) {
		/* Not a VLAN. */
		return 0;
	}

	if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
	      (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
		OVS_NLERR(log, "Invalid %s frame", (inner) ? "C-VLAN" : "VLAN");
		return -EINVAL;
	}

	if (a[OVS_KEY_ATTR_VLAN])
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);

	if (!(tci & htons(VLAN_TAG_PRESENT))) {
		if (tci) {
			OVS_NLERR(log, "%s TCI does not have VLAN_TAG_PRESENT bit set.",
				  (inner) ? "C-VLAN" : "VLAN");
			return -EINVAL;
		} else if (nla_len(a[OVS_KEY_ATTR_ENCAP])) {
			/* Corner case for truncated VLAN header. */
			OVS_NLERR(log, "Truncated %s header has non-zero encap attribute.",
				  (inner) ? "C-VLAN" : "VLAN");
			return -EINVAL;
		}
	}

	return 1;
}

static int validate_vlan_mask_from_nlattrs(const struct sw_flow_match *match,
					   u64 key_attrs, bool inner,
					   const struct nlattr **a, bool log)
{
	__be16 tci = 0;
	__be16 tpid = 0;
	bool encap_valid = !!(match->key->eth.vlan.tci &
			      htons(VLAN_TAG_PRESENT));
	bool i_encap_valid = !!(match->key->eth.cvlan.tci &
				htons(VLAN_TAG_PRESENT));

	if (!(key_attrs & (1 << OVS_KEY_ATTR_ENCAP))) {
		/* Not a VLAN. */
		return 0;
	}

	if ((!inner && !encap_valid) || (inner && !i_encap_valid)) {
		OVS_NLERR(log, "Encap mask attribute is set for non-%s frame.",
			  (inner) ? "C-VLAN" : "VLAN");
		return -EINVAL;
	}

	if (a[OVS_KEY_ATTR_VLAN])
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);

	if (a[OVS_KEY_ATTR_ETHERTYPE])
		tpid = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);

	if (tpid != htons(0xffff)) {
		OVS_NLERR(log, "Must have an exact match on %s TPID (mask=%x).",
			  (inner) ? "C-VLAN" : "VLAN", ntohs(tpid));
		return -EINVAL;
	}
	if (!(tci & htons(VLAN_TAG_PRESENT))) {
		OVS_NLERR(log, "%s TCI mask does not have exact match for VLAN_TAG_PRESENT bit.",
			  (inner) ? "C-VLAN" : "VLAN");
		return -EINVAL;
	}

	return 1;
}

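/* Validates and encodes one level of VLAN tagging (outer or inner), then
 * re-runs attribute parsing on the nested OVS_KEY_ATTR_ENCAP payload so
 * that the encapsulated attributes replace the VLAN-level ones in 'a'.
 */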
static int __parse_vlan_from_nlattrs(struct sw_flow_match *match,
				     u64 *key_attrs, bool inner,
				     const struct nlattr **a, bool is_mask,
				     bool log)
{
	int err;
	const struct nlattr *encap;

	if (!is_mask)
		err = validate_vlan_from_nlattrs(match, *key_attrs, inner,
						 a, log);
	else
		err = validate_vlan_mask_from_nlattrs(match, *key_attrs, inner,
						      a, log);
	if (err <= 0)
		return err;

	err = encode_vlan_from_nlattrs(match, a, is_mask, inner);
	if (err)
		return err;

	*key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
	*key_attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
	*key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);

	encap = a[OVS_KEY_ATTR_ENCAP];

	if (!is_mask)
		err = parse_flow_nlattrs(encap, a, key_attrs, log);
	else
		err = parse_flow_mask_nlattrs(encap, a, key_attrs, log);

	return err;
}

static int parse_vlan_from_nlattrs(struct sw_flow_match *match,
				   u64 *key_attrs, const struct nlattr **a,
				   bool is_mask, bool log)
{
	int err;
	bool encap_valid = false;

	err = __parse_vlan_from_nlattrs(match, key_attrs, false, a,
					is_mask, log);
	if (err)
		return err;

	encap_valid = !!(match->key->eth.vlan.tci & htons(VLAN_TAG_PRESENT));
	if (encap_valid) {
		err = __parse_vlan_from_nlattrs(match, key_attrs, true, a,
						is_mask, log);
		if (err)
			return err;
	}

	return 0;
}

static int parse_eth_type_from_nlattrs(struct sw_flow_match *match,
				       u64 *attrs, const struct nlattr **a,
				       bool is_mask, bool log)
{
	__be16 eth_type;

	eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
	if (is_mask) {
		/* Always exact match EtherType. */
		eth_type = htons(0xffff);
	} else if (!eth_proto_is_802_3(eth_type)) {
		OVS_NLERR(log, "EtherType %x is less than min %x",
			  ntohs(eth_type), ETH_P_802_3_MIN);
		return -EINVAL;
	}

	SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
	*attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
	return 0;
}

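/* Extracts the packet metadata attributes (datapath hash, recirculation
 * id, priority, input port, skb mark, tunnel key and conntrack state)
 * from 'a' into the flow key or mask, clearing the corresponding bits in
 * '*attrs' as they are consumed.
 */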
static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
				 u64 *attrs, const struct nlattr **a,
				 bool is_mask, bool log)
{
	u8 mac_proto = MAC_PROTO_ETHERNET;

	if (*attrs & (1ULL << OVS_KEY_ATTR_DP_HASH)) {
		u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]);

		SW_FLOW_KEY_PUT(match, ovs_flow_hash, hash_val, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_DP_HASH);
	}

	if (*attrs & (1ULL << OVS_KEY_ATTR_RECIRC_ID)) {
		u32 recirc_id = nla_get_u32(a[OVS_KEY_ATTR_RECIRC_ID]);

		SW_FLOW_KEY_PUT(match, recirc_id, recirc_id, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_RECIRC_ID);
	}

	if (*attrs & (1ULL << OVS_KEY_ATTR_PRIORITY)) {
		SW_FLOW_KEY_PUT(match, phy.priority,
				nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_PRIORITY);
	}

	if (*attrs & (1ULL << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);

		if (is_mask) {
			in_port = 0xffffffff; /* Always exact match in_port. */
		} else if (in_port >= DP_MAX_PORTS) {
			OVS_NLERR(log, "Port %d exceeds max allowable %d",
				  in_port, DP_MAX_PORTS);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_IN_PORT);
	} else if (!is_mask) {
		SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
	}

	if (*attrs & (1ULL << OVS_KEY_ATTR_SKB_MARK)) {
		uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);

		SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_SKB_MARK);
	}
	if (*attrs & (1ULL << OVS_KEY_ATTR_TUNNEL)) {
		if (ip_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
				       is_mask, log) < 0)
			return -EINVAL;
		*attrs &= ~(1ULL << OVS_KEY_ATTR_TUNNEL);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) &&
	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
		u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]);

		if (ct_state & ~CT_SUPPORTED_MASK) {
			OVS_NLERR(log, "ct_state flags %08x unsupported",
				  ct_state);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, ct_state, ct_state, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_CT_ZONE) &&
	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_ZONE)) {
		u16 ct_zone = nla_get_u16(a[OVS_KEY_ATTR_CT_ZONE]);

		SW_FLOW_KEY_PUT(match, ct_zone, ct_zone, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ZONE);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_CT_MARK) &&
	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_MARK)) {
		u32 mark = nla_get_u32(a[OVS_KEY_ATTR_CT_MARK]);

		SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_CT_LABELS) &&
	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABELS)) {
		const struct ovs_key_ct_labels *cl;

		cl = nla_data(a[OVS_KEY_ATTR_CT_LABELS]);
		SW_FLOW_KEY_MEMCPY(match, ct.labels, cl->ct_labels,
				   sizeof(*cl), is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABELS);
	}
	if (*attrs & (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)) {
		const struct ovs_key_ct_tuple_ipv4 *ct;

		ct = nla_data(a[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4]);

		SW_FLOW_KEY_PUT(match, ipv4.ct_orig.src, ct->ipv4_src, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.ct_orig.dst, ct->ipv4_dst, is_mask);
		SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask);
		SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask);
		SW_FLOW_KEY_PUT(match, ct_orig_proto, ct->ipv4_proto, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4);
	}
	if (*attrs & (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)) {
		const struct ovs_key_ct_tuple_ipv6 *ct;

		ct = nla_data(a[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6]);

		SW_FLOW_KEY_MEMCPY(match, ipv6.ct_orig.src, &ct->ipv6_src,
				   sizeof(match->key->ipv6.ct_orig.src),
				   is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.ct_orig.dst, &ct->ipv6_dst,
				   sizeof(match->key->ipv6.ct_orig.dst),
				   is_mask);
		SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask);
		SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask);
		SW_FLOW_KEY_PUT(match, ct_orig_proto, ct->ipv6_proto, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
	}

	/* For layer 3 packets the Ethernet type is provided
	 * and treated as metadata but no MAC addresses are provided.
	 */
	if (!(*attrs & (1ULL << OVS_KEY_ATTR_ETHERNET)) &&
	    (*attrs & (1ULL << OVS_KEY_ATTR_ETHERTYPE)))
		mac_proto = MAC_PROTO_NONE;

	/* Always exact match mac_proto */
	SW_FLOW_KEY_PUT(match, mac_proto, is_mask ? 0xff : mac_proto, is_mask);

	if (mac_proto == MAC_PROTO_NONE)
		return parse_eth_type_from_nlattrs(match, attrs, a, is_mask,
						   log);

	return 0;
}

int nsh_hdr_from_nlattr(const struct nlattr *attr,
			struct nshhdr *nh, size_t size)
{
	struct nlattr *a;
	int rem;
	u8 flags = 0;
	u8 ttl = 0;
	int mdlen = 0;

	/* validate_nsh() has already checked this, so we needn't duplicate
	 * the check here.
	 */
	if (size < NSH_BASE_HDR_LEN)
		return -ENOBUFS;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);

		switch (type) {
		case OVS_NSH_KEY_ATTR_BASE: {
			const struct ovs_nsh_key_base *base = nla_data(a);

			flags = base->flags;
			ttl = base->ttl;
			nh->np = base->np;
			nh->mdtype = base->mdtype;
			nh->path_hdr = base->path_hdr;
			break;
		}
		case OVS_NSH_KEY_ATTR_MD1:
			mdlen = nla_len(a);
			if (mdlen > size - NSH_BASE_HDR_LEN)
				return -ENOBUFS;
			memcpy(&nh->md1, nla_data(a), mdlen);
			break;

		case OVS_NSH_KEY_ATTR_MD2:
			mdlen = nla_len(a);
			if (mdlen > size - NSH_BASE_HDR_LEN)
				return -ENOBUFS;
			memcpy(&nh->md2, nla_data(a), mdlen);
			break;

		default:
			return -EINVAL;
		}
	}

	/* nsh header length = NSH_BASE_HDR_LEN + mdlen */
	nh->ver_flags_ttl_len = 0;
	nsh_set_flags_ttl_len(nh, flags, ttl, NSH_BASE_HDR_LEN + mdlen);

	return 0;
}

int nsh_key_from_nlattr(const struct nlattr *attr,
			struct ovs_key_nsh *nsh, struct ovs_key_nsh *nsh_mask)
{
	struct nlattr *a;
	int rem;

	/* validate_nsh() has already checked this, so we needn't duplicate
	 * the check here.
	 */
	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);

		switch (type) {
		case OVS_NSH_KEY_ATTR_BASE: {
			const struct ovs_nsh_key_base *base = nla_data(a);
			const struct ovs_nsh_key_base *base_mask = base + 1;

			nsh->base = *base;
			nsh_mask->base = *base_mask;
			break;
		}
		case OVS_NSH_KEY_ATTR_MD1: {
			const struct ovs_nsh_key_md1 *md1 = nla_data(a);
			const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;

			memcpy(nsh->context, md1->context, sizeof(*md1));
			memcpy(nsh_mask->context, md1_mask->context,
			       sizeof(*md1_mask));
			break;
		}
		case OVS_NSH_KEY_ATTR_MD2:
			/* Not supported yet */
			return -ENOTSUPP;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

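/* Parses the nested OVS_NSH_KEY_ATTR_* attributes into the NSH portion of
 * the flow key or mask.  When called for a push_nsh action ('is_push_nsh')
 * it additionally requires the base header plus exactly one metadata
 * block, and MD type 2 metadata is only accepted in that case.
 */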
static int nsh_key_put_from_nlattr(const struct nlattr *attr,
				   struct sw_flow_match *match, bool is_mask,
				   bool is_push_nsh, bool log)
{
	struct nlattr *a;
	int rem;
	bool has_base = false;
	bool has_md1 = false;
	bool has_md2 = false;
	u8 mdtype = 0;
	int mdlen = 0;

	if (WARN_ON(is_push_nsh && is_mask))
		return -EINVAL;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		int i;

		if (type > OVS_NSH_KEY_ATTR_MAX) {
			OVS_NLERR(log, "nsh attr %d is out of range max %d",
				  type, OVS_NSH_KEY_ATTR_MAX);
			return -EINVAL;
		}

		if (!check_attr_len(nla_len(a),
				    ovs_nsh_key_attr_lens[type].len)) {
			OVS_NLERR(
			    log,
			    "nsh attr %d has unexpected len %d expected %d",
			    type,
			    nla_len(a),
			    ovs_nsh_key_attr_lens[type].len
			);
			return -EINVAL;
		}

		switch (type) {
		case OVS_NSH_KEY_ATTR_BASE: {
			const struct ovs_nsh_key_base *base = nla_data(a);

			has_base = true;
			mdtype = base->mdtype;
			SW_FLOW_KEY_PUT(match, nsh.base.flags,
					base->flags, is_mask);
			SW_FLOW_KEY_PUT(match, nsh.base.ttl,
					base->ttl, is_mask);
			SW_FLOW_KEY_PUT(match, nsh.base.mdtype,
					base->mdtype, is_mask);
			SW_FLOW_KEY_PUT(match, nsh.base.np,
					base->np, is_mask);
			SW_FLOW_KEY_PUT(match, nsh.base.path_hdr,
					base->path_hdr, is_mask);
			break;
		}
		case OVS_NSH_KEY_ATTR_MD1: {
			const struct ovs_nsh_key_md1 *md1 = nla_data(a);

			has_md1 = true;
			for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++)
				SW_FLOW_KEY_PUT(match, nsh.context[i],
						md1->context[i], is_mask);
			break;
		}
		case OVS_NSH_KEY_ATTR_MD2:
			if (!is_push_nsh) /* Not supported MD type 2 yet */
				return -ENOTSUPP;

			has_md2 = true;
			mdlen = nla_len(a);
			if (mdlen > NSH_CTX_HDRS_MAX_LEN || mdlen <= 0) {
				OVS_NLERR(
				    log,
				    "Invalid MD length %d for MD type %d",
				    mdlen,
				    mdtype
				);
				return -EINVAL;
			}
			break;
		default:
			OVS_NLERR(log, "Unknown nsh attribute %d",
				  type);
			return -EINVAL;
		}
	}

	if (rem > 0) {
		OVS_NLERR(log, "nsh attribute has %d unknown bytes.", rem);
		return -EINVAL;
	}

	if (has_md1 && has_md2) {
		OVS_NLERR(
		    1,
		    "invalid nsh attribute: md1 and md2 are exclusive."
		);
		return -EINVAL;
	}

	if (!is_mask) {
		if ((has_md1 && mdtype != NSH_M_TYPE1) ||
		    (has_md2 && mdtype != NSH_M_TYPE2)) {
			OVS_NLERR(1, "nsh attribute has unmatched MD type %d.",
				  mdtype);
			return -EINVAL;
		}

		if (is_push_nsh &&
		    (!has_base || (!has_md1 && !has_md2))) {
			OVS_NLERR(
			    1,
			    "push_nsh: missing base or metadata attributes"
			);
			return -EINVAL;
		}
	}

	return 0;
}

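/* Fills in the packet-content portion of the flow key or mask (Ethernet,
 * IPv4/IPv6, ARP, NSH, MPLS and transport headers) from the parsed
 * attribute array, after the metadata attributes have been consumed.
 */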
static int ovs_key_from_nlattrs(struct net *net, struct sw_flow_match *match,
				u64 attrs, const struct nlattr **a,
				bool is_mask, bool log)
{
	int err;

	err = metadata_from_nlattrs(net, match, &attrs, a, is_mask, log);
	if (err)
		return err;

	if (attrs & (1ULL << OVS_KEY_ATTR_ETHERNET)) {
		const struct ovs_key_ethernet *eth_key;

		eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
		SW_FLOW_KEY_MEMCPY(match, eth.src,
				   eth_key->eth_src, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, eth.dst,
				   eth_key->eth_dst, ETH_ALEN, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_ETHERNET);

		if (attrs & (1ULL << OVS_KEY_ATTR_VLAN)) {
			/* VLAN attribute is always parsed before getting here since it
			 * may occur multiple times.
			 */
			OVS_NLERR(log, "VLAN attribute unexpected.");
			return -EINVAL;
		}

		if (attrs & (1ULL << OVS_KEY_ATTR_ETHERTYPE)) {
			err = parse_eth_type_from_nlattrs(match, &attrs, a, is_mask,
							  log);
			if (err)
				return err;
		} else if (!is_mask) {
			SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
		}
	} else if (!match->key->eth.type) {
		OVS_NLERR(log, "Either Ethernet header or EtherType is required.");
		return -EINVAL;
	}

	if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
		const struct ovs_key_ipv4 *ipv4_key;

		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR(log, "IPv4 frag type %d is out of range max %d",
				  ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv4_key->ipv4_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv4_key->ipv4_tos, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv4_key->ipv4_ttl, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv4_key->ipv4_frag, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				ipv4_key->ipv4_src, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				ipv4_key->ipv4_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR(log, "IPv6 frag type %d is out of range max %d",
				  ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}

		if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) {
			OVS_NLERR(log, "IPv6 flow label %x is out of range (max=%x)",
				  ntohl(ipv6_key->ipv6_label), (1 << 20) - 1);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, ipv6.label,
				ipv6_key->ipv6_label, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv6_key->ipv6_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv6_key->ipv6_tclass, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv6_key->ipv6_hlimit, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv6_key->ipv6_frag, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
				   ipv6_key->ipv6_src,
				   sizeof(match->key->ipv6.addr.src),
				   is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
				   ipv6_key->ipv6_dst,
				   sizeof(match->key->ipv6.addr.dst),
				   is_mask);

		attrs &= ~(1ULL << OVS_KEY_ATTR_IPV6);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_ARP)) {
		const struct ovs_key_arp *arp_key;

		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
			OVS_NLERR(log, "Unknown ARP opcode (opcode=%d).",
				  arp_key->arp_op);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				arp_key->arp_sip, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				arp_key->arp_tip, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ntohs(arp_key->arp_op), is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
				   arp_key->arp_sha, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
				   arp_key->arp_tha, ETH_ALEN, is_mask);

		attrs &= ~(1ULL << OVS_KEY_ATTR_ARP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_NSH)) {
		if (nsh_key_put_from_nlattr(a[OVS_KEY_ATTR_NSH], match,
					    is_mask, false, log) < 0)
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_NSH);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_MPLS)) {
		const struct ovs_key_mpls *mpls_key;

		mpls_key = nla_data(a[OVS_KEY_ATTR_MPLS]);
		SW_FLOW_KEY_PUT(match, mpls.top_lse,
				mpls_key->mpls_lse, is_mask);

		attrs &= ~(1ULL << OVS_KEY_ATTR_MPLS);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_TCP)) {
		const struct ovs_key_tcp *tcp_key;

		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		SW_FLOW_KEY_PUT(match, tp.src, tcp_key->tcp_src, is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst, tcp_key->tcp_dst, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_TCP);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_TCP_FLAGS)) {
		SW_FLOW_KEY_PUT(match, tp.flags,
				nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
				is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_TCP_FLAGS);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_UDP)) {
		const struct ovs_key_udp *udp_key;

		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		SW_FLOW_KEY_PUT(match, tp.src, udp_key->udp_src, is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst, udp_key->udp_dst, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_UDP);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_SCTP)) {
		const struct ovs_key_sctp *sctp_key;

		sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
		SW_FLOW_KEY_PUT(match, tp.src, sctp_key->sctp_src, is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst, sctp_key->sctp_dst, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_SCTP);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_ICMP)) {
		const struct ovs_key_icmp *icmp_key;

		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		SW_FLOW_KEY_PUT(match, tp.src,
				htons(icmp_key->icmp_type), is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst,
				htons(icmp_key->icmp_code), is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_ICMP);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_ICMPV6)) {
		const struct ovs_key_icmpv6 *icmpv6_key;

		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		SW_FLOW_KEY_PUT(match, tp.src,
				htons(icmpv6_key->icmpv6_type), is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst,
				htons(icmpv6_key->icmpv6_code), is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_ICMPV6);
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_ND)) {
		const struct ovs_key_nd *nd_key;

		nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
				   nd_key->nd_target,
				   sizeof(match->key->ipv6.nd.target),
				   is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
				   nd_key->nd_sll, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
				   nd_key->nd_tll, ETH_ALEN, is_mask);
		attrs &= ~(1ULL << OVS_KEY_ATTR_ND);
	}

	if (attrs != 0) {
		OVS_NLERR(log, "Unknown key attributes %llx",
			  (unsigned long long)attrs);
		return -EINVAL;
	}

	return 0;
}

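/* Recursively overwrites the payload of every attribute in the stream
 * with 'val', descending into nested attributes according to 'tbl'.  Used
 * to build an exact-match mask from a copy of the key attributes.
 */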
static void nlattr_set(struct nlattr *attr, u8 val,
		       const struct ovs_len_tbl *tbl)
{
	struct nlattr *nla;
	int rem;

	/* The nlattr stream should already have been validated */
	nla_for_each_nested(nla, attr, rem) {
		if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
			nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl);
		else
			memset(nla_data(nla), val, nla_len(nla));

		if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
			*(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
	}
}

static void mask_set_nlattr(struct nlattr *attr, u8 val)
{
	nlattr_set(attr, val, ovs_key_lens);
}

/**
 * ovs_nla_get_match - parses Netlink attributes into a flow key and
 * mask. In case 'nla_mask' is NULL, the flow is treated as exact match
 * flow. Otherwise, it is treated as a wildcarded flow, except the mask
 * does not include any don't care bit.
 * @net: Used to determine per-namespace field support.
 * @match: receives the extracted flow match information.
 * @nla_key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence. The fields should be those of the packet that triggered the
 * creation of this flow.
 * @nla_mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_*
 * Netlink attribute that specifies the mask field of the wildcarded flow.
 * @log: Boolean to allow kernel error logging. Normally true, but when
 * probing for feature compatibility this should be passed in as false to
 * suppress unnecessary error logging.
 */
int ovs_nla_get_match(struct net *net, struct sw_flow_match *match,
		      const struct nlattr *nla_key,
		      const struct nlattr *nla_mask,
		      bool log)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	struct nlattr *newmask = NULL;
	u64 key_attrs = 0;
	u64 mask_attrs = 0;
	int err;

	err = parse_flow_nlattrs(nla_key, a, &key_attrs, log);
	if (err)
		return err;

	err = parse_vlan_from_nlattrs(match, &key_attrs, a, false, log);
	if (err)
		return err;

	err = ovs_key_from_nlattrs(net, match, key_attrs, a, false, log);
	if (err)
		return err;

	if (match->mask) {
		if (!nla_mask) {
			/* Create an exact match mask. We need to set to 0xff
			 * all the 'match->mask' fields that have been touched
			 * in 'match->key'. We cannot simply memset
			 * 'match->mask', because padding bytes and fields not
			 * specified in 'match->key' should be left to 0.
			 * Instead, we use a stream of netlink attributes,
			 * copied from 'key' and set to 0xff.
			 * ovs_key_from_nlattrs() will take care of filling
			 * 'match->mask' appropriately.
			 */
			newmask = kmemdup(nla_key,
					  nla_total_size(nla_len(nla_key)),
					  GFP_KERNEL);
			if (!newmask)
				return -ENOMEM;

			mask_set_nlattr(newmask, 0xff);

			/* Userspace does not send tunnel attributes that
			 * are 0, but we should not wildcard them nonetheless.
			 */
			if (match->key->tun_proto)
				SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
							 0xff, true);

			nla_mask = newmask;
		}

		err = parse_flow_mask_nlattrs(nla_mask, a, &mask_attrs, log);
		if (err)
			goto free_newmask;

		SW_FLOW_KEY_PUT(match, eth.vlan.tci, htons(0xffff), true);
		SW_FLOW_KEY_PUT(match, eth.cvlan.tci, htons(0xffff), true);

		err = parse_vlan_from_nlattrs(match, &mask_attrs, a, true, log);
		if (err)
			goto free_newmask;

		err = ovs_key_from_nlattrs(net, match, mask_attrs, a, true,
					   log);
		if (err)
			goto free_newmask;
	}

	if (!match_validate(match, key_attrs, mask_attrs, log))
		err = -EINVAL;

free_newmask:
	kfree(newmask);
	return err;
}

static size_t get_ufid_len(const struct nlattr *attr, bool log)
{
	size_t len;

	if (!attr)
		return 0;

	len = nla_len(attr);
	if (len < 1 || len > MAX_UFID_LENGTH) {
		OVS_NLERR(log, "ufid size %u bytes exceeds the range (1, %d)",
			  nla_len(attr), MAX_UFID_LENGTH);
		return 0;
	}

	return len;
}

/* Initializes 'sfid->ufid', returning true if 'attr' contains a valid UFID,
 * or false otherwise.
 */
1846 bool ovs_nla_get_ufid(struct sw_flow_id *sfid, const struct nlattr *attr,
1847 bool log)
1848 {
1849 sfid->ufid_len = get_ufid_len(attr, log);
1850 if (sfid->ufid_len)
1851 memcpy(sfid->ufid, nla_data(attr), sfid->ufid_len);
1852
1853 return sfid->ufid_len;
1854 }
1855
1856 int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
1857 const struct sw_flow_key *key, bool log)
1858 {
1859 struct sw_flow_key *new_key;
1860
1861 if (ovs_nla_get_ufid(sfid, ufid, log))
1862 return 0;
1863
1864 /* If UFID was not provided, use unmasked key. */
1865 new_key = kmalloc(sizeof(*new_key), GFP_KERNEL);
1866 if (!new_key)
1867 return -ENOMEM;
1868 memcpy(new_key, key, sizeof(*key));
1869 sfid->unmasked_key = new_key;
1870
1871 return 0;
1872 }
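
/* Illustrative note: a flow identifier is either a userspace-supplied
 * UFID of 1..MAX_UFID_LENGTH bytes or, when none is given, a kmalloc'ed
 * copy of the unmasked flow key. A caller sketch (names hypothetical):
 *
 *	struct sw_flow_id id;
 *
 *	err = ovs_nla_get_identifier(&id, a[OVS_FLOW_ATTR_UFID], key, log);
 *	if (err)
 *		return err;
 *
 * The only possible error is -ENOMEM.
 */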
1873
1874 u32 ovs_nla_get_ufid_flags(const struct nlattr *attr)
1875 {
1876 return attr ? nla_get_u32(attr) : 0;
1877 }
1878
1879 /**
1880 * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
1881 * @net: Network namespace.
1882 * @key: Receives extracted in_port, priority, tun_key, skb_mark and conntrack
1883 * metadata.
1884 * @a: Array of netlink attributes holding parsed %OVS_KEY_ATTR_* Netlink
1885 * attributes.
1886 * @attrs: Bit mask for the netlink attributes included in @a.
1887 * @log: Boolean to allow kernel error logging. Normally true, but when
1888 * probing for feature compatibility this should be passed in as false to
1889 * suppress unnecessary error logging.
1890 *
1891 * This parses a series of Netlink attributes that form a flow key, which must
1892 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
1893 * get the metadata, that is, the parts of the flow key that cannot be
1894 * extracted from the packet itself.
1895 *
1896 * This must be called before the packet key fields are filled in 'key'.
1897 */
1899 int ovs_nla_get_flow_metadata(struct net *net,
1900 const struct nlattr *a[OVS_KEY_ATTR_MAX + 1],
1901 u64 attrs, struct sw_flow_key *key, bool log)
1902 {
1903 struct sw_flow_match match;
1904
1905 memset(&match, 0, sizeof(match));
1906 match.key = key;
1907
1908 key->ct_state = 0;
1909 key->ct_zone = 0;
1910 key->ct_orig_proto = 0;
1911 memset(&key->ct, 0, sizeof(key->ct));
1912 memset(&key->ipv4.ct_orig, 0, sizeof(key->ipv4.ct_orig));
1913 memset(&key->ipv6.ct_orig, 0, sizeof(key->ipv6.ct_orig));
1914
1915 key->phy.in_port = DP_MAX_PORTS;
1916
1917 return metadata_from_nlattrs(net, &match, &attrs, a, false, log);
1918 }
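
/* Caller sketch (hypothetical, based on the comment above): the
 * attributes must already be parsed into an array before the metadata
 * is extracted, and only afterwards may the packet fields be filled in:
 *
 *	err = parse_flow_nlattrs(attr, a, &attrs, log);
 *	if (err)
 *		return err;
 *	err = ovs_nla_get_flow_metadata(net, a, attrs, key, log);
 *	if (err)
 *		return err;
 *	... then extract the packet fields into 'key' ...
 */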
1919
1920 static int ovs_nla_put_vlan(struct sk_buff *skb, const struct vlan_head *vh,
1921 bool is_mask)
1922 {
1923 __be16 eth_type = !is_mask ? vh->tpid : htons(0xffff);
1924
1925 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
1926 nla_put_be16(skb, OVS_KEY_ATTR_VLAN, vh->tci))
1927 return -EMSGSIZE;
1928 return 0;
1929 }
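
/* Attribute layout sketch: for a single-tagged Ethernet flow,
 * __ovs_nla_put_key() below emits
 *
 *	OVS_KEY_ATTR_ETHERTYPE = TPID (0xffff in a mask)
 *	OVS_KEY_ATTR_VLAN      = TCI
 *	OVS_KEY_ATTR_ENCAP {
 *		OVS_KEY_ATTR_ETHERTYPE = inner ethertype
 *		... inner key attributes ...
 *	}
 *
 * A second (customer) tag adds one more ETHERTYPE/VLAN pair and a
 * nested OVS_KEY_ATTR_ENCAP inside the first.
 */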
1930
1931 static int nsh_key_to_nlattr(const struct ovs_key_nsh *nsh, bool is_mask,
1932 struct sk_buff *skb)
1933 {
1934 struct nlattr *start;
1935
1936 start = nla_nest_start(skb, OVS_KEY_ATTR_NSH);
1937 if (!start)
1938 return -EMSGSIZE;
1939
1940 if (nla_put(skb, OVS_NSH_KEY_ATTR_BASE, sizeof(nsh->base), &nsh->base))
1941 goto nla_put_failure;
1942
1943 if (is_mask || nsh->base.mdtype == NSH_M_TYPE1) {
1944 if (nla_put(skb, OVS_NSH_KEY_ATTR_MD1,
1945 sizeof(nsh->context), nsh->context))
1946 goto nla_put_failure;
1947 }
1948
1949 /* Don't support MD type 2 yet */
1950
1951 nla_nest_end(skb, start);
1952
1953 return 0;
1954
1955 nla_put_failure:
1956 return -EMSGSIZE;
1957 }
1958
1959 static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
1960 const struct sw_flow_key *output, bool is_mask,
1961 struct sk_buff *skb)
1962 {
1963 struct ovs_key_ethernet *eth_key;
1964 struct nlattr *nla;
1965 struct nlattr *encap = NULL;
1966 struct nlattr *in_encap = NULL;
1967
1968 if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
1969 goto nla_put_failure;
1970
1971 if (nla_put_u32(skb, OVS_KEY_ATTR_DP_HASH, output->ovs_flow_hash))
1972 goto nla_put_failure;
1973
1974 if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
1975 goto nla_put_failure;
1976
1977 if ((swkey->tun_proto || is_mask)) {
1978 const void *opts = NULL;
1979
1980 if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
1981 opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);
1982
1983 if (ip_tun_to_nlattr(skb, &output->tun_key, opts,
1984 swkey->tun_opts_len, swkey->tun_proto))
1985 goto nla_put_failure;
1986 }
1987
1988 if (swkey->phy.in_port == DP_MAX_PORTS) {
1989 if (is_mask && (output->phy.in_port == 0xffff))
1990 if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
1991 goto nla_put_failure;
1992 } else {
1993 u16 upper_u16;
1994 upper_u16 = !is_mask ? 0 : 0xffff;
1995
1996 if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
1997 (upper_u16 << 16) | output->phy.in_port))
1998 goto nla_put_failure;
1999 }
2000
2001 if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
2002 goto nla_put_failure;
2003
2004 if (ovs_ct_put_key(swkey, output, skb))
2005 goto nla_put_failure;
2006
2007 if (ovs_key_mac_proto(swkey) == MAC_PROTO_ETHERNET) {
2008 nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
2009 if (!nla)
2010 goto nla_put_failure;
2011
2012 eth_key = nla_data(nla);
2013 ether_addr_copy(eth_key->eth_src, output->eth.src);
2014 ether_addr_copy(eth_key->eth_dst, output->eth.dst);
2015
2016 if (swkey->eth.vlan.tci || eth_type_vlan(swkey->eth.type)) {
2017 if (ovs_nla_put_vlan(skb, &output->eth.vlan, is_mask))
2018 goto nla_put_failure;
2019 encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
2020 if (!swkey->eth.vlan.tci)
2021 goto unencap;
2022
2023 if (swkey->eth.cvlan.tci || eth_type_vlan(swkey->eth.type)) {
2024 if (ovs_nla_put_vlan(skb, &output->eth.cvlan, is_mask))
2025 goto nla_put_failure;
2026 in_encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
2027 if (!swkey->eth.cvlan.tci)
2028 goto unencap;
2029 }
2030 }
2031
2032 if (swkey->eth.type == htons(ETH_P_802_2)) {
2033 /*
2034 * Ethertype 802.2 is represented in Netlink by omitting
2035 * OVS_KEY_ATTR_ETHERTYPE from the flow key attributes and
2036 * putting 0xffff in the mask attribute. The ethertype can
2037 * also be wildcarded.
2038 */
2039 if (is_mask && output->eth.type)
2040 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
2041 output->eth.type))
2042 goto nla_put_failure;
2043 goto unencap;
2044 }
2045 }
2046
2047 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
2048 goto nla_put_failure;
2049
2050 if (eth_type_vlan(swkey->eth.type)) {
2051 /* There are 3 VLAN tags; we don't know anything about the rest
2052 * of the packet, so truncate here.
2053 */
2054 WARN_ON_ONCE(!(encap && in_encap));
2055 goto unencap;
2056 }
2057
2058 if (swkey->eth.type == htons(ETH_P_IP)) {
2059 struct ovs_key_ipv4 *ipv4_key;
2060
2061 nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
2062 if (!nla)
2063 goto nla_put_failure;
2064 ipv4_key = nla_data(nla);
2065 ipv4_key->ipv4_src = output->ipv4.addr.src;
2066 ipv4_key->ipv4_dst = output->ipv4.addr.dst;
2067 ipv4_key->ipv4_proto = output->ip.proto;
2068 ipv4_key->ipv4_tos = output->ip.tos;
2069 ipv4_key->ipv4_ttl = output->ip.ttl;
2070 ipv4_key->ipv4_frag = output->ip.frag;
2071 } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
2072 struct ovs_key_ipv6 *ipv6_key;
2073
2074 nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
2075 if (!nla)
2076 goto nla_put_failure;
2077 ipv6_key = nla_data(nla);
2078 memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
2079 sizeof(ipv6_key->ipv6_src));
2080 memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
2081 sizeof(ipv6_key->ipv6_dst));
2082 ipv6_key->ipv6_label = output->ipv6.label;
2083 ipv6_key->ipv6_proto = output->ip.proto;
2084 ipv6_key->ipv6_tclass = output->ip.tos;
2085 ipv6_key->ipv6_hlimit = output->ip.ttl;
2086 ipv6_key->ipv6_frag = output->ip.frag;
2087 } else if (swkey->eth.type == htons(ETH_P_NSH)) {
2088 if (nsh_key_to_nlattr(&output->nsh, is_mask, skb))
2089 goto nla_put_failure;
2090 } else if (swkey->eth.type == htons(ETH_P_ARP) ||
2091 swkey->eth.type == htons(ETH_P_RARP)) {
2092 struct ovs_key_arp *arp_key;
2093
2094 nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
2095 if (!nla)
2096 goto nla_put_failure;
2097 arp_key = nla_data(nla);
2098 memset(arp_key, 0, sizeof(struct ovs_key_arp));
2099 arp_key->arp_sip = output->ipv4.addr.src;
2100 arp_key->arp_tip = output->ipv4.addr.dst;
2101 arp_key->arp_op = htons(output->ip.proto);
2102 ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
2103 ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
2104 } else if (eth_p_mpls(swkey->eth.type)) {
2105 struct ovs_key_mpls *mpls_key;
2106
2107 nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS, sizeof(*mpls_key));
2108 if (!nla)
2109 goto nla_put_failure;
2110 mpls_key = nla_data(nla);
2111 mpls_key->mpls_lse = output->mpls.top_lse;
2112 }
2113
2114 if ((swkey->eth.type == htons(ETH_P_IP) ||
2115 swkey->eth.type == htons(ETH_P_IPV6)) &&
2116 swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
2117
2118 if (swkey->ip.proto == IPPROTO_TCP) {
2119 struct ovs_key_tcp *tcp_key;
2120
2121 nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
2122 if (!nla)
2123 goto nla_put_failure;
2124 tcp_key = nla_data(nla);
2125 tcp_key->tcp_src = output->tp.src;
2126 tcp_key->tcp_dst = output->tp.dst;
2127 if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
2128 output->tp.flags))
2129 goto nla_put_failure;
2130 } else if (swkey->ip.proto == IPPROTO_UDP) {
2131 struct ovs_key_udp *udp_key;
2132
2133 nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
2134 if (!nla)
2135 goto nla_put_failure;
2136 udp_key = nla_data(nla);
2137 udp_key->udp_src = output->tp.src;
2138 udp_key->udp_dst = output->tp.dst;
2139 } else if (swkey->ip.proto == IPPROTO_SCTP) {
2140 struct ovs_key_sctp *sctp_key;
2141
2142 nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
2143 if (!nla)
2144 goto nla_put_failure;
2145 sctp_key = nla_data(nla);
2146 sctp_key->sctp_src = output->tp.src;
2147 sctp_key->sctp_dst = output->tp.dst;
2148 } else if (swkey->eth.type == htons(ETH_P_IP) &&
2149 swkey->ip.proto == IPPROTO_ICMP) {
2150 struct ovs_key_icmp *icmp_key;
2151
2152 nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
2153 if (!nla)
2154 goto nla_put_failure;
2155 icmp_key = nla_data(nla);
2156 icmp_key->icmp_type = ntohs(output->tp.src);
2157 icmp_key->icmp_code = ntohs(output->tp.dst);
2158 } else if (swkey->eth.type == htons(ETH_P_IPV6) &&
2159 swkey->ip.proto == IPPROTO_ICMPV6) {
2160 struct ovs_key_icmpv6 *icmpv6_key;
2161
2162 nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
2163 sizeof(*icmpv6_key));
2164 if (!nla)
2165 goto nla_put_failure;
2166 icmpv6_key = nla_data(nla);
2167 icmpv6_key->icmpv6_type = ntohs(output->tp.src);
2168 icmpv6_key->icmpv6_code = ntohs(output->tp.dst);
2169
2170 if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
2171 icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
2172 struct ovs_key_nd *nd_key;
2173
2174 nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
2175 if (!nla)
2176 goto nla_put_failure;
2177 nd_key = nla_data(nla);
2178 memcpy(nd_key->nd_target, &output->ipv6.nd.target,
2179 sizeof(nd_key->nd_target));
2180 ether_addr_copy(nd_key->nd_sll, output->ipv6.nd.sll);
2181 ether_addr_copy(nd_key->nd_tll, output->ipv6.nd.tll);
2182 }
2183 }
2184 }
2185
2186 unencap:
2187 if (in_encap)
2188 nla_nest_end(skb, in_encap);
2189 if (encap)
2190 nla_nest_end(skb, encap);
2191
2192 return 0;
2193
2194 nla_put_failure:
2195 return -EMSGSIZE;
2196 }
2197
2198 int ovs_nla_put_key(const struct sw_flow_key *swkey,
2199 const struct sw_flow_key *output, int attr, bool is_mask,
2200 struct sk_buff *skb)
2201 {
2202 int err;
2203 struct nlattr *nla;
2204
2205 nla = nla_nest_start(skb, attr);
2206 if (!nla)
2207 return -EMSGSIZE;
2208 err = __ovs_nla_put_key(swkey, output, is_mask, skb);
2209 if (err)
2210 return err;
2211 nla_nest_end(skb, nla);
2212
2213 return 0;
2214 }
2215
2216 /* Called with ovs_mutex or RCU read lock. */
2217 int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb)
2218 {
2219 if (ovs_identifier_is_ufid(&flow->id))
2220 return nla_put(skb, OVS_FLOW_ATTR_UFID, flow->id.ufid_len,
2221 flow->id.ufid);
2222
2223 return ovs_nla_put_key(flow->id.unmasked_key, flow->id.unmasked_key,
2224 OVS_FLOW_ATTR_KEY, false, skb);
2225 }
2226
2227 /* Called with ovs_mutex or RCU read lock. */
2228 int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb)
2229 {
2230 return ovs_nla_put_key(&flow->key, &flow->key,
2231 OVS_FLOW_ATTR_KEY, false, skb);
2232 }
2233
2234 /* Called with ovs_mutex or RCU read lock. */
2235 int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
2236 {
2237 return ovs_nla_put_key(&flow->key, &flow->mask->key,
2238 OVS_FLOW_ATTR_MASK, true, skb);
2239 }
2240
2241 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)
2242 #define MAX_ACTIONS_BUFSIZE (16 * 1024)
2243 #else
2244 #define MAX_ACTIONS_BUFSIZE (32 * 1024)
2245 #endif
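
/* Upper bound for the flat buffer that holds copied actions:
 * reserve_sfa_size() below grows the buffer by doubling and hard-caps
 * it at MAX_ACTIONS_BUFSIZE (smaller on kernels before 4.9).
 */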
2246
2247 static struct sw_flow_actions *nla_alloc_flow_actions(int size)
2248 {
2249 struct sw_flow_actions *sfa;
2250
2251 WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
2252
2253 sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
2254 if (!sfa)
2255 return ERR_PTR(-ENOMEM);
2256
2257 sfa->actions_len = 0;
2258 return sfa;
2259 }
2260
2261 static void ovs_nla_free_set_action(const struct nlattr *a)
2262 {
2263 const struct nlattr *ovs_key = nla_data(a);
2264 struct ovs_tunnel_info *ovs_tun;
2265
2266 switch (nla_type(ovs_key)) {
2267 case OVS_KEY_ATTR_TUNNEL_INFO:
2268 ovs_tun = nla_data(ovs_key);
2269 ovs_dst_release((struct dst_entry *)ovs_tun->tun_dst);
2270 break;
2271 }
2272 }
2273
2274 void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
2275 {
2276 const struct nlattr *a;
2277 int rem;
2278
2279 if (!sf_acts)
2280 return;
2281
2282 nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) {
2283 switch (nla_type(a)) {
2284 case OVS_ACTION_ATTR_SET:
2285 ovs_nla_free_set_action(a);
2286 break;
2287 case OVS_ACTION_ATTR_CT:
2288 ovs_ct_free_action(a);
2289 break;
2290 }
2291 }
2292
2293 kfree(sf_acts);
2294 }
2295
2296 static void __ovs_nla_free_flow_actions(struct rcu_head *head)
2297 {
2298 ovs_nla_free_flow_actions(container_of(head, struct sw_flow_actions, rcu));
2299 }
2300
2301 /* Schedules 'sf_acts' to be freed after the next RCU grace period.
2302 * The caller must hold rcu_read_lock for this to be sensible. */
2303 void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *sf_acts)
2304 {
2305 call_rcu(&sf_acts->rcu, __ovs_nla_free_flow_actions);
2306 }
2307
2308 static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
2309 int attr_len, bool log)
2310 {
2312 struct sw_flow_actions *acts;
2313 int new_acts_size;
2314 int req_size = NLA_ALIGN(attr_len);
2315 int next_offset = offsetof(struct sw_flow_actions, actions) +
2316 (*sfa)->actions_len;
2317
2318 if (req_size <= (ksize(*sfa) - next_offset))
2319 goto out;
2320
2321 new_acts_size = ksize(*sfa) * 2;
2322
2323 if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
2324 if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
2325 OVS_NLERR(log, "Flow action size exceeds max %u",
2326 MAX_ACTIONS_BUFSIZE);
2327 return ERR_PTR(-EMSGSIZE);
2328 }
2329 new_acts_size = MAX_ACTIONS_BUFSIZE;
2330 }
2331
2332 acts = nla_alloc_flow_actions(new_acts_size);
2333 if (IS_ERR(acts))
2334 return (void *)acts;
2335
2336 memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
2337 acts->actions_len = (*sfa)->actions_len;
2338 acts->orig_len = (*sfa)->orig_len;
2339 kfree(*sfa);
2340 *sfa = acts;
2341
2342 out:
2343 (*sfa)->actions_len += req_size;
2344 return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
2345 }
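
/* Worked example (sketch): with ksize(*sfa) == 256 and enough actions
 * already stored that only 32 bytes remain, a 104-byte request doubles
 * the buffer to 512 (clamped to MAX_ACTIONS_BUFSIZE if necessary),
 * copies the existing actions into the new buffer, and returns a
 * pointer just past the old contents.
 */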
2346
2347 static struct nlattr *__add_action(struct sw_flow_actions **sfa,
2348 int attrtype, void *data, int len, bool log)
2349 {
2350 struct nlattr *a;
2351
2352 a = reserve_sfa_size(sfa, nla_attr_size(len), log);
2353 if (IS_ERR(a))
2354 return a;
2355
2356 a->nla_type = attrtype;
2357 a->nla_len = nla_attr_size(len);
2358
2359 if (data)
2360 memcpy(nla_data(a), data, len);
2361 memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
2362
2363 return a;
2364 }
2365
2366 int ovs_nla_add_action(struct sw_flow_actions **sfa, int attrtype, void *data,
2367 int len, bool log)
2368 {
2369 struct nlattr *a;
2370
2371 a = __add_action(sfa, attrtype, data, len, log);
2372
2373 return PTR_ERR_OR_ZERO(a);
2374 }
2375
2376 static inline int add_nested_action_start(struct sw_flow_actions **sfa,
2377 int attrtype, bool log)
2378 {
2379 int used = (*sfa)->actions_len;
2380 int err;
2381
2382 err = ovs_nla_add_action(sfa, attrtype, NULL, 0, log);
2383 if (err)
2384 return err;
2385
2386 return used;
2387 }
2388
2389 static inline void add_nested_action_end(struct sw_flow_actions *sfa,
2390 int st_offset)
2391 {
2392 struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions +
2393 st_offset);
2394
2395 a->nla_len = sfa->actions_len - st_offset;
2396 }
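
/* These two helpers bracket a nested attribute inside the flat action
 * buffer, analogous to nla_nest_start()/nla_nest_end() on an sk_buff.
 * Typical use, as in validate_and_copy_sample() below:
 *
 *	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log);
 *	if (start < 0)
 *		return start;
 *	... append inner attributes with ovs_nla_add_action() ...
 *	add_nested_action_end(*sfa, start);
 */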
2397
2398 static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
2399 const struct sw_flow_key *key,
2400 struct sw_flow_actions **sfa,
2401 __be16 eth_type, __be16 vlan_tci, bool log);
2402
2403 static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
2404 const struct sw_flow_key *key,
2405 struct sw_flow_actions **sfa,
2406 __be16 eth_type, __be16 vlan_tci,
2407 bool log, bool last)
2408 {
2409 const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
2410 const struct nlattr *probability, *actions;
2411 const struct nlattr *a;
2412 int rem, start, err;
2413 struct sample_arg arg;
2414
2415 memset(attrs, 0, sizeof(attrs));
2416 nla_for_each_nested(a, attr, rem) {
2417 int type = nla_type(a);
2418 if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
2419 return -EINVAL;
2420 attrs[type] = a;
2421 }
2422 if (rem)
2423 return -EINVAL;
2424
2425 probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
2426 if (!probability || nla_len(probability) != sizeof(u32))
2427 return -EINVAL;
2428
2429 actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
2430 if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
2431 return -EINVAL;
2432
2433 /* Validation done; copy the sample action. */
2434 start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log);
2435 if (start < 0)
2436 return start;
2437
2438 /* When both the skb and the flow may be changed, put the sample
2439 * into a deferred FIFO. On the other hand, if only the skb
2440 * may be modified, the actions can be executed in place.
2441 *
2442 * Do this analysis at flow installation time.
2443 * Set 'arg.exec' to true if the actions can be
2444 * executed without being deferred.
2445 *
2446 * If the sample is the last action, it can always be executed
2447 * rather than deferred.
2448 */
2449 arg.exec = last || !actions_may_change_flow(actions);
2450 arg.probability = nla_get_u32(probability);
2451
2452 err = ovs_nla_add_action(sfa, OVS_SAMPLE_ATTR_ARG, &arg, sizeof(arg),
2453 log);
2454 if (err)
2455 return err;
2456
2457 err = __ovs_nla_copy_actions(net, actions, key, sfa,
2458 eth_type, vlan_tci, log);
2459
2460 if (err)
2461 return err;
2462
2463 add_nested_action_end(*sfa, start);
2464
2465 return 0;
2466 }
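
/* Resulting internal layout (sketch):
 *
 *	OVS_ACTION_ATTR_SAMPLE {
 *		OVS_SAMPLE_ATTR_ARG = struct sample_arg { exec, probability }
 *		... copied inner actions ...
 *	}
 *
 * The userspace OVS_SAMPLE_ATTR_PROBABILITY/ACTIONS pair is folded into
 * this form here and expanded again by sample_action_to_attr() on dump.
 */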
2467
2468 void ovs_match_init(struct sw_flow_match *match,
2469 struct sw_flow_key *key,
2470 bool reset_key,
2471 struct sw_flow_mask *mask)
2472 {
2473 memset(match, 0, sizeof(*match));
2474 match->key = key;
2475 match->mask = mask;
2476
2477 if (reset_key)
2478 memset(key, 0, sizeof(*key));
2479
2480 if (mask) {
2481 memset(&mask->key, 0, sizeof(mask->key));
2482 mask->range.start = mask->range.end = 0;
2483 }
2484 }
2485
2486 static int validate_geneve_opts(struct sw_flow_key *key)
2487 {
2488 struct geneve_opt *option;
2489 int opts_len = key->tun_opts_len;
2490 bool crit_opt = false;
2491
2492 option = (struct geneve_opt *)TUN_METADATA_OPTS(key, key->tun_opts_len);
2493 while (opts_len > 0) {
2494 int len;
2495
2496 if (opts_len < sizeof(*option))
2497 return -EINVAL;
2498
2499 len = sizeof(*option) + option->length * 4;
2500 if (len > opts_len)
2501 return -EINVAL;
2502
2503 crit_opt |= !!(option->type & GENEVE_CRIT_OPT_TYPE);
2504
2505 option = (struct geneve_opt *)((u8 *)option + len);
2506 opts_len -= len;
2507 }
2508
2509 key->tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;
2510
2511 return 0;
2512 }
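
/* Example: a Geneve option whose 'length' field is 1 occupies
 * sizeof(struct geneve_opt) + 4 bytes; the loop above walks the TLVs in
 * those units and accumulates the critical-option bit across all of
 * them.
 */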
2513
2514 static int validate_and_copy_set_tun(const struct nlattr *attr,
2515 struct sw_flow_actions **sfa, bool log)
2516 {
2517 struct sw_flow_match match;
2518 struct sw_flow_key key;
2519 struct metadata_dst *tun_dst;
2520 struct ip_tunnel_info *tun_info;
2521 struct ovs_tunnel_info *ovs_tun;
2522 struct nlattr *a;
2523 int err = 0, start, opts_type;
2524
2525 ovs_match_init(&match, &key, true, NULL);
2526 opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log);
2527 if (opts_type < 0)
2528 return opts_type;
2529
2530 if (key.tun_opts_len) {
2531 switch (opts_type) {
2532 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
2533 err = validate_geneve_opts(&key);
2534 if (err < 0)
2535 return err;
2536 break;
2537 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
2538 break;
2539 case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
2540 break;
2541 }
2542 }
2543
2544 start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET, log);
2545 if (start < 0)
2546 return start;
2547
2548 tun_dst = metadata_dst_alloc(key.tun_opts_len, METADATA_IP_TUNNEL,
2549 GFP_KERNEL);
2550
2551 if (!tun_dst)
2552 return -ENOMEM;
2553
2554 err = dst_cache_init(&tun_dst->u.tun_info.dst_cache, GFP_KERNEL);
2555 if (err) {
2556 dst_release((struct dst_entry *)tun_dst);
2557 return err;
2558 }
2559 a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
2560 sizeof(*ovs_tun), log);
2561 if (IS_ERR(a)) {
2562 ovs_dst_release((struct dst_entry *)tun_dst);
2563 return PTR_ERR(a);
2564 }
2565
2566 ovs_tun = nla_data(a);
2567 ovs_tun->tun_dst = tun_dst;
2568
2569 tun_info = &tun_dst->u.tun_info;
2570 tun_info->mode = IP_TUNNEL_INFO_TX;
2571 if (key.tun_proto == AF_INET6)
2572 tun_info->mode |= IP_TUNNEL_INFO_IPV6;
2573 tun_info->key = key.tun_key;
2574
2575 /* We need to store the options in the action itself since
2576 * everything else will go away after flow setup. We can append
2577 * them to tun_info and then point there.
2578 */
2579 ip_tunnel_info_opts_set(tun_info,
2580 TUN_METADATA_OPTS(&key, key.tun_opts_len),
2581 key.tun_opts_len);
2582 add_nested_action_end(*sfa, start);
2583
2584 return err;
2585 }
2586
2587 static bool validate_nsh(const struct nlattr *attr, bool is_mask,
2588 bool is_push_nsh, bool log)
2589 {
2590 struct sw_flow_match match;
2591 struct sw_flow_key key;
2592 int ret = 0;
2593
2594 ovs_match_init(&match, &key, true, NULL);
2595 ret = nsh_key_put_from_nlattr(attr, &match, is_mask,
2596 is_push_nsh, log);
2597 return !ret;
2598 }
2599
2600 /* Return false if there are any non-masked bits set.
2601 * Mask follows data immediately, before any netlink padding.
2602 */
2603 static bool validate_masked(u8 *data, int len)
2604 {
2605 u8 *mask = data + len;
2606
2607 while (len--)
2608 if (*data++ & ~*mask++)
2609 return false;
2610
2611 return true;
2612 }
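
/* Example: with len == 1, data 0x0f under mask 0x0f validates (no bits
 * set outside the mask), while data 0x1f under the same mask is
 * rejected because bit 4 is not covered by the mask.
 */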
2613
2614 static int validate_set(const struct nlattr *a,
2615 const struct sw_flow_key *flow_key,
2616 struct sw_flow_actions **sfa, bool *skip_copy,
2617 u8 mac_proto, __be16 eth_type, bool masked, bool log)
2618 {
2619 const struct nlattr *ovs_key = nla_data(a);
2620 int key_type = nla_type(ovs_key);
2621 size_t key_len;
2622
2623 /* There can be only one key in an action. */
2624 if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
2625 return -EINVAL;
2626
2627 key_len = nla_len(ovs_key);
2628 if (masked)
2629 key_len /= 2;
2630
2631 if (key_type > OVS_KEY_ATTR_MAX ||
2632 !check_attr_len(key_len, ovs_key_lens[key_type].len))
2633 return -EINVAL;
2634
2635 if (masked && !validate_masked(nla_data(ovs_key), key_len))
2636 return -EINVAL;
2637
2638 switch (key_type) {
2639 const struct ovs_key_ipv4 *ipv4_key;
2640 const struct ovs_key_ipv6 *ipv6_key;
2641 int err;
2642
2643 case OVS_KEY_ATTR_PRIORITY:
2644 case OVS_KEY_ATTR_SKB_MARK:
2645 case OVS_KEY_ATTR_CT_MARK:
2646 case OVS_KEY_ATTR_CT_LABELS:
2647 break;
2648
2649 case OVS_KEY_ATTR_ETHERNET:
2650 if (mac_proto != MAC_PROTO_ETHERNET)
2651 return -EINVAL;
2652 break;
2653
2654 case OVS_KEY_ATTR_TUNNEL:
2655 #ifndef USE_UPSTREAM_TUNNEL
2656 if (eth_p_mpls(eth_type))
2657 return -EINVAL;
2658 #endif
2659 if (masked)
2660 return -EINVAL; /* Masked tunnel set not supported. */
2661
2662 *skip_copy = true;
2663 err = validate_and_copy_set_tun(a, sfa, log);
2664 if (err)
2665 return err;
2666 break;
2667
2668 case OVS_KEY_ATTR_IPV4:
2669 if (eth_type != htons(ETH_P_IP))
2670 return -EINVAL;
2671
2672 ipv4_key = nla_data(ovs_key);
2673
2674 if (masked) {
2675 const struct ovs_key_ipv4 *mask = ipv4_key + 1;
2676
2677 /* Non-writeable fields. */
2678 if (mask->ipv4_proto || mask->ipv4_frag)
2679 return -EINVAL;
2680 } else {
2681 if (ipv4_key->ipv4_proto != flow_key->ip.proto)
2682 return -EINVAL;
2683
2684 if (ipv4_key->ipv4_frag != flow_key->ip.frag)
2685 return -EINVAL;
2686 }
2687 break;
2688
2689 case OVS_KEY_ATTR_IPV6:
2690 if (eth_type != htons(ETH_P_IPV6))
2691 return -EINVAL;
2692
2693 ipv6_key = nla_data(ovs_key);
2694
2695 if (masked) {
2696 const struct ovs_key_ipv6 *mask = ipv6_key + 1;
2697
2698 /* Non-writeable fields. */
2699 if (mask->ipv6_proto || mask->ipv6_frag)
2700 return -EINVAL;
2701
2702 /* Invalid bits in the flow label mask? */
2703 if (ntohl(mask->ipv6_label) & 0xFFF00000)
2704 return -EINVAL;
2705 } else {
2706 if (ipv6_key->ipv6_proto != flow_key->ip.proto)
2707 return -EINVAL;
2708
2709 if (ipv6_key->ipv6_frag != flow_key->ip.frag)
2710 return -EINVAL;
2711 }
2712 if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
2713 return -EINVAL;
2714
2715 break;
2716
2717 case OVS_KEY_ATTR_TCP:
2718 if ((eth_type != htons(ETH_P_IP) &&
2719 eth_type != htons(ETH_P_IPV6)) ||
2720 flow_key->ip.proto != IPPROTO_TCP)
2721 return -EINVAL;
2722
2723 break;
2724
2725 case OVS_KEY_ATTR_UDP:
2726 if ((eth_type != htons(ETH_P_IP) &&
2727 eth_type != htons(ETH_P_IPV6)) ||
2728 flow_key->ip.proto != IPPROTO_UDP)
2729 return -EINVAL;
2730
2731 break;
2732
2733 case OVS_KEY_ATTR_MPLS:
2734 if (!eth_p_mpls(eth_type))
2735 return -EINVAL;
2736 break;
2737
2738 case OVS_KEY_ATTR_SCTP:
2739 if ((eth_type != htons(ETH_P_IP) &&
2740 eth_type != htons(ETH_P_IPV6)) ||
2741 flow_key->ip.proto != IPPROTO_SCTP)
2742 return -EINVAL;
2743
2744 break;
2745
2746 case OVS_KEY_ATTR_NSH:
2747 if (eth_type != htons(ETH_P_NSH))
2748 return -EINVAL;
2749 if (!validate_nsh(nla_data(a), masked, false, log))
2750 return -EINVAL;
2751 break;
2752
2753 default:
2754 return -EINVAL;
2755 }
2756
2757 /* Convert non-masked non-tunnel set actions to masked set actions. */
2758 if (!masked && key_type != OVS_KEY_ATTR_TUNNEL) {
2759 int start, len = key_len * 2;
2760 struct nlattr *at;
2761
2762 *skip_copy = true;
2763
2764 start = add_nested_action_start(sfa,
2765 OVS_ACTION_ATTR_SET_TO_MASKED,
2766 log);
2767 if (start < 0)
2768 return start;
2769
2770 at = __add_action(sfa, key_type, NULL, len, log);
2771 if (IS_ERR(at))
2772 return PTR_ERR(at);
2773
2774 memcpy(nla_data(at), nla_data(ovs_key), key_len); /* Key. */
2775 memset(nla_data(at) + key_len, 0xff, key_len); /* Mask. */
2776 /* Clear non-writeable bits from otherwise writeable fields. */
2777 if (key_type == OVS_KEY_ATTR_IPV6) {
2778 struct ovs_key_ipv6 *mask = nla_data(at) + key_len;
2779
2780 mask->ipv6_label &= htonl(0x000FFFFF);
2781 }
2782 add_nested_action_end(*sfa, start);
2783 }
2784
2785 return 0;
2786 }
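
/* Sketch of the conversion above: a userspace
 *
 *	OVS_ACTION_ATTR_SET { OVS_KEY_ATTR_IPV4 = key }
 *
 * is stored internally as
 *
 *	OVS_ACTION_ATTR_SET_TO_MASKED {
 *		OVS_KEY_ATTR_IPV4 = key followed by an all-ones mask
 *	}
 *
 * with key and mask laid out back to back (each key_len bytes);
 * masked_set_action_to_set_action_attr() reverses this on dump.
 */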
2787
2788 static int validate_userspace(const struct nlattr *attr)
2789 {
2790 static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
2791 [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
2792 [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
2793 [OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = {.type = NLA_U32 },
2794 };
2795 struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
2796 int error;
2797
2798 error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, attr,
2799 userspace_policy, NULL);
2800 if (error)
2801 return error;
2802
2803 if (!a[OVS_USERSPACE_ATTR_PID] ||
2804 !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
2805 return -EINVAL;
2806
2807 return 0;
2808 }
2809
2810 static int copy_action(const struct nlattr *from,
2811 struct sw_flow_actions **sfa, bool log)
2812 {
2813 int totlen = NLA_ALIGN(from->nla_len);
2814 struct nlattr *to;
2815
2816 to = reserve_sfa_size(sfa, from->nla_len, log);
2817 if (IS_ERR(to))
2818 return PTR_ERR(to);
2819
2820 memcpy(to, from, totlen);
2821 return 0;
2822 }
2823
2824 static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
2825 const struct sw_flow_key *key,
2826 struct sw_flow_actions **sfa,
2827 __be16 eth_type, __be16 vlan_tci, bool log)
2828 {
2829 u8 mac_proto = ovs_key_mac_proto(key);
2830 const struct nlattr *a;
2831 int rem, err;
2832
2833 nla_for_each_nested(a, attr, rem) {
2834 /* Expected argument lengths, (u32)-1 for variable length. */
2835 static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
2836 [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
2837 [OVS_ACTION_ATTR_RECIRC] = sizeof(u32),
2838 [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
2839 [OVS_ACTION_ATTR_PUSH_MPLS] = sizeof(struct ovs_action_push_mpls),
2840 [OVS_ACTION_ATTR_POP_MPLS] = sizeof(__be16),
2841 [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
2842 [OVS_ACTION_ATTR_POP_VLAN] = 0,
2843 [OVS_ACTION_ATTR_SET] = (u32)-1,
2844 [OVS_ACTION_ATTR_SET_MASKED] = (u32)-1,
2845 [OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
2846 [OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash),
2847 [OVS_ACTION_ATTR_CT] = (u32)-1,
2848 [OVS_ACTION_ATTR_CT_CLEAR] = 0,
2849 [OVS_ACTION_ATTR_TRUNC] = sizeof(struct ovs_action_trunc),
2850 [OVS_ACTION_ATTR_PUSH_ETH] = sizeof(struct ovs_action_push_eth),
2851 [OVS_ACTION_ATTR_POP_ETH] = 0,
2852 [OVS_ACTION_ATTR_PUSH_NSH] = (u32)-1,
2853 [OVS_ACTION_ATTR_POP_NSH] = 0,
2854 [OVS_ACTION_ATTR_METER] = sizeof(u32),
2855 };
2856 const struct ovs_action_push_vlan *vlan;
2857 int type = nla_type(a);
2858 bool skip_copy;
2859
2860 if (type > OVS_ACTION_ATTR_MAX ||
2861 (action_lens[type] != nla_len(a) &&
2862 action_lens[type] != (u32)-1))
2863 return -EINVAL;
2864
2865 skip_copy = false;
2866 switch (type) {
2867 case OVS_ACTION_ATTR_UNSPEC:
2868 return -EINVAL;
2869
2870 case OVS_ACTION_ATTR_USERSPACE:
2871 err = validate_userspace(a);
2872 if (err)
2873 return err;
2874 break;
2875
2876 case OVS_ACTION_ATTR_OUTPUT:
2877 if (nla_get_u32(a) >= DP_MAX_PORTS)
2878 return -EINVAL;
2879 break;
2880
2881 case OVS_ACTION_ATTR_TRUNC: {
2882 const struct ovs_action_trunc *trunc = nla_data(a);
2883
2884 if (trunc->max_len < ETH_HLEN)
2885 return -EINVAL;
2886 break;
2887 }
2888
2889 case OVS_ACTION_ATTR_HASH: {
2890 const struct ovs_action_hash *act_hash = nla_data(a);
2891
2892 switch (act_hash->hash_alg) {
2893 case OVS_HASH_ALG_L4:
2894 break;
2895 default:
2896 return -EINVAL;
2897 }
2898
2899 break;
2900 }
2901
2902 case OVS_ACTION_ATTR_POP_VLAN:
2903 if (mac_proto != MAC_PROTO_ETHERNET)
2904 return -EINVAL;
2905 vlan_tci = htons(0);
2906 break;
2907
2908 case OVS_ACTION_ATTR_PUSH_VLAN:
2909 if (mac_proto != MAC_PROTO_ETHERNET)
2910 return -EINVAL;
2911 vlan = nla_data(a);
2912 if (!eth_type_vlan(vlan->vlan_tpid))
2913 return -EINVAL;
2914 if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
2915 return -EINVAL;
2916 vlan_tci = vlan->vlan_tci;
2917 break;
2918
2919 case OVS_ACTION_ATTR_RECIRC:
2920 break;
2921
2922 case OVS_ACTION_ATTR_PUSH_MPLS: {
2923 const struct ovs_action_push_mpls *mpls = nla_data(a);
2924
2925 if (!eth_p_mpls(mpls->mpls_ethertype))
2926 return -EINVAL;
2927 /* Prohibit pushing MPLS except onto a whitelist of
2928 * ethertypes, for packets that have a known tag order.
2929 */
2930 if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
2931 (eth_type != htons(ETH_P_IP) &&
2932 eth_type != htons(ETH_P_IPV6) &&
2933 eth_type != htons(ETH_P_ARP) &&
2934 eth_type != htons(ETH_P_RARP) &&
2935 !eth_p_mpls(eth_type)))
2936 return -EINVAL;
2937 eth_type = mpls->mpls_ethertype;
2938 break;
2939 }
2940
2941 case OVS_ACTION_ATTR_POP_MPLS:
2942 if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
2943 !eth_p_mpls(eth_type))
2944 return -EINVAL;
2945
2946 /* Disallow subsequent L2.5+ set and mpls_pop actions
2947 * as there is no check here to ensure that the new
2948 * eth_type is valid and thus set actions could
2949 * write off the end of the packet or otherwise
2950 * corrupt it.
2951 *
2952 * Support for these actions is planned using packet
2953 * recirculation.
2954 */
2955 eth_type = htons(0);
2956 break;
2957
2958 case OVS_ACTION_ATTR_SET:
2959 err = validate_set(a, key, sfa,
2960 &skip_copy, mac_proto, eth_type,
2961 false, log);
2962 if (err)
2963 return err;
2964 break;
2965
2966 case OVS_ACTION_ATTR_SET_MASKED:
2967 err = validate_set(a, key, sfa,
2968 &skip_copy, mac_proto, eth_type,
2969 true, log);
2970 if (err)
2971 return err;
2972 break;
2973
2974 case OVS_ACTION_ATTR_SAMPLE: {
2975 bool last = nla_is_last(a, rem);
2976
2977 err = validate_and_copy_sample(net, a, key, sfa,
2978 eth_type, vlan_tci,
2979 log, last);
2980 if (err)
2981 return err;
2982 skip_copy = true;
2983 break;
2984 }
2985
2986 case OVS_ACTION_ATTR_CT:
2987 err = ovs_ct_copy_action(net, a, key, sfa, log);
2988 if (err)
2989 return err;
2990 skip_copy = true;
2991 break;
2992
2993 case OVS_ACTION_ATTR_CT_CLEAR:
2994 break;
2995
2996 case OVS_ACTION_ATTR_PUSH_ETH:
2997 /* Disallow pushing an Ethernet header if one
2998 * is already present */
2999 if (mac_proto != MAC_PROTO_NONE)
3000 return -EINVAL;
3001 mac_proto = MAC_PROTO_ETHERNET;
3002 break;
3003
3004 case OVS_ACTION_ATTR_POP_ETH:
3005 if (mac_proto != MAC_PROTO_ETHERNET)
3006 return -EINVAL;
3007 if (vlan_tci & htons(VLAN_TAG_PRESENT))
3008 return -EINVAL;
3009 mac_proto = MAC_PROTO_NONE;
3010 break;
3011
3012 case OVS_ACTION_ATTR_PUSH_NSH:
3013 if (mac_proto != MAC_PROTO_ETHERNET) {
3014 u8 next_proto;
3015
3016 next_proto = tun_p_from_eth_p(eth_type);
3017 if (!next_proto)
3018 return -EINVAL;
3019 }
3020 mac_proto = MAC_PROTO_NONE;
3021 if (!validate_nsh(nla_data(a), false, true, true))
3022 return -EINVAL;
3023 break;
3024
3025 case OVS_ACTION_ATTR_POP_NSH: {
3026 __be16 inner_proto;
3027
3028 if (eth_type != htons(ETH_P_NSH))
3029 return -EINVAL;
3030 inner_proto = tun_p_to_eth_p(key->nsh.base.np);
3031 if (!inner_proto)
3032 return -EINVAL;
3033 if (key->nsh.base.np == TUN_P_ETHERNET)
3034 mac_proto = MAC_PROTO_ETHERNET;
3035 else
3036 mac_proto = MAC_PROTO_NONE;
3037 break;
3038 }
3039
3040 case OVS_ACTION_ATTR_METER:
3041 /* Non-existent meters are simply ignored. */
3042 break;
3043
3044 default:
3045 OVS_NLERR(log, "Unknown Action type %d", type);
3046 return -EINVAL;
3047 }
3048 if (!skip_copy) {
3049 err = copy_action(a, sfa, log);
3050 if (err)
3051 return err;
3052 }
3053 }
3054
3055 if (rem > 0)
3056 return -EINVAL;
3057
3058 return 0;
3059 }
3060
3061 /* 'key' must be the masked key. */
3062 int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
3063 const struct sw_flow_key *key,
3064 struct sw_flow_actions **sfa, bool log)
3065 {
3066 int err;
3067
3068 *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
3069 if (IS_ERR(*sfa))
3070 return PTR_ERR(*sfa);
3071
3072 (*sfa)->orig_len = nla_len(attr);
3073 err = __ovs_nla_copy_actions(net, attr, key, sfa, key->eth.type,
3074 key->eth.vlan.tci, log);
3075 if (err)
3076 ovs_nla_free_flow_actions(*sfa);
3077
3078 return err;
3079 }
3080
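/* Inverse of validate_and_copy_sample(): expands the internal
 * OVS_SAMPLE_ATTR_ARG representation back into the
 * OVS_SAMPLE_ATTR_PROBABILITY and OVS_SAMPLE_ATTR_ACTIONS attributes
 * that userspace expects.
 */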
3081 static int sample_action_to_attr(const struct nlattr *attr,
3082 struct sk_buff *skb)
3083 {
3084 struct nlattr *start, *ac_start = NULL, *sample_arg;
3085 int err = 0, rem = nla_len(attr);
3086 const struct sample_arg *arg;
3087 struct nlattr *actions;
3088
3089 start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
3090 if (!start)
3091 return -EMSGSIZE;
3092
3093 sample_arg = nla_data(attr);
3094 arg = nla_data(sample_arg);
3095 actions = nla_next(sample_arg, &rem);
3096
3097 if (nla_put_u32(skb, OVS_SAMPLE_ATTR_PROBABILITY, arg->probability)) {
3098 err = -EMSGSIZE;
3099 goto out;
3100 }
3101
3102 ac_start = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
3103 if (!ac_start) {
3104 err = -EMSGSIZE;
3105 goto out;
3106 }
3107
3108 err = ovs_nla_put_actions(actions, rem, skb);
3109
3110 out:
3111 if (err) {
3112 nla_nest_cancel(skb, ac_start);
3113 nla_nest_cancel(skb, start);
3114 } else {
3115 nla_nest_end(skb, ac_start);
3116 nla_nest_end(skb, start);
3117 }
3118
3119 return err;
3120 }
3121
3122 static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
3123 {
3124 const struct nlattr *ovs_key = nla_data(a);
3125 int key_type = nla_type(ovs_key);
3126 struct nlattr *start;
3127 int err;
3128
3129 switch (key_type) {
3130 case OVS_KEY_ATTR_TUNNEL_INFO: {
3131 struct ovs_tunnel_info *ovs_tun = nla_data(ovs_key);
3132 struct ip_tunnel_info *tun_info = &ovs_tun->tun_dst->u.tun_info;
3133
3134 start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
3135 if (!start)
3136 return -EMSGSIZE;
3137
3138 err = ip_tun_to_nlattr(skb, &tun_info->key,
3139 ip_tunnel_info_opts(tun_info),
3140 tun_info->options_len,
3141 ip_tunnel_info_af(tun_info));
3142 if (err)
3143 return err;
3144 nla_nest_end(skb, start);
3145 break;
3146 }
3147 default:
3148 if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
3149 return -EMSGSIZE;
3150 break;
3151 }
3152
3153 return 0;
3154 }
3155
3156 static int masked_set_action_to_set_action_attr(const struct nlattr *a,
3157 struct sk_buff *skb)
3158 {
3159 const struct nlattr *ovs_key = nla_data(a);
3160 struct nlattr *nla;
3161 size_t key_len = nla_len(ovs_key) / 2;
3162
3163 /* Revert the conversion we did from a non-masked set action to
3164 * masked set action.
3165 */
3166 nla = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
3167 if (!nla)
3168 return -EMSGSIZE;
3169
3170 if (nla_put(skb, nla_type(ovs_key), key_len, nla_data(ovs_key)))
3171 return -EMSGSIZE;
3172
3173 nla_nest_end(skb, nla);
3174 return 0;
3175 }
3176
3177 int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
3178 {
3179 const struct nlattr *a;
3180 int rem, err;
3181
3182 nla_for_each_attr(a, attr, len, rem) {
3183 int type = nla_type(a);
3184
3185 switch (type) {
3186 case OVS_ACTION_ATTR_SET:
3187 err = set_action_to_attr(a, skb);
3188 if (err)
3189 return err;
3190 break;
3191
3192 case OVS_ACTION_ATTR_SET_TO_MASKED:
3193 err = masked_set_action_to_set_action_attr(a, skb);
3194 if (err)
3195 return err;
3196 break;
3197
3198 case OVS_ACTION_ATTR_SAMPLE:
3199 err = sample_action_to_attr(a, skb);
3200 if (err)
3201 return err;
3202 break;
3203
3204 case OVS_ACTION_ATTR_CT:
3205 err = ovs_ct_action_to_attr(nla_data(a), skb);
3206 if (err)
3207 return err;
3208 break;
3209
3210 default:
3211 if (nla_put(skb, type, nla_len(a), nla_data(a)))
3212 return -EMSGSIZE;
3213 break;
3214 }
3215 }
3216
3217 return 0;
3218 }