]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * VLAN An implementation of 802.1Q VLAN tagging. | |
3 | * | |
4 | * Authors: Ben Greear <greearb@candelatech.com> | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU General Public License | |
8 | * as published by the Free Software Foundation; either version | |
9 | * 2 of the License, or (at your option) any later version. | |
10 | * | |
11 | */ | |
12 | #ifndef _LINUX_IF_VLAN_H_ | |
13 | #define _LINUX_IF_VLAN_H_ | |
14 | ||
15 | #include <linux/netdevice.h> | |
16 | #include <linux/etherdevice.h> | |
17 | #include <linux/rtnetlink.h> | |
18 | #include <linux/bug.h> | |
19 | #include <uapi/linux/if_vlan.h> | |
20 | ||
#define VLAN_HLEN	4		/* The additional bytes required by VLAN
					 * (in addition to the Ethernet header)
					 */
#define VLAN_ETH_HLEN	18		/* Total octets in header (ETH_HLEN + VLAN_HLEN). */
#define VLAN_ETH_ZLEN	64		/* Min. octets in frame sans FCS */

/*
 * According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan
 */
#define VLAN_ETH_DATA_LEN	1500	/* Max. octets in payload */
#define VLAN_ETH_FRAME_LEN	1518	/* Max. octets in frame sans FCS */
32 | ||
/**
 * struct vlan_hdr - vlan header
 * @h_vlan_TCI: priority and VLAN ID
 * @h_vlan_encapsulated_proto: packet type ID or len
 *
 * On-wire layout of the 4-byte 802.1Q tag that follows the EtherType;
 * field order and size must not change.
 */
struct vlan_hdr {
	__be16	h_vlan_TCI;
	__be16	h_vlan_encapsulated_proto;
};
42 | ||
/**
 * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
 * @h_dest: destination ethernet address
 * @h_source: source ethernet address
 * @h_vlan_proto: ethernet protocol (ETH_P_8021Q or ETH_P_8021AD)
 * @h_vlan_TCI: priority and VLAN ID
 * @h_vlan_encapsulated_proto: packet type ID or len
 *
 * On-wire layout of a VLAN-tagged ethernet header; field order and
 * size must not change.
 */
struct vlan_ethhdr {
	unsigned char	h_dest[ETH_ALEN];
	unsigned char	h_source[ETH_ALEN];
	__be16		h_vlan_proto;
	__be16		h_vlan_TCI;
	__be16		h_vlan_encapsulated_proto;
};
58 | ||
59 | #include <linux/skbuff.h> | |
60 | ||
/* Interpret @skb's (already set) mac header as a VLAN ethernet header. */
static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
{
	unsigned char *mac = skb_mac_header(skb);

	return (struct vlan_ethhdr *)mac;
}
65 | ||
#define VLAN_PRIO_MASK		0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT		13
#define VLAN_CFI_MASK		0x1000 /* Canonical Format Indicator */
/* The CFI bit is reused inside skb->vlan_tci to mean "a tag is present". */
#define VLAN_TAG_PRESENT	VLAN_CFI_MASK
#define VLAN_VID_MASK		0x0fff /* VLAN Identifier */
#define VLAN_N_VID		4096

/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
75 | ||
76 | static inline bool is_vlan_dev(const struct net_device *dev) | |
77 | { | |
78 | return dev->priv_flags & IFF_802_1Q_VLAN; | |
79 | } | |
80 | ||
/* Test/extract the hw-accelerated tag stored in skb->vlan_tci. */
#define skb_vlan_tag_present(__skb)	((__skb)->vlan_tci & VLAN_TAG_PRESENT)
#define skb_vlan_tag_get(__skb)		((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
#define skb_vlan_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
84 | ||
/**
 * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
 * @rx_packets: number of received packets
 * @rx_bytes: number of received bytes
 * @rx_multicast: number of received multicast packets
 * @tx_packets: number of transmitted packets
 * @tx_bytes: number of transmitted bytes
 * @syncp: synchronization point for 64bit counters
 * @rx_errors: number of rx errors
 * @tx_dropped: number of tx drops
 *
 * The 64-bit counters above @syncp are read under the seqcount in
 * @syncp; the u32 error/drop counters below it are not.
 */
struct vlan_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			rx_multicast;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_errors;
	u32			tx_dropped;
};
106 | ||
107 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | |
108 | ||
/* Real implementations live in net/8021q/; see stubs in the #else branch. */
extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
					       __be16 vlan_proto, u16 vlan_id);
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
114 | ||
/**
 * struct vlan_priority_tci_mapping - vlan egress priority mappings
 * @priority: skb priority
 * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
 * @next: pointer to next struct
 *
 * Singly-linked hash-chain entry; see vlan_dev_priv.egress_priority_map.
 */
struct vlan_priority_tci_mapping {
	u32					priority;
	u16					vlan_qos;
	struct vlan_priority_tci_mapping	*next;
};
126 | ||
struct proc_dir_entry;
struct netpoll;

/**
 * struct vlan_dev_priv - VLAN private device data
 * @nr_ingress_mappings: number of ingress priority mappings
 * @ingress_priority_map: ingress priority mappings
 * @nr_egress_mappings: number of egress priority mappings
 * @egress_priority_map: hash of egress priority mappings
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_id: VLAN identifier
 * @flags: device flags
 * @real_dev: underlying netdevice
 * @real_dev_addr: address of underlying netdevice
 * @dent: proc dir entry
 * @vlan_pcpu_stats: ptr to percpu rx stats
 * @netpoll: netpoll state, only with CONFIG_NET_POLL_CONTROLLER
 * @nest_level: VLAN-in-VLAN stacking depth, see vlan_get_encap_level()
 */
struct vlan_dev_priv {
	unsigned int				nr_ingress_mappings;
	u32					ingress_priority_map[8];
	unsigned int				nr_egress_mappings;
	struct vlan_priority_tci_mapping	*egress_priority_map[16];

	__be16					vlan_proto;
	u16					vlan_id;
	u16					flags;

	struct net_device			*real_dev;
	unsigned char				real_dev_addr[ETH_ALEN];

	struct proc_dir_entry			*dent;
	struct vlan_pcpu_stats __percpu		*vlan_pcpu_stats;
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll				*netpoll;
#endif
	unsigned int				nest_level;
};
164 | ||
/* Fetch the VLAN private area that lives in @dev's netdev_priv(). */
static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
{
	struct vlan_dev_priv *priv = netdev_priv(dev);

	return priv;
}
169 | ||
170 | static inline u16 | |
171 | vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio) | |
172 | { | |
173 | struct vlan_priority_tci_mapping *mp; | |
174 | ||
175 | smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */ | |
176 | ||
177 | mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)]; | |
178 | while (mp) { | |
179 | if (mp->priority == skprio) { | |
180 | return mp->vlan_qos; /* This should already be shifted | |
181 | * to mask correctly with the | |
182 | * VLAN's TCI */ | |
183 | } | |
184 | mp = mp->next; | |
185 | } | |
186 | return 0; | |
187 | } | |
188 | ||
/* Real implementations live in net/8021q/; see stubs in the #else branch. */
extern bool vlan_do_receive(struct sk_buff **skb);

extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);

extern int vlan_vids_add_by_dev(struct net_device *dev,
				const struct net_device *by_dev);
extern void vlan_vids_del_by_dev(struct net_device *dev,
				 const struct net_device *by_dev);

extern bool vlan_uses_dev(const struct net_device *dev);
200 | ||
201 | static inline int vlan_get_encap_level(struct net_device *dev) | |
202 | { | |
203 | BUG_ON(!is_vlan_dev(dev)); | |
204 | return vlan_dev_priv(dev)->nest_level; | |
205 | } | |
#else
/*
 * CONFIG_VLAN_8021Q is disabled: provide inert stubs so callers compile
 * without the VLAN code.  Lookup helpers report "not found"/"no-op";
 * helpers that only make sense on a VLAN device BUG() if ever reached.
 */
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
			 __be16 vlan_proto, u16 vlan_id)
{
	return NULL;
}

static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	BUG();
	return NULL;
}

static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	BUG();
	return 0;
}

static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	BUG();
	return 0;
}

static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
					       u32 skprio)
{
	return 0;
}

static inline bool vlan_do_receive(struct sk_buff **skb)
{
	return false;
}

static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	return 0;
}

static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
}

static inline int vlan_vids_add_by_dev(struct net_device *dev,
				       const struct net_device *by_dev)
{
	return 0;
}

static inline void vlan_vids_del_by_dev(struct net_device *dev,
					const struct net_device *by_dev)
{
}

static inline bool vlan_uses_dev(const struct net_device *dev)
{
	return false;
}

static inline int vlan_get_encap_level(struct net_device *dev)
{
	BUG();
	return 0;
}
#endif
273 | ||
274 | static inline bool vlan_hw_offload_capable(netdev_features_t features, | |
275 | __be16 proto) | |
276 | { | |
277 | if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX) | |
278 | return true; | |
279 | if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX) | |
280 | return true; | |
281 | return false; | |
282 | } | |
283 | ||
/**
 * __vlan_insert_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload
 * Returns error if skb_cow_head fails.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline int __vlan_insert_tag(struct sk_buff *skb,
				    __be16 vlan_proto, u16 vlan_tci)
{
	struct vlan_ethhdr *veth;

	/* Ensure VLAN_HLEN bytes of writable headroom before scribbling. */
	if (skb_cow_head(skb, VLAN_HLEN) < 0)
		return -ENOMEM;

	veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);

	/* Move the mac addresses to the beginning of the new header. */
	memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
	/* NOTE(review): adjusts the mac header offset unconditionally —
	 * assumes callers have a valid mac header set; confirm for all
	 * call sites.
	 */
	skb->mac_header -= VLAN_HLEN;

	/* first, the ethernet type */
	veth->h_vlan_proto = vlan_proto;

	/* now, the TCI */
	veth->h_vlan_TCI = htons(vlan_tci);

	return 0;
}
317 | ||
318 | /** | |
319 | * vlan_insert_tag - regular VLAN tag inserting | |
320 | * @skb: skbuff to tag | |
321 | * @vlan_proto: VLAN encapsulation protocol | |
322 | * @vlan_tci: VLAN TCI to insert | |
323 | * | |
324 | * Inserts the VLAN tag into @skb as part of the payload | |
325 | * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. | |
326 | * | |
327 | * Following the skb_unshare() example, in case of error, the calling function | |
328 | * doesn't have to worry about freeing the original skb. | |
329 | * | |
330 | * Does not change skb->protocol so this function can be used during receive. | |
331 | */ | |
332 | static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, | |
333 | __be16 vlan_proto, u16 vlan_tci) | |
334 | { | |
335 | int err; | |
336 | ||
337 | err = __vlan_insert_tag(skb, vlan_proto, vlan_tci); | |
338 | if (err) { | |
339 | dev_kfree_skb_any(skb); | |
340 | return NULL; | |
341 | } | |
342 | return skb; | |
343 | } | |
344 | ||
345 | /** | |
346 | * vlan_insert_tag_set_proto - regular VLAN tag inserting | |
347 | * @skb: skbuff to tag | |
348 | * @vlan_proto: VLAN encapsulation protocol | |
349 | * @vlan_tci: VLAN TCI to insert | |
350 | * | |
351 | * Inserts the VLAN tag into @skb as part of the payload | |
352 | * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. | |
353 | * | |
354 | * Following the skb_unshare() example, in case of error, the calling function | |
355 | * doesn't have to worry about freeing the original skb. | |
356 | */ | |
357 | static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb, | |
358 | __be16 vlan_proto, | |
359 | u16 vlan_tci) | |
360 | { | |
361 | skb = vlan_insert_tag(skb, vlan_proto, vlan_tci); | |
362 | if (skb) | |
363 | skb->protocol = vlan_proto; | |
364 | return skb; | |
365 | } | |
366 | ||
367 | /* | |
368 | * __vlan_hwaccel_push_inside - pushes vlan tag to the payload | |
369 | * @skb: skbuff to tag | |
370 | * | |
371 | * Pushes the VLAN tag from @skb->vlan_tci inside to the payload. | |
372 | * | |
373 | * Following the skb_unshare() example, in case of error, the calling function | |
374 | * doesn't have to worry about freeing the original skb. | |
375 | */ | |
376 | static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) | |
377 | { | |
378 | skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, | |
379 | skb_vlan_tag_get(skb)); | |
380 | if (likely(skb)) | |
381 | skb->vlan_tci = 0; | |
382 | return skb; | |
383 | } | |
/**
 * vlan_hwaccel_push_inside - pushes vlan tag to the payload
 * @skb: skbuff to tag
 *
 * Checks if a tag is present in @skb->vlan_tci and if it is, pushes the
 * VLAN tag from @skb->vlan_tci inside to the payload.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 */
static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
{
	if (!skb_vlan_tag_present(skb))
		return skb;
	return __vlan_hwaccel_push_inside(skb);
}
400 | ||
/**
 * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest.
 * The VLAN_TAG_PRESENT bit is OR'd in so skb_vlan_tag_present() is true.
 */
static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
					  __be16 vlan_proto, u16 vlan_tci)
{
	skb->vlan_proto = vlan_proto;
	skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
}
415 | ||
/**
 * __vlan_get_tag - get the VLAN ID that is part of the payload
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if the skb is not of VLAN type
 *
 * NOTE(review): reads a vlan_ethhdr straight from skb->data without a
 * length check — assumes callers guarantee at least VLAN_ETH_HLEN bytes
 * of linear data; confirm at the call sites.
 */
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;

	if (veth->h_vlan_proto != htons(ETH_P_8021Q) &&
	    veth->h_vlan_proto != htons(ETH_P_8021AD))
		return -EINVAL;

	*vlan_tci = ntohs(veth->h_vlan_TCI);
	return 0;
}
434 | ||
/**
 * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->vlan_tci
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if @skb->vlan_tci is not set correctly
 */
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
					 u16 *vlan_tci)
{
	if (skb_vlan_tag_present(skb)) {
		*vlan_tci = skb_vlan_tag_get(skb);
		return 0;
	} else {
		*vlan_tci = 0;
		return -EINVAL;
	}
}
453 | ||
454 | #define HAVE_VLAN_GET_TAG | |
455 | ||
456 | /** | |
457 | * vlan_get_tag - get the VLAN ID from the skb | |
458 | * @skb: skbuff to query | |
459 | * @vlan_tci: buffer to store value | |
460 | * | |
461 | * Returns error if the skb is not VLAN tagged | |
462 | */ | |
463 | static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) | |
464 | { | |
465 | if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) { | |
466 | return __vlan_hwaccel_get_tag(skb, vlan_tci); | |
467 | } else { | |
468 | return __vlan_get_tag(skb, vlan_tci); | |
469 | } | |
470 | } | |
471 | ||
/**
 * __vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 * @type: first vlan protocol
 * @depth: buffer to store length of eth and vlan tags in bytes
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 * Returns 0 on a malformed packet (truncated header or bogus mac_len).
 */
static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
					 int *depth)
{
	unsigned int vlan_depth = skb->mac_len;

	/* if type is 802.1Q/AD then the header should already be
	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
	 * ETH_HLEN otherwise
	 */
	if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
		if (vlan_depth) {
			if (WARN_ON(vlan_depth < VLAN_HLEN))
				return 0;
			vlan_depth -= VLAN_HLEN;
		} else {
			vlan_depth = ETH_HLEN;
		}
		/* Walk through stacked (QinQ) tags until a non-VLAN
		 * EtherType is found, pulling each header into the
		 * linear area before reading it.
		 */
		do {
			struct vlan_hdr *vh;

			if (unlikely(!pskb_may_pull(skb,
						    vlan_depth + VLAN_HLEN)))
				return 0;

			vh = (struct vlan_hdr *)(skb->data + vlan_depth);
			type = vh->h_vlan_encapsulated_proto;
			vlan_depth += VLAN_HLEN;
		} while (type == htons(ETH_P_8021Q) ||
			 type == htons(ETH_P_8021AD));
	}

	if (depth)
		*depth = vlan_depth;

	return type;
}
517 | ||
518 | /** | |
519 | * vlan_get_protocol - get protocol EtherType. | |
520 | * @skb: skbuff to query | |
521 | * | |
522 | * Returns the EtherType of the packet, regardless of whether it is | |
523 | * vlan encapsulated (normal or hardware accelerated) or not. | |
524 | */ | |
525 | static inline __be16 vlan_get_protocol(struct sk_buff *skb) | |
526 | { | |
527 | return __vlan_get_protocol(skb, skb->protocol, NULL); | |
528 | } | |
529 | ||
530 | static inline void vlan_set_encap_proto(struct sk_buff *skb, | |
531 | struct vlan_hdr *vhdr) | |
532 | { | |
533 | __be16 proto; | |
534 | unsigned short *rawp; | |
535 | ||
536 | /* | |
537 | * Was a VLAN packet, grab the encapsulated protocol, which the layer | |
538 | * three protocols care about. | |
539 | */ | |
540 | ||
541 | proto = vhdr->h_vlan_encapsulated_proto; | |
542 | if (eth_proto_is_802_3(proto)) { | |
543 | skb->protocol = proto; | |
544 | return; | |
545 | } | |
546 | ||
547 | rawp = (unsigned short *)(vhdr + 1); | |
548 | if (*rawp == 0xFFFF) | |
549 | /* | |
550 | * This is a magic hack to spot IPX packets. Older Novell | |
551 | * breaks the protocol design and runs IPX over 802.3 without | |
552 | * an 802.2 LLC layer. We look for FFFF which isn't a used | |
553 | * 802.2 SSAP/DSAP. This won't work for fault tolerant netware | |
554 | * but does for the rest. | |
555 | */ | |
556 | skb->protocol = htons(ETH_P_802_3); | |
557 | else | |
558 | /* | |
559 | * Real 802.2 LLC | |
560 | */ | |
561 | skb->protocol = htons(ETH_P_802_2); | |
562 | } | |
563 | ||
564 | /** | |
565 | * skb_vlan_tagged - check if skb is vlan tagged. | |
566 | * @skb: skbuff to query | |
567 | * | |
568 | * Returns true if the skb is tagged, regardless of whether it is hardware | |
569 | * accelerated or not. | |
570 | */ | |
571 | static inline bool skb_vlan_tagged(const struct sk_buff *skb) | |
572 | { | |
573 | if (!skb_vlan_tag_present(skb) && | |
574 | likely(skb->protocol != htons(ETH_P_8021Q) && | |
575 | skb->protocol != htons(ETH_P_8021AD))) | |
576 | return false; | |
577 | ||
578 | return true; | |
579 | } | |
580 | ||
/**
 * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
 * @skb: skbuff to query
 *
 * Returns true if the skb is tagged with multiple vlan headers, regardless
 * of whether it is hardware accelerated or not.
 *
 * NOTE(review): dereferences a vlan_ethhdr at skb->data without first
 * ensuring VLAN_ETH_HLEN bytes are in the linear area (the const @skb
 * prevents a pskb_may_pull() here) — verify callers guarantee this.
 */
static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	if (!skb_vlan_tag_present(skb)) {
		struct vlan_ethhdr *veh;

		/* Not hw-accelerated: the outer tag must be in the payload. */
		if (likely(protocol != htons(ETH_P_8021Q) &&
			   protocol != htons(ETH_P_8021AD)))
			return false;

		veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	/* "Multi" means the inner protocol is itself a VLAN EtherType. */
	if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD))
		return false;

	return true;
}
608 | ||
609 | /** | |
610 | * vlan_features_check - drop unsafe features for skb with multiple tags. | |
611 | * @skb: skbuff to query | |
612 | * @features: features to be checked | |
613 | * | |
614 | * Returns features without unsafe ones if the skb has multiple tags. | |
615 | */ | |
616 | static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, | |
617 | netdev_features_t features) | |
618 | { | |
619 | if (skb_vlan_tagged_multi(skb)) | |
620 | features = netdev_intersect_features(features, | |
621 | NETIF_F_SG | | |
622 | NETIF_F_HIGHDMA | | |
623 | NETIF_F_FRAGLIST | | |
624 | NETIF_F_HW_CSUM | | |
625 | NETIF_F_HW_VLAN_CTAG_TX | | |
626 | NETIF_F_HW_VLAN_STAG_TX); | |
627 | ||
628 | return features; | |
629 | } | |
630 | ||
/**
 * compare_vlan_header - Compare two vlan headers
 * @h1: Pointer to vlan header
 * @h2: Pointer to vlan header
 *
 * Compare two vlan headers, returns 0 if equal.
 *
 * Please note that alignment of h1 & h2 are only guaranteed to be 16 bits.
 */
static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
						const struct vlan_hdr *h2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	/* Both 16-bit fields compared in one (possibly unaligned) 32-bit
	 * load — only valid where the arch tolerates unaligned access.
	 */
	return *(u32 *)h1 ^ *(u32 *)h2;
#else
	/* Field-by-field fallback for strict-alignment architectures. */
	return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
	       ((__force u32)h1->h_vlan_encapsulated_proto ^
		(__force u32)h2->h_vlan_encapsulated_proto);
#endif
}
651 | #endif /* !(_LINUX_IF_VLAN_H_) */ |