// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 */
11 #include <linux/errno.h>
12 #include <linux/module.h>
13 #include <linux/netdevice.h>
14 #include <linux/skbuff.h>
15 #include <linux/slab.h>
16 #include <linux/spinlock.h>
19 #include <linux/notifier.h>
21 #ifdef CONFIG_XFRM_OFFLOAD
22 static void __xfrm_transport_prep(struct xfrm_state
*x
, struct sk_buff
*skb
,
25 struct xfrm_offload
*xo
= xfrm_offload(skb
);
27 skb_reset_mac_len(skb
);
28 if (xo
->flags
& XFRM_GSO_SEGMENT
)
29 skb
->transport_header
-= x
->props
.header_len
;
31 pskb_pull(skb
, skb_transport_offset(skb
) + x
->props
.header_len
);
34 static void __xfrm_mode_tunnel_prep(struct xfrm_state
*x
, struct sk_buff
*skb
,
38 struct xfrm_offload
*xo
= xfrm_offload(skb
);
40 if (xo
->flags
& XFRM_GSO_SEGMENT
)
41 skb
->transport_header
= skb
->network_header
+ hsize
;
43 skb_reset_mac_len(skb
);
44 pskb_pull(skb
, skb
->mac_len
+ x
->props
.header_len
);
47 static void __xfrm_mode_beet_prep(struct xfrm_state
*x
, struct sk_buff
*skb
,
50 struct xfrm_offload
*xo
= xfrm_offload(skb
);
53 if (xo
->flags
& XFRM_GSO_SEGMENT
)
54 skb
->transport_header
= skb
->network_header
+ hsize
;
56 skb_reset_mac_len(skb
);
57 if (x
->sel
.family
!= AF_INET6
) {
58 phlen
= IPV4_BEET_PHMAXLEN
;
59 if (x
->outer_mode
.family
== AF_INET6
)
60 phlen
+= sizeof(struct ipv6hdr
) - sizeof(struct iphdr
);
63 pskb_pull(skb
, skb
->mac_len
+ hsize
+ (x
->props
.header_len
- phlen
));
66 /* Adjust pointers into the packet when IPsec is done at layer2 */
67 static void xfrm_outer_mode_prep(struct xfrm_state
*x
, struct sk_buff
*skb
)
69 switch (x
->outer_mode
.encap
) {
70 case XFRM_MODE_TUNNEL
:
71 if (x
->outer_mode
.family
== AF_INET
)
72 return __xfrm_mode_tunnel_prep(x
, skb
,
73 sizeof(struct iphdr
));
74 if (x
->outer_mode
.family
== AF_INET6
)
75 return __xfrm_mode_tunnel_prep(x
, skb
,
76 sizeof(struct ipv6hdr
));
78 case XFRM_MODE_TRANSPORT
:
79 if (x
->outer_mode
.family
== AF_INET
)
80 return __xfrm_transport_prep(x
, skb
,
81 sizeof(struct iphdr
));
82 if (x
->outer_mode
.family
== AF_INET6
)
83 return __xfrm_transport_prep(x
, skb
,
84 sizeof(struct ipv6hdr
));
87 if (x
->outer_mode
.family
== AF_INET
)
88 return __xfrm_mode_beet_prep(x
, skb
,
89 sizeof(struct iphdr
));
90 if (x
->outer_mode
.family
== AF_INET6
)
91 return __xfrm_mode_beet_prep(x
, skb
,
92 sizeof(struct ipv6hdr
));
94 case XFRM_MODE_ROUTEOPTIMIZATION
:
95 case XFRM_MODE_IN_TRIGGER
:
100 struct sk_buff
*validate_xmit_xfrm(struct sk_buff
*skb
, netdev_features_t features
, bool *again
)
104 struct xfrm_state
*x
;
105 struct softnet_data
*sd
;
106 struct sk_buff
*skb2
, *nskb
, *pskb
= NULL
;
107 netdev_features_t esp_features
= features
;
108 struct xfrm_offload
*xo
= xfrm_offload(skb
);
109 struct net_device
*dev
= skb
->dev
;
112 if (!xo
|| (xo
->flags
& XFRM_XMIT
))
115 if (!(features
& NETIF_F_HW_ESP
))
116 esp_features
= features
& ~(NETIF_F_SG
| NETIF_F_CSUM_MASK
);
118 sp
= skb_sec_path(skb
);
119 x
= sp
->xvec
[sp
->len
- 1];
120 if (xo
->flags
& XFRM_GRO
|| x
->xso
.flags
& XFRM_OFFLOAD_INBOUND
)
123 /* This skb was already validated on the upper/virtual dev */
124 if ((x
->xso
.dev
!= dev
) && (x
->xso
.real_dev
== dev
))
127 local_irq_save(flags
);
128 sd
= this_cpu_ptr(&softnet_data
);
129 err
= !skb_queue_empty(&sd
->xfrm_backlog
);
130 local_irq_restore(flags
);
137 if (skb_is_gso(skb
) && unlikely(x
->xso
.dev
!= dev
)) {
138 struct sk_buff
*segs
;
140 /* Packet got rerouted, fixup features and segment it. */
141 esp_features
= esp_features
& ~(NETIF_F_HW_ESP
| NETIF_F_GSO_ESP
);
143 segs
= skb_gso_segment(skb
, esp_features
);
146 atomic_long_inc(&dev
->tx_dropped
);
155 esp_features
|= skb
->dev
->gso_partial_features
;
156 xfrm_outer_mode_prep(x
, skb
);
158 xo
->flags
|= XFRM_DEV_RESUME
;
160 err
= x
->type_offload
->xmit(x
, skb
, esp_features
);
162 if (err
== -EINPROGRESS
)
165 XFRM_INC_STATS(xs_net(x
), LINUX_MIB_XFRMOUTSTATEPROTOERROR
);
170 skb_push(skb
, skb
->data
- skb_mac_header(skb
));
175 skb_list_walk_safe(skb
, skb2
, nskb
) {
176 esp_features
|= skb
->dev
->gso_partial_features
;
177 skb_mark_not_on_list(skb2
);
179 xo
= xfrm_offload(skb2
);
180 xo
->flags
|= XFRM_DEV_RESUME
;
182 xfrm_outer_mode_prep(x
, skb2
);
184 err
= x
->type_offload
->xmit(x
, skb2
, esp_features
);
187 } else if (err
!= -EINPROGRESS
) {
188 XFRM_INC_STATS(xs_net(x
), LINUX_MIB_XFRMOUTSTATEPROTOERROR
);
190 kfree_skb_list(skb2
);
201 skb_push(skb2
, skb2
->data
- skb_mac_header(skb2
));
207 EXPORT_SYMBOL_GPL(validate_xmit_xfrm
);
209 int xfrm_dev_state_add(struct net
*net
, struct xfrm_state
*x
,
210 struct xfrm_user_offload
*xuo
)
213 struct dst_entry
*dst
;
214 struct net_device
*dev
;
215 struct xfrm_state_offload
*xso
= &x
->xso
;
216 xfrm_address_t
*saddr
;
217 xfrm_address_t
*daddr
;
219 if (!x
->type_offload
)
222 /* We don't yet support UDP encapsulation and TFC padding. */
223 if (x
->encap
|| x
->tfcpad
)
226 dev
= dev_get_by_index(net
, xuo
->ifindex
);
228 if (!(xuo
->flags
& XFRM_OFFLOAD_INBOUND
)) {
229 saddr
= &x
->props
.saddr
;
230 daddr
= &x
->id
.daddr
;
232 saddr
= &x
->id
.daddr
;
233 daddr
= &x
->props
.saddr
;
236 dst
= __xfrm_dst_lookup(net
, 0, 0, saddr
, daddr
,
238 xfrm_smark_get(0, x
));
248 if (!dev
->xfrmdev_ops
|| !dev
->xfrmdev_ops
->xdo_dev_state_add
) {
254 if (x
->props
.flags
& XFRM_STATE_ESN
&&
255 !dev
->xfrmdev_ops
->xdo_dev_state_advance_esn
) {
263 xso
->num_exthdrs
= 1;
264 xso
->flags
= xuo
->flags
;
266 err
= dev
->xfrmdev_ops
->xdo_dev_state_add(x
);
268 xso
->num_exthdrs
= 0;
273 if (err
!= -EOPNOTSUPP
)
279 EXPORT_SYMBOL_GPL(xfrm_dev_state_add
);
281 bool xfrm_dev_offload_ok(struct sk_buff
*skb
, struct xfrm_state
*x
)
284 struct dst_entry
*dst
= skb_dst(skb
);
285 struct xfrm_dst
*xdst
= (struct xfrm_dst
*)dst
;
286 struct net_device
*dev
= x
->xso
.dev
;
288 if (!x
->type_offload
|| x
->encap
)
291 if ((!dev
|| (dev
== xfrm_dst_path(dst
)->dev
)) &&
292 (!xdst
->child
->xfrm
)) {
293 mtu
= xfrm_state_mtu(x
, xdst
->child_mtu_cached
);
297 if (skb_is_gso(skb
) && skb_gso_validate_network_len(skb
, mtu
))
304 if (dev
&& dev
->xfrmdev_ops
&& dev
->xfrmdev_ops
->xdo_dev_offload_ok
)
305 return x
->xso
.dev
->xfrmdev_ops
->xdo_dev_offload_ok(skb
, x
);
309 EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok
);
311 void xfrm_dev_resume(struct sk_buff
*skb
)
313 struct net_device
*dev
= skb
->dev
;
314 int ret
= NETDEV_TX_BUSY
;
315 struct netdev_queue
*txq
;
316 struct softnet_data
*sd
;
320 txq
= netdev_core_pick_tx(dev
, skb
, NULL
);
322 HARD_TX_LOCK(dev
, txq
, smp_processor_id());
323 if (!netif_xmit_frozen_or_stopped(txq
))
324 skb
= dev_hard_start_xmit(skb
, dev
, txq
, &ret
);
325 HARD_TX_UNLOCK(dev
, txq
);
327 if (!dev_xmit_complete(ret
)) {
328 local_irq_save(flags
);
329 sd
= this_cpu_ptr(&softnet_data
);
330 skb_queue_tail(&sd
->xfrm_backlog
, skb
);
331 raise_softirq_irqoff(NET_TX_SOFTIRQ
);
332 local_irq_restore(flags
);
336 EXPORT_SYMBOL_GPL(xfrm_dev_resume
);
338 void xfrm_dev_backlog(struct softnet_data
*sd
)
340 struct sk_buff_head
*xfrm_backlog
= &sd
->xfrm_backlog
;
341 struct sk_buff_head list
;
344 if (skb_queue_empty(xfrm_backlog
))
347 __skb_queue_head_init(&list
);
349 spin_lock(&xfrm_backlog
->lock
);
350 skb_queue_splice_init(xfrm_backlog
, &list
);
351 spin_unlock(&xfrm_backlog
->lock
);
353 while (!skb_queue_empty(&list
)) {
354 skb
= __skb_dequeue(&list
);
355 xfrm_dev_resume(skb
);
361 static int xfrm_api_check(struct net_device
*dev
)
363 #ifdef CONFIG_XFRM_OFFLOAD
364 if ((dev
->features
& NETIF_F_HW_ESP_TX_CSUM
) &&
365 !(dev
->features
& NETIF_F_HW_ESP
))
368 if ((dev
->features
& NETIF_F_HW_ESP
) &&
369 (!(dev
->xfrmdev_ops
&&
370 dev
->xfrmdev_ops
->xdo_dev_state_add
&&
371 dev
->xfrmdev_ops
->xdo_dev_state_delete
)))
374 if (dev
->features
& (NETIF_F_HW_ESP
| NETIF_F_HW_ESP_TX_CSUM
))
/* NETDEV_REGISTER handler: validate the new device's ESP offload setup. */
static int xfrm_dev_register(struct net_device *dev)
{
	return xfrm_api_check(dev);
}
/* NETDEV_FEAT_CHANGE handler: re-validate ESP offload after a feature
 * change.
 */
static int xfrm_dev_feat_change(struct net_device *dev)
{
	return xfrm_api_check(dev);
}
391 static int xfrm_dev_down(struct net_device
*dev
)
393 if (dev
->features
& NETIF_F_HW_ESP
)
394 xfrm_dev_state_flush(dev_net(dev
), dev
, true);
399 static int xfrm_dev_event(struct notifier_block
*this, unsigned long event
, void *ptr
)
401 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
404 case NETDEV_REGISTER
:
405 return xfrm_dev_register(dev
);
407 case NETDEV_FEAT_CHANGE
:
408 return xfrm_dev_feat_change(dev
);
411 case NETDEV_UNREGISTER
:
412 return xfrm_dev_down(dev
);
417 static struct notifier_block xfrm_dev_notifier
= {
418 .notifier_call
= xfrm_dev_event
,
421 void __init
xfrm_dev_init(void)
423 register_netdevice_notifier(&xfrm_dev_notifier
);