1 #ifndef _NET_XFRM_H
2 #define _NET_XFRM_H
3
4 #include <linux/compiler.h>
5 #include <linux/xfrm.h>
6 #include <linux/spinlock.h>
7 #include <linux/list.h>
8 #include <linux/skbuff.h>
9 #include <linux/socket.h>
10 #include <linux/pfkeyv2.h>
11 #include <linux/ipsec.h>
12 #include <linux/in6.h>
13 #include <linux/mutex.h>
14 #include <linux/audit.h>
15 #include <linux/slab.h>
16
17 #include <net/sock.h>
18 #include <net/dst.h>
19 #include <net/ip.h>
20 #include <net/route.h>
21 #include <net/ipv6.h>
22 #include <net/ip6_fib.h>
23 #include <net/flow.h>
24
25 #include <linux/interrupt.h>
26
27 #ifdef CONFIG_XFRM_STATISTICS
28 #include <net/snmp.h>
29 #endif
30
31 #define XFRM_PROTO_ESP 50
32 #define XFRM_PROTO_AH 51
33 #define XFRM_PROTO_COMP 108
34 #define XFRM_PROTO_IPIP 4
35 #define XFRM_PROTO_IPV6 41
36 #define XFRM_PROTO_ROUTING IPPROTO_ROUTING
37 #define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS
38
39 #define XFRM_ALIGN4(len) (((len) + 3) & ~3)
40 #define XFRM_ALIGN8(len) (((len) + 7) & ~7)
41 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
42 MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
43 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
44 MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
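/*
 * For illustration only: with AF_INET == 2 and XFRM_PROTO_ESP == 50, an ESP
 * implementation announcing itself via
 *
 *	MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);
 *
 * ends up with the alias "xfrm-type-2-50", which lets the xfrm core load the
 * matching transform module on demand.  Likewise XFRM_ALIGN8(21) == 24 and
 * XFRM_ALIGN4(5) == 8, i.e. the macros round a length up to the next
 * multiple of 8 or 4.
 */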
45
46 #ifdef CONFIG_XFRM_STATISTICS
47 #define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
48 #define XFRM_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.xfrm_statistics, field)
49 #define XFRM_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->mib.xfrm_statistics, field)
50 #else
51 #define XFRM_INC_STATS(net, field) ((void)(net))
52 #define XFRM_INC_STATS_BH(net, field) ((void)(net))
53 #define XFRM_INC_STATS_USER(net, field) ((void)(net))
54 #endif
55
56 extern struct mutex xfrm_cfg_mutex;
57
58 /* Organization of SPD aka "XFRM rules"
59 ------------------------------------
60
61 Basic objects:
62 - policy rule, struct xfrm_policy (=SPD entry)
63 - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
64 - instance of a transformer, struct xfrm_state (=SA)
65 - template to clone xfrm_state, struct xfrm_tmpl
66
67 SPD is plain linear list of xfrm_policy rules, ordered by priority.
68 (To be compatible with existing pfkeyv2 implementations,
69 many rules with priority of 0x7fffffff are allowed to exist and
70 such rules are ordered in an unpredictable way, thanks to bsd folks.)
71
72 Lookup is plain linear search until the first match with selector.
73
74 If "action" is "block", then we prohibit the flow, otherwise:
75 if "xfrm_nr" is zero, the flow passes untransformed. Otherwise,
76 the policy entry has a list of up to XFRM_MAX_DEPTH transformations,
77 described by xfrm_tmpl templates. Each template is resolved
78 to a complete xfrm_state (see below) and we pack the bundle of
79 transformations into a dst_entry returned to the requestor.
80
81    dst -. xfrm  .-> xfrm_state #1
82     |---. child .-> dst -. xfrm .-> xfrm_state #2
83                    |---. child .-> dst -. xfrm .-> xfrm_state #3
84                    |---. child .-> NULL
85
86 Bundles are cached in the xfrm_policy struct (field ->bundles).
87
88
89 Resolution of xfrm_tmpl
90 -----------------------
91 Template contains:
92 1. ->mode Mode: transport or tunnel
93 2. ->id.proto Protocol: AH/ESP/IPCOMP
94 3. ->id.daddr Remote tunnel endpoint, ignored for transport mode.
95 Q: allow to resolve security gateway?
96 4. ->id.spi If not zero, static SPI.
97 5. ->saddr Local tunnel endpoint, ignored for transport mode.
98 6. ->algos List of allowed algos. Plain bitmask now.
99 Q: ealgos, aalgos, calgos. What a mess...
100 7. ->share Sharing mode.
101 Q: how to implement private sharing mode? To add struct sock* to
102 flow id?
103
104 Given this template we search the SAD for entries with an
105 appropriate mode/proto/algo that are permitted by the selector.
106 If no suitable entry is found, one is requested from the key manager.
107
108 PROBLEMS:
109 Q: How to find all the bundles referring to a physical path for
110 PMTU discovery? Seems, dst should contain list of all parents...
111 and enter to infinite locking hierarchy disaster.
112 No! It is easier, we will not search for them, let them find us.
113 We add genid to each dst plus pointer to genid of raw IP route,
114 pmtu disc will update pmtu on raw IP route and increase its genid.
115 dst_check() will see this for top level and trigger resyncing
116 metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
117 */
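/*
 * A rough sketch of the output-side lookup described above (illustrative
 * only; the real code in net/xfrm/xfrm_policy.c adds caching, locking and
 * sub-policy handling):
 *
 *	pol = first SPD entry whose selector matches the flow;
 *	if (pol->action == XFRM_POLICY_BLOCK)
 *		drop the packet;
 *	else if (pol->xfrm_nr == 0)
 *		pass the flow untransformed;
 *	else
 *		for (i = 0; i < pol->xfrm_nr; i++)
 *			resolve pol->xfrm_vec[i] to an xfrm_state (SAD lookup,
 *			or an acquire via the key manager) and chain the
 *			resulting xfrm_dst into the bundle handed back to the
 *			caller.
 */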
118
119 struct xfrm_state_walk {
120 struct list_head all;
121 u8 state;
122 union {
123 u8 dying;
124 u8 proto;
125 };
126 u32 seq;
127 };
128
129 /* Full description of state of transformer. */
130 struct xfrm_state {
131 #ifdef CONFIG_NET_NS
132 struct net *xs_net;
133 #endif
134 union {
135 struct hlist_node gclist;
136 struct hlist_node bydst;
137 };
138 struct hlist_node bysrc;
139 struct hlist_node byspi;
140
141 atomic_t refcnt;
142 spinlock_t lock;
143
144 struct xfrm_id id;
145 struct xfrm_selector sel;
146 struct xfrm_mark mark;
147 u32 tfcpad;
148
149 u32 genid;
150
151 /* Key manager bits */
152 struct xfrm_state_walk km;
153
154 /* Parameters of this state. */
155 struct {
156 u32 reqid;
157 u8 mode;
158 u8 replay_window;
159 u8 aalgo, ealgo, calgo;
160 u8 flags;
161 u16 family;
162 xfrm_address_t saddr;
163 int header_len;
164 int trailer_len;
165 } props;
166
167 struct xfrm_lifetime_cfg lft;
168
169 /* Data for transformer */
170 struct xfrm_algo_auth *aalg;
171 struct xfrm_algo *ealg;
172 struct xfrm_algo *calg;
173 struct xfrm_algo_aead *aead;
174
175 /* Data for encapsulator */
176 struct xfrm_encap_tmpl *encap;
177
178 /* Data for care-of address */
179 xfrm_address_t *coaddr;
180
181 /* IPComp needs an IPIP tunnel for handling uncompressed packets */
182 struct xfrm_state *tunnel;
183
184 /* If a tunnel, number of users + 1 */
185 atomic_t tunnel_users;
186
187 /* State for replay detection */
188 struct xfrm_replay_state replay;
189 struct xfrm_replay_state_esn *replay_esn;
190
191 /* Replay detection state at the time we sent the last notification */
192 struct xfrm_replay_state preplay;
193 struct xfrm_replay_state_esn *preplay_esn;
194
195 /* internal flag that only holds state for delayed aevent at the
196 * moment
197 */
198 u32 xflags;
199
200 /* Replay detection notification settings */
201 u32 replay_maxage;
202 u32 replay_maxdiff;
203
204 /* Replay detection notification timer */
205 struct timer_list rtimer;
206
207 /* Statistics */
208 struct xfrm_stats stats;
209
210 struct xfrm_lifetime_cur curlft;
211 struct tasklet_hrtimer mtimer;
212
213 /* Last used time */
214 unsigned long lastused;
215
216 /* Reference to data common to all the instances of this
217 * transformer. */
218 const struct xfrm_type *type;
219 struct xfrm_mode *inner_mode;
220 struct xfrm_mode *inner_mode_iaf;
221 struct xfrm_mode *outer_mode;
222
223 /* Security context */
224 struct xfrm_sec_ctx *security;
225
226 /* Private data of this transformer, format is opaque,
227 * interpreted by xfrm_type methods. */
228 void *data;
229 };
230
231 static inline struct net *xs_net(struct xfrm_state *x)
232 {
233 return read_pnet(&x->xs_net);
234 }
235
236 /* xflags - make enum if more show up */
237 #define XFRM_TIME_DEFER 1
238
239 enum {
240 XFRM_STATE_VOID,
241 XFRM_STATE_ACQ,
242 XFRM_STATE_VALID,
243 XFRM_STATE_ERROR,
244 XFRM_STATE_EXPIRED,
245 XFRM_STATE_DEAD
246 };
247
248 /* callback structure passed from either netlink or pfkey */
249 struct km_event {
250 union {
251 u32 hard;
252 u32 proto;
253 u32 byid;
254 u32 aevent;
255 u32 type;
256 } data;
257
258 u32 seq;
259 u32 pid;
260 u32 event;
261 struct net *net;
262 };
263
264 struct net_device;
265 struct xfrm_type;
266 struct xfrm_dst;
267 struct xfrm_policy_afinfo {
268 unsigned short family;
269 struct dst_ops *dst_ops;
270 void (*garbage_collect)(struct net *net);
271 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
272 const xfrm_address_t *saddr,
273 const xfrm_address_t *daddr);
274 int (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr);
275 void (*decode_session)(struct sk_buff *skb,
276 struct flowi *fl,
277 int reverse);
278 int (*get_tos)(const struct flowi *fl);
279 int (*init_path)(struct xfrm_dst *path,
280 struct dst_entry *dst,
281 int nfheader_len);
282 int (*fill_dst)(struct xfrm_dst *xdst,
283 struct net_device *dev,
284 const struct flowi *fl);
285 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
286 };
287
288 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
289 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
290 extern void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c);
291 extern void km_state_notify(struct xfrm_state *x, const struct km_event *c);
292
293 struct xfrm_tmpl;
294 extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
295 extern void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
296 extern int __xfrm_state_delete(struct xfrm_state *x);
297
298 struct xfrm_state_afinfo {
299 unsigned int family;
300 unsigned int proto;
301 __be16 eth_proto;
302 struct module *owner;
303 const struct xfrm_type *type_map[IPPROTO_MAX];
304 struct xfrm_mode *mode_map[XFRM_MODE_MAX];
305 int (*init_flags)(struct xfrm_state *x);
306 void (*init_tempsel)(struct xfrm_selector *sel,
307 const struct flowi *fl);
308 void (*init_temprop)(struct xfrm_state *x,
309 const struct xfrm_tmpl *tmpl,
310 const xfrm_address_t *daddr,
311 const xfrm_address_t *saddr);
312 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
313 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
314 int (*output)(struct sk_buff *skb);
315 int (*extract_input)(struct xfrm_state *x,
316 struct sk_buff *skb);
317 int (*extract_output)(struct xfrm_state *x,
318 struct sk_buff *skb);
319 int (*transport_finish)(struct sk_buff *skb,
320 int async);
321 };
322
323 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
324 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
325
326 extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
327
328 struct xfrm_type {
329 char *description;
330 struct module *owner;
331 u8 proto;
332 u8 flags;
333 #define XFRM_TYPE_NON_FRAGMENT 1
334 #define XFRM_TYPE_REPLAY_PROT 2
335 #define XFRM_TYPE_LOCAL_COADDR 4
336 #define XFRM_TYPE_REMOTE_COADDR 8
337
338 int (*init_state)(struct xfrm_state *x);
339 void (*destructor)(struct xfrm_state *);
340 int (*input)(struct xfrm_state *, struct sk_buff *skb);
341 int (*output)(struct xfrm_state *, struct sk_buff *pskb);
342 int (*reject)(struct xfrm_state *, struct sk_buff *,
343 const struct flowi *);
344 int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
345 /* Estimate maximal size of result of transformation of a dgram */
346 u32 (*get_mtu)(struct xfrm_state *, int size);
347 };
348
349 extern int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
350 extern int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
351
352 struct xfrm_mode {
353 /*
354 * Remove encapsulation header.
355 *
356 * The IP header will be moved over the top of the encapsulation
357 * header.
358 *
359 * On entry, the transport header shall point to where the IP header
360 * should be and the network header shall be set to where the IP
361 * header currently is. skb->data shall point to the start of the
362 * payload.
363 */
364 int (*input2)(struct xfrm_state *x, struct sk_buff *skb);
365
366 /*
367 * This is the actual input entry point.
368 *
369 * For transport mode and equivalent this would be identical to
370 * input2 (which does not need to be set). While tunnel mode
371 * and equivalent would set this to the tunnel encapsulation function
372 * xfrm4_prepare_input that would in turn call input2.
373 */
374 int (*input)(struct xfrm_state *x, struct sk_buff *skb);
375
376 /*
377 * Add encapsulation header.
378 *
379 * On exit, the transport header will be set to the start of the
380 * encapsulation header to be filled in by x->type->output and
381 * the mac header will be set to the nextheader (protocol for
382 * IPv4) field of the extension header directly preceding the
383 * encapsulation header, or in its absence, that of the top IP
384 * header. The value of the network header will always point
385 * to the top IP header while skb->data will point to the payload.
386 */
387 int (*output2)(struct xfrm_state *x, struct sk_buff *skb);
388
389 /*
390 * This is the actual output entry point.
391 *
392 * For transport mode and equivalent this would be identical to
393 * output2 (which does not need to be set). While tunnel mode
394 * and equivalent would set this to a tunnel encapsulation function
395 * (xfrm4_prepare_output or xfrm6_prepare_output) that would in turn
396 * call output2.
397 */
398 int (*output)(struct xfrm_state *x, struct sk_buff *skb);
399
400 struct xfrm_state_afinfo *afinfo;
401 struct module *owner;
402 unsigned int encap;
403 int flags;
404 };
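/*
 * Putting the two halves together, the output path effectively runs
 * (simplified sketch; see net/xfrm/xfrm_output.c for the real sequencing
 * and error handling):
 *
 *	err = x->outer_mode->output(x, skb);
 *		transport mode: this is ->output2 directly;
 *		tunnel mode: xfrm4_prepare_output/xfrm6_prepare_output builds
 *		the outer IP header and then calls ->output2.
 *	err = x->type->output(x, skb);
 *		the transform proper (e.g. ESP) fills in the encapsulation
 *		header set up above.
 *
 * The input path mirrors this with ->input and ->input2.
 */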
405
406 /* Flags for xfrm_mode. */
407 enum {
408 XFRM_MODE_FLAG_TUNNEL = 1,
409 };
410
411 extern int xfrm_register_mode(struct xfrm_mode *mode, int family);
412 extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
413
414 static inline int xfrm_af2proto(unsigned int family)
415 {
416 switch(family) {
417 case AF_INET:
418 return IPPROTO_IPIP;
419 case AF_INET6:
420 return IPPROTO_IPV6;
421 default:
422 return 0;
423 }
424 }
425
426 static inline struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
427 {
428 if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
429 (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
430 return x->inner_mode;
431 else
432 return x->inner_mode_iaf;
433 }
434
435 struct xfrm_tmpl {
436 /* id in template is interpreted as:
437 * daddr - destination of tunnel, may be zero for transport mode.
438 * spi - zero to acquire spi. Not zero if spi is static, then
439 * daddr must be fixed too.
440 * proto - AH/ESP/IPCOMP
441 */
442 struct xfrm_id id;
443
444 /* Source address of tunnel. Ignored if it is not a tunnel. */
445 xfrm_address_t saddr;
446
447 unsigned short encap_family;
448
449 u32 reqid;
450
451 /* Mode: transport, tunnel etc. */
452 u8 mode;
453
454 /* Sharing mode: unique, this session only, this user only etc. */
455 u8 share;
456
457 /* May skip this transformation if no SA is found */
458 u8 optional;
459
460 /* Skip aalgos/ealgos/calgos checks. */
461 u8 allalgs;
462
463 /* Bit mask of algos allowed for acquisition */
464 u32 aalgos;
465 u32 ealgos;
466 u32 calgos;
467 };
468
469 #define XFRM_MAX_DEPTH 6
470
471 struct xfrm_policy_walk_entry {
472 struct list_head all;
473 u8 dead;
474 };
475
476 struct xfrm_policy_walk {
477 struct xfrm_policy_walk_entry walk;
478 u8 type;
479 u32 seq;
480 };
481
482 struct xfrm_policy {
483 #ifdef CONFIG_NET_NS
484 struct net *xp_net;
485 #endif
486 struct hlist_node bydst;
487 struct hlist_node byidx;
488
489 /* This lock only affects elements except for entry. */
490 rwlock_t lock;
491 atomic_t refcnt;
492 struct timer_list timer;
493
494 struct flow_cache_object flo;
495 atomic_t genid;
496 u32 priority;
497 u32 index;
498 struct xfrm_mark mark;
499 struct xfrm_selector selector;
500 struct xfrm_lifetime_cfg lft;
501 struct xfrm_lifetime_cur curlft;
502 struct xfrm_policy_walk_entry walk;
503 u8 type;
504 u8 action;
505 u8 flags;
506 u8 xfrm_nr;
507 u16 family;
508 struct xfrm_sec_ctx *security;
509 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
510 };
511
512 static inline struct net *xp_net(const struct xfrm_policy *xp)
513 {
514 return read_pnet(&xp->xp_net);
515 }
516
517 struct xfrm_kmaddress {
518 xfrm_address_t local;
519 xfrm_address_t remote;
520 u32 reserved;
521 u16 family;
522 };
523
524 struct xfrm_migrate {
525 xfrm_address_t old_daddr;
526 xfrm_address_t old_saddr;
527 xfrm_address_t new_daddr;
528 xfrm_address_t new_saddr;
529 u8 proto;
530 u8 mode;
531 u16 reserved;
532 u32 reqid;
533 u16 old_family;
534 u16 new_family;
535 };
536
537 #define XFRM_KM_TIMEOUT 30
538 /* which seqno */
539 #define XFRM_REPLAY_SEQ 1
540 #define XFRM_REPLAY_OSEQ 2
541 #define XFRM_REPLAY_SEQ_MASK 3
542 /* what happened */
543 #define XFRM_REPLAY_UPDATE XFRM_AE_CR
544 #define XFRM_REPLAY_TIMEOUT XFRM_AE_CE
545
546 /* default aevent timeout in units of 100ms */
547 #define XFRM_AE_ETIME 10
548 /* Async Event timer multiplier */
549 #define XFRM_AE_ETH_M 10
550 /* default seq threshold size */
551 #define XFRM_AE_SEQT_SIZE 2
552
553 struct xfrm_mgr {
554 struct list_head list;
555 char *id;
556 int (*notify)(struct xfrm_state *x, const struct km_event *c);
557 int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
558 struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
559 int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
560 int (*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
561 int (*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
562 int (*migrate)(const struct xfrm_selector *sel,
563 u8 dir, u8 type,
564 const struct xfrm_migrate *m,
565 int num_bundles,
566 const struct xfrm_kmaddress *k);
567 };
568
569 extern int xfrm_register_km(struct xfrm_mgr *km);
570 extern int xfrm_unregister_km(struct xfrm_mgr *km);
571
572 /*
573 * This structure is used for the duration where packets are being
574 * transformed by IPsec. As soon as the packet leaves IPsec the
575 * area beyond the generic IP part may be overwritten.
576 */
577 struct xfrm_skb_cb {
578 union {
579 struct inet_skb_parm h4;
580 struct inet6_skb_parm h6;
581 } header;
582
583 /* Sequence number for replay protection. */
584 union {
585 u64 output;
586 __be32 input;
587 } seq;
588 };
589
590 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
591
592 /*
593 * This structure is used by the afinfo prepare_input/prepare_output functions
594 * to transmit header information to the mode input/output functions.
595 */
596 struct xfrm_mode_skb_cb {
597 union {
598 struct inet_skb_parm h4;
599 struct inet6_skb_parm h6;
600 } header;
601
602 /* Copied from header for IPv4, always set to zero and DF for IPv6. */
603 __be16 id;
604 __be16 frag_off;
605
606 /* IP header length (excluding options or extension headers). */
607 u8 ihl;
608
609 /* TOS for IPv4, class for IPv6. */
610 u8 tos;
611
612 /* TTL for IPv4, hop limit for IPv6. */
613 u8 ttl;
614
615 /* Protocol for IPv4, NH for IPv6. */
616 u8 protocol;
617
618 /* Option length for IPv4, zero for IPv6. */
619 u8 optlen;
620
621 /* Used by IPv6 only, zero for IPv4. */
622 u8 flow_lbl[3];
623 };
624
625 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
626
627 /*
628 * This structure is used by the input processing to locate the SPI and
629 * related information.
630 */
631 struct xfrm_spi_skb_cb {
632 union {
633 struct inet_skb_parm h4;
634 struct inet6_skb_parm h6;
635 } header;
636
637 unsigned int daddroff;
638 unsigned int family;
639 };
640
641 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
642
643 /* Audit Information */
644 struct xfrm_audit {
645 u32 secid;
646 uid_t loginuid;
647 u32 sessionid;
648 };
649
650 #ifdef CONFIG_AUDITSYSCALL
651 static inline struct audit_buffer *xfrm_audit_start(const char *op)
652 {
653 struct audit_buffer *audit_buf = NULL;
654
655 if (audit_enabled == 0)
656 return NULL;
657 audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC,
658 AUDIT_MAC_IPSEC_EVENT);
659 if (audit_buf == NULL)
660 return NULL;
661 audit_log_format(audit_buf, "op=%s", op);
662 return audit_buf;
663 }
664
665 static inline void xfrm_audit_helper_usrinfo(uid_t auid, u32 ses, u32 secid,
666 struct audit_buffer *audit_buf)
667 {
668 char *secctx;
669 u32 secctx_len;
670
671 audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
672 if (secid != 0 &&
673 security_secid_to_secctx(secid, &secctx, &secctx_len) == 0) {
674 audit_log_format(audit_buf, " subj=%s", secctx);
675 security_release_secctx(secctx, secctx_len);
676 } else
677 audit_log_task_context(audit_buf);
678 }
679
680 extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
681 u32 auid, u32 ses, u32 secid);
682 extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
683 u32 auid, u32 ses, u32 secid);
684 extern void xfrm_audit_state_add(struct xfrm_state *x, int result,
685 u32 auid, u32 ses, u32 secid);
686 extern void xfrm_audit_state_delete(struct xfrm_state *x, int result,
687 u32 auid, u32 ses, u32 secid);
688 extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
689 struct sk_buff *skb);
690 extern void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
691 extern void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
692 __be32 net_spi, __be32 net_seq);
693 extern void xfrm_audit_state_icvfail(struct xfrm_state *x,
694 struct sk_buff *skb, u8 proto);
695 #else
696
697 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
698 u32 auid, u32 ses, u32 secid)
699 {
700 }
701
702 static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
703 u32 auid, u32 ses, u32 secid)
704 {
705 }
706
707 static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
708 u32 auid, u32 ses, u32 secid)
709 {
710 }
711
712 static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
713 u32 auid, u32 ses, u32 secid)
714 {
715 }
716
717 static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
718 struct sk_buff *skb)
719 {
720 }
721
722 static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
723 u16 family)
724 {
725 }
726
727 static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
728 __be32 net_spi, __be32 net_seq)
729 {
730 }
731
732 static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
733 struct sk_buff *skb, u8 proto)
734 {
735 }
736 #endif /* CONFIG_AUDITSYSCALL */
737
738 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
739 {
740 if (likely(policy != NULL))
741 atomic_inc(&policy->refcnt);
742 }
743
744 extern void xfrm_policy_destroy(struct xfrm_policy *policy);
745
746 static inline void xfrm_pol_put(struct xfrm_policy *policy)
747 {
748 if (atomic_dec_and_test(&policy->refcnt))
749 xfrm_policy_destroy(policy);
750 }
751
752 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
753 {
754 int i;
755 for (i = npols - 1; i >= 0; --i)
756 xfrm_pol_put(pols[i]);
757 }
758
759 extern void __xfrm_state_destroy(struct xfrm_state *);
760
761 static inline void __xfrm_state_put(struct xfrm_state *x)
762 {
763 atomic_dec(&x->refcnt);
764 }
765
766 static inline void xfrm_state_put(struct xfrm_state *x)
767 {
768 if (atomic_dec_and_test(&x->refcnt))
769 __xfrm_state_destroy(x);
770 }
771
772 static inline void xfrm_state_hold(struct xfrm_state *x)
773 {
774 atomic_inc(&x->refcnt);
775 }
776
777 static inline bool addr_match(const void *token1, const void *token2,
778 int prefixlen)
779 {
780 const __be32 *a1 = token1;
781 const __be32 *a2 = token2;
782 int pdw;
783 int pbi;
784
785 pdw = prefixlen >> 5; /* num of whole u32 in prefix */
786 pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */
787
788 if (pdw)
789 if (memcmp(a1, a2, pdw << 2))
790 return false;
791
792 if (pbi) {
793 __be32 mask;
794
795 mask = htonl((0xffffffff) << (32 - pbi));
796
797 if ((a1[pdw] ^ a2[pdw]) & mask)
798 return false;
799 }
800
801 return true;
802 }
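/*
 * Example: with prefixlen == 24, addr_match() compares only the first three
 * bytes of the (network byte order) addresses, so 192.0.2.1 and 192.0.2.77
 * match while 192.0.2.1 and 192.0.3.1 do not; prefixlen == 0 matches
 * everything and prefixlen == 32 (or 128 for IPv6) requires full equality.
 */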
803
804 static __inline__
805 __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
806 {
807 __be16 port;
808 switch(fl->flowi_proto) {
809 case IPPROTO_TCP:
810 case IPPROTO_UDP:
811 case IPPROTO_UDPLITE:
812 case IPPROTO_SCTP:
813 port = uli->ports.sport;
814 break;
815 case IPPROTO_ICMP:
816 case IPPROTO_ICMPV6:
817 port = htons(uli->icmpt.type);
818 break;
819 case IPPROTO_MH:
820 port = htons(uli->mht.type);
821 break;
822 case IPPROTO_GRE:
823 port = htons(ntohl(uli->gre_key) >> 16);
824 break;
825 default:
826 port = 0; /*XXX*/
827 }
828 return port;
829 }
830
831 static __inline__
832 __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
833 {
834 __be16 port;
835 switch(fl->flowi_proto) {
836 case IPPROTO_TCP:
837 case IPPROTO_UDP:
838 case IPPROTO_UDPLITE:
839 case IPPROTO_SCTP:
840 port = uli->ports.dport;
841 break;
842 case IPPROTO_ICMP:
843 case IPPROTO_ICMPV6:
844 port = htons(uli->icmpt.code);
845 break;
846 case IPPROTO_GRE:
847 port = htons(ntohl(uli->gre_key) & 0xffff);
848 break;
849 default:
850 port = 0; /*XXX*/
851 }
852 return port;
853 }
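/*
 * Note the convention used above: for ICMP/ICMPv6 the "source port" slot
 * carries the message type and the "destination port" slot the code, for MH
 * the sport slot carries the type, and a 32-bit GRE key is split into its
 * upper 16 bits (sport) and lower 16 bits (dport).  This lets xfrm selectors
 * reuse the port fields for protocols that have no real ports.
 */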
854
855 extern int xfrm_selector_match(const struct xfrm_selector *sel,
856 const struct flowi *fl,
857 unsigned short family);
858
859 #ifdef CONFIG_SECURITY_NETWORK_XFRM
860 /* If neither has a context --> match
861 * Otherwise, both must have a context and the sids, doi, alg must match
862 */
863 static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
864 {
865 return ((!s1 && !s2) ||
866 (s1 && s2 &&
867 (s1->ctx_sid == s2->ctx_sid) &&
868 (s1->ctx_doi == s2->ctx_doi) &&
869 (s1->ctx_alg == s2->ctx_alg)));
870 }
871 #else
872 static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
873 {
874 return 1;
875 }
876 #endif
877
878 /* A struct encoding a bundle of transformations to apply to some set of flows.
879 *
880 * dst->child points to the next element of the bundle.
881 * dst->xfrm points to an instance of a transformer.
882 *
883 * Due to unfortunate limitations of the current routing cache, which we
884 * have no time to fix, it mirrors struct rtable and is bound to the same
885 * routing key, including saddr and daddr. However, we can have many
886 * bundles differing by session id. All the bundles grow from a parent
887 * policy rule.
888 */
889 struct xfrm_dst {
890 union {
891 struct dst_entry dst;
892 struct rtable rt;
893 struct rt6_info rt6;
894 } u;
895 struct dst_entry *route;
896 struct flow_cache_object flo;
897 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
898 int num_pols, num_xfrms;
899 #ifdef CONFIG_XFRM_SUB_POLICY
900 struct flowi *origin;
901 struct xfrm_selector *partner;
902 #endif
903 u32 xfrm_genid;
904 u32 policy_genid;
905 u32 route_mtu_cached;
906 u32 child_mtu_cached;
907 u32 route_cookie;
908 u32 path_cookie;
909 };
910
911 #ifdef CONFIG_XFRM
912 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
913 {
914 xfrm_pols_put(xdst->pols, xdst->num_pols);
915 dst_release(xdst->route);
916 if (likely(xdst->u.dst.xfrm))
917 xfrm_state_put(xdst->u.dst.xfrm);
918 #ifdef CONFIG_XFRM_SUB_POLICY
919 kfree(xdst->origin);
920 xdst->origin = NULL;
921 kfree(xdst->partner);
922 xdst->partner = NULL;
923 #endif
924 }
925 #endif
926
927 extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
928
929 struct sec_path {
930 atomic_t refcnt;
931 int len;
932 struct xfrm_state *xvec[XFRM_MAX_DEPTH];
933 };
934
935 static inline struct sec_path *
936 secpath_get(struct sec_path *sp)
937 {
938 if (sp)
939 atomic_inc(&sp->refcnt);
940 return sp;
941 }
942
943 extern void __secpath_destroy(struct sec_path *sp);
944
945 static inline void
946 secpath_put(struct sec_path *sp)
947 {
948 if (sp && atomic_dec_and_test(&sp->refcnt))
949 __secpath_destroy(sp);
950 }
951
952 extern struct sec_path *secpath_dup(struct sec_path *src);
953
954 static inline void
955 secpath_reset(struct sk_buff *skb)
956 {
957 #ifdef CONFIG_XFRM
958 secpath_put(skb->sp);
959 skb->sp = NULL;
960 #endif
961 }
962
963 static inline int
964 xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
965 {
966 switch (family) {
967 case AF_INET:
968 return addr->a4 == 0;
969 case AF_INET6:
970 return ipv6_addr_any((struct in6_addr *)&addr->a6);
971 }
972 return 0;
973 }
974
975 static inline int
976 __xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
977 {
978 return (tmpl->saddr.a4 &&
979 tmpl->saddr.a4 != x->props.saddr.a4);
980 }
981
982 static inline int
983 __xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
984 {
985 return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
986 ipv6_addr_cmp((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
987 }
988
989 static inline int
990 xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
991 {
992 switch (family) {
993 case AF_INET:
994 return __xfrm4_state_addr_cmp(tmpl, x);
995 case AF_INET6:
996 return __xfrm6_state_addr_cmp(tmpl, x);
997 }
998 return !0;
999 }
1000
1001 #ifdef CONFIG_XFRM
1002 extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);
1003
1004 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
1005 struct sk_buff *skb,
1006 unsigned int family, int reverse)
1007 {
1008 struct net *net = dev_net(skb->dev);
1009 int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
1010
1011 if (sk && sk->sk_policy[XFRM_POLICY_IN])
1012 return __xfrm_policy_check(sk, ndir, skb, family);
1013
1014 return (!net->xfrm.policy_count[dir] && !skb->sp) ||
1015 (skb_dst(skb)->flags & DST_NOPOLICY) ||
1016 __xfrm_policy_check(sk, ndir, skb, family);
1017 }
1018
1019 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1020 {
1021 return __xfrm_policy_check2(sk, dir, skb, family, 0);
1022 }
1023
1024 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1025 {
1026 return xfrm_policy_check(sk, dir, skb, AF_INET);
1027 }
1028
1029 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1030 {
1031 return xfrm_policy_check(sk, dir, skb, AF_INET6);
1032 }
1033
1034 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1035 struct sk_buff *skb)
1036 {
1037 return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
1038 }
1039
1040 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1041 struct sk_buff *skb)
1042 {
1043 return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
1044 }
1045
1046 extern int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1047 unsigned int family, int reverse);
1048
1049 static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1050 unsigned int family)
1051 {
1052 return __xfrm_decode_session(skb, fl, family, 0);
1053 }
1054
1055 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1056 struct flowi *fl,
1057 unsigned int family)
1058 {
1059 return __xfrm_decode_session(skb, fl, family, 1);
1060 }
1061
1062 extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
1063
1064 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
1065 {
1066 struct net *net = dev_net(skb->dev);
1067
1068 return !net->xfrm.policy_count[XFRM_POLICY_OUT] ||
1069 (skb_dst(skb)->flags & DST_NOXFRM) ||
1070 __xfrm_route_forward(skb, family);
1071 }
1072
1073 static inline int xfrm4_route_forward(struct sk_buff *skb)
1074 {
1075 return xfrm_route_forward(skb, AF_INET);
1076 }
1077
1078 static inline int xfrm6_route_forward(struct sk_buff *skb)
1079 {
1080 return xfrm_route_forward(skb, AF_INET6);
1081 }
1082
1083 extern int __xfrm_sk_clone_policy(struct sock *sk);
1084
1085 static inline int xfrm_sk_clone_policy(struct sock *sk)
1086 {
1087 if (unlikely(sk->sk_policy[0] || sk->sk_policy[1]))
1088 return __xfrm_sk_clone_policy(sk);
1089 return 0;
1090 }
1091
1092 extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
1093
1094 static inline void xfrm_sk_free_policy(struct sock *sk)
1095 {
1096 if (unlikely(sk->sk_policy[0] != NULL)) {
1097 xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX);
1098 sk->sk_policy[0] = NULL;
1099 }
1100 if (unlikely(sk->sk_policy[1] != NULL)) {
1101 xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1);
1102 sk->sk_policy[1] = NULL;
1103 }
1104 }
1105
1106 #else
1107
1108 static inline void xfrm_sk_free_policy(struct sock *sk) {}
1109 static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; }
1110 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
1111 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
1112 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1113 {
1114 return 1;
1115 }
1116 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1117 {
1118 return 1;
1119 }
1120 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1121 {
1122 return 1;
1123 }
1124 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1125 struct flowi *fl,
1126 unsigned int family)
1127 {
1128 return -ENOSYS;
1129 }
1130 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1131 struct sk_buff *skb)
1132 {
1133 return 1;
1134 }
1135 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1136 struct sk_buff *skb)
1137 {
1138 return 1;
1139 }
1140 #endif
1141
1142 static __inline__
1143 xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
1144 {
1145 switch (family){
1146 case AF_INET:
1147 return (xfrm_address_t *)&fl->u.ip4.daddr;
1148 case AF_INET6:
1149 return (xfrm_address_t *)&fl->u.ip6.daddr;
1150 }
1151 return NULL;
1152 }
1153
1154 static __inline__
1155 xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
1156 {
1157 switch (family){
1158 case AF_INET:
1159 return (xfrm_address_t *)&fl->u.ip4.saddr;
1160 case AF_INET6:
1161 return (xfrm_address_t *)&fl->u.ip6.saddr;
1162 }
1163 return NULL;
1164 }
1165
1166 static __inline__
1167 void xfrm_flowi_addr_get(const struct flowi *fl,
1168 xfrm_address_t *saddr, xfrm_address_t *daddr,
1169 unsigned short family)
1170 {
1171 switch(family) {
1172 case AF_INET:
1173 memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
1174 memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
1175 break;
1176 case AF_INET6:
1177 ipv6_addr_copy((struct in6_addr *)&saddr->a6, &fl->u.ip6.saddr);
1178 ipv6_addr_copy((struct in6_addr *)&daddr->a6, &fl->u.ip6.daddr);
1179 break;
1180 }
1181 }
1182
1183 static __inline__ int
1184 __xfrm4_state_addr_check(const struct xfrm_state *x,
1185 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1186 {
1187 if (daddr->a4 == x->id.daddr.a4 &&
1188 (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1189 return 1;
1190 return 0;
1191 }
1192
1193 static __inline__ int
1194 __xfrm6_state_addr_check(const struct xfrm_state *x,
1195 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1196 {
1197 if (!ipv6_addr_cmp((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1198 (!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr)||
1199 ipv6_addr_any((struct in6_addr *)saddr) ||
1200 ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1201 return 1;
1202 return 0;
1203 }
1204
1205 static __inline__ int
1206 xfrm_state_addr_check(const struct xfrm_state *x,
1207 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1208 unsigned short family)
1209 {
1210 switch (family) {
1211 case AF_INET:
1212 return __xfrm4_state_addr_check(x, daddr, saddr);
1213 case AF_INET6:
1214 return __xfrm6_state_addr_check(x, daddr, saddr);
1215 }
1216 return 0;
1217 }
1218
1219 static __inline__ int
1220 xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
1221 unsigned short family)
1222 {
1223 switch (family) {
1224 case AF_INET:
1225 return __xfrm4_state_addr_check(x,
1226 (const xfrm_address_t *)&fl->u.ip4.daddr,
1227 (const xfrm_address_t *)&fl->u.ip4.saddr);
1228 case AF_INET6:
1229 return __xfrm6_state_addr_check(x,
1230 (const xfrm_address_t *)&fl->u.ip6.daddr,
1231 (const xfrm_address_t *)&fl->u.ip6.saddr);
1232 }
1233 return 0;
1234 }
1235
1236 static inline int xfrm_state_kern(const struct xfrm_state *x)
1237 {
1238 return atomic_read(&x->tunnel_users);
1239 }
1240
1241 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1242 {
1243 return (!userproto || proto == userproto ||
1244 (userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
1245 proto == IPPROTO_ESP ||
1246 proto == IPPROTO_COMP)));
1247 }
1248
1249 /*
1250 * xfrm algorithm information
1251 */
1252 struct xfrm_algo_aead_info {
1253 u16 icv_truncbits;
1254 };
1255
1256 struct xfrm_algo_auth_info {
1257 u16 icv_truncbits;
1258 u16 icv_fullbits;
1259 };
1260
1261 struct xfrm_algo_encr_info {
1262 u16 blockbits;
1263 u16 defkeybits;
1264 };
1265
1266 struct xfrm_algo_comp_info {
1267 u16 threshold;
1268 };
1269
1270 struct xfrm_algo_desc {
1271 char *name;
1272 char *compat;
1273 u8 available:1;
1274 union {
1275 struct xfrm_algo_aead_info aead;
1276 struct xfrm_algo_auth_info auth;
1277 struct xfrm_algo_encr_info encr;
1278 struct xfrm_algo_comp_info comp;
1279 } uinfo;
1280 struct sadb_alg desc;
1281 };
1282
1283 /* XFRM tunnel handlers. */
1284 struct xfrm_tunnel {
1285 int (*handler)(struct sk_buff *skb);
1286 int (*err_handler)(struct sk_buff *skb, u32 info);
1287
1288 struct xfrm_tunnel __rcu *next;
1289 int priority;
1290 };
1291
1292 struct xfrm6_tunnel {
1293 int (*handler)(struct sk_buff *skb);
1294 int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1295 u8 type, u8 code, int offset, __be32 info);
1296 struct xfrm6_tunnel __rcu *next;
1297 int priority;
1298 };
1299
1300 extern void xfrm_init(void);
1301 extern void xfrm4_init(int rt_hash_size);
1302 extern int xfrm_state_init(struct net *net);
1303 extern void xfrm_state_fini(struct net *net);
1304 extern void xfrm4_state_init(void);
1305 #ifdef CONFIG_XFRM
1306 extern int xfrm6_init(void);
1307 extern void xfrm6_fini(void);
1308 extern int xfrm6_state_init(void);
1309 extern void xfrm6_state_fini(void);
1310 #else
1311 static inline int xfrm6_init(void)
1312 {
1313 return 0;
1314 }
1315 static inline void xfrm6_fini(void)
1316 {
1317 ;
1318 }
1319 #endif
1320
1321 #ifdef CONFIG_XFRM_STATISTICS
1322 extern int xfrm_proc_init(struct net *net);
1323 extern void xfrm_proc_fini(struct net *net);
1324 #endif
1325
1326 extern int xfrm_sysctl_init(struct net *net);
1327 #ifdef CONFIG_SYSCTL
1328 extern void xfrm_sysctl_fini(struct net *net);
1329 #else
1330 static inline void xfrm_sysctl_fini(struct net *net)
1331 {
1332 }
1333 #endif
1334
1335 extern void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
1336 extern int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1337 int (*func)(struct xfrm_state *, int, void*), void *);
1338 extern void xfrm_state_walk_done(struct xfrm_state_walk *walk);
1339 extern struct xfrm_state *xfrm_state_alloc(struct net *net);
1340 extern struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
1341 const xfrm_address_t *saddr,
1342 const struct flowi *fl,
1343 struct xfrm_tmpl *tmpl,
1344 struct xfrm_policy *pol, int *err,
1345 unsigned short family);
1346 extern struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
1347 xfrm_address_t *daddr,
1348 xfrm_address_t *saddr,
1349 unsigned short family,
1350 u8 mode, u8 proto, u32 reqid);
1351 extern int xfrm_state_check_expire(struct xfrm_state *x);
1352 extern void xfrm_state_insert(struct xfrm_state *x);
1353 extern int xfrm_state_add(struct xfrm_state *x);
1354 extern int xfrm_state_update(struct xfrm_state *x);
1355 extern struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
1356 const xfrm_address_t *daddr, __be32 spi,
1357 u8 proto, unsigned short family);
1358 extern struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1359 const xfrm_address_t *daddr,
1360 const xfrm_address_t *saddr,
1361 u8 proto,
1362 unsigned short family);
1363 #ifdef CONFIG_XFRM_SUB_POLICY
1364 extern int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1365 int n, unsigned short family);
1366 extern int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1367 int n, unsigned short family);
1368 #else
1369 static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1370 int n, unsigned short family)
1371 {
1372 return -ENOSYS;
1373 }
1374
1375 static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1376 int n, unsigned short family)
1377 {
1378 return -ENOSYS;
1379 }
1380 #endif
1381
1382 struct xfrmk_sadinfo {
1383 u32 sadhcnt; /* current hash bkts */
1384 u32 sadhmcnt; /* max allowed hash bkts */
1385 u32 sadcnt; /* current running count */
1386 };
1387
1388 struct xfrmk_spdinfo {
1389 u32 incnt;
1390 u32 outcnt;
1391 u32 fwdcnt;
1392 u32 inscnt;
1393 u32 outscnt;
1394 u32 fwdscnt;
1395 u32 spdhcnt;
1396 u32 spdhmcnt;
1397 };
1398
1399 extern struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark,
1400 u32 seq);
1401 extern int xfrm_state_delete(struct xfrm_state *x);
1402 extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
1403 extern void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1404 extern void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
1405 extern int xfrm_replay_check(struct xfrm_state *x,
1406 struct sk_buff *skb, __be32 seq);
1407 extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
1408 extern void xfrm_replay_notify(struct xfrm_state *x, int event);
1409 extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
1410 extern int xfrm_init_state(struct xfrm_state *x);
1411 extern int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
1412 extern int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi,
1413 int encap_type);
1414 extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1415 extern int xfrm_output_resume(struct sk_buff *skb, int err);
1416 extern int xfrm_output(struct sk_buff *skb);
1417 extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1418 extern int xfrm4_extract_header(struct sk_buff *skb);
1419 extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1420 extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1421 int encap_type);
1422 extern int xfrm4_transport_finish(struct sk_buff *skb, int async);
1423 extern int xfrm4_rcv(struct sk_buff *skb);
1424
1425 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1426 {
1427 return xfrm4_rcv_encap(skb, nexthdr, spi, 0);
1428 }
1429
1430 extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1431 extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1432 extern int xfrm4_output(struct sk_buff *skb);
1433 extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1434 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1435 extern int xfrm6_extract_header(struct sk_buff *skb);
1436 extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1437 extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
1438 extern int xfrm6_transport_finish(struct sk_buff *skb, int async);
1439 extern int xfrm6_rcv(struct sk_buff *skb);
1440 extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1441 xfrm_address_t *saddr, u8 proto);
1442 extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1443 extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1444 extern __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
1445 extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr);
1446 extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1447 extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1448 extern int xfrm6_output(struct sk_buff *skb);
1449 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
1450 u8 **prevhdr);
1451
1452 #ifdef CONFIG_XFRM
1453 extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1454 extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen);
1455 #else
1456 static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1457 {
1458 return -ENOPROTOOPT;
1459 }
1460
1461 static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
1462 {
1463 /* should not happen */
1464 kfree_skb(skb);
1465 return 0;
1466 }
1467 #endif
1468
1469 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
1470
1471 extern void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
1472 extern int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1473 int (*func)(struct xfrm_policy *, int, int, void*), void *);
1474 extern void xfrm_policy_walk_done(struct xfrm_policy_walk *walk);
1475 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1476 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
1477 u8 type, int dir,
1478 struct xfrm_selector *sel,
1479 struct xfrm_sec_ctx *ctx, int delete,
1480 int *err);
1481 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, u32 id, int delete, int *err);
1482 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info);
1483 u32 xfrm_get_acqseq(void);
1484 extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
1485 struct xfrm_state *xfrm_find_acq(struct net *net, struct xfrm_mark *mark,
1486 u8 mode, u32 reqid, u8 proto,
1487 const xfrm_address_t *daddr,
1488 const xfrm_address_t *saddr, int create,
1489 unsigned short family);
1490 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1491
1492 #ifdef CONFIG_XFRM_MIGRATE
1493 extern int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1494 const struct xfrm_migrate *m, int num_bundles,
1495 const struct xfrm_kmaddress *k);
1496 extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m);
1497 extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1498 struct xfrm_migrate *m);
1499 extern int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1500 struct xfrm_migrate *m, int num_bundles,
1501 struct xfrm_kmaddress *k);
1502 #endif
1503
1504 extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1505 extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid);
1506 extern int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
1507
1508 extern void xfrm_input_init(void);
1509 extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1510
1511 extern void xfrm_probe_algs(void);
1512 extern int xfrm_count_auth_supported(void);
1513 extern int xfrm_count_enc_supported(void);
1514 extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1515 extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1516 extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1517 extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1518 extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1519 extern struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
1520 extern struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
1521 extern struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
1522 extern struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
1523 int probe);
1524
1525 struct hash_desc;
1526 struct scatterlist;
1527 typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *,
1528 unsigned int);
1529
1530 static inline int xfrm_addr_cmp(const xfrm_address_t *a,
1531 const xfrm_address_t *b,
1532 int family)
1533 {
1534 switch (family) {
1535 default:
1536 case AF_INET:
1537 return (__force u32)a->a4 - (__force u32)b->a4;
1538 case AF_INET6:
1539 return ipv6_addr_cmp((struct in6_addr *)a,
1540 (struct in6_addr *)b);
1541 }
1542 }
1543
1544 static inline int xfrm_policy_id2dir(u32 index)
1545 {
1546 return index & 7;
1547 }
1548
1549 #ifdef CONFIG_XFRM
1550 static inline int xfrm_aevent_is_on(struct net *net)
1551 {
1552 struct sock *nlsk;
1553 int ret = 0;
1554
1555 rcu_read_lock();
1556 nlsk = rcu_dereference(net->xfrm.nlsk);
1557 if (nlsk)
1558 ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1559 rcu_read_unlock();
1560 return ret;
1561 }
1562 #endif
1563
1564 static inline int xfrm_alg_len(const struct xfrm_algo *alg)
1565 {
1566 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1567 }
1568
1569 static inline int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
1570 {
1571 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1572 }
1573
1574 static inline int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
1575 {
1576 return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
1577 }
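/*
 * Example: an authentication algorithm with a 160-bit key (say HMAC-SHA1)
 * has alg_key_len == 160, so xfrm_alg_auth_len() yields
 * sizeof(struct xfrm_algo_auth) + 20 bytes of key material; an ESN replay
 * window with bmp_len == 4 adds 16 bytes of bitmap to
 * sizeof(struct xfrm_replay_state_esn).
 */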
1578
1579 #ifdef CONFIG_XFRM_MIGRATE
1580 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
1581 {
1582 return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
1583 }
1584
1585 static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig)
1586 {
1587 return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL);
1588 }
1589
1590 static inline void xfrm_states_put(struct xfrm_state **states, int n)
1591 {
1592 int i;
1593 for (i = 0; i < n; i++)
1594 xfrm_state_put(*(states + i));
1595 }
1596
1597 static inline void xfrm_states_delete(struct xfrm_state **states, int n)
1598 {
1599 int i;
1600 for (i = 0; i < n; i++)
1601 xfrm_state_delete(*(states + i));
1602 }
1603 #endif
1604
1605 #ifdef CONFIG_XFRM
1606 static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1607 {
1608 return skb->sp->xvec[skb->sp->len - 1];
1609 }
1610 #endif
1611
1612 static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
1613 {
1614 if (attrs[XFRMA_MARK])
1615 memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
1616 else
1617 m->v = m->m = 0;
1618
1619 return m->v & m->m;
1620 }
1621
1622 static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
1623 {
1624 if (m->m | m->v)
1625 NLA_PUT(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
1626 return 0;
1627
1628 nla_put_failure:
1629 return -1;
1630 }
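/*
 * Typical usage (sketch of what net/xfrm/xfrm_user.c does): a netlink
 * handler fills an xfrm_mark from the optional XFRMA_MARK attribute with
 * xfrm_mark_get(attrs, &mark) when creating a state or policy, and dump
 * code copies it back into the reply with xfrm_mark_put(skb, &mark); both
 * value and mask stay zero when the attribute is absent.
 */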
1631
1632 #endif /* _NET_XFRM_H */