1 #ifndef _NET_XFRM_H
2 #define _NET_XFRM_H
3
4 #include <linux/compiler.h>
5 #include <linux/xfrm.h>
6 #include <linux/spinlock.h>
7 #include <linux/list.h>
8 #include <linux/skbuff.h>
9 #include <linux/socket.h>
10 #include <linux/pfkeyv2.h>
11 #include <linux/ipsec.h>
12 #include <linux/in6.h>
13 #include <linux/mutex.h>
14 #include <linux/audit.h>
15 #include <linux/slab.h>
16
17 #include <net/sock.h>
18 #include <net/dst.h>
19 #include <net/ip.h>
20 #include <net/route.h>
21 #include <net/ipv6.h>
22 #include <net/ip6_fib.h>
23 #include <net/flow.h>
24
25 #include <linux/interrupt.h>
26
27 #ifdef CONFIG_XFRM_STATISTICS
28 #include <net/snmp.h>
29 #endif
30
31 #define XFRM_PROTO_ESP 50
32 #define XFRM_PROTO_AH 51
33 #define XFRM_PROTO_COMP 108
34 #define XFRM_PROTO_IPIP 4
35 #define XFRM_PROTO_IPV6 41
36 #define XFRM_PROTO_ROUTING IPPROTO_ROUTING
37 #define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS
38
39 #define XFRM_ALIGN8(len) (((len) + 7) & ~7)
40 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
41 MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
42 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
43 MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
44
45 #ifdef CONFIG_XFRM_STATISTICS
46 #define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
47 #define XFRM_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.xfrm_statistics, field)
48 #define XFRM_INC_STATS_USER(net, field)	SNMP_INC_STATS_USER((net)->mib.xfrm_statistics, field)
49 #else
50 #define XFRM_INC_STATS(net, field) ((void)(net))
51 #define XFRM_INC_STATS_BH(net, field) ((void)(net))
52 #define XFRM_INC_STATS_USER(net, field) ((void)(net))
53 #endif
54
55 extern struct mutex xfrm_cfg_mutex;
56
57 /* Organization of SPD aka "XFRM rules"
58 ------------------------------------
59
60 Basic objects:
61 - policy rule, struct xfrm_policy (=SPD entry)
62 - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
63 - instance of a transformer, struct xfrm_state (=SA)
64 - template to clone xfrm_state, struct xfrm_tmpl
65
66    The SPD is a plain linear list of xfrm_policy rules, ordered by priority.
67    (To be compatible with existing pfkeyv2 implementations,
68    many rules with a priority of 0x7fffffff are allowed to exist, and
69    such rules are ordered in an unpredictable way, thanks to the BSD folks.)
70
71    Lookup is a plain linear search until the first selector match.
72
73 If "action" is "block", then we prohibit the flow, otherwise:
74 if "xfrms_nr" is zero, the flow passes untransformed. Otherwise,
75 policy entry has list of up to XFRM_MAX_DEPTH transformations,
76 described by templates xfrm_tmpl. Each template is resolved
77 to a complete xfrm_state (see below) and we pack bundle of transformations
78 to a dst_entry returned to requestor.
79
80 dst -. xfrm .-> xfrm_state #1
81 |---. child .-> dst -. xfrm .-> xfrm_state #2
82 |---. child .-> dst -. xfrm .-> xfrm_state #3
83 |---. child .-> NULL
84
85    Bundles are cached in the xfrm_policy struct (field ->bundles).
86
87
88    Resolution of xfrm_tmpl
89 -----------------------
90 Template contains:
91 1. ->mode Mode: transport or tunnel
92 2. ->id.proto Protocol: AH/ESP/IPCOMP
93 3. ->id.daddr Remote tunnel endpoint, ignored for transport mode.
94 Q: allow to resolve security gateway?
95 4. ->id.spi If not zero, static SPI.
96 5. ->saddr Local tunnel endpoint, ignored for transport mode.
97 6. ->algos List of allowed algos. Plain bitmask now.
98 Q: ealgos, aalgos, calgos. What a mess...
99 7. ->share Sharing mode.
100 Q: how to implement private sharing mode? To add struct sock* to
101 flow id?
102
103    Having this template, we search the SAD for entries with the
104    appropriate mode/proto/algo that are permitted by the selector.
105    If no appropriate entry is found, one is requested from the key manager.
106
107 PROBLEMS:
108    Q: How do we find all the bundles referring to a physical path for
109       PMTU discovery? It seems dst should contain a list of all parents...
110       and we would enter an infinite locking-hierarchy disaster.
111       No! It is easier: we will not search for them, we let them find us.
112       We add a genid to each dst plus a pointer to the genid of the raw IP route;
113       PMTU discovery will update the pmtu on the raw IP route and increase its genid.
114       dst_check() will see this at the top level and trigger a resync of the
115       metrics. Plus, it will be done via sk->sk_dst_cache. Solved.
116 */
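
/*
 * Illustrative sketch (hypothetical helper, not part of the xfrm API):
 * walking a bundle exactly as drawn in the diagram above.  This assumes
 * the dst_entry layout of this kernel generation, where every level of a
 * bundle carries ->xfrm and links to the next level through ->child.
 */
#ifdef CONFIG_XFRM
static inline int xfrm_bundle_depth_sketch(struct dst_entry *dst)
{
	int depth = 0;

	/* Every dst that belongs to a bundle has a non-NULL ->xfrm. */
	for (; dst && dst->xfrm; dst = dst->child)
		depth++;

	return depth;
}
#endif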
117
118 struct xfrm_state_walk {
119 struct list_head all;
120 u8 state;
121 union {
122 u8 dying;
123 u8 proto;
124 };
125 u32 seq;
126 };
127
128 /* Full description of state of transformer. */
129 struct xfrm_state {
130 #ifdef CONFIG_NET_NS
131 struct net *xs_net;
132 #endif
133 union {
134 struct hlist_node gclist;
135 struct hlist_node bydst;
136 };
137 struct hlist_node bysrc;
138 struct hlist_node byspi;
139
140 atomic_t refcnt;
141 spinlock_t lock;
142
143 struct xfrm_id id;
144 struct xfrm_selector sel;
145 struct xfrm_mark mark;
146 u32 tfcpad;
147
148 u32 genid;
149
150 /* Key manager bits */
151 struct xfrm_state_walk km;
152
153 /* Parameters of this state. */
154 struct {
155 u32 reqid;
156 u8 mode;
157 u8 replay_window;
158 u8 aalgo, ealgo, calgo;
159 u8 flags;
160 u16 family;
161 xfrm_address_t saddr;
162 int header_len;
163 int trailer_len;
164 } props;
165
166 struct xfrm_lifetime_cfg lft;
167
168 /* Data for transformer */
169 struct xfrm_algo_auth *aalg;
170 struct xfrm_algo *ealg;
171 struct xfrm_algo *calg;
172 struct xfrm_algo_aead *aead;
173
174 /* Data for encapsulator */
175 struct xfrm_encap_tmpl *encap;
176
177 /* Data for care-of address */
178 xfrm_address_t *coaddr;
179
180 /* IPComp needs an IPIP tunnel for handling uncompressed packets */
181 struct xfrm_state *tunnel;
182
183 /* If a tunnel, number of users + 1 */
184 atomic_t tunnel_users;
185
186 /* State for replay detection */
187 struct xfrm_replay_state replay;
188
189 /* Replay detection state at the time we sent the last notification */
190 struct xfrm_replay_state preplay;
191
192 /* internal flag that only holds state for delayed aevent at the
193 * moment
194 */
195 u32 xflags;
196
197 /* Replay detection notification settings */
198 u32 replay_maxage;
199 u32 replay_maxdiff;
200
201 /* Replay detection notification timer */
202 struct timer_list rtimer;
203
204 /* Statistics */
205 struct xfrm_stats stats;
206
207 struct xfrm_lifetime_cur curlft;
208 struct tasklet_hrtimer mtimer;
209
210 /* Last used time */
211 unsigned long lastused;
212
213 /* Reference to data common to all the instances of this
214 * transformer. */
215 const struct xfrm_type *type;
216 struct xfrm_mode *inner_mode;
217 struct xfrm_mode *inner_mode_iaf;
218 struct xfrm_mode *outer_mode;
219
220 /* Security context */
221 struct xfrm_sec_ctx *security;
222
223 /* Private data of this transformer, format is opaque,
224 * interpreted by xfrm_type methods. */
225 void *data;
226 };
227
228 static inline struct net *xs_net(struct xfrm_state *x)
229 {
230 return read_pnet(&x->xs_net);
231 }
232
233 /* xflags - make enum if more show up */
234 #define XFRM_TIME_DEFER 1
235
236 enum {
237 XFRM_STATE_VOID,
238 XFRM_STATE_ACQ,
239 XFRM_STATE_VALID,
240 XFRM_STATE_ERROR,
241 XFRM_STATE_EXPIRED,
242 XFRM_STATE_DEAD
243 };
244
245 /* callback structure passed from either netlink or pfkey */
246 struct km_event {
247 union {
248 u32 hard;
249 u32 proto;
250 u32 byid;
251 u32 aevent;
252 u32 type;
253 } data;
254
255 u32 seq;
256 u32 pid;
257 u32 event;
258 struct net *net;
259 };
260
261 struct net_device;
262 struct xfrm_type;
263 struct xfrm_dst;
264 struct xfrm_policy_afinfo {
265 unsigned short family;
266 struct dst_ops *dst_ops;
267 void (*garbage_collect)(struct net *net);
268 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
269 xfrm_address_t *saddr,
270 xfrm_address_t *daddr);
271 int (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr);
272 void (*decode_session)(struct sk_buff *skb,
273 struct flowi *fl,
274 int reverse);
275 int (*get_tos)(struct flowi *fl);
276 int (*init_path)(struct xfrm_dst *path,
277 struct dst_entry *dst,
278 int nfheader_len);
279 int (*fill_dst)(struct xfrm_dst *xdst,
280 struct net_device *dev,
281 struct flowi *fl);
282 };
283
284 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
285 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
286 extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
287 extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
288
289 struct xfrm_tmpl;
290 extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
291 extern void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
292 extern int __xfrm_state_delete(struct xfrm_state *x);
293
294 struct xfrm_state_afinfo {
295 unsigned int family;
296 unsigned int proto;
297 __be16 eth_proto;
298 struct module *owner;
299 const struct xfrm_type *type_map[IPPROTO_MAX];
300 struct xfrm_mode *mode_map[XFRM_MODE_MAX];
301 int (*init_flags)(struct xfrm_state *x);
302 void (*init_tempsel)(struct xfrm_selector *sel, struct flowi *fl);
303 void (*init_temprop)(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
304 xfrm_address_t *daddr, xfrm_address_t *saddr);
305 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
306 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
307 int (*output)(struct sk_buff *skb);
308 int (*extract_input)(struct xfrm_state *x,
309 struct sk_buff *skb);
310 int (*extract_output)(struct xfrm_state *x,
311 struct sk_buff *skb);
312 int (*transport_finish)(struct sk_buff *skb,
313 int async);
314 };
315
316 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
317 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
318
319 extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
320
321 struct xfrm_type {
322 char *description;
323 struct module *owner;
324 u8 proto;
325 u8 flags;
326 #define XFRM_TYPE_NON_FRAGMENT 1
327 #define XFRM_TYPE_REPLAY_PROT 2
328 #define XFRM_TYPE_LOCAL_COADDR 4
329 #define XFRM_TYPE_REMOTE_COADDR 8
330
331 int (*init_state)(struct xfrm_state *x);
332 void (*destructor)(struct xfrm_state *);
333 int (*input)(struct xfrm_state *, struct sk_buff *skb);
334 int (*output)(struct xfrm_state *, struct sk_buff *pskb);
335 int (*reject)(struct xfrm_state *, struct sk_buff *, struct flowi *);
336 int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
337 /* Estimate maximal size of result of transformation of a dgram */
338 u32 (*get_mtu)(struct xfrm_state *, int size);
339 };
340
341 extern int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
342 extern int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
343
344 struct xfrm_mode {
345 /*
346 * Remove encapsulation header.
347 *
348 * The IP header will be moved over the top of the encapsulation
349 * header.
350 *
351 * On entry, the transport header shall point to where the IP header
352 * should be and the network header shall be set to where the IP
353 * header currently is. skb->data shall point to the start of the
354 * payload.
355 */
356 int (*input2)(struct xfrm_state *x, struct sk_buff *skb);
357
358 /*
359 * This is the actual input entry point.
360 *
361 	 * For transport mode and equivalent this would be identical to
362 	 * input2 (which does not need to be set), while tunnel mode
363 	 * and equivalent would set this to the tunnel encapsulation function
364 	 * xfrm4_prepare_input, which in turn calls input2.
365 */
366 int (*input)(struct xfrm_state *x, struct sk_buff *skb);
367
368 /*
369 * Add encapsulation header.
370 *
371 * On exit, the transport header will be set to the start of the
372 * encapsulation header to be filled in by x->type->output and
373 * the mac header will be set to the nextheader (protocol for
374 * IPv4) field of the extension header directly preceding the
375 * encapsulation header, or in its absence, that of the top IP
376 * header. The value of the network header will always point
377 * to the top IP header while skb->data will point to the payload.
378 */
379 	int (*output2)(struct xfrm_state *x, struct sk_buff *skb);
380
381 /*
382 * This is the actual output entry point.
383 *
384 	 * For transport mode and equivalent this would be identical to
385 	 * output2 (which does not need to be set), while tunnel mode
386 	 * and equivalent would set this to a tunnel encapsulation function
387 	 * (xfrm4_prepare_output or xfrm6_prepare_output), which in turn
388 	 * calls output2.
389 */
390 int (*output)(struct xfrm_state *x, struct sk_buff *skb);
391
392 struct xfrm_state_afinfo *afinfo;
393 struct module *owner;
394 unsigned int encap;
395 int flags;
396 };
397
398 /* Flags for xfrm_mode. */
399 enum {
400 XFRM_MODE_FLAG_TUNNEL = 1,
401 };
402
403 extern int xfrm_register_mode(struct xfrm_mode *mode, int family);
404 extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
405
406 static inline int xfrm_af2proto(unsigned int family)
407 {
408 switch(family) {
409 case AF_INET:
410 return IPPROTO_IPIP;
411 case AF_INET6:
412 return IPPROTO_IPV6;
413 default:
414 return 0;
415 }
416 }
417
418 static inline struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
419 {
420 if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
421 (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
422 return x->inner_mode;
423 else
424 return x->inner_mode_iaf;
425 }
426
427 struct xfrm_tmpl {
428 /* id in template is interpreted as:
429 * daddr - destination of tunnel, may be zero for transport mode.
430 	 *	   spi - zero to acquire an SPI; non-zero means the SPI is static and
431 	 *	   daddr must be fixed too.
432 * proto - AH/ESP/IPCOMP
433 */
434 struct xfrm_id id;
435
436 	/* Source address of tunnel. Ignored if it is not a tunnel. */
437 xfrm_address_t saddr;
438
439 unsigned short encap_family;
440
441 u32 reqid;
442
443 /* Mode: transport, tunnel etc. */
444 u8 mode;
445
446 /* Sharing mode: unique, this session only, this user only etc. */
447 u8 share;
448
449 	/* May skip this transformation if no SA is found */
450 u8 optional;
451
452 /* Skip aalgos/ealgos/calgos checks. */
453 u8 allalgs;
454
455 /* Bit mask of algos allowed for acquisition */
456 u32 aalgos;
457 u32 ealgos;
458 u32 calgos;
459 };
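
/*
 * Illustrative sketch (hypothetical helper, placeholder values): how the
 * fields above are typically filled for a tunnel-mode ESP template that
 * leaves the SPI to be acquired by the key manager.
 */
static inline void xfrm_tmpl_fill_sketch(struct xfrm_tmpl *t,
					 const xfrm_address_t *daddr,
					 const xfrm_address_t *saddr)
{
	t->id.proto = XFRM_PROTO_ESP;	/* AH/ESP/IPCOMP */
	t->id.daddr = *daddr;		/* remote tunnel endpoint */
	t->id.spi = 0;			/* zero => acquire an SPI */
	t->saddr = *saddr;		/* local tunnel endpoint */
	t->encap_family = AF_INET;
	t->reqid = 1;			/* placeholder reqid */
	t->mode = XFRM_MODE_TUNNEL;
	t->share = XFRM_SHARE_ANY;
	t->optional = 0;		/* an SA is mandatory */
	t->allalgs = 1;			/* skip per-algo checks */
	t->aalgos = t->ealgos = t->calgos = ~(u32)0;
}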
460
461 #define XFRM_MAX_DEPTH 6
462
463 struct xfrm_policy_walk_entry {
464 struct list_head all;
465 u8 dead;
466 };
467
468 struct xfrm_policy_walk {
469 struct xfrm_policy_walk_entry walk;
470 u8 type;
471 u32 seq;
472 };
473
474 struct xfrm_policy {
475 #ifdef CONFIG_NET_NS
476 struct net *xp_net;
477 #endif
478 struct hlist_node bydst;
479 struct hlist_node byidx;
480
481 /* This lock only affects elements except for entry. */
482 rwlock_t lock;
483 atomic_t refcnt;
484 struct timer_list timer;
485
486 struct flow_cache_object flo;
487 atomic_t genid;
488 u32 priority;
489 u32 index;
490 struct xfrm_mark mark;
491 struct xfrm_selector selector;
492 struct xfrm_lifetime_cfg lft;
493 struct xfrm_lifetime_cur curlft;
494 struct xfrm_policy_walk_entry walk;
495 u8 type;
496 u8 action;
497 u8 flags;
498 u8 xfrm_nr;
499 u16 family;
500 struct xfrm_sec_ctx *security;
501 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
502 };
503
504 static inline struct net *xp_net(struct xfrm_policy *xp)
505 {
506 return read_pnet(&xp->xp_net);
507 }
508
509 struct xfrm_kmaddress {
510 xfrm_address_t local;
511 xfrm_address_t remote;
512 u32 reserved;
513 u16 family;
514 };
515
516 struct xfrm_migrate {
517 xfrm_address_t old_daddr;
518 xfrm_address_t old_saddr;
519 xfrm_address_t new_daddr;
520 xfrm_address_t new_saddr;
521 u8 proto;
522 u8 mode;
523 u16 reserved;
524 u32 reqid;
525 u16 old_family;
526 u16 new_family;
527 };
528
529 #define XFRM_KM_TIMEOUT 30
530 /* which seqno */
531 #define XFRM_REPLAY_SEQ 1
532 #define XFRM_REPLAY_OSEQ 2
533 #define XFRM_REPLAY_SEQ_MASK 3
534 /* what happened */
535 #define XFRM_REPLAY_UPDATE XFRM_AE_CR
536 #define XFRM_REPLAY_TIMEOUT XFRM_AE_CE
537
538 /* default aevent timeout in units of 100ms */
539 #define XFRM_AE_ETIME 10
540 /* Async Event timer multiplier */
541 #define XFRM_AE_ETH_M 10
542 /* default seq threshold size */
543 #define XFRM_AE_SEQT_SIZE 2
544
545 struct xfrm_mgr {
546 struct list_head list;
547 char *id;
548 int (*notify)(struct xfrm_state *x, struct km_event *c);
549 int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
550 struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
551 int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
552 int (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
553 int (*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
554 int (*migrate)(struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_bundles, struct xfrm_kmaddress *k);
555 };
556
557 extern int xfrm_register_km(struct xfrm_mgr *km);
558 extern int xfrm_unregister_km(struct xfrm_mgr *km);
559
560 /*
561  * This structure is used for the duration in which packets are being
562  * transformed by IPsec. As soon as the packet leaves IPsec, the
563  * area beyond the generic IP part may be overwritten.
564 */
565 struct xfrm_skb_cb {
566 union {
567 struct inet_skb_parm h4;
568 struct inet6_skb_parm h6;
569 } header;
570
571 /* Sequence number for replay protection. */
572 union {
573 u64 output;
574 __be32 input;
575 } seq;
576 };
577
578 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
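
/*
 * Usage sketch (hypothetical helper): the control-block view above is how
 * the output path stashes and later reads the replay sequence number.
 */
static inline u64 xfrm_skb_output_seq_sketch(struct sk_buff *skb)
{
	return XFRM_SKB_CB(skb)->seq.output;
}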
579
580 /*
581 * This structure is used by the afinfo prepare_input/prepare_output functions
582 * to transmit header information to the mode input/output functions.
583 */
584 struct xfrm_mode_skb_cb {
585 union {
586 struct inet_skb_parm h4;
587 struct inet6_skb_parm h6;
588 } header;
589
590 /* Copied from header for IPv4, always set to zero and DF for IPv6. */
591 __be16 id;
592 __be16 frag_off;
593
594 /* IP header length (excluding options or extension headers). */
595 u8 ihl;
596
597 /* TOS for IPv4, class for IPv6. */
598 u8 tos;
599
600 	/* TTL for IPv4, hop limit for IPv6. */
601 u8 ttl;
602
603 /* Protocol for IPv4, NH for IPv6. */
604 u8 protocol;
605
606 /* Option length for IPv4, zero for IPv6. */
607 u8 optlen;
608
609 /* Used by IPv6 only, zero for IPv4. */
610 u8 flow_lbl[3];
611 };
612
613 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
614
615 /*
616 * This structure is used by the input processing to locate the SPI and
617 * related information.
618 */
619 struct xfrm_spi_skb_cb {
620 union {
621 struct inet_skb_parm h4;
622 struct inet6_skb_parm h6;
623 } header;
624
625 unsigned int daddroff;
626 unsigned int family;
627 };
628
629 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
630
631 /* Audit Information */
632 struct xfrm_audit {
633 u32 secid;
634 uid_t loginuid;
635 u32 sessionid;
636 };
637
638 #ifdef CONFIG_AUDITSYSCALL
639 static inline struct audit_buffer *xfrm_audit_start(const char *op)
640 {
641 struct audit_buffer *audit_buf = NULL;
642
643 if (audit_enabled == 0)
644 return NULL;
645 audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC,
646 AUDIT_MAC_IPSEC_EVENT);
647 if (audit_buf == NULL)
648 return NULL;
649 audit_log_format(audit_buf, "op=%s", op);
650 return audit_buf;
651 }
652
653 static inline void xfrm_audit_helper_usrinfo(uid_t auid, u32 ses, u32 secid,
654 struct audit_buffer *audit_buf)
655 {
656 char *secctx;
657 u32 secctx_len;
658
659 audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
660 if (secid != 0 &&
661 security_secid_to_secctx(secid, &secctx, &secctx_len) == 0) {
662 audit_log_format(audit_buf, " subj=%s", secctx);
663 security_release_secctx(secctx, secctx_len);
664 } else
665 audit_log_task_context(audit_buf);
666 }
667
668 extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
669 u32 auid, u32 ses, u32 secid);
670 extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
671 u32 auid, u32 ses, u32 secid);
672 extern void xfrm_audit_state_add(struct xfrm_state *x, int result,
673 u32 auid, u32 ses, u32 secid);
674 extern void xfrm_audit_state_delete(struct xfrm_state *x, int result,
675 u32 auid, u32 ses, u32 secid);
676 extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
677 struct sk_buff *skb);
678 extern void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
679 extern void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
680 __be32 net_spi, __be32 net_seq);
681 extern void xfrm_audit_state_icvfail(struct xfrm_state *x,
682 struct sk_buff *skb, u8 proto);
683 #else
684
685 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
686 u32 auid, u32 ses, u32 secid)
687 {
688 }
689
690 static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
691 u32 auid, u32 ses, u32 secid)
692 {
693 }
694
695 static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
696 u32 auid, u32 ses, u32 secid)
697 {
698 }
699
700 static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
701 u32 auid, u32 ses, u32 secid)
702 {
703 }
704
705 static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
706 struct sk_buff *skb)
707 {
708 }
709
710 static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
711 u16 family)
712 {
713 }
714
715 static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
716 __be32 net_spi, __be32 net_seq)
717 {
718 }
719
720 static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
721 struct sk_buff *skb, u8 proto)
722 {
723 }
724 #endif /* CONFIG_AUDITSYSCALL */
725
726 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
727 {
728 if (likely(policy != NULL))
729 atomic_inc(&policy->refcnt);
730 }
731
732 extern void xfrm_policy_destroy(struct xfrm_policy *policy);
733
734 static inline void xfrm_pol_put(struct xfrm_policy *policy)
735 {
736 if (atomic_dec_and_test(&policy->refcnt))
737 xfrm_policy_destroy(policy);
738 }
739
740 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
741 {
742 int i;
743 for (i = npols - 1; i >= 0; --i)
744 xfrm_pol_put(pols[i]);
745 }
746
747 extern void __xfrm_state_destroy(struct xfrm_state *);
748
749 static inline void __xfrm_state_put(struct xfrm_state *x)
750 {
751 atomic_dec(&x->refcnt);
752 }
753
754 static inline void xfrm_state_put(struct xfrm_state *x)
755 {
756 if (atomic_dec_and_test(&x->refcnt))
757 __xfrm_state_destroy(x);
758 }
759
760 static inline void xfrm_state_hold(struct xfrm_state *x)
761 {
762 atomic_inc(&x->refcnt);
763 }
764
765 static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
766 {
767 __be32 *a1 = token1;
768 __be32 *a2 = token2;
769 int pdw;
770 int pbi;
771
772 pdw = prefixlen >> 5; /* num of whole u32 in prefix */
773 pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */
774
775 if (pdw)
776 if (memcmp(a1, a2, pdw << 2))
777 return 0;
778
779 if (pbi) {
780 __be32 mask;
781
782 mask = htonl((0xffffffff) << (32 - pbi));
783
784 if ((a1[pdw] ^ a2[pdw]) & mask)
785 return 0;
786 }
787
788 return 1;
789 }
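
/*
 * Usage sketch (hypothetical helper, placeholder prefix length):
 * addr_match() compares raw big-endian words, so the same call works for
 * IPv4 (prefixlen <= 32) and IPv6 (prefixlen <= 128) selectors.
 */
static __inline__ int addr_match_sketch(xfrm_address_t *a, xfrm_address_t *b)
{
	/* true when a and b share the same 24-bit prefix */
	return addr_match(a, b, 24);
}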
790
791 static __inline__
792 __be16 xfrm_flowi_sport(struct flowi *fl)
793 {
794 __be16 port;
795 switch(fl->proto) {
796 case IPPROTO_TCP:
797 case IPPROTO_UDP:
798 case IPPROTO_UDPLITE:
799 case IPPROTO_SCTP:
800 port = fl->fl_ip_sport;
801 break;
802 case IPPROTO_ICMP:
803 case IPPROTO_ICMPV6:
804 port = htons(fl->fl_icmp_type);
805 break;
806 case IPPROTO_MH:
807 port = htons(fl->fl_mh_type);
808 break;
809 case IPPROTO_GRE:
810 port = htons(ntohl(fl->fl_gre_key) >> 16);
811 break;
812 default:
813 port = 0; /*XXX*/
814 }
815 return port;
816 }
817
818 static __inline__
819 __be16 xfrm_flowi_dport(struct flowi *fl)
820 {
821 __be16 port;
822 switch(fl->proto) {
823 case IPPROTO_TCP:
824 case IPPROTO_UDP:
825 case IPPROTO_UDPLITE:
826 case IPPROTO_SCTP:
827 port = fl->fl_ip_dport;
828 break;
829 case IPPROTO_ICMP:
830 case IPPROTO_ICMPV6:
831 port = htons(fl->fl_icmp_code);
832 break;
833 case IPPROTO_GRE:
834 port = htons(ntohl(fl->fl_gre_key) & 0xffff);
835 break;
836 default:
837 port = 0; /*XXX*/
838 }
839 return port;
840 }
841
842 extern int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
843 unsigned short family);
844
845 #ifdef CONFIG_SECURITY_NETWORK_XFRM
846 /* If neither has a context --> match
847 * Otherwise, both must have a context and the sids, doi, alg must match
848 */
849 static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
850 {
851 return ((!s1 && !s2) ||
852 (s1 && s2 &&
853 (s1->ctx_sid == s2->ctx_sid) &&
854 (s1->ctx_doi == s2->ctx_doi) &&
855 (s1->ctx_alg == s2->ctx_alg)));
856 }
857 #else
858 static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
859 {
860 return 1;
861 }
862 #endif
863
864 /* A struct encoding a bundle of transformations to apply to some set of flows.
865  *
866  * dst->child points to the next element of the bundle.
867  * dst->xfrm points to an instance of a transformer.
868  *
869  * Due to unfortunate limitations of the current routing cache, which we
870  * have no time to fix, it mirrors struct rtable and is bound to the same
871  * routing key, including saddr and daddr. However, we can have many
872  * bundles differing by session id. All the bundles grow from a parent
873  * policy rule.
874  */
875 struct xfrm_dst {
876 union {
877 struct dst_entry dst;
878 struct rtable rt;
879 struct rt6_info rt6;
880 } u;
881 struct dst_entry *route;
882 struct flow_cache_object flo;
883 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
884 int num_pols, num_xfrms;
885 #ifdef CONFIG_XFRM_SUB_POLICY
886 struct flowi *origin;
887 struct xfrm_selector *partner;
888 #endif
889 u32 xfrm_genid;
890 u32 policy_genid;
891 u32 route_mtu_cached;
892 u32 child_mtu_cached;
893 u32 route_cookie;
894 u32 path_cookie;
895 };
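
/*
 * Illustrative sketch (hypothetical helper): since the generic dst_entry
 * is the first member of the union above, a dst_entry that is known to be
 * part of an IPsec bundle can be viewed as its enclosing xfrm_dst with a
 * plain cast; this is how the xfrm code reaches ->route and the cached
 * policies from a bundle element.
 */
static inline struct xfrm_dst *xfrm_dst_sketch(struct dst_entry *dst)
{
	return (struct xfrm_dst *)dst;
}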
896
897 #ifdef CONFIG_XFRM
898 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
899 {
900 xfrm_pols_put(xdst->pols, xdst->num_pols);
901 dst_release(xdst->route);
902 if (likely(xdst->u.dst.xfrm))
903 xfrm_state_put(xdst->u.dst.xfrm);
904 #ifdef CONFIG_XFRM_SUB_POLICY
905 kfree(xdst->origin);
906 xdst->origin = NULL;
907 kfree(xdst->partner);
908 xdst->partner = NULL;
909 #endif
910 }
911 #endif
912
913 extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
914
915 struct sec_path {
916 atomic_t refcnt;
917 int len;
918 struct xfrm_state *xvec[XFRM_MAX_DEPTH];
919 };
920
921 static inline struct sec_path *
922 secpath_get(struct sec_path *sp)
923 {
924 if (sp)
925 atomic_inc(&sp->refcnt);
926 return sp;
927 }
928
929 extern void __secpath_destroy(struct sec_path *sp);
930
931 static inline void
932 secpath_put(struct sec_path *sp)
933 {
934 if (sp && atomic_dec_and_test(&sp->refcnt))
935 __secpath_destroy(sp);
936 }
937
938 extern struct sec_path *secpath_dup(struct sec_path *src);
939
940 static inline void
941 secpath_reset(struct sk_buff *skb)
942 {
943 #ifdef CONFIG_XFRM
944 secpath_put(skb->sp);
945 skb->sp = NULL;
946 #endif
947 }
948
949 static inline int
950 xfrm_addr_any(xfrm_address_t *addr, unsigned short family)
951 {
952 switch (family) {
953 case AF_INET:
954 return addr->a4 == 0;
955 case AF_INET6:
956 return ipv6_addr_any((struct in6_addr *)&addr->a6);
957 }
958 return 0;
959 }
960
961 static inline int
962 __xfrm4_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
963 {
964 return (tmpl->saddr.a4 &&
965 tmpl->saddr.a4 != x->props.saddr.a4);
966 }
967
968 static inline int
969 __xfrm6_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
970 {
971 return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
972 ipv6_addr_cmp((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
973 }
974
975 static inline int
976 xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short family)
977 {
978 switch (family) {
979 case AF_INET:
980 return __xfrm4_state_addr_cmp(tmpl, x);
981 case AF_INET6:
982 return __xfrm6_state_addr_cmp(tmpl, x);
983 }
984 return !0;
985 }
986
987 #ifdef CONFIG_XFRM
988 extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);
989
990 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
991 struct sk_buff *skb,
992 unsigned int family, int reverse)
993 {
994 struct net *net = dev_net(skb->dev);
995 int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
996
997 if (sk && sk->sk_policy[XFRM_POLICY_IN])
998 return __xfrm_policy_check(sk, ndir, skb, family);
999
1000 return (!net->xfrm.policy_count[dir] && !skb->sp) ||
1001 (skb_dst(skb)->flags & DST_NOPOLICY) ||
1002 __xfrm_policy_check(sk, ndir, skb, family);
1003 }
1004
1005 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1006 {
1007 return __xfrm_policy_check2(sk, dir, skb, family, 0);
1008 }
1009
1010 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1011 {
1012 return xfrm_policy_check(sk, dir, skb, AF_INET);
1013 }
1014
1015 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1016 {
1017 return xfrm_policy_check(sk, dir, skb, AF_INET6);
1018 }
1019
1020 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1021 struct sk_buff *skb)
1022 {
1023 return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
1024 }
1025
1026 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1027 struct sk_buff *skb)
1028 {
1029 return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
1030 }
1031
1032 extern int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1033 unsigned int family, int reverse);
1034
1035 static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1036 unsigned int family)
1037 {
1038 return __xfrm_decode_session(skb, fl, family, 0);
1039 }
1040
1041 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1042 struct flowi *fl,
1043 unsigned int family)
1044 {
1045 return __xfrm_decode_session(skb, fl, family, 1);
1046 }
1047
1048 extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
1049
1050 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
1051 {
1052 struct net *net = dev_net(skb->dev);
1053
1054 return !net->xfrm.policy_count[XFRM_POLICY_OUT] ||
1055 (skb_dst(skb)->flags & DST_NOXFRM) ||
1056 __xfrm_route_forward(skb, family);
1057 }
1058
1059 static inline int xfrm4_route_forward(struct sk_buff *skb)
1060 {
1061 return xfrm_route_forward(skb, AF_INET);
1062 }
1063
1064 static inline int xfrm6_route_forward(struct sk_buff *skb)
1065 {
1066 return xfrm_route_forward(skb, AF_INET6);
1067 }
1068
1069 extern int __xfrm_sk_clone_policy(struct sock *sk);
1070
1071 static inline int xfrm_sk_clone_policy(struct sock *sk)
1072 {
1073 if (unlikely(sk->sk_policy[0] || sk->sk_policy[1]))
1074 return __xfrm_sk_clone_policy(sk);
1075 return 0;
1076 }
1077
1078 extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
1079
1080 static inline void xfrm_sk_free_policy(struct sock *sk)
1081 {
1082 if (unlikely(sk->sk_policy[0] != NULL)) {
1083 xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX);
1084 sk->sk_policy[0] = NULL;
1085 }
1086 if (unlikely(sk->sk_policy[1] != NULL)) {
1087 xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1);
1088 sk->sk_policy[1] = NULL;
1089 }
1090 }
1091
1092 #else
1093
1094 static inline void xfrm_sk_free_policy(struct sock *sk) {}
1095 static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; }
1096 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
1097 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
1098 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1099 {
1100 return 1;
1101 }
1102 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1103 {
1104 return 1;
1105 }
1106 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1107 {
1108 return 1;
1109 }
1110 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1111 struct flowi *fl,
1112 unsigned int family)
1113 {
1114 return -ENOSYS;
1115 }
1116 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1117 struct sk_buff *skb)
1118 {
1119 return 1;
1120 }
1121 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1122 struct sk_buff *skb)
1123 {
1124 return 1;
1125 }
1126 #endif
1127
1128 static __inline__
1129 xfrm_address_t *xfrm_flowi_daddr(struct flowi *fl, unsigned short family)
1130 {
1131 switch (family){
1132 case AF_INET:
1133 return (xfrm_address_t *)&fl->fl4_dst;
1134 case AF_INET6:
1135 return (xfrm_address_t *)&fl->fl6_dst;
1136 }
1137 return NULL;
1138 }
1139
1140 static __inline__
1141 xfrm_address_t *xfrm_flowi_saddr(struct flowi *fl, unsigned short family)
1142 {
1143 switch (family){
1144 case AF_INET:
1145 return (xfrm_address_t *)&fl->fl4_src;
1146 case AF_INET6:
1147 return (xfrm_address_t *)&fl->fl6_src;
1148 }
1149 return NULL;
1150 }
1151
1152 static __inline__
1153 void xfrm_flowi_addr_get(struct flowi *fl,
1154 xfrm_address_t *saddr, xfrm_address_t *daddr,
1155 unsigned short family)
1156 {
1157 switch(family) {
1158 case AF_INET:
1159 memcpy(&saddr->a4, &fl->fl4_src, sizeof(saddr->a4));
1160 memcpy(&daddr->a4, &fl->fl4_dst, sizeof(daddr->a4));
1161 break;
1162 case AF_INET6:
1163 ipv6_addr_copy((struct in6_addr *)&saddr->a6, &fl->fl6_src);
1164 ipv6_addr_copy((struct in6_addr *)&daddr->a6, &fl->fl6_dst);
1165 break;
1166 }
1167 }
1168
1169 static __inline__ int
1170 __xfrm4_state_addr_check(struct xfrm_state *x,
1171 xfrm_address_t *daddr, xfrm_address_t *saddr)
1172 {
1173 if (daddr->a4 == x->id.daddr.a4 &&
1174 (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1175 return 1;
1176 return 0;
1177 }
1178
1179 static __inline__ int
1180 __xfrm6_state_addr_check(struct xfrm_state *x,
1181 xfrm_address_t *daddr, xfrm_address_t *saddr)
1182 {
1183 if (!ipv6_addr_cmp((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1184 (!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr)||
1185 ipv6_addr_any((struct in6_addr *)saddr) ||
1186 ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1187 return 1;
1188 return 0;
1189 }
1190
1191 static __inline__ int
1192 xfrm_state_addr_check(struct xfrm_state *x,
1193 xfrm_address_t *daddr, xfrm_address_t *saddr,
1194 unsigned short family)
1195 {
1196 switch (family) {
1197 case AF_INET:
1198 return __xfrm4_state_addr_check(x, daddr, saddr);
1199 case AF_INET6:
1200 return __xfrm6_state_addr_check(x, daddr, saddr);
1201 }
1202 return 0;
1203 }
1204
1205 static __inline__ int
1206 xfrm_state_addr_flow_check(struct xfrm_state *x, struct flowi *fl,
1207 unsigned short family)
1208 {
1209 switch (family) {
1210 case AF_INET:
1211 return __xfrm4_state_addr_check(x,
1212 (xfrm_address_t *)&fl->fl4_dst,
1213 (xfrm_address_t *)&fl->fl4_src);
1214 case AF_INET6:
1215 return __xfrm6_state_addr_check(x,
1216 (xfrm_address_t *)&fl->fl6_dst,
1217 (xfrm_address_t *)&fl->fl6_src);
1218 }
1219 return 0;
1220 }
1221
1222 static inline int xfrm_state_kern(struct xfrm_state *x)
1223 {
1224 return atomic_read(&x->tunnel_users);
1225 }
1226
1227 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1228 {
1229 return (!userproto || proto == userproto ||
1230 (userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
1231 proto == IPPROTO_ESP ||
1232 proto == IPPROTO_COMP)));
1233 }
1234
1235 /*
1236 * xfrm algorithm information
1237 */
1238 struct xfrm_algo_aead_info {
1239 u16 icv_truncbits;
1240 };
1241
1242 struct xfrm_algo_auth_info {
1243 u16 icv_truncbits;
1244 u16 icv_fullbits;
1245 };
1246
1247 struct xfrm_algo_encr_info {
1248 u16 blockbits;
1249 u16 defkeybits;
1250 };
1251
1252 struct xfrm_algo_comp_info {
1253 u16 threshold;
1254 };
1255
1256 struct xfrm_algo_desc {
1257 char *name;
1258 char *compat;
1259 u8 available:1;
1260 union {
1261 struct xfrm_algo_aead_info aead;
1262 struct xfrm_algo_auth_info auth;
1263 struct xfrm_algo_encr_info encr;
1264 struct xfrm_algo_comp_info comp;
1265 } uinfo;
1266 struct sadb_alg desc;
1267 };
1268
1269 /* XFRM tunnel handlers. */
1270 struct xfrm_tunnel {
1271 int (*handler)(struct sk_buff *skb);
1272 int (*err_handler)(struct sk_buff *skb, u32 info);
1273
1274 struct xfrm_tunnel __rcu *next;
1275 int priority;
1276 };
1277
1278 struct xfrm6_tunnel {
1279 int (*handler)(struct sk_buff *skb);
1280 int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1281 u8 type, u8 code, int offset, __be32 info);
1282 struct xfrm6_tunnel __rcu *next;
1283 int priority;
1284 };
1285
1286 extern void xfrm_init(void);
1287 extern void xfrm4_init(int rt_hash_size);
1288 extern int xfrm_state_init(struct net *net);
1289 extern void xfrm_state_fini(struct net *net);
1290 extern void xfrm4_state_init(void);
1291 #ifdef CONFIG_XFRM
1292 extern int xfrm6_init(void);
1293 extern void xfrm6_fini(void);
1294 extern int xfrm6_state_init(void);
1295 extern void xfrm6_state_fini(void);
1296 #else
1297 static inline int xfrm6_init(void)
1298 {
1299 return 0;
1300 }
1301 static inline void xfrm6_fini(void)
1302 {
1303 ;
1304 }
1305 #endif
1306
1307 #ifdef CONFIG_XFRM_STATISTICS
1308 extern int xfrm_proc_init(struct net *net);
1309 extern void xfrm_proc_fini(struct net *net);
1310 #endif
1311
1312 extern int xfrm_sysctl_init(struct net *net);
1313 #ifdef CONFIG_SYSCTL
1314 extern void xfrm_sysctl_fini(struct net *net);
1315 #else
1316 static inline void xfrm_sysctl_fini(struct net *net)
1317 {
1318 }
1319 #endif
1320
1321 extern void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
1322 extern int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1323 int (*func)(struct xfrm_state *, int, void*), void *);
1324 extern void xfrm_state_walk_done(struct xfrm_state_walk *walk);
1325 extern struct xfrm_state *xfrm_state_alloc(struct net *net);
1326 extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
1327 struct flowi *fl, struct xfrm_tmpl *tmpl,
1328 struct xfrm_policy *pol, int *err,
1329 unsigned short family);
1330 extern struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
1331 xfrm_address_t *daddr,
1332 xfrm_address_t *saddr,
1333 unsigned short family,
1334 u8 mode, u8 proto, u32 reqid);
1335 extern int xfrm_state_check_expire(struct xfrm_state *x);
1336 extern void xfrm_state_insert(struct xfrm_state *x);
1337 extern int xfrm_state_add(struct xfrm_state *x);
1338 extern int xfrm_state_update(struct xfrm_state *x);
1339 extern struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
1340 xfrm_address_t *daddr, __be32 spi,
1341 u8 proto, unsigned short family);
1342 extern struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1343 xfrm_address_t *daddr,
1344 xfrm_address_t *saddr,
1345 u8 proto,
1346 unsigned short family);
1347 #ifdef CONFIG_XFRM_SUB_POLICY
1348 extern int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1349 int n, unsigned short family);
1350 extern int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1351 int n, unsigned short family);
1352 #else
1353 static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1354 int n, unsigned short family)
1355 {
1356 return -ENOSYS;
1357 }
1358
1359 static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1360 int n, unsigned short family)
1361 {
1362 return -ENOSYS;
1363 }
1364 #endif
1365
1366 struct xfrmk_sadinfo {
1367 u32 sadhcnt; /* current hash bkts */
1368 u32 sadhmcnt; /* max allowed hash bkts */
1369 u32 sadcnt; /* current running count */
1370 };
1371
1372 struct xfrmk_spdinfo {
1373 u32 incnt;
1374 u32 outcnt;
1375 u32 fwdcnt;
1376 u32 inscnt;
1377 u32 outscnt;
1378 u32 fwdscnt;
1379 u32 spdhcnt;
1380 u32 spdhmcnt;
1381 };
1382
1383 extern struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark,
1384 u32 seq);
1385 extern int xfrm_state_delete(struct xfrm_state *x);
1386 extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
1387 extern void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1388 extern void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
1389 extern int xfrm_replay_check(struct xfrm_state *x,
1390 struct sk_buff *skb, __be32 seq);
1391 extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
1392 extern void xfrm_replay_notify(struct xfrm_state *x, int event);
1393 extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
1394 extern int xfrm_init_state(struct xfrm_state *x);
1395 extern int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
1396 extern int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi,
1397 int encap_type);
1398 extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1399 extern int xfrm_output_resume(struct sk_buff *skb, int err);
1400 extern int xfrm_output(struct sk_buff *skb);
1401 extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1402 extern int xfrm4_extract_header(struct sk_buff *skb);
1403 extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1404 extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1405 int encap_type);
1406 extern int xfrm4_transport_finish(struct sk_buff *skb, int async);
1407 extern int xfrm4_rcv(struct sk_buff *skb);
1408
1409 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1410 {
1411 return xfrm4_rcv_encap(skb, nexthdr, spi, 0);
1412 }
1413
1414 extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1415 extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1416 extern int xfrm4_output(struct sk_buff *skb);
1417 extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1418 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1419 extern int xfrm6_extract_header(struct sk_buff *skb);
1420 extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1421 extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
1422 extern int xfrm6_transport_finish(struct sk_buff *skb, int async);
1423 extern int xfrm6_rcv(struct sk_buff *skb);
1424 extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1425 xfrm_address_t *saddr, u8 proto);
1426 extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1427 extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1428 extern __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
1429 extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr);
1430 extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1431 extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1432 extern int xfrm6_output(struct sk_buff *skb);
1433 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
1434 u8 **prevhdr);
1435
1436 #ifdef CONFIG_XFRM
1437 extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1438 extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen);
1439 #else
1440 static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1441 {
1442 return -ENOPROTOOPT;
1443 }
1444
1445 static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
1446 {
1447 /* should not happen */
1448 kfree_skb(skb);
1449 return 0;
1450 }
1451 #endif
1452
1453 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
1454
1455 extern void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
1456 extern int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1457 int (*func)(struct xfrm_policy *, int, int, void*), void *);
1458 extern void xfrm_policy_walk_done(struct xfrm_policy_walk *walk);
1459 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1460 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
1461 u8 type, int dir,
1462 struct xfrm_selector *sel,
1463 struct xfrm_sec_ctx *ctx, int delete,
1464 int *err);
1465 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, u32 id, int delete, int *err);
1466 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info);
1467 u32 xfrm_get_acqseq(void);
1468 extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
1469 struct xfrm_state *xfrm_find_acq(struct net *net, struct xfrm_mark *mark,
1470 u8 mode, u32 reqid, u8 proto,
1471 xfrm_address_t *daddr,
1472 xfrm_address_t *saddr, int create,
1473 unsigned short family);
1474 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1475
1476 #ifdef CONFIG_XFRM_MIGRATE
1477 extern int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1478 struct xfrm_migrate *m, int num_bundles,
1479 struct xfrm_kmaddress *k);
1480 extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m);
1481 extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1482 struct xfrm_migrate *m);
1483 extern int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1484 struct xfrm_migrate *m, int num_bundles,
1485 struct xfrm_kmaddress *k);
1486 #endif
1487
1488 extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1489 extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid);
1490 extern int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
1491
1492 extern void xfrm_input_init(void);
1493 extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1494
1495 extern void xfrm_probe_algs(void);
1496 extern int xfrm_count_auth_supported(void);
1497 extern int xfrm_count_enc_supported(void);
1498 extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1499 extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1500 extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1501 extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1502 extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1503 extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe);
1504 extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe);
1505 extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe);
1506 extern struct xfrm_algo_desc *xfrm_aead_get_byname(char *name, int icv_len,
1507 int probe);
1508
1509 struct hash_desc;
1510 struct scatterlist;
1511 typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *,
1512 unsigned int);
1513
1514 static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
1515 int family)
1516 {
1517 switch (family) {
1518 default:
1519 case AF_INET:
1520 return (__force u32)a->a4 - (__force u32)b->a4;
1521 case AF_INET6:
1522 return ipv6_addr_cmp((struct in6_addr *)a,
1523 (struct in6_addr *)b);
1524 }
1525 }
1526
1527 static inline int xfrm_policy_id2dir(u32 index)
1528 {
1529 return index & 7;
1530 }
1531
1532 #ifdef CONFIG_XFRM
1533 static inline int xfrm_aevent_is_on(struct net *net)
1534 {
1535 struct sock *nlsk;
1536 int ret = 0;
1537
1538 rcu_read_lock();
1539 nlsk = rcu_dereference(net->xfrm.nlsk);
1540 if (nlsk)
1541 ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1542 rcu_read_unlock();
1543 return ret;
1544 }
1545 #endif
1546
1547 static inline int xfrm_alg_len(struct xfrm_algo *alg)
1548 {
1549 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1550 }
1551
1552 static inline int xfrm_alg_auth_len(struct xfrm_algo_auth *alg)
1553 {
1554 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1555 }
1556
1557 #ifdef CONFIG_XFRM_MIGRATE
1558 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
1559 {
1560 return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
1561 }
1562
1563 static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig)
1564 {
1565 return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL);
1566 }
1567
1568 static inline void xfrm_states_put(struct xfrm_state **states, int n)
1569 {
1570 int i;
1571 for (i = 0; i < n; i++)
1572 xfrm_state_put(*(states + i));
1573 }
1574
1575 static inline void xfrm_states_delete(struct xfrm_state **states, int n)
1576 {
1577 int i;
1578 for (i = 0; i < n; i++)
1579 xfrm_state_delete(*(states + i));
1580 }
1581 #endif
1582
1583 #ifdef CONFIG_XFRM
1584 static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1585 {
1586 return skb->sp->xvec[skb->sp->len - 1];
1587 }
1588 #endif
1589
1590 static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
1591 {
1592 if (attrs[XFRMA_MARK])
1593 memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
1594 else
1595 m->v = m->m = 0;
1596
1597 return m->v & m->m;
1598 }
1599
1600 static inline int xfrm_mark_put(struct sk_buff *skb, struct xfrm_mark *m)
1601 {
1602 if (m->m | m->v)
1603 NLA_PUT(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
1604 return 0;
1605
1606 nla_put_failure:
1607 return -1;
1608 }
1609
1610 #endif /* _NET_XFRM_H */