]>
Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | /* |
3 | * net/dst.h Protocol independent destination cache definitions. | |
4 | * | |
5 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> | |
6 | * | |
7 | */ | |
8 | ||
9 | #ifndef _NET_DST_H | |
10 | #define _NET_DST_H | |
11 | ||
86393e52 | 12 | #include <net/dst_ops.h> |
14c85021 | 13 | #include <linux/netdevice.h> |
1da177e4 LT |
14 | #include <linux/rtnetlink.h> |
15 | #include <linux/rcupdate.h> | |
187f1882 | 16 | #include <linux/bug.h> |
1da177e4 | 17 | #include <linux/jiffies.h> |
9620fef2 | 18 | #include <linux/refcount.h> |
1da177e4 LT |
19 | #include <net/neighbour.h> |
20 | #include <asm/processor.h> | |
21 | ||
1da177e4 LT |
22 | #define DST_GC_MIN (HZ/10) |
23 | #define DST_GC_INC (HZ/2) | |
24 | #define DST_GC_MAX (120*HZ) | |
25 | ||
26 | /* Each dst_entry has reference count and sits in some parent list(s). | |
27 | * When it is removed from parent list, it is "freed" (dst_free). | |
28 | * After this it enters dead state (dst->obsolete > 0) and if its refcnt | |
29 | * is zero, it can be destroyed immediately, otherwise it is added | |
30 | * to gc list and garbage collector periodically checks the refcnt. | |
31 | */ | |
32 | ||
33 | struct sk_buff; | |
34 | ||
/* Protocol-independent destination cache entry.
 *
 * Field order is layout-sensitive: __refcnt is deliberately padded onto
 * a different cache line from the hot read-mostly fields (input/output/ops)
 * -- see the comments below and the BUILD_BUG_ON in dst_hold().
 */
struct dst_entry {
	struct net_device	*dev;		/* output device */
	struct rcu_head		rcu_head;	/* for RCU-deferred destruction */
	struct dst_entry	*child;
	struct dst_ops		*ops;		/* per-family dst operations */
	unsigned long		_metrics;	/* pointer to u32 metrics array, low bits carry DST_METRICS_* flags */
	unsigned long		expires;	/* jiffies expiry time; 0 means "never" (see dst_set_expires) */
	struct dst_entry	*path;
	struct dst_entry	*from;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;		/* attached IPsec state, if any */
#else
	void			*__pad1;	/* keep layout identical without XFRM */
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);

	unsigned short		flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOCOUNT		0x0008
#define DST_FAKE_RTABLE		0x0010
#define DST_XFRM_TUNNEL		0x0020
#define DST_XFRM_QUEUE		0x0040
#define DST_METADATA		0x0080

	short			error;

	/* A non-zero value of dst->obsolete forces by-hand validation
	 * of the route entry.  Positive values are set by the generic
	 * dst layer to indicate that the entry has been forcefully
	 * destroyed.
	 *
	 * Negative values are used by the implementation layer code to
	 * force invocation of the dst_ops->check() method.
	 */
	short			obsolete;
#define DST_OBSOLETE_NONE	0
#define DST_OBSOLETE_DEAD	2
#define DST_OBSOLETE_FORCE_CHK	-1
#define DST_OBSOLETE_KILL	-2
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */
	unsigned short		__pad3;

#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32			tclassid;	/* routing classifier id (see dst_tclassid) */
#else
	__u32			__pad2;		/* keep layout identical without route classid */
#endif

#ifdef CONFIG_64BIT
	/*
	 * Align __refcnt to a 64 bytes alignment
	 * (L1_CACHE_SIZE would be too much)
	 */
	long			__pad_to_align_refcnt[2];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references */
	int			__use;		/* use counter, bumped by dst_use()/dst_use_noref() */
	unsigned long		lastuse;	/* jiffies of last use */
	struct lwtunnel_state	*lwtstate;	/* lightweight tunnel state, if any */
	union {
		struct dst_entry	*next;
		struct rtable __rcu	*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route __rcu	*dn_next;
	};
};
109 | ||
/* Refcounted block of route metrics (RTAX_*), shared between dst entries
 * when DST_METRICS_REFCOUNTED is set in dst->_metrics.
 */
struct dst_metrics {
	u32		metrics[RTAX_MAX];	/* indexed by RTAX_* - 1, see dst_metric_raw() */
	refcount_t	refcnt;
};
114 | extern const struct dst_metrics dst_default_metrics; | |
115 | ||
a4023dd0 | 116 | u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); |
62fa8a84 | 117 | |
e5fd387a | 118 | #define DST_METRICS_READ_ONLY 0x1UL |
3fb07daf | 119 | #define DST_METRICS_REFCOUNTED 0x2UL |
e5fd387a | 120 | #define DST_METRICS_FLAGS 0x3UL |
62fa8a84 | 121 | #define __DST_METRICS_PTR(Y) \ |
e5fd387a | 122 | ((u32 *)((Y) & ~DST_METRICS_FLAGS)) |
62fa8a84 DM |
123 | #define DST_METRICS_PTR(X) __DST_METRICS_PTR((X)->_metrics) |
124 | ||
125 | static inline bool dst_metrics_read_only(const struct dst_entry *dst) | |
126 | { | |
127 | return dst->_metrics & DST_METRICS_READ_ONLY; | |
128 | } | |
129 | ||
a4023dd0 | 130 | void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old); |
62fa8a84 DM |
131 | |
/* Release this dst's private metrics block, if it owns one.
 * Read-only metrics are shared (e.g. dst_default_metrics) and never freed here.
 */
static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
{
	unsigned long val = dst->_metrics;
	if (!(val & DST_METRICS_READ_ONLY))
		__dst_destroy_metrics_generic(dst, val);
}
138 | ||
/* Return a writable pointer to the metrics array, copy-on-writing a shared
 * (read-only) block via dst->ops->cow_metrics() first.  May return NULL if
 * the COW allocation fails.
 */
static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
	unsigned long p = dst->_metrics;

	/* _metrics must always carry a valid pointer (at least dst_default_metrics) */
	BUG_ON(!p);

	if (p & DST_METRICS_READ_ONLY)
		return dst->ops->cow_metrics(dst, p);
	return __DST_METRICS_PTR(p);
}
149 | ||
/* This may only be invoked before the entry has reached global
 * visibility.
 */
static inline void dst_init_metrics(struct dst_entry *dst,
				    const u32 *src_metrics,
				    bool read_only)
{
	/* Pointer and flag share one word; src_metrics must be suitably aligned
	 * so the low DST_METRICS_FLAGS bits are free.
	 */
	dst->_metrics = ((unsigned long) src_metrics) |
		(read_only ? DST_METRICS_READ_ONLY : 0);
}
160 | ||
161 | static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src) | |
162 | { | |
163 | u32 *dst_metrics = dst_metrics_write_ptr(dest); | |
164 | ||
165 | if (dst_metrics) { | |
166 | u32 *src_metrics = DST_METRICS_PTR(src); | |
167 | ||
168 | memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32)); | |
169 | } | |
170 | } | |
171 | ||
/* Return a (possibly shared, read-only) pointer to the metrics array. */
static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}
176 | ||
1da177e4 | 177 | static inline u32 |
5170ae82 | 178 | dst_metric_raw(const struct dst_entry *dst, const int metric) |
1da177e4 | 179 | { |
62fa8a84 DM |
180 | u32 *p = DST_METRICS_PTR(dst); |
181 | ||
182 | return p[metric-1]; | |
defb3519 DM |
183 | } |
184 | ||
/* Read metric @metric.  HOPLIMIT/ADVMSS/MTU have dedicated accessors that
 * apply per-family defaults, so warn if callers read them through here.
 */
static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}
193 | ||
0dbaee3b DM |
194 | static inline u32 |
195 | dst_metric_advmss(const struct dst_entry *dst) | |
196 | { | |
197 | u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS); | |
198 | ||
199 | if (!advmss) | |
200 | advmss = dst->ops->default_advmss(dst); | |
201 | ||
202 | return advmss; | |
203 | } | |
204 | ||
defb3519 DM |
/* Set metric @metric to @val, copy-on-writing shared metrics first.
 * Silently does nothing if the metrics cannot be made writable.
 */
static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
	u32 *p = dst_metrics_write_ptr(dst);

	if (p)
		p[metric-1] = val;
}
212 | ||
c3a8d947 DB |
213 | /* Kernel-internal feature bits that are unallocated in user space. */ |
214 | #define DST_FEATURE_ECN_CA (1 << 31) | |
215 | ||
216 | #define DST_FEATURE_MASK (DST_FEATURE_ECN_CA) | |
217 | #define DST_FEATURE_ECN_MASK (DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN) | |
218 | ||
0c3adfb8 GBY |
/* Test RTAX_FEATURE_* / DST_FEATURE_* bits in the RTAX_FEATURES metric.
 * Returns the masked bits (zero if the feature is not set).
 */
static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}
224 | ||
1da177e4 LT |
/* Effective MTU of this route, via the per-family dst->ops->mtu() hook. */
static inline u32 dst_mtu(const struct dst_entry *dst)
{
	return dst->ops->mtu(dst);
}
229 | ||
/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}
235 | ||
1da177e4 LT |
236 | static inline u32 |
237 | dst_allfrag(const struct dst_entry *dst) | |
238 | { | |
0c3adfb8 | 239 | int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG); |
1da177e4 LT |
240 | return ret; |
241 | } | |
242 | ||
/* Non-zero when metric @metric is locked (bit set in the RTAX_LOCK bitmask),
 * i.e. must not be updated dynamically.
 */
static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}
248 | ||
7f95e188 | 249 | static inline void dst_hold(struct dst_entry *dst) |
1da177e4 | 250 | { |
5635c10d ED |
251 | /* |
252 | * If your kernel compilation stops here, please check | |
253 | * __pad_to_align_refcnt declaration in struct dst_entry | |
254 | */ | |
255 | BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63); | |
44ebe791 | 256 | WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0); |
1da177e4 LT |
257 | } |
258 | ||
03f49f34 PE |
/* Take a reference and record a use of @dst at time @time (jiffies). */
static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}
265 | ||
7fee226a ED |
/* Record a use of @dst without taking a reference; the caller must hold
 * the dst alive by other means (e.g. RCU).
 */
static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}
271 | ||
7f95e188 | 272 | static inline struct dst_entry *dst_clone(struct dst_entry *dst) |
1da177e4 LT |
273 | { |
274 | if (dst) | |
222d7dbd | 275 | dst_hold(dst); |
1da177e4 LT |
276 | return dst; |
277 | } | |
278 | ||
a4023dd0 | 279 | void dst_release(struct dst_entry *dst); |
7fee226a | 280 | |
5f56f409 WW |
281 | void dst_release_immediate(struct dst_entry *dst); |
282 | ||
7fee226a ED |
/* Release the dst encoded in an skb->_skb_refdst word, unless the
 * SKB_DST_NOREF bit says no reference was taken.
 */
static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}
288 | ||
/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;	/* mark skb as having no dst */
	}
}
302 | ||
e79e2595 | 303 | static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst) |
7fee226a | 304 | { |
e79e2595 | 305 | nskb->_skb_refdst = refdst; |
7fee226a ED |
306 | if (!(nskb->_skb_refdst & SKB_DST_NOREF)) |
307 | dst_clone(skb_dst(nskb)); | |
308 | } | |
309 | ||
e79e2595 JS |
/* Copy @oskb's dst (and its noref-ness) onto @nskb, taking a reference
 * when appropriate.
 */
static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	__skb_dst_copy(nskb, oskb->_skb_refdst);
}
314 | ||
5037e9ef ED |
/**
 * dst_hold_safe - Take a reference on a dst if possible
 * @dst: pointer to dst entry
 *
 * This helper returns false if it could not safely
 * take a reference on a dst.
 */
static inline bool dst_hold_safe(struct dst_entry *dst)
{
	/* refcount 0 means the dst is already dying; refuse to resurrect it */
	return atomic_inc_not_zero(&dst->__refcnt);
}
326 | ||
/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted and not destroyed, grab a ref on it.
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		struct dst_entry *dst = skb_dst(skb);

		/* noref dsts are only valid under RCU protection */
		WARN_ON(!rcu_read_lock_held());
		if (!dst_hold_safe(dst))
			dst = NULL;	/* dst was dying; drop it from the skb */

		skb->_skb_refdst = (unsigned long)dst;
	}
}
345 | ||
d19d56dd | 346 | |
290b895e ED |
347 | /** |
348 | * __skb_tunnel_rx - prepare skb for rx reinsert | |
349 | * @skb: buffer | |
350 | * @dev: tunnel device | |
ea23192e | 351 | * @net: netns for packet i/o |
290b895e ED |
352 | * |
353 | * After decapsulation, packet is going to re-enter (netif_rx()) our stack, | |
354 | * so make some cleanups. (no accounting done) | |
355 | */ | |
ea23192e ND |
356 | static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev, |
357 | struct net *net) | |
290b895e ED |
358 | { |
359 | skb->dev = dev; | |
bdeab991 TH |
360 | |
361 | /* | |
7539fadc | 362 | * Clear hash so that we can recalulate the hash for the |
bdeab991 TH |
363 | * encapsulated packet, unless we have already determine the hash |
364 | * over the L4 4-tuple. | |
365 | */ | |
7539fadc | 366 | skb_clear_hash_if_not_l4(skb); |
290b895e | 367 | skb_set_queue_mapping(skb, 0); |
ea23192e | 368 | skb_scrub_packet(skb, !net_eq(net, dev_net(dev))); |
290b895e ED |
369 | } |
370 | ||
d19d56dd ED |
/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 * @net: netns for packet i/o
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups, and perform accounting.
 * Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				 struct net *net)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev, net);
}
388 | ||
808c1b69 DB |
/* Return the routing classifier id of @skb's dst, or 0 when there is no
 * dst or CONFIG_IP_ROUTE_CLASSID is not enabled.
 */
static inline u32 dst_tclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	const struct dst_entry *dst;

	dst = skb_dst(skb);
	if (dst)
		return dst->tclassid;
#endif
	return 0;
}
400 | ||
ede2059d | 401 | int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb); |
aad88724 ED |
/* Drop @skb as if it were being output in the initial netns. */
static inline int dst_discard(struct sk_buff *skb)
{
	return dst_discard_out(&init_net, skb->sk, skb);
}
a4023dd0 JP |
406 | void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref, |
407 | int initial_obsolete, unsigned short flags); | |
f38a9eb1 TG |
408 | void dst_init(struct dst_entry *dst, struct dst_ops *ops, |
409 | struct net_device *dev, int initial_ref, int initial_obsolete, | |
410 | unsigned short flags); | |
a4023dd0 | 411 | struct dst_entry *dst_destroy(struct dst_entry *dst); |
4a6ce2b6 | 412 | void dst_dev_put(struct dst_entry *dst); |
1da177e4 | 413 | |
1da177e4 LT |
/* Intentionally a no-op: neighbour confirmation is now done per-address
 * through dst_confirm_neigh() instead of per-dst.
 */
static inline void dst_confirm(struct dst_entry *dst)
{
}
f2c31e32 | 417 | |
d3aaeb38 DM |
/* Look up (creating if needed) the neighbour for @daddr via the dst's
 * per-family hook.  Error pointers are mapped to NULL.
 */
static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr);
	return IS_ERR(n) ? NULL : n;
}
423 | ||
/* As dst_neigh_lookup(), but the destination is derived from @skb by the
 * per-family hook.  Error pointers are mapped to NULL.
 */
static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
						     struct sk_buff *skb)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL);
	return IS_ERR(n) ? NULL : n;
}
430 | ||
63fca65d JA |
/* Confirm reachability of the neighbour for @daddr, if the dst's family
 * implements the confirm_neigh hook.
 */
static inline void dst_confirm_neigh(const struct dst_entry *dst,
				     const void *daddr)
{
	if (dst->ops->confirm_neigh)
		dst->ops->confirm_neigh(dst, daddr);
}
437 | ||
1da177e4 LT |
/* Notify the dst's family that delivery over this route failed, if it
 * implements link_failure.  Safe on skbs without a dst.
 */
static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}
444 | ||
/* Arm (or shorten) the expiry timer on @dst to @timeout jiffies from now.
 * Only ever moves the expiry earlier; 0 means "no expiry", so an expiry
 * that lands exactly on 0 is nudged to 1.
 */
static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}
455 | ||
/* Output packet to network from transport. */
static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return skb_dst(skb)->output(net, sk, skb);
}
1da177e4 LT |
461 | |
/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}
467 | ||
/* Revalidate a cached dst: if it is marked obsolete, defer to the family's
 * check() method, which may return the same dst, a replacement, or NULL.
 */
static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}
474 | ||
815f4e57 HX |
475 | /* Flags for xfrm_lookup flags argument. */ |
476 | enum { | |
80c0bc9e | 477 | XFRM_LOOKUP_ICMP = 1 << 0, |
b8c203b2 | 478 | XFRM_LOOKUP_QUEUE = 1 << 1, |
ac37e251 | 479 | XFRM_LOOKUP_KEEP_DST_REF = 1 << 2, |
815f4e57 HX |
480 | }; |
481 | ||
1da177e4 LT |
482 | struct flowi; |
483 | #ifndef CONFIG_XFRM | |
452edd59 DM |
/* Without CONFIG_XFRM there is no IPsec policy to apply: pass the original
 * route straight through.
 */
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl,
					    const struct sock *sk,
					    int flags)
{
	return dst_orig;
}
492 | ||
/* Without CONFIG_XFRM there is no IPsec policy to apply: pass the original
 * route straight through.
 */
static inline struct dst_entry *xfrm_lookup_route(struct net *net,
						  struct dst_entry *dst_orig,
						  const struct flowi *fl,
						  const struct sock *sk,
						  int flags)
{
	return dst_orig;
}
e87b3998 VY |
501 | |
/* Without CONFIG_XFRM a dst can never carry xfrm state. */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return NULL;
}
506 | ||
1da177e4 | 507 | #else |
a4023dd0 | 508 | struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, |
6f9c9615 | 509 | const struct flowi *fl, const struct sock *sk, |
a4023dd0 | 510 | int flags); |
e87b3998 | 511 | |
f92ee619 | 512 | struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, |
6f9c9615 | 513 | const struct flowi *fl, const struct sock *sk, |
f92ee619 SK |
514 | int flags); |
515 | ||
e87b3998 VY |
/* skb attached with this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return dst->xfrm;
}
1da177e4 | 521 | #endif |
1da177e4 LT |
522 | |
523 | #endif /* _NET_DST_H */ |