#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
        struct list_head        lru_list;
        spinlock_t              lru_lock;

        /* The percpu_counter "mem" needs to be cacheline aligned.
         * mem.count must not share a cacheline with other writers.
         */
        struct percpu_counter   mem ____cacheline_aligned_in_smp;

        /* sysctls */
        int                     timeout;
        int                     high_thresh;
        int                     low_thresh;
};

struct inet_frag_queue {
        spinlock_t              lock;
        struct timer_list       timer;      /* when will this queue expire? */
        struct list_head        lru_list;   /* lru list member */
        struct hlist_node       list;
        atomic_t                refcnt;
        struct sk_buff          *fragments; /* list of received fragments */
        struct sk_buff          *fragments_tail;
        ktime_t                 stamp;
        int                     len;        /* total length of orig datagram */
        int                     meat;
        __u8                    last_in;    /* first/last segment arrived? */

#define INET_FRAG_EVICTED       8
#define INET_FRAG_COMPLETE      4
#define INET_FRAG_FIRST_IN      2
#define INET_FRAG_LAST_IN       1

        u16                     max_size;

        struct netns_frags      *net;
};

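/* Illustrative sketch (not part of this header): a protocol treats a
 * datagram as complete once both the first and last fragments have
 * arrived and the accounted payload ("meat") covers the whole original
 * length. Modeled on the IPv4 reassembly path:
 *
 *      if (q->last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 *          q->meat == q->len)
 *              ... reassemble the datagram from q->fragments ...
 */
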
#define INETFRAGS_HASHSZ        1024

/* averaged:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *             rounded up (SKB_TRUESIZE(0) + sizeof(struct ipq or
 *             struct frag_queue))
 */
#define INETFRAGS_MAXDEPTH      128

struct inet_frag_bucket {
        struct hlist_head       chain;
        spinlock_t              chain_lock;
};

struct inet_frags {
        struct inet_frag_bucket hash[INETFRAGS_HASHSZ];
        /* This rwlock is a global lock (separate per IPv4, IPv6 and
         * netfilter). Important to keep this on a separate cacheline.
         * It's primarily a rebuild-protection rwlock.
         */
        rwlock_t                lock ____cacheline_aligned_in_smp;
        int                     secret_interval;
        struct timer_list       secret_timer;

        struct work_struct      frags_work;
        unsigned int            next_bucket;

        /* The first call to hashfn is responsible for initializing
         * rnd. This is best done with net_get_random_once.
         */
        u32                     rnd;
        int                     qsize;

        unsigned int            (*hashfn)(const struct inet_frag_queue *);
        bool                    (*match)(const struct inet_frag_queue *q,
                                         const void *arg);
        void                    (*constructor)(struct inet_frag_queue *q,
                                               const void *arg);
        void                    (*destructor)(struct inet_frag_queue *);
        void                    (*skb_free)(struct sk_buff *);
        void                    (*frag_expire)(unsigned long data);
};

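/* A protocol registers itself by filling in the callbacks and per-queue
 * size above, then calling inet_frags_init(). Sketch modeled on the
 * IPv4 setup in net/ipv4/ip_fragment.c (identifiers as used there):
 *
 *      ip4_frags.hashfn = ip4_hashfn;
 *      ip4_frags.constructor = ip4_frag_init;
 *      ip4_frags.destructor = ip4_frag_free;
 *      ip4_frags.skb_free = NULL;
 *      ip4_frags.qsize = sizeof(struct ipq);
 *      ip4_frags.match = ip4_frag_match;
 *      ip4_frags.frag_expire = ip_expire;
 *      ip4_frags.secret_interval = 10 * 60 * HZ;
 *      inet_frags_init(&ip4_frags);
 */
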
void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

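/* Per-namespace setup: set the sysctl defaults, then initialize the
 * accounting and LRU state. Sketch modeled on ipv4_frags_init_net():
 *
 *      net->ipv4.frags.high_thresh = 256 * 1024;
 *      net->ipv4.frags.low_thresh  = 192 * 1024;
 *      net->ipv4.frags.timeout     = IP_FRAG_TIME;
 *      inet_frags_init_net(&net->ipv4.frags);
 */
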
void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q,
                       struct inet_frags *f, int *work);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
                struct inet_frags *f, void *key, unsigned int hash)
        __releases(&f->lock);
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
                                   const char *prefix);

static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
        if (atomic_dec_and_test(&q->refcnt))
                inet_frag_destroy(q, f, NULL);
}

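/* Typical lookup/refcount pattern (sketch, modeled on ip_find() in
 * net/ipv4/ip_fragment.c): inet_frag_find() is called with f->lock read
 * held, drops that lock, and returns a queue with an elevated refcount
 * (or an ERR_PTR on hash-chain overflow); the caller releases the
 * reference with inet_frag_put() when done:
 *
 *      read_lock(&f->lock);
 *      hash = ...;                            protocol-specific key hash
 *      q = inet_frag_find(nf, f, &key, hash); releases f->lock
 *      if (!IS_ERR_OR_NULL(q)) {
 *              ... queue the fragment under q->lock ...
 *              inet_frag_put(q, f);
 *      }
 */
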
/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;

static inline int frag_mem_limit(struct netns_frags *nf)
{
        return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
{
        __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
{
        __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
}

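/* Accounting sketch (not part of this header): a protocol charges each
 * queued fragment's truesize against the per-netns counter and releases
 * it when the skb is dropped, e.g. as the IPv4 queueing code does:
 *
 *      add_frag_mem_limit(q, skb->truesize);   on enqueue
 *      ...
 *      sub_frag_mem_limit(q, skb->truesize);   when the skb is freed
 *
 * frag_mem_limit(nf) is then compared against nf->high_thresh to decide
 * whether new fragments may still be queued.
 */
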
static inline void init_frag_mem_limit(struct netns_frags *nf)
{
        percpu_counter_init(&nf->mem, 0);
}

static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
{
        unsigned int res;

        local_bh_disable();
        res = percpu_counter_sum_positive(&nf->mem);
        local_bh_enable();

        return res;
}

static inline void inet_frag_lru_move(struct inet_frag_queue *q)
{
        spin_lock(&q->net->lru_lock);
        if (!list_empty(&q->lru_list))
                list_move_tail(&q->lru_list, &q->net->lru_list);
        spin_unlock(&q->net->lru_lock);
}

static inline void inet_frag_lru_del(struct inet_frag_queue *q)
{
        spin_lock(&q->net->lru_lock);
        list_del_init(&q->lru_list);
        spin_unlock(&q->net->lru_lock);
}

static inline void inet_frag_lru_add(struct netns_frags *nf,
                                     struct inet_frag_queue *q)
{
        spin_lock(&nf->lru_lock);
        list_add_tail(&q->lru_list, &nf->lru_list);
        spin_unlock(&nf->lru_lock);
}

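/* LRU lifecycle sketch (illustrative): a queue is added to the
 * per-netns LRU when it is created, moved to the tail whenever a new
 * fragment arrives (it was used recently), and unlinked when it is
 * killed or evicted:
 *
 *      inet_frag_lru_add(nf, q);   on queue creation
 *      inet_frag_lru_move(q);      on each new fragment
 *      inet_frag_lru_del(q);       on kill/eviction
 *
 * The evictor reclaims memory by freeing queues from the head of
 * nf->lru_list, i.e. the least recently used ones first.
 */
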
/* RFC 3168 support:
 * We want to check the ECN values of all fragments, to detect invalid
 * combinations. In ipq->ecn, we store the OR value of each
 * ip4_frag_ecn() fragment value.
 */
#define IPFRAG_ECN_NOT_ECT      0x01 /* one frag had ECN_NOT_ECT */
#define IPFRAG_ECN_ECT_1        0x02 /* one frag had ECN_ECT_1 */
#define IPFRAG_ECN_ECT_0        0x04 /* one frag had ECN_ECT_0 */
#define IPFRAG_ECN_CE           0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];

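/* ECN handling sketch (modeled on the IPv4 reassembly code): each
 * fragment's ECN field is folded into the queue as one of the
 * IPFRAG_ECN_* bits above, and at reassembly time the OR of all seen
 * values indexes ip_frag_ecn_table[] to get the final ToS bits, or 0xff
 * if the combination is invalid and the datagram must be dropped:
 *
 *      qp->ecn |= 1 << (iph->tos & INET_ECN_MASK);   per fragment
 *      ...
 *      ecn = ip_frag_ecn_table[qp->ecn];             at reassembly
 *      if (ecn == 0xff)
 *              ... drop, invalid ECN combination ...
 */
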
#endif