/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>

#include <net/sock.h>

struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;

struct request_sock_ops {
	int		family;
	int		obj_size;
	struct kmem_cache	*slab;
	char		*slab_name;
	int		(*rtx_syn_ack)(const struct sock *sk,
				       struct request_sock *req);
	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req);
	void		(*send_reset)(const struct sock *sk,
				      struct sk_buff *skb);
	void		(*destructor)(struct request_sock *req);
	void		(*syn_ack_timeout)(const struct request_sock *req);
};
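
/* Illustrative sketch (not part of this header): a protocol provides one
 * static ops table and points every request at it. TCP's IPv4 table, for
 * example, looks roughly like:
 *
 *	struct request_sock_ops tcp_request_sock_ops __read_mostly = {
 *		.family		 = PF_INET,
 *		.obj_size	 = sizeof(struct tcp_request_sock),
 *		.rtx_syn_ack	 = tcp_rtx_synack,
 *		.send_ack	 = tcp_v4_reqsk_send_ack,
 *		.destructor	 = tcp_v4_reqsk_destructor,
 *		.send_reset	 = tcp_v4_send_reset,
 *		.syn_ack_timeout = tcp_syn_ack_timeout,
 *	};
 */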

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);

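/* Illustrative note: inet_rtx_syn_ack() is a thin out-of-line wrapper that
 * calls rsk_ops->rtx_syn_ack() and, on success, increments req->num_retrans.
 */
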
/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
	struct sock_common		__req_common;
#define rsk_refcnt			__req_common.skc_refcnt
#define rsk_hash			__req_common.skc_hash

	struct request_sock		*dl_next;
	struct sock			*rsk_listener;
	u16				mss;
	u8				num_retrans; /* number of retransmits */
	u8				cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
	u8				num_timeout:7; /* number of timeouts */
	/* The following two fields can be easily recomputed I think -AK */
	u32				window_clamp; /* window clamp at creation time */
	u32				rcv_wnd;	  /* rcv_wnd offered first time */
	u32				ts_recent;
	struct timer_list		rsk_timer;
	const struct request_sock_ops	*rsk_ops;
	struct sock			*sk;
	u32				*saved_syn;
	u32				secid;
	u32				peer_secid;
};
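
/* A request_sock begins with a struct sock_common, so it can masquerade
 * as a minimal struct sock; the casts in inet_reqsk() and req_to_sk()
 * below rely on this layout.
 */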

static inline struct request_sock *inet_reqsk(struct sock *sk)
{
	return (struct request_sock *)sk;
}

static inline struct sock *req_to_sk(struct request_sock *req)
{
	return (struct sock *)req;
}

static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
{
	struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);

	if (req) {
		req->rsk_ops = ops;
		sock_hold(sk_listener);
		req->rsk_listener = sk_listener;
		req_to_sk(req)->sk_prot = sk_listener->sk_prot;
		sk_node_init(&req_to_sk(req)->sk_node);
		req->saved_syn = NULL;
		/* Following is temporary. It is coupled with debugging
		 * helpers in reqsk_put() & reqsk_free()
		 */
		atomic_set(&req->rsk_refcnt, 0);
	}
	return req;
}
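
/* Usage sketch (illustrative, assuming a TCP listener "sk"):
 *
 *	struct request_sock *req = reqsk_alloc(&tcp_request_sock_ops, sk);
 *
 *	if (req) {
 *		... fill in mss, rcv_wnd, ts_recent, ...
 *		reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
 *	}
 *
 * Note that reqsk_alloc() takes a reference on the listener via sock_hold();
 * reqsk_free() drops it, so a request never outlives its listener.
 */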

static inline void reqsk_free(struct request_sock *req)
{
	/* temporary debugging */
	WARN_ON_ONCE(atomic_read(&req->rsk_refcnt) != 0);

	req->rsk_ops->destructor(req);
	if (req->rsk_listener)
		sock_put(req->rsk_listener);
	kfree(req->saved_syn);
	kmem_cache_free(req->rsk_ops->slab, req);
}

static inline void reqsk_put(struct request_sock *req)
{
	if (atomic_dec_and_test(&req->rsk_refcnt))
		reqsk_free(req);
}
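
/* Illustrative note: reqsk_put() is the normal way to drop a reference;
 * calling reqsk_free() directly is only valid while the refcount is still
 * zero, matching the WARN_ON_ONCE() debugging check above.
 */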

extern int sysctl_max_syn_backlog;

/*
 * For a TCP Fast Open listener -
 *	lock - protects access to all the reqsks, which are co-owned by
 *		the listener and the child socket.
 *	qlen - pending TFO requests (still in TCP_SYN_RECV).
 *	max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 *	XXX (TFO) - ideally these fields could be made part of the
 *	"listen_sock" structure above. But there is some implementation
 *	difficulty: listen_sock is part of request_sock_queue and hence is
 *	freed when a listener is stopped, while TFO related fields may
 *	continue to be accessed even after a listener is closed, until its
 *	sk_refcnt drops to 0 implying no more outstanding TFO reqs. One
 *	solution is to keep listen_opt around until sk_refcnt drops to 0.
 *	But there is some other complexity that needs to be resolved. E.g.,
 *	a listener can be disabled temporarily through
 *	shutdown()->tcp_disconnect(), and re-enabled later.
 */
struct fastopen_queue {
	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
						 * This is part of the defense
						 * against spoofing attack.
						 */
	spinlock_t	lock;
	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
	int		max_qlen;	/* != 0 iff TFO is currently enabled */
};
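
/* Illustrative sketch (not part of this header): Fast Open code treats
 * max_qlen as the enable switch, along the lines of
 *
 *	if (queue->fastopenq.max_qlen == 0)
 *		goto drop;		(TFO disabled on this listener)
 *
 * so disabling TFO is just a matter of zeroing max_qlen.
 */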

/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @rskq_defer_accept - User waits for some data after accept()
 */
struct request_sock_queue {
	spinlock_t		rskq_lock;
	u8			rskq_defer_accept;

	u32			synflood_warned;
	atomic_t		qlen;
	atomic_t		young;

	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
					     * if TFO is enabled.
					     */
};

void reqsk_queue_alloc(struct request_sock_queue *queue);

void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset);

static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
{
	return queue->rskq_accept_head == NULL;
}
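
/* Illustrative note: reqsk_queue_empty() is a single pointer load, so it
 * can serve as a cheap "anything to accept?" probe without taking
 * rskq_lock; reqsk_queue_remove() below does the locked dequeue.
 */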

static inline void reqsk_queue_add(struct request_sock_queue *queue,
				   struct request_sock *req,
				   struct sock *parent,
				   struct sock *child)
{
	spin_lock(&queue->rskq_lock);
	req->sk = child;
	sk_acceptq_added(parent);

	if (queue->rskq_accept_head == NULL)
		queue->rskq_accept_head = req;
	else
		queue->rskq_accept_tail->dl_next = req;

	queue->rskq_accept_tail = req;
	req->dl_next = NULL;
	spin_unlock(&queue->rskq_lock);
}

static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
						      struct sock *parent)
{
	struct request_sock *req;

	spin_lock_bh(&queue->rskq_lock);
	req = queue->rskq_accept_head;
	if (req) {
		sk_acceptq_removed(parent);
		queue->rskq_accept_head = req->dl_next;
		if (queue->rskq_accept_head == NULL)
			queue->rskq_accept_tail = NULL;
	}
	spin_unlock_bh(&queue->rskq_lock);
	return req;
}
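
/* Illustrative note: reqsk_queue_add() runs in softirq context when a
 * child socket is created (hence plain spin_lock()), while
 * reqsk_queue_remove() runs in process context from accept() (hence
 * spin_lock_bh()); rskq_lock lets the two sides share the FIFO.
 */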

static inline void reqsk_queue_removed(struct request_sock_queue *queue,
				       const struct request_sock *req)
{
	if (req->num_timeout == 0)
		atomic_dec(&queue->young);
	atomic_dec(&queue->qlen);
}

static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
	atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->qlen);
}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->young);
}
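
/* Illustrative note: "young" counts requests whose SYNACK has not yet
 * timed out (num_timeout == 0, see reqsk_queue_removed()); the SYN-flood
 * handling code compares it with qlen to decide how aggressively to drop
 * old requests.
 */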

#endif /* _REQUEST_SOCK_H */