/*
 * ip_vs_app.c: Application module support for IPVS
 *
 * Version:	$Id: ip_vs_app.c,v 1.17 2003/03/22 06:31:21 wensong Exp $
 *
 * Authors:	Wensong Zhang <wensong@linuxvirtualserver.org>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Most code here is taken from ip_masq_app.c in kernel 2.2. The difference
 * is that ip_vs_app module handles the reverse direction (incoming requests
 * and outgoing responses).
 *
 *		IP_MASQ_APP application masquerading module
 *
 * Author:	Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <asm/system.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <net/ip_vs.h>

EXPORT_SYMBOL(register_ip_vs_app);
EXPORT_SYMBOL(unregister_ip_vs_app);
EXPORT_SYMBOL(register_ip_vs_app_inc);
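
/*
 * These exported symbols form the registration interface that application
 * helper modules (ip_vs_ftp being the in-tree example) use to hook their
 * packet mangling into IPVS connections; see the usage sketch after
 * unregister_ip_vs_app() below.
 */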

/* ipvs application list head */
static LIST_HEAD(ip_vs_app_list);
static DEFINE_MUTEX(__ip_vs_app_mutex);


/*
 *	Get an ip_vs_app object
 */
static inline int ip_vs_app_get(struct ip_vs_app *app)
{
	/* test and get the module atomically */
	if (app->module)
		return try_module_get(app->module);
	else
		return 1;
}


static inline void ip_vs_app_put(struct ip_vs_app *app)
{
	if (app->module)
		module_put(app->module);
}


/*
 *	Allocate/initialize app incarnation and register it in proto apps.
 */
static int
ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
{
	struct ip_vs_protocol *pp;
	struct ip_vs_app *inc;
	int ret;

	if (!(pp = ip_vs_proto_get(proto)))
		return -EPROTONOSUPPORT;

	if (!pp->unregister_app)
		return -EOPNOTSUPP;

	inc = kmemdup(app, sizeof(*inc), GFP_KERNEL);
	if (!inc)
		return -ENOMEM;
	INIT_LIST_HEAD(&inc->p_list);
	INIT_LIST_HEAD(&inc->incs_list);
	inc->app = app;
	inc->port = htons(port);
	atomic_set(&inc->usecnt, 0);

	if (app->timeouts) {
		inc->timeout_table =
			ip_vs_create_timeout_table(app->timeouts,
						   app->timeouts_size);
		if (!inc->timeout_table) {
			ret = -ENOMEM;
			goto out;
		}
	}

	ret = pp->register_app(inc);
	if (ret)
		goto out;

	list_add(&inc->a_list, &app->incs_list);
	IP_VS_DBG(9, "%s application %s:%u registered\n",
		  pp->name, inc->name, inc->port);

	return 0;

  out:
	kfree(inc->timeout_table);
	kfree(inc);
	return ret;
}


/*
 *	Release app incarnation
 */
static void
ip_vs_app_inc_release(struct ip_vs_app *inc)
{
	struct ip_vs_protocol *pp;

	if (!(pp = ip_vs_proto_get(inc->protocol)))
		return;

	if (pp->unregister_app)
		pp->unregister_app(inc);

	IP_VS_DBG(9, "%s App %s:%u unregistered\n",
		  pp->name, inc->name, inc->port);

	list_del(&inc->a_list);

	kfree(inc->timeout_table);
	kfree(inc);
}


/*
 *	Get reference to app inc (only called from softirq)
 *
 */
int ip_vs_app_inc_get(struct ip_vs_app *inc)
{
	int result;

	atomic_inc(&inc->usecnt);
	if (unlikely((result = ip_vs_app_get(inc->app)) != 1))
		atomic_dec(&inc->usecnt);
	return result;
}


/*
 *	Put the app inc (only called from timer or net softirq)
 */
void ip_vs_app_inc_put(struct ip_vs_app *inc)
{
	ip_vs_app_put(inc->app);
	atomic_dec(&inc->usecnt);
}


/*
 *	Register an application incarnation in protocol applications
 */
int
register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
{
	int result;

	mutex_lock(&__ip_vs_app_mutex);

	result = ip_vs_app_inc_new(app, proto, port);

	mutex_unlock(&__ip_vs_app_mutex);

	return result;
}


/*
 *	ip_vs_app registration routine
 */
int register_ip_vs_app(struct ip_vs_app *app)
{
	/* increase the module use count */
	ip_vs_use_count_inc();

	mutex_lock(&__ip_vs_app_mutex);

	list_add(&app->a_list, &ip_vs_app_list);

	mutex_unlock(&__ip_vs_app_mutex);

	return 0;
}


/*
 *	ip_vs_app unregistration routine
 *	We are sure there are no app incarnations attached to services
 */
void unregister_ip_vs_app(struct ip_vs_app *app)
{
	struct ip_vs_app *inc, *nxt;

	mutex_lock(&__ip_vs_app_mutex);

	list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
		ip_vs_app_inc_release(inc);
	}

	list_del(&app->a_list);

	mutex_unlock(&__ip_vs_app_mutex);

	/* decrease the module use count */
	ip_vs_use_count_dec();
}
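
/*
 * Usage sketch (illustrative only, not part of this file): an application
 * helper such as ip_vs_ftp typically fills in a static struct ip_vs_app
 * and registers the application plus one incarnation per configured port
 * from its module init function.  The identifiers my_app, my_pkt_in,
 * my_pkt_out and my_port below are hypothetical:
 *
 *	static struct ip_vs_app my_app = {
 *		.name		= "myproto",
 *		.protocol	= IPPROTO_TCP,
 *		.module		= THIS_MODULE,
 *		.incs_list	= LIST_HEAD_INIT(my_app.incs_list),
 *		.pkt_out	= my_pkt_out,
 *		.pkt_in		= my_pkt_in,
 *	};
 *
 *	ret = register_ip_vs_app(&my_app);
 *	if (!ret) {
 *		ret = register_ip_vs_app_inc(&my_app, IPPROTO_TCP, my_port);
 *		if (ret)
 *			unregister_ip_vs_app(&my_app);
 *	}
 */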


/*
 *	Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
 */
int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp)
{
	return pp->app_conn_bind(cp);
}


/*
 *	Unbind cp from application incarnation (called by cp destructor)
 */
void ip_vs_unbind_app(struct ip_vs_conn *cp)
{
	struct ip_vs_app *inc = cp->app;

	if (!inc)
		return;

	if (inc->unbind_conn)
		inc->unbind_conn(inc, cp);
	if (inc->done_conn)
		inc->done_conn(inc, cp);
	ip_vs_app_inc_put(inc);
	cp->app = NULL;
}


/*
 *	Fixes th->seq based on ip_vs_seq info.
 */
static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
{
	__u32 seq = ntohl(th->seq);

	/*
	 *	Adjust seq with delta-offset for all packets after
	 *	the most recent resized pkt seq and with previous_delta offset
	 *	for all packets before most recent resized pkt seq.
	 */
	if (vseq->delta || vseq->previous_delta) {
		if (after(seq, vseq->init_seq)) {
			th->seq = htonl(seq + vseq->delta);
			IP_VS_DBG(9, "vs_fix_seq(): added delta (%d) to seq\n",
				  vseq->delta);
		} else {
			th->seq = htonl(seq + vseq->previous_delta);
			IP_VS_DBG(9, "vs_fix_seq(): added previous_delta "
				  "(%d) to seq\n", vseq->previous_delta);
		}
	}
}


/*
 *	Fixes th->ack_seq based on ip_vs_seq info.
 */
static inline void
vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
{
	__u32 ack_seq = ntohl(th->ack_seq);

	/*
	 *	Adjust ack_seq with delta-offset: packets acknowledging data
	 *	after the most recently resized packet use delta, packets
	 *	acknowledging data before it use previous_delta.
	 */
	if (vseq->delta || vseq->previous_delta) {
		/* ack_seq is the sequence number of the next octet the
		   sender expects to receive, so compare it with
		   init_seq+delta */
		if (after(ack_seq, vseq->init_seq + vseq->delta)) {
			th->ack_seq = htonl(ack_seq - vseq->delta);
			IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted delta "
				  "(%d) from ack_seq\n", vseq->delta);

		} else {
			th->ack_seq = htonl(ack_seq - vseq->previous_delta);
			IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted "
				  "previous_delta (%d) from ack_seq\n",
				  vseq->previous_delta);
		}
	}
}


/*
 *	Updates ip_vs_seq if pkt has been resized
 *	Assumes already checked proto==IPPROTO_TCP and diff!=0.
 */
static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
				 unsigned flag, __u32 seq, int diff)
{
	/* spinlock is to keep updating cp->flags atomic */
	spin_lock(&cp->lock);
	if (!(cp->flags & flag) || after(seq, vseq->init_seq)) {
		vseq->previous_delta = vseq->delta;
		vseq->delta += diff;
		vseq->init_seq = seq;
		cp->flags |= flag;
	}
	spin_unlock(&cp->lock);
}
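
/*
 * Worked example (numbers are illustrative only): suppose an application
 * helper's pkt_out hook grows an outgoing packet whose original sequence
 * number is S by 7 bytes.  app_tcp_pkt_out() then calls
 * vs_seq_update(cp, &cp->out_seq, IP_VS_CONN_F_OUT_SEQ, S, 7), which
 * records init_seq = S and delta = 7.  From that point on:
 *
 *   - outgoing packets with a sequence number after S get seq + 7 applied
 *     by vs_fix_seq(&cp->out_seq, th), so the receiver sees a consistent
 *     stream;
 *   - incoming packets whose ack_seq lies after S + 7 get ack_seq - 7
 *     applied by vs_fix_ack_seq(&cp->out_seq, th), so the original sender
 *     sees acknowledgements in its own, unresized sequence space;
 *   - packets still referring to data before the resized segment are
 *     adjusted with previous_delta instead.
 */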

static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb,
				  struct ip_vs_app *app)
{
	int diff;
	const unsigned int tcp_offset = ip_hdrlen(*pskb);
	struct tcphdr *th;
	__u32 seq;

	if (!ip_vs_make_skb_writable(pskb, tcp_offset + sizeof(*th)))
		return 0;

	th = (struct tcphdr *)(skb_network_header(*pskb) + tcp_offset);

	/*
	 *	Remember seq number in case this pkt gets resized
	 */
	seq = ntohl(th->seq);

	/*
	 *	Fix seq stuff if flagged as so.
	 */
	if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
		vs_fix_seq(&cp->out_seq, th);
	if (cp->flags & IP_VS_CONN_F_IN_SEQ)
		vs_fix_ack_seq(&cp->in_seq, th);

	/*
	 *	Call private output hook function
	 */
	if (app->pkt_out == NULL)
		return 1;

	if (!app->pkt_out(app, cp, pskb, &diff))
		return 0;

	/*
	 *	Update ip_vs seq stuff if len has changed.
	 */
	if (diff != 0)
		vs_seq_update(cp, &cp->out_seq,
			      IP_VS_CONN_F_OUT_SEQ, seq, diff);

	return 1;
}

/*
 *	Output pkt hook. Will call bound ip_vs_app specific function
 *	called by ipvs packet handler, assumes previously checked cp!=NULL
 *	returns false if it can't handle packet (oom)
 */
int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb)
{
	struct ip_vs_app *app;

	/*
	 *	check if application module is bound to
	 *	this ip_vs_conn.
	 */
	if ((app = cp->app) == NULL)
		return 1;

	/* TCP is complicated */
	if (cp->protocol == IPPROTO_TCP)
		return app_tcp_pkt_out(cp, pskb, app);

	/*
	 *	Call private output hook function
	 */
	if (app->pkt_out == NULL)
		return 1;

	return app->pkt_out(app, cp, pskb, NULL);
}


static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb,
				 struct ip_vs_app *app)
{
	int diff;
	const unsigned int tcp_offset = ip_hdrlen(*pskb);
	struct tcphdr *th;
	__u32 seq;

	if (!ip_vs_make_skb_writable(pskb, tcp_offset + sizeof(*th)))
		return 0;

	th = (struct tcphdr *)(skb_network_header(*pskb) + tcp_offset);

	/*
	 *	Remember seq number in case this pkt gets resized
	 */
	seq = ntohl(th->seq);

	/*
	 *	Fix seq stuff if flagged as so.
	 */
	if (cp->flags & IP_VS_CONN_F_IN_SEQ)
		vs_fix_seq(&cp->in_seq, th);
	if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
		vs_fix_ack_seq(&cp->out_seq, th);

	/*
	 *	Call private input hook function
	 */
	if (app->pkt_in == NULL)
		return 1;

	if (!app->pkt_in(app, cp, pskb, &diff))
		return 0;

	/*
	 *	Update ip_vs seq stuff if len has changed.
	 */
	if (diff != 0)
		vs_seq_update(cp, &cp->in_seq,
			      IP_VS_CONN_F_IN_SEQ, seq, diff);

	return 1;
}

/*
 *	Input pkt hook. Will call bound ip_vs_app specific function
 *	called by ipvs packet handler, assumes previously checked cp!=NULL.
 *	returns false if can't handle packet (oom).
 */
int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb)
{
	struct ip_vs_app *app;

	/*
	 *	check if application module is bound to
	 *	this ip_vs_conn.
	 */
	if ((app = cp->app) == NULL)
		return 1;

	/* TCP is complicated */
	if (cp->protocol == IPPROTO_TCP)
		return app_tcp_pkt_in(cp, pskb, app);

	/*
	 *	Call private input hook function
	 */
	if (app->pkt_in == NULL)
		return 1;

	return app->pkt_in(app, cp, pskb, NULL);
}


#ifdef CONFIG_PROC_FS
/*
 *	/proc/net/ip_vs_app entry function
 */

static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
{
	struct ip_vs_app *app, *inc;

	list_for_each_entry(app, &ip_vs_app_list, a_list) {
		list_for_each_entry(inc, &app->incs_list, a_list) {
			if (pos-- == 0)
				return inc;
		}
	}
	return NULL;

}

static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
{
	mutex_lock(&__ip_vs_app_mutex);

	return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_vs_app *inc, *app;
	struct list_head *e;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip_vs_app_idx(0);

	inc = v;
	app = inc->app;

	if ((e = inc->a_list.next) != &app->incs_list)
		return list_entry(e, struct ip_vs_app, a_list);

	/* go on to next application */
	for (e = app->a_list.next; e != &ip_vs_app_list; e = e->next) {
		app = list_entry(e, struct ip_vs_app, a_list);
		list_for_each_entry(inc, &app->incs_list, a_list) {
			return inc;
		}
	}
	return NULL;
}

static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
{
	mutex_unlock(&__ip_vs_app_mutex);
}

static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "prot port    usecnt name\n");
	else {
		const struct ip_vs_app *inc = v;

		seq_printf(seq, "%-3s  %-7u %-6d %-17s\n",
			   ip_vs_proto_name(inc->protocol),
			   ntohs(inc->port),
			   atomic_read(&inc->usecnt),
			   inc->name);
	}
	return 0;
}
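
/*
 * Sample /proc/net/ip_vs_app output, assuming (hypothetically) that an ftp
 * helper has registered one incarnation on port 21 and no connection is
 * currently bound to it:
 *
 *	prot port    usecnt name
 *	TCP  21      0      ftp
 */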

static const struct seq_operations ip_vs_app_seq_ops = {
	.start = ip_vs_app_seq_start,
	.next  = ip_vs_app_seq_next,
	.stop  = ip_vs_app_seq_stop,
	.show  = ip_vs_app_seq_show,
};

static int ip_vs_app_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ip_vs_app_seq_ops);
}

static const struct file_operations ip_vs_app_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ip_vs_app_open,
	.read	 = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
#endif


/*
 *	Replace a segment of data with a new segment
 */
int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
		      char *o_buf, int o_len, char *n_buf, int n_len)
{
	int diff;
	int o_offset;
	int o_left;

	EnterFunction(9);

	diff = n_len - o_len;
	o_offset = o_buf - (char *)skb->data;
	/* The length of left data after o_buf+o_len in the skb data */
	o_left = skb->len - (o_offset + o_len);

	if (diff <= 0) {
		memmove(o_buf + n_len, o_buf + o_len, o_left);
		memcpy(o_buf, n_buf, n_len);
		skb_trim(skb, skb->len + diff);
	} else if (diff <= skb_tailroom(skb)) {
		skb_put(skb, diff);
		memmove(o_buf + n_len, o_buf + o_len, o_left);
		memcpy(o_buf, n_buf, n_len);
	} else {
		if (pskb_expand_head(skb, skb_headroom(skb), diff, pri))
			return -ENOMEM;
		skb_put(skb, diff);
		memmove(skb->data + o_offset + n_len,
			skb->data + o_offset + o_len, o_left);
		skb_copy_to_linear_data_offset(skb, o_offset, n_buf, n_len);
	}

	/* must update the iph total length here */
	ip_hdr(skb)->tot_len = htons(skb->len);

	LeaveFunction(9);
	return 0;
}
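
/*
 * Example (sketch only, not taken from this file): an application helper
 * that rewrites an address embedded in the TCP payload can substitute the
 * new text in place.  old_str/new_str and the two lengths are hypothetical
 * locals; GFP_ATOMIC is used because the pkt_in/pkt_out hooks run from
 * packet processing context:
 *
 *	if (ip_vs_skb_replace(skb, GFP_ATOMIC, old_str, old_len,
 *			      new_str, new_len))
 *		return 0;
 *
 * On failure the hook reports that it cannot handle the packet (OOM); on
 * success only the IP total length has been updated here, checksums are
 * not recomputed by this function.
 */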


int ip_vs_app_init(void)
{
	/* we will replace it with proc_net_ipvs_create() soon */
	proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
	return 0;
}


void ip_vs_app_cleanup(void)
{
	proc_net_remove(&init_net, "ip_vs_app");
}