/*
 * IPVS:        Locality-Based Least-Connection with Replication scheduler
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Julian Anastasov        :    Added the missing (dest->weight > 0)
 *                                  condition in ip_vs_dest_set_max().
 *
 */

/*
 * The lblc/r algorithm is as follows (pseudo code):
 *
 *       if serverSet[dest_ip] is null then
 *               n, serverSet[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- {least-conn (alive) node in serverSet[dest_ip]};
 *               if (n is null) OR
 *                  (n.conns>n.weight AND
 *                   there is a node m with m.conns<m.weight/2) then
 *                       n <- {weighted least-conn node};
 *                       add n to serverSet[dest_ip];
 *       if |serverSet[dest_ip]| > 1 AND
 *          now - serverSet[dest_ip].lastMod > T then
 *               m <- {most conn node in serverSet[dest_ip]};
 *               remove m from serverSet[dest_ip];
 *       if serverSet[dest_ip] changed then
 *               serverSet[dest_ip].lastMod <- now;
 *
 *       return n;
 *
 */
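
/*
 * Illustrative walk-through (not part of the original pseudo code): the
 * first packet for destination IP D finds serverSet[D] empty, so the
 * weighted least-connection server n is chosen and serverSet[D] becomes
 * {n}. If n later exceeds its weight in connections while some server m
 * has m.conns < m.weight/2, another weighted least-connection pick is
 * added to serverSet[D]. Once the set holds more than one server and has
 * been unchanged for T, the busiest member is dropped again, shrinking
 * the replication set back toward a single locality-preferred server.
 */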

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

#include <net/ip_vs.h>


/*
 * Garbage collection of stale IPVS lblcr entries, used when the
 * table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)

#define DEFAULT_EXPIRATION      (24*60*60*HZ)

/*
 * Full expiration check: when no partial expiration check (garbage
 * collection) has run for half an hour, do a full expiration check to
 * collect stale entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30

/*
 *     for IPVS lblcr entry hash table
 */
#ifndef CONFIG_IP_VS_LBLCR_TAB_BITS
#define CONFIG_IP_VS_LBLCR_TAB_BITS      10
#endif
#define IP_VS_LBLCR_TAB_BITS     CONFIG_IP_VS_LBLCR_TAB_BITS
#define IP_VS_LBLCR_TAB_SIZE     (1 << IP_VS_LBLCR_TAB_BITS)
#define IP_VS_LBLCR_TAB_MASK     (IP_VS_LBLCR_TAB_SIZE - 1)
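
/*
 * Example (illustrative): with the default of 10 bits the table has
 * 1 << 10 = 1024 buckets and IP_VS_LBLCR_TAB_MASK is 0x3ff, so a hash
 * value is reduced to a bucket index with a single bitwise AND.
 */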


/*
 *     IPVS destination set structure and operations
 */
struct ip_vs_dest_set_elem {
        struct list_head        list;   /* list link */
        struct ip_vs_dest       *dest;  /* destination server */
};

struct ip_vs_dest_set {
        atomic_t                size;           /* set size */
        unsigned long           lastmod;        /* last modified time */
        struct list_head        list;           /* destination list */
        rwlock_t                lock;           /* lock for this list */
};


static struct ip_vs_dest_set_elem *
ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
        struct ip_vs_dest_set_elem *e;

        list_for_each_entry(e, &set->list, list) {
                if (e->dest == dest)
                        /* already exists */
                        return NULL;
        }

        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (e == NULL)
                return NULL;

        atomic_inc(&dest->refcnt);
        e->dest = dest;

        list_add(&e->list, &set->list);
        atomic_inc(&set->size);

        set->lastmod = jiffies;
        return e;
}

static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
        struct ip_vs_dest_set_elem *e;

        list_for_each_entry(e, &set->list, list) {
                if (e->dest == dest) {
                        /* HIT */
                        atomic_dec(&set->size);
                        set->lastmod = jiffies;
                        atomic_dec(&e->dest->refcnt);
                        list_del(&e->list);
                        kfree(e);
                        break;
                }
        }
}

static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
{
        struct ip_vs_dest_set_elem *e, *ep;

        write_lock(&set->lock);
        list_for_each_entry_safe(e, ep, &set->list, list) {
                /*
                 * We don't kfree dest because it is referenced either
                 * by its service or by the trash dest list.
                 */
                atomic_dec(&e->dest->refcnt);
                list_del(&e->list);
                kfree(e);
        }
        write_unlock(&set->lock);
}

/* get weighted least-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
{
        register struct ip_vs_dest_set_elem *e;
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        if (set == NULL)
                return NULL;

        /* select the first destination server whose weight > 0 */
        list_for_each_entry(e, &set->list, list) {
                least = e->dest;
                if (least->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                if ((atomic_read(&least->weight) > 0)
                    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
                        loh = ip_vs_dest_conn_overhead(least);
                        goto nextstage;
                }
        }
        return NULL;

        /* find the destination with the weighted least load */
  nextstage:
        list_for_each_entry(e, &set->list, list) {
                dest = e->dest;
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = ip_vs_dest_conn_overhead(dest);
                if ((loh * atomic_read(&dest->weight) >
                     doh * atomic_read(&least->weight))
                    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "%s(): server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      __func__,
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
                      atomic_read(&least->refcnt),
                      atomic_read(&least->weight), loh);
        return least;
}


/* get weighted most-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
{
        register struct ip_vs_dest_set_elem *e;
        struct ip_vs_dest *dest, *most;
        int moh, doh;

        if (set == NULL)
                return NULL;

        /* select the first destination server whose weight > 0 */
        list_for_each_entry(e, &set->list, list) {
                most = e->dest;
                if (atomic_read(&most->weight) > 0) {
                        moh = ip_vs_dest_conn_overhead(most);
                        goto nextstage;
                }
        }
        return NULL;

        /* find the destination with the weighted most load */
  nextstage:
        list_for_each_entry(e, &set->list, list) {
                dest = e->dest;
                doh = ip_vs_dest_conn_overhead(dest);
                /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
                if ((moh * atomic_read(&dest->weight) <
                     doh * atomic_read(&most->weight))
                    && (atomic_read(&dest->weight) > 0)) {
                        most = dest;
                        moh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "%s(): server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      __func__,
                      IP_VS_DBG_ADDR(most->af, &most->addr), ntohs(most->port),
                      atomic_read(&most->activeconns),
                      atomic_read(&most->refcnt),
                      atomic_read(&most->weight), moh);
        return most;
}


/*
 *      IPVS lblcr entry represents an association between destination
 *      IP address and its destination server set
 */
struct ip_vs_lblcr_entry {
        struct list_head        list;
        int                     af;             /* address family */
        union nf_inet_addr      addr;           /* destination IP address */
        struct ip_vs_dest_set   set;            /* destination server set */
        unsigned long           lastuse;        /* last used time */
};


/*
 *      IPVS lblcr hash table
 */
struct ip_vs_lblcr_table {
        struct list_head        bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */
        atomic_t                entries;        /* number of entries */
        int                     max_size;       /* maximum size of entries */
        struct timer_list       periodic_timer; /* collect stale entries */
        int                     rover;          /* rover for expire check */
        int                     counter;        /* counter for no expire */
};


#ifdef CONFIG_SYSCTL
/*
 *      IPVS LBLCR sysctl table
 */

static ctl_table vs_vars_table[] = {
        {
                .procname       = "lblcr_expiration",
                .data           = NULL,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};
#endif
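
/*
 * The expiration timeout is tunable at run time, e.g. (illustrative,
 * 86400 seconds = one day, matching DEFAULT_EXPIRATION):
 *
 *      sysctl -w net.ipv4.vs.lblcr_expiration=86400
 *
 * proc_dointvec_jiffies converts between seconds in the sysctl
 * interface and jiffies in the stored value.
 */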

static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
        list_del(&en->list);
        ip_vs_dest_set_eraseall(&en->set);
        kfree(en);
}


/*
 *      Returns hash value for IPVS LBLCR entry
 */
static inline unsigned int
ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
{
        __be32 addr_fold = addr->ip;

#ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
                addr_fold = addr->ip6[0]^addr->ip6[1]^
                            addr->ip6[2]^addr->ip6[3];
#endif
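        /*
         * Multiplicative hash: 2654435761 is the 32-bit constant close
         * to 2^32/phi (the golden ratio) used for Knuth-style
         * multiplicative hashing; the mask keeps only the low
         * IP_VS_LBLCR_TAB_BITS bits as the bucket index.
         */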
        return (ntohl(addr_fold)*2654435761UL) & IP_VS_LBLCR_TAB_MASK;
}


/*
 *      Hash an entry in the ip_vs_lblcr_table.
 */
static void
ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
{
        unsigned int hash = ip_vs_lblcr_hashkey(en->af, &en->addr);

        list_add(&en->list, &tbl->bucket[hash]);
        atomic_inc(&tbl->entries);
}


/*
 *      Get ip_vs_lblcr_entry associated with supplied parameters. Called under
 *      read lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
                const union nf_inet_addr *addr)
{
        unsigned int hash = ip_vs_lblcr_hashkey(af, addr);
        struct ip_vs_lblcr_entry *en;

        list_for_each_entry(en, &tbl->bucket[hash], list)
                if (ip_vs_addr_equal(af, &en->addr, addr))
                        return en;

        return NULL;
}


/*
 * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
 * IP address to a server set. Called under write lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
                struct ip_vs_dest *dest)
{
        struct ip_vs_lblcr_entry *en;

        en = ip_vs_lblcr_get(dest->af, tbl, daddr);
        if (!en) {
                en = kmalloc(sizeof(*en), GFP_ATOMIC);
                if (!en)
                        return NULL;

                en->af = dest->af;
                ip_vs_addr_copy(dest->af, &en->addr, daddr);
                en->lastuse = jiffies;

                /* initialize its dest set */
                atomic_set(&(en->set.size), 0);
                INIT_LIST_HEAD(&en->set.list);
                rwlock_init(&en->set.lock);

                ip_vs_lblcr_hash(tbl, en);
        }

        write_lock(&en->set.lock);
        ip_vs_dest_set_insert(&en->set, dest);
        write_unlock(&en->set.lock);

        return en;
}


/*
 *      Flush all the entries of the specified table.
 */
static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
{
        int i;
        struct ip_vs_lblcr_entry *en, *nxt;

        /* No locking required, only called during cleanup. */
        for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
                        ip_vs_lblcr_free(en);
                }
        }
}

static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
{
#ifdef CONFIG_SYSCTL
        struct netns_ipvs *ipvs = net_ipvs(svc->net);

        return ipvs->sysctl_lblcr_expiration;
#else
        return DEFAULT_EXPIRATION;
#endif
}

static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        unsigned long now = jiffies;
        int i, j;
        struct ip_vs_lblcr_entry *en, *nxt;

        for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_after(en->lastuse +
                                       sysctl_lblcr_expiration(svc), now))
                                continue;

                        ip_vs_lblcr_free(en);
                        atomic_dec(&tbl->entries);
                }
                write_unlock(&svc->sched_lock);
        }
        tbl->rover = j;
}


/*
 *      Periodic timer handler for IPVS lblcr table
 *      It is used to collect stale entries when the number of entries
 *      exceeds the maximum size of the table.
 *
 *      Fixme: we probably need a more complicated algorithm to collect
 *             entries that have not been used for a long time even
 *             if the number of entries doesn't exceed the maximum size
 *             of the table.
 *      The full expiration check is for this purpose now.
 */
static void ip_vs_lblcr_check_expire(unsigned long data)
{
        struct ip_vs_service *svc = (struct ip_vs_service *) data;
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        unsigned long now = jiffies;
        int goal;
        int i, j;
        struct ip_vs_lblcr_entry *en, *nxt;

        if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
                /* do full expiration check */
                ip_vs_lblcr_full_check(svc);
                tbl->counter = 1;
                goto out;
        }

        if (atomic_read(&tbl->entries) <= tbl->max_size) {
                tbl->counter++;
                goto out;
        }

        goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
        if (goal > tbl->max_size/2)
                goal = tbl->max_size/2;
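        /*
         * Example (illustrative): with the default 1024-bucket table,
         * max_size is 16384; at 20000 entries the goal would be
         * (20000 - 16384)*4/3 = 4821 entries to reclaim, well under
         * the cap of max_size/2 = 8192.
         */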

        for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
                                continue;

                        ip_vs_lblcr_free(en);
                        atomic_dec(&tbl->entries);
                        goal--;
                }
                write_unlock(&svc->sched_lock);
                if (goal <= 0)
                        break;
        }
        tbl->rover = j;

  out:
        mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}

static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
{
        int i;
        struct ip_vs_lblcr_table *tbl;

        /*
         *    Allocate the ip_vs_lblcr_table for this service
         */
        tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
        if (tbl == NULL)
                return -ENOMEM;

        svc->sched_data = tbl;
        IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
                  "current service\n", sizeof(*tbl));

        /*
         *    Initialize the hash buckets
         */
        for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                INIT_LIST_HEAD(&tbl->bucket[i]);
        }
        tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
        tbl->rover = 0;
        tbl->counter = 1;

        /*
         *    Hook periodic timer for garbage collection
         */
        setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire,
                    (unsigned long)svc);
        mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);

        return 0;
}


static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;

        /* remove periodic timer */
        del_timer_sync(&tbl->periodic_timer);

        /* flush the table entries */
        ip_vs_lblcr_flush(tbl);

        /* release the table itself */
        kfree(tbl);
        IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
                  sizeof(*tbl));

        return 0;
}


static inline struct ip_vs_dest *
__ip_vs_lblcr_schedule(struct ip_vs_service *svc)
{
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        /*
         * We use the following formula to estimate the load:
         *                (dest overhead) / dest->weight
         *
         * Remember -- no floats in kernel mode!!!
         * The comparison of h1*w2 > h2*w1 is equivalent to that of
         *                h1/w1 > h2/w2
         * if every weight is larger than zero.
         *
         * The server with weight=0 is quiesced and will not receive any
         * new connection.
         */
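        /*
         * Worked example (illustrative): least has overhead 10 and
         * weight 2 (load 5.0); a candidate has overhead 8 and weight 1
         * (load 8.0). Cross-multiplying, 10*1 = 10 is not greater than
         * 8*2 = 16, so the candidate does not replace least.
         */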
        list_for_each_entry(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                if (atomic_read(&dest->weight) > 0) {
                        least = dest;
                        loh = ip_vs_dest_conn_overhead(least);
                        goto nextstage;
                }
        }
        return NULL;

        /*
         *    Find the destination with the least load.
         */
  nextstage:
        list_for_each_entry_continue(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = ip_vs_dest_conn_overhead(dest);
                if (loh * atomic_read(&dest->weight) >
                    doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "LBLCR: server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
                      atomic_read(&least->refcnt),
                      atomic_read(&least->weight), loh);

        return least;
}


/*
 *   If this destination server is overloaded and there is a less loaded
 *   server, then return true.
 */
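/*
 * Example (illustrative): a server with activeconns=5 and weight=3 is
 * considered overloaded; if any peer has activeconns=1 and weight=4
 * (1*2 < 4), is_overloaded() returns 1 and the scheduler falls back to
 * a fresh weighted least-connection pick.
 */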
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
        if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
                struct ip_vs_dest *d;

                list_for_each_entry(d, &svc->destinations, n_list) {
                        if (atomic_read(&d->activeconns)*2
                            < atomic_read(&d->weight)) {
                                return 1;
                        }
                }
        }
        return 0;
}


/*
 *    Locality-Based (weighted) Least-Connection with Replication scheduling
 */
static struct ip_vs_dest *
ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        struct ip_vs_iphdr iph;
        struct ip_vs_dest *dest = NULL;
        struct ip_vs_lblcr_entry *en;

        ip_vs_fill_iph_addr_only(svc->af, skb, &iph);

        IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

        /* First look in our cache */
        read_lock(&svc->sched_lock);
        en = ip_vs_lblcr_get(svc->af, tbl, &iph.daddr);
        if (en) {
                /* We only hold a read lock, but this is atomic */
                en->lastuse = jiffies;

                /* Get the least loaded destination */
                read_lock(&en->set.lock);
                dest = ip_vs_dest_set_min(&en->set);
                read_unlock(&en->set.lock);

                /* More than one destination + enough time passed by, cleanup */
                if (atomic_read(&en->set.size) > 1 &&
                    time_after(jiffies, en->set.lastmod +
                               sysctl_lblcr_expiration(svc))) {
                        struct ip_vs_dest *m;

                        write_lock(&en->set.lock);
                        m = ip_vs_dest_set_max(&en->set);
                        if (m)
                                ip_vs_dest_set_erase(&en->set, m);
                        write_unlock(&en->set.lock);
                }

                /* If the destination is not overloaded, use it */
                if (dest && !is_overloaded(dest, svc)) {
                        read_unlock(&svc->sched_lock);
                        goto out;
                }

                /* The cache entry is invalid, time to schedule */
                dest = __ip_vs_lblcr_schedule(svc);
                if (!dest) {
                        ip_vs_scheduler_err(svc, "no destination available");
                        read_unlock(&svc->sched_lock);
                        return NULL;
                }

                /* Update our cache entry */
                write_lock(&en->set.lock);
                ip_vs_dest_set_insert(&en->set, dest);
                write_unlock(&en->set.lock);
        }
        read_unlock(&svc->sched_lock);

        if (dest)
                goto out;

        /* No cache entry, time to schedule */
        dest = __ip_vs_lblcr_schedule(svc);
        if (!dest) {
                IP_VS_DBG(1, "no destination available\n");
                return NULL;
        }

        /* If we fail to create a cache entry, we'll just use the valid dest */
        write_lock(&svc->sched_lock);
        ip_vs_lblcr_new(tbl, &iph.daddr, dest);
        write_unlock(&svc->sched_lock);

  out:
        IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",
                      IP_VS_DBG_ADDR(svc->af, &iph.daddr),
                      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port));

        return dest;
}


/*
 *      IPVS LBLCR Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
{
        .name =                 "lblcr",
        .refcnt =               ATOMIC_INIT(0),
        .module =               THIS_MODULE,
        .n_list =               LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
        .init_service =         ip_vs_lblcr_init_svc,
        .done_service =         ip_vs_lblcr_done_svc,
        .schedule =             ip_vs_lblcr_schedule,
};
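
/*
 * Once this module is loaded, the scheduler can be attached to a virtual
 * service by name. A typical ipvsadm invocation (illustrative, with a
 * documentation-range virtual IP) would be:
 *
 *      ipvsadm -A -t 192.0.2.1:80 -s lblcr
 */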

/*
 *      per netns init.
 */
#ifdef CONFIG_SYSCTL
static int __net_init __ip_vs_lblcr_init(struct net *net)
{
        struct netns_ipvs *ipvs = net_ipvs(net);

        if (!ipvs)
                return -ENOENT;

        if (!net_eq(net, &init_net)) {
                ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
                                                sizeof(vs_vars_table),
                                                GFP_KERNEL);
                if (ipvs->lblcr_ctl_table == NULL)
                        return -ENOMEM;

                /* Don't export sysctls to unprivileged users */
                if (net->user_ns != &init_user_ns)
                        ipvs->lblcr_ctl_table[0].procname = NULL;
        } else
                ipvs->lblcr_ctl_table = vs_vars_table;
        ipvs->sysctl_lblcr_expiration = DEFAULT_EXPIRATION;
        ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;

        ipvs->lblcr_ctl_header =
                register_net_sysctl(net, "net/ipv4/vs", ipvs->lblcr_ctl_table);
        if (!ipvs->lblcr_ctl_header) {
                if (!net_eq(net, &init_net))
                        kfree(ipvs->lblcr_ctl_table);
                return -ENOMEM;
        }

        return 0;
}

static void __net_exit __ip_vs_lblcr_exit(struct net *net)
{
        struct netns_ipvs *ipvs = net_ipvs(net);

        unregister_net_sysctl_table(ipvs->lblcr_ctl_header);

        if (!net_eq(net, &init_net))
                kfree(ipvs->lblcr_ctl_table);
}

#else

static int __net_init __ip_vs_lblcr_init(struct net *net) { return 0; }
static void __net_exit __ip_vs_lblcr_exit(struct net *net) { }

#endif

static struct pernet_operations ip_vs_lblcr_ops = {
        .init = __ip_vs_lblcr_init,
        .exit = __ip_vs_lblcr_exit,
};

static int __init ip_vs_lblcr_init(void)
{
        int ret;

        ret = register_pernet_subsys(&ip_vs_lblcr_ops);
        if (ret)
                return ret;

        ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
        if (ret)
                unregister_pernet_subsys(&ip_vs_lblcr_ops);
        return ret;
}

static void __exit ip_vs_lblcr_cleanup(void)
{
        unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
        unregister_pernet_subsys(&ip_vs_lblcr_ops);
}


module_init(ip_vs_lblcr_init);
module_exit(ip_vs_lblcr_cleanup);
MODULE_LICENSE("GPL");