/*
 * IPVS:        Locality-Based Least-Connection scheduling module
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Martin Hamilton         :    fixed the terrible locking bugs
 *                                   *lock(tbl->lock) ==> *lock(&tbl->lock)
 *     Wensong Zhang           :    fixed the uninitialized tbl->lock bug
 *     Wensong Zhang           :    added a full expiration check to
 *                                   collect stale entries of 24+ hours when
 *                                   no partial expire check ran in a half hour
 *     Julian Anastasov        :    replaced del_timer call with del_timer_sync
 *                                   to avoid the possible race between the
 *                                   timer handler and the del_timer thread
 *                                   on SMP
 *
 */
/*
 * The lblc algorithm is as follows (pseudo code):
 *
 *       if cachenode[dest_ip] is null then
 *               n, cachenode[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- cachenode[dest_ip];
 *               if (n is dead) OR
 *                  (n.conns>n.weight AND
 *                   there is a node m with m.conns<m.weight/2) then
 *                 n, cachenode[dest_ip] <- {weighted least-conn node};
 *
 *       return n;
 *
 * Thanks must go to Wenzhuo Zhang for talking WCCP to me and pushing
 * me to write this module.
 */
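
/*
 * A quick worked example of the test above (illustrative numbers, not
 * from the original source): suppose cachenode[dest_ip] is node n with
 * n.conns = 10 and n.weight = 8, so n.conns > n.weight.  If another
 * node m has m.conns = 3 and m.weight = 8 (m.conns < m.weight/2 = 4),
 * the cache entry is reassigned to the weighted least-connection node;
 * otherwise n keeps serving dest_ip despite being loaded.
 */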

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>

#include <net/ip_vs.h>


/*
 * Parameters for garbage collection of stale IPVS lblc entries,
 * used when the table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)

#define DEFAULT_EXPIRATION      (24*60*60*HZ)

/*
 * Parameters for the full expiration check.
 * When there is no partial expiration check (garbage collection)
 * in a half hour, do a full expiration check to collect stale
 * entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30


/*
 *     for IPVS lblc entry hash table
 */
#ifndef CONFIG_IP_VS_LBLC_TAB_BITS
#define CONFIG_IP_VS_LBLC_TAB_BITS      10
#endif
#define IP_VS_LBLC_TAB_BITS     CONFIG_IP_VS_LBLC_TAB_BITS
#define IP_VS_LBLC_TAB_SIZE     (1 << IP_VS_LBLC_TAB_BITS)
#define IP_VS_LBLC_TAB_MASK     (IP_VS_LBLC_TAB_SIZE - 1)


/*
 *      IPVS lblc entry represents an association between destination
 *      IP address and its destination server
 */
struct ip_vs_lblc_entry {
	struct hlist_node       list;
	int                     af;             /* address family */
	union nf_inet_addr      addr;           /* destination IP address */
	struct ip_vs_dest __rcu *dest;          /* real server (cache) */
	unsigned long           lastuse;        /* last used time */
	struct rcu_head         rcu_head;
};


/*
 *      IPVS lblc hash table
 */
struct ip_vs_lblc_table {
	struct rcu_head         rcu_head;
	struct hlist_head __rcu bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */
	struct timer_list       periodic_timer; /* collect stale entries */
	atomic_t                entries;        /* number of entries */
	int                     max_size;       /* maximum size of entries */
	int                     rover;          /* rover for expire check */
	int                     counter;        /* counter for no expire */
	bool                    dead;
};


/*
 *      IPVS LBLC sysctl table
 */
#ifdef CONFIG_SYSCTL
static ctl_table vs_vars_table[] = {
	{
		.procname	= "lblc_expiration",
		.data		= NULL,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
#endif
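/*
 * Note on units: proc_dointvec_jiffies presents this value to userspace
 * in seconds and stores it internally in jiffies, so e.g. writing 3600
 * to the sysctl yields an expiration of 3600*HZ jiffies (one hour).
 */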

static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
{
	struct ip_vs_dest *dest;

	hlist_del_rcu(&en->list);
	/*
	 * We don't kfree dest because it is referenced either by its
	 * service or by the trash dest list.
	 */
	dest = rcu_dereference_protected(en->dest, 1);
	ip_vs_dest_put(dest);
	kfree_rcu(en, rcu_head);
}


/*
 *	Returns hash value for IPVS LBLC entry
 */
static inline unsigned int
ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr)
{
	__be32 addr_fold = addr->ip;

#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		addr_fold = addr->ip6[0]^addr->ip6[1]^
			    addr->ip6[2]^addr->ip6[3];
#endif
	return (ntohl(addr_fold)*2654435761UL) & IP_VS_LBLC_TAB_MASK;
}
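
/*
 * The multiplier 2654435761 above is the 32-bit golden-ratio hashing
 * constant (about 2^32/phi, per Knuth): it scatters nearby addresses
 * across the table before the low IP_VS_LBLC_TAB_BITS bits are kept
 * by the mask.
 */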


/*
 *	Hash an entry in the ip_vs_lblc_table.
 */
static void
ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
{
	unsigned int hash = ip_vs_lblc_hashkey(en->af, &en->addr);

	hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
	atomic_inc(&tbl->entries);
}


/* Get ip_vs_lblc_entry associated with supplied parameters. */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
	       const union nf_inet_addr *addr)
{
	unsigned int hash = ip_vs_lblc_hashkey(af, addr);
	struct ip_vs_lblc_entry *en;

	hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
		if (ip_vs_addr_equal(af, &en->addr, addr))
			return en;

	return NULL;
}


/*
 * Create or update an ip_vs_lblc_entry, which is a mapping of a destination IP
 * address to a server. Called under spin lock.
 */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
	       struct ip_vs_dest *dest)
{
	struct ip_vs_lblc_entry *en;

	en = ip_vs_lblc_get(dest->af, tbl, daddr);
	if (!en) {
		en = kmalloc(sizeof(*en), GFP_ATOMIC);
		if (!en)
			return NULL;

		en->af = dest->af;
		ip_vs_addr_copy(dest->af, &en->addr, daddr);
		en->lastuse = jiffies;

		ip_vs_dest_hold(dest);
		RCU_INIT_POINTER(en->dest, dest);

		ip_vs_lblc_hash(tbl, en);
	} else {
		struct ip_vs_dest *old_dest;

		old_dest = rcu_dereference_protected(en->dest, 1);
		if (old_dest != dest) {
			ip_vs_dest_put(old_dest);
			ip_vs_dest_hold(dest);
			/* No ordering constraints for refcnt */
			RCU_INIT_POINTER(en->dest, dest);
		}
	}

	return en;
}


/*
 *      Flush all the entries of the specified table.
 */
static void ip_vs_lblc_flush(struct ip_vs_service *svc)
{
	struct ip_vs_lblc_table *tbl = svc->sched_data;
	struct ip_vs_lblc_entry *en;
	struct hlist_node *next;
	int i;

	spin_lock_bh(&svc->sched_lock);
	tbl->dead = 1;
	for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
		hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
			ip_vs_lblc_free(en);
			atomic_dec(&tbl->entries);
		}
	}
	spin_unlock_bh(&svc->sched_lock);
}

static int sysctl_lblc_expiration(struct ip_vs_service *svc)
{
#ifdef CONFIG_SYSCTL
	struct netns_ipvs *ipvs = net_ipvs(svc->net);
	return ipvs->sysctl_lblc_expiration;
#else
	return DEFAULT_EXPIRATION;
#endif
}

static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
{
	struct ip_vs_lblc_table *tbl = svc->sched_data;
	struct ip_vs_lblc_entry *en;
	struct hlist_node *next;
	unsigned long now = jiffies;
	int i, j;

	for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLC_TAB_MASK;

		spin_lock(&svc->sched_lock);
		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
			if (time_before(now,
					en->lastuse +
					sysctl_lblc_expiration(svc)))
				continue;

			ip_vs_lblc_free(en);
			atomic_dec(&tbl->entries);
		}
		spin_unlock(&svc->sched_lock);
	}
	tbl->rover = j;
}


/*
 *      Periodic timer handler for IPVS lblc table.
 *      It is used to collect stale entries when the number of entries
 *      exceeds the maximum size of the table.
 *
 *      Fixme: we probably need a more complicated algorithm to collect
 *             entries that have not been used for a long time even
 *             if the number of entries doesn't exceed the maximum size
 *             of the table.
 *      The full expiration check is for this purpose now.
 */
static void ip_vs_lblc_check_expire(unsigned long data)
{
	struct ip_vs_service *svc = (struct ip_vs_service *) data;
	struct ip_vs_lblc_table *tbl = svc->sched_data;
	unsigned long now = jiffies;
	int goal;
	int i, j;
	struct ip_vs_lblc_entry *en;
	struct hlist_node *next;

	if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
		/* do full expiration check */
		ip_vs_lblc_full_check(svc);
		tbl->counter = 1;
		goto out;
	}

	if (atomic_read(&tbl->entries) <= tbl->max_size) {
		tbl->counter++;
		goto out;
	}

	goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
	if (goal > tbl->max_size/2)
		goal = tbl->max_size/2;

	for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLC_TAB_MASK;

		spin_lock(&svc->sched_lock);
		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
			if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
				continue;

			ip_vs_lblc_free(en);
			atomic_dec(&tbl->entries);
			goal--;
		}
		spin_unlock(&svc->sched_lock);
		if (goal <= 0)
			break;
	}
	tbl->rover = j;

 out:
	mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}


static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
{
	int i;
	struct ip_vs_lblc_table *tbl;

	/*
	 *    Allocate the ip_vs_lblc_table for this service
	 */
	tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	svc->sched_data = tbl;
	IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for "
		  "current service\n", sizeof(*tbl));

	/*
	 *    Initialize the hash buckets
	 */
	for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
		INIT_HLIST_HEAD(&tbl->bucket[i]);
	}
	tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
	tbl->rover = 0;
	tbl->counter = 1;
	tbl->dead = 0;

	/*
	 *    Hook periodic timer for garbage collection
	 */
	setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire,
		    (unsigned long)svc);
	mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);

	return 0;
}
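
/*
 * Timing note: the timer armed above fires every CHECK_EXPIRE_INTERVAL
 * (60 seconds), and every COUNT_FOR_FULL_EXPIRATION-th run (30 * 60 s,
 * i.e. every half hour) ip_vs_lblc_check_expire() falls back to the
 * full sweep, matching the "half hour" rule described in the header
 * comment.
 */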


static void ip_vs_lblc_done_svc(struct ip_vs_service *svc)
{
	struct ip_vs_lblc_table *tbl = svc->sched_data;

	/* remove periodic timer */
	del_timer_sync(&tbl->periodic_timer);

	/* got to clean up table entries here */
	ip_vs_lblc_flush(svc);

	/* release the table itself */
	kfree_rcu(tbl, rcu_head);
	IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n",
		  sizeof(*tbl));
}


static inline struct ip_vs_dest *
__ip_vs_lblc_schedule(struct ip_vs_service *svc)
{
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	/*
	 * We use the following formula to estimate the load:
	 *                (dest overhead) / dest->weight
	 *
	 * Remember -- no floats in kernel mode!!!
	 * The comparison of h1*w2 > h2*w1 is equivalent to that of
	 * h1/w1 > h2/w2
	 * if every weight is larger than zero.
	 *
	 * The server with weight=0 is quiesced and will not receive any
	 * new connection.
	 */
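	/*
	 * For instance (illustrative numbers): with loh = 30 and
	 * least->weight = 2 versus doh = 20 and dest->weight = 1, the
	 * cross-multiplied test below compares 30*1 > 20*2, which is
	 * false, so "least" (15 overhead per weight unit) is kept over
	 * "dest" (20 per weight unit).
	 */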
	list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;
		if (atomic_read(&dest->weight) > 0) {
			least = dest;
			loh = ip_vs_dest_conn_overhead(least);
			goto nextstage;
		}
	}
	return NULL;

	/*
	 *    Find the destination with the least load.
	 */
  nextstage:
	list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		doh = ip_vs_dest_conn_overhead(dest);
		if (loh * atomic_read(&dest->weight) >
		    doh * atomic_read(&least->weight)) {
			least = dest;
			loh = doh;
		}
	}

	IP_VS_DBG_BUF(6, "LBLC: server %s:%d "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      IP_VS_DBG_ADDR(least->af, &least->addr),
		      ntohs(least->port),
		      atomic_read(&least->activeconns),
		      atomic_read(&least->refcnt),
		      atomic_read(&least->weight), loh);

	return least;
}


/*
 *   If this destination server is overloaded and there is a less loaded
 *   server, then return true.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
	if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
		struct ip_vs_dest *d;

		list_for_each_entry_rcu(d, &svc->destinations, n_list) {
			if (atomic_read(&d->activeconns)*2
			    < atomic_read(&d->weight)) {
				return 1;
			}
		}
	}
	return 0;
}
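
/*
 * Note that is_overloaded() is exactly the reassignment test from the
 * pseudo code in the header comment: n is overloaded only when
 * n.conns > n.weight AND some node m exists with m.conns < m.weight/2.
 */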


/*
 *    Locality-Based (weighted) Least-Connection scheduling
 */
static struct ip_vs_dest *
ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
	struct ip_vs_lblc_table *tbl = svc->sched_data;
	struct ip_vs_iphdr iph;
	struct ip_vs_dest *dest = NULL;
	struct ip_vs_lblc_entry *en;

	ip_vs_fill_iph_addr_only(svc->af, skb, &iph);

	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

	/* First look in our cache */
	en = ip_vs_lblc_get(svc->af, tbl, &iph.daddr);
	if (en) {
		/* We only hold a read lock, but this is atomic */
		en->lastuse = jiffies;

		/*
		 * If the destination is not available, i.e. it's in the
		 * trash, we must ignore it, as it may be removed from under
		 * our feet if someone drops our reference count. Our caller
		 * only guarantees that destinations not already in the trash
		 * are not moved there while we are scheduling, but anyone
		 * can free up entries from the trash at any time.
		 */

		dest = rcu_dereference(en->dest);
		if ((dest->flags & IP_VS_DEST_F_AVAILABLE) &&
		    atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
			goto out;
	}

	/* No cache entry or it is invalid, time to schedule */
	dest = __ip_vs_lblc_schedule(svc);
	if (!dest) {
		ip_vs_scheduler_err(svc, "no destination available");
		return NULL;
	}

	/* If we fail to create a cache entry, we'll just use the valid dest */
	spin_lock_bh(&svc->sched_lock);
	if (!tbl->dead)
		ip_vs_lblc_new(tbl, &iph.daddr, dest);
	spin_unlock_bh(&svc->sched_lock);

out:
	IP_VS_DBG_BUF(6, "LBLC: destination IP address %s --> server %s:%d\n",
		      IP_VS_DBG_ADDR(svc->af, &iph.daddr),
		      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port));

	return dest;
}


/*
 *      IPVS LBLC Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblc_scheduler =
{
	.name =			"lblc",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
	.n_list =		LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
	.init_service =		ip_vs_lblc_init_svc,
	.done_service =		ip_vs_lblc_done_svc,
	.schedule =		ip_vs_lblc_schedule,
};
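
/*
 * Once this module is loaded and the scheduler registered, a virtual
 * service can select it by name from userspace, e.g. (illustrative VIP
 * and port):
 *
 *      ipvsadm -A -t 192.168.0.1:80 -s lblc
 */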

/*
 *  per netns init.
 */
#ifdef CONFIG_SYSCTL
static int __net_init __ip_vs_lblc_init(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	if (!ipvs)
		return -ENOENT;

	if (!net_eq(net, &init_net)) {
		ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
						sizeof(vs_vars_table),
						GFP_KERNEL);
		if (ipvs->lblc_ctl_table == NULL)
			return -ENOMEM;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			ipvs->lblc_ctl_table[0].procname = NULL;

	} else
		ipvs->lblc_ctl_table = vs_vars_table;
	ipvs->sysctl_lblc_expiration = DEFAULT_EXPIRATION;
	ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration;

	ipvs->lblc_ctl_header =
		register_net_sysctl(net, "net/ipv4/vs", ipvs->lblc_ctl_table);
	if (!ipvs->lblc_ctl_header) {
		if (!net_eq(net, &init_net))
			kfree(ipvs->lblc_ctl_table);
		return -ENOMEM;
	}

	return 0;
}
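
/*
 * The registration above exposes the knob as
 * /proc/sys/net/ipv4/vs/lblc_expiration in each network namespace
 * (hidden in unprivileged user namespaces by clearing procname above).
 */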

static void __net_exit __ip_vs_lblc_exit(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	unregister_net_sysctl_table(ipvs->lblc_ctl_header);

	if (!net_eq(net, &init_net))
		kfree(ipvs->lblc_ctl_table);
}

#else

static int __net_init __ip_vs_lblc_init(struct net *net) { return 0; }
static void __net_exit __ip_vs_lblc_exit(struct net *net) { }

#endif

static struct pernet_operations ip_vs_lblc_ops = {
	.init = __ip_vs_lblc_init,
	.exit = __ip_vs_lblc_exit,
};

static int __init ip_vs_lblc_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_vs_lblc_ops);
	if (ret)
		return ret;

	ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
	if (ret)
		unregister_pernet_subsys(&ip_vs_lblc_ops);
	return ret;
}

static void __exit ip_vs_lblc_cleanup(void)
{
	unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
	unregister_pernet_subsys(&ip_vs_lblc_ops);
	synchronize_rcu();
}


module_init(ip_vs_lblc_init);
module_exit(ip_vs_lblc_cleanup);
MODULE_LICENSE("GPL");