/*
 * IPVS:        Locality-Based Least-Connection with Replication scheduler
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Julian Anastasov        :    Added the missing (dest->weight>0)
 *                                  condition in the ip_vs_dest_set_max.
 *
 */

/*
 * The lblc/r algorithm is as follows (pseudo code):
 *
 *       if serverSet[dest_ip] is null then
 *               n, serverSet[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- {least-conn (alive) node in serverSet[dest_ip]};
 *               if (n is null) OR
 *                  (n.conns>n.weight AND
 *                   there is a node m with m.conns<m.weight/2) then
 *                   n <- {weighted least-conn node};
 *                   add n to serverSet[dest_ip];
 *               if |serverSet[dest_ip]| > 1 AND
 *                   now - serverSet[dest_ip].lastMod > T then
 *                   m <- {most conn node in serverSet[dest_ip]};
 *                   remove m from serverSet[dest_ip];
 *       if serverSet[dest_ip] changed then
 *               serverSet[dest_ip].lastMod <- now;
 *
 *       return n;
 *
 */
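/*
 * Walk-through of the pseudo code above: the first request for a given
 * dest_ip creates serverSet[dest_ip] as a singleton holding the weighted
 * least-connection server.  Later requests stick to the least loaded
 * member of that set; a replica is added only when every member carries
 * more connections than its weight while some other server in the
 * service sits below half of its weight.  Once the set has more than
 * one member and has been unmodified for longer than T, the busiest
 * member is dropped again, shrinking the replica set toward one server.
 */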
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

#include <net/ip_vs.h>
/*
 * It is for garbage collection of stale IPVS lblcr entries,
 * when the table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)

#define DEFAULT_EXPIRATION      (24*60*60*HZ)

/*
 *    It is for full expiration check.
 *    When there is no partial expiration check (garbage collection)
 *    in a half hour, do a full expiration check to collect stale
 *    entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
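/*
 * Timing worked out: the periodic timer fires every CHECK_EXPIRE_INTERVAL
 * (60s); every COUNT_FOR_FULL_EXPIRATION-th firing (30 * 60s = half an
 * hour) runs the full check, which drops entries idle for longer than
 * the lblcr expiration (DEFAULT_EXPIRATION = 24h unless overridden via
 * sysctl).  The partial collection between full checks uses the much
 * shorter ENTRY_TIMEOUT (6 min) and only runs while the table holds
 * more than max_size entries.
 */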
/*
 *     for IPVS lblcr entry hash table
 */
#ifndef CONFIG_IP_VS_LBLCR_TAB_BITS
#define CONFIG_IP_VS_LBLCR_TAB_BITS      10
#endif
#define IP_VS_LBLCR_TAB_BITS     CONFIG_IP_VS_LBLCR_TAB_BITS
#define IP_VS_LBLCR_TAB_SIZE     (1 << IP_VS_LBLCR_TAB_BITS)
#define IP_VS_LBLCR_TAB_MASK     (IP_VS_LBLCR_TAB_SIZE - 1)
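/*
 * With the default of 10 bits this gives IP_VS_LBLCR_TAB_SIZE = 1024
 * buckets and IP_VS_LBLCR_TAB_MASK = 0x3ff, so a hash value is reduced
 * to a bucket index with a single AND.
 */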
/*
 *      IPVS destination set structure and operations
 */
struct ip_vs_dest_set_elem {
        struct list_head        list;           /* list link */
        struct ip_vs_dest __rcu *dest;          /* destination server */
        struct rcu_head         rcu_head;
};

struct ip_vs_dest_set {
        atomic_t                size;           /* set size */
        unsigned long           lastmod;        /* last modified time */
        struct list_head        list;           /* destination list */
};
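/*
 * The element list is RCU-protected: the scheduler walks it with the
 * _rcu list iterators while updates (insert/erase below) run under the
 * service's sched_lock, publish with list_add_rcu()/list_del_rcu(), and
 * defer freeing via kfree_rcu() until all readers are done.
 */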
static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
                                  struct ip_vs_dest *dest, bool check)
{
        struct ip_vs_dest_set_elem *e;

        if (check) {
                list_for_each_entry(e, &set->list, list) {
                        struct ip_vs_dest *d;

                        d = rcu_dereference_protected(e->dest, 1);
                        if (d == dest)
                                /* already existed */
                                return;
                }
        }

        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (e == NULL)
                return;

        ip_vs_dest_hold(dest);
        RCU_INIT_POINTER(e->dest, dest);

        list_add_rcu(&e->list, &set->list);
        atomic_inc(&set->size);

        set->lastmod = jiffies;
}
static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
        struct ip_vs_dest_set_elem *e;

        list_for_each_entry(e, &set->list, list) {
                struct ip_vs_dest *d;

                d = rcu_dereference_protected(e->dest, 1);
                if (d == dest) {
                        /* HIT */
                        atomic_dec(&set->size);
                        set->lastmod = jiffies;
                        ip_vs_dest_put(dest);
                        list_del_rcu(&e->list);
                        kfree_rcu(e, rcu_head);
                        break;
                }
        }
}
static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
{
        struct ip_vs_dest_set_elem *e, *ep;

        list_for_each_entry_safe(e, ep, &set->list, list) {
                struct ip_vs_dest *d;

                d = rcu_dereference_protected(e->dest, 1);
                /*
                 * We don't kfree dest because it is referred either
                 * by its service or by the trash dest list.
                 */
                ip_vs_dest_put(d);
                list_del_rcu(&e->list);
                kfree_rcu(e, rcu_head);
        }
}
/* get weighted least-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
{
        register struct ip_vs_dest_set_elem *e;
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        if (set == NULL)
                return NULL;

        /* select the first destination server, whose weight > 0 */
        list_for_each_entry_rcu(e, &set->list, list) {
                least = rcu_dereference(e->dest);
                if (least->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                if ((atomic_read(&least->weight) > 0)
                    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
                        loh = ip_vs_dest_conn_overhead(least);
                        goto nextstage;
                }
        }
        return NULL;

        /* find the destination with the weighted least load */
  nextstage:
        list_for_each_entry_continue_rcu(e, &set->list, list) {
                dest = rcu_dereference(e->dest);
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = ip_vs_dest_conn_overhead(dest);
                if ((loh * atomic_read(&dest->weight) >
                     doh * atomic_read(&least->weight))
                    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "%s(): server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      __func__,
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
                      atomic_read(&least->refcnt),
                      atomic_read(&least->weight), loh);
        return least;
}
/* get weighted most-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
{
        register struct ip_vs_dest_set_elem *e;
        struct ip_vs_dest *dest, *most;
        int moh, doh;

        if (set == NULL)
                return NULL;

        /* select the first destination server, whose weight > 0 */
        list_for_each_entry(e, &set->list, list) {
                most = rcu_dereference_protected(e->dest, 1);
                if (atomic_read(&most->weight) > 0) {
                        moh = ip_vs_dest_conn_overhead(most);
                        goto nextstage;
                }
        }
        return NULL;

        /* find the destination with the weighted most load */
  nextstage:
        list_for_each_entry_continue(e, &set->list, list) {
                dest = rcu_dereference_protected(e->dest, 1);
                doh = ip_vs_dest_conn_overhead(dest);
                /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
                if ((moh * atomic_read(&dest->weight) <
                     doh * atomic_read(&most->weight))
                    && (atomic_read(&dest->weight) > 0)) {
                        most = dest;
                        moh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "%s(): server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      __func__,
                      IP_VS_DBG_ADDR(most->af, &most->addr), ntohs(most->port),
                      atomic_read(&most->activeconns),
                      atomic_read(&most->refcnt),
                      atomic_read(&most->weight), moh);
        return most;
}
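/*
 * Worked example of the integer-only comparison used in both
 * ip_vs_dest_set_min() and ip_vs_dest_set_max(): to decide whether
 * moh/mw < doh/dw without floating point, compare moh*dw < doh*mw.
 * E.g. overheads 10 vs 30 with weights 2 vs 3: 10*3 = 30 < 30*2 = 60,
 * so the second server carries the greater weighted load.  This only
 * holds when both weights are positive, hence the (weight > 0) checks.
 */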
/*
 *      IPVS lblcr entry represents an association between destination
 *      IP address and its destination server set
 */
struct ip_vs_lblcr_entry {
        struct hlist_node       list;
        int                     af;             /* address family */
        union nf_inet_addr      addr;           /* destination IP address */
        struct ip_vs_dest_set   set;            /* destination server set */
        unsigned long           lastuse;        /* last used time */
        struct rcu_head         rcu_head;
};
/*
 *      IPVS lblcr hash table
 */
struct ip_vs_lblcr_table {
        struct rcu_head         rcu_head;
        struct hlist_head       bucket[IP_VS_LBLCR_TAB_SIZE];  /* hash bucket */
        atomic_t                entries;        /* number of entries */
        int                     max_size;       /* maximum size of entries */
        struct timer_list       periodic_timer; /* collect stale entries */
        int                     rover;          /* rover for expire check */
        int                     counter;        /* counter for no expire */
        bool                    dead;
};
#ifdef CONFIG_SYSCTL
/*
 *      IPVS LBLCR sysctl table
 */

static struct ctl_table vs_vars_table[] = {
        {
                .procname       = "lblcr_expiration",
                .data           = NULL,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};
#endif
static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
        hlist_del_rcu(&en->list);
        ip_vs_dest_set_eraseall(&en->set);
        kfree_rcu(en, rcu_head);
}
/*
 *      Returns hash value for IPVS LBLCR entry
 */
static inline unsigned int
ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
{
        __be32 addr_fold = addr->ip;

#ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
                addr_fold = addr->ip6[0]^addr->ip6[1]^
                            addr->ip6[2]^addr->ip6[3];
#endif
        return (ntohl(addr_fold)*2654435761UL) & IP_VS_LBLCR_TAB_MASK;
}
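/*
 * 2654435761 is the classic multiplier of Knuth's multiplicative
 * hashing (a prime close to 2^32 divided by the golden ratio), which
 * spreads consecutive addresses across the buckets before the table
 * mask is applied.
 */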
/*
 *      Hash an entry in the ip_vs_lblcr_table.
 */
static void
ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
{
        unsigned int hash = ip_vs_lblcr_hashkey(en->af, &en->addr);

        hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
        atomic_inc(&tbl->entries);
}
/* Get ip_vs_lblcr_entry associated with supplied parameters. */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
                const union nf_inet_addr *addr)
{
        unsigned int hash = ip_vs_lblcr_hashkey(af, addr);
        struct ip_vs_lblcr_entry *en;

        hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
                if (ip_vs_addr_equal(af, &en->addr, addr))
                        return en;

        return NULL;
}
/*
 * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
 * IP address to a server. Called under spin lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
                struct ip_vs_dest *dest)
{
        struct ip_vs_lblcr_entry *en;

        en = ip_vs_lblcr_get(dest->af, tbl, daddr);
        if (!en) {
                en = kmalloc(sizeof(*en), GFP_ATOMIC);
                if (!en)
                        return NULL;

                en->af = dest->af;
                ip_vs_addr_copy(dest->af, &en->addr, daddr);
                en->lastuse = jiffies;

                /* initialize its dest set */
                atomic_set(&(en->set.size), 0);
                INIT_LIST_HEAD(&en->set.list);

                ip_vs_dest_set_insert(&en->set, dest, false);

                ip_vs_lblcr_hash(tbl, en);
                return en;
        }

        ip_vs_dest_set_insert(&en->set, dest, true);

        return en;
}
/*
 *      Flush all the entries of the specified table.
 */
static void ip_vs_lblcr_flush(struct ip_vs_service *svc)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        int i;
        struct ip_vs_lblcr_entry *en;
        struct hlist_node *next;

        spin_lock_bh(&svc->sched_lock);
        tbl->dead = 1;
        for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
                        ip_vs_lblcr_free(en);
                }
        }
        spin_unlock_bh(&svc->sched_lock);
}
static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
{
#ifdef CONFIG_SYSCTL
        struct netns_ipvs *ipvs = net_ipvs(svc->net);
        return ipvs->sysctl_lblcr_expiration;
#else
        return DEFAULT_EXPIRATION;
#endif
}
static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        unsigned long now = jiffies;
        int i, j;
        struct ip_vs_lblcr_entry *en;
        struct hlist_node *next;

        for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

                spin_lock(&svc->sched_lock);
                hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
                        if (time_after(en->lastuse +
                                       sysctl_lblcr_expiration(svc), now))
                                continue;

                        ip_vs_lblcr_free(en);
                        atomic_dec(&tbl->entries);
                }
                spin_unlock(&svc->sched_lock);
        }
        tbl->rover = j;
}
/*
 *      Periodical timer handler for IPVS lblcr table
 *      It is used to collect stale entries when the number of entries
 *      exceeds the maximum size of the table.
 *
 *      Fixme: we probably need more complicated algorithm to collect
 *             entries that have not been used for a long time even
 *             if the number of entries doesn't exceed the maximum size
 *             of the table.
 *      The full expiration check is for this purpose now.
 */
static void ip_vs_lblcr_check_expire(unsigned long data)
{
        struct ip_vs_service *svc = (struct ip_vs_service *) data;
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        unsigned long now = jiffies;
        int goal;
        int i, j;
        struct ip_vs_lblcr_entry *en;
        struct hlist_node *next;

        if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
                /* do full expiration check */
                ip_vs_lblcr_full_check(svc);
                tbl->counter = 1;
                goto out;
        }

        if (atomic_read(&tbl->entries) <= tbl->max_size) {
                tbl->counter++;
                goto out;
        }

        goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
        if (goal > tbl->max_size/2)
                goal = tbl->max_size/2;

        for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

                spin_lock(&svc->sched_lock);
                hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
                        if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
                                continue;

                        ip_vs_lblcr_free(en);
                        atomic_dec(&tbl->entries);
                        goal--;
                }
                spin_unlock(&svc->sched_lock);
                if (goal <= 0)
                        break;
        }
        tbl->rover = j;

out:
        mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
}
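/*
 * Worked example of the partial-collection goal: with the default
 * max_size of 16384 (IP_VS_LBLCR_TAB_SIZE*16) and 20000 entries,
 * goal = (20000 - 16384)*4/3 = 4821 entries per pass, capped at
 * max_size/2 = 8192, so one timer tick never frees more than half
 * of the nominal table capacity.
 */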
static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
{
        int i;
        struct ip_vs_lblcr_table *tbl;

        /*
         *    Allocate the ip_vs_lblcr_table for this service
         */
        tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
        if (tbl == NULL)
                return -ENOMEM;

        svc->sched_data = tbl;
        IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
                  "current service\n", sizeof(*tbl));

        /*
         *    Initialize the hash buckets
         */
        for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                INIT_HLIST_HEAD(&tbl->bucket[i]);
        }
        tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
        tbl->rover = 0;
        tbl->counter = 1;
        tbl->dead = 0;

        /*
         *    Hook periodic timer for garbage collection
         */
        setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire,
                    (unsigned long)svc);
        mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);

        return 0;
}
static void ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;

        /* remove periodic timer */
        del_timer_sync(&tbl->periodic_timer);

        /* got to clean up table entries here */
        ip_vs_lblcr_flush(svc);

        /* release the table itself */
        kfree_rcu(tbl, rcu_head);
        IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
                  sizeof(*tbl));
}
static inline struct ip_vs_dest *
__ip_vs_lblcr_schedule(struct ip_vs_service *svc)
{
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        /*
         * We use the following formula to estimate the load:
         *                (dest overhead) / dest->weight
         *
         * Remember -- no floats in kernel mode!!!
         * The comparison of h1*w2 > h2*w1 is equivalent to that of
         *                h1/w1 > h2/w2
         * if every weight is larger than zero.
         *
         * The server with weight=0 is quiesced and will not receive any
         * new connection.
         */
        list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                if (atomic_read(&dest->weight) > 0) {
                        least = dest;
                        loh = ip_vs_dest_conn_overhead(least);
                        goto nextstage;
                }
        }
        return NULL;

        /*
         *    Find the destination with the least load.
         */
  nextstage:
        list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = ip_vs_dest_conn_overhead(dest);
                if (loh * atomic_read(&dest->weight) >
                    doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "LBLCR: server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
                      atomic_read(&least->refcnt),
                      atomic_read(&least->weight), loh);

        return least;
}
/*
 *   If this destination server is overloaded and there is a less loaded
 *   server, then return true.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
        if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
                struct ip_vs_dest *d;

                list_for_each_entry_rcu(d, &svc->destinations, n_list) {
                        if (atomic_read(&d->activeconns)*2
                            < atomic_read(&d->weight)) {
                                return 1;
                        }
                }
        }
        return 0;
}
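/*
 * Numeric example: a server with weight 100 and 150 active connections
 * is a candidate for replication (150 > 100), but only if some other
 * server in the service has activeconns below half its weight, e.g.
 * weight 100 with 40 active connections (40*2 < 100).  This matches
 * the "m.conns < m.weight/2" clause of the pseudo code at the top.
 */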
/*
 *    Locality-Based (weighted) Least-Connection scheduling
 */
static struct ip_vs_dest *
ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        struct ip_vs_iphdr iph;
        struct ip_vs_dest *dest;
        struct ip_vs_lblcr_entry *en;

        ip_vs_fill_iph_addr_only(svc->af, skb, &iph);

        IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

        /* First look in our cache */
        en = ip_vs_lblcr_get(svc->af, tbl, &iph.daddr);
        if (en) {
                en->lastuse = jiffies;

                /* Get the least loaded destination */
                dest = ip_vs_dest_set_min(&en->set);

                /* More than one destination + enough time passed by, cleanup */
                if (atomic_read(&en->set.size) > 1 &&
                    time_after(jiffies, en->set.lastmod +
                                sysctl_lblcr_expiration(svc))) {
                        spin_lock_bh(&svc->sched_lock);
                        if (atomic_read(&en->set.size) > 1) {
                                struct ip_vs_dest *m;

                                m = ip_vs_dest_set_max(&en->set);
                                if (m)
                                        ip_vs_dest_set_erase(&en->set, m);
                        }
                        spin_unlock_bh(&svc->sched_lock);
                }

                /* If the destination is not overloaded, use it */
                if (dest && !is_overloaded(dest, svc))
                        goto out;

                /* The cache entry is invalid, time to schedule */
                dest = __ip_vs_lblcr_schedule(svc);
                if (!dest) {
                        ip_vs_scheduler_err(svc, "no destination available");
                        return NULL;
                }

                /* Update our cache entry */
                spin_lock_bh(&svc->sched_lock);
                if (!tbl->dead)
                        ip_vs_dest_set_insert(&en->set, dest, true);
                spin_unlock_bh(&svc->sched_lock);
                goto out;
        }

        /* No cache entry, time to schedule */
        dest = __ip_vs_lblcr_schedule(svc);
        if (!dest) {
                IP_VS_DBG(1, "no destination available\n");
                return NULL;
        }

        /* If we fail to create a cache entry, we'll just use the valid dest */
        spin_lock_bh(&svc->sched_lock);
        if (!tbl->dead)
                ip_vs_lblcr_new(tbl, &iph.daddr, dest);
        spin_unlock_bh(&svc->sched_lock);

out:
        IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",
                      IP_VS_DBG_ADDR(svc->af, &iph.daddr),
                      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port));

        return dest;
}
/*
 *      IPVS LBLCR Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
{
        .name =                 "lblcr",
        .refcnt =               ATOMIC_INIT(0),
        .module =               THIS_MODULE,
        .n_list =               LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
        .init_service =         ip_vs_lblcr_init_svc,
        .done_service =         ip_vs_lblcr_done_svc,
        .schedule =             ip_vs_lblcr_schedule,
};
/*
 *  per netns init.
 */
#ifdef CONFIG_SYSCTL
static int __net_init __ip_vs_lblcr_init(struct net *net)
{
        struct netns_ipvs *ipvs = net_ipvs(net);

        if (!ipvs)
                return -ENOENT;

        if (!net_eq(net, &init_net)) {
                ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
                                                sizeof(vs_vars_table),
                                                GFP_KERNEL);
                if (ipvs->lblcr_ctl_table == NULL)
                        return -ENOMEM;

                /* Don't export sysctls to unprivileged users */
                if (net->user_ns != &init_user_ns)
                        ipvs->lblcr_ctl_table[0].procname = NULL;
        } else
                ipvs->lblcr_ctl_table = vs_vars_table;
        ipvs->sysctl_lblcr_expiration = DEFAULT_EXPIRATION;
        ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;

        ipvs->lblcr_ctl_header =
                register_net_sysctl(net, "net/ipv4/vs", ipvs->lblcr_ctl_table);
        if (!ipvs->lblcr_ctl_header) {
                if (!net_eq(net, &init_net))
                        kfree(ipvs->lblcr_ctl_table);
                return -ENOMEM;
        }

        return 0;
}
static void __net_exit __ip_vs_lblcr_exit(struct net *net)
{
        struct netns_ipvs *ipvs = net_ipvs(net);

        unregister_net_sysctl_table(ipvs->lblcr_ctl_header);

        if (!net_eq(net, &init_net))
                kfree(ipvs->lblcr_ctl_table);
}
#else

static int __net_init __ip_vs_lblcr_init(struct net *net) { return 0; }
static void __net_exit __ip_vs_lblcr_exit(struct net *net) { }

#endif
static struct pernet_operations ip_vs_lblcr_ops = {
        .init = __ip_vs_lblcr_init,
        .exit = __ip_vs_lblcr_exit,
};
static int __init ip_vs_lblcr_init(void)
{
        int ret;

        ret = register_pernet_subsys(&ip_vs_lblcr_ops);
        if (ret)
                return ret;

        ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
        if (ret)
                unregister_pernet_subsys(&ip_vs_lblcr_ops);
        return ret;
}
static void __exit ip_vs_lblcr_cleanup(void)
{
        unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
        unregister_pernet_subsys(&ip_vs_lblcr_ops);
        rcu_barrier();
}
module_init(ip_vs_lblcr_init);
module_exit(ip_vs_lblcr_cleanup);
MODULE_LICENSE("GPL");
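/*
 * Usage sketch (assuming a standard ipvsadm build; not part of this file):
 * the scheduler is selected per virtual service by name, e.g.
 *
 *     ipvsadm -A -t 192.0.2.1:80 -s lblcr
 *
 * and the server-set expiration registered above is tunable at runtime via
 *
 *     sysctl net.ipv4.vs.lblcr_expiration
 *
 * which proc_dointvec_jiffies exposes in seconds.
 */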