/*
 * IPVS:        Locality-Based Least-Connection scheduling module
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Martin Hamilton         :    fixed the terrible locking bugs
 *                                  *lock(tbl->lock) ==> *lock(&tbl->lock)
 *     Wensong Zhang           :    fixed the uninitialized tbl->lock bug
 *     Wensong Zhang           :    added doing full expiration check to
 *                                  collect stale entries of 24+ hours when
 *                                  no partial expire check in a half hour
 *     Julian Anastasov        :    replaced del_timer call with del_timer_sync
 *                                  to avoid the possible race between timer
 *                                  handler and del_timer thread in SMP
 *
 */

/*
 * The lblc algorithm is as follows (pseudo code):
 *
 *       if cachenode[dest_ip] is null then
 *               n, cachenode[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- cachenode[dest_ip];
 *               if (n is dead) OR
 *                  (n.conns > n.weight AND
 *                   there is a node m with m.conns < m.weight/2) then
 *                       n, cachenode[dest_ip] <- {weighted least-conn node};
 *
 *       return n;
 *
 * Thanks must go to Wenzhuo Zhang for talking WCCP to me and pushing
 * me to write this module.
 */
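
/*
 * For illustration only (not part of the original module): the decision
 * above restated as self-contained C. The struct and function names are
 * hypothetical and exist only to make the pseudo code concrete.
 */
#if 0	/* illustrative sketch, not built */
struct lblc_example_node {
        bool dead;      /* server no longer available */
        int  conns;     /* current connection count */
        int  weight;    /* configured weight */
};

/* Reassign the cache slot when there is no cached node, the cached node
 * is dead, or it is loaded beyond its weight while some other node sits
 * below half of its own weight. */
static bool lblc_example_needs_reassign(const struct lblc_example_node *n,
                                        bool some_node_underloaded)
{
        return !n || n->dead ||
               (n->conns > n->weight && some_node_underloaded);
}
#endif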

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>

#include <net/ip_vs.h>

/*
 * These control garbage collection of stale IPVS lblc entries
 * when the table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)

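/*
 * In wall-clock terms (HZ is ticks per second), the partial expire check
 * runs every minute, and an entry idle for six minutes becomes reclaimable.
 */
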
/*
 * It is for the full expiration check.
 * When there is no partial expiration check (garbage collection)
 * in a half hour, do a full expiration check to collect stale
 * entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
static int sysctl_ip_vs_lblc_expiration = 24*60*60*HZ;

/*
 * for IPVS lblc entry hash table
 */
#ifndef CONFIG_IP_VS_LBLC_TAB_BITS
#define CONFIG_IP_VS_LBLC_TAB_BITS      10
#endif
#define IP_VS_LBLC_TAB_BITS     CONFIG_IP_VS_LBLC_TAB_BITS
#define IP_VS_LBLC_TAB_SIZE     (1 << IP_VS_LBLC_TAB_BITS)
#define IP_VS_LBLC_TAB_MASK     (IP_VS_LBLC_TAB_SIZE - 1)

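/*
 * With the default 10 table bits this gives 1 << 10 = 1024 buckets and a
 * mask of 0x3ff; ip_vs_lblc_init_svc() below caps the table at an average
 * of 16 entries per bucket (max_size = IP_VS_LBLC_TAB_SIZE * 16 = 16384).
 */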

/*
 * IPVS lblc entry represents an association between destination
 * IP address and its destination server
 */
struct ip_vs_lblc_entry {
        struct list_head        list;
        int                     af;             /* address family */
        union nf_inet_addr      addr;           /* destination IP address */
        struct ip_vs_dest       *dest;          /* real server (cache) */
        unsigned long           lastuse;        /* last used time */
};

/*
 * IPVS lblc hash table
 */
struct ip_vs_lblc_table {
        struct list_head        bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */
        atomic_t                entries;        /* number of entries */
        int                     max_size;       /* maximum size of entries */
        struct timer_list       periodic_timer; /* collect stale entries */
        int                     rover;          /* rover for expire check */
        int                     counter;        /* counter for no expire */
};


/*
 * IPVS LBLC sysctl table
 */

static ctl_table vs_vars_table[] = {
        {
                .procname       = "lblc_expiration",
                .data           = &sysctl_ip_vs_lblc_expiration,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};
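
/*
 * proc_dointvec_jiffies converts between seconds in userspace and jiffies
 * here, so the 24*60*60*HZ default reads back as 86400. Assuming the
 * standard IPVS sysctl path, a one-hour expiration would be set with e.g.:
 *
 *      sysctl -w net.ipv4.vs.lblc_expiration=3600
 */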

static struct ctl_table_header *sysctl_header;

static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
{
        list_del(&en->list);
        /*
         * We don't kfree dest because it is referred to either by its
         * service or the trash dest list.
         */
        atomic_dec(&en->dest->refcnt);
        kfree(en);
}

/*
 * Returns hash value for IPVS LBLC entry
 */
static inline unsigned
ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr)
{
        __be32 addr_fold = addr->ip;

#ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
                addr_fold = addr->ip6[0]^addr->ip6[1]^
                            addr->ip6[2]^addr->ip6[3];
#endif
        return (ntohl(addr_fold)*2654435761UL) & IP_VS_LBLC_TAB_MASK;
}

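/*
 * The multiplier 2654435761 (0x9e3779b1) above is a prime near
 * 2^32 divided by the golden ratio -- the classic constant of Knuth's
 * multiplicative hashing -- so consecutive addresses scatter well
 * across the table.
 */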

/*
 * Hash an entry in the ip_vs_lblc_table.
 */
static void
ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
{
        unsigned hash = ip_vs_lblc_hashkey(en->af, &en->addr);

        list_add(&en->list, &tbl->bucket[hash]);
        atomic_inc(&tbl->entries);
}

/*
 * Get ip_vs_lblc_entry associated with supplied parameters. Called under
 * read lock.
 */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
               const union nf_inet_addr *addr)
{
        unsigned hash = ip_vs_lblc_hashkey(af, addr);
        struct ip_vs_lblc_entry *en;

        list_for_each_entry(en, &tbl->bucket[hash], list)
                if (ip_vs_addr_equal(af, &en->addr, addr))
                        return en;

        return NULL;
}

/*
 * Create or update an ip_vs_lblc_entry, which is a mapping of a destination
 * IP address to a server. Called under write lock.
 */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
               struct ip_vs_dest *dest)
{
        struct ip_vs_lblc_entry *en;

        en = ip_vs_lblc_get(dest->af, tbl, daddr);
        if (!en) {
                en = kmalloc(sizeof(*en), GFP_ATOMIC);
                if (!en) {
                        pr_err("%s(): no memory\n", __func__);
                        return NULL;
                }

                en->af = dest->af;
                ip_vs_addr_copy(dest->af, &en->addr, daddr);
                en->lastuse = jiffies;

                atomic_inc(&dest->refcnt);
                en->dest = dest;

                ip_vs_lblc_hash(tbl, en);
        } else if (en->dest != dest) {
                atomic_dec(&en->dest->refcnt);
                atomic_inc(&dest->refcnt);
                en->dest = dest;
        }

        return en;
}

/*
 * Flush all the entries of the specified table.
 */
static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl)
{
        struct ip_vs_lblc_entry *en, *nxt;
        int i;

        for (i = 0; i < IP_VS_LBLC_TAB_SIZE; i++) {
                list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
                        ip_vs_lblc_free(en);
                        atomic_dec(&tbl->entries);
                }
        }
}

static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
{
        struct ip_vs_lblc_table *tbl = svc->sched_data;
        struct ip_vs_lblc_entry *en, *nxt;
        unsigned long now = jiffies;
        int i, j;

        for (i = 0, j = tbl->rover; i < IP_VS_LBLC_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLC_TAB_MASK;

                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_before(now,
                                        en->lastuse + sysctl_ip_vs_lblc_expiration))
                                continue;

                        ip_vs_lblc_free(en);
                        atomic_dec(&tbl->entries);
                }
                write_unlock(&svc->sched_lock);
        }
        tbl->rover = j;
}

/*
 * Periodic timer handler for the IPVS lblc table.
 * It is used to collect stale entries when the number of entries
 * exceeds the maximum size of the table.
 *
 * Fixme: we probably need a more complicated algorithm to collect
 *        entries that have not been used for a long time even
 *        if the number of entries doesn't exceed the maximum size
 *        of the table.
 *        The full expiration check is for this purpose now.
 */
static void ip_vs_lblc_check_expire(unsigned long data)
{
        struct ip_vs_service *svc = (struct ip_vs_service *) data;
        struct ip_vs_lblc_table *tbl = svc->sched_data;
        unsigned long now = jiffies;
        int goal;
        int i, j;
        struct ip_vs_lblc_entry *en, *nxt;

        if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
                /* do full expiration check */
                ip_vs_lblc_full_check(svc);
                tbl->counter = 1;
                goto out;
        }

        if (atomic_read(&tbl->entries) <= tbl->max_size) {
                tbl->counter++;
                goto out;
        }

        goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
        if (goal > tbl->max_size/2)
                goal = tbl->max_size/2;
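        /*
         * Worked example (illustrative numbers): with max_size = 16384 and
         * 20000 entries, goal = (20000 - 16384)*4/3 = 4821, which is below
         * the max_size/2 = 8192 cap, so up to 4821 entries are reclaimed.
         */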

        for (i = 0, j = tbl->rover; i < IP_VS_LBLC_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLC_TAB_MASK;

                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
                                continue;

                        ip_vs_lblc_free(en);
                        atomic_dec(&tbl->entries);
                        goal--;
                }
                write_unlock(&svc->sched_lock);
                if (goal <= 0)
                        break;
        }
        tbl->rover = j;

  out:
        mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}

static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
{
        int i;
        struct ip_vs_lblc_table *tbl;

        /*
         *    Allocate the ip_vs_lblc_table for this service
         */
        tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
        if (tbl == NULL) {
                pr_err("%s(): no memory\n", __func__);
                return -ENOMEM;
        }
        svc->sched_data = tbl;
        IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for "
                  "current service\n", sizeof(*tbl));

        /*
         *    Initialize the hash buckets
         */
        for (i = 0; i < IP_VS_LBLC_TAB_SIZE; i++) {
                INIT_LIST_HEAD(&tbl->bucket[i]);
        }
        tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
        tbl->rover = 0;
        tbl->counter = 1;

        /*
         *    Hook periodic timer for garbage collection
         */
        setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire,
                    (unsigned long)svc);
        mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);

        return 0;
}

static int ip_vs_lblc_done_svc(struct ip_vs_service *svc)
{
        struct ip_vs_lblc_table *tbl = svc->sched_data;

        /* remove periodic timer */
        del_timer_sync(&tbl->periodic_timer);

        /* got to clean up table entries here */
        ip_vs_lblc_flush(tbl);

        /* release the table itself */
        kfree(tbl);
        IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n",
                  sizeof(*tbl));

        return 0;
}

static inline struct ip_vs_dest *
__ip_vs_lblc_schedule(struct ip_vs_service *svc)
{
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        /*
         * We think the overhead of processing active connections is
         * fifty times higher than that of inactive connections on
         * average. (This fifty times might not be accurate, we will
         * change it later.) We use the following formula to estimate
         * the overhead:
         *                dest->activeconns*50 + dest->inactconns
         * and the load:
         *                (dest overhead) / dest->weight
         *
         * Remember -- no floats in kernel mode!!!
         * The comparison of h1*w2 > h2*w1 is equivalent to that of
         *                h1/w1 > h2/w2
         * if every weight is larger than zero.
         *
         * The server with weight=0 is quiesced and will not receive any
         * new connection.
         */
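        /*
         * Worked example (illustrative numbers): dest1 with activeconns=3,
         * inactconns=10, weight=2 has overhead 3*50+10 = 160; dest2 with
         * activeconns=1, inactconns=20, weight=1 has overhead 70. Comparing
         * 160*1 > 70*2 shows dest2 carries the lower load per unit of
         * weight (70 vs 80), so dest2 is preferred.
         */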
        list_for_each_entry(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
                if (atomic_read(&dest->weight) > 0) {
                        least = dest;
                        loh = atomic_read(&least->activeconns) * 50
                                + atomic_read(&least->inactconns);
                        goto nextstage;
                }
        }
        return NULL;

        /*
         *    Find the destination with the least load.
         */
  nextstage:
        list_for_each_entry_continue(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                if (loh * atomic_read(&dest->weight) >
                    doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "LBLC: server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
                      atomic_read(&least->refcnt),
                      atomic_read(&least->weight), loh);

        return least;
}

/*
 * If this destination server is overloaded and there is a less loaded
 * server, then return true.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
        if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
                struct ip_vs_dest *d;

                list_for_each_entry(d, &svc->destinations, n_list) {
                        if (atomic_read(&d->activeconns)*2
                            < atomic_read(&d->weight)) {
                                return 1;
                        }
                }
        }
        return 0;
}

/*
 * Locality-Based (weighted) Least-Connection scheduling
 */
static struct ip_vs_dest *
ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
        struct ip_vs_lblc_table *tbl = svc->sched_data;
        struct ip_vs_iphdr iph;
        struct ip_vs_dest *dest = NULL;
        struct ip_vs_lblc_entry *en;

        ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);

        IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

        /* First look in our cache */
        read_lock(&svc->sched_lock);
        en = ip_vs_lblc_get(svc->af, tbl, &iph.daddr);
        if (en) {
                /* We only hold a read lock, but this is atomic */
                en->lastuse = jiffies;

                /*
                 * If the destination is not available, i.e. it's in the
                 * trash, we must ignore it, as it may be removed from under
                 * our feet if someone drops our reference count. Our caller
                 * only makes sure that destinations that are not in the
                 * trash are not moved to the trash while we are scheduling.
                 * But anyone can free up entries from the trash at any time.
                 */

                if (en->dest->flags & IP_VS_DEST_F_AVAILABLE)
                        dest = en->dest;
        }
        read_unlock(&svc->sched_lock);

        /* If the destination has a weight and is not overloaded, use it */
        if (dest && atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
                goto out;

        /* No cache entry or it is invalid, time to schedule */
        dest = __ip_vs_lblc_schedule(svc);
        if (!dest) {
                IP_VS_ERR_RL("LBLC: no destination available\n");
                return NULL;
        }

        /* If we fail to create a cache entry, we'll just use the valid dest */
        write_lock(&svc->sched_lock);
        ip_vs_lblc_new(tbl, &iph.daddr, dest);
        write_unlock(&svc->sched_lock);

out:
        IP_VS_DBG_BUF(6, "LBLC: destination IP address %s --> server %s:%d\n",
                      IP_VS_DBG_ADDR(svc->af, &iph.daddr),
                      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port));

        return dest;
}

/*
 * IPVS LBLC Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblc_scheduler =
{
        .name =                 "lblc",
        .refcnt =               ATOMIC_INIT(0),
        .module =               THIS_MODULE,
        .n_list =               LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
        .init_service =         ip_vs_lblc_init_svc,
        .done_service =         ip_vs_lblc_done_svc,
        .schedule =             ip_vs_lblc_schedule,
};
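
/*
 * Once this module is loaded, the scheduler is selected by name from
 * userspace, e.g. (assuming the ipvsadm tool and an illustrative VIP):
 *
 *      ipvsadm -A -t 192.168.0.20:80 -s lblc
 */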

static int __init ip_vs_lblc_init(void)
{
        int ret;

        sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
        ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
        if (ret)
                unregister_sysctl_table(sysctl_header);
        return ret;
}


static void __exit ip_vs_lblc_cleanup(void)
{
        unregister_sysctl_table(sysctl_header);
        unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
}


module_init(ip_vs_lblc_init);
module_exit(ip_vs_lblc_cleanup);
MODULE_LICENSE("GPL");