/*
 * IPVS:        Locality-Based Least-Connection scheduling module
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Martin Hamilton  : fixed the terrible locking bugs
 *                        *lock(tbl->lock) ==> *lock(&tbl->lock)
 *     Wensong Zhang    : fixed the uninitialized tbl->lock bug
 *     Wensong Zhang    : added doing full expiration check to
 *                        collect stale entries of 24+ hours when
 *                        no partial expire check in a half hour
 *     Julian Anastasov : replaced del_timer call with del_timer_sync
 *                        to avoid the possible race between timer
 *                        handler and del_timer thread in SMP
 *
 */

/*
 * The lblc algorithm is as follows (pseudo code):
 *
 *     if cachenode[dest_ip] is null then
 *             n, cachenode[dest_ip] <- {weighted least-conn node};
 *     else
 *             n <- cachenode[dest_ip];
 *             if (n is dead) OR
 *                (n.conns>n.weight AND
 *                 there is a node m with m.conns<m.weight/2) then
 *                     n, cachenode[dest_ip] <- {weighted least-conn node};
 *
 *     return n;
 *
 * Thanks must go to Wenzhuo Zhang for talking WCCP to me and pushing
 * me to write this module.
 */
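
/*
 * Illustrative example (editorial addition, hypothetical numbers): suppose
 * the cached node n for a destination has n.conns=12 and n.weight=10, while
 * another node m has m.conns=2 and m.weight=8.  Then n.conns > n.weight and
 * m.conns < m.weight/2 (2 < 4), so the cache entry is reassigned to the
 * current weighted least-connection node instead of sticking to n.
 */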

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>

#include <net/ip_vs.h>


/*
 * These control garbage collection of stale IPVS lblc entries
 * when the table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)

#define DEFAULT_EXPIRATION      (24*60*60*HZ)

/*
 * It is for the full expiration check.
 * When there has been no partial expiration check (garbage collection)
 * in half an hour, do a full expiration check to collect stale
 * entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
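
/*
 * Timing note (editorial addition): the periodic timer below fires every
 * CHECK_EXPIRE_INTERVAL (60*HZ, i.e. once a minute), so a counter of
 * COUNT_FOR_FULL_EXPIRATION == 30 partial checks corresponds to roughly
 * half an hour between full expiration passes, matching the comment above.
 */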


/*
 * for IPVS lblc entry hash table
 */
#ifndef CONFIG_IP_VS_LBLC_TAB_BITS
#define CONFIG_IP_VS_LBLC_TAB_BITS      10
#endif
#define IP_VS_LBLC_TAB_BITS     CONFIG_IP_VS_LBLC_TAB_BITS
#define IP_VS_LBLC_TAB_SIZE     (1 << IP_VS_LBLC_TAB_BITS)
#define IP_VS_LBLC_TAB_MASK     (IP_VS_LBLC_TAB_SIZE - 1)
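
/*
 * Worked defaults (editorial addition): with CONFIG_IP_VS_LBLC_TAB_BITS
 * left at 10, the table has 1 << 10 == 1024 buckets and the mask is 0x3ff,
 * so a hash value is just the low 10 bits of the multiplicative hash
 * computed in ip_vs_lblc_hashkey() below.
 */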


/*
 * IPVS lblc entry represents an association between destination
 * IP address and its destination server
 */
struct ip_vs_lblc_entry {
        struct hlist_node       list;
        int                     af;             /* address family */
        union nf_inet_addr      addr;           /* destination IP address */
        struct ip_vs_dest       *dest;          /* real server (cache) */
        unsigned long           lastuse;        /* last used time */
        struct rcu_head         rcu_head;
};


/*
 * IPVS lblc hash table
 */
struct ip_vs_lblc_table {
        struct rcu_head         rcu_head;
        struct hlist_head       bucket[IP_VS_LBLC_TAB_SIZE];  /* hash bucket */
        struct timer_list       periodic_timer; /* collect stale entries */
        struct ip_vs_service    *svc;           /* pointer back to service */
        atomic_t                entries;        /* number of entries */
        int                     max_size;       /* maximum size of entries */
        int                     rover;          /* rover for expire check */
        int                     counter;        /* counter for no expire */
        bool                    dead;
};


/*
 * IPVS LBLC sysctl table
 */
#ifdef CONFIG_SYSCTL
static struct ctl_table vs_vars_table[] = {
        {
                .procname       = "lblc_expiration",
                .data           = NULL,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};
#endif
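
/*
 * Note (editorial addition): .data is left NULL here on purpose;
 * __ip_vs_lblc_init() below points it at the per-netns
 * ipvs->sysctl_lblc_expiration before the table is registered, so each
 * network namespace gets its own expiration knob.
 */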

static void ip_vs_lblc_rcu_free(struct rcu_head *head)
{
        struct ip_vs_lblc_entry *en = container_of(head,
                                                   struct ip_vs_lblc_entry,
                                                   rcu_head);

        ip_vs_dest_put_and_free(en->dest);
        kfree(en);
}

static inline void ip_vs_lblc_del(struct ip_vs_lblc_entry *en)
{
        hlist_del_rcu(&en->list);
        call_rcu(&en->rcu_head, ip_vs_lblc_rcu_free);
}

/*
 * Returns hash value for IPVS LBLC entry
 */
static inline unsigned int
ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr)
{
        __be32 addr_fold = addr->ip;

#ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
                addr_fold = addr->ip6[0]^addr->ip6[1]^
                            addr->ip6[2]^addr->ip6[3];
#endif
        return (ntohl(addr_fold)*2654435761UL) & IP_VS_LBLC_TAB_MASK;
}
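
/*
 * Background (editorial addition): 2654435761 is the classic Knuth
 * multiplicative-hashing constant, approximately 2^32 divided by the
 * golden ratio; multiplying by it scrambles the address bits so the low
 * IP_VS_LBLC_TAB_BITS bits are well distributed.  For IPv6 the four
 * 32-bit words of the address are first folded together with XOR.
 */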


/*
 * Hash an entry in the ip_vs_lblc_table.
 */
static void
ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
{
        unsigned int hash = ip_vs_lblc_hashkey(en->af, &en->addr);

        hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
        atomic_inc(&tbl->entries);
}


/* Get ip_vs_lblc_entry associated with supplied parameters. */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
               const union nf_inet_addr *addr)
{
        unsigned int hash = ip_vs_lblc_hashkey(af, addr);
        struct ip_vs_lblc_entry *en;

        hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
                if (ip_vs_addr_equal(af, &en->addr, addr))
                        return en;

        return NULL;
}


/*
 * Create or update an ip_vs_lblc_entry, which is a mapping of a destination IP
 * address to a server. Called under spin lock.
 */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
               u16 af, struct ip_vs_dest *dest)
{
        struct ip_vs_lblc_entry *en;

        en = ip_vs_lblc_get(af, tbl, daddr);
        if (en) {
                if (en->dest == dest)
                        return en;
                ip_vs_lblc_del(en);
        }
        en = kmalloc(sizeof(*en), GFP_ATOMIC);
        if (!en)
                return NULL;

        en->af = af;
        ip_vs_addr_copy(af, &en->addr, daddr);
        en->lastuse = jiffies;

        ip_vs_dest_hold(dest);
        en->dest = dest;

        ip_vs_lblc_hash(tbl, en);

        return en;
}


/*
 * Flush all the entries of the specified table.
 */
static void ip_vs_lblc_flush(struct ip_vs_service *svc)
{
        struct ip_vs_lblc_table *tbl = svc->sched_data;
        struct ip_vs_lblc_entry *en;
        struct hlist_node *next;
        int i;

        spin_lock_bh(&svc->sched_lock);
        tbl->dead = true;
        for (i = 0; i < IP_VS_LBLC_TAB_SIZE; i++) {
                hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
                        ip_vs_lblc_del(en);
                        atomic_dec(&tbl->entries);
                }
        }
        spin_unlock_bh(&svc->sched_lock);
}

static int sysctl_lblc_expiration(struct ip_vs_service *svc)
{
#ifdef CONFIG_SYSCTL
        return svc->ipvs->sysctl_lblc_expiration;
#else
        return DEFAULT_EXPIRATION;
#endif
}

static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
{
        struct ip_vs_lblc_table *tbl = svc->sched_data;
        struct ip_vs_lblc_entry *en;
        struct hlist_node *next;
        unsigned long now = jiffies;
        int i, j;

        for (i = 0, j = tbl->rover; i < IP_VS_LBLC_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLC_TAB_MASK;

                spin_lock(&svc->sched_lock);
                hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
                        if (time_before(now,
                                        en->lastuse +
                                        sysctl_lblc_expiration(svc)))
                                continue;

                        ip_vs_lblc_del(en);
                        atomic_dec(&tbl->entries);
                }
                spin_unlock(&svc->sched_lock);
        }
        tbl->rover = j;
}


/*
 * Periodic timer handler for the IPVS lblc table.
 * It is used to collect stale entries when the number of entries
 * exceeds the maximum size of the table.
 *
 * Fixme: we probably need a more complicated algorithm to collect
 *        entries that have not been used for a long time even
 *        if the number of entries doesn't exceed the maximum size
 *        of the table.
 *        The full expiration check is for this purpose now.
 */
static void ip_vs_lblc_check_expire(struct timer_list *t)
{
        struct ip_vs_lblc_table *tbl = from_timer(tbl, t, periodic_timer);
        struct ip_vs_service *svc = tbl->svc;
        unsigned long now = jiffies;
        int goal;
        int i, j;
        struct ip_vs_lblc_entry *en;
        struct hlist_node *next;

        if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
                /* do full expiration check */
                ip_vs_lblc_full_check(svc);
                tbl->counter = 1;
                goto out;
        }

        if (atomic_read(&tbl->entries) <= tbl->max_size) {
                tbl->counter++;
                goto out;
        }

        goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
        if (goal > tbl->max_size/2)
                goal = tbl->max_size/2;
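
        /*
         * Worked example (editorial addition, hypothetical numbers): the
         * goal is 4/3 of the excess over max_size, capped at half of
         * max_size.  With the default table (max_size = 1024*16 = 16384),
         * 17000 entries give an excess of 616 and a goal of 821; the cap
         * of 8192 only kicks in when the table overshoots massively.
         */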

        for (i = 0, j = tbl->rover; i < IP_VS_LBLC_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLC_TAB_MASK;

                spin_lock(&svc->sched_lock);
                hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
                        if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
                                continue;

                        ip_vs_lblc_del(en);
                        atomic_dec(&tbl->entries);
                        goal--;
                }
                spin_unlock(&svc->sched_lock);
                if (goal <= 0)
                        break;
        }
        tbl->rover = j;

out:
        mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
}


static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
{
        int i;
        struct ip_vs_lblc_table *tbl;

        /*
         *    Allocate the ip_vs_lblc_table for this service
         */
        tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
        if (tbl == NULL)
                return -ENOMEM;

        svc->sched_data = tbl;
        IP_VS_DBG(6, "LBLC hash table (memory=%zdbytes) allocated for "
                     "current service\n", sizeof(*tbl));

        /*
         *    Initialize the hash buckets
         */
        for (i = 0; i < IP_VS_LBLC_TAB_SIZE; i++) {
                INIT_HLIST_HEAD(&tbl->bucket[i]);
        }
        tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
        tbl->rover = 0;
        tbl->counter = 1;
        tbl->dead = false;
        tbl->svc = svc;

        /*
         *    Hook periodic timer for garbage collection
         */
        timer_setup(&tbl->periodic_timer, ip_vs_lblc_check_expire, 0);
        mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);

        return 0;
}


static void ip_vs_lblc_done_svc(struct ip_vs_service *svc)
{
        struct ip_vs_lblc_table *tbl = svc->sched_data;

        /* remove periodic timer */
        del_timer_sync(&tbl->periodic_timer);

        /* got to clean up table entries here */
        ip_vs_lblc_flush(svc);

        /* release the table itself */
        kfree_rcu(tbl, rcu_head);
        IP_VS_DBG(6, "LBLC hash table (memory=%zdbytes) released\n",
                  sizeof(*tbl));
}


static inline struct ip_vs_dest *
__ip_vs_lblc_schedule(struct ip_vs_service *svc)
{
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        /*
         * We use the following formula to estimate the load:
         *                (dest overhead) / dest->weight
         *
         * Remember -- no floats in kernel mode!!!
         * The comparison of h1*w2 > h2*w1 is equivalent to that of
         *                h1/w1 > h2/w2
         * if every weight is larger than zero.
         *
         * The server with weight=0 is quiesced and will not receive any
         * new connection.
         */
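
        /*
         * Worked example (editorial addition, hypothetical numbers): if the
         * current least has overhead loh=10 with weight 2 (load 5.0) and a
         * candidate has doh=12 with weight 3 (load 4.0), then
         * loh*w_cand = 30 > doh*w_least = 24, so the candidate becomes the
         * new least despite its higher raw overhead.
         */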
        list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
                if (atomic_read(&dest->weight) > 0) {
                        least = dest;
                        loh = ip_vs_dest_conn_overhead(least);
                        goto nextstage;
                }
        }
        return NULL;

        /*
         *    Find the destination with the least load.
         */
nextstage:
        list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = ip_vs_dest_conn_overhead(dest);
                if ((__s64)loh * atomic_read(&dest->weight) >
                    (__s64)doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "LBLC: server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
                      refcount_read(&least->refcnt),
                      atomic_read(&least->weight), loh);

        return least;
}


/*
 * If this destination server is overloaded and there is a less loaded
 * server, then return true.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
        if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
                struct ip_vs_dest *d;

                list_for_each_entry_rcu(d, &svc->destinations, n_list) {
                        if (atomic_read(&d->activeconns)*2
                            < atomic_read(&d->weight)) {
                                return 1;
                        }
                }
        }
        return 0;
}
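
/*
 * Note (editorial addition): is_overloaded() is the direct translation of
 * the relocation condition in the pseudo code at the top of this file --
 * "n.conns > n.weight AND there is a node m with m.conns < m.weight/2";
 * multiplying activeconns by 2 avoids the integer division of the weight.
 */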


/*
 * Locality-Based (weighted) Least-Connection scheduling
 */
static struct ip_vs_dest *
ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                    struct ip_vs_iphdr *iph)
{
        struct ip_vs_lblc_table *tbl = svc->sched_data;
        struct ip_vs_dest *dest = NULL;
        struct ip_vs_lblc_entry *en;

        IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

        /* First look in our cache */
        en = ip_vs_lblc_get(svc->af, tbl, &iph->daddr);
        if (en) {
                /* We only hold a read lock, but this is atomic */
                en->lastuse = jiffies;

                /*
                 * If the destination is not available, i.e. it's in the trash,
                 * we must ignore it, as it may be removed from under our feet,
                 * if someone drops our reference count. Our caller only makes
                 * sure that destinations, that are not in the trash, are not
                 * moved to the trash, while we are scheduling. But anyone can
                 * free up entries from the trash at any time.
                 */

                dest = en->dest;
                if ((dest->flags & IP_VS_DEST_F_AVAILABLE) &&
                    atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
                        goto out;
        }

        /* No cache entry or it is invalid, time to schedule */
        dest = __ip_vs_lblc_schedule(svc);
        if (!dest) {
                ip_vs_scheduler_err(svc, "no destination available");
                return NULL;
        }

        /* If we fail to create a cache entry, we'll just use the valid dest */
        spin_lock_bh(&svc->sched_lock);
        if (!tbl->dead)
                ip_vs_lblc_new(tbl, &iph->daddr, svc->af, dest);
        spin_unlock_bh(&svc->sched_lock);

out:
        IP_VS_DBG_BUF(6, "LBLC: destination IP address %s --> server %s:%d\n",
                      IP_VS_DBG_ADDR(svc->af, &iph->daddr),
                      IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port));

        return dest;
}


/*
 * IPVS LBLC Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblc_scheduler = {
        .name =                 "lblc",
        .refcnt =               ATOMIC_INIT(0),
        .module =               THIS_MODULE,
        .n_list =               LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
        .init_service =         ip_vs_lblc_init_svc,
        .done_service =         ip_vs_lblc_done_svc,
        .schedule =             ip_vs_lblc_schedule,
};
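
/*
 * Usage note (editorial addition): once this module is loaded, the
 * scheduler is selected from userspace by name, e.g. (VIP and port are
 * placeholders):
 *
 *     ipvsadm -A -t 192.0.2.1:80 -s lblc
 *
 * and the expiration knob registered below appears as
 * /proc/sys/net/ipv4/vs/lblc_expiration in each namespace.
 */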

/*
 *  per netns init.
 */
#ifdef CONFIG_SYSCTL
static int __net_init __ip_vs_lblc_init(struct net *net)
{
        struct netns_ipvs *ipvs = net_ipvs(net);

        if (!ipvs)
                return -ENOENT;

        if (!net_eq(net, &init_net)) {
                ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
                                               sizeof(vs_vars_table),
                                               GFP_KERNEL);
                if (ipvs->lblc_ctl_table == NULL)
                        return -ENOMEM;

                /* Don't export sysctls to unprivileged users */
                if (net->user_ns != &init_user_ns)
                        ipvs->lblc_ctl_table[0].procname = NULL;

        } else
                ipvs->lblc_ctl_table = vs_vars_table;
        ipvs->sysctl_lblc_expiration = DEFAULT_EXPIRATION;
        ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration;

        ipvs->lblc_ctl_header =
                register_net_sysctl(net, "net/ipv4/vs", ipvs->lblc_ctl_table);
        if (!ipvs->lblc_ctl_header) {
                if (!net_eq(net, &init_net))
                        kfree(ipvs->lblc_ctl_table);
                return -ENOMEM;
        }

        return 0;
}

static void __net_exit __ip_vs_lblc_exit(struct net *net)
{
        struct netns_ipvs *ipvs = net_ipvs(net);

        unregister_net_sysctl_table(ipvs->lblc_ctl_header);

        if (!net_eq(net, &init_net))
                kfree(ipvs->lblc_ctl_table);
}

#else

static int __net_init __ip_vs_lblc_init(struct net *net) { return 0; }
static void __net_exit __ip_vs_lblc_exit(struct net *net) { }

#endif

static struct pernet_operations ip_vs_lblc_ops = {
        .init = __ip_vs_lblc_init,
        .exit = __ip_vs_lblc_exit,
        .async = true,
};

static int __init ip_vs_lblc_init(void)
{
        int ret;

        ret = register_pernet_subsys(&ip_vs_lblc_ops);
        if (ret)
                return ret;

        ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
        if (ret)
                unregister_pernet_subsys(&ip_vs_lblc_ops);
        return ret;
}

static void __exit ip_vs_lblc_cleanup(void)
{
        unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
        unregister_pernet_subsys(&ip_vs_lblc_ops);
        rcu_barrier();
}


module_init(ip_vs_lblc_init);
module_exit(ip_vs_lblc_cleanup);
MODULE_LICENSE("GPL");