/*
 * IPVS:        Locality-Based Least-Connection with Replication scheduler
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Julian Anastasov        :    Added the missing (dest->weight>0)
 *                                  condition in the ip_vs_dest_set_max.
 *
 */
/*
 * The lblc/r algorithm is as follows (pseudo code):
 *
 *       if serverSet[dest_ip] is null then
 *               n, serverSet[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- {least-conn (alive) node in serverSet[dest_ip]};
 *               if (n is null) OR
 *                  (n.conns>n.weight AND
 *                   there is a node m with m.conns<m.weight/2) then
 *                   n <- {weighted least-conn node};
 *                   add n to serverSet[dest_ip];
 *               if |serverSet[dest_ip]| > 1 AND
 *                   now - serverSet[dest_ip].lastMod > T then
 *                   m <- {most conn node in serverSet[dest_ip]};
 *                   remove m from serverSet[dest_ip];
 *       if serverSet[dest_ip] changed then
 *               serverSet[dest_ip].lastMod <- now;
 *
 *       return n;
 *
 */
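/*
 * Illustrative trace of the pseudo code above (the numbers are assumed,
 * not from the original): suppose serverSet[D] = {A, B} for some
 * destination IP D, with A.conns=12, A.weight=10 and B.conns=3,
 * B.weight=4.  The least-conn alive node is B.  Since B.conns (3) does
 * not exceed B.weight (4), no extra node is replicated into the set.
 * Replication only triggers when the chosen node n has
 * n.conns > n.weight while some node m has m.conns < m.weight/2; and
 * once |serverSet[D]| > 1 for longer than T, the busiest member is
 * dropped again, shrinking the set back toward a single server.
 */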
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

#include <net/ip_vs.h>
/*
 *    It is for garbage collection of stale IPVS lblcr entries,
 *    when the table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)

/*
 *    It is for full expiration check.
 *    When there is no partial expiration check (garbage collection)
 *    in a half hour, do a full expiration check to collect stale
 *    entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;
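/*
 * How these intervals interact (derived from the constants above): the
 * periodic timer fires every CHECK_EXPIRE_INTERVAL (60 seconds).  Every
 * COUNT_FOR_FULL_EXPIRATION-th firing, i.e. every 30 minutes, a full
 * sweep runs and drops entries idle for longer than
 * sysctl_ip_vs_lblcr_expiration (24 hours by default).  The partial
 * sweeps in between only evict when the table has outgrown its maximum
 * size, and they use the much shorter ENTRY_TIMEOUT (6 minutes) as the
 * idle threshold.
 */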
/*
 *     for IPVS lblcr entry hash table
 */
#ifndef CONFIG_IP_VS_LBLCR_TAB_BITS
#define CONFIG_IP_VS_LBLCR_TAB_BITS      10
#endif
#define IP_VS_LBLCR_TAB_BITS     CONFIG_IP_VS_LBLCR_TAB_BITS
#define IP_VS_LBLCR_TAB_SIZE     (1 << IP_VS_LBLCR_TAB_BITS)
#define IP_VS_LBLCR_TAB_MASK     (IP_VS_LBLCR_TAB_SIZE - 1)
/*
 *      IPVS destination set structure and operations
 */
struct ip_vs_dest_set_elem {
        struct list_head        list;           /* list link */
        struct ip_vs_dest       *dest;          /* destination server */
};

struct ip_vs_dest_set {
        atomic_t                size;           /* set size */
        unsigned long           lastmod;        /* last modified time */
        struct list_head        list;           /* destination list */
        rwlock_t                lock;           /* lock for this list */
};
static struct ip_vs_dest_set_elem *
ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
        struct ip_vs_dest_set_elem *e;

        list_for_each_entry(e, &set->list, list) {
                if (e->dest == dest)
                        /* already existed */
                        return NULL;
        }

        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (e == NULL) {
                pr_err("%s(): no memory\n", __func__);
                return NULL;
        }

        atomic_inc(&dest->refcnt);
        e->dest = dest;

        /* link it to the list */
        list_add(&e->list, &set->list);
        atomic_inc(&set->size);

        set->lastmod = jiffies;
        return e;
}
static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
        struct ip_vs_dest_set_elem *e;

        list_for_each_entry(e, &set->list, list) {
                if (e->dest == dest) {
                        /* HIT */
                        atomic_dec(&set->size);
                        set->lastmod = jiffies;
                        atomic_dec(&e->dest->refcnt);
                        list_del(&e->list);
                        kfree(e);
                        break;
                }
        }
}
static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
{
        struct ip_vs_dest_set_elem *e, *ep;

        write_lock(&set->lock);
        list_for_each_entry_safe(e, ep, &set->list, list) {
                /*
                 * We don't kfree dest because it is referred to either
                 * by its service or by the trash dest list.
                 */
                atomic_dec(&e->dest->refcnt);
                list_del(&e->list);
                kfree(e);
        }
        write_unlock(&set->lock);
}
/* get weighted least-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
{
        register struct ip_vs_dest_set_elem *e;
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        if (set == NULL)
                return NULL;

        /* select the first destination server, whose weight > 0 */
        list_for_each_entry(e, &set->list, list) {
                least = e->dest;
                if (least->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                if ((atomic_read(&least->weight) > 0)
                    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
                        loh = atomic_read(&least->activeconns) * 50
                                + atomic_read(&least->inactconns);
                        goto nextstage;
                }
        }
        return NULL;

        /* find the destination with the weighted least load */
nextstage:
        list_for_each_entry(e, &set->list, list) {
                dest = e->dest;
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                if ((loh * atomic_read(&dest->weight) >
                     doh * atomic_read(&least->weight))
                    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "%s(): server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      __func__,
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
                      atomic_read(&least->refcnt),
                      atomic_read(&least->weight), loh);
        return least;
}
/* get weighted most-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
{
        register struct ip_vs_dest_set_elem *e;
        struct ip_vs_dest *dest, *most;
        int moh, doh;

        if (set == NULL)
                return NULL;

        /* select the first destination server, whose weight > 0 */
        list_for_each_entry(e, &set->list, list) {
                most = e->dest;
                if (atomic_read(&most->weight) > 0) {
                        moh = atomic_read(&most->activeconns) * 50
                                + atomic_read(&most->inactconns);
                        goto nextstage;
                }
        }
        return NULL;

        /* find the destination with the weighted most load */
nextstage:
        list_for_each_entry(e, &set->list, list) {
                dest = e->dest;
                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
                if ((moh * atomic_read(&dest->weight) <
                     doh * atomic_read(&most->weight))
                    && (atomic_read(&dest->weight) > 0)) {
                        most = dest;
                        moh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "%s(): server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      __func__,
                      IP_VS_DBG_ADDR(most->af, &most->addr), ntohs(most->port),
                      atomic_read(&most->activeconns),
                      atomic_read(&most->refcnt),
                      atomic_read(&most->weight), moh);
        return most;
}
/*
 *      IPVS lblcr entry represents an association between destination
 *      IP address and its destination server set
 */
struct ip_vs_lblcr_entry {
        struct list_head        list;
        int                     af;             /* address family */
        union nf_inet_addr      addr;           /* destination IP address */
        struct ip_vs_dest_set   set;            /* destination server set */
        unsigned long           lastuse;        /* last used time */
};
/*
 *      IPVS lblcr hash table
 */
struct ip_vs_lblcr_table {
        struct list_head        bucket[IP_VS_LBLCR_TAB_SIZE];  /* hash bucket */
        atomic_t                entries;        /* number of entries */
        int                     max_size;       /* maximum size of entries */
        struct timer_list       periodic_timer; /* collect stale entries */
        int                     rover;          /* rover for expire check */
        int                     counter;        /* counter for no expire */
};
/*
 *      IPVS LBLCR sysctl table
 */

static ctl_table vs_vars_table[] = {
        {
                .procname       = "lblcr_expiration",
                .data           = &sysctl_ip_vs_lblcr_expiration,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};
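/*
 * Usage note (assuming the standard IPVS sysctl base path net.ipv4.vs):
 * proc_dointvec_jiffies converts between seconds in userspace and
 * jiffies in sysctl_ip_vs_lblcr_expiration, so for example
 *
 *     sysctl -w net.ipv4.vs.lblcr_expiration=86400
 *
 * keeps the default 24-hour expiration.
 */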
static struct ctl_table_header *sysctl_header;

static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
        list_del(&en->list);
        ip_vs_dest_set_eraseall(&en->set);
        kfree(en);
}
/*
 *      Returns hash value for IPVS LBLCR entry
 */
static inline unsigned
ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
{
        __be32 addr_fold = addr->ip;

#ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
                addr_fold = addr->ip6[0]^addr->ip6[1]^
                            addr->ip6[2]^addr->ip6[3];
#endif
        return (ntohl(addr_fold)*2654435761UL) & IP_VS_LBLCR_TAB_MASK;
}
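/*
 * Worked example (illustrative, not from the original): 2654435761 is
 * close to 2^32 times the golden-ratio conjugate, the classic constant
 * of Knuth's multiplicative hashing.  For the IPv4 address 10.0.0.1
 * with the default 10-bit table:
 *
 *     ntohl(addr_fold)          = 0x0a000001
 *     0x0a000001 * 2654435761UL = 0x883779b1  (mod 2^32)
 *     0x883779b1 & 0x3ff        = 0x1b1       (bucket 433)
 *
 * All traffic for that destination IP therefore maps to one bucket and
 * shares one lblcr entry.
 */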
/*
 *      Hash an entry in the ip_vs_lblcr_table.
 */
static void
ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
{
        unsigned hash = ip_vs_lblcr_hashkey(en->af, &en->addr);

        list_add(&en->list, &tbl->bucket[hash]);
        atomic_inc(&tbl->entries);
}
/*
 *  Get ip_vs_lblcr_entry associated with supplied parameters. Called under
 *  read lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
                const union nf_inet_addr *addr)
{
        unsigned hash = ip_vs_lblcr_hashkey(af, addr);
        struct ip_vs_lblcr_entry *en;

        list_for_each_entry(en, &tbl->bucket[hash], list)
                if (ip_vs_addr_equal(af, &en->addr, addr))
                        return en;

        return NULL;
}
/*
 * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
 * IP address to a server. Called under write lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
                struct ip_vs_dest *dest)
{
        struct ip_vs_lblcr_entry *en;

        en = ip_vs_lblcr_get(dest->af, tbl, daddr);
        if (!en) {
                en = kmalloc(sizeof(*en), GFP_ATOMIC);
                if (!en) {
                        pr_err("%s(): no memory\n", __func__);
                        return NULL;
                }

                en->af = dest->af;
                ip_vs_addr_copy(dest->af, &en->addr, daddr);
                en->lastuse = jiffies;

                /* initialize its dest set */
                atomic_set(&(en->set.size), 0);
                INIT_LIST_HEAD(&en->set.list);
                rwlock_init(&en->set.lock);

                ip_vs_lblcr_hash(tbl, en);
        }

        write_lock(&en->set.lock);
        ip_vs_dest_set_insert(&en->set, dest);
        write_unlock(&en->set.lock);

        return en;
}
/*
 *      Flush all the entries of the specified table.
 */
static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
{
        int i;
        struct ip_vs_lblcr_entry *en, *nxt;

        /* No locking required, only called during cleanup. */
        for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
                        ip_vs_lblcr_free(en);
                }
        }
}
static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        unsigned long now = jiffies;
        int i, j;
        struct ip_vs_lblcr_entry *en, *nxt;

        for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_after(en->lastuse +
                                       sysctl_ip_vs_lblcr_expiration, now))
                                continue;

                        ip_vs_lblcr_free(en);
                        atomic_dec(&tbl->entries);
                }
                write_unlock(&svc->sched_lock);
        }
        tbl->rover = j;
}
/*
 *      Periodical timer handler for IPVS lblcr table
 *      It is used to collect stale entries when the number of entries
 *      exceeds the maximum size of the table.
 *
 *      Fixme: we probably need more complicated algorithm to collect
 *             entries that have not been used for a long time even
 *             if the number of entries doesn't exceed the maximum size
 *             of the table.
 *      The full expiration check is for this purpose now.
 */
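/*
 * Illustrative sizing example (numbers assumed, not from the original):
 * with the default 10-bit table, max_size = 1024*16 = 16384.  If the
 * table has grown to 17000 entries, a partial sweep below aims for
 * goal = (17000 - 16384)*4/3 = 821 evictions, and the goal is capped at
 * max_size/2 = 8192 so a single timer tick never drains more than half
 * the table.
 */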
static void ip_vs_lblcr_check_expire(unsigned long data)
{
        struct ip_vs_service *svc = (struct ip_vs_service *) data;
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        unsigned long now = jiffies;
        int goal;
        int i, j;
        struct ip_vs_lblcr_entry *en, *nxt;

        if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
                /* do full expiration check */
                ip_vs_lblcr_full_check(svc);
                tbl->counter = 1;
                goto out;
        }

        if (atomic_read(&tbl->entries) <= tbl->max_size) {
                tbl->counter++;
                goto out;
        }

        goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
        if (goal > tbl->max_size/2)
                goal = tbl->max_size/2;

        for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
                                continue;

                        ip_vs_lblcr_free(en);
                        atomic_dec(&tbl->entries);
                        goal--;
                }
                write_unlock(&svc->sched_lock);
                if (goal <= 0)
                        break;
        }
        tbl->rover = j;

out:
        mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
}
static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
{
        int i;
        struct ip_vs_lblcr_table *tbl;

        /*
         *    Allocate the ip_vs_lblcr_table for this service
         */
        tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
        if (tbl == NULL) {
                pr_err("%s(): no memory\n", __func__);
                return -ENOMEM;
        }
        svc->sched_data = tbl;
        IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
                  "current service\n", sizeof(*tbl));

        /*
         *    Initialize the hash buckets
         */
        for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                INIT_LIST_HEAD(&tbl->bucket[i]);
        }
        tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
        tbl->rover = 0;
        tbl->counter = 1;

        /*
         *    Hook periodic timer for garbage collection
         */
        setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire,
                    (unsigned long)svc);
        mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);

        return 0;
}
static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;

        /* remove periodic timer */
        del_timer_sync(&tbl->periodic_timer);

        /* got to clean up table entries here */
        ip_vs_lblcr_flush(tbl);

        /* release the table itself */
        kfree(tbl);
        IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
                  sizeof(*tbl));

        return 0;
}
static inline struct ip_vs_dest *
__ip_vs_lblcr_schedule(struct ip_vs_service *svc)
{
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        /*
         * We think the overhead of processing active connections is fifty
         * times higher than that of inactive connections in average. (This
         * fifty times might not be accurate, we will change it later.) We
         * use the following formula to estimate the overhead:
         *                dest->activeconns*50 + dest->inactconns
         * and the load:
         *                (dest overhead) / dest->weight
         *
         * Remember -- no floats in kernel mode!!!
         * The comparison of h1*w2 > h2*w1 is equivalent to that of
         *                h1/w1 > h2/w2
         * if every weight is larger than zero.
         *
         * The server with weight=0 is quiesced and will not receive any
         * new connection.
         */
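        /*
         * Worked example with assumed numbers (not from the original):
         * server A: activeconns=10, inactconns=20, weight=2
         *           -> overhead loh = 10*50 + 20 = 520
         * server B: activeconns=3,  inactconns=5,  weight=1
         *           -> overhead doh = 3*50 + 5 = 155
         * Cross-multiplied test: loh*w_B = 520*1 = 520 exceeds
         * doh*w_A = 155*2 = 310, i.e. 520/2 > 155/1, so B carries the
         * smaller weighted load and becomes the new "least".
         */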
        list_for_each_entry(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                if (atomic_read(&dest->weight) > 0) {
                        least = dest;
                        loh = atomic_read(&least->activeconns) * 50
                                + atomic_read(&least->inactconns);
                        goto nextstage;
                }
        }
        return NULL;

        /*
         *    Find the destination with the least load.
         */
nextstage:
        list_for_each_entry_continue(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                if (loh * atomic_read(&dest->weight) >
                    doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "LBLCR: server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
                      atomic_read(&least->refcnt),
                      atomic_read(&least->weight), loh);

        return least;
}
/*
 *   If this destination server is overloaded and there is a less loaded
 *   server, then return true.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
        if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
                struct ip_vs_dest *d;

                list_for_each_entry(d, &svc->destinations, n_list) {
                        if (atomic_read(&d->activeconns)*2
                            < atomic_read(&d->weight)) {
                                return 1;
                        }
                }
        }
        return 0;
}
/*
 *    Locality-Based (weighted) Least-Connection scheduling
 */
static struct ip_vs_dest *
ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        struct ip_vs_iphdr iph;
        struct ip_vs_dest *dest = NULL;
        struct ip_vs_lblcr_entry *en;

        ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);

        IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

        /* First look in our cache */
        read_lock(&svc->sched_lock);
        en = ip_vs_lblcr_get(svc->af, tbl, &iph.daddr);
        if (en) {
                /* We only hold a read lock, but this is atomic */
                en->lastuse = jiffies;

                /* Get the least loaded destination */
                read_lock(&en->set.lock);
                dest = ip_vs_dest_set_min(&en->set);
                read_unlock(&en->set.lock);

                /* More than one destination + enough time passed by, cleanup */
                if (atomic_read(&en->set.size) > 1 &&
                    time_after(jiffies, en->set.lastmod +
                               sysctl_ip_vs_lblcr_expiration)) {
                        struct ip_vs_dest *m;

                        write_lock(&en->set.lock);
                        m = ip_vs_dest_set_max(&en->set);
                        if (m)
                                ip_vs_dest_set_erase(&en->set, m);
                        write_unlock(&en->set.lock);
                }

                /* If the destination is not overloaded, use it */
                if (dest && !is_overloaded(dest, svc)) {
                        read_unlock(&svc->sched_lock);
                        goto out;
                }

                /* The cache entry is invalid, time to schedule */
                dest = __ip_vs_lblcr_schedule(svc);
                if (!dest) {
                        IP_VS_ERR_RL("LBLCR: no destination available\n");
                        read_unlock(&svc->sched_lock);
                        return NULL;
                }

                /* Update our cache entry */
                write_lock(&en->set.lock);
                ip_vs_dest_set_insert(&en->set, dest);
                write_unlock(&en->set.lock);
        }
        read_unlock(&svc->sched_lock);

        if (dest)
                goto out;

        /* No cache entry, time to schedule */
        dest = __ip_vs_lblcr_schedule(svc);
        if (!dest) {
                IP_VS_DBG(1, "no destination available\n");
                return NULL;
        }

        /* If we fail to create a cache entry, we'll just use the valid dest */
        write_lock(&svc->sched_lock);
        ip_vs_lblcr_new(tbl, &iph.daddr, dest);
        write_unlock(&svc->sched_lock);

out:
        IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",
                      IP_VS_DBG_ADDR(svc->af, &iph.daddr),
                      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port));

        return dest;
}
/*
 *      IPVS LBLCR Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
{
        .name =                 "lblcr",
        .refcnt =               ATOMIC_INIT(0),
        .module =               THIS_MODULE,
        .n_list =               LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
        .init_service =         ip_vs_lblcr_init_svc,
        .done_service =         ip_vs_lblcr_done_svc,
        .schedule =             ip_vs_lblcr_schedule,
};
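/*
 * Usage note: a virtual service is bound to this scheduler by name,
 * e.g. with ipvsadm (the VIP and port here are assumed examples):
 *
 *     ipvsadm -A -t 207.175.44.110:80 -s lblcr
 *
 * after which ip_vs_lblcr_schedule() above picks a real server for each
 * new connection to that service.
 */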
static int __init ip_vs_lblcr_init(void)
{
        int ret;

        sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
        ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
        if (ret)
                unregister_sysctl_table(sysctl_header);
        return ret;
}
static void __exit ip_vs_lblcr_cleanup(void)
{
        unregister_sysctl_table(sysctl_header);
        unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
}
module_init(ip_vs_lblcr_init);
module_exit(ip_vs_lblcr_cleanup);
MODULE_LICENSE("GPL");