#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>
+#include <linux/list.h>
/* for sysctl */
#include <linux/fs.h>
/*
* IPVS destination set structure and operations
*/
-struct ip_vs_dest_list {
- struct ip_vs_dest_list *next; /* list link */
+/* one member of an ip_vs_dest_set; chained on ip_vs_dest_set.list */
+struct ip_vs_dest_set_elem {
+ struct list_head list; /* list link */
struct ip_vs_dest *dest; /* destination server */
};
struct ip_vs_dest_set {
atomic_t size; /* set size */
unsigned long lastmod; /* last modified time */
- struct ip_vs_dest_list *list; /* destination list */
+ struct list_head list; /* destination list */
+ /* NOTE(review): 'lock' presumably guards 'list' (size is atomic) — confirm against callers */
rwlock_t lock; /* lock for this list */
};
+/*
+ * Add @dest to @set unless it is already a member.
+ * Returns NULL when @dest is already in the set; otherwise takes a
+ * reference on @dest, links the new element at the head of the set and
+ * bumps size/lastmod.
+ * NOTE(review): caller presumably holds set->lock for writing — confirm.
+ */
-static struct ip_vs_dest_list *
+static struct ip_vs_dest_set_elem *
ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
- struct ip_vs_dest_list *e;
+ struct ip_vs_dest_set_elem *e;
- for (e=set->list; e!=NULL; e=e->next) {
+ list_for_each_entry(e, &set->list, list) {
if (e->dest == dest)
/* already existed */
return NULL;
atomic_inc(&dest->refcnt);
e->dest = dest;
- /* link it to the list */
- e->next = set->list;
- set->list = e;
+ list_add(&e->list, &set->list);
atomic_inc(&set->size);
set->lastmod = jiffies;
+/*
+ * Remove @dest from @set: drop the set's reference on it, unlink and
+ * free the element, and update size/lastmod.  No-op if @dest is not a
+ * member.
+ * NOTE(review): caller presumably holds set->lock for writing — confirm
+ * (unlike ip_vs_dest_set_eraseall, no lock is taken here).
+ */
static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
- struct ip_vs_dest_list *e, **ep;
+ struct ip_vs_dest_set_elem *e;
- for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
+ list_for_each_entry(e, &set->list, list) {
if (e->dest == dest) {
/* HIT */
- *ep = e->next;
atomic_dec(&set->size);
set->lastmod = jiffies;
atomic_dec(&e->dest->refcnt);
+ /* plain (non-_safe) iterator is fine: we break right after list_del */
+ list_del(&e->list);
kfree(e);
break;
}
- ep = &e->next;
}
}
+/*
+ * Empty @set: drop each element's reference on its destination and free
+ * the element.  Takes set->lock for writing itself.
+ * NOTE(review): set->size is not reset here — presumably the whole set
+ * is being destroyed right after; confirm at the call sites.
+ */
static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
{
- struct ip_vs_dest_list *e, **ep;
+ struct ip_vs_dest_set_elem *e, *ep;
write_lock(&set->lock);
- for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
- *ep = e->next;
+ /* _safe iterator required: elements are freed while walking */
+ list_for_each_entry_safe(e, ep, &set->list, list) {
/*
* We don't kfree dest because it is refered either
* by its service or by the trash dest list.
*/
atomic_dec(&e->dest->refcnt);
+ list_del(&e->list);
kfree(e);
}
write_unlock(&set->lock);
/* get weighted least-connection node in the destination set */
+/* Returns NULL when the set has no usable member (weight > 0, not overloaded). */
static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
{
- register struct ip_vs_dest_list *e;
+ register struct ip_vs_dest_set_elem *e;
struct ip_vs_dest *dest, *least;
int loh, doh;
return NULL;
/* select the first destination server, whose weight > 0 */
- for (e=set->list; e!=NULL; e=e->next) {
+ list_for_each_entry(e, &set->list, list) {
least = e->dest;
if (least->flags & IP_VS_DEST_F_OVERLOAD)
continue;
/* find the destination with the weighted least load */
nextstage:
- for (e=e->next; e!=NULL; e=e->next) {
+ /* resume after the entry chosen above (same as the old e=e->next) */
+ list_for_each_entry_continue(e, &set->list, list) {
dest = e->dest;
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
/* get weighted most-connection node in the destination set */
+/* Returns NULL when the set has no member with weight > 0. */
static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
{
- register struct ip_vs_dest_list *e;
+ register struct ip_vs_dest_set_elem *e;
struct ip_vs_dest *dest, *most;
int moh, doh;
return NULL;
/* select the first destination server, whose weight > 0 */
- for (e=set->list; e!=NULL; e=e->next) {
+ list_for_each_entry(e, &set->list, list) {
most = e->dest;
if (atomic_read(&most->weight) > 0) {
moh = atomic_read(&most->activeconns) * 50
/* find the destination with the weighted most load */
nextstage:
- for (e=e->next; e!=NULL; e=e->next) {
+ /* resume after the entry chosen above (same as the old e=e->next) */
+ list_for_each_entry_continue(e, &set->list, list) {
dest = e->dest;
doh = atomic_read(&dest->activeconns) * 50
+ atomic_read(&dest->inactconns);
/* initilize its dest set */
atomic_set(&(en->set.size), 0);
- en->set.list = NULL;
+ INIT_LIST_HEAD(&en->set.list);
+ /* set starts empty; members are added later via ip_vs_dest_set_insert() */
rwlock_init(&en->set.lock);
ip_vs_lblcr_hash(tbl, en);