* The kadt functions don't use the comment extensions in any way.
*/
static inline void
-ip_set_init_comment(struct ip_set_comment *comment,
+ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
const struct ip_set_ext *ext)
{
struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
size_t len = ext->comment ? strlen(ext->comment) : 0;
if (unlikely(c)) {
+ set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
kfree_rcu(c, rcu);
rcu_assign_pointer(comment->c, NULL);
}
if (unlikely(!c))
return;
strlcpy(c->str, ext->comment, len + 1);
+ set->ext_size += sizeof(*c) + strlen(c->str) + 1;
rcu_assign_pointer(comment->c, c);
}
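The pair of adjustments above keeps set->ext_size equal to the bytes currently allocated for the comment extension: the old allocation (struct header plus string and terminating NUL) is subtracted before it is freed, and the new one is added only after a successful copy. A minimal user-space sketch of the same bookkeeping, with invented names and a plain pointer standing in for the RCU-protected one:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-ins for the kernel structures; illustration only. */
struct comment_rcu {
	void *rcu_placeholder;		/* stands in for the kernel's struct rcu_head */
	char str[];
};

struct set_model {
	size_t ext_size;
	struct comment_rcu *c;
};

static void model_init_comment(struct set_model *set, const char *text)
{
	size_t len = text ? strlen(text) : 0;

	if (set->c) {			/* drop the old comment and its accounting first */
		set->ext_size -= sizeof(*set->c) + strlen(set->c->str) + 1;
		free(set->c);
		set->c = NULL;
	}
	if (!len)
		return;
	set->c = malloc(sizeof(*set->c) + len + 1);
	if (!set->c)
		return;
	memcpy(set->c->str, text, len + 1);
	set->ext_size += sizeof(*set->c) + strlen(set->c->str) + 1;
}

int main(void)
{
	struct set_model s = { 0, NULL };

	model_init_comment(&s, "lan hosts");
	printf("after set:     ext_size=%zu\n", s.ext_size);
	model_init_comment(&s, "dmz");		/* replace: old bytes out, new bytes in */
	printf("after replace: ext_size=%zu\n", s.ext_size);
	model_init_comment(&s, NULL);		/* clear: back to zero */
	printf("after clear:   ext_size=%zu\n", s.ext_size);
	return 0;
}

Whatever path the function takes, ext_size reflects exactly what is allocated at return, which is what makes the totals reported to userspace later on trustworthy.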
* of the set data anymore.
*/
static inline void
-ip_set_comment_free(struct ip_set_comment *comment)
+ip_set_comment_free(struct ip_set *set, struct ip_set_comment *comment)
{
struct ip_set_comment_rcu *c;
c = rcu_dereference_protected(comment->c, 1);
if (unlikely(!c))
return;
+ set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
kfree_rcu(c, rcu);
rcu_assign_pointer(comment->c, NULL);
}
mtype_ext_cleanup(set);
memset(map->members, 0, map->memsize);
set->elements = 0;
+ set->ext_size = 0;
}
/* Calculate the actual memory size of the set data */
{
const struct mtype *map = set->data;
struct nlattr *nested;
- size_t memsize = mtype_memsize(map, set->dsize);
+ size_t memsize = mtype_memsize(map, set->dsize) + set->ext_size;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
if (SET_WITH_COUNTER(set))
ip_set_init_counter(ext_counter(x, set), ext);
if (SET_WITH_COMMENT(set))
- ip_set_init_comment(ext_comment(x, set), ext);
+ ip_set_init_comment(set, ext_comment(x, set), ext);
if (SET_WITH_SKBINFO(set))
ip_set_init_skbinfo(ext_skbinfo(x, set), ext);
/* Calculate the actual memory size of the set data */
static size_t
mtype_ahash_memsize(const struct htype *h, const struct htable *t,
- u8 nets_length, size_t dsize)
+ u8 nets_length)
{
- u32 i;
- struct hbucket *n;
size_t memsize = sizeof(*h) + sizeof(*t);
#ifdef IP_SET_HASH_WITH_NETS
memsize += sizeof(struct net_prefixes) * nets_length;
#endif
- for (i = 0; i < jhash_size(t->htable_bits); i++) {
- n = rcu_dereference_bh(hbucket(t, i));
- if (!n)
- continue;
- memsize += sizeof(struct hbucket) + n->size * dsize;
- }
return memsize;
}
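Because every bucket allocation, expansion, shrink and free now adjusts set->ext_size at the point where it happens, the size query no longer has to walk the whole hash table under RCU as the deleted loop did; it just adds a maintained counter to the fixed part. A small self-contained model (names and sizes invented) showing that the incrementally maintained counter agrees with a full walk:

#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS	8
#define AHASH_INIT_SIZE	4
#define DSIZE		32	/* pretend element size */

struct bucket { unsigned char size, pos; };	/* header only; element storage not modelled */

static struct bucket *slot[NBUCKETS];
static size_t ext_size;				/* maintained as buckets change */

/* Old style: pay on every query by walking all buckets. */
static size_t memsize_by_walk(void)
{
	size_t sum = 0;

	for (int i = 0; i < NBUCKETS; i++)
		if (slot[i])
			sum += sizeof(struct bucket) + slot[i]->size * DSIZE;
	return sum;
}

/* New style: pay when a bucket changes, read a counter when asked. */
static void create_bucket(int i)
{
	slot[i] = calloc(1, sizeof(struct bucket));
	if (!slot[i])
		exit(1);
	slot[i]->size = AHASH_INIT_SIZE;
	ext_size += sizeof(struct bucket) + AHASH_INIT_SIZE * DSIZE;
}

static void expand_bucket(int i)
{
	slot[i]->size += AHASH_INIT_SIZE;
	ext_size += AHASH_INIT_SIZE * DSIZE;
}

int main(void)
{
	create_bucket(1);
	create_bucket(5);
	expand_bucket(5);
	printf("walk: %zu  counter: %zu\n", memsize_by_walk(), ext_size);
	return 0;
}

The two numbers stay equal as long as every allocation change is paired with an ext_size update, which is what the remaining hunks of this change take care of.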
memset(h->nets, 0, sizeof(struct net_prefixes) * NLEN(set->family));
#endif
set->elements = 0;
+ set->ext_size = 0;
}
/* Destroy the hashtable part of the set */
d++;
}
tmp->pos = d;
+ set->ext_size -= AHASH_INIT_SIZE * dsize;
rcu_assign_pointer(hbucket(t, i), tmp);
kfree_rcu(n, rcu);
}
struct htype *h = set->data;
struct htable *t, *orig;
u8 htable_bits;
- size_t dsize = set->dsize;
+ size_t extsize, dsize = set->dsize;
#ifdef IP_SET_HASH_WITH_NETS
u8 flags;
struct mtype_elem *tmp;
/* There can't be another parallel resizing, but dumping is possible */
atomic_set(&orig->ref, 1);
atomic_inc(&orig->uref);
+ extsize = 0;
pr_debug("attempt to resize set %s from %u to %u, t %p\n",
set->name, orig->htable_bits, htable_bits, orig);
for (i = 0; i < jhash_size(orig->htable_bits); i++) {
goto cleanup;
}
m->size = AHASH_INIT_SIZE;
+ extsize += sizeof(*m) + AHASH_INIT_SIZE * dsize;
RCU_INIT_POINTER(hbucket(t, key), m);
} else if (m->pos >= m->size) {
struct hbucket *ht;
memcpy(ht, m, sizeof(struct hbucket) +
m->size * dsize);
ht->size = m->size + AHASH_INIT_SIZE;
+ extsize += AHASH_INIT_SIZE * dsize;
kfree(m);
m = ht;
RCU_INIT_POINTER(hbucket(t, key), ht);
}
}
rcu_assign_pointer(h->table, t);
+ set->ext_size = extsize;
spin_unlock_bh(&set->lock);
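Note how the resize path accumulates the new table's bucket memory in a local extsize and only assigns set->ext_size after the new table has been published with rcu_assign_pointer(): if an allocation fails halfway and the code bails out through the cleanup label, the accounting of the still-active table is never disturbed. A rough user-space sketch of that build-then-publish pattern (all names and sizes invented):

#include <stdio.h>
#include <stdlib.h>

struct table { size_t nbuckets; size_t bucket_bytes; };	/* toy stand-in for struct htable */
struct set_model { struct table *table; size_t ext_size; };

/* Build a bigger table; publish it and its accounting only if everything succeeded. */
static int model_resize(struct set_model *set, size_t new_nbuckets)
{
	size_t extsize = 0;			/* local, like in the hunk above */
	struct table *t = calloc(1, sizeof(*t));

	if (!t)
		return -1;			/* old table and old ext_size stay valid */
	t->nbuckets = new_nbuckets;
	for (size_t i = 0; i < new_nbuckets; i++) {
		t->bucket_bytes += 64;		/* pretend a bucket was allocated for slot i */
		extsize += 64;			/* accumulate per bucket, don't overwrite */
	}
	free(set->table);			/* "publish": install the new table ... */
	set->table = t;
	set->ext_size = extsize;		/* ... and only then its accounting */
	return 0;
}

int main(void)
{
	struct set_model s = { NULL, 0 };

	model_resize(&s, 4);
	printf("ext_size after resize to 4 buckets: %zu\n", s.ext_size);
	model_resize(&s, 8);
	printf("ext_size after resize to 8 buckets: %zu\n", s.ext_size);
	free(s.table);
	return 0;
}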
if (!n)
return -ENOMEM;
n->size = AHASH_INIT_SIZE;
+ set->ext_size += sizeof(*n) + AHASH_INIT_SIZE * set->dsize;
goto copy_elem;
}
for (i = 0; i < n->pos; i++) {
memcpy(n, old, sizeof(struct hbucket) +
old->size * set->dsize);
n->size = old->size + AHASH_INIT_SIZE;
+ set->ext_size += AHASH_INIT_SIZE * set->dsize;
}
copy_elem:
if (SET_WITH_COUNTER(set))
ip_set_init_counter(ext_counter(data, set), ext);
if (SET_WITH_COMMENT(set))
- ip_set_init_comment(ext_comment(data, set), ext);
+ ip_set_init_comment(set, ext_comment(data, set), ext);
if (SET_WITH_SKBINFO(set))
ip_set_init_skbinfo(ext_skbinfo(data, set), ext);
/* Must come last for the case when timed out entry is reused */
k++;
}
if (n->pos == 0 && k == 0) {
+ set->ext_size -= sizeof(*n) + n->size * dsize;
rcu_assign_pointer(hbucket(t, key), NULL);
kfree_rcu(n, rcu);
} else if (k >= AHASH_INIT_SIZE) {
k++;
}
tmp->pos = k;
+ set->ext_size -= AHASH_INIT_SIZE * dsize;
rcu_assign_pointer(hbucket(t, key), tmp);
kfree_rcu(n, rcu);
}
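The delete side mirrors the add side: when a bucket becomes empty its entire footprint (header plus all n->size slots) is subtracted before it is freed, and when only some slack is reclaimed the bucket is reallocated one AHASH_INIT_SIZE step smaller and just that step is subtracted. To make the arithmetic concrete, a tiny sketch with hypothetical sizes (BUCKET_HDR and DSIZE are invented placeholders for sizeof(struct hbucket) and set->dsize):

#include <stdio.h>

#define AHASH_INIT_SIZE	4
#define BUCKET_HDR	16	/* placeholder for sizeof(struct hbucket) */
#define DSIZE		40	/* placeholder for set->dsize */

int main(void)
{
	size_t ext_size = BUCKET_HDR + 8 * DSIZE;	/* one 8-slot bucket currently allocated */

	printf("start:              %zu\n", ext_size);

	/* Deletions left enough slack: bucket shrunk by one AHASH_INIT_SIZE step. */
	ext_size -= AHASH_INIT_SIZE * DSIZE;
	printf("after shrink step:  %zu\n", ext_size);

	/* Last element removed: the remaining 4-slot bucket is freed outright. */
	ext_size -= BUCKET_HDR + 4 * DSIZE;
	printf("after bucket freed: %zu\n", ext_size);	/* back to 0 */
	return 0;
}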
rcu_read_lock_bh();
t = rcu_dereference_bh_nfnl(h->table);
- memsize = mtype_ahash_memsize(h, t, NLEN(set->family), set->dsize);
+ memsize = mtype_ahash_memsize(h, t, NLEN(set->family)) + set->ext_size;
htable_bits = t->htable_bits;
rcu_read_unlock_bh();
if (SET_WITH_COUNTER(set))
ip_set_init_counter(ext_counter(e, set), ext);
if (SET_WITH_COMMENT(set))
- ip_set_init_comment(ext_comment(e, set), ext);
+ ip_set_init_comment(set, ext_comment(e, set), ext);
if (SET_WITH_SKBINFO(set))
ip_set_init_skbinfo(ext_skbinfo(e, set), ext);
/* Update timeout last */
list_for_each_entry_safe(e, n, &map->members, list)
list_set_del(set, e);
set->elements = 0;
+ set->ext_size = 0;
}
static void
{
const struct list_set *map = set->data;
struct nlattr *nested;
- size_t memsize = list_set_memsize(map, set->dsize);
+ size_t memsize = list_set_memsize(map, set->dsize) + set->ext_size;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
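With each back end keeping its own ext_size up to date, the bitmap, hash and list ->head() routines above all report the same kind of total through IPSET_ATTR_MEMSIZE: the statically sized part returned by the respective *_memsize() helper plus set->ext_size for everything allocated on demand (hash buckets, comment strings and so on). That total is what the ipset command-line tool prints as the "Size in memory" line of `ipset list`. A trivial illustration of how the reported figure is composed, using made-up numbers rather than real structure sizes:

#include <stdio.h>

int main(void)
{
	/* Hypothetical example values, only to show what goes into the sum. */
	size_t fixed_part = 1184;			/* e.g. what mtype_memsize()/list_set_memsize() return */
	size_t ext_size   = 3 * (16 + 4 * 40) + 32;	/* say: three small buckets plus one comment */

	printf("IPSET_ATTR_MEMSIZE would carry %zu bytes\n", fixed_part + ext_size);
	return 0;
}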