static struct hlist_node *ct_get_first(struct seq_file *seq)
{
struct ct_iter_state *st = seq->private;
+ struct hlist_node *n;
for (st->bucket = 0;
st->bucket < nf_conntrack_htable_size;
st->bucket++) {
- if (!hlist_empty(&nf_conntrack_hash[st->bucket]))
- return nf_conntrack_hash[st->bucket].first;
+ n = rcu_dereference(nf_conntrack_hash[st->bucket].first);
+ if (n)
+ return n;
}
return NULL;
}
static struct hlist_node *ct_get_next(struct seq_file *seq,
struct hlist_node *head)
{
struct ct_iter_state *st = seq->private;
- head = head->next;
+ head = rcu_dereference(head->next);
while (head == NULL) {
if (++st->bucket >= nf_conntrack_htable_size)
return NULL;
- head = nf_conntrack_hash[st->bucket].first;
+ head = rcu_dereference(nf_conntrack_hash[st->bucket].first);
}
return head;
}
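
Both proc iterators now fetch every chain pointer through rcu_dereference(), which on weakly ordered CPUs orders the pointer load against later loads from the node it points to, and pairs with the publishing barrier inside hlist_add_head_rcu() on the insert path below. The walk itself is unchanged; only the loads are annotated. A minimal sketch of the pattern with hypothetical names (first_node() and its arguments are not part of the patch); the caller must already hold rcu_read_lock():

    /* Return the first node of an RCU-protected hash table. */
    static struct hlist_node *first_node(struct hlist_head *table,
                                         unsigned int nbuckets)
    {
        struct hlist_node *n;
        unsigned int b;

        for (b = 0; b < nbuckets; b++) {
            /* Ordered load: *n may be dereferenced afterwards. */
            n = rcu_dereference(table[b].first);
            if (n)
                return n;
        }
        return NULL;
    }
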
static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos)
{
struct hlist_node *head = ct_get_first(seq);

if (head)
while (pos && (head = ct_get_next(seq, head)))
pos--;
return pos ? NULL : head;
}
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(RCU)
{
- read_lock_bh(&nf_conntrack_lock);
+ rcu_read_lock();
return ct_get_idx(seq, *pos);
}
static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
(*pos)++;
return ct_get_next(s, v);
}
static void ct_seq_stop(struct seq_file *s, void *v)
+ __releases(RCU)
{
- read_unlock_bh(&nf_conntrack_lock);
+ rcu_read_unlock();
}
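
__acquires(RCU) and __releases(RCU) are annotations for the sparse static checker, not executable code: in a normal build they expand to nothing, and in a sparse run (make C=1) they let the checker verify that the seq_file ->start() and ->stop() callbacks keep lock acquisition balanced even though the lock and unlock live in different functions. The shape, with hypothetical names (my_get_idx() is an assumed helper):

    static void *my_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)     /* tells sparse we return with RCU held */
    {
        rcu_read_lock();
        return my_get_idx(seq, *pos);
    }

    static void my_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)     /* ...and that this drops it again */
    {
        rcu_read_unlock();
    }
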
static int ct_seq_show(struct seq_file *s, void *v)
}
static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(RCU)
{
rcu_read_lock();
return ct_expect_get_idx(seq, *pos);
}
static void exp_seq_stop(struct seq_file *seq, void *v)
+ __releases(RCU)
{
rcu_read_unlock();
}
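
The expectation iterators already run under rcu_read_lock(), as the unchanged context above shows; this hunk only adds the matching sparse annotations. The next hunks do the real work in nf_conntrack_core.c.
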
static void clean_from_lists(struct nf_conn *ct)
{
pr_debug("clean_from_lists(%p)\n", ct);
- hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
- hlist_del(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);
+ hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
+ hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);
/* Destroy all pending expectations */
nf_ct_remove_expectations(ct);
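
hlist_del_rcu() differs from plain hlist_del() in the two ways that matter for lockless readers: it leaves the removed node's ->next pointer intact, so a reader currently standing on that node can still walk to the remainder of the chain, and it requires that the node's memory stay valid until a grace period has elapsed (arranged by call_rcu() further down). A sketch of the write side, using a hypothetical struct item that the later sketches reuse; none of these names exist in the kernel, and the include paths are the ones current kernels use (on 2.6.24-era kernels the RCU list ops lived in linux/list.h):

    #include <linux/types.h>
    #include <linux/rculist.h>
    #include <linux/spinlock.h>
    #include <linux/atomic.h>
    #include <linux/slab.h>

    /* Hypothetical RCU-managed hash entry, for illustration only. */
    struct item {
        struct hlist_node node;
        struct rcu_head rcu;
        atomic_t refcnt;
        u32 key;
    };

    static DEFINE_SPINLOCK(item_lock);  /* writers still serialize */

    static void item_unlink(struct item *it)
    {
        spin_lock_bh(&item_lock);
        hlist_del_rcu(&it->node);   /* ->next stays usable for readers */
        spin_unlock_bh(&item_lock);
        /* Do not free here: defer via call_rcu(), see item_put() below. */
    }
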
struct nf_conntrack_tuple_hash *
__nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
const struct nf_conn *ignored_conntrack)
{
struct nf_conntrack_tuple_hash *h;
struct hlist_node *n;
unsigned int hash = hash_conntrack(tuple);
- hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) {
+ hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
nf_ct_tuple_equal(tuple, &h->tuple)) {
NF_CT_STAT_INC(found);
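
The lookup loop in __nf_conntrack_find() becomes hlist_for_each_entry_rcu(), which performs the rcu_dereference() on every ->next pointer it follows; the four-argument form with a separate struct hlist_node cursor is the one used by kernels of this vintage (current kernels dropped the cursor argument). Reusing struct item from the sketch above, again under rcu_read_lock():

    static struct item *item_lookup(struct hlist_head *head, u32 key)
    {
        struct item *it;
        struct hlist_node *n;

        hlist_for_each_entry_rcu(it, n, head, node) {
            if (it->key == key)
                return it;  /* valid only until rcu_read_unlock() */
        }
        return NULL;
    }
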
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_tuple_hash *h;
+ struct nf_conn *ct;
- read_lock_bh(&nf_conntrack_lock);
+ rcu_read_lock();
h = __nf_conntrack_find(tuple, NULL);
- if (h)
- atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
- read_unlock_bh(&nf_conntrack_lock);
+ if (h) {
+ ct = nf_ct_tuplehash_to_ctrack(h);
+ if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+ h = NULL;
+ }
+ rcu_read_unlock();
return h;
}
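
This hunk is the heart of the conversion. Under the old rwlock, an entry found in the hash could not reach nf_conntrack_free() before the refcount bump. Under RCU, a reader can find an entry through a stale chain after its last reference has been dropped and its memory queued for freeing; bumping such a refcount with plain atomic_inc() would resurrect a dying object. atomic_inc_not_zero() takes the reference only if the count is still nonzero, and a dying entry is simply treated as a lookup miss. Continuing the sketch:

    static struct item *item_find_get(struct hlist_head *head, u32 key)
    {
        struct item *it;

        rcu_read_lock();
        it = item_lookup(head, key);
        /* Pin the object while RCU still guarantees its memory;
         * refuse to resurrect one whose last reference is gone. */
        if (it && unlikely(!atomic_inc_not_zero(&it->refcnt)))
            it = NULL;
        rcu_read_unlock();
        return it;  /* caller owns a reference, or NULL */
    }
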
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
unsigned int hash,
unsigned int repl_hash)
{
- hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
- &nf_conntrack_hash[hash]);
- hlist_add_head(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
- &nf_conntrack_hash[repl_hash]);
+ hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
+ &nf_conntrack_hash[hash]);
+ hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
+ &nf_conntrack_hash[repl_hash]);
}
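
hlist_add_head_rcu() is hlist_add_head() plus the write barrier of rcu_assign_pointer(): a reader that sees the new node is guaranteed to also see its fully initialized contents. That puts two obligations on the writer: finish initializing the object before linking it, and keep excluding other writers (here still via nf_conntrack_lock). In the sketch, with the hypothetical item_lock standing in for the conntrack lock:

    static void item_insert(struct hlist_head *head, struct item *it, u32 key)
    {
        /* Initialize everything a reader may touch before publishing. */
        it->key = key;
        atomic_set(&it->refcnt, 1);

        spin_lock_bh(&item_lock);   /* writers still exclude writers */
        hlist_add_head_rcu(&it->node, head);
        spin_unlock_bh(&item_lock);
    }
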
void nf_conntrack_hash_insert(struct nf_conn *ct)
{
unsigned int hash, repl_hash;

hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

write_lock_bh(&nf_conntrack_lock);
__nf_conntrack_hash_insert(ct, hash, repl_hash);
write_unlock_bh(&nf_conntrack_lock);
}

int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
const struct nf_conn *ignored_conntrack)
{
struct nf_conntrack_tuple_hash *h;
- read_lock_bh(&nf_conntrack_lock);
+ rcu_read_lock();
h = __nf_conntrack_find(tuple, ignored_conntrack);
- read_unlock_bh(&nf_conntrack_lock);
+ rcu_read_unlock();
return h != NULL;
}
static int early_drop(unsigned int hash)
{
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct = NULL, *tmp;
struct hlist_node *n;
unsigned int i, cnt = 0;
int dropped = 0;
- read_lock_bh(&nf_conntrack_lock);
+ rcu_read_lock();
for (i = 0; i < nf_conntrack_htable_size; i++) {
- hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) {
+ hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash],
+ hnode) {
tmp = nf_ct_tuplehash_to_ctrack(h);
if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
ct = tmp;
cnt++;
}
+
+ if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+ ct = NULL;
if (ct || cnt >= NF_CT_EVICTION_RANGE)
break;
hash = (hash + 1) % nf_conntrack_htable_size;
}
- if (ct)
- atomic_inc(&ct->ct_general.use);
- read_unlock_bh(&nf_conntrack_lock);
+ rcu_read_unlock();
if (!ct)
return dropped;
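
early_drop() follows the same discipline as the lookup path: the whole eviction scan runs under rcu_read_lock(), and the victim is pinned with atomic_inc_not_zero() before the read-side critical section ends. The old code could take its reference after the scan because the rwlock was still held; doing that after rcu_read_unlock() would leave a window in which the candidate is freed. The scan-and-pin shape, reusing the sketch types (the victim policy is illustrative):

    static struct item *pick_victim(struct hlist_head *bucket)
    {
        struct item *it, *victim = NULL;
        struct hlist_node *n;

        rcu_read_lock();
        hlist_for_each_entry_rcu(it, n, bucket, node)
            victim = it;    /* last in chain is roughly the oldest */
        /* Pin before leaving the critical section, or lose the victim. */
        if (victim && unlikely(!atomic_inc_not_zero(&victim->refcnt)))
            victim = NULL;
        rcu_read_unlock();
        return victim;  /* caller must drop the reference when done */
    }
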
/* Don't set timer yet: wait for confirmation */
setup_timer(&conntrack->timeout, death_by_timeout,
(unsigned long)conntrack);
+ INIT_RCU_HEAD(&conntrack->rcu);
return conntrack;
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
-void nf_conntrack_free(struct nf_conn *conntrack)
+static void nf_conntrack_free_rcu(struct rcu_head *head)
{
- nf_ct_ext_free(conntrack);
- kmem_cache_free(nf_conntrack_cachep, conntrack);
+ struct nf_conn *ct = container_of(head, struct nf_conn, rcu);
+
+ nf_ct_ext_free(ct);
+ kmem_cache_free(nf_conntrack_cachep, ct);
atomic_dec(&nf_conntrack_count);
}
+
+void nf_conntrack_free(struct nf_conn *conntrack)
+{
+ call_rcu(&conntrack->rcu, nf_conntrack_free_rcu);
+}
EXPORT_SYMBOL_GPL(nf_conntrack_free);
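
nf_conntrack_free() no longer releases memory synchronously: the real kmem_cache_free() is deferred through call_rcu() until every CPU has left its current read-side critical section, which is exactly what makes the unlocked lookups above safe. The INIT_RCU_HEAD() call added in the allocation path prepares the rcu_head embedded in struct nf_conn (the member itself is added elsewhere in the patch); the macro was effectively a no-op and has since been removed from the kernel. The release side of the sketch:

    static void item_free_rcu(struct rcu_head *head)
    {
        struct item *it = container_of(head, struct item, rcu);

        kfree(it);
    }

    static void item_put(struct item *it)
    {
        /* Readers may still hold pointers obtained from a stale chain,
         * so the last reference frees only after a grace period. */
        if (atomic_dec_and_test(&it->refcnt))
            call_rcu(&it->rcu, item_free_rcu);
    }
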
/* Allocate a new conntrack: we return -ENOMEM if classification
failed due to stress. Otherwise it really is unclassifiable. */
/* We have to rehash for the new table anyway, so we also can
* use a new random seed */
get_random_bytes(&rnd, 4);
+ /* Lookups in the old hash might happen in parallel, which means we
+ * might get false negatives during connection lookup. New connections
+ * created because of a false negative won't make it into the hash
+ * though since that required taking the lock.
+ */
write_lock_bh(&nf_conntrack_lock);
for (i = 0; i < nf_conntrack_htable_size; i++) {
while (!hlist_empty(&nf_conntrack_hash[i])) {
h = hlist_entry(nf_conntrack_hash[i].first,
struct nf_conntrack_tuple_hash, hnode);
- hlist_del(&h->hnode);
+ hlist_del_rcu(&h->hnode);
bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
hlist_add_head(&h->hnode, &hash[bucket]);
}
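
The rehash runs entirely under write_lock_bh(), so there are no concurrent writers, and the comment added above states the reader-side contract: a lookup racing with the move may miss an entry, a tolerable false negative, because any new conntrack created as a result would itself have to take the lock and thus serialize against the resize. Note the asymmetry in the loop: the unlink from the live table uses hlist_del_rcu(), while the node can be added to the new table with plain hlist_add_head() because that table is not yet visible to any reader. Roughly, with a hypothetical new_bucket() hash helper:

    /* Stand-in for rehashing against the new table size. */
    static unsigned int new_bucket(const struct item *it, unsigned int size)
    {
        return it->key % size;
    }

    static void rehash_all(struct hlist_head *old, unsigned int old_size,
                           struct hlist_head *new, unsigned int new_size)
    {
        struct item *it;
        unsigned int i;

        /* Caller holds the writer lock; 'new' is not yet published. */
        for (i = 0; i < old_size; i++) {
            while (!hlist_empty(&old[i])) {
                it = hlist_entry(old[i].first, struct item, node);
                hlist_del_rcu(&it->node);   /* unlink from the live table */
                hlist_add_head(&it->node,
                               &new[new_bucket(it, new_size)]);
            }
        }
    }

The remaining hunks repeat the iterator and annotation conversion shown at the top for the second seq_file implementation, the standalone /proc/net/nf_conntrack interface.
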
static struct hlist_node *ct_get_first(struct seq_file *seq)
{
struct ct_iter_state *st = seq->private;
+ struct hlist_node *n;
for (st->bucket = 0;
st->bucket < nf_conntrack_htable_size;
st->bucket++) {
- if (!hlist_empty(&nf_conntrack_hash[st->bucket]))
- return nf_conntrack_hash[st->bucket].first;
+ n = rcu_dereference(nf_conntrack_hash[st->bucket].first);
+ if (n)
+ return n;
}
return NULL;
}
static struct hlist_node *ct_get_next(struct seq_file *seq,
struct hlist_node *head)
{
struct ct_iter_state *st = seq->private;
- head = head->next;
+ head = rcu_dereference(head->next);
while (head == NULL) {
if (++st->bucket >= nf_conntrack_htable_size)
return NULL;
- head = nf_conntrack_hash[st->bucket].first;
+ head = rcu_dereference(nf_conntrack_hash[st->bucket].first);
}
return head;
}
static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos)
{
struct hlist_node *head = ct_get_first(seq);

if (head)
while (pos && (head = ct_get_next(seq, head)))
pos--;
return pos ? NULL : head;
}
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(nf_conntrack_lock)
+ __acquires(RCU)
{
- read_lock_bh(&nf_conntrack_lock);
+ rcu_read_lock();
return ct_get_idx(seq, *pos);
}
static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
(*pos)++;
return ct_get_next(s, v);
}
static void ct_seq_stop(struct seq_file *s, void *v)
- __releases(nf_conntrack_lock)
+ __releases(RCU)
{
- read_unlock_bh(&nf_conntrack_lock);
+ rcu_read_unlock();
}
/* return 0 on success, 1 in case of error */