netfilter: Add fail-open support
author     Krishna Kumar <krkumar2@in.ibm.com>
           Thu, 24 May 2012 03:56:44 +0000 (03:56 +0000)
committer  Pablo Neira Ayuso <pablo@netfilter.org>
           Thu, 7 Jun 2012 12:58:39 +0000 (14:58 +0200)
Implement a new "fail-open" mode in which packets are not dropped when
the queue is full; instead they are reinjected with an NF_ACCEPT verdict.
The mode can be enabled/disabled per queue using the netlink
NFQA_CFG_FLAGS and NFQA_CFG_MASK attributes.

Signed-off-by: Krishna Kumar <krkumar2@in.ibm.com>
Signed-off-by: Vivek Kashyap <vivk@us.ibm.com>
Signed-off-by: Sridhar Samudrala <samudrala@us.ibm.com>
include/linux/netfilter/nfnetlink_queue.h
net/netfilter/nfnetlink_queue.c
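
For context, the sketch below shows how a userspace consumer might turn the
new flag on for a queue. It is only a sketch, assuming a libnetfilter_queue
build that already exposes nfq_set_queue_flags() and a kernel header that
defines NFQA_CFG_F_FAIL_OPEN; older userspace may lack both, in which case
the raw-netlink sketch at the end of this patch applies.

/*
 * Sketch: enable fail-open on NFQUEUE queue 0 from userspace.
 * Assumes libnetfilter_queue with nfq_set_queue_flags() and a kernel
 * that understands NFQA_CFG_FLAGS/NFQA_CFG_MASK (this patch).
 * Build roughly as: gcc failopen.c -lnetfilter_queue
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <linux/netfilter.h>                    /* NF_ACCEPT */
#include <linux/netfilter/nfnetlink_queue.h>    /* NFQA_CFG_F_FAIL_OPEN */
#include <libnetfilter_queue/libnetfilter_queue.h>

static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
              struct nfq_data *nfa, void *data)
{
        struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
        uint32_t id = ph ? ntohl(ph->packet_id) : 0;

        (void)nfmsg;
        (void)data;
        /* Accept everything; the interesting part is the flag setup below. */
        return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
}

int main(void)
{
        struct nfq_handle *h = nfq_open();
        struct nfq_q_handle *qh;

        if (!h)
                exit(1);
        qh = nfq_create_queue(h, 0, &cb, NULL);
        if (!qh)
                exit(1);
        nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);

        /* Ask the kernel to accept, not drop, packets when queue 0 is full. */
        if (nfq_set_queue_flags(qh, NFQA_CFG_F_FAIL_OPEN, NFQA_CFG_F_FAIL_OPEN) < 0)
                fprintf(stderr, "kernel or library without fail-open support?\n");

        /* ... recv()/nfq_handle_packet() loop would follow here ... */
        nfq_destroy_queue(qh);
        nfq_close(h);
        return 0;
}
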

diff --git a/include/linux/netfilter/nfnetlink_queue.h b/include/linux/netfilter/nfnetlink_queue.h
index 24b32e6c009e773f60a445228917740bd25f73b6..a6c1ddac05cc005dc2fff3d00c853193aa9bf3a5 100644
--- a/include/linux/netfilter/nfnetlink_queue.h
+++ b/include/linux/netfilter/nfnetlink_queue.h
@@ -84,8 +84,13 @@ enum nfqnl_attr_config {
        NFQA_CFG_CMD,                   /* nfqnl_msg_config_cmd */
        NFQA_CFG_PARAMS,                /* nfqnl_msg_config_params */
        NFQA_CFG_QUEUE_MAXLEN,          /* __u32 */
+       NFQA_CFG_MASK,                  /* identify which flags to change */
+       NFQA_CFG_FLAGS,                 /* value of these flags (__u32) */
        __NFQA_CFG_MAX
 };
 #define NFQA_CFG_MAX (__NFQA_CFG_MAX-1)
 
+/* Flags for NFQA_CFG_FLAGS */
+#define NFQA_CFG_F_FAIL_OPEN                   (1 << 0)
+
 #endif /* _NFNETLINK_QUEUE_H */
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 4162437b83614e380a70d5f4fe2a94e76a4d0a0d..630da3d2c62a5473894c03d52896a27bc35b49ea 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -52,6 +52,7 @@ struct nfqnl_instance {
 
        u_int16_t queue_num;                    /* number of this queue */
        u_int8_t copy_mode;
+       u_int32_t flags;                        /* Set using NFQA_CFG_FLAGS */
 /*
  * Following fields are dirtied for each queued packet,
  * keep them in same cache line if possible.
@@ -406,6 +407,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
        struct nfqnl_instance *queue;
        int err = -ENOBUFS;
        __be32 *packet_id_ptr;
+       int failopen = 0;
 
        /* rcu_read_lock()ed by nf_hook_slow() */
        queue = instance_lookup(queuenum);
@@ -431,9 +433,14 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
                goto err_out_free_nskb;
        }
        if (queue->queue_total >= queue->queue_maxlen) {
-               queue->queue_dropped++;
-               net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n",
-                                    queue->queue_total);
+               if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
+                       failopen = 1;
+                       err = 0;
+               } else {
+                       queue->queue_dropped++;
+                       net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
+                                            queue->queue_total);
+               }
                goto err_out_free_nskb;
        }
        entry->id = ++queue->id_sequence;
@@ -455,6 +462,8 @@ err_out_free_nskb:
        kfree_skb(nskb);
 err_out_unlock:
        spin_unlock_bh(&queue->lock);
+       if (failopen)
+               nf_reinject(entry, NF_ACCEPT);
 err_out:
        return err;
 }
@@ -858,6 +867,31 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                spin_unlock_bh(&queue->lock);
        }
 
+       if (nfqa[NFQA_CFG_FLAGS]) {
+               __u32 flags, mask;
+
+               if (!queue) {
+                       ret = -ENODEV;
+                       goto err_out_unlock;
+               }
+
+               if (!nfqa[NFQA_CFG_MASK]) {
+                       /* A mask is needed to specify which flags are being
+                        * changed.
+                        */
+                       ret = -EINVAL;
+                       goto err_out_unlock;
+               }
+
+               flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
+               mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));
+
+               spin_lock_bh(&queue->lock);
+               queue->flags &= ~mask;
+               queue->flags |= flags & mask;
+               spin_unlock_bh(&queue->lock);
+       }
+
 err_out_unlock:
        rcu_read_unlock();
        return ret;
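
For completeness, here is a rough picture of what nfqnl_recv_config() above
expects on the wire when userspace flips the flag without going through
libnetfilter_queue. This is a sketch against libmnl; the helper name
nfq_send_flags() and the surrounding socket handling are illustrative, and
the byte order mirrors the kernel's ntohl(nla_get_be32(...)) reads above.

/*
 * Sketch: build an NFQNL_MSG_CONFIG request carrying NFQA_CFG_FLAGS and
 * NFQA_CFG_MASK with libmnl.  Both attributes are 32-bit big-endian on the
 * wire, matching the ntohl(nla_get_be32(...)) reads in nfqnl_recv_config().
 * The queue must already have been bound (NFQNL_CFG_CMD_BIND) on this
 * NETLINK_NETFILTER socket, otherwise the kernel returns -ENODEV as in the
 * hunk above.  Helper name and error handling are illustrative only.
 */
#include <stdint.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>

static int nfq_send_flags(struct mnl_socket *nl, uint16_t queue_num,
                          uint32_t mask, uint32_t flags)
{
        char buf[MNL_SOCKET_BUFFER_SIZE];
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfg;

        nlh = mnl_nlmsg_put_header(buf);
        nlh->nlmsg_type = (NFNL_SUBSYS_QUEUE << 8) | NFQNL_MSG_CONFIG;
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;

        nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
        nfg->nfgen_family = AF_UNSPEC;
        nfg->version = NFNETLINK_V0;
        nfg->res_id = htons(queue_num);

        /* Omitting NFQA_CFG_MASK makes the kernel return -EINVAL (see above). */
        mnl_attr_put_u32(nlh, NFQA_CFG_FLAGS, htonl(flags));
        mnl_attr_put_u32(nlh, NFQA_CFG_MASK, htonl(mask));

        if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0)
                return -1;
        return 0;
}

/* e.g. nfq_send_flags(nl, 0, NFQA_CFG_F_FAIL_OPEN, NFQA_CFG_F_FAIL_OPEN); */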