netfilter: nf_ct_ecache: refactor nf_ct_deliver_cached_events
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / netfilter / nf_conntrack_ecache.c
1 /* Event cache for netfilter. */
2
3 /* (C) 1999-2001 Paul `Rusty' Russell
4 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
5 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/types.h>
13 #include <linux/netfilter.h>
14 #include <linux/skbuff.h>
15 #include <linux/vmalloc.h>
16 #include <linux/stddef.h>
17 #include <linux/err.h>
18 #include <linux/percpu.h>
19 #include <linux/kernel.h>
20 #include <linux/netdevice.h>
21 #include <linux/slab.h>
22 #include <linux/export.h>
23
24 #include <net/netfilter/nf_conntrack.h>
25 #include <net/netfilter/nf_conntrack_core.h>
26 #include <net/netfilter/nf_conntrack_extend.h>
27
/* Serializes registration/unregistration of conntrack and expectation
 * event notifiers across all network namespaces. */
static DEFINE_MUTEX(nf_ct_ecache_mutex);
29
30 /* deliver cached events and clear cache entry - must be called with locally
31 * disabled softirqs */
32 void nf_ct_deliver_cached_events(struct nf_conn *ct)
33 {
34 struct net *net = nf_ct_net(ct);
35 unsigned long events, missed;
36 struct nf_ct_event_notifier *notify;
37 struct nf_conntrack_ecache *e;
38 struct nf_ct_event item;
39 int ret;
40
41 rcu_read_lock();
42 notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
43 if (notify == NULL)
44 goto out_unlock;
45
46 e = nf_ct_ecache_find(ct);
47 if (e == NULL)
48 goto out_unlock;
49
50 events = xchg(&e->cache, 0);
51
52 if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
53 goto out_unlock;
54
55 /* We make a copy of the missed event cache without taking
56 * the lock, thus we may send missed events twice. However,
57 * this does not harm and it happens very rarely. */
58 missed = e->missed;
59
60 if (!((events | missed) & e->ctmask))
61 goto out_unlock;
62
63 item.ct = ct;
64 item.pid = 0;
65 item.report = 0;
66
67 ret = notify->fcn(events | missed, &item);
68
69 if (likely(ret >= 0 && !missed))
70 goto out_unlock;
71
72 spin_lock_bh(&ct->lock);
73 if (ret < 0)
74 e->missed |= events;
75 else
76 e->missed &= ~missed;
77 spin_unlock_bh(&ct->lock);
78
79 out_unlock:
80 rcu_read_unlock();
81 }
82 EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
83
84 int nf_conntrack_register_notifier(struct net *net,
85 struct nf_ct_event_notifier *new)
86 {
87 int ret = 0;
88 struct nf_ct_event_notifier *notify;
89
90 mutex_lock(&nf_ct_ecache_mutex);
91 notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
92 lockdep_is_held(&nf_ct_ecache_mutex));
93 if (notify != NULL) {
94 ret = -EBUSY;
95 goto out_unlock;
96 }
97 rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
98 mutex_unlock(&nf_ct_ecache_mutex);
99 return ret;
100
101 out_unlock:
102 mutex_unlock(&nf_ct_ecache_mutex);
103 return ret;
104 }
105 EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
106
107 void nf_conntrack_unregister_notifier(struct net *net,
108 struct nf_ct_event_notifier *new)
109 {
110 struct nf_ct_event_notifier *notify;
111
112 mutex_lock(&nf_ct_ecache_mutex);
113 notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
114 lockdep_is_held(&nf_ct_ecache_mutex));
115 BUG_ON(notify != new);
116 RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
117 mutex_unlock(&nf_ct_ecache_mutex);
118 }
119 EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
120
121 int nf_ct_expect_register_notifier(struct net *net,
122 struct nf_exp_event_notifier *new)
123 {
124 int ret = 0;
125 struct nf_exp_event_notifier *notify;
126
127 mutex_lock(&nf_ct_ecache_mutex);
128 notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
129 lockdep_is_held(&nf_ct_ecache_mutex));
130 if (notify != NULL) {
131 ret = -EBUSY;
132 goto out_unlock;
133 }
134 rcu_assign_pointer(net->ct.nf_expect_event_cb, new);
135 mutex_unlock(&nf_ct_ecache_mutex);
136 return ret;
137
138 out_unlock:
139 mutex_unlock(&nf_ct_ecache_mutex);
140 return ret;
141 }
142 EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);
143
144 void nf_ct_expect_unregister_notifier(struct net *net,
145 struct nf_exp_event_notifier *new)
146 {
147 struct nf_exp_event_notifier *notify;
148
149 mutex_lock(&nf_ct_ecache_mutex);
150 notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
151 lockdep_is_held(&nf_ct_ecache_mutex));
152 BUG_ON(notify != new);
153 RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
154 mutex_unlock(&nf_ct_ecache_mutex);
155 }
156 EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
157
158 #define NF_CT_EVENTS_DEFAULT 1
159 static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
160 static int nf_ct_events_retry_timeout __read_mostly = 15*HZ;
161
#ifdef CONFIG_SYSCTL
/* Template sysctl table; per-netns copies rewire .data in init_sysctl(). */
static struct ctl_table event_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_events",
		.data		= &init_net.ct.sysctl_events,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_events_retry_timeout",
		.data		= &init_net.ct.sysctl_events_retry_timeout,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{}
};
#endif /* CONFIG_SYSCTL */
181
182 static struct nf_ct_ext_type event_extend __read_mostly = {
183 .len = sizeof(struct nf_conntrack_ecache),
184 .align = __alignof__(struct nf_conntrack_ecache),
185 .id = NF_CT_EXT_ECACHE,
186 };
187
#ifdef CONFIG_SYSCTL
/* Duplicate the sysctl template for @net, point it at this netns's data
 * and register it. Returns 0 on success, -ENOMEM on any failure. */
static int nf_conntrack_event_init_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
			GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* Rewire the copied entries to this namespace's knobs. */
	table[0].data = &net->ct.sysctl_events;
	table[1].data = &net->ct.sysctl_events_retry_timeout;

	net->ct.event_sysctl_header =
		register_net_sysctl_table(net,
					  nf_net_netfilter_sysctl_path, table);
	if (net->ct.event_sysctl_header)
		return 0;

	printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n");
	kfree(table);
	return -ENOMEM;
}

/* Unregister @net's sysctl table and free the kmemdup'd copy. */
static void nf_conntrack_event_fini_sysctl(struct net *net)
{
	struct ctl_table *table = net->ct.event_sysctl_header->ctl_table_arg;

	unregister_net_sysctl_table(net->ct.event_sysctl_header);
	kfree(table);
}
#else
static int nf_conntrack_event_init_sysctl(struct net *net)
{
	return 0;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
}
#endif /* CONFIG_SYSCTL */
234
235 int nf_conntrack_ecache_init(struct net *net)
236 {
237 int ret;
238
239 net->ct.sysctl_events = nf_ct_events;
240 net->ct.sysctl_events_retry_timeout = nf_ct_events_retry_timeout;
241
242 if (net_eq(net, &init_net)) {
243 ret = nf_ct_extend_register(&event_extend);
244 if (ret < 0) {
245 printk(KERN_ERR "nf_ct_event: Unable to register "
246 "event extension.\n");
247 goto out_extend_register;
248 }
249 }
250
251 ret = nf_conntrack_event_init_sysctl(net);
252 if (ret < 0)
253 goto out_sysctl;
254
255 return 0;
256
257 out_sysctl:
258 if (net_eq(net, &init_net))
259 nf_ct_extend_unregister(&event_extend);
260 out_extend_register:
261 return ret;
262 }
263
264 void nf_conntrack_ecache_fini(struct net *net)
265 {
266 nf_conntrack_event_fini_sysctl(net);
267 if (net_eq(net, &init_net))
268 nf_ct_extend_unregister(&event_extend);
269 }