/* net/netfilter/nf_conntrack_ecache.c - Event cache for netfilter. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>

static DEFINE_MUTEX(nf_ct_ecache_mutex);

/* deliver cached events and clear cache entry - must be called with locally
 * disabled softirqs */
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);
        unsigned long events;
        struct nf_ct_event_notifier *notify;
        struct nf_conntrack_ecache *e;

        rcu_read_lock();
        notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
        if (notify == NULL)
                goto out_unlock;

        e = nf_ct_ecache_find(ct);
        if (e == NULL)
                goto out_unlock;

        events = xchg(&e->cache, 0);

        if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct) && events) {
                struct nf_ct_event item = {
                        .ct     = ct,
                        .pid    = 0,
                        .report = 0
                };
                int ret;
                /* We make a copy of the missed event cache without taking
                 * the lock, thus we may send missed events twice. However,
                 * this does not harm and it happens very rarely. */
                unsigned long missed = e->missed;

                if (!((events | missed) & e->ctmask))
                        goto out_unlock;

                ret = notify->fcn(events | missed, &item);
                if (unlikely(ret < 0 || missed)) {
                        spin_lock_bh(&ct->lock);
                        if (ret < 0)
                                e->missed |= events;
                        else
                                e->missed &= ~missed;
                        spin_unlock_bh(&ct->lock);
                }
        }

out_unlock:
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
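
/*
 * Illustrative sketch, not part of the original file: elsewhere in the
 * conntrack core, events are typically marked with nf_conntrack_event_cache()
 * (a helper from nf_conntrack_ecache.h) and later flushed through the
 * function above once packet processing is done, roughly:
 *
 *      nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
 *      ...
 *      nf_ct_deliver_cached_events(ct);   (with softirqs locally disabled)
 *
 * The real call sites are in nf_conntrack_core.c; this only shows the
 * intended usage pattern.
 */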

int nf_conntrack_register_notifier(struct net *net,
                                   struct nf_ct_event_notifier *new)
{
        int ret = 0;
        struct nf_ct_event_notifier *notify;

        mutex_lock(&nf_ct_ecache_mutex);
        notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        if (notify != NULL) {
                ret = -EBUSY;
                goto out_unlock;
        }
        RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, new);
        mutex_unlock(&nf_ct_ecache_mutex);
        return ret;

out_unlock:
        mutex_unlock(&nf_ct_ecache_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);

void nf_conntrack_unregister_notifier(struct net *net,
                                      struct nf_ct_event_notifier *new)
{
        struct nf_ct_event_notifier *notify;

        mutex_lock(&nf_ct_ecache_mutex);
        notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        BUG_ON(notify != new);
        RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
        mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
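
/*
 * Illustrative sketch, not part of the original file: a consumer such as
 * ctnetlink supplies the ->fcn callback invoked above and registers it per
 * network namespace. The names my_ct_event/my_notifier are hypothetical;
 * the structures and register/unregister helpers are the ones defined here.
 *
 *      static int my_ct_event(unsigned int events, struct nf_ct_event *item)
 *      {
 *              if (events & (1 << IPCT_DESTROY))
 *                      pr_debug("conntrack %p destroyed\n", item->ct);
 *              return 0;   (a negative value re-queues the events as missed)
 *      }
 *
 *      static struct nf_ct_event_notifier my_notifier = {
 *              .fcn = my_ct_event,
 *      };
 *
 *      ret = nf_conntrack_register_notifier(&init_net, &my_notifier);
 *      ...
 *      nf_conntrack_unregister_notifier(&init_net, &my_notifier);
 */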

int nf_ct_expect_register_notifier(struct net *net,
                                   struct nf_exp_event_notifier *new)
{
        int ret = 0;
        struct nf_exp_event_notifier *notify;

        mutex_lock(&nf_ct_ecache_mutex);
        notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        if (notify != NULL) {
                ret = -EBUSY;
                goto out_unlock;
        }
        RCU_INIT_POINTER(net->ct.nf_expect_event_cb, new);
        mutex_unlock(&nf_ct_ecache_mutex);
        return ret;

out_unlock:
        mutex_unlock(&nf_ct_ecache_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);

void nf_ct_expect_unregister_notifier(struct net *net,
                                      struct nf_exp_event_notifier *new)
{
        struct nf_exp_event_notifier *notify;

        mutex_lock(&nf_ct_ecache_mutex);
        notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        BUG_ON(notify != new);
        RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
        mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
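
/*
 * Illustrative note, not part of the original file: expectation events are
 * expected to follow the same pattern as the conntrack events above, with an
 * ->fcn callback that receives a struct nf_exp_event instead of a
 * struct nf_ct_event; nf_ct_expect_register_notifier() and
 * nf_ct_expect_unregister_notifier() mirror the conntrack pair.
 */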

#define NF_CT_EVENTS_DEFAULT 1
static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
static int nf_ct_events_retry_timeout __read_mostly = 15*HZ;

#ifdef CONFIG_SYSCTL
static struct ctl_table event_sysctl_table[] = {
        {
                .procname       = "nf_conntrack_events",
                .data           = &init_net.ct.sysctl_events,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "nf_conntrack_events_retry_timeout",
                .data           = &init_net.ct.sysctl_events_retry_timeout,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {}
};
#endif /* CONFIG_SYSCTL */
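
/*
 * Illustrative note, not part of the original file: with CONFIG_SYSCTL these
 * entries normally show up as /proc/sys/net/netfilter/nf_conntrack_events and
 * /proc/sys/net/netfilter/nf_conntrack_events_retry_timeout, so event
 * delivery can be toggled at runtime, e.g.
 * "echo 0 > /proc/sys/net/netfilter/nf_conntrack_events" to disable it.
 */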

static struct nf_ct_ext_type event_extend __read_mostly = {
        .len    = sizeof(struct nf_conntrack_ecache),
        .align  = __alignof__(struct nf_conntrack_ecache),
        .id     = NF_CT_EXT_ECACHE,
};
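
/*
 * Illustrative sketch, not part of the original file: the event cache is a
 * conntrack extension, so it is typically attached right after a conntrack
 * is allocated and looked up later, e.g. (assuming the helpers declared in
 * nf_conntrack_ecache.h):
 *
 *      struct nf_conntrack_ecache *e;
 *
 *      e = nf_ct_ecache_ext_add(ct, ctmask, expmask, GFP_ATOMIC);
 *      ...
 *      e = nf_ct_ecache_find(ct);   (may return NULL if nothing was attached)
 */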

#ifdef CONFIG_SYSCTL
static int nf_conntrack_event_init_sysctl(struct net *net)
{
        struct ctl_table *table;

        table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
                        GFP_KERNEL);
        if (!table)
                goto out;

        table[0].data = &net->ct.sysctl_events;
        table[1].data = &net->ct.sysctl_events_retry_timeout;

        net->ct.event_sysctl_header =
                register_net_sysctl_table(net,
                                          nf_net_netfilter_sysctl_path, table);
        if (!net->ct.event_sysctl_header) {
                printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n");
                goto out_register;
        }
        return 0;

out_register:
        kfree(table);
out:
        return -ENOMEM;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
        struct ctl_table *table;

        table = net->ct.event_sysctl_header->ctl_table_arg;
        unregister_net_sysctl_table(net->ct.event_sysctl_header);
        kfree(table);
}
#else
static int nf_conntrack_event_init_sysctl(struct net *net)
{
        return 0;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
}
#endif /* CONFIG_SYSCTL */

int nf_conntrack_ecache_init(struct net *net)
{
        int ret;

        net->ct.sysctl_events = nf_ct_events;
        net->ct.sysctl_events_retry_timeout = nf_ct_events_retry_timeout;

        if (net_eq(net, &init_net)) {
                ret = nf_ct_extend_register(&event_extend);
                if (ret < 0) {
                        printk(KERN_ERR "nf_ct_event: Unable to register "
                               "event extension.\n");
                        goto out_extend_register;
                }
        }

        ret = nf_conntrack_event_init_sysctl(net);
        if (ret < 0)
                goto out_sysctl;

        return 0;

out_sysctl:
        if (net_eq(net, &init_net))
                nf_ct_extend_unregister(&event_extend);
out_extend_register:
        return ret;
}

void nf_conntrack_ecache_fini(struct net *net)
{
        nf_conntrack_event_fini_sysctl(net);
        if (net_eq(net, &init_net))
                nf_ct_extend_unregister(&event_extend);
}
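
/*
 * Illustrative note, not part of the original file: nf_conntrack_ecache_init()
 * and nf_conntrack_ecache_fini() are expected to be called from the per-netns
 * conntrack init/exit paths in nf_conntrack_core.c. Only the initial network
 * namespace registers the extension type; other namespaces reuse it and only
 * set up their own sysctl copies.
 */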