/* Event cache for netfilter. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>

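/*
 * A single conntrack notifier and a single expectation notifier can be
 * registered at a time (in mainline this is ctnetlink).  The pointers
 * below are read under rcu_read_lock() on the delivery path and updated
 * under nf_ct_ecache_mutex by the register/unregister helpers further
 * down, which return -EBUSY if a notifier is already installed.
 */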
static DEFINE_MUTEX(nf_ct_ecache_mutex);

struct nf_ct_event_notifier *nf_conntrack_event_cb __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_event_cb);

struct nf_exp_event_notifier *nf_expect_event_cb __read_mostly;
EXPORT_SYMBOL_GPL(nf_expect_event_cb);

/* deliver cached events and clear cache entry - must be called with locally
 * disabled softirqs */
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
	unsigned long events;
	struct nf_ct_event_notifier *notify;
	struct nf_conntrack_ecache *e;

	rcu_read_lock();
	notify = rcu_dereference(nf_conntrack_event_cb);
	if (notify == NULL)
		goto out_unlock;

	e = nf_ct_ecache_find(ct);
	if (e == NULL)
		goto out_unlock;

	events = xchg(&e->cache, 0);

	if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct) && events) {
		struct nf_ct_event item = {
			.ct	= ct,
			.pid	= 0,
			.report	= 0
		};
		int ret;
		/* We make a copy of the missed event cache without taking
		 * the lock, thus we may send missed events twice. However,
		 * this does not harm and it happens very rarely. */
		unsigned long missed = e->missed;

		ret = notify->fcn(events | missed, &item);
		if (unlikely(ret < 0 || missed)) {
			spin_lock_bh(&ct->lock);
			if (ret < 0)
				e->missed |= events;
			else
				e->missed &= ~missed;
			spin_unlock_bh(&ct->lock);
		}
	}

out_unlock:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
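
/*
 * Illustrative usage sketch (not part of this file): as the comment above
 * the function says, callers must have softirqs disabled locally.  The
 * helper name flush_ct_events below is hypothetical.
 *
 *	static void flush_ct_events(struct nf_conn *ct)
 *	{
 *		local_bh_disable();
 *		nf_ct_deliver_cached_events(ct);
 *		local_bh_enable();
 *	}
 */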

int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
{
	int ret = 0;
	struct nf_ct_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(nf_conntrack_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	if (notify != NULL) {
		ret = -EBUSY;
		goto out_unlock;
	}
	rcu_assign_pointer(nf_conntrack_event_cb, new);
	mutex_unlock(&nf_ct_ecache_mutex);
	return ret;

out_unlock:
	mutex_unlock(&nf_ct_ecache_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);

void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
{
	struct nf_ct_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(nf_conntrack_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	BUG_ON(notify != new);
	rcu_assign_pointer(nf_conntrack_event_cb, NULL);
	mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
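
/*
 * Registration sketch (illustrative, hypothetical names my_ct_event and
 * my_ct_notifier): a consumer such as ctnetlink supplies a ->fcn callback
 * that receives the event bitmask and the nf_ct_event item.  A negative
 * return value makes the delivery path above keep the events in the
 * missed-event cache so they can be retried later.
 *
 *	static int my_ct_event(unsigned int events, struct nf_ct_event *item)
 *	{
 *		if (events & (1 << IPCT_NEW))
 *			pr_info("new conntrack %p\n", item->ct);
 *		return 0;
 *	}
 *
 *	static struct nf_ct_event_notifier my_ct_notifier = {
 *		.fcn	= my_ct_event,
 *	};
 *
 *	err = nf_conntrack_register_notifier(&my_ct_notifier);
 *	...
 *	nf_conntrack_unregister_notifier(&my_ct_notifier);
 */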

int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
{
	int ret = 0;
	struct nf_exp_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(nf_expect_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	if (notify != NULL) {
		ret = -EBUSY;
		goto out_unlock;
	}
	rcu_assign_pointer(nf_expect_event_cb, new);
	mutex_unlock(&nf_ct_ecache_mutex);
	return ret;

out_unlock:
	mutex_unlock(&nf_ct_ecache_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);

void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
{
	struct nf_exp_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(nf_expect_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	BUG_ON(notify != new);
	rcu_assign_pointer(nf_expect_event_cb, NULL);
	mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
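
/*
 * The expectation notifier mirrors the conntrack one.  Illustrative sketch,
 * assuming the nf_exp_event layout used by this kernel generation
 * (exp/pid/report) and the IPEXP_NEW event bit; my_exp_event and
 * my_exp_notifier are hypothetical names.
 *
 *	static int my_exp_event(unsigned int events, struct nf_exp_event *item)
 *	{
 *		if (events & (1 << IPEXP_NEW))
 *			pr_info("new expectation %p\n", item->exp);
 *		return 0;
 *	}
 *
 *	static struct nf_exp_event_notifier my_exp_notifier = {
 *		.fcn	= my_exp_event,
 *	};
 *
 *	err = nf_ct_expect_register_notifier(&my_exp_notifier);
 */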

#define NF_CT_EVENTS_DEFAULT 1
static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
static int nf_ct_events_retry_timeout __read_mostly = 15*HZ;

#ifdef CONFIG_SYSCTL
static struct ctl_table event_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_events",
		.data		= &init_net.ct.sysctl_events,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_events_retry_timeout",
		.data		= &init_net.ct.sysctl_events_retry_timeout,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{}
};
#endif /* CONFIG_SYSCTL */
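
/*
 * When CONFIG_SYSCTL is enabled the table above is registered under the
 * netfilter sysctl path by nf_conntrack_event_init_sysctl() below, so the
 * knobs appear as /proc/sys/net/netfilter/nf_conntrack_events and
 * /proc/sys/net/netfilter/nf_conntrack_events_retry_timeout (the latter
 * is converted between seconds and jiffies by proc_dointvec_jiffies).
 */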

static struct nf_ct_ext_type event_extend __read_mostly = {
	.len	= sizeof(struct nf_conntrack_ecache),
	.align	= __alignof__(struct nf_conntrack_ecache),
	.id	= NF_CT_EXT_ECACHE,
};
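
/*
 * The extension type above backs the per-conntrack event cache
 * (struct nf_conntrack_ecache) looked up via nf_ct_ecache_find().
 * Illustrative sketch of how an entry gets the extension and how events
 * are cached on it; the nf_ct_ecache_ext_add() signature shown (ct mask,
 * expect mask, gfp flags) is assumed from this kernel generation:
 *
 *	nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
 *	...
 *	nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
 */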

#ifdef CONFIG_SYSCTL
static int nf_conntrack_event_init_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
			GFP_KERNEL);
	if (!table)
		goto out;

	table[0].data = &net->ct.sysctl_events;
	table[1].data = &net->ct.sysctl_events_retry_timeout;

	net->ct.event_sysctl_header =
		register_net_sysctl_table(net,
					  nf_net_netfilter_sysctl_path, table);
	if (!net->ct.event_sysctl_header) {
		printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n");
		goto out_register;
	}
	return 0;

out_register:
	kfree(table);
out:
	return -ENOMEM;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = net->ct.event_sysctl_header->ctl_table_arg;
	unregister_net_sysctl_table(net->ct.event_sysctl_header);
	kfree(table);
}
#else
static int nf_conntrack_event_init_sysctl(struct net *net)
{
	return 0;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
}
#endif /* CONFIG_SYSCTL */

int nf_conntrack_ecache_init(struct net *net)
{
	int ret;

	net->ct.sysctl_events = nf_ct_events;
	net->ct.sysctl_events_retry_timeout = nf_ct_events_retry_timeout;

	if (net_eq(net, &init_net)) {
		ret = nf_ct_extend_register(&event_extend);
		if (ret < 0) {
			printk(KERN_ERR "nf_ct_event: Unable to register "
					"event extension.\n");
			goto out_extend_register;
		}
	}

	ret = nf_conntrack_event_init_sysctl(net);
	if (ret < 0)
		goto out_sysctl;

	return 0;

out_sysctl:
	if (net_eq(net, &init_net))
		nf_ct_extend_unregister(&event_extend);
out_extend_register:
	return ret;
}

void nf_conntrack_ecache_fini(struct net *net)
{
	nf_conntrack_event_fini_sysctl(net);
	if (net_eq(net, &init_net))
		nf_ct_extend_unregister(&event_extend);
}