[NETFILTER]: nf_conntrack: introduce extension infrastructure
net/netfilter/nf_conntrack_extend.c
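This commit introduces a generic extension area for conntrack entries: a user registers a struct nf_ct_ext_type describing the length and alignment of its private data (plus optional destroy/move callbacks), and the core packs all such areas into a single allocation hung off ct->ext. The sketch below shows roughly how a consumer would use it; NF_CT_EXT_FOO and struct nf_conn_foo are hypothetical names, the helpers are the ones defined in this file and in nf_conntrack_extend.h:

	/* illustrative sketch only -- NF_CT_EXT_FOO / struct nf_conn_foo are made up */
	static struct nf_ct_ext_type foo_extend = {
		.len	= sizeof(struct nf_conn_foo),
		.align	= __alignof__(struct nf_conn_foo),
		.id	= NF_CT_EXT_FOO,
	};

	/* module init / exit */
	err = nf_ct_extend_register(&foo_extend);
	...
	nf_ct_extend_unregister(&foo_extend);

	/* attach the private area to a conntrack, e.g. while setting it up */
	struct nf_conn_foo *foo = __nf_ct_ext_add(ct, NF_CT_EXT_FOO, GFP_ATOMIC);
	if (foo == NULL)
		/* allocation failed or the extension already exists */;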
/* Structure dynamic extension infrastructure
 * Copyright (C) 2004 Rusty Russell IBM Corporation
 * Copyright (C) 2007 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (C) 2007 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack_extend.h>

static struct nf_ct_ext_type *nf_ct_ext_types[NF_CT_EXT_NUM];
static DEFINE_MUTEX(nf_ct_ext_type_mutex);

/* Horrible trick to figure out smallest amount worth kmallocing. */
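/* kmalloc_sizes.h expands to a list of CACHE(32) CACHE(64) ... entries;
 * with the CACHE() definition below the enum initializer becomes
 * (32) + 0 * (64) + 0 * ... + 0 * 1.  Since * binds tighter than +,
 * every term after the first is multiplied by zero, leaving the first
 * (smallest) kmalloc cache size.
 */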
#define CACHE(x) (x) + 0 *
enum {
	NF_CT_EXT_MIN_SIZE =
#include <linux/kmalloc_sizes.h>
	1 };
#undef CACHE

void __nf_ct_ext_destroy(struct nf_conn *ct)
{
	unsigned int i;
	struct nf_ct_ext_type *t;

	for (i = 0; i < NF_CT_EXT_NUM; i++) {
		if (!nf_ct_ext_exist(ct, i))
			continue;

		rcu_read_lock();
		t = rcu_dereference(nf_ct_ext_types[i]);

		/* The nf_ct_ext_type might already have been unregistered
		 * here.  I.e., the type is responsible for cleaning up its
		 * private areas in all conntracks when it unregisters itself.
		 */
		if (t && t->destroy)
			t->destroy(ct);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(__nf_ct_ext_destroy);

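/* No extensions yet: allocate a fresh blob for the first one.  The
 * resulting layout is
 *
 *	| struct nf_ct_ext | pad to t->align | t->len bytes of data |
 *
 * with real_len taken from t->alloc_size, which may already reserve
 * room for types flagged NF_CT_EXT_F_PREALLOC.
 */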
static void *
nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
{
	unsigned int off, len, real_len;
	struct nf_ct_ext_type *t;

	rcu_read_lock();
	t = rcu_dereference(nf_ct_ext_types[id]);
	BUG_ON(t == NULL);
	off = ALIGN(sizeof(struct nf_ct_ext), t->align);
	len = off + t->len;
	real_len = t->alloc_size;
	rcu_read_unlock();

	*ext = kzalloc(real_len, gfp);
	if (!*ext)
		return NULL;

	(*ext)->offset[id] = off;
	(*ext)->len = len;
	(*ext)->real_len = real_len;

	return (void *)(*ext) + off;
}

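/* Add the extension area for @id to an existing conntrack.  If the
 * currently allocated blob is too small, a bigger one is kmalloc()ed,
 * the old contents are copied over, and each registered type gets a
 * chance to fix up pointers into its old area via the ->move() hook
 * before the old blob is freed.
 */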
void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
{
	struct nf_ct_ext *new;
	int i, newlen, newoff;
	struct nf_ct_ext_type *t;

	if (!ct->ext)
		return nf_ct_ext_create(&ct->ext, id, gfp);

	if (nf_ct_ext_exist(ct, id))
		return NULL;

	rcu_read_lock();
	t = rcu_dereference(nf_ct_ext_types[id]);
	BUG_ON(t == NULL);

	newoff = ALIGN(ct->ext->len, t->align);
	newlen = newoff + t->len;
	rcu_read_unlock();

	if (newlen >= ct->ext->real_len) {
		new = kmalloc(newlen, gfp);
		if (!new)
			return NULL;

		memcpy(new, ct->ext, ct->ext->len);

		for (i = 0; i < NF_CT_EXT_NUM; i++) {
			if (!nf_ct_ext_exist(ct, i))
				continue;

			rcu_read_lock();
			t = rcu_dereference(nf_ct_ext_types[i]);
			if (t && t->move)
				t->move(ct, (void *)ct->ext + ct->ext->offset[i]);
			rcu_read_unlock();
		}
		kfree(ct->ext);
		new->real_len = newlen;
		ct->ext = new;
	}

	ct->ext->offset[id] = newoff;
	ct->ext->len = newlen;
	memset((void *)ct->ext + newoff, 0, newlen - newoff);
	return (void *)ct->ext + newoff;
}
EXPORT_SYMBOL(__nf_ct_ext_add);

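/* Recompute the allocation size(s) so that the initial kzalloc() in
 * nf_ct_ext_create() can already hold every type that has
 * NF_CT_EXT_F_PREALLOC set.  Worked example with made-up numbers: if
 * sizeof(struct nf_ct_ext) is 16 and t1 has align 8, len 24, then
 * t1->alloc_size starts at 16 + ALIGN(16, 8) + 24 = 56; one prealloc
 * type t2 with align 8, len 32 extends it to ALIGN(56, 8) + 32 = 88,
 * which is then raised to NF_CT_EXT_MIN_SIZE if smaller.
 */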
static void update_alloc_size(struct nf_ct_ext_type *type)
{
	int i, j;
	struct nf_ct_ext_type *t1, *t2;
	enum nf_ct_ext_id min = 0, max = NF_CT_EXT_NUM - 1;

	/* unnecessary to update all types */
	if ((type->flags & NF_CT_EXT_F_PREALLOC) == 0) {
		min = type->id;
		max = type->id;
	}

	/* This assumes that the extension areas for the types whose
	   NF_CT_EXT_F_PREALLOC bit is set are laid out in id order. */
	for (i = min; i <= max; i++) {
		t1 = nf_ct_ext_types[i];
		if (!t1)
			continue;

		t1->alloc_size = sizeof(struct nf_ct_ext)
				 + ALIGN(sizeof(struct nf_ct_ext), t1->align)
				 + t1->len;
		for (j = 0; j < NF_CT_EXT_NUM; j++) {
			t2 = nf_ct_ext_types[j];
			if (t2 == NULL || t2 == t1 ||
			    (t2->flags & NF_CT_EXT_F_PREALLOC) == 0)
				continue;

			t1->alloc_size = ALIGN(t1->alloc_size, t2->align)
					 + t2->len;
		}
		if (t1->alloc_size < NF_CT_EXT_MIN_SIZE)
			t1->alloc_size = NF_CT_EXT_MIN_SIZE;
	}
}

/* This MUST be called in process context. */
int nf_ct_extend_register(struct nf_ct_ext_type *type)
{
	int ret = 0;

	mutex_lock(&nf_ct_ext_type_mutex);
	if (nf_ct_ext_types[type->id]) {
		ret = -EBUSY;
		goto out;
	}

	/* This ensures that nf_ct_ext_create() can allocate enough area
	   before updating alloc_size */
	type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
			   + type->len;
	rcu_assign_pointer(nf_ct_ext_types[type->id], type);
	update_alloc_size(type);
out:
	mutex_unlock(&nf_ct_ext_type_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_extend_register);

/* This MUST be called in process context. */
void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
{
	mutex_lock(&nf_ct_ext_type_mutex);
	rcu_assign_pointer(nf_ct_ext_types[type->id], NULL);
	update_alloc_size(type);
	mutex_unlock(&nf_ct_ext_type_mutex);
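	/* Readers in __nf_ct_ext_destroy()/__nf_ct_ext_add() may still be
	 * using the old type pointer under rcu_read_lock(); wait for them
	 * to finish before the caller frees or unloads the type.
	 */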
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);