netfilter: ct_extend: fix the wrong alloc_size
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / netfilter / nf_conntrack_extend.c
1 /* Structure dynamic extension infrastructure
2 * Copyright (C) 2004 Rusty Russell IBM Corporation
3 * Copyright (C) 2007 Netfilter Core Team <coreteam@netfilter.org>
4 * Copyright (C) 2007 USAGI/WIDE Project <http://www.linux-ipv6.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/rcupdate.h>
15 #include <linux/slab.h>
16 #include <linux/skbuff.h>
17 #include <net/netfilter/nf_conntrack_extend.h>
18
/* Registered extension types, indexed by enum nf_ct_ext_id.  Entries are
 * published/cleared with rcu_assign_pointer() and looked up under the RCU
 * read lock. */
static struct nf_ct_ext_type __rcu *nf_ct_ext_types[NF_CT_EXT_NUM];
/* Serializes (un)registration and the alloc_size recomputation. */
static DEFINE_MUTEX(nf_ct_ext_type_mutex);
21
22 void __nf_ct_ext_destroy(struct nf_conn *ct)
23 {
24 unsigned int i;
25 struct nf_ct_ext_type *t;
26 struct nf_ct_ext *ext = ct->ext;
27
28 for (i = 0; i < NF_CT_EXT_NUM; i++) {
29 if (!__nf_ct_ext_exist(ext, i))
30 continue;
31
32 rcu_read_lock();
33 t = rcu_dereference(nf_ct_ext_types[i]);
34
35 /* Here the nf_ct_ext_type might have been unregisterd.
36 * I.e., it has responsible to cleanup private
37 * area in all conntracks when it is unregisterd.
38 */
39 if (t && t->destroy)
40 t->destroy(ct);
41 rcu_read_unlock();
42 }
43 }
44 EXPORT_SYMBOL(__nf_ct_ext_destroy);
45
46 static void *
47 nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
48 {
49 unsigned int off, len;
50 struct nf_ct_ext_type *t;
51 size_t alloc_size;
52
53 rcu_read_lock();
54 t = rcu_dereference(nf_ct_ext_types[id]);
55 BUG_ON(t == NULL);
56 off = ALIGN(sizeof(struct nf_ct_ext), t->align);
57 len = off + t->len;
58 alloc_size = t->alloc_size;
59 rcu_read_unlock();
60
61 *ext = kzalloc(alloc_size, gfp);
62 if (!*ext)
63 return NULL;
64
65 (*ext)->offset[id] = off;
66 (*ext)->len = len;
67
68 return (void *)(*ext) + off;
69 }
70
71 static void __nf_ct_ext_free_rcu(struct rcu_head *head)
72 {
73 struct nf_ct_ext *ext = container_of(head, struct nf_ct_ext, rcu);
74 kfree(ext);
75 }
76
/* Add extension @id to conntrack @ct, growing the extension area with
 * __krealloc() if one already exists.
 *
 * Returns a pointer to the zeroed private area for @id, or NULL if the
 * extension is already present or memory is exhausted.
 */
void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
{
	struct nf_ct_ext *old, *new;
	int i, newlen, newoff;
	struct nf_ct_ext_type *t;

	/* Conntrack must not be confirmed to avoid races on reallocation. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	old = ct->ext;
	if (!old)
		return nf_ct_ext_create(&ct->ext, id, gfp);

	/* Each extension may be present at most once. */
	if (__nf_ct_ext_exist(old, id))
		return NULL;

	rcu_read_lock();
	t = rcu_dereference(nf_ct_ext_types[id]);
	BUG_ON(t == NULL);

	/* Append the new extension after the existing area, honouring
	 * the type's alignment requirement. */
	newoff = ALIGN(old->len, t->align);
	newlen = newoff + t->len;
	rcu_read_unlock();

	new = __krealloc(old, newlen, gfp);
	if (!new)
		return NULL;

	if (new != old) {
		/* __krealloc() copied the data into a new area: give each
		 * already-present extension a chance to fix up internal
		 * pointers via its ->move callback, then free the old area
		 * only after an RCU grace period, since readers may still
		 * be dereferencing it. */
		for (i = 0; i < NF_CT_EXT_NUM; i++) {
			if (!__nf_ct_ext_exist(old, i))
				continue;

			rcu_read_lock();
			t = rcu_dereference(nf_ct_ext_types[i]);
			if (t && t->move)
				t->move((void *)new + new->offset[i],
					(void *)old + old->offset[i]);
			rcu_read_unlock();
		}
		call_rcu(&old->rcu, __nf_ct_ext_free_rcu);
		ct->ext = new;
	}

	new->offset[id] = newoff;
	new->len = newlen;
	/* Callers rely on the new private area starting out zeroed. */
	memset((void *)new + newoff, 0, newlen - newoff);
	return (void *)new + newoff;
}
EXPORT_SYMBOL(__nf_ct_ext_add);
127
/* Recompute ->alloc_size for the extension types affected by
 * (un)registering @type.
 *
 * alloc_size is what nf_ct_ext_create() allocates up front, so that
 * extensions flagged NF_CT_EXT_F_PREALLOC can later be added without a
 * reallocation.  Caller must hold nf_ct_ext_type_mutex.
 */
static void update_alloc_size(struct nf_ct_ext_type *type)
{
	int i, j;
	struct nf_ct_ext_type *t1, *t2;
	enum nf_ct_ext_id min = 0, max = NF_CT_EXT_NUM - 1;

	/* unnecessary to update all types: a type without
	 * NF_CT_EXT_F_PREALLOC only affects its own alloc_size, whereas
	 * a PREALLOC type is folded into every other type's size. */
	if ((type->flags & NF_CT_EXT_F_PREALLOC) == 0) {
		min = type->id;
		max = type->id;
	}

	/* This assumes that extended areas in conntrack for the types
	   whose NF_CT_EXT_F_PREALLOC bit set are allocated in order */
	for (i = min; i <= max; i++) {
		t1 = nf_ct_ext_types[i];
		if (!t1)
			continue;

		/* Base: the header plus t1's own aligned area... */
		t1->alloc_size = ALIGN(sizeof(struct nf_ct_ext), t1->align) +
				 t1->len;
		/* ...then reserve room for every other PREALLOC type. */
		for (j = 0; j < NF_CT_EXT_NUM; j++) {
			t2 = nf_ct_ext_types[j];
			if (t2 == NULL || t2 == t1 ||
			    (t2->flags & NF_CT_EXT_F_PREALLOC) == 0)
				continue;

			t1->alloc_size = ALIGN(t1->alloc_size, t2->align)
					 + t2->len;
		}
	}
}
160
161 /* This MUST be called in process context. */
162 int nf_ct_extend_register(struct nf_ct_ext_type *type)
163 {
164 int ret = 0;
165
166 mutex_lock(&nf_ct_ext_type_mutex);
167 if (nf_ct_ext_types[type->id]) {
168 ret = -EBUSY;
169 goto out;
170 }
171
172 /* This ensures that nf_ct_ext_create() can allocate enough area
173 before updating alloc_size */
174 type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
175 + type->len;
176 rcu_assign_pointer(nf_ct_ext_types[type->id], type);
177 update_alloc_size(type);
178 out:
179 mutex_unlock(&nf_ct_ext_type_mutex);
180 return ret;
181 }
182 EXPORT_SYMBOL_GPL(nf_ct_extend_register);
183
/* Unregister a conntrack extension type.  MUST be called in process
 * context: it blocks in rcu_barrier() until all pending
 * __nf_ct_ext_free_rcu() callbacks have run, so no freed extension
 * area can still be referenced after this returns.
 */
void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
{
	mutex_lock(&nf_ct_ext_type_mutex);
	rcu_assign_pointer(nf_ct_ext_types[type->id], NULL);
	update_alloc_size(type);
	mutex_unlock(&nf_ct_ext_type_mutex);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);