/* Structure dynamic extension infrastructure
 * Copyright (C) 2004 Rusty Russell IBM Corporation
 * Copyright (C) 2007 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (C) 2007 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack_extend.h>

static struct nf_ct_ext_type *nf_ct_ext_types[NF_CT_EXT_NUM];
static DEFINE_MUTEX(nf_ct_ext_type_mutex);

void __nf_ct_ext_destroy(struct nf_conn *ct)
{
	unsigned int i;
	struct nf_ct_ext_type *t;

	for (i = 0; i < NF_CT_EXT_NUM; i++) {
		if (!nf_ct_ext_exist(ct, i))
			continue;

		rcu_read_lock();
		t = rcu_dereference(nf_ct_ext_types[i]);
		/* Here the nf_ct_ext_type might have been unregistered.
		 * I.e., it is responsible for cleaning up its private
		 * area in all conntracks when it is unregistered.
		 */
		if (t && t->destroy)
			t->destroy(ct);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(__nf_ct_ext_destroy);

static void *
nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
{
	unsigned int off, len;
	struct nf_ct_ext_type *t;

	rcu_read_lock();
	t = rcu_dereference(nf_ct_ext_types[id]);
	BUG_ON(t == NULL);
	off = ALIGN(sizeof(struct nf_ct_ext), t->align);
	len = off + t->len;
	rcu_read_unlock();

	*ext = kzalloc(t->alloc_size, gfp);
	if (!*ext)
		return NULL;

	(*ext)->offset[id] = off;
	(*ext)->len = len;

	return (void *)(*ext) + off;
}
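
/* Worked example (illustrative sizes, not taken from any real type): with
 * sizeof(struct nf_ct_ext) == 4 and a type whose ->align == 8 and
 * ->len == 24, nf_ct_ext_create() computes
 *
 *	off = ALIGN(4, 8) = 8
 *	len = 8 + 24      = 32
 *
 * so the type's data starts 8 bytes into the allocation and ext->len is
 * recorded as 32.  Note that the allocation itself uses t->alloc_size,
 * which update_alloc_size() below keeps at least this large (and larger
 * once other NF_CT_EXT_F_PREALLOC types are registered).
 */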

void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
{
	struct nf_ct_ext *new;
	int i, newlen, newoff;
	struct nf_ct_ext_type *t;

	if (!ct->ext)
		return nf_ct_ext_create(&ct->ext, id, gfp);

	if (nf_ct_ext_exist(ct, id))
		return NULL;

	rcu_read_lock();
	t = rcu_dereference(nf_ct_ext_types[id]);
	BUG_ON(t == NULL);

	newoff = ALIGN(ct->ext->len, t->align);
	newlen = newoff + t->len;
	rcu_read_unlock();

	if (newlen >= ksize(ct->ext)) {
		new = kmalloc(newlen, gfp);
		if (!new)
			return NULL;

		memcpy(new, ct->ext, ct->ext->len);

		for (i = 0; i < NF_CT_EXT_NUM; i++) {
			if (!nf_ct_ext_exist(ct, i))
				continue;

			rcu_read_lock();
			t = rcu_dereference(nf_ct_ext_types[i]);
			if (t && t->move)
				t->move((void *)new + new->offset[i],
					(void *)ct->ext + ct->ext->offset[i]);
			rcu_read_unlock();
		}
		kfree(ct->ext);
		ct->ext = new;
	}

	ct->ext->offset[id] = newoff;
	ct->ext->len = newlen;
	memset((void *)ct->ext + newoff, 0, newlen - newoff);
	return (void *)ct->ext + newoff;
}
EXPORT_SYMBOL(__nf_ct_ext_add);
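
/* Usage sketch (hypothetical caller, not part of this file): once an
 * extension type has been registered (see nf_ct_extend_register() below),
 * a user attaches its per-conntrack area roughly like this.  The id
 * NF_CT_EXT_EXAMPLE and struct example_state are illustrative names only;
 * in-tree users normally go through the nf_ct_ext_add()/nf_ct_ext_find()
 * wrappers from <net/netfilter/nf_conntrack_extend.h> rather than calling
 * __nf_ct_ext_add() directly.
 *
 *	struct example_state *st;
 *
 *	st = __nf_ct_ext_add(ct, NF_CT_EXT_EXAMPLE, GFP_ATOMIC);
 *	if (st == NULL)
 *		return -ENOMEM;		(already present, or allocation failed)
 *	st->hits = 0;			(the returned area starts out zeroed)
 */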

static void update_alloc_size(struct nf_ct_ext_type *type)
{
	int i, j;
	struct nf_ct_ext_type *t1, *t2;
	enum nf_ct_ext_id min = 0, max = NF_CT_EXT_NUM - 1;

	/* unnecessary to update all types */
	if ((type->flags & NF_CT_EXT_F_PREALLOC) == 0) {
		min = type->id;
		max = type->id;
	}

	/* This assumes that extended areas in conntrack for the types
	   whose NF_CT_EXT_F_PREALLOC bit is set are allocated in order */
	for (i = min; i <= max; i++) {
		t1 = nf_ct_ext_types[i];
		if (!t1)
			continue;

		t1->alloc_size = sizeof(struct nf_ct_ext)
				 + ALIGN(sizeof(struct nf_ct_ext), t1->align)
				 + t1->len;
		for (j = 0; j < NF_CT_EXT_NUM; j++) {
			t2 = nf_ct_ext_types[j];
			if (t2 == NULL || t2 == t1 ||
			    (t2->flags & NF_CT_EXT_F_PREALLOC) == 0)
				continue;

			t1->alloc_size = ALIGN(t1->alloc_size, t2->align)
					 + t2->len;
		}
	}
}
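
/* Worked example (illustrative sizes, not taken from any real type): with
 * sizeof(struct nf_ct_ext) == 4, a type A with .len = 16 and .align = 4,
 * and one other registered type B with NF_CT_EXT_F_PREALLOC set,
 * .len = 24 and .align = 8, update_alloc_size() leaves A with
 *
 *	alloc_size = 4 + ALIGN(4, 4) + 16 = 24
 *	alloc_size = ALIGN(24, 8) + 24    = 48
 *
 * i.e. the initial kzalloc() in nf_ct_ext_create() for a conntrack that
 * gets A first already reserves room for B's area to be appended later.
 */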

/* This MUST be called in process context. */
int nf_ct_extend_register(struct nf_ct_ext_type *type)
{
	int ret = 0;

	mutex_lock(&nf_ct_ext_type_mutex);
	if (nf_ct_ext_types[type->id]) {
		ret = -EBUSY;
		goto out;
	}

	/* This ensures that nf_ct_ext_create() can allocate enough area
	   before updating alloc_size */
	type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
			   + type->len;
	rcu_assign_pointer(nf_ct_ext_types[type->id], type);
	update_alloc_size(type);
out:
	mutex_unlock(&nf_ct_ext_type_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_extend_register);
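
/* Registration sketch (hypothetical module, not part of this file): a user
 * that wants 16 bytes of 8-byte-aligned per-conntrack data fills in a
 * static nf_ct_ext_type and registers it from its init path.  The id
 * NF_CT_EXT_EXAMPLE and the callback are illustrative names only; the
 * destroy and move callbacks are checked for NULL before use above, so
 * either may be omitted, and NF_CT_EXT_F_PREALLOC is optional.
 *
 *	static void example_ext_destroy(struct nf_conn *ct)
 *	{
 *		release whatever the extension's private area references
 *	}
 *
 *	static struct nf_ct_ext_type example_extend = {
 *		.len		= 16,
 *		.align		= 8,
 *		.destroy	= example_ext_destroy,
 *		.id		= NF_CT_EXT_EXAMPLE,
 *	};
 *
 *	err = nf_ct_extend_register(&example_extend);
 *	if (err < 0)
 *		goto err_out;
 */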

/* This MUST be called in process context. */
void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
{
	mutex_lock(&nf_ct_ext_type_mutex);
	rcu_assign_pointer(nf_ct_ext_types[type->id], NULL);
	update_alloc_size(type);
	mutex_unlock(&nf_ct_ext_type_mutex);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
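
/* Teardown sketch (hypothetical module exit, matching the registration
 * example above): the type is unregistered when its owning module goes
 * away.  The pointer is cleared under the mutex and synchronize_rcu()
 * waits out readers that may still be dereferencing it; as noted in
 * __nf_ct_ext_destroy(), the type itself remains responsible for cleaning
 * up any private state it attached to conntracks that still exist.
 *
 *	static void __exit example_ext_fini(void)
 *	{
 *		nf_ct_extend_unregister(&example_extend);
 *	}
 */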