net: Remove CONFIG_KMOD from net/ (towards removing CONFIG_KMOD entirely)
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / dccp / ccid.c
1 /*
2 * net/dccp/ccid.c
3 *
4 * An implementation of the DCCP protocol
5 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6 *
7 * CCID infrastructure
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14 #include "ccid.h"
15
16 static struct ccid_operations *ccids[CCID_MAX];
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
/* Count of readers currently inside a ccids_read_lock() section. */
static atomic_t ccids_lockct = ATOMIC_INIT(0);
/* Serializes writers against each other and lets readers detect them. */
static DEFINE_SPINLOCK(ccids_lock);

/*
 * The strategy is: modifications ccids vector are short, do not sleep and
 * veeery rare, but read access should be free of any exclusive locks.
 *
 * Writers take ccids_lock and then spin (yielding) until the reader count
 * drains to zero; readers bump the count and then merely wait for any
 * in-flight writer to drop the spinlock. NOTE(review): correctness relies
 * on the smp_mb__after_atomic_inc()/spin_unlock_wait() pairing below —
 * do not reorder these operations.
 */
static void ccids_write_lock(void)
{
	spin_lock(&ccids_lock);
	/* Wait out existing readers; drop the lock while spinning so that
	 * a reader stuck behind us in ccids_read_lock() can make progress. */
	while (atomic_read(&ccids_lockct) != 0) {
		spin_unlock(&ccids_lock);
		yield();
		spin_lock(&ccids_lock);
	}
}

static inline void ccids_write_unlock(void)
{
	spin_unlock(&ccids_lock);
}

static inline void ccids_read_lock(void)
{
	/* Publish ourselves as a reader before checking for a writer. */
	atomic_inc(&ccids_lockct);
	smp_mb__after_atomic_inc();
	/* If a writer already holds ccids_lock, wait until it releases it. */
	spin_unlock_wait(&ccids_lock);
}

static inline void ccids_read_unlock(void)
{
	atomic_dec(&ccids_lockct);
}

#else
/* UP/non-preempt: accesses cannot interleave, so locking is a no-op. */
#define ccids_write_lock() do { } while(0)
#define ccids_write_unlock() do { } while(0)
#define ccids_read_lock() do { } while(0)
#define ccids_read_unlock() do { } while(0)
#endif
58
59 static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
60 {
61 struct kmem_cache *slab;
62 char slab_name_fmt[32], *slab_name;
63 va_list args;
64
65 va_start(args, fmt);
66 vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
67 va_end(args);
68
69 slab_name = kstrdup(slab_name_fmt, GFP_KERNEL);
70 if (slab_name == NULL)
71 return NULL;
72 slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0,
73 SLAB_HWCACHE_ALIGN, NULL);
74 if (slab == NULL)
75 kfree(slab_name);
76 return slab;
77 }
78
79 static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
80 {
81 if (slab != NULL) {
82 const char *name = kmem_cache_name(slab);
83
84 kmem_cache_destroy(slab);
85 kfree(name);
86 }
87 }
88
89 int ccid_register(struct ccid_operations *ccid_ops)
90 {
91 int err = -ENOBUFS;
92
93 ccid_ops->ccid_hc_rx_slab =
94 ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size,
95 "ccid%u_hc_rx_sock",
96 ccid_ops->ccid_id);
97 if (ccid_ops->ccid_hc_rx_slab == NULL)
98 goto out;
99
100 ccid_ops->ccid_hc_tx_slab =
101 ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size,
102 "ccid%u_hc_tx_sock",
103 ccid_ops->ccid_id);
104 if (ccid_ops->ccid_hc_tx_slab == NULL)
105 goto out_free_rx_slab;
106
107 ccids_write_lock();
108 err = -EEXIST;
109 if (ccids[ccid_ops->ccid_id] == NULL) {
110 ccids[ccid_ops->ccid_id] = ccid_ops;
111 err = 0;
112 }
113 ccids_write_unlock();
114 if (err != 0)
115 goto out_free_tx_slab;
116
117 pr_info("CCID: Registered CCID %d (%s)\n",
118 ccid_ops->ccid_id, ccid_ops->ccid_name);
119 out:
120 return err;
121 out_free_tx_slab:
122 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
123 ccid_ops->ccid_hc_tx_slab = NULL;
124 goto out;
125 out_free_rx_slab:
126 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
127 ccid_ops->ccid_hc_rx_slab = NULL;
128 goto out;
129 }
130
131 EXPORT_SYMBOL_GPL(ccid_register);
132
133 int ccid_unregister(struct ccid_operations *ccid_ops)
134 {
135 ccids_write_lock();
136 ccids[ccid_ops->ccid_id] = NULL;
137 ccids_write_unlock();
138
139 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
140 ccid_ops->ccid_hc_tx_slab = NULL;
141 ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
142 ccid_ops->ccid_hc_rx_slab = NULL;
143
144 pr_info("CCID: Unregistered CCID %d (%s)\n",
145 ccid_ops->ccid_id, ccid_ops->ccid_name);
146 return 0;
147 }
148
149 EXPORT_SYMBOL_GPL(ccid_unregister);
150
151 struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, gfp_t gfp)
152 {
153 struct ccid_operations *ccid_ops;
154 struct ccid *ccid = NULL;
155
156 ccids_read_lock();
157 #ifdef CONFIG_MODULES
158 if (ccids[id] == NULL) {
159 /* We only try to load if in process context */
160 ccids_read_unlock();
161 if (gfp & GFP_ATOMIC)
162 goto out;
163 request_module("net-dccp-ccid-%d", id);
164 ccids_read_lock();
165 }
166 #endif
167 ccid_ops = ccids[id];
168 if (ccid_ops == NULL)
169 goto out_unlock;
170
171 if (!try_module_get(ccid_ops->ccid_owner))
172 goto out_unlock;
173
174 ccids_read_unlock();
175
176 ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab :
177 ccid_ops->ccid_hc_tx_slab, gfp);
178 if (ccid == NULL)
179 goto out_module_put;
180 ccid->ccid_ops = ccid_ops;
181 if (rx) {
182 memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size);
183 if (ccid->ccid_ops->ccid_hc_rx_init != NULL &&
184 ccid->ccid_ops->ccid_hc_rx_init(ccid, sk) != 0)
185 goto out_free_ccid;
186 } else {
187 memset(ccid + 1, 0, ccid_ops->ccid_hc_tx_obj_size);
188 if (ccid->ccid_ops->ccid_hc_tx_init != NULL &&
189 ccid->ccid_ops->ccid_hc_tx_init(ccid, sk) != 0)
190 goto out_free_ccid;
191 }
192 out:
193 return ccid;
194 out_unlock:
195 ccids_read_unlock();
196 goto out;
197 out_free_ccid:
198 kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab :
199 ccid_ops->ccid_hc_tx_slab, ccid);
200 ccid = NULL;
201 out_module_put:
202 module_put(ccid_ops->ccid_owner);
203 goto out;
204 }
205
206 EXPORT_SYMBOL_GPL(ccid_new);
207
208 struct ccid *ccid_hc_rx_new(unsigned char id, struct sock *sk, gfp_t gfp)
209 {
210 return ccid_new(id, sk, 1, gfp);
211 }
212
213 EXPORT_SYMBOL_GPL(ccid_hc_rx_new);
214
215 struct ccid *ccid_hc_tx_new(unsigned char id,struct sock *sk, gfp_t gfp)
216 {
217 return ccid_new(id, sk, 0, gfp);
218 }
219
220 EXPORT_SYMBOL_GPL(ccid_hc_tx_new);
221
222 static void ccid_delete(struct ccid *ccid, struct sock *sk, int rx)
223 {
224 struct ccid_operations *ccid_ops;
225
226 if (ccid == NULL)
227 return;
228
229 ccid_ops = ccid->ccid_ops;
230 if (rx) {
231 if (ccid_ops->ccid_hc_rx_exit != NULL)
232 ccid_ops->ccid_hc_rx_exit(sk);
233 kmem_cache_free(ccid_ops->ccid_hc_rx_slab, ccid);
234 } else {
235 if (ccid_ops->ccid_hc_tx_exit != NULL)
236 ccid_ops->ccid_hc_tx_exit(sk);
237 kmem_cache_free(ccid_ops->ccid_hc_tx_slab, ccid);
238 }
239 ccids_read_lock();
240 if (ccids[ccid_ops->ccid_id] != NULL)
241 module_put(ccid_ops->ccid_owner);
242 ccids_read_unlock();
243 }
244
/* Release a receiver-side CCID block (counterpart of ccid_hc_rx_new). */
void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk)
{
	ccid_delete(ccid, sk, /* rx: */ 1);
}
249
250 EXPORT_SYMBOL_GPL(ccid_hc_rx_delete);
251
/* Release a sender-side CCID block (counterpart of ccid_hc_tx_new). */
void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk)
{
	ccid_delete(ccid, sk, /* rx: */ 0);
}
256
257 EXPORT_SYMBOL_GPL(ccid_hc_tx_delete);