/*
 * net/tipc/ref.c: TIPC object registry code
 *
 * Copyright (c) 1991-2006, Ericsson AB
 * Copyright (c) 2004-2007, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "ref.h"

/**
 * struct reference - TIPC object reference entry
 * @object: pointer to object associated with reference entry
 * @lock: spinlock controlling access to object
 * @ref: reference value for object (combines instance & array index info)
 */

struct reference {
	void *object;
	spinlock_t lock;
	u32 ref;
};

/**
 * struct ref_table - table of TIPC object reference entries
 * @entries: pointer to array of reference entries
 * @capacity: array index of first unusable entry
 * @init_point: array index of first uninitialized entry
 * @first_free: array index of first unused object reference entry
 * @last_free: array index of last unused object reference entry
 * @index_mask: bitmask for array index portion of reference values
 * @start_mask: initial value for instance value portion of reference values
 */

struct ref_table {
	struct reference *entries;
	u32 capacity;
	u32 init_point;
	u32 first_free;
	u32 last_free;
	u32 index_mask;
	u32 start_mask;
};

/*
 * Object reference table consists of 2**N entries.
 *
 * State		Object ptr	Reference
 * -----		----------	---------
 * In use		non-NULL	XXXX|own index
 *				(XXXX changes each time entry is acquired)
 * Free			NULL		YYYY|next free index
 *				(YYYY is one more than last used XXXX)
 * Uninitialized	NULL		0
 *
 * Entry 0 is not used; this allows index 0 to denote the end of the free list.
 *
 * Note that a reference value of 0 does not necessarily indicate that an
 * entry is uninitialized, since the last entry in the free list could also
 * have a reference value of 0 (although this is unlikely).
 */

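/*
 * Illustrative sketch (not part of the original file): how a reference value
 * splits into its instance and index parts. The helper names and the sample
 * numbers are hypothetical and exist only to make the encoding concrete;
 * with a 16-entry table index_mask is 0xf, so a reference of 0x35 selects
 * slot 5 and carries instance value 0x30.
 */
#if 0
static inline u32 ref_example_index(u32 ref, u32 index_mask)
{
	return ref & index_mask;		/* low bits: array slot */
}

static inline u32 ref_example_instance(u32 ref, u32 index_mask)
{
	return ref & ~index_mask;		/* high bits: reuse generation */
}
#endif
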
static struct ref_table tipc_ref_table = { NULL };

static DEFINE_RWLOCK(ref_table_lock);

/**
 * tipc_ref_table_init - create reference table for objects
 * @requested_size: number of usable entries the table must provide
 * @start: initial value for the instance portion of reference values
 */

int tipc_ref_table_init(u32 requested_size, u32 start)
{
	struct reference *table;
	u32 actual_size;

	/* account for unused entry, then round up size to a power of 2 */

	requested_size++;
	for (actual_size = 16; actual_size < requested_size; actual_size <<= 1)
		/* do nothing */ ;

	/* allocate table & mark all entries as uninitialized */

	table = __vmalloc(actual_size * sizeof(struct reference),
			  GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (table == NULL)
		return -ENOMEM;

	tipc_ref_table.entries = table;
	tipc_ref_table.capacity = requested_size;
	tipc_ref_table.init_point = 1;
	tipc_ref_table.first_free = 0;
	tipc_ref_table.last_free = 0;
	tipc_ref_table.index_mask = actual_size - 1;
	tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask;

	return TIPC_OK;
}
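
/*
 * Worked example (not part of the original file): the rounding loop above,
 * extracted so the arithmetic is easy to check. With requested_size = 1000
 * the loop yields actual_size = 1024 (after the +1 for the unused entry 0),
 * so index_mask becomes 0x3ff. The function name is hypothetical.
 */
#if 0
static u32 ref_example_round_up(u32 requested_size)
{
	u32 actual_size;

	requested_size++;			/* reserve entry 0 */
	for (actual_size = 16; actual_size < requested_size; actual_size <<= 1)
		;				/* smallest power of 2 >= request */
	return actual_size;			/* e.g. 1001 -> 1024 */
}
#endif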

/**
 * tipc_ref_table_stop - destroy reference table for objects
 */

void tipc_ref_table_stop(void)
{
	if (!tipc_ref_table.entries)
		return;

	vfree(tipc_ref_table.entries);
	tipc_ref_table.entries = NULL;
}

/**
 * tipc_ref_acquire - create reference to an object
 * @object: pointer to the object being registered
 * @lock: used to pass back a pointer to the entry's spinlock
 *
 * Return a unique reference value which can be translated back to the pointer
 * 'object' at a later time. Also, pass back a pointer to the lock protecting
 * the object, but without locking it.
 */

u32 tipc_ref_acquire(void *object, spinlock_t **lock)
{
	struct reference *entry;
	u32 index;
	u32 index_mask;
	u32 next_plus_upper;
	u32 ref;

	if (!object) {
		err("Attempt to acquire reference to non-existent object\n");
		return 0;
	}
	if (!tipc_ref_table.entries) {
		err("Reference table not found during acquisition attempt\n");
		return 0;
	}

	/* take a free entry, if available; otherwise initialize a new entry */

	write_lock_bh(&ref_table_lock);
	if (tipc_ref_table.first_free) {
		index = tipc_ref_table.first_free;
		entry = &(tipc_ref_table.entries[index]);
		index_mask = tipc_ref_table.index_mask;
		/* take lock in case a previous user of entry still holds it */
		spin_lock_bh(&entry->lock);
		next_plus_upper = entry->ref;
		tipc_ref_table.first_free = next_plus_upper & index_mask;
		ref = (next_plus_upper & ~index_mask) + index;
		entry->ref = ref;
		entry->object = object;
		spin_unlock_bh(&entry->lock);
		*lock = &entry->lock;
	} else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
		index = tipc_ref_table.init_point++;
		entry = &(tipc_ref_table.entries[index]);
		spin_lock_init(&entry->lock);
		ref = tipc_ref_table.start_mask + index;
		entry->ref = ref;
		entry->object = object;
		*lock = &entry->lock;
	} else {
		ref = 0;
	}
	write_unlock_bh(&ref_table_lock);

	return ref;
}

/**
 * tipc_ref_discard - invalidate references to an object
 * @ref: reference value of the entry to release
 *
 * Disallow future references to an object and free up the entry for re-use.
 * Note: The entry's spin_lock may still be busy after discard
 */

void tipc_ref_discard(u32 ref)
{
	struct reference *entry;
	u32 index;
	u32 index_mask;

	if (!tipc_ref_table.entries) {
		err("Reference table not found during discard attempt\n");
		return;
	}

	index_mask = tipc_ref_table.index_mask;
	index = ref & index_mask;
	entry = &(tipc_ref_table.entries[index]);

	write_lock_bh(&ref_table_lock);

	if (!entry->object) {
		err("Attempt to discard reference to non-existent object\n");
		goto exit;
	}
	if (entry->ref != ref) {
		err("Attempt to discard non-existent reference\n");
		goto exit;
	}

	/*
	 * mark entry as unused; increment instance part of entry's reference
	 * to invalidate any subsequent references
	 */

	entry->object = NULL;
	entry->ref = (ref & ~index_mask) + (index_mask + 1);

	/* append entry to free entry list */

	if (tipc_ref_table.first_free == 0)
		tipc_ref_table.first_free = index;
	else
		tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index;
	tipc_ref_table.last_free = index;

exit:
	write_unlock_bh(&ref_table_lock);
}

/**
 * tipc_ref_lock - lock referenced object and return pointer to it
 * @ref: reference value of the object to look up
 */

void *tipc_ref_lock(u32 ref)
{
	if (likely(tipc_ref_table.entries)) {
		struct reference *entry;

		entry = &tipc_ref_table.entries[ref &
						tipc_ref_table.index_mask];
		if (likely(entry->ref != 0)) {
			spin_lock_bh(&entry->lock);
			if (likely((entry->ref == ref) && (entry->object)))
				return entry->object;
			spin_unlock_bh(&entry->lock);
		}
	}
	return NULL;
}

/**
 * tipc_ref_unlock - unlock referenced object
 * @ref: reference value of the object to unlock
 */

void tipc_ref_unlock(u32 ref)
{
	if (likely(tipc_ref_table.entries)) {
		struct reference *entry;

		entry = &tipc_ref_table.entries[ref &
						tipc_ref_table.index_mask];
		if (likely((entry->ref == ref) && (entry->object)))
			spin_unlock_bh(&entry->lock);
		else
			err("Attempt to unlock non-existent reference\n");
	}
}

/**
 * tipc_ref_deref - return pointer to referenced object (without locking it)
 * @ref: reference value of the object to look up
 */

void *tipc_ref_deref(u32 ref)
{
	if (likely(tipc_ref_table.entries)) {
		struct reference *entry;

		entry = &tipc_ref_table.entries[ref &
						tipc_ref_table.index_mask];
		if (likely(entry->ref == ref))
			return entry->object;
	}
	return NULL;
}
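
/*
 * Usage sketch (not part of the original file): the typical lifecycle of a
 * reference as implied by the API above. The object and variable names are
 * hypothetical, and error handling is reduced to the bare minimum.
 */
#if 0
static void ref_example_lifecycle(void)
{
	int my_object = 0;			/* stand-in for a real TIPC object */
	spinlock_t *lock;
	u32 ref;
	int *obj;

	ref = tipc_ref_acquire(&my_object, &lock);	/* register object */
	if (!ref)
		return;					/* table full or missing */

	obj = tipc_ref_lock(ref);			/* translate ref, lock entry */
	if (obj) {
		/* ... use the object while holding its entry lock ... */
		tipc_ref_unlock(ref);
	}

	tipc_ref_discard(ref);				/* invalidate ref, recycle entry */
}
#endif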