/* net/tipc/ref.c — exported from mirror_ubuntu-zesty-kernel (gitweb page header removed) */
1 /*
2 * net/tipc/ref.c: TIPC object registry code
3 *
4 * Copyright (c) 1991-2006, Ericsson AB
5 * Copyright (c) 2004-2007, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "core.h"
38 #include "ref.h"
39
/**
 * struct reference - TIPC object reference entry
 * @object: pointer to object associated with reference entry;
 *          NULL while the entry is free or uninitialized
 * @lock: spinlock controlling access to object
 * @ref: reference value for object (combines instance & array index info);
 *       the low bits (table index_mask) hold this entry's own array index
 *       while in use, or the next free index while on the free list; the
 *       upper bits hold an instance value bumped on each recycle so stale
 *       references can be detected
 */
struct reference {
	void *object;
	spinlock_t lock;
	u32 ref;
};
51
/**
 * struct ref_table - table of TIPC object reference entries
 * @entries: pointer to array of reference entries
 * @capacity: array index of first unusable entry
 * @init_point: array index of first uninitialized entry
 * @first_free: array index of first unused object reference entry
 * @last_free: array index of last unused object reference entry
 * @index_mask: bitmask for array index portion of reference values
 * @start_mask: initial value for instance value portion of reference values
 */
struct ref_table {
	struct reference *entries;
	u32 capacity;
	u32 init_point;
	u32 first_free;
	u32 last_free;
	u32 index_mask;
	u32 start_mask;
};
71
72 /*
73 * Object reference table consists of 2**N entries.
74 *
75 * State Object ptr Reference
76 * ----- ---------- ---------
77 * In use non-NULL XXXX|own index
78 * (XXXX changes each time entry is acquired)
79 * Free NULL YYYY|next free index
80 * (YYYY is one more than last used XXXX)
81 * Uninitialized NULL 0
82 *
83 * Entry 0 is not used; this allows index 0 to denote the end of the free list.
84 *
85 * Note that a reference value of 0 does not necessarily indicate that an
86 * entry is uninitialized, since the last entry in the free list could also
87 * have a reference value of 0 (although this is unlikely).
88 */

/* The single, file-global reference table instance. */
static struct ref_table tipc_ref_table;

/*
 * Protects the table's free list and init_point; entry contents are
 * additionally protected by each entry's own spinlock.
 */
static DEFINE_RWLOCK(ref_table_lock);
93
94 /**
95 * tipc_ref_table_init - create reference table for objects
96 */
97 int tipc_ref_table_init(u32 requested_size, u32 start)
98 {
99 struct reference *table;
100 u32 actual_size;
101
102 /* account for unused entry, then round up size to a power of 2 */
103
104 requested_size++;
105 for (actual_size = 16; actual_size < requested_size; actual_size <<= 1)
106 /* do nothing */ ;
107
108 /* allocate table & mark all entries as uninitialized */
109 table = vzalloc(actual_size * sizeof(struct reference));
110 if (table == NULL)
111 return -ENOMEM;
112
113 tipc_ref_table.entries = table;
114 tipc_ref_table.capacity = requested_size;
115 tipc_ref_table.init_point = 1;
116 tipc_ref_table.first_free = 0;
117 tipc_ref_table.last_free = 0;
118 tipc_ref_table.index_mask = actual_size - 1;
119 tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask;
120
121 return 0;
122 }
123
124 /**
125 * tipc_ref_table_stop - destroy reference table for objects
126 */
127 void tipc_ref_table_stop(void)
128 {
129 if (!tipc_ref_table.entries)
130 return;
131
132 vfree(tipc_ref_table.entries);
133 tipc_ref_table.entries = NULL;
134 }
135
/**
 * tipc_ref_acquire - create reference to an object
 * @object: pointer to the object to register (must be non-NULL)
 * @lock: output; on success, set to the entry's spinlock, which is left
 *        LOCKED for the caller
 *
 * Register an object pointer in reference table and lock the object.
 * Returns a unique reference value that is used from then on to retrieve the
 * object pointer, or to determine that the object has been deregistered.
 * Returns 0 on failure (NULL object, table not initialized, or table full);
 * *lock is untouched in that case.
 *
 * Note: The object is returned in the locked state so that the caller can
 * register a partially initialized object, without running the risk that
 * the object will be accessed before initialization is complete.
 */
u32 tipc_ref_acquire(void *object, spinlock_t **lock)
{
	u32 index;
	u32 index_mask;
	u32 next_plus_upper;
	u32 ref;
	struct reference *entry = NULL;

	if (!object) {
		pr_err("Attempt to acquire ref. to non-existent obj\n");
		return 0;
	}
	if (!tipc_ref_table.entries) {
		pr_err("Ref. table not found in acquisition attempt\n");
		return 0;
	}

	/* take a free entry, if available; otherwise initialize a new entry */
	write_lock_bh(&ref_table_lock);
	if (tipc_ref_table.first_free) {
		/*
		 * Reuse the head of the free list.  A free entry's ref field
		 * stores the next free index in its low bits and an already-
		 * bumped instance value in its high bits, so the new
		 * reference differs from any issued for this slot before.
		 */
		index = tipc_ref_table.first_free;
		entry = &(tipc_ref_table.entries[index]);
		index_mask = tipc_ref_table.index_mask;
		next_plus_upper = entry->ref;
		tipc_ref_table.first_free = next_plus_upper & index_mask;
		ref = (next_plus_upper & ~index_mask) + index;
	} else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
		/* free list empty: lazily initialize the next virgin entry */
		index = tipc_ref_table.init_point++;
		entry = &(tipc_ref_table.entries[index]);
		spin_lock_init(&entry->lock);
		ref = tipc_ref_table.start_mask + index;
	} else {
		/* table exhausted */
		ref = 0;
	}
	write_unlock_bh(&ref_table_lock);

	/*
	 * Grab the lock so no one else can modify this entry
	 * While we assign its ref value & object pointer
	 */
	if (entry) {
		spin_lock_bh(&entry->lock);
		entry->ref = ref;
		entry->object = object;
		*lock = &entry->lock;
		/*
		 * keep it locked, the caller is responsible
		 * for unlocking this when they're done with it
		 */
	}

	return ref;
}
200
/**
 * tipc_ref_discard - invalidate references to an object
 * @ref: reference value previously returned by tipc_ref_acquire()
 *
 * Disallow future references to an object and free up the entry for re-use.
 * Note: The entry's spin_lock may still be busy after discard
 */
void tipc_ref_discard(u32 ref)
{
	struct reference *entry;
	u32 index;
	u32 index_mask;

	if (!tipc_ref_table.entries) {
		pr_err("Ref. table not found during discard attempt\n");
		return;
	}

	/* the low bits of a reference are the entry's array index */
	index_mask = tipc_ref_table.index_mask;
	index = ref & index_mask;
	entry = &(tipc_ref_table.entries[index]);

	write_lock_bh(&ref_table_lock);

	if (!entry->object) {
		pr_err("Attempt to discard ref. to non-existent obj\n");
		goto exit;
	}
	if (entry->ref != ref) {
		/* instance bits differ: the entry was already recycled */
		pr_err("Attempt to discard non-existent reference\n");
		goto exit;
	}

	/*
	 * mark entry as unused; increment instance part of entry's reference
	 * to invalidate any subsequent references
	 */
	entry->object = NULL;
	entry->ref = (ref & ~index_mask) + (index_mask + 1);

	/* append entry to free entry list */
	if (tipc_ref_table.first_free == 0)
		tipc_ref_table.first_free = index;
	else
		/* old tail's low (next-index) bits are 0, so OR-ing works */
		tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index;
	tipc_ref_table.last_free = index;

exit:
	write_unlock_bh(&ref_table_lock);
}
250
251 /**
252 * tipc_ref_lock - lock referenced object and return pointer to it
253 */
254 void *tipc_ref_lock(u32 ref)
255 {
256 if (likely(tipc_ref_table.entries)) {
257 struct reference *entry;
258
259 entry = &tipc_ref_table.entries[ref &
260 tipc_ref_table.index_mask];
261 if (likely(entry->ref != 0)) {
262 spin_lock_bh(&entry->lock);
263 if (likely((entry->ref == ref) && (entry->object)))
264 return entry->object;
265 spin_unlock_bh(&entry->lock);
266 }
267 }
268 return NULL;
269 }
270
271
272 /**
273 * tipc_ref_deref - return pointer referenced object (without locking it)
274 */
275 void *tipc_ref_deref(u32 ref)
276 {
277 if (likely(tipc_ref_table.entries)) {
278 struct reference *entry;
279
280 entry = &tipc_ref_table.entries[ref &
281 tipc_ref_table.index_mask];
282 if (likely(entry->ref == ref))
283 return entry->object;
284 }
285 return NULL;
286 }