/*
 * kref.h - library routines for handling generic reference counted objects
 *
 * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
 * Copyright (C) 2004 IBM Corp.
 *
 * based on kobject.h which was:
 * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
 * Copyright (C) 2002-2003 Open Source Development Labs
 *
 * This file is released under the GPLv2.
 *
 */

#ifndef _KREF_H_
#define _KREF_H_

#include <linux/bug.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

/*
 * struct kref - embeddable reference counter.
 * @refcount: number of outstanding references; when a kref_put*() variant
 *            drops it to zero the supplied release() function is invoked.
 */
struct kref {
	atomic_t refcount;
};

/**
 * kref_init - initialize object.
 * @kref: object in question.
 *
 * Sets the refcount to 1, which accounts for the caller's initial
 * reference; a matching kref_put() releases it.
 */
static inline void kref_init(struct kref *kref)
{
	atomic_set(&kref->refcount, 1);
}

/**
 * kref_get - increment refcount for object.
 * @kref: object.
 *
 * The caller must already hold a valid reference; taking a reference on an
 * object whose refcount may have reached zero is a use-after-free race
 * (see the WARN below) — use kref_get_unless_zero() for that case.
 */
static inline void kref_get(struct kref *kref)
{
	/* If the refcount was 0 before this increment, the object is being
	 * freed by another thread right now and we have a race condition.
	 * In that case kref_get_unless_zero() should be used instead.
	 * (atomic_inc_return() < 2 means the pre-increment value was < 1.)
	 */
	WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
}

/**
 * kref_sub - subtract a number of refcounts for object.
 * @kref: object.
 * @count: Number of refcounts to subtract.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function.  If the caller does pass kfree to this
 *	     function, you will be publicly mocked mercilessly by the kref
 *	     maintainer, and anyone else who happens to notice it.  You have
 *	     been warned.
 *
 * Subtract @count from the refcount, and if 0, call release().
 * Return 1 if the object was removed, otherwise return 0.  Beware, if this
 * function returns 0, you still can not count on the kref from remaining in
 * memory.  Only use the return value if you want to see if the kref is now
 * gone, not present.
 */
static inline int kref_sub(struct kref *kref, unsigned int count,
	     void (*release)(struct kref *kref))
{
	WARN_ON(release == NULL);

	/* atomic_sub_and_test() returns true only when the new value is 0,
	 * so exactly one caller ever runs release() for a given object. */
	if (atomic_sub_and_test((int) count, &kref->refcount)) {
		release(kref);
		return 1;
	}
	return 0;
}

/**
 * kref_put - decrement refcount for object.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function.  If the caller does pass kfree to this
 *	     function, you will be publicly mocked mercilessly by the kref
 *	     maintainer, and anyone else who happens to notice it.  You have
 *	     been warned.
 *
 * Decrement the refcount, and if 0, call release().
 * Return 1 if the object was removed, otherwise return 0.  Beware, if this
 * function returns 0, you still can not count on the kref from remaining in
 * memory.  Only use the return value if you want to see if the kref is now
 * gone, not present.
 */
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
	/* Plain single-reference drop: kref_sub() with a count of 1. */
	return kref_sub(kref, 1, release);
}

/**
 * kref_put_spinlock_irqsave - decrement refcount for object.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function.
 * @lock: lock to take in release case
 *
 * Behaves identically to kref_put() with one exception.  If the reference
 * count drops to zero, the lock will be taken atomically wrt dropping the
 * reference count.  The release function has to call spin_unlock() without
 * _irqrestore.
 */
static inline int kref_put_spinlock_irqsave(struct kref *kref,
		void (*release)(struct kref *kref),
		spinlock_t *lock)
{
	unsigned long flags;

	WARN_ON(release == NULL);
	/* Fast path: if the refcount is not 1, drop one reference without
	 * touching the lock at all.  Fails (returns 0) only when the count
	 * is exactly 1, i.e. this may be the final put. */
	if (atomic_add_unless(&kref->refcount, -1, 1))
		return 0;
	spin_lock_irqsave(lock, flags);
	if (atomic_dec_and_test(&kref->refcount)) {
		release(kref);
		/* release() dropped @lock via plain spin_unlock() (see the
		 * kerneldoc above), so only the saved IRQ state is restored
		 * here — not the lock. */
		local_irq_restore(flags);
		return 1;
	}
	/* Someone else gained a reference between the fast-path check and
	 * taking the lock; nothing to release. */
	spin_unlock_irqrestore(lock, flags);
	return 0;
}

/**
 * kref_put_mutex - decrement refcount for object, taking a mutex on last put.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required.
 * @lock: mutex to acquire before dropping what may be the last reference.
 *
 * Decrement the refcount; if it may reach zero, @lock is taken first so
 * that the final decrement and @release happen with @lock held.  Note that
 * @release is invoked with @lock still held and must unlock it itself —
 * there is no mutex_unlock() on the release path below.
 *
 * Return 1 if the object was removed, otherwise return 0.
 */
static inline int kref_put_mutex(struct kref *kref,
				 void (*release)(struct kref *kref),
				 struct mutex *lock)
{
	WARN_ON(release == NULL);
	/* Fast path: decrement without the mutex unless the count is 1
	 * (i.e. this could be the final put). */
	if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
		mutex_lock(lock);
		if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
			/* Another reference was gained while we waited for
			 * the mutex; the object stays alive. */
			mutex_unlock(lock);
			return 0;
		}
		release(kref);	/* runs with @lock held; must drop it */
		return 1;
	}
	return 0;
}

/**
 * kref_get_unless_zero - Increment refcount for object unless it is zero.
 * @kref: object.
 *
 * Return non-zero if the increment succeeded. Otherwise return 0.
 *
 * This function is intended to simplify locking around refcounting for
 * objects that can be looked up from a lookup structure, and which are
 * removed from that lookup structure in the object destructor.
 * Operations on such objects require at least a read lock around
 * lookup + kref_get, and a write lock around kref_put + remove from lookup
 * structure. Furthermore, RCU implementations become extremely tricky.
 * With a lookup followed by a kref_get_unless_zero *with return value check*
 * locking in the kref_put path can be deferred to the actual removal from
 * the lookup structure and RCU lookups become trivial.
 */
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
	/* atomic_add_unless() refuses the increment when the count is 0,
	 * i.e. when the object is already on its way to destruction. */
	return atomic_add_unless(&kref->refcount, 1, 0);
}
#endif /* _KREF_H_ */