// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_MDS_FLOCK_H
#define CEPH_MDS_FLOCK_H

#include <errno.h>

#include "common/debug.h"
#include "mdstypes.h"


inline ostream& operator<<(ostream& out, const ceph_filelock& l) {
  out << "start: " << l.start << ", length: " << l.length
      << ", client: " << l.client << ", owner: " << l.owner
      << ", pid: " << l.pid << ", type: " << (int)l.type
      << std::endl;
  return out;
}

inline bool ceph_filelock_owner_equal(const ceph_filelock& l, const ceph_filelock& r)
{
  if (l.client != r.client || l.owner != r.owner)
    return false;
  // The file lock is from an old client if the most significant bit of
  // 'owner' is not set. Old clients use both 'owner' and 'pid' to
  // identify the owner of a lock.
  if (l.owner & (1ULL << 63))
    return true;
  return l.pid == r.pid;
}
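
/*
 * A brief illustration (hypothetical values, assuming the ceph_filelock
 * layout from ceph_fs.h): when the high bit of 'owner' is set, two locks
 * that differ only in pid still compare as having the same owner.
 *
 *   ceph_filelock a = {}, b = {};
 *   a.client = b.client = 1;
 *   a.owner = b.owner = (1ULL << 63) | 42;  // new-style client: owner alone identifies
 *   a.pid = 100;
 *   b.pid = 200;
 *   // ceph_filelock_owner_equal(a, b) -> true (pid is ignored here);
 *   // with the high bit clear, the differing pids would make it false.
 */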

inline int ceph_filelock_owner_compare(const ceph_filelock& l, const ceph_filelock& r)
{
  if (l.client != r.client)
    return l.client > r.client ? 1 : -1;
  if (l.owner != r.owner)
    return l.owner > r.owner ? 1 : -1;
  if (l.owner & (1ULL << 63))
    return 0;
  if (l.pid != r.pid)
    return l.pid > r.pid ? 1 : -1;
  return 0;
}

inline int ceph_filelock_compare(const ceph_filelock& l, const ceph_filelock& r)
{
  int ret = ceph_filelock_owner_compare(l, r);
  if (ret)
    return ret;
  if (l.start != r.start)
    return l.start > r.start ? 1 : -1;
  if (l.length != r.length)
    return l.length > r.length ? 1 : -1;
  if (l.type != r.type)
    return l.type > r.type ? 1 : -1;
  return 0;
}

inline bool operator<(const ceph_filelock& l, const ceph_filelock& r)
{
  return ceph_filelock_compare(l, r) < 0;
}

inline bool operator==(const ceph_filelock& l, const ceph_filelock& r) {
  return ceph_filelock_compare(l, r) == 0;
}

inline bool operator!=(const ceph_filelock& l, const ceph_filelock& r) {
  return ceph_filelock_compare(l, r) != 0;
}

class ceph_lock_state_t {
  CephContext *cct;
  int type;
public:
  explicit ceph_lock_state_t(CephContext *cct_, int type_) : cct(cct_), type(type_) {}
  ~ceph_lock_state_t();
  multimap<uint64_t, ceph_filelock> held_locks;    // current locks
  multimap<uint64_t, ceph_filelock> waiting_locks; // locks waiting for other locks
  // both of the above are keyed by starting offset
  map<client_t, int> client_held_lock_counts;
  map<client_t, int> client_waiting_lock_counts;

  /**
   * Check if a lock is on the waiting_locks list.
   *
   * @param fl The filelock to check for
   * @returns True if the lock is waiting, false otherwise
   */
  bool is_waiting(const ceph_filelock &fl) const;
  /**
   * Remove a lock from the waiting_locks list
   *
   * @param fl The filelock to remove
   */
  void remove_waiting(const ceph_filelock& fl);
  /**
   * Try to set a new lock. If it's blocked and wait_on_fail is true,
   * add the lock to waiting_locks.
   * The lock needs to be of type CEPH_LOCK_EXCL or CEPH_LOCK_SHARED.
   * This may merge previous locks, or convert the type of already-owned
   * locks.
   *
   * @param new_lock The lock to set
   * @param wait_on_fail whether to wait until the lock can be set.
   * Otherwise it fails immediately when blocked.
   * @param replay true if this lock request is being replayed
   * @param deadlock output flag; set to true if waiting for the lock
   * would create a deadlock
   *
   * @returns true if set, false if not set.
   */
  bool add_lock(ceph_filelock& new_lock, bool wait_on_fail, bool replay,
                bool *deadlock);
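
  /*
   * A minimal usage sketch (hypothetical caller; lock_state, client_id,
   * owner_id and pid are illustrative names): request a whole-file
   * exclusive lock without blocking.
   *
   *   ceph_filelock fl = {};
   *   fl.start = 0;
   *   fl.length = 0;                // zero length covers to end of file
   *   fl.client = client_id;
   *   fl.owner = owner_id;
   *   fl.pid = pid;
   *   fl.type = CEPH_LOCK_EXCL;
   *   bool deadlock = false;
   *   if (!lock_state.add_lock(fl, false, false, &deadlock)) {
   *     // conflicting lock held by another owner; with wait_on_fail=true
   *     // the request would instead be queued on waiting_locks
   *   }
   */
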
  /**
   * See if a lock is blocked by existing locks. If the lock is blocked,
   * it will be set to the value of the first blocking lock. Otherwise,
   * it will be returned unchanged, except for setting the type field
   * to CEPH_LOCK_UNLOCK.
   *
   * @param testing_lock The lock to check for conflicts on.
   */
  void look_for_lock(ceph_filelock& testing_lock);
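
  /*
   * Usage sketch (hypothetical caller, much like an fcntl(F_GETLK) probe):
   * test for a conflict without taking the lock.
   *
   *   ceph_filelock probe = fl;           // range/owner to test
   *   lock_state.look_for_lock(probe);
   *   if (probe.type == CEPH_LOCK_UNLOCK) {
   *     // no conflicting lock; probe is otherwise unchanged
   *   } else {
   *     // probe now describes the first blocking lock
   *   }
   */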

  /**
   * Remove lock(s) described in removal_lock. This may involve splitting a
   * previous lock or making a previous lock smaller.
   *
   * @param removal_lock The lock to remove
   * @param activated_locks A return parameter, holding activated wait locks.
   */
  void remove_lock(const ceph_filelock removal_lock,
                   list<ceph_filelock>& activated_locks);
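
  /*
   * Usage sketch (hypothetical caller, per the contract described above):
   *
   *   list<ceph_filelock> activated;
   *   lock_state.remove_lock(fl, activated);  // fl: owner + byte range to release
   *   // any waiting locks reported in 'activated' can now be retried
   */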

  bool remove_all_from(client_t client);
private:
  static const unsigned MAX_DEADLK_DEPTH = 5;

  /**
   * Check if adding the lock causes deadlock
   *
   * @param fl The blocking filelock
   * @param overlapping_locks list of all overlapping locks
   * @param first_fl the original lock request at the root of the recursion,
   * used to detect a cycle back to its owner
   * @param depth recursion call depth
   */
  bool is_deadlock(const ceph_filelock& fl,
                   list<multimap<uint64_t, ceph_filelock>::iterator>&
                     overlapping_locks,
                   const ceph_filelock *first_fl=NULL, unsigned depth=0) const;

  /**
   * Add a lock to the waiting_locks list
   *
   * @param fl The filelock to add
   */
  void add_waiting(const ceph_filelock& fl);

  /**
   * Adjust old locks owned by a single process so that the process can set
   * a new lock of a different type. Handle any changes needed to the old locks
   * (and the new lock) so that once the new lock is inserted into the
   * held_locks list the process has a coherent, non-fragmented set of lock
   * ranges. Make sure any overlapping locks are combined, trimmed, and removed
   * as needed.
   * This function should only be called once you know the lock will be
   * inserted, as it DOES adjust new_lock. You can call this function
   * on an empty list, in which case it does nothing.
   * This function does not remove elements from old_locks, so regard the
   * list as stale after the call.
   *
   * @param new_lock The new lock the process has requested.
   * @param old_locks list of all locks currently held by the same
   * client/process that overlap new_lock.
   * @param neighbor_locks locks owned by the same process that neighbor
   * new_lock on the left or right side.
   */
  void adjust_locks(list<multimap<uint64_t, ceph_filelock>::iterator> old_locks,
                    ceph_filelock& new_lock,
                    list<multimap<uint64_t, ceph_filelock>::iterator>
                      neighbor_locks);
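
  /*
   * Worked example (hypothetical offsets, following the description above):
   * the intended end state when one owner converts part of a shared range.
   *
   *   held (same owner): SHARED start=0,   length=100   and  SHARED start=100, length=100
   *   new_lock:          EXCL   start=50,  length=100
   *   intended held_locks for that owner after adjust_locks + insert:
   *                      SHARED start=0,   length=50
   *                      EXCL   start=50,  length=100
   *                      SHARED start=150, length=50
   */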

  // get last lock prior to start position
  multimap<uint64_t, ceph_filelock>::iterator
  get_lower_bound(uint64_t start,
                  multimap<uint64_t, ceph_filelock>& lock_map);
  // get latest-starting lock that goes over the byte "end"
  multimap<uint64_t, ceph_filelock>::iterator
  get_last_before(uint64_t end,
                  multimap<uint64_t, ceph_filelock>& lock_map);

  /*
   * See if an iterator's lock covers any of the same bounds as a given range
   * Rules: locks cover "length" bytes from "start", so the last covered
   * byte is at start + length - 1.
   * If the length is 0, the lock covers from "start" to the end of the file.
   */
  bool share_space(multimap<uint64_t, ceph_filelock>::iterator& iter,
                   uint64_t start, uint64_t end);

  bool share_space(multimap<uint64_t, ceph_filelock>::iterator& iter,
                   const ceph_filelock &lock) {
    uint64_t end = lock.start;
    if (lock.length) {
      end += lock.length - 1;
    } else { // zero length means end of file
      end = uint64_t(-1);
    }
    return share_space(iter, lock.start, end);
  }
  /*
   * Get a list of all locks overlapping with the given lock's range
   * lock: the lock to compare with.
   * overlaps: an empty list, to be filled.
   * self_neighbors: if non-NULL, filled with locks owned by the same owner
   * that immediately neighbor the given range.
   * Returns: true if at least one lock overlaps.
   */
  bool get_overlapping_locks(const ceph_filelock& lock,
                             list<multimap<uint64_t,
                                 ceph_filelock>::iterator> & overlaps,
                             list<multimap<uint64_t,
                                 ceph_filelock>::iterator> *self_neighbors);


  bool get_overlapping_locks(const ceph_filelock& lock,
                             list<multimap<uint64_t, ceph_filelock>::iterator>& overlaps) {
    return get_overlapping_locks(lock, overlaps, NULL);
  }

  /**
   * Get a list of all waiting locks that overlap with the given lock's range.
   * lock: specifies the range to compare with
   * overlaps: an empty list, to be filled
   * Returns: true if at least one waiting_lock overlaps
   */
  bool get_waiting_overlaps(const ceph_filelock& lock,
                            list<multimap<uint64_t,
                                ceph_filelock>::iterator>& overlaps);
  /*
   * Split a list of locks by whether they're owned by the same
   * process as the given lock.
   * owner: the owning lock
   * locks: the list of locks (obtained from get_overlapping_locks, probably);
   * all locks owned by owner will be removed from it
   * owned_locks: an empty list, to be filled with the locks owned by owner
   */
  void split_by_owner(const ceph_filelock& owner,
                      list<multimap<uint64_t,
                          ceph_filelock>::iterator> & locks,
                      list<multimap<uint64_t,
                          ceph_filelock>::iterator> & owned_locks);

  ceph_filelock *contains_exclusive_lock(list<multimap<uint64_t,
                                             ceph_filelock>::iterator>& locks);

public:
  void encode(bufferlist& bl) const {
    ::encode(held_locks, bl);
    ::encode(waiting_locks, bl);
    ::encode(client_held_lock_counts, bl);
    ::encode(client_waiting_lock_counts, bl);
  }
  void decode(bufferlist::iterator& bl) {
    ::decode(held_locks, bl);
    ::decode(waiting_locks, bl);
    ::decode(client_held_lock_counts, bl);
    ::decode(client_waiting_lock_counts, bl);
  }
  void clear() {
    held_locks.clear();
    waiting_locks.clear();
    client_held_lock_counts.clear();
    client_waiting_lock_counts.clear();
  }
  bool empty() const {
    return held_locks.empty() && waiting_locks.empty() &&
           client_held_lock_counts.empty() &&
           client_waiting_lock_counts.empty();
  }
};
WRITE_CLASS_ENCODER(ceph_lock_state_t)
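
/*
 * A minimal end-to-end usage sketch (hypothetical caller; the CephContext
 * 'cct' and the CEPH_LOCK_FCNTL lock type are illustrative), tying the
 * public interface together:
 *
 *   ceph_lock_state_t lock_state(cct, CEPH_LOCK_FCNTL);
 *
 *   ceph_filelock fl = {};              // fill in client/owner/pid/range/type
 *   bool deadlock = false;
 *   if (lock_state.add_lock(fl, true, false, &deadlock)) {
 *     // granted and recorded in held_locks
 *   } else if (!deadlock) {
 *     // queued on waiting_locks; is_waiting(fl) should now return true
 *   }
 *
 *   // the whole state round-trips through the encoder generated by
 *   // WRITE_CLASS_ENCODER above:
 *   bufferlist bl;
 *   ::encode(lock_state, bl);
 */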


inline ostream& operator<<(ostream &out, const ceph_lock_state_t &l) {
  out << "ceph_lock_state_t. held_locks.size()=" << l.held_locks.size()
      << ", waiting_locks.size()=" << l.waiting_locks.size()
      << ", client_held_lock_counts -- " << l.client_held_lock_counts
      << "\n client_waiting_lock_counts -- " << l.client_waiting_lock_counts
      << "\n held_locks -- ";
  for (auto iter = l.held_locks.begin();
       iter != l.held_locks.end();
       ++iter)
    out << iter->second;
  out << "\n waiting_locks -- ";
  for (auto iter = l.waiting_locks.begin();
       iter != l.waiting_locks.end();
       ++iter)
    out << iter->second << "\n";
  return out;
}

#endif