// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_MDS_FLOCK_H
#define CEPH_MDS_FLOCK_H

#include <errno.h>

#include "common/debug.h"
#include "mdstypes.h"

inline ostream& operator<<(ostream& out, const ceph_filelock& l) {
  out << "start: " << l.start << ", length: " << l.length
      << ", client: " << l.client << ", owner: " << l.owner
      << ", pid: " << l.pid << ", type: " << (int)l.type
      << std::endl;
  return out;
}

inline bool ceph_filelock_owner_equal(const ceph_filelock& l, const ceph_filelock& r)
{
  if (l.client != r.client || l.owner != r.owner)
    return false;
  // The lock comes from an old client if the most significant bit of
  // 'owner' is not set. Old clients use both 'owner' and 'pid' to
  // identify the owner of a lock.
  if (l.owner & (1ULL << 63))
    return true;
  return l.pid == r.pid;
}
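
// Illustrative sketch (values are made up, not from the MDS): with the high
// bit set in 'owner', two locks from the same client compare equal even if
// their pids differ; with an old-style owner the pid is compared as well.
//
//   ceph_filelock a{}, b{};
//   a.client = b.client = 1;
//   a.owner  = b.owner  = (1ULL << 63) | 42;  // new-style owner
//   a.pid = 100; b.pid = 200;
//   // ceph_filelock_owner_equal(a, b) -> true (pid ignored)
//   // with a.owner = b.owner = 42 (old-style), it would be false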

inline int ceph_filelock_owner_compare(const ceph_filelock& l, const ceph_filelock& r)
{
  if (l.client != r.client)
    return l.client > r.client ? 1 : -1;
  if (l.owner != r.owner)
    return l.owner > r.owner ? 1 : -1;
  if (l.owner & (1ULL << 63))
    return 0;
  if (l.pid != r.pid)
    return l.pid > r.pid ? 1 : -1;
  return 0;
}

inline int ceph_filelock_compare(const ceph_filelock& l, const ceph_filelock& r)
{
  int ret = ceph_filelock_owner_compare(l, r);
  if (ret)
    return ret;
  if (l.start != r.start)
    return l.start > r.start ? 1 : -1;
  if (l.length != r.length)
    return l.length > r.length ? 1 : -1;
  if (l.type != r.type)
    return l.type > r.type ? 1 : -1;
  return 0;
}

inline bool operator<(const ceph_filelock& l, const ceph_filelock& r)
{
  return ceph_filelock_compare(l, r) < 0;
}

inline bool operator==(const ceph_filelock& l, const ceph_filelock& r) {
  return ceph_filelock_compare(l, r) == 0;
}

inline bool operator!=(const ceph_filelock& l, const ceph_filelock& r) {
  return ceph_filelock_compare(l, r) != 0;
}

class ceph_lock_state_t {
public:
  explicit ceph_lock_state_t(CephContext *cct_, int type_) : cct(cct_), type(type_) {}
  ~ceph_lock_state_t();
  /**
   * Check if a lock is on the waiting_locks list.
   *
   * @param fl The filelock to check for
   * @returns True if the lock is waiting, false otherwise
   */
  bool is_waiting(const ceph_filelock &fl) const;
  /**
   * Remove a lock from the waiting_locks list.
   *
   * @param fl The filelock to remove
   */
  void remove_waiting(const ceph_filelock& fl);
  /**
   * Try to set a new lock. If it's blocked and wait_on_fail is true,
   * add the lock to waiting_locks.
   * The lock needs to be of type CEPH_LOCK_EXCL or CEPH_LOCK_SHARED.
   * This may merge previous locks, or convert the type of already-owned
   * locks.
   *
   * @param new_lock The lock to set
   * @param wait_on_fail whether to wait until the lock can be set.
   *                     Otherwise it fails immediately when blocked.
   * @param replay true if the lock is being replayed (e.g. during recovery)
   *               rather than newly requested; blocked replayed locks are
   *               not queued on waiting_locks.
   * @param deadlock output parameter, set to true if waiting for the lock
   *                 would create a deadlock.
   *
   * @returns true if set, false if not set.
   */
  bool add_lock(ceph_filelock& new_lock, bool wait_on_fail, bool replay,
                bool *deadlock);
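  // A hedged caller-side sketch (not part of this header; lock_state and
  // new_lock are hypothetical locals): try to apply a lock and, on failure,
  // distinguish "queued to wait" from "would deadlock".
  //
  //   bool deadlock = false;
  //   if (!lock_state.add_lock(new_lock, true /*wait_on_fail*/,
  //                            false /*replay*/, &deadlock)) {
  //     if (deadlock) {
  //       // refuse the request (e.g. reply with -EDEADLK)
  //     } else {
  //       // new_lock is now parked on waiting_locks
  //     }
  //   }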
  /**
   * See if a lock is blocked by existing locks. If the lock is blocked,
   * it will be set to the value of the first blocking lock. Otherwise,
   * it will be returned unchanged, except for setting the type field
   * to CEPH_LOCK_UNLOCK.
   *
   * @param testing_lock The lock to check for conflicts on.
   */
  void look_for_lock(ceph_filelock& testing_lock);
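  // F_GETLK-style sketch (assumed caller-side usage): query a copy so the
  // original request is preserved.
  //
  //   ceph_filelock query = requested_lock;  // requested_lock: hypothetical
  //   lock_state.look_for_lock(query);
  //   if (query.type == CEPH_LOCK_UNLOCK) {
  //     // no conflict; the range could be locked
  //   } else {
  //     // query now describes the first conflicting lock
  //   }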

  /**
   * Remove lock(s) described in removal_lock. This may involve splitting a
   * previous lock or making a previous lock smaller.
   *
   * @param removal_lock The lock to remove
   * @param activated_locks A return parameter, holding activated wait locks.
   */
  void remove_lock(const ceph_filelock removal_lock,
                   list<ceph_filelock>& activated_locks);

  bool remove_all_from(client_t client);

  void encode(bufferlist& bl) const {
    using ceph::encode;
    encode(held_locks, bl);
    encode(client_held_lock_counts, bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    using ceph::decode;
    decode(held_locks, bl);
    decode(client_held_lock_counts, bl);
  }
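  // Round-trip sketch (assumed caller-side code): only the held locks and
  // their per-client counts are serialized; waiting_locks is runtime state
  // and is not part of the encoding.
  //
  //   bufferlist bl;
  //   lock_state.encode(bl);
  //   auto p = bl.cbegin();
  //   other_state.decode(p);  // other_state: another ceph_lock_state_t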
  bool empty() const {
    return held_locks.empty() && waiting_locks.empty() &&
           client_held_lock_counts.empty() &&
           client_waiting_lock_counts.empty();
  }

  multimap<uint64_t, ceph_filelock> held_locks;    // current locks
  multimap<uint64_t, ceph_filelock> waiting_locks; // locks waiting for other locks
  // both of the above are keyed by starting offset
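  // (multimaps rather than maps because several locks, e.g. shared locks
  // held by different owners, can start at the same offset)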
  map<client_t, int> client_held_lock_counts;
  map<client_t, int> client_waiting_lock_counts;

private:
  static const unsigned MAX_DEADLK_DEPTH = 5;

  /**
   * Check if adding the lock would cause a deadlock.
   *
   * @param fl The blocking filelock
   * @param overlapping_locks list of all overlapping locks
   * @param first_fl the lock at the head of the wait chain, used to detect
   *                 a cycle back to its owner; NULL at the top-level call
   * @param depth recursion call depth
   */
  bool is_deadlock(const ceph_filelock& fl,
                   list<multimap<uint64_t, ceph_filelock>::iterator>&
                     overlapping_locks,
                   const ceph_filelock *first_fl=NULL, unsigned depth=0) const;

  /**
   * Add a lock to the waiting_locks list.
   *
   * @param fl The filelock to add
   */
  void add_waiting(const ceph_filelock& fl);

  /**
   * Adjust old locks owned by a single process so that process can set
   * a new lock of a different type. Handle any changes needed to the old
   * locks (and the new lock) so that once the new lock is inserted into the
   * held_locks list the process has a coherent, non-fragmented set of lock
   * ranges. Make sure any overlapping locks are combined, trimmed, and
   * removed as needed.
   * This function should only be called once you know the lock will be
   * inserted, as it DOES adjust new_lock. You can call this function
   * on an empty list, in which case it does nothing.
   * This function does not clean up the old_locks list, so regard its
   * contents as stale (bad information) after invocation.
   *
   * @param new_lock The new lock the process has requested.
   * @param old_locks list of all locks currently held by the same
   *                  client/process that overlap new_lock.
   * @param neighbor_locks locks owned by the same process that neighbor
   *                       new_lock on the left or right side.
   */
  void adjust_locks(list<multimap<uint64_t, ceph_filelock>::iterator> old_locks,
                    ceph_filelock& new_lock,
                    list<multimap<uint64_t, ceph_filelock>::iterator>
                      neighbor_locks);

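  // Worked illustration of the intended POSIX-style behaviour (offsets are
  // made up): if a process holds a shared lock on bytes 0-9 and then sets
  // an exclusive lock on bytes 5-14, the old lock is trimmed to 0-4 and the
  // new exclusive lock covers 5-14. If both locks were shared, they would
  // instead be merged into a single lock covering 0-14.
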
  // get last lock prior to start position
  multimap<uint64_t, ceph_filelock>::iterator
  get_lower_bound(uint64_t start,
                  multimap<uint64_t, ceph_filelock>& lock_map);
  // get latest-starting lock that goes over the byte "end"
  multimap<uint64_t, ceph_filelock>::iterator
  get_last_before(uint64_t end,
                  multimap<uint64_t, ceph_filelock>& lock_map);

  /*
   * See if an iterator's lock covers any of the same bounds as a given range.
   * Rules: locks cover "length" bytes from "start", so the last covered
   * byte is at start + length - 1.
   * If the length is 0, the lock covers from "start" to the end of the file.
   */
  bool share_space(multimap<uint64_t, ceph_filelock>::iterator& iter,
                   uint64_t start, uint64_t end);

  bool share_space(multimap<uint64_t, ceph_filelock>::iterator& iter,
                   const ceph_filelock &lock) {
    uint64_t end = lock.start;
    if (lock.length) {
      end += lock.length - 1;
    } else { // zero length means end of file
      end = uint64_t(-1);
    }
    return share_space(iter, lock.start, end);
  }
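  // For example (illustrative numbers): a lock with start=10, length=5
  // covers bytes 10..14, so it shares space with any range touching 10..14;
  // with length=0 the covered range extends to uint64_t(-1), i.e. to the
  // end of the file.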
  /*
   * Get a list of all locks overlapping with the given lock's range.
   * lock: the lock to compare with.
   * overlaps: an empty list, to be filled.
   * Returns: true if at least one lock overlaps.
   */
  bool get_overlapping_locks(const ceph_filelock& lock,
                             list<multimap<uint64_t,
                                 ceph_filelock>::iterator>& overlaps,
                             list<multimap<uint64_t,
                                 ceph_filelock>::iterator> *self_neighbors);

  bool get_overlapping_locks(const ceph_filelock& lock,
                             list<multimap<uint64_t, ceph_filelock>::iterator>& overlaps) {
    return get_overlapping_locks(lock, overlaps, NULL);
  }

  /**
   * Get a list of all waiting locks that overlap with the given lock's range.
   * lock: specifies the range to compare with
   * overlaps: an empty list, to be filled
   * Returns: true if at least one waiting_lock overlaps
   */
  bool get_waiting_overlaps(const ceph_filelock& lock,
                            list<multimap<uint64_t,
                                ceph_filelock>::iterator>& overlaps);
  /*
   * Split a list of locks up by whether they're owned by the same
   * process as the given lock.
   * owner: the owning lock
   * locks: the list of locks (obtained from get_overlapping_locks, probably);
   *    will have all locks owned by owner removed
   * owned_locks: an empty list, to be filled with the locks owned by owner
   */
  void split_by_owner(const ceph_filelock& owner,
                      list<multimap<uint64_t,
                          ceph_filelock>::iterator>& locks,
                      list<multimap<uint64_t,
                          ceph_filelock>::iterator>& owned_locks);

  ceph_filelock *contains_exclusive_lock(list<multimap<uint64_t,
                                             ceph_filelock>::iterator>& locks);

  CephContext *cct;
  int type;
};
WRITE_CLASS_ENCODER(ceph_lock_state_t)

inline ostream& operator<<(ostream &out, const ceph_lock_state_t &l) {
  out << "ceph_lock_state_t. held_locks.size()=" << l.held_locks.size()
      << ", waiting_locks.size()=" << l.waiting_locks.size()
      << ", client_held_lock_counts -- " << l.client_held_lock_counts
      << "\n client_waiting_lock_counts -- " << l.client_waiting_lock_counts
      << "\n held_locks -- ";
  for (auto iter = l.held_locks.begin();
       iter != l.held_locks.end();
       ++iter)
    out << iter->second;
  out << "\n waiting_locks -- ";
  for (auto iter = l.waiting_locks.begin();
       iter != l.waiting_locks.end();
       ++iter)
    out << iter->second << "\n";
  return out;
}

#endif