/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. There are two types
 * of readers:
 * 1. Sequence readers which never block a writer but they may have to retry
 *    if a writer is in progress by detecting change in sequence number.
 *    Writers do not wait for a sequence reader.
 * 2. Locking readers which will wait if a writer or another locking reader
 *    is in progress. A locking reader in progress will also block a writer
 *    from going forward. Unlike the regular rwlock, the read lock here is
 *    exclusive so that only one locking reader can get it.
 *
 * This is not as cache friendly as brlock. Also, this may not work well
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected non-blocking reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
 *
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */
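
/*
 * Illustrative locking-reader usage, for contrast with the lockless loop
 * above (a sketch only; "foo_lock" is a placeholder name, not part of this
 * API). The locking reader takes the underlying spinlock and therefore never
 * needs to retry, at the cost of excluding writers and other locking readers:
 *
 *	read_seqlock_excl(&foo_lock);
 *	... read the protected data ...
 *	read_sequnlock_excl(&foo_lock);
 */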

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <asm/processor.h>

/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * update, starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */
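
/*
 * Illustrative write-side pattern for the counter-only version (a sketch
 * only; "foo_mutex", "foo_seq" and the data update are placeholders). The
 * caller provides its own serialization around the seqcount update:
 *
 *	mutex_lock(&foo_mutex);
 *	write_seqcount_begin(&foo_seq);
 *	... update the protected data ...
 *	write_seqcount_end(&foo_seq);
 *	mutex_unlock(&foo_mutex);
 */
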
typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;

static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
		.dep_map = { .name = #lockname } \

# define seqcount_init(s)				\
	do {						\
		static struct lock_class_key __key;	\
		__seqcount_init((s), #s, &__key);	\
	} while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, 1, _RET_IP_);
	local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}

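/*
 * Illustrative initialization (a sketch; "foo_seq" is a placeholder name).
 * A seqcount_t can be initialized statically with SEQCNT_ZERO() or at run
 * time with seqcount_init():
 *
 *	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *
 *	seqcount_init(&foo_seq);	(for dynamically allocated objects)
 */
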
/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = READ_ONCE(s->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}

/**
 * raw_read_seqcount - Read the raw seqcount
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount without any lockdep checking and without checking or
 * masking the LSB. Calling code is responsible for handling that.
 */
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);
	smp_rmb();
	return ret;
}

/**
 * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount_begin opens a read critical section of the given
 * seqcount, but without any lockdep checking. Validity of the critical
 * section is tested by calling read_seqcount_retry().
 */
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	smp_rmb();
	return ret;
}

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	seqcount_lockdep_reader_access(s);
	return raw_read_seqcount_begin(s);
}

/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, we will fail the
 * read_seqcount_retry() instead of stabilizing at the beginning of the
 * critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);
	smp_rmb();
	return ret & ~1;
}

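/*
 * Illustrative use of raw_seqcount_begin() (a sketch only; "foo_seq",
 * "foo_lock" and do_lookup() are placeholders). Because the LSB is masked
 * off, an active writer makes the first read_seqcount_retry() fail
 * immediately, so the caller can fall back to locking instead of spinning:
 *
 *	seq = raw_seqcount_begin(&foo_seq);
 *	ret = do_lookup(...);
 *	if (read_seqcount_retry(&foo_seq, seq)) {
 *		spin_lock(&foo_lock);
 *		ret = do_lookup(...);
 *		spin_unlock(&foo_lock);
 *	}
 */
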
/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return __read_seqcount_retry(s, start);
}


static inline void raw_write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}

/**
 * raw_write_seqcount_barrier - do a seq write barrier
 * @s: pointer to seqcount_t
 *
 * This can be used to provide an ordering guarantee instead of the
 * usual consistency guarantee. It is one wmb cheaper, because we can
 * collapse the two back-to-back wmb()s.
 *
 *	seqcount_t seq;
 *	bool X = true, Y = false;
 *
 *	void read(void)
 *	{
 *		bool x, y;
 *
 *		do {
 *			int s = read_seqcount_begin(&seq);
 *
 *			x = X; y = Y;
 *
 *		} while (read_seqcount_retry(&seq, s));
 *
 *		BUG_ON(!x && !y);
 *	}
 *
 *	void write(void)
 *	{
 *		Y = true;
 *
 *		raw_write_seqcount_barrier(&seq);
 *
 *		X = false;
 *	}
 */
static inline void raw_write_seqcount_barrier(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
	s->sequence++;
}

static inline int raw_read_seqcount_latch(seqcount_t *s)
{
	int seq = READ_ONCE(s->sequence);
	/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
	smp_read_barrier_depends();
	return seq;
}

/**
 * raw_write_seqcount_latch - redirect readers to even/odd copy
 * @s: pointer to seqcount_t
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
 * -- you most likely do not need this.
 *
 * Where the traditional RCU/lockless data structures rely on atomic
 * modifications to ensure queries observe either the old or the new state, the
 * latch allows the same for non-atomic updates. The trade-off is doubling the
 * cost of storage; we have to maintain two copies of the entire data
 * structure.
 *
 * Very simply put: we first modify one copy and then the other. This ensures
 * there is always one copy in a stable state, ready to give us an answer.
 *
 * The basic form is a data structure like:
 *
 *	struct latch_struct {
 *		seqcount_t		seq;
 *		struct data_struct	data[2];
 *	};
 *
 * Where a modification, which is assumed to be externally serialized, does the
 * following:
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		smp_wmb();	<- Ensure that the last data[1] update is visible
 *		latch->seq++;
 *		smp_wmb();	<- Ensure that the seqcount update is visible
 *
 *		modify(latch->data[0], ...);
 *
 *		smp_wmb();	<- Ensure that the data[0] update is visible
 *		latch->seq++;
 *		smp_wmb();	<- Ensure that the seqcount update is visible
 *
 *		modify(latch->data[1], ...);
 *	}
 *
 * The query will have a form like:
 *
 *	struct entry *latch_query(struct latch_struct *latch, ...)
 *	{
 *		struct entry *entry;
 *		unsigned seq, idx;
 *
 *		do {
 *			seq = raw_read_seqcount_latch(&latch->seq);
 *
 *			idx = seq & 0x01;
 *			entry = data_query(latch->data[idx], ...);
 *
 *			smp_rmb();
 *		} while (seq != latch->seq);
 *
 *		return entry;
 *	}
 *
 * So during the modification, queries are first redirected to data[1]. Then we
 * modify data[0]. When that is complete, we redirect queries back to data[0]
 * and we can modify data[1].
 *
 * NOTE: The non-requirement for atomic modifications does _NOT_ include
 *       the publishing of new entries in the case where data is a dynamic
 *       data structure.
 *
 *       An iteration might start in data[0] and get suspended long enough
 *       to miss an entire modification sequence; once it resumes it might
 *       observe the new entry.
 *
 * NOTE: When data is a dynamic data structure, one should use regular RCU
 *       patterns to manage the lifetimes of the objects within.
 */
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
	smp_wmb();	/* prior stores before incrementing "sequence" */
	s->sequence++;
	smp_wmb();	/* increment "sequence" before following stores */
}

/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	raw_write_seqcount_begin(s);
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

static inline void write_seqcount_begin(seqcount_t *s)
{
	write_seqcount_begin_nested(s, 0);
}

static inline void write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, 1, _RET_IP_);
	raw_write_seqcount_end(s);
}

/**
 * write_seqcount_invalidate - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_invalidate, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_invalidate(seqcount_t *s)
{
	smp_wmb();
	s->sequence += 2;
}

typedef struct {
	struct seqcount seqcount;
	spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO(lockname),	\
		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
	}

#define seqlock_init(x)					\
	do {						\
		seqcount_init(&(x)->seqcount);		\
		spin_lock_init(&(x)->lock);		\
	} while (0)

#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)

/*
 * Read side functions for starting and finalizing a read side section.
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}

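/*
 * Illustrative writer/reader pairing for a seqlock_t (a sketch only;
 * "foo_lock", "struct foo" and the fields are placeholders):
 *
 *	static DEFINE_SEQLOCK(foo_lock);
 *	static struct foo foo_data;
 *
 *	Writer:
 *		write_seqlock(&foo_lock);
 *		foo_data.a = ...;
 *		foo_data.b = ...;
 *		write_sequnlock(&foo_lock);
 *
 *	Lockless reader:
 *		unsigned seq;
 *		struct foo snapshot;
 *
 *		do {
 *			seq = read_seqbegin(&foo_lock);
 *			snapshot = foo_data;
 *		} while (read_seqretry(&foo_lock, seq));
 */
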
static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	write_seqcount_begin(&sl->seqcount);
	return flags;
}

#define write_seqlock_irqsave(lock, flags)			\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}

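/*
 * Illustrative irqsave variant for writers that can race with interrupt
 * context (a sketch only; "foo_lock" is a placeholder):
 *
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&foo_lock, flags);
 *	... update the protected data ...
 *	write_sequnlock_irqrestore(&foo_lock, flags);
 */
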
/*
 * A locking reader exclusively locks out other writers and locking readers,
 * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}

static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}

/**
 * read_seqbegin_or_lock - begin a sequence number check or locking block
 * @lock: sequence lock
 * @seq : sequence number to be checked
 *
 * First try it once optimistically without taking the lock. If that fails,
 * take the lock. The sequence number is also used as a marker for deciding
 * whether to be a reader (even) or writer (odd).
 * N.B. seq must be initialized to an even number to begin with.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}
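
/*
 * Illustrative retry pattern for the three helpers above (a sketch loosely
 * modelled on how fs/dcache.c uses them; "foo_lock" and the data access are
 * placeholders). The first pass is lockless; if it raced with a writer, seq
 * is forced odd so that the second pass takes the lock:
 *
 *	int seq = 0;
 * retry:
 *	read_seqbegin_or_lock(&foo_lock, &seq);
 *	... read the protected data ...
 *	if (need_seqretry(&foo_lock, seq)) {
 *		seq = 1;	(force the locking pass)
 *		goto retry;
 *	}
 *	done_seqretry(&foo_lock, seq);
 */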

static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}

static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}

static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}

static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

#define read_seqlock_excl_irqsave(lock, flags)			\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}

static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}

static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}
#endif /* __LINUX_SEQLOCK_H */