[mirror_ubuntu-jammy-kernel.git] drivers/block/drbd/drbd_interval.c
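/*
 * DRBD interval tree: a red-black tree of struct drbd_interval keyed by
 * start sector and augmented with ->end, the highest end sector found in
 * each node's subtree.  The cached ends let overlap searches skip whole
 * subtrees instead of scanning every interval.
 */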
#include <asm/bug.h>
#include <linux/rbtree_augmented.h>
#include "drbd_interval.h"

/**
 * interval_end - return end of @node
 */
static inline
sector_t interval_end(struct rb_node *node)
{
	struct drbd_interval *this = rb_entry(node, struct drbd_interval, rb);
	return this->end;
}

/**
 * compute_subtree_last - compute end of @node
 *
 * The end of an interval is the highest (start + (size >> 9)) value of this
 * node and of its children.  Called for @node and its parents whenever the
 * end may have changed.
 */
static inline sector_t
compute_subtree_last(struct drbd_interval *node)
{
	sector_t max = node->sector + (node->size >> 9);

	if (node->rb.rb_left) {
		sector_t left = interval_end(node->rb.rb_left);
		if (left > max)
			max = left;
	}
	if (node->rb.rb_right) {
		sector_t right = interval_end(node->rb.rb_right);
		if (right > max)
			max = right;
	}
	return max;
}

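/*
 * Augment callbacks: keep ->end, the cached maximum interval end of each
 * node's subtree, consistent while the rbtree code rebalances the tree on
 * insert and erase.
 */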
static void augment_propagate(struct rb_node *rb, struct rb_node *stop)
{
	while (rb != stop) {
		struct drbd_interval *node = rb_entry(rb, struct drbd_interval, rb);
		sector_t subtree_last = compute_subtree_last(node);
		if (node->end == subtree_last)
			break;
		node->end = subtree_last;
		rb = rb_parent(&node->rb);
	}
}

static void augment_copy(struct rb_node *rb_old, struct rb_node *rb_new)
{
	struct drbd_interval *old = rb_entry(rb_old, struct drbd_interval, rb);
	struct drbd_interval *new = rb_entry(rb_new, struct drbd_interval, rb);

	new->end = old->end;
}

static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
{
	struct drbd_interval *old = rb_entry(rb_old, struct drbd_interval, rb);
	struct drbd_interval *new = rb_entry(rb_new, struct drbd_interval, rb);

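	/*
	 * @rb_new takes over @rb_old's position and thus its old subtree,
	 * so it inherits the cached end; @rb_old's subtree has shrunk and
	 * its end must be recomputed.
	 */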
	new->end = old->end;
	old->end = compute_subtree_last(old);
}

static const struct rb_augment_callbacks augment_callbacks = {
	augment_propagate,
	augment_copy,
	augment_rotate,
};

/**
 * drbd_insert_interval - insert a new interval into a tree
 * @root: red-black tree to insert into
 * @this: interval to insert; its sector and size (a multiple of 512) must
 *	  already be set
 *
 * Returns %true on success, %false if @this is already in the tree.
 */
bool
drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;
	sector_t this_end = this->sector + (this->size >> 9);

	BUG_ON(!IS_ALIGNED(this->size, 512));

	while (*new) {
		struct drbd_interval *here =
			rb_entry(*new, struct drbd_interval, rb);

		parent = *new;
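		/*
		 * @this will end up somewhere below @here, so @here's
		 * subtree may now reach further: grow its cached end on
		 * the way down.
		 */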
		if (here->end < this_end)
			here->end = this_end;
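		/*
		 * Sort by start sector; break ties by node address, so
		 * distinct intervals with the same start sector can
		 * coexist in the tree.
		 */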
		if (this->sector < here->sector)
			new = &(*new)->rb_left;
		else if (this->sector > here->sector)
			new = &(*new)->rb_right;
		else if (this < here)
			new = &(*new)->rb_left;
		else if (this > here)
			new = &(*new)->rb_right;
		else
			return false;
	}

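	/*
	 * Set the cached end before linking the node: the augment
	 * callbacks may read @this->end while rebalancing during
	 * rb_insert_augmented().
	 */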
	this->end = this_end;
	rb_link_node(&this->rb, parent, new);
	rb_insert_augmented(&this->rb, root, &augment_callbacks);
	return true;
}

/**
 * drbd_contains_interval - check if a tree contains a given interval
 * @root: red-black tree to search
 * @sector: start sector of @interval
 * @interval: may be a dangling pointer; it is never dereferenced
 *
 * Returns %true if the tree contains the node @interval with start sector
 * @sector.  Does not dereference @interval, so @interval does not have to
 * point to a valid object.  Returns %false if @interval is in the tree but
 * with a different sector number.
 */
bool
drbd_contains_interval(struct rb_root *root, sector_t sector,
		       struct drbd_interval *interval)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct drbd_interval *here =
			rb_entry(node, struct drbd_interval, rb);

		if (sector < here->sector)
			node = node->rb_left;
		else if (sector > here->sector)
			node = node->rb_right;
		else if (interval < here)
			node = node->rb_left;
		else if (interval > here)
			node = node->rb_right;
		else
			return true;
	}
	return false;
}

/**
 * drbd_remove_interval - remove an interval from a tree
 */
void
drbd_remove_interval(struct rb_root *root, struct drbd_interval *this)
{
	rb_erase_augmented(&this->rb, root, &augment_callbacks);
}

/**
 * drbd_find_overlap - search for an interval overlapping with [sector, sector + size)
 * @root: red-black tree to search
 * @sector: start sector
 * @size: size, aligned to 512 bytes
 *
 * Returns an interval overlapping with [sector, sector + size), or NULL if
 * there is none.  When there is more than one overlapping interval in the
 * tree, the interval with the lowest start sector is returned, and all other
 * overlapping intervals will be on the right side of the tree, reachable with
 * rb_next().
 */
struct drbd_interval *
drbd_find_overlap(struct rb_root *root, sector_t sector, unsigned int size)
{
	struct rb_node *node = root->rb_node;
	struct drbd_interval *overlap = NULL;
	sector_t end = sector + (size >> 9);

	BUG_ON(!IS_ALIGNED(size, 512));

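	/*
	 * Descend towards the lowest-start overlap.  If an interval in the
	 * left subtree ends past @sector, any overlap in the current
	 * subtree lies to the left; otherwise test @here, and only then
	 * the right subtree, whose intervals all start at or after
	 * here->sector.
	 */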
	while (node) {
		struct drbd_interval *here =
			rb_entry(node, struct drbd_interval, rb);

		if (node->rb_left &&
		    sector < interval_end(node->rb_left)) {
			/* Overlap if any must be on left side */
			node = node->rb_left;
		} else if (here->sector < end &&
			   sector < here->sector + (here->size >> 9)) {
			overlap = here;
			break;
		} else if (sector >= here->sector) {
			/* Overlap if any must be on right side */
			node = node->rb_right;
		} else
			break;
	}
	return overlap;
}

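/**
 * drbd_next_overlap - find the next interval overlapping with [sector, sector + size)
 * @i: interval returned by drbd_find_overlap() or a previous drbd_next_overlap()
 * @sector: start sector
 * @size: size, aligned to 512 bytes
 *
 * Continues the search started by drbd_find_overlap(): returns the next
 * interval (in order of start sector) overlapping with
 * [sector, sector + size), or NULL if there is none.
 */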
struct drbd_interval *
drbd_next_overlap(struct drbd_interval *i, sector_t sector, unsigned int size)
{
	sector_t end = sector + (size >> 9);
	struct rb_node *node;

	for (;;) {
		node = rb_next(&i->rb);
		if (!node)
			return NULL;
		i = rb_entry(node, struct drbd_interval, rb);
		if (i->sector >= end)
			return NULL;
		if (sector < i->sector + (i->size >> 9))
			return i;
	}
}