]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - kernel/range.c
block: provide plug based way of signaling forced no-wait semantics
[mirror_ubuntu-jammy-kernel.git] / kernel / range.c
CommitLineData
b2441318 1// SPDX-License-Identifier: GPL-2.0
27811d8c
YL
2/*
3 * Range add and subtract
4 */
9a418455 5#include <linux/kernel.h>
27811d8c
YL
6#include <linux/init.h>
7#include <linux/sort.h>
05418815 8#include <linux/string.h>
27811d8c
YL
9#include <linux/range.h>
10
27811d8c
YL
11int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)
12{
e9a0064a 13 if (start >= end)
27811d8c
YL
14 return nr_range;
15
16 /* Out of slots: */
17 if (nr_range >= az)
18 return nr_range;
19
20 range[nr_range].start = start;
21 range[nr_range].end = end;
22
23 nr_range++;
24
25 return nr_range;
26}
27
/*
 * Add the half-open range [start, end), coalescing it with every
 * existing entry it overlaps or exactly abuts (common_start ==
 * common_end counts as mergeable).
 *
 * @range:    array of ranges; unused slots are marked by .end == 0
 * @az:       capacity of @range
 * @nr_range: number of entries currently in use
 *
 * Returns the new entry count.  Empty/inverted input is ignored.
 */
int add_range_with_merge(struct range *range, int az, int nr_range,
			 u64 start, u64 end)
{
	int i;

	if (start >= end)
		return nr_range;

	/* get new start/end: */
	for (i = 0; i < nr_range; i++) {
		u64 common_start, common_end;

		/* .end == 0 marks an empty slot — skip it. */
		if (!range[i].end)
			continue;

		common_start = max(range[i].start, start);
		common_end = min(range[i].end, end);
		/* Disjoint and not touching: leave this entry alone. */
		if (common_start > common_end)
			continue;

		/* new start/end, will add it back at last */
		start = min(range[i].start, start);
		end = max(range[i].end, end);

		/* Remove the absorbed entry by compacting the tail down. */
		memmove(&range[i], &range[i + 1],
			(nr_range - (i + 1)) * sizeof(range[i]));
		range[nr_range - 1].start = 0;
		range[nr_range - 1].end = 0;
		nr_range--;
		/* Re-examine slot i, which now holds the shifted entry. */
		i--;
	}

	/* Need to add it: */
	return add_range(range, az, nr_range, start, end);
}
63
/*
 * Remove the half-open range [start, end) from every entry in the
 * array, handling the four possible kinds of overlap per entry.
 *
 * @range: array of ranges; unused slots are marked by .end == 0
 * @az:    capacity of @range
 *
 * An entry split in two needs a spare slot; if none is free the
 * second half is dropped and an error is logged.
 */
void subtract_range(struct range *range, int az, u64 start, u64 end)
{
	int i, j;

	/* Nothing to subtract for an empty or inverted range. */
	if (start >= end)
		return;

	for (j = 0; j < az; j++) {
		if (!range[j].end)
			continue;

		/* Case 1: entry fully covered — clear the whole slot. */
		if (start <= range[j].start && end >= range[j].end) {
			range[j].start = 0;
			range[j].end = 0;
			continue;
		}

		/* Case 2: overlaps the head of the entry — trim the front. */
		if (start <= range[j].start && end < range[j].end &&
		    range[j].start < end) {
			range[j].start = end;
			continue;
		}


		/* Case 3: overlaps the tail of the entry — trim the back. */
		if (start > range[j].start && end >= range[j].end &&
		    range[j].end > start) {
			range[j].end = start;
			continue;
		}

		/* Case 4: strictly inside — split the entry in two. */
		if (start > range[j].start && end < range[j].end) {
			/* Find the new spare: */
			for (i = 0; i < az; i++) {
				if (range[i].end == 0)
					break;
			}
			if (i < az) {
				/* Spare slot takes the upper piece. */
				range[i].end = range[j].end;
				range[i].start = end;
			} else {
				/* No spare: the upper piece is lost. */
				pr_err("%s: run out of slot in ranges\n",
					__func__);
			}
			/* Original slot keeps the lower piece. */
			range[j].end = start;
			continue;
		}
	}
}
112
113static int cmp_range(const void *x1, const void *x2)
114{
115 const struct range *r1 = x1;
116 const struct range *r2 = x2;
27811d8c 117
fc7f0dd3
LL
118 if (r1->start < r2->start)
119 return -1;
120 if (r1->start > r2->start)
121 return 1;
122 return 0;
27811d8c
YL
123}
124
/*
 * Compact the array so all used entries (.end != 0) come first, count
 * them, and sort them by ascending start address.
 *
 * @range: array of ranges; unused slots are marked by .end == 0
 * @az:    capacity of @range
 *
 * Returns the number of used entries.
 */
int clean_sort_range(struct range *range, int az)
{
	/* k scans backwards for used entries to fill holes found by i. */
	int i, j, k = az - 1, nr_range = az;

	for (i = 0; i < k; i++) {
		if (range[i].end)
			continue;
		/* Slot i is empty: find the last used slot behind k. */
		for (j = k; j > i; j--) {
			if (range[j].end) {
				k = j;
				break;
			}
		}
		/* No used slot after i: compaction is complete. */
		if (j == i)
			break;
		/* Move the used entry into the hole and clear its old slot. */
		range[i].start = range[k].start;
		range[i].end = range[k].end;
		range[k].start = 0;
		range[k].end = 0;
		k--;
	}
	/* count it */
	for (i = 0; i < az; i++) {
		if (!range[i].end) {
			nr_range = i;
			break;
		}
	}

	/* sort them */
	sort(range, nr_range, sizeof(struct range), cmp_range, NULL);

	return nr_range;
}
159
160void sort_range(struct range *range, int nr_range)
161{
162 /* sort them */
163 sort(range, nr_range, sizeof(struct range), cmp_range, NULL);
164}