// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

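/**
 * badrange_init() - initialize a badrange instance
 * @badrange: instance to initialize
 *
 * Prepares the entry list and the spinlock that serializes all other
 * badrange_* operations on @badrange.
 */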
void badrange_init(struct badrange *badrange)
{
	INIT_LIST_HEAD(&badrange->list);
	spin_lock_init(&badrange->lock);
}
EXPORT_SYMBOL_GPL(badrange_init);

static void append_badrange_entry(struct badrange *badrange,
		struct badrange_entry *bre, u64 addr, u64 length)
{
	lockdep_assert_held(&badrange->lock);
	bre->start = addr;
	bre->length = length;
	list_add_tail(&bre->list, &badrange->list);
}

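/*
 * Allocate and append a new entry; callers must hold badrange->lock.
 * @flags lets atomic contexts (e.g. the entry split in badrange_forget())
 * request GFP_NOWAIT instead of a sleeping allocation.
 */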
static int alloc_and_append_badrange_entry(struct badrange *badrange,
		u64 addr, u64 length, gfp_t flags)
{
	struct badrange_entry *bre;

	bre = kzalloc(sizeof(*bre), flags);
	if (!bre)
		return -ENOMEM;

	append_badrange_entry(badrange, bre, addr, length);
	return 0;
}

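/*
 * Core of badrange_add(): entered with badrange->lock held, but the
 * lock is dropped and retaken around the sleeping allocation below.
 */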
static int add_badrange(struct badrange *badrange, u64 addr, u64 length)
{
	struct badrange_entry *bre, *bre_new;

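	/*
	 * Drop the lock so the GFP_KERNEL allocation may sleep; the list
	 * can change while unlocked, so it is re-examined after relocking.
	 */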
	spin_unlock(&badrange->lock);
	bre_new = kzalloc(sizeof(*bre_new), GFP_KERNEL);
	spin_lock(&badrange->lock);

	if (list_empty(&badrange->list)) {
		if (!bre_new)
			return -ENOMEM;
		append_badrange_entry(badrange, bre_new, addr, length);
		return 0;
	}

	/*
	 * There is a chance this is a duplicate, check for those first.
	 * This will be the common case as ARS_STATUS returns all known
	 * errors in the SPA space, and we can't query it per region.
	 */
	list_for_each_entry(bre, &badrange->list, list)
		if (bre->start == addr) {
			/* If length has changed, update this list entry */
			if (bre->length != length)
				bre->length = length;
			kfree(bre_new);
			return 0;
		}

	/*
	 * If not a duplicate or a simple length update, add the entry as is,
	 * as any overlapping ranges will get resolved when the list is
	 * consumed and converted to badblocks.
	 */
	if (!bre_new)
		return -ENOMEM;
	append_badrange_entry(badrange, bre_new, addr, length);

	return 0;
}

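/**
 * badrange_add() - add a bad physical address range
 * @badrange: badrange instance to update
 * @addr: start of the bad range, in bytes
 * @length: length of the bad range, in bytes
 *
 * A duplicate start address updates the existing entry's length;
 * overlaps are left to be resolved when the list is converted to
 * badblocks. Returns 0 on success, -ENOMEM on allocation failure.
 */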
int badrange_add(struct badrange *badrange, u64 addr, u64 length)
{
	int rc;

	spin_lock(&badrange->lock);
	rc = add_badrange(badrange, addr, length);
	spin_unlock(&badrange->lock);

	return rc;
}
EXPORT_SYMBOL_GPL(badrange_add);

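/**
 * badrange_forget() - remove a cleared span from the badrange list
 * @badrange: badrange instance to update
 * @start: first byte of the span that is no longer bad
 * @len: number of bytes cleared
 *
 * Entries fully covered by the span are deleted; partially covered
 * entries are trimmed, and an entry spanning the whole cleared range
 * is split in two.
 */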
void badrange_forget(struct badrange *badrange, phys_addr_t start,
		unsigned int len)
{
	struct list_head *badrange_list = &badrange->list;
	u64 clr_end = start + len - 1;
	struct badrange_entry *bre, *next;

	spin_lock(&badrange->lock);

	/*
	 * [start, clr_end] is the badrange interval being cleared.
	 * [bre->start, bre_end] is the badrange_list entry we're comparing
	 * the above interval against. The badrange list entry may need
	 * to be modified (update either start or length), deleted, or
	 * split into two based on the overlap characteristics.
	 */

	list_for_each_entry_safe(bre, next, badrange_list, list) {
		u64 bre_end = bre->start + bre->length - 1;

		/* Skip intervals with no intersection */
		if (bre_end < start)
			continue;
		if (bre->start > clr_end)
			continue;
		/* Delete completely overlapped badrange entries */
		if ((bre->start >= start) && (bre_end <= clr_end)) {
			list_del(&bre->list);
			kfree(bre);
			continue;
		}
		/* Adjust start point of partially cleared entries */
		if ((start <= bre->start) && (clr_end > bre->start)) {
			bre->length -= clr_end - bre->start + 1;
			bre->start = clr_end + 1;
			continue;
		}
		/* Adjust bre->length for partial clearing at the tail end */
		if ((bre->start < start) && (bre_end <= clr_end)) {
			/* bre->start remains the same */
			bre->length = start - bre->start;
			continue;
		}
		/*
		 * If clearing in the middle of an entry, we split it into
		 * two by modifying the current entry to represent one half of
		 * the split, and adding a new entry for the second half.
		 */
		if ((bre->start < start) && (bre_end > clr_end)) {
			u64 new_start = clr_end + 1;
			u64 new_len = bre_end - new_start + 1;

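			/*
			 * GFP_NOWAIT since the lock is held; on allocation
			 * failure the right-half entry is silently dropped.
			 */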
			/* Add new entry covering the right half */
			alloc_and_append_badrange_entry(badrange, new_start,
					new_len, GFP_NOWAIT);
			/* Adjust this entry to cover the left half */
			bre->length = start - bre->start;
			continue;
		}
	}
	spin_unlock(&badrange->lock);
}
EXPORT_SYMBOL_GPL(badrange_forget);

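/* Mark @num sectors starting at @s as bad (and acknowledged) in @bb. */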
static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
	dev_dbg(bb->dev, "Found a bad range (0x%llx, 0x%llx)\n",
			(u64) s * 512, (u64) num * 512);
	/* this isn't an error as the hardware will still throw an exception */
	if (badblocks_set(bb, s, num, 1))
		dev_info_once(bb->dev, "%s: failed for sector %llx\n",
				__func__, (u64) s);
}

/**
 * __add_badblock_range() - Convert a physical address range to bad sectors
 * @bb: badblocks instance to populate
 * @ns_offset: namespace offset where the error range begins (in bytes)
 * @len: number of bytes of badrange to be added
 *
 * This assumes that the range provided with (ns_offset, len) is within
 * the bounds of physical addresses for this namespace, i.e. lies in the
 * interval [ns_start, ns_start + ns_size).
 */
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
	const unsigned int sector_size = 512;
	sector_t start_sector, end_sector;
	u64 num_sectors;
	u32 rem;

	start_sector = div_u64(ns_offset, sector_size);
	end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
	if (rem)
		end_sector++;
	num_sectors = end_sector - start_sector;

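	/*
	 * badblocks_set() takes an int sector count, so feed ranges
	 * larger than INT_MAX sectors to set_badblock() in chunks.
	 */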
	if (unlikely(num_sectors > (u64)INT_MAX)) {
		u64 remaining = num_sectors;
		sector_t s = start_sector;

		while (remaining) {
			int done = min_t(u64, remaining, INT_MAX);

			set_badblock(bb, s, done);
			remaining -= done;
			s += done;
		}
	} else
		set_badblock(bb, start_sector, num_sectors);
}

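/*
 * Translate every badrange entry that intersects @res into
 * namespace-relative badblocks entries; called under the bus lock by
 * nvdimm_badblocks_populate().
 */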
static void badblocks_populate(struct badrange *badrange,
		struct badblocks *bb, const struct resource *res)
{
	struct badrange_entry *bre;

	if (list_empty(&badrange->list))
		return;

	list_for_each_entry(bre, &badrange->list, list) {
		u64 bre_end = bre->start + bre->length - 1;

		/* Discard intervals with no intersection */
		if (bre_end < res->start)
			continue;
		if (bre->start > res->end)
			continue;
		/* Deal with any overlap after start of the namespace */
		if (bre->start >= res->start) {
			u64 start = bre->start;
			u64 len;

			if (bre_end <= res->end)
				len = bre->length;
			else
				len = res->start + resource_size(res)
					- bre->start;
			__add_badblock_range(bb, start - res->start, len);
			continue;
		}
		/*
		 * Deal with overlap for badrange starting before
		 * the namespace.
		 */
		if (bre->start < res->start) {
			u64 len;

			if (bre_end < res->end)
				len = bre->start + bre->length - res->start;
			else
				len = resource_size(res);
			__add_badblock_range(bb, 0, len);
		}
	}
}

/**
 * nvdimm_badblocks_populate() - Convert a list of badranges to badblocks
 * @nd_region: parent region of the range to interrogate
 * @bb: badblocks instance to populate
 * @res: resource range to consider
 *
 * The badrange list generated during bus initialization may contain
 * multiple, possibly overlapping physical address ranges. Compare each
 * of these ranges to the resource range currently being initialized,
 * and add badblocks entries for all matching sub-ranges.
 */
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct resource *res)
{
	struct nvdimm_bus *nvdimm_bus;

	if (!is_memory(&nd_region->dev)) {
		dev_WARN_ONCE(&nd_region->dev, 1,
				"%s only valid for pmem regions\n", __func__);
		return;
	}
	nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);

	nvdimm_bus_lock(&nvdimm_bus->dev);
	badblocks_populate(&nvdimm_bus->badrange, bb, res);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);