]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - tools/testing/nvdimm/test/iomap.c
Merge tag 'mmc-v4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc
[mirror_ubuntu-jammy-kernel.git] / tools / testing / nvdimm / test / iomap.c
1 /*
2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13 #include <linux/memremap.h>
14 #include <linux/rculist.h>
15 #include <linux/export.h>
16 #include <linux/ioport.h>
17 #include <linux/module.h>
18 #include <linux/types.h>
19 #include <linux/pfn_t.h>
20 #include <linux/acpi.h>
21 #include <linux/io.h>
22 #include <linux/mm.h>
23 #include "nfit_test.h"
24
/* List holding at most one entry: the currently registered test ops. */
static LIST_HEAD(iomap_head);

/*
 * Callbacks supplied by the nfit_test module: a lookup routine mapping a
 * physical address to its emulated nfit_test_resource, and an emulated
 * _DSM evaluator.  Published on iomap_head via RCU in nfit_test_setup().
 */
static struct iomap_ops {
	nfit_test_lookup_fn nfit_test_lookup;
	nfit_test_evaluate_dsm_fn evaluate_dsm;
	struct list_head list;
} iomap_ops = {
	.list = LIST_HEAD_INIT(iomap_ops.list),
};
34
/*
 * Register the test-harness callbacks.  The function pointers are
 * populated before the RCU-visible list_add_rcu() so that a reader that
 * observes the list entry also observes valid callbacks.
 */
void nfit_test_setup(nfit_test_lookup_fn lookup,
		nfit_test_evaluate_dsm_fn evaluate)
{
	iomap_ops.nfit_test_lookup = lookup;
	iomap_ops.evaluate_dsm = evaluate;
	list_add_rcu(&iomap_ops.list, &iomap_head);
}
EXPORT_SYMBOL(nfit_test_setup);
43
/*
 * Unregister the callbacks and wait for in-flight RCU readers
 * (get_nfit_res() et al.) to drain before the caller tears down the
 * module that provided them.
 */
void nfit_test_teardown(void)
{
	list_del_rcu(&iomap_ops.list);
	synchronize_rcu();
}
EXPORT_SYMBOL(nfit_test_teardown);
50
51 static struct nfit_test_resource *__get_nfit_res(resource_size_t resource)
52 {
53 struct iomap_ops *ops;
54
55 ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
56 if (ops)
57 return ops->nfit_test_lookup(resource);
58 return NULL;
59 }
60
61 struct nfit_test_resource *get_nfit_res(resource_size_t resource)
62 {
63 struct nfit_test_resource *res;
64
65 rcu_read_lock();
66 res = __get_nfit_res(resource);
67 rcu_read_unlock();
68
69 return res;
70 }
71 EXPORT_SYMBOL(get_nfit_res);
72
73 void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
74 void __iomem *(*fallback_fn)(resource_size_t, unsigned long))
75 {
76 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
77
78 if (nfit_res)
79 return (void __iomem *) nfit_res->buf + offset
80 - nfit_res->res.start;
81 return fallback_fn(offset, size);
82 }
83
84 void __iomem *__wrap_devm_ioremap_nocache(struct device *dev,
85 resource_size_t offset, unsigned long size)
86 {
87 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
88
89 if (nfit_res)
90 return (void __iomem *) nfit_res->buf + offset
91 - nfit_res->res.start;
92 return devm_ioremap_nocache(dev, offset, size);
93 }
94 EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);
95
96 void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
97 size_t size, unsigned long flags)
98 {
99 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
100
101 if (nfit_res)
102 return nfit_res->buf + offset - nfit_res->res.start;
103 return devm_memremap(dev, offset, size, flags);
104 }
105 EXPORT_SYMBOL(__wrap_devm_memremap);
106
107 void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
108 {
109 resource_size_t offset = pgmap->res.start;
110 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
111
112 if (nfit_res)
113 return nfit_res->buf + offset - nfit_res->res.start;
114 return devm_memremap_pages(dev, pgmap);
115 }
116 EXPORT_SYMBOL(__wrap_devm_memremap_pages);
117
118 pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
119 {
120 struct nfit_test_resource *nfit_res = get_nfit_res(addr);
121
122 if (nfit_res)
123 flags &= ~PFN_MAP;
124 return phys_to_pfn_t(addr, flags);
125 }
126 EXPORT_SYMBOL(__wrap_phys_to_pfn_t);
127
128 void *__wrap_memremap(resource_size_t offset, size_t size,
129 unsigned long flags)
130 {
131 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
132
133 if (nfit_res)
134 return nfit_res->buf + offset - nfit_res->res.start;
135 return memremap(offset, size, flags);
136 }
137 EXPORT_SYMBOL(__wrap_memremap);
138
/*
 * devm_memunmap() interposer: addresses inside an emulated range were
 * never really mapped, so unmapping them is a no-op; everything else is
 * passed to the real devm_memunmap().
 *
 * Fixes: "return devm_memunmap(...)" returned a void expression from a
 * void function (ISO C constraint violation, C11 6.8.6.4), and the
 * (long) cast would sign-extend high kernel addresses on builds where
 * resource_size_t is wider than long.
 */
void __wrap_devm_memunmap(struct device *dev, void *addr)
{
	struct nfit_test_resource *nfit_res =
		get_nfit_res((unsigned long) addr);

	if (nfit_res)
		return;
	devm_memunmap(dev, addr);
}
EXPORT_SYMBOL(__wrap_devm_memunmap);
148
/* ioremap_nocache() interposer; see __nfit_test_ioremap() above. */
void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap_nocache);
}
EXPORT_SYMBOL(__wrap_ioremap_nocache);
154
/* ioremap_wc() interposer; see __nfit_test_ioremap() above. */
void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap_wc);
}
EXPORT_SYMBOL(__wrap_ioremap_wc);
160
161 void __wrap_iounmap(volatile void __iomem *addr)
162 {
163 struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
164 if (nfit_res)
165 return;
166 return iounmap(addr);
167 }
168 EXPORT_SYMBOL(__wrap_iounmap);
169
/*
 * memunmap() interposer: emulated mappings are plain buffer pointers, so
 * they are silently ignored; real mappings go to memunmap().
 *
 * Fixes: "return memunmap(addr)" returned a void expression from a void
 * function (ISO C constraint violation), and (unsigned long) avoids
 * sign-extension of high kernel addresses into resource_size_t.
 */
void __wrap_memunmap(void *addr)
{
	struct nfit_test_resource *nfit_res =
		get_nfit_res((unsigned long) addr);

	if (nfit_res)
		return;
	memunmap(addr);
}
EXPORT_SYMBOL(__wrap_memunmap);
179
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n);

/*
 * devres destructor for test-owned regions: @data holds a pointer to the
 * tracked struct resource.  The release is forced through the non-devres
 * path (dev == NULL) so it does not re-enter devres teardown.
 */
static void nfit_devres_release(struct device *dev, void *data)
{
	struct resource *res = *((struct resource **) data);

	WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start,
			resource_size(res)));
}
191
192 static int match(struct device *dev, void *__res, void *match_data)
193 {
194 struct resource *res = *((struct resource **) __res);
195 resource_size_t start = *((resource_size_t *) match_data);
196
197 return res->start == start;
198 }
199
/*
 * Release a region previously granted by nfit_test_request_region().
 *
 * Returns true when @start lies inside an emulated resource (a missing or
 * size-mismatched request only WARNs), false when the caller should fall
 * through to the real __release_region().  Only requests against the
 * top-level iomem_resource tree are interposed.
 */
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	if (parent == &iomem_resource) {
		struct nfit_test_resource *nfit_res = get_nfit_res(start);

		if (nfit_res) {
			struct nfit_test_request *req;
			struct resource *res = NULL;

			if (dev) {
				/* Managed path: fires nfit_devres_release(),
				 * which re-enters here with dev == NULL. */
				devres_release(dev, nfit_devres_release, match,
						&start);
				return true;
			}

			/* Unlink the request matching @start, if any. */
			spin_lock(&nfit_res->lock);
			list_for_each_entry(req, &nfit_res->requests, list)
				if (req->res.start == start) {
					res = &req->res;
					list_del(&req->list);
					break;
				}
			spin_unlock(&nfit_res->lock);

			WARN(!res || resource_size(res) != n,
					"%s: start: %llx n: %llx mismatch: %pr\n",
						__func__, start, n, res);
			if (res)
				kfree(req);
			return true;
		}
	}
	return false;
}
236
237 static struct resource *nfit_test_request_region(struct device *dev,
238 struct resource *parent, resource_size_t start,
239 resource_size_t n, const char *name, int flags)
240 {
241 struct nfit_test_resource *nfit_res;
242
243 if (parent == &iomem_resource) {
244 nfit_res = get_nfit_res(start);
245 if (nfit_res) {
246 struct nfit_test_request *req;
247 struct resource *res = NULL;
248
249 if (start + n > nfit_res->res.start
250 + resource_size(&nfit_res->res)) {
251 pr_debug("%s: start: %llx n: %llx overflow: %pr\n",
252 __func__, start, n,
253 &nfit_res->res);
254 return NULL;
255 }
256
257 spin_lock(&nfit_res->lock);
258 list_for_each_entry(req, &nfit_res->requests, list)
259 if (start == req->res.start) {
260 res = &req->res;
261 break;
262 }
263 spin_unlock(&nfit_res->lock);
264
265 if (res) {
266 WARN(1, "%pr already busy\n", res);
267 return NULL;
268 }
269
270 req = kzalloc(sizeof(*req), GFP_KERNEL);
271 if (!req)
272 return NULL;
273 INIT_LIST_HEAD(&req->list);
274 res = &req->res;
275
276 res->start = start;
277 res->end = start + n - 1;
278 res->name = name;
279 res->flags = resource_type(parent);
280 res->flags |= IORESOURCE_BUSY | flags;
281 spin_lock(&nfit_res->lock);
282 list_add(&req->list, &nfit_res->requests);
283 spin_unlock(&nfit_res->lock);
284
285 if (dev) {
286 struct resource **d;
287
288 d = devres_alloc(nfit_devres_release,
289 sizeof(struct resource *),
290 GFP_KERNEL);
291 if (!d)
292 return NULL;
293 *d = res;
294 devres_add(dev, d);
295 }
296
297 pr_debug("%s: %pr\n", __func__, res);
298 return res;
299 }
300 }
301 if (dev)
302 return __devm_request_region(dev, parent, start, n, name);
303 return __request_region(parent, start, n, name, flags);
304 }
305
/* Non-devres __request_region() interposer (dev == NULL path). */
struct resource *__wrap___request_region(struct resource *parent,
		resource_size_t start, resource_size_t n, const char *name,
		int flags)
{
	return nfit_test_request_region(NULL, parent, start, n, name, flags);
}
EXPORT_SYMBOL(__wrap___request_region);
313
314 int __wrap_insert_resource(struct resource *parent, struct resource *res)
315 {
316 if (get_nfit_res(res->start))
317 return 0;
318 return insert_resource(parent, res);
319 }
320 EXPORT_SYMBOL(__wrap_insert_resource);
321
322 int __wrap_remove_resource(struct resource *res)
323 {
324 if (get_nfit_res(res->start))
325 return 0;
326 return remove_resource(res);
327 }
328 EXPORT_SYMBOL(__wrap_remove_resource);
329
330 struct resource *__wrap___devm_request_region(struct device *dev,
331 struct resource *parent, resource_size_t start,
332 resource_size_t n, const char *name)
333 {
334 if (!dev)
335 return NULL;
336 return nfit_test_request_region(dev, parent, start, n, name, 0);
337 }
338 EXPORT_SYMBOL(__wrap___devm_request_region);
339
340 void __wrap___release_region(struct resource *parent, resource_size_t start,
341 resource_size_t n)
342 {
343 if (!nfit_test_release_region(NULL, parent, start, n))
344 __release_region(parent, start, n);
345 }
346 EXPORT_SYMBOL(__wrap___release_region);
347
348 void __wrap___devm_release_region(struct device *dev, struct resource *parent,
349 resource_size_t start, resource_size_t n)
350 {
351 if (!nfit_test_release_region(dev, parent, start, n))
352 __devm_release_region(dev, parent, start, n);
353 }
354 EXPORT_SYMBOL(__wrap___devm_release_region);
355
/*
 * acpi_evaluate_object() interposer: _FIT evaluations on handles owned by
 * the test harness return the canned acpi_object stored in nfit_res->buf
 * (buf holds a pointer to the object -- see the double indirection);
 * anything else goes to the real ACPI interpreter.
 */
acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
		struct acpi_object_list *p, struct acpi_buffer *buf)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) handle);
	union acpi_object **obj;

	/* Delegate unless this is a _FIT query on an emulated handle. */
	if (!nfit_res || strcmp(path, "_FIT") || !buf)
		return acpi_evaluate_object(handle, path, p, buf);

	obj = nfit_res->buf;
	buf->length = sizeof(union acpi_object);
	buf->pointer = *obj;
	return AE_OK;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_object);
371
/*
 * acpi_evaluate_dsm() interposer: route _DSM calls through the registered
 * test evaluator when present; fall back to the real ACPI path when no
 * ops are registered or the evaluator declines with an ERR_PTR.
 */
union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
		u64 rev, u64 func, union acpi_object *argv4)
{
	union acpi_object *obj = ERR_PTR(-ENXIO);
	struct iomap_ops *ops;

	rcu_read_lock();
	ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
	if (ops)
		obj = ops->evaluate_dsm(handle, guid, rev, func, argv4);
	rcu_read_unlock();

	/* ERR_PTR means "not handled by the test harness". */
	if (IS_ERR(obj))
		return acpi_evaluate_dsm(handle, guid, rev, func, argv4);
	return obj;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);
389
390 MODULE_LICENSE("GPL v2");