// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/of_address.h>

enum devm_ioremap_type {
	DEVM_IOREMAP = 0,
	DEVM_IOREMAP_UC,
	DEVM_IOREMAP_WC,
};

void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
				    resource_size_t size,
				    enum devm_ioremap_type type)
{
	void __iomem **ptr, *addr = NULL;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	switch (type) {
	case DEVM_IOREMAP:
		addr = ioremap(offset, size);
		break;
	case DEVM_IOREMAP_UC:
		addr = ioremap_uc(offset, size);
		break;
	case DEVM_IOREMAP_WC:
		addr = ioremap_wc(offset, size);
		break;
	}

	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap);
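
/*
 * Usage sketch (illustrative only, not part of this file): mapping a
 * fixed MMIO window from a hypothetical driver probe routine. FOO_BASE
 * and FOO_SIZE are made-up constants; devm_ioremap_uc() and
 * devm_ioremap_wc() below take the same arguments.
 *
 *	void __iomem *regs;
 *
 *	regs = devm_ioremap(&pdev->dev, FOO_BASE, FOO_SIZE);
 *	if (!regs)
 *		return -ENOMEM;
 */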

/**
 * devm_ioremap_uc - Managed ioremap_uc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_uc(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
}
EXPORT_SYMBOL_GPL(devm_ioremap_uc);

/**
 * devm_ioremap_wc - Managed ioremap_wc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_wc(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
}
EXPORT_SYMBOL(devm_ioremap_wc);

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (__force void *)addr));
	iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);

static void __iomem *
__devm_ioremap_resource(struct device *dev, const struct resource *res,
			enum devm_ioremap_type type)
{
	resource_size_t size;
	void __iomem *dest_ptr;
	char *pretty_name;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return IOMEM_ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	if (res->name)
		pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
					     dev_name(dev), res->name);
	else
		pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!pretty_name)
		return IOMEM_ERR_PTR(-ENOMEM);

	if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	dest_ptr = __devm_ioremap(dev, res->start, size, type);
	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}

/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure. Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *devm_ioremap_resource(struct device *dev,
				    const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap_resource);

/**
 * devm_ioremap_resource_wc() - write-combined variant of
 *				devm_ioremap_resource()
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure. Usage example:
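 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource_wc(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);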
 */
void __iomem *devm_ioremap_resource_wc(struct device *dev,
				       const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC);
}

/**
 * devm_of_iomap - Requests a resource and maps the memory mapped IO
 *		   for a given device_node managed by a given device
 * @dev: The device "managing" the resource
 * @node: The device-tree node where the resource resides
 * @index: index of the MMIO range in the "reg" property
 * @size: Returns the size of the resource (pass NULL if not needed)
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach of the device.
 *
 * This is to be used when a device requests/maps resources described
 * by other device tree nodes (children or otherwise).
 *
 * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure. Usage example:
 *
 *	base = devm_of_iomap(&pdev->dev, node, 0, NULL);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
			    resource_size_t *size)
{
	struct resource res;

	if (of_address_to_resource(node, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);
	if (size)
		*size = resource_size(&res);
	return devm_ioremap_resource(dev, &res);
}
EXPORT_SYMBOL(devm_of_iomap);

#ifdef CONFIG_HAS_IOPORT_MAP
/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
			      unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioport_map);
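
/*
 * Usage sketch (illustrative only): mapping a legacy I/O port range from
 * a hypothetical probe routine. FOO_IO_BASE and FOO_IO_NR_PORTS are
 * made-up constants. The returned cookie is accessed with
 * ioread8()/iowrite8() and friends, and is unmapped on driver detach.
 *
 *	void __iomem *pio;
 *
 *	pio = devm_ioport_map(&pdev->dev, FOO_IO_BASE, FOO_IO_NR_PORTS);
 *	if (!pio)
 *		return -ENOMEM;
 */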

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap(). @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifdef CONFIG_PCI
/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_STD_NUM_BARS

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @pdev. If iomap table doesn't
 * exist and @pdev is managed, it will be allocated. All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated, but
 * once allocated it can be called safely from any context and is
 * guaranteed to succeed.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);
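
/*
 * Usage sketch (illustrative only): from a hypothetical PCI probe, after
 * pcim_enable_device(). Passing maxlen == 0 maps the whole BAR; the
 * mapping is torn down automatically on driver detach.
 *
 *	void __iomem *mmio;
 *
 *	mmio = pcim_iomap(pdev, 0, 0);
 *	if (!mmio)
 *		return -ENOMEM;
 */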

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
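
/*
 * Usage sketch (illustrative only): the common probe pattern for the
 * pcim_* helpers. "foo" is a made-up driver name and BAR 0 is assumed
 * to be the register window.
 *
 *	rc = pcim_enable_device(pdev);
 *	if (rc)
 *		return rc;
 *	rc = pcim_iomap_regions(pdev, 1 << 0, "foo");
 *	if (rc)
 *		return rc;
 *	regs = pcim_iomap_table(pdev)[0];
 */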

/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name used when requesting regions
 *
 * Request all PCI BARs and iomap regions specified by @mask.
 */
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
				   const char *name)
{
	/*
	 * Request the BARs that are not going to be iomapped here; the
	 * six low bits cover the standard PCI BARs. pcim_iomap_regions()
	 * below requests and maps the BARs in @mask itself.
	 */
	int request_mask = ((1 << 6) - 1) & ~mask;
	int rc;

	rc = pci_request_selected_regions(pdev, request_mask, name);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, mask, name);
	if (rc)
		pci_release_selected_regions(pdev, request_mask);
	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);

/**
 * pcim_iounmap_regions - Unmap and release PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to unmap and release
 *
 * Unmap and release regions specified by @mask.
 */
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
	void __iomem * const *iomap;
	int i;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return;

	for (i = 0; i < PCIM_IOMAP_MAX; i++) {
		if (!(mask & (1 << i)))
			continue;

		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}
}
EXPORT_SYMBOL(pcim_iounmap_regions);
#endif /* CONFIG_PCI */