/*
 * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
 * accessing in atomic context.
 *
 * This is used for NMI handler to access IO memory area, because
 * ioremap/iounmap can not be used in NMI handler. The IO memory area
 * is pre-mapped in process context and accessed in NMI handler.
 *
 * Copyright (C) 2009-2010, Intel Corp.
 *	Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <acpi/atomicio.h>
37 #define ACPI_PFX "ACPI: "
39 static LIST_HEAD(acpi_iomaps
);
41 * Used for mutual exclusion between writers of acpi_iomaps list, for
42 * synchronization between readers and writer, RCU is used.
44 static DEFINE_SPINLOCK(acpi_iomaps_lock
);
47 struct list_head list
;
54 /* acpi_iomaps_lock or RCU read lock must be held before calling */
55 static struct acpi_iomap
*__acpi_find_iomap(phys_addr_t paddr
,
58 struct acpi_iomap
*map
;
60 list_for_each_entry_rcu(map
, &acpi_iomaps
, list
) {
61 if (map
->paddr
+ map
->size
>= paddr
+ size
&&
69 * Atomic "ioremap" used by NMI handler, if the specified IO memory
70 * area is not pre-mapped, NULL will be returned.
72 * acpi_iomaps_lock or RCU read lock must be held before calling
74 static void __iomem
*__acpi_ioremap_fast(phys_addr_t paddr
,
77 struct acpi_iomap
*map
;
79 map
= __acpi_find_iomap(paddr
, size
);
81 return map
->vaddr
+ (paddr
- map
->paddr
);
86 /* acpi_iomaps_lock must be held before calling */
87 static void __iomem
*__acpi_try_ioremap(phys_addr_t paddr
,
90 struct acpi_iomap
*map
;
92 map
= __acpi_find_iomap(paddr
, size
);
95 return map
->vaddr
+ (paddr
- map
->paddr
);
101 * Used to pre-map the specified IO memory area. First try to find
102 * whether the area is already pre-mapped, if it is, increase the
103 * reference count (in __acpi_try_ioremap) and return; otherwise, do
104 * the real ioremap, and add the mapping into acpi_iomaps list.
106 static void __iomem
*acpi_pre_map(phys_addr_t paddr
,
110 struct acpi_iomap
*map
;
111 unsigned long pg_sz
, flags
;
114 spin_lock_irqsave(&acpi_iomaps_lock
, flags
);
115 vaddr
= __acpi_try_ioremap(paddr
, size
);
116 spin_unlock_irqrestore(&acpi_iomaps_lock
, flags
);
120 pg_off
= paddr
& PAGE_MASK
;
121 pg_sz
= ((paddr
+ size
+ PAGE_SIZE
- 1) & PAGE_MASK
) - pg_off
;
122 vaddr
= ioremap(pg_off
, pg_sz
);
125 map
= kmalloc(sizeof(*map
), GFP_KERNEL
);
128 INIT_LIST_HEAD(&map
->list
);
132 kref_init(&map
->ref
);
134 spin_lock_irqsave(&acpi_iomaps_lock
, flags
);
135 vaddr
= __acpi_try_ioremap(paddr
, size
);
137 spin_unlock_irqrestore(&acpi_iomaps_lock
, flags
);
142 list_add_tail_rcu(&map
->list
, &acpi_iomaps
);
143 spin_unlock_irqrestore(&acpi_iomaps_lock
, flags
);
145 return map
->vaddr
+ (paddr
- map
->paddr
);
151 /* acpi_iomaps_lock must be held before calling */
152 static void __acpi_kref_del_iomap(struct kref
*ref
)
154 struct acpi_iomap
*map
;
156 map
= container_of(ref
, struct acpi_iomap
, ref
);
157 list_del_rcu(&map
->list
);
161 * Used to post-unmap the specified IO memory area. The iounmap is
162 * done only if the reference count goes zero.
164 static void acpi_post_unmap(phys_addr_t paddr
, unsigned long size
)
166 struct acpi_iomap
*map
;
170 spin_lock_irqsave(&acpi_iomaps_lock
, flags
);
171 map
= __acpi_find_iomap(paddr
, size
);
173 del
= kref_put(&map
->ref
, __acpi_kref_del_iomap
);
174 spin_unlock_irqrestore(&acpi_iomaps_lock
, flags
);
184 /* In NMI handler, should set silent = 1 */
185 static int acpi_check_gar(struct acpi_generic_address
*reg
,
186 u64
*paddr
, int silent
)
190 width
= reg
->bit_width
;
191 space_id
= reg
->space_id
;
192 /* Handle possible alignment issues */
193 memcpy(paddr
, ®
->address
, sizeof(*paddr
));
196 pr_warning(FW_BUG ACPI_PFX
197 "Invalid physical address in GAR [0x%llx/%u/%u]\n",
198 *paddr
, width
, space_id
);
202 if ((width
!= 8) && (width
!= 16) && (width
!= 32) && (width
!= 64)) {
204 pr_warning(FW_BUG ACPI_PFX
205 "Invalid bit width in GAR [0x%llx/%u/%u]\n",
206 *paddr
, width
, space_id
);
210 if (space_id
!= ACPI_ADR_SPACE_SYSTEM_MEMORY
&&
211 space_id
!= ACPI_ADR_SPACE_SYSTEM_IO
) {
213 pr_warning(FW_BUG ACPI_PFX
214 "Invalid address space type in GAR [0x%llx/%u/%u]\n",
215 *paddr
, width
, space_id
);
222 /* Pre-map, working on GAR */
223 int acpi_pre_map_gar(struct acpi_generic_address
*reg
)
229 if (reg
->space_id
!= ACPI_ADR_SPACE_SYSTEM_MEMORY
)
232 rc
= acpi_check_gar(reg
, &paddr
, 0);
236 vaddr
= acpi_pre_map(paddr
, reg
->bit_width
/ 8);
242 EXPORT_SYMBOL_GPL(acpi_pre_map_gar
);
244 /* Post-unmap, working on GAR */
245 int acpi_post_unmap_gar(struct acpi_generic_address
*reg
)
250 if (reg
->space_id
!= ACPI_ADR_SPACE_SYSTEM_MEMORY
)
253 rc
= acpi_check_gar(reg
, &paddr
, 0);
257 acpi_post_unmap(paddr
, reg
->bit_width
/ 8);
261 EXPORT_SYMBOL_GPL(acpi_post_unmap_gar
);
264 * Can be used in atomic (including NMI) or process context. RCU read
265 * lock can only be released after the IO memory area accessing.
267 static int acpi_atomic_read_mem(u64 paddr
, u64
*val
, u32 width
)
272 addr
= __acpi_ioremap_fast(paddr
, width
);
294 static int acpi_atomic_write_mem(u64 paddr
, u64 val
, u32 width
)
299 addr
= __acpi_ioremap_fast(paddr
, width
);
321 /* GAR accessing in atomic (including NMI) or process context */
322 int acpi_atomic_read(u64
*val
, struct acpi_generic_address
*reg
)
327 rc
= acpi_check_gar(reg
, &paddr
, 1);
332 switch (reg
->space_id
) {
333 case ACPI_ADR_SPACE_SYSTEM_MEMORY
:
334 return acpi_atomic_read_mem(paddr
, val
, reg
->bit_width
);
335 case ACPI_ADR_SPACE_SYSTEM_IO
:
336 return acpi_os_read_port(paddr
, (u32
*)val
, reg
->bit_width
);
341 EXPORT_SYMBOL_GPL(acpi_atomic_read
);
343 int acpi_atomic_write(u64 val
, struct acpi_generic_address
*reg
)
348 rc
= acpi_check_gar(reg
, &paddr
, 1);
352 switch (reg
->space_id
) {
353 case ACPI_ADR_SPACE_SYSTEM_MEMORY
:
354 return acpi_atomic_write_mem(paddr
, val
, reg
->bit_width
);
355 case ACPI_ADR_SPACE_SYSTEM_IO
:
356 return acpi_os_write_port(paddr
, val
, reg
->bit_width
);
361 EXPORT_SYMBOL_GPL(acpi_atomic_write
);