/*
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size	= SZ_512M, .flag = PMB_SZ_512M, },
	{ .size	= SZ_128M, .flag = PMB_SZ_128M, },
	{ .size	= SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size	= SZ_16M,  .flag = PMB_SZ_16M,  },
};
static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}
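
/*
 * Added commentary (illustrative, not from the original source): each PMB
 * entry is programmed through a pair of memory-mapped registers, an
 * "address" word holding the VPN and valid bit and a "data" word holding
 * the PPN plus size/caching attributes. Assuming the usual SH-4A spacing
 * where PMB_E_SHIFT is 8, consecutive entries sit 0x100 bytes apart, so
 * for entry 2 the helpers above give roughly:
 *
 *	mk_pmb_entry(2) == 2 << PMB_E_SHIFT == 0x200
 *	mk_pmb_addr(2)  == PMB_ADDR | 0x200
 *	mk_pmb_data(2)  == PMB_DATA | 0x200
 *
 * The actual PMB_ADDR/PMB_DATA bases come from the CPU's address space
 * definitions and may differ between parts.
 */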
static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}
/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}
/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}
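
/*
 * Example (added commentary, illustrative only): a normal cacheable
 * kernel mapping carrying _PAGE_CACHABLE translates to just PMB_C, while
 * a write-through mapping that also carries _PAGE_WT would come back as
 * PMB_C | PMB_WT | PMB_UB. Protection bits with no PMB equivalent are
 * simply dropped here.
 */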
static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}
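
/*
 * Example (added commentary, illustrative only): two 16MB entries with
 * identical flags, the first mapping 0xa0000000 -> 0x08000000 and the
 * second mapping 0xa1000000 -> 0x09000000, are mergeable because both
 * the virtual and physical ranges are contiguous. The test is ordered:
 * 'b' must directly follow 'a', so callers pass entries in ascending
 * address order.
 */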
static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
			       unsigned long size)
{
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe, *iter;
		unsigned long span;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * See if VPN and PPN are bounded by an existing mapping.
		 */
		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
			continue;
		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
			continue;

		/*
		 * Now see if we're in range of a simple mapping.
		 */
		if (size <= pmbe->size) {
			read_unlock(&pmb_rwlock);
			return true;
		}

		span = pmbe->size;

		/*
		 * Finally for sizes that involve compound mappings, walk
		 * the chain.
		 */
		for (iter = pmbe->link; iter; iter = iter->link)
			span += iter->size;

		/*
		 * Nothing else to do if the range requirements are met.
		 */
		if (size <= span) {
			read_unlock(&pmb_rwlock);
			return true;
		}
	}

	read_unlock(&pmb_rwlock);

	return false;
}
static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}
static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}
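
/*
 * Added commentary (illustrative only): with the usual SH segment layout
 * (P1SEG at 0x80000000, P3SEG at 0xc0000000), pmb_addr_valid() only
 * accepts ranges that sit entirely inside the 1GB P1/P2 window the PMB
 * can translate; e.g. a 64MB mapping at 0xa4000000 is fine, while the
 * same mapping at 0xbd000000 would run past P3SEG and is rejected.
 * pmb_prot_valid() additionally refuses user-accessible protections,
 * since PMB translations are kernel-only.
 */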
static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}
static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry	= PMB_NO_ENTRY;
	pmbe->link	= NULL;
}
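
/*
 * Note (added commentary): pmb_alloc() and pmb_free() only manage the
 * software state, i.e. the pmb_map bitmap and the pmb_entry_list slots.
 * The hardware copy of an entry is written and invalidated separately
 * via __set_pmb_entry()/__clear_pmb_entry(), normally with the entry's
 * spinlock held, as the callers below do.
 */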
/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	jump_to_uncached();

	/* Set V-bit */
	__raw_writel(pmbe->vpn | PMB_V, addr);
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

	back_to_cached();
}
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}
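
/*
 * Note (added commentary): invalidation only drops the valid bit in both
 * halves of the entry rather than zeroing them; that is all the hardware
 * needs to ignore the slot during translation, and the remaining fields
 * are fully rewritten the next time the slot is reused by
 * __set_pmb_entry().
 */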
#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long orig_addr, orig_size;
	unsigned long flags, pmb_flags;
	int i, mapped;

	if (size < SZ_16M)
		return -EINVAL;
	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;
	if (pmb_mapping_exists(vaddr, phys, size))
		return 0;

	orig_addr = vaddr;
	orig_size = size;

	flush_tlb_kernel_range(vaddr, vaddr + size);

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

	do {
		for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
			if (size < pmb_sizes[i].size)
				continue;

			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
					 pmb_sizes[i].flag, PMB_NO_ENTRY);
			if (IS_ERR(pmbe)) {
				pmb_unmap_entry(pmbp, mapped);
				return PTR_ERR(pmbe);
			}

			spin_lock_irqsave(&pmbe->lock, flags);

			pmbe->size = pmb_sizes[i].size;

			__set_pmb_entry(pmbe);

			phys	+= pmbe->size;
			vaddr	+= pmbe->size;
			size	-= pmbe->size;

			/*
			 * Link adjacent entries that span multiple PMB
			 * entries for easier tear-down.
			 */
			if (likely(pmbp)) {
				spin_lock(&pmbp->lock);
				pmbp->link = pmbe;
				spin_unlock(&pmbp->lock);
			}

			pmbp = pmbe;

			/*
			 * Instead of trying smaller sizes on every
			 * iteration (even if we succeed in allocating
			 * space), try using pmb_sizes[i].size again.
			 */
			i--;
			mapped++;

			spin_unlock_irqrestore(&pmbe->lock, flags);
		}
	} while (size >= SZ_16M);

	flush_cache_vmap(orig_addr, orig_addr + orig_size);

	return 0;
}
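
/*
 * Worked example (added commentary, illustrative only): bolting a 192MB
 * region walks pmb_sizes[] from largest to smallest. The first pass
 * places a 128MB entry; the remaining 64MB no longer satisfies 128MB and
 * falls through to a single 64MB entry, after which less than 16MB is
 * left and the loop terminates. The two entries end up chained through
 * pmbe->link so pmb_unmap_entry() can tear the compound mapping down as
 * one unit.
 */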
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	/*
	 * XXX: This should really start from uncached_end, but this
	 * causes the MMU to reset, so for now we restrict it to the
	 * 0xb000...0xc000 range.
	 */
	area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)vaddr);
}
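
/*
 * Worked example (added commentary, illustrative only): remapping 20MB
 * of I/O space at physical 0x18001000 selects the 16MB step from
 * pmb_sizes[], so align_mask clears the low 24 bits: phys is rounded
 * down to 0x18000000, offset becomes 0x1000, and the vm area is sized
 * to ALIGN(0x18001000 + 20MB, 16MB) - 0x18000000 = 32MB so the
 * unaligned head and tail still fall inside it. The pointer handed back
 * is vaddr + offset, i.e. it points at the requested physical address
 * rather than the aligned base.
 */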
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}
static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}
static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}
/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}
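
/*
 * Worked example (added commentary, illustrative only): a boot loader
 * that described 128MB of memory as a 64MB head entry chained to four
 * 16MB entries gives a running span of 80MB, 96MB, 112MB and finally
 * 128MB; only the 128MB total is a valid PMB size, so newsize/depth are
 * taken from the last link, the head is re-programmed as a single 128MB
 * mapping and the four trailing entries are unmapped.
 */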
static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}
#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size	= SZ_16M;
		pmbe->flags	&= ~PMB_SZ_MASK;
		pmbe->flags	|= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif
static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);
void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	local_flush_tlb_all();
}
bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
			 (size == PMB_SZ_64M)  ? " 64MB" :
			 (size == PMB_SZ_128M) ? "128MB" :
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}
static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
subsys_initcall(pmb_debugfs_init);
#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend	= pmb_sysdev_suspend,
	.resume		= pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif /* CONFIG_PM */