/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010 Paul Mundt
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};
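/*
 * The size table below is deliberately sorted from largest to smallest
 * page size, so the greedy carving loop in pmb_bolt_mapping() can take
 * the first size that still fits. 16MB, 64MB, 128MB and 512MB are the
 * only translation sizes the PMB supports.
 */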
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size	= SZ_512M, .flag = PMB_SZ_512M, },
	{ .size	= SZ_128M, .flag = PMB_SZ_128M, },
	{ .size	= SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size	= SZ_16M,  .flag = PMB_SZ_16M,  },
};
static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;
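/*
 * Each PMB entry is programmed through a register pair in the memory
 * mapped address and data arrays: the address entry holds the VPN and
 * valid bit, the data entry holds the PPN plus the size, caching and
 * valid bits. The helpers below compute the register addresses for a
 * given entry number.
 */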
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}
static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}
static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}
/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}
/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}
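/*
 * Two entries are merge candidates when the second begins exactly where
 * the first ends, both virtually and physically, with identical flags.
 * E.g. a 64MB mapping at 0x80000000 -> 0x40000000 followed by a 64MB
 * mapping at 0x84000000 -> 0x44000000 forms one contiguous 128MB range.
 */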
static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}
static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
			       unsigned long size)
{
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe, *iter;
		unsigned long span;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * See if VPN and PPN are bounded by an existing mapping.
		 */
		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
			continue;
		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
			continue;

		/*
		 * Now see if we're in range of a simple mapping.
		 */
		if (size <= pmbe->size) {
			read_unlock(&pmb_rwlock);
			return true;
		}

		span = pmbe->size;

		/*
		 * Finally for sizes that involve compound mappings, walk
		 * the chain.
		 */
		for (iter = pmbe->link; iter; iter = iter->link)
			span += iter->size;

		/*
		 * Nothing else to do if the range requirements are met.
		 */
		if (size <= span) {
			read_unlock(&pmb_rwlock);
			return true;
		}
	}

	read_unlock(&pmb_rwlock);
	return false;
}
static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}
static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}
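/*
 * PMB mappings are kernel-only; anything with _PAGE_USER set must go
 * through the TLB instead.
 */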
static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}
static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}
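/*
 * pmb_alloc_entry() hands out a free slot in the entry map; callers
 * must hold pmb_rwlock for writing so that the bitmap scan and the
 * bit update happen atomically with respect to other allocators.
 */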
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}
static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry	= PMB_NO_ENTRY;
	pmbe->link	= NULL;
}
/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	jump_to_uncached();

	/* Set V-bit */
	__raw_writel(pmbe->vpn | PMB_V, addr);
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

	back_to_cached();
}
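/*
 * Tearing an entry down only needs to drop the V-bits; the update is
 * pushed out through writel_uncached(), so unlike __set_pmb_entry()
 * there is no need to jump to the uncached text first.
 */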
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long orig_addr, orig_size;
	unsigned long flags, pmb_flags;
	int i, mapped;

	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;
	if (pmb_mapping_exists(vaddr, phys, size))
		return 0;

	orig_addr = vaddr;
	orig_size = size;

	flush_tlb_kernel_range(vaddr, vaddr + size);

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

	do {
		for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
			if (size < pmb_sizes[i].size)
				continue;

			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
					 pmb_sizes[i].flag, PMB_NO_ENTRY);
			if (IS_ERR(pmbe)) {
				pmb_unmap_entry(pmbp, mapped);
				return PTR_ERR(pmbe);
			}

			spin_lock_irqsave(&pmbe->lock, flags);

			pmbe->size = pmb_sizes[i].size;

			__set_pmb_entry(pmbe);

			phys	+= pmbe->size;
			vaddr	+= pmbe->size;
			size	-= pmbe->size;

			/*
			 * Link adjacent entries that span multiple PMB
			 * entries for easier tear-down.
			 */
			if (likely(pmbp)) {
				spin_lock(&pmbp->lock);
				pmbp->link = pmbe;
				spin_unlock(&pmbp->lock);
			}

			pmbp = pmbe;

			/*
			 * Instead of trying smaller sizes on every
			 * iteration (even if we succeed in allocating
			 * space), try using pmb_sizes[i].size again.
			 */
			i--;
			mapped++;

			spin_unlock_irqrestore(&pmbe->lock, flags);
		}
	} while (size >= SZ_16M);

	flush_cache_vmap(orig_addr, orig_addr + orig_size);

	return 0;
}
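/*
 * Example: bolting a 208MB region with the loop above carves it
 * greedily into 128MB + 64MB + 16MB, consuming three PMB slots that
 * are chained through ->link for later tear-down or coalescing.
 */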
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	/*
	 * XXX: This should really start from uncached_end, but this
	 * causes the MMU to reset, so for now we restrict it to the
	 * 0xb000...0xc000 range.
	 */
	area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)vaddr);
}
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}
static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}
static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}
/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}
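/*
 * pmb_merge() folds a chain of contiguous entries into the largest
 * valid single size, e.g. four linked 16MB entries collapse into one
 * 64MB entry; the now-redundant tail entries are then unmapped and
 * their slots freed.
 */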
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}
static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}
#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size	= SZ_16M;
		pmbe->flags	&= ~PMB_SZ_MASK;
		pmbe->flags	|= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif
static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);
void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
	ctrl_barrier();
}
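/*
 * PASCR.SE flags 32-bit (space extension) mode; if it reads back clear,
 * the CPU is still running with legacy 29-bit physical addressing.
 */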
bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
			 (size == PMB_SZ_64M)  ? " 64MB" :
			 (size == PMB_SZ_128M) ? "128MB" :
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}
static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}
static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
subsys_initcall(pmb_debugfs_init);
#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend	= pmb_sysdev_suspend,
	.resume		= pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif