#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
+#include <linux/sizes.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
-#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
- /*
- * Only use write-through for non-SMP systems
- */
- if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
- vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
-
/*
* Enable CPU-specific coherency if supported.
 * (Only available on XSC3 at the moment.)
 */
vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
vm->addr = (void *)addr;
vm->size = size;
- vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
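+ /*
+  * Tag this as an "empty" reservation: this helper claims the address
+  * space but maps nothing there, unlike a real static I/O mapping.
+  */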
+ vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
vm->caller = caller;
vm_area_add_early(vm);
}
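/*
 * Minimal usage sketch, assuming the helper above is
 * vm_reserve_area_early() and that a gap-filling caller such as
 * pmd_empty_section_gap() exists (neither name appears in this hunk):
 *
 *	static void __init pmd_empty_section_gap(unsigned long addr)
 *	{
 *		vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
 *	}
 */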
/* we're still single-threaded, hence no lock is needed here */
for (vm = vmlist; vm; vm = vm->next) {
- if (!(vm->flags & VM_ARM_STATIC_MAPPING))
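+ /* gap reservations now carry VM_ARM_EMPTY_MAPPING, so accept either flag */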
+ if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
continue;
addr = (unsigned long)vm->addr;
if (addr < next)