#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
+#include <asm/kdebug.h>
+#include <asm/numa.h>
#ifndef Dprintk
#define Dprintk(x...)
set_pte_phys(address, phys, prot);
}
-unsigned long __meminitdata table_start, table_end;
+static unsigned long __initdata table_start;
+static unsigned long __meminitdata table_end;
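+/*
+ * table_start is only used while the boot-time direct mapping is built,
+ * hence __initdata; table_end is still referenced by alloc_low_page()
+ * on the (__meminit) memory-hotplug path, so it stays __meminitdata.
+ */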
static __meminit void *alloc_low_page(unsigned long *phys)
{
vaddr += addr & ~PMD_MASK;
addr &= PMD_MASK;
for (i = 0; i < pmds; i++, addr += PMD_SIZE)
- set_pmd(pmd + i,__pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
- __flush_tlb();
+ set_pmd(pmd + i, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
+ __flush_tlb_all();
return (void *)vaddr;
next:
;
pmd = level2_kernel_pgt + pmd_index(vaddr);
for (i = 0; i < pmds; i++)
pmd_clear(pmd + i);
- __flush_tlb();
+ __flush_tlb_all();
}
static void __meminit
if (pmd_val(*pmd))
continue;
- entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
+ entry = __PAGE_KERNEL_LARGE|_PAGE_GLOBAL|address;
entry &= __supported_pte_mask;
set_pmd(pmd, __pmd(entry));
}
spin_unlock(&init_mm.page_table_lock);
unmap_low_page(pmd);
}
- __flush_tlb();
+ __flush_tlb_all();
}
static void __init find_early_table_space(unsigned long end)
/* Set up the direct mapping of physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from
   physical memory. To access them they are temporarily mapped. */
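+/*
+ * init_memory_mapping() is also reachable after boot (e.g. from the
+ * memory-hotplug path), but it only touches __initdata (table_start)
+ * while bootmem is not yet up; __init_refok tells modpost that this
+ * reference is intentional and not a section mismatch.
+ */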
-void __meminit init_memory_mapping(unsigned long start, unsigned long end)
+void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
{
unsigned long next;
if (!after_bootmem)
mmu_cr4_features = read_cr4();
__flush_tlb_all();
+
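+ /*
+  * Make sure the pfn range just used for the early page tables,
+  * [table_start, table_end), is not handed out again by later
+  * early allocations.
+  */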
+ reserve_early(table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
}
#ifndef CONFIG_NUMA
#endif /* CONFIG_MEMORY_HOTPLUG */
-#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
-/*
- * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
- * just online the pages.
- */
-int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
-{
- int err = -EIO;
- unsigned long pfn;
- unsigned long total = 0, mem = 0;
- for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
- if (pfn_valid(pfn)) {
- online_page(pfn_to_page(pfn));
- err = 0;
- mem++;
- }
- total++;
- }
- if (!err) {
- z->spanned_pages += total;
- z->present_pages += mem;
- z->zone_pgdat->node_spanned_pages += total;
- z->zone_pgdat->node_present_pages += mem;
- }
- return err;
-}
-#endif
-
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
kcore_vsyscall;
pci_iommu_alloc();
- /* clear the zero-page */
- memset(empty_zero_page, 0, PAGE_SIZE);
+ /* clear_bss() has already cleared empty_zero_page */
+
+ /* temporary debugging - double-check that it really is all zero: */
+ {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(empty_zero_page); i++)
+ WARN_ON_ONCE(empty_zero_page[i]);
+ }
reservedpages = 0;
init_page_count(virt_to_page(addr));
memset((void *)(addr & ~(PAGE_SIZE-1)),
POISON_FREE_INITMEM, PAGE_SIZE);
- if (addr >= __START_KERNEL_map)
- change_page_attr_addr(addr, 1, __pgprot(0));
free_page(addr);
totalram_pages++;
}
- if (addr > __START_KERNEL_map)
- global_flush_tlb();
+#ifdef CONFIG_DEBUG_RODATA
+ /*
+ * This will make the __init pages not present and
+ * not executable, so that any attempt to use a
+ * __init function from now on will fault immediately
+ * rather than spuriously later when memory gets reused.
+ *
+ * We only do this for DEBUG_RODATA so as not to break up the
+ * 2MB kernel mapping just for this debug feature.
+ */
+ if (begin >= __START_KERNEL_map) {
+ set_memory_rw(begin, (end - begin)/PAGE_SIZE);
+ set_memory_np(begin, (end - begin)/PAGE_SIZE);
+ set_memory_nx(begin, (end - begin)/PAGE_SIZE);
+ rodata_test();
+ }
+#endif
}
void free_initmem(void)
}
#ifdef CONFIG_DEBUG_RODATA
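+/*
+ * rodata_test_data gives the rodata self-test (rodata_test()) a known
+ * object in .rodata to write to: after mark_rodata_ro() the write is
+ * expected to fault rather than land, proving the protection works.
+ */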
+const int rodata_test_data = 0xC3;
+EXPORT_SYMBOL_GPL(rodata_test_data);
void mark_rodata_ro(void)
{
if (end <= start)
return;
- change_page_attr_addr(start, (end - start) >> PAGE_SHIFT, PAGE_KERNEL_RO);
+ set_memory_ro(start, (end - start) >> PAGE_SHIFT);
printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
(end - start) >> 10);
- /*
- * change_page_attr_addr() requires a global_flush_tlb() call after it.
- * We do this after the printk so that if something went wrong in the
- * change, the printk gets out at least to give a better debug hint
- * of who is the culprit.
- */
- global_flush_tlb();
+#ifdef CONFIG_CPA_DEBUG
+ printk("Testing CPA: undo %lx-%lx\n", start, end);
+ set_memory_rw(start, (end-start) >> PAGE_SHIFT);
+
+ printk("Testing CPA: again\n");
+ set_memory_ro(start, (end-start) >> PAGE_SHIFT);
+#endif
}
#endif
return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}
-void * __init alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
-{
- return __alloc_bootmem_core(pgdat->bdata, size,
- SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0);
-}
-
const char *arch_vma_name(struct vm_area_struct *vma)
{
if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
if (!p)
return -ENOMEM;
- entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
- mk_pte_huge(entry);
+ entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL_LARGE);
set_pmd(pmd, __pmd(pte_val(entry)));
printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",