/*
 * Page Deallocation Table (PDT) support
 *
 * The Page Deallocation Table (PDT) holds a table with pointers to bad
 * memory (broken RAM modules) which is maintained by firmware.
 *
 * Copyright 2017 by Helge Deller <deller@gmx.de>
 *
 * TODO:
 * - check regularly for new bad memory
 * - add userspace interface with procfs or sysfs
 * - increase number of PDT entries dynamically
 */
#include <linux/memblock.h>
#include <linux/seq_file.h>

#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
23 enum pdt_access_type
{
30 static enum pdt_access_type pdt_type
;
32 /* global PDT status information */
33 static struct pdc_mem_retinfo pdt_status
;
35 #define MAX_PDT_TABLE_SIZE PAGE_SIZE
36 #define MAX_PDT_ENTRIES (MAX_PDT_TABLE_SIZE / sizeof(unsigned long))
37 static unsigned long pdt_entry
[MAX_PDT_ENTRIES
] __page_aligned_bss
;
40 /* report PDT entries via /proc/meminfo */
41 void arch_report_meminfo(struct seq_file
*m
)
43 if (pdt_type
== PDT_NONE
)
46 seq_printf(m
, "PDT_max_entries: %7lu\n",
48 seq_printf(m
, "PDT_cur_entries: %7lu\n",
49 pdt_status
.pdt_entries
);
55 * Initialize kernel PDT structures, read initial PDT table from firmware,
56 * report all current PDT entries and mark bad memory with memblock_reserve()
57 * to avoid that the kernel will use broken memory areas.
60 void __init
pdc_pdt_init(void)
63 unsigned long entries
;
64 struct pdc_mem_read_pdt pdt_read_ret
;
67 struct pdc_pat_mem_retinfo pat_rinfo
;
69 pdt_type
= PDT_PAT_NEW
;
70 ret
= pdc_pat_mem_pdt_info(&pat_rinfo
);
71 pdt_status
.pdt_size
= pat_rinfo
.max_pdt_entries
;
72 pdt_status
.pdt_entries
= pat_rinfo
.current_pdt_entries
;
73 pdt_status
.pdt_status
= 0;
74 pdt_status
.first_dbe_loc
= pat_rinfo
.first_dbe_loc
;
75 pdt_status
.good_mem
= pat_rinfo
.good_mem
;
78 ret
= pdc_mem_pdt_info(&pdt_status
);
83 pr_info("PDT: Firmware does not provide any page deallocation"
88 entries
= pdt_status
.pdt_entries
;
89 WARN_ON(entries
> MAX_PDT_ENTRIES
);
91 pr_info("PDT: size %lu, entries %lu, status %lu, dbe_loc 0x%lx,"
93 pdt_status
.pdt_size
, pdt_status
.pdt_entries
,
94 pdt_status
.pdt_status
, pdt_status
.first_dbe_loc
,
98 pr_info("PDT: Firmware reports all memory OK.\n");
102 if (pdt_status
.first_dbe_loc
&&
103 pdt_status
.first_dbe_loc
<= __pa((unsigned long)&_end
))
104 pr_crit("CRITICAL: Bad memory inside kernel image memory area!\n");
106 pr_warn("PDT: Firmware reports %lu entries of faulty memory:\n",
109 if (pdt_type
== PDT_PDC
)
110 ret
= pdc_mem_pdt_read_entries(&pdt_read_ret
, pdt_entry
);
113 struct pdc_pat_mem_read_pd_retinfo pat_pret
;
115 ret
= pdc_pat_mem_read_cell_pdt(&pat_pret
, pdt_entry
,
118 pdt_type
= PDT_PAT_OLD
;
119 ret
= pdc_pat_mem_read_pd_pdt(&pat_pret
, pdt_entry
,
120 MAX_PDT_TABLE_SIZE
, 0);
129 pr_debug("PDT type %d, retval = %d\n", pdt_type
, ret
);
133 for (i
= 0; i
< pdt_status
.pdt_entries
; i
++) {
135 pr_warn("PDT: BAD PAGE #%d at 0x%08lx (error_type = %lu)\n",
137 pdt_entry
[i
] & PAGE_MASK
,
140 /* mark memory page bad */
141 memblock_reserve(pdt_entry
[i
] & PAGE_MASK
, PAGE_SIZE
);