/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include "mtrr.h"

struct mtrr_state {
	struct mtrr_var_range *var_ranges;
	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
	unsigned char enabled;
	unsigned char have_fixed;
	mtrr_type def_type;
};

static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "mtrr."

static __initdata int mtrr_show;
module_param_named(show, mtrr_show, bool, 0);

/* Get the MSR pair relating to a var range */
static void __init
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

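/*
 * The fixed-range MTRRs come as one 64K, two 16K and eight 4K register
 * groups, each MSR packing eight one-byte mtrr_types.  The accessors
 * below therefore view frs[] as an array of 32-bit MSR halves:
 * p[0..1] for MTRRfix64K_00000, p[2..5] for the two 16K registers and
 * p[6..21] for the eight 4K registers.
 */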
static void
get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}

void mtrr_save_fixed_ranges(void *info)
{
	get_fixed_ranges(mtrr_state.fixed_ranges);
}

static void __init print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step)
		printk(KERN_INFO "MTRR %05X-%05X %s\n",
		       base, base + step - 1, mtrr_attrib_to_str(*types));
}

/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;

	if (!mtrr_state.var_ranges) {
		mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range),
						GFP_KERNEL);
		if (!mtrr_state.var_ranges)
			return;
	}
	vrs = mtrr_state.var_ranges;

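	/*
	 * MTRRcap: bits 7:0 give the variable-range count (already cached
	 * in num_var_ranges), bit 8 (FIX) advertises the fixed-range
	 * registers and bit 10 (WC) write-combining support.
	 */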
	rdmsr(MTRRcap_MSR, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

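	/*
	 * MTRRdefType: bits 7:0 hold the default memory type, bit 10 the
	 * fixed-range enable and bit 11 the global MTRR enable, so
	 * mtrr_state.enabled ends up with FE in bit 0 and E in bit 1.
	 */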
	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (mtrr_show) {
		int high_width;

		printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
		if (mtrr_state.have_fixed) {
			printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
			       mtrr_state.enabled & 1 ? "en" : "dis");
			print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
			for (i = 0; i < 2; ++i)
				print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
			for (i = 0; i < 8; ++i)
				print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
		}
		printk(KERN_INFO "MTRR variable ranges %sabled:\n",
		       mtrr_state.enabled & 2 ? "en" : "dis");
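		/*
		 * high_width is the number of hex digits needed for the
		 * address bits above bit 31: ffs(size_or_mask) - 1 is the
		 * physical address width in page-frame bits, subtracting
		 * (32 - PAGE_SHIFT) leaves the bits above 4GB, and +3 / 4
		 * rounds up to whole hex digits.
		 */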
		high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
		for (i = 0; i < num_var_ranges; ++i) {
			if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
				printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				       i,
				       high_width,
				       mtrr_state.var_ranges[i].base_hi,
				       mtrr_state.var_ranges[i].base_lo >> 12,
				       high_width,
				       mtrr_state.var_ranges[i].mask_hi,
				       mtrr_state.var_ranges[i].mask_lo >> 12,
				       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
			else
				printk(KERN_INFO "MTRR %u disabled\n", i);
		}
	}
}

/* Some BIOSes are messed up and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/* We don't pass an error back to the MTRR users, because propagating
   one is quite complicated in some cases and the best error handling
   is usually to ignore the failure anyway. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
}

int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/* [SUMMARY] Get a free MTRR.
   <base> The starting (base) address of the region.
   <size> The size (in bytes) of the region.
   [RETURNS] The index of the region on success, else -ENOSPC on error.
*/
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask. */
	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
	    | mask_lo >> PAGE_SHIFT;

	/* This works correctly if size is a power of two, i.e. a
	   contiguous range. */
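	/*
	 * Worked example, assuming 36-bit physical addresses and 4K pages:
	 * a valid 128MB range has PhysMask 0xff8000800 (bits 35:27 plus
	 * the valid bit).  Shifted into page-frame terms and OR'ed with
	 * size_or_mask (0xff000000) this gives mask_lo = 0xffff8000, and
	 * -mask_lo = 0x8000 pages = 128MB.
	 */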
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;
}

static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *) frs;
	int changed = FALSE;
	int i;
	unsigned int lo, hi;

	rdmsr(MTRRfix64K_00000_MSR, lo, hi);
	if (p[0] != lo || p[1] != hi) {
		mtrr_wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
		changed = TRUE;
	}

	for (i = 0; i < 2; i++) {
		rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
		if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
			mtrr_wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
				   p[3 + i * 2]);
			changed = TRUE;
		}
	}

	for (i = 0; i < 8; i++) {
		rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
		if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
			mtrr_wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
				   p[7 + i * 2]);
			changed = TRUE;
		}
	}
	return changed;
}

/* Set the MSR pair relating to a var range. Returns TRUE if
   changes are made */
static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	int changed = FALSE;

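	/*
	 * Compare only the architecturally defined bits: 0xfffff0ffUL
	 * keeps the type (bits 7:0) and the low address bits (31:12) of
	 * PhysBase, while size_and_mask >> (32 - PAGE_SHIFT) limits the
	 * high word to the address bits this CPU actually implements
	 * above 4GB.
	 */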
	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = TRUE;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = TRUE;
	}
	return changed;
}

static u32 deftype_lo, deftype_hi;

static unsigned long set_mtrr_state(void)
/* [SUMMARY] Set the MTRR state for this CPU.
   [NOTE] The CPU must already be in a safe state for MTRR changes.
   [RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/* set_mtrr_restore restores the old value of MTRRdefType,
	   so to set it we fiddle with the saved value */
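	/* 0xcff covers the default type (bits 7:0) plus the fixed-range
	   enable (bit 10) and the global MTRR enable (bit 11) */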
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}


static unsigned long cr4 = 0;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache don't allow any interrupts - they
 * would run extremely slowly and would only increase the pain. The caller must
 * ensure that local interrupts are disabled and are reenabled after post_set()
 * has been called.
 */

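/*
 * The sequence below - enter no-fill cache mode, wbinvd, flush the TLBs
 * with PGE cleared, rewrite the MSRs, then undo it all in post_set() -
 * follows Intel's documented procedure for changing MTRRs safely.
 */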
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/* Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots */

	spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | 0x40000000;	/* set CD flag */
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}

static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Enable caches by clearing the CD flag (CR0 bit 30) */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}

static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
/* [SUMMARY] Set variable MTRR register on the local CPU.
   <reg> The register to set.
   <base> The base address of the region.
   <size> The size of the region. If this is 0 the region is disabled.
   <type> The type of the region.
   [RETURNS] Nothing.
*/
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
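		/*
		 * Encode base and size (both in 4K pages) into PhysBase and
		 * PhysMask.  Example, assuming 36-bit physical addresses:
		 * size = 0x8000 pages (128MB) gives mask_lo = -0x8000 << 12
		 * | 0x800 = 0xf8000800 and mask_hi = 0xf, i.e. PhysMask =
		 * 0xff8000800 with the valid bit (11) set.
		 */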
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}

int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/* For Intel PPro stepping <= 7, must be 4 MiB aligned
	   and not touch 0x70000000->0x7003FFFF */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

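	/* base and size are in units of 4K pages, so 0x100 pages == 1MB */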
	if (base < 0x100) {
		printk(KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
		       base, size);
		return -EINVAL;
	}
	/* Check that the upper bits of base and last are equal and that the
	   lower bits are 0 for base and 1 for last, i.e. the region is
	   size-aligned and size is a power of two. */
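	/* E.g. base = 0x100, size = 0x100 (1MB at 1MB): last = 0x1ff, and
	   after eight shifts lbase == last == 1, so the check passes. */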
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1) ;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}


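/* MTRRcap bit 10 (WC) advertises write-combining support */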
static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MTRRcap_MSR, config, dummy);
	return (config & (1 << 10));
}

int positive_have_wrcomb(void)
{
	return 1;
}

/* generic structure... */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if = 1,
	.set_all = generic_set_all,
	.get = generic_get_mtrr,
	.get_free_region = generic_get_free_region,
	.set = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb = generic_have_wrcomb,
};