/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include "mtrr.h"

struct mtrr_state {
	struct mtrr_var_range *var_ranges;
	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
	unsigned char enabled;
	unsigned char have_fixed;
	mtrr_type def_type;
};

struct fixed_range_block {
	int base_msr;	/* start address of an MTRR block */
	int ranges;	/* number of MTRRs in this block */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MTRRfix64K_00000_MSR, 1 },	/* one 64k MTRR */
	{ MTRRfix16K_80000_MSR, 2 },	/* two 16k MTRRs */
	{ MTRRfix4K_C0000_MSR, 8 },	/* eight 4k MTRRs */
	{}
};

static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "mtrr."

static int mtrr_show;
module_param_named(show, mtrr_show, bool, 0);

/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
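
/*
 * The fixed ranges live in eleven MSRs: one covering 0-7FFFF in 64K
 * pieces, two covering 80000-BFFFF in 16K pieces and eight covering
 * C0000-FFFFF in 4K pieces. Each MSR packs eight one-byte region
 * types, so reading each one as a lo/hi word pair fills *frs in
 * address order.
 */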
static void
get_fixed_ranges(mtrr_type * frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}

void mtrr_save_fixed_ranges(void *info)
{
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);
}

static void print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step)
		printk(KERN_INFO "MTRR %05X-%05X %s\n",
			base, base + step - 1, mtrr_attrib_to_str(*types));
}

/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;

	if (!mtrr_state.var_ranges) {
		mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range),
						GFP_KERNEL);
		if (!mtrr_state.var_ranges)
			return;
	}
	vrs = mtrr_state.var_ranges;
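
	/* MTRRcap bit 8 (FIX) reports whether fixed-range MTRRs are present */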
	rdmsr(MTRRcap_MSR, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);
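
	/* MTRRdefType: bits 0-7 default type, bit 10 fixed-range enable,
	   bit 11 MTRR enable */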
	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (mtrr_show) {
		int high_width;
111 printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
112 if (mtrr_state.have_fixed) {
113 printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
114 mtrr_state.enabled & 1 ? "en" : "dis");
115 print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
116 for (i = 0; i < 2; ++i)
117 print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
118 for (i = 0; i < 8; ++i)
119 print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
120 }
121 printk(KERN_INFO "MTRR variable ranges %sabled:\n",
122 mtrr_state.enabled & 2 ? "en" : "dis");
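		/* Width, in hex digits, of the above-bit-31 part of a
		   physical base/mask, derived from the physical address
		   width implied by size_or_mask */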
		high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32)
			      - (32 - PAGE_SHIFT) + 3) / 4;
		for (i = 0; i < num_var_ranges; ++i) {
			if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
				printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				       i,
				       high_width,
				       mtrr_state.var_ranges[i].base_hi,
				       mtrr_state.var_ranges[i].base_lo >> 12,
				       high_width,
				       mtrr_state.var_ranges[i].mask_hi,
				       mtrr_state.var_ranges[i].mask_lo >> 12,
				       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
			else
				printk(KERN_INFO "MTRR %u disabled\n", i);
		}
	}
}

/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/* Doesn't attempt to pass an error out to MTRR users
   because it's quite complicated in some cases and probably not
   worth it; the best error handling is to ignore it. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
}

/**
 * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs
 * see AMD publication no. 24593, chapter 3.2.1 for more information
 */
static inline void k8_enable_fixed_iorrs(void)
{
	unsigned lo, hi;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	mtrr_wrmsr(MSR_K8_SYSCFG, lo
				| K8_MTRRFIXRANGE_DRAM_ENABLE
				| K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
}

/**
 * Checks and updates a fixed-range MTRR if it differs from the value it
 * should have. If K8 extensions are wanted, update the K8 SYSCFG MSR also.
 * see AMD publication no. 24593, chapter 7.8.1, page 233 for more information
 * \param msr MSR address of the MTRR which should be checked and updated
 * \param changed pointer which indicates whether the MTRR needed to be changed
 * \param msrwords pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 15 &&
		    ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
			k8_enable_fixed_iorrs();
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}

int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/* [SUMMARY] Get a free MTRR.
   <base> The starting (base) address of the region.
   <size> The size (in bytes) of the region.
   [RETURNS] The index of the region on success, else -ENOSPC if no
   register is free.
*/
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask. */
	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
		  | mask_lo >> PAGE_SHIFT;

	/* This works correctly if size is a power of two, i.e. a
	   contiguous range. */
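	/* (negating a mask of the form 111...1000...0, in page units,
	   yields the size in pages: e.g. a mask of 0xfffff000 negates
	   to 0x1000 = 4096 pages, i.e. 16MB with 4K pages) */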
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;
}

/**
 * Checks and updates the fixed-range MTRRs if they differ from the saved set
 * \param frs pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type * frs)
{
	unsigned long long *saved = (unsigned long long *) frs;
	bool changed = false;
	int block = -1, range;

	while (fixed_range_blocks[++block].ranges)
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *) saved++);

	return changed;
}

/* Set the MSR pair relating to a var range. Returns true if
   changes are made */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;
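
	/* In MTRRphysBase bits 0-7 hold the type and bits 8-11 are reserved,
	   hence the 0xfffff0ff compare mask below; in MTRRphysMask bit 11 is
	   the valid bit, hence 0xfffff800. */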
	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}

static u32 deftype_lo, deftype_hi;

static unsigned long set_mtrr_state(void)
/* [SUMMARY] Set the MTRR state for this CPU.
   [NOTE] The CPU must already be in a safe state for MTRR changes.
   [RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/* Set_mtrr_restore restores the old value of MTRRdefType,
	   so to set it we fiddle with the saved value */
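	/* (0xcff covers the default-type field, bits 0-7, plus the
	   fixed-range and global enable bits, bits 10-11) */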
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
			     (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}

static unsigned long cr4 = 0;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache don't allow any interrupts - they
 * would run extremely slow and would only increase the pain. The caller must
 * ensure that local interrupts are disabled and are reenabled after post_set()
 * has been called.
 */

static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/* Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots */

	spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | 0x40000000;	/* set CD flag */
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if ( cpu_has_pge ) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}

static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if ( cpu_has_pge )
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}

static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
/* [SUMMARY] Set variable MTRR register on the local CPU.
   <reg> The register to set.
   <base> The base address of the region, in pages.
   <size> The size of the region, in pages. If this is 0 the region is
   disabled.
   <type> The type of the region.
   [RETURNS] Nothing.
*/
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
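		/*
		 * Example (4K pages): a 64MB region at 0x40000000 has
		 * base = 0x40000 pages and size = 0x4000 pages, so
		 * base_lo = 0x40000000 | type and
		 * mask_lo = 0xfc000000 | 0x800 (bit 11 is the valid bit).
		 */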
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}

int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/* For Intel PPro stepping <= 7, must be 4 MiB aligned
	   and not touch 0x70000000->0x7003FFFF */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
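		/* base and size are in 4K pages, so pages 0x70000-0x7003F
		   are the 0x70000000-0x7003FFFF byte range */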
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/* Check upper bits of base and last are equal and lower bits are 0
	   for base and 1 for last */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}

static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MTRRcap_MSR, config, dummy);
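	/* MTRRcap bit 10 (WC) indicates write-combining support */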
	return (config & (1 << 10));
}

int positive_have_wrcomb(void)
{
	return 1;
}

/* Generic (Intel-compatible) MTRR operations */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if      = 1,
	.set_all           = generic_set_all,
	.get               = generic_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.set               = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = generic_have_wrcomb,
};