/* This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include "mtrr.h"

struct mtrr_state {
	struct mtrr_var_range *var_ranges;
	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
	unsigned char enabled;
	mtrr_type def_type;
};

static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};

/* Get the MSR pair relating to a var range */
static void __init
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

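/*
 * The fixed-range MTRRs are eleven MSRs, each holding eight one-byte
 * memory types: one 64K register covering 0x00000-0x7FFFF, two 16K
 * registers covering 0x80000-0xBFFFF, and eight 4K registers covering
 * 0xC0000-0xFFFFF.  Each MSR is read as a lo/hi pair of 32-bit words,
 * which gives the p[] indexing used below.
 */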
static void __init
get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}

/* Grab all of the MTRR state for this CPU into mtrr_state */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;

	if (!mtrr_state.var_ranges) {
		mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof(struct mtrr_var_range),
						GFP_KERNEL);
		if (!mtrr_state.var_ranges)
			return;
	}
	vrs = mtrr_state.var_ranges;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	get_fixed_ranges(mtrr_state.fixed_ranges);

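	/* MTRRdefType layout: bits 0-7 hold the default memory type, bit 10
	   the fixed-range enable and bit 11 the global MTRR enable, hence
	   the 0xff and 0xc00 masks below. */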
	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;
}

/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/* We don't attempt to pass an error back to MTRR users, because handling
   one is quite complicated in some cases and rarely worth the trouble;
   the best error handling is usually to ignore the failure. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
}

int generic_get_free_region(unsigned long base, unsigned long size)
/* [SUMMARY] Get a free MTRR.
   <base> The starting (base) address of the region.
   <size> The size (in bytes) of the region.
   [RETURNS] The index of the region on success, else -ENOSPC on error.
*/
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase;
	unsigned lsize;

	max = num_var_ranges;
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned int *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask. */
	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
		  | mask_lo >> PAGE_SHIFT;

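	/* mask_lo now holds ~(size - 1) in units of pages.  For example, a
	   4MB range with 4K pages reads back as mask_lo == 0xfffffc00, and
	   -mask_lo == 0x400 pages == 4MB. */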
	/* This works correctly if size is a power of two, i.e. a
	   contiguous range. */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;
}

static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *) frs;
	int changed = FALSE;
	int i;
	unsigned int lo, hi;

	rdmsr(MTRRfix64K_00000_MSR, lo, hi);
	if (p[0] != lo || p[1] != hi) {
		mtrr_wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
		changed = TRUE;
	}

	for (i = 0; i < 2; i++) {
		rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
		if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
			mtrr_wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
				   p[3 + i * 2]);
			changed = TRUE;
		}
	}

	for (i = 0; i < 8; i++) {
		rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
		if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
			mtrr_wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
				   p[7 + i * 2]);
			changed = TRUE;
		}
	}
	return changed;
}

/* Set the MSR pair relating to a var range. Returns TRUE if
   changes are made */
static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	int changed = FALSE;

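	/* Compare only architecturally meaningful bits: the type and address
	   fields of PhysBase (0xfffff0ff skips the reserved bits 8-11), the
	   valid bit and address field of PhysMask (0xfffff800), and only the
	   implemented physical-address bits of the high words. */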
	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = TRUE;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = TRUE;
	}
	return changed;
}

static u32 deftype_lo, deftype_hi;

static unsigned long set_mtrr_state(void)
/* [SUMMARY] Set the MTRR state for this CPU.
   Updates the saved MTRRdefType copy (deftype_lo/deftype_hi) in place;
   post_set() writes it back to the MSR.
   [NOTE] The CPU must already be in a safe state for MTRR changes.
   [RETURNS] 0 if no changes were made, else a mask indicating what was changed.
*/
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/* post_set() restores the old value of MTRRdefType, so to set it we
	   fiddle with the saved copy here */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}
230
231 return change_mask;
232}
233
234
235static unsigned long cr4 = 0;
236static u32 deftype_lo, deftype_hi;
237static DEFINE_SPINLOCK(set_atomicity_lock);
238
/*
 * Since we are disabling the cache, don't allow any interrupts: they
 * would run extremely slowly and would only increase the pain. The
 * caller must ensure that local interrupts are disabled and are
 * reenabled after post_set() has been called.
 */

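/*
 * prepare_set()/post_set() implement the MTRR update sequence that Intel
 * documents: enter no-fill cache mode (CD=1) and flush the caches, flush
 * the TLBs, disable the MTRRs, make the changes, then undo everything in
 * reverse order.
 */
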
static void prepare_set(void)
{
	unsigned long cr0;

	/* Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots */

	spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | 0x40000000;	/* set CD flag */
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

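	/* Clearing PGE above flushes global TLB entries, which the CR3
	   reload below would otherwise leave in place. */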
	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
}

static void post_set(void)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof(mask) * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}

static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
/* [SUMMARY] Set a variable MTRR register on the local CPU.
   <reg> The register to set.
   <base> The base address of the region.
   <size> The size of the region. If this is 0 the region is disabled.
   <type> The type of the region.
   [RETURNS] Nothing.
*/
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
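		/* PhysBase holds the page-shifted base address OR'd with the
		   memory type; PhysMask holds ~(size - 1) in the address bits
		   plus the valid bit (0x800). */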
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}

int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/* For Intel PPro stepping <= 7, must be 4 MiB aligned
	   and not touch 0x70000000->0x7003FFFF */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000000 || base > 0x7003FFFF) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	if (base + size < 0x100) {
		printk(KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
		       base, size);
		return -EINVAL;
	}
	/* Check upper bits of base and last are equal and lower bits are 0
	   for base and 1 for last */
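	/* E.g. base 0x100, size 0x100 (in pages): last = 0x1ff, and both
	   shift down to 1 in lockstep, so the region is size-aligned. */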
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}

static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
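
	/* MTRRcap bit 10 (WC) indicates whether the write-combining memory
	   type is supported. */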
	rdmsr(MTRRcap_MSR, config, dummy);
	return (config & (1 << 10));
}

int positive_have_wrcomb(void)
{
	return 1;
}

/* Generic (Intel-compatible) MTRR operations */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if      = 1,
	.set_all           = generic_set_all,
	.get               = generic_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.set               = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = generic_have_wrcomb,
};