/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/processor-flags.h>
#include <asm/tlbflush.h>
#include <asm/pat.h>
#include "mtrr.h"

struct mtrr_state {
	struct mtrr_var_range var_ranges[MAX_VAR_RANGES];
	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
	unsigned char enabled;
	unsigned char have_fixed;
	mtrr_type def_type;
};

struct fixed_range_block {
	int base_msr;	/* start address of an MTRR block */
	int ranges;	/* number of MTRRs in this block */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MTRRfix64K_00000_MSR, 1 },	/* one 64k MTRR */
	{ MTRRfix16K_80000_MSR, 2 },	/* two 16k MTRRs */
	{ MTRRfix4K_C0000_MSR,  8 },	/* eight 4k MTRRs */
	{}
};
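
/*
 * Illustrative layout, derived from the table above (not in the original
 * source): the eleven fixed-range MSRs cover the first 1MB of physical
 * memory, eight sub-ranges per MSR, one mtrr_type byte each:
 *
 *   1 x MTRRfix64K:  8 * 64K = 0x00000-0x7ffff -> fixed_ranges[ 0.. 7]
 *   2 x MTRRfix16K: 16 * 16K = 0x80000-0xbffff -> fixed_ranges[ 8..23]
 *   8 x MTRRfix4K:  64 *  4K = 0xc0000-0xfffff -> fixed_ranges[24..87]
 *
 * which is why NUM_FIXED_RANGES is 88.
 */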

static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};
static int mtrr_state_set;
u64 mtrr_tom2;

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "mtrr."

static bool mtrr_show;
module_param_named(show, mtrr_show, bool, 0);

/*
 * Returns the effective MTRR type for the region
 * Error returns:
 * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
 * - 0xFF - when MTRR is not enabled
 */
u8 mtrr_type_lookup(u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	if (!mtrr_state_set)
		return 0xFF;

	if (!mtrr_state.enabled)
		return 0xFF;
	/* Make end inclusive, instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state.have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state.fixed_ranges[idx];
		}
	}
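
	/*
	 * Worked example (illustrative, not from the original source):
	 * start = 0x90000 takes the 16K branch above, so
	 * idx = 1 * 8 + ((0x90000 - 0x80000) >> 14) = 8 + 4 = 12,
	 * i.e. fixed_ranges[12], the 16K range covering 0x90000-0x93fff.
	 */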

	/*
	 * Look in variable ranges
	 * Look for multiple ranges matching this address and pick the type
	 * as per MTRR precedence
	 */
	if (!(mtrr_state.enabled & 2)) {
		return mtrr_state.def_type;
	}

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask)) {
			continue;
		}

		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE) {
			return MTRR_TYPE_UNCACHABLE;
		}

		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		if (prev_match != curr_match) {
			return MTRR_TYPE_UNCACHABLE;
		}
	}
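
	/*
	 * Worked example of the mask check above (illustrative, assuming a
	 * 36-bit physical address width): a 256MB WB range at 3GB has
	 * base = 0xC0000000 and mask = 0xFF0000000.  The address 0xC8000000
	 * matches, since 0xC8000000 & 0xFF0000000 == 0xC0000000.  A lookup
	 * spanning 0xCFFFF000-0xD0001000 straddles the range's upper edge,
	 * so start_state != end_state and 0xFE is returned.
	 */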

	if (mtrr_tom2) {
		if (start >= (1ULL<<32) && (end < mtrr_tom2))
			return MTRR_TYPE_WRBACK;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state.def_type;
}

/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
	struct mtrr_var_range *vr;

	vr = mtrr_state.var_ranges;

	vr[index].base_lo = base_lo;
	vr[index].base_hi = base_hi;
	vr[index].mask_lo = mask_lo;
	vr[index].mask_hi = mask_hi;
}

static void
get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}
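
/*
 * Layout sketch for get_fixed_ranges() above (illustrative, not from the
 * original source): each rdmsr() fills two 32-bit words, i.e. eight
 * mtrr_type bytes, so the 22 words p[0..21] map onto the 88 fixed ranges:
 * p[0..1] is the 64K MSR (fixed_ranges[0..7]), p[2..5] the two 16K MSRs
 * (ranges 8..23) and p[6..21] the eight 4K MSRs (ranges 24..87).
 */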

void mtrr_save_fixed_ranges(void *info)
{
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);
}

static void print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step)
		printk(KERN_INFO "MTRR %05X-%05X %s\n",
			base, base + step - 1, mtrr_attrib_to_str(*types));
}

static void prepare_set(void);
static void post_set(void);

/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;
	unsigned long flags;

	vrs = mtrr_state.var_ranges;

	rdmsr(MTRRcap_MSR, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;
		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		mtrr_tom2 &= 0xffffff800000ULL;
	}
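
	/*
	 * Illustrative TOM2 example (hypothetical value, not from the
	 * original source): on a box with 6GB of RAM, MSR_K8_TOP_MEM2
	 * might read 0x0000000180000000.  The mask 0xffffff800000ULL
	 * keeps bits 23-47, clipping the value to an 8MB boundary, so
	 * mtrr_tom2 stays 0x180000000 here.
	 */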
	if (mtrr_show) {
		int high_width;

		printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
		if (mtrr_state.have_fixed) {
			printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
			       mtrr_state.enabled & 1 ? "en" : "dis");
			print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
			for (i = 0; i < 2; ++i)
				print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
			for (i = 0; i < 8; ++i)
				print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
		}
		printk(KERN_INFO "MTRR variable ranges %sabled:\n",
		       mtrr_state.enabled & 2 ? "en" : "dis");
		high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
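		/*
		 * Worked example for high_width (illustrative, assuming a
		 * 36-bit physical address width and PAGE_SHIFT == 12):
		 * size_or_mask is then 0xff000000, ffs() - 1 gives 24, and
		 * (24 - 20 + 3) / 4 = 1, so one extra hex digit is printed
		 * above the %05X page-frame field: 1 + 5 + 3 nibbles = 36
		 * bits of physical address.
		 */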
		for (i = 0; i < num_var_ranges; ++i) {
			if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
				printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				       i,
				       high_width,
				       mtrr_state.var_ranges[i].base_hi,
				       mtrr_state.var_ranges[i].base_lo >> 12,
				       high_width,
				       mtrr_state.var_ranges[i].mask_hi,
				       mtrr_state.var_ranges[i].mask_lo >> 12,
				       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
			else
				printk(KERN_INFO "MTRR %u disabled\n", i);
		}
		if (mtrr_tom2) {
			printk(KERN_INFO "TOM2: %016llx aka %lldM\n",
			       mtrr_tom2, mtrr_tom2>>20);
		}
	}
	mtrr_state_set = 1;

	/* PAT setup for BP. We need to go through sync steps here */
	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);
}

/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/* Doesn't attempt to pass an error out to MTRR users, because it's quite
   complicated in some cases and probably not worth it: the best error
   handling is to ignore it. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
}

/**
 * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs
 * see AMD publication no. 24593, chapter 3.2.1 for more information
 */
static inline void k8_enable_fixed_iorrs(void)
{
	unsigned lo, hi;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	mtrr_wrmsr(MSR_K8_SYSCFG, lo
			| K8_MTRRFIXRANGE_DRAM_ENABLE
			| K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
}

/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 *
 * If K8 extensions are wanted, update the K8 SYSCFG MSR also.
 * See AMD publication no. 24593, chapter 7.8.1, page 233 for more information.
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    (boot_cpu_data.x86 >= 0x0f && boot_cpu_data.x86 <= 0x11) &&
		    ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
			k8_enable_fixed_iorrs();
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}

/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask. */
	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
		  | mask_lo >> PAGE_SHIFT;

	/* This works correctly if size is a power of two, i.e. a
	   contiguous range. */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;
}
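
/*
 * Worked example for the mask arithmetic above (illustrative, assuming a
 * 36-bit physical address width and PAGE_SHIFT == 12): a 128MB range has
 * the physical mask 0xff8000000, read back as mask_hi = 0xf and
 * mask_lo = 0xf8000800 (valid bit set).  The shifted page-frame mask is
 * then 0xff000000 | 0xf00000 | 0xf8000 = 0xffff8000, so
 * *size = -0xffff8000 = 0x8000 pages = 128MB.
 */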

/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *) frs;
	bool changed = false;
	int block = -1, range;

	while (fixed_range_blocks[++block].ranges)
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *) saved++);

	return changed;
}
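
/*
 * Each step of the walk above consumes one 64-bit word per MSR via
 * saved++, matching the 8-bytes-per-MSR layout that get_fixed_ranges()
 * fills in.
 */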

/* Set the MSR pair relating to a var range. Returns true if
   changes are made */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
	       (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
	       (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}

static u32 deftype_lo, deftype_hi;

/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/* set_mtrr_restore restores the old value of MTRRdefType,
	   so to set it we fiddle with the saved value */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}

static unsigned long cr4 = 0;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache don't allow any interrupts - they
 * would run extremely slow and would only increase the pain. The caller must
 * ensure that local interrupts are disabled and are reenabled after post_set()
 * has been called.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/* Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots */

	spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}

static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Enable caches: 0xbfffffff clears CR0.CD (bit 30) */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	/* also set PAT */
	pat_init();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}

/**
 * generic_set_mtrr - set variable MTRR register on the local CPU.
 * @reg: The register to set.
 * @base: The base address of the region.
 * @size: The size of the region. If this is 0 the region is disabled.
 * @type: The type of the region.
 */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}
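
/*
 * Worked example for the encoding above (illustrative; with a 36-bit
 * physical address width size_and_mask would be 0xf00000): a 256MB WC
 * region at 3GB means base = 0xc0000 pages and size = 0x10000 pages, so
 * base_lo = (0xc0000 << 12) | 1 = 0xc0000001, mask_lo = (-0x10000 << 12)
 * | 0x800 = 0xf0000800 and mask_hi = (0xffff0000 & 0xf00000) >> 20 = 0xf,
 * i.e. the physical mask 0xff0000000 with the valid bit set.
 */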

int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/* For Intel PPro stepping <= 7, must be 4 MiB aligned
	   and not touch 0x70000000->0x7003FFFF */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/* Check upper bits of base and last are equal and lower bits are 0
	   for base and 1 for last */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}
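
/*
 * Worked example for the alignment loop above (illustrative): a 1MB
 * region at 1MB has base = 0x100 pages and last = 0x1ff, so both values
 * shift down to 0x1 and the check passes.  With size = 0xc0 pages
 * instead, last = 0x1bf and the loop stops at lbase = 4, last = 6,
 * which correctly rejects the unaligned request.
 */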

static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MTRRcap_MSR, config, dummy);
	return (config & (1 << 10));
}
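
/*
 * Bit 10 of MTRRcap is the WC bit, which advertises whether the
 * write-combining memory type is supported.
 */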

int positive_have_wrcomb(void)
{
	return 1;
}

/* generic structure... */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if = 1,
	.set_all = generic_set_all,
	.get = generic_get_mtrr,
	.get_free_region = generic_get_free_region,
	.set = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb = generic_have_wrcomb,
};