/*  Generic MTRR (Memory Type Range Register) driver.

    Copyright (C) 1997-2000  Richard Gooch
    Copyright (c) 2002	     Patrick Mochel

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Library General Public
    License as published by the Free Software Foundation; either
    version 2 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Library General Public License for more details.

    You should have received a copy of the GNU Library General Public
    License along with this library; if not, write to the Free
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
    The postal address is:
      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

    Source: "Pentium Pro Family Developer's Manual, Volume 3:
    Operating System Writer's Guide" (Intel document number 242692)

    This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>

    Source: Intel Architecture Software Developers Manual, Volume 3:
    System Programming Guide; Section 9.11. (1997 edition - PPro).
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/sort.h>

#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/kvm_para.h>
#include "mtrr.h"
u32 num_var_ranges = 0;

unsigned int mtrr_usage_table[MAX_VAR_RANGES];
static DEFINE_MUTEX(mtrr_mutex);

u64 size_or_mask, size_and_mask;

static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] = {};

struct mtrr_ops *mtrr_if = NULL;

static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type);

void set_mtrr_ops(struct mtrr_ops *ops)
{
	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
		mtrr_ops[ops->vendor] = ops;
}
/*  Returns non-zero if we have the write-combining memory type  */
static int have_wrcomb(void)
{
	struct pci_dev *dev;
	u8 rev;

	if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
		/* ServerWorks LE chipsets < rev 6 have problems with
		   write-combining.  Don't allow it and leave room for other
		   chipsets to be tagged */
		if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
			pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
			if (rev <= 5) {
				printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
				pci_dev_put(dev);
				return 0;
			}
		}
		/* Intel 450NX errata # 23.  Non-ascending cacheline evictions
		   to write-combining memory may result in data corruption */
		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
		    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
			printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
			pci_dev_put(dev);
			return 0;
		}
		pci_dev_put(dev);
	}
	return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
}
/*  This function sets num_var_ranges to the number of variable MTRRs  */
static void __init set_num_var_ranges(void)
{
	unsigned long config = 0, dummy;

	if (use_intel()) {
		rdmsr(MTRRcap_MSR, config, dummy);
	} else if (is_cpu(AMD))
		config = 2;
	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
		config = 8;
	num_var_ranges = config & 0xff;
}

static void __init init_table(void)
{
	int i, max;

	max = num_var_ranges;
	for (i = 0; i < max; i++)
		mtrr_usage_table[i] = 1;
}
struct set_mtrr_data {
	atomic_t	count;
	atomic_t	gate;
	unsigned long	smp_base;
	unsigned long	smp_size;
	unsigned int	smp_reg;
	mtrr_type	smp_type;
};

static void ipi_handler(void *info)
/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
    [RETURNS] Nothing.
*/
{
#ifdef CONFIG_SMP
	struct set_mtrr_data *data = info;
	unsigned long flags;

	local_irq_save(flags);

	atomic_dec(&data->count);
	while (!atomic_read(&data->gate))
		cpu_relax();

	/*  The master has cleared me to execute  */
	if (data->smp_reg != ~0U)
		mtrr_if->set(data->smp_reg, data->smp_base,
			     data->smp_size, data->smp_type);
	else
		mtrr_if->set_all();

	atomic_dec(&data->count);
	while (atomic_read(&data->gate))
		cpu_relax();

	atomic_dec(&data->count);
	local_irq_restore(flags);
#endif
}
static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
	return type1 == MTRR_TYPE_UNCACHABLE ||
	       type2 == MTRR_TYPE_UNCACHABLE ||
	       (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
	       (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
}
/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Send IPI to do the following:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
 * Meanwhile, they are waiting for that flag to be set. Once it's set, each
 * CPU goes through the transition of updating MTRRs. The CPU vendors may each
 * do it differently, so we call the mtrr_if->set() callback and let them take
 * care of it. When they're done, they again decrement data->count and wait
 * for data.gate to be reset.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate
 * flag. Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * simply becomes nops.
 */
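/*
 * Sketch of the handshake described above (summary only; it mirrors the code
 * below and in ipi_handler(), it is not an additional mechanism):
 *
 *	master (set_mtrr)		other CPUs (ipi_handler)
 *	------------------		------------------------
 *	count = N-1, gate = 0
 *	send IPI			irq off, count--, spin while !gate
 *	wait for count == 0
 *	count = N-1, gate = 1		see gate, update MTRRs, count--
 *	update own MTRRs		spin while gate
 *	wait for count == 0
 *	count = N-1, gate = 0		see gate clear, count--, irq on
 *	wait for count == 0, irq on
 */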
static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data;
	unsigned long flags;

	data.smp_reg = reg;
	data.smp_base = base;
	data.smp_size = size;
	data.smp_type = type;
	atomic_set(&data.count, num_booting_cpus() - 1);
	/* make sure data.count is visible before unleashing other CPUs */
	smp_wmb();
	atomic_set(&data.gate, 0);

	/*  Start the ball rolling on other CPUs  */
	if (smp_call_function(ipi_handler, &data, 0) != 0)
		panic("mtrr: timed out waiting for other CPUs\n");

	local_irq_save(flags);

	while (atomic_read(&data.count))
		cpu_relax();

	/* ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	/* do our MTRR business */

	/*
	 * We use this same function to initialize the mtrrs on boot.
	 * The state of the boot cpu's mtrrs has been saved, and we want
	 * to replicate it across all the APs.
	 * If we're doing that, @reg is set to something special...
	 */
	if (reg != ~0U)
		mtrr_if->set(reg, base, size, type);

	/* wait for the others */
	while (atomic_read(&data.count))
		cpu_relax();

	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 0);

	/*
	 * Wait here for everyone to have seen the gate change
	 * So we're the last ones to touch 'data'
	 */
	while (atomic_read(&data.count))
		cpu_relax();

	local_irq_restore(flags);
}
/**
 *	mtrr_add_page - Add a memory type region
 *	@base: Physical base address of region in pages (in units of 4 kB!)
 *	@size: Physical size of region in pages (4 kB)
 *	@type: Type of MTRR desired
 *	@increment: If this is true do usage counting on the region
 *
 *	Memory type region registers control the caching on newer Intel and
 *	non Intel processors. This function allows drivers to request that an
 *	MTRR be added. The details and hardware specifics of each processor's
 *	implementation are hidden from the caller, but nevertheless the
 *	caller should expect to need to provide a power of two size on an
 *	equivalent power of two boundary.
 *
 *	If the region cannot be added either because all regions are in use
 *	or the CPU cannot support it a negative value is returned. On success
 *	the register number for this entry is returned, but should be treated
 *	as opaque by the caller.
 *
 *	On a multiprocessor machine the changes are made to all processors.
 *	This is required on x86 by the Intel processors.
 *
 *	The available types are
 *
 *	%MTRR_TYPE_UNCACHABLE - No caching
 *
 *	%MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 *	%MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 *	%MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 *	BUGS: Needs a quiet flag for the cases where drivers do not mind
 *	failures and do not wish system log messages to be sent.
 */
int mtrr_add_page(unsigned long base, unsigned long size,
		  unsigned int type, bool increment)
{
	int i, replace, error;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	if (!mtrr_if)
		return -ENXIO;

	if ((error = mtrr_if->validate_add_page(base, size, type)))
		return error;

	if (type >= MTRR_NUM_TYPES) {
		printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
		return -EINVAL;
	}

	/*  If the type is WC, check that this processor supports it  */
	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
		printk(KERN_WARNING
		       "mtrr: your processor doesn't support write-combining\n");
		return -ENOSYS;
	}

	if (!size) {
		printk(KERN_WARNING "mtrr: zero sized request\n");
		return -EINVAL;
	}

	if (base & size_or_mask || size & size_or_mask) {
		printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
		return -EINVAL;
	}

	error = -EINVAL;
	replace = -1;

	/* No CPU hotplug when we change MTRR entries */
	get_online_cpus();

	/*  Search for existing MTRR  */
	mutex_lock(&mtrr_mutex);
	for (i = 0; i < num_var_ranges; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
			continue;
		/*  At this point we know there is some kind of overlap/enclosure  */
		if (base < lbase || base + size - 1 > lbase + lsize - 1) {
			if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
				/*  New region encloses an existing region  */
				if (type == ltype) {
					replace = replace == -1 ? i : -2;
					continue;
				}
				else if (types_compatible(type, ltype))
					continue;
			}
			printk(KERN_WARNING
			       "mtrr: 0x%lx000,0x%lx000 overlaps existing"
			       " 0x%lx000,0x%lx000\n", base, size, lbase,
			       lsize);
			goto out;
		}
		/*  New region is enclosed by an existing region  */
		if (ltype != type) {
			if (types_compatible(type, ltype))
				continue;
			printk(KERN_WARNING
			       "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
			       base, size, mtrr_attrib_to_str(ltype),
			       mtrr_attrib_to_str(type));
			goto out;
		}
		if (increment)
			++mtrr_usage_table[i];
		error = i;
		goto out;
	}
	/*  Search for an empty MTRR  */
	i = mtrr_if->get_free_region(base, size, replace);
	if (i >= 0) {
		set_mtrr(i, base, size, type);
		if (likely(replace < 0)) {
			mtrr_usage_table[i] = 1;
		} else {
			mtrr_usage_table[i] = mtrr_usage_table[replace];
			if (increment)
				mtrr_usage_table[i]++;
			if (unlikely(replace != i)) {
				set_mtrr(replace, 0, 0, 0);
				mtrr_usage_table[replace] = 0;
			}
		}
	} else
		printk(KERN_INFO "mtrr: no more MTRRs available\n");
	error = i;
 out:
	mutex_unlock(&mtrr_mutex);
	put_online_cpus();
	return error;
}
static int mtrr_check(unsigned long base, unsigned long size)
{
	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
		printk(KERN_WARNING
			"mtrr: size and base must be multiples of 4 kiB\n");
		printk(KERN_DEBUG
			"mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
		return -1;
	}
	return 0;
}
/**
 *	mtrr_add - Add a memory type region
 *	@base: Physical base address of region
 *	@size: Physical size of region
 *	@type: Type of MTRR desired
 *	@increment: If this is true do usage counting on the region
 *
 *	Memory type region registers control the caching on newer Intel and
 *	non Intel processors. This function allows drivers to request that an
 *	MTRR be added. The details and hardware specifics of each processor's
 *	implementation are hidden from the caller, but nevertheless the
 *	caller should expect to need to provide a power of two size on an
 *	equivalent power of two boundary.
 *
 *	If the region cannot be added either because all regions are in use
 *	or the CPU cannot support it a negative value is returned. On success
 *	the register number for this entry is returned, but should be treated
 *	as opaque by the caller.
 *
 *	On a multiprocessor machine the changes are made to all processors.
 *	This is required on x86 by the Intel processors.
 *
 *	The available types are
 *
 *	%MTRR_TYPE_UNCACHABLE - No caching
 *
 *	%MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 *	%MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 *	%MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 *	BUGS: Needs a quiet flag for the cases where drivers do not mind
 *	failures and do not wish system log messages to be sent.
 */
int
mtrr_add(unsigned long base, unsigned long size, unsigned int type,
	 bool increment)
{
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
			     increment);
}
/**
 *	mtrr_del_page - delete a memory type region
 *	@reg: Register returned by mtrr_add
 *	@base: Physical base address
 *	@size: Size of region
 *
 *	If register is supplied then base and size are ignored. This is
 *	how drivers should call it.
 *
 *	Releases an MTRR region. If the usage count drops to zero the
 *	register is freed and the region returns to default state.
 *	On success the register is returned, on failure a negative error
 *	code is returned.
 */
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;
	int error = -EINVAL;

	if (!mtrr_if)
		return -ENXIO;

	max = num_var_ranges;
	/* No CPU hotplug when we change MTRR entries */
	get_online_cpus();
	mutex_lock(&mtrr_mutex);
	if (reg < 0) {
		/*  Search for existing MTRR  */
		for (i = 0; i < max; ++i) {
			mtrr_if->get(i, &lbase, &lsize, &ltype);
			if (lbase == base && lsize == size) {
				reg = i;
				break;
			}
		}
		if (reg < 0) {
			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
			       size);
			goto out;
		}
	}
	if (reg >= max) {
		printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
		goto out;
	}
	mtrr_if->get(reg, &lbase, &lsize, &ltype);
	if (lsize < 1) {
		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
		goto out;
	}
	if (mtrr_usage_table[reg] < 1) {
		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
		goto out;
	}
	if (--mtrr_usage_table[reg] < 1)
		set_mtrr(reg, 0, 0, 0);
	error = reg;
 out:
	mutex_unlock(&mtrr_mutex);
	put_online_cpus();
	return error;
}
/**
 *	mtrr_del - delete a memory type region
 *	@reg: Register returned by mtrr_add
 *	@base: Physical base address
 *	@size: Size of region
 *
 *	If register is supplied then base and size are ignored. This is
 *	how drivers should call it.
 *
 *	Releases an MTRR region. If the usage count drops to zero the
 *	register is freed and the region returns to default state.
 *	On success the register is returned, on failure a negative error
 *	code is returned.
 */
int
mtrr_del(int reg, unsigned long base, unsigned long size)
{
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}

EXPORT_SYMBOL(mtrr_add);
EXPORT_SYMBOL(mtrr_del);
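/*
 * Illustrative driver usage of the exported interface above (not part of
 * this file; the address and size are made-up values):
 *
 *	int reg;
 *
 *	reg = mtrr_add(0xf8000000, 0x100000, MTRR_TYPE_WRCOMB, true);
 *	if (reg < 0)
 *		return reg;	(write-combining unavailable or no free MTRR)
 *	...
 *	mtrr_del(reg, 0xf8000000, 0x100000);
 */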
/*
 * These should be called implicitly, but we can't yet until all the initcall
 * stuff is done...
 */
static void __init init_ifs(void)
{
#ifndef CONFIG_X86_64
	amd_init_mtrr();
	cyrix_init_mtrr();
	centaur_init_mtrr();
#endif
}
/* The suspend/resume methods are only for CPUs without MTRR. CPUs using the
 * generic MTRR driver don't require this.
 */
struct mtrr_value {
	mtrr_type	ltype;
	unsigned long	lbase;
	unsigned long	lsize;
};

static struct mtrr_value mtrr_state[MAX_VAR_RANGES];

static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i,
			     &mtrr_state[i].lbase,
			     &mtrr_state[i].lsize,
			     &mtrr_state[i].ltype);
	}
	return 0;
}
static int mtrr_restore(struct sys_device *sysdev)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (mtrr_state[i].lsize)
			set_mtrr(i,
				 mtrr_state[i].lbase,
				 mtrr_state[i].lsize,
				 mtrr_state[i].ltype);
	}
	return 0;
}

static struct sysdev_driver mtrr_sysdev_driver = {
	.suspend	= mtrr_save,
	.resume		= mtrr_restore,
};

/* should be related to MTRR_VAR_RANGES nums */
#define RANGE_NUM 256
struct res_range {
	unsigned long start;
	unsigned long end;
};

static int __init
add_range(struct res_range *range, int nr_range, unsigned long start,
	  unsigned long end)
{
	/* out of slots */
	if (nr_range >= RANGE_NUM)
		return nr_range;

	range[nr_range].start = start;
	range[nr_range].end = end;

	nr_range++;

	return nr_range;
}
static int __init
add_range_with_merge(struct res_range *range, int nr_range, unsigned long start,
		     unsigned long end)
{
	int i;

	/* try to merge it with old one */
	for (i = 0; i < nr_range; i++) {
		unsigned long final_start, final_end;
		unsigned long common_start, common_end;

		if (!range[i].end)
			continue;

		common_start = max(range[i].start, start);
		common_end = min(range[i].end, end);
		if (common_start > common_end + 1)
			continue;

		final_start = min(range[i].start, start);
		final_end = max(range[i].end, end);

		range[i].start = final_start;
		range[i].end = final_end;
		return nr_range;
	}

	/* need to add that */
	return add_range(range, nr_range, start, end);
}
static void __init
subtract_range(struct res_range *range, unsigned long start, unsigned long end)
{
	int i, j;

	for (j = 0; j < RANGE_NUM; j++) {
		if (!range[j].end)
			continue;

		if (start <= range[j].start && end >= range[j].end) {
			range[j].start = 0;
			range[j].end = 0;
			continue;
		}

		if (start <= range[j].start && end < range[j].end &&
		    range[j].start < end + 1) {
			range[j].start = end + 1;
			continue;
		}

		if (start > range[j].start && end >= range[j].end &&
		    range[j].end > start - 1) {
			range[j].end = start - 1;
			continue;
		}

		if (start > range[j].start && end < range[j].end) {
			/* find the new spare */
			for (i = 0; i < RANGE_NUM; i++) {
				if (range[i].end == 0)
					break;
			}
			if (i < RANGE_NUM) {
				range[i].end = range[j].end;
				range[i].start = end + 1;
			} else {
				printk(KERN_ERR "ran out of slots in ranges\n");
			}
			range[j].end = start - 1;
			continue;
		}
	}
}
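/*
 * Illustrative example (made-up numbers): subtracting [0x100, 0x1ff] from a
 * table holding the single range [0x000, 0x3ff] hits the last case above,
 * shrinking slot j to [0x000, 0x0ff] and storing the second piece
 * [0x200, 0x3ff] in the first free slot found.
 */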
static int __init cmp_range(const void *x1, const void *x2)
{
	const struct res_range *r1 = x1;
	const struct res_range *r2 = x2;
	long start1, start2;

	start1 = r1->start;
	start2 = r2->start;

	return start1 - start2;
}
struct var_mtrr_range_state {
	unsigned long base_pfn;
	unsigned long size_pfn;
	mtrr_type type;
};

struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
static int __initdata debug_print;
static int __init
x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
		       unsigned long extra_remove_base,
		       unsigned long extra_remove_size)
{
	unsigned long i, base, size;
	mtrr_type type;

	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		if (type != MTRR_TYPE_WRBACK)
			continue;
		base = range_state[i].base_pfn;
		size = range_state[i].size_pfn;
		nr_range = add_range_with_merge(range, nr_range, base,
						base + size - 1);
	}
	if (debug_print) {
		printk(KERN_DEBUG "After WB checking\n");
		for (i = 0; i < nr_range; i++)
			printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n",
			       range[i].start, range[i].end + 1);
	}

	/* take out UC ranges */
	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		if (type != MTRR_TYPE_UNCACHABLE)
			continue;
		size = range_state[i].size_pfn;
		if (!size)
			continue;
		base = range_state[i].base_pfn;
		subtract_range(range, base, base + size - 1);
	}
	if (extra_remove_size)
		subtract_range(range, extra_remove_base,
			       extra_remove_base + extra_remove_size - 1);

	/* get new range num */
	nr_range = 0;
	for (i = 0; i < RANGE_NUM; i++) {
		if (!range[i].end)
			continue;
		nr_range++;
	}
	if (debug_print) {
		printk(KERN_DEBUG "After UC checking\n");
		for (i = 0; i < nr_range; i++)
			printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n",
			       range[i].start, range[i].end + 1);
	}

	/* sort the ranges */
	sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
	if (debug_print) {
		printk(KERN_DEBUG "After sorting\n");
		for (i = 0; i < nr_range; i++)
			printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n",
			       range[i].start, range[i].end + 1);
	}

	/* clear out the unused entries */
	for (i = nr_range; i < RANGE_NUM; i++)
		memset(&range[i], 0, sizeof(range[i]));

	return nr_range;
}
static struct res_range __initdata range[RANGE_NUM];

#ifdef CONFIG_MTRR_SANITIZER

static unsigned long __init
sum_ranges(struct res_range *range, int nr_range)
{
	unsigned long sum;
	int i;

	sum = 0;
	for (i = 0; i < nr_range; i++)
		sum += range[i].end + 1 - range[i].start;

	return sum;
}
static int enable_mtrr_cleanup __initdata =
	CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT;

static int __init disable_mtrr_cleanup_setup(char *str)
{
	if (enable_mtrr_cleanup != -1)
		enable_mtrr_cleanup = 0;
	return 0;
}
early_param("disable_mtrr_cleanup", disable_mtrr_cleanup_setup);

static int __init enable_mtrr_cleanup_setup(char *str)
{
	if (enable_mtrr_cleanup != -1)
		enable_mtrr_cleanup = 1;
	return 0;
}
early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);

static int __init mtrr_cleanup_debug_setup(char *str)
{
	debug_print = 1;
	return 0;
}
early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup);
struct var_mtrr_state {
	unsigned long	range_startk;
	unsigned long	range_sizek;
	unsigned long	chunk_sizek;
	unsigned long	gran_sizek;
	unsigned int	reg;
};
static void __init
set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
	     unsigned char type, unsigned int address_bits)
{
	u32 base_lo, base_hi, mask_lo, mask_hi;
	u64 base, mask;

	if (!sizek) {
		fill_mtrr_var_range(reg, 0, 0, 0, 0);
		return;
	}

	mask = (1ULL << address_bits) - 1;
	mask &= ~((((u64)sizek) << 10) - 1);

	base = ((u64)basek) << 10;

	base |= type;
	mask |= 0x800;

	base_lo = base & ((1ULL<<32) - 1);
	base_hi = base >> 32;

	mask_lo = mask & ((1ULL<<32) - 1);
	mask_hi = mask >> 32;

	fill_mtrr_var_range(reg, base_lo, base_hi, mask_lo, mask_hi);
}
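/*
 * Worked example for the base/mask split above (illustrative values only):
 * with address_bits = 36, basek = 0x200000 (2 GB), sizek = 0x100000 (1 GB)
 * and type = MTRR_TYPE_WRBACK:
 *
 *	mask = (1ULL << 36) - 1			-> 0xFFFFFFFFF
 *	mask &= ~((0x100000ULL << 10) - 1)	-> 0xFC0000000
 *	base = 0x200000ULL << 10		-> 0x080000000
 *
 * giving base_lo = 0x80000006 (type in the low bits), base_hi = 0x0,
 * mask_lo = 0xC0000800 (valid bit set), mask_hi = 0xF.
 */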
static void __init
save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
	      unsigned char type)
{
	range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10);
	range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10);
	range_state[reg].type = type;
}
static void __init
set_var_mtrr_all(unsigned int address_bits)
{
	unsigned long basek, sizek;
	unsigned char type;
	unsigned int reg;

	for (reg = 0; reg < num_var_ranges; reg++) {
		basek = range_state[reg].base_pfn << (PAGE_SHIFT - 10);
		sizek = range_state[reg].size_pfn << (PAGE_SHIFT - 10);
		type = range_state[reg].type;

		set_var_mtrr(reg, basek, sizek, type, address_bits);
	}
}
static unsigned int __init
range_to_mtrr(unsigned int reg, unsigned long range_startk,
	      unsigned long range_sizek, unsigned char type)
{
	if (!range_sizek || (reg >= num_var_ranges))
		return reg;

	while (range_sizek) {
		unsigned long max_align, align;
		unsigned long sizek;

		/* Compute the maximum size I can make a range */
		if (range_startk)
			max_align = ffs(range_startk) - 1;
		else
			max_align = 32;
		align = fls(range_sizek) - 1;
		if (align > max_align)
			align = max_align;

		sizek = 1 << align;
		if (debug_print)
			printk(KERN_DEBUG "Setting variable MTRR %d, "
				"base: %ldMB, range: %ldMB, type %s\n",
				reg, range_startk >> 10, sizek >> 10,
				(type == MTRR_TYPE_UNCACHABLE) ? "UC" :
				    ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other")
				);
		save_var_mtrr(reg++, range_startk, sizek, type);
		range_startk += sizek;
		range_sizek -= sizek;
		if (reg >= num_var_ranges)
			break;
	}
	return reg;
}
static unsigned __init
range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
			unsigned long sizek)
{
	unsigned long hole_basek, hole_sizek;
	unsigned long second_basek, second_sizek;
	unsigned long range0_basek, range0_sizek;
	unsigned long range_basek, range_sizek;
	unsigned long chunk_sizek;
	unsigned long gran_sizek;

	hole_basek = 0;
	hole_sizek = 0;
	second_basek = 0;
	second_sizek = 0;
	chunk_sizek = state->chunk_sizek;
	gran_sizek = state->gran_sizek;

	/* align with gran size, to prevent small blocks from using up MTRRs */
	range_basek = ALIGN(state->range_startk, gran_sizek);
	if ((range_basek > basek) && basek)
		return second_sizek;
	state->range_sizek -= (range_basek - state->range_startk);
	range_sizek = ALIGN(state->range_sizek, gran_sizek);

	while (range_sizek > state->range_sizek) {
		range_sizek -= gran_sizek;
		if (!range_sizek)
			return 0;
	}
	state->range_sizek = range_sizek;

	/* try to append some small hole */
	range0_basek = state->range_startk;
	range0_sizek = ALIGN(state->range_sizek, chunk_sizek);

	/* no increase */
	if (range0_sizek == state->range_sizek) {
		if (debug_print)
			printk(KERN_DEBUG "rangeX: %016lx - %016lx\n",
				range0_basek<<10,
				(range0_basek + state->range_sizek)<<10);
		state->reg = range_to_mtrr(state->reg, range0_basek,
				state->range_sizek, MTRR_TYPE_WRBACK);
		return 0;
	}

	/* only cut back, when it is not the last */
	if (sizek) {
		while (range0_basek + range0_sizek > (basek + sizek)) {
			if (range0_sizek >= chunk_sizek)
				range0_sizek -= chunk_sizek;
			else
				range0_sizek = 0;

			if (!range0_sizek)
				break;
		}
	}

second_try:
	range_basek = range0_basek + range0_sizek;

	/* one hole in the middle */
	if (range_basek > basek && range_basek <= (basek + sizek))
		second_sizek = range_basek - basek;

	if (range0_sizek > state->range_sizek) {

		/* one hole in middle or at end */
		hole_sizek = range0_sizek - state->range_sizek - second_sizek;

		/* hole size should be less than half of range0 size */
		if (hole_sizek > (range0_sizek >> 1) &&
		    range0_sizek >= chunk_sizek) {
			range0_sizek -= chunk_sizek;
			second_sizek = 0;
			hole_sizek = 0;

			goto second_try;
		}
	}

	if (range0_sizek) {
		if (debug_print)
			printk(KERN_DEBUG "range0: %016lx - %016lx\n",
				range0_basek<<10,
				(range0_basek + range0_sizek)<<10);
		state->reg = range_to_mtrr(state->reg, range0_basek,
				range0_sizek, MTRR_TYPE_WRBACK);
	}

	if (range0_sizek < state->range_sizek) {
		/* need to handle left over */
		range_sizek = state->range_sizek - range0_sizek;

		if (debug_print)
			printk(KERN_DEBUG "range: %016lx - %016lx\n",
				range_basek<<10,
				(range_basek + range_sizek)<<10);
		state->reg = range_to_mtrr(state->reg, range_basek,
				range_sizek, MTRR_TYPE_WRBACK);
	}

	if (hole_sizek) {
		hole_basek = range_basek - hole_sizek - second_sizek;
		if (debug_print)
			printk(KERN_DEBUG "hole: %016lx - %016lx\n",
				hole_basek<<10,
				(hole_basek + hole_sizek)<<10);
		state->reg = range_to_mtrr(state->reg, hole_basek,
				hole_sizek, MTRR_TYPE_UNCACHABLE);
	}

	return second_sizek;
}
static void __init
set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn,
		   unsigned long size_pfn)
{
	unsigned long basek, sizek;
	unsigned long second_sizek = 0;

	if (state->reg >= num_var_ranges)
		return;

	basek = base_pfn << (PAGE_SHIFT - 10);
	sizek = size_pfn << (PAGE_SHIFT - 10);

	/* See if I can merge with the last range */
	if ((basek <= 1024) ||
	    (state->range_startk + state->range_sizek == basek)) {
		unsigned long endk = basek + sizek;
		state->range_sizek = endk - state->range_startk;
		return;
	}
	/* Write the range mtrrs */
	if (state->range_sizek != 0)
		second_sizek = range_to_mtrr_with_hole(state, basek, sizek);

	/* Allocate an msr */
	state->range_startk = basek + second_sizek;
	state->range_sizek = sizek - second_sizek;
}
/* minimum size of an mtrr block that can take a hole */
static u64 mtrr_chunk_size __initdata = (256ULL<<20);

static int __init parse_mtrr_chunk_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	mtrr_chunk_size = memparse(p, &p);
	return 0;
}
early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt);

/* granularity of mtrr blocks */
static u64 mtrr_gran_size __initdata;

static int __init parse_mtrr_gran_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	mtrr_gran_size = memparse(p, &p);
	return 0;
}
early_param("mtrr_gran_size", parse_mtrr_gran_size_opt);

static int nr_mtrr_spare_reg __initdata =
	CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT;

static int __init parse_mtrr_spare_reg(char *arg)
{
	if (arg)
		nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0);
	return 0;
}

early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg);
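/*
 * Example kernel command line using the knobs registered above (the values
 * are illustrative only; the parameter names are the ones defined here):
 *
 *	mtrr_gran_size=64M mtrr_chunk_size=256M mtrr_spare_reg_nr=1
 */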
static int __init
x86_setup_var_mtrrs(struct res_range *range, int nr_range,
		    u64 chunk_size, u64 gran_size)
{
	struct var_mtrr_state var_state;
	int i;
	int num_reg;

	var_state.range_startk	= 0;
	var_state.range_sizek	= 0;
	var_state.reg		= 0;
	var_state.chunk_sizek	= chunk_size >> 10;
	var_state.gran_sizek	= gran_size >> 10;

	memset(range_state, 0, sizeof(range_state));

	/* Write the range etc */
	for (i = 0; i < nr_range; i++)
		set_var_mtrr_range(&var_state, range[i].start,
				   range[i].end - range[i].start + 1);

	/* Write the last range */
	if (var_state.range_sizek != 0)
		range_to_mtrr_with_hole(&var_state, 0, 0);

	num_reg = var_state.reg;
	/* Clear out the extra MTRRs */
	while (var_state.reg < num_var_ranges) {
		save_var_mtrr(var_state.reg, 0, 0, 0);
		var_state.reg++;
	}

	return num_reg;
}
{
1163 unsigned long gran_sizek
;
1164 unsigned long chunk_sizek
;
1165 unsigned long lose_cover_sizek
;
1166 unsigned int num_reg
;
1171 * gran_size: 1M, 2M, ..., 2G
1172 * chunk size: gran_size, ..., 2G
1173 * so we need (1+12)*6
1175 #define NUM_RESULT 78
1176 #define PSHIFT (PAGE_SHIFT - 10)
1178 static struct mtrr_cleanup_result __initdata result
[NUM_RESULT
];
1179 static struct res_range __initdata range_new
[RANGE_NUM
];
1180 static unsigned long __initdata min_loss_pfn
[RANGE_NUM
];
static int __init mtrr_cleanup(unsigned address_bits)
{
	unsigned long extra_remove_base, extra_remove_size;
	unsigned long i, base, size, def, dummy;
	mtrr_type type;
	int nr_range, nr_range_new;
	u64 chunk_size, gran_size;
	unsigned long range_sums, range_sums_new;
	int index_good;
	int num_reg_good;

	/* extra one for all 0 */
	int num[MTRR_NUM_TYPES + 1];

	if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
		return 0;
	rdmsr(MTRRdefType_MSR, def, dummy);
	def &= 0xff;
	if (def != MTRR_TYPE_UNCACHABLE)
		return 0;

	/* get it and store it aside */
	memset(range_state, 0, sizeof(range_state));
	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i, &base, &size, &type);
		range_state[i].base_pfn = base;
		range_state[i].size_pfn = size;
		range_state[i].type = type;
	}

	/* check entries number */
	memset(num, 0, sizeof(num));
	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		size = range_state[i].size_pfn;
		if (type >= MTRR_NUM_TYPES)
			continue;
		if (!size)
			type = MTRR_NUM_TYPES;
		num[type]++;
	}

	/* check if we got UC entries */
	if (!num[MTRR_TYPE_UNCACHABLE])
		return 0;

	/* check if we only had WB and UC */
	if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
	    num_var_ranges - num[MTRR_NUM_TYPES])
		return 0;

	memset(range, 0, sizeof(range));
	extra_remove_size = 0;
	if (mtrr_tom2) {
		extra_remove_base = 1 << (32 - PAGE_SHIFT);
		extra_remove_size =
			(mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base;
	}
	nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base,
					  extra_remove_size);
	range_sums = sum_ranges(range, nr_range);
	printk(KERN_INFO "total RAM covered: %ldM\n",
	       range_sums >> (20 - PAGE_SHIFT));

	if (mtrr_chunk_size && mtrr_gran_size) {
		int num_reg;

		i = 0;
		/* convert ranges to var ranges state */
		num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size,
					      mtrr_gran_size);

		/* we got new settings in range_state; check them */
		memset(range_new, 0, sizeof(range_new));
		nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
						      extra_remove_base,
						      extra_remove_size);
		range_sums_new = sum_ranges(range_new, nr_range_new);

		result[i].chunk_sizek = mtrr_chunk_size >> 10;
		result[i].gran_sizek = mtrr_gran_size >> 10;
		result[i].num_reg = num_reg;
		if (range_sums < range_sums_new) {
			result[i].lose_cover_sizek =
				(range_sums_new - range_sums) << PSHIFT;
			result[i].bad = 1;
		} else
			result[i].lose_cover_sizek =
				(range_sums - range_sums_new) << PSHIFT;

		printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t",
		       result[i].bad ? "*BAD*" : " ",
		       result[i].gran_sizek >> 10,
		       result[i].chunk_sizek >> 10);
		printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ldM\n",
		       result[i].num_reg, result[i].bad ? "-" : "",
		       result[i].lose_cover_sizek >> 10);
		if (!result[i].bad) {
			set_var_mtrr_all(address_bits);
			return 1;
		}
		printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
		       "will find optimal one\n");
		memset(result, 0, sizeof(result[0]));
	}

	i = 0;
	memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn));
	memset(result, 0, sizeof(result));
	for (gran_size = (1ULL<<20); gran_size < (1ULL<<32); gran_size <<= 1) {
		for (chunk_size = gran_size; chunk_size < (1ULL<<32);
		     chunk_size <<= 1) {
			int num_reg;

			if (debug_print)
				printk(KERN_INFO
				       "\ngran_size: %lldM chunk_size: %lldM\n",
				       gran_size >> 20, chunk_size >> 20);
			if (i >= NUM_RESULT)
				continue;

			/* convert ranges to var ranges state */
			num_reg = x86_setup_var_mtrrs(range, nr_range,
						      chunk_size, gran_size);

			/* we got new settings in range_state; check them */
			memset(range_new, 0, sizeof(range_new));
			nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
					extra_remove_base, extra_remove_size);
			range_sums_new = sum_ranges(range_new, nr_range_new);

			result[i].chunk_sizek = chunk_size >> 10;
			result[i].gran_sizek = gran_size >> 10;
			result[i].num_reg = num_reg;
			if (range_sums < range_sums_new) {
				result[i].lose_cover_sizek =
					(range_sums_new - range_sums) << PSHIFT;
				result[i].bad = 1;
			} else
				result[i].lose_cover_sizek =
					(range_sums - range_sums_new) << PSHIFT;

			/* double check it */
			if (!result[i].bad && !result[i].lose_cover_sizek) {
				if (nr_range_new != nr_range ||
				    memcmp(range, range_new, sizeof(range)))
					result[i].bad = 1;
			}

			if (!result[i].bad && (range_sums - range_sums_new <
					       min_loss_pfn[num_reg]))
				min_loss_pfn[num_reg] =
					range_sums - range_sums_new;

			i++;
		}
	}

	/* print out all */
	for (i = 0; i < NUM_RESULT; i++) {
		printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t",
		       result[i].bad ? "*BAD* " : " ",
		       result[i].gran_sizek >> 10,
		       result[i].chunk_sizek >> 10);
		printk(KERN_CONT "num_reg: %d \tlose RAM: %s%ldM\n",
		       result[i].num_reg, result[i].bad ? "-" : "",
		       result[i].lose_cover_sizek >> 10);
	}

	/* try to find the optimal index */
	if (nr_mtrr_spare_reg >= num_var_ranges)
		nr_mtrr_spare_reg = num_var_ranges - 1;
	num_reg_good = -1;
	for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
		if (!min_loss_pfn[i]) {
			num_reg_good = i;
			break;
		}
	}

	index_good = -1;
	if (num_reg_good != -1) {
		for (i = 0; i < NUM_RESULT; i++) {
			if (!result[i].bad &&
			    result[i].num_reg == num_reg_good &&
			    !result[i].lose_cover_sizek) {
				index_good = i;
				break;
			}
		}
	}

	if (index_good != -1) {
		printk(KERN_INFO "Found optimal setting for mtrr clean up\n");
		i = index_good;
		printk(KERN_INFO "gran_size: %ldM \tchunk_size: %ldM \t",
		       result[i].gran_sizek >> 10,
		       result[i].chunk_sizek >> 10);
		printk(KERN_CONT "num_reg: %d \tlose RAM: %ldM\n",
		       result[i].num_reg,
		       result[i].lose_cover_sizek >> 10);
		/* convert ranges to var ranges state */
		chunk_size = result[i].chunk_sizek;
		chunk_size <<= 10;
		gran_size = result[i].gran_sizek;
		gran_size <<= 10;
		x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
		set_var_mtrr_all(address_bits);
		return 1;
	}

	printk(KERN_INFO "mtrr_cleanup: cannot find optimal value\n");
	printk(KERN_INFO "please specify mtrr_gran_size/mtrr_chunk_size\n");

	return 0;
}
#else
static int __init mtrr_cleanup(unsigned address_bits)
{
	return 0;
}
#endif
static int __initdata changed_by_mtrr_cleanup;

static int disable_mtrr_trim;

static int __init disable_mtrr_trim_setup(char *str)
{
	disable_mtrr_trim = 1;
	return 0;
}
early_param("disable_mtrr_trim", disable_mtrr_trim_setup);
/*
 * Newer AMD K8s and later CPUs have a special magic MSR way to force WB
 * for memory >4GB. Check for that here.
 * Note this won't check whether the MTRRs below 4GB (where the magic bit
 * doesn't apply) are wrong, but so far we don't know of any such case in
 * the wild.
 */
#define Tom2Enabled (1U << 21)
#define Tom2ForceMemTypeWB (1U << 22)

int __init amd_special_default_mtrr(void)
{
	u32 l, h;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return 0;
	if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
		return 0;
	/* In case some hypervisor doesn't pass SYSCFG through */
	if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
		return 0;
	/*
	 * Memory between 4GB and top of mem is forced WB by this magic bit.
	 * Reserved before K8RevF, but should be zero there.
	 */
	if ((l & (Tom2Enabled | Tom2ForceMemTypeWB)) ==
	    (Tom2Enabled | Tom2ForceMemTypeWB))
		return 1;
	return 0;
}
static u64 __init real_trim_memory(unsigned long start_pfn,
				   unsigned long limit_pfn)
{
	u64 trim_start, trim_size;

	trim_start = start_pfn;
	trim_start <<= PAGE_SHIFT;
	trim_size = limit_pfn;
	trim_size <<= PAGE_SHIFT;
	trim_size -= trim_start;

	return e820_update_range(trim_start, trim_size, E820_RAM,
				 E820_RESERVED);
}
/**
 * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
 * @end_pfn: ending page frame number
 *
 * Some buggy BIOSes don't set up the MTRRs properly for systems with certain
 * memory configurations.  This routine checks that the highest MTRR matches
 * the end of memory, to make sure the MTRRs having a write back type cover
 * all of the memory the kernel is intending to use.  If not, it'll trim any
 * memory off the end by adjusting end_pfn, removing it from the kernel's
 * allocation pools, warning the user with an obnoxious message.
 */
int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
{
	unsigned long i, base, size, highest_pfn = 0, def, dummy;
	mtrr_type type;
	int nr_range;
	u64 total_trim_size;

	/* extra one for all 0 */
	int num[MTRR_NUM_TYPES + 1];
	/*
	 * Make sure we only trim uncachable memory on machines that
	 * support the Intel MTRR architecture:
	 */
	if (!is_cpu(INTEL) || disable_mtrr_trim)
		return 0;
	rdmsr(MTRRdefType_MSR, def, dummy);
	def &= 0xff;
	if (def != MTRR_TYPE_UNCACHABLE)
		return 0;

	/* get it and store it aside */
	memset(range_state, 0, sizeof(range_state));
	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i, &base, &size, &type);
		range_state[i].base_pfn = base;
		range_state[i].size_pfn = size;
		range_state[i].type = type;
	}

	/* Find highest cached pfn */
	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		if (type != MTRR_TYPE_WRBACK)
			continue;
		base = range_state[i].base_pfn;
		size = range_state[i].size_pfn;
		if (highest_pfn < base + size)
			highest_pfn = base + size;
	}

	/* kvm/qemu doesn't have mtrr set right, don't trim them all */
	if (!highest_pfn) {
		WARN(!kvm_para_available(), KERN_WARNING
			"WARNING: strange, CPU MTRRs all blank?\n");
		return 0;
	}

	/* check entries number */
	memset(num, 0, sizeof(num));
	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		if (type >= MTRR_NUM_TYPES)
			continue;
		size = range_state[i].size_pfn;
		if (!size)
			type = MTRR_NUM_TYPES;
		num[type]++;
	}

	/* no entry for WB? */
	if (!num[MTRR_TYPE_WRBACK])
		return 0;

	/* check if we only had WB and UC */
	if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
	    num_var_ranges - num[MTRR_NUM_TYPES])
		return 0;

	memset(range, 0, sizeof(range));
	nr_range = 0;
	if (mtrr_tom2) {
		range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT));
		range[nr_range].end = (mtrr_tom2 >> PAGE_SHIFT) - 1;
		if (highest_pfn < range[nr_range].end + 1)
			highest_pfn = range[nr_range].end + 1;
		nr_range++;
	}
	nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);

	total_trim_size = 0;
	/* check the head */
	if (range[0].start)
		total_trim_size += real_trim_memory(0, range[0].start);
	/* check the holes */
	for (i = 0; i < nr_range - 1; i++) {
		if (range[i].end + 1 < range[i+1].start)
			total_trim_size += real_trim_memory(range[i].end + 1,
							    range[i+1].start);
	}
	/* check the top */
	i = nr_range - 1;
	if (range[i].end + 1 < end_pfn)
		total_trim_size += real_trim_memory(range[i].end + 1,
						    end_pfn);

	if (total_trim_size) {
		printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover"
			" all of memory, losing %lluMB of RAM.\n",
			total_trim_size >> 20);

		if (!changed_by_mtrr_cleanup)
			WARN_ON(1);

		printk(KERN_INFO "update e820 for mtrr\n");
		update_e820();

		return 1;
	}

	return 0;
}
/**
 * mtrr_bp_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 */
void __init mtrr_bp_init(void)
{
	u32 phys_addr;

	init_ifs();

	phys_addr = 32;

	if (cpu_has_mtrr) {
		mtrr_if = &generic_mtrr_ops;
		size_or_mask = 0xff000000;	/* 36 bits */
		size_and_mask = 0x00f00000;
		phys_addr = 36;

		/* This is an AMD specific MSR, but we assume(hope?) that
		   Intel will implement it too when they extend the address
		   bus */
		if (cpuid_eax(0x80000000) >= 0x80000008) {
			phys_addr = cpuid_eax(0x80000008) & 0xff;
			/* CPUID workaround for Intel 0F33/0F34 CPU */
			if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			    boot_cpu_data.x86 == 0xF &&
			    boot_cpu_data.x86_model == 0x3 &&
			    (boot_cpu_data.x86_mask == 0x3 ||
			     boot_cpu_data.x86_mask == 0x4))
				phys_addr = 36;

			size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
			size_and_mask = ~size_or_mask & 0xfffff00000ULL;
		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
			   boot_cpu_data.x86 == 6) {
			/* VIA C* family have Intel style MTRRs, but
			   don't support PAE */
			size_or_mask = 0xfff00000;	/* 32 bits */
			size_and_mask = 0;
			phys_addr = 32;
		}
	} else {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (cpu_has_k6_mtrr) {
				/* Pre-Athlon (K6) AMD CPU MTRRs */
				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CENTAUR:
			if (cpu_has_centaur_mcr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CYRIX:
			if (cpu_has_cyrix_arr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		default:
			break;
		}
	}

	if (mtrr_if) {
		set_num_var_ranges();
		init_table();
		if (use_intel()) {
			get_mtrr_state();

			if (mtrr_cleanup(phys_addr)) {
				changed_by_mtrr_cleanup = 1;
				mtrr_if->set_all();
			}
		}
	}
}
void mtrr_ap_init(void)
{
	unsigned long flags;

	if (!mtrr_if || !use_intel())
		return;
	/*
	 * Ideally we should hold mtrr_mutex here to avoid MTRR entries being
	 * changed, but this routine is called at CPU boot time, and holding
	 * the lock there breaks it. This routine is called in two cases:
	 * 1. very early in software resume, when there are absolutely no MTRR
	 *    entry changes;
	 * 2. at CPU hot-add time. We let mtrr_add/del_page hold the cpuhotplug
	 *    lock to prevent MTRR entry changes.
	 */
	local_irq_save(flags);

	mtrr_if->set_all();

	local_irq_restore(flags);
}
/*
 * Save current fixed-range MTRR state of the BSP
 */
void mtrr_save_state(void)
{
	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
}
static int __init mtrr_init_finalize(void)
{
	if (!mtrr_if)
		return 0;
	if (use_intel()) {
		if (!changed_by_mtrr_cleanup)
			mtrr_state_warn();
	} else {
		/* These CPUs have no MTRRs and don't seem to support SMP.
		 * They have CPU-specific drivers, so we use a tricky method
		 * to support suspend/resume for them.
		 * TBD: is there any system with such a CPU that supports
		 * suspend/resume? If not, we should remove this code.
		 */
		sysdev_driver_register(&cpu_sysdev_class,
				       &mtrr_sysdev_driver);
	}
	return 0;
}
subsys_initcall(mtrr_init_finalize);