/* Generic MTRR (Memory Type Range Register) driver.

   Copyright (C) 1997-2000 Richard Gooch
   Copyright (c) 2002 Patrick Mochel

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with this library; if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

   Richard Gooch may be reached by email at rgooch@atnf.csiro.au
   The postal address is:
     Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

   Source: "Pentium Pro Family Developer's Manual, Volume 3:
   Operating System Writer's Guide" (Intel document number 242692),
   section 11.11.7

   This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
   on 6-7 March 2002.
   Source: Intel Architecture Software Developer's Manual, Volume 3:
   System Programming Guide; Section 9.11. (1997 edition - PPro).
*/

#define DEBUG

#include <linux/types.h> /* FIXME: kvm_para.h needs this */

#include <linux/stop_machine.h>
#include <linux/kvm_para.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/sort.h>
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/syscore_ops.h>

#include <asm/cpufeature.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/pat.h>

#include "mtrr.h"

/* arch_phys_wc_add returns an MTRR register index plus this offset. */
#define MTRR_TO_PHYS_WC_OFFSET 1000

u32 num_var_ranges;
static bool __mtrr_enabled;

static bool mtrr_enabled(void)
{
        return __mtrr_enabled;
}

unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
static DEFINE_MUTEX(mtrr_mutex);

u64 size_or_mask, size_and_mask;
static bool mtrr_aps_delayed_init;

static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;

const struct mtrr_ops *mtrr_if;

static void set_mtrr(unsigned int reg, unsigned long base,
                     unsigned long size, mtrr_type type);

void __init set_mtrr_ops(const struct mtrr_ops *ops)
{
        if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
                mtrr_ops[ops->vendor] = ops;
}

/* Returns non-zero if we have the write-combining memory type */
static int have_wrcomb(void)
{
        struct pci_dev *dev;

        dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
        if (dev != NULL) {
                /*
                 * ServerWorks LE chipsets < rev 6 have problems with
                 * write-combining. Don't allow it and leave room for other
                 * chipsets to be tagged.
                 */
                if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
                    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE &&
                    dev->revision <= 5) {
                        pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
                        pci_dev_put(dev);
                        return 0;
                }
                /*
                 * Intel 450NX errata #23. Non-ascending cacheline evictions
                 * to write-combining memory may result in data corruption.
                 */
                if (dev->vendor == PCI_VENDOR_ID_INTEL &&
                    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
                        pr_info("mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
                        pci_dev_put(dev);
                        return 0;
                }
                pci_dev_put(dev);
        }
        return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
}

/* This function sets num_var_ranges, the number of variable MTRRs. */
static void __init set_num_var_ranges(void)
{
        unsigned long config = 0, dummy;

        if (use_intel())
                rdmsr(MSR_MTRRcap, config, dummy);
        else if (is_cpu(AMD))
                config = 2;
        else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
                config = 8;

        num_var_ranges = config & 0xff;
}

static void __init init_table(void)
{
        int i, max;

        max = num_var_ranges;
        for (i = 0; i < max; i++)
                mtrr_usage_table[i] = 1;
}

struct set_mtrr_data {
        unsigned long   smp_base;
        unsigned long   smp_size;
        unsigned int    smp_reg;
        mtrr_type       smp_type;
};

/**
 * mtrr_rendezvous_handler - Work done in the synchronization handler. Executed
 * by all the CPUs.
 * @info: pointer to mtrr configuration data
 *
 * Returns zero, as required by the stop_machine() callback signature.
 */
static int mtrr_rendezvous_handler(void *info)
{
        struct set_mtrr_data *data = info;

        /*
         * We use this same function to initialize the mtrrs during boot,
         * resume, runtime cpu online and on an explicit request to set a
         * specific MTRR.
         *
         * During boot or suspend, the state of the boot cpu's mtrrs has been
         * saved, and we want to replicate that across all the cpus that come
         * online (either at the end of boot or resume or during a runtime cpu
         * online). If we're doing that, @reg is set to something special and on
         * all the cpu's we do mtrr_if->set_all() (On the logical cpu that
         * started the boot/resume sequence, this might be a duplicate
         * set_all()).
         */
        if (data->smp_reg != ~0U) {
                mtrr_if->set(data->smp_reg, data->smp_base,
                             data->smp_size, data->smp_type);
        } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
                mtrr_if->set_all();
        }
        return 0;
}

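/*
 * Two MTRR types may safely overlap only when the result is well defined
 * by the architecture. Per the Intel SDM overlap rules: UC combined with
 * anything yields UC (so a UC overlap is always safe), and a WT/WB overlap
 * resolves to WT. For example, a driver may map a small uncached register
 * window inside a larger write-back RAM range without conflict.
 */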
static inline int types_compatible(mtrr_type type1, mtrr_type type2)
{
        return type1 == MTRR_TYPE_UNCACHABLE ||
               type2 == MTRR_TYPE_UNCACHABLE ||
               (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
               (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
}


/**
 * set_mtrr - update mtrrs on all processors
 * @reg: mtrr in question
 * @base: mtrr base
 * @size: mtrr size
 * @type: mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Queue work to do the following on all processors:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, stop_machine() will ensure that
 * the rendezvous handler is started on each CPU. And in lockstep they
 * do the state transition of disabling interrupts, updating MTRR's
 * (the CPU vendors may each do it differently, so we call mtrr_if->set()
 * callback and let them take care of it.) and enabling interrupts.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
 */
static void
set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
        struct set_mtrr_data data = { .smp_reg = reg,
                                      .smp_base = base,
                                      .smp_size = size,
                                      .smp_type = type
                                    };

        stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);
}

static void set_mtrr_cpuslocked(unsigned int reg, unsigned long base,
                                unsigned long size, mtrr_type type)
{
        struct set_mtrr_data data = { .smp_reg = reg,
                                      .smp_base = base,
                                      .smp_size = size,
                                      .smp_type = type
                                    };

        stop_machine_cpuslocked(mtrr_rendezvous_handler, &data, cpu_online_mask);
}

static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base,
                                       unsigned long size, mtrr_type type)
{
        struct set_mtrr_data data = { .smp_reg = reg,
                                      .smp_base = base,
                                      .smp_size = size,
                                      .smp_type = type
                                    };

        stop_machine_from_inactive_cpu(mtrr_rendezvous_handler, &data,
                                       cpu_callout_mask);
}

/**
 * mtrr_add_page - Add a memory type region
 * @base: Physical base address of region in pages (in units of 4 kB!)
 * @size: Physical size of region in pages (4 kB)
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non-Intel processors. This function allows drivers to request an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added either because all regions are in use
 * or the CPU cannot support it a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add_page(unsigned long base, unsigned long size,
                  unsigned int type, bool increment)
{
        unsigned long lbase, lsize;
        int i, replace, error;
        mtrr_type ltype;

        if (!mtrr_enabled())
                return -ENXIO;

        error = mtrr_if->validate_add_page(base, size, type);
        if (error)
                return error;

        if (type >= MTRR_NUM_TYPES) {
                pr_warn("mtrr: type: %u invalid\n", type);
                return -EINVAL;
        }

        /* If the type is WC, check that this processor supports it */
        if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
                pr_warn("mtrr: your processor doesn't support write-combining\n");
                return -ENOSYS;
        }

        if (!size) {
                pr_warn("mtrr: zero sized request\n");
                return -EINVAL;
        }

        if ((base | (base + size - 1)) >>
            (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) {
                pr_warn("mtrr: base or size exceeds the MTRR width\n");
                return -EINVAL;
        }

        error = -EINVAL;
        replace = -1;

        /* No CPU hotplug when we change MTRR entries */
        get_online_cpus();

        /* Search for existing MTRR */
        mutex_lock(&mtrr_mutex);
        for (i = 0; i < num_var_ranges; ++i) {
                mtrr_if->get(i, &lbase, &lsize, &ltype);
                if (!lsize || base > lbase + lsize - 1 ||
                    base + size - 1 < lbase)
                        continue;
                /*
                 * At this point we know there is some kind of
                 * overlap/enclosure
                 */
                if (base < lbase || base + size - 1 > lbase + lsize - 1) {
                        if (base <= lbase &&
                            base + size - 1 >= lbase + lsize - 1) {
                                /* New region encloses an existing region */
                                if (type == ltype) {
                                        replace = replace == -1 ? i : -2;
                                        continue;
                                } else if (types_compatible(type, ltype))
                                        continue;
                        }
                        pr_warn("mtrr: 0x%lx000,0x%lx000 overlaps existing 0x%lx000,0x%lx000\n",
                                base, size, lbase, lsize);
                        goto out;
                }
                /* New region is enclosed by an existing region */
                if (ltype != type) {
                        if (types_compatible(type, ltype))
                                continue;
                        pr_warn("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
                                base, size, mtrr_attrib_to_str(ltype),
                                mtrr_attrib_to_str(type));
                        goto out;
                }
                if (increment)
                        ++mtrr_usage_table[i];
                error = i;
                goto out;
        }
        /* Search for an empty MTRR */
        i = mtrr_if->get_free_region(base, size, replace);
        if (i >= 0) {
                set_mtrr_cpuslocked(i, base, size, type);
                if (likely(replace < 0)) {
                        mtrr_usage_table[i] = 1;
                } else {
                        mtrr_usage_table[i] = mtrr_usage_table[replace];
                        if (increment)
                                mtrr_usage_table[i]++;
                        if (unlikely(replace != i)) {
                                set_mtrr_cpuslocked(replace, 0, 0, 0);
                                mtrr_usage_table[replace] = 0;
                        }
                }
        } else {
                pr_info("mtrr: no more MTRRs available\n");
        }
        error = i;
out:
        mutex_unlock(&mtrr_mutex);
        put_online_cpus();
        return error;
}

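/*
 * Example (illustrative only; the address is hypothetical): because @base
 * and @size are in 4 kB pages, a driver covering 16 MB of MMIO at physical
 * address 0xd0000000 shifts both values right by PAGE_SHIFT first. The
 * returned register number is only meaningful as a cookie for a later
 * mtrr_del_page() call.
 *
 *      int reg = mtrr_add_page(0xd0000000UL >> PAGE_SHIFT,
 *                              0x1000000UL >> PAGE_SHIFT,
 *                              MTRR_TYPE_WRCOMB, true);
 *      if (reg >= 0)
 *              mtrr_del_page(reg, 0xd0000000UL >> PAGE_SHIFT,
 *                            0x1000000UL >> PAGE_SHIFT);
 */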
static int mtrr_check(unsigned long base, unsigned long size)
{
        if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
                pr_warn("mtrr: size and base must be multiples of 4 kiB\n");
                pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
                dump_stack();
                return -1;
        }
        return 0;
}

/**
 * mtrr_add - Add a memory type region
 * @base: Physical base address of region
 * @size: Physical size of region
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non-Intel processors. This function allows drivers to request an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added either because all regions are in use
 * or the CPU cannot support it a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
             bool increment)
{
        if (!mtrr_enabled())
                return -ENODEV;
        if (mtrr_check(base, size))
                return -EINVAL;
        return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
                             increment);
}

/**
 * mtrr_del_page - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code.
 */
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
        int i, max;
        mtrr_type ltype;
        unsigned long lbase, lsize;
        int error = -EINVAL;

        if (!mtrr_enabled())
                return -ENODEV;

        max = num_var_ranges;
        /* No CPU hotplug when we change MTRR entries */
        get_online_cpus();
        mutex_lock(&mtrr_mutex);
        if (reg < 0) {
                /* Search for existing MTRR */
                for (i = 0; i < max; ++i) {
                        mtrr_if->get(i, &lbase, &lsize, &ltype);
                        if (lbase == base && lsize == size) {
                                reg = i;
                                break;
                        }
                }
                if (reg < 0) {
                        pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n",
                                 base, size);
                        goto out;
                }
        }
        if (reg >= max) {
                pr_warn("mtrr: register: %d too big\n", reg);
                goto out;
        }
        mtrr_if->get(reg, &lbase, &lsize, &ltype);
        if (lsize < 1) {
                pr_warn("mtrr: MTRR %d not used\n", reg);
                goto out;
        }
        if (mtrr_usage_table[reg] < 1) {
                pr_warn("mtrr: reg: %d has count=0\n", reg);
                goto out;
        }
        if (--mtrr_usage_table[reg] < 1)
                set_mtrr_cpuslocked(reg, 0, 0, 0);
        error = reg;
out:
        mutex_unlock(&mtrr_mutex);
        put_online_cpus();
        return error;
}

/**
 * mtrr_del - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code.
 */
int mtrr_del(int reg, unsigned long base, unsigned long size)
{
        if (!mtrr_enabled())
                return -ENODEV;
        if (mtrr_check(base, size))
                return -EINVAL;
        return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}

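/*
 * Example (illustrative only; the address is hypothetical): a driver that
 * wants write-combining on a 16 MB framebuffer at 0xd0000000 adds the
 * region once, keeps the returned cookie, and deletes it on teardown.
 * New code should prefer arch_phys_wc_add() below, which is a no-op when
 * PAT already provides write-combining.
 *
 *      int reg = mtrr_add(0xd0000000, 0x1000000, MTRR_TYPE_WRCOMB, true);
 *      ...
 *      if (reg >= 0)
 *              mtrr_del(reg, 0xd0000000, 0x1000000);
 */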
/**
 * arch_phys_wc_add - add a WC MTRR and handle errors if PAT is unavailable
 * @base: Physical base address
 * @size: Size of region
 *
 * If PAT is available, this does nothing. If PAT is unavailable, it
 * attempts to add a WC MTRR covering size bytes starting at base and
 * logs an error if this fails.
 *
 * The caller should provide a power of two size on an equivalent
 * power of two boundary.
 *
 * Drivers must store the return value to pass to arch_phys_wc_del(),
 * but drivers should not try to interpret that return value.
 */
int arch_phys_wc_add(unsigned long base, unsigned long size)
{
        int ret;

        if (pat_enabled() || !mtrr_enabled())
                return 0;  /* Success! (We don't need to do anything.) */

        ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
        if (ret < 0) {
                pr_warn("Failed to add WC MTRR for [%p-%p]; performance may suffer.",
                        (void *)base, (void *)(base + size - 1));
                return ret;
        }
        return ret + MTRR_TO_PHYS_WC_OFFSET;
}
EXPORT_SYMBOL(arch_phys_wc_add);

/*
 * arch_phys_wc_del - undoes arch_phys_wc_add
 * @handle: Return value from arch_phys_wc_add
 *
 * This cleans up after arch_phys_wc_add().
 *
 * The API guarantees that arch_phys_wc_del(error code) and
 * arch_phys_wc_del(0) do nothing.
 */
void arch_phys_wc_del(int handle)
{
        if (handle >= 1) {
                WARN_ON(handle < MTRR_TO_PHYS_WC_OFFSET);
                mtrr_del(handle - MTRR_TO_PHYS_WC_OFFSET, 0, 0);
        }
}
EXPORT_SYMBOL(arch_phys_wc_del);

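/*
 * Example (illustrative only; the fb-> names are hypothetical): the usual
 * pairing with ioremap_wc(), where a zero or negative handle is still safe
 * to pass back to arch_phys_wc_del():
 *
 *      fb->wc_cookie = arch_phys_wc_add(fb->phys_base, fb->size);
 *      fb->regs = ioremap_wc(fb->phys_base, fb->size);
 *      ...
 *      iounmap(fb->regs);
 *      arch_phys_wc_del(fb->wc_cookie);
 */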
/*
 * arch_phys_wc_index - translates arch_phys_wc_add's return value
 * @handle: Return value from arch_phys_wc_add
 *
 * This will turn the return value from arch_phys_wc_add into an mtrr
 * index suitable for debugging.
 *
 * Note: There is no legitimate use for this function, except possibly
 * in a printk line. Alas there is an illegitimate use in some ancient
 * drm ioctls.
 */
int arch_phys_wc_index(int handle)
{
        if (handle < MTRR_TO_PHYS_WC_OFFSET)
                return -1;
        else
                return handle - MTRR_TO_PHYS_WC_OFFSET;
}
EXPORT_SYMBOL_GPL(arch_phys_wc_index);

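/*
 * Example (illustrative only; fb->wc_cookie is hypothetical): since
 * MTRR_TO_PHYS_WC_OFFSET is 1000, a handle of 1003 maps back to MTRR
 * register 3, while any handle below 1000 (including 0 and error codes)
 * yields -1:
 *
 *      pr_debug("using MTRR %d for WC mapping\n",
 *               arch_phys_wc_index(fb->wc_cookie));
 */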
/*
 * HACK ALERT!
 * These should be called implicitly, but we can't yet until all the initcall
 * stuff is done...
 */
static void __init init_ifs(void)
{
#ifndef CONFIG_X86_64
        amd_init_mtrr();
        cyrix_init_mtrr();
        centaur_init_mtrr();
#endif
}

/*
 * The suspend/resume methods are only for CPUs without MTRR. CPUs using
 * the generic MTRR driver don't require this.
 */
struct mtrr_value {
        mtrr_type       ltype;
        unsigned long   lbase;
        unsigned long   lsize;
};

static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];

static int mtrr_save(void)
{
        int i;

        for (i = 0; i < num_var_ranges; i++) {
                mtrr_if->get(i, &mtrr_value[i].lbase,
                             &mtrr_value[i].lsize,
                             &mtrr_value[i].ltype);
        }
        return 0;
}

static void mtrr_restore(void)
{
        int i;

        for (i = 0; i < num_var_ranges; i++) {
                if (mtrr_value[i].lsize) {
                        set_mtrr(i, mtrr_value[i].lbase,
                                 mtrr_value[i].lsize,
                                 mtrr_value[i].ltype);
                }
        }
}

static struct syscore_ops mtrr_syscore_ops = {
        .suspend        = mtrr_save,
        .resume         = mtrr_restore,
};

int __initdata changed_by_mtrr_cleanup;

#define SIZE_OR_MASK_BITS(n)    (~((1ULL << ((n) - PAGE_SHIFT)) - 1))
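
/*
 * Worked example (added for clarity, assuming PAGE_SHIFT == 12): with 36
 * physical address bits, SIZE_OR_MASK_BITS(36) = ~((1ULL << 24) - 1), i.e.
 * a mask with the low 24 bits clear, covering 2^36 bytes in 4 kB pages.
 */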
/**
 * mtrr_bp_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 */
void __init mtrr_bp_init(void)
{
        u32 phys_addr;

        init_ifs();

        phys_addr = 32;

        if (boot_cpu_has(X86_FEATURE_MTRR)) {
                mtrr_if = &generic_mtrr_ops;
                size_or_mask = SIZE_OR_MASK_BITS(36);
                size_and_mask = 0x00f00000;
                phys_addr = 36;

                /*
                 * This is an AMD specific MSR, but we assume(hope?) that
                 * Intel will implement it too when they extend the address
                 * bus of the Xeon.
                 */
                if (cpuid_eax(0x80000000) >= 0x80000008) {
                        phys_addr = cpuid_eax(0x80000008) & 0xff;
                        /* CPUID workaround for Intel 0F33/0F34 CPU */
                        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
                            boot_cpu_data.x86 == 0xF &&
                            boot_cpu_data.x86_model == 0x3 &&
                            (boot_cpu_data.x86_mask == 0x3 ||
                             boot_cpu_data.x86_mask == 0x4))
                                phys_addr = 36;

                        size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
                        size_and_mask = ~size_or_mask & 0xfffff00000ULL;
                } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
                           boot_cpu_data.x86 == 6) {
                        /*
                         * VIA C* family have Intel style MTRRs,
                         * but don't support PAE
                         */
                        size_or_mask = SIZE_OR_MASK_BITS(32);
                        size_and_mask = 0;
                        phys_addr = 32;
                }
        } else {
                switch (boot_cpu_data.x86_vendor) {
                case X86_VENDOR_AMD:
                        if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
                                /* Pre-Athlon (K6) AMD CPU MTRRs */
                                mtrr_if = mtrr_ops[X86_VENDOR_AMD];
                                size_or_mask = SIZE_OR_MASK_BITS(32);
                                size_and_mask = 0;
                        }
                        break;
                case X86_VENDOR_CENTAUR:
                        if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
                                mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
                                size_or_mask = SIZE_OR_MASK_BITS(32);
                                size_and_mask = 0;
                        }
                        break;
                case X86_VENDOR_CYRIX:
                        if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
                                mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
                                size_or_mask = SIZE_OR_MASK_BITS(32);
                                size_and_mask = 0;
                        }
                        break;
                default:
                        break;
                }
        }

        if (mtrr_if) {
                __mtrr_enabled = true;
                set_num_var_ranges();
                init_table();
                if (use_intel()) {
                        /* BIOS may override */
                        __mtrr_enabled = get_mtrr_state();

                        if (mtrr_enabled())
                                mtrr_bp_pat_init();

                        if (mtrr_cleanup(phys_addr)) {
                                changed_by_mtrr_cleanup = 1;
                                mtrr_if->set_all();
                        }
                }
        }

        if (!mtrr_enabled()) {
                pr_info("MTRR: Disabled\n");

                /*
                 * PAT initialization relies on MTRR's rendezvous handler.
                 * Skip PAT init until the handler can initialize both
                 * features independently.
                 */
                pat_disable("MTRRs disabled, skipping PAT initialization too.");
        }
}

void mtrr_ap_init(void)
{
        if (!mtrr_enabled())
                return;

        if (!use_intel() || mtrr_aps_delayed_init)
                return;
        /*
         * Ideally we should hold mtrr_mutex here to avoid MTRR entries
         * being changed, but this routine is called at CPU boot time,
         * and holding the lock there breaks it.
         *
         * This routine is called in two cases:
         *
         * 1. very early in software resume, when there are absolutely
         *    no MTRR entry changes;
         *
         * 2. CPU hotadd time, when we let mtrr_add/del_page hold the
         *    cpuhotplug lock to prevent MTRR entry changes.
         */
        set_mtrr_from_inactive_cpu(~0U, 0, 0, 0);
}

/**
 * mtrr_save_state - save the current fixed-range MTRR state of the
 * first CPU in cpu_online_mask.
 */
void mtrr_save_state(void)
{
        int first_cpu;

        if (!mtrr_enabled())
                return;

        first_cpu = cpumask_first(cpu_online_mask);
        smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
}

void set_mtrr_aps_delayed_init(void)
{
        if (!mtrr_enabled())
                return;
        if (!use_intel())
                return;

        mtrr_aps_delayed_init = true;
}

/*
 * Delayed MTRR initialization for all APs
 */
void mtrr_aps_init(void)
{
        if (!use_intel() || !mtrr_enabled())
                return;

        /*
         * Check if someone has requested the delay of AP MTRR initialization,
         * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
         * then we are done.
         */
        if (!mtrr_aps_delayed_init)
                return;

        set_mtrr(~0U, 0, 0, 0);
        mtrr_aps_delayed_init = false;
}

void mtrr_bp_restore(void)
{
        if (!use_intel() || !mtrr_enabled())
                return;

        mtrr_if->set_all();
}

static int __init mtrr_init_finalize(void)
{
        if (!mtrr_enabled())
                return 0;

        if (use_intel()) {
                if (!changed_by_mtrr_cleanup)
                        mtrr_state_warn();
                return 0;
        }

        /*
         * The CPU has no MTRR and seems to not support SMP. These CPUs
         * have vendor-specific drivers, and we use this tricky method to
         * support suspend/resume for them.
         *
         * TBD: is there any system with such a CPU that supports
         * suspend/resume? If not, we should remove this code.
         */
        register_syscore_ops(&mtrr_syscore_ops);

        return 0;
}
subsys_initcall(mtrr_init_finalize);