]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - arch/x86/kernel/amd_nb.c
x86/amd_nb: Make amd_northbridges internal to amd_nb.c
[mirror_ubuntu-artful-kernel.git] / arch / x86 / kernel / amd_nb.c
CommitLineData
a32073bf
AK
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
c767a54b
JP
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
a32073bf 8#include <linux/types.h>
5a0e3ad6 9#include <linux/slab.h>
a32073bf
AK
10#include <linux/init.h>
11#include <linux/errno.h>
186f4360 12#include <linux/export.h>
a32073bf 13#include <linux/spinlock.h>
23ac4ae8 14#include <asm/amd_nb.h>
a32073bf 15
a32073bf
AK
/* Per-northbridge cache of the GART flush word (misc PCI config reg 0x9c). */
static u32 *flush_words;
17
/*
 * PCI IDs of the supported AMD northbridge "misc" (function 3) devices,
 * terminated by an empty entry.  Exported for use by other drivers.
 */
const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);
a32073bf 30
/*
 * PCI IDs of the corresponding "link" (function 4) devices,
 * terminated by an empty entry.
 */
static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{}
};
39
24d9b70b
JB
/*
 * PCI bus/device ranges probed for northbridge devices during early init;
 * sentinel-terminated.  NOTE(review): entries look like { bus, first dev,
 * dev limit } — confirm against the struct definition in amd_nb.h.
 */
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};
46
c7993890
YG
/* File-local bookkeeping for the northbridges found at init time. */
static struct amd_northbridge_info amd_northbridges;

/* Return the number of northbridges discovered by amd_cache_northbridges(). */
u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL(amd_nb_num);
54
/* True iff all requested AMD_NB_* feature bits are set in the global flags. */
bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL(amd_nb_has_feature);
60
61struct amd_northbridge *node_to_amd_nb(int node)
62{
63 return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
64}
65EXPORT_SYMBOL(node_to_amd_nb);
a32073bf 66
9653a5c7 67static struct pci_dev *next_northbridge(struct pci_dev *dev,
691269f0 68 const struct pci_device_id *ids)
a32073bf
AK
69{
70 do {
71 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
72 if (!dev)
73 break;
9653a5c7 74 } while (!pci_match_id(ids, dev));
a32073bf
AK
75 return dev;
76}
77
/*
 * Enumerate the AMD northbridges present in the system and populate
 * amd_northbridges: the count, the per-node misc/link PCI devices, and
 * the feature flags (GART, L3 index disable, L3 partitioning).
 *
 * Idempotent: returns 0 immediately if discovery already ran.
 * Returns -ENODEV when no northbridge is found, -ENOMEM on allocation
 * failure, 0 otherwise.
 */
int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	/* Already initialized? */
	if (amd_northbridges.num)
		return 0;

	/* First pass: count the misc (function 3) devices. */
	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (!i)
		return -ENODEV;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	/* Second pass: record the misc and link device for each node. */
	link = misc = NULL;
	for (i = 0; i != amd_northbridges.num; i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);
a32073bf 138
84fd1d35
BP
139/*
140 * Ignores subdevice/subvendor but as far as I can figure out
141 * they're useless anyways
142 */
143bool __init early_is_amd_nb(u32 device)
a32073bf 144{
691269f0 145 const struct pci_device_id *id;
a32073bf 146 u32 vendor = device & 0xffff;
691269f0 147
a32073bf 148 device >>= 16;
9653a5c7 149 for (id = amd_nb_misc_ids; id->vendor; id++)
a32073bf 150 if (vendor == id->vendor && device == id->device)
84fd1d35
BP
151 return true;
152 return false;
a32073bf
AK
153}
154
24d25dbf
BH
155struct resource *amd_get_mmconfig_range(struct resource *res)
156{
157 u32 address;
158 u64 base, msr;
159 unsigned segn_busn_bits;
160
161 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
162 return NULL;
163
164 /* assume all cpus from fam10h have mmconfig */
165 if (boot_cpu_data.x86 < 0x10)
166 return NULL;
167
168 address = MSR_FAM10H_MMIO_CONF_BASE;
169 rdmsrl(address, msr);
170
171 /* mmconfig is not enabled */
172 if (!(msr & FAM10H_MMIO_CONF_ENABLE))
173 return NULL;
174
175 base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
176
177 segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
178 FAM10H_MMIO_CONF_BUSRANGE_MASK;
179
180 res->flags = IORESOURCE_MEM;
181 res->start = base;
182 res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
183 return res;
184}
185
cabb5bd7
HR
/*
 * Return the 4-bit mask of enabled L3 subcaches for @cpu's core, read
 * from the link device's config register 0x1d4.  Returns 0 when L3
 * cache partitioning is not supported.
 */
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	/* One nibble per core; extract this core's. */
	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}
198
/*
 * Set the mask of enabled L3 subcaches for @cpu's core to @mask (at most
 * 4 bits).  BAN mode (bits 0x180000 of misc config reg 0x1b8) is turned
 * off while any subcache is disabled and restored once the partitioning
 * register 0x1d4 returns to its reset-time value.
 *
 * Returns 0 on success, -EINVAL when L3 partitioning is unsupported or
 * @mask is wider than 4 bits.
 */
int amd_set_subcaches(int cpu, unsigned long mask)
{
	/* Reset-time register state, captured lazily on first call. */
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	/* Shift the new mask into this core's nibble of register 0x1d4. */
	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
238
09c6c30e 239static void amd_cache_gart(void)
9653a5c7 240{
84fd1d35 241 u16 i;
9653a5c7 242
09c6c30e
BP
243 if (!amd_nb_has_feature(AMD_NB_GART))
244 return;
9653a5c7 245
c7993890 246 flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
09c6c30e
BP
247 if (!flush_words) {
248 amd_northbridges.flags &= ~AMD_NB_GART;
249 pr_notice("Cannot initialize GART flush words, GART support disabled\n");
250 return;
251 }
9653a5c7 252
c7993890 253 for (i = 0; i != amd_northbridges.num; i++)
09c6c30e 254 pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
9653a5c7
HR
255}
256
/*
 * Flush the GART TLB on every northbridge: set bit 0 of the flush-word
 * register (0x9c) on each one, then poll until the hardware clears it.
 * Serialized by a local spinlock.
 */
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/* Avoid races between AGP and IOMMU. In theory it's not needed
	   but I'm not sure if the hardware won't lose flush requests
	   when another is pending. This whole thing is so expensive anyways
	   that it doesn't matter to serialize more. -AK */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	/* Kick off the flush on all northbridges first... */
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	/* ...then wait for each one's flush bit to self-clear. */
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush*/
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);
a32073bf 293
/* Discover the northbridges and cache the GART flush words at boot. */
static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);