]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright 2004 James Cleverdon, IBM. | |
3 | * Subject to the GNU Public License, v.2 | |
4 | * | |
f8d31193 | 5 | * Flat APIC subarch code. |
1da177e4 LT |
6 | * |
7 | * Hacked for x86-64 by James Cleverdon from i386 architecture code by | |
8 | * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and | |
9 | * James Cleverdon. | |
10 | */ | |
11 | #include <linux/config.h> | |
12 | #include <linux/threads.h> | |
13 | #include <linux/cpumask.h> | |
14 | #include <linux/string.h> | |
15 | #include <linux/kernel.h> | |
16 | #include <linux/ctype.h> | |
17 | #include <linux/init.h> | |
18 | #include <asm/smp.h> | |
19 | #include <asm/ipi.h> | |
20 | ||
1da177e4 LT |
/*
 * In flat logical mode an IPI can address every online CPU at once,
 * so the default target set is the whole online map.
 */
static cpumask_t flat_target_cpus(void)
{
	return cpu_online_map;
}
25 | ||
/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
static void flat_init_apic_ldr(void)
{
	unsigned long val;
	unsigned long num, id;

	num = smp_processor_id();
	id = 1UL << num;	/* flat mode: one logical-ID bit per CPU */
	x86_cpu_to_log_apicid[num] = id;	/* remember our logical ID */
	/* Select flat model in the destination format register first... */
	apic_write_around(APIC_DFR, APIC_DFR_FLAT);
	/* ...then install our bit in the logical destination register,
	 * preserving the reserved low bits of LDR. */
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
	apic_write_around(APIC_LDR, val);
}
46 | ||
1da177e4 LT |
/*
 * Send an IPI with @vector to the CPUs in @cpumask using logical flat
 * destination mode.  Only the first word of the cpumask is used, as the
 * flat logical destination bitmap holds at most one bit per CPU.
 *
 * Interrupts are disabled across the ICR2/ICR writes so the two-register
 * programming sequence cannot be interleaved with another IPI.
 */
static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
{
	unsigned long mask = cpus_addr(cpumask)[0];
	unsigned long cfg;
	unsigned long flags;

	local_save_flags(flags);
	local_irq_disable();

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	/*
	 * prepare target chip field (ICR2 must be written before ICR,
	 * since the ICR write is what triggers the send)
	 */
	cfg = __prepare_ICR2(mask);
	apic_write_around(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector, APIC_DEST_LOGICAL);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);
	local_irq_restore(flags);
}
78 | ||
a02c4cb6 AR |
79 | static void flat_send_IPI_allbutself(int vector) |
80 | { | |
81 | if (((num_online_cpus()) - 1) >= 1) | |
37a47e65 | 82 | __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL); |
884d9e40 AR |
83 | } |
84 | ||
/*
 * Broadcast @vector to all CPUs, including the sender, via the
 * hardware "all including self" shortcut.
 */
static void flat_send_IPI_all(int vector)
{
	__send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
}
89 | ||
1da177e4 LT |
/*
 * Report whether this CPU's physical APIC ID, as read back from its
 * local APIC, is present in the map of known physical CPUs.
 */
static int flat_apic_id_registered(void)
{
	return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
}
94 | ||
95 | static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask) | |
96 | { | |
97 | return cpus_addr(cpumask)[0] & APIC_ALL_CPUS; | |
98 | } | |
99 | ||
100 | static unsigned int phys_pkg_id(int index_msb) | |
101 | { | |
102 | u32 ebx; | |
103 | ||
104 | ebx = cpuid_ebx(1); | |
105 | return ((ebx >> 24) & 0xFF) >> index_msb; | |
106 | } | |
107 | ||
/* Logical flat mode: the default genapic model for small systems. */
struct genapic apic_flat = {
	.name = "flat",
	.int_delivery_mode = dest_LowestPrio,	/* lowest-priority arbitration */
	.int_dest_mode = (APIC_DEST_LOGICAL != 0),	/* logical destinations */
	.int_delivery_dest = APIC_DEST_LOGICAL | APIC_DM_LOWEST,
	.target_cpus = flat_target_cpus,
	.apic_id_registered = flat_apic_id_registered,
	.init_apic_ldr = flat_init_apic_ldr,
	.send_IPI_all = flat_send_IPI_all,
	.send_IPI_allbutself = flat_send_IPI_allbutself,
	.send_IPI_mask = flat_send_IPI_mask,
	.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
	.phys_pkg_id = phys_pkg_id,
};
f8d31193 AK |
122 | |
/*
 * Physflat mode is used when there are more than 8 CPUs on a AMD system.
 * We cannot use logical delivery in this case because the mask
 * overflows, so use physical mode.
 */

/* Default interrupt target in physflat mode: CPU 0 only. */
static cpumask_t physflat_target_cpus(void)
{
	return cpumask_of_cpu(0);
}
133 | ||
/*
 * Physical mode has no single-shot multi-destination IPI: deliver
 * @vector to each CPU in @cpumask one at a time.
 */
static void physflat_send_IPI_mask(cpumask_t cpumask, int vector)
{
	send_IPI_mask_sequence(cpumask, vector);
}
138 | ||
/*
 * Send @vector to every online CPU except ourselves by masking our
 * own bit out of a copy of the online map.
 */
static void physflat_send_IPI_allbutself(int vector)
{
	cpumask_t allbutme = cpu_online_map;
	int me = get_cpu();	/* get_cpu() pins us so 'me' stays accurate */
	cpu_clear(me, allbutme);
	physflat_send_IPI_mask(allbutme, vector);
	put_cpu();	/* paired with get_cpu() above */
}
147 | ||
/* Send @vector to every online CPU, including ourselves. */
static void physflat_send_IPI_all(int vector)
{
	physflat_send_IPI_mask(cpu_online_map, vector);
}
152 | ||
153 | static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask) | |
154 | { | |
155 | int cpu; | |
156 | ||
157 | /* | |
158 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | |
159 | * May as well be the first. | |
160 | */ | |
161 | cpu = first_cpu(cpumask); | |
162 | if ((unsigned)cpu < NR_CPUS) | |
163 | return x86_cpu_to_apicid[cpu]; | |
164 | else | |
165 | return BAD_APICID; | |
166 | } | |
167 | ||
/* Physical flat mode: used when the logical destination mask would overflow. */
struct genapic apic_physflat =  {
	.name = "physical flat",
	/*
	 * NOTE(review): lowest-priority delivery is combined here with
	 * physical destination mode — confirm this is intended, as later
	 * code bases switched physflat to fixed delivery.
	 */
	.int_delivery_mode = dest_LowestPrio,
	.int_dest_mode = (APIC_DEST_PHYSICAL != 0),	/* physical destinations */
	.int_delivery_dest = APIC_DEST_PHYSICAL | APIC_DM_LOWEST,
	.target_cpus = physflat_target_cpus,
	.apic_id_registered = flat_apic_id_registered,
	.init_apic_ldr = flat_init_apic_ldr,/*not needed, but shouldn't hurt*/
	.send_IPI_all = physflat_send_IPI_all,
	.send_IPI_allbutself = physflat_send_IPI_allbutself,
	.send_IPI_mask = physflat_send_IPI_mask,
	.cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
	.phys_pkg_id = phys_pkg_id,
};