/*
 * SGI UltraViolet TLB flush routines.
 *
 * (c) 2008-2014 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>

/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {
		20,
		160,
		1280,
		10240,
		81920,
		655360,
		5242880,
		167772160
};

static int timeout_us;
static bool nobau = true;
static int nobau_perm;
static cycles_t congested_cycles;

/* tunables: */
static int max_concurr = MAX_BAU_CONCURRENT;
static int max_concurr_const = MAX_BAU_CONCURRENT;
static int plugged_delay = PLUGGED_DELAY;
static int plugsb4reset = PLUGSB4RESET;
static int giveup_limit = GIVEUP_LIMIT;
static int timeoutsb4reset = TIMEOUTSB4RESET;
static int ipi_reset_limit = IPI_RESET_LIMIT;
static int complete_threshold = COMPLETE_THRESHOLD;
static int congested_respns_us = CONGESTED_RESPONSE_US;
static int congested_reps = CONGESTED_REPS;
static int disabled_period = DISABLED_PERIOD;

static struct tunables tunables[] = {
	{&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
	{&plugged_delay, PLUGGED_DELAY},
	{&plugsb4reset, PLUGSB4RESET},
	{&timeoutsb4reset, TIMEOUTSB4RESET},
	{&ipi_reset_limit, IPI_RESET_LIMIT},
	{&complete_threshold, COMPLETE_THRESHOLD},
	{&congested_respns_us, CONGESTED_RESPONSE_US},
	{&congested_reps, CONGESTED_REPS},
	{&disabled_period, DISABLED_PERIOD},
	{&giveup_limit, GIVEUP_LIMIT}
};

static struct dentry *tunables_dir;
static struct dentry *tunables_file;

/* these correspond to the statistics printed by ptc_seq_show() */
static char *stat_description[] = {
	"sent:     number of shootdown messages sent",
	"stime:    time spent sending messages",
	"numuvhubs: number of hubs targeted with shootdown",
	"numuvhubs16: number times 16 or more hubs targeted",
	"numuvhubs8: number times 8 or more hubs targeted",
	"numuvhubs4: number times 4 or more hubs targeted",
	"numuvhubs2: number times 2 or more hubs targeted",
	"numuvhubs1: number times 1 hub targeted",
	"numcpus:  number of cpus targeted with shootdown",
	"dto:      number of destination timeouts",
	"retries:  destination timeout retries sent",
	"rok:   :  destination timeouts successfully retried",
	"resetp:   ipi-style resource resets for plugs",
	"resett:   ipi-style resource resets for timeouts",
	"giveup:   fall-backs to ipi-style shootdowns",
	"sto:      number of source timeouts",
	"bz:       number of stay-busy's",
	"throt:    number times spun in throttle",
	"swack:    image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
	"recv:     shootdown messages received",
	"rtime:    time spent processing messages",
	"all:      shootdown all-tlb messages",
	"one:      shootdown one-tlb messages",
	"mult:     interrupts that found multiple messages",
	"none:     interrupts that found no messages",
	"retry:    number of retry messages processed",
	"canc:     number messages canceled by retries",
	"nocan:    number retries that found nothing to cancel",
	"reset:    number of ipi-style reset requests processed",
	"rcan:     number messages canceled by reset requests",
	"disable:  number times use of the BAU was disabled",
	"enable:   number times use of the BAU was re-enabled"
};

static int __init setup_bau(char *arg)
{
	int result;

	if (!arg)
		return -EINVAL;

	result = strtobool(arg, &nobau);
	if (result)
		return result;

	/* we need to flip the logic here, so that bau=y sets nobau to false */
	nobau = !nobau;

	if (!nobau)
		pr_info("UV BAU Enabled\n");
	else
		pr_info("UV BAU Disabled\n");

	return 0;
}
early_param("bau", setup_bau);

/* base pnode in this partition */
static int uv_base_pnode __read_mostly;

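/* per-cpu BAU state: statistics, control structures and the scratch cpumask used by uv_flush_tlb_others() */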
static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);

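/*
 * Runtime enable/disable of the BAU, driven by writing "on"/"off" to
 * /proc/sgi_uv/ptc_statistics (see ptc_proc_write()).
 */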
static void
set_bau_on(void)
{
	int cpu;
	struct bau_control *bcp;

	if (nobau_perm) {
		pr_info("BAU not initialized; cannot be turned on\n");
		return;
	}
	nobau = false;
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->nobau = false;
	}
	pr_info("BAU turned on\n");
	return;
}

static void
set_bau_off(void)
{
	int cpu;
	struct bau_control *bcp;

	nobau = true;
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->nobau = true;
	}
	pr_info("BAU turned off\n");
	return;
}

/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */
static int __init uvhub_to_first_node(int uvhub)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (uvhub == b)
			return node;
	}
	return -1;
}

/*
 * Determine the apicid of the first cpu on a uvhub.
 */
static int __init uvhub_to_first_apicid(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
						int do_acknowledge)
{
	unsigned long dw;
	struct bau_pq_entry *msg;

	msg = mdp->msg;
	if (!msg->canceled && do_acknowledge) {
		dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
		write_mmr_sw_ack(dw);
	}
	msg->replied_to = 1;
	msg->swack_vec = 0;
}

/*
 * Process the receipt of a RETRY message
 */
static void bau_process_retry_msg(struct msg_desc *mdp,
					struct bau_control *bcp)
{
	int i;
	int cancel_count = 0;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *msg2;
	struct ptc_stats *stat = bcp->statp;

	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->queue_last)
			msg2 = mdp->queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->swack_vec) && ((msg2->swack_vec &
			msg->swack_vec) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			mmr = read_mmr_sw_ack();
			msg_res = msg2->swack_vec;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				unsigned long mr;
				/*
				 * Is the resource timed out?
				 * Make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
				write_mmr_sw_ack(mr);
			}
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
						int do_acknowledge)
{
	short socket_ack_count = 0;
	short *sp;
	struct atomic_short *asp;
	struct ptc_stats *stat = bcp->statp;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		stat->d_alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried.  That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a swack message, so we have to reply to it.
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
	asp = (struct atomic_short *)sp;
	socket_ack_count = atom_asr(1, asp);
	if (socket_ack_count == bcp->cpus_in_socket) {
		int msg_ack_count;
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		*sp = 0;
		asp = (struct atomic_short *)&msg->acknowledge_count;
		msg_ack_count = atom_asr(socket_ack_count, asp);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 * (unless we are in the UV2 workaround)
			 */
			reply_to_message(mdp, bcp, do_acknowledge);
		}
	}

	return;
}

/*
 * Determine the first cpu on a pnode.
 */
static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
{
	int cpu;
	struct hub_and_pnode *hpp;

	for_each_present_cpu(cpu) {
		hpp = &smaster->thp[cpu];
		if (pnode == hpp->pnode)
			return cpu;
	}
	return -1;
}

/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero swack_vec field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * swack resources.
 */
static void do_reset(void *ptr)
{
	int i;
	struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
	struct reset_args *rap = (struct reset_args *)ptr;
	struct bau_pq_entry *msg;
	struct ptc_stats *stat = bcp->statp;

	stat->d_resets++;
	/*
	 * We're looking for the given sender, and
	 * will free its swack resource.
	 * If all cpu's finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		unsigned long msg_res;
		/* do_reset: same conditions for cancellation as
		   bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->swack_vec) &&
		    (msg->msg_type != MSG_NOOP)) {
			unsigned long mmr;
			unsigned long mr;
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			/*
			 * only reset the resource if it is still pending
			 */
			mmr = read_mmr_sw_ack();
			msg_res = msg->swack_vec;
			mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
			if (mmr & msg_res) {
				stat->d_rcanceled++;
				write_mmr_sw_ack(mr);
			}
		}
	}
	return;
}

/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */
static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
{
	int pnode;
	int apnode;
	int maskbits;
	int sender = bcp->cpu;
	cpumask_t *mask = bcp->uvhub_master->cpumask;
	struct bau_control *smaster = bcp->socket_master;
	struct reset_args reset_args;

	reset_args.sender = sender;
	cpumask_clear(mask);
	/* find a single cpu for each uvhub in this distribution mask */
	maskbits = sizeof(struct pnmask) * BITSPERBYTE;
	/* each bit is a pnode relative to the partition base pnode */
	for (pnode = 0; pnode < maskbits; pnode++) {
		int cpu;
		if (!bau_uvhub_isset(pnode, distribution))
			continue;
		apnode = pnode + bcp->partition_base_pnode;
		cpu = pnode_to_first_cpu(apnode, smaster);
		cpumask_set_cpu(cpu, mask);
	}

	/* IPI all cpus; preemption is already disabled */
	smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
	return;
}

/*
 * Not to be confused with cycles_2_ns() from tsc.c; this gives a relative
 * number, not an absolute. It converts a duration in cycles to a duration in
 * ns.
 */
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data *data = cyc2ns_read_begin();
	unsigned long long ns;

	ns = mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);

	cyc2ns_read_end(data);
	return ns;
}

/*
 * The reverse of the above; converts a duration in ns to a duration in cycles.
 */
static inline unsigned long long ns_2_cycles(unsigned long long ns)
{
	struct cyc2ns_data *data = cyc2ns_read_begin();
	unsigned long long cyc;

	cyc = (ns << data->cyc2ns_shift) / data->cyc2ns_mul;

	cyc2ns_read_end(data);
	return cyc;
}

static inline unsigned long cycles_2_us(unsigned long long cyc)
{
	return cycles_2_ns(cyc) / NSEC_PER_USEC;
}

static inline cycles_t sec_2_cycles(unsigned long sec)
{
	return ns_2_cycles(sec * NSEC_PER_SEC);
}

static inline unsigned long long usec_2_cycles(unsigned long usec)
{
	return ns_2_cycles(usec * NSEC_PER_USEC);
}

/*
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * bau_flush_send_and_wait()
 */
static inline void quiesce_local_uvhub(struct bau_control *hmaster)
{
	atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

/*
 * mark this quiet-requestor as done
 */
static inline void end_uvhub_quiesce(struct bau_control *hmaster)
{
	atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

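/* read this cpu's descriptor status field from a UV1 activation status MMR */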
static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
{
	unsigned long descriptor_status;

	descriptor_status = uv_read_local_mmr(mmr_offset);
	descriptor_status >>= right_shift;
	descriptor_status &= UV_ACT_STATUS_MASK;
	return descriptor_status;
}

/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 */
static int uv1_wait_completion(struct bau_desc *bau_desc,
				unsigned long mmr_offset, int right_shift,
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_status;
	cycles_t ttm;
	struct ptc_stats *stat = bcp->statp;

	descriptor_status = uv1_read_status(mmr_offset, right_shift);
	/* spin on the status MMR, waiting for it to go idle */
	while ((descriptor_status != DS_IDLE)) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available.  As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if (descriptor_status == DS_SOURCE_TIMEOUT) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
			stat->s_dtimeout++;
			ttm = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}

			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
		}
		descriptor_status = uv1_read_status(mmr_offset, right_shift);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

/*
 * UV2 could have an extra bit of status in the ACTIVATION_STATUS_2 register.
 * But not currently used.
 */
static unsigned long uv2_3_read_status(unsigned long offset, int rshft, int desc)
{
	unsigned long descriptor_status;

	descriptor_status =
		((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK) << 1;
	return descriptor_status;
}

/*
 * Return whether the status of the descriptor that is normally used for this
 * cpu (the one indexed by its hub-relative cpu number) is busy.
 * The status of the original 32 descriptors is always reflected in the 64
 * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
 * The bit provided by the activation_status_2 register is irrelevant to
 * the status if it is only being tested for busy or not busy.
 */
int normal_busy(struct bau_control *bcp)
{
	int cpu = bcp->uvhub_cpu;
	int mmr_offset;
	int right_shift;

	mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
	right_shift = cpu * UV_ACT_STATUS_SIZE;
	return (((((read_lmmr(mmr_offset) >> right_shift) &
				UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
}

/*
 * Entered when a bau descriptor has gone into a permanent busy wait because
 * of a hardware bug.
 * Workaround the bug.
 */
int handle_uv2_busy(struct bau_control *bcp)
{
	struct ptc_stats *stat = bcp->statp;

	stat->s_uv2_wars++;
	bcp->busy = 1;
	return FLUSH_GIVEUP;
}

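/*
 * UV2/UV3 version of wait_completion: spin on the activation status,
 * giving up on source timeouts, strong nacks and permanently busy
 * descriptors (see handle_uv2_busy()).
 */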
static int uv2_3_wait_completion(struct bau_desc *bau_desc,
				unsigned long mmr_offset, int right_shift,
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_stat;
	cycles_t ttm;
	int desc = bcp->uvhub_cpu;
	long busy_reps = 0;
	struct ptc_stats *stat = bcp->statp;

	descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);

	/* spin on the status MMR, waiting for it to go idle */
	while (descriptor_stat != UV2H_DESC_IDLE) {
		if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT)) {
			/*
			 * A h/w bug on the destination side may
			 * have prevented the message being marked
			 * pending, thus it doesn't get replied to
			 * and gets continually nacked until it times
			 * out with a SOURCE_TIMEOUT.
			 */
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
			ttm = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 * Without using the extended status we have to
			 * deduce from the short time that this was a
			 * strong nack.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				stat->s_plugged++;
				/* FLUSH_RETRY_PLUGGED causes hang on boot */
				return FLUSH_GIVEUP;
			}
			stat->s_dtimeout++;
			bcp->conseccompletes = 0;
			/* FLUSH_RETRY_TIMEOUT causes hang on boot */
			return FLUSH_GIVEUP;
		} else {
			busy_reps++;
			if (busy_reps > 1000000) {
				/* not to hammer on the clock */
				busy_reps = 0;
				ttm = get_cycles();
				if ((ttm - bcp->send_message) > bcp->timeout_interval)
					return handle_uv2_busy(bcp);
			}
			/*
			 * descriptor_stat is still BUSY
			 */
			cpu_relax();
		}
		descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

/*
 * There are 2 status registers; each an array[32] of 2 bits. Set up for
 * which register to read and position in that register based on cpu in
 * current hub.
 */
static int wait_completion(struct bau_desc *bau_desc, struct bau_control *bcp, long try)
{
	int right_shift;
	unsigned long mmr_offset;
	int desc = bcp->uvhub_cpu;

	if (desc < UV_CPUS_PER_AS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = desc * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
	}

	if (bcp->uvhub_version == 1)
		return uv1_wait_completion(bau_desc, mmr_offset, right_shift, bcp, try);
	else
		return uv2_3_wait_completion(bau_desc, mmr_offset, right_shift, bcp, try);
}

/*
 * Our retries are blocked by all destination sw ack resources being
 * in use, and a timeout is pending. In that case hardware immediately
 * returns the ERROR that looks like a destination timeout.
 */
static void destination_plugged(struct bau_desc *bau_desc,
			struct bau_control *bcp,
			struct bau_control *hmaster, struct ptc_stats *stat)
{
	udelay(bcp->plugged_delay);
	bcp->plugged_tries++;

	if (bcp->plugged_tries >= bcp->plugsb4reset) {
		bcp->plugged_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_plug++;
	}
}

static void destination_timeout(struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)
{
	hmaster->max_concurr = 1;
	bcp->timeout_tries++;
	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
		bcp->timeout_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_timeout++;
	}
}

/*
 * Stop all cpus on a uvhub from using the BAU for a period of time.
 * This is reversed by check_enable.
 */
static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;
	struct bau_control *hmaster;
	cycles_t tm1;

	hmaster = bcp->uvhub_master;
	spin_lock(&hmaster->disable_lock);
	if (!bcp->baudisabled) {
		stat->s_bau_disabled++;
		tm1 = get_cycles();
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			if (tbcp->uvhub_master == hmaster) {
				tbcp->baudisabled = 1;
				tbcp->set_bau_on_time =
					tm1 + bcp->disabled_period;
			}
		}
	}
	spin_unlock(&hmaster->disable_lock);
}

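/* after enough consecutive completions, allow more concurrent sends (up to max_concurr_const) */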
static void count_max_concurr(int stat, struct bau_control *bcp,
			struct bau_control *hmaster)
{
	bcp->plugged_tries = 0;
	bcp->timeout_tries = 0;
	if (stat != FLUSH_COMPLETE)
		return;
	if (bcp->conseccompletes <= bcp->complete_threshold)
		return;
	if (hmaster->max_concurr >= hmaster->max_concurr_const)
		return;
	hmaster->max_concurr++;
}

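/*
 * Record the elapsed time of a send and, on congestion or too many
 * give-ups, disable the BAU for a period (see disable_for_period()).
 */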
static void record_send_stats(cycles_t time1, cycles_t time2,
		struct bau_control *bcp, struct ptc_stats *stat,
		int completion_status, int try)
{
	cycles_t elapsed;

	if (time2 > time1) {
		elapsed = time2 - time1;
		stat->s_time += elapsed;

		if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
			bcp->period_requests++;
			bcp->period_time += elapsed;
			if ((elapsed > congested_cycles) &&
			    (bcp->period_requests > bcp->cong_reps) &&
			    ((bcp->period_time / bcp->period_requests) >
							congested_cycles)) {
				stat->s_congested++;
				disable_for_period(bcp, stat);
			}
		}
	} else
		stat->s_requestor--;

	if (completion_status == FLUSH_COMPLETE && try > 1)
		stat->s_retriesok++;
	else if (completion_status == FLUSH_GIVEUP) {
		stat->s_giveup++;
		if (get_cycles() > bcp->period_end)
			bcp->period_giveups = 0;
		bcp->period_giveups++;
		if (bcp->period_giveups == 1)
			bcp->period_end = get_cycles() + bcp->disabled_period;
		if (bcp->period_giveups > bcp->giveup_limit) {
			disable_for_period(bcp, stat);
			stat->s_giveuplimit++;
		}
	}
}

848 | * Because of a uv1 hardware bug only a limited number of concurrent | |
849 | * requests can be made. | |
850 | */ | |
851 | static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat) | |
852 | { | |
853 | spinlock_t *lock = &hmaster->uvhub_lock; | |
854 | atomic_t *v; | |
855 | ||
856 | v = &hmaster->active_descriptor_count; | |
857 | if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) { | |
858 | stat->s_throttles++; | |
859 | do { | |
860 | cpu_relax(); | |
861 | } while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)); | |
862 | } | |
863 | } | |
864 | ||
865 | /* | |
866 | * Handle the completion status of a message send. | |
867 | */ | |
868 | static void handle_cmplt(int completion_status, struct bau_desc *bau_desc, | |
869 | struct bau_control *bcp, struct bau_control *hmaster, | |
870 | struct ptc_stats *stat) | |
871 | { | |
872 | if (completion_status == FLUSH_RETRY_PLUGGED) | |
873 | destination_plugged(bau_desc, bcp, hmaster, stat); | |
874 | else if (completion_status == FLUSH_RETRY_TIMEOUT) | |
875 | destination_timeout(bau_desc, bcp, hmaster, stat); | |
876 | } | |
877 | ||
/*
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to including
 * cpus that are on the local uvhub.
 *
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */
int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp,
	struct bau_desc *bau_desc)
{
	int seq_number = 0;
	int completion_stat = 0;
	int uv1 = 0;
	long try = 0;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *hmaster = bcp->uvhub_master;
	struct uv1_bau_msg_header *uv1_hdr = NULL;
	struct uv2_3_bau_msg_header *uv2_3_hdr = NULL;

	if (bcp->uvhub_version == 1) {
		uv1 = 1;
		uv1_throttle(hmaster, stat);
	}

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	time1 = get_cycles();
	if (uv1)
		uv1_hdr = &bau_desc->header.uv1_hdr;
	else
		/* uv2 and uv3 */
		uv2_3_hdr = &bau_desc->header.uv2_3_hdr;

	do {
		if (try == 0) {
			if (uv1)
				uv1_hdr->msg_type = MSG_REGULAR;
			else
				uv2_3_hdr->msg_type = MSG_REGULAR;
			seq_number = bcp->message_number++;
		} else {
			if (uv1)
				uv1_hdr->msg_type = MSG_RETRY;
			else
				uv2_3_hdr->msg_type = MSG_RETRY;
			stat->s_retry_messages++;
		}

		if (uv1)
			uv1_hdr->sequence = seq_number;
		else
			uv2_3_hdr->sequence = seq_number;
		index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
		bcp->send_message = get_cycles();

		write_mmr_activation(index);

		try++;
		completion_stat = wait_completion(bau_desc, bcp, try);

		handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);

		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
			bcp->ipi_attempts = 0;
			stat->s_overipilimit++;
			completion_stat = FLUSH_GIVEUP;
			break;
		}
		cpu_relax();
	} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
		 (completion_stat == FLUSH_RETRY_TIMEOUT));

	time2 = get_cycles();

	count_max_concurr(completion_stat, bcp, hmaster);

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	atomic_dec(&hmaster->active_descriptor_count);

	record_send_stats(time1, time2, bcp, stat, completion_stat, try);

	if (completion_stat == FLUSH_GIVEUP)
		/* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
		return 1;
	return 0;
}

/*
 * The BAU is disabled for this uvhub. When the disabled time period has
 * expired re-enable it.
 * Return 0 if it is re-enabled for all cpus on this uvhub.
 */
static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;
	struct bau_control *hmaster;

	hmaster = bcp->uvhub_master;
	spin_lock(&hmaster->disable_lock);
	if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
		stat->s_bau_reenabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			if (tbcp->uvhub_master == hmaster) {
				tbcp->baudisabled = 0;
				tbcp->period_requests = 0;
				tbcp->period_time = 0;
				tbcp->period_giveups = 0;
			}
		}
		spin_unlock(&hmaster->disable_lock);
		return 0;
	}
	spin_unlock(&hmaster->disable_lock);
	return -1;
}

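/* update the source-side target counters (cpus and uvhubs) for one shootdown request */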
static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
				int remotes, struct bau_desc *bau_desc)
{
	stat->s_requestor++;
	stat->s_ntargcpu += remotes + locals;
	stat->s_ntargremotes += remotes;
	stat->s_ntarglocals += locals;

	/* uvhub statistics */
	hubs = bau_uvhub_weight(&bau_desc->distribution);
	if (locals) {
		stat->s_ntarglocaluvhub++;
		stat->s_ntargremoteuvhub += (hubs - 1);
	} else
		stat->s_ntargremoteuvhub += hubs;

	stat->s_ntarguvhub += hubs;

	if (hubs >= 16)
		stat->s_ntarguvhub16++;
	else if (hubs >= 8)
		stat->s_ntarguvhub8++;
	else if (hubs >= 4)
		stat->s_ntarguvhub4++;
	else if (hubs >= 2)
		stat->s_ntarguvhub2++;
	else
		stat->s_ntarguvhub1++;
}

1034 | ||
1035 | /* | |
1036 | * Translate a cpu mask to the uvhub distribution mask in the BAU | |
1037 | * activation descriptor. | |
1038 | */ | |
1039 | static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp, | |
1040 | struct bau_desc *bau_desc, int *localsp, int *remotesp) | |
1041 | { | |
1042 | int cpu; | |
1043 | int pnode; | |
1044 | int cnt = 0; | |
1045 | struct hub_and_pnode *hpp; | |
1046 | ||
1047 | for_each_cpu(cpu, flush_mask) { | |
1048 | /* | |
1049 | * The distribution vector is a bit map of pnodes, relative | |
1050 | * to the partition base pnode (and the partition base nasid | |
1051 | * in the header). | |
1052 | * Translate cpu to pnode and hub using a local memory array. | |
1053 | */ | |
1054 | hpp = &bcp->socket_master->thp[cpu]; | |
1055 | pnode = hpp->pnode - bcp->partition_base_pnode; | |
1056 | bau_uvhub_set(pnode, &bau_desc->distribution); | |
1057 | cnt++; | |
1058 | if (hpp->uvhub == bcp->uvhub) | |
1059 | (*localsp)++; | |
1060 | else | |
1061 | (*remotesp)++; | |
b194b120 | 1062 | } |
f073cc8f CW |
1063 | if (!cnt) |
1064 | return 1; | |
450a007e | 1065 | return 0; |
b194b120 CW |
1066 | } |
1067 | ||
/*
 * globally purge translation cache of a virtual address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @start: start virtual address to be removed from TLB
 * @end: end virtual address to be removed from TLB
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct.  This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done.  The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long end,
						unsigned int cpu)
{
	int locals = 0;
	int remotes = 0;
	int hubs = 0;
	struct bau_desc *bau_desc;
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;
	unsigned long descriptor_status;
	unsigned long status;

	bcp = &per_cpu(bau_control, cpu);

	if (bcp->nobau)
		return cpumask;

	stat = bcp->statp;
	stat->s_enters++;

	if (bcp->busy) {
		descriptor_status =
			read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
		status = ((descriptor_status >> (bcp->uvhub_cpu *
			UV_ACT_STATUS_SIZE)) & UV_ACT_STATUS_MASK) << 1;
		if (status == UV2H_DESC_BUSY)
			return cpumask;
		bcp->busy = 0;
	}

	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
		if (check_enable(bcp, stat)) {
			stat->s_ipifordisabled++;
			return cpumask;
		}
	}

	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
	 * cpu mask.  All cpus are converted to uvhubs and copied to the
	 * activation descriptor.
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
	/* don't actually do a shootdown of the local cpu */
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

	if (cpumask_test_cpu(cpu, cpumask))
		stat->s_ntargself++;

	bau_desc = bcp->descriptor_base;
	bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu);
	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
	if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
		return NULL;

	record_send_statistics(stat, locals, hubs, remotes, bau_desc);

	if (!end || (end - start) <= PAGE_SIZE)
		bau_desc->payload.address = start;
	else
		bau_desc->payload.address = TLB_FLUSH_ALL;
	bau_desc->payload.sending_cpu = cpu;
	/*
	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
	 * or 1 if it gave up and the original cpumask should be returned.
	 */
	if (!uv_flush_send_and_wait(flush_mask, bcp, bau_desc))
		return NULL;
	else
		return cpumask;
}

/*
 * Search the message queue for any 'other' unprocessed message with the
 * same software acknowledge resource bit vector as the 'msg' message.
 */
struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
					   struct bau_control *bcp)
{
	struct bau_pq_entry *msg_next = msg + 1;
	unsigned char swack_vec = msg->swack_vec;

	if (msg_next > bcp->queue_last)
		msg_next = bcp->queue_first;
	while (msg_next != msg) {
		if ((msg_next->canceled == 0) && (msg_next->replied_to == 0) &&
				(msg_next->swack_vec == swack_vec))
			return msg_next;
		msg_next++;
		if (msg_next > bcp->queue_last)
			msg_next = bcp->queue_first;
	}
	return NULL;
}

/*
 * UV2 needs to work around a bug in which an arriving message has not
 * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
 * Such a message must be ignored.
 */
void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
{
	unsigned long mmr_image;
	unsigned char swack_vec;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *other_msg;

	mmr_image = read_mmr_sw_ack();
	swack_vec = msg->swack_vec;

	if ((swack_vec & mmr_image) == 0) {
		/*
		 * This message was assigned a swack resource, but no
		 * reserved acknowledgment is pending.
		 * The bug has prevented this message from setting the MMR.
		 */
		/*
		 * Some message has set the MMR 'pending' bit; it might have
		 * been another message.  Look for that message.
		 */
		other_msg = find_another_by_swack(msg, bcp);
		if (other_msg) {
			/*
			 * There is another. Process this one but do not
			 * ack it.
			 */
			bau_process_message(mdp, bcp, 0);
			/*
			 * Let the natural processing of that other message
			 * acknowledge it. Don't get the processing of sw_ack's
			 * out of order.
			 */
			return;
		}
	}

	/*
	 * Either the MMR shows this one pending a reply or there is no
	 * other message using this sw_ack, so it is safe to acknowledge it.
	 */
	bau_process_message(mdp, bcp, 1);

	return;
}

/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
 * We received a broadcast assist message.
 *
 * Interrupts are disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this hub get this interrupt.
 * The last one to see it does the software ack.
 * (the resource will not be freed until noninterruptible cpus see this
 *  interrupt; hardware may timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	int count = 0;
	cycles_t time_start;
	struct bau_pq_entry *msg;
	struct bau_control *bcp;
	struct ptc_stats *stat;
	struct msg_desc msgdesc;

	ack_APIC_irq();
	time_start = get_cycles();

	bcp = &per_cpu(bau_control, smp_processor_id());
	stat = bcp->statp;

	msgdesc.queue_first = bcp->queue_first;
	msgdesc.queue_last = bcp->queue_last;

	msg = bcp->bau_msg_head;
	while (msg->swack_vec) {
		count++;

		msgdesc.msg_slot = msg - msgdesc.queue_first;
		msgdesc.msg = msg;
		if (bcp->uvhub_version == 2)
			process_uv2_message(&msgdesc, bcp);
		else
			/* no error workaround for uv1 or uv3 */
			bau_process_message(&msgdesc, bcp, 1);

		msg++;
		if (msg > msgdesc.queue_last)
			msg = msgdesc.queue_first;
		bcp->bau_msg_head = msg;
	}
	stat->d_time += (get_cycles() - time_start);
	if (!count)
		stat->d_nomsg++;
	else if (count > 1)
		stat->d_multmsg++;
}

/*
 * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
 * shootdown message timeouts enabled.  The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */
static void __init enable_timeouts(void)
{
	int uvhub;
	int nuvhubs;
	int pnode;
	unsigned long mmr_image;

	nuvhubs = uv_num_possible_blades();

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (!uv_blade_nr_possible_cpus(uvhub))
			continue;

		pnode = uv_blade_to_pnode(uvhub);
		mmr_image = read_mmr_misc_control(pnode);
		/*
		 * Set the timeout period and then lock it in, in three
		 * steps; captures and locks in the period.
		 *
		 * To program the period, the SOFT_ACK_MODE must be off.
		 */
		mmr_image &= ~(1L << SOFTACK_MSHIFT);
		write_mmr_misc_control(pnode, mmr_image);
		/*
		 * Set the 4-bit period.
		 */
		mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
		mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
		write_mmr_misc_control(pnode, mmr_image);
		/*
		 * UV1:
		 * Subsequent reversals of the timebase bit (3) cause an
		 * immediate timeout of one or all INTD resources as
		 * indicated in bits 2:0 (7 causes all of them to timeout).
		 */
		mmr_image |= (1L << SOFTACK_MSHIFT);
		if (is_uv2_hub()) {
			/* do not touch the legacy mode bit */
			/* hw bug workaround; do not use extended status */
			mmr_image &= ~(1L << UV2_EXT_SHFT);
		} else if (is_uv3_hub()) {
			mmr_image &= ~(1L << PREFETCH_HINT_SHFT);
			mmr_image |= (1L << SB_STATUS_SHFT);
		}
		write_mmr_misc_control(pnode, mmr_image);
	}
}

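/* seq_file iterator for /proc/sgi_uv/ptc_statistics: one entry per possible cpu */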
static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void ptc_seq_stop(struct seq_file *file, void *data)
{
}

/*
 * Display the statistics thru /proc/sgi_uv/ptc_statistics
 * 'data' points to the cpu number
 * Note: see the descriptions in stat_description[].
 */
static int ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	struct bau_control *bcp;
	int cpu;

	cpu = *(loff_t *)data;
	if (!cpu) {
		seq_puts(file,
			 "# cpu bauoff sent stime self locals remotes ncpus localhub ");
		seq_puts(file, "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
		seq_puts(file,
			 "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries ");
		seq_puts(file,
			 "rok resetp resett giveup sto bz throt disable ");
		seq_puts(file,
			 "enable wars warshw warwaits enters ipidis plugged ");
		seq_puts(file,
			 "ipiover glim cong swack recv rtime all one mult ");
		seq_puts(file, "none retry canc nocan reset rcan\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		bcp = &per_cpu(bau_control, cpu);
		if (bcp->nobau) {
			seq_printf(file, "cpu %d bau disabled\n", cpu);
			return 0;
		}
		stat = bcp->statp;
		/* source side statistics */
		seq_printf(file,
			"cpu %d %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, bcp->nobau, stat->s_requestor,
			   cycles_2_us(stat->s_time),
			   stat->s_ntargself, stat->s_ntarglocals,
			   stat->s_ntargremotes, stat->s_ntargcpu,
			   stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
			   stat->s_ntarguvhub, stat->s_ntarguvhub16);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
			   stat->s_ntarguvhub8, stat->s_ntarguvhub4,
			   stat->s_ntarguvhub2, stat->s_ntarguvhub1,
			   stat->s_dtimeout, stat->s_strongnacks);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
			   stat->s_retry_messages, stat->s_retriesok,
			   stat->s_resets_plug, stat->s_resets_timeout,
			   stat->s_giveup, stat->s_stimeout,
			   stat->s_busy, stat->s_throttles);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   stat->s_bau_disabled, stat->s_bau_reenabled,
			   stat->s_uv2_wars, stat->s_uv2_wars_hw,
			   stat->s_uv2_war_waits, stat->s_enters,
			   stat->s_ipifordisabled, stat->s_plugged,
			   stat->s_overipilimit, stat->s_giveuplimit,
			   stat->s_congested);

		/* destination side statistics */
		seq_printf(file,
			"%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
			   read_gmmr_sw_ack(uv_cpu_to_pnode(cpu)),
			   stat->d_requestee, cycles_2_us(stat->d_time),
			   stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
			   stat->d_nomsg, stat->d_retries, stat->d_canceled,
			   stat->d_nocanceled, stat->d_resets,
			   stat->d_rcanceled);
	}
	return 0;
}

e8e5e8a8 CW |
1444 | /* |
1445 | * Display the tunables through debugfs |
1446 | */ | |
1447 | static ssize_t tunables_read(struct file *file, char __user *userbuf, | |
f073cc8f | 1448 | size_t count, loff_t *ppos) |
e8e5e8a8 | 1449 | { |
b365a85c | 1450 | char *buf; |
e8e5e8a8 CW |
1451 | int ret; |
1452 | ||
8b6e511e CW |
1453 | buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d %d\n", |
1454 | "max_concur plugged_delay plugsb4reset timeoutsb4reset", | |
1455 | "ipi_reset_limit complete_threshold congested_response_us", | |
1456 | "congested_reps disabled_period giveup_limit", | |
f073cc8f | 1457 | max_concurr, plugged_delay, plugsb4reset, |
e8e5e8a8 | 1458 | timeoutsb4reset, ipi_reset_limit, complete_threshold, |
8b6e511e CW |
1459 | congested_respns_us, congested_reps, disabled_period, |
1460 | giveup_limit); | |
e8e5e8a8 | 1461 | |
b365a85c DC |
1462 | if (!buf) |
1463 | return -ENOMEM; | |
1464 | ||
1465 | ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf)); | |
1466 | kfree(buf); | |
1467 | return ret; | |
e8e5e8a8 CW |
1468 | } |
1469 | ||
1812924b | 1470 | /* |
f073cc8f CW |
1471 | * Handle a write to /proc/sgi_uv/ptc_statistics |
1472 | * -1: reset the statistics |
1812924b | 1473 | *  0: display meaning of the statistics |
1812924b | 1474 | */ |
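/*
 * Illustrative usage from a shell (a sketch; the writes are parsed by
 * ptc_proc_write() below, reads go through ptc_seq_show() above, and
 * "on"/"off" toggle use of the BAU at run time):
 *
 *   cat /proc/sgi_uv/ptc_statistics              # dump the per-cpu counters
 *   echo "-1"  > /proc/sgi_uv/ptc_statistics     # zero the counters
 *   echo "0"   > /proc/sgi_uv/ptc_statistics     # log the field descriptions
 *   echo "on"  > /proc/sgi_uv/ptc_statistics     # enable the BAU
 *   echo "off" > /proc/sgi_uv/ptc_statistics     # disable the BAU
 */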
f073cc8f CW |
1475 | static ssize_t ptc_proc_write(struct file *file, const char __user *user, |
1476 | size_t count, loff_t *data) | |
1812924b | 1477 | { |
b8f7fb13 | 1478 | int cpu; |
f073cc8f CW |
1479 | int i; |
1480 | int elements; | |
b8f7fb13 | 1481 | long input_arg; |
1812924b | 1482 | char optstr[64]; |
b8f7fb13 | 1483 | struct ptc_stats *stat; |
1812924b | 1484 | |
e7eb8726 | 1485 | if (count == 0 || count > sizeof(optstr)) |
cef53278 | 1486 | return -EINVAL; |
1812924b CW |
1487 | if (copy_from_user(optstr, user, count)) |
1488 | return -EFAULT; | |
1489 | optstr[count - 1] = '\0'; | |
f073cc8f | 1490 | |
26ef8577 CW |
1491 | if (!strcmp(optstr, "on")) { |
1492 | set_bau_on(); | |
1493 | return count; | |
1494 | } else if (!strcmp(optstr, "off")) { | |
1495 | set_bau_off(); | |
1496 | return count; | |
1497 | } | |
1498 | ||
164109e3 | 1499 | if (kstrtol(optstr, 10, &input_arg) < 0) { |
1812924b CW |
1500 | printk(KERN_DEBUG "%s is invalid\n", optstr); |
1501 | return -EINVAL; | |
1502 | } | |
1503 | ||
b8f7fb13 | 1504 | if (input_arg == 0) { |
64441745 | 1505 | elements = ARRAY_SIZE(stat_description); |
1812924b | 1506 | printk(KERN_DEBUG "# cpu: cpu number\n"); |
b8f7fb13 | 1507 | printk(KERN_DEBUG "Sender statistics:\n"); |
f073cc8f CW |
1508 | for (i = 0; i < elements; i++) |
1509 | printk(KERN_DEBUG "%s\n", stat_description[i]); | |
b8f7fb13 CW |
1510 | } else if (input_arg == -1) { |
1511 | for_each_present_cpu(cpu) { | |
1512 | stat = &per_cpu(ptcstats, cpu); | |
1513 | memset(stat, 0, sizeof(struct ptc_stats)); | |
1514 | } | |
e8e5e8a8 CW |
1515 | } |
1516 | ||
1517 | return count; | |
1518 | } | |
1519 | ||
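/*
 * A minimal unsigned decimal parser: accumulate digits and stop at the
 * first non-digit.
 */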
1520 | static int local_atoi(const char *name) | |
1521 | { | |
1522 | int val = 0; | |
1523 | ||
1524 | for (;; name++) { | |
1525 | switch (*name) { | |
1526 | case '0' ... '9': | |
1527 | val = 10*val+(*name-'0'); | |
1528 | break; | |
1529 | default: | |
1530 | return val; | |
b8f7fb13 | 1531 | } |
1812924b | 1532 | } |
e8e5e8a8 CW |
1533 | } |
1534 | ||
1535 | /* | |
f073cc8f CW |
1536 | * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables. |
1537 | * Zero values reset them to defaults. | |
e8e5e8a8 | 1538 | */ |
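/*
 * Illustrative write format (a sketch, not the only valid input): ten
 * whitespace-separated values, in the order printed by tunables_read().
 * For example
 *   echo "0 0 0 0 0 0 0 0 0 0" > /sys/kernel/debug/sgi_uv/bau_tunables
 * restores every tunable to its compiled-in default.
 */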
f073cc8f CW |
1539 | static int parse_tunables_write(struct bau_control *bcp, char *instr, |
1540 | int count) | |
e8e5e8a8 | 1541 | { |
e8e5e8a8 CW |
1542 | char *p; |
1543 | char *q; | |
f073cc8f CW |
1544 | int cnt = 0; |
1545 | int val; | |
64441745 | 1546 | int e = ARRAY_SIZE(tunables); |
e8e5e8a8 | 1547 | |
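/* first pass: just count the whitespace-separated values */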
e8e5e8a8 CW |
1548 | p = instr + strspn(instr, WHITESPACE); |
1549 | q = p; | |
1550 | for (; *p; p = q + strspn(q, WHITESPACE)) { | |
1551 | q = p + strcspn(p, WHITESPACE); | |
1552 | cnt++; | |
1553 | if (q == p) | |
1554 | break; | |
1555 | } | |
f073cc8f CW |
1556 | if (cnt != e) { |
1557 | printk(KERN_INFO "bau tunable error: should be %d values\n", e); | |
e8e5e8a8 CW |
1558 | return -EINVAL; |
1559 | } | |
1560 | ||
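/*
 * second pass: apply each value; a value of 0 restores that
 * tunable's default
 */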
1561 | p = instr + strspn(instr, WHITESPACE); | |
1562 | q = p; | |
1563 | for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) { | |
1564 | q = p + strcspn(p, WHITESPACE); | |
1565 | val = local_atoi(p); | |
1566 | switch (cnt) { | |
1567 | case 0: | |
1568 | if (val == 0) { | |
f073cc8f CW |
1569 | max_concurr = MAX_BAU_CONCURRENT; |
1570 | max_concurr_const = MAX_BAU_CONCURRENT; | |
e8e5e8a8 CW |
1571 | continue; |
1572 | } | |
e8e5e8a8 CW |
1573 | if (val < 1 || val > bcp->cpus_in_uvhub) { |
1574 | printk(KERN_DEBUG | |
1575 | "Error: BAU max concurrent %d is invalid\n", | |
1576 | val); | |
1577 | return -EINVAL; | |
1578 | } | |
f073cc8f CW |
1579 | max_concurr = val; |
1580 | max_concurr_const = val; | |
e8e5e8a8 | 1581 | continue; |
f073cc8f | 1582 | default: |
e8e5e8a8 | 1583 | if (val == 0) |
f073cc8f | 1584 | *tunables[cnt].tunp = tunables[cnt].deflt; |
e8e5e8a8 | 1585 | else |
f073cc8f | 1586 | *tunables[cnt].tunp = val; |
e8e5e8a8 CW |
1587 | continue; |
1588 | } | |
1589 | if (q == p) | |
1590 | break; | |
1591 | } | |
f073cc8f CW |
1592 | return 0; |
1593 | } | |
1594 | ||
1595 | /* | |
1596 | * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables) | |
1597 | */ | |
1598 | static ssize_t tunables_write(struct file *file, const char __user *user, | |
1599 | size_t count, loff_t *data) | |
1600 | { | |
1601 | int cpu; | |
1602 | int ret; | |
1603 | char instr[100]; | |
1604 | struct bau_control *bcp; | |
1605 | ||
1606 | if (count == 0 || count > sizeof(instr)-1) | |
1607 | return -EINVAL; | |
1608 | if (copy_from_user(instr, user, count)) | |
1609 | return -EFAULT; | |
1610 | ||
1611 | instr[count] = '\0'; | |
1612 | ||
00b30cf0 | 1613 | cpu = get_cpu(); |
1614 | bcp = &per_cpu(bau_control, cpu); | |
f073cc8f | 1615 | ret = parse_tunables_write(bcp, instr, count); |
00b30cf0 | 1616 | put_cpu(); |
f073cc8f CW |
1617 | if (ret) |
1618 | return ret; | |
1619 | ||
e8e5e8a8 CW |
1620 | for_each_present_cpu(cpu) { |
1621 | bcp = &per_cpu(bau_control, cpu); | |
f073cc8f CW |
1622 | bcp->max_concurr = max_concurr; |
1623 | bcp->max_concurr_const = max_concurr; | |
1624 | bcp->plugged_delay = plugged_delay; | |
1625 | bcp->plugsb4reset = plugsb4reset; | |
1626 | bcp->timeoutsb4reset = timeoutsb4reset; | |
1627 | bcp->ipi_reset_limit = ipi_reset_limit; | |
1628 | bcp->complete_threshold = complete_threshold; | |
1629 | bcp->cong_response_us = congested_respns_us; | |
1630 | bcp->cong_reps = congested_reps; | |
8b6e511e CW |
1631 | bcp->disabled_period = sec_2_cycles(disabled_period); |
1632 | bcp->giveup_limit = giveup_limit; | |
e8e5e8a8 | 1633 | } |
1812924b CW |
1634 | return count; |
1635 | } | |
1636 | ||
1637 | static const struct seq_operations uv_ptc_seq_ops = { | |
f073cc8f CW |
1638 | .start = ptc_seq_start, |
1639 | .next = ptc_seq_next, | |
1640 | .stop = ptc_seq_stop, | |
1641 | .show = ptc_seq_show | |
1812924b CW |
1642 | }; |
1643 | ||
f073cc8f | 1644 | static int ptc_proc_open(struct inode *inode, struct file *file) |
1812924b CW |
1645 | { |
1646 | return seq_open(file, &uv_ptc_seq_ops); | |
1647 | } | |
1648 | ||
e8e5e8a8 CW |
1649 | static int tunables_open(struct inode *inode, struct file *file) |
1650 | { | |
1651 | return 0; | |
1652 | } | |
1653 | ||
1812924b | 1654 | static const struct file_operations proc_uv_ptc_operations = { |
f073cc8f | 1655 | .open = ptc_proc_open, |
b194b120 | 1656 | .read = seq_read, |
f073cc8f | 1657 | .write = ptc_proc_write, |
b194b120 CW |
1658 | .llseek = seq_lseek, |
1659 | .release = seq_release, | |
1812924b CW |
1660 | }; |
1661 | ||
e8e5e8a8 CW |
1662 | static const struct file_operations tunables_fops = { |
1663 | .open = tunables_open, | |
1664 | .read = tunables_read, | |
1665 | .write = tunables_write, | |
6038f373 | 1666 | .llseek = default_llseek, |
e8e5e8a8 CW |
1667 | }; |
1668 | ||
b194b120 | 1669 | static int __init uv_ptc_init(void) |
1812924b | 1670 | { |
b194b120 | 1671 | struct proc_dir_entry *proc_uv_ptc; |
1812924b CW |
1672 | |
1673 | if (!is_uv_system()) | |
1674 | return 0; | |
1675 | ||
10f02d11 AD |
1676 | proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL, |
1677 | &proc_uv_ptc_operations); | |
1812924b CW |
1678 | if (!proc_uv_ptc) { |
1679 | printk(KERN_ERR "unable to create %s proc entry\n", | |
1680 | UV_PTC_BASENAME); | |
1681 | return -EINVAL; | |
1682 | } | |
e8e5e8a8 CW |
1683 | |
1684 | tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL); | |
1685 | if (!tunables_dir) { | |
1686 | printk(KERN_ERR "unable to create debugfs directory %s\n", | |
1687 | UV_BAU_TUNABLES_DIR); | |
1688 | return -EINVAL; | |
1689 | } | |
1690 | tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600, | |
f073cc8f | 1691 | tunables_dir, NULL, &tunables_fops); |
e8e5e8a8 CW |
1692 | if (!tunables_file) { |
1693 | printk(KERN_ERR "unable to create debugfs file %s\n", | |
1694 | UV_BAU_TUNABLES_FILE); | |
1695 | return -EINVAL; | |
1696 | } | |
1812924b CW |
1697 | return 0; |
1698 | } | |
1699 | ||
1812924b | 1700 | /* |
77ed23f8 | 1701 | * Initialize the sending side's sending buffers. |
1812924b | 1702 | */ |
f073cc8f | 1703 | static void activation_descriptor_init(int node, int pnode, int base_pnode) |
1812924b CW |
1704 | { |
1705 | int i; | |
b8f7fb13 | 1706 | int cpu; |
da87c937 | 1707 | int uv1 = 0; |
6a469e46 | 1708 | unsigned long gpa; |
1812924b | 1709 | unsigned long m; |
b194b120 | 1710 | unsigned long n; |
f073cc8f | 1711 | size_t dsize; |
b8f7fb13 CW |
1712 | struct bau_desc *bau_desc; |
1713 | struct bau_desc *bd2; | |
da87c937 | 1714 | struct uv1_bau_msg_header *uv1_hdr; |
a26fd719 | 1715 | struct uv2_3_bau_msg_header *uv2_3_hdr; |
b8f7fb13 | 1716 | struct bau_control *bcp; |
b194b120 | 1717 | |
0e2595cd | 1718 | /* |
f073cc8f CW |
1719 | * each bau_desc is 64 bytes; ITEMS_PER_DESC (8) descriptors are |
1720 | * allocated for each of the ADP_SZ cpu slots on the uvhub |
0e2595cd | 1721 | */ |
f073cc8f CW |
1722 | dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC; |
1723 | bau_desc = kmalloc_node(dsize, GFP_KERNEL, node); | |
b8f7fb13 | 1724 | BUG_ON(!bau_desc); |
b4c286e6 | 1725 | |
6a469e46 JS |
1726 | gpa = uv_gpa(bau_desc); |
1727 | n = uv_gpa_to_gnode(gpa); | |
1728 | m = uv_gpa_to_offset(gpa); | |
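/* n is the gnode and m the offset of the descriptor memory */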
da87c937 CW |
1729 | if (is_uv1_hub()) |
1730 | uv1 = 1; | |
b4c286e6 | 1731 | |
77ed23f8 | 1732 | /* the 14-bit pnode */ |
f073cc8f | 1733 | write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m)); |
0e2595cd | 1734 | /* |
f073cc8f | 1735 | * Initializing all 8 (ITEMS_PER_DESC) descriptors for each |
0e2595cd | 1736 | * cpu even though we only use the first one; one descriptor can |
b8f7fb13 | 1737 | * describe a broadcast to 256 uv hubs. |
0e2595cd | 1738 | */ |
f073cc8f | 1739 | for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) { |
b8f7fb13 | 1740 | memset(bd2, 0, sizeof(struct bau_desc)); |
da87c937 CW |
1741 | if (uv1) { |
1742 | uv1_hdr = &bd2->header.uv1_hdr; | |
1743 | uv1_hdr->swack_flag = 1; | |
1744 | /* | |
1745 | * The base_dest_nasid set in the message header | |
1746 | * is the nasid of the first uvhub in the partition. | |
1747 | * The bit map will indicate destination pnode numbers | |
1748 | * relative to that base. They may not be consecutive | |
1749 | * if nasid striding is being used. | |
1750 | */ | |
1751 | uv1_hdr->base_dest_nasid = | |
1752 | UV_PNODE_TO_NASID(base_pnode); | |
1753 | uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID; | |
1754 | uv1_hdr->command = UV_NET_ENDPOINT_INTD; | |
1755 | uv1_hdr->int_both = 1; | |
1756 | /* | |
1757 | * all others need to be set to zero: | |
1758 | * fairness chaining multilevel count replied_to | |
1759 | */ | |
1760 | } else { | |
8b6e511e | 1761 | /* |
a26fd719 | 1762 | * BIOS uses legacy mode, but uv2 and uv3 hardware always |
8b6e511e CW |
1763 | * uses native mode for selective broadcasts. |
1764 | */ | |
a26fd719 CW |
1765 | uv2_3_hdr = &bd2->header.uv2_3_hdr; |
1766 | uv2_3_hdr->swack_flag = 1; | |
1767 | uv2_3_hdr->base_dest_nasid = | |
da87c937 | 1768 | UV_PNODE_TO_NASID(base_pnode); |
a26fd719 CW |
1769 | uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID; |
1770 | uv2_3_hdr->command = UV_NET_ENDPOINT_INTD; | |
da87c937 | 1771 | } |
b194b120 | 1772 | } |
b8f7fb13 CW |
1773 | for_each_present_cpu(cpu) { |
1774 | if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu))) | |
1775 | continue; | |
1776 | bcp = &per_cpu(bau_control, cpu); | |
1777 | bcp->descriptor_base = bau_desc; | |
1778 | } | |
b194b120 CW |
1779 | } |
1780 | ||
1781 | /* | |
1782 | * initialize the destination side's receiving buffers | |
b8f7fb13 CW |
1783 | * entered for each uvhub in the partition |
1784 | * - node is the first node (kernel memory notion) on the uvhub |
1785 | * - pnode is the uvhub's physical identifier | |
b194b120 | 1786 | */ |
f073cc8f | 1787 | static void pq_init(int node, int pnode) |
b194b120 | 1788 | { |
b8f7fb13 | 1789 | int cpu; |
f073cc8f | 1790 | size_t plsize; |
b4c286e6 | 1791 | char *cp; |
f073cc8f CW |
1792 | void *vp; |
1793 | unsigned long pn; | |
1794 | unsigned long first; | |
1795 | unsigned long pn_first; | |
1796 | unsigned long last; | |
1797 | struct bau_pq_entry *pqp; | |
b8f7fb13 | 1798 | struct bau_control *bcp; |
1812924b | 1799 | |
f073cc8f CW |
1800 | plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry); |
1801 | vp = kmalloc_node(plsize, GFP_KERNEL, node); | |
1802 | pqp = (struct bau_pq_entry *)vp; | |
dc163a41 | 1803 | BUG_ON(!pqp); |
b4c286e6 | 1804 | |
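/*
 * Round the queue base up to the next 32-byte boundary (the extra
 * DEST_Q_SIZE + 1 entry allocated above provides the needed slack).
 */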
b194b120 | 1805 | cp = (char *)pqp + 31; |
f073cc8f | 1806 | pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5); |
b8f7fb13 CW |
1807 | |
1808 | for_each_present_cpu(cpu) { | |
1809 | if (pnode != uv_cpu_to_pnode(cpu)) | |
1810 | continue; | |
1811 | /* for every cpu on this pnode: */ | |
1812 | bcp = &per_cpu(bau_control, cpu); | |
f073cc8f CW |
1813 | bcp->queue_first = pqp; |
1814 | bcp->bau_msg_head = pqp; | |
1815 | bcp->queue_last = pqp + (DEST_Q_SIZE - 1); | |
b8f7fb13 | 1816 | } |
4ea3c51d | 1817 | /* |
6a469e46 | 1818 | * need the gnode of the node where the memory was actually allocated |
4ea3c51d | 1819 | */ |
6a469e46 | 1820 | pn = uv_gpa_to_gnode(uv_gpa(pqp)); |
f073cc8f CW |
1821 | first = uv_physnodeaddr(pqp); |
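/*
 * The payload-queue "first" MMR is given the gnode in its upper bits
 * and the node-relative queue address in its lower bits.
 */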
1822 | pn_first = ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | first; | |
1823 | last = uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1)); | |
1824 | write_mmr_payload_first(pnode, pn_first); | |
1825 | write_mmr_payload_tail(pnode, first); | |
1826 | write_mmr_payload_last(pnode, last); | |
c5d35d39 | 1827 | write_gmmr_sw_ack(pnode, 0xffffUL); |
f073cc8f | 1828 | |
b8f7fb13 | 1829 | /* in effect, all msg_type fields are set to MSG_NOOP */ |
f073cc8f | 1830 | memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE); |
b194b120 | 1831 | } |
1812924b | 1832 | |
b194b120 | 1833 | /* |
b8f7fb13 | 1834 | * Initialization of each UV hub's structures |
b194b120 | 1835 | */ |
f073cc8f | 1836 | static void __init init_uvhub(int uvhub, int vector, int base_pnode) |
b194b120 | 1837 | { |
9674f35b | 1838 | int node; |
b194b120 | 1839 | int pnode; |
b194b120 | 1840 | unsigned long apicid; |
b8f7fb13 CW |
1841 | |
1842 | node = uvhub_to_first_node(uvhub); | |
1843 | pnode = uv_blade_to_pnode(uvhub); | |
f073cc8f CW |
1844 | |
1845 | activation_descriptor_init(node, pnode, base_pnode); | |
1846 | ||
1847 | pq_init(node, pnode); | |
b194b120 | 1848 | /* |
77ed23f8 CW |
1849 | * The initialization below can't be in firmware because the |
1850 | * messaging IRQ will be determined by the OS. | |
b194b120 | 1851 | */ |
8191c9f6 | 1852 | apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits; |
f073cc8f | 1853 | write_mmr_data_config(pnode, ((apicid << 32) | vector)); |
b8f7fb13 CW |
1854 | } |
1855 | ||
12a6611f CW |
1856 | /* |
1857 | * We will set BAU_MISC_CONTROL with a timeout period. | |
1858 | * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT. | |
f073cc8f | 1859 | * So the destination timeout period has to be calculated from them. |
12a6611f | 1860 | */ |
f073cc8f | 1861 | static int calculate_destination_timeout(void) |
12a6611f CW |
1862 | { |
1863 | unsigned long mmr_image; | |
1864 | int mult1; | |
1865 | int mult2; | |
1866 | int index; | |
1867 | int base; | |
1868 | int ret; | |
1869 | unsigned long ts_ns; | |
1870 | ||
2a919596 | 1871 | if (is_uv1_hub()) { |
f073cc8f | 1872 | mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK; |
2a919596 JS |
1873 | mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL); |
1874 | index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK; | |
1875 | mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT); | |
1876 | mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK; | |
11cab711 CW |
1877 | ts_ns = timeout_base_ns[index]; |
1878 | ts_ns *= (mult1 * mult2); | |
2a919596 JS |
1879 | ret = ts_ns / 1000; |
1880 | } else { | |
a26fd719 | 1881 | /* same destination timeout for uv2 and uv3 */ |
d059f9fa CW |
1882 | /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */ |
1883 | mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL); | |
2a919596 | 1884 | mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT; |
f073cc8f | 1885 | if (mmr_image & (1L << UV2_ACK_UNITS_SHFT)) |
d059f9fa | 1886 | base = 80; |
2a919596 | 1887 | else |
d059f9fa CW |
1888 | base = 10; |
1889 | mult1 = mmr_image & UV2_ACK_MASK; | |
2a919596 JS |
1890 | ret = mult1 * base; |
1891 | } | |
12a6611f CW |
1892 | return ret; |
1893 | } | |
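/*
 * For example, with illustrative register contents (not measured values):
 * timeout_base_ns[index] = 10240 and mult1 = mult2 = 4 give
 * ts_ns = 10240 * 16 = 163840, i.e. a destination timeout of ~163 usec
 * on the uv1 path above.
 */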
1894 | ||
f073cc8f CW |
1895 | static void __init init_per_cpu_tunables(void) |
1896 | { | |
1897 | int cpu; | |
1898 | struct bau_control *bcp; | |
1899 | ||
1900 | for_each_present_cpu(cpu) { | |
1901 | bcp = &per_cpu(bau_control, cpu); | |
1902 | bcp->baudisabled = 0; | |
26ef8577 | 1903 | if (nobau) |
1c532e00 | 1904 | bcp->nobau = true; |
f073cc8f CW |
1905 | bcp->statp = &per_cpu(ptcstats, cpu); |
1906 | /* time interval to catch a hardware stay-busy bug */ | |
1907 | bcp->timeout_interval = usec_2_cycles(2*timeout_us); | |
1908 | bcp->max_concurr = max_concurr; | |
1909 | bcp->max_concurr_const = max_concurr; | |
1910 | bcp->plugged_delay = plugged_delay; | |
1911 | bcp->plugsb4reset = plugsb4reset; | |
1912 | bcp->timeoutsb4reset = timeoutsb4reset; | |
1913 | bcp->ipi_reset_limit = ipi_reset_limit; | |
1914 | bcp->complete_threshold = complete_threshold; | |
1915 | bcp->cong_response_us = congested_respns_us; | |
1916 | bcp->cong_reps = congested_reps; | |
8b6e511e CW |
1917 | bcp->disabled_period = sec_2_cycles(disabled_period); |
1918 | bcp->giveup_limit = giveup_limit; | |
d2ebc71d CW |
1919 | spin_lock_init(&bcp->queue_lock); |
1920 | spin_lock_init(&bcp->uvhub_lock); | |
8b6e511e | 1921 | spin_lock_init(&bcp->disable_lock); |
f073cc8f CW |
1922 | } |
1923 | } | |
1924 | ||
b8f7fb13 | 1925 | /* |
f073cc8f | 1926 | * Scan all cpus to collect blade and socket summaries. |
b8f7fb13 | 1927 | */ |
f073cc8f CW |
1928 | static int __init get_cpu_topology(int base_pnode, |
1929 | struct uvhub_desc *uvhub_descs, | |
1930 | unsigned char *uvhub_mask) | |
b8f7fb13 | 1931 | { |
b8f7fb13 CW |
1932 | int cpu; |
1933 | int pnode; | |
1934 | int uvhub; | |
f073cc8f | 1935 | int socket; |
b8f7fb13 CW |
1936 | struct bau_control *bcp; |
1937 | struct uvhub_desc *bdp; | |
1938 | struct socket_desc *sdp; | |
b8f7fb13 | 1939 | |
b8f7fb13 CW |
1940 | for_each_present_cpu(cpu) { |
1941 | bcp = &per_cpu(bau_control, cpu); | |
f073cc8f | 1942 | |
b8f7fb13 | 1943 | memset(bcp, 0, sizeof(struct bau_control)); |
f073cc8f | 1944 | |
b8f7fb13 | 1945 | pnode = uv_cpu_hub_info(cpu)->pnode; |
f073cc8f | 1946 | if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) { |
77ed23f8 CW |
1947 | printk(KERN_EMERG |
1948 | "cpu %d pnode %d-%d beyond %d; BAU disabled\n", | |
f073cc8f | 1949 | cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE); |
77ed23f8 CW |
1950 | return 1; |
1951 | } | |
f073cc8f | 1952 | |
77ed23f8 | 1953 | bcp->osnode = cpu_to_node(cpu); |
f073cc8f CW |
1954 | bcp->partition_base_pnode = base_pnode; |
1955 | ||
b8f7fb13 | 1956 | uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; |
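/* mark this uvhub as present: one bit per uvhub in uvhub_mask */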
c4026cfd | 1957 | *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8)); |
b8f7fb13 | 1958 | bdp = &uvhub_descs[uvhub]; |
f073cc8f | 1959 | |
b8f7fb13 CW |
1960 | bdp->num_cpus++; |
1961 | bdp->uvhub = uvhub; | |
1962 | bdp->pnode = pnode; | |
f073cc8f | 1963 | |
a8328ee5 CW |
1964 | /* kludge: 'assuming' one node per socket, and assuming that |
1965 | disabling a socket just leaves a gap in node numbers */ | |
77ed23f8 | 1966 | socket = bcp->osnode & 1; |
a8328ee5 | 1967 | bdp->socket_mask |= (1 << socket); |
b8f7fb13 CW |
1968 | sdp = &bdp->socket[socket]; |
1969 | sdp->cpu_number[sdp->num_cpus] = cpu; | |
1970 | sdp->num_cpus++; | |
cfa60917 | 1971 | if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) { |
f073cc8f CW |
1972 | printk(KERN_EMERG "%d cpus per socket invalid\n", |
1973 | sdp->num_cpus); | |
cfa60917 CW |
1974 | return 1; |
1975 | } | |
b8f7fb13 | 1976 | } |
f073cc8f CW |
1977 | return 0; |
1978 | } | |
1979 | ||
1980 | /* | |
1981 | * Each socket is to get a local array of pnodes/hubs. | |
1982 | */ | |
1983 | static void make_per_cpu_thp(struct bau_control *smaster) | |
1984 | { | |
1985 | int cpu; | |
1986 | size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus(); | |
1987 | ||
1988 | smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode); | |
1989 | memset(smaster->thp, 0, hpsz); | |
1990 | for_each_present_cpu(cpu) { | |
1991 | smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode; | |
1992 | smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; | |
1993 | } | |
1994 | } | |
1995 | ||
442d3924 | 1996 | /* |
1997 | * Each uvhub is to get a local cpumask. | |
1998 | */ | |
1999 | static void make_per_hub_cpumask(struct bau_control *hmaster) | |
2000 | { | |
2001 | int sz = sizeof(cpumask_t); | |
2002 | ||
2003 | hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode); | |
2004 | } | |
2005 | ||
f073cc8f CW |
2006 | /* |
2007 | * Initialize all the per_cpu information for the cpus on a given socket, |
2008 | * given what has been gathered into the socket_desc struct. | |
2009 | * And reports the chosen hub and socket masters back to the caller. | |
2010 | */ | |
2011 | static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp, | |
2012 | struct bau_control **smasterp, | |
2013 | struct bau_control **hmasterp) | |
2014 | { | |
2015 | int i; | |
2016 | int cpu; | |
2017 | struct bau_control *bcp; | |
2018 | ||
2019 | for (i = 0; i < sdp->num_cpus; i++) { | |
2020 | cpu = sdp->cpu_number[i]; | |
2021 | bcp = &per_cpu(bau_control, cpu); | |
2022 | bcp->cpu = cpu; | |
2023 | if (i == 0) { | |
2024 | *smasterp = bcp; | |
2025 | if (!(*hmasterp)) | |
2026 | *hmasterp = bcp; | |
2027 | } | |
2028 | bcp->cpus_in_uvhub = bdp->num_cpus; | |
2029 | bcp->cpus_in_socket = sdp->num_cpus; | |
2030 | bcp->socket_master = *smasterp; | |
2031 | bcp->uvhub = bdp->uvhub; | |
da87c937 CW |
2032 | if (is_uv1_hub()) |
2033 | bcp->uvhub_version = 1; | |
2034 | else if (is_uv2_hub()) | |
2035 | bcp->uvhub_version = 2; | |
a26fd719 CW |
2036 | else if (is_uv3_hub()) |
2037 | bcp->uvhub_version = 3; | |
da87c937 | 2038 | else { |
a26fd719 | 2039 | printk(KERN_EMERG "uvhub version not 1, 2 or 3\n"); |
da87c937 CW |
2040 | return 1; |
2041 | } | |
f073cc8f CW |
2042 | bcp->uvhub_master = *hmasterp; |
2043 | bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id; | |
2044 | if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) { | |
2045 | printk(KERN_EMERG "%d cpus per uvhub invalid\n", | |
2046 | bcp->uvhub_cpu); | |
2047 | return 1; | |
2048 | } | |
2049 | } | |
2050 | return 0; | |
2051 | } | |
2052 | ||
2053 | /* | |
2054 | * Summarize the blade and socket topology into the per_cpu structures. | |
2055 | */ | |
2056 | static int __init summarize_uvhub_sockets(int nuvhubs, | |
2057 | struct uvhub_desc *uvhub_descs, | |
2058 | unsigned char *uvhub_mask) | |
2059 | { | |
2060 | int socket; | |
2061 | int uvhub; | |
2062 | unsigned short socket_mask; | |
2063 | ||
c4026cfd | 2064 | for (uvhub = 0; uvhub < nuvhubs; uvhub++) { |
f073cc8f CW |
2065 | struct uvhub_desc *bdp; |
2066 | struct bau_control *smaster = NULL; | |
2067 | struct bau_control *hmaster = NULL; | |
2068 | ||
c4026cfd CW |
2069 | if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8)))) |
2070 | continue; | |
f073cc8f | 2071 | |
b8f7fb13 | 2072 | bdp = &uvhub_descs[uvhub]; |
a8328ee5 CW |
2073 | socket_mask = bdp->socket_mask; |
2074 | socket = 0; | |
2075 | while (socket_mask) { | |
f073cc8f CW |
2076 | struct socket_desc *sdp; |
2077 | if ((socket_mask & 1)) { | |
2078 | sdp = &bdp->socket[socket]; | |
2079 | if (scan_sock(sdp, bdp, &smaster, &hmaster)) | |
cfa60917 | 2080 | return 1; |
9c9153db | 2081 | make_per_cpu_thp(smaster); |
b8f7fb13 CW |
2082 | } |
2083 | socket++; | |
a8328ee5 | 2084 | socket_mask = (socket_mask >> 1); |
b8f7fb13 | 2085 | } |
442d3924 | 2086 | make_per_hub_cpumask(hmaster); |
b8f7fb13 | 2087 | } |
f073cc8f CW |
2088 | return 0; |
2089 | } | |
2090 | ||
2091 | /* | |
2092 | * initialize the bau_control structure for each cpu | |
2093 | */ | |
2094 | static int __init init_per_cpu(int nuvhubs, int base_part_pnode) | |
2095 | { | |
2096 | unsigned char *uvhub_mask; | |
2097 | void *vp; | |
2098 | struct uvhub_desc *uvhub_descs; | |
2099 | ||
2100 | timeout_us = calculate_destination_timeout(); | |
2101 | ||
2102 | vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL); | |
2103 | uvhub_descs = (struct uvhub_desc *)vp; | |
2104 | memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc)); | |
2105 | uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL); | |
2106 | ||
2107 | if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask)) | |
bbd270e6 | 2108 | goto fail; |
f073cc8f CW |
2109 | |
2110 | if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask)) | |
bbd270e6 | 2111 | goto fail; |
f073cc8f | 2112 | |
b8f7fb13 | 2113 | kfree(uvhub_descs); |
c4026cfd | 2114 | kfree(uvhub_mask); |
f073cc8f | 2115 | init_per_cpu_tunables(); |
cfa60917 | 2116 | return 0; |
bbd270e6 | 2117 | |
2118 | fail: | |
2119 | kfree(uvhub_descs); | |
2120 | kfree(uvhub_mask); | |
2121 | return 1; | |
b194b120 CW |
2122 | } |
2123 | ||
2124 | /* | |
2125 | * Initialization of BAU-related structures | |
2126 | */ | |
2127 | static int __init uv_bau_init(void) | |
2128 | { | |
b8f7fb13 CW |
2129 | int uvhub; |
2130 | int pnode; | |
2131 | int nuvhubs; | |
2c74d666 | 2132 | int cur_cpu; |
f073cc8f | 2133 | int cpus; |
b8f7fb13 | 2134 | int vector; |
f073cc8f | 2135 | cpumask_var_t *mask; |
b194b120 CW |
2136 | |
2137 | if (!is_uv_system()) | |
2138 | return 0; | |
1812924b | 2139 | |
f073cc8f CW |
2140 | for_each_possible_cpu(cur_cpu) { |
2141 | mask = &per_cpu(uv_flush_tlb_mask, cur_cpu); | |
2142 | zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu)); | |
2143 | } | |
76ba0ecd | 2144 | |
b8f7fb13 | 2145 | nuvhubs = uv_num_possible_blades(); |
f073cc8f | 2146 | congested_cycles = usec_2_cycles(congested_respns_us); |
9674f35b | 2147 | |
f073cc8f | 2148 | uv_base_pnode = 0x7fffffff; |
77ed23f8 | 2149 | for (uvhub = 0; uvhub < nuvhubs; uvhub++) { |
f073cc8f CW |
2150 | cpus = uv_blade_nr_possible_cpus(uvhub); |
2151 | if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode)) | |
2152 | uv_base_pnode = uv_blade_to_pnode(uvhub); | |
77ed23f8 CW |
2153 | } |
2154 | ||
d059f9fa CW |
2155 | enable_timeouts(); |
2156 | ||
f073cc8f | 2157 | if (init_per_cpu(nuvhubs, uv_base_pnode)) { |
26ef8577 CW |
2158 | set_bau_off(); |
2159 | nobau_perm = 1; | |
77ed23f8 CW |
2160 | return 0; |
2161 | } | |
b8f7fb13 CW |
2162 | |
2163 | vector = UV_BAU_MESSAGE; | |
a26fd719 | 2164 | for_each_possible_blade(uvhub) { |
b8f7fb13 | 2165 | if (uv_blade_nr_possible_cpus(uvhub)) |
f073cc8f | 2166 | init_uvhub(uvhub, vector, uv_base_pnode); |
a26fd719 | 2167 | } |
b8f7fb13 | 2168 | |
b8f7fb13 CW |
2169 | alloc_intr_gate(vector, uv_bau_message_intr1); |
2170 | ||
2171 | for_each_possible_blade(uvhub) { | |
93a7ca0c | 2172 | if (uv_blade_nr_possible_cpus(uvhub)) { |
f073cc8f CW |
2173 | unsigned long val; |
2174 | unsigned long mmr; | |
93a7ca0c CW |
2175 | pnode = uv_blade_to_pnode(uvhub); |
2176 | /* INIT the bau */ | |
f073cc8f CW |
2177 | val = 1L << 63; |
2178 | write_gmmr_activation(pnode, val); | |
93a7ca0c | 2179 | mmr = 1; /* should be 1 to broadcast to both sockets */ |
da87c937 CW |
2180 | if (!is_uv1_hub()) |
2181 | write_mmr_data_broadcast(pnode, mmr); | |
93a7ca0c | 2182 | } |
b8f7fb13 | 2183 | } |
b4c286e6 | 2184 | |
1812924b CW |
2185 | return 0; |
2186 | } | |
b8f7fb13 | 2187 | core_initcall(uv_bau_init); |
e8e5e8a8 | 2188 | fs_initcall(uv_ptc_init); |