/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/cdev.h>
#include <linux/compat.h>
#include <asm/hardwall.h>
#include <asm/traps.h>
#include <asm/siginfo.h>
#include <asm/irq_regs.h>

#include <arch/interrupts.h>
#include <arch/spr_def.h>


/*
 * This data structure tracks the rectangle data, etc., associated
 * one-to-one with a "struct file *" from opening HARDWALL_FILE.
 * Note that the file's private data points back to this structure.
 */
struct hardwall_info {
        struct list_head list;       /* "rectangles" list */
        struct list_head task_head;  /* head of tasks in this hardwall */
        int ulhc_x;                  /* upper left hand corner x coord */
        int ulhc_y;                  /* upper left hand corner y coord */
        int width;                   /* rectangle width */
        int height;                  /* rectangle height */
        int teardown_in_progress;    /* are we tearing this one down? */
};

/* Currently allocated hardwall rectangles */
static LIST_HEAD(rectangles);

/*
 * Guard changes to the hardwall data structures.
 * This could be finer grained (e.g. one lock for the list of hardwall
 * rectangles, then separate embedded locks for each one's list of tasks),
 * but there are subtle correctness issues when trying to start with
 * a task's "hardwall" pointer and lock the correct rectangle's embedded
 * lock in the presence of a simultaneous deactivation, so it seems
 * easier to have a single lock, given that none of these data
 * structures are touched very frequently during normal operation.
 */
static DEFINE_SPINLOCK(hardwall_lock);

/* Allow disabling UDN access. */
static int udn_disabled;
static int __init noudn(char *str)
{
        pr_info("User-space UDN access is disabled\n");
        udn_disabled = 1;
        return 0;
}
early_param("noudn", noudn);
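
/*
 * For example, booting with "noudn" appended to the kernel command line
 * sets udn_disabled, which makes the _HARDWALL_CREATE ioctl below fail
 * with ENOSYS and makes reset_network_state() a no-op.
 */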

/*
 * Low-level primitives
 */

/* Set a CPU bit if the CPU is online. */
#define cpu_online_set(cpu, dst) do { \
        if (cpu_online(cpu)) \
                cpumask_set_cpu(cpu, dst); \
} while (0)


/* Does the given rectangle contain the given x,y coordinate? */
static int contains(struct hardwall_info *r, int x, int y)
{
        return (x >= r->ulhc_x && x < r->ulhc_x + r->width) &&
                (y >= r->ulhc_y && y < r->ulhc_y + r->height);
}

/* Compute the rectangle parameters and validate the cpumask. */
static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
{
        int x, y, cpu, ulhc, lrhc;

        /* The first cpu is the ULHC, the last the LRHC. */
        ulhc = find_first_bit(cpumask_bits(mask), nr_cpumask_bits);
        lrhc = find_last_bit(cpumask_bits(mask), nr_cpumask_bits);

        /* Compute the rectangle attributes from the cpus. */
        r->ulhc_x = cpu_x(ulhc);
        r->ulhc_y = cpu_y(ulhc);
        r->width = cpu_x(lrhc) - r->ulhc_x + 1;
        r->height = cpu_y(lrhc) - r->ulhc_y + 1;

        /* Width and height must be positive. */
        if (r->width <= 0 || r->height <= 0)
                return -EINVAL;

        /* Confirm that the cpumask is exactly the rectangle. */
        for (y = 0, cpu = 0; y < smp_height; ++y)
                for (x = 0; x < smp_width; ++x, ++cpu)
                        if (cpumask_test_cpu(cpu, mask) != contains(r, x, y))
                                return -EINVAL;

        /*
         * Note that offline cpus can't be drained when this UDN
         * rectangle eventually closes.  We used to detect this
         * situation and print a warning, but it annoyed users and
         * they ignored it anyway, so now we just return without a
         * warning.
         */
        return 0;
}
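
/*
 * Worked example: with smp_width == 8, cpu == y * 8 + x, so the mask
 * { 9, 10, 17, 18 } has ULHC cpu 9 at (1,1) and LRHC cpu 18 at (2,2),
 * yielding a 2x2 rectangle.  A mask of just { 9, 18 } computes the same
 * bounding box but fails the exactness scan above, since cpus 10 and 17
 * fall inside the rectangle without being in the mask.
 */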

/* Do the two given rectangles overlap on any cpu? */
static int overlaps(struct hardwall_info *a, struct hardwall_info *b)
{
        return a->ulhc_x + a->width > b->ulhc_x &&   /* A not to the left */
                b->ulhc_x + b->width > a->ulhc_x &&  /* B not to the left */
                a->ulhc_y + a->height > b->ulhc_y && /* A not above */
                b->ulhc_y + b->height > a->ulhc_y;   /* B not above */
}


/*
 * Hardware management of hardwall setup, teardown, trapping,
 * and enabling/disabling PL0 access to the networks.
 */

/* Bit field values to mask together for writes to SPR_XDN_DIRECTION_PROTECT */
enum direction_protect {
        N_PROTECT = (1 << 0),
        E_PROTECT = (1 << 1),
        S_PROTECT = (1 << 2),
        W_PROTECT = (1 << 3)
};

static void enable_firewall_interrupts(void)
{
        raw_local_irq_unmask_now(INT_UDN_FIREWALL);
}

static void disable_firewall_interrupts(void)
{
        raw_local_irq_mask_now(INT_UDN_FIREWALL);
}

/* Set up hardwall on this cpu based on the passed hardwall_info. */
static void hardwall_setup_ipi_func(void *info)
{
        struct hardwall_info *r = info;
        int cpu = smp_processor_id();
        int x = cpu % smp_width;
        int y = cpu / smp_width;
        int bits = 0;
        if (x == r->ulhc_x)
                bits |= W_PROTECT;
        if (x == r->ulhc_x + r->width - 1)
                bits |= E_PROTECT;
        if (y == r->ulhc_y)
                bits |= N_PROTECT;
        if (y == r->ulhc_y + r->height - 1)
                bits |= S_PROTECT;
        BUG_ON(bits == 0);
        __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, bits);
        enable_firewall_interrupts();
}

/* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
static void hardwall_setup(struct hardwall_info *r)
{
        int x, y, cpu, delta;
        struct cpumask rect_cpus;

        cpumask_clear(&rect_cpus);

        /* First include the top and bottom edges */
        cpu = r->ulhc_y * smp_width + r->ulhc_x;
        delta = (r->height - 1) * smp_width;
        for (x = 0; x < r->width; ++x, ++cpu) {
                cpu_online_set(cpu, &rect_cpus);
                cpu_online_set(cpu + delta, &rect_cpus);
        }

        /* Then the left and right edges */
        cpu -= r->width;
        delta = r->width - 1;
        for (y = 0; y < r->height; ++y, cpu += smp_width) {
                cpu_online_set(cpu, &rect_cpus);
                cpu_online_set(cpu + delta, &rect_cpus);
        }

        /* Then tell all the cpus to set up their protection SPR */
        on_each_cpu_mask(&rect_cpus, hardwall_setup_ipi_func, r, 1);
}
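
/*
 * For a 3x3 rectangle, for example, the edge tiles end up with these
 * direction-protect bits (corners combine two directions); the center
 * tile lies on no edge, so it is never IPI'd and never traps:
 *
 *      N|W     N     N|E
 *       W      -      E
 *      S|W     S     S|E
 */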

void __kprobes do_hardwall_trap(struct pt_regs *regs, int fault_num)
{
        struct hardwall_info *rect;
        struct task_struct *p;
        struct siginfo info;
        int x, y;
        int cpu = smp_processor_id();
        int found_processes;
        unsigned long flags;

        struct pt_regs *old_regs = set_irq_regs(regs);
        irq_enter();

        /* This tile trapped a network access; find the rectangle. */
        x = cpu % smp_width;
        y = cpu / smp_width;
        spin_lock_irqsave(&hardwall_lock, flags);
        list_for_each_entry(rect, &rectangles, list) {
                if (contains(rect, x, y))
                        break;
        }

        /*
         * It shouldn't be possible not to find this cpu on the
         * rectangle list, since only cpus in rectangles get hardwalled.
         * The hardwall is only removed after the UDN is drained.
         */
        BUG_ON(&rect->list == &rectangles);

        /*
         * If we already started teardown on this hardwall, don't worry;
         * the abort signal has been sent and we are just waiting for things
         * to quiesce.
         */
        if (rect->teardown_in_progress) {
                pr_notice("cpu %d: detected hardwall violation %#lx"
                          " while teardown already in progress\n",
                          cpu, (long)__insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
                goto done;
        }

        /*
         * Kill off any process that is activated in this rectangle.
         * We bypass security to deliver the signal, since it must be
         * one of the activated processes that generated the UDN
         * message that caused this trap, and all the activated
         * processes shared a single open file so are pretty tightly
         * bound together from a security point of view to begin with.
         */
        rect->teardown_in_progress = 1;
        wmb(); /* Ensure visibility of rectangle before notifying processes. */
        pr_notice("cpu %d: detected hardwall violation %#lx...\n",
                  cpu, (long)__insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
        memset(&info, 0, sizeof(info)); /* don't leak kernel stack contents */
        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_HARDWALL;
        found_processes = 0;
        list_for_each_entry(p, &rect->task_head, thread.hardwall_list) {
                BUG_ON(p->thread.hardwall != rect);
                if (p->sighand) {
                        found_processes = 1;
                        pr_notice("hardwall: killing %d\n", p->pid);
                        spin_lock(&p->sighand->siglock);
                        __group_send_sig_info(info.si_signo, &info, p);
                        spin_unlock(&p->sighand->siglock);
                }
        }
        if (!found_processes)
                pr_notice("hardwall: no associated processes!\n");

done:
        spin_unlock_irqrestore(&hardwall_lock, flags);

        /*
         * We have to disable firewall interrupts now, or else when we
         * return from this handler, we will simply re-interrupt back to
         * it.  However, we can't clear the protection bits, since we
         * haven't yet drained the network, and that would allow packets
         * to cross out of the hardwall region.
         */
        disable_firewall_interrupts();

        irq_exit();
        set_irq_regs(old_regs);
}

/* Allow access from user space to the UDN. */
void grant_network_mpls(void)
{
        __insn_mtspr(SPR_MPL_UDN_ACCESS_SET_0, 1);
        __insn_mtspr(SPR_MPL_UDN_AVAIL_SET_0, 1);
        __insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_0, 1);
        __insn_mtspr(SPR_MPL_UDN_TIMER_SET_0, 1);
#if !CHIP_HAS_REV1_XDN()
        __insn_mtspr(SPR_MPL_UDN_REFILL_SET_0, 1);
        __insn_mtspr(SPR_MPL_UDN_CA_SET_0, 1);
#endif
}

/* Deny access from user space to the UDN. */
void restrict_network_mpls(void)
{
        __insn_mtspr(SPR_MPL_UDN_ACCESS_SET_1, 1);
        __insn_mtspr(SPR_MPL_UDN_AVAIL_SET_1, 1);
        __insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_1, 1);
        __insn_mtspr(SPR_MPL_UDN_TIMER_SET_1, 1);
#if !CHIP_HAS_REV1_XDN()
        __insn_mtspr(SPR_MPL_UDN_REFILL_SET_1, 1);
        __insn_mtspr(SPR_MPL_UDN_CA_SET_1, 1);
#endif
}
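
/*
 * (Background, assuming the usual tile MPL semantics: writing 1 to an
 * SPR_MPL_xxx_SET_N register sets the minimum protection level of that
 * interrupt to N.  The SET_0 forms above thus route the UDN interrupts
 * to PL0, making the network registers usable from user space, while
 * the SET_1 forms reserve them to the kernel again.)
 */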


/*
 * Code to create, activate, deactivate, and destroy hardwall rectangles.
 */

/* Create a hardwall for the given rectangle */
static struct hardwall_info *hardwall_create(
        size_t size, const unsigned char __user *bits)
{
        struct hardwall_info *iter, *rect;
        struct cpumask mask;
        unsigned long flags;
        int rc;

        /* Reject crazy sizes out of hand, a la sys_mbind(). */
        if (size > PAGE_SIZE)
                return ERR_PTR(-EINVAL);

        /* Copy whatever fits into a cpumask. */
        if (copy_from_user(&mask, bits, min(sizeof(struct cpumask), size)))
                return ERR_PTR(-EFAULT);

        /*
         * If the size was short, clear the rest of the mask;
         * otherwise validate that the rest of the user mask was zero
         * (we don't try hard to be efficient when validating huge masks).
         */
        if (size < sizeof(struct cpumask)) {
                memset((char *)&mask + size, 0, sizeof(struct cpumask) - size);
        } else if (size > sizeof(struct cpumask)) {
                size_t i;
                for (i = sizeof(struct cpumask); i < size; ++i) {
                        char c;
                        if (get_user(c, &bits[i]))
                                return ERR_PTR(-EFAULT);
                        if (c)
                                return ERR_PTR(-EINVAL);
                }
        }

        /* Allocate a new rectangle optimistically. */
        rect = kmalloc(sizeof(struct hardwall_info),
                       GFP_KERNEL | __GFP_ZERO);
        if (rect == NULL)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&rect->task_head);

        /* Compute the rectangle size and validate that it's plausible. */
        rc = setup_rectangle(rect, &mask);
        if (rc != 0) {
                kfree(rect);
                return ERR_PTR(rc);
        }

        /* Confirm it doesn't overlap and add it to the list. */
        spin_lock_irqsave(&hardwall_lock, flags);
        list_for_each_entry(iter, &rectangles, list) {
                if (overlaps(iter, rect)) {
                        spin_unlock_irqrestore(&hardwall_lock, flags);
                        kfree(rect);
                        return ERR_PTR(-EBUSY);
                }
        }
        list_add_tail(&rect->list, &rectangles);
        spin_unlock_irqrestore(&hardwall_lock, flags);

        /* Set up appropriate hardwalling on all affected cpus. */
        hardwall_setup(rect);

        return rect;
}

/* Activate a given hardwall on this cpu for this process. */
static int hardwall_activate(struct hardwall_info *rect)
{
        int cpu, x, y;
        unsigned long flags;
        struct task_struct *p = current;
        struct thread_struct *ts = &p->thread;

        /* Require a rectangle. */
        if (rect == NULL)
                return -ENODATA;

        /* Not allowed to activate a rectangle that is being torn down. */
        if (rect->teardown_in_progress)
                return -EINVAL;

        /*
         * Get our affinity; if we're not bound to this tile uniquely,
         * we can't access the network registers.
         */
        if (cpumask_weight(&p->cpus_allowed) != 1)
                return -EPERM;

        /* Make sure we are bound to a cpu in this rectangle. */
        cpu = smp_processor_id();
        BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
        x = cpu_x(cpu);
        y = cpu_y(cpu);
        if (!contains(rect, x, y))
                return -EINVAL;

        /* If we are already bound to this hardwall, it's a no-op. */
        if (ts->hardwall) {
                BUG_ON(ts->hardwall != rect);
                return 0;
        }

        /* Success!  This process gets to use the user networks on this cpu. */
        ts->hardwall = rect;
        spin_lock_irqsave(&hardwall_lock, flags);
        list_add(&ts->hardwall_list, &rect->task_head);
        spin_unlock_irqrestore(&hardwall_lock, flags);
        grant_network_mpls();
        printk(KERN_DEBUG "Pid %d (%s) activated for hardwall: cpu %d\n",
               p->pid, p->comm, cpu);
        return 0;
}

/*
 * Deactivate a task's hardwall.  Must hold hardwall_lock.
 * This method may be called from free_task(), so we don't want to
 * rely on too many fields of struct task_struct still being valid.
 * We assume the cpus_allowed, pid, and comm fields are still valid.
 */
static void _hardwall_deactivate(struct task_struct *task)
{
        struct thread_struct *ts = &task->thread;

        if (cpumask_weight(&task->cpus_allowed) != 1) {
                pr_err("pid %d (%s) releasing networks with"
                       " an affinity mask containing %d cpus!\n",
                       task->pid, task->comm,
                       cpumask_weight(&task->cpus_allowed));
                BUG();
        }

        BUG_ON(ts->hardwall == NULL);
        ts->hardwall = NULL;
        list_del(&ts->hardwall_list);
        if (task == current)
                restrict_network_mpls();
}

/* Deactivate a task's hardwall. */
int hardwall_deactivate(struct task_struct *task)
{
        unsigned long flags;
        int activated;

        spin_lock_irqsave(&hardwall_lock, flags);
        activated = (task->thread.hardwall != NULL);
        if (activated)
                _hardwall_deactivate(task);
        spin_unlock_irqrestore(&hardwall_lock, flags);

        if (!activated)
                return -EINVAL;

        printk(KERN_DEBUG "Pid %d (%s) deactivated for hardwall: cpu %d\n",
               task->pid, task->comm, smp_processor_id());
        return 0;
}

/* Stop a UDN switch before draining the network. */
static void stop_udn_switch(void *ignored)
{
#if !CHIP_HAS_REV1_XDN()
        /* Freeze the switch and the demux. */
        __insn_mtspr(SPR_UDN_SP_FREEZE,
                     SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
                     SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
                     SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
#endif
}

/* Drain all the state from a stopped switch. */
static void drain_udn_switch(void *ignored)
{
#if !CHIP_HAS_REV1_XDN()
        int i;
        int from_tile_words, ca_count;

        /* Empty out the 5 switch point fifos. */
        for (i = 0; i < 5; i++) {
                int words, j;
                __insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
                words = __insn_mfspr(SPR_UDN_SP_STATE) & 0xF;
                for (j = 0; j < words; j++)
                        (void) __insn_mfspr(SPR_UDN_SP_FIFO_DATA);
                BUG_ON((__insn_mfspr(SPR_UDN_SP_STATE) & 0xF) != 0);
        }

        /* Dump out the 3 word fifo at top. */
        from_tile_words = (__insn_mfspr(SPR_UDN_DEMUX_STATUS) >> 10) & 0x3;
        for (i = 0; i < from_tile_words; i++)
                (void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);

        /* Empty out demuxes. */
        while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
                (void) __tile_udn0_receive();
        while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
                (void) __tile_udn1_receive();
        while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
                (void) __tile_udn2_receive();
        while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
                (void) __tile_udn3_receive();
        BUG_ON((__insn_mfspr(SPR_UDN_DATA_AVAIL) & 0xF) != 0);

        /* Empty out catch all. */
        ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
        for (i = 0; i < ca_count; i++)
                (void) __insn_mfspr(SPR_UDN_CA_DATA);
        BUG_ON(__insn_mfspr(SPR_UDN_DEMUX_CA_COUNT) != 0);

        /* Clear demux logic. */
        __insn_mtspr(SPR_UDN_DEMUX_CTL, 1);

        /*
         * Write switch state; experimentation indicates that 0xc3000
         * is an idle switch point.
         */
        for (i = 0; i < 5; i++) {
                __insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
                __insn_mtspr(SPR_UDN_SP_STATE, 0xc3000);
        }
#endif
}

/* Reset random UDN state registers at boot up and during hardwall teardown. */
void reset_network_state(void)
{
#if !CHIP_HAS_REV1_XDN()
        /* Reset UDN coordinates to their standard value */
        unsigned int cpu = smp_processor_id();
        unsigned int x = cpu % smp_width;
        unsigned int y = cpu / smp_width;
#endif

        if (udn_disabled)
                return;

#if !CHIP_HAS_REV1_XDN()
        __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));

        /* Set demux tags to predefined values and enable them. */
        __insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
        __insn_mtspr(SPR_UDN_TAG_0, (1 << 0));
        __insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
        __insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
        __insn_mtspr(SPR_UDN_TAG_3, (1 << 3));
#endif

        /* Clear out other random registers so we have a clean slate. */
        __insn_mtspr(SPR_UDN_AVAIL_EN, 0);
        __insn_mtspr(SPR_UDN_DEADLOCK_TIMEOUT, 0);
#if !CHIP_HAS_REV1_XDN()
        __insn_mtspr(SPR_UDN_REFILL_EN, 0);
        __insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
        __insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);
#endif

        /* Start the switch and demux. */
#if !CHIP_HAS_REV1_XDN()
        __insn_mtspr(SPR_UDN_SP_FREEZE, 0);
#endif
}

/* Restart a UDN switch after draining. */
static void restart_udn_switch(void *ignored)
{
        reset_network_state();

        /* Clear the protection bits and disable firewall interrupts. */
        __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, 0);
        disable_firewall_interrupts();
}

/* Build a struct cpumask containing all valid tiles in bounding rectangle. */
static void fill_mask(struct hardwall_info *r, struct cpumask *result)
{
        int x, y, cpu;

        cpumask_clear(result);

        cpu = r->ulhc_y * smp_width + r->ulhc_x;
        for (y = 0; y < r->height; ++y, cpu += smp_width - r->width) {
                for (x = 0; x < r->width; ++x, ++cpu)
                        cpu_online_set(cpu, result);
        }
}

/* Last reference to a hardwall is gone, so clear the network. */
static void hardwall_destroy(struct hardwall_info *rect)
{
        struct task_struct *task, *tmp;
        unsigned long flags;
        struct cpumask mask;

        /* Make sure this file actually represents a rectangle. */
        if (rect == NULL)
                return;

        /*
         * Deactivate any remaining tasks.  It's possible to race with
         * some other thread that is exiting and hasn't yet called
         * deactivate (when freeing its thread_info), so we carefully
         * deactivate any remaining tasks before freeing the
         * hardwall_info object itself.  Use the "safe" iterator, since
         * deactivation unlinks each task from the list as we go.
         */
        spin_lock_irqsave(&hardwall_lock, flags);
        list_for_each_entry_safe(task, tmp, &rect->task_head,
                                 thread.hardwall_list)
                _hardwall_deactivate(task);
        spin_unlock_irqrestore(&hardwall_lock, flags);

        /* Drain the UDN. */
        printk(KERN_DEBUG "Clearing hardwall rectangle %dx%d %d,%d\n",
               rect->width, rect->height, rect->ulhc_x, rect->ulhc_y);
        fill_mask(rect, &mask);
        on_each_cpu_mask(&mask, stop_udn_switch, NULL, 1);
        on_each_cpu_mask(&mask, drain_udn_switch, NULL, 1);

        /* Restart switch and disable firewall. */
        on_each_cpu_mask(&mask, restart_udn_switch, NULL, 1);

        /* Now remove the rectangle from the list and free it. */
        spin_lock_irqsave(&hardwall_lock, flags);
        BUG_ON(!list_empty(&rect->task_head));
        list_del(&rect->list);
        spin_unlock_irqrestore(&hardwall_lock, flags);
        kfree(rect);
}


/*
 * Dump hardwall state via /proc; initialized in arch/tile/sys/proc.c.
 */
int proc_tile_hardwall_show(struct seq_file *sf, void *v)
{
        struct hardwall_info *r;

        if (udn_disabled) {
                seq_printf(sf, "%dx%d 0,0 pids:\n", smp_width, smp_height);
                return 0;
        }

        spin_lock_irq(&hardwall_lock);
        list_for_each_entry(r, &rectangles, list) {
                struct task_struct *p;
                seq_printf(sf, "%dx%d %d,%d pids:",
                           r->width, r->height, r->ulhc_x, r->ulhc_y);
                list_for_each_entry(p, &r->task_head, thread.hardwall_list) {
                        unsigned int cpu = cpumask_first(&p->cpus_allowed);
                        unsigned int x = cpu % smp_width;
                        unsigned int y = cpu / smp_width;
                        seq_printf(sf, " %d@%d,%d", p->pid, x, y);
                }
                seq_printf(sf, "\n");
        }
        spin_unlock_irq(&hardwall_lock);
        return 0;
}


/*
 * Character device support via ioctl/close.
 */

static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
{
        struct hardwall_info *rect = file->private_data;

        if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
                return -EINVAL;

        switch (_IOC_NR(a)) {
        case _HARDWALL_CREATE:
                if (udn_disabled)
                        return -ENOSYS;
                if (rect != NULL)
                        return -EALREADY;
                rect = hardwall_create(_IOC_SIZE(a),
                                       (const unsigned char __user *)b);
                if (IS_ERR(rect))
                        return PTR_ERR(rect);
                file->private_data = rect;
                return 0;

        case _HARDWALL_ACTIVATE:
                return hardwall_activate(rect);

        case _HARDWALL_DEACTIVATE:
                if (current->thread.hardwall != rect)
                        return -EINVAL;
                return hardwall_deactivate(current);

        default:
                return -EINVAL;
        }
}
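
/*
 * Sketch of the expected user-space calling sequence, assuming the
 * HARDWALL_FILE path and the HARDWALL_CREATE()/HARDWALL_ACTIVATE/
 * HARDWALL_DEACTIVATE ioctl macros from <asm/hardwall.h>.  A process
 * pinned to a single tile inside the rectangle would do roughly:
 *
 *      int fd = open(HARDWALL_FILE, O_RDWR);
 *      ioctl(fd, HARDWALL_CREATE(mask_size), mask_bits);
 *      ioctl(fd, HARDWALL_ACTIVATE);
 *      ... use the UDN registers directly ...
 *      ioctl(fd, HARDWALL_DEACTIVATE);
 *      close(fd);   // last close drains and destroys the rectangle
 */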

#ifdef CONFIG_COMPAT
static long hardwall_compat_ioctl(struct file *file,
                                  unsigned int a, unsigned long b)
{
        /* Sign-extend the argument so it can be used as a pointer. */
        return hardwall_ioctl(file, a, (unsigned long)compat_ptr(b));
}
#endif

/* The user process closed the file; revoke access to user networks. */
static int hardwall_flush(struct file *file, fl_owner_t owner)
{
        struct hardwall_info *rect = file->private_data;
        struct task_struct *task, *tmp;
        unsigned long flags;

        if (rect) {
                /*
                 * NOTE: if multiple threads are activated on this hardwall
                 * file, the other threads will continue having access to the
                 * UDN until they are context-switched out and back in again.
                 *
                 * NOTE: A NULL files pointer means the task is being torn
                 * down, so in that case we also deactivate it.
                 */
                spin_lock_irqsave(&hardwall_lock, flags);
                list_for_each_entry_safe(task, tmp, &rect->task_head,
                                         thread.hardwall_list) {
                        if (task->files == owner || task->files == NULL)
                                _hardwall_deactivate(task);
                }
                spin_unlock_irqrestore(&hardwall_lock, flags);
        }

        return 0;
}

/* This hardwall is gone, so destroy it. */
static int hardwall_release(struct inode *inode, struct file *file)
{
        hardwall_destroy(file->private_data);
        return 0;
}

static const struct file_operations dev_hardwall_fops = {
        .unlocked_ioctl = hardwall_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = hardwall_compat_ioctl,
#endif
        .flush = hardwall_flush,
        .release = hardwall_release,
};

static struct cdev hardwall_dev;

static int __init dev_hardwall_init(void)
{
        int rc;
        dev_t dev;

        rc = alloc_chrdev_region(&dev, 0, 1, "hardwall");
        if (rc < 0)
                return rc;
        cdev_init(&hardwall_dev, &dev_hardwall_fops);
        rc = cdev_add(&hardwall_dev, dev, 1);
        if (rc < 0) {
                unregister_chrdev_region(dev, 1); /* don't leak the region */
                return rc;
        }

        return 0;
}
late_initcall(dev_hardwall_init);