/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/cdev.h>
#include <linux/compat.h>
#include <asm/hardwall.h>
#include <asm/traps.h>
#include <asm/siginfo.h>
#include <asm/irq_regs.h>

#include <arch/interrupts.h>
#include <arch/spr_def.h>


/*
 * Implement a per-cpu "hardwall" resource class such as UDN or IPI.
 * We use "hardwall" nomenclature throughout for historical reasons.
 * The lock here controls access to the list data structure as well as
 * to the items on the list.
 */
struct hardwall_type {
	int index;
	int is_xdn;
	int is_idn;
	int disabled;
	const char *name;
	struct list_head list;
	spinlock_t lock;
	struct proc_dir_entry *proc_dir;
};

enum hardwall_index {
	HARDWALL_UDN = 0,
#ifndef __tilepro__
	HARDWALL_IDN = 1,
	HARDWALL_IPI = 2,
#endif
	_HARDWALL_TYPES
};

static struct hardwall_type hardwall_types[] = {
	{  /* user-space access to UDN */
		0,
		1,
		0,
		0,
		"udn",
		LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock),
		NULL
	},
#ifndef __tilepro__
	{  /* user-space access to IDN */
		1,
		1,
		1,
		1,  /* disabled pending hypervisor support */
		"idn",
		LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock),
		NULL
	},
	{  /* access to user-space IPI */
		2,
		0,
		0,
		0,
		"ipi",
		LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock),
		NULL
	},
#endif
};

/*
 * This data structure tracks the cpu data, etc., associated
 * one-to-one with a "struct file *" from opening a hardwall device file.
 * Note that the file's private data points back to this structure.
 */
struct hardwall_info {
	struct list_head list;		/* for hardwall_types.list */
	struct list_head task_head;	/* head of tasks in this hardwall */
	struct hardwall_type *type;	/* type of this resource */
	struct cpumask cpumask;		/* cpus reserved */
	int id;				/* integer id for this hardwall */
	int teardown_in_progress;	/* are we tearing this one down? */

	/* Remaining fields only valid for user-network resources. */
	int ulhc_x;			/* upper left hand corner x coord */
	int ulhc_y;			/* upper left hand corner y coord */
	int width;			/* rectangle width */
	int height;			/* rectangle height */
#if CHIP_HAS_REV1_XDN()
	atomic_t xdn_pending_count;	/* cores in phase 1 of drain */
#endif
};


/* /proc/tile/hardwall */
static struct proc_dir_entry *hardwall_proc_dir;

/* Functions to manage files in /proc/tile/hardwall. */
static void hardwall_add_proc(struct hardwall_info *);
static void hardwall_remove_proc(struct hardwall_info *);

/* Allow disabling UDN access. */
static int __init noudn(char *str)
{
	pr_info("User-space UDN access is disabled\n");
	hardwall_types[HARDWALL_UDN].disabled = 1;
	return 0;
}
early_param("noudn", noudn);

#ifndef __tilepro__
/* Allow disabling IDN access. */
static int __init noidn(char *str)
{
	pr_info("User-space IDN access is disabled\n");
	hardwall_types[HARDWALL_IDN].disabled = 1;
	return 0;
}
early_param("noidn", noidn);

/* Allow disabling IPI access. */
static int __init noipi(char *str)
{
	pr_info("User-space IPI access is disabled\n");
	hardwall_types[HARDWALL_IPI].disabled = 1;
	return 0;
}
early_param("noipi", noipi);
#endif


/*
 * Low-level primitives for UDN/IDN
 */

#ifdef __tilepro__
#define mtspr_XDN(hwt, name, val) \
	do { (void)(hwt); __insn_mtspr(SPR_UDN_##name, (val)); } while (0)
#define mtspr_MPL_XDN(hwt, name, val) \
	do { (void)(hwt); __insn_mtspr(SPR_MPL_UDN_##name, (val)); } while (0)
#define mfspr_XDN(hwt, name) \
	((void)(hwt), __insn_mfspr(SPR_UDN_##name))
#else
#define mtspr_XDN(hwt, name, val) \
	do { \
		if ((hwt)->is_idn) \
			__insn_mtspr(SPR_IDN_##name, (val)); \
		else \
			__insn_mtspr(SPR_UDN_##name, (val)); \
	} while (0)
#define mtspr_MPL_XDN(hwt, name, val) \
	do { \
		if ((hwt)->is_idn) \
			__insn_mtspr(SPR_MPL_IDN_##name, (val)); \
		else \
			__insn_mtspr(SPR_MPL_UDN_##name, (val)); \
	} while (0)
#define mfspr_XDN(hwt, name) \
	((hwt)->is_idn ? __insn_mfspr(SPR_IDN_##name) : __insn_mfspr(SPR_UDN_##name))
#endif
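
/*
 * For example, mtspr_XDN(hwt, DIRECTION_PROTECT, bits) writes
 * SPR_IDN_DIRECTION_PROTECT when hwt->is_idn is set and
 * SPR_UDN_DIRECTION_PROTECT otherwise; on tilepro only the UDN
 * register exists, so the macros ignore "hwt" entirely.
 */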

/* Set a CPU bit if the CPU is online. */
#define cpu_online_set(cpu, dst) do { \
	if (cpu_online(cpu)) \
		cpumask_set_cpu(cpu, dst); \
} while (0)


/* Does the given rectangle contain the given x,y coordinate? */
static int contains(struct hardwall_info *r, int x, int y)
{
	return (x >= r->ulhc_x && x < r->ulhc_x + r->width) &&
		(y >= r->ulhc_y && y < r->ulhc_y + r->height);
}

/* Compute the rectangle parameters and validate the cpumask. */
static int check_rectangle(struct hardwall_info *r, struct cpumask *mask)
{
	int x, y, cpu, ulhc, lrhc;

	/* The first cpu is the ULHC, the last the LRHC. */
	ulhc = find_first_bit(cpumask_bits(mask), nr_cpumask_bits);
	lrhc = find_last_bit(cpumask_bits(mask), nr_cpumask_bits);

	/* Compute the rectangle attributes from the cpus. */
	r->ulhc_x = cpu_x(ulhc);
	r->ulhc_y = cpu_y(ulhc);
	r->width = cpu_x(lrhc) - r->ulhc_x + 1;
	r->height = cpu_y(lrhc) - r->ulhc_y + 1;

	/* Width and height must be positive */
	if (r->width <= 0 || r->height <= 0)
		return -EINVAL;

	/* Confirm that the cpumask is exactly the rectangle. */
	for (y = 0, cpu = 0; y < smp_height; ++y)
		for (x = 0; x < smp_width; ++x, ++cpu)
			if (cpumask_test_cpu(cpu, mask) != contains(r, x, y))
				return -EINVAL;

	/*
	 * Note that offline cpus can't be drained when this user network
	 * rectangle eventually closes.  We used to detect this
	 * situation and print a warning, but it annoyed users and
	 * they ignored it anyway, so now we just return without a
	 * warning.
	 */
	return 0;
}
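
/*
 * Illustrative example (not from the original source): on a grid with
 * smp_width == 8, the cpumask {0, 1, 8, 9} has ULHC cpu 0 at (0,0) and
 * LRHC cpu 9 at (1,1), so it passes as a 2x2 rectangle; the mask {0, 1, 9}
 * fails the exactness check above because cpu 8 lies inside that rectangle
 * but is not in the mask.
 */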

/*
 * Hardware management of hardwall setup, teardown, trapping,
 * and enabling/disabling PL0 access to the networks.
 */

/* Bit field values to mask together for writes to SPR_XDN_DIRECTION_PROTECT */
enum direction_protect {
	N_PROTECT = (1 << 0),
	E_PROTECT = (1 << 1),
	S_PROTECT = (1 << 2),
	W_PROTECT = (1 << 3),
	C_PROTECT = (1 << 4),
};

static inline int xdn_which_interrupt(struct hardwall_type *hwt)
{
#ifndef __tilepro__
	if (hwt->is_idn)
		return INT_IDN_FIREWALL;
#endif
	return INT_UDN_FIREWALL;
}

static void enable_firewall_interrupts(struct hardwall_type *hwt)
{
	arch_local_irq_unmask_now(xdn_which_interrupt(hwt));
}

static void disable_firewall_interrupts(struct hardwall_type *hwt)
{
	arch_local_irq_mask_now(xdn_which_interrupt(hwt));
}

/* Set up hardwall on this cpu based on the passed hardwall_info. */
static void hardwall_setup_func(void *info)
{
	struct hardwall_info *r = info;
	struct hardwall_type *hwt = r->type;

	int cpu = smp_processor_id();
	int x = cpu % smp_width;
	int y = cpu / smp_width;
	int bits = 0;
	if (x == r->ulhc_x)
		bits |= W_PROTECT;
	if (x == r->ulhc_x + r->width - 1)
		bits |= E_PROTECT;
	if (y == r->ulhc_y)
		bits |= N_PROTECT;
	if (y == r->ulhc_y + r->height - 1)
		bits |= S_PROTECT;
	BUG_ON(bits == 0);
	mtspr_XDN(hwt, DIRECTION_PROTECT, bits);
	enable_firewall_interrupts(hwt);
}
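
/*
 * For instance, the cpu at the upper-left corner of the rectangle ends up
 * writing (N_PROTECT | W_PROTECT), while a non-corner cpu on the top edge
 * writes only N_PROTECT.  Interior cpus never run this function, since
 * hardwall_protect_rectangle() below only targets cpus on the rectangle's
 * edges, which is why bits == 0 is a BUG here.
 */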

/* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
static void hardwall_protect_rectangle(struct hardwall_info *r)
{
	int x, y, cpu, delta;
	struct cpumask rect_cpus;

	cpumask_clear(&rect_cpus);

	/* First include the top and bottom edges */
	cpu = r->ulhc_y * smp_width + r->ulhc_x;
	delta = (r->height - 1) * smp_width;
	for (x = 0; x < r->width; ++x, ++cpu) {
		cpu_online_set(cpu, &rect_cpus);
		cpu_online_set(cpu + delta, &rect_cpus);
	}

	/* Then the left and right edges */
	cpu -= r->width;
	delta = r->width - 1;
	for (y = 0; y < r->height; ++y, cpu += smp_width) {
		cpu_online_set(cpu, &rect_cpus);
		cpu_online_set(cpu + delta, &rect_cpus);
	}

	/* Then tell all the cpus to set up their protection SPR */
	on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
}

void __kprobes do_hardwall_trap(struct pt_regs *regs, int fault_num)
{
	struct hardwall_info *rect;
	struct hardwall_type *hwt;
	struct task_struct *p;
	struct siginfo info;
	int cpu = smp_processor_id();
	int found_processes;
	unsigned long flags;
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	/* Figure out which network trapped. */
	switch (fault_num) {
#ifndef __tilepro__
	case INT_IDN_FIREWALL:
		hwt = &hardwall_types[HARDWALL_IDN];
		break;
#endif
	case INT_UDN_FIREWALL:
		hwt = &hardwall_types[HARDWALL_UDN];
		break;
	default:
		BUG();
	}
	BUG_ON(hwt->disabled);

	/* This tile trapped a network access; find the rectangle. */
	spin_lock_irqsave(&hwt->lock, flags);
	list_for_each_entry(rect, &hwt->list, list) {
		if (cpumask_test_cpu(cpu, &rect->cpumask))
			break;
	}

	/*
	 * It shouldn't be possible not to find this cpu on the
	 * rectangle list, since only cpus in rectangles get hardwalled.
	 * The hardwall is only removed after the user network is drained.
	 */
	BUG_ON(&rect->list == &hwt->list);

	/*
	 * If we already started teardown on this hardwall, don't worry;
	 * the abort signal has been sent and we are just waiting for things
	 * to quiesce.
	 */
	if (rect->teardown_in_progress) {
		pr_notice("cpu %d: detected %s hardwall violation %#lx"
			  " while teardown already in progress\n",
			  cpu, hwt->name,
			  (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
		goto done;
	}

	/*
	 * Kill off any process that is activated in this rectangle.
	 * We bypass security to deliver the signal, since it must be
	 * one of the activated processes that generated the user network
	 * message that caused this trap, and all the activated
	 * processes shared a single open file so are pretty tightly
	 * bound together from a security point of view to begin with.
	 */
	rect->teardown_in_progress = 1;
	wmb(); /* Ensure visibility of rectangle before notifying processes. */
	pr_notice("cpu %d: detected %s hardwall violation %#lx...\n",
		  cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_HARDWALL;
	found_processes = 0;
	list_for_each_entry(p, &rect->task_head,
			    thread.hardwall[hwt->index].list) {
		BUG_ON(p->thread.hardwall[hwt->index].info != rect);
		if (!(p->flags & PF_EXITING)) {
			found_processes = 1;
			pr_notice("hardwall: killing %d\n", p->pid);
			do_send_sig_info(info.si_signo, &info, p, false);
		}
	}
	if (!found_processes)
		pr_notice("hardwall: no associated processes!\n");

done:
	spin_unlock_irqrestore(&hwt->lock, flags);

	/*
	 * We have to disable firewall interrupts now, or else when we
	 * return from this handler, we will simply re-interrupt back to
	 * it.  However, we can't clear the protection bits, since we
	 * haven't yet drained the network, and that would allow packets
	 * to cross out of the hardwall region.
	 */
	disable_firewall_interrupts(hwt);

	irq_exit();
	set_irq_regs(old_regs);
}

/* Allow access from user space to the user network. */
void grant_hardwall_mpls(struct hardwall_type *hwt)
{
#ifndef __tilepro__
	if (!hwt->is_xdn) {
		__insn_mtspr(SPR_MPL_IPI_0_SET_0, 1);
		return;
	}
#endif
	mtspr_MPL_XDN(hwt, ACCESS_SET_0, 1);
	mtspr_MPL_XDN(hwt, AVAIL_SET_0, 1);
	mtspr_MPL_XDN(hwt, COMPLETE_SET_0, 1);
	mtspr_MPL_XDN(hwt, TIMER_SET_0, 1);
#if !CHIP_HAS_REV1_XDN()
	mtspr_MPL_XDN(hwt, REFILL_SET_0, 1);
	mtspr_MPL_XDN(hwt, CA_SET_0, 1);
#endif
}

/* Deny access from user space to the user network. */
void restrict_hardwall_mpls(struct hardwall_type *hwt)
{
#ifndef __tilepro__
	if (!hwt->is_xdn) {
		__insn_mtspr(SPR_MPL_IPI_0_SET_1, 1);
		return;
	}
#endif
	mtspr_MPL_XDN(hwt, ACCESS_SET_1, 1);
	mtspr_MPL_XDN(hwt, AVAIL_SET_1, 1);
	mtspr_MPL_XDN(hwt, COMPLETE_SET_1, 1);
	mtspr_MPL_XDN(hwt, TIMER_SET_1, 1);
#if !CHIP_HAS_REV1_XDN()
	mtspr_MPL_XDN(hwt, REFILL_SET_1, 1);
	mtspr_MPL_XDN(hwt, CA_SET_1, 1);
#endif
}

/* Restrict or deny as necessary for the task we're switching to. */
void hardwall_switch_tasks(struct task_struct *prev,
			   struct task_struct *next)
{
	int i;
	for (i = 0; i < HARDWALL_TYPES; ++i) {
		if (prev->thread.hardwall[i].info != NULL) {
			if (next->thread.hardwall[i].info == NULL)
				restrict_hardwall_mpls(&hardwall_types[i]);
		} else if (next->thread.hardwall[i].info != NULL) {
			grant_hardwall_mpls(&hardwall_types[i]);
		}
	}
}
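
/*
 * In other words, when switching between tasks, PL0 access to each
 * hardwall type is granted only if the incoming task has that type
 * activated, and revoked if only the outgoing task did; if neither or
 * both tasks have it activated, the MPLs are left alone.
 */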

/* Does this task have the right to IPI the given cpu? */
int hardwall_ipi_valid(int cpu)
{
#ifdef __tilegx__
	struct hardwall_info *info =
		current->thread.hardwall[HARDWALL_IPI].info;
	return info && cpumask_test_cpu(cpu, &info->cpumask);
#else
	return 0;
#endif
}

/*
 * Code to create, activate, deactivate, and destroy hardwall resources.
 */

/* Create a hardwall for the given resource */
static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
					     size_t size,
					     const unsigned char __user *bits)
{
	struct hardwall_info *iter, *info;
	struct cpumask mask;
	unsigned long flags;
	int rc;

	/* Reject crazy sizes out of hand, a la sys_mbind(). */
	if (size > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* Copy whatever fits into a cpumask. */
	if (copy_from_user(&mask, bits, min(sizeof(struct cpumask), size)))
		return ERR_PTR(-EFAULT);

	/*
	 * If the size was short, clear the rest of the mask;
	 * otherwise validate that the rest of the user mask was zero
	 * (we don't try hard to be efficient when validating huge masks).
	 */
	if (size < sizeof(struct cpumask)) {
		memset((char *)&mask + size, 0, sizeof(struct cpumask) - size);
	} else if (size > sizeof(struct cpumask)) {
		size_t i;
		for (i = sizeof(struct cpumask); i < size; ++i) {
			char c;
			if (get_user(c, &bits[i]))
				return ERR_PTR(-EFAULT);
			if (c)
				return ERR_PTR(-EINVAL);
		}
	}

	/* Allocate a new hardwall_info optimistically. */
	info = kmalloc(sizeof(struct hardwall_info),
		       GFP_KERNEL | __GFP_ZERO);
	if (info == NULL)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&info->task_head);
	info->type = hwt;

	/* Compute the rectangle size and validate that it's plausible. */
	cpumask_copy(&info->cpumask, &mask);
	info->id = find_first_bit(cpumask_bits(&mask), nr_cpumask_bits);
	if (hwt->is_xdn) {
		rc = check_rectangle(info, &mask);
		if (rc != 0) {
			kfree(info);
			return ERR_PTR(rc);
		}
	}

	/* Confirm it doesn't overlap and add it to the list. */
	spin_lock_irqsave(&hwt->lock, flags);
	list_for_each_entry(iter, &hwt->list, list) {
		if (cpumask_intersects(&iter->cpumask, &info->cpumask)) {
			spin_unlock_irqrestore(&hwt->lock, flags);
			kfree(info);
			return ERR_PTR(-EBUSY);
		}
	}
	list_add_tail(&info->list, &hwt->list);
	spin_unlock_irqrestore(&hwt->lock, flags);

	/* Set up appropriate hardwalling on all affected cpus. */
	if (hwt->is_xdn)
		hardwall_protect_rectangle(info);

	/* Create a /proc/tile/hardwall entry. */
	hardwall_add_proc(info);

	return info;
}

/* Activate a given hardwall on this cpu for this process. */
static int hardwall_activate(struct hardwall_info *info)
{
	int cpu;
	unsigned long flags;
	struct task_struct *p = current;
	struct thread_struct *ts = &p->thread;
	struct hardwall_type *hwt;

	/* Require a hardwall. */
	if (info == NULL)
		return -ENODATA;

	/* Not allowed to activate a hardwall that is being torn down. */
	if (info->teardown_in_progress)
		return -EINVAL;

	/*
	 * Get our affinity; if we're not bound to this tile uniquely,
	 * we can't access the network registers.
	 */
	if (cpumask_weight(&p->cpus_allowed) != 1)
		return -EPERM;

	/* Make sure we are bound to a cpu assigned to this resource. */
	cpu = smp_processor_id();
	BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
	if (!cpumask_test_cpu(cpu, &info->cpumask))
		return -EINVAL;

	/* If we are already bound to this hardwall, it's a no-op. */
	hwt = info->type;
	if (ts->hardwall[hwt->index].info) {
		BUG_ON(ts->hardwall[hwt->index].info != info);
		return 0;
	}

	/* Success!  This process gets to use the resource on this cpu. */
	ts->hardwall[hwt->index].info = info;
	spin_lock_irqsave(&hwt->lock, flags);
	list_add(&ts->hardwall[hwt->index].list, &info->task_head);
	spin_unlock_irqrestore(&hwt->lock, flags);
	grant_hardwall_mpls(hwt);
	printk(KERN_DEBUG "Pid %d (%s) activated for %s hardwall: cpu %d\n",
	       p->pid, p->comm, hwt->name, cpu);
	return 0;
}

/*
 * Deactivate a task's hardwall.  Must hold lock for hardwall_type.
 * This method may be called from free_task(), so we don't want to
 * rely on too many fields of struct task_struct still being valid.
 * We assume the cpus_allowed, pid, and comm fields are still valid.
 */
static void _hardwall_deactivate(struct hardwall_type *hwt,
				 struct task_struct *task)
{
	struct thread_struct *ts = &task->thread;

	if (cpumask_weight(&task->cpus_allowed) != 1) {
		pr_err("pid %d (%s) releasing %s hardwall with"
		       " an affinity mask containing %d cpus!\n",
		       task->pid, task->comm, hwt->name,
		       cpumask_weight(&task->cpus_allowed));
		BUG();
	}

	BUG_ON(ts->hardwall[hwt->index].info == NULL);
	ts->hardwall[hwt->index].info = NULL;
	list_del(&ts->hardwall[hwt->index].list);
	if (task == current)
		restrict_hardwall_mpls(hwt);
}

/* Deactivate a task's hardwall. */
static int hardwall_deactivate(struct hardwall_type *hwt,
			       struct task_struct *task)
{
	unsigned long flags;
	int activated;

	spin_lock_irqsave(&hwt->lock, flags);
	activated = (task->thread.hardwall[hwt->index].info != NULL);
	if (activated)
		_hardwall_deactivate(hwt, task);
	spin_unlock_irqrestore(&hwt->lock, flags);

	if (!activated)
		return -EINVAL;

	printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
	       task->pid, task->comm, hwt->name, smp_processor_id());
	return 0;
}

void hardwall_deactivate_all(struct task_struct *task)
{
	int i;
	for (i = 0; i < HARDWALL_TYPES; ++i)
		if (task->thread.hardwall[i].info)
			hardwall_deactivate(&hardwall_types[i], task);
}

/* Stop the switch before draining the network. */
static void stop_xdn_switch(void *arg)
{
#if !CHIP_HAS_REV1_XDN()
	/* Freeze the switch and the demux. */
	__insn_mtspr(SPR_UDN_SP_FREEZE,
		     SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
		     SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
		     SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
#else
	/*
	 * Drop all packets bound for the core or off the edge.
	 * We rely on the normal hardwall protection setup code
	 * to have set the low four bits to trigger firewall interrupts,
	 * and shift those bits up to trigger "drop on send" semantics,
	 * plus adding "drop on send to core" for all switches.
	 * In practice it seems the switches latch the DIRECTION_PROTECT
	 * SPR so they won't start dropping if they're already
	 * delivering the last message to the core, but it doesn't
	 * hurt to enable it here.
	 */
	struct hardwall_type *hwt = arg;
	unsigned long protect = mfspr_XDN(hwt, DIRECTION_PROTECT);
	mtspr_XDN(hwt, DIRECTION_PROTECT, (protect | C_PROTECT) << 5);
#endif
}

static void empty_xdn_demuxes(struct hardwall_type *hwt)
{
#ifndef __tilepro__
	if (hwt->is_idn) {
		while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 0))
			(void) __tile_idn0_receive();
		while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 1))
			(void) __tile_idn1_receive();
		return;
	}
#endif
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
		(void) __tile_udn0_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
		(void) __tile_udn1_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
		(void) __tile_udn2_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
		(void) __tile_udn3_receive();
}

/* Drain all the state from a stopped switch. */
static void drain_xdn_switch(void *arg)
{
	struct hardwall_info *info = arg;
	struct hardwall_type *hwt = info->type;

#if CHIP_HAS_REV1_XDN()
	/*
	 * The switches have been configured to drop any messages
	 * destined for cores (or off the edge of the rectangle).
	 * But the current message may continue to be delivered,
	 * so we wait until all the cores have finished any pending
	 * messages before we stop draining.
	 */
	int pending = mfspr_XDN(hwt, PENDING);
	while (pending--) {
		empty_xdn_demuxes(hwt);
		if (hwt->is_idn)
			__tile_idn_send(0);
		else
			__tile_udn_send(0);
	}
	atomic_dec(&info->xdn_pending_count);
	while (atomic_read(&info->xdn_pending_count))
		empty_xdn_demuxes(hwt);
#else
	int i;
	int from_tile_words, ca_count;

	/* Empty out the 5 switch point fifos. */
	for (i = 0; i < 5; i++) {
		int words, j;
		__insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
		words = __insn_mfspr(SPR_UDN_SP_STATE) & 0xF;
		for (j = 0; j < words; j++)
			(void) __insn_mfspr(SPR_UDN_SP_FIFO_DATA);
		BUG_ON((__insn_mfspr(SPR_UDN_SP_STATE) & 0xF) != 0);
	}

	/* Dump out the 3 word fifo at top. */
	from_tile_words = (__insn_mfspr(SPR_UDN_DEMUX_STATUS) >> 10) & 0x3;
	for (i = 0; i < from_tile_words; i++)
		(void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);

	/* Empty out demuxes. */
	empty_xdn_demuxes(hwt);

	/* Empty out catch all. */
	ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
	for (i = 0; i < ca_count; i++)
		(void) __insn_mfspr(SPR_UDN_CA_DATA);
	BUG_ON(__insn_mfspr(SPR_UDN_DEMUX_CA_COUNT) != 0);

	/* Clear demux logic. */
	__insn_mtspr(SPR_UDN_DEMUX_CTL, 1);

	/*
	 * Write switch state; experimentation indicates that 0xc3000
	 * is an idle switch point.
	 */
	for (i = 0; i < 5; i++) {
		__insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
		__insn_mtspr(SPR_UDN_SP_STATE, 0xc3000);
	}
#endif
}

/* Reset random XDN state registers at boot up and during hardwall teardown. */
static void reset_xdn_network_state(struct hardwall_type *hwt)
{
	if (hwt->disabled)
		return;

	/* Clear out other random registers so we have a clean slate. */
	mtspr_XDN(hwt, DIRECTION_PROTECT, 0);
	mtspr_XDN(hwt, AVAIL_EN, 0);
	mtspr_XDN(hwt, DEADLOCK_TIMEOUT, 0);

#if !CHIP_HAS_REV1_XDN()
	/* Reset UDN coordinates to their standard value */
	{
		unsigned int cpu = smp_processor_id();
		unsigned int x = cpu % smp_width;
		unsigned int y = cpu / smp_width;
		__insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
	}

	/* Set demux tags to predefined values and enable them. */
	__insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
	__insn_mtspr(SPR_UDN_TAG_0, (1 << 0));
	__insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
	__insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
	__insn_mtspr(SPR_UDN_TAG_3, (1 << 3));

	/* Set other rev0 random registers to a clean state. */
	__insn_mtspr(SPR_UDN_REFILL_EN, 0);
	__insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
	__insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);

	/* Start the switch and demux. */
	__insn_mtspr(SPR_UDN_SP_FREEZE, 0);
#endif
}

void reset_network_state(void)
{
	reset_xdn_network_state(&hardwall_types[HARDWALL_UDN]);
#ifndef __tilepro__
	reset_xdn_network_state(&hardwall_types[HARDWALL_IDN]);
#endif
}

/* Restart an XDN switch after draining. */
static void restart_xdn_switch(void *arg)
{
	struct hardwall_type *hwt = arg;

#if CHIP_HAS_REV1_XDN()
	/* One last drain step to avoid races with injection and draining. */
	empty_xdn_demuxes(hwt);
#endif

	reset_xdn_network_state(hwt);

	/* Disable firewall interrupts. */
	disable_firewall_interrupts(hwt);
}

/* Last reference to a hardwall is gone, so clear the network. */
static void hardwall_destroy(struct hardwall_info *info)
{
	struct task_struct *task;
	struct hardwall_type *hwt;
	unsigned long flags;

	/* Make sure this file actually represents a hardwall. */
	if (info == NULL)
		return;

	/*
	 * Deactivate any remaining tasks.  It's possible to race with
	 * some other thread that is exiting and hasn't yet called
	 * deactivate (when freeing its thread_info), so we carefully
	 * deactivate any remaining tasks before freeing the
	 * hardwall_info object itself.
	 */
	hwt = info->type;
	info->teardown_in_progress = 1;
	spin_lock_irqsave(&hwt->lock, flags);
	list_for_each_entry(task, &info->task_head,
			    thread.hardwall[hwt->index].list)
		_hardwall_deactivate(hwt, task);
	spin_unlock_irqrestore(&hwt->lock, flags);

	if (hwt->is_xdn) {
		/* Configure the switches for draining the user network. */
		printk(KERN_DEBUG
		       "Clearing %s hardwall rectangle %dx%d %d,%d\n",
		       hwt->name, info->width, info->height,
		       info->ulhc_x, info->ulhc_y);
		on_each_cpu_mask(&info->cpumask, stop_xdn_switch, hwt, 1);

		/* Drain the network. */
#if CHIP_HAS_REV1_XDN()
		atomic_set(&info->xdn_pending_count,
			   cpumask_weight(&info->cpumask));
		on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 0);
#else
		on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 1);
#endif

		/* Restart switch and disable firewall. */
		on_each_cpu_mask(&info->cpumask, restart_xdn_switch, hwt, 1);
	}

	/* Remove the /proc/tile/hardwall entry. */
	hardwall_remove_proc(info);

	/* Now free the hardwall from the list. */
	spin_lock_irqsave(&hwt->lock, flags);
	BUG_ON(!list_empty(&info->task_head));
	list_del(&info->list);
	spin_unlock_irqrestore(&hwt->lock, flags);
	kfree(info);
}


static int hardwall_proc_show(struct seq_file *sf, void *v)
{
	struct hardwall_info *info = sf->private;
	char buf[256];

	int rc = cpulist_scnprintf(buf, sizeof(buf), &info->cpumask);
	buf[rc++] = '\n';
	seq_write(sf, buf, rc);
	return 0;
}

static int hardwall_proc_open(struct inode *inode,
			      struct file *file)
{
	return single_open(file, hardwall_proc_show, PDE(inode)->data);
}

static const struct file_operations hardwall_proc_fops = {
	.open		= hardwall_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void hardwall_add_proc(struct hardwall_info *info)
{
	char buf[64];
	snprintf(buf, sizeof(buf), "%d", info->id);
	proc_create_data(buf, 0444, info->type->proc_dir,
			 &hardwall_proc_fops, info);
}

static void hardwall_remove_proc(struct hardwall_info *info)
{
	char buf[64];
	snprintf(buf, sizeof(buf), "%d", info->id);
	remove_proc_entry(buf, info->type->proc_dir);
}

int proc_pid_hardwall(struct task_struct *task, char *buffer)
{
	int i;
	int n = 0;
	for (i = 0; i < HARDWALL_TYPES; ++i) {
		struct hardwall_info *info = task->thread.hardwall[i].info;
		if (info)
			n += sprintf(&buffer[n], "%s: %d\n",
				     info->type->name, info->id);
	}
	return n;
}

void proc_tile_hardwall_init(struct proc_dir_entry *root)
{
	int i;
	for (i = 0; i < HARDWALL_TYPES; ++i) {
		struct hardwall_type *hwt = &hardwall_types[i];
		if (hwt->disabled)
			continue;
		if (hardwall_proc_dir == NULL)
			hardwall_proc_dir = proc_mkdir("hardwall", root);
		hwt->proc_dir = proc_mkdir(hwt->name, hardwall_proc_dir);
	}
}
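
/*
 * With the above, each active hardwall shows up as a per-id file under
 * /proc/tile/hardwall/<type>/ (e.g. "udn/3") containing the cpu list of
 * the reservation, and proc_pid_hardwall() reports "<type>: <id>" lines
 * for each hardwall a given task has activated.
 */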


/*
 * Character device support via ioctl/close.
 */

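/*
 * Illustrative user-space usage (a sketch, not part of this file), assuming
 * the HARDWALL_CREATE()/HARDWALL_ACTIVATE ioctl macros from <asm/hardwall.h>
 * and a device node for the desired minor (the path below is hypothetical):
 *
 *	int fd = open("/dev/hardwall/udn", O_RDWR);
 *	ioctl(fd, HARDWALL_CREATE(mask_bytes), mask);	// reserve the cpus
 *	// ...bind to exactly one cpu inside the mask, then:
 *	ioctl(fd, HARDWALL_ACTIVATE);			// gain PL0 network access
 *
 * Closing the file revokes access and tears the hardwall down, as
 * implemented by hardwall_flush() and hardwall_release() below.
 */
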
static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
{
	struct hardwall_info *info = file->private_data;
	int minor = iminor(file->f_mapping->host);
	struct hardwall_type *hwt;

	if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
		return -EINVAL;

	BUILD_BUG_ON(HARDWALL_TYPES != _HARDWALL_TYPES);
	BUILD_BUG_ON(HARDWALL_TYPES !=
		     sizeof(hardwall_types)/sizeof(hardwall_types[0]));

	if (minor < 0 || minor >= HARDWALL_TYPES)
		return -EINVAL;
	hwt = &hardwall_types[minor];
	WARN_ON(info && hwt != info->type);

	switch (_IOC_NR(a)) {
	case _HARDWALL_CREATE:
		if (hwt->disabled)
			return -ENOSYS;
		if (info != NULL)
			return -EALREADY;
		info = hardwall_create(hwt, _IOC_SIZE(a),
				       (const unsigned char __user *)b);
		if (IS_ERR(info))
			return PTR_ERR(info);
		file->private_data = info;
		return 0;

	case _HARDWALL_ACTIVATE:
		return hardwall_activate(info);

	case _HARDWALL_DEACTIVATE:
		if (current->thread.hardwall[hwt->index].info != info)
			return -EINVAL;
		return hardwall_deactivate(hwt, current);

	case _HARDWALL_GET_ID:
		return info ? info->id : -EINVAL;

	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long hardwall_compat_ioctl(struct file *file,
				  unsigned int a, unsigned long b)
{
	/* Sign-extend the argument so it can be used as a pointer. */
	return hardwall_ioctl(file, a, (unsigned long)compat_ptr(b));
}
#endif

/* The user process closed the file; revoke access to user networks. */
static int hardwall_flush(struct file *file, fl_owner_t owner)
{
	struct hardwall_info *info = file->private_data;
	struct task_struct *task, *tmp;
	unsigned long flags;

	if (info) {
		/*
		 * NOTE: if multiple threads are activated on this hardwall
		 * file, the other threads will continue having access to the
		 * user network until they are context-switched out and back
		 * in again.
		 *
		 * NOTE: A NULL files pointer means the task is being torn
		 * down, so in that case we also deactivate it.
		 */
		struct hardwall_type *hwt = info->type;
		spin_lock_irqsave(&hwt->lock, flags);
		list_for_each_entry_safe(task, tmp, &info->task_head,
					 thread.hardwall[hwt->index].list) {
			if (task->files == owner || task->files == NULL)
				_hardwall_deactivate(hwt, task);
		}
		spin_unlock_irqrestore(&hwt->lock, flags);
	}

	return 0;
}

/* This hardwall is gone, so destroy it. */
static int hardwall_release(struct inode *inode, struct file *file)
{
	hardwall_destroy(file->private_data);
	return 0;
}

static const struct file_operations dev_hardwall_fops = {
	.open           = nonseekable_open,
	.unlocked_ioctl = hardwall_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = hardwall_compat_ioctl,
#endif
	.flush          = hardwall_flush,
	.release        = hardwall_release,
};

static struct cdev hardwall_dev;

static int __init dev_hardwall_init(void)
{
	int rc;
	dev_t dev;

	rc = alloc_chrdev_region(&dev, 0, HARDWALL_TYPES, "hardwall");
	if (rc < 0)
		return rc;
	cdev_init(&hardwall_dev, &dev_hardwall_fops);
	rc = cdev_add(&hardwall_dev, dev, HARDWALL_TYPES);
	if (rc < 0)
		return rc;

	return 0;
}
late_initcall(dev_hardwall_init);