]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - arch/mips/kernel/vpe.c
new helper: file_inode(file)
[mirror_ubuntu-artful-kernel.git] / arch / mips / kernel / vpe.c
CommitLineData
e01402b1
RB
1/*
2 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
3 *
4 * This program is free software; you can distribute it and/or modify it
5 * under the terms of the GNU General Public License (Version 2) as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
e01402b1
RB
16 */
17
18/*
19 * VPE support module
20 *
21 * Provides support for loading a MIPS SP program on VPE1.
25985edc 22 * The SP environment is rather simple, no tlb's. It needs to be relocatable
e01402b1
RB
23 * (or partially linked). You should initialise your stack in the startup
24 * code. This loader looks for the symbol __start and sets up
25 * execution to resume from there. The MIPS SDE kit contains suitable examples.
26 *
27 * To load and run, simply cat a SP 'program file' to /dev/vpe1.
28 * i.e cat spapp >/dev/vpe1.
e01402b1 29 */
e01402b1 30#include <linux/kernel.h>
27a3bbaf 31#include <linux/device.h>
e01402b1
RB
32#include <linux/fs.h>
33#include <linux/init.h>
34#include <asm/uaccess.h>
35#include <linux/slab.h>
36#include <linux/list.h>
37#include <linux/vmalloc.h>
38#include <linux/elf.h>
39#include <linux/seq_file.h>
40#include <linux/syscalls.h>
41#include <linux/moduleloader.h>
42#include <linux/interrupt.h>
43#include <linux/poll.h>
44#include <linux/bootmem.h>
45#include <asm/mipsregs.h>
340ee4b9 46#include <asm/mipsmtregs.h>
e01402b1 47#include <asm/cacheflush.h>
60063497 48#include <linux/atomic.h>
e01402b1 49#include <asm/cpu.h>
27a3bbaf 50#include <asm/mips_mt.h>
e01402b1 51#include <asm/processor.h>
2600990e 52#include <asm/vpe.h>
e01402b1
RB
53
54typedef void *vpe_handle;
55
e01402b1
RB
56#ifndef ARCH_SHF_SMALL
57#define ARCH_SHF_SMALL 0
58#endif
59
60/* If this is set, the section belongs in the init part of the module */
61#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
62
41790e04
RB
63/*
64 * The number of TCs and VPEs physically available on the core
65 */
66static int hw_tcs, hw_vpes;
e01402b1 67static char module_name[] = "vpe";
307bd284 68static int major;
27a3bbaf 69static const int minor = 1; /* fixed for now */
e01402b1
RB
70
71/* grab the likely amount of memory we will need. */
72#ifdef CONFIG_MIPS_VPE_LOADER_TOM
73#define P_SIZE (2 * 1024 * 1024)
74#else
75/* add an overhead to the max kmalloc size for non-striped symbols/etc */
76#define P_SIZE (256 * 1024)
77#endif
78
2600990e
RB
79extern unsigned long physical_memsize;
80
e01402b1 81#define MAX_VPES 16
2600990e 82#define VPE_PATH_MAX 256
e01402b1
RB
83
/* Lifecycle states of a VPE (virtual processing element). */
enum vpe_state {
	VPE_STATE_UNUSED = 0,	/* free for allocation */
	VPE_STATE_INUSE,	/* device opened, program being loaded */
	VPE_STATE_RUNNING	/* program started */
};
89
/* Lifecycle states of a TC (thread context). */
enum tc_state {
	TC_STATE_UNUSED = 0,
	TC_STATE_INUSE,
	TC_STATE_RUNNING,
	TC_STATE_DYNAMIC
};
96
/*
 * Per-VPE loader state: device association, the ELF image being
 * loaded, and the TCs bound to this VPE.
 */
struct vpe {
	enum vpe_state state;

	/* (device) minor associated with this vpe */
	int minor;

	/* elfloader stuff */
	void *load_addr;	/* relocated program image (see alloc_progmem) */
	unsigned long len;
	char *pbuffer;		/* raw ELF bytes written by userspace */
	unsigned long plen;	/* size of pbuffer */
	unsigned int uid, gid;	/* credentials captured at open time */
	char cwd[VPE_PATH_MAX];	/* opener's working directory */

	unsigned long __start;	/* entry point, from the __start symbol */

	/* tc's associated with this vpe */
	struct list_head tc;

	/* The list of vpe's */
	struct list_head list;

	/* shared symbol address */
	void *shared_ptr;

	/* the list of who wants to know when something major happens */
	struct list_head notify;

	/* number of TCs; handed to the loaded program by vpe_run() */
	unsigned int ntcs;
};
127
/* A hardware thread context (TC) and its VPE binding. */
struct tc {
	enum tc_state state;
	int index;		/* physical TC index */

	struct vpe *pvpe;	/* parent VPE */
	struct list_head tc;	/* The list of TC's with this VPE */
	struct list_head list;	/* The global list of tc's */
};
e01402b1 136
/* Global bookkeeping for all VPEs and TCs known to the loader. */
struct {
	spinlock_t vpe_list_lock;	/* protects vpe_list */
	struct list_head vpe_list;	/* Virtual processing elements */
	spinlock_t tc_list_lock;	/* protects tc_list */
	struct list_head tc_list;	/* Thread contexts */
} vpecontrol = {
	.vpe_list_lock	= __SPIN_LOCK_UNLOCKED(vpe_list_lock),
	.vpe_list	= LIST_HEAD_INIT(vpecontrol.vpe_list),
	.tc_list_lock	= __SPIN_LOCK_UNLOCKED(tc_list_lock),
	.tc_list	= LIST_HEAD_INIT(vpecontrol.tc_list)
};
e01402b1
RB
148
149static void release_progmem(void *ptr);
e01402b1
RB
150
151/* get the vpe associated with this minor */
f18b51cc 152static struct vpe *get_vpe(int minor)
e01402b1 153{
1bbfc20d 154 struct vpe *res, *v;
e01402b1 155
2600990e
RB
156 if (!cpu_has_mipsmt)
157 return NULL;
158
1bbfc20d
RB
159 res = NULL;
160 spin_lock(&vpecontrol.vpe_list_lock);
e01402b1 161 list_for_each_entry(v, &vpecontrol.vpe_list, list) {
1bbfc20d
RB
162 if (v->minor == minor) {
163 res = v;
164 break;
165 }
e01402b1 166 }
1bbfc20d 167 spin_unlock(&vpecontrol.vpe_list_lock);
e01402b1 168
1bbfc20d 169 return res;
e01402b1
RB
170}
171
172/* get the vpe associated with this minor */
f18b51cc 173static struct tc *get_tc(int index)
e01402b1 174{
1bbfc20d 175 struct tc *res, *t;
e01402b1 176
1bbfc20d
RB
177 res = NULL;
178 spin_lock(&vpecontrol.tc_list_lock);
e01402b1 179 list_for_each_entry(t, &vpecontrol.tc_list, list) {
1bbfc20d
RB
180 if (t->index == index) {
181 res = t;
182 break;
183 }
e01402b1 184 }
1bbfc20d 185 spin_unlock(&vpecontrol.tc_list_lock);
e01402b1 186
9fbcbd7e 187 return res;
e01402b1
RB
188}
189
e01402b1 190/* allocate a vpe and associate it with this minor (or index) */
f18b51cc 191static struct vpe *alloc_vpe(int minor)
e01402b1
RB
192{
193 struct vpe *v;
194
1bbfc20d 195 if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL)
e01402b1 196 return NULL;
e01402b1 197
e01402b1 198 INIT_LIST_HEAD(&v->tc);
1bbfc20d 199 spin_lock(&vpecontrol.vpe_list_lock);
e01402b1 200 list_add_tail(&v->list, &vpecontrol.vpe_list);
1bbfc20d 201 spin_unlock(&vpecontrol.vpe_list_lock);
e01402b1 202
2600990e 203 INIT_LIST_HEAD(&v->notify);
e01402b1 204 v->minor = minor;
1bbfc20d 205
e01402b1
RB
206 return v;
207}
208
209/* allocate a tc. At startup only tc0 is running, all other can be halted. */
f18b51cc 210static struct tc *alloc_tc(int index)
e01402b1 211{
07cc0c9e 212 struct tc *tc;
e01402b1 213
07cc0c9e
RB
214 if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL)
215 goto out;
e01402b1 216
07cc0c9e
RB
217 INIT_LIST_HEAD(&tc->tc);
218 tc->index = index;
1bbfc20d
RB
219
220 spin_lock(&vpecontrol.tc_list_lock);
07cc0c9e 221 list_add_tail(&tc->list, &vpecontrol.tc_list);
1bbfc20d 222 spin_unlock(&vpecontrol.tc_list_lock);
e01402b1 223
07cc0c9e
RB
224out:
225 return tc;
e01402b1
RB
226}
227
228/* clean up and free everything */
f18b51cc 229static void release_vpe(struct vpe *v)
e01402b1
RB
230{
231 list_del(&v->list);
232 if (v->load_addr)
233 release_progmem(v);
234 kfree(v);
235}
236
/* Debug helper: dump the MT-related CP0 registers (Config3,
   MVPControl, MVPConf0) with their interesting fields decoded. */
static void __maybe_unused dump_mtregs(void)
{
	unsigned long val;

	val = read_c0_config3();
	printk("config3 0x%lx MT %ld\n", val,
	       (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT);

	val = read_c0_mvpcontrol();
	printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val,
	       (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT,
	       (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT,
	       (val & MVPCONTROL_EVP));

	val = read_c0_mvpconf0();
	printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val,
	       (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT,
	       val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
}
256
/* Find some VPE program space */
static void *alloc_progmem(unsigned long len)
{
	void *addr;

#ifdef CONFIG_MIPS_VPE_LOADER_TOM
	/*
	 * This means you must tell Linux to use less memory than you
	 * physically have, for example by passing a mem= boot argument.
	 *
	 * NOTE(review): assumes the region above max_low_pfn actually
	 * contains at least 'len' bytes stolen from Linux — nothing
	 * here verifies that.  Confirm against the platform setup.
	 */
	addr = pfn_to_kaddr(max_low_pfn);
	memset(addr, 0, len);
#else
	/* simple grab some mem for now */
	addr = kzalloc(len, GFP_KERNEL);
#endif

	return addr;
}
276
/* Free memory obtained from alloc_progmem(); a no-op when the image
   lives in physical memory stolen from Linux (MIPS_VPE_LOADER_TOM). */
static void release_progmem(void *ptr)
{
#ifndef CONFIG_MIPS_VPE_LOADER_TOM
	kfree(ptr);
#endif
}
283
284/* Update size with this section: return offset. */
285static long get_offset(unsigned long *size, Elf_Shdr * sechdr)
286{
287 long ret;
288
289 ret = ALIGN(*size, sechdr->sh_addralign ? : 1);
290 *size = ret + sechdr->sh_size;
291 return ret;
292}
293
/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
   might -- code, read-only data, read-write data, small data.  Tally
   sizes, and place the offsets into sh_entsize fields: high bit means it
   belongs in init. */
static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
			    Elf_Shdr * sechdrs, const char *secstrings)
{
	/* One pass per pair: (flags a section must have, flags it must not). */
	static unsigned long const masks[][2] = {
		/* NOTE: all executable code must be the first section
		 * in this array; otherwise modify the text_size
		 * finder in the two loops below */
		{SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL},
		{SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL},
		{SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL},
		{ARCH_SHF_SMALL | SHF_ALLOC, 0}
	};
	unsigned int m, i;

	/* ~0UL in sh_entsize marks "not yet placed". */
	for (i = 0; i < hdr->e_shnum; i++)
		sechdrs[i].sh_entsize = ~0UL;

	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < hdr->e_shnum; ++i) {
			Elf_Shdr *s = &sechdrs[i];

			//  || strncmp(secstrings + s->sh_name, ".init", 5) == 0)
			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL)
				continue;
			s->sh_entsize =
				get_offset((unsigned long *)&mod->core_size, s);
		}

		/* pass 0 placed all executable sections: record text size */
		if (m == 0)
			mod->core_text_size = mod->core_size;

	}
}
333
334
/* from module-elf32.c, but subverted a little */

/* A pending R_MIPS_HI16 relocation, waiting for its paired LO16. */
struct mips_hi16 {
	struct mips_hi16 *next;	/* singly-linked list link */
	Elf32_Addr *addr;	/* instruction word to patch */
	Elf32_Addr value;	/* symbol value the HI16 refers to */
};

/* Queue of unresolved HI16s, drained by apply_r_mips_lo16().
   NOTE(review): no locking — presumably serialized by the single
   /dev/vpe1 user; confirm before assuming concurrent loads are safe. */
static struct mips_hi16 *mips_hi16_list;
/* $gp state captured by save_gp_address() for R_MIPS_GPREL16. */
static unsigned int gp_offs, gp_addr;
345
/* R_MIPS_NONE: nothing to patch. */
static int apply_r_mips_none(struct module *me, uint32_t *location,
			     Elf32_Addr v)
{
	return 0;
}
351
352static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
353 Elf32_Addr v)
354{
355 int rel;
356
357 if( !(*location & 0xffff) ) {
358 rel = (int)v - gp_addr;
359 }
360 else {
361 /* .sbss + gp(relative) + offset */
362 /* kludge! */
363 rel = (int)(short)((int)v + gp_offs +
364 (int)(short)(*location & 0xffff) - gp_addr);
365 }
366
367 if( (rel > 32768) || (rel < -32768) ) {
2600990e
RB
368 printk(KERN_DEBUG "VPE loader: apply_r_mips_gprel16: "
369 "relative address 0x%x out of range of gp register\n",
370 rel);
e01402b1
RB
371 return -ENOEXEC;
372 }
373
374 *location = (*location & 0xffff0000) | (rel & 0xffff);
375
376 return 0;
377}
378
379static int apply_r_mips_pc16(struct module *me, uint32_t *location,
380 Elf32_Addr v)
381{
382 int rel;
383 rel = (((unsigned int)v - (unsigned int)location));
384 rel >>= 2; // because the offset is in _instructions_ not bytes.
385 rel -= 1; // and one instruction less due to the branch delay slot.
386
387 if( (rel > 32768) || (rel < -32768) ) {
2600990e
RB
388 printk(KERN_DEBUG "VPE loader: "
389 "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
e01402b1
RB
390 return -ENOEXEC;
391 }
392
393 *location = (*location & 0xffff0000) | (rel & 0xffff);
394
395 return 0;
396}
397
/* R_MIPS_32: add the symbol value to the 32-bit word in place. */
static int apply_r_mips_32(struct module *me, uint32_t *location,
			   Elf32_Addr v)
{
	*location = *location + v;

	return 0;
}
405
/* R_MIPS_26: patch the 26-bit jump target field of a j/jal; the
   field's existing contents act as the addend.  @v must be
   word-aligned. */
static int apply_r_mips_26(struct module *me, uint32_t *location,
			   Elf32_Addr v)
{
	if (v % 4) {
		printk(KERN_DEBUG "VPE loader: apply_r_mips_26 "
		       " unaligned relocation\n");
		return -ENOEXEC;
	}

/*
 * Not desperately convinced this is a good check of an overflow condition
 * anyway. But it gets in the way of handling undefined weak symbols which
 * we want to set to zero.
 * if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
 * printk(KERN_ERR
 * "module %s: relocation overflow\n",
 * me->name);
 * return -ENOEXEC;
 * }
 */

	/* carries from the high opcode bits cannot reach the low 26,
	   so masking after the add is safe */
	*location = (*location & ~0x03ffffff) |
		((*location + (v >> 2)) & 0x03ffffff);
	return 0;
}
431
432static int apply_r_mips_hi16(struct module *me, uint32_t *location,
433 Elf32_Addr v)
434{
435 struct mips_hi16 *n;
436
437 /*
438 * We cannot relocate this one now because we don't know the value of
439 * the carry we need to add. Save the information, and let LO16 do the
440 * actual relocation.
441 */
442 n = kmalloc(sizeof *n, GFP_KERNEL);
443 if (!n)
444 return -ENOMEM;
445
446 n->addr = location;
447 n->value = v;
448 n->next = mips_hi16_list;
449 mips_hi16_list = n;
450
451 return 0;
452}
453
/*
 * R_MIPS_LO16: patch the low 16-bit immediate and, along the way,
 * resolve every pending R_MIPS_HI16 queued by apply_r_mips_hi16(),
 * since the HI16 half needs the carry produced by this LO16 addend.
 * On a value mismatch all pending HI16 nodes are freed and -ENOEXEC
 * is returned.
 */
static int apply_r_mips_lo16(struct module *me, uint32_t *location,
			     Elf32_Addr v)
{
	unsigned long insnlo = *location;
	Elf32_Addr val, vallo;
	struct mips_hi16 *l, *next;

	/* Sign extend the addend we extract from the lo insn. */
	vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;

	if (mips_hi16_list != NULL) {

		l = mips_hi16_list;
		while (l != NULL) {
			unsigned long insn;

			/*
			 * The value for the HI16 had best be the same.
			 */
			if (v != l->value) {
				printk(KERN_DEBUG "VPE loader: "
				       "apply_r_mips_lo16/hi16: \t"
				       "inconsistent value information\n");
				goto out_free;
			}

			/*
			 * Do the HI16 relocation.  Note that we actually don't
			 * need to know anything about the LO16 itself, except
			 * where to find the low 16 bits of the addend needed
			 * by the LO16.
			 */
			insn = *l->addr;
			val = ((insn & 0xffff) << 16) + vallo;
			val += v;

			/*
			 * Account for the sign extension that will happen in
			 * the low bits.
			 */
			val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;

			insn = (insn & ~0xffff) | val;
			*l->addr = insn;

			next = l->next;
			kfree(l);
			l = next;
		}

		mips_hi16_list = NULL;
	}

	/*
	 * Ok, we're done with the HI16 relocs.  Now deal with the LO16.
	 */
	val = v + vallo;
	insnlo = (insnlo & ~0xffff) | (val & 0xffff);
	*location = insnlo;

	return 0;

out_free:
	/* drop the remaining queued HI16s so later relocs start clean */
	while (l != NULL) {
		next = l->next;
		kfree(l);
		l = next;
	}
	mips_hi16_list = NULL;

	return -ENOEXEC;
}
526
/* Relocation handlers indexed by ELF32 relocation type; entries not
   listed are NULL.  apply_relocations() indexes this directly with
   ELF32_R_TYPE(). */
static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
				Elf32_Addr v) = {
	[R_MIPS_NONE] = apply_r_mips_none,
	[R_MIPS_32] = apply_r_mips_32,
	[R_MIPS_26] = apply_r_mips_26,
	[R_MIPS_HI16] = apply_r_mips_hi16,
	[R_MIPS_LO16] = apply_r_mips_lo16,
	[R_MIPS_GPREL16] = apply_r_mips_gprel16,
	[R_MIPS_PC16] = apply_r_mips_pc16
};
537
/* Human-readable names for the relocation types above (diagnostics). */
static char *rstrs[] = {
	[R_MIPS_NONE] = "MIPS_NONE",
	[R_MIPS_32] = "MIPS_32",
	[R_MIPS_26] = "MIPS_26",
	[R_MIPS_HI16] = "MIPS_HI16",
	[R_MIPS_LO16] = "MIPS_LO16",
	[R_MIPS_GPREL16] = "MIPS_GPREL16",
	[R_MIPS_PC16] = "MIPS_PC16"
};
e01402b1 547
/*
 * Walk the SHT_REL section @relsec, dispatching each entry to the
 * handler in reloc_handlers[].  Returns 0, or the first handler error.
 *
 * NOTE(review): ELF32_R_TYPE(r_info) indexes reloc_handlers[] without
 * a bounds/NULL check — an unexpected relocation type in the input
 * image would fault here; verify against the callers' sanity checks.
 */
static int apply_relocations(Elf32_Shdr *sechdrs,
			     const char *strtab,
			     unsigned int symindex,
			     unsigned int relsec,
			     struct module *me)
{
	Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;
	unsigned int i;
	Elf32_Addr v;
	int res;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		Elf32_Word r_info = rel[i].r_info;

		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(r_info);

		if (!sym->st_value) {
			printk(KERN_DEBUG "%s: undefined weak symbol %s\n",
			       me->name, strtab + sym->st_name);
			/* just print the warning, dont barf */
		}

		v = sym->st_value;

		res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
		if( res ) {
			char *r = rstrs[ELF32_R_TYPE(r_info)];
			printk(KERN_WARNING "VPE loader: .text+0x%x "
			       "relocation type %s for symbol \"%s\" failed\n",
			       rel[i].r_offset, r ? r : "UNKNOWN",
			       strtab + sym->st_name);
			return res;
		}
	}

	return 0;
}
592
/*
 * Record the program's $gp value (gp_addr) and its offset from the
 * 64K-aligned section base (gp_offs), for use by
 * apply_r_mips_gprel16().
 */
static inline void save_gp_address(unsigned int secbase, unsigned int rel)
{
	gp_addr = secbase + rel;
	gp_offs = gp_addr - (secbase & 0xffff0000);
}
598/* end module-elf32.c */
599
600
601
/* Change all symbols so that sh_value encodes the pointer directly:
   COMMON symbols get space carved out of .bss, section-relative
   symbols get their section base added, and the _gp symbol is
   captured for GPREL16 relocation. */
static void simplify_symbols(Elf_Shdr * sechdrs,
			     unsigned int symindex,
			     const char *strtab,
			     const char *secstrings,
			     unsigned int nsecs, struct module *mod)
{
	Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
	unsigned long secbase, bssbase = 0;
	unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
	int size;

	/* find the .bss section for COMMON symbols */
	for (i = 0; i < nsecs; i++) {
		if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) {
			bssbase = sechdrs[i].sh_addr;
			break;
		}
	}

	/* symbol 0 is the ELF null symbol; skip it */
	for (i = 1; i < n; i++) {
		switch (sym[i].st_shndx) {
		case SHN_COMMON:
			/* Allocate space for the symbol in the .bss section.
			   st_value is currently size.
			   We want it to have the address of the symbol. */

			size = sym[i].st_value;
			sym[i].st_value = bssbase;

			bssbase += size;
			break;

		case SHN_ABS:
			/* Don't need to do anything */
			break;

		case SHN_UNDEF:
			/* ret = -ENOENT; */
			break;

		case SHN_MIPS_SCOMMON:
			printk(KERN_DEBUG "simplify_symbols: ignoring SHN_MIPS_SCOMMON "
			       "symbol <%s> st_shndx %d\n", strtab + sym[i].st_name,
			       sym[i].st_shndx);
			// .sbss section
			break;

		default:
			secbase = sechdrs[sym[i].st_shndx].sh_addr;

			if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) {
				save_gp_address(secbase, sym[i].st_value);
			}

			sym[i].st_value += secbase;
			break;
		}
	}
}
662
663#ifdef DEBUG_ELFLOADER
/* Debug helper: print every symbol in the symtab section. */
static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex,
			    const char *strtab, struct module *mod)
{
	Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
	unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);

	printk(KERN_DEBUG "dump_elfsymbols: n %d\n", n);
	for (i = 1; i < n; i++) {
		printk(KERN_DEBUG " i %d name <%s> 0x%x\n", i,
		       strtab + sym[i].st_name, sym[i].st_value);
	}
}
676#endif
677
e01402b1 678/* We are prepared so configure and start the VPE... */
be6e1437 679static int vpe_run(struct vpe * v)
e01402b1 680{
07cc0c9e 681 unsigned long flags, val, dmt_flag;
2600990e 682 struct vpe_notifications *n;
07cc0c9e 683 unsigned int vpeflags;
e01402b1
RB
684 struct tc *t;
685
686 /* check we are the Master VPE */
07cc0c9e 687 local_irq_save(flags);
e01402b1
RB
688 val = read_c0_vpeconf0();
689 if (!(val & VPECONF0_MVP)) {
690 printk(KERN_WARNING
2600990e 691 "VPE loader: only Master VPE's are allowed to configure MT\n");
07cc0c9e
RB
692 local_irq_restore(flags);
693
e01402b1
RB
694 return -1;
695 }
696
07cc0c9e
RB
697 dmt_flag = dmt();
698 vpeflags = dvpe();
e01402b1 699
2600990e 700 if (!list_empty(&v->tc)) {
e0daad44 701 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
07cc0c9e
RB
702 evpe(vpeflags);
703 emt(dmt_flag);
704 local_irq_restore(flags);
705
706 printk(KERN_WARNING
707 "VPE loader: TC %d is already in use.\n",
708 t->index);
e0daad44
RB
709 return -ENOEXEC;
710 }
711 } else {
07cc0c9e
RB
712 evpe(vpeflags);
713 emt(dmt_flag);
714 local_irq_restore(flags);
715
716 printk(KERN_WARNING
717 "VPE loader: No TC's associated with VPE %d\n",
e0daad44 718 v->minor);
07cc0c9e 719
e0daad44
RB
720 return -ENOEXEC;
721 }
2600990e 722
e01402b1 723 /* Put MVPE's into 'configuration state' */
340ee4b9 724 set_c0_mvpcontrol(MVPCONTROL_VPC);
e01402b1 725
e01402b1
RB
726 settc(t->index);
727
e01402b1
RB
728 /* should check it is halted, and not activated */
729 if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) {
07cc0c9e
RB
730 evpe(vpeflags);
731 emt(dmt_flag);
732 local_irq_restore(flags);
733
734 printk(KERN_WARNING "VPE loader: TC %d is already active!\n",
e01402b1 735 t->index);
07cc0c9e 736
e01402b1
RB
737 return -ENOEXEC;
738 }
739
740 /* Write the address we want it to start running from in the TCPC register. */
741 write_tc_c0_tcrestart((unsigned long)v->__start);
e01402b1 742 write_tc_c0_tccontext((unsigned long)0);
07cc0c9e 743
2600990e
RB
744 /*
745 * Mark the TC as activated, not interrupt exempt and not dynamically
746 * allocatable
747 */
e01402b1
RB
748 val = read_tc_c0_tcstatus();
749 val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
750 write_tc_c0_tcstatus(val);
751
752 write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
753
e01402b1
RB
754 /*
755 * The sde-kit passes 'memsize' to __start in $a3, so set something
2600990e 756 * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
e01402b1
RB
757 * DFLT_HEAP_SIZE when you compile your program
758 */
41790e04 759 mttgpr(6, v->ntcs);
07cc0c9e 760 mttgpr(7, physical_memsize);
2600990e
RB
761
762 /* set up VPE1 */
763 /*
764 * bind the TC to VPE 1 as late as possible so we only have the final
765 * VPE registers to set up, and so an EJTAG probe can trigger on it
766 */
07cc0c9e 767 write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
e01402b1 768
a94d7020
EO
769 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
770
771 back_to_back_c0_hazard();
772
e0daad44
RB
773 /* Set up the XTC bit in vpeconf0 to point at our tc */
774 write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
775 | (t->index << VPECONF0_XTC_SHIFT));
e01402b1 776
a94d7020
EO
777 back_to_back_c0_hazard();
778
e0daad44
RB
779 /* enable this VPE */
780 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
e01402b1
RB
781
782 /* clear out any left overs from a previous program */
2600990e 783 write_vpe_c0_status(0);
e01402b1
RB
784 write_vpe_c0_cause(0);
785
786 /* take system out of configuration state */
340ee4b9 787 clear_c0_mvpcontrol(MVPCONTROL_VPC);
e01402b1 788
b618336a
KK
789 /*
790 * SMTC/SMVP kernels manage VPE enable independently,
791 * but uniprocessor kernels need to turn it on, even
792 * if that wasn't the pre-dvpe() state.
793 */
07cc0c9e 794#ifdef CONFIG_SMP
07cc0c9e 795 evpe(vpeflags);
b618336a
KK
796#else
797 evpe(EVPE_ENABLE);
07cc0c9e
RB
798#endif
799 emt(dmt_flag);
800 local_irq_restore(flags);
e01402b1 801
07cc0c9e
RB
802 list_for_each_entry(n, &v->notify, list)
803 n->start(minor);
2600990e 804
e01402b1
RB
805 return 0;
806}
807
2600990e 808static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
e01402b1
RB
809 unsigned int symindex, const char *strtab,
810 struct module *mod)
811{
812 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
813 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
814
815 for (i = 1; i < n; i++) {
816 if (strcmp(strtab + sym[i].st_name, "__start") == 0) {
817 v->__start = sym[i].st_value;
818 }
819
820 if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) {
821 v->shared_ptr = (void *)sym[i].st_value;
822 }
823 }
824
2600990e
RB
825 if ( (v->__start == 0) || (v->shared_ptr == NULL))
826 return -1;
827
e01402b1
RB
828 return 0;
829}
830
/*
 * Allocates a VPE with some program code space(the load address), copies the
 * contents of the program (p)buffer performing relocatations/etc, free's it
 * when finished.
 *
 * ET_REL images are laid out, copied and relocated via the (reused)
 * module loader helpers; ET_EXEC images have their PT_LOAD segments
 * copied directly to their p_paddr.  Returns 0 or a negative errno.
 */
static int vpe_elfload(struct vpe * v)
{
	Elf_Ehdr *hdr;
	Elf_Shdr *sechdrs;
	long err = 0;
	char *secstrings, *strtab = NULL;
	unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
	struct module mod;	// so we can re-use the relocations code

	memset(&mod, 0, sizeof(struct module));
	strcpy(mod.name, "VPE loader");

	hdr = (Elf_Ehdr *) v->pbuffer;
	len = v->plen;

	/* Sanity checks against insmoding binaries or wrong arch,
	   weird elf version */
	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
	    || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
	    || !elf_check_arch(hdr)
	    || hdr->e_shentsize != sizeof(*sechdrs)) {
		printk(KERN_WARNING
		       "VPE loader: program wrong arch or weird elf version\n");

		return -ENOEXEC;
	}

	if (hdr->e_type == ET_REL)
		relocate = 1;

	/* section header table must lie inside the buffer */
	if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
		printk(KERN_ERR "VPE loader: program length %u truncated\n",
		       len);

		return -ENOEXEC;
	}

	/* Convenience variables */
	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	sechdrs[0].sh_addr = 0;

	/* And these should exist, but gcc whinges if we don't init them */
	symindex = strindex = 0;

	if (relocate) {
		for (i = 1; i < hdr->e_shnum; i++) {
			if (sechdrs[i].sh_type != SHT_NOBITS
			    && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) {
				printk(KERN_ERR "VPE program length %u truncated\n",
				       len);
				return -ENOEXEC;
			}

			/* Mark all sections sh_addr with their address in the
			   temporary image. */
			sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;

			/* Internal symbols and strings. */
			if (sechdrs[i].sh_type == SHT_SYMTAB) {
				symindex = i;
				strindex = sechdrs[i].sh_link;
				strtab = (char *)hdr + sechdrs[strindex].sh_offset;
			}
		}
		/* assign final offsets (sh_entsize) and total core_size */
		layout_sections(&mod, hdr, sechdrs, secstrings);
	}

	v->load_addr = alloc_progmem(mod.core_size);
	if (!v->load_addr)
		return -ENOMEM;

	pr_info("VPE loader: loading to %p\n", v->load_addr);

	if (relocate) {
		/* copy each allocated section to its laid-out slot */
		for (i = 0; i < hdr->e_shnum; i++) {
			void *dest;

			if (!(sechdrs[i].sh_flags & SHF_ALLOC))
				continue;

			dest = v->load_addr + sechdrs[i].sh_entsize;

			if (sechdrs[i].sh_type != SHT_NOBITS)
				memcpy(dest, (void *)sechdrs[i].sh_addr,
				       sechdrs[i].sh_size);
			/* Update sh_addr to point to copy in image. */
			sechdrs[i].sh_addr = (unsigned long)dest;

			printk(KERN_DEBUG " section sh_name %s sh_addr 0x%x\n",
			       secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr);
		}

		/* Fix up syms, so that st_value is a pointer to location. */
		simplify_symbols(sechdrs, symindex, strtab, secstrings,
				 hdr->e_shnum, &mod);

		/* Now do relocations. */
		for (i = 1; i < hdr->e_shnum; i++) {
			const char *strtab = (char *)sechdrs[strindex].sh_addr;
			unsigned int info = sechdrs[i].sh_info;

			/* Not a valid relocation section? */
			if (info >= hdr->e_shnum)
				continue;

			/* Don't bother with non-allocated sections */
			if (!(sechdrs[info].sh_flags & SHF_ALLOC))
				continue;

			if (sechdrs[i].sh_type == SHT_REL)
				err = apply_relocations(sechdrs, strtab, symindex, i,
							&mod);
			else if (sechdrs[i].sh_type == SHT_RELA)
				err = apply_relocate_add(sechdrs, strtab, symindex, i,
							 &mod);
			if (err < 0)
				return err;

		}
	} else {
		/* ET_EXEC: copy PT_LOAD segments straight to p_paddr */
		struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff);

		for (i = 0; i < hdr->e_phnum; i++) {
			if (phdr->p_type == PT_LOAD) {
				memcpy((void *)phdr->p_paddr,
				       (char *)hdr + phdr->p_offset,
				       phdr->p_filesz);
				/* zero the bss part of the segment */
				memset((void *)phdr->p_paddr + phdr->p_filesz,
				       0, phdr->p_memsz - phdr->p_filesz);
			}
			phdr++;
		}

		for (i = 0; i < hdr->e_shnum; i++) {
			/* Internal symbols and strings. */
			if (sechdrs[i].sh_type == SHT_SYMTAB) {
				symindex = i;
				strindex = sechdrs[i].sh_link;
				strtab = (char *)hdr + sechdrs[strindex].sh_offset;

				/* mark the symtab's address for when we try to find the
				   magic symbols */
				sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
			}
		}
	}

	/* make sure it's physically written out */
	flush_icache_range((unsigned long)v->load_addr,
			   (unsigned long)v->load_addr + v->len);

	if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
		if (v->__start == 0) {
			printk(KERN_WARNING "VPE loader: program does not contain "
			       "a __start symbol\n");
			return -ENOEXEC;
		}

		/* missing vpe_shared is only a warning, not fatal */
		if (v->shared_ptr == NULL)
			printk(KERN_WARNING "VPE loader: "
			       "program does not contain vpe_shared symbol.\n"
			       " Unable to use AMVP (AP/SP) facilities.\n");
	}

	printk(" elf loaded\n");
	return 0;
}
1004
2600990e
RB
/*
 * Halt @tc and mark it unallocated / interrupt exempt so it no longer
 * runs the old SP program.  Performed inside MT 'configuration state'
 * with IRQs, MT and other VPEs disabled.
 */
static void cleanup_tc(struct tc *tc)
{
	unsigned long flags;
	unsigned int mtflags, vpflags;
	int tmp;

	local_irq_save(flags);
	mtflags = dmt();
	vpflags = dvpe();
	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	settc(tc->index);
	tmp = read_tc_c0_tcstatus();

	/* mark not allocated and not dynamically allocatable */
	tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
	tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
	write_tc_c0_tcstatus(tmp);

	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();

	/* bind it to anything other than VPE1 */
//	write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE

	clear_c0_mvpcontrol(MVPCONTROL_VPC);
	evpe(vpflags);
	emt(mtflags);
	local_irq_restore(flags);
}
1036
/*
 * Fetch the current task's working directory into a kernel buffer.
 * Temporarily widens the address limit (set_fs(KERNEL_DS)) because
 * sys_getcwd() expects a user-space pointer.  Returns sys_getcwd()'s
 * result (length, or negative errno).
 */
static int getcwd(char *buff, int size)
{
	mm_segment_t old_fs;
	int ret;

	old_fs = get_fs();
	set_fs(KERNEL_DS);

	ret = sys_getcwd(buff, size);

	set_fs(old_fs);

	return ret;
}
1051
/* checks VPE is unused and gets ready to load program  */
static int vpe_open(struct inode *inode, struct file *filp)
{
	enum vpe_state state;
	struct vpe_notifications *not;
	struct vpe *v;
	int ret;

	if (minor != iminor(inode)) {
		/* assume only 1 device at the moment. */
		pr_warning("VPE loader: only vpe1 is supported\n");

		return -ENODEV;
	}

	if ((v = get_vpe(tclimit)) == NULL) {
		pr_warning("VPE loader: unable to get vpe\n");

		return -ENODEV;
	}

	/* atomically claim the VPE; if it was already claimed, tear the
	   old program down first */
	state = xchg(&v->state, VPE_STATE_INUSE);
	if (state != VPE_STATE_UNUSED) {
		printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n");

		/* tell listeners the previous program is being stopped */
		list_for_each_entry(not, &v->notify, list) {
			not->stop(tclimit);
		}

		release_progmem(v->load_addr);
		cleanup_tc(get_tc(tclimit));
	}

	/* this of-course trashes what was there before... */
	/* NOTE(review): on re-open the previous v->pbuffer does not
	   appear to be freed before this vmalloc() — possible leak;
	   verify against vpe_release(). */
	v->pbuffer = vmalloc(P_SIZE);
	if (!v->pbuffer) {
		pr_warning("VPE loader: unable to allocate memory\n");
		return -ENOMEM;
	}
	v->plen = P_SIZE;
	v->load_addr = NULL;
	v->len = 0;

	/* remember the opener's credentials and cwd */
	v->uid = filp->f_cred->fsuid;
	v->gid = filp->f_cred->fsgid;

	v->cwd[0] = 0;
	ret = getcwd(v->cwd, VPE_PATH_MAX);
	if (ret < 0)
		printk(KERN_WARNING "VPE loader: open, getcwd returned %d\n", ret);

	v->shared_ptr = NULL;
	v->__start = 0;

	return 0;
}
1108
1109static int vpe_release(struct inode *inode, struct file *filp)
1110{
307bd284 1111 struct vpe *v;
e01402b1 1112 Elf_Ehdr *hdr;
07cc0c9e 1113 int ret = 0;
e01402b1 1114
07cc0c9e
RB
1115 v = get_vpe(tclimit);
1116 if (v == NULL)
e01402b1
RB
1117 return -ENODEV;
1118
e01402b1 1119 hdr = (Elf_Ehdr *) v->pbuffer;
d303f4a1 1120 if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
07cc0c9e 1121 if (vpe_elfload(v) >= 0) {
e01402b1 1122 vpe_run(v);
07cc0c9e 1123 } else {
2600990e 1124 printk(KERN_WARNING "VPE loader: ELF load failed.\n");
e01402b1
RB
1125 ret = -ENOEXEC;
1126 }
1127 } else {
2600990e 1128 printk(KERN_WARNING "VPE loader: only elf files are supported\n");
e01402b1
RB
1129 ret = -ENOEXEC;
1130 }
1131
2600990e
RB
1132 /* It's good to be able to run the SP and if it chokes have a look at
1133 the /dev/rt?. But if we reset the pointer to the shared struct we
8ebcfc8b 1134 lose what has happened. So perhaps if garbage is sent to the vpe
2600990e
RB
1135 device, use it as a trigger for the reset. Hopefully a nice
1136 executable will be along shortly. */
1137 if (ret < 0)
1138 v->shared_ptr = NULL;
1139
863abad4 1140 vfree(v->pbuffer);
e01402b1 1141 v->plen = 0;
863abad4 1142
e01402b1
RB
1143 return ret;
1144}
1145
1146static ssize_t vpe_write(struct file *file, const char __user * buffer,
1147 size_t count, loff_t * ppos)
1148{
e01402b1 1149 size_t ret = count;
307bd284 1150 struct vpe *v;
e01402b1 1151
496ad9aa 1152 if (iminor(file_inode(file)) != minor)
07cc0c9e
RB
1153 return -ENODEV;
1154
1155 v = get_vpe(tclimit);
1156 if (v == NULL)
e01402b1
RB
1157 return -ENODEV;
1158
e01402b1
RB
1159 if ((count + v->len) > v->plen) {
1160 printk(KERN_WARNING
2600990e 1161 "VPE loader: elf size too big. Perhaps strip uneeded symbols\n");
e01402b1
RB
1162 return -ENOMEM;
1163 }
1164
1165 count -= copy_from_user(v->pbuffer + v->len, buffer, count);
2600990e 1166 if (!count)
e01402b1 1167 return -EFAULT;
e01402b1
RB
1168
1169 v->len += count;
1170 return ret;
1171}
1172
/*
 * File operations for /dev/vpe1.  The SP program image is streamed in
 * via write() and is validated/loaded/started when the descriptor is
 * released.  Seeking is meaningless on this device (noop_llseek).
 */
static const struct file_operations vpe_fops = {
	.owner = THIS_MODULE,
	.open = vpe_open,
	.release = vpe_release,
	.write = vpe_write,
	.llseek = noop_llseek,
};
1180
1181/* module wrapper entry points */
1182/* give me a vpe */
1183vpe_handle vpe_alloc(void)
1184{
1185 int i;
1186 struct vpe *v;
1187
1188 /* find a vpe */
1189 for (i = 1; i < MAX_VPES; i++) {
1190 if ((v = get_vpe(i)) != NULL) {
1191 v->state = VPE_STATE_INUSE;
1192 return v;
1193 }
1194 }
1195 return NULL;
1196}
1197
1198EXPORT_SYMBOL(vpe_alloc);
1199
1200/* start running from here */
1201int vpe_start(vpe_handle vpe, unsigned long start)
1202{
1203 struct vpe *v = vpe;
1204
1205 v->__start = start;
1206 return vpe_run(v);
1207}
1208
1209EXPORT_SYMBOL(vpe_start);
1210
1211/* halt it for now */
1212int vpe_stop(vpe_handle vpe)
1213{
1214 struct vpe *v = vpe;
1215 struct tc *t;
1216 unsigned int evpe_flags;
1217
1218 evpe_flags = dvpe();
1219
1220 if ((t = list_entry(v->tc.next, struct tc, tc)) != NULL) {
1221
1222 settc(t->index);
1223 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
1224 }
1225
1226 evpe(evpe_flags);
1227
1228 return 0;
1229}
1230
1231EXPORT_SYMBOL(vpe_stop);
1232
1233/* I've done with it thank you */
1234int vpe_free(vpe_handle vpe)
1235{
1236 struct vpe *v = vpe;
1237 struct tc *t;
1238 unsigned int evpe_flags;
1239
1240 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
1241 return -ENOEXEC;
1242 }
1243
1244 evpe_flags = dvpe();
1245
1246 /* Put MVPE's into 'configuration state' */
340ee4b9 1247 set_c0_mvpcontrol(MVPCONTROL_VPC);
e01402b1
RB
1248
1249 settc(t->index);
1250 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
1251
7c3a622d 1252 /* halt the TC */
e01402b1 1253 write_tc_c0_tchalt(TCHALT_H);
7c3a622d
NS
1254 mips_ihb();
1255
1256 /* mark the TC unallocated */
1257 write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
e01402b1
RB
1258
1259 v->state = VPE_STATE_UNUSED;
1260
340ee4b9 1261 clear_c0_mvpcontrol(MVPCONTROL_VPC);
e01402b1
RB
1262 evpe(evpe_flags);
1263
1264 return 0;
1265}
1266
1267EXPORT_SYMBOL(vpe_free);
1268
1269void *vpe_get_shared(int index)
1270{
1271 struct vpe *v;
1272
2600990e 1273 if ((v = get_vpe(index)) == NULL)
e01402b1 1274 return NULL;
e01402b1
RB
1275
1276 return v->shared_ptr;
1277}
1278
1279EXPORT_SYMBOL(vpe_get_shared);
1280
2600990e
RB
1281int vpe_getuid(int index)
1282{
1283 struct vpe *v;
1284
1285 if ((v = get_vpe(index)) == NULL)
1286 return -1;
1287
1288 return v->uid;
1289}
1290
1291EXPORT_SYMBOL(vpe_getuid);
1292
1293int vpe_getgid(int index)
1294{
1295 struct vpe *v;
1296
1297 if ((v = get_vpe(index)) == NULL)
1298 return -1;
1299
1300 return v->gid;
1301}
1302
1303EXPORT_SYMBOL(vpe_getgid);
1304
1305int vpe_notify(int index, struct vpe_notifications *notify)
1306{
1307 struct vpe *v;
1308
1309 if ((v = get_vpe(index)) == NULL)
1310 return -1;
1311
1312 list_add(&notify->list, &v->notify);
1313 return 0;
1314}
1315
1316EXPORT_SYMBOL(vpe_notify);
1317
1318char *vpe_getcwd(int index)
1319{
1320 struct vpe *v;
1321
1322 if ((v = get_vpe(index)) == NULL)
1323 return NULL;
1324
1325 return v->cwd;
1326}
1327
1328EXPORT_SYMBOL(vpe_getcwd);
1329
736fad17
KS
1330static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
1331 const char *buf, size_t len)
0f5d0df3
RB
1332{
1333 struct vpe *vpe = get_vpe(tclimit);
1334 struct vpe_notifications *not;
1335
1336 list_for_each_entry(not, &vpe->notify, list) {
1337 not->stop(tclimit);
1338 }
1339
1340 release_progmem(vpe->load_addr);
1341 cleanup_tc(get_tc(tclimit));
1342 vpe_stop(vpe);
1343 vpe_free(vpe);
1344
1345 return len;
1346}
1347
736fad17
KS
1348static ssize_t show_ntcs(struct device *cd, struct device_attribute *attr,
1349 char *buf)
41790e04
RB
1350{
1351 struct vpe *vpe = get_vpe(tclimit);
1352
1353 return sprintf(buf, "%d\n", vpe->ntcs);
1354}
1355
736fad17
KS
1356static ssize_t store_ntcs(struct device *dev, struct device_attribute *attr,
1357 const char *buf, size_t len)
41790e04
RB
1358{
1359 struct vpe *vpe = get_vpe(tclimit);
1360 unsigned long new;
1361 char *endp;
1362
1363 new = simple_strtoul(buf, &endp, 0);
1364 if (endp == buf)
1365 goto out_einval;
1366
1367 if (new == 0 || new > (hw_tcs - tclimit))
1368 goto out_einval;
1369
1370 vpe->ntcs = new;
1371
1372 return len;
1373
1374out_einval:
52a7a27c 1375 return -EINVAL;
41790e04
RB
1376}
1377
/*
 * sysfs attributes for the vpe class device: 'kill' (write-only)
 * stops the running SP program; 'ntcs' reads/writes the number of TCs
 * the next program may use.  NULL-terminated for dev_attrs.
 */
static struct device_attribute vpe_class_attributes[] = {
	__ATTR(kill, S_IWUSR, NULL, store_kill),
	__ATTR(ntcs, S_IRUGO | S_IWUSR, show_ntcs, store_ntcs),
	{}
};
1383
/*
 * Class ->dev_release hook.  The only device using this class is the
 * file-scope 'struct device vpe_device', which is NOT heap-allocated,
 * so the old kfree(cd) here handed a static object to the slab
 * allocator — memory corruption waiting to happen when the device's
 * last reference is dropped.  The driver core merely requires that a
 * release callback exists; for a static device it must do nothing.
 */
static void vpe_device_release(struct device *cd)
{
	/* intentionally empty: 'cd' points at the static vpe_device */
}
1388
/*
 * Device class backing /sys/class/vpe; dev_attrs wires up the 'kill'
 * and 'ntcs' files on each device of this class.
 */
struct class vpe_class = {
	.name = "vpe",
	.owner = THIS_MODULE,
	.dev_release = vpe_device_release,
	.dev_attrs = vpe_class_attributes,
};
1395
/* The single (static, never kfreed) device instance, named "vpe1" and
 * registered in vpe_module_init(). */
struct device vpe_device;
27a3bbaf 1397
e01402b1
RB
/*
 * Module init: register the character device and sysfs class/device,
 * then walk every TC from 'tclimit' upward, allocating loader-side
 * bookkeeping (struct tc / struct vpe), deactivating the non-boot
 * VPEs and halting/unallocating their TCs so the loader can later
 * hand them a program.
 *
 * Hardware state is only touched inside local_irq_save + dmt + dvpe +
 * MVPCONTROL_VPC (configuration state); that window is deliberately
 * dropped and re-acquired around alloc_tc() because allocation may
 * sleep/reschedule and IPIs must be deliverable then.
 *
 * Returns 0 on success or a negative errno, unwinding whatever was
 * registered.
 */
static int __init vpe_module_init(void)
{
	unsigned int mtflags, vpflags;
	unsigned long flags, val;
	struct vpe *v = NULL;
	struct tc *t;
	int tc, err;

	if (!cpu_has_mipsmt) {
		printk("VPE loader: not a MIPS MT capable processor\n");
		return -ENODEV;
	}

	if (vpelimit == 0) {
		printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
		       "initializing VPE loader.\nPass maxvpes=<n> argument as "
		       "kernel argument\n");

		return -ENODEV;
	}

	if (tclimit == 0) {
		printk(KERN_WARNING "No TCs reserved for AP/SP, not "
		       "initializing VPE loader.\nPass maxtcs=<n> argument as "
		       "kernel argument\n");

		return -ENODEV;
	}

	/* dynamic major; the single minor is matched in vpe_open/vpe_write */
	major = register_chrdev(0, module_name, &vpe_fops);
	if (major < 0) {
		printk("VPE loader: unable to register character device\n");
		return major;
	}

	err = class_register(&vpe_class);
	if (err) {
		printk(KERN_ERR "vpe_class registration failed\n");
		goto out_chrdev;
	}

	device_initialize(&vpe_device);
	/* NOTE(review): the trailing commas below are comma operators, not
	   typos in effect — the assignments still happen; semicolons would
	   be clearer. */
	vpe_device.class	= &vpe_class,
	vpe_device.parent	= NULL,
	dev_set_name(&vpe_device, "vpe1");
	vpe_device.devt = MKDEV(major, minor);
	err = device_add(&vpe_device);
	if (err) {
		printk(KERN_ERR "Adding vpe_device failed\n");
		goto out_class;
	}

	local_irq_save(flags);
	mtflags = dmt();
	vpflags = dvpe();

	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	/* dump_mtregs(); */

	/* discover how many TCs and VPEs the hardware actually has */
	val = read_c0_mvpconf0();
	hw_tcs = (val & MVPCONF0_PTC) + 1;
	hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;

	for (tc = tclimit; tc < hw_tcs; tc++) {
		/*
		 * Must re-enable multithreading temporarily or in case we
		 * reschedule send IPIs or similar we might hang.
		 */
		clear_c0_mvpcontrol(MVPCONTROL_VPC);
		evpe(vpflags);
		emt(mtflags);
		local_irq_restore(flags);
		t = alloc_tc(tc);
		if (!t) {
			err = -ENOMEM;
			goto out;
		}

		local_irq_save(flags);
		mtflags = dmt();
		vpflags = dvpe();
		set_c0_mvpcontrol(MVPCONTROL_VPC);

		/* VPE's */
		/* NOTE(review): 'tc < hw_tcs' is always true inside this
		   loop (it is the loop bound); presumably a different limit
		   was intended — confirm before changing. */
		if (tc < hw_tcs) {
			settc(tc);

			if ((v = alloc_vpe(tc)) == NULL) {
				printk(KERN_WARNING "VPE: unable to allocate VPE\n");

				goto out_reenable;
			}

			v->ntcs = hw_tcs - tclimit;

			/* add the tc to the list of this vpe's tc's. */
			list_add(&t->tc, &v->tc);

			/* deactivate all but vpe0 */
			if (tc >= tclimit) {
				unsigned long tmp = read_vpe_c0_vpeconf0();

				tmp &= ~VPECONF0_VPA;

				/* master VPE */
				tmp |= VPECONF0_MVP;
				write_vpe_c0_vpeconf0(tmp);
			}

			/* disable multi-threading with TC's */
			write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);

			if (tc >= vpelimit) {
				/*
				 * Set config to be the same as vpe0,
				 * particularly kseg0 coherency alg
				 */
				write_vpe_c0_config(read_c0_config());
			}
		}

		/* TC's */
		t->pvpe = v;	/* set the parent vpe */

		if (tc >= tclimit) {
			unsigned long tmp;

			settc(tc);

			/* Any TC that is bound to VPE0 gets left as is - in case
			   we are running SMTC on VPE0. A TC that is bound to any
			   other VPE gets bound to VPE0, ideally I'd like to make
			   it homeless but it doesn't appear to let me bind a TC
			   to a non-existent VPE. Which is perfectly reasonable.

			   The (un)bound state is visible to an EJTAG probe so may
			   notify GDB...
			*/

			if (((tmp = read_tc_c0_tcbind()) & TCBIND_CURVPE)) {
				/* tc is bound >vpe0 */
				write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);

				t->pvpe = get_vpe(0);	/* set the parent vpe */
			}

			/* halt the TC */
			write_tc_c0_tchalt(TCHALT_H);
			mips_ihb();

			tmp = read_tc_c0_tcstatus();

			/* mark not activated and not dynamically allocatable */
			tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
			tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
			write_tc_c0_tcstatus(tmp);
		}
	}

out_reenable:
	/* release config state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

	evpe(vpflags);
	emt(mtflags);
	local_irq_restore(flags);

	return 0;

out_class:
	class_unregister(&vpe_class);
out_chrdev:
	unregister_chrdev(major, module_name);

out:
	return err;
}
1577
1578static void __exit vpe_module_exit(void)
1579{
1580 struct vpe *v, *n;
1581
1bbfc20d
RB
1582 device_del(&vpe_device);
1583 unregister_chrdev(major, module_name);
1584
1585 /* No locking needed here */
e01402b1 1586 list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
1bbfc20d 1587 if (v->state != VPE_STATE_UNUSED)
e01402b1 1588 release_vpe(v);
e01402b1 1589 }
e01402b1
RB
1590}
1591
/* Module entry/exit registration and standard module metadata. */
module_init(vpe_module_init);
module_exit(vpe_module_exit);
MODULE_DESCRIPTION("MIPS VPE Loader");
MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
MODULE_LICENSE("GPL");