/*
 * Thread stack (thread_info) and FPU extended-state (xstate) allocation
 * helpers for the SuperH architecture.
 */
cbf6b1ba PM |
1 | #include <linux/mm.h> |
2 | #include <linux/kernel.h> | |
5a0e3ad6 | 3 | #include <linux/slab.h> |
cbf6b1ba | 4 | #include <linux/sched.h> |
5d920bb9 FA |
5 | #include <linux/export.h> |
6 | #include <linux/stackprotector.h> | |
cbf6b1ba | 7 | |
/* Slab cache for per-task FPU extended state; NULL until a CPU reports FPU
 * support (see arch_task_cache_init()/init_thread_xstate() below). */
struct kmem_cache *task_xstate_cachep = NULL;
/* Size in bytes of one xstate buffer; 0 means "no FPU state to manage". */
unsigned int xstate_size;

#ifdef CONFIG_CC_STACKPROTECTOR
/* Canary value used by -fstack-protector; exported for module code. */
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
0ea820cf PM |
16 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) |
17 | { | |
18 | *dst = *src; | |
19 | ||
20 | if (src->thread.xstate) { | |
21 | dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep, | |
22 | GFP_KERNEL); | |
23 | if (!dst->thread.xstate) | |
24 | return -ENOMEM; | |
25 | memcpy(dst->thread.xstate, src->thread.xstate, xstate_size); | |
26 | } | |
27 | ||
28 | return 0; | |
29 | } | |
30 | ||
31 | void free_thread_xstate(struct task_struct *tsk) | |
32 | { | |
33 | if (tsk->thread.xstate) { | |
34 | kmem_cache_free(task_xstate_cachep, tsk->thread.xstate); | |
35 | tsk->thread.xstate = NULL; | |
36 | } | |
37 | } | |
38 | ||
cbf6b1ba PM |
39 | #if THREAD_SHIFT < PAGE_SHIFT |
40 | static struct kmem_cache *thread_info_cache; | |
41 | ||
b15ed691 | 42 | struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) |
cbf6b1ba PM |
43 | { |
44 | struct thread_info *ti; | |
cbf6b1ba | 45 | #ifdef CONFIG_DEBUG_STACK_USAGE |
b6a84016 ED |
46 | gfp_t mask = GFP_KERNEL | __GFP_ZERO; |
47 | #else | |
48 | gfp_t mask = GFP_KERNEL; | |
cbf6b1ba | 49 | #endif |
b6a84016 ED |
50 | |
51 | ti = kmem_cache_alloc_node(thread_info_cache, mask, node); | |
cbf6b1ba PM |
52 | return ti; |
53 | } | |
54 | ||
55 | void free_thread_info(struct thread_info *ti) | |
56 | { | |
0ea820cf | 57 | free_thread_xstate(ti->task); |
cbf6b1ba PM |
58 | kmem_cache_free(thread_info_cache, ti); |
59 | } | |
60 | ||
61 | void thread_info_cache_init(void) | |
62 | { | |
63 | thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, | |
a3705799 | 64 | THREAD_SIZE, SLAB_PANIC, NULL); |
cbf6b1ba PM |
65 | } |
66 | #else | |
b15ed691 | 67 | struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) |
cbf6b1ba PM |
68 | { |
69 | #ifdef CONFIG_DEBUG_STACK_USAGE | |
70 | gfp_t mask = GFP_KERNEL | __GFP_ZERO; | |
71 | #else | |
72 | gfp_t mask = GFP_KERNEL; | |
73 | #endif | |
b6a84016 ED |
74 | struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER); |
75 | ||
76 | return page ? page_address(page) : NULL; | |
cbf6b1ba PM |
77 | } |
78 | ||
79 | void free_thread_info(struct thread_info *ti) | |
80 | { | |
0ea820cf | 81 | free_thread_xstate(ti->task); |
cbf6b1ba PM |
82 | free_pages((unsigned long)ti, THREAD_SIZE_ORDER); |
83 | } | |
84 | #endif /* THREAD_SHIFT < PAGE_SHIFT */ | |
0ea820cf PM |
85 | |
86 | void arch_task_cache_init(void) | |
87 | { | |
88 | if (!xstate_size) | |
89 | return; | |
90 | ||
91 | task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size, | |
92 | __alignof__(union thread_xstate), | |
93 | SLAB_PANIC | SLAB_NOTRACK, NULL); | |
94 | } | |
95 | ||
96 | #ifdef CONFIG_SH_FPU_EMU | |
97 | # define HAVE_SOFTFP 1 | |
98 | #else | |
99 | # define HAVE_SOFTFP 0 | |
100 | #endif | |
101 | ||
4a6feab0 | 102 | void __cpuinit init_thread_xstate(void) |
0ea820cf PM |
103 | { |
104 | if (boot_cpu_data.flags & CPU_HAS_FPU) | |
105 | xstate_size = sizeof(struct sh_fpu_hard_struct); | |
106 | else if (HAVE_SOFTFP) | |
107 | xstate_size = sizeof(struct sh_fpu_soft_struct); | |
108 | else | |
109 | xstate_size = 0; | |
110 | } |