/* $Id: thread-r0drv-linux.c $ */
/** @file
 * IPRT - Threads, Ring-0 Driver, Linux.
 */

/*
 * Copyright (C) 2006-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "the-linux-kernel.h"
#include "internal/iprt.h"
#include <iprt/thread.h>

#include <iprt/asm.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28) || defined(CONFIG_X86_SMAP)
# include <iprt/asm-amd64-x86.h>
#endif
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/mp.h>


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#ifndef CONFIG_PREEMPT
/** Per-cpu preemption counters. */
static int32_t volatile g_acPreemptDisabled[NR_CPUS];
#endif


RTDECL(RTNATIVETHREAD) RTThreadNativeSelf(void)
{
    return (RTNATIVETHREAD)current;
}
RT_EXPORT_SYMBOL(RTThreadNativeSelf);


static int rtR0ThreadLnxSleepCommon(RTMSINTERVAL cMillies)
{
    IPRT_LINUX_SAVE_EFL_AC();
    long cJiffies = msecs_to_jiffies(cMillies);
    set_current_state(TASK_INTERRUPTIBLE);
    cJiffies = schedule_timeout(cJiffies);
    IPRT_LINUX_RESTORE_EFL_AC();
    if (!cJiffies)
        return VINF_SUCCESS;
    return VERR_INTERRUPTED;
}


RTDECL(int) RTThreadSleep(RTMSINTERVAL cMillies)
{
    return rtR0ThreadLnxSleepCommon(cMillies);
}
RT_EXPORT_SYMBOL(RTThreadSleep);
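
/*
 * Illustrative usage (not part of the original sources): a ring-0 caller
 * sleeping with millisecond granularity and reacting to an early wakeup.
 * A minimal sketch only; the status codes are the ones returned above.
 *
 *     int rc = RTThreadSleep(100);       // sleep for roughly 100 ms
 *     if (rc == VERR_INTERRUPTED)
 *     {
 *         // a signal is pending for this task: back out or retry
 *     }
 */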


RTDECL(int) RTThreadSleepNoLog(RTMSINTERVAL cMillies)
{
    return rtR0ThreadLnxSleepCommon(cMillies);
}
RT_EXPORT_SYMBOL(RTThreadSleepNoLog);


RTDECL(bool) RTThreadYield(void)
{
    IPRT_LINUX_SAVE_EFL_AC();
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 20)
    yield();
#else
    /** @todo r=ramshankar: Can we use cond_resched() instead? */
    set_current_state(TASK_RUNNING);
    sys_sched_yield();
    schedule();
#endif
    IPRT_LINUX_RESTORE_EFL_AC();
    return true;
}
RT_EXPORT_SYMBOL(RTThreadYield);


RTDECL(bool) RTThreadPreemptIsEnabled(RTTHREAD hThread)
{
#ifdef CONFIG_PREEMPT
    Assert(hThread == NIL_RTTHREAD); RT_NOREF_PV(hThread);
# ifdef preemptible
    return preemptible();
# else
    return preempt_count() == 0 && !in_atomic() && !irqs_disabled();
# endif
#else
    int32_t c;

    Assert(hThread == NIL_RTTHREAD);
    c = g_acPreemptDisabled[smp_processor_id()];
    AssertMsg(c >= 0 && c < 32, ("%d\n", c));
    if (c != 0)
        return false;
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 32)
    if (in_atomic())
        return false;
# endif
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 28)
    if (irqs_disabled())
        return false;
# else
    if (!ASMIntAreEnabled())
        return false;
# endif
    return true;
#endif
}
RT_EXPORT_SYMBOL(RTThreadPreemptIsEnabled);
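
/*
 * Illustrative usage (not part of the original sources): code that may be
 * reached from atomic context typically checks this before doing anything
 * that can schedule. A minimal sketch:
 *
 *     if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
 *         RTThreadSleep(10);  // safe to block here
 *     else
 *     {
 *         // atomic context: must not sleep, defer the work instead
 *     }
 */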


RTDECL(bool) RTThreadPreemptIsPending(RTTHREAD hThread)
{
    Assert(hThread == NIL_RTTHREAD); RT_NOREF_PV(hThread);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 4)
    return !!test_tsk_thread_flag(current, TIF_NEED_RESCHED);

#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 20)
    return !!need_resched();

#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 110)
    return current->need_resched != 0;

#else
    return need_resched != 0;
#endif
}
RT_EXPORT_SYMBOL(RTThreadPreemptIsPending);
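
/*
 * Illustrative usage (not part of the original sources): a long-running
 * ring-0 loop that voluntarily backs off when the scheduler wants the CPU.
 * A minimal sketch; fDone stands in for the caller's own loop condition.
 *
 *     while (!fDone)
 *     {
 *         // ... a bounded chunk of work ...
 *         if (RTThreadPreemptIsPending(NIL_RTTHREAD))
 *             RTThreadYield();
 *     }
 */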


RTDECL(bool) RTThreadPreemptIsPendingTrusty(void)
{
    /* yes, RTThreadPreemptIsPending is reliable. */
    return true;
}
RT_EXPORT_SYMBOL(RTThreadPreemptIsPendingTrusty);


RTDECL(bool) RTThreadPreemptIsPossible(void)
{
    /** @todo r=ramshankar: What about CONFIG_PREEMPT_VOLUNTARY? That can
     *        preempt too, but does so voluntarily at explicit preemption
     *        points. */
#ifdef CONFIG_PREEMPT
    return true;    /* yes, kernel preemption is possible. */
#else
    return false;   /* no kernel preemption */
#endif
}
RT_EXPORT_SYMBOL(RTThreadPreemptIsPossible);


RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
{
#ifdef CONFIG_PREEMPT
    AssertPtr(pState);
    Assert(pState->u32Reserved == 0);
    pState->u32Reserved = 42;
    /* This ASSUMES that CONFIG_PREEMPT_COUNT is always defined with CONFIG_PREEMPT. */
    preempt_disable();
    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);

#else /* !CONFIG_PREEMPT */
    int32_t c;
    AssertPtr(pState);
    Assert(pState->u32Reserved == 0);

    /* Do our own accounting. */
    c = ASMAtomicIncS32(&g_acPreemptDisabled[smp_processor_id()]);
    AssertMsg(c > 0 && c < 32, ("%d\n", c));
    pState->u32Reserved = c;
    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
#endif
}
RT_EXPORT_SYMBOL(RTThreadPreemptDisable);


RTDECL(void) RTThreadPreemptRestore(PRTTHREADPREEMPTSTATE pState)
{
#ifdef CONFIG_PREEMPT
    IPRT_LINUX_SAVE_EFL_AC(); /* paranoia */
    AssertPtr(pState);
    Assert(pState->u32Reserved == 42);
    RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);
    preempt_enable();
    IPRT_LINUX_RESTORE_EFL_ONLY_AC(); /* paranoia */

#else
    int32_t volatile *pc;
    AssertPtr(pState);
    AssertMsg(pState->u32Reserved > 0 && pState->u32Reserved < 32, ("%d\n", pState->u32Reserved));
    RT_ASSERT_PREEMPT_CPUID_RESTORE(pState);

    /* Do our own accounting. */
    pc = &g_acPreemptDisabled[smp_processor_id()];
    AssertMsg(pState->u32Reserved == (uint32_t)*pc, ("u32Reserved=%d *pc=%d\n", pState->u32Reserved, *pc));
    ASMAtomicUoWriteS32(pc, pState->u32Reserved - 1);
#endif
    pState->u32Reserved = 0;
}
RT_EXPORT_SYMBOL(RTThreadPreemptRestore);
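
/*
 * Illustrative usage (not part of the original sources): the disable/restore
 * pair brackets short per-CPU critical sections. A minimal sketch, assuming
 * the standard RTTHREADPREEMPTSTATE_INITIALIZER macro from iprt/thread.h:
 *
 *     RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
 *     RTThreadPreemptDisable(&PreemptState);
 *     // ... short section that must stay on this CPU without rescheduling ...
 *     RTThreadPreemptRestore(&PreemptState);
 */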


RTDECL(bool) RTThreadIsInInterrupt(RTTHREAD hThread)
{
    Assert(hThread == NIL_RTTHREAD); NOREF(hThread);

    return in_interrupt() != 0;
}
RT_EXPORT_SYMBOL(RTThreadIsInInterrupt);
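
/*
 * Illustrative usage (not part of the original sources): guarding a path
 * that must not run in interrupt context. A minimal sketch:
 *
 *     if (RTThreadIsInInterrupt(NIL_RTTHREAD))
 *     {
 *         // interrupt context: cannot sleep or take blocking locks here,
 *         // hand the work off to a worker thread instead
 *     }
 */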