/*
 * Virtual cpu timer based timer functions.
 *
 * Copyright IBM Corp. 2004, 2012
 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/time.h>

#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>
#include <asm/cpu_mf.h>
#include <asm/smp.h>

static void virt_timer_expire(void);

static LIST_HEAD(virt_timer_list);
static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;

static DEFINE_PER_CPU(u64, mt_cycles[32]);
static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };

static inline u64 get_vtimer(void)
{
        u64 timer;

        asm volatile("stpt %0" : "=m" (timer));
        return timer;
}

static inline void set_vtimer(u64 expires)
{
        u64 timer;

        asm volatile(
                "       stpt    %0\n"   /* Store current cpu timer value */
                "       spt     %1"     /* Set new value imm. afterwards */
                : "=m" (timer) : "m" (expires));
        S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
        S390_lowcore.last_update_timer = expires;
}

static inline int virt_timer_forward(u64 elapsed)
{
        BUG_ON(!irqs_disabled());

        if (list_empty(&virt_timer_list))
                return 0;
        elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
        return elapsed >= atomic64_read(&virt_timer_current);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
        struct thread_info *ti = task_thread_info(tsk);
        u64 timer, clock, user, system, steal;
        u64 user_scaled, system_scaled;
        int i;

        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
        asm volatile(
                "       stpt    %0\n"   /* Store current cpu timer value */
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
                "       stckf   %1"     /* Store current tod clock value */
#else
                "       stck    %1"     /* Store current tod clock value */
#endif
                : "=m" (S390_lowcore.last_update_timer),
                  "=m" (S390_lowcore.last_update_clock));
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
        S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;

        /* Do MT utilization calculation */
        if (smp_cpu_mtid) {
                u64 cycles_new[32], *cycles_old;
                u64 delta, mult, div;

                cycles_old = this_cpu_ptr(mt_cycles);
                if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
                        mult = div = 0;
                        for (i = 0; i <= smp_cpu_mtid; i++) {
                                delta = cycles_new[i] - cycles_old[i];
                                mult += delta;
                                div += (i + 1) * delta;
                        }
                        if (mult > 0) {
                                /* Update scaling factor */
                                __this_cpu_write(mt_scaling_mult, mult);
                                __this_cpu_write(mt_scaling_div, div);
                                memcpy(cycles_old, cycles_new,
                                       sizeof(u64) * (smp_cpu_mtid + 1));
                        }
                }
        }

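        /*
         * Worked example for the scaling factors computed above (numbers
         * are illustrative, not from this file): with smp_cpu_mtid == 1,
         * i.e. two hardware threads per core, and per-thread cycle deltas
         * of 600 and 400 since the last update, mult = 600 + 400 = 1000
         * and div = 1 * 600 + 2 * 400 = 1400.  The "Do MT utilization
         * scaling" step below then multiplies raw thread time by
         * mult / div (here 1000 / 1400) to account for the core being
         * shared between its threads.
         */
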
        user = S390_lowcore.user_timer - ti->user_timer;
        S390_lowcore.steal_timer -= user;
        ti->user_timer = S390_lowcore.user_timer;

        system = S390_lowcore.system_timer - ti->system_timer;
        S390_lowcore.steal_timer -= system;
        ti->system_timer = S390_lowcore.system_timer;

        user_scaled = user;
        system_scaled = system;
        /* Do MT utilization scaling */
        if (smp_cpu_mtid) {
                u64 mult = __this_cpu_read(mt_scaling_mult);
                u64 div = __this_cpu_read(mt_scaling_div);

                user_scaled = (user_scaled * mult) / div;
                system_scaled = (system_scaled * mult) / div;
        }
        account_user_time(tsk, user, user_scaled);
        account_system_time(tsk, hardirq_offset, system, system_scaled);

        steal = S390_lowcore.steal_timer;
        if ((s64) steal > 0) {
                S390_lowcore.steal_timer = 0;
                account_steal_time(steal);
        }

        return virt_timer_forward(user + system);
}

void vtime_task_switch(struct task_struct *prev)
{
        struct thread_info *ti;

        do_account_vtime(prev, 0);
        ti = task_thread_info(prev);
        ti->user_timer = S390_lowcore.user_timer;
        ti->system_timer = S390_lowcore.system_timer;
        ti = task_thread_info(current);
        S390_lowcore.user_timer = ti->user_timer;
        S390_lowcore.system_timer = ti->system_timer;
}

/*
 * In s390, accounting pending user time also implies
 * accounting system time in order to correctly compute
 * the stolen time accounting.
 */
void vtime_account_user(struct task_struct *tsk)
{
        if (do_account_vtime(tsk, HARDIRQ_OFFSET))
                virt_timer_expire();
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void vtime_account_irq_enter(struct task_struct *tsk)
{
        struct thread_info *ti = task_thread_info(tsk);
        u64 timer, system, system_scaled;

        timer = S390_lowcore.last_update_timer;
        S390_lowcore.last_update_timer = get_vtimer();
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

        system = S390_lowcore.system_timer - ti->system_timer;
        S390_lowcore.steal_timer -= system;
        ti->system_timer = S390_lowcore.system_timer;
        system_scaled = system;
        /* Do MT utilization scaling */
        if (smp_cpu_mtid) {
                u64 mult = __this_cpu_read(mt_scaling_mult);
                u64 div = __this_cpu_read(mt_scaling_div);

                system_scaled = (system_scaled * mult) / div;
        }
        account_system_time(tsk, 0, system, system_scaled);

        virt_timer_forward(system);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);

void vtime_account_system(struct task_struct *tsk)
__attribute__((alias("vtime_account_irq_enter")));
EXPORT_SYMBOL_GPL(vtime_account_system);

/*
 * Sorted add to a list. List is linear searched until first bigger
 * element is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
        struct vtimer_list *tmp;

        list_for_each_entry(tmp, head, entry) {
                if (tmp->expires > timer->expires) {
                        list_add_tail(&timer->entry, &tmp->entry);
                        return;
                }
        }
        list_add_tail(&timer->entry, head);
}

/*
 * Handler for expired virtual CPU timer.
 */
static void virt_timer_expire(void)
{
        struct vtimer_list *timer, *tmp;
        unsigned long elapsed;
        LIST_HEAD(cb_list);

        /* walk timer list, fire all expired timers */
        spin_lock(&virt_timer_lock);
        elapsed = atomic64_read(&virt_timer_elapsed);
        list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
                if (timer->expires < elapsed)
                        /* move expired timer to the callback queue */
                        list_move_tail(&timer->entry, &cb_list);
                else
                        timer->expires -= elapsed;
        }
        if (!list_empty(&virt_timer_list)) {
                timer = list_first_entry(&virt_timer_list,
                                         struct vtimer_list, entry);
                atomic64_set(&virt_timer_current, timer->expires);
        }
        atomic64_sub(elapsed, &virt_timer_elapsed);
        spin_unlock(&virt_timer_lock);

        /* Do callbacks and recharge periodic timers */
        list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
                list_del_init(&timer->entry);
                timer->function(timer->data);
                if (timer->interval) {
                        /* Recharge interval timer */
                        timer->expires = timer->interval +
                                atomic64_read(&virt_timer_elapsed);
                        spin_lock(&virt_timer_lock);
                        list_add_sorted(timer, &virt_timer_list);
                        spin_unlock(&virt_timer_lock);
                }
        }
}

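/*
 * Worked example of the expiry pass above (values are illustrative):
 * assume virt_timer_elapsed has reached 1000 and the sorted list holds
 * one-shot timers with expires == 800 and expires == 1500.  The first
 * timer moves to cb_list and its callback fires; the second becomes
 * expires == 500 and, as the new head of the list, also becomes the
 * value of virt_timer_current.  A periodic timer would afterwards be
 * re-inserted with expires = interval + virt_timer_elapsed.
 */
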
void init_virt_timer(struct vtimer_list *timer)
{
        timer->function = NULL;
        INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
        return !list_empty(&timer->entry);
}

static void internal_add_vtimer(struct vtimer_list *timer)
{
        if (list_empty(&virt_timer_list)) {
                /* First timer, just program it. */
                atomic64_set(&virt_timer_current, timer->expires);
                atomic64_set(&virt_timer_elapsed, 0);
                list_add(&timer->entry, &virt_timer_list);
        } else {
                /* Update timer against current base. */
                timer->expires += atomic64_read(&virt_timer_elapsed);
                if (likely((s64) timer->expires <
                           (s64) atomic64_read(&virt_timer_current)))
                        /* The new timer expires before the current timer. */
                        atomic64_set(&virt_timer_current, timer->expires);
                /* Insert new timer into the list. */
                list_add_sorted(timer, &virt_timer_list);
        }
}

static void __add_vtimer(struct vtimer_list *timer, int periodic)
{
        unsigned long flags;

        timer->interval = periodic ? timer->expires : 0;
        spin_lock_irqsave(&virt_timer_lock, flags);
        internal_add_vtimer(timer);
        spin_unlock_irqrestore(&virt_timer_lock, flags);
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(struct vtimer_list *timer)
{
        __add_vtimer(timer, 0);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(struct vtimer_list *timer)
{
        __add_vtimer(timer, 1);
}
EXPORT_SYMBOL(add_virt_timer_periodic);

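/*
 * Minimal usage sketch for the interfaces above (illustrative only, not
 * part of this file; the names my_vtimer/my_vtimer_fn and the expiry
 * value are hypothetical):
 *
 *      static struct vtimer_list my_vtimer;
 *
 *      static void my_vtimer_fn(unsigned long data)
 *      {
 *              // invoked from virt_timer_expire() once this CPU has
 *              // accounted 'expires' units of virtual CPU time
 *      }
 *
 *      init_virt_timer(&my_vtimer);
 *      my_vtimer.function = my_vtimer_fn;
 *      my_vtimer.data = 0;
 *      my_vtimer.expires = 1ULL << 32; // CPU-timer units, value illustrative
 *      add_virt_timer_periodic(&my_vtimer);
 */
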
static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
{
        unsigned long flags;
        int rc;

        BUG_ON(!timer->function);

        if (timer->expires == expires && vtimer_pending(timer))
                return 1;
        spin_lock_irqsave(&virt_timer_lock, flags);
        rc = vtimer_pending(timer);
        if (rc)
                list_del_init(&timer->entry);
        timer->interval = periodic ? expires : 0;
        timer->expires = expires;
        internal_add_vtimer(timer);
        spin_unlock_irqrestore(&virt_timer_lock, flags);
        return rc;
}

/*
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, u64 expires)
{
        return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
{
        return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);

/*
 * Delete a virtual timer.
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
        unsigned long flags;

        if (!vtimer_pending(timer))
                return 0;
        spin_lock_irqsave(&virt_timer_lock, flags);
        list_del_init(&timer->entry);
        spin_unlock_irqrestore(&virt_timer_lock, flags);
        return 1;
}
EXPORT_SYMBOL(del_virt_timer);

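/*
 * Illustrative rearm/teardown sketch, continuing the hypothetical
 * my_vtimer example above (values are illustrative):
 *
 *      mod_virt_timer(&my_vtimer, 2ULL << 32); // rearm as a one-shot timer
 *      del_virt_timer(&my_vtimer);             // 1 if it was still pending
 */
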
/*
 * Start the virtual CPU timer on the current CPU.
 */
void vtime_init(void)
{
        /* set initial cpu timer */
        set_vtimer(VTIMER_MAX_SLICE);
}