/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/marker.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

struct spu_prio_array {
        DECLARE_BITMAP(bitmap, MAX_PRIO);
        struct list_head runq[MAX_PRIO];
        spinlock_t runq_lock;
        int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO 120

/*
 * Frequency of the spu scheduler tick. By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK (10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE (100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO (MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
        max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
        if (ctx->prio < NORMAL_PRIO)
                ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
        else
                ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}

/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
        /*
         * assert that the context is not on the runqueue, so it is safe
         * to change its scheduling parameters.
         */
        BUG_ON(!list_empty(&ctx->rq));

        /*
         * 32-Bit assignments are atomic on powerpc, and we don't care about
         * memory ordering here because retrieving the controlling thread is
         * per definition racy.
         */
        ctx->tid = current->pid;

        /*
         * We do our own priority calculations, so we normally want
         * ->static_prio to start with. Unfortunately this field
         * contains junk for threads with a realtime scheduling
         * policy so we have to look at ->prio in this case.
         */
        if (rt_prio(current->prio))
                ctx->prio = current->prio;
        else
                ctx->prio = current->static_prio;
        ctx->policy = current->policy;

        /*
         * TO DO: the context may be loaded, so we may need to activate
         * it again on a different node. But it shouldn't hurt anything
         * to update its parameters, because we know that the scheduler
         * is not actively looking at this field, since it is not on the
         * runqueue. The context will be rescheduled on the proper node
         * if it is timesliced or preempted.
         */
        ctx->cpus_allowed = current->cpus_allowed;

        /* Save the current cpu id for spu interrupt routing. */
        ctx->last_ran = raw_smp_processor_id();
}

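/*
 * Push the owning thread's current scheduling parameters into a context.
 * If the context is loaded on an spu, take the node's list_mutex so the
 * update does not race with find_victim().
 */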
void spu_update_sched_info(struct spu_context *ctx)
{
        int node;

        if (ctx->state == SPU_STATE_RUNNABLE) {
                node = ctx->spu->node;

                /*
                 * Take list_mutex to sync with find_victim().
                 */
                mutex_lock(&cbe_spu_info[node].list_mutex);
                __spu_update_sched_info(ctx);
                mutex_unlock(&cbe_spu_info[node].list_mutex);
        } else {
                __spu_update_sched_info(ctx);
        }
}

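/*
 * Check whether a context may run on the given node, based on the cpu
 * affinity mask of its owning thread.  node_allowed() takes runq_lock
 * around the check; __node_allowed() is for callers already holding it.
 */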
static int __node_allowed(struct spu_context *ctx, int node)
{
        if (nr_cpus_node(node)) {
                cpumask_t mask = node_to_cpumask(node);

                if (cpus_intersects(mask, ctx->cpus_allowed))
                        return 1;
        }

        return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
        int rval;

        spin_lock(&spu_prio->runq_lock);
        rval = __node_allowed(ctx, node);
        spin_unlock(&spu_prio->runq_lock);

        return rval;
}

void do_notify_spus_active(void)
{
        int node;

        /*
         * Wake up the active spu_contexts.
         *
         * When the awakened processes see their "notify_active" flag is set,
         * they will call spu_switch_notify().
         */
        for_each_online_node(node) {
                struct spu *spu;

                mutex_lock(&cbe_spu_info[node].list_mutex);
                list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                        if (spu->alloc_state != SPU_FREE) {
                                struct spu_context *ctx = spu->ctx;
                                set_bit(SPU_SCHED_NOTIFY_ACTIVE,
                                        &ctx->sched_flags);
                                mb();
                                wake_up_all(&ctx->stop_wq);
                        }
                }
                mutex_unlock(&cbe_spu_info[node].list_mutex);
        }
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu: physical spu to bind to
 * @ctx: context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
        spu_context_trace(spu_bind_context__enter, ctx, spu);

        spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

        if (ctx->flags & SPU_CREATE_NOSCHED)
                atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

        ctx->stats.slb_flt_base = spu->stats.slb_flt;
        ctx->stats.class2_intr_base = spu->stats.class2_intr;

        spu->ctx = ctx;
        spu->flags = 0;
        ctx->spu = spu;
        ctx->ops = &spu_hw_ops;
        spu->pid = current->pid;
        spu->tgid = current->tgid;
        spu_associate_mm(spu, ctx->owner);
        spu->ibox_callback = spufs_ibox_callback;
        spu->wbox_callback = spufs_wbox_callback;
        spu->stop_callback = spufs_stop_callback;
        spu->mfc_callback = spufs_mfc_callback;
        mb();
        spu_unmap_mappings(ctx);
        spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
        spu_restore(&ctx->csa, spu);
        spu->timestamp = jiffies;
        spu_switch_notify(spu, ctx);
        ctx->state = SPU_STATE_RUNNABLE;

        spuctx_switch_state(ctx, SPU_UTIL_USER);
}

/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
        BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

        return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

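/*
 * Fold any contexts of the gang that are not yet on the affinity list
 * into it, so the whole gang is covered by the affinity chain.
 */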
static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
        struct spu_context *ctx;

        list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
                if (list_empty(&ctx->aff_list))
                        list_add(&ctx->aff_list, &gang->aff_list_head);
        }
        gang->aff_flags |= AFF_MERGED;
}

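/*
 * Assign each context in the gang an offset relative to the affinity
 * reference context: negative offsets before it, positive offsets after it.
 */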
static void aff_set_offsets(struct spu_gang *gang)
{
        struct spu_context *ctx;
        int offset;

        offset = -1;
        list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
                                    aff_list) {
                if (&ctx->aff_list == &gang->aff_list_head)
                        break;
                ctx->aff_offset = offset--;
        }

        offset = 0;
        list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
                if (&ctx->aff_list == &gang->aff_list_head)
                        break;
                ctx->aff_offset = offset++;
        }

        gang->aff_flags |= AFF_OFFSETS_SET;
}

static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
                int group_size, int lowest_offset)
{
        struct spu *spu;
        int node, n;

        /*
         * TODO: A better algorithm could be used to find a good spu to be
         * used as reference location for the ctxs chain.
         */
        node = cpu_to_node(raw_smp_processor_id());
        for (n = 0; n < MAX_NUMNODES; n++, node++) {
                node = (node < MAX_NUMNODES) ? node : 0;
                if (!node_allowed(ctx, node))
                        continue;
                mutex_lock(&cbe_spu_info[node].list_mutex);
                list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                        if ((!mem_aff || spu->has_mem_affinity) &&
                                        sched_spu(spu)) {
                                mutex_unlock(&cbe_spu_info[node].list_mutex);
                                return spu;
                        }
                }
                mutex_unlock(&cbe_spu_info[node].list_mutex);
        }
        return NULL;
}

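/*
 * Pick the physical spu that will act as the reference point for the
 * gang's affinity chain and store it in gang->aff_ref_spu.
 */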
static void aff_set_ref_point_location(struct spu_gang *gang)
{
        int mem_aff, gs, lowest_offset;
        struct spu_context *ctx;
        struct spu *tmp;

        mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
        lowest_offset = 0;
        gs = 0;

        list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
                gs++;

        list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
                                    aff_list) {
                if (&ctx->aff_list == &gang->aff_list_head)
                        break;
                lowest_offset = ctx->aff_offset;
        }

        gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
                        lowest_offset);
}

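/*
 * Walk the affinity list starting at @ref and return the spu that lies
 * @offset schedulable spus away from it: forward for positive offsets,
 * backward for negative ones.
 */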
static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
        struct spu *spu;

        spu = NULL;
        if (offset >= 0) {
                list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
                        BUG_ON(spu->node != node);
                        if (offset == 0)
                                break;
                        if (sched_spu(spu))
                                offset--;
                }
        } else {
                list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
                        BUG_ON(spu->node != node);
                        if (offset == 0)
                                break;
                        if (sched_spu(spu))
                                offset++;
                }
        }

        return spu;
}

/*
 * has_affinity is called each time a context is going to be scheduled.
 * It returns true if the context's gang has an affinity reference spu,
 * setting one up (and merging/offsetting the gang) on first use.
 */
static int has_affinity(struct spu_context *ctx)
{
        struct spu_gang *gang = ctx->gang;

        if (list_empty(&ctx->aff_list))
                return 0;

        if (!gang->aff_ref_spu) {
                if (!(gang->aff_flags & AFF_MERGED))
                        aff_merge_remaining_ctxs(gang);
                if (!(gang->aff_flags & AFF_OFFSETS_SET))
                        aff_set_offsets(gang);
                aff_set_ref_point_location(gang);
        }

        return gang->aff_ref_spu != NULL;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu: physical spu to unbind from
 * @ctx: context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
        spu_context_trace(spu_unbind_context__enter, ctx, spu);

        spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

        if (spu->ctx->flags & SPU_CREATE_NOSCHED)
                atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

        if (ctx->gang) {
                mutex_lock(&ctx->gang->aff_mutex);
                if (has_affinity(ctx)) {
                        if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
                                ctx->gang->aff_ref_spu = NULL;
                }
                mutex_unlock(&ctx->gang->aff_mutex);
        }

        spu_switch_notify(spu, NULL);
        spu_unmap_mappings(ctx);
        spu_save(&ctx->csa, spu);
        spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
        spu->timestamp = jiffies;
        ctx->state = SPU_STATE_SAVED;
        spu->ibox_callback = NULL;
        spu->wbox_callback = NULL;
        spu->stop_callback = NULL;
        spu->mfc_callback = NULL;
        spu_associate_mm(spu, NULL);
        spu->pid = 0;
        spu->tgid = 0;
        ctx->ops = &spu_backing_ops;
        spu->flags = 0;
        spu->ctx = NULL;

        ctx->stats.slb_flt +=
                (spu->stats.slb_flt - ctx->stats.slb_flt_base);
        ctx->stats.class2_intr +=
                (spu->stats.class2_intr - ctx->stats.class2_intr_base);

        /* This maps the underlying spu state to idle */
        spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
        ctx->spu = NULL;
}

/**
 * __spu_add_to_rq - add a context to the runqueue
 * @ctx: context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
        /*
         * Unfortunately this code path can be called from multiple threads
         * on behalf of a single context due to the way the problem state
         * mmap support works.
         *
         * Fortunately we need to wake up all these threads at the same time
         * and can simply skip the runqueue addition for all but the first
         * thread getting into this codepath.
         *
         * It's still quite hacky, and long-term we should proxy all other
         * threads through the owner thread so that spu_run is in control
         * of all the scheduling activity for a given context.
         */
        if (list_empty(&ctx->rq)) {
                list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
                set_bit(ctx->prio, spu_prio->bitmap);
                if (!spu_prio->nr_waiting++)
                        __mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
        }
}

static void spu_add_to_rq(struct spu_context *ctx)
{
        spin_lock(&spu_prio->runq_lock);
        __spu_add_to_rq(ctx);
        spin_unlock(&spu_prio->runq_lock);
}

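/*
 * Remove a context from the runqueue and stop the scheduler tick timer
 * once the last waiter is gone.  The __ variant expects runq_lock held.
 */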
static void __spu_del_from_rq(struct spu_context *ctx)
{
        int prio = ctx->prio;

        if (!list_empty(&ctx->rq)) {
                if (!--spu_prio->nr_waiting)
                        del_timer(&spusched_timer);
                list_del_init(&ctx->rq);

                if (list_empty(&spu_prio->runq[prio]))
                        clear_bit(prio, spu_prio->bitmap);
        }
}

void spu_del_from_rq(struct spu_context *ctx)
{
        spin_lock(&spu_prio->runq_lock);
        __spu_del_from_rq(ctx);
        spin_unlock(&spu_prio->runq_lock);
}

static void spu_prio_wait(struct spu_context *ctx)
{
        DEFINE_WAIT(wait);

        /*
         * The caller must explicitly wait for a context to be loaded
         * if the nosched flag is set.  If NOSCHED is not set, the caller
         * queues the context and waits for an spu event or error.
         */
        BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));

        spin_lock(&spu_prio->runq_lock);
        prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
        if (!signal_pending(current)) {
                __spu_add_to_rq(ctx);
                spin_unlock(&spu_prio->runq_lock);
                mutex_unlock(&ctx->state_mutex);
                schedule();
                mutex_lock(&ctx->state_mutex);
                spin_lock(&spu_prio->runq_lock);
                __spu_del_from_rq(ctx);
        }
        spin_unlock(&spu_prio->runq_lock);
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&ctx->stop_wq, &wait);
}

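/**
 * spu_get_idle - find an idle spu for a context
 * @ctx: spu context to schedule
 *
 * Honours gang affinity if the context has any, otherwise scans the
 * nodes the owning thread may run on for a free spu.  Returns the
 * allocated spu or %NULL if none is available.
 */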
static struct spu *spu_get_idle(struct spu_context *ctx)
{
        struct spu *spu, *aff_ref_spu;
        int node, n;

        spu_context_nospu_trace(spu_get_idle__enter, ctx);

        if (ctx->gang) {
                mutex_lock(&ctx->gang->aff_mutex);
                if (has_affinity(ctx)) {
                        aff_ref_spu = ctx->gang->aff_ref_spu;
                        atomic_inc(&ctx->gang->aff_sched_count);
                        mutex_unlock(&ctx->gang->aff_mutex);
                        node = aff_ref_spu->node;

                        mutex_lock(&cbe_spu_info[node].list_mutex);
                        spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
                        if (spu && spu->alloc_state == SPU_FREE)
                                goto found;
                        mutex_unlock(&cbe_spu_info[node].list_mutex);

                        mutex_lock(&ctx->gang->aff_mutex);
                        if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
                                ctx->gang->aff_ref_spu = NULL;
                        mutex_unlock(&ctx->gang->aff_mutex);
                        goto not_found;
                }
                mutex_unlock(&ctx->gang->aff_mutex);
        }
        node = cpu_to_node(raw_smp_processor_id());
        for (n = 0; n < MAX_NUMNODES; n++, node++) {
                node = (node < MAX_NUMNODES) ? node : 0;
                if (!node_allowed(ctx, node))
                        continue;

                mutex_lock(&cbe_spu_info[node].list_mutex);
                list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                        if (spu->alloc_state == SPU_FREE)
                                goto found;
                }
                mutex_unlock(&cbe_spu_info[node].list_mutex);
        }

 not_found:
        spu_context_nospu_trace(spu_get_idle__not_found, ctx);
        return NULL;

 found:
        spu->alloc_state = SPU_USED;
        mutex_unlock(&cbe_spu_info[node].list_mutex);
        spu_context_trace(spu_get_idle__found, ctx, spu);
        spu_init_channels(spu);
        return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx: candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
        struct spu_context *victim = NULL;
        struct spu *spu;
        int node, n;

        spu_context_nospu_trace(spu_find_victim__enter, ctx);

        /*
         * Look for a possible preemption candidate on the local node first.
         * If there is no candidate look at the other nodes.  This isn't
         * exactly fair, but so far the whole spu scheduler tries to keep
         * a strong node affinity.  We might want to fine-tune this in
         * the future.
         */
 restart:
        node = cpu_to_node(raw_smp_processor_id());
        for (n = 0; n < MAX_NUMNODES; n++, node++) {
                node = (node < MAX_NUMNODES) ? node : 0;
                if (!node_allowed(ctx, node))
                        continue;

                mutex_lock(&cbe_spu_info[node].list_mutex);
                list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                        struct spu_context *tmp = spu->ctx;

                        if (tmp && tmp->prio > ctx->prio &&
                            !(tmp->flags & SPU_CREATE_NOSCHED) &&
                            (!victim || tmp->prio > victim->prio))
                                victim = spu->ctx;
                }
                mutex_unlock(&cbe_spu_info[node].list_mutex);

                if (victim) {
                        /*
                         * This nests ctx->state_mutex, but we always lock
                         * higher priority contexts before lower priority
                         * ones, so this is safe until we introduce
                         * priority inheritance schemes.
                         *
                         * XXX if the highest priority context is locked,
                         * this can loop a long time.  Might be better to
                         * look at another context or give up after X retries.
                         */
                        if (!mutex_trylock(&victim->state_mutex)) {
                                victim = NULL;
                                goto restart;
                        }

                        spu = victim->spu;
                        if (!spu || victim->prio <= ctx->prio) {
                                /*
                                 * This race can happen because we've dropped
                                 * the active list mutex.  Not a problem, just
                                 * restart the search.
                                 */
                                mutex_unlock(&victim->state_mutex);
                                victim = NULL;
                                goto restart;
                        }

                        spu_context_trace(__spu_deactivate__unload, ctx, spu);

                        mutex_lock(&cbe_spu_info[node].list_mutex);
                        cbe_spu_info[node].nr_active--;
                        spu_unbind_context(spu, victim);
                        mutex_unlock(&cbe_spu_info[node].list_mutex);

                        victim->stats.invol_ctx_switch++;
                        spu->stats.invol_ctx_switch++;
                        if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
                                spu_add_to_rq(victim);

                        mutex_unlock(&victim->state_mutex);

                        return spu;
                }
        }

        return NULL;
}

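/*
 * Bind a context to an spu if the spu is still free, otherwise put the
 * context back on the runqueue.  spu_schedule() is the variant that takes
 * the context's state_mutex itself.
 */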
static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
{
        int node = spu->node;
        int success = 0;

        spu_set_timeslice(ctx);

        mutex_lock(&cbe_spu_info[node].list_mutex);
        if (spu->ctx == NULL) {
                spu_bind_context(spu, ctx);
                cbe_spu_info[node].nr_active++;
                spu->alloc_state = SPU_USED;
                success = 1;
        }
        mutex_unlock(&cbe_spu_info[node].list_mutex);

        if (success)
                wake_up_all(&ctx->run_wq);
        else
                spu_add_to_rq(ctx);
}

static void spu_schedule(struct spu *spu, struct spu_context *ctx)
{
        /* not a candidate for interruptible because it's called either
           from the scheduler thread or from spu_deactivate */
        mutex_lock(&ctx->state_mutex);
        __spu_schedule(spu, ctx);
        spu_release(ctx);
}

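/*
 * Unbind a context from its spu, mark the spu free again and account the
 * involuntary context switch for both the context and the spu.
 */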
static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
{
        int node = spu->node;

        mutex_lock(&cbe_spu_info[node].list_mutex);
        cbe_spu_info[node].nr_active--;
        spu->alloc_state = SPU_FREE;
        spu_unbind_context(spu, ctx);
        ctx->stats.invol_ctx_switch++;
        spu->stats.invol_ctx_switch++;
        mutex_unlock(&cbe_spu_info[node].list_mutex);
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx: spu context to schedule
 * @flags: flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
        struct spu *spu;

        /*
         * If there are multiple threads waiting for a single context
         * only one actually binds the context while the others will
         * only be able to acquire the state_mutex once the context
         * already is in runnable state.
         */
        if (ctx->spu)
                return 0;

spu_activate_top:
        if (signal_pending(current))
                return -ERESTARTSYS;

        spu = spu_get_idle(ctx);
        /*
         * If this is a realtime thread we try to get it running by
         * preempting a lower priority thread.
         */
        if (!spu && rt_prio(ctx->prio))
                spu = find_victim(ctx);
        if (spu) {
                unsigned long runcntl;

                runcntl = ctx->ops->runcntl_read(ctx);
                __spu_schedule(spu, ctx);
                if (runcntl & SPU_RUNCNTL_RUNNABLE)
                        spuctx_switch_state(ctx, SPU_UTIL_USER);

                return 0;
        }

        if (ctx->flags & SPU_CREATE_NOSCHED) {
                spu_prio_wait(ctx);
                goto spu_activate_top;
        }

        spu_add_to_rq(ctx);

        return 0;
}

/**
 * grab_runnable_context - try to find a runnable context
 * @prio: only consider contexts with a priority value lower (better) than this
 * @node: node the returned context must be allowed to run on
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
        struct spu_context *ctx;
        int best;

        spin_lock(&spu_prio->runq_lock);
        best = find_first_bit(spu_prio->bitmap, prio);
        while (best < prio) {
                struct list_head *rq = &spu_prio->runq[best];

                list_for_each_entry(ctx, rq, rq) {
                        /* XXX(hch): check for affinity here as well */
                        if (__node_allowed(ctx, node)) {
                                __spu_del_from_rq(ctx);
                                goto found;
                        }
                }
                best++;
        }
        ctx = NULL;
 found:
        spin_unlock(&spu_prio->runq_lock);
        return ctx;
}

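/*
 * Unbind @ctx from its spu if @force is set or if a runnable context with
 * a priority better than @max_prio is waiting, and hand the freed spu to
 * that context.  Returns nonzero if a new context took over the spu.
 */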
static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
        struct spu *spu = ctx->spu;
        struct spu_context *new = NULL;

        if (spu) {
                new = grab_runnable_context(max_prio, spu->node);
                if (new || force) {
                        spu_unschedule(spu, ctx);
                        if (new) {
                                if (new->flags & SPU_CREATE_NOSCHED)
                                        wake_up(&new->stop_wq);
                                else {
                                        spu_release(ctx);
                                        spu_schedule(spu, new);
                                        /* this one can't easily be made
                                           interruptible */
                                        mutex_lock(&ctx->state_mutex);
                                }
                        }
                }
        }

        return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx: spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
        spu_context_nospu_trace(spu_deactivate__enter, ctx);
        __spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx: spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
        spu_context_nospu_trace(spu_yield__enter, ctx);
        if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
                mutex_lock(&ctx->state_mutex);
                __spu_deactivate(ctx, 0, MAX_PRIO);
                mutex_unlock(&ctx->state_mutex);
        }
}

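/*
 * Per-context scheduler tick: charge one tick of the timeslice and, once
 * it is used up, try to hand the spu over to a waiting context of equal
 * or better priority.  NOSCHED and SCHED_FIFO contexts are never preempted.
 */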
static noinline void spusched_tick(struct spu_context *ctx)
{
        struct spu_context *new = NULL;
        struct spu *spu = NULL;

        if (spu_acquire(ctx))
                BUG();  /* a kernel thread never has signals pending */

        if (ctx->state != SPU_STATE_RUNNABLE)
                goto out;
        if (ctx->flags & SPU_CREATE_NOSCHED)
                goto out;
        if (ctx->policy == SCHED_FIFO)
                goto out;

        if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
                goto out;

        spu = ctx->spu;

        spu_context_trace(spusched_tick__preempt, ctx, spu);

        new = grab_runnable_context(ctx->prio + 1, spu->node);
        if (new) {
                spu_unschedule(spu, ctx);
                if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
                        spu_add_to_rq(ctx);
        } else {
                spu_context_nospu_trace(spusched_tick__newslice, ctx);
                ctx->time_slice++;
        }
out:
        spu_release(ctx);

        if (new)
                spu_schedule(spu, new);
}

/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
        int nr_active = 0, node;

        for (node = 0; node < MAX_NUMNODES; node++)
                nr_active += cbe_spu_info[node].nr_active;
        nr_active += spu_prio->nr_waiting;

        return nr_active;
}

/**
 * spu_calc_load - update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(void)
{
        unsigned long active_tasks; /* fixed-point */

        active_tasks = count_active_contexts() * FIXED_1;
        CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
        CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
        CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
}

static void spusched_wake(unsigned long data)
{
        mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
        wake_up_process(spusched_task);
}

static void spuloadavg_wake(unsigned long data)
{
        mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
        spu_calc_load();
}

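/*
 * Main loop of the scheduler thread: woken by spusched_wake() on every
 * scheduler tick, it runs spusched_tick() for each loaded context.
 */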
static int spusched_thread(void *unused)
{
        struct spu *spu;
        int node;

        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
                for (node = 0; node < MAX_NUMNODES; node++) {
                        struct mutex *mtx = &cbe_spu_info[node].list_mutex;

                        mutex_lock(mtx);
                        list_for_each_entry(spu, &cbe_spu_info[node].spus,
                                        cbe_list) {
                                struct spu_context *ctx = spu->ctx;

                                if (ctx) {
                                        mutex_unlock(mtx);
                                        spusched_tick(ctx);
                                        mutex_lock(mtx);
                                }
                        }
                        mutex_unlock(mtx);
                }
        }

        return 0;
}

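/*
 * Move a context to a new utilization state and, while it is loaded on a
 * physical spu, charge the elapsed time against the previous state for
 * both the context and the spu.
 */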
void spuctx_switch_state(struct spu_context *ctx,
                enum spu_utilization_state new_state)
{
        unsigned long long curtime;
        signed long long delta;
        struct timespec ts;
        struct spu *spu;
        enum spu_utilization_state old_state;

        ktime_get_ts(&ts);
        curtime = timespec_to_ns(&ts);
        delta = curtime - ctx->stats.tstamp;

        WARN_ON(!mutex_is_locked(&ctx->state_mutex));
        WARN_ON(delta < 0);

        spu = ctx->spu;
        old_state = ctx->stats.util_state;
        ctx->stats.util_state = new_state;
        ctx->stats.tstamp = curtime;

        /*
         * Update the physical SPU utilization statistics.
         */
        if (spu) {
                ctx->stats.times[old_state] += delta;
                spu->stats.times[old_state] += delta;
                spu->stats.util_state = new_state;
                spu->stats.tstamp = curtime;
        }
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static int show_spu_loadavg(struct seq_file *s, void *private)
{
        int a, b, c;

        a = spu_avenrun[0] + (FIXED_1/200);
        b = spu_avenrun[1] + (FIXED_1/200);
        c = spu_avenrun[2] + (FIXED_1/200);

        /*
         * Note that last_pid doesn't really make much sense for the
         * SPU loadavg (it even seems very odd on the CPU side...),
         * but we include it here to have a 100% compatible interface.
         */
        seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
                LOAD_INT(a), LOAD_FRAC(a),
                LOAD_INT(b), LOAD_FRAC(b),
                LOAD_INT(c), LOAD_FRAC(c),
                count_active_contexts(),
                atomic_read(&nr_spu_contexts),
                current->nsproxy->pid_ns->last_pid);
        return 0;
}

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
        return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
        .open = spu_loadavg_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

int __init spu_sched_init(void)
{
        struct proc_dir_entry *entry;
        int err = -ENOMEM, i;

        spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
        if (!spu_prio)
                goto out;

        for (i = 0; i < MAX_PRIO; i++) {
                INIT_LIST_HEAD(&spu_prio->runq[i]);
                __clear_bit(i, spu_prio->bitmap);
        }
        spin_lock_init(&spu_prio->runq_lock);

        setup_timer(&spusched_timer, spusched_wake, 0);
        setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);

        spusched_task = kthread_run(spusched_thread, NULL, "spusched");
        if (IS_ERR(spusched_task)) {
                err = PTR_ERR(spusched_task);
                goto out_free_spu_prio;
        }

        mod_timer(&spuloadavg_timer, 0);

        entry = proc_create("spu_loadavg", 0, NULL, &spu_loadavg_fops);
        if (!entry)
                goto out_stop_kthread;

        pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
                        SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
        return 0;

 out_stop_kthread:
        kthread_stop(spusched_task);
 out_free_spu_prio:
        kfree(spu_prio);
 out:
        return err;
}

void spu_sched_exit(void)
{
        struct spu *spu;
        int node;

        remove_proc_entry("spu_loadavg", NULL);

        del_timer_sync(&spusched_timer);
        del_timer_sync(&spuloadavg_timer);
        kthread_stop(spusched_task);

        for (node = 0; node < MAX_NUMNODES; node++) {
                mutex_lock(&cbe_spu_info[node].list_mutex);
                list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
                        if (spu->alloc_state != SPU_FREE)
                                spu->alloc_state = SPU_FREE;
                mutex_unlock(&cbe_spu_info[node].list_mutex);
        }
        kfree(spu_prio);
}