/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"
#define CREATE_TRACE_POINTS
#include "sputrace.h"

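/*
 * Run-queue layout (a summary added for clarity): one FIFO list per
 * priority level plus a bitmap of the non-empty levels, much like the
 * old O(1) CPU scheduler.  Picking the best waiting context is a
 * find_first_bit() over the bitmap followed by taking the head of that
 * list; nr_waiting counts contexts sitting on the runqueue so the
 * scheduler tick can be stopped when nobody is waiting.
 */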
struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}
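
/*
 * Worked example (assuming the usual kernel constants MAX_PRIO = 140
 * and MAX_USER_PRIO = 40, so MAX_USER_PRIO / 2 = 20):
 *
 *   nice -20 (prio 100): SCALE_PRIO(4 * DEF, 100) = 4 * DEF * 40 / 20
 *                        = 8 * DEF_SPU_TIMESLICE	(~800 msecs)
 *   nice   0 (prio 120): SCALE_PRIO(DEF, 120) = DEF * 20 / 20
 *                        = DEF_SPU_TIMESLICE		(~100 msecs)
 *   nice  19 (prio 139): SCALE_PRIO(DEF, 139) = DEF * 1 / 20, which the
 *                        max() clamps up to MIN_SPU_TIMESLICE
 *
 * All values are in spu scheduler ticks, i.e. multiples of SPUSCHED_TICK
 * CPU scheduler ticks.
 */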

/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * assert that the context is not on the runqueue, so it is safe
	 * to change its scheduling parameters.
	 */
	BUG_ON(!list_empty(&ctx->rq));

	/*
	 * 32-Bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * by definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with. Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * TODO: the context may be loaded, so we may need to activate
	 * it again on a different node. But it shouldn't hurt anything
	 * to update its parameters, because we know that the scheduler
	 * is not actively looking at this field, since it is not on the
	 * runqueue. The context will be rescheduled on the proper node
	 * if it is timesliced or preempted.
	 */
	cpumask_copy(&ctx->cpus_allowed, &current->cpus_allowed);

	/* Save the current cpu id for spu interrupt routing. */
	ctx->last_ran = raw_smp_processor_id();
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		node = ctx->spu->node;

		/*
		 * Take list_mutex to sync with find_victim().
		 */
		mutex_lock(&cbe_spu_info[node].list_mutex);
		__spu_update_sched_info(ctx);
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	} else {
		__spu_update_sched_info(ctx);
	}
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		const struct cpumask *mask = cpumask_of_node(node);

		if (cpumask_intersects(mask, &ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

void do_notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 *
	 * When the awakened processes see their "notify_active" flag is set,
	 * they will call spu_switch_notify().
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu: physical spu to bind to
 * @ctx: context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	spu_context_trace(spu_bind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu_associate_mm(spu, ctx->owner);

	spin_lock_irq(&spu->register_lock);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spin_unlock_irq(&spu->register_lock);

	spu_unmap_mappings(ctx);

	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_USER);
}

/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}

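/*
 * Assign every context in the gang a position relative to the affinity
 * reference context (a summary added for clarity, derived from the two
 * loop directions below): the reference context itself gets aff_offset
 * 0, contexts following it in the gang's affinity list get 1, 2, ...,
 * and contexts preceding it get -1, -2, ...
 */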
static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}

static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 * used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		/*
		 * "available_spus" counts how many spus are not potentially
		 * going to be used by other affinity gangs whose reference
		 * context is already in place. Although this code seeks to
		 * avoid having affinity gangs with a summed amount of
		 * contexts bigger than the amount of spus in the node,
		 * this may happen sporadically. In this case, available_spus
		 * becomes negative, which is harmless.
		 */
		int available_spus;

		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		available_spus = 0;
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
					&& spu->ctx->gang->aff_ref_spu)
				available_spus -= spu->ctx->gang->contexts;
			available_spus++;
		}
		if (available_spus < ctx->gang->contexts) {
			mutex_unlock(&cbe_spu_info[node].list_mutex);
			continue;
		}

		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;
}

static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
							lowest_offset);
}

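/*
 * Resolve an affinity offset to a physical spu: starting from the
 * gang's reference spu, walk @offset schedulable spus forward along
 * the node's affinity list for a positive offset, or backward for a
 * negative one.
 */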
static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}

/*
 * has_affinity is called each time a context is going to be scheduled.
 * It returns true if the context's gang has an affinity reference spu,
 * setting up the reference location first if necessary.
 */
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (atomic_read(&ctx->gang->aff_sched_count) == 0)
		ctx->gang->aff_ref_spu = NULL;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu: physical spu to unbind from
 * @ctx: context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	u32 status;

	spu_context_trace(spu_unbind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang)
		/*
		 * If ctx->gang->aff_sched_count is positive, SPU affinity is
		 * being considered in this gang. Using atomic_dec_if_positive
		 * allows us to skip an explicit check for affinity in this
		 * gang.
		 */
		atomic_dec_if_positive(&ctx->gang->aff_sched_count);

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);

	spin_lock_irq(&spu->register_lock);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;
	spin_unlock_irq(&spu->register_lock);

	spu_associate_mm(spu, NULL);

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;

	if (spu_stopped(ctx, &status))
		wake_up_all(&ctx->stop_wq);
}

/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx: context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void spu_add_to_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_add_to_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

void spu_del_from_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_del_from_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	/*
	 * The caller must explicitly wait for a context to be loaded
	 * if the nosched flag is set.  If NOSCHED is not set, the caller
	 * queues the context and waits for an spu event or error.
	 */
	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

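/*
 * spu_get_idle - find and reserve an idle spu for @ctx.  If the context
 * belongs to a gang with SPU affinity, first try the spu that the
 * gang's reference location and the context's aff_offset point at.
 * Otherwise scan the nodes round-robin, starting at the local node.
 * On success the spu is marked SPU_USED; returns NULL if no idle spu
 * was found.
 */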
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	spu_context_nospu_trace(spu_get_idle__enter, ctx);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			atomic_dec(&ctx->gang->aff_sched_count);
			goto not_found;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

 not_found:
	spu_context_nospu_trace(spu_get_idle__not_found, ctx);
	return NULL;

 found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	spu_context_trace(spu_get_idle__found, ctx, spu);
	spu_init_channels(spu);
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx: candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	spu_context_nospu_trace(spu_find_victim__enter, ctx);

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
			    (!victim || tmp->prio > victim->prio)) {
				victim = spu->ctx;
			}
		}
		if (victim)
			get_spu_context(victim);
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 *
			 * XXX if the highest priority context is locked,
			 * this can loop a long time.  Might be better to
			 * look at another context or give up after X retries.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				put_spu_context(victim);
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu || victim->prio <= ctx->prio) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				put_spu_context(victim);
				victim = NULL;
				goto restart;
			}

			spu_context_trace(__spu_deactivate__unload, ctx, spu);

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
				spu_add_to_rq(victim);

			mutex_unlock(&victim->state_mutex);
			put_spu_context(victim);

			return spu;
		}
	}

	return NULL;
}

static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;
	int success = 0;

	spu_set_timeslice(ctx);

	mutex_lock(&cbe_spu_info[node].list_mutex);
	if (spu->ctx == NULL) {
		spu_bind_context(spu, ctx);
		cbe_spu_info[node].nr_active++;
		spu->alloc_state = SPU_USED;
		success = 1;
	}
	mutex_unlock(&cbe_spu_info[node].list_mutex);

	if (success)
		wake_up_all(&ctx->run_wq);
	else
		spu_add_to_rq(ctx);
}

static void spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	/* not a candidate for interruptible because it's called either
	   from the scheduler thread or from spu_deactivate */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state == SPU_STATE_SAVED)
		__spu_schedule(spu, ctx);
	spu_release(ctx);
}

/**
 * spu_unschedule - remove a context from a spu, and possibly release it.
 * @spu: The SPU to unschedule from
 * @ctx: The context currently scheduled on the SPU
 * @free_spu: Whether to free the SPU for other contexts
 *
 * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
 * SPU is made available for other contexts (i.e. may be returned by
 * spu_get_idle). If this is zero, the caller is expected to schedule another
 * context to this spu.
 *
 * Should be called with ctx->state_mutex held.
 */
static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
		int free_spu)
{
	int node = spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	cbe_spu_info[node].nr_active--;
	if (free_spu)
		spu->alloc_state = SPU_FREE;
	spu_unbind_context(spu, ctx);
	ctx->stats.invol_ctx_switch++;
	spu->stats.invol_ctx_switch++;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx: spu context to schedule
 * @flags: flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	struct spu *spu;

	/*
	 * If there are multiple threads waiting for a single context
	 * only one actually binds the context while the others will
	 * only be able to acquire the state_mutex once the context
	 * already is in runnable state.
	 */
	if (ctx->spu)
		return 0;

spu_activate_top:
	if (signal_pending(current))
		return -ERESTARTSYS;

	spu = spu_get_idle(ctx);
	/*
	 * If this is a realtime thread we try to get it running by
	 * preempting a lower priority thread.
	 */
	if (!spu && rt_prio(ctx->prio))
		spu = find_victim(ctx);
	if (spu) {
		unsigned long runcntl;

		runcntl = ctx->ops->runcntl_read(ctx);
		__spu_schedule(spu, ctx);
		if (runcntl & SPU_RUNCNTL_RUNNABLE)
			spuctx_switch_state(ctx, SPU_UTIL_USER);

		return 0;
	}

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spu_prio_wait(ctx);
		goto spu_activate_top;
	}

	spu_add_to_rq(ctx);

	return 0;
}

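/*
 * Note on priorities: lower numeric values mean higher priority, so the
 * find_first_bit() below yields the best-priority non-empty runq list.
 * The @prio argument is an exclusive upper bound; e.g. spusched_tick()
 * passes ctx->prio + 1 to look for a waiting context of equal or higher
 * priority than the one currently running.
 */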
/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_unschedule(spu, ctx, new == NULL);
			if (new) {
				if (new->flags & SPU_CREATE_NOSCHED)
					wake_up(&new->stop_wq);
				else {
					spu_release(ctx);
					spu_schedule(spu, new);
					/* this one can't easily be made
					   interruptible */
					mutex_lock(&ctx->state_mutex);
				}
			}
		}
	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx: spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_deactivate__enter, ctx);
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx: spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_yield__enter, ctx);
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

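/*
 * spusched_tick runs once per SPUSCHED_TICK for every loaded context:
 * it decrements the context's time slice and, once that reaches zero,
 * tries to replace the context with a waiting one of equal or higher
 * priority.  NOSCHED and SCHED_FIFO contexts are never timesliced.
 */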
static noinline void spusched_tick(struct spu_context *ctx)
{
	struct spu_context *new = NULL;
	struct spu *spu = NULL;

	if (spu_acquire(ctx))
		BUG();	/* a kernel thread never has signals pending */

	if (ctx->state != SPU_STATE_RUNNABLE)
		goto out;
	if (ctx->flags & SPU_CREATE_NOSCHED)
		goto out;
	if (ctx->policy == SCHED_FIFO)
		goto out;

	if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
		goto out;

	spu = ctx->spu;

	spu_context_trace(spusched_tick__preempt, ctx, spu);

	new = grab_runnable_context(ctx->prio + 1, spu->node);
	if (new) {
		spu_unschedule(spu, ctx, 0);
		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
			spu_add_to_rq(ctx);
	} else {
		spu_context_nospu_trace(spusched_tick__newslice, ctx);
		if (!ctx->time_slice)
			ctx->time_slice++;
	}
out:
	spu_release(ctx);

	if (new)
		spu_schedule(spu, new);
}

/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(void)
{
	unsigned long active_tasks; /* fixed-point */

	active_tasks = count_active_contexts() * FIXED_1;
	CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
}
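
/*
 * Background note (added for context; see the scheduler's loadavg
 * definitions for the canonical form): CALC_LOAD is the kernel's usual
 * exponentially-decaying average in FSHIFT-bit fixed point,
 *
 *	load = (load * exp + active * (FIXED_1 - exp)) >> FSHIFT
 *
 * with EXP_1/EXP_5/EXP_15 chosen so that the three entries decay over
 * 1, 5 and 15 minutes when updated every LOAD_FREQ (about 5 seconds).
 */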

static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
}

static void spuloadavg_wake(unsigned long data)
{
	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
	spu_calc_load();
}

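/*
 * The spu scheduler tick is timer-driven: spusched_wake() rearms
 * spusched_timer and wakes this kernel thread, which then walks every
 * loaded context and runs spusched_tick() on it.  Each context is
 * pinned with get_spu_context() so the node's list_mutex can be
 * dropped while the tick runs.
 */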
static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			struct mutex *mtx = &cbe_spu_info[node].list_mutex;

			mutex_lock(mtx);
			list_for_each_entry(spu, &cbe_spu_info[node].spus,
					cbe_list) {
				struct spu_context *ctx = spu->ctx;

				if (ctx) {
					get_spu_context(ctx);
					mutex_unlock(mtx);
					spusched_tick(ctx);
					mutex_lock(mtx);
					put_spu_context(ctx);
				}
			}
			mutex_unlock(mtx);
		}
	}

	return 0;
}

void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state)
{
	unsigned long long curtime;
	signed long long delta;
	struct spu *spu;
	enum spu_utilization_state old_state;
	int node;

	curtime = ktime_get_ns();
	delta = curtime - ctx->stats.tstamp;

	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
	WARN_ON(delta < 0);

	spu = ctx->spu;
	old_state = ctx->stats.util_state;
	ctx->stats.util_state = new_state;
	ctx->stats.tstamp = curtime;

	/*
	 * Update the physical SPU utilization statistics.
	 */
	if (spu) {
		ctx->stats.times[old_state] += delta;
		spu->stats.times[old_state] += delta;
		spu->stats.util_state = new_state;
		spu->stats.tstamp = curtime;
		node = spu->node;
		if (old_state == SPU_UTIL_USER)
			atomic_dec(&cbe_spu_info[node].busy_spus);
		if (new_state == SPU_UTIL_USER)
			atomic_inc(&cbe_spu_info[node].busy_spus);
	}
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

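/*
 * The avenrun values are fixed-point with FSHIFT fractional bits
 * (FIXED_1 == 1 << FSHIFT).  LOAD_INT() extracts the integer part and
 * LOAD_FRAC() the first two decimal places; adding FIXED_1/200 (i.e.
 * 0.005) below rounds to those two places, mirroring /proc/loadavg.
 */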
static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side...),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		task_active_pid_ns(current)->last_pid);
	return 0;
}

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);
	setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	mod_timer(&spuloadavg_timer, 0);

	entry = proc_create("spu_loadavg", 0, NULL, &spu_loadavg_fops);
	if (!entry)
		goto out_stop_kthread;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	del_timer_sync(&spuloadavg_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}