 	ctx->owner = get_task_mm(current);
 	if (gang)
 		spu_gang_add_ctx(gang, ctx);
+	ctx->prio = current->prio;
 	goto out;
 out_free:
 	kfree(ctx);
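This is where the move starts: the context takes a one-time snapshot of the creating task's priority, so the scheduler no longer needs to consult current->prio on every decision. A minimal sketch of how the allocator tail reads once the hunk is applied, assuming the usual spufs allocator name alloc_spu_context() and a kzalloc-based setup for everything elided:

	struct spu_context *alloc_spu_context(struct spu_gang *gang)
	{
		struct spu_context *ctx;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);	/* assumed, not in excerpt */
		if (!ctx)
			goto out;
		/* ... remaining init elided; failures jump to out_free ... */
		ctx->owner = get_task_mm(current);
		if (gang)
			spu_gang_add_ctx(gang, ctx);
		ctx->prio = current->prio;	/* snapshot once, at creation */
		goto out;
	out_free:
		kfree(ctx);
		ctx = NULL;
	out:
		return ctx;
	}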
 		ret = spu_activate(ctx, 0);
 		if (ret)
 			goto out_unlock;
-	} else
-		ctx->spu->prio = current->prio;
+	}
 	return 0;
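With the priority cached in the context, the run path loses the else arm that used to poke current->prio into the bound physical SPU. What remains is just the activation attempt; the enclosing condition is not part of the excerpt, so it is stubbed here:

	if (ctx_needs_spu) {	/* hypothetical name; the real test is not shown */
		ret = spu_activate(ctx, 0);
		if (ret)
			goto out_unlock;
	}
	return 0;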
 	ctx->spu = spu;
 	ctx->ops = &spu_hw_ops;
 	spu->pid = current->pid;
-	spu->prio = current->prio;
 	spu->mm = ctx->owner;
 	mm_needs_global_tlbie(spu->mm);
 	spu->ibox_callback = spufs_ibox_callback;
 	spu->dma_callback = NULL;
 	spu->mm = NULL;
 	spu->pid = 0;
-	spu->prio = MAX_PRIO;
 	ctx->ops = &spu_backing_ops;
 	ctx->spu = NULL;
 	spu->flags = 0;
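The two hunks above are symmetric halves of the same cleanup: binding a context to a physical SPU no longer copies a priority into struct spu, and unbinding no longer resets it to MAX_PRIO. Condensed, and using only fields visible in the excerpt, the pair now reads:

	/* bind */
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->mm = ctx->owner;		/* no spu->prio write anymore */

	/* unbind */
	spu->mm = NULL;
	spu->pid = 0;
	ctx->ops = &spu_backing_ops;
	ctx->spu = NULL;		/* and no MAX_PRIO reset */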
 static void spu_prio_wait(struct spu_context *ctx, u64 flags)
 {
-	int prio = current->prio;
+	int prio = ctx->prio;
 	wait_queue_head_t *wq = &spu_prio->waitq[prio];
 	DEFINE_WAIT(wait);
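spu_prio_wait() selects a wait queue by priority, so it now keys off the context's cached value instead of whatever current->prio happens to be at sleep time. Only the three lines above are in the excerpt; a sketch of the rest of the function, assuming the conventional prepare/schedule/finish sleep:

	static void spu_prio_wait(struct spu_context *ctx, u64 flags)
	{
		int prio = ctx->prio;		/* was current->prio */
		wait_queue_head_t *wq = &spu_prio->waitq[prio];
		DEFINE_WAIT(wait);

		prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
		if (!signal_pending(current))
			schedule();		/* woken when an SPU frees up */
		finish_wait(wq, &wait);
	}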
 				 __FUNCTION__, spu->number, spu->node);
 			spu_deactivate(ctx);
 			need_yield = 1;
-		} else {
-			spu->prio = MAX_PRIO;
 		}
 	}
 	mutex_unlock(&ctx->state_mutex);
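On yield there is no longer a per-SPU priority to park at MAX_PRIO: either a better-priority waiter exists and the context is deactivated, or nothing needs doing. A sketch of the surviving control flow; the two outer tests and the trailing yield() are assumptions, since only the inner body, braces, and unlock appear in the excerpt:

	if (mutex_trylock(&ctx->state_mutex)) {			/* assumed */
		if ((spu = ctx->spu) != NULL) {			/* assumed */
			if (waiter_prio < MAX_PRIO) {		/* hypothetical test */
				pr_debug("%s: yielding SPU %d NODE %d\n",
					 __FUNCTION__, spu->number, spu->node);
				spu_deactivate(ctx);
				need_yield = 1;
			}
		}
		mutex_unlock(&ctx->state_mutex);
	}
	if (need_yield)
		yield();					/* assumed */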
 	struct list_head gang_list;
 	struct spu_gang *gang;
+
+	/* scheduler fields */
+	int prio;
 };
 
 struct spu_gang {
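struct spu_context is now the sole owner of the priority. Code that starts from the physical SPU has to go through the bound context instead; a hypothetical helper (not part of the patch) makes the new lookup direction explicit, using the spu->ctx back-pointer visible in the xmon hunk below and MAX_PRIO as an assumed idle value:

	/* hypothetical, for illustration only */
	static inline int spu_effective_prio(struct spu *spu)
	{
		return spu->ctx ? spu->ctx->prio : MAX_PRIO;
	}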
 	DUMP_FIELD(spu, "0x%lx", irqs[2]);
 	DUMP_FIELD(spu, "0x%x", slb_replace);
 	DUMP_FIELD(spu, "%d", pid);
-	DUMP_FIELD(spu, "%d", prio);
 	DUMP_FIELD(spu, "0x%p", mm);
 	DUMP_FIELD(spu, "0x%p", ctx);
 	DUMP_FIELD(spu, "0x%p", rq);
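The debugger dump drops the field together with the structure member; once spu->prio is gone, the line would no longer compile. DUMP_FIELD is, in effect, a stringify-and-print macro keyed on the member name, roughly this shape (a sketch, not xmon's exact definition):

	#define DUMP_FIELD(obj, format, field) \
		printf("  %-20s = " format "\n", #field, obj->field)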
 	struct spu_runqueue *rq;
 	unsigned long long timestamp;
 	pid_t pid;
-	int prio;
 	int class_0_pending;
 	spinlock_t register_lock;
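With this last hunk, struct spu carries no scheduling priority at all; priority lives in the context and in the scheduler's priority-indexed wait queues. The only structural requirement those queues impose is visible in the spu_prio->waitq[prio] access earlier, which implies an array of this shape (the type name and any further members are assumptions):

	struct spu_prio_array {
		wait_queue_head_t waitq[MAX_PRIO];	/* one queue per priority level */
	};
	static struct spu_prio_array *spu_prio;	/* assumed global, as used above */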