[POWERPC] spufs: reorganize spu_run_init
This cleans up spu_run_init so that it does all of the spu initialization
for spufs_run_spu.  It initializes the spu context as much as possible
before it activates the spu and writes the runcntl register.

Signed-off-by: Luke Browning <lukebr@linux.vnet.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent d6ad39bc53
commit 91569531d1

2 changed files with 58 additions and 36 deletions
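For orientation, this is roughly what spu_run_init looks like once the change is applied, assembled from the two hunks below (the page does not name the two changed files, but the hunk headers point at the spufs run and scheduler code, presumably run.c and sched.c in arch/powerpc/platforms/cell/spufs). It is only a sketch: the unchanged lines that fall between the two hunks, where privcntl and runcntl are presumably chosen for the normal case, are elided.

static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
	unsigned long runcntl;
	int ret;

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_ISOLATE) {
		/* activate the spu first; the isolated loader runs on it */
		if (ctx->state == SPU_STATE_SAVED) {
			spu_set_timeslice(ctx);

			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		}

		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
			ret = spu_setup_isolated(ctx);
			if (ret)
				return ret;
		}

		/* keep a runcntl value userspace may already have set
		 * (eg, an isolated exit), otherwise default to RUNNABLE */
		runcntl = ctx->ops->runcntl_read(ctx) &
			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
		if (runcntl == 0)
			runcntl = SPU_RUNCNTL_RUNNABLE;

		spuctx_switch_state(ctx, SPU_UTIL_USER);
		ctx->ops->runcntl_write(ctx, runcntl);
	} else {
		unsigned long privcntl;

		/* ... unchanged lines between the two hunks, which
		 * presumably pick the privcntl and runcntl values for
		 * the normal case, are elided here ... */

		ctx->ops->npc_write(ctx, *npc);
		ctx->ops->privcntl_write(ctx, privcntl);

		if (ctx->state == SPU_STATE_SAVED) {
			spu_set_timeslice(ctx);
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		}

		spuctx_switch_state(ctx, SPU_UTIL_USER);
		ctx->ops->runcntl_write(ctx, runcntl);
	}

	return 0;
}

In both branches the context is now fully set up and activated before the utilization state is switched to SPU_UTIL_USER and runcntl is written, which is the ordering the commit message describes.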
@@ -152,23 +152,41 @@ static int spu_setup_isolated(struct spu_context *ctx)
 static int spu_run_init(struct spu_context *ctx, u32 *npc)
 {
 	unsigned long runcntl;
+	int ret;
 
 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
 	if (ctx->flags & SPU_CREATE_ISOLATE) {
+		/*
+		 * Force activation of spu. Isolated state assumes that
+		 * special loader context is loaded and running on spu.
+		 */
+		if (ctx->state == SPU_STATE_SAVED) {
+			spu_set_timeslice(ctx);
+
+			ret = spu_activate(ctx, 0);
+			if (ret)
+				return ret;
+		}
+
 		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
-			int ret = spu_setup_isolated(ctx);
+			ret = spu_setup_isolated(ctx);
 			if (ret)
 				return ret;
 		}
 
-		/* if userspace has set the runcntrl register (eg, to issue an
-		 * isolated exit), we need to re-set it here */
+		/*
+		 * If userspace has set the runcntrl register (eg, to
+		 * issue an isolated exit), we need to re-set it here
+		 */
 		runcntl = ctx->ops->runcntl_read(ctx) &
 			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
 		if (runcntl == 0)
 			runcntl = SPU_RUNCNTL_RUNNABLE;
 
+		spuctx_switch_state(ctx, SPU_UTIL_USER);
+		ctx->ops->runcntl_write(ctx, runcntl);
+
 	} else {
 		unsigned long privcntl;
 
@@ -180,12 +198,18 @@ static int spu_run_init(struct spu_context *ctx, u32 *npc)
 
 		ctx->ops->npc_write(ctx, *npc);
 		ctx->ops->privcntl_write(ctx, privcntl);
-	}
 
-	ctx->ops->runcntl_write(ctx, runcntl);
+		if (ctx->state == SPU_STATE_SAVED) {
+			spu_set_timeslice(ctx);
+			ret = spu_activate(ctx, 0);
+			if (ret)
+				return ret;
+		}
 
-	spuctx_switch_state(ctx, SPU_UTIL_USER);
+		spuctx_switch_state(ctx, SPU_UTIL_USER);
+		ctx->ops->runcntl_write(ctx, runcntl);
+	}
 
 	return 0;
 }
 
@@ -323,25 +347,8 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 	ctx->event_return = 0;
 
 	spu_acquire(ctx);
-	if (ctx->state == SPU_STATE_SAVED) {
-		__spu_update_sched_info(ctx);
-		spu_set_timeslice(ctx);
 
-		ret = spu_activate(ctx, 0);
-		if (ret) {
-			spu_release(ctx);
-			goto out;
-		}
-	} else {
-		/*
-		 * We have to update the scheduling priority under active_mutex
-		 * to protect against find_victim().
-		 *
-		 * No need to update the timeslice ASAP, it will get updated
-		 * once the current one has expired.
-		 */
-		spu_update_sched_info(ctx);
-	}
+	spu_update_sched_info(ctx);
 
 	ret = spu_run_init(ctx, npc);
 	if (ret) {
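The hunk above is the caller-side half of the change: spufs_run_spu no longer branches on whether the context is saved, because timeslice setup and activation have moved into spu_run_init. After the change its setup path reduces to roughly the following sketch (the body of the final error check sits outside the hunk and is only indicated by a comment):

	spu_acquire(ctx);

	/* safe for both saved and runnable contexts after this commit */
	spu_update_sched_info(ctx);

	/* does npc/privcntl setup, activation and the runcntl write */
	ret = spu_run_init(ctx, npc);
	if (ret) {
		/* ... release the context and bail out (not shown in the hunk) ... */
	}

The remaining hunks are against the spufs scheduler and make spu_update_sched_info safe to call this way for both saved and runnable contexts.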
@@ -104,6 +104,12 @@ void spu_set_timeslice(struct spu_context *ctx)
  */
 void __spu_update_sched_info(struct spu_context *ctx)
 {
+	/*
+	 * assert that the context is not on the runqueue, so it is safe
+	 * to change its scheduling parameters.
+	 */
+	BUG_ON(!list_empty(&ctx->rq));
+
 	/*
 	 * 32-Bit assignments are atomic on powerpc, and we don't care about
 	 * memory ordering here because retrieving the controlling thread is
@@ -124,23 +130,28 @@ void __spu_update_sched_info(struct spu_context *ctx)
 	ctx->policy = current->policy;
 
 	/*
-	 * A lot of places that don't hold list_mutex poke into
-	 * cpus_allowed, including grab_runnable_context which
-	 * already holds the runq_lock. So abuse runq_lock
-	 * to protect this field as well.
+	 * TO DO: the context may be loaded, so we may need to activate
+	 * it again on a different node. But it shouldn't hurt anything
+	 * to update its parameters, because we know that the scheduler
+	 * is not actively looking at this field, since it is not on the
+	 * runqueue. The context will be rescheduled on the proper node
+	 * if it is timesliced or preempted.
 	 */
-	spin_lock(&spu_prio->runq_lock);
 	ctx->cpus_allowed = current->cpus_allowed;
-	spin_unlock(&spu_prio->runq_lock);
 }
 
 void spu_update_sched_info(struct spu_context *ctx)
 {
-	int node = ctx->spu->node;
+	int node;
 
-	mutex_lock(&cbe_spu_info[node].list_mutex);
-	__spu_update_sched_info(ctx);
-	mutex_unlock(&cbe_spu_info[node].list_mutex);
+	if (ctx->state == SPU_STATE_RUNNABLE) {
+		node = ctx->spu->node;
+		mutex_lock(&cbe_spu_info[node].list_mutex);
+		__spu_update_sched_info(ctx);
+		mutex_unlock(&cbe_spu_info[node].list_mutex);
+	} else {
+		__spu_update_sched_info(ctx);
+	}
 }
 
 static int __node_allowed(struct spu_context *ctx, int node)
@@ -604,6 +615,10 @@ static struct spu *find_victim(struct spu_context *ctx)
 			 * higher priority contexts before lower priority
 			 * ones, so this is safe until we introduce
 			 * priority inheritance schemes.
+			 *
+			 * XXX if the highest priority context is locked,
+			 * this can loop a long time. Might be better to
+			 * look at another context or give up after X retries.
 			 */
 			if (!mutex_trylock(&victim->state_mutex)) {
 				victim = NULL;