author		Christoph Hellwig <hch@lst.de>	2007-04-23 21:08:06 +0200
committer	Arnd Bergmann <arnd@klappe.arndb.de>	2007-04-23 21:18:52 +0200
commit		0887309589824fb1c3744c69a330c99c369124a0
tree		df69c7be00e75c240302a37b78964c10419615c9
parent		390cbb56a731546edc0f35fbc4c5045676467581
[POWERPC] spufs: use cancel_rearming_delayed_workqueue when stopping spu contexts
The scheduler workqueue may rearm itself and deadlock when we try to
stop it. Put a flag in place so the work is skipped when we're tearing
down the context.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
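To make the race concrete, here is a minimal, self-contained sketch of the
pattern the patch applies, written against the stock kernel workqueue and
bitops APIs. All names here (my_ctx, MY_EXITING, my_wq, my_tick, ...) are
illustrative and not part of the patch:

#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>

struct my_ctx {
	unsigned long		sched_flags;	/* bit field, see below */
	struct delayed_work	work;
};

enum {
	MY_EXITING = 0,		/* bit number in ->sched_flags */
};

static struct workqueue_struct *my_wq;

static void my_tick(struct work_struct *work)
{
	struct my_ctx *ctx =
		container_of(work, struct my_ctx, work.work);

	/* Bail out before taking any lock the stopping path may hold,
	 * and without rearming, once teardown has started. */
	if (test_bit(MY_EXITING, &ctx->sched_flags))
		return;

	/* ... periodic work goes here ... */

	/* Rearm: this is the step that races with a plain
	 * cancel_delayed_work() and motivates the flag. */
	queue_delayed_work(my_wq, &ctx->work, HZ);
}

static void my_start(struct my_ctx *ctx)
{
	/* Clear the flag so a context that was stopped earlier can be
	 * restarted, mirroring what spu_start_tick() does below. */
	clear_bit(MY_EXITING, &ctx->sched_flags);
	queue_delayed_work(my_wq, &ctx->work, HZ);
}

static void my_stop(struct my_ctx *ctx)
{
	/* Setting the flag first closes the window in which an
	 * in-flight my_tick() rearms itself right after the cancel. */
	set_bit(MY_EXITING, &ctx->sched_flags);
	cancel_delayed_work(&ctx->work);
}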
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs')
-rw-r--r--	arch/powerpc/platforms/cell/spufs/sched.c	25
-rw-r--r--	arch/powerpc/platforms/cell/spufs/spufs.h	2
2 files changed, 23 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index c9561582ce2a..003e330fc76f 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -71,14 +71,25 @@ static inline int node_allowed(int node)
void spu_start_tick(struct spu_context *ctx)
{
- if (ctx->policy == SCHED_RR)
+ if (ctx->policy == SCHED_RR) {
+ /*
+ * Make sure the exiting bit is cleared.
+ */
+ clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
+ }
}
void spu_stop_tick(struct spu_context *ctx)
{
- if (ctx->policy == SCHED_RR)
+ if (ctx->policy == SCHED_RR) {
+ /*
+ * While the work can be rearming normally, setting this flag
+ * makes sure it does not rearm itself anymore.
+ */
+ set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
cancel_delayed_work(&ctx->sched_work);
+ }
}
void spu_sched_tick(struct work_struct *work)
@@ -88,6 +99,14 @@ void spu_sched_tick(struct work_struct *work)
struct spu *spu;
int rearm = 1;
+ /*
+ * If this context is being stopped, avoid rescheduling from the
+ * scheduler tick because we would block on the state_mutex.
+ * The caller will yield the spu later on anyway.
+ */
+ if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
+ return;
+
mutex_lock(&ctx->state_mutex);
spu = ctx->spu;
if (spu) {
@@ -377,7 +396,7 @@ static struct spu *find_victim(struct spu_context *ctx)
* @ctx: spu context to schedule
* @flags: flags (currently ignored)
*
- * Tries to find a free spu to run @ctx. If no free spu is availble
+ * Tries to find a free spu to run @ctx. If no free spu is available
* add the context to the runqueue so it gets woken up once an spu
* is available.
*/
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 5c4e47d69d79..f418378abdff 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -41,7 +41,7 @@ struct spu_gang;
/* ctx->sched_flags */
enum {
- SPU_SCHED_WAKE = 0, /* currently unused */
+ SPU_SCHED_EXITING = 0,
};
struct spu_context {
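A teardown path built on the sketch above could then look like this. The
ordering is illustrative, not taken from spufs; flush_workqueue() and
kfree() are the stock kernel APIs (kfree needs <linux/slab.h>):

static void my_destroy(struct my_ctx *ctx)
{
	my_stop(ctx);		/* set MY_EXITING, cancel pending work */
	flush_workqueue(my_wq);	/* an already-running my_tick() sees the
				 * flag and returns without rearming */
	kfree(ctx);		/* safe: no further tick references ctx */
}

cancel_delayed_work() alone does not wait for a work function that is
already executing, which is why the flag plus the early return in the
tick handler are what actually stop the rearming cycle.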