[POWERPC] avoid SPU_ACTIVATE_NOWAKE optimization

The SPU_ACTIVATE_NOWAKE optimization was added recently but is still buggy,
so back it out for now.

Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>

authored by Christoph Hellwig and committed by Arnd Bergmann 50b520d4 aa0ed2bd

+6 -11
+2 -2
arch/powerpc/platforms/cell/spufs/run.c
··· 143 int ret; 144 unsigned long runcntl = SPU_RUNCNTL_RUNNABLE; 145 146 - ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE); 147 if (ret) 148 return ret; 149 ··· 155 spu_release(ctx); 156 ret = spu_setup_isolated(ctx); 157 if (!ret) 158 - ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE); 159 } 160 161 /* if userspace has set the runcntrl register (eg, to issue an
··· 143 int ret; 144 unsigned long runcntl = SPU_RUNCNTL_RUNNABLE; 145 146 + ret = spu_acquire_runnable(ctx, 0); 147 if (ret) 148 return ret; 149 ··· 155 spu_release(ctx); 156 ret = spu_setup_isolated(ctx); 157 if (!ret) 158 + ret = spu_acquire_runnable(ctx, 0); 159 } 160 161 /* if userspace has set the runcntrl register (eg, to issue an
+2 -5
arch/powerpc/platforms/cell/spufs/sched.c
··· 263 { 264 DEFINE_WAIT(wait); 265 266 - set_bit(SPU_SCHED_WAKE, &ctx->sched_flags); 267 prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE); 268 if (!signal_pending(current)) { 269 mutex_unlock(&ctx->state_mutex); ··· 271 } 272 __set_current_state(TASK_RUNNING); 273 remove_wait_queue(&ctx->stop_wq, &wait); 274 - clear_bit(SPU_SCHED_WAKE, &ctx->sched_flags); 275 } 276 277 /** ··· 290 best = sched_find_first_bit(spu_prio->bitmap); 291 if (best < MAX_PRIO) { 292 struct spu_context *ctx = spu_grab_context(best); 293 - if (ctx && test_bit(SPU_SCHED_WAKE, &ctx->sched_flags)) 294 wake_up(&ctx->stop_wq); 295 } 296 spin_unlock(&spu_prio->runq_lock); ··· 412 } 413 414 spu_add_to_rq(ctx); 415 - if (!(flags & SPU_ACTIVATE_NOWAKE)) 416 - spu_prio_wait(ctx); 417 spu_del_from_rq(ctx); 418 } while (!signal_pending(current)); 419
··· 263 { 264 DEFINE_WAIT(wait); 265 266 prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE); 267 if (!signal_pending(current)) { 268 mutex_unlock(&ctx->state_mutex); ··· 272 } 273 __set_current_state(TASK_RUNNING); 274 remove_wait_queue(&ctx->stop_wq, &wait); 275 } 276 277 /** ··· 292 best = sched_find_first_bit(spu_prio->bitmap); 293 if (best < MAX_PRIO) { 294 struct spu_context *ctx = spu_grab_context(best); 295 + if (ctx) 296 wake_up(&ctx->stop_wq); 297 } 298 spin_unlock(&spu_prio->runq_lock); ··· 414 } 415 416 spu_add_to_rq(ctx); 417 + spu_prio_wait(ctx); 418 spu_del_from_rq(ctx); 419 } while (!signal_pending(current)); 420
+2 -4
arch/powerpc/platforms/cell/spufs/spufs.h
··· 41 42 /* ctx->sched_flags */ 43 enum { 44 - SPU_SCHED_WAKE = 0, 45 }; 46 47 struct spu_context { ··· 191 int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags); 192 void spu_acquire_saved(struct spu_context *ctx); 193 int spu_acquire_exclusive(struct spu_context *ctx); 194 - enum { 195 - SPU_ACTIVATE_NOWAKE = 1, 196 - }; 197 int spu_activate(struct spu_context *ctx, unsigned long flags); 198 void spu_deactivate(struct spu_context *ctx); 199 void spu_yield(struct spu_context *ctx);
··· 41 42 /* ctx->sched_flags */ 43 enum { 44 + SPU_SCHED_WAKE = 0, /* currently unused */ 45 }; 46 47 struct spu_context { ··· 191 int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags); 192 void spu_acquire_saved(struct spu_context *ctx); 193 int spu_acquire_exclusive(struct spu_context *ctx); 194 + 195 int spu_activate(struct spu_context *ctx, unsigned long flags); 196 void spu_deactivate(struct spu_context *ctx); 197 void spu_yield(struct spu_context *ctx);