Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/jk/spufs into merge

+17 -9
+8 -7
arch/powerpc/platforms/cell/spufs/run.c
···
             (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
         if (runcntl == 0)
             runcntl = SPU_RUNCNTL_RUNNABLE;
-    }
-
-    if (ctx->flags & SPU_CREATE_NOSCHED) {
-        spuctx_switch_state(ctx, SPU_UTIL_USER);
-        ctx->ops->runcntl_write(ctx, runcntl);
     } else {
         unsigned long privcntl;
···
         else
             privcntl = SPU_PRIVCNTL_MODE_NORMAL;
 
-        ctx->ops->npc_write(ctx, *npc);
         ctx->ops->privcntl_write(ctx, privcntl);
-        ctx->ops->runcntl_write(ctx, runcntl);
+        ctx->ops->npc_write(ctx, *npc);
+    }
+
+    ctx->ops->runcntl_write(ctx, runcntl);
+
+    if (ctx->flags & SPU_CREATE_NOSCHED) {
+        spuctx_switch_state(ctx, SPU_UTIL_USER);
+    } else {
 
         if (ctx->state == SPU_STATE_SAVED) {
             ret = spu_activate(ctx, 0);
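
The run.c change reorders spu_run_init(): privcntl and the npc are now programmed before the run-control register, and the runcntl_write() call is hoisted out of both branches so it happens exactly once, for isolated and normal contexts alike, with the NOSCHED case reduced to the utilisation-state switch. A minimal userspace sketch of the same "program all state first, set the run bit last, in one place" ordering; every name in it (fake_spu, spu_start, RUNCNTL_RUNNABLE) is invented for illustration:

#include <stdio.h>

#define RUNCNTL_RUNNABLE 1u

/* Toy model of a device with mode, program-counter and run registers. */
struct fake_spu {
    unsigned int privcntl;
    unsigned int npc;
    unsigned int runcntl;
};

static void spu_start(struct fake_spu *spu, unsigned int privcntl,
                      unsigned int npc)
{
    /* Program mode and entry point while the device is still stopped... */
    spu->privcntl = privcntl;
    spu->npc = npc;
    /* ...then set the run-control bit last, from a single place. */
    spu->runcntl = RUNCNTL_RUNNABLE;
}

int main(void)
{
    struct fake_spu spu = { 0, 0, 0 };

    spu_start(&spu, 0 /* normal mode */, 0x100 /* entry point */);
    printf("runcntl=%u npc=0x%x\n", spu.runcntl, spu.npc);
    return 0;
}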
+9 -2
arch/powerpc/platforms/cell/spufs/sched.c
···
 
         if (tmp && tmp->prio > ctx->prio &&
             !(tmp->flags & SPU_CREATE_NOSCHED) &&
-            (!victim || tmp->prio > victim->prio))
+            (!victim || tmp->prio > victim->prio)) {
             victim = spu->ctx;
+            get_spu_context(victim);
+        }
     }
     mutex_unlock(&cbe_spu_info[node].list_mutex);
 
···
          * look at another context or give up after X retries.
          */
         if (!mutex_trylock(&victim->state_mutex)) {
+            put_spu_context(victim);
             victim = NULL;
             goto restart;
         }
···
              * restart the search.
              */
             mutex_unlock(&victim->state_mutex);
+            put_spu_context(victim);
             victim = NULL;
             goto restart;
         }
···
                 spu_add_to_rq(victim);
 
             mutex_unlock(&victim->state_mutex);
+            put_spu_context(victim);
 
             return spu;
         }
···
                 struct spu_context *ctx = spu->ctx;
 
                 if (ctx) {
+                    get_spu_context(ctx);
                     mutex_unlock(mtx);
                     spusched_tick(ctx);
                     mutex_lock(mtx);
+                    put_spu_context(ctx);
                 }
             }
             mutex_unlock(mtx);
···
         node = spu->node;
         if (old_state == SPU_UTIL_USER)
             atomic_dec(&cbe_spu_info[node].busy_spus);
-        if (new_state == SPU_UTIL_USER);
+        if (new_state == SPU_UTIL_USER)
             atomic_inc(&cbe_spu_info[node].busy_spus);
     }
 }
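
The sched.c change closes a reference-counting race: both find_victim() and the spusched_tick() loop drop the mutex that keeps spu->ctx stable, so a reference is now taken with get_spu_context() while the lock is still held and released with put_spu_context() on every exit path. The final hunk also drops a stray semicolon that made the busy_spus increment unconditional. A minimal sketch of the get/put pattern using only C11 atomics; get_ctx/put_ctx mirror get_spu_context/put_spu_context, everything else here is hypothetical:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
    atomic_int refcount;
};

static struct ctx *get_ctx(struct ctx *c)
{
    atomic_fetch_add(&c->refcount, 1);
    return c;
}

static void put_ctx(struct ctx *c)
{
    /* Dropping the last reference frees the object. */
    if (atomic_fetch_sub(&c->refcount, 1) == 1)
        free(c);
}

int main(void)
{
    struct ctx *c = malloc(sizeof(*c));

    atomic_init(&c->refcount, 1);   /* owner's reference */

    /* "Lock held": the pointer is stable, so taking a reference is safe. */
    get_ctx(c);

    /*
     * "Lock dropped": our extra reference keeps c alive even if the
     * owner puts its reference concurrently.
     */
    printf("refcount=%d\n", atomic_load(&c->refcount));

    put_ctx(c);   /* our reference */
    put_ctx(c);   /* owner's reference; frees c */
    return 0;
}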