Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[CELL] oprofile: enable SPU switch notification to detect currently active SPU tasks

From: Maynard Johnson <mpjohn@us.ibm.com>

This patch adds to the capability of spu_switch_event_register so that
the caller is also notified of currently active SPU tasks.
Exports spu_switch_event_register and spu_switch_event_unregister so
that OProfile can get access to the notifications provided.

Signed-off-by: Maynard Johnson <mpjohn@us.ibm.com>
Signed-off-by: Carl Love <carll@us.ibm.com>
Signed-off-by: Bob Nelson <rrnelson@us.ibm.com>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Acked-by: Paul Mackerras <paulus@samba.org>

Authored by Bob Nelson and committed by Arnd Bergmann
36aaccc1 24140594

+55 -8
+17 -6
arch/powerpc/platforms/cell/spufs/run.c
··· 18 18 wake_up_all(&ctx->stop_wq); 19 19 } 20 20 21 - static inline int spu_stopped(struct spu_context *ctx, u32 * stat) 21 + static inline int spu_stopped(struct spu_context *ctx, u32 *stat) 22 22 { 23 23 struct spu *spu; 24 24 u64 pte_fault; 25 25 26 26 *stat = ctx->ops->status_read(ctx); 27 - if (ctx->state != SPU_STATE_RUNNABLE) 28 - return 1; 27 + 29 28 spu = ctx->spu; 29 + if (ctx->state != SPU_STATE_RUNNABLE || 30 + test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags)) 31 + return 1; 30 32 pte_fault = spu->dsisr & 31 33 (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED); 32 34 return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || spu->class_0_pending) ? ··· 126 124 return ret; 127 125 } 128 126 129 - static int spu_run_init(struct spu_context *ctx, u32 * npc) 127 + static int spu_run_init(struct spu_context *ctx, u32 *npc) 130 128 { 131 129 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); 132 130 ··· 160 158 return 0; 161 159 } 162 160 163 - static int spu_run_fini(struct spu_context *ctx, u32 * npc, 164 - u32 * status) 161 + static int spu_run_fini(struct spu_context *ctx, u32 *npc, 162 + u32 *status) 165 163 { 166 164 int ret = 0; 167 165 ··· 300 298 long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event) 301 299 { 302 300 int ret; 301 + struct spu *spu; 303 302 u32 status; 304 303 305 304 if (mutex_lock_interruptible(&ctx->run_mutex)) ··· 336 333 ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status)); 337 334 if (unlikely(ret)) 338 335 break; 336 + spu = ctx->spu; 337 + if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE, 338 + &ctx->sched_flags))) { 339 + if (!(status & SPU_STATUS_STOPPED_BY_STOP)) { 340 + spu_switch_notify(spu, ctx); 341 + continue; 342 + } 343 + } 339 344 340 345 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); 341 346
+32 -2
arch/powerpc/platforms/cell/spufs/sched.c
··· 204 204 205 205 static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier); 206 206 207 - static void spu_switch_notify(struct spu *spu, struct spu_context *ctx) 207 + void spu_switch_notify(struct spu *spu, struct spu_context *ctx) 208 208 { 209 209 blocking_notifier_call_chain(&spu_switch_notifier, 210 210 ctx ? ctx->object_id : 0, spu); 211 211 } 212 212 213 + static void notify_spus_active(void) 214 + { 215 + int node; 216 + 217 + /* 218 + * Wake up the active spu_contexts. 219 + * 220 + * When the awakened processes see their "notify_active" flag is set, 221 + * they will call spu_switch_notify(); 222 + */ 223 + for_each_online_node(node) { 224 + struct spu *spu; 225 + mutex_lock(&spu_prio->active_mutex[node]); 226 + list_for_each_entry(spu, &spu_prio->active_list[node], list) { 227 + struct spu_context *ctx = spu->ctx; 228 + set_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags); 229 + mb(); /* make sure any tasks woken up below */ 230 + /* can see the bit(s) set above */ 231 + wake_up_all(&ctx->stop_wq); 232 + } 233 + mutex_unlock(&spu_prio->active_mutex[node]); 234 + } 235 + } 236 + 213 237 int spu_switch_event_register(struct notifier_block * n) 214 238 { 215 - return blocking_notifier_chain_register(&spu_switch_notifier, n); 239 + int ret; 240 + ret = blocking_notifier_chain_register(&spu_switch_notifier, n); 241 + if (!ret) 242 + notify_spus_active(); 243 + return ret; 216 244 } 245 + EXPORT_SYMBOL_GPL(spu_switch_event_register); 217 246 218 247 int spu_switch_event_unregister(struct notifier_block * n) 219 248 { 220 249 return blocking_notifier_chain_unregister(&spu_switch_notifier, n); 221 250 } 251 + EXPORT_SYMBOL_GPL(spu_switch_event_unregister); 222 252 223 253 /** 224 254 * spu_bind_context - bind spu context to physical spu
+6
arch/powerpc/platforms/cell/spufs/spufs.h
··· 44 44 SPU_SCHED_WAS_ACTIVE, /* was active upon spu_acquire_saved() */ 45 45 }; 46 46 47 + /* ctx->sched_flags */ 48 + enum { 49 + SPU_SCHED_NOTIFY_ACTIVE, 50 + }; 51 + 47 52 struct spu_context { 48 53 struct spu *spu; /* pointer to a physical SPU */ 49 54 struct spu_state csa; /* SPU context save area. */ ··· 245 240 int spu_activate(struct spu_context *ctx, unsigned long flags); 246 241 void spu_deactivate(struct spu_context *ctx); 247 242 void spu_yield(struct spu_context *ctx); 243 + void spu_switch_notify(struct spu *spu, struct spu_context *ctx); 248 244 void spu_set_timeslice(struct spu_context *ctx); 249 245 void spu_update_sched_info(struct spu_context *ctx); 250 246 void __spu_update_sched_info(struct spu_context *ctx);