[POWERPC] spufs: implement error event delivery to user space

This tries to fix spufs so we have an interface closer to what is
specified in the man page for events returned in the third argument of
spu_run.

Fortunately, libspe has never been using the returned contents of that
register, as they were the same as the return code of spu_run (duh!).

Unlike the specification that we never implemented correctly, we now
require a SPU_CREATE_EVENTS_ENABLED flag passed to spu_create, in
order to get the new behavior. When this flag is not passed, spu_run
will simply ignore the third argument now.

Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>

authored by Arnd Bergmann and committed by Paul Mackerras 9add11da 28347bce

+68 -24
+3 -3
arch/powerpc/platforms/cell/spu_base.c
··· 46 static int __spu_trap_invalid_dma(struct spu *spu) 47 { 48 pr_debug("%s\n", __FUNCTION__); 49 - force_sig(SIGBUS, /* info, */ current); 50 return 0; 51 } 52 53 static int __spu_trap_dma_align(struct spu *spu) 54 { 55 pr_debug("%s\n", __FUNCTION__); 56 - force_sig(SIGBUS, /* info, */ current); 57 return 0; 58 } 59 60 static int __spu_trap_error(struct spu *spu) 61 { 62 pr_debug("%s\n", __FUNCTION__); 63 - force_sig(SIGILL, /* info, */ current); 64 return 0; 65 } 66
··· 46 static int __spu_trap_invalid_dma(struct spu *spu) 47 { 48 pr_debug("%s\n", __FUNCTION__); 49 + spu->dma_callback(spu, SPE_EVENT_INVALID_DMA); 50 return 0; 51 } 52 53 static int __spu_trap_dma_align(struct spu *spu) 54 { 55 pr_debug("%s\n", __FUNCTION__); 56 + spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT); 57 return 0; 58 } 59 60 static int __spu_trap_error(struct spu *spu) 61 { 62 pr_debug("%s\n", __FUNCTION__); 63 + spu->dma_callback(spu, SPE_EVENT_SPE_ERROR); 64 return 0; 65 } 66
+6 -3
arch/powerpc/platforms/cell/spufs/inode.c
··· 224 }; 225 226 static int 227 - spufs_mkdir(struct inode *dir, struct dentry *dentry, int mode) 228 { 229 int ret; 230 struct inode *inode; ··· 244 SPUFS_I(inode)->i_ctx = ctx; 245 if (!ctx) 246 goto out_iput; 247 248 inode->i_op = &spufs_dir_inode_operations; 249 inode->i_fop = &simple_dir_operations; ··· 307 goto out; 308 309 /* all flags are reserved */ 310 - if (flags) 311 goto out; 312 313 dentry = lookup_create(nd, 1); ··· 320 goto out_dput; 321 322 mode &= ~current->fs->umask; 323 - ret = spufs_mkdir(nd->dentry->d_inode, dentry, mode & S_IRWXUGO); 324 if (ret) 325 goto out_dput; 326
··· 224 }; 225 226 static int 227 + spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags, 228 + int mode) 229 { 230 int ret; 231 struct inode *inode; ··· 243 SPUFS_I(inode)->i_ctx = ctx; 244 if (!ctx) 245 goto out_iput; 246 + 247 + ctx->flags = flags; 248 249 inode->i_op = &spufs_dir_inode_operations; 250 inode->i_fop = &simple_dir_operations; ··· 304 goto out; 305 306 /* all flags are reserved */ 307 + if (flags & (~SPU_CREATE_FLAG_ALL)) 308 goto out; 309 310 dentry = lookup_create(nd, 1); ··· 317 goto out_dput; 318 319 mode &= ~current->fs->umask; 320 + ret = spufs_mkdir(nd->dentry->d_inode, dentry, flags, mode & S_IRWXUGO); 321 if (ret) 322 goto out_dput; 323
+35 -13
arch/powerpc/platforms/cell/spufs/run.c
··· 14 wake_up_all(&ctx->stop_wq); 15 } 16 17 static inline int spu_stopped(struct spu_context *ctx, u32 * stat) 18 { 19 struct spu *spu; ··· 48 return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0; 49 } 50 51 - static inline int spu_run_init(struct spu_context *ctx, u32 * npc, 52 - u32 * status) 53 { 54 int ret; 55 ··· 91 SPU_STATUS_STOPPED_BY_HALT)) { 92 return *status; 93 } 94 - if ((ret = spu_run_init(ctx, npc, status)) != 0) 95 return ret; 96 return 0; 97 } ··· 196 } 197 198 long spufs_run_spu(struct file *file, struct spu_context *ctx, 199 - u32 * npc, u32 * status) 200 { 201 int ret; 202 203 if (down_interruptible(&ctx->run_sema)) 204 return -ERESTARTSYS; 205 206 - ret = spu_run_init(ctx, npc, status); 207 if (ret) 208 goto out; 209 210 do { 211 - ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, status)); 212 if (unlikely(ret)) 213 break; 214 - if ((*status & SPU_STATUS_STOPPED_BY_STOP) && 215 - (*status >> SPU_STOP_STATUS_SHIFT == 0x2104)) { 216 ret = spu_process_callback(ctx); 217 if (ret) 218 break; 219 - *status &= ~SPU_STATUS_STOPPED_BY_STOP; 220 } 221 if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) { 222 - ret = spu_reacquire_runnable(ctx, npc, status); 223 if (ret) 224 goto out; 225 continue; 226 } 227 ret = spu_process_events(ctx); 228 229 - } while (!ret && !(*status & (SPU_STATUS_STOPPED_BY_STOP | 230 SPU_STATUS_STOPPED_BY_HALT))); 231 232 ctx->ops->runcntl_stop(ctx); 233 - ret = spu_run_fini(ctx, npc, status); 234 if (!ret) 235 - ret = *status; 236 spu_yield(ctx); 237 238 out: 239 up(&ctx->run_sema); 240 return ret; 241 }
··· 14 wake_up_all(&ctx->stop_wq); 15 } 16 17 + void spufs_dma_callback(struct spu *spu, int type) 18 + { 19 + struct spu_context *ctx = spu->ctx; 20 + 21 + if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) { 22 + ctx->event_return |= type; 23 + wake_up_all(&ctx->stop_wq); 24 + } else { 25 + switch (type) { 26 + case SPE_EVENT_DMA_ALIGNMENT: 27 + case SPE_EVENT_INVALID_DMA: 28 + force_sig(SIGBUS, /* info, */ current); 29 + break; 30 + case SPE_EVENT_SPE_ERROR: 31 + force_sig(SIGILL, /* info */ current); 32 + break; 33 + } 34 + } 35 + } 36 + 37 static inline int spu_stopped(struct spu_context *ctx, u32 * stat) 38 { 39 struct spu *spu; ··· 28 return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0; 29 } 30 31 + static inline int spu_run_init(struct spu_context *ctx, u32 * npc) 32 { 33 int ret; 34 ··· 72 SPU_STATUS_STOPPED_BY_HALT)) { 73 return *status; 74 } 75 + if ((ret = spu_run_init(ctx, npc)) != 0) 76 return ret; 77 return 0; 78 } ··· 177 } 178 179 long spufs_run_spu(struct file *file, struct spu_context *ctx, 180 + u32 *npc, u32 *event) 181 { 182 int ret; 183 + u32 status; 184 185 if (down_interruptible(&ctx->run_sema)) 186 return -ERESTARTSYS; 187 188 + ctx->event_return = 0; 189 + ret = spu_run_init(ctx, npc); 190 if (ret) 191 goto out; 192 193 do { 194 + ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status)); 195 if (unlikely(ret)) 196 break; 197 + if ((status & SPU_STATUS_STOPPED_BY_STOP) && 198 + (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) { 199 ret = spu_process_callback(ctx); 200 if (ret) 201 break; 202 + status &= ~SPU_STATUS_STOPPED_BY_STOP; 203 } 204 if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) { 205 + ret = spu_reacquire_runnable(ctx, npc, &status); 206 if (ret) 207 goto out; 208 continue; 209 } 210 ret = spu_process_events(ctx); 211 212 + } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP | 213 SPU_STATUS_STOPPED_BY_HALT))); 214 215 ctx->ops->runcntl_stop(ctx); 216 + ret = spu_run_fini(ctx, npc, &status); 217 if (!ret) 218 + ret = status; 219 spu_yield(ctx); 220 221 out: 222 + *event = ctx->event_return; 223 up(&ctx->run_sema); 224 return ret; 225 }
+2 -2
arch/powerpc/platforms/cell/spufs/sched.c
··· 81 spu->number, spu->node); 82 spu->ctx = ctx; 83 spu->flags = 0; 84 - ctx->flags = 0; 85 ctx->spu = spu; 86 ctx->ops = &spu_hw_ops; 87 spu->pid = current->pid; ··· 91 spu->wbox_callback = spufs_wbox_callback; 92 spu->stop_callback = spufs_stop_callback; 93 spu->mfc_callback = spufs_mfc_callback; 94 mb(); 95 spu_unmap_mappings(ctx); 96 spu_restore(&ctx->csa, spu); ··· 111 spu->wbox_callback = NULL; 112 spu->stop_callback = NULL; 113 spu->mfc_callback = NULL; 114 spu->mm = NULL; 115 spu->pid = 0; 116 spu->prio = MAX_PRIO; 117 ctx->ops = &spu_backing_ops; 118 ctx->spu = NULL; 119 - ctx->flags = 0; 120 spu->flags = 0; 121 spu->ctx = NULL; 122 }
··· 81 spu->number, spu->node); 82 spu->ctx = ctx; 83 spu->flags = 0; 84 ctx->spu = spu; 85 ctx->ops = &spu_hw_ops; 86 spu->pid = current->pid; ··· 92 spu->wbox_callback = spufs_wbox_callback; 93 spu->stop_callback = spufs_stop_callback; 94 spu->mfc_callback = spufs_mfc_callback; 95 + spu->dma_callback = spufs_dma_callback; 96 mb(); 97 spu_unmap_mappings(ctx); 98 spu_restore(&ctx->csa, spu); ··· 111 spu->wbox_callback = NULL; 112 spu->stop_callback = NULL; 113 spu->mfc_callback = NULL; 114 + spu->dma_callback = NULL; 115 spu->mm = NULL; 116 spu->pid = 0; 117 spu->prio = MAX_PRIO; 118 ctx->ops = &spu_backing_ops; 119 ctx->spu = NULL; 120 spu->flags = 0; 121 spu->ctx = NULL; 122 }
+3 -1
arch/powerpc/platforms/cell/spufs/spufs.h
··· 66 u32 tagwait; 67 struct spu_context_ops *ops; 68 struct work_struct reap_work; 69 - u64 flags; 70 }; 71 72 struct mfc_dma_command { ··· 184 void spufs_wbox_callback(struct spu *spu); 185 void spufs_stop_callback(struct spu *spu); 186 void spufs_mfc_callback(struct spu *spu); 187 188 #endif
··· 66 u32 tagwait; 67 struct spu_context_ops *ops; 68 struct work_struct reap_work; 69 + unsigned long flags; 70 + unsigned long event_return; 71 }; 72 73 struct mfc_dma_command { ··· 183 void spufs_wbox_callback(struct spu *spu); 184 void spufs_stop_callback(struct spu *spu); 185 void spufs_mfc_callback(struct spu *spu); 186 + void spufs_dma_callback(struct spu *spu, int type); 187 188 #endif
+5 -2
arch/powerpc/platforms/cell/spufs/syscalls.c
··· 38 u32 npc, status; 39 40 ret = -EFAULT; 41 - if (get_user(npc, unpc) || get_user(status, ustatus)) 42 goto out; 43 44 /* check if this file was created by spu_create */ ··· 49 i = SPUFS_I(filp->f_dentry->d_inode); 50 ret = spufs_run_spu(filp, i->i_ctx, &npc, &status); 51 52 - if (put_user(npc, unpc) || put_user(status, ustatus)) 53 ret = -EFAULT; 54 out: 55 return ret;
··· 38 u32 npc, status; 39 40 ret = -EFAULT; 41 + if (get_user(npc, unpc)) 42 goto out; 43 44 /* check if this file was created by spu_create */ ··· 49 i = SPUFS_I(filp->f_dentry->d_inode); 50 ret = spufs_run_spu(filp, i->i_ctx, &npc, &status); 51 52 + if (put_user(npc, unpc)) 53 + ret = -EFAULT; 54 + 55 + if (ustatus && put_user(status, ustatus)) 56 ret = -EFAULT; 57 out: 58 return ret;
+14
include/asm-powerpc/spu.h
··· 138 void (* ibox_callback)(struct spu *spu); 139 void (* stop_callback)(struct spu *spu); 140 void (* mfc_callback)(struct spu *spu); 141 142 char irq_c0[8]; 143 char irq_c1[8]; ··· 169 __u32 __user *ustatus); 170 struct module *owner; 171 } spufs_calls; 172 173 #ifdef CONFIG_SPU_FS_MODULE 174 int register_spu_syscalls(struct spufs_calls *calls);
··· 138 void (* ibox_callback)(struct spu *spu); 139 void (* stop_callback)(struct spu *spu); 140 void (* mfc_callback)(struct spu *spu); 141 + void (* dma_callback)(struct spu *spu, int type); 142 143 char irq_c0[8]; 144 char irq_c1[8]; ··· 168 __u32 __user *ustatus); 169 struct module *owner; 170 } spufs_calls; 171 + 172 + /* return status from spu_run, same as in libspe */ 173 + #define SPE_EVENT_DMA_ALIGNMENT 0x0008 /*A DMA alignment error */ 174 + #define SPE_EVENT_SPE_ERROR 0x0010 /*An illegal instruction error*/ 175 + #define SPE_EVENT_SPE_DATA_SEGMENT 0x0020 /*A DMA segmentation error */ 176 + #define SPE_EVENT_SPE_DATA_STORAGE 0x0040 /*A DMA storage error */ 177 + #define SPE_EVENT_INVALID_DMA 0x0800 /* Invalid MFC DMA */ 178 + 179 + /* 180 + * Flags for sys_spu_create. 181 + */ 182 + #define SPU_CREATE_EVENTS_ENABLED 0x0001 183 + #define SPU_CREATE_FLAG_ALL 0x0001 /* mask of all valid flags */ 184 185 #ifdef CONFIG_SPU_FS_MODULE 186 int register_spu_syscalls(struct spufs_calls *calls);