Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/cell: Move data segment faulting code out of cell platform

__spu_trap_data_seg() currently contains code to determine the VSID and ESID
required for a particular EA and mm struct.

This code is generically useful for other co-processors. This moves the code out
of the cell platform so it can be used by other powerpc code. It also adds 1TB
segment handling which Cell didn't support. The new function is called
copro_calculate_slb().

This also moves the internal struct spu_slb to a generic struct copro_slb which
is now used in the Cell and copro code. We use this new struct instead of
passing around esid and vsid parameters.

Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

authored by

Ian Munsie and committed by
Michael Ellerman
73d16a6e e83d0169

+69 -49
+7
arch/powerpc/include/asm/copro.h
··· 10 10 #ifndef _ASM_POWERPC_COPRO_H 11 11 #define _ASM_POWERPC_COPRO_H 12 12 13 + struct copro_slb 14 + { 15 + u64 esid, vsid; 16 + }; 17 + 13 18 int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, 14 19 unsigned long dsisr, unsigned *flt); 20 + 21 + int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb); 15 22 16 23 #endif /* _ASM_POWERPC_COPRO_H */
+7
arch/powerpc/include/asm/mmu-hash64.h
··· 190 190 191 191 #ifndef __ASSEMBLY__ 192 192 193 + static inline int slb_vsid_shift(int ssize) 194 + { 195 + if (ssize == MMU_SEGSIZE_256M) 196 + return SLB_VSID_SHIFT; 197 + return SLB_VSID_SHIFT_1T; 198 + } 199 + 193 200 static inline int segment_shift(int ssize) 194 201 { 195 202 if (ssize == MMU_SEGSIZE_256M)
+46
arch/powerpc/mm/copro_fault.c
··· 24 24 #include <linux/mm.h> 25 25 #include <linux/export.h> 26 26 #include <asm/reg.h> 27 + #include <asm/copro.h> 27 28 28 29 /* 29 30 * This ought to be kept in sync with the powerpc specific do_page_fault ··· 91 90 return ret; 92 91 } 93 92 EXPORT_SYMBOL_GPL(copro_handle_mm_fault); 93 + 94 + int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) 95 + { 96 + u64 vsid; 97 + int psize, ssize; 98 + 99 + slb->esid = (ea & ESID_MASK) | SLB_ESID_V; 100 + 101 + switch (REGION_ID(ea)) { 102 + case USER_REGION_ID: 103 + pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea); 104 + psize = get_slice_psize(mm, ea); 105 + ssize = user_segment_size(ea); 106 + vsid = get_vsid(mm->context.id, ea, ssize); 107 + break; 108 + case VMALLOC_REGION_ID: 109 + pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea); 110 + if (ea < VMALLOC_END) 111 + psize = mmu_vmalloc_psize; 112 + else 113 + psize = mmu_io_psize; 114 + ssize = mmu_kernel_ssize; 115 + vsid = get_kernel_vsid(ea, mmu_kernel_ssize); 116 + break; 117 + case KERNEL_REGION_ID: 118 + pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea); 119 + psize = mmu_linear_psize; 120 + ssize = mmu_kernel_ssize; 121 + vsid = get_kernel_vsid(ea, mmu_kernel_ssize); 122 + break; 123 + default: 124 + pr_debug("%s: invalid region access at %016llx\n", __func__, ea); 125 + return 1; 126 + } 127 + 128 + vsid = (vsid << slb_vsid_shift(ssize)) | SLB_VSID_USER; 129 + 130 + vsid |= mmu_psize_defs[psize].sllp | 131 + ((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0); 132 + 133 + slb->vsid = vsid; 134 + 135 + return 0; 136 + } 137 + EXPORT_SYMBOL_GPL(copro_calculate_slb);
-3
arch/powerpc/mm/slb.c
··· 46 46 return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot; 47 47 } 48 48 49 - #define slb_vsid_shift(ssize) \ 50 - ((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T) 51 - 52 49 static inline unsigned long mk_vsid_data(unsigned long ea, int ssize, 53 50 unsigned long flags) 54 51 {
+9 -46
arch/powerpc/platforms/cell/spu_base.c
··· 76 76 static DEFINE_SPINLOCK(spu_full_list_lock); 77 77 static DEFINE_MUTEX(spu_full_list_mutex); 78 78 79 - struct spu_slb { 80 - u64 esid, vsid; 81 - }; 82 - 83 79 void spu_invalidate_slbs(struct spu *spu) 84 80 { 85 81 struct spu_priv2 __iomem *priv2 = spu->priv2; ··· 145 149 } 146 150 } 147 151 148 - static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb) 152 + static inline void spu_load_slb(struct spu *spu, int slbe, struct copro_slb *slb) 149 153 { 150 154 struct spu_priv2 __iomem *priv2 = spu->priv2; 151 155 ··· 163 167 164 168 static int __spu_trap_data_seg(struct spu *spu, unsigned long ea) 165 169 { 166 - struct mm_struct *mm = spu->mm; 167 - struct spu_slb slb; 168 - int psize; 170 + struct copro_slb slb; 171 + int ret; 169 172 170 - pr_debug("%s\n", __func__); 171 - 172 - slb.esid = (ea & ESID_MASK) | SLB_ESID_V; 173 - 174 - switch(REGION_ID(ea)) { 175 - case USER_REGION_ID: 176 - #ifdef CONFIG_PPC_MM_SLICES 177 - psize = get_slice_psize(mm, ea); 178 - #else 179 - psize = mm->context.user_psize; 180 - #endif 181 - slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M) 182 - << SLB_VSID_SHIFT) | SLB_VSID_USER; 183 - break; 184 - case VMALLOC_REGION_ID: 185 - if (ea < VMALLOC_END) 186 - psize = mmu_vmalloc_psize; 187 - else 188 - psize = mmu_io_psize; 189 - slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) 190 - << SLB_VSID_SHIFT) | SLB_VSID_KERNEL; 191 - break; 192 - case KERNEL_REGION_ID: 193 - psize = mmu_linear_psize; 194 - slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) 195 - << SLB_VSID_SHIFT) | SLB_VSID_KERNEL; 196 - break; 197 - default: 198 - /* Future: support kernel segments so that drivers 199 - * can use SPUs. 
200 - */ 201 - pr_debug("invalid region access at %016lx\n", ea); 202 - return 1; 203 - } 204 - slb.vsid |= mmu_psize_defs[psize].sllp; 173 + ret = copro_calculate_slb(spu->mm, ea, &slb); 174 + if (ret) 175 + return ret; 205 176 206 177 spu_load_slb(spu, spu->slb_replace, &slb); 207 178 ··· 216 253 return 0; 217 254 } 218 255 219 - static void __spu_kernel_slb(void *addr, struct spu_slb *slb) 256 + static void __spu_kernel_slb(void *addr, struct copro_slb *slb) 220 257 { 221 258 unsigned long ea = (unsigned long)addr; 222 259 u64 llp; ··· 235 272 * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the 236 273 * address @new_addr is present. 237 274 */ 238 - static inline int __slb_present(struct spu_slb *slbs, int nr_slbs, 275 + static inline int __slb_present(struct copro_slb *slbs, int nr_slbs, 239 276 void *new_addr) 240 277 { 241 278 unsigned long ea = (unsigned long)new_addr; ··· 260 297 void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa, 261 298 void *code, int code_size) 262 299 { 263 - struct spu_slb slbs[4]; 300 + struct copro_slb slbs[4]; 264 301 int i, nr_slbs = 0; 265 302 /* start and end addresses of both mappings */ 266 303 void *addrs[] = {