Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Create disable_kernel_{fp,altivec,vsx,spe}()

The enable_kernel_*() functions leave the relevant MSR bits enabled
until we exit the kernel sometime later. Create disable versions
that wrap the kernel use of FP, Altivec, VSX or SPE.

While we don't want to disable it normally for performance reasons
(MSR writes are slow), it will be used for a debug boot option that
does this and catches bad uses in other areas of the kernel.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

Authored by Anton Blanchard; committed by Michael Ellerman.
dc4fbba1 a0e72cf1

+39
+1
arch/powerpc/crypto/aes-spe-glue.c
··· 85 85 86 86 static void spe_end(void) 87 87 { 88 + disable_kernel_spe(); 88 89 /* reenable preemption */ 89 90 preempt_enable(); 90 91 }
+1
arch/powerpc/crypto/sha1-spe-glue.c
··· 46 46 47 47 static void spe_end(void) 48 48 { 49 + disable_kernel_spe(); 49 50 /* reenable preemption */ 50 51 preempt_enable(); 51 52 }
+1
arch/powerpc/crypto/sha256-spe-glue.c
··· 47 47 48 48 static void spe_end(void) 49 49 { 50 + disable_kernel_spe(); 50 51 /* reenable preemption */ 51 52 preempt_enable(); 52 53 }
+5
arch/powerpc/include/asm/switch_to.h
··· 26 26 extern void load_up_spe(struct task_struct *); 27 27 extern void switch_booke_debug_regs(struct debug_reg *new_debug); 28 28 29 + static inline void disable_kernel_fp(void) { } 30 + static inline void disable_kernel_altivec(void) { } 31 + static inline void disable_kernel_spe(void) { } 32 + static inline void disable_kernel_vsx(void) { } 33 + 29 34 #ifdef CONFIG_PPC_FPU 30 35 extern void flush_fp_to_thread(struct task_struct *); 31 36 extern void giveup_fpu(struct task_struct *);
+2
arch/powerpc/kernel/align.c
··· 960 960 preempt_disable(); 961 961 enable_kernel_fp(); 962 962 cvt_df(&data.dd, (float *)&data.x32.low32); 963 + disable_kernel_fp(); 963 964 preempt_enable(); 964 965 #else 965 966 return 0; ··· 1001 1000 preempt_disable(); 1002 1001 enable_kernel_fp(); 1003 1002 cvt_fd((float *)&data.x32.low32, &data.dd); 1003 + disable_kernel_fp(); 1004 1004 preempt_enable(); 1005 1005 #else 1006 1006 return 0;
+1
arch/powerpc/kvm/book3s_paired_singles.c
··· 1265 1265 if (rcomp) 1266 1266 kvmppc_set_cr(vcpu, cr); 1267 1267 1268 + disable_kernel_fp(); 1268 1269 preempt_enable(); 1269 1270 1270 1271 return emulated;
+4
arch/powerpc/kvm/book3s_pr.c
··· 751 751 preempt_disable(); 752 752 enable_kernel_fp(); 753 753 load_fp_state(&vcpu->arch.fp); 754 + disable_kernel_fp(); 754 755 t->fp_save_area = &vcpu->arch.fp; 755 756 preempt_enable(); 756 757 } ··· 761 760 preempt_disable(); 762 761 enable_kernel_altivec(); 763 762 load_vr_state(&vcpu->arch.vr); 763 + disable_kernel_altivec(); 764 764 t->vr_save_area = &vcpu->arch.vr; 765 765 preempt_enable(); 766 766 #endif ··· 790 788 preempt_disable(); 791 789 enable_kernel_fp(); 792 790 load_fp_state(&vcpu->arch.fp); 791 + disable_kernel_fp(); 793 792 preempt_enable(); 794 793 } 795 794 #ifdef CONFIG_ALTIVEC ··· 798 795 preempt_disable(); 799 796 enable_kernel_altivec(); 800 797 load_vr_state(&vcpu->arch.vr); 798 + disable_kernel_altivec(); 801 799 preempt_enable(); 802 800 } 803 801 #endif
+4
arch/powerpc/kvm/booke.c
··· 98 98 preempt_disable(); 99 99 enable_kernel_spe(); 100 100 kvmppc_save_guest_spe(vcpu); 101 + disable_kernel_spe(); 101 102 vcpu->arch.shadow_msr &= ~MSR_SPE; 102 103 preempt_enable(); 103 104 } ··· 108 107 preempt_disable(); 109 108 enable_kernel_spe(); 110 109 kvmppc_load_guest_spe(vcpu); 110 + disable_kernel_spe(); 111 111 vcpu->arch.shadow_msr |= MSR_SPE; 112 112 preempt_enable(); 113 113 } ··· 143 141 if (!(current->thread.regs->msr & MSR_FP)) { 144 142 enable_kernel_fp(); 145 143 load_fp_state(&vcpu->arch.fp); 144 + disable_kernel_fp(); 146 145 current->thread.fp_save_area = &vcpu->arch.fp; 147 146 current->thread.regs->msr |= MSR_FP; 148 147 } ··· 185 182 if (!(current->thread.regs->msr & MSR_VEC)) { 186 183 enable_kernel_altivec(); 187 184 load_vr_state(&vcpu->arch.vr); 185 + disable_kernel_altivec(); 188 186 current->thread.vr_save_area = &vcpu->arch.vr; 189 187 current->thread.regs->msr |= MSR_VEC; 190 188 }
+2
arch/powerpc/lib/vmx-helper.c
··· 46 46 */ 47 47 int exit_vmx_usercopy(void) 48 48 { 49 + disable_kernel_altivec(); 49 50 pagefault_enable(); 50 51 preempt_enable(); 51 52 return 0; ··· 71 70 */ 72 71 void *exit_vmx_copy(void *dest) 73 72 { 73 + disable_kernel_altivec(); 74 74 preempt_enable(); 75 75 return dest; 76 76 }
+4
arch/powerpc/lib/xor_vmx.c
··· 74 74 v2 += 4; 75 75 } while (--lines > 0); 76 76 77 + disable_kernel_altivec(); 77 78 preempt_enable(); 78 79 } 79 80 EXPORT_SYMBOL(xor_altivec_2); ··· 103 102 v3 += 4; 104 103 } while (--lines > 0); 105 104 105 + disable_kernel_altivec(); 106 106 preempt_enable(); 107 107 } 108 108 EXPORT_SYMBOL(xor_altivec_3); ··· 137 135 v4 += 4; 138 136 } while (--lines > 0); 139 137 138 + disable_kernel_altivec(); 140 139 preempt_enable(); 141 140 } 142 141 EXPORT_SYMBOL(xor_altivec_4); ··· 175 172 v5 += 4; 176 173 } while (--lines > 0); 177 174 175 + disable_kernel_altivec(); 178 176 preempt_enable(); 179 177 } 180 178 EXPORT_SYMBOL(xor_altivec_5);
+3
drivers/crypto/vmx/aes.c
··· 86 86 enable_kernel_vsx(); 87 87 ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); 88 88 ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); 89 + disable_kernel_vsx(); 89 90 pagefault_enable(); 90 91 preempt_enable(); 91 92 ··· 105 104 pagefault_disable(); 106 105 enable_kernel_vsx(); 107 106 aes_p8_encrypt(src, dst, &ctx->enc_key); 107 + disable_kernel_vsx(); 108 108 pagefault_enable(); 109 109 preempt_enable(); 110 110 } ··· 122 120 pagefault_disable(); 123 121 enable_kernel_vsx(); 124 122 aes_p8_decrypt(src, dst, &ctx->dec_key); 123 + disable_kernel_vsx(); 125 124 pagefault_enable(); 126 125 preempt_enable(); 127 126 }
+3
drivers/crypto/vmx/aes_cbc.c
··· 87 87 enable_kernel_vsx(); 88 88 ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); 89 89 ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); 90 + disable_kernel_vsx(); 90 91 pagefault_enable(); 91 92 preempt_enable(); 92 93 ··· 128 127 ret = blkcipher_walk_done(desc, &walk, nbytes); 129 128 } 130 129 130 + disable_kernel_vsx(); 131 131 pagefault_enable(); 132 132 preempt_enable(); 133 133 } ··· 169 167 ret = blkcipher_walk_done(desc, &walk, nbytes); 170 168 } 171 169 170 + disable_kernel_vsx(); 172 171 pagefault_enable(); 173 172 preempt_enable(); 174 173 }
+3
drivers/crypto/vmx/aes_ctr.c
··· 83 83 pagefault_disable(); 84 84 enable_kernel_vsx(); 85 85 ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); 86 + disable_kernel_vsx(); 86 87 pagefault_enable(); 87 88 88 89 ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); ··· 102 101 pagefault_disable(); 103 102 enable_kernel_vsx(); 104 103 aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key); 104 + disable_kernel_vsx(); 105 105 pagefault_enable(); 106 106 107 107 crypto_xor(keystream, src, nbytes); ··· 141 139 AES_BLOCK_SIZE, 142 140 &ctx->enc_key, 143 141 walk.iv); 142 + disable_kernel_vsx(); 144 143 pagefault_enable(); 145 144 146 145 /* We need to update IV mostly for last bytes/round */
+4
drivers/crypto/vmx/ghash.c
··· 120 120 pagefault_disable(); 121 121 enable_kernel_vsx(); 122 122 gcm_init_p8(ctx->htable, (const u64 *) key); 123 + disable_kernel_vsx(); 123 124 pagefault_enable(); 124 125 preempt_enable(); 125 126 return crypto_shash_setkey(ctx->fallback, key, keylen); ··· 151 150 enable_kernel_vsx(); 152 151 gcm_ghash_p8(dctx->shash, ctx->htable, 153 152 dctx->buffer, GHASH_DIGEST_SIZE); 153 + disable_kernel_vsx(); 154 154 pagefault_enable(); 155 155 preempt_enable(); 156 156 src += GHASH_DIGEST_SIZE - dctx->bytes; ··· 164 162 pagefault_disable(); 165 163 enable_kernel_vsx(); 166 164 gcm_ghash_p8(dctx->shash, ctx->htable, src, len); 165 + disable_kernel_vsx(); 167 166 pagefault_enable(); 168 167 preempt_enable(); 169 168 src += len; ··· 195 192 enable_kernel_vsx(); 196 193 gcm_ghash_p8(dctx->shash, ctx->htable, 197 194 dctx->buffer, GHASH_DIGEST_SIZE); 195 + disable_kernel_vsx(); 198 196 pagefault_enable(); 199 197 preempt_enable(); 200 198 dctx->bytes = 0;
+1
lib/raid6/altivec.uc
··· 101 101 102 102 raid6_altivec$#_gen_syndrome_real(disks, bytes, ptrs); 103 103 104 + disable_kernel_altivec(); 104 105 preempt_enable(); 105 106 } 106 107