Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched/preempt, powerpc: Disable preemption in enable_kernel_altivec() explicitly

enable_kernel_altivec() has to be called with disabled preemption.
Let's make this explicit, to prepare for pagefault_disable() not
touching preemption anymore.

Reviewed-and-tested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David.Laight@ACULAB.COM
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: airlied@linux.ie
Cc: akpm@linux-foundation.org
Cc: bigeasy@linutronix.de
Cc: borntraeger@de.ibm.com
Cc: daniel.vetter@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: hocko@suse.cz
Cc: hughd@google.com
Cc: mst@redhat.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: schwidefsky@de.ibm.com
Cc: yang.shi@windriver.com
Link: http://lkml.kernel.org/r/1431359540-32227-14-git-send-email-dahi@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by David Hildenbrand; committed by Ingo Molnar.
5f76eea8 2f09b227

+27 -6
+6 -5
arch/powerpc/lib/vmx-helper.c
···
 	if (in_interrupt())
 		return 0;
 
-	/* This acts as preempt_disable() as well and will make
-	 * enable_kernel_altivec(). We need to disable page faults
-	 * as they can call schedule and thus make us lose the VMX
-	 * context. So on page faults, we just fail which will cause
-	 * a fallback to the normal non-vmx copy.
+	preempt_disable();
+	/*
+	 * We need to disable page faults as they can call schedule and
+	 * thus make us lose the VMX context. So on page faults, we just
+	 * fail which will cause a fallback to the normal non-vmx copy.
 	 */
 	pagefault_disable();
···
 int exit_vmx_usercopy(void)
 {
 	pagefault_enable();
+	preempt_enable();
 	return 0;
 }
+7 -1
drivers/crypto/vmx/aes.c
···
 	int ret;
 	struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
+	preempt_disable();
 	pagefault_disable();
 	enable_kernel_altivec();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
 	pagefault_enable();
-
+	preempt_enable();
+
 	ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
 	return ret;
 }
···
 	if (in_interrupt()) {
 		crypto_cipher_encrypt_one(ctx->fallback, dst, src);
 	} else {
+		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
 		aes_p8_encrypt(src, dst, &ctx->enc_key);
 		pagefault_enable();
+		preempt_enable();
 	}
 }
···
 	if (in_interrupt()) {
 		crypto_cipher_decrypt_one(ctx->fallback, dst, src);
 	} else {
+		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
 		aes_p8_decrypt(src, dst, &ctx->dec_key);
 		pagefault_enable();
+		preempt_enable();
 	}
 }
+6
drivers/crypto/vmx/aes_cbc.c
···
 	int ret;
 	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
 
+	preempt_disable();
 	pagefault_disable();
 	enable_kernel_altivec();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
 	pagefault_enable();
+	preempt_enable();
 
 	ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
 	return ret;
···
 	if (in_interrupt()) {
 		ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes);
 	} else {
+		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
···
 		}
 
 		pagefault_enable();
+		preempt_enable();
 	}
 
 	return ret;
···
 	if (in_interrupt()) {
 		ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
 	} else {
+		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
···
 		}
 
 		pagefault_enable();
+		preempt_enable();
 	}
 
 	return ret;
+8
drivers/crypto/vmx/ghash.c
···
 	if (keylen != GHASH_KEY_LEN)
 		return -EINVAL;
 
+	preempt_disable();
 	pagefault_disable();
 	enable_kernel_altivec();
 	enable_kernel_fp();
 	gcm_init_p8(ctx->htable, (const u64 *) key);
 	pagefault_enable();
+	preempt_enable();
 	return crypto_shash_setkey(ctx->fallback, key, keylen);
 }
···
 		}
 		memcpy(dctx->buffer + dctx->bytes, src,
 			GHASH_DIGEST_SIZE - dctx->bytes);
+		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
 		enable_kernel_fp();
 		gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
 			GHASH_DIGEST_SIZE);
 		pagefault_enable();
+		preempt_enable();
 		src += GHASH_DIGEST_SIZE - dctx->bytes;
 		srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
 		dctx->bytes = 0;
 	}
 	len = srclen & ~(GHASH_DIGEST_SIZE - 1);
 	if (len) {
+		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
 		enable_kernel_fp();
 		gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
 		pagefault_enable();
+		preempt_enable();
 		src += len;
 		srclen -= len;
 	}
···
 	if (dctx->bytes) {
 		for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
 			dctx->buffer[i] = 0;
+		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
 		enable_kernel_fp();
 		gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
 			GHASH_DIGEST_SIZE);
 		pagefault_enable();
+		preempt_enable();
 		dctx->bytes = 0;
 	}
 	memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);