Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

crypto/arm64: aes-ccm - Switch to 'ksimd' scoped guard API

Switch to the more abstract 'scoped_ksimd()' API, which will be modified
in a future patch to transparently allocate a kernel mode FP/SIMD state
buffer on the stack, so that kernel mode FP/SIMD code remains
preemptible in principle, but without the memory overhead of adding 528
bytes to the size of struct task_struct.
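
For reference, a minimal sketch of how such a guard could be layered on
top of the existing begin/end primitives, assuming the stock guard
macros from <linux/cleanup.h>; the actual definition of scoped_ksimd()
is not part of this patch and may differ:

	/* Hypothetical sketch, not the upstream definition. */
	#include <linux/cleanup.h>
	#include <asm/neon.h>

	/* Bracket a lexical scope with kernel_neon_begin()/_end(). */
	DEFINE_LOCK_GUARD_0(ksimd, kernel_neon_begin(), kernel_neon_end())

	#define scoped_ksimd()	scoped_guard(ksimd)

Because a cleanup-based guard runs its end hook whenever control leaves
the attached block, the begin/end calls can no longer be mismatched.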

Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>

+54 -57
arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -8,13 +8,14 @@
  * Author: Ard Biesheuvel <ardb@kernel.org>
  */
 
-#include <asm/neon.h>
 #include <linux/unaligned.h>
 #include <crypto/aes.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/aead.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/module.h>
+
+#include <asm/simd.h>
 
 #include "aes-ce-setkey.h"
 
@@ -185,40 +184,38 @@ static int ccm_encrypt(struct aead_request *req)
 	if (unlikely(err))
 		return err;
 
-	kernel_neon_begin();
+	scoped_ksimd() {
+		if (req->assoclen)
+			ccm_calculate_auth_mac(req, mac);
 
-	if (req->assoclen)
-		ccm_calculate_auth_mac(req, mac);
+		do {
+			u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+			const u8 *src = walk.src.virt.addr;
+			u8 *dst = walk.dst.virt.addr;
+			u8 buf[AES_BLOCK_SIZE];
+			u8 *final_iv = NULL;
 
-	do {
-		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
-		const u8 *src = walk.src.virt.addr;
-		u8 *dst = walk.dst.virt.addr;
-		u8 buf[AES_BLOCK_SIZE];
-		u8 *final_iv = NULL;
+			if (walk.nbytes == walk.total) {
+				tail = 0;
+				final_iv = orig_iv;
+			}
 
-		if (walk.nbytes == walk.total) {
-			tail = 0;
-			final_iv = orig_iv;
-		}
+			if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+				src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
+						   src, walk.nbytes);
 
-		if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
-			src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
-					   src, walk.nbytes);
+			ce_aes_ccm_encrypt(dst, src, walk.nbytes - tail,
+					   ctx->key_enc, num_rounds(ctx),
+					   mac, walk.iv, final_iv);
 
-		ce_aes_ccm_encrypt(dst, src, walk.nbytes - tail,
-				   ctx->key_enc, num_rounds(ctx),
-				   mac, walk.iv, final_iv);
+			if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+				memcpy(walk.dst.virt.addr, dst, walk.nbytes);
 
-		if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
-			memcpy(walk.dst.virt.addr, dst, walk.nbytes);
-
-		if (walk.nbytes) {
-			err = skcipher_walk_done(&walk, tail);
-		}
-	} while (walk.nbytes);
-
-	kernel_neon_end();
+			if (walk.nbytes) {
+				err = skcipher_walk_done(&walk, tail);
+			}
+		} while (walk.nbytes);
+	}
 
 	if (unlikely(err))
 		return err;
@@ -250,40 +251,38 @@ static int ccm_decrypt(struct aead_request *req)
 	if (unlikely(err))
 		return err;
 
-	kernel_neon_begin();
+	scoped_ksimd() {
+		if (req->assoclen)
+			ccm_calculate_auth_mac(req, mac);
 
-	if (req->assoclen)
-		ccm_calculate_auth_mac(req, mac);
+		do {
+			u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+			const u8 *src = walk.src.virt.addr;
+			u8 *dst = walk.dst.virt.addr;
+			u8 buf[AES_BLOCK_SIZE];
+			u8 *final_iv = NULL;
 
-	do {
-		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
-		const u8 *src = walk.src.virt.addr;
-		u8 *dst = walk.dst.virt.addr;
-		u8 buf[AES_BLOCK_SIZE];
-		u8 *final_iv = NULL;
+			if (walk.nbytes == walk.total) {
+				tail = 0;
+				final_iv = orig_iv;
+			}
 
-		if (walk.nbytes == walk.total) {
-			tail = 0;
-			final_iv = orig_iv;
-		}
+			if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+				src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
+						   src, walk.nbytes);
 
-		if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
-			src = dst = memcpy(&buf[sizeof(buf) - walk.nbytes],
-					   src, walk.nbytes);
+			ce_aes_ccm_decrypt(dst, src, walk.nbytes - tail,
+					   ctx->key_enc, num_rounds(ctx),
+					   mac, walk.iv, final_iv);
 
-		ce_aes_ccm_decrypt(dst, src, walk.nbytes - tail,
-				   ctx->key_enc, num_rounds(ctx),
-				   mac, walk.iv, final_iv);
+			if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
+				memcpy(walk.dst.virt.addr, dst, walk.nbytes);
 
-		if (unlikely(walk.nbytes < AES_BLOCK_SIZE))
-			memcpy(walk.dst.virt.addr, dst, walk.nbytes);
-
-		if (walk.nbytes) {
-			err = skcipher_walk_done(&walk, tail);
-		}
-	} while (walk.nbytes);
-
-	kernel_neon_end();
+			if (walk.nbytes) {
+				err = skcipher_walk_done(&walk, tail);
+			}
+		} while (walk.nbytes);
+	}
 
 	if (unlikely(err))
 		return err;
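
The conversion above is mechanical: each open-coded
kernel_neon_begin()/kernel_neon_end() bracket becomes a compound
statement and the enclosed code gains one level of indentation. One
practical benefit, assuming a cleanup-based guard as sketched earlier,
is that early exits from the block still restore the FP/SIMD state
(do_simd_work() below is a hypothetical helper for illustration):

	scoped_ksimd() {
		err = do_simd_work();	/* hypothetical helper */
		if (err)
			return err;	/* end hook still runs on return */
	}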