Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:

- Fix use after free in chtls

- Fix RBP breakage in sha3

- Fix use after free in hwrng_unregister

- Fix overread in morus640

- Move sleep out of kernel_neon in arm64/aes-blk

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
hwrng: core - Always drop the RNG in hwrng_unregister()
crypto: morus640 - Fix out-of-bounds access
crypto: don't optimize keccakf()
crypto: arm64/aes-blk - fix and move skcipher_walk_done out of kernel_neon_begin, _end
crypto: chtls - use after free in chtls_pt_recvmsg()

+15 -8
+1 -1
arch/arm64/crypto/aes-glue.c
@@ -223,8 +223,8 @@
 		kernel_neon_begin();
 		aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, walk.iv);
-		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 		kernel_neon_end();
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	if (walk.nbytes) {
 		u8 __aligned(8) tail[AES_BLOCK_SIZE];
+2 -1
crypto/morus640.c
@@ -274,8 +274,9 @@
 		union morus640_block_in tail;

 		memcpy(tail.bytes, src, size);
+		memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);

-		crypto_morus640_load_a(&m, src);
+		crypto_morus640_load_a(&m, tail.bytes);
 		crypto_morus640_core(state, &m);
 		crypto_morus640_store_a(tail.bytes, &m);
 		memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
+1 -1
crypto/sha3_generic.c
@@ -152,7 +152,7 @@
 	st[24] ^= bc[ 4];
 }

-static void __optimize("O3") keccakf(u64 st[25])
+static void keccakf(u64 st[25])
 {
 	int round;

+9 -2
drivers/char/hw_random/core.c
@@ -516,11 +516,18 @@

 void hwrng_unregister(struct hwrng *rng)
 {
+	int err;
+
 	mutex_lock(&rng_mutex);

 	list_del(&rng->list);
-	if (current_rng == rng)
-		enable_best_rng();
+	if (current_rng == rng) {
+		err = enable_best_rng();
+		if (err) {
+			drop_current_rng();
+			cur_rng_set_by_user = 0;
+		}
+	}

 	if (list_empty(&rng_list)) {
 		mutex_unlock(&rng_mutex);
+2 -3
drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1548,15 +1548,14 @@
 			tp->urg_data = 0;

 			if ((avail + offset) >= skb->len) {
-				if (likely(skb))
-					chtls_free_skb(sk, skb);
-				buffers_freed++;
 				if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
 					tp->copied_seq += skb->len;
 					hws->rcvpld = skb->hdr_len;
 				} else {
 					tp->copied_seq += hws->rcvpld;
 				}
+				chtls_free_skb(sk, skb);
+				buffers_freed++;
 				hws->copied_seq = 0;
 				if (copied >= target &&
 				    !skb_peek(&sk->sk_receive_queue))