Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: algif_skcipher - Fixed overflow when sndbuf is not page aligned

When sk_sndbuf is not a multiple of PAGE_SIZE, the limit tests
in sendmsg fail as the limit variable becomes negative and we're
using an unsigned comparison.

The same thing can happen if sk_sndbuf is lowered after a sendmsg
call.

This patch fixes this by always taking the signed maximum of limit
and 0 before we perform the comparison.

It also rounds the value of sk_sndbuf down to a multiple of PAGE_SIZE
so that we don't end up allocating a page only to use a small number
of bytes in it because we're bound by sk_sndbuf.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

+11 -21
+11 -21
crypto/algif_skcipher.c
··· 52 52 #define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \ 53 53 sizeof(struct scatterlist) - 1) 54 54 55 - static inline bool skcipher_writable(struct sock *sk) 55 + static inline int skcipher_sndbuf(struct sock *sk) 56 56 { 57 57 struct alg_sock *ask = alg_sk(sk); 58 58 struct skcipher_ctx *ctx = ask->private; 59 59 60 - return ctx->used + PAGE_SIZE <= max_t(int, sk->sk_sndbuf, PAGE_SIZE); 60 + return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - 61 + ctx->used, 0); 62 + } 63 + 64 + static inline bool skcipher_writable(struct sock *sk) 65 + { 66 + return PAGE_SIZE <= skcipher_sndbuf(sk); 61 67 } 62 68 63 69 static int skcipher_alloc_sgl(struct sock *sk) ··· 251 245 struct af_alg_control con = {}; 252 246 long copied = 0; 253 247 bool enc = 0; 254 - int limit; 255 248 int err; 256 249 int i; 257 250 ··· 286 281 memcpy(ctx->iv, con.iv->iv, ivsize); 287 282 } 288 283 289 - limit = max_t(int, sk->sk_sndbuf, PAGE_SIZE); 290 - limit -= ctx->used; 291 - 292 284 while (size) { 293 285 struct scatterlist *sg; 294 286 unsigned long len = size; ··· 311 309 ctx->used += len; 312 310 copied += len; 313 311 size -= len; 314 - limit -= len; 315 312 continue; 316 313 } 317 314 318 - if (limit < PAGE_SIZE) { 315 + if (!skcipher_writable(sk)) { 319 316 err = skcipher_wait_for_wmem(sk, msg->msg_flags); 320 317 if (err) 321 318 goto unlock; 322 - 323 - limit = max_t(int, sk->sk_sndbuf, PAGE_SIZE); 324 - limit -= ctx->used; 325 319 } 326 320 327 - len = min_t(unsigned long, len, limit); 321 + len = min_t(unsigned long, len, skcipher_sndbuf(sk)); 328 322 329 323 err = skcipher_alloc_sgl(sk); 330 324 if (err) ··· 350 352 ctx->used += plen; 351 353 copied += plen; 352 354 size -= plen; 353 - limit -= plen; 354 355 sgl->cur++; 355 356 } while (len && sgl->cur < MAX_SGL_ENTS); 356 357 ··· 377 380 struct skcipher_ctx *ctx = ask->private; 378 381 struct skcipher_sg_list *sgl; 379 382 int err = -EINVAL; 380 - int limit; 381 383 382 384 lock_sock(sk); 
383 385 if (!ctx->more && ctx->used) ··· 385 389 if (!size) 386 390 goto done; 387 391 388 - limit = max_t(int, sk->sk_sndbuf, PAGE_SIZE); 389 - limit -= ctx->used; 390 - 391 - if (limit < PAGE_SIZE) { 392 + if (!skcipher_writable(sk)) { 392 393 err = skcipher_wait_for_wmem(sk, flags); 393 394 if (err) 394 395 goto unlock; 395 - 396 - limit = max_t(int, sk->sk_sndbuf, PAGE_SIZE); 397 - limit -= ctx->used; 398 396 } 399 397 400 398 err = skcipher_alloc_sgl(sk);