
dm crypt: use GFP_ATOMIC when allocating crypto requests from softirq

Commit 39d42fa96ba1 ("dm crypt: add flags to optionally bypass kcryptd
workqueues") made it possible for some code paths in dm-crypt to be
executed in softirq context, when the underlying driver processes IO
requests in interrupt/softirq context.
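For background: GFP_NOIO implies __GFP_DIRECT_RECLAIM, so mempool_alloc()
is allowed to sleep waiting for a free element and asserts this with
might_sleep() (mm/mempool.c:381 in the trace below). A minimal sketch of
the problematic pattern, with a hypothetical function name, not actual
dm-crypt code:

#include <linux/mempool.h>

/*
 * Hypothetical completion handler invoked in softirq context, e.g. via
 * blk_done_softirq(). Because GFP_NOIO allows direct reclaim,
 * mempool_alloc() may sleep here, triggering "BUG: sleeping function
 * called from invalid context".
 */
static void *broken_softirq_alloc(mempool_t *pool)
{
	return mempool_alloc(pool, GFP_NOIO);	/* may sleep: invalid in softirq */
}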

In this case, when allocating a new crypto request, we may sometimes
get a stack trace like the one below:

[ 210.103008][ C0] BUG: sleeping function called from invalid context at mm/mempool.c:381
[ 210.104746][ C0] in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 2602, name: fio
[ 210.106599][ C0] CPU: 0 PID: 2602 Comm: fio Tainted: G W 5.10.0+ #50
[ 210.108331][ C0] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 0.0.0 02/06/2015
[ 210.110212][ C0] Call Trace:
[ 210.110921][ C0] <IRQ>
[ 210.111527][ C0] dump_stack+0x7d/0xa3
[ 210.112411][ C0] ___might_sleep.cold+0x122/0x151
[ 210.113527][ C0] mempool_alloc+0x16b/0x2f0
[ 210.114524][ C0] ? __queue_work+0x515/0xde0
[ 210.115553][ C0] ? mempool_resize+0x700/0x700
[ 210.116586][ C0] ? crypt_endio+0x91/0x180
[ 210.117479][ C0] ? blk_update_request+0x757/0x1150
[ 210.118513][ C0] ? blk_mq_end_request+0x4b/0x480
[ 210.119572][ C0] ? blk_done_softirq+0x21d/0x340
[ 210.120628][ C0] ? __do_softirq+0x190/0x611
[ 210.121626][ C0] crypt_convert+0x29f9/0x4c00
[ 210.122668][ C0] ? _raw_spin_lock_irqsave+0x87/0xe0
[ 210.123824][ C0] ? kasan_set_track+0x1c/0x30
[ 210.124858][ C0] ? crypt_iv_tcw_ctr+0x4a0/0x4a0
[ 210.125930][ C0] ? kmem_cache_free+0x104/0x470
[ 210.126973][ C0] ? crypt_endio+0x91/0x180
[ 210.127947][ C0] kcryptd_crypt_read_convert+0x30e/0x420
[ 210.129165][ C0] blk_update_request+0x757/0x1150
[ 210.130231][ C0] blk_mq_end_request+0x4b/0x480
[ 210.131294][ C0] blk_done_softirq+0x21d/0x340
[ 210.132332][ C0] ? _raw_spin_lock+0x81/0xd0
[ 210.133289][ C0] ? blk_mq_stop_hw_queue+0x30/0x30
[ 210.134399][ C0] ? _raw_read_lock_irq+0x40/0x40
[ 210.135458][ C0] __do_softirq+0x190/0x611
[ 210.136409][ C0] ? handle_edge_irq+0x221/0xb60
[ 210.137447][ C0] asm_call_irq_on_stack+0x12/0x20
[ 210.138507][ C0] </IRQ>
[ 210.139118][ C0] do_softirq_own_stack+0x37/0x40
[ 210.140191][ C0] irq_exit_rcu+0x110/0x1b0
[ 210.141151][ C0] common_interrupt+0x74/0x120
[ 210.142171][ C0] asm_common_interrupt+0x1e/0x40

Fix this by allocating crypto requests with the GFP_ATOMIC mask when
running in interrupt context.
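The core of the change is to choose the GFP mask based on execution
context. A condensed sketch of that pattern, assuming in_interrupt()
covers the softirq case (simplified from the diff below):

#include <linux/interrupt.h>
#include <linux/mempool.h>

/*
 * Sleep in process context (GFP_NOIO); never sleep in interrupt/softirq
 * context (GFP_ATOMIC). With GFP_ATOMIC the mempool cannot wait for an
 * element to be returned, so the result may be NULL and the caller must
 * handle that failure.
 */
static void *alloc_crypto_req(mempool_t *pool)
{
	gfp_t gfp = in_interrupt() ? GFP_ATOMIC : GFP_NOIO;

	return mempool_alloc(pool, gfp);	/* NULL possible under GFP_ATOMIC */
}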

Fixes: 39d42fa96ba1 ("dm crypt: add flags to optionally bypass kcryptd workqueues")
Cc: stable@vger.kernel.org # v5.9+
Reported-by: Maciej S. Szmigiero <mail@maciej.szmigiero.name>
Signed-off-by: Ignat Korchagin <ignat@cloudflare.com>
Acked-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>

Authored by Ignat Korchagin, committed by Mike Snitzer
d68b2958 8abec36d

+25 -10
drivers/md/dm-crypt.c
···
 static void kcryptd_async_done(struct crypto_async_request *async_req,
 			       int error);

-static void crypt_alloc_req_skcipher(struct crypt_config *cc,
+static int crypt_alloc_req_skcipher(struct crypt_config *cc,
 				     struct convert_context *ctx)
 {
 	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

-	if (!ctx->r.req)
-		ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
+	if (!ctx->r.req) {
+		ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+		if (!ctx->r.req)
+			return -ENOMEM;
+	}

 	skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
···
 	skcipher_request_set_callback(ctx->r.req,
 				      CRYPTO_TFM_REQ_MAY_BACKLOG,
 				      kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
+
+	return 0;
 }

-static void crypt_alloc_req_aead(struct crypt_config *cc,
+static int crypt_alloc_req_aead(struct crypt_config *cc,
 				 struct convert_context *ctx)
 {
-	if (!ctx->r.req_aead)
-		ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
+	if (!ctx->r.req) {
+		ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+		if (!ctx->r.req)
+			return -ENOMEM;
+	}

 	aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
···
 	aead_request_set_callback(ctx->r.req_aead,
 				  CRYPTO_TFM_REQ_MAY_BACKLOG,
 				  kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
+
+	return 0;
 }

-static void crypt_alloc_req(struct crypt_config *cc,
+static int crypt_alloc_req(struct crypt_config *cc,
 			    struct convert_context *ctx)
 {
 	if (crypt_integrity_aead(cc))
-		crypt_alloc_req_aead(cc, ctx);
+		return crypt_alloc_req_aead(cc, ctx);
 	else
-		crypt_alloc_req_skcipher(cc, ctx);
+		return crypt_alloc_req_skcipher(cc, ctx);
 }

 static void crypt_free_req_skcipher(struct crypt_config *cc,
···

 	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

-		crypt_alloc_req(cc, ctx);
+		r = crypt_alloc_req(cc, ctx);
+		if (r) {
+			complete(&ctx->restart);
+			return BLK_STS_DEV_RESOURCE;
+		}
+
 		atomic_inc(&ctx->cc_pending);

 		if (crypt_integrity_aead(cc))
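Because an atomic allocation can fail, crypt_convert() now reports
BLK_STS_DEV_RESOURCE so the IO can be retried rather than lost. A
hypothetical, simplified sketch of a caller-side retry, assuming the
dm-crypt internals io->cc, io->work, kcryptd_crypt and cc->crypt_queue
of this era; the exact plumbing lives in the read/write convert paths:

#include <linux/workqueue.h>
#include <linux/blk_types.h>

static void crypt_convert_retry(struct dm_crypt_io *io, blk_status_t r)
{
	struct crypt_config *cc = io->cc;

	if (r == BLK_STS_DEV_RESOURCE) {
		/* Defer to process context, where GFP_NOIO may sleep. */
		INIT_WORK(&io->work, kcryptd_crypt);
		queue_work(cc->crypt_queue, &io->work);
		return;
	}

	/* ...normal completion or hard-error handling... */
}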