Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mtdchar: prevent unbounded allocation in MEMWRITE ioctl

In the mtdchar_write_ioctl() function, memdup_user() is called with its
'len' parameter set to verbatim values provided by user space via a
struct mtd_write_req. Both the 'len' and 'ooblen' fields of that
structure are 64-bit unsigned integers, which means the MEMWRITE ioctl
can trigger unbounded kernel memory allocation requests.
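
To make the hole concrete: a MEMWRITE caller fully controls the 64-bit 'len'
and 'ooblen' fields, so a single ioctl could ask memdup_user() for an
arbitrarily large buffer. Below is a minimal user-space sketch of such a
request; the device path is hypothetical and the program is purely
illustrative, not a polished reproducer.

/*
 * Illustration only: how an oversized 'len' reached memdup_user() before
 * this fix. Run only against a scratch MTD device.
 */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>   /* struct mtd_write_req, MEMWRITE */

int main(void)
{
    struct mtd_write_req req;
    int fd = open("/dev/mtd0", O_RDWR);   /* hypothetical device */

    if (fd < 0)
        return 1;

    memset(&req, 0, sizeof(req));
    req.start = 0;
    req.len = ~0ULL;                 /* user-controlled 64-bit length ... */
    req.usr_data = (uintptr_t)"x";   /* ... paired with a tiny real buffer */
    req.mode = MTD_OPS_PLACE_OOB;

    /* Before the fix: one memdup_user(usr_data, req.len) attempt. */
    return ioctl(fd, MEMWRITE, &req);
}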

Fix by iterating over the buffers provided by user space in a loop,
processing at most mtd->erasesize bytes in each iteration. Adopt some
checks from mtd_check_oob_ops() to retain backward user space
compatibility.
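
In other words, the single unbounded memdup_user() becomes a bounded bounce
buffer that is refilled on every iteration. A condensed kernel-side sketch of
just that pattern, with a hypothetical helper name and the MTD write itself
elided:

/*
 * Sketch of the bounded bounce-buffer pattern (hypothetical helper name,
 * error paths trimmed); assumes <linux/mtd/mtd.h>, <linux/slab.h> and
 * <linux/uaccess.h>.
 */
static int write_in_chunks(struct mtd_info *mtd, loff_t start,
                           const void __user *usr_data, uint64_t total)
{
    size_t buflen = min_t(size_t, total, mtd->erasesize);
    uint8_t *buf = kmalloc(buflen, GFP_KERNEL);   /* bounded allocation */
    int ret = 0;

    if (!buf)
        return -ENOMEM;

    while (total > 0) {
        size_t chunk = min_t(size_t, total, buflen);

        if (copy_from_user(buf, usr_data, chunk)) {
            ret = -EFAULT;
            break;
        }
        /* ... hand 'chunk' bytes at 'start' to mtd_write_oob() ... */
        start += chunk;
        usr_data += chunk;
        total -= chunk;
    }

    kfree(buf);
    return ret;
}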

Suggested-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Michał Kępień <kernel@kempniu.pl>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/linux-mtd/20211130113149.21848-1-kernel@kempniu.pl

Authored by Michał Kępień, committed by Miquel Raynal
Commit 6420ac0a (parent dd8a2e88)

drivers/mtd/mtdchar.c: +86 -22
···
     }
 }
 
+static void adjust_oob_length(struct mtd_info *mtd, uint64_t start,
+                              struct mtd_oob_ops *ops)
+{
+    uint32_t start_page, end_page;
+    u32 oob_per_page;
+
+    if (ops->len == 0 || ops->ooblen == 0)
+        return;
+
+    start_page = mtd_div_by_ws(start, mtd);
+    end_page = mtd_div_by_ws(start + ops->len - 1, mtd);
+    oob_per_page = mtd_oobavail(mtd, ops);
+
+    ops->ooblen = min_t(size_t, ops->ooblen,
+                        (end_page - start_page + 1) * oob_per_page);
+}
+
 static int mtdchar_write_ioctl(struct mtd_info *mtd,
                                struct mtd_write_req __user *argp)
 {
     struct mtd_info *master = mtd_get_master(mtd);
     struct mtd_write_req req;
-    struct mtd_oob_ops ops = {};
     const void __user *usr_data, *usr_oob;
-    int ret;
+    uint8_t *datbuf = NULL, *oobbuf = NULL;
+    size_t datbuf_len, oobbuf_len;
+    int ret = 0;
 
     if (copy_from_user(&req, argp, sizeof(req)))
         return -EFAULT;
···
     if (!master->_write_oob)
         return -EOPNOTSUPP;
-    ops.mode = req.mode;
-    ops.len = (size_t)req.len;
-    ops.ooblen = (size_t)req.ooblen;
-    ops.ooboffs = 0;
 
-    if (usr_data) {
-        ops.datbuf = memdup_user(usr_data, ops.len);
-        if (IS_ERR(ops.datbuf))
-            return PTR_ERR(ops.datbuf);
-    } else {
-        ops.datbuf = NULL;
+    if (!usr_data)
+        req.len = 0;
+
+    if (!usr_oob)
+        req.ooblen = 0;
+
+    if (req.start + req.len > mtd->size)
+        return -EINVAL;
+
+    datbuf_len = min_t(size_t, req.len, mtd->erasesize);
+    if (datbuf_len > 0) {
+        datbuf = kmalloc(datbuf_len, GFP_KERNEL);
+        if (!datbuf)
+            return -ENOMEM;
     }
 
-    if (usr_oob) {
-        ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
-        if (IS_ERR(ops.oobbuf)) {
-            kfree(ops.datbuf);
-            return PTR_ERR(ops.oobbuf);
+    oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
+    if (oobbuf_len > 0) {
+        oobbuf = kmalloc(oobbuf_len, GFP_KERNEL);
+        if (!oobbuf) {
+            kfree(datbuf);
+            return -ENOMEM;
         }
-    } else {
-        ops.oobbuf = NULL;
     }
 
-    ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);
+    while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
+        struct mtd_oob_ops ops = {
+            .mode = req.mode,
+            .len = min_t(size_t, req.len, datbuf_len),
+            .ooblen = min_t(size_t, req.ooblen, oobbuf_len),
+            .datbuf = datbuf,
+            .oobbuf = oobbuf,
+        };
 
-    kfree(ops.datbuf);
-    kfree(ops.oobbuf);
+        /*
+         * Shorten non-page-aligned, eraseblock-sized writes so that
+         * the write ends on an eraseblock boundary. This is necessary
+         * for adjust_oob_length() to properly handle non-page-aligned
+         * writes.
+         */
+        if (ops.len == mtd->erasesize)
+            ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);
+
+        /*
+         * For writes which are not OOB-only, adjust the amount of OOB
+         * data written according to the number of data pages written.
+         * This is necessary to prevent OOB data from being skipped
+         * over in data+OOB writes requiring multiple mtd_write_oob()
+         * calls to be completed.
+         */
+        adjust_oob_length(mtd, req.start, &ops);
+
+        if (copy_from_user(datbuf, usr_data, ops.len) ||
+            copy_from_user(oobbuf, usr_oob, ops.ooblen)) {
+            ret = -EFAULT;
+            break;
+        }
+
+        ret = mtd_write_oob(mtd, req.start, &ops);
+        if (ret)
+            break;
+
+        req.start += ops.retlen;
+        req.len -= ops.retlen;
+        usr_data += ops.retlen;
+
+        req.ooblen -= ops.oobretlen;
+        usr_oob += ops.oobretlen;
+    }
+
+    kfree(datbuf);
+    kfree(oobbuf);
 
     return ret;
 }
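
For completeness, here is the request shape the new loop serves: a data+OOB
write covering several pages. Page size, OOB size, and the device path below
are assumptions for illustration. After this change the kernel may split such
a request into multiple mtd_write_oob() calls internally, but user space sees
the same result as before.

/*
 * Illustrative multi-page data+OOB MEMWRITE; sizes and the device path
 * are assumptions, not properties of any particular chip.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

int main(void)
{
    size_t datalen = 4 * 4096;   /* four 4 KiB pages (assumed) */
    size_t ooblen = 4 * 64;      /* 64 OOB bytes per page (assumed) */
    uint8_t *data = malloc(datalen);
    uint8_t *oob = malloc(ooblen);
    struct mtd_write_req req;
    int fd = open("/dev/mtd0", O_RDWR);   /* hypothetical device */

    if (fd < 0 || !data || !oob)
        return 1;

    memset(data, 0xa5, datalen);
    memset(oob, 0xff, ooblen);
    memset(&req, 0, sizeof(req));
    req.start = 0;
    req.len = datalen;
    req.ooblen = ooblen;
    req.usr_data = (uintptr_t)data;
    req.usr_oob = (uintptr_t)oob;
    req.mode = MTD_OPS_AUTO_OOB;

    /* One ioctl; the kernel now chunks it by eraseblock internally. */
    return ioctl(fd, MEMWRITE, &req);
}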