Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ubi: Fix races around ubi_refill_pools()

When writing a new Fastmap the first thing that happens
is refilling the pools in memory.
At this stage it is possible that new PEBs from the new pools
are already claimed and written with data.
If this happens before the new Fastmap data structure hits the
flash and we face a power cut, the freshly written PEB will not
be scanned and will go unnoticed.

Solve the issue by locking the pools until Fastmap is written.

Cc: <stable@vger.kernel.org>
Fixes: dbb7d2a88d ("UBI: Add fastmap core")
Signed-off-by: Richard Weinberger <richard@nod.at>

+30 -14
+2 -2
drivers/mtd/ubi/eba.c
··· 1210 1210 struct ubi_volume *vol; 1211 1211 uint32_t crc; 1212 1212 1213 + ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem)); 1214 + 1213 1215 vol_id = be32_to_cpu(vid_hdr->vol_id); 1214 1216 lnum = be32_to_cpu(vid_hdr->lnum); 1215 1217 ··· 1354 1352 } 1355 1353 1356 1354 ubi_assert(vol->eba_tbl->entries[lnum].pnum == from); 1357 - down_read(&ubi->fm_eba_sem); 1358 1355 vol->eba_tbl->entries[lnum].pnum = to; 1359 - up_read(&ubi->fm_eba_sem); 1360 1356 1361 1357 out_unlock_buf: 1362 1358 mutex_unlock(&ubi->buf_mutex);
+4 -2
drivers/mtd/ubi/fastmap-wl.c
··· 262 262 struct ubi_fm_pool *pool = &ubi->fm_wl_pool; 263 263 int pnum; 264 264 265 + ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem)); 266 + 265 267 if (pool->used == pool->size) { 266 268 /* We cannot update the fastmap here because this 267 269 * function is called in atomic context. ··· 305 303 306 304 wrk->anchor = 1; 307 305 wrk->func = &wear_leveling_worker; 308 - schedule_ubi_work(ubi, wrk); 306 + __schedule_ubi_work(ubi, wrk); 309 307 return 0; 310 308 } 311 309 ··· 346 344 spin_unlock(&ubi->wl_lock); 347 345 348 346 vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID; 349 - return schedule_erase(ubi, e, vol_id, lnum, torture); 347 + return schedule_erase(ubi, e, vol_id, lnum, torture, true); 350 348 } 351 349 352 350 /**
+10 -4
drivers/mtd/ubi/fastmap.c
··· 1492 1492 struct ubi_wl_entry *tmp_e; 1493 1493 1494 1494 down_write(&ubi->fm_protect); 1495 + down_write(&ubi->work_sem); 1496 + down_write(&ubi->fm_eba_sem); 1495 1497 1496 1498 ubi_refill_pools(ubi); 1497 1499 1498 1500 if (ubi->ro_mode || ubi->fm_disabled) { 1501 + up_write(&ubi->fm_eba_sem); 1502 + up_write(&ubi->work_sem); 1499 1503 up_write(&ubi->fm_protect); 1500 1504 return 0; 1501 1505 } 1502 1506 1503 1507 ret = ubi_ensure_anchor_pebs(ubi); 1504 1508 if (ret) { 1509 + up_write(&ubi->fm_eba_sem); 1510 + up_write(&ubi->work_sem); 1505 1511 up_write(&ubi->fm_protect); 1506 1512 return ret; 1507 1513 } 1508 1514 1509 1515 new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL); 1510 1516 if (!new_fm) { 1517 + up_write(&ubi->fm_eba_sem); 1518 + up_write(&ubi->work_sem); 1511 1519 up_write(&ubi->fm_protect); 1512 1520 return -ENOMEM; 1513 1521 } ··· 1624 1616 new_fm->e[0] = tmp_e; 1625 1617 } 1626 1618 1627 - down_write(&ubi->work_sem); 1628 - down_write(&ubi->fm_eba_sem); 1629 1619 ret = ubi_write_fastmap(ubi, new_fm); 1630 - up_write(&ubi->fm_eba_sem); 1631 - up_write(&ubi->work_sem); 1632 1620 1633 1621 if (ret) 1634 1622 goto err; 1635 1623 1636 1624 out_unlock: 1625 + up_write(&ubi->fm_eba_sem); 1626 + up_write(&ubi->work_sem); 1637 1627 up_write(&ubi->fm_protect); 1638 1628 kfree(old_fm); 1639 1629 return ret;
+14 -6
drivers/mtd/ubi/wl.c
··· 580 580 * failure. 581 581 */ 582 582 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, 583 - int vol_id, int lnum, int torture) 583 + int vol_id, int lnum, int torture, bool nested) 584 584 { 585 585 struct ubi_work *wl_wrk; 586 586 ··· 599 599 wl_wrk->lnum = lnum; 600 600 wl_wrk->torture = torture; 601 601 602 - schedule_ubi_work(ubi, wl_wrk); 602 + if (nested) 603 + __schedule_ubi_work(ubi, wl_wrk); 604 + else 605 + schedule_ubi_work(ubi, wl_wrk); 603 606 return 0; 604 607 } 605 608 ··· 666 663 667 664 vid_hdr = ubi_get_vid_hdr(vidb); 668 665 666 + down_read(&ubi->fm_eba_sem); 669 667 mutex_lock(&ubi->move_mutex); 670 668 spin_lock(&ubi->wl_lock); 671 669 ubi_assert(!ubi->move_from && !ubi->move_to); ··· 897 893 898 894 dbg_wl("done"); 899 895 mutex_unlock(&ubi->move_mutex); 896 + up_read(&ubi->fm_eba_sem); 900 897 return 0; 901 898 902 899 /* ··· 948 943 } 949 944 950 945 mutex_unlock(&ubi->move_mutex); 946 + up_read(&ubi->fm_eba_sem); 951 947 return 0; 952 948 953 949 out_error: ··· 970 964 out_ro: 971 965 ubi_ro_mode(ubi); 972 966 mutex_unlock(&ubi->move_mutex); 967 + up_read(&ubi->fm_eba_sem); 973 968 ubi_assert(err != 0); 974 969 return err < 0 ? 
err : -EIO; 975 970 ··· 978 971 ubi->wl_scheduled = 0; 979 972 spin_unlock(&ubi->wl_lock); 980 973 mutex_unlock(&ubi->move_mutex); 974 + up_read(&ubi->fm_eba_sem); 981 975 ubi_free_vid_buf(vidb); 982 976 return 0; 983 977 } ··· 1101 1093 int err1; 1102 1094 1103 1095 /* Re-schedule the LEB for erasure */ 1104 - err1 = schedule_erase(ubi, e, vol_id, lnum, 0); 1096 + err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false); 1105 1097 if (err1) { 1106 1098 wl_entry_destroy(ubi, e); 1107 1099 err = err1; ··· 1282 1274 } 1283 1275 spin_unlock(&ubi->wl_lock); 1284 1276 1285 - err = schedule_erase(ubi, e, vol_id, lnum, torture); 1277 + err = schedule_erase(ubi, e, vol_id, lnum, torture, false); 1286 1278 if (err) { 1287 1279 spin_lock(&ubi->wl_lock); 1288 1280 wl_tree_add(e, &ubi->used); ··· 1573 1565 e->pnum = aeb->pnum; 1574 1566 e->ec = aeb->ec; 1575 1567 ubi->lookuptbl[e->pnum] = e; 1576 - if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) { 1568 + if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) { 1577 1569 wl_entry_destroy(ubi, e); 1578 1570 goto out_free; 1579 1571 } ··· 1652 1644 e->ec = aeb->ec; 1653 1645 ubi_assert(!ubi->lookuptbl[e->pnum]); 1654 1646 ubi->lookuptbl[e->pnum] = e; 1655 - if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) { 1647 + if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) { 1656 1648 wl_entry_destroy(ubi, e); 1657 1649 goto out_free; 1658 1650 }