Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

UBI: introduce the VID buffer concept

Currently, all VID headers are allocated and freed using the
ubi_zalloc_vid_hdr() and ubi_free_vid_hdr() functions. These functions
make sure to align allocation on ubi->vid_hdr_alsize and adjust the
vid_hdr pointer to match the ubi->vid_hdr_shift requirements.
This works fine, but is a bit convoluted.
Moreover, the future introduction of LEB consolidation (needed to support
MLC/TLC NANDs) will allow a VID buffer to contain more than one VID
header.

Hence the creation of a ubi_vid_io_buf struct to attach extra information
to the VID header.

We currently only store the actual pointer of the underlying buffer, but
will soon add the number of VID headers contained in the buffer.

Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Signed-off-by: Richard Weinberger <richard@nod.at>

authored by

Boris Brezillon and committed by
Richard Weinberger
3291b52f 799dca34

+219 -143
+21 -19
drivers/mtd/ubi/attach.c
··· 453 453 { 454 454 int len, err, second_is_newer, bitflips = 0, corrupted = 0; 455 455 uint32_t data_crc, crc; 456 - struct ubi_vid_hdr *vh = NULL; 456 + struct ubi_vid_io_buf *vidb = NULL; 457 457 unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum); 458 458 459 459 if (sqnum2 == aeb->sqnum) { ··· 496 496 return bitflips << 1; 497 497 } 498 498 499 - vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); 500 - if (!vh) 499 + vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); 500 + if (!vidb) 501 501 return -ENOMEM; 502 502 503 503 pnum = aeb->pnum; 504 - err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); 504 + err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0); 505 505 if (err) { 506 506 if (err == UBI_IO_BITFLIPS) 507 507 bitflips = 1; ··· 515 515 } 516 516 } 517 517 518 - vid_hdr = vh; 518 + vid_hdr = ubi_get_vid_hdr(vidb); 519 519 } 520 520 521 521 /* Read the data of the copy and check the CRC */ ··· 541 541 } 542 542 mutex_unlock(&ubi->buf_mutex); 543 543 544 - ubi_free_vid_hdr(ubi, vh); 544 + ubi_free_vid_buf(vidb); 545 545 546 546 if (second_is_newer) 547 547 dbg_bld("second PEB %d is newer, copy_flag is set", pnum); ··· 553 553 out_unlock: 554 554 mutex_unlock(&ubi->buf_mutex); 555 555 out_free_vidh: 556 - ubi_free_vid_hdr(ubi, vh); 556 + ubi_free_vid_buf(vidb); 557 557 return err; 558 558 } 559 559 ··· 955 955 int pnum, bool fast) 956 956 { 957 957 struct ubi_ec_hdr *ech = ai->ech; 958 - struct ubi_vid_hdr *vidh = ai->vidh; 958 + struct ubi_vid_io_buf *vidb = ai->vidb; 959 + struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb); 959 960 long long ec; 960 961 int err, bitflips = 0, vol_id = -1, ec_err = 0; 961 962 ··· 1054 1053 1055 1054 /* OK, we've done with the EC header, let's look at the VID header */ 1056 1055 1057 - err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0); 1056 + err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0); 1058 1057 if (err < 0) 1059 1058 return err; 1060 1059 switch (err) { ··· 1397 1396 if (!ai->ech) 1398 1397 return err; 1399 1398 1400 - ai->vidh = 
ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); 1401 - if (!ai->vidh) 1399 + ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); 1400 + if (!ai->vidb) 1402 1401 goto out_ech; 1403 1402 1404 1403 for (pnum = start; pnum < ubi->peb_count; pnum++) { ··· 1447 1446 if (err) 1448 1447 goto out_vidh; 1449 1448 1450 - ubi_free_vid_hdr(ubi, ai->vidh); 1449 + ubi_free_vid_buf(ai->vidb); 1451 1450 kfree(ai->ech); 1452 1451 1453 1452 return 0; 1454 1453 1455 1454 out_vidh: 1456 - ubi_free_vid_hdr(ubi, ai->vidh); 1455 + ubi_free_vid_buf(ai->vidb); 1457 1456 out_ech: 1458 1457 kfree(ai->ech); 1459 1458 return err; ··· 1511 1510 if (!scan_ai->ech) 1512 1511 goto out_ai; 1513 1512 1514 - scan_ai->vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); 1515 - if (!scan_ai->vidh) 1513 + scan_ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); 1514 + if (!scan_ai->vidb) 1516 1515 goto out_ech; 1517 1516 1518 1517 for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) { ··· 1524 1523 goto out_vidh; 1525 1524 } 1526 1525 1527 - ubi_free_vid_hdr(ubi, scan_ai->vidh); 1526 + ubi_free_vid_buf(scan_ai->vidb); 1528 1527 kfree(scan_ai->ech); 1529 1528 1530 1529 if (scan_ai->force_full_scan) ··· 1545 1544 return err; 1546 1545 1547 1546 out_vidh: 1548 - ubi_free_vid_hdr(ubi, scan_ai->vidh); 1547 + ubi_free_vid_buf(scan_ai->vidb); 1549 1548 out_ech: 1550 1549 kfree(scan_ai->ech); 1551 1550 out_ai: ··· 1669 1668 */ 1670 1669 static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai) 1671 1670 { 1672 - struct ubi_vid_hdr *vidh = ai->vidh; 1671 + struct ubi_vid_io_buf *vidb = ai->vidb; 1672 + struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb); 1673 1673 int pnum, err, vols_found = 0; 1674 1674 struct rb_node *rb1, *rb2; 1675 1675 struct ubi_ainf_volume *av; ··· 1806 1804 1807 1805 last_aeb = aeb; 1808 1806 1809 - err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1); 1807 + err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidb, 1); 1810 1808 if (err && err != UBI_IO_BITFLIPS) { 1811 1809 ubi_err(ubi, "VID header is not OK (%d)", 
1812 1810 err);
+47 -34
drivers/mtd/ubi/eba.c
··· 513 513 void *buf, int offset, int len, int check) 514 514 { 515 515 int err, pnum, scrub = 0, vol_id = vol->vol_id; 516 + struct ubi_vid_io_buf *vidb; 516 517 struct ubi_vid_hdr *vid_hdr; 517 518 uint32_t uninitialized_var(crc); 518 519 ··· 544 543 545 544 retry: 546 545 if (check) { 547 - vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 548 - if (!vid_hdr) { 546 + vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS); 547 + if (!vidb) { 549 548 err = -ENOMEM; 550 549 goto out_unlock; 551 550 } 552 551 553 - err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1); 552 + vid_hdr = ubi_get_vid_hdr(vidb); 553 + 554 + err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1); 554 555 if (err && err != UBI_IO_BITFLIPS) { 555 556 if (err > 0) { 556 557 /* ··· 598 595 ubi_assert(len == be32_to_cpu(vid_hdr->data_size)); 599 596 600 597 crc = be32_to_cpu(vid_hdr->data_crc); 601 - ubi_free_vid_hdr(ubi, vid_hdr); 598 + ubi_free_vid_buf(vidb); 602 599 } 603 600 604 601 err = ubi_io_read_data(ubi, buf, pnum, offset, len); ··· 635 632 return err; 636 633 637 634 out_free: 638 - ubi_free_vid_hdr(ubi, vid_hdr); 635 + ubi_free_vid_buf(vidb); 639 636 out_unlock: 640 637 leb_read_unlock(ubi, vol_id, lnum); 641 638 return err; ··· 704 701 * @buf: data which was not written because of the write failure 705 702 * @offset: offset of the failed write 706 703 * @len: how many bytes should have been written 707 - * @vid: VID header 704 + * @vidb: VID buffer 708 705 * @retry: whether the caller should retry in case of failure 709 706 * 710 707 * This function is called in case of a write failure and moves all good data ··· 716 713 */ 717 714 static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum, 718 715 const void *buf, int offset, int len, 719 - struct ubi_vid_hdr *vid_hdr, bool *retry) 716 + struct ubi_vid_io_buf *vidb, bool *retry) 720 717 { 721 718 struct ubi_device *ubi = vol->ubi; 719 + struct ubi_vid_hdr *vid_hdr; 722 720 int new_pnum, err, vol_id = vol->vol_id, data_size; 723 721 uint32_t crc; 724 
722 ··· 734 730 ubi_msg(ubi, "recover PEB %d, move data to PEB %d", 735 731 pnum, new_pnum); 736 732 737 - err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1); 733 + err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1); 738 734 if (err && err != UBI_IO_BITFLIPS) { 739 735 if (err > 0) 740 736 err = -EIO; ··· 763 759 vid_hdr->copy_flag = 1; 764 760 vid_hdr->data_size = cpu_to_be32(data_size); 765 761 vid_hdr->data_crc = cpu_to_be32(crc); 766 - err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr); 762 + err = ubi_io_write_vid_hdr(ubi, new_pnum, vidb); 767 763 if (err) 768 764 goto out_unlock; 769 765 ··· 814 810 { 815 811 int err, idx = vol_id2idx(ubi, vol_id), tries; 816 812 struct ubi_volume *vol = ubi->volumes[idx]; 817 - struct ubi_vid_hdr *vid_hdr; 813 + struct ubi_vid_io_buf *vidb; 818 814 819 - vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 820 - if (!vid_hdr) 815 + vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS); 816 + if (!vidb) 821 817 return -ENOMEM; 822 818 823 819 for (tries = 0; tries <= UBI_IO_RETRIES; tries++) { 824 820 bool retry; 825 821 826 - err = try_recover_peb(vol, pnum, lnum, buf, offset, len, 827 - vid_hdr, &retry); 822 + err = try_recover_peb(vol, pnum, lnum, buf, offset, len, vidb, 823 + &retry); 828 824 if (!err || !retry) 829 825 break; 830 826 831 827 ubi_msg(ubi, "try again"); 832 828 } 833 829 834 - ubi_free_vid_hdr(ubi, vid_hdr); 830 + ubi_free_vid_buf(vidb); 835 831 836 832 return err; 837 833 } ··· 840 836 * try_write_vid_and_data - try to write VID header and data to a new PEB. 841 837 * @vol: volume description object 842 838 * @lnum: logical eraseblock number 843 - * @vid_hdr: VID header to write 839 + * @vidb: the VID buffer to write 844 840 * @buf: buffer containing the data 845 841 * @offset: where to start writing data 846 842 * @len: how many bytes should be written ··· 852 848 * flash media, but may be some garbage. 
853 849 */ 854 850 static int try_write_vid_and_data(struct ubi_volume *vol, int lnum, 855 - struct ubi_vid_hdr *vid_hdr, const void *buf, 851 + struct ubi_vid_io_buf *vidb, const void *buf, 856 852 int offset, int len) 857 853 { 858 854 struct ubi_device *ubi = vol->ubi; ··· 869 865 dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d", 870 866 len, offset, vol_id, lnum, pnum); 871 867 872 - err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); 868 + err = ubi_io_write_vid_hdr(ubi, pnum, vidb); 873 869 if (err) { 874 870 ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d", 875 871 vol_id, lnum, pnum); ··· 918 914 const void *buf, int offset, int len) 919 915 { 920 916 int err, pnum, tries, vol_id = vol->vol_id; 917 + struct ubi_vid_io_buf *vidb; 921 918 struct ubi_vid_hdr *vid_hdr; 922 919 923 920 if (ubi->ro_mode) ··· 948 943 * The logical eraseblock is not mapped. We have to get a free physical 949 944 * eraseblock and write the volume identifier header there first. 
950 945 */ 951 - vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 952 - if (!vid_hdr) { 946 + vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS); 947 + if (!vidb) { 953 948 leb_write_unlock(ubi, vol_id, lnum); 954 949 return -ENOMEM; 955 950 } 951 + 952 + vid_hdr = ubi_get_vid_hdr(vidb); 956 953 957 954 vid_hdr->vol_type = UBI_VID_DYNAMIC; 958 955 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); ··· 964 957 vid_hdr->data_pad = cpu_to_be32(vol->data_pad); 965 958 966 959 for (tries = 0; tries <= UBI_IO_RETRIES; tries++) { 967 - err = try_write_vid_and_data(vol, lnum, vid_hdr, buf, offset, 968 - len); 960 + err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len); 969 961 if (err != -EIO || !ubi->bad_allowed) 970 962 break; 971 963 ··· 978 972 ubi_msg(ubi, "try another PEB"); 979 973 } 980 974 981 - ubi_free_vid_hdr(ubi, vid_hdr); 975 + ubi_free_vid_buf(vidb); 982 976 983 977 out: 984 978 if (err) ··· 1015 1009 int lnum, const void *buf, int len, int used_ebs) 1016 1010 { 1017 1011 int err, tries, data_size = len, vol_id = vol->vol_id; 1012 + struct ubi_vid_io_buf *vidb; 1018 1013 struct ubi_vid_hdr *vid_hdr; 1019 1014 uint32_t crc; 1020 1015 ··· 1028 1021 else 1029 1022 ubi_assert(!(len & (ubi->min_io_size - 1))); 1030 1023 1031 - vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 1032 - if (!vid_hdr) 1024 + vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS); 1025 + if (!vidb) 1033 1026 return -ENOMEM; 1027 + 1028 + vid_hdr = ubi_get_vid_hdr(vidb); 1034 1029 1035 1030 err = leb_write_lock(ubi, vol_id, lnum); 1036 1031 if (err) ··· 1053 1044 ubi_assert(vol->eba_tbl->entries[lnum].pnum < 0); 1054 1045 1055 1046 for (tries = 0; tries <= UBI_IO_RETRIES; tries++) { 1056 - err = try_write_vid_and_data(vol, lnum, vid_hdr, buf, 0, len); 1047 + err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len); 1057 1048 if (err != -EIO || !ubi->bad_allowed) 1058 1049 break; 1059 1050 ··· 1067 1058 leb_write_unlock(ubi, vol_id, lnum); 1068 1059 1069 1060 out: 1070 - ubi_free_vid_hdr(ubi, vid_hdr); 1061 + 
ubi_free_vid_buf(vidb); 1071 1062 1072 1063 return err; 1073 1064 } ··· 1093 1084 int lnum, const void *buf, int len) 1094 1085 { 1095 1086 int err, tries, vol_id = vol->vol_id; 1087 + struct ubi_vid_io_buf *vidb; 1096 1088 struct ubi_vid_hdr *vid_hdr; 1097 1089 uint32_t crc; 1098 1090 ··· 1111 1101 return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0); 1112 1102 } 1113 1103 1114 - vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 1115 - if (!vid_hdr) 1104 + vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS); 1105 + if (!vidb) 1116 1106 return -ENOMEM; 1107 + 1108 + vid_hdr = ubi_get_vid_hdr(vidb); 1117 1109 1118 1110 mutex_lock(&ubi->alc_mutex); 1119 1111 err = leb_write_lock(ubi, vol_id, lnum); ··· 1137 1125 dbg_eba("change LEB %d:%d", vol_id, lnum); 1138 1126 1139 1127 for (tries = 0; tries <= UBI_IO_RETRIES; tries++) { 1140 - err = try_write_vid_and_data(vol, lnum, vid_hdr, buf, 0, len); 1128 + err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len); 1141 1129 if (err != -EIO || !ubi->bad_allowed) 1142 1130 break; 1143 1131 ··· 1157 1145 1158 1146 out_mutex: 1159 1147 mutex_unlock(&ubi->alc_mutex); 1160 - ubi_free_vid_hdr(ubi, vid_hdr); 1148 + ubi_free_vid_buf(vidb); 1161 1149 return err; 1162 1150 } 1163 1151 ··· 1203 1191 * o a negative error code in case of failure. 
1204 1192 */ 1205 1193 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 1206 - struct ubi_vid_hdr *vid_hdr) 1194 + struct ubi_vid_io_buf *vidb) 1207 1195 { 1208 1196 int err, vol_id, lnum, data_size, aldata_size, idx; 1197 + struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb); 1209 1198 struct ubi_volume *vol; 1210 1199 uint32_t crc; 1211 1200 ··· 1318 1305 } 1319 1306 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 1320 1307 1321 - err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); 1308 + err = ubi_io_write_vid_hdr(ubi, to, vidb); 1322 1309 if (err) { 1323 1310 if (err == -EIO) 1324 1311 err = MOVE_TARGET_WR_ERR; ··· 1328 1315 cond_resched(); 1329 1316 1330 1317 /* Read the VID header back and check if it was written correctly */ 1331 - err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1); 1318 + err = ubi_io_read_vid_hdr(ubi, to, vidb, 1); 1332 1319 if (err) { 1333 1320 if (err != UBI_IO_BITFLIPS) { 1334 1321 ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
+43 -28
drivers/mtd/ubi/fastmap.c
··· 110 110 * Returns a new struct ubi_vid_hdr on success. 111 111 * NULL indicates out of memory. 112 112 */ 113 - static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id) 113 + static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id) 114 114 { 115 - struct ubi_vid_hdr *new; 115 + struct ubi_vid_io_buf *new; 116 + struct ubi_vid_hdr *vh; 116 117 117 - new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); 118 + new = ubi_alloc_vid_buf(ubi, GFP_KERNEL); 118 119 if (!new) 119 120 goto out; 120 121 121 - new->vol_type = UBI_VID_DYNAMIC; 122 - new->vol_id = cpu_to_be32(vol_id); 122 + vh = ubi_get_vid_hdr(new); 123 + vh->vol_type = UBI_VID_DYNAMIC; 124 + vh->vol_id = cpu_to_be32(vol_id); 123 125 124 126 /* UBI implementations without fastmap support have to delete the 125 127 * fastmap. 126 128 */ 127 - new->compat = UBI_COMPAT_DELETE; 129 + vh->compat = UBI_COMPAT_DELETE; 128 130 129 131 out: 130 132 return new; ··· 410 408 __be32 *pebs, int pool_size, unsigned long long *max_sqnum, 411 409 struct list_head *free) 412 410 { 411 + struct ubi_vid_io_buf *vb; 413 412 struct ubi_vid_hdr *vh; 414 413 struct ubi_ec_hdr *ech; 415 414 struct ubi_ainf_peb *new_aeb; ··· 420 417 if (!ech) 421 418 return -ENOMEM; 422 419 423 - vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); 424 - if (!vh) { 420 + vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); 421 + if (!vb) { 425 422 kfree(ech); 426 423 return -ENOMEM; 427 424 } 425 + 426 + vh = ubi_get_vid_hdr(vb); 428 427 429 428 dbg_bld("scanning fastmap pool: size = %i", pool_size); 430 429 ··· 468 463 goto out; 469 464 } 470 465 471 - err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); 466 + err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0); 472 467 if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) { 473 468 unsigned long long ec = be64_to_cpu(ech->ec); 474 469 unmap_peb(ai, pnum); ··· 514 509 } 515 510 516 511 out: 517 - ubi_free_vid_hdr(ubi, vh); 512 + ubi_free_vid_buf(vb); 518 513 kfree(ech); 519 514 return ret; 520 515 } ··· 842 837 
struct ubi_attach_info *scan_ai) 843 838 { 844 839 struct ubi_fm_sb *fmsb, *fmsb2; 840 + struct ubi_vid_io_buf *vb; 845 841 struct ubi_vid_hdr *vh; 846 842 struct ubi_ec_hdr *ech; 847 843 struct ubi_fastmap_layout *fm; ··· 918 912 goto free_fm_sb; 919 913 } 920 914 921 - vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); 922 - if (!vh) { 915 + vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); 916 + if (!vb) { 923 917 ret = -ENOMEM; 924 918 goto free_hdr; 925 919 } 920 + 921 + vh = ubi_get_vid_hdr(vb); 926 922 927 923 for (i = 0; i < used_blocks; i++) { 928 924 int image_seq; ··· 968 960 goto free_hdr; 969 961 } 970 962 971 - ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); 963 + ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0); 972 964 if (ret && ret != UBI_IO_BITFLIPS) { 973 965 ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)", 974 966 i, pnum); ··· 1058 1050 ubi->fm_disabled = 0; 1059 1051 ubi->fast_attach = 1; 1060 1052 1061 - ubi_free_vid_hdr(ubi, vh); 1053 + ubi_free_vid_buf(vb); 1062 1054 kfree(ech); 1063 1055 out: 1064 1056 up_write(&ubi->fm_protect); ··· 1067 1059 return ret; 1068 1060 1069 1061 free_hdr: 1070 - ubi_free_vid_hdr(ubi, vh); 1062 + ubi_free_vid_buf(vb); 1071 1063 kfree(ech); 1072 1064 free_fm_sb: 1073 1065 kfree(fmsb); ··· 1095 1087 struct ubi_fm_eba *feba; 1096 1088 struct ubi_wl_entry *wl_e; 1097 1089 struct ubi_volume *vol; 1090 + struct ubi_vid_io_buf *avbuf, *dvbuf; 1098 1091 struct ubi_vid_hdr *avhdr, *dvhdr; 1099 1092 struct ubi_work *ubi_wrk; 1100 1093 struct rb_node *tmp_rb; ··· 1106 1097 fm_raw = ubi->fm_buf; 1107 1098 memset(ubi->fm_buf, 0, ubi->fm_size); 1108 1099 1109 - avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID); 1110 - if (!avhdr) { 1100 + avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID); 1101 + if (!avbuf) { 1111 1102 ret = -ENOMEM; 1112 1103 goto out; 1113 1104 } 1114 1105 1115 - dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID); 1116 - if (!dvhdr) { 1106 + dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID); 1107 + if (!dvbuf) { 1117 1108 ret = 
-ENOMEM; 1118 1109 goto out_kfree; 1119 1110 } 1111 + 1112 + avhdr = ubi_get_vid_hdr(avbuf); 1113 + dvhdr = ubi_get_vid_hdr(dvbuf); 1120 1114 1121 1115 seen_pebs = init_seen(ubi); 1122 1116 if (IS_ERR(seen_pebs)) { ··· 1289 1277 spin_unlock(&ubi->volumes_lock); 1290 1278 1291 1279 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum); 1292 - ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr); 1280 + ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf); 1293 1281 if (ret) { 1294 1282 ubi_err(ubi, "unable to write vid_hdr to fastmap SB!"); 1295 1283 goto out_kfree; ··· 1310 1298 dvhdr->lnum = cpu_to_be32(i); 1311 1299 dbg_bld("writing fastmap data to PEB %i sqnum %llu", 1312 1300 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum)); 1313 - ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr); 1301 + ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf); 1314 1302 if (ret) { 1315 1303 ubi_err(ubi, "unable to write vid_hdr to PEB %i!", 1316 1304 new_fm->e[i]->pnum); ··· 1335 1323 dbg_bld("fastmap written!"); 1336 1324 1337 1325 out_kfree: 1338 - ubi_free_vid_hdr(ubi, avhdr); 1339 - ubi_free_vid_hdr(ubi, dvhdr); 1326 + ubi_free_vid_buf(avbuf); 1327 + ubi_free_vid_buf(dvbuf); 1340 1328 free_seen(seen_pebs); 1341 1329 out: 1342 1330 return ret; ··· 1406 1394 int ret; 1407 1395 struct ubi_fastmap_layout *fm; 1408 1396 struct ubi_wl_entry *e; 1409 - struct ubi_vid_hdr *vh = NULL; 1397 + struct ubi_vid_io_buf *vb = NULL; 1398 + struct ubi_vid_hdr *vh; 1410 1399 1411 1400 if (!ubi->fm) 1412 1401 return 0; ··· 1419 1406 if (!fm) 1420 1407 goto out; 1421 1408 1422 - vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID); 1423 - if (!vh) 1409 + vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID); 1410 + if (!vb) 1424 1411 goto out_free_fm; 1412 + 1413 + vh = ubi_get_vid_hdr(vb); 1425 1414 1426 1415 ret = -ENOSPC; 1427 1416 e = ubi_wl_get_fm_peb(ubi, 1); ··· 1435 1420 * to scanning mode. 
1436 1421 */ 1437 1422 vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 1438 - ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh); 1423 + ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb); 1439 1424 if (ret < 0) { 1440 1425 ubi_wl_put_fm_peb(ubi, e, 0, 0); 1441 1426 goto out_free_fm; ··· 1447 1432 ubi->fm = fm; 1448 1433 1449 1434 out: 1450 - ubi_free_vid_hdr(ubi, vh); 1435 + ubi_free_vid_buf(vb); 1451 1436 return ret; 1452 1437 1453 1438 out_free_fm:
+22 -17
drivers/mtd/ubi/io.c
··· 502 502 loff_t addr; 503 503 uint32_t data = 0; 504 504 struct ubi_ec_hdr ec_hdr; 505 + struct ubi_vid_io_buf vidb; 505 506 506 507 /* 507 508 * Note, we cannot generally define VID header buffers on stack, ··· 529 528 goto error; 530 529 } 531 530 532 - err = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0); 531 + ubi_init_vid_buf(ubi, &vidb, &vid_hdr); 532 + ubi_assert(&vid_hdr == ubi_get_vid_hdr(&vidb)); 533 + 534 + err = ubi_io_read_vid_hdr(ubi, pnum, &vidb, 0); 533 535 if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR && 534 536 err != UBI_IO_FF){ 535 537 addr += ubi->vid_hdr_aloffset; ··· 999 995 * ubi_io_read_vid_hdr - read and check a volume identifier header. 1000 996 * @ubi: UBI device description object 1001 997 * @pnum: physical eraseblock number to read from 1002 - * @vid_hdr: &struct ubi_vid_hdr object where to store the read volume 1003 - * identifier header 998 + * @vidb: the volume identifier buffer to store data in 1004 999 * @verbose: be verbose if the header is corrupted or wasn't found 1005 1000 * 1006 1001 * This function reads the volume identifier header from physical eraseblock 1007 - * @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read 1002 + * @pnum and stores it in @vidb. It also checks CRC checksum of the read 1008 1003 * volume identifier header. The error codes are the same as in 1009 1004 * 'ubi_io_read_ec_hdr()'. 1010 1005 * ··· 1011 1008 * 'ubi_io_read_ec_hdr()', so refer commentaries in 'ubi_io_read_ec_hdr()'. 
1012 1009 */ 1013 1010 int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, 1014 - struct ubi_vid_hdr *vid_hdr, int verbose) 1011 + struct ubi_vid_io_buf *vidb, int verbose) 1015 1012 { 1016 1013 int err, read_err; 1017 1014 uint32_t crc, magic, hdr_crc; 1018 - void *p; 1015 + struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb); 1016 + void *p = vidb->buffer; 1019 1017 1020 1018 dbg_io("read VID header from PEB %d", pnum); 1021 1019 ubi_assert(pnum >= 0 && pnum < ubi->peb_count); 1022 1020 1023 - p = (char *)vid_hdr - ubi->vid_hdr_shift; 1024 1021 read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, 1025 1022 ubi->vid_hdr_shift + UBI_VID_HDR_SIZE); 1026 1023 if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err)) ··· 1083 1080 * ubi_io_write_vid_hdr - write a volume identifier header. 1084 1081 * @ubi: UBI device description object 1085 1082 * @pnum: the physical eraseblock number to write to 1086 - * @vid_hdr: the volume identifier header to write 1083 + * @vidb: the volume identifier buffer to write 1087 1084 * 1088 1085 * This function writes the volume identifier header described by @vid_hdr to 1089 1086 * physical eraseblock @pnum. This function automatically fills the 1090 - * @vid_hdr->magic and the @vid_hdr->version fields, as well as calculates 1091 - * header CRC checksum and stores it at vid_hdr->hdr_crc. 1087 + * @vidb->hdr->magic and the @vidb->hdr->version fields, as well as calculates 1088 + * header CRC checksum and stores it at vidb->hdr->hdr_crc. 1092 1089 * 1093 1090 * This function returns zero in case of success and a negative error code in 1094 1091 * case of failure. If %-EIO is returned, the physical eraseblock probably went 1095 1092 * bad. 
1096 1093 */ 1097 1094 int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, 1098 - struct ubi_vid_hdr *vid_hdr) 1095 + struct ubi_vid_io_buf *vidb) 1099 1096 { 1097 + struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb); 1100 1098 int err; 1101 1099 uint32_t crc; 1102 - void *p; 1100 + void *p = vidb->buffer; 1103 1101 1104 1102 dbg_io("write VID header to PEB %d", pnum); 1105 1103 ubi_assert(pnum >= 0 && pnum < ubi->peb_count); ··· 1121 1117 if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE)) 1122 1118 return -EROFS; 1123 1119 1124 - p = (char *)vid_hdr - ubi->vid_hdr_shift; 1125 1120 err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset, 1126 1121 ubi->vid_hdr_alsize); 1127 1122 return err; ··· 1286 1283 { 1287 1284 int err; 1288 1285 uint32_t crc, hdr_crc; 1286 + struct ubi_vid_io_buf *vidb; 1289 1287 struct ubi_vid_hdr *vid_hdr; 1290 1288 void *p; 1291 1289 1292 1290 if (!ubi_dbg_chk_io(ubi)) 1293 1291 return 0; 1294 1292 1295 - vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 1296 - if (!vid_hdr) 1293 + vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS); 1294 + if (!vidb) 1297 1295 return -ENOMEM; 1298 1296 1299 - p = (char *)vid_hdr - ubi->vid_hdr_shift; 1297 + vid_hdr = ubi_get_vid_hdr(vidb); 1298 + p = vidb->buffer; 1300 1299 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, 1301 1300 ubi->vid_hdr_alsize); 1302 1301 if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) ··· 1319 1314 err = self_check_vid_hdr(ubi, pnum, vid_hdr); 1320 1315 1321 1316 exit: 1322 - ubi_free_vid_hdr(ubi, vid_hdr); 1317 + ubi_free_vid_buf(vidb); 1323 1318 return err; 1324 1319 } 1325 1320
+67 -32
drivers/mtd/ubi/ubi.h
··· 167 167 }; 168 168 169 169 /** 170 + * struct ubi_vid_io_buf - VID buffer used to read/write VID info to/from the 171 + * flash. 172 + * @hdr: a pointer to the VID header stored in buffer 173 + * @buffer: underlying buffer 174 + */ 175 + struct ubi_vid_io_buf { 176 + struct ubi_vid_hdr *hdr; 177 + void *buffer; 178 + }; 179 + 180 + /** 170 181 * struct ubi_wl_entry - wear-leveling entry. 171 182 * @u.rb: link in the corresponding (free/used) RB-tree 172 183 * @u.list: link in the protection queue ··· 751 740 * @ec_count: a temporary variable used when calculating @mean_ec 752 741 * @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects 753 742 * @ech: temporary EC header. Only available during scan 754 - * @vidh: temporary VID header. Only available during scan 743 + * @vidh: temporary VID buffer. Only available during scan 755 744 * 756 745 * This data structure contains the result of attaching an MTD device and may 757 746 * be used by other UBI sub-systems to build final UBI data structures, further ··· 781 770 int ec_count; 782 771 struct kmem_cache *aeb_slab_cache; 783 772 struct ubi_ec_hdr *ech; 784 - struct ubi_vid_hdr *vidh; 773 + struct ubi_vid_io_buf *vidb; 785 774 }; 786 775 787 776 /** ··· 898 887 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, 899 888 int lnum, const void *buf, int len); 900 889 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 901 - struct ubi_vid_hdr *vid_hdr); 890 + struct ubi_vid_io_buf *vidb); 902 891 int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai); 903 892 unsigned long long ubi_next_sqnum(struct ubi_device *ubi); 904 893 int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap, ··· 933 922 int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum, 934 923 struct ubi_ec_hdr *ec_hdr); 935 924 int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, 936 - struct ubi_vid_hdr *vid_hdr, int verbose); 925 + struct ubi_vid_io_buf *vidb, int 
verbose); 937 926 int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, 938 - struct ubi_vid_hdr *vid_hdr); 927 + struct ubi_vid_io_buf *vidb); 939 928 940 929 /* build.c */ 941 930 int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, ··· 1056 1045 } 1057 1046 1058 1047 /** 1059 - * ubi_zalloc_vid_hdr - allocate a volume identifier header object. 1060 - * @ubi: UBI device description object 1061 - * @gfp_flags: GFP flags to allocate with 1062 - * 1063 - * This function returns a pointer to the newly allocated and zero-filled 1064 - * volume identifier header object in case of success and %NULL in case of 1065 - * failure. 1048 + * ubi_init_vid_buf - Initialize a VID buffer 1049 + * @ubi: the UBI device 1050 + * @vidb: the VID buffer to initialize 1051 + * @buf: the underlying buffer 1066 1052 */ 1067 - static inline struct ubi_vid_hdr * 1068 - ubi_zalloc_vid_hdr(const struct ubi_device *ubi, gfp_t gfp_flags) 1053 + static inline void ubi_init_vid_buf(const struct ubi_device *ubi, 1054 + struct ubi_vid_io_buf *vidb, 1055 + void *buf) 1069 1056 { 1070 - void *vid_hdr; 1057 + if (buf) 1058 + memset(buf, 0, ubi->vid_hdr_alsize); 1071 1059 1072 - vid_hdr = kzalloc(ubi->vid_hdr_alsize, gfp_flags); 1073 - if (!vid_hdr) 1074 - return NULL; 1075 - 1076 - /* 1077 - * VID headers may be stored at un-aligned flash offsets, so we shift 1078 - * the pointer. 1079 - */ 1080 - return vid_hdr + ubi->vid_hdr_shift; 1060 + vidb->buffer = buf; 1061 + vidb->hdr = buf + ubi->vid_hdr_shift; 1081 1062 } 1082 1063 1083 1064 /** 1084 - * ubi_free_vid_hdr - free a volume identifier header object. 
1085 - * @ubi: UBI device description object 1086 - * @vid_hdr: the object to free 1065 + * ubi_init_vid_buf - Allocate a VID buffer 1066 + * @ubi: the UBI device 1067 + * @gfp_flags: GFP flags to use for the allocation 1087 1068 */ 1088 - static inline void ubi_free_vid_hdr(const struct ubi_device *ubi, 1089 - struct ubi_vid_hdr *vid_hdr) 1069 + static inline struct ubi_vid_io_buf * 1070 + ubi_alloc_vid_buf(const struct ubi_device *ubi, gfp_t gfp_flags) 1090 1071 { 1091 - void *p = vid_hdr; 1072 + struct ubi_vid_io_buf *vidb; 1073 + void *buf; 1092 1074 1093 - if (!p) 1075 + vidb = kzalloc(sizeof(*vidb), gfp_flags); 1076 + if (!vidb) 1077 + return NULL; 1078 + 1079 + buf = kmalloc(ubi->vid_hdr_alsize, gfp_flags); 1080 + if (!buf) { 1081 + kfree(vidb); 1082 + return NULL; 1083 + } 1084 + 1085 + ubi_init_vid_buf(ubi, vidb, buf); 1086 + 1087 + return vidb; 1088 + } 1089 + 1090 + /** 1091 + * ubi_free_vid_buf - Free a VID buffer 1092 + * @vidb: the VID buffer to free 1093 + */ 1094 + static inline void ubi_free_vid_buf(struct ubi_vid_io_buf *vidb) 1095 + { 1096 + if (!vidb) 1094 1097 return; 1095 1098 1096 - kfree(p - ubi->vid_hdr_shift); 1099 + kfree(vidb->buffer); 1100 + kfree(vidb); 1101 + } 1102 + 1103 + /** 1104 + * ubi_get_vid_hdr - Get the VID header attached to a VID buffer 1105 + * @vidb: VID buffer 1106 + */ 1107 + static inline struct ubi_vid_hdr *ubi_get_vid_hdr(struct ubi_vid_io_buf *vidb) 1108 + { 1109 + return vidb->hdr; 1097 1110 } 1098 1111 1099 1112 /*
+8 -5
drivers/mtd/ubi/vtbl.c
··· 299 299 int copy, void *vtbl) 300 300 { 301 301 int err, tries = 0; 302 + struct ubi_vid_io_buf *vidb; 302 303 struct ubi_vid_hdr *vid_hdr; 303 304 struct ubi_ainf_peb *new_aeb; 304 305 305 306 dbg_gen("create volume table (copy #%d)", copy + 1); 306 307 307 - vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); 308 - if (!vid_hdr) 308 + vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); 309 + if (!vidb) 309 310 return -ENOMEM; 311 + 312 + vid_hdr = ubi_get_vid_hdr(vidb); 310 313 311 314 retry: 312 315 new_aeb = ubi_early_get_peb(ubi, ai); ··· 327 324 vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum); 328 325 329 326 /* The EC header is already there, write the VID header */ 330 - err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vid_hdr); 327 + err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vidb); 331 328 if (err) 332 329 goto write_error; 333 330 ··· 342 339 */ 343 340 err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0); 344 341 ubi_free_aeb(ai, new_aeb); 345 - ubi_free_vid_hdr(ubi, vid_hdr); 342 + ubi_free_vid_buf(vidb); 346 343 return err; 347 344 348 345 write_error: ··· 356 353 } 357 354 ubi_free_aeb(ai, new_aeb); 358 355 out_free: 359 - ubi_free_vid_hdr(ubi, vid_hdr); 356 + ubi_free_vid_buf(vidb); 360 357 return err; 361 358 362 359 }
+11 -8
drivers/mtd/ubi/wl.c
··· 649 649 int anchor = wrk->anchor; 650 650 #endif 651 651 struct ubi_wl_entry *e1, *e2; 652 + struct ubi_vid_io_buf *vidb; 652 653 struct ubi_vid_hdr *vid_hdr; 653 654 int dst_leb_clean = 0; 654 655 ··· 657 656 if (shutdown) 658 657 return 0; 659 658 660 - vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 661 - if (!vid_hdr) 659 + vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS); 660 + if (!vidb) 662 661 return -ENOMEM; 662 + 663 + vid_hdr = ubi_get_vid_hdr(vidb); 663 664 664 665 mutex_lock(&ubi->move_mutex); 665 666 spin_lock(&ubi->wl_lock); ··· 756 753 * which is being moved was unmapped. 757 754 */ 758 755 759 - err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); 756 + err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0); 760 757 if (err && err != UBI_IO_BITFLIPS) { 761 758 dst_leb_clean = 1; 762 759 if (err == UBI_IO_FF) { ··· 793 790 vol_id = be32_to_cpu(vid_hdr->vol_id); 794 791 lnum = be32_to_cpu(vid_hdr->lnum); 795 792 796 - err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); 793 + err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb); 797 794 if (err) { 798 795 if (err == MOVE_CANCEL_RACE) { 799 796 /* ··· 850 847 if (scrubbing) 851 848 ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d", 852 849 e1->pnum, vol_id, lnum, e2->pnum); 853 - ubi_free_vid_hdr(ubi, vid_hdr); 850 + ubi_free_vid_buf(vidb); 854 851 855 852 spin_lock(&ubi->wl_lock); 856 853 if (!ubi->move_to_put) { ··· 916 913 ubi->wl_scheduled = 0; 917 914 spin_unlock(&ubi->wl_lock); 918 915 919 - ubi_free_vid_hdr(ubi, vid_hdr); 916 + ubi_free_vid_buf(vidb); 920 917 if (dst_leb_clean) { 921 918 ensure_wear_leveling(ubi, 1); 922 919 } else { ··· 940 937 ubi->move_to_put = ubi->wl_scheduled = 0; 941 938 spin_unlock(&ubi->wl_lock); 942 939 943 - ubi_free_vid_hdr(ubi, vid_hdr); 940 + ubi_free_vid_buf(vidb); 944 941 wl_entry_destroy(ubi, e1); 945 942 wl_entry_destroy(ubi, e2); 946 943 ··· 954 951 ubi->wl_scheduled = 0; 955 952 spin_unlock(&ubi->wl_lock); 956 953 mutex_unlock(&ubi->move_mutex); 957 
- ubi_free_vid_hdr(ubi, vid_hdr); 954 + ubi_free_vid_buf(vidb); 958 955 return 0; 959 956 } 960 957