Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mtd: implement proper partition handling

Instead of collecting partitions in a flat list, create a hierarchy
within the mtd_info structure: use a partitions list to keep track of
the partitions of an MTD device (which might itself be a partition of
another MTD device), and a pointer to the parent device (NULL when the
MTD device is the root one, i.e. not a partition).

By also saving directly in mtd_info the offset of the partition, we
can get rid of the mtd_part structure.

While at it, be consistent in the naming of the mtd_info structures to
ease the understanding of the new hierarchy: these structures are
usually called 'mtd', unless there are multiple instances of the same
structure. In this case, there is usually a parent/child bond, so we
will call them 'parent' and 'child'.

Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/linux-mtd/20200114090952.11232-1-miquel.raynal@bootlin.com

+477 -604
+7 -5
drivers/mtd/mtdchar.c
··· 349 349 uint64_t start, uint32_t length, void __user *ptr, 350 350 uint32_t __user *retp) 351 351 { 352 + struct mtd_info *master = mtd_get_master(mtd); 352 353 struct mtd_file_info *mfi = file->private_data; 353 354 struct mtd_oob_ops ops = {}; 354 355 uint32_t retlen; ··· 361 360 if (length > 4096) 362 361 return -EINVAL; 363 362 364 - if (!mtd->_write_oob) 363 + if (!master->_write_oob) 365 364 return -EOPNOTSUPP; 366 365 367 366 ops.ooblen = length; ··· 587 586 static int mtdchar_write_ioctl(struct mtd_info *mtd, 588 587 struct mtd_write_req __user *argp) 589 588 { 589 + struct mtd_info *master = mtd_get_master(mtd); 590 590 struct mtd_write_req req; 591 591 struct mtd_oob_ops ops = {}; 592 592 const void __user *usr_data, *usr_oob; ··· 599 597 usr_data = (const void __user *)(uintptr_t)req.usr_data; 600 598 usr_oob = (const void __user *)(uintptr_t)req.usr_oob; 601 599 602 - if (!mtd->_write_oob) 600 + if (!master->_write_oob) 603 601 return -EOPNOTSUPP; 604 - 605 602 ops.mode = req.mode; 606 603 ops.len = (size_t)req.len; 607 604 ops.ooblen = (size_t)req.ooblen; ··· 636 635 { 637 636 struct mtd_file_info *mfi = file->private_data; 638 637 struct mtd_info *mtd = mfi->mtd; 638 + struct mtd_info *master = mtd_get_master(mtd); 639 639 void __user *argp = (void __user *)arg; 640 640 int ret = 0; 641 641 struct mtd_info_user info; ··· 826 824 { 827 825 struct nand_oobinfo oi; 828 826 829 - if (!mtd->ooblayout) 827 + if (!master->ooblayout) 830 828 return -EOPNOTSUPP; 831 829 832 830 ret = get_oobinfo(mtd, &oi); ··· 920 918 { 921 919 struct nand_ecclayout_user *usrlay; 922 920 923 - if (!mtd->ooblayout) 921 + if (!master->ooblayout) 924 922 return -EOPNOTSUPP; 925 923 926 924 usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
+179 -71
drivers/mtd/mtdcore.c
··· 456 456 int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit, 457 457 struct mtd_pairing_info *info) 458 458 { 459 - int npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd); 459 + struct mtd_info *master = mtd_get_master(mtd); 460 + int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master); 460 461 461 462 if (wunit < 0 || wunit >= npairs) 462 463 return -EINVAL; 463 464 464 - if (mtd->pairing && mtd->pairing->get_info) 465 - return mtd->pairing->get_info(mtd, wunit, info); 465 + if (master->pairing && master->pairing->get_info) 466 + return master->pairing->get_info(master, wunit, info); 466 467 467 468 info->group = 0; 468 469 info->pair = wunit; ··· 499 498 int mtd_pairing_info_to_wunit(struct mtd_info *mtd, 500 499 const struct mtd_pairing_info *info) 501 500 { 502 - int ngroups = mtd_pairing_groups(mtd); 503 - int npairs = mtd_wunit_per_eb(mtd) / ngroups; 501 + struct mtd_info *master = mtd_get_master(mtd); 502 + int ngroups = mtd_pairing_groups(master); 503 + int npairs = mtd_wunit_per_eb(master) / ngroups; 504 504 505 505 if (!info || info->pair < 0 || info->pair >= npairs || 506 506 info->group < 0 || info->group >= ngroups) 507 507 return -EINVAL; 508 508 509 - if (mtd->pairing && mtd->pairing->get_wunit) 510 - return mtd->pairing->get_wunit(mtd, info); 509 + if (master->pairing && master->pairing->get_wunit) 510 + return mtd->pairing->get_wunit(master, info); 511 511 512 512 return info->pair; 513 513 } ··· 526 524 */ 527 525 int mtd_pairing_groups(struct mtd_info *mtd) 528 526 { 529 - if (!mtd->pairing || !mtd->pairing->ngroups) 527 + struct mtd_info *master = mtd_get_master(mtd); 528 + 529 + if (!master->pairing || !master->pairing->ngroups) 530 530 return 1; 531 531 532 - return mtd->pairing->ngroups; 532 + return master->pairing->ngroups; 533 533 } 534 534 EXPORT_SYMBOL_GPL(mtd_pairing_groups); 535 535 ··· 591 587 592 588 int add_mtd_device(struct mtd_info *mtd) 593 589 { 590 + struct mtd_info *master = mtd_get_master(mtd); 
594 591 struct mtd_notifier *not; 595 592 int i, error; 596 593 ··· 613 608 (mtd->_read && mtd->_read_oob))) 614 609 return -EINVAL; 615 610 616 - if (WARN_ON((!mtd->erasesize || !mtd->_erase) && 611 + if (WARN_ON((!mtd->erasesize || !master->_erase) && 617 612 !(mtd->flags & MTD_NO_ERASE))) 618 613 return -EINVAL; 619 614 ··· 770 765 pr_debug("mtd device won't show a device symlink in sysfs\n"); 771 766 } 772 767 773 - mtd->orig_flags = mtd->flags; 768 + INIT_LIST_HEAD(&mtd->partitions); 769 + mutex_init(&mtd->master.partitions_lock); 774 770 } 775 771 776 772 /** ··· 977 971 978 972 int __get_mtd_device(struct mtd_info *mtd) 979 973 { 974 + struct mtd_info *master = mtd_get_master(mtd); 980 975 int err; 981 976 982 - if (!try_module_get(mtd->owner)) 977 + if (!try_module_get(master->owner)) 983 978 return -ENODEV; 984 979 985 - if (mtd->_get_device) { 986 - err = mtd->_get_device(mtd); 980 + if (master->_get_device) { 981 + err = master->_get_device(mtd); 987 982 988 983 if (err) { 989 - module_put(mtd->owner); 984 + module_put(master->owner); 990 985 return err; 991 986 } 992 987 } 993 - mtd->usecount++; 988 + 989 + while (mtd->parent) { 990 + mtd->usecount++; 991 + mtd = mtd->parent; 992 + } 993 + 994 994 return 0; 995 995 } 996 996 EXPORT_SYMBOL_GPL(__get_mtd_device); ··· 1050 1038 1051 1039 void __put_mtd_device(struct mtd_info *mtd) 1052 1040 { 1053 - --mtd->usecount; 1054 - BUG_ON(mtd->usecount < 0); 1041 + struct mtd_info *master = mtd_get_master(mtd); 1055 1042 1056 - if (mtd->_put_device) 1057 - mtd->_put_device(mtd); 1043 + while (mtd->parent) { 1044 + --mtd->usecount; 1045 + BUG_ON(mtd->usecount < 0); 1046 + mtd = mtd->parent; 1047 + } 1058 1048 1059 - module_put(mtd->owner); 1049 + if (master->_put_device) 1050 + master->_put_device(master); 1051 + 1052 + module_put(master->owner); 1060 1053 } 1061 1054 EXPORT_SYMBOL_GPL(__put_mtd_device); 1062 1055 ··· 1072 1055 */ 1073 1056 int mtd_erase(struct mtd_info *mtd, struct erase_info *instr) 1074 1057 { 
1058 + struct mtd_info *master = mtd_get_master(mtd); 1059 + u64 mst_ofs = mtd_get_master_ofs(mtd, 0); 1060 + int ret; 1061 + 1075 1062 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; 1076 1063 1077 - if (!mtd->erasesize || !mtd->_erase) 1064 + if (!mtd->erasesize || !master->_erase) 1078 1065 return -ENOTSUPP; 1079 1066 1080 1067 if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr) ··· 1090 1069 return 0; 1091 1070 1092 1071 ledtrig_mtd_activity(); 1093 - return mtd->_erase(mtd, instr); 1072 + 1073 + instr->addr += mst_ofs; 1074 + ret = master->_erase(master, instr); 1075 + if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) 1076 + instr->fail_addr -= mst_ofs; 1077 + 1078 + instr->addr -= mst_ofs; 1079 + return ret; 1094 1080 } 1095 1081 EXPORT_SYMBOL_GPL(mtd_erase); 1096 1082 ··· 1107 1079 int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, 1108 1080 void **virt, resource_size_t *phys) 1109 1081 { 1082 + struct mtd_info *master = mtd_get_master(mtd); 1083 + 1110 1084 *retlen = 0; 1111 1085 *virt = NULL; 1112 1086 if (phys) 1113 1087 *phys = 0; 1114 - if (!mtd->_point) 1088 + if (!master->_point) 1115 1089 return -EOPNOTSUPP; 1116 1090 if (from < 0 || from >= mtd->size || len > mtd->size - from) 1117 1091 return -EINVAL; 1118 1092 if (!len) 1119 1093 return 0; 1120 - return mtd->_point(mtd, from, len, retlen, virt, phys); 1094 + 1095 + from = mtd_get_master_ofs(mtd, from); 1096 + return master->_point(master, from, len, retlen, virt, phys); 1121 1097 } 1122 1098 EXPORT_SYMBOL_GPL(mtd_point); 1123 1099 1124 1100 /* We probably shouldn't allow XIP if the unpoint isn't a NULL */ 1125 1101 int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 1126 1102 { 1127 - if (!mtd->_unpoint) 1103 + struct mtd_info *master = mtd_get_master(mtd); 1104 + 1105 + if (!master->_unpoint) 1128 1106 return -EOPNOTSUPP; 1129 1107 if (from < 0 || from >= mtd->size || len > mtd->size - from) 1130 1108 return -EINVAL; 1131 1109 if (!len) 1132 1110 
return 0; 1133 - return mtd->_unpoint(mtd, from, len); 1111 + return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len); 1134 1112 } 1135 1113 EXPORT_SYMBOL_GPL(mtd_unpoint); 1136 1114 ··· 1162 1128 return (unsigned long)virt; 1163 1129 } 1164 1130 EXPORT_SYMBOL_GPL(mtd_get_unmapped_area); 1131 + 1132 + static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master, 1133 + const struct mtd_ecc_stats *old_stats) 1134 + { 1135 + struct mtd_ecc_stats diff; 1136 + 1137 + if (master == mtd) 1138 + return; 1139 + 1140 + diff = master->ecc_stats; 1141 + diff.failed -= old_stats->failed; 1142 + diff.corrected -= old_stats->corrected; 1143 + 1144 + while (mtd->parent) { 1145 + mtd->ecc_stats.failed += diff.failed; 1146 + mtd->ecc_stats.corrected += diff.corrected; 1147 + mtd = mtd->parent; 1148 + } 1149 + } 1165 1150 1166 1151 int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, 1167 1152 u_char *buf) ··· 1224 1171 int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, 1225 1172 const u_char *buf) 1226 1173 { 1174 + struct mtd_info *master = mtd_get_master(mtd); 1175 + 1227 1176 *retlen = 0; 1228 - if (!mtd->_panic_write) 1177 + if (!master->_panic_write) 1229 1178 return -EOPNOTSUPP; 1230 1179 if (to < 0 || to >= mtd->size || len > mtd->size - to) 1231 1180 return -EINVAL; ··· 1238 1183 if (!mtd->oops_panic_write) 1239 1184 mtd->oops_panic_write = true; 1240 1185 1241 - return mtd->_panic_write(mtd, to, len, retlen, buf); 1186 + return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len, 1187 + retlen, buf); 1242 1188 } 1243 1189 EXPORT_SYMBOL_GPL(mtd_panic_write); 1244 1190 ··· 1278 1222 1279 1223 int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) 1280 1224 { 1225 + struct mtd_info *master = mtd_get_master(mtd); 1226 + struct mtd_ecc_stats old_stats = master->ecc_stats; 1281 1227 int ret_code; 1228 + 1282 1229 ops->retlen = ops->oobretlen = 0; 1283 1230 1284 1231 
ret_code = mtd_check_oob_ops(mtd, from, ops); ··· 1291 1232 ledtrig_mtd_activity(); 1292 1233 1293 1234 /* Check the validity of a potential fallback on mtd->_read */ 1294 - if (!mtd->_read_oob && (!mtd->_read || ops->oobbuf)) 1235 + if (!master->_read_oob && (!master->_read || ops->oobbuf)) 1295 1236 return -EOPNOTSUPP; 1296 1237 1297 - if (mtd->_read_oob) 1298 - ret_code = mtd->_read_oob(mtd, from, ops); 1238 + from = mtd_get_master_ofs(mtd, from); 1239 + if (master->_read_oob) 1240 + ret_code = master->_read_oob(master, from, ops); 1299 1241 else 1300 - ret_code = mtd->_read(mtd, from, ops->len, &ops->retlen, 1301 - ops->datbuf); 1242 + ret_code = master->_read(master, from, ops->len, &ops->retlen, 1243 + ops->datbuf); 1244 + 1245 + mtd_update_ecc_stats(mtd, master, &old_stats); 1302 1246 1303 1247 /* 1304 1248 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics ··· 1320 1258 int mtd_write_oob(struct mtd_info *mtd, loff_t to, 1321 1259 struct mtd_oob_ops *ops) 1322 1260 { 1261 + struct mtd_info *master = mtd_get_master(mtd); 1323 1262 int ret; 1324 1263 1325 1264 ops->retlen = ops->oobretlen = 0; ··· 1335 1272 ledtrig_mtd_activity(); 1336 1273 1337 1274 /* Check the validity of a potential fallback on mtd->_write */ 1338 - if (!mtd->_write_oob && (!mtd->_write || ops->oobbuf)) 1275 + if (!master->_write_oob && (!master->_write || ops->oobbuf)) 1339 1276 return -EOPNOTSUPP; 1340 1277 1341 - if (mtd->_write_oob) 1342 - return mtd->_write_oob(mtd, to, ops); 1278 + to = mtd_get_master_ofs(mtd, to); 1279 + 1280 + if (master->_write_oob) 1281 + return master->_write_oob(master, to, ops); 1343 1282 else 1344 - return mtd->_write(mtd, to, ops->len, &ops->retlen, 1345 - ops->datbuf); 1283 + return master->_write(master, to, ops->len, &ops->retlen, 1284 + ops->datbuf); 1346 1285 } 1347 1286 EXPORT_SYMBOL_GPL(mtd_write_oob); 1348 1287 ··· 1367 1302 int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, 1368 1303 struct mtd_oob_region *oobecc) 1369 1304 { 
1305 + struct mtd_info *master = mtd_get_master(mtd); 1306 + 1370 1307 memset(oobecc, 0, sizeof(*oobecc)); 1371 1308 1372 - if (!mtd || section < 0) 1309 + if (!master || section < 0) 1373 1310 return -EINVAL; 1374 1311 1375 - if (!mtd->ooblayout || !mtd->ooblayout->ecc) 1312 + if (!master->ooblayout || !master->ooblayout->ecc) 1376 1313 return -ENOTSUPP; 1377 1314 1378 - return mtd->ooblayout->ecc(mtd, section, oobecc); 1315 + return master->ooblayout->ecc(master, section, oobecc); 1379 1316 } 1380 1317 EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc); 1381 1318 ··· 1401 1334 int mtd_ooblayout_free(struct mtd_info *mtd, int section, 1402 1335 struct mtd_oob_region *oobfree) 1403 1336 { 1337 + struct mtd_info *master = mtd_get_master(mtd); 1338 + 1404 1339 memset(oobfree, 0, sizeof(*oobfree)); 1405 1340 1406 - if (!mtd || section < 0) 1341 + if (!master || section < 0) 1407 1342 return -EINVAL; 1408 1343 1409 - if (!mtd->ooblayout || !mtd->ooblayout->free) 1344 + if (!master->ooblayout || !master->ooblayout->free) 1410 1345 return -ENOTSUPP; 1411 1346 1412 - return mtd->ooblayout->free(mtd, section, oobfree); 1347 + return master->ooblayout->free(master, section, oobfree); 1413 1348 } 1414 1349 EXPORT_SYMBOL_GPL(mtd_ooblayout_free); 1415 1350 ··· 1720 1651 int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, 1721 1652 struct otp_info *buf) 1722 1653 { 1723 - if (!mtd->_get_fact_prot_info) 1654 + struct mtd_info *master = mtd_get_master(mtd); 1655 + 1656 + if (!master->_get_fact_prot_info) 1724 1657 return -EOPNOTSUPP; 1725 1658 if (!len) 1726 1659 return 0; 1727 - return mtd->_get_fact_prot_info(mtd, len, retlen, buf); 1660 + return master->_get_fact_prot_info(master, len, retlen, buf); 1728 1661 } 1729 1662 EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info); 1730 1663 1731 1664 int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, 1732 1665 size_t *retlen, u_char *buf) 1733 1666 { 1667 + struct mtd_info *master = mtd_get_master(mtd); 1668 + 
1734 1669 *retlen = 0; 1735 - if (!mtd->_read_fact_prot_reg) 1670 + if (!master->_read_fact_prot_reg) 1736 1671 return -EOPNOTSUPP; 1737 1672 if (!len) 1738 1673 return 0; 1739 - return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf); 1674 + return master->_read_fact_prot_reg(master, from, len, retlen, buf); 1740 1675 } 1741 1676 EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg); 1742 1677 1743 1678 int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, 1744 1679 struct otp_info *buf) 1745 1680 { 1746 - if (!mtd->_get_user_prot_info) 1681 + struct mtd_info *master = mtd_get_master(mtd); 1682 + 1683 + if (!master->_get_user_prot_info) 1747 1684 return -EOPNOTSUPP; 1748 1685 if (!len) 1749 1686 return 0; 1750 - return mtd->_get_user_prot_info(mtd, len, retlen, buf); 1687 + return master->_get_user_prot_info(master, len, retlen, buf); 1751 1688 } 1752 1689 EXPORT_SYMBOL_GPL(mtd_get_user_prot_info); 1753 1690 1754 1691 int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, 1755 1692 size_t *retlen, u_char *buf) 1756 1693 { 1694 + struct mtd_info *master = mtd_get_master(mtd); 1695 + 1757 1696 *retlen = 0; 1758 - if (!mtd->_read_user_prot_reg) 1697 + if (!master->_read_user_prot_reg) 1759 1698 return -EOPNOTSUPP; 1760 1699 if (!len) 1761 1700 return 0; 1762 - return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf); 1701 + return master->_read_user_prot_reg(master, from, len, retlen, buf); 1763 1702 } 1764 1703 EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg); 1765 1704 1766 1705 int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len, 1767 1706 size_t *retlen, u_char *buf) 1768 1707 { 1708 + struct mtd_info *master = mtd_get_master(mtd); 1769 1709 int ret; 1770 1710 1771 1711 *retlen = 0; 1772 - if (!mtd->_write_user_prot_reg) 1712 + if (!master->_write_user_prot_reg) 1773 1713 return -EOPNOTSUPP; 1774 1714 if (!len) 1775 1715 return 0; 1776 - ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf); 1716 + ret = 
master->_write_user_prot_reg(master, to, len, retlen, buf); 1777 1717 if (ret) 1778 1718 return ret; 1779 1719 ··· 1796 1718 1797 1719 int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len) 1798 1720 { 1799 - if (!mtd->_lock_user_prot_reg) 1721 + struct mtd_info *master = mtd_get_master(mtd); 1722 + 1723 + if (!master->_lock_user_prot_reg) 1800 1724 return -EOPNOTSUPP; 1801 1725 if (!len) 1802 1726 return 0; 1803 - return mtd->_lock_user_prot_reg(mtd, from, len); 1727 + return master->_lock_user_prot_reg(master, from, len); 1804 1728 } 1805 1729 EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg); 1806 1730 1807 1731 /* Chip-supported device locking */ 1808 1732 int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 1809 1733 { 1810 - if (!mtd->_lock) 1734 + struct mtd_info *master = mtd_get_master(mtd); 1735 + 1736 + if (!master->_lock) 1811 1737 return -EOPNOTSUPP; 1812 1738 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs) 1813 1739 return -EINVAL; 1814 1740 if (!len) 1815 1741 return 0; 1816 - return mtd->_lock(mtd, ofs, len); 1742 + return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len); 1817 1743 } 1818 1744 EXPORT_SYMBOL_GPL(mtd_lock); 1819 1745 1820 1746 int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 1821 1747 { 1822 - if (!mtd->_unlock) 1748 + struct mtd_info *master = mtd_get_master(mtd); 1749 + 1750 + if (!master->_unlock) 1823 1751 return -EOPNOTSUPP; 1824 1752 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs) 1825 1753 return -EINVAL; 1826 1754 if (!len) 1827 1755 return 0; 1828 - return mtd->_unlock(mtd, ofs, len); 1756 + return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len); 1829 1757 } 1830 1758 EXPORT_SYMBOL_GPL(mtd_unlock); 1831 1759 1832 1760 int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) 1833 1761 { 1834 - if (!mtd->_is_locked) 1762 + struct mtd_info *master = mtd_get_master(mtd); 1763 + 1764 + if (!master->_is_locked) 1835 1765 return -EOPNOTSUPP; 1836 1766 
if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs) 1837 1767 return -EINVAL; 1838 1768 if (!len) 1839 1769 return 0; 1840 - return mtd->_is_locked(mtd, ofs, len); 1770 + return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len); 1841 1771 } 1842 1772 EXPORT_SYMBOL_GPL(mtd_is_locked); 1843 1773 1844 1774 int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs) 1845 1775 { 1776 + struct mtd_info *master = mtd_get_master(mtd); 1777 + 1846 1778 if (ofs < 0 || ofs >= mtd->size) 1847 1779 return -EINVAL; 1848 - if (!mtd->_block_isreserved) 1780 + if (!master->_block_isreserved) 1849 1781 return 0; 1850 - return mtd->_block_isreserved(mtd, ofs); 1782 + return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs)); 1851 1783 } 1852 1784 EXPORT_SYMBOL_GPL(mtd_block_isreserved); 1853 1785 1854 1786 int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs) 1855 1787 { 1788 + struct mtd_info *master = mtd_get_master(mtd); 1789 + 1856 1790 if (ofs < 0 || ofs >= mtd->size) 1857 1791 return -EINVAL; 1858 - if (!mtd->_block_isbad) 1792 + if (!master->_block_isbad) 1859 1793 return 0; 1860 - return mtd->_block_isbad(mtd, ofs); 1794 + return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs)); 1861 1795 } 1862 1796 EXPORT_SYMBOL_GPL(mtd_block_isbad); 1863 1797 1864 1798 int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs) 1865 1799 { 1866 - if (!mtd->_block_markbad) 1800 + struct mtd_info *master = mtd_get_master(mtd); 1801 + int ret; 1802 + 1803 + if (!master->_block_markbad) 1867 1804 return -EOPNOTSUPP; 1868 1805 if (ofs < 0 || ofs >= mtd->size) 1869 1806 return -EINVAL; 1870 1807 if (!(mtd->flags & MTD_WRITEABLE)) 1871 1808 return -EROFS; 1872 - return mtd->_block_markbad(mtd, ofs); 1809 + 1810 + ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs)); 1811 + if (ret) 1812 + return ret; 1813 + 1814 + while (mtd->parent) { 1815 + mtd->ecc_stats.badblocks++; 1816 + mtd = mtd->parent; 1817 + } 1818 + 1819 + return 0; 1873 1820 } 1874 
1821 EXPORT_SYMBOL_GPL(mtd_block_markbad); 1875 1822 ··· 1944 1841 int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, 1945 1842 unsigned long count, loff_t to, size_t *retlen) 1946 1843 { 1844 + struct mtd_info *master = mtd_get_master(mtd); 1845 + 1947 1846 *retlen = 0; 1948 1847 if (!(mtd->flags & MTD_WRITEABLE)) 1949 1848 return -EROFS; 1950 - if (!mtd->_writev) 1849 + 1850 + if (!master->_writev) 1951 1851 return default_mtd_writev(mtd, vecs, count, to, retlen); 1952 - return mtd->_writev(mtd, vecs, count, to, retlen); 1852 + 1853 + return master->_writev(master, vecs, count, 1854 + mtd_get_master_ofs(mtd, to), retlen); 1953 1855 } 1954 1856 EXPORT_SYMBOL_GPL(mtd_writev); 1955 1857
+176 -517
drivers/mtd/mtdpart.c
··· 20 20 21 21 #include "mtdcore.h" 22 22 23 - /* Our partition linked list */ 24 - static LIST_HEAD(mtd_partitions); 25 - static DEFINE_MUTEX(mtd_partitions_mutex); 26 - 27 - /** 28 - * struct mtd_part - our partition node structure 29 - * 30 - * @mtd: struct holding partition details 31 - * @parent: parent mtd - flash device or another partition 32 - * @offset: partition offset relative to the *flash device* 33 - */ 34 - struct mtd_part { 35 - struct mtd_info mtd; 36 - struct mtd_info *parent; 37 - uint64_t offset; 38 - struct list_head list; 39 - }; 40 - 41 - /* 42 - * Given a pointer to the MTD object in the mtd_part structure, we can retrieve 43 - * the pointer to that structure. 44 - */ 45 - static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd) 46 - { 47 - return container_of(mtd, struct mtd_part, mtd); 48 - } 49 - 50 - static u64 part_absolute_offset(struct mtd_info *mtd) 51 - { 52 - struct mtd_part *part = mtd_to_part(mtd); 53 - 54 - if (!mtd_is_partition(mtd)) 55 - return 0; 56 - 57 - return part_absolute_offset(part->parent) + part->offset; 58 - } 59 - 60 23 /* 61 24 * MTD methods which simply translate the effective address and pass through 62 25 * to the _real_ device. 
63 26 */ 64 27 65 - static int part_read(struct mtd_info *mtd, loff_t from, size_t len, 66 - size_t *retlen, u_char *buf) 28 + static inline void free_partition(struct mtd_info *mtd) 67 29 { 68 - struct mtd_part *part = mtd_to_part(mtd); 69 - struct mtd_ecc_stats stats; 70 - int res; 71 - 72 - stats = part->parent->ecc_stats; 73 - res = part->parent->_read(part->parent, from + part->offset, len, 74 - retlen, buf); 75 - if (unlikely(mtd_is_eccerr(res))) 76 - mtd->ecc_stats.failed += 77 - part->parent->ecc_stats.failed - stats.failed; 78 - else 79 - mtd->ecc_stats.corrected += 80 - part->parent->ecc_stats.corrected - stats.corrected; 81 - return res; 30 + kfree(mtd->name); 31 + kfree(mtd); 82 32 } 83 33 84 - static int part_point(struct mtd_info *mtd, loff_t from, size_t len, 85 - size_t *retlen, void **virt, resource_size_t *phys) 86 - { 87 - struct mtd_part *part = mtd_to_part(mtd); 88 - 89 - return part->parent->_point(part->parent, from + part->offset, len, 90 - retlen, virt, phys); 91 - } 92 - 93 - static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 94 - { 95 - struct mtd_part *part = mtd_to_part(mtd); 96 - 97 - return part->parent->_unpoint(part->parent, from + part->offset, len); 98 - } 99 - 100 - static int part_read_oob(struct mtd_info *mtd, loff_t from, 101 - struct mtd_oob_ops *ops) 102 - { 103 - struct mtd_part *part = mtd_to_part(mtd); 104 - struct mtd_ecc_stats stats; 105 - int res; 106 - 107 - stats = part->parent->ecc_stats; 108 - res = part->parent->_read_oob(part->parent, from + part->offset, ops); 109 - if (unlikely(mtd_is_eccerr(res))) 110 - mtd->ecc_stats.failed += 111 - part->parent->ecc_stats.failed - stats.failed; 112 - else 113 - mtd->ecc_stats.corrected += 114 - part->parent->ecc_stats.corrected - stats.corrected; 115 - return res; 116 - } 117 - 118 - static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from, 119 - size_t len, size_t *retlen, u_char *buf) 120 - { 121 - struct mtd_part *part = mtd_to_part(mtd); 
122 - return part->parent->_read_user_prot_reg(part->parent, from, len, 123 - retlen, buf); 124 - } 125 - 126 - static int part_get_user_prot_info(struct mtd_info *mtd, size_t len, 127 - size_t *retlen, struct otp_info *buf) 128 - { 129 - struct mtd_part *part = mtd_to_part(mtd); 130 - return part->parent->_get_user_prot_info(part->parent, len, retlen, 131 - buf); 132 - } 133 - 134 - static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, 135 - size_t len, size_t *retlen, u_char *buf) 136 - { 137 - struct mtd_part *part = mtd_to_part(mtd); 138 - return part->parent->_read_fact_prot_reg(part->parent, from, len, 139 - retlen, buf); 140 - } 141 - 142 - static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len, 143 - size_t *retlen, struct otp_info *buf) 144 - { 145 - struct mtd_part *part = mtd_to_part(mtd); 146 - return part->parent->_get_fact_prot_info(part->parent, len, retlen, 147 - buf); 148 - } 149 - 150 - static int part_write(struct mtd_info *mtd, loff_t to, size_t len, 151 - size_t *retlen, const u_char *buf) 152 - { 153 - struct mtd_part *part = mtd_to_part(mtd); 154 - return part->parent->_write(part->parent, to + part->offset, len, 155 - retlen, buf); 156 - } 157 - 158 - static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 159 - size_t *retlen, const u_char *buf) 160 - { 161 - struct mtd_part *part = mtd_to_part(mtd); 162 - return part->parent->_panic_write(part->parent, to + part->offset, len, 163 - retlen, buf); 164 - } 165 - 166 - static int part_write_oob(struct mtd_info *mtd, loff_t to, 167 - struct mtd_oob_ops *ops) 168 - { 169 - struct mtd_part *part = mtd_to_part(mtd); 170 - 171 - return part->parent->_write_oob(part->parent, to + part->offset, ops); 172 - } 173 - 174 - static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from, 175 - size_t len, size_t *retlen, u_char *buf) 176 - { 177 - struct mtd_part *part = mtd_to_part(mtd); 178 - return part->parent->_write_user_prot_reg(part->parent, 
from, len, 179 - retlen, buf); 180 - } 181 - 182 - static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, 183 - size_t len) 184 - { 185 - struct mtd_part *part = mtd_to_part(mtd); 186 - return part->parent->_lock_user_prot_reg(part->parent, from, len); 187 - } 188 - 189 - static int part_writev(struct mtd_info *mtd, const struct kvec *vecs, 190 - unsigned long count, loff_t to, size_t *retlen) 191 - { 192 - struct mtd_part *part = mtd_to_part(mtd); 193 - return part->parent->_writev(part->parent, vecs, count, 194 - to + part->offset, retlen); 195 - } 196 - 197 - static int part_erase(struct mtd_info *mtd, struct erase_info *instr) 198 - { 199 - struct mtd_part *part = mtd_to_part(mtd); 200 - int ret; 201 - 202 - instr->addr += part->offset; 203 - ret = part->parent->_erase(part->parent, instr); 204 - if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) 205 - instr->fail_addr -= part->offset; 206 - instr->addr -= part->offset; 207 - 208 - return ret; 209 - } 210 - 211 - static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 212 - { 213 - struct mtd_part *part = mtd_to_part(mtd); 214 - return part->parent->_lock(part->parent, ofs + part->offset, len); 215 - } 216 - 217 - static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 218 - { 219 - struct mtd_part *part = mtd_to_part(mtd); 220 - return part->parent->_unlock(part->parent, ofs + part->offset, len); 221 - } 222 - 223 - static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) 224 - { 225 - struct mtd_part *part = mtd_to_part(mtd); 226 - return part->parent->_is_locked(part->parent, ofs + part->offset, len); 227 - } 228 - 229 - static void part_sync(struct mtd_info *mtd) 230 - { 231 - struct mtd_part *part = mtd_to_part(mtd); 232 - part->parent->_sync(part->parent); 233 - } 234 - 235 - static int part_suspend(struct mtd_info *mtd) 236 - { 237 - struct mtd_part *part = mtd_to_part(mtd); 238 - return part->parent->_suspend(part->parent); 239 - } 240 - 241 - 
static void part_resume(struct mtd_info *mtd) 242 - { 243 - struct mtd_part *part = mtd_to_part(mtd); 244 - part->parent->_resume(part->parent); 245 - } 246 - 247 - static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs) 248 - { 249 - struct mtd_part *part = mtd_to_part(mtd); 250 - ofs += part->offset; 251 - return part->parent->_block_isreserved(part->parent, ofs); 252 - } 253 - 254 - static int part_block_isbad(struct mtd_info *mtd, loff_t ofs) 255 - { 256 - struct mtd_part *part = mtd_to_part(mtd); 257 - ofs += part->offset; 258 - return part->parent->_block_isbad(part->parent, ofs); 259 - } 260 - 261 - static int part_block_markbad(struct mtd_info *mtd, loff_t ofs) 262 - { 263 - struct mtd_part *part = mtd_to_part(mtd); 264 - int res; 265 - 266 - ofs += part->offset; 267 - res = part->parent->_block_markbad(part->parent, ofs); 268 - if (!res) 269 - mtd->ecc_stats.badblocks++; 270 - return res; 271 - } 272 - 273 - static int part_get_device(struct mtd_info *mtd) 274 - { 275 - struct mtd_part *part = mtd_to_part(mtd); 276 - return part->parent->_get_device(part->parent); 277 - } 278 - 279 - static void part_put_device(struct mtd_info *mtd) 280 - { 281 - struct mtd_part *part = mtd_to_part(mtd); 282 - part->parent->_put_device(part->parent); 283 - } 284 - 285 - static int part_ooblayout_ecc(struct mtd_info *mtd, int section, 286 - struct mtd_oob_region *oobregion) 287 - { 288 - struct mtd_part *part = mtd_to_part(mtd); 289 - 290 - return mtd_ooblayout_ecc(part->parent, section, oobregion); 291 - } 292 - 293 - static int part_ooblayout_free(struct mtd_info *mtd, int section, 294 - struct mtd_oob_region *oobregion) 295 - { 296 - struct mtd_part *part = mtd_to_part(mtd); 297 - 298 - return mtd_ooblayout_free(part->parent, section, oobregion); 299 - } 300 - 301 - static const struct mtd_ooblayout_ops part_ooblayout_ops = { 302 - .ecc = part_ooblayout_ecc, 303 - .free = part_ooblayout_free, 304 - }; 305 - 306 - static int part_max_bad_blocks(struct mtd_info 
*mtd, loff_t ofs, size_t len) 307 - { 308 - struct mtd_part *part = mtd_to_part(mtd); 309 - 310 - return part->parent->_max_bad_blocks(part->parent, 311 - ofs + part->offset, len); 312 - } 313 - 314 - static inline void free_partition(struct mtd_part *p) 315 - { 316 - kfree(p->mtd.name); 317 - kfree(p); 318 - } 319 - 320 - static struct mtd_part *allocate_partition(struct mtd_info *parent, 321 - const struct mtd_partition *part, int partno, 322 - uint64_t cur_offset) 34 + static struct mtd_info *allocate_partition(struct mtd_info *parent, 35 + const struct mtd_partition *part, 36 + int partno, uint64_t cur_offset) 323 37 { 324 38 int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize : 325 39 parent->erasesize; 326 - struct mtd_part *slave; 40 + struct mtd_info *child, *master = mtd_get_master(parent); 327 41 u32 remainder; 328 42 char *name; 329 43 u64 tmp; 330 44 331 45 /* allocate the partition structure */ 332 - slave = kzalloc(sizeof(*slave), GFP_KERNEL); 46 + child = kzalloc(sizeof(*child), GFP_KERNEL); 333 47 name = kstrdup(part->name, GFP_KERNEL); 334 - if (!name || !slave) { 48 + if (!name || !child) { 335 49 printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n", 336 50 parent->name); 337 51 kfree(name); 338 - kfree(slave); 52 + kfree(child); 339 53 return ERR_PTR(-ENOMEM); 340 54 } 341 55 342 56 /* set up the MTD object for this partition */ 343 - slave->mtd.type = parent->type; 344 - slave->mtd.flags = parent->orig_flags & ~part->mask_flags; 345 - slave->mtd.orig_flags = slave->mtd.flags; 346 - slave->mtd.size = part->size; 347 - slave->mtd.writesize = parent->writesize; 348 - slave->mtd.writebufsize = parent->writebufsize; 349 - slave->mtd.oobsize = parent->oobsize; 350 - slave->mtd.oobavail = parent->oobavail; 351 - slave->mtd.subpage_sft = parent->subpage_sft; 352 - slave->mtd.pairing = parent->pairing; 57 + child->type = parent->type; 58 + child->part.flags = parent->flags & ~part->mask_flags; 59 + 
child->flags = child->part.flags; 60 + child->size = part->size; 61 + child->writesize = parent->writesize; 62 + child->writebufsize = parent->writebufsize; 63 + child->oobsize = parent->oobsize; 64 + child->oobavail = parent->oobavail; 65 + child->subpage_sft = parent->subpage_sft; 353 66 354 - slave->mtd.name = name; 355 - slave->mtd.owner = parent->owner; 67 + child->name = name; 68 + child->owner = parent->owner; 356 69 357 70 /* NOTE: Historically, we didn't arrange MTDs as a tree out of 358 71 * concern for showing the same data in multiple partitions. ··· 73 360 * so the MTD_PARTITIONED_MASTER option allows that. The master 74 361 * will have device nodes etc only if this is set, so make the 75 362 * parent conditional on that option. Note, this is a way to 76 - * distinguish between the master and the partition in sysfs. 363 + * distinguish between the parent and its partitions in sysfs. 77 364 */ 78 - slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ? 79 - &parent->dev : 80 - parent->dev.parent; 81 - slave->mtd.dev.of_node = part->of_node; 365 + child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ? 
366 + &parent->dev : parent->dev.parent; 367 + child->dev.of_node = part->of_node; 368 + child->parent = parent; 369 + child->part.offset = part->offset; 370 + INIT_LIST_HEAD(&child->partitions); 82 371 83 - if (parent->_read) 84 - slave->mtd._read = part_read; 85 - if (parent->_write) 86 - slave->mtd._write = part_write; 87 - 88 - if (parent->_panic_write) 89 - slave->mtd._panic_write = part_panic_write; 90 - 91 - if (parent->_point && parent->_unpoint) { 92 - slave->mtd._point = part_point; 93 - slave->mtd._unpoint = part_unpoint; 94 - } 95 - 96 - if (parent->_read_oob) 97 - slave->mtd._read_oob = part_read_oob; 98 - if (parent->_write_oob) 99 - slave->mtd._write_oob = part_write_oob; 100 - if (parent->_read_user_prot_reg) 101 - slave->mtd._read_user_prot_reg = part_read_user_prot_reg; 102 - if (parent->_read_fact_prot_reg) 103 - slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg; 104 - if (parent->_write_user_prot_reg) 105 - slave->mtd._write_user_prot_reg = part_write_user_prot_reg; 106 - if (parent->_lock_user_prot_reg) 107 - slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg; 108 - if (parent->_get_user_prot_info) 109 - slave->mtd._get_user_prot_info = part_get_user_prot_info; 110 - if (parent->_get_fact_prot_info) 111 - slave->mtd._get_fact_prot_info = part_get_fact_prot_info; 112 - if (parent->_sync) 113 - slave->mtd._sync = part_sync; 114 - if (!partno && !parent->dev.class && parent->_suspend && 115 - parent->_resume) { 116 - slave->mtd._suspend = part_suspend; 117 - slave->mtd._resume = part_resume; 118 - } 119 - if (parent->_writev) 120 - slave->mtd._writev = part_writev; 121 - if (parent->_lock) 122 - slave->mtd._lock = part_lock; 123 - if (parent->_unlock) 124 - slave->mtd._unlock = part_unlock; 125 - if (parent->_is_locked) 126 - slave->mtd._is_locked = part_is_locked; 127 - if (parent->_block_isreserved) 128 - slave->mtd._block_isreserved = part_block_isreserved; 129 - if (parent->_block_isbad) 130 - slave->mtd._block_isbad = 
part_block_isbad; 131 - if (parent->_block_markbad) 132 - slave->mtd._block_markbad = part_block_markbad; 133 - if (parent->_max_bad_blocks) 134 - slave->mtd._max_bad_blocks = part_max_bad_blocks; 135 - 136 - if (parent->_get_device) 137 - slave->mtd._get_device = part_get_device; 138 - if (parent->_put_device) 139 - slave->mtd._put_device = part_put_device; 140 - 141 - slave->mtd._erase = part_erase; 142 - slave->parent = parent; 143 - slave->offset = part->offset; 144 - 145 - if (slave->offset == MTDPART_OFS_APPEND) 146 - slave->offset = cur_offset; 147 - if (slave->offset == MTDPART_OFS_NXTBLK) { 372 + if (child->part.offset == MTDPART_OFS_APPEND) 373 + child->part.offset = cur_offset; 374 + if (child->part.offset == MTDPART_OFS_NXTBLK) { 148 375 tmp = cur_offset; 149 - slave->offset = cur_offset; 376 + child->part.offset = cur_offset; 150 377 remainder = do_div(tmp, wr_alignment); 151 378 if (remainder) { 152 - slave->offset += wr_alignment - remainder; 379 + child->part.offset += wr_alignment - remainder; 153 380 printk(KERN_NOTICE "Moving partition %d: " 154 381 "0x%012llx -> 0x%012llx\n", partno, 155 - (unsigned long long)cur_offset, (unsigned long long)slave->offset); 382 + (unsigned long long)cur_offset, 383 + child->part.offset); 156 384 } 157 385 } 158 - if (slave->offset == MTDPART_OFS_RETAIN) { 159 - slave->offset = cur_offset; 160 - if (parent->size - slave->offset >= slave->mtd.size) { 161 - slave->mtd.size = parent->size - slave->offset 162 - - slave->mtd.size; 386 + if (child->part.offset == MTDPART_OFS_RETAIN) { 387 + child->part.offset = cur_offset; 388 + if (parent->size - child->part.offset >= child->size) { 389 + child->size = parent->size - child->part.offset - 390 + child->size; 163 391 } else { 164 392 printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n", 165 - part->name, parent->size - slave->offset, 166 - slave->mtd.size); 393 + part->name, parent->size - child->part.offset, 394 + child->size); 167 
395 /* register to preserve ordering */ 168 396 goto out_register; 169 397 } 170 398 } 171 - if (slave->mtd.size == MTDPART_SIZ_FULL) 172 - slave->mtd.size = parent->size - slave->offset; 399 + if (child->size == MTDPART_SIZ_FULL) 400 + child->size = parent->size - child->part.offset; 173 401 174 - printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset, 175 - (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name); 402 + printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", 403 + child->part.offset, child->part.offset + child->size, 404 + child->name); 176 405 177 406 /* let's do some sanity checks */ 178 - if (slave->offset >= parent->size) { 407 + if (child->part.offset >= parent->size) { 179 408 /* let's register it anyway to preserve ordering */ 180 - slave->offset = 0; 181 - slave->mtd.size = 0; 409 + child->part.offset = 0; 410 + child->size = 0; 182 411 183 412 /* Initialize ->erasesize to make add_mtd_device() happy. */ 184 - slave->mtd.erasesize = parent->erasesize; 185 - 413 + child->erasesize = parent->erasesize; 186 414 printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n", 187 415 part->name); 188 416 goto out_register; 189 417 } 190 - if (slave->offset + slave->mtd.size > parent->size) { 191 - slave->mtd.size = parent->size - slave->offset; 418 + if (child->part.offset + child->size > parent->size) { 419 + child->size = parent->size - child->part.offset; 192 420 printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n", 193 - part->name, parent->name, (unsigned long long)slave->mtd.size); 421 + part->name, parent->name, child->size); 194 422 } 195 423 if (parent->numeraseregions > 1) { 196 424 /* Deal with variable erase size stuff */ 197 425 int i, max = parent->numeraseregions; 198 - u64 end = slave->offset + slave->mtd.size; 426 + u64 end = child->part.offset + child->size; 199 427 struct mtd_erase_region_info *regions = 
parent->eraseregions; 200 428 201 429 /* Find the first erase regions which is part of this 202 430 * partition. */ 203 - for (i = 0; i < max && regions[i].offset <= slave->offset; i++) 431 + for (i = 0; i < max && regions[i].offset <= child->part.offset; 432 + i++) 204 433 ; 205 434 /* The loop searched for the region _behind_ the first one */ 206 435 if (i > 0) ··· 150 495 151 496 /* Pick biggest erasesize */ 152 497 for (; i < max && regions[i].offset < end; i++) { 153 - if (slave->mtd.erasesize < regions[i].erasesize) { 154 - slave->mtd.erasesize = regions[i].erasesize; 155 - } 498 + if (child->erasesize < regions[i].erasesize) 499 + child->erasesize = regions[i].erasesize; 156 500 } 157 - BUG_ON(slave->mtd.erasesize == 0); 501 + BUG_ON(child->erasesize == 0); 158 502 } else { 159 503 /* Single erase size */ 160 - slave->mtd.erasesize = parent->erasesize; 504 + child->erasesize = parent->erasesize; 161 505 } 162 506 163 507 /* 164 - * Slave erasesize might differ from the master one if the master 508 + * Child erasesize might differ from the parent one if the parent 165 509 * exposes several regions with different erasesize. Adjust 166 510 * wr_alignment accordingly. 
167 511 */ 168 - if (!(slave->mtd.flags & MTD_NO_ERASE)) 169 - wr_alignment = slave->mtd.erasesize; 512 + if (!(child->flags & MTD_NO_ERASE)) 513 + wr_alignment = child->erasesize; 170 514 171 - tmp = part_absolute_offset(parent) + slave->offset; 515 + tmp = mtd_get_master_ofs(child, 0); 172 516 remainder = do_div(tmp, wr_alignment); 173 - if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) { 517 + if ((child->flags & MTD_WRITEABLE) && remainder) { 174 518 /* Doesn't start on a boundary of major erase size */ 175 519 /* FIXME: Let it be writable if it is on a boundary of 176 520 * _minor_ erase size though */ 177 - slave->mtd.flags &= ~MTD_WRITEABLE; 521 + child->flags &= ~MTD_WRITEABLE; 178 522 printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n", 179 523 part->name); 180 524 } 181 525 182 - tmp = part_absolute_offset(parent) + slave->mtd.size; 526 + tmp = mtd_get_master_ofs(child, 0) + child->size; 183 527 remainder = do_div(tmp, wr_alignment); 184 - if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) { 185 - slave->mtd.flags &= ~MTD_WRITEABLE; 528 + if ((child->flags & MTD_WRITEABLE) && remainder) { 529 + child->flags &= ~MTD_WRITEABLE; 186 530 printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n", 187 531 part->name); 188 532 } 189 533 190 - mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops); 191 - slave->mtd.ecc_step_size = parent->ecc_step_size; 192 - slave->mtd.ecc_strength = parent->ecc_strength; 193 - slave->mtd.bitflip_threshold = parent->bitflip_threshold; 534 + child->ecc_step_size = parent->ecc_step_size; 535 + child->ecc_strength = parent->ecc_strength; 536 + child->bitflip_threshold = parent->bitflip_threshold; 194 537 195 - if (parent->_block_isbad) { 538 + if (master->_block_isbad) { 196 539 uint64_t offs = 0; 197 540 198 - while (offs < slave->mtd.size) { 199 - if (mtd_block_isreserved(parent, offs + slave->offset)) 200 - 
slave->mtd.ecc_stats.bbtblocks++; 201 - else if (mtd_block_isbad(parent, offs + slave->offset)) 202 - slave->mtd.ecc_stats.badblocks++; 203 - offs += slave->mtd.erasesize; 541 + while (offs < child->size) { 542 + if (mtd_block_isreserved(child, offs)) 543 + child->ecc_stats.bbtblocks++; 544 + else if (mtd_block_isbad(child, offs)) 545 + child->ecc_stats.badblocks++; 546 + offs += child->erasesize; 204 547 } 205 548 } 206 549 207 550 out_register: 208 - return slave; 551 + return child; 209 552 } 210 553 211 554 static ssize_t mtd_partition_offset_show(struct device *dev, 212 555 struct device_attribute *attr, char *buf) 213 556 { 214 557 struct mtd_info *mtd = dev_get_drvdata(dev); 215 - struct mtd_part *part = mtd_to_part(mtd); 216 - return snprintf(buf, PAGE_SIZE, "%llu\n", part->offset); 558 + 559 + return snprintf(buf, PAGE_SIZE, "%lld\n", mtd->part.offset); 217 560 } 218 561 219 562 static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL); ··· 221 568 NULL 222 569 }; 223 570 224 - static int mtd_add_partition_attrs(struct mtd_part *new) 571 + static int mtd_add_partition_attrs(struct mtd_info *new) 225 572 { 226 - int ret = sysfs_create_files(&new->mtd.dev.kobj, mtd_partition_attrs); 573 + int ret = sysfs_create_files(&new->dev.kobj, mtd_partition_attrs); 227 574 if (ret) 228 575 printk(KERN_WARNING 229 576 "mtd: failed to create partition attrs, err=%d\n", ret); ··· 233 580 int mtd_add_partition(struct mtd_info *parent, const char *name, 234 581 long long offset, long long length) 235 582 { 583 + struct mtd_info *master = mtd_get_master(parent); 236 584 struct mtd_partition part; 237 - struct mtd_part *new; 585 + struct mtd_info *child; 238 586 int ret = 0; 239 587 240 588 /* the direct offset is expected */ ··· 254 600 part.size = length; 255 601 part.offset = offset; 256 602 257 - new = allocate_partition(parent, &part, -1, offset); 258 - if (IS_ERR(new)) 259 - return PTR_ERR(new); 603 + child = allocate_partition(parent, &part, -1, offset); 604 
+ if (IS_ERR(child)) 605 + return PTR_ERR(child); 260 606 261 - mutex_lock(&mtd_partitions_mutex); 262 - list_add(&new->list, &mtd_partitions); 263 - mutex_unlock(&mtd_partitions_mutex); 607 + mutex_lock(&master->master.partitions_lock); 608 + list_add_tail(&child->part.node, &parent->partitions); 609 + mutex_unlock(&master->master.partitions_lock); 264 610 265 - ret = add_mtd_device(&new->mtd); 611 + ret = add_mtd_device(child); 266 612 if (ret) 267 613 goto err_remove_part; 268 614 269 - mtd_add_partition_attrs(new); 615 + mtd_add_partition_attrs(child); 270 616 271 617 return 0; 272 618 273 619 err_remove_part: 274 - mutex_lock(&mtd_partitions_mutex); 275 - list_del(&new->list); 276 - mutex_unlock(&mtd_partitions_mutex); 620 + mutex_lock(&master->master.partitions_lock); 621 + list_del(&child->part.node); 622 + mutex_unlock(&master->master.partitions_lock); 277 623 278 - free_partition(new); 624 + free_partition(child); 279 625 280 626 return ret; 281 627 } ··· 284 630 /** 285 631 * __mtd_del_partition - delete MTD partition 286 632 * 287 - * @priv: internal MTD struct for partition to be deleted 633 + * @mtd: MTD structure to be deleted 288 634 * 289 635 * This function must be called with the partitions mutex locked. 
290 636 */ 291 - static int __mtd_del_partition(struct mtd_part *priv) 637 + static int __mtd_del_partition(struct mtd_info *mtd) 292 638 { 293 - struct mtd_part *child, *next; 639 + struct mtd_info *child, *next; 294 640 int err; 295 641 296 - list_for_each_entry_safe(child, next, &mtd_partitions, list) { 297 - if (child->parent == &priv->mtd) { 298 - err = __mtd_del_partition(child); 299 - if (err) 300 - return err; 301 - } 642 + list_for_each_entry_safe(child, next, &mtd->partitions, part.node) { 643 + err = __mtd_del_partition(child); 644 + if (err) 645 + return err; 302 646 } 303 647 304 - sysfs_remove_files(&priv->mtd.dev.kobj, mtd_partition_attrs); 648 + sysfs_remove_files(&mtd->dev.kobj, mtd_partition_attrs); 305 649 306 - err = del_mtd_device(&priv->mtd); 650 + err = del_mtd_device(mtd); 307 651 if (err) 308 652 return err; 309 653 310 - list_del(&priv->list); 654 + list_del(&mtd->part.node); 311 - free_partition(priv); 655 + free_partition(mtd); 312 656 313 657 return 0; 314 658 } 315 659 316 660 /* 317 661 * This function unregisters and destroy all slave MTD objects which are 318 - * attached to the given MTD object. 662 + * attached to the given MTD object, recursively. 
319 663 */ 320 - int del_mtd_partitions(struct mtd_info *mtd) 664 + static int __del_mtd_partitions(struct mtd_info *mtd) 321 665 { 322 - struct mtd_part *slave, *next; 666 + struct mtd_info *child, *next; 667 + LIST_HEAD(tmp_list); 323 668 int ret, err = 0; 324 669 325 - mutex_lock(&mtd_partitions_mutex); 326 - list_for_each_entry_safe(slave, next, &mtd_partitions, list) 327 - if (slave->parent == mtd) { 328 - ret = __mtd_del_partition(slave); 329 - if (ret < 0) 330 - err = ret; 670 + list_for_each_entry_safe(child, next, &mtd->partitions, part.node) { 671 + if (mtd_has_partitions(child)) 672 + __mtd_del_partitions(child); 673 + 674 + pr_info("Deleting %s MTD partition\n", child->name); 675 + ret = del_mtd_device(child); 676 + if (ret < 0) { 677 + pr_err("Error when deleting partition \"%s\" (%d)\n", 678 + child->name, ret); 679 + err = ret; 680 + continue; 331 681 } 332 - mutex_unlock(&mtd_partitions_mutex); 682 + 683 + list_del(&child->part.node); 684 + free_partition(child); 685 + } 333 686 334 687 return err; 335 688 } 336 689 690 + int del_mtd_partitions(struct mtd_info *mtd) 691 + { 692 + struct mtd_info *master = mtd_get_master(mtd); 693 + int ret; 694 + 695 + pr_info("Deleting MTD partitions on \"%s\":\n", mtd->name); 696 + 697 + mutex_lock(&master->master.partitions_lock); 698 + ret = __del_mtd_partitions(mtd); 699 + mutex_unlock(&master->master.partitions_lock); 700 + 701 + return ret; 702 + } 703 + 337 704 int mtd_del_partition(struct mtd_info *mtd, int partno) 338 705 { 339 - struct mtd_part *slave, *next; 706 + struct mtd_info *child, *master = mtd_get_master(mtd); 340 707 int ret = -EINVAL; 341 708 342 - mutex_lock(&mtd_partitions_mutex); 343 - list_for_each_entry_safe(slave, next, &mtd_partitions, list) 344 - if ((slave->parent == mtd) && 345 - (slave->mtd.index == partno)) { 346 - ret = __mtd_del_partition(slave); 709 + mutex_lock(&master->master.partitions_lock); 710 + list_for_each_entry(child, &mtd->partitions, part.node) { 711 + if (child->index 
== partno) { 712 + ret = __mtd_del_partition(child); 347 713 break; 348 714 } 349 - mutex_unlock(&mtd_partitions_mutex); 715 + } 716 + mutex_unlock(&master->master.partitions_lock); 350 717 351 718 return ret; 352 719 } 353 720 EXPORT_SYMBOL_GPL(mtd_del_partition); 354 721 355 722 /* 356 - * This function, given a master MTD object and a partition table, creates 357 - * and registers slave MTD objects which are bound to the master according to 358 - * the partition definitions. 723 + * This function, given a parent MTD object and a partition table, creates 724 + * and registers the child MTD objects which are bound to the parent according 725 + * to the partition definitions. 359 726 * 360 - * For historical reasons, this function's caller only registers the master 727 + * For historical reasons, this function's caller only registers the parent 361 728 * if the MTD_PARTITIONED_MASTER config option is set. 362 729 */ 363 730 364 - int add_mtd_partitions(struct mtd_info *master, 731 + int add_mtd_partitions(struct mtd_info *parent, 365 732 const struct mtd_partition *parts, 366 733 int nbparts) 367 734 { 368 - struct mtd_part *slave; 735 + struct mtd_info *child, *master = mtd_get_master(parent); 369 736 uint64_t cur_offset = 0; 370 737 int i, ret; 371 738 372 - printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name); 739 + printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", 740 + nbparts, parent->name); 373 741 374 742 for (i = 0; i < nbparts; i++) { 375 - slave = allocate_partition(master, parts + i, i, cur_offset); 376 - if (IS_ERR(slave)) { 377 - ret = PTR_ERR(slave); 743 + child = allocate_partition(parent, parts + i, i, cur_offset); 744 + if (IS_ERR(child)) { 745 + ret = PTR_ERR(child); 378 746 goto err_del_partitions; 379 747 } 380 748 381 - mutex_lock(&mtd_partitions_mutex); 382 - list_add(&slave->list, &mtd_partitions); 383 - mutex_unlock(&mtd_partitions_mutex); 749 + mutex_lock(&master->master.partitions_lock); 750 + 
list_add_tail(&child->part.node, &parent->partitions); 751 + mutex_unlock(&master->master.partitions_lock); 384 752 385 - ret = add_mtd_device(&slave->mtd); 753 + ret = add_mtd_device(child); 386 754 if (ret) { 387 - mutex_lock(&mtd_partitions_mutex); 388 - list_del(&slave->list); 389 - mutex_unlock(&mtd_partitions_mutex); 755 + mutex_lock(&master->master.partitions_lock); 756 + list_del(&child->part.node); 757 + mutex_unlock(&master->master.partitions_lock); 390 758 391 - free_partition(slave); 759 + free_partition(child); 392 760 goto err_del_partitions; 393 761 } 394 762 395 - mtd_add_partition_attrs(slave); 396 - /* Look for subpartitions */ 397 - parse_mtd_partitions(&slave->mtd, parts[i].types, NULL); 763 + mtd_add_partition_attrs(child); 398 764 399 - cur_offset = slave->offset + slave->mtd.size; 765 + /* Look for subpartitions */ 766 + parse_mtd_partitions(child, parts[i].types, NULL); 767 + 768 + cur_offset = child->part.offset + child->size; 400 769 } 401 770 402 771 return 0; ··· 700 1023 } 701 1024 } 702 1025 703 - int mtd_is_partition(const struct mtd_info *mtd) 704 - { 705 - struct mtd_part *part; 706 - int ispart = 0; 707 - 708 - mutex_lock(&mtd_partitions_mutex); 709 - list_for_each_entry(part, &mtd_partitions, list) 710 - if (&part->mtd == mtd) { 711 - ispart = 1; 712 - break; 713 - } 714 - mutex_unlock(&mtd_partitions_mutex); 715 - 716 - return ispart; 717 - } 718 - EXPORT_SYMBOL_GPL(mtd_is_partition); 719 - 720 1026 /* Returns the size of the entire flash chip */ 721 1027 uint64_t mtd_get_device_size(const struct mtd_info *mtd) 722 1028 { 723 - if (!mtd_is_partition(mtd)) 724 - return mtd->size; 1029 + struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd); 725 1030 726 - return mtd_get_device_size(mtd_to_part(mtd)->parent); 1031 + return master->size; 727 1032 } 728 1033 EXPORT_SYMBOL_GPL(mtd_get_device_size);
+115 -10
include/linux/mtd/mtd.h
··· 8 8 9 9 #include <linux/types.h> 10 10 #include <linux/uio.h> 11 + #include <linux/list.h> 11 12 #include <linux/notifier.h> 12 13 #include <linux/device.h> 13 14 #include <linux/of.h> ··· 195 194 const char *partid; 196 195 }; 197 196 197 + /** 198 + * struct mtd_part - MTD partition specific fields 199 + * 200 + * @node: list node used to add an MTD partition to the parent partition list 201 + * @offset: offset of the partition relatively to the parent offset 202 + * @flags: original flags (before the mtdpart logic decided to tweak them based 203 + * on flash constraints, like eraseblock/pagesize alignment) 204 + * 205 + * This struct is embedded in mtd_info and contains partition-specific 206 + * properties/fields. 207 + */ 208 + struct mtd_part { 209 + struct list_head node; 210 + u64 offset; 211 + u32 flags; 212 + }; 213 + 214 + /** 215 + * struct mtd_master - MTD master specific fields 216 + * 217 + * @partitions_lock: lock protecting accesses to the partition list. Protects 218 + * not only the master partition list, but also all 219 + * sub-partitions. 220 + * @suspended: set to 1 when the device is suspended, 0 otherwise 221 + * 222 + * This struct is embedded in mtd_info and contains master-specific 223 + * properties/fields. The master is the root MTD device from the MTD partition 224 + * point of view. 225 + */ 226 + struct mtd_master { 227 + struct mutex partitions_lock; 228 + unsigned int suspended : 1; 229 + }; 230 + 198 231 struct mtd_info { 199 232 u_char type; 200 233 uint32_t flags; 201 - uint32_t orig_flags; /* Flags as before running mtd checks */ 202 234 uint64_t size; // Total size of the MTD 203 235 204 236 /* "Major" erase size for the device. Naïve users may take this ··· 373 339 int usecount; 374 340 struct mtd_debug_info dbg; 375 341 struct nvmem_device *nvmem; 342 + 343 + /* 344 + * Parent device from the MTD partition point of view. 345 + * 346 + * MTD masters do not have any parent, MTD partitions do. 
The parent 347 + * MTD device can itself be a partition. 348 + */ 349 + struct mtd_info *parent; 350 + 351 + /* List of partitions attached to this MTD device */ 352 + struct list_head partitions; 353 + 354 + union { 355 + struct mtd_part part; 356 + struct mtd_master master; 357 + }; 376 358 }; 359 + 360 + static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd) 361 + { 362 + while (mtd->parent) 363 + mtd = mtd->parent; 364 + 365 + return mtd; 366 + } 367 + 368 + static inline u64 mtd_get_master_ofs(struct mtd_info *mtd, u64 ofs) 369 + { 370 + while (mtd->parent) { 371 + ofs += mtd->part.offset; 372 + mtd = mtd->parent; 373 + } 374 + 375 + return ofs; 376 + } 377 + 378 + static inline bool mtd_is_partition(const struct mtd_info *mtd) 379 + { 380 + return mtd->parent; 381 + } 382 + 383 + static inline bool mtd_has_partitions(const struct mtd_info *mtd) 384 + { 385 + return !list_empty(&mtd->partitions); 386 + } 377 387 378 388 int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, 379 389 struct mtd_oob_region *oobecc); ··· 470 392 static inline int mtd_max_bad_blocks(struct mtd_info *mtd, 471 393 loff_t ofs, size_t len) 472 394 { 473 - if (!mtd->_max_bad_blocks) 395 + struct mtd_info *master = mtd_get_master(mtd); 396 + 397 + if (!master->_max_bad_blocks) 474 398 return -ENOTSUPP; 475 399 476 400 if (mtd->size < (len + ofs) || ofs < 0) 477 401 return -EINVAL; 478 402 479 - return mtd->_max_bad_blocks(mtd, ofs, len); 403 + return master->_max_bad_blocks(master, mtd_get_master_ofs(mtd, ofs), 404 + len); 480 405 } 481 406 482 407 int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit, ··· 520 439 521 440 static inline void mtd_sync(struct mtd_info *mtd) 522 441 { 523 - if (mtd->_sync) 524 - mtd->_sync(mtd); 442 + struct mtd_info *master = mtd_get_master(mtd); 443 + 444 + if (master->_sync) 445 + master->_sync(master); 525 446 } 526 447 527 448 int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); ··· 535 452 536 453 static inline int 
mtd_suspend(struct mtd_info *mtd) 537 454 { 538 - return mtd->_suspend ? mtd->_suspend(mtd) : 0; 455 + struct mtd_info *master = mtd_get_master(mtd); 456 + int ret; 457 + 458 + if (master->master.suspended) 459 + return 0; 460 + 461 + ret = master->_suspend ? master->_suspend(master) : 0; 462 + if (ret) 463 + return ret; 464 + 465 + master->master.suspended = 1; 466 + return 0; 539 467 } 540 468 541 469 static inline void mtd_resume(struct mtd_info *mtd) 542 470 { 543 - if (mtd->_resume) 544 - mtd->_resume(mtd); 471 + struct mtd_info *master = mtd_get_master(mtd); 472 + 473 + if (!master->master.suspended) 474 + return; 475 + 476 + if (master->_resume) 477 + master->_resume(master); 478 + 479 + master->master.suspended = 0; 545 480 } 546 481 547 482 static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) ··· 639 538 640 539 static inline int mtd_has_oob(const struct mtd_info *mtd) 641 540 { 642 - return mtd->_read_oob && mtd->_write_oob; 541 + struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd); 542 + 543 + return master->_read_oob && master->_write_oob; 643 544 } 644 545 645 546 static inline int mtd_type_is_nand(const struct mtd_info *mtd) ··· 651 548 652 549 static inline int mtd_can_have_bb(const struct mtd_info *mtd) 653 550 { 654 - return !!mtd->_block_isbad; 551 + struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd); 552 + 553 + return !!master->_block_isbad; 655 554 } 656 555 657 556 /* Kernel-side ioctl definitions */
-1
include/linux/mtd/partitions.h
··· 105 105 module_driver(__mtd_part_parser, register_mtd_parser, \ 106 106 deregister_mtd_parser) 107 107 108 - int mtd_is_partition(const struct mtd_info *mtd); 109 108 int mtd_add_partition(struct mtd_info *master, const char *name, 110 109 long long offset, long long length); 111 110 int mtd_del_partition(struct mtd_info *master, int partno);