Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'nand/for-6.8' into mtd/next

* Raw NAND

The most meaningful change is the conversion of the brcmnand driver
to the ->exec_op() API; alongside it, this series brings additional
changes to the core to help controller drivers handle the WP pin
themselves during destructive operations when relevant.

As always, there is also a whole bunch of miscellaneous W=1 fixes,
together with a few runtime fixes (double free, timeout value, OOB
layout, missing register initialization) and the usual load of remove
callbacks turned into void (which led to switching the txx9ndfmc driver
to use module_platform_driver()).

+225 -262
+1 -1
drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
··· 85 85 86 86 static struct platform_driver bcm63138_nand_driver = { 87 87 .probe = bcm63138_nand_probe, 88 - .remove = brcmnand_remove, 88 + .remove_new = brcmnand_remove, 89 89 .driver = { 90 90 .name = "bcm63138_nand", 91 91 .pm = &brcmnand_pm_ops,
+1 -1
drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c
··· 117 117 118 118 static struct platform_driver bcm6368_nand_driver = { 119 119 .probe = bcm6368_nand_probe, 120 - .remove = brcmnand_remove, 120 + .remove_new = brcmnand_remove, 121 121 .driver = { 122 122 .name = "bcm6368_nand", 123 123 .pm = &brcmnand_pm_ops,
+1 -1
drivers/mtd/nand/raw/brcmnand/bcma_nand.c
··· 119 119 120 120 static struct platform_driver brcmnand_bcma_nand_driver = { 121 121 .probe = brcmnand_bcma_nand_probe, 122 - .remove = brcmnand_remove, 122 + .remove_new = brcmnand_remove, 123 123 .driver = { 124 124 .name = "bcma_brcmnand", 125 125 .pm = &brcmnand_pm_ops,
+176 -232
drivers/mtd/nand/raw/brcmnand/brcmnand.c
··· 625 625 /* Only for v7.2 */ 626 626 #define ACC_CONTROL_ECC_EXT_SHIFT 13 627 627 628 + static u8 brcmnand_status(struct brcmnand_host *host); 629 + 628 630 static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl) 629 631 { 630 632 #if IS_ENABLED(CONFIG_MTD_NAND_BRCMNAND_BCMA) ··· 1024 1022 return -1; 1025 1023 } 1026 1024 1027 - static int brcmnand_get_sector_size_1k(struct brcmnand_host *host) 1028 - { 1029 - struct brcmnand_controller *ctrl = host->ctrl; 1030 - int shift = brcmnand_sector_1k_shift(ctrl); 1031 - u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, 1032 - BRCMNAND_CS_ACC_CONTROL); 1033 - 1034 - if (shift < 0) 1035 - return 0; 1036 - 1037 - return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1; 1038 - } 1039 - 1040 1025 static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val) 1041 1026 { 1042 1027 struct brcmnand_controller *ctrl = host->ctrl; ··· 1050 1061 CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30), 1051 1062 }; 1052 1063 1053 - static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl, 1064 + static int bcmnand_ctrl_poll_status(struct brcmnand_host *host, 1054 1065 u32 mask, u32 expected_val, 1055 1066 unsigned long timeout_ms) 1056 1067 { 1068 + struct brcmnand_controller *ctrl = host->ctrl; 1057 1069 unsigned long limit; 1058 1070 u32 val; 1059 1071 ··· 1063 1073 1064 1074 limit = jiffies + msecs_to_jiffies(timeout_ms); 1065 1075 do { 1076 + if (mask & INTFC_FLASH_STATUS) 1077 + brcmnand_status(host); 1078 + 1066 1079 val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); 1067 1080 if ((val & mask) == expected_val) 1068 1081 return 0; ··· 1077 1084 * do a final check after time out in case the CPU was busy and the driver 1078 1085 * did not get enough time to perform the polling to avoid false alarms 1079 1086 */ 1087 + if (mask & INTFC_FLASH_STATUS) 1088 + brcmnand_status(host); 1089 + 1080 1090 val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); 1081 1091 if ((val & mask) == 
expected_val) 1082 1092 return 0; ··· 1375 1379 * make sure ctrl/flash ready before and after 1376 1380 * changing state of #WP pin 1377 1381 */ 1378 - ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY | 1382 + ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY | 1379 1383 NAND_STATUS_READY, 1380 1384 NAND_CTRL_RDY | 1381 1385 NAND_STATUS_READY, 0); ··· 1383 1387 return; 1384 1388 1385 1389 brcmnand_set_wp(ctrl, wp); 1386 - nand_status_op(chip, NULL); 1390 + /* force controller operation to update internal copy of NAND chip status */ 1391 + brcmnand_status(host); 1387 1392 /* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */ 1388 - ret = bcmnand_ctrl_poll_status(ctrl, 1393 + ret = bcmnand_ctrl_poll_status(host, 1389 1394 NAND_CTRL_RDY | 1390 1395 NAND_STATUS_READY | 1391 1396 NAND_STATUS_WP, ··· 1626 1629 */ 1627 1630 if (oops_in_progress) { 1628 1631 if (ctrl->cmd_pending && 1629 - bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0)) 1632 + bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0)) 1630 1633 return; 1631 1634 } else 1632 1635 BUG_ON(ctrl->cmd_pending != 0); 1633 1636 ctrl->cmd_pending = cmd; 1634 1637 1635 - ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); 1638 + ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); 1636 1639 WARN_ON(ret); 1637 1640 1638 1641 mb(); /* flush previous writes */ 1639 1642 brcmnand_write_reg(ctrl, BRCMNAND_CMD_START, 1640 1643 cmd << brcmnand_cmd_shift(ctrl)); 1641 - } 1642 - 1643 - /*********************************************************************** 1644 - * NAND MTD API: read/program/erase 1645 - ***********************************************************************/ 1646 - 1647 - static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat, 1648 - unsigned int ctrl) 1649 - { 1650 - /* intentionally left blank */ 1651 1644 } 1652 1645 1653 1646 static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip) ··· 1651 1664 if 
(mtd->oops_panic_write || ctrl->irq < 0) { 1652 1665 /* switch to interrupt polling and PIO mode */ 1653 1666 disable_ctrl_irqs(ctrl); 1654 - sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, 1667 + sts = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, 1655 1668 NAND_CTRL_RDY, 0); 1656 1669 err = sts < 0; 1657 1670 } else { ··· 1688 1701 } 1689 1702 return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) & 1690 1703 INTFC_FLASH_STATUS; 1704 + } 1705 + 1706 + static u8 brcmnand_status(struct brcmnand_host *host) 1707 + { 1708 + struct nand_chip *chip = &host->chip; 1709 + struct mtd_info *mtd = nand_to_mtd(chip); 1710 + 1711 + brcmnand_set_cmd_addr(mtd, 0); 1712 + brcmnand_send_cmd(host, CMD_STATUS_READ); 1713 + 1714 + return brcmnand_waitfunc(chip); 1715 + } 1716 + 1717 + static u8 brcmnand_reset(struct brcmnand_host *host) 1718 + { 1719 + struct nand_chip *chip = &host->chip; 1720 + 1721 + brcmnand_send_cmd(host, CMD_FLASH_RESET); 1722 + 1723 + return brcmnand_waitfunc(chip); 1691 1724 } 1692 1725 1693 1726 enum { ··· 1757 1750 1758 1751 brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP); 1759 1752 return brcmnand_waitfunc(chip); 1760 - } 1761 - 1762 - static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command, 1763 - int column, int page_addr) 1764 - { 1765 - struct mtd_info *mtd = nand_to_mtd(chip); 1766 - struct brcmnand_host *host = nand_get_controller_data(chip); 1767 - struct brcmnand_controller *ctrl = host->ctrl; 1768 - u64 addr = (u64)page_addr << chip->page_shift; 1769 - int native_cmd = 0; 1770 - 1771 - if (command == NAND_CMD_READID || command == NAND_CMD_PARAM || 1772 - command == NAND_CMD_RNDOUT) 1773 - addr = (u64)column; 1774 - /* Avoid propagating a negative, don't-care address */ 1775 - else if (page_addr < 0) 1776 - addr = 0; 1777 - 1778 - dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command, 1779 - (unsigned long long)addr); 1780 - 1781 - host->last_cmd = command; 1782 - host->last_byte = 0; 1783 - host->last_addr = addr; 1784 - 1785 - switch 
(command) { 1786 - case NAND_CMD_RESET: 1787 - native_cmd = CMD_FLASH_RESET; 1788 - break; 1789 - case NAND_CMD_STATUS: 1790 - native_cmd = CMD_STATUS_READ; 1791 - break; 1792 - case NAND_CMD_READID: 1793 - native_cmd = CMD_DEVICE_ID_READ; 1794 - break; 1795 - case NAND_CMD_READOOB: 1796 - native_cmd = CMD_SPARE_AREA_READ; 1797 - break; 1798 - case NAND_CMD_ERASE1: 1799 - native_cmd = CMD_BLOCK_ERASE; 1800 - brcmnand_wp(mtd, 0); 1801 - break; 1802 - case NAND_CMD_PARAM: 1803 - native_cmd = CMD_PARAMETER_READ; 1804 - break; 1805 - case NAND_CMD_SET_FEATURES: 1806 - case NAND_CMD_GET_FEATURES: 1807 - brcmnand_low_level_op(host, LL_OP_CMD, command, false); 1808 - brcmnand_low_level_op(host, LL_OP_ADDR, column, false); 1809 - break; 1810 - case NAND_CMD_RNDOUT: 1811 - native_cmd = CMD_PARAMETER_CHANGE_COL; 1812 - addr &= ~((u64)(FC_BYTES - 1)); 1813 - /* 1814 - * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0 1815 - * NB: hwcfg.sector_size_1k may not be initialized yet 1816 - */ 1817 - if (brcmnand_get_sector_size_1k(host)) { 1818 - host->hwcfg.sector_size_1k = 1819 - brcmnand_get_sector_size_1k(host); 1820 - brcmnand_set_sector_size_1k(host, 0); 1821 - } 1822 - break; 1823 - } 1824 - 1825 - if (!native_cmd) 1826 - return; 1827 - 1828 - brcmnand_set_cmd_addr(mtd, addr); 1829 - brcmnand_send_cmd(host, native_cmd); 1830 - brcmnand_waitfunc(chip); 1831 - 1832 - if (native_cmd == CMD_PARAMETER_READ || 1833 - native_cmd == CMD_PARAMETER_CHANGE_COL) { 1834 - /* Copy flash cache word-wise */ 1835 - u32 *flash_cache = (u32 *)ctrl->flash_cache; 1836 - int i; 1837 - 1838 - brcmnand_soc_data_bus_prepare(ctrl->soc, true); 1839 - 1840 - /* 1841 - * Must cache the FLASH_CACHE now, since changes in 1842 - * SECTOR_SIZE_1K may invalidate it 1843 - */ 1844 - for (i = 0; i < FC_WORDS; i++) 1845 - /* 1846 - * Flash cache is big endian for parameter pages, at 1847 - * least on STB SoCs 1848 - */ 1849 - flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i)); 1850 - 1851 - 
brcmnand_soc_data_bus_unprepare(ctrl->soc, true); 1852 - 1853 - /* Cleanup from HW quirk: restore SECTOR_SIZE_1K */ 1854 - if (host->hwcfg.sector_size_1k) 1855 - brcmnand_set_sector_size_1k(host, 1856 - host->hwcfg.sector_size_1k); 1857 - } 1858 - 1859 - /* Re-enable protection is necessary only after erase */ 1860 - if (command == NAND_CMD_ERASE1) 1861 - brcmnand_wp(mtd, 1); 1862 - } 1863 - 1864 - static uint8_t brcmnand_read_byte(struct nand_chip *chip) 1865 - { 1866 - struct brcmnand_host *host = nand_get_controller_data(chip); 1867 - struct brcmnand_controller *ctrl = host->ctrl; 1868 - uint8_t ret = 0; 1869 - int addr, offs; 1870 - 1871 - switch (host->last_cmd) { 1872 - case NAND_CMD_READID: 1873 - if (host->last_byte < 4) 1874 - ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >> 1875 - (24 - (host->last_byte << 3)); 1876 - else if (host->last_byte < 8) 1877 - ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >> 1878 - (56 - (host->last_byte << 3)); 1879 - break; 1880 - 1881 - case NAND_CMD_READOOB: 1882 - ret = oob_reg_read(ctrl, host->last_byte); 1883 - break; 1884 - 1885 - case NAND_CMD_STATUS: 1886 - ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) & 1887 - INTFC_FLASH_STATUS; 1888 - if (wp_on) /* hide WP status */ 1889 - ret |= NAND_STATUS_WP; 1890 - break; 1891 - 1892 - case NAND_CMD_PARAM: 1893 - case NAND_CMD_RNDOUT: 1894 - addr = host->last_addr + host->last_byte; 1895 - offs = addr & (FC_BYTES - 1); 1896 - 1897 - /* At FC_BYTES boundary, switch to next column */ 1898 - if (host->last_byte > 0 && offs == 0) 1899 - nand_change_read_column_op(chip, addr, NULL, 0, false); 1900 - 1901 - ret = ctrl->flash_cache[offs]; 1902 - break; 1903 - case NAND_CMD_GET_FEATURES: 1904 - if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) { 1905 - ret = 0; 1906 - } else { 1907 - bool last = host->last_byte == 1908 - ONFI_SUBFEATURE_PARAM_LEN - 1; 1909 - brcmnand_low_level_op(host, LL_OP_RD, 0, last); 1910 - ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff; 1911 - } 
1912 - } 1913 - 1914 - dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret); 1915 - host->last_byte++; 1916 - 1917 - return ret; 1918 - } 1919 - 1920 - static void brcmnand_read_buf(struct nand_chip *chip, uint8_t *buf, int len) 1921 - { 1922 - int i; 1923 - 1924 - for (i = 0; i < len; i++, buf++) 1925 - *buf = brcmnand_read_byte(chip); 1926 - } 1927 - 1928 - static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf, 1929 - int len) 1930 - { 1931 - int i; 1932 - struct brcmnand_host *host = nand_get_controller_data(chip); 1933 - 1934 - switch (host->last_cmd) { 1935 - case NAND_CMD_SET_FEATURES: 1936 - for (i = 0; i < len; i++) 1937 - brcmnand_low_level_op(host, LL_OP_WR, buf[i], 1938 - (i + 1) == len); 1939 - break; 1940 - default: 1941 - BUG(); 1942 - break; 1943 - } 1944 1753 } 1945 1754 1946 1755 /* ··· 2168 2345 struct mtd_info *mtd = nand_to_mtd(chip); 2169 2346 struct brcmnand_host *host = nand_get_controller_data(chip); 2170 2347 u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL; 2348 + u64 addr = (u64)page << chip->page_shift; 2171 2349 2172 - nand_read_page_op(chip, page, 0, NULL, 0); 2350 + host->last_addr = addr; 2173 2351 2174 2352 return brcmnand_read(mtd, chip, host->last_addr, 2175 2353 mtd->writesize >> FC_SHIFT, (u32 *)buf, oob); ··· 2183 2359 struct mtd_info *mtd = nand_to_mtd(chip); 2184 2360 u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL; 2185 2361 int ret; 2362 + u64 addr = (u64)page << chip->page_shift; 2186 2363 2187 - nand_read_page_op(chip, page, 0, NULL, 0); 2364 + host->last_addr = addr; 2188 2365 2189 2366 brcmnand_set_ecc_enabled(host, 0); 2190 2367 ret = brcmnand_read(mtd, chip, host->last_addr, ··· 2293 2468 struct mtd_info *mtd = nand_to_mtd(chip); 2294 2469 struct brcmnand_host *host = nand_get_controller_data(chip); 2295 2470 void *oob = oob_required ? 
chip->oob_poi : NULL; 2471 + u64 addr = (u64)page << chip->page_shift; 2296 2472 2297 - nand_prog_page_begin_op(chip, page, 0, NULL, 0); 2298 - brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); 2473 + host->last_addr = addr; 2299 2474 2300 - return nand_prog_page_end_op(chip); 2475 + return brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); 2301 2476 } 2302 2477 2303 2478 static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, ··· 2306 2481 struct mtd_info *mtd = nand_to_mtd(chip); 2307 2482 struct brcmnand_host *host = nand_get_controller_data(chip); 2308 2483 void *oob = oob_required ? chip->oob_poi : NULL; 2484 + u64 addr = (u64)page << chip->page_shift; 2485 + int ret = 0; 2309 2486 2310 - nand_prog_page_begin_op(chip, page, 0, NULL, 0); 2487 + host->last_addr = addr; 2311 2488 brcmnand_set_ecc_enabled(host, 0); 2312 - brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); 2489 + ret = brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); 2313 2490 brcmnand_set_ecc_enabled(host, 1); 2314 2491 2315 - return nand_prog_page_end_op(chip); 2492 + return ret; 2316 2493 } 2317 2494 2318 2495 static int brcmnand_write_oob(struct nand_chip *chip, int page) ··· 2334 2507 ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL, 2335 2508 (u8 *)chip->oob_poi); 2336 2509 brcmnand_set_ecc_enabled(host, 1); 2510 + 2511 + return ret; 2512 + } 2513 + 2514 + static int brcmnand_exec_instr(struct brcmnand_host *host, int i, 2515 + const struct nand_operation *op) 2516 + { 2517 + const struct nand_op_instr *instr = &op->instrs[i]; 2518 + struct brcmnand_controller *ctrl = host->ctrl; 2519 + const u8 *out; 2520 + bool last_op; 2521 + int ret = 0; 2522 + u8 *in; 2523 + 2524 + /* 2525 + * The controller needs to be aware of the last command in the operation 2526 + * (WAITRDY excepted). 
2527 + */ 2528 + last_op = ((i == (op->ninstrs - 1)) && (instr->type != NAND_OP_WAITRDY_INSTR)) || 2529 + ((i == (op->ninstrs - 2)) && (op->instrs[i+1].type == NAND_OP_WAITRDY_INSTR)); 2530 + 2531 + switch (instr->type) { 2532 + case NAND_OP_CMD_INSTR: 2533 + brcmnand_low_level_op(host, LL_OP_CMD, instr->ctx.cmd.opcode, last_op); 2534 + break; 2535 + 2536 + case NAND_OP_ADDR_INSTR: 2537 + for (i = 0; i < instr->ctx.addr.naddrs; i++) 2538 + brcmnand_low_level_op(host, LL_OP_ADDR, instr->ctx.addr.addrs[i], 2539 + last_op && (i == (instr->ctx.addr.naddrs - 1))); 2540 + break; 2541 + 2542 + case NAND_OP_DATA_IN_INSTR: 2543 + in = instr->ctx.data.buf.in; 2544 + for (i = 0; i < instr->ctx.data.len; i++) { 2545 + brcmnand_low_level_op(host, LL_OP_RD, 0, 2546 + last_op && (i == (instr->ctx.data.len - 1))); 2547 + in[i] = brcmnand_read_reg(host->ctrl, BRCMNAND_LL_RDATA); 2548 + } 2549 + break; 2550 + 2551 + case NAND_OP_DATA_OUT_INSTR: 2552 + out = instr->ctx.data.buf.out; 2553 + for (i = 0; i < instr->ctx.data.len; i++) 2554 + brcmnand_low_level_op(host, LL_OP_WR, out[i], 2555 + last_op && (i == (instr->ctx.data.len - 1))); 2556 + break; 2557 + 2558 + case NAND_OP_WAITRDY_INSTR: 2559 + ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); 2560 + break; 2561 + 2562 + default: 2563 + dev_err(ctrl->dev, "unsupported instruction type: %d\n", 2564 + instr->type); 2565 + ret = -EINVAL; 2566 + break; 2567 + } 2568 + 2569 + return ret; 2570 + } 2571 + 2572 + static int brcmnand_op_is_status(const struct nand_operation *op) 2573 + { 2574 + if ((op->ninstrs == 2) && 2575 + (op->instrs[0].type == NAND_OP_CMD_INSTR) && 2576 + (op->instrs[0].ctx.cmd.opcode == NAND_CMD_STATUS) && 2577 + (op->instrs[1].type == NAND_OP_DATA_IN_INSTR)) 2578 + return 1; 2579 + 2580 + return 0; 2581 + } 2582 + 2583 + static int brcmnand_op_is_reset(const struct nand_operation *op) 2584 + { 2585 + if ((op->ninstrs == 2) && 2586 + (op->instrs[0].type == NAND_OP_CMD_INSTR) && 2587 + 
(op->instrs[0].ctx.cmd.opcode == NAND_CMD_RESET) && 2588 + (op->instrs[1].type == NAND_OP_WAITRDY_INSTR)) 2589 + return 1; 2590 + 2591 + return 0; 2592 + } 2593 + 2594 + static int brcmnand_exec_op(struct nand_chip *chip, 2595 + const struct nand_operation *op, 2596 + bool check_only) 2597 + { 2598 + struct brcmnand_host *host = nand_get_controller_data(chip); 2599 + struct mtd_info *mtd = nand_to_mtd(chip); 2600 + u8 *status; 2601 + unsigned int i; 2602 + int ret = 0; 2603 + 2604 + if (check_only) 2605 + return 0; 2606 + 2607 + if (brcmnand_op_is_status(op)) { 2608 + status = op->instrs[1].ctx.data.buf.in; 2609 + *status = brcmnand_status(host); 2610 + 2611 + return 0; 2612 + } 2613 + else if (brcmnand_op_is_reset(op)) { 2614 + ret = brcmnand_reset(host); 2615 + if (ret < 0) 2616 + return ret; 2617 + 2618 + brcmnand_wp(mtd, 1); 2619 + 2620 + return 0; 2621 + } 2622 + 2623 + if (op->deassert_wp) 2624 + brcmnand_wp(mtd, 0); 2625 + 2626 + for (i = 0; i < op->ninstrs; i++) { 2627 + ret = brcmnand_exec_instr(host, i, op); 2628 + if (ret) 2629 + break; 2630 + } 2631 + 2632 + if (op->deassert_wp) 2633 + brcmnand_wp(mtd, 1); 2337 2634 2338 2635 return ret; 2339 2636 } ··· 2772 2821 2773 2822 static const struct nand_controller_ops brcmnand_controller_ops = { 2774 2823 .attach_chip = brcmnand_attach_chip, 2824 + .exec_op = brcmnand_exec_op, 2775 2825 }; 2776 2826 2777 2827 static int brcmnand_init_cs(struct brcmnand_host *host, ··· 2797 2845 mtd->owner = THIS_MODULE; 2798 2846 mtd->dev.parent = dev; 2799 2847 2800 - chip->legacy.cmd_ctrl = brcmnand_cmd_ctrl; 2801 - chip->legacy.cmdfunc = brcmnand_cmdfunc; 2802 - chip->legacy.waitfunc = brcmnand_waitfunc; 2803 - chip->legacy.read_byte = brcmnand_read_byte; 2804 - chip->legacy.read_buf = brcmnand_read_buf; 2805 - chip->legacy.write_buf = brcmnand_write_buf; 2806 - 2807 2848 chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; 2808 2849 chip->ecc.read_page = brcmnand_read_page; 2809 2850 chip->ecc.write_page = 
brcmnand_write_page; ··· 2808 2863 chip->ecc.write_oob = brcmnand_write_oob; 2809 2864 2810 2865 chip->controller = &ctrl->controller; 2866 + ctrl->controller.controller_wp = 1; 2811 2867 2812 2868 /* 2813 2869 * The bootloader might have configured 16bit mode but ··· 3245 3299 } 3246 3300 EXPORT_SYMBOL_GPL(brcmnand_probe); 3247 3301 3248 - int brcmnand_remove(struct platform_device *pdev) 3302 + void brcmnand_remove(struct platform_device *pdev) 3249 3303 { 3250 3304 struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev); 3251 3305 struct brcmnand_host *host; ··· 3262 3316 clk_disable_unprepare(ctrl->clk); 3263 3317 3264 3318 dev_set_drvdata(&pdev->dev, NULL); 3265 - 3266 - return 0; 3267 3319 } 3268 3320 EXPORT_SYMBOL_GPL(brcmnand_remove); 3269 3321
+1 -1
drivers/mtd/nand/raw/brcmnand/brcmnand.h
··· 88 88 } 89 89 90 90 int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc); 91 - int brcmnand_remove(struct platform_device *pdev); 91 + void brcmnand_remove(struct platform_device *pdev); 92 92 93 93 extern const struct dev_pm_ops brcmnand_pm_ops; 94 94
+1 -1
drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c
··· 23 23 24 24 static struct platform_driver brcmstb_nand_driver = { 25 25 .probe = brcmstb_nand_probe, 26 - .remove = brcmnand_remove, 26 + .remove_new = brcmnand_remove, 27 27 .driver = { 28 28 .name = "brcmstb_nand", 29 29 .pm = &brcmnand_pm_ops,
+1 -1
drivers/mtd/nand/raw/brcmnand/iproc_nand.c
··· 134 134 135 135 static struct platform_driver iproc_nand_driver = { 136 136 .probe = iproc_nand_probe, 137 - .remove = brcmnand_remove, 137 + .remove_new = brcmnand_remove, 138 138 .driver = { 139 139 .name = "iproc_nand", 140 140 .pm = &brcmnand_pm_ops,
+6 -4
drivers/mtd/nand/raw/diskonchip.c
··· 1491 1491 else 1492 1492 numchips = doc2001_init(mtd); 1493 1493 1494 - if ((ret = nand_scan(nand, numchips)) || (ret = doc->late_init(mtd))) { 1495 - /* DBB note: i believe nand_cleanup is necessary here, as 1496 - buffers may have been allocated in nand_base. Check with 1497 - Thomas. FIX ME! */ 1494 + ret = nand_scan(nand, numchips); 1495 + if (ret) 1496 + goto fail; 1497 + 1498 + ret = doc->late_init(mtd); 1499 + if (ret) { 1498 1500 nand_cleanup(nand); 1499 1501 goto fail; 1500 1502 }
+1 -1
drivers/mtd/nand/raw/fsl_ifc_nand.c
··· 21 21 22 22 #define ERR_BYTE 0xFF /* Value returned for read 23 23 bytes when read failed */ 24 - #define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait 24 + #define IFC_TIMEOUT_MSECS 1000 /* Maximum timeout to wait 25 25 for IFC NAND Machine */ 26 26 27 27 struct fsl_ifc_ctrl;
+5 -3
drivers/mtd/nand/raw/meson_nand.c
··· 90 90 91 91 /* eMMC clock register, misc control */ 92 92 #define CLK_SELECT_NAND BIT(31) 93 + #define CLK_ALWAYS_ON_NAND BIT(24) 94 + #define CLK_SELECT_FIX_PLL2 BIT(6) 93 95 94 96 #define NFC_CLK_CYCLE 6 95 97 ··· 511 509 __le64 *info; 512 510 int i, count; 513 511 514 - for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) { 512 + for (i = 0, count = 0; i < nand->ecc.steps; i++, count += (2 + nand->ecc.bytes)) { 515 513 info = &meson_chip->info_buf[i]; 516 514 *info |= oob_buf[count]; 517 515 *info |= oob_buf[count + 1] << 8; ··· 524 522 __le64 *info; 525 523 int i, count; 526 524 527 - for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) { 525 + for (i = 0, count = 0; i < nand->ecc.steps; i++, count += (2 + nand->ecc.bytes)) { 528 526 info = &meson_chip->info_buf[i]; 529 527 oob_buf[count] = *info; 530 528 oob_buf[count + 1] = *info >> 8; ··· 1156 1154 return PTR_ERR(nfc->nand_clk); 1157 1155 1158 1156 /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */ 1159 - writel(CLK_SELECT_NAND | readl(nfc->reg_clk), 1157 + writel(CLK_ALWAYS_ON_NAND | CLK_SELECT_NAND | CLK_SELECT_FIX_PLL2, 1160 1158 nfc->reg_clk); 1161 1159 1162 1160 ret = clk_prepare_enable(nfc->core_clk);
+8 -2
drivers/mtd/nand/raw/nand_base.c
··· 366 366 if (chip->options & NAND_BROKEN_XD) 367 367 return 0; 368 368 369 + /* controller responsible for NAND write protect */ 370 + if (chip->controller->controller_wp) 371 + return 0; 372 + 369 373 /* Check the WP bit */ 370 374 ret = nand_status_op(chip, &status); 371 375 if (ret) ··· 1527 1523 NAND_COMMON_TIMING_NS(conf, tWB_max)), 1528 1524 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0), 1529 1525 }; 1530 - struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1526 + struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs, 1527 + instrs); 1531 1528 int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page); 1532 1529 1533 1530 if (naddrs < 0) ··· 1951 1946 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max), 1952 1947 0), 1953 1948 }; 1954 - struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1949 + struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs, 1950 + instrs); 1955 1951 1956 1952 if (chip->options & NAND_ROW_ADDR_3) 1957 1953 instrs[1].ctx.addr.naddrs++;
+1 -1
drivers/mtd/nand/raw/pl35x-nand-controller.c
··· 128 128 * @conf_regs: SMC configuration registers for command phase 129 129 * @io_regs: NAND data registers for data phase 130 130 * @controller: Core NAND controller structure 131 - * @chip: NAND chip information structure 131 + * @chips: List of connected NAND chips 132 132 * @selected_chip: NAND chip currently selected by the controller 133 133 * @assigned_cs: List of assigned CS 134 134 * @ecc_buf: Temporary buffer to extract ECC bytes
+4 -3
drivers/mtd/nand/raw/rockchip-nand-controller.c
··· 98 98 * @high: ECC count high bit index at register. 99 99 * @high_mask: mask bit 100 100 */ 101 - struct ecc_cnt_status { 101 + struct rk_ecc_cnt_status { 102 102 u8 err_flag_bit; 103 103 u8 low; 104 104 u8 low_mask; ··· 108 108 }; 109 109 110 110 /** 111 + * struct nfc_cfg: Rockchip NAND controller configuration 111 112 * @type: NFC version 112 113 * @ecc_strengths: ECC strengths 113 114 * @ecc_cfgs: ECC config values ··· 145 144 u32 int_st_off; 146 145 u32 oob0_off; 147 146 u32 oob1_off; 148 - struct ecc_cnt_status ecc0; 149 - struct ecc_cnt_status ecc1; 147 + struct rk_ecc_cnt_status ecc0; 148 + struct rk_ecc_cnt_status ecc1; 150 149 }; 151 150 152 151 struct rk_nfc_nand_chip {
-2
drivers/mtd/nand/raw/s3c2410.c
··· 105 105 106 106 /** 107 107 * struct s3c2410_nand_mtd - driver MTD structure 108 - * @mtd: The MTD instance to pass to the MTD layer. 109 108 * @chip: The NAND chip information. 110 109 * @set: The platform information supplied for this set of NAND chips. 111 110 * @info: Link back to the hardware information. ··· 144 145 * @clk_rate: The clock rate from @clk. 145 146 * @clk_state: The current clock state. 146 147 * @cpu_type: The exact type of this controller. 147 - * @freq_transition: CPUFreq notifier block 148 148 */ 149 149 struct s3c2410_nand_info { 150 150 /* mtd info */
+5 -8
drivers/mtd/nand/raw/txx9ndfmc.c
··· 276 276 .attach_chip = txx9ndfmc_attach_chip, 277 277 }; 278 278 279 - static int __init txx9ndfmc_probe(struct platform_device *dev) 279 + static int txx9ndfmc_probe(struct platform_device *dev) 280 280 { 281 281 struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev); 282 282 int hold, spw; ··· 369 369 return 0; 370 370 } 371 371 372 - static int __exit txx9ndfmc_remove(struct platform_device *dev) 372 + static void txx9ndfmc_remove(struct platform_device *dev) 373 373 { 374 374 struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev); 375 375 int ret, i; 376 376 377 - if (!drvdata) 378 - return 0; 379 377 for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) { 380 378 struct mtd_info *mtd = drvdata->mtds[i]; 381 379 struct nand_chip *chip; ··· 390 392 kfree(txx9_priv->mtdname); 391 393 kfree(txx9_priv); 392 394 } 393 - return 0; 394 395 } 395 396 396 397 #ifdef CONFIG_PM ··· 404 407 #endif 405 408 406 409 static struct platform_driver txx9ndfmc_driver = { 407 - .remove = __exit_p(txx9ndfmc_remove), 410 + .probe = txx9ndfmc_probe, 411 + .remove_new = txx9ndfmc_remove, 408 412 .resume = txx9ndfmc_resume, 409 413 .driver = { 410 414 .name = "txx9ndfmc", 411 415 }, 412 416 }; 413 - 414 - module_platform_driver_probe(txx9ndfmc_driver, txx9ndfmc_probe); 417 + module_platform_driver(txx9ndfmc_driver); 415 418 416 419 MODULE_LICENSE("GPL"); 417 420 MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver");
+13
include/linux/mtd/rawnand.h
··· 1003 1003 /** 1004 1004 * struct nand_operation - NAND operation descriptor 1005 1005 * @cs: the CS line to select for this NAND operation 1006 + * @deassert_wp: set to true when the operation requires the WP pin to be 1007 + * de-asserted (ERASE, PROG, ...) 1006 1008 * @instrs: array of instructions to execute 1007 1009 * @ninstrs: length of the @instrs array 1008 1010 * ··· 1012 1010 */ 1013 1011 struct nand_operation { 1014 1012 unsigned int cs; 1013 + bool deassert_wp; 1015 1014 const struct nand_op_instr *instrs; 1016 1015 unsigned int ninstrs; 1017 1016 }; ··· 1020 1017 #define NAND_OPERATION(_cs, _instrs) \ 1021 1018 { \ 1022 1019 .cs = _cs, \ 1020 + .instrs = _instrs, \ 1021 + .ninstrs = ARRAY_SIZE(_instrs), \ 1022 + } 1023 + 1024 + #define NAND_DESTRUCTIVE_OPERATION(_cs, _instrs) \ 1025 + { \ 1026 + .cs = _cs, \ 1027 + .deassert_wp = true, \ 1023 1028 .instrs = _instrs, \ 1024 1029 .ninstrs = ARRAY_SIZE(_instrs), \ 1025 1030 } ··· 1115 1104 * the bus without restarting an entire read operation nor 1116 1105 * changing the column. 1117 1106 * @supported_op.cont_read: The controller supports sequential cache reads. 1107 + * @controller_wp: the controller is in charge of handling the WP pin. 1118 1108 */ 1119 1109 struct nand_controller { 1120 1110 struct mutex lock; ··· 1124 1112 unsigned int data_only_read: 1; 1125 1113 unsigned int cont_read: 1; 1126 1114 } supported_op; 1115 + bool controller_wp; 1127 1116 }; 1128 1117 1129 1118 static inline void nand_controller_init(struct nand_controller *nfc)