Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mtd/for-6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux

Pull mtd updates from Miquel Raynal:
"MTD changes:

- The atmel,dataflash binding has been converted to yaml and the
physmap one constrained. Some logs are improved, error paths are
getting reworked a bit, and a few patches target the use of
str_enabled_disabled().

Raw NAND changes:

- i.MX8 and i.MX31 now have their own compatible, the Qcom driver got
cleaned, the Broadcom driver got fixed.

SPI NAND changes:

- OTP support has been added, and the ESMT and Micron manufacturer
drivers implement it.

- Read retry support has been added, and the Macronix manufacturer
driver implements it.

SPI NOR changes:

- Added support for a few flashes. A few cleanup patches for the core
driver, where we touched the header inclusion list and started
using the scope-based mutex cleanup helpers.

There is also a bunch of minor improvements and fixes in drivers
and bindings"

* tag 'mtd/for-6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux: (34 commits)
dt-bindings: mtd: atmel,dataflash: convert txt to yaml
mtd: mchp48l640: Use str_enable_disable() in mchp48l640_write_prepare()
mtd: rawnand: gpmi: Use str_enabled_disabled() in gpmi_nand_attach_chip()
mtd: mtdpart: Do not supply NULL to printf()
dt-bindings: mtd: gpmi-nand: Add compatible string for i.MX8 chips
mtd: nand: Fix a kdoc comment
mtd: spinand: Improve spinand_info macros style
mtd: spi-nor: drop unused <linux/of_platform.h>
mtd: spi-nor: explicitly include <linux/of.h>
mtd: spi-nor: explicitly include <linux/math64.h>
mtd: spi-nor: macronix: add support for mx66{l2, u1}g45g
mtd: spi-nor: macronix: Add post_sfdp fixups for Quad Input Page Program
mtd: Fix error handling in mtd_device_parse_register() error path
mtd: capture device name setting failure when adding mtd
mtd: Add check for devm_kcalloc()
mtd: Replace kcalloc() with devm_kcalloc()
dt-bindings: mtd: physmap: Ensure all properties are defined
mtd: rawnand: brcmnand: fix PM resume warning
dt-bindings: mtd: mxc-nand: Document fsl,imx31-nand
mtd: spinand: macronix: Add support for read retry
...

+1122 -146
+1 -1
Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml
··· 42 42 - clock-names 43 43 - interrupts 44 44 45 - unevaluatedProperties: true 45 + unevaluatedProperties: false 46 46 47 47 examples: 48 48 - |
+55
Documentation/devicetree/bindings/mtd/atmel,dataflash.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/mtd/atmel,dataflash.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Atmel DataFlash 8 + 9 + maintainers: 10 + - Nayab Sayed <nayabbasha.sayed@microchip.com> 11 + 12 + description: 13 + The Atmel DataFlash is a low pin-count serial interface sequential access 14 + Flash memory, compatible with SPI standard. The device tree may optionally 15 + contain sub-nodes describing partitions of the address space. 16 + 17 + properties: 18 + compatible: 19 + oneOf: 20 + - items: 21 + - enum: 22 + - atmel,at45db321d 23 + - atmel,at45db041e 24 + - atmel,at45db642d 25 + - atmel,at45db021d 26 + - const: atmel,at45 27 + - const: atmel,dataflash 28 + - items: 29 + - const: atmel,at45 30 + - const: atmel,dataflash 31 + 32 + reg: 33 + maxItems: 1 34 + 35 + required: 36 + - compatible 37 + - reg 38 + 39 + allOf: 40 + - $ref: mtd.yaml# 41 + - $ref: /schemas/spi/spi-peripheral-props.yaml# 42 + 43 + unevaluatedProperties: false 44 + 45 + examples: 46 + - | 47 + spi { 48 + #address-cells = <1>; 49 + #size-cells = <0>; 50 + 51 + flash@1 { 52 + compatible = "atmel,at45db321d", "atmel,at45", "atmel,dataflash"; 53 + reg = <1>; 54 + }; 55 + };
-17
Documentation/devicetree/bindings/mtd/atmel-dataflash.txt
··· 1 - * Atmel Data Flash 2 - 3 - Required properties: 4 - - compatible : "atmel,<model>", "atmel,<series>", "atmel,dataflash". 5 - 6 - The device tree may optionally contain sub-nodes describing partitions of the 7 - address space. See partition.txt for more detail. 8 - 9 - Example: 10 - 11 - flash@1 { 12 - #address-cells = <1>; 13 - #size-cells = <1>; 14 - compatible = "atmel,at45db321d", "atmel,at45", "atmel,dataflash"; 15 - spi-max-frequency = <25000000>; 16 - reg = <1>; 17 - };
+7
Documentation/devicetree/bindings/mtd/gpmi-nand.yaml
··· 29 29 - enum: 30 30 - fsl,imx8mm-gpmi-nand 31 31 - fsl,imx8mn-gpmi-nand 32 + - fsl,imx8mp-gpmi-nand 33 + - fsl,imx8mq-gpmi-nand 32 34 - const: fsl,imx7d-gpmi-nand 35 + - items: 36 + - enum: 37 + - fsl,imx8dxl-gpmi-nand 38 + - fsl,imx8qm-gpmi-nand 39 + - const: fsl,imx8qxp-gpmi-nand 33 40 34 41 reg: 35 42 items:
+3 -2
Documentation/devicetree/bindings/mtd/mtd-physmap.yaml
··· 122 122 '#size-cells': 123 123 const: 1 124 124 125 + ranges: true 126 + 125 127 big-endian: true 126 128 little-endian: true 127 129 ··· 145 143 required: 146 144 - syscon 147 145 148 - # FIXME: A parent bus may define timing properties 149 - additionalProperties: true 146 + unevaluatedProperties: false 150 147 151 148 examples: 152 149 - |
+6 -2
Documentation/devicetree/bindings/mtd/mxc-nand.yaml
··· 14 14 15 15 properties: 16 16 compatible: 17 - const: fsl,imx27-nand 18 - 17 + oneOf: 18 + - const: fsl,imx27-nand 19 + - items: 20 + - enum: 21 + - fsl,imx31-nand 22 + - const: fsl,imx27-nand 19 23 reg: 20 24 maxItems: 1 21 25
+5 -4
drivers/mtd/devices/mchp48l640.c
··· 23 23 #include <linux/spi/flash.h> 24 24 #include <linux/spi/spi.h> 25 25 #include <linux/of.h> 26 + #include <linux/string_choices.h> 26 27 27 28 struct mchp48_caps { 28 29 unsigned int size; ··· 129 128 mutex_unlock(&flash->lock); 130 129 131 130 if (ret) 132 - dev_err(&flash->spi->dev, "write %sable failed ret: %d", 133 - (enable ? "en" : "dis"), ret); 131 + dev_err(&flash->spi->dev, "write %s failed ret: %d", 132 + str_enable_disable(enable), ret); 134 133 135 - dev_dbg(&flash->spi->dev, "write %sable success ret: %d", 136 - (enable ? "en" : "dis"), ret); 134 + dev_dbg(&flash->spi->dev, "write %s success ret: %d", 135 + str_enable_disable(enable), ret); 137 136 if (enable) 138 137 return mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, true); 139 138
+10 -4
drivers/mtd/mtdcore.c
··· 741 741 mtd->dev.type = &mtd_devtype; 742 742 mtd->dev.class = &mtd_class; 743 743 mtd->dev.devt = MTD_DEVT(i); 744 - dev_set_name(&mtd->dev, "mtd%d", i); 744 + error = dev_set_name(&mtd->dev, "mtd%d", i); 745 + if (error) 746 + goto fail_devname; 745 747 dev_set_drvdata(&mtd->dev, mtd); 746 748 mtd_check_of_node(mtd); 747 749 of_node_get(mtd_get_of_node(mtd)); ··· 792 790 device_unregister(&mtd->dev); 793 791 fail_added: 794 792 of_node_put(mtd_get_of_node(mtd)); 793 + fail_devname: 795 794 idr_remove(&mtd_idr, i); 796 795 fail_locked: 797 796 mutex_unlock(&mtd_table_mutex); ··· 1056 1053 const struct mtd_partition *parts, 1057 1054 int nr_parts) 1058 1055 { 1059 - int ret; 1056 + int ret, err; 1060 1057 1061 1058 mtd_set_dev_defaults(mtd); 1062 1059 ··· 1108 1105 nvmem_unregister(mtd->otp_factory_nvmem); 1109 1106 } 1110 1107 1111 - if (ret && device_is_registered(&mtd->dev)) 1112 - del_mtd_device(mtd); 1108 + if (ret && device_is_registered(&mtd->dev)) { 1109 + err = del_mtd_device(mtd); 1110 + if (err) 1111 + pr_err("Error when deleting MTD device (%d)\n", err); 1112 + } 1113 1113 1114 1114 return ret; 1115 1115 }
+1 -2
drivers/mtd/mtdpart.c
··· 690 690 parser = mtd_part_parser_get(*types); 691 691 if (!parser && !request_module("%s", *types)) 692 692 parser = mtd_part_parser_get(*types); 693 - pr_debug("%s: got parser %s\n", master->name, 694 - parser ? parser->name : NULL); 695 693 if (!parser) 696 694 continue; 695 + pr_debug("%s: got parser %s\n", master->name, parser->name); 697 696 ret = mtd_part_do_parse(parser, master, &pparts, data); 698 697 if (ret <= 0) 699 698 mtd_part_parser_put(parser);
+6 -6
drivers/mtd/mtdpstore.c
··· 417 417 } 418 418 419 419 longcnt = BITS_TO_LONGS(div_u64(mtd->size, info->kmsg_size)); 420 - cxt->rmmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL); 421 - cxt->usedmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL); 420 + cxt->rmmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL); 421 + cxt->usedmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL); 422 422 423 423 longcnt = BITS_TO_LONGS(div_u64(mtd->size, mtd->erasesize)); 424 - cxt->badmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL); 424 + cxt->badmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL); 425 + 426 + if (!cxt->rmmap || !cxt->usedmap || !cxt->badmap) 427 + return; 425 428 426 429 /* just support dmesg right now */ 427 430 cxt->dev.flags = PSTORE_FLAGS_DMESG; ··· 530 527 mtdpstore_flush_removed(cxt); 531 528 532 529 unregister_pstore_device(&cxt->dev); 533 - kfree(cxt->badmap); 534 - kfree(cxt->usedmap); 535 - kfree(cxt->rmmap); 536 530 cxt->mtd = NULL; 537 531 cxt->index = -1; 538 532 }
+1 -1
drivers/mtd/nand/raw/brcmnand/brcmnand.c
··· 3008 3008 brcmnand_save_restore_cs_config(host, 1); 3009 3009 3010 3010 /* Reset the chip, required by some chips after power-up */ 3011 - nand_reset_op(chip); 3011 + nand_reset(chip, 0); 3012 3012 } 3013 3013 3014 3014 return 0;
+3 -2
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
··· 17 17 #include <linux/pm_runtime.h> 18 18 #include <linux/pinctrl/consumer.h> 19 19 #include <linux/dma/mxs-dma.h> 20 + #include <linux/string_choices.h> 20 21 #include "gpmi-nand.h" 21 22 #include "gpmi-regs.h" 22 23 #include "bch-regs.h" ··· 2320 2319 "fsl,no-blockmark-swap")) 2321 2320 this->swap_block_mark = false; 2322 2321 } 2323 - dev_dbg(this->dev, "Blockmark swapping %sabled\n", 2324 - this->swap_block_mark ? "en" : "dis"); 2322 + dev_dbg(this->dev, "Blockmark swapping %s\n", 2323 + str_enabled_disabled(this->swap_block_mark)); 2325 2324 2326 2325 ret = gpmi_init_last(this); 2327 2326 if (ret)
+2 -2
drivers/mtd/nand/raw/nand_base.c
··· 1833 1833 1834 1834 /* READ_ID data bytes are received twice in NV-DDR mode */ 1835 1835 if (len && nand_interface_is_nvddr(conf)) { 1836 - ddrbuf = kzalloc(len * 2, GFP_KERNEL); 1836 + ddrbuf = kcalloc(2, len, GFP_KERNEL); 1837 1837 if (!ddrbuf) 1838 1838 return -ENOMEM; 1839 1839 ··· 2203 2203 * twice. 2204 2204 */ 2205 2205 if (force_8bit && nand_interface_is_nvddr(conf)) { 2206 - ddrbuf = kzalloc(len * 2, GFP_KERNEL); 2206 + ddrbuf = kcalloc(2, len, GFP_KERNEL); 2207 2207 if (!ddrbuf) 2208 2208 return -ENOMEM; 2209 2209
+18 -18
drivers/mtd/nand/raw/qcom_nandc.c
··· 165 165 { 166 166 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 167 167 __le32 locreg_val; 168 - u32 val = (((cw_offset) << READ_LOCATION_OFFSET) | 169 - ((read_size) << READ_LOCATION_SIZE) | 170 - ((is_last_read_loc) << READ_LOCATION_LAST)); 168 + u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) | 169 + FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) | 170 + FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc); 171 171 172 172 locreg_val = cpu_to_le32(val); 173 173 ··· 197 197 { 198 198 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); 199 199 __le32 locreg_val; 200 - u32 val = (((cw_offset) << READ_LOCATION_OFFSET) | 201 - ((read_size) << READ_LOCATION_SIZE) | 202 - ((is_last_read_loc) << READ_LOCATION_LAST)); 200 + u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) | 201 + FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) | 202 + FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc); 203 203 204 204 locreg_val = cpu_to_le32(val); 205 205 ··· 271 271 } 272 272 273 273 if (host->use_ecc) { 274 - cfg0 = cpu_to_le32((host->cfg0 & ~(7U << CW_PER_PAGE)) | 275 - (num_cw - 1) << CW_PER_PAGE); 274 + cfg0 = cpu_to_le32((host->cfg0 & ~CW_PER_PAGE_MASK) | 275 + FIELD_PREP(CW_PER_PAGE_MASK, (num_cw - 1))); 276 276 277 277 cfg1 = cpu_to_le32(host->cfg1); 278 278 ecc_bch_cfg = cpu_to_le32(host->ecc_bch_cfg); 279 279 } else { 280 - cfg0 = cpu_to_le32((host->cfg0_raw & ~(7U << CW_PER_PAGE)) | 281 - (num_cw - 1) << CW_PER_PAGE); 280 + cfg0 = cpu_to_le32((host->cfg0_raw & ~CW_PER_PAGE_MASK) | 281 + FIELD_PREP(CW_PER_PAGE_MASK, (num_cw - 1))); 282 282 283 283 cfg1 = cpu_to_le32(host->cfg1_raw); 284 284 ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE); ··· 882 882 host->bbm_size - host->cw_data; 883 883 884 884 host->cfg0 &= ~(SPARE_SIZE_BYTES_MASK | UD_SIZE_BYTES_MASK); 885 - host->cfg0 |= host->spare_bytes << SPARE_SIZE_BYTES | 886 - host->cw_data << UD_SIZE_BYTES; 885 + host->cfg0 |= FIELD_PREP(SPARE_SIZE_BYTES_MASK, 
host->spare_bytes) | 886 + FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data); 887 887 888 888 host->ecc_bch_cfg &= ~ECC_NUM_DATA_BYTES_MASK; 889 - host->ecc_bch_cfg |= host->cw_data << ECC_NUM_DATA_BYTES; 890 - host->ecc_buf_cfg = (host->cw_data - 1) << NUM_STEPS; 889 + host->ecc_bch_cfg |= FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data); 890 + host->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, host->cw_data - 1); 891 891 } 892 892 893 893 /* implements ecc->read_page() */ ··· 1531 1531 FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw); 1532 1532 1533 1533 if (!nandc->props->qpic_version2) 1534 - host->ecc_buf_cfg = 0x203 << NUM_STEPS; 1534 + host->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, 0x203); 1535 1535 1536 1536 host->clrflashstatus = FS_READY_BSY_N; 1537 1537 host->clrreadstatus = 0xc0; ··· 1817 1817 q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE); 1818 1818 nandc->regs->addr0 = q_op.addr1_reg; 1819 1819 nandc->regs->addr1 = q_op.addr2_reg; 1820 - nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~(7 << CW_PER_PAGE)); 1820 + nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~CW_PER_PAGE_MASK); 1821 1821 nandc->regs->cfg1 = cpu_to_le32(host->cfg1_raw); 1822 1822 instrs = 3; 1823 1823 } else if (q_op.cmd_reg != cpu_to_le32(OP_RESET_DEVICE)) { ··· 1900 1900 /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */ 1901 1901 if (!nandc->props->qpic_version2) { 1902 1902 nandc->regs->vld = cpu_to_le32((nandc->vld & ~READ_START_VLD)); 1903 - nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~(0xFF << READ_ADDR)) 1904 - | NAND_CMD_PARAM << READ_ADDR); 1903 + nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~READ_ADDR_MASK) | 1904 + FIELD_PREP(READ_ADDR_MASK, NAND_CMD_PARAM)); 1905 1905 } 1906 1906 1907 1907 nandc->regs->exec = cpu_to_le32(1);
+2 -1
drivers/mtd/nand/spi/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 - spinand-objs := core.o alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o 2 + spinand-objs := core.o otp.o 3 + spinand-objs += alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o 3 4 spinand-objs += micron.o paragon.o skyhigh.o toshiba.o winbond.o xtx.o 4 5 obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
+75 -10
drivers/mtd/nand/spi/core.c
··· 534 534 return spi_mem_exec_op(spinand->spimem, &op); 535 535 } 536 536 537 - static int spinand_wait(struct spinand_device *spinand, 538 - unsigned long initial_delay_us, 539 - unsigned long poll_delay_us, 540 - u8 *s) 537 + /** 538 + * spinand_wait() - Poll memory device status 539 + * @spinand: the spinand device 540 + * @initial_delay_us: delay in us before starting to poll 541 + * @poll_delay_us: time to sleep between reads in us 542 + * @s: the pointer to variable to store the value of REG_STATUS 543 + * 544 + * This function polls a status register (REG_STATUS) and returns when 545 + * the STATUS_READY bit is 0 or when the timeout has expired. 546 + * 547 + * Return: 0 on success, a negative error code otherwise. 548 + */ 549 + int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us, 550 + unsigned long poll_delay_us, u8 *s) 541 551 { 542 552 struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS, 543 553 spinand->scratchbuf); ··· 614 604 return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock); 615 605 } 616 606 617 - static int spinand_read_page(struct spinand_device *spinand, 618 - const struct nand_page_io_req *req) 607 + /** 608 + * spinand_read_page() - Read a page 609 + * @spinand: the spinand device 610 + * @req: the I/O request 611 + * 612 + * Return: 0 or a positive number of bitflips corrected on success. 613 + * A negative error code otherwise. 
614 + */ 615 + int spinand_read_page(struct spinand_device *spinand, 616 + const struct nand_page_io_req *req) 619 617 { 620 618 struct nand_device *nand = spinand_to_nand(spinand); 621 619 u8 status; ··· 653 635 return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req); 654 636 } 655 637 656 - static int spinand_write_page(struct spinand_device *spinand, 657 - const struct nand_page_io_req *req) 638 + /** 639 + * spinand_write_page() - Write a page 640 + * @spinand: the spinand device 641 + * @req: the I/O request 642 + * 643 + * Return: 0 or a positive number of bitflips corrected on success. 644 + * A negative error code otherwise. 645 + */ 646 + int spinand_write_page(struct spinand_device *spinand, 647 + const struct nand_page_io_req *req) 658 648 { 659 649 struct nand_device *nand = spinand_to_nand(spinand); 660 650 u8 status; ··· 700 674 { 701 675 struct spinand_device *spinand = mtd_to_spinand(mtd); 702 676 struct nand_device *nand = mtd_to_nanddev(mtd); 677 + struct mtd_ecc_stats old_stats; 703 678 struct nand_io_iter iter; 704 679 bool disable_ecc = false; 705 680 bool ecc_failed = false; 681 + unsigned int retry_mode = 0; 706 682 int ret; 683 + 684 + old_stats = mtd->ecc_stats; 707 685 708 686 if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout) 709 687 disable_ecc = true; ··· 720 690 if (ret) 721 691 break; 722 692 693 + read_retry: 723 694 ret = spinand_read_page(spinand, &iter.req); 724 695 if (ret < 0 && ret != -EBADMSG) 725 696 break; 726 697 727 - if (ret == -EBADMSG) 698 + if (ret == -EBADMSG && spinand->set_read_retry) { 699 + if (spinand->read_retries && (++retry_mode <= spinand->read_retries)) { 700 + ret = spinand->set_read_retry(spinand, retry_mode); 701 + if (ret < 0) { 702 + spinand->set_read_retry(spinand, 0); 703 + return ret; 704 + } 705 + 706 + /* Reset ecc_stats; retry */ 707 + mtd->ecc_stats = old_stats; 708 + goto read_retry; 709 + } else { 710 + /* No more retry modes; real failure */ 711 + ecc_failed = true; 712 + } 713 + } 
else if (ret == -EBADMSG) { 728 714 ecc_failed = true; 729 - else 715 + } else { 730 716 *max_bitflips = max_t(unsigned int, *max_bitflips, ret); 717 + } 731 718 732 719 ret = 0; 733 720 ops->retlen += iter.req.datalen; 734 721 ops->oobretlen += iter.req.ooblen; 722 + 723 + /* Reset to retry mode 0 */ 724 + if (retry_mode) { 725 + retry_mode = 0; 726 + ret = spinand->set_read_retry(spinand, retry_mode); 727 + if (ret < 0) 728 + return ret; 729 + } 735 730 } 736 731 737 732 if (ecc_failed && !ret) ··· 1347 1292 spinand->id.len = 1 + table[i].devid.len; 1348 1293 spinand->select_target = table[i].select_target; 1349 1294 spinand->set_cont_read = table[i].set_cont_read; 1295 + spinand->fact_otp = &table[i].fact_otp; 1296 + spinand->user_otp = &table[i].user_otp; 1297 + spinand->read_retries = table[i].read_retries; 1298 + spinand->set_read_retry = table[i].set_read_retry; 1350 1299 1351 1300 op = spinand_select_op_variant(spinand, 1352 1301 info->op_variants.read_cache); ··· 1536 1477 mtd->_erase = spinand_mtd_erase; 1537 1478 mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks; 1538 1479 mtd->_resume = spinand_mtd_resume; 1480 + 1481 + if (spinand_user_otp_size(spinand) || spinand_fact_otp_size(spinand)) { 1482 + ret = spinand_set_mtd_otp_ops(spinand); 1483 + if (ret) 1484 + goto err_cleanup_ecc_engine; 1485 + } 1539 1486 1540 1487 if (nand->ecc.engine) { 1541 1488 ret = mtd_ooblayout_count_freebytes(mtd);
+88 -2
drivers/mtd/nand/spi/esmt.c
··· 8 8 #include <linux/device.h> 9 9 #include <linux/kernel.h> 10 10 #include <linux/mtd/spinand.h> 11 + #include <linux/spi/spi-mem.h> 11 12 12 13 /* ESMT uses GigaDevice 0xc8 JECDEC ID on some SPI NANDs */ 13 14 #define SPINAND_MFR_ESMT_C8 0xc8 15 + 16 + #define ESMT_F50L1G41LB_CFG_OTP_PROTECT BIT(7) 17 + #define ESMT_F50L1G41LB_CFG_OTP_LOCK \ 18 + (CFG_OTP_ENABLE | ESMT_F50L1G41LB_CFG_OTP_PROTECT) 14 19 15 20 static SPINAND_OP_VARIANTS(read_cache_variants, 16 21 SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), ··· 107 102 .free = f50l1g41lb_ooblayout_free, 108 103 }; 109 104 105 + static int f50l1g41lb_otp_info(struct spinand_device *spinand, size_t len, 106 + struct otp_info *buf, size_t *retlen, bool user) 107 + { 108 + if (len < sizeof(*buf)) 109 + return -EINVAL; 110 + 111 + buf->locked = 0; 112 + buf->start = 0; 113 + buf->length = user ? spinand_user_otp_size(spinand) : 114 + spinand_fact_otp_size(spinand); 115 + 116 + *retlen = sizeof(*buf); 117 + return 0; 118 + } 119 + 120 + static int f50l1g41lb_fact_otp_info(struct spinand_device *spinand, size_t len, 121 + struct otp_info *buf, size_t *retlen) 122 + { 123 + return f50l1g41lb_otp_info(spinand, len, buf, retlen, false); 124 + } 125 + 126 + static int f50l1g41lb_user_otp_info(struct spinand_device *spinand, size_t len, 127 + struct otp_info *buf, size_t *retlen) 128 + { 129 + return f50l1g41lb_otp_info(spinand, len, buf, retlen, true); 130 + } 131 + 132 + static int f50l1g41lb_otp_lock(struct spinand_device *spinand, loff_t from, 133 + size_t len) 134 + { 135 + struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true); 136 + struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0); 137 + u8 status; 138 + int ret; 139 + 140 + ret = spinand_upd_cfg(spinand, ESMT_F50L1G41LB_CFG_OTP_LOCK, 141 + ESMT_F50L1G41LB_CFG_OTP_LOCK); 142 + if (!ret) 143 + return ret; 144 + 145 + ret = spi_mem_exec_op(spinand->spimem, &write_op); 146 + if (!ret) 147 + goto out; 148 + 149 + ret = spi_mem_exec_op(spinand->spimem, 
&exec_op); 150 + if (!ret) 151 + goto out; 152 + 153 + ret = spinand_wait(spinand, 154 + SPINAND_WRITE_INITIAL_DELAY_US, 155 + SPINAND_WRITE_POLL_DELAY_US, 156 + &status); 157 + if (!ret && (status & STATUS_PROG_FAILED)) 158 + ret = -EIO; 159 + 160 + out: 161 + if (spinand_upd_cfg(spinand, ESMT_F50L1G41LB_CFG_OTP_LOCK, 0)) { 162 + dev_warn(&spinand_to_mtd(spinand)->dev, 163 + "Can not disable OTP mode\n"); 164 + ret = -EIO; 165 + } 166 + 167 + return ret; 168 + } 169 + 170 + static const struct spinand_user_otp_ops f50l1g41lb_user_otp_ops = { 171 + .info = f50l1g41lb_user_otp_info, 172 + .lock = f50l1g41lb_otp_lock, 173 + .read = spinand_user_otp_read, 174 + .write = spinand_user_otp_write, 175 + }; 176 + 177 + static const struct spinand_fact_otp_ops f50l1g41lb_fact_otp_ops = { 178 + .info = f50l1g41lb_fact_otp_info, 179 + .read = spinand_fact_otp_read, 180 + }; 181 + 110 182 static const struct spinand_info esmt_c8_spinand_table[] = { 111 183 SPINAND_INFO("F50L1G41LB", 112 184 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01, 0x7f, ··· 194 112 &write_cache_variants, 195 113 &update_cache_variants), 196 114 0, 197 - SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)), 115 + SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL), 116 + SPINAND_USER_OTP_INFO(28, 2, &f50l1g41lb_user_otp_ops), 117 + SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)), 198 118 SPINAND_INFO("F50D1G41LB", 199 119 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f, 200 120 0x7f, 0x7f), ··· 206 122 &write_cache_variants, 207 123 &update_cache_variants), 208 124 0, 209 - SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)), 125 + SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL), 126 + SPINAND_USER_OTP_INFO(28, 2, &f50l1g41lb_user_otp_ops), 127 + SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)), 210 128 SPINAND_INFO("F50D2G41KA", 211 129 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51, 0x7f, 212 130 0x7f, 0x7f),
+64 -15
drivers/mtd/nand/spi/macronix.c
··· 14 14 #define MACRONIX_ECCSR_BF_LAST_PAGE(eccsr) FIELD_GET(GENMASK(3, 0), eccsr) 15 15 #define MACRONIX_ECCSR_BF_ACCUMULATED_PAGES(eccsr) FIELD_GET(GENMASK(7, 4), eccsr) 16 16 #define MACRONIX_CFG_CONT_READ BIT(2) 17 + #define MACRONIX_FEATURE_ADDR_READ_RETRY 0x70 18 + #define MACRONIX_NUM_READ_RETRY_MODES 5 17 19 18 20 #define STATUS_ECC_HAS_BITFLIPS_THRESHOLD (3 << 4) 19 21 ··· 138 136 return 0; 139 137 } 140 138 139 + /** 140 + * macronix_set_read_retry - Set the retry mode 141 + * @spinand: SPI NAND device 142 + * @retry_mode: Specify which retry mode to set 143 + * 144 + * Return: 0 on success, a negative error code otherwise. 145 + */ 146 + static int macronix_set_read_retry(struct spinand_device *spinand, 147 + unsigned int retry_mode) 148 + { 149 + struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MACRONIX_FEATURE_ADDR_READ_RETRY, 150 + spinand->scratchbuf); 151 + 152 + *spinand->scratchbuf = retry_mode; 153 + return spi_mem_exec_op(spinand->spimem, &op); 154 + } 155 + 141 156 static const struct spinand_info macronix_spinand_table[] = { 142 157 SPINAND_INFO("MX35LF1GE4AB", 143 158 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12), ··· 187 168 SPINAND_HAS_QE_BIT, 188 169 SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, 189 170 macronix_ecc_get_status), 190 - SPINAND_CONT_READ(macronix_set_cont_read)), 171 + SPINAND_CONT_READ(macronix_set_cont_read), 172 + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, 173 + macronix_set_read_retry)), 191 174 SPINAND_INFO("MX35LF4GE4AD", 192 175 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37, 0x03), 193 176 NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1), ··· 200 179 SPINAND_HAS_QE_BIT, 201 180 SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, 202 181 macronix_ecc_get_status), 203 - SPINAND_CONT_READ(macronix_set_cont_read)), 182 + SPINAND_CONT_READ(macronix_set_cont_read), 183 + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, 184 + macronix_set_read_retry)), 204 185 SPINAND_INFO("MX35LF1G24AD", 205 186 
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14, 0x03), 206 187 NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), ··· 211 188 &write_cache_variants, 212 189 &update_cache_variants), 213 190 SPINAND_HAS_QE_BIT, 214 - SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), 191 + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL), 192 + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, 193 + macronix_set_read_retry)), 215 194 SPINAND_INFO("MX35LF2G24AD", 216 195 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24, 0x03), 217 196 NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1), ··· 223 198 &update_cache_variants), 224 199 SPINAND_HAS_QE_BIT | 225 200 SPINAND_HAS_PROG_PLANE_SELECT_BIT, 226 - SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), 201 + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL), 202 + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, 203 + macronix_set_read_retry)), 227 204 SPINAND_INFO("MX35LF2G24AD-Z4I8", 228 205 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x64, 0x03), 229 206 NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), ··· 234 207 &write_cache_variants, 235 208 &update_cache_variants), 236 209 SPINAND_HAS_QE_BIT, 237 - SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), 210 + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL), 211 + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, 212 + macronix_set_read_retry)), 238 213 SPINAND_INFO("MX35LF4G24AD", 239 214 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35, 0x03), 240 215 NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1), ··· 246 217 &update_cache_variants), 247 218 SPINAND_HAS_QE_BIT | 248 219 SPINAND_HAS_PROG_PLANE_SELECT_BIT, 249 - SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), 220 + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL), 221 + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, 222 + macronix_set_read_retry)), 250 223 SPINAND_INFO("MX35LF4G24AD-Z4I8", 251 224 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x75, 0x03), 252 225 NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), ··· 257 226 
&write_cache_variants, 258 227 &update_cache_variants), 259 228 SPINAND_HAS_QE_BIT, 260 - SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), 229 + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL), 230 + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, 231 + macronix_set_read_retry)), 261 232 SPINAND_INFO("MX31LF1GE4BC", 262 233 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x1e), 263 234 NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), ··· 303 270 SPINAND_HAS_QE_BIT | 304 271 SPINAND_HAS_PROG_PLANE_SELECT_BIT, 305 272 SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, 306 - macronix_ecc_get_status)), 273 + macronix_ecc_get_status), 274 + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, 275 + macronix_set_read_retry)), 307 276 SPINAND_INFO("MX35UF4G24AD-Z4I8", 308 277 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xf5, 0x03), 309 278 NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), ··· 315 280 &update_cache_variants), 316 281 SPINAND_HAS_QE_BIT, 317 282 SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, 318 - macronix_ecc_get_status)), 283 + macronix_ecc_get_status), 284 + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, 285 + macronix_set_read_retry)), 319 286 SPINAND_INFO("MX35UF4GE4AD", 320 287 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7, 0x03), 321 288 NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), ··· 328 291 SPINAND_HAS_QE_BIT, 329 292 SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, 330 293 macronix_ecc_get_status), 331 - SPINAND_CONT_READ(macronix_set_cont_read)), 294 + SPINAND_CONT_READ(macronix_set_cont_read), 295 + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, 296 + macronix_set_read_retry)), 332 297 SPINAND_INFO("MX35UF2G14AC", 333 298 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa0), 334 299 NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1), ··· 353 314 SPINAND_HAS_QE_BIT | 354 315 SPINAND_HAS_PROG_PLANE_SELECT_BIT, 355 316 SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, 356 - macronix_ecc_get_status)), 317 + macronix_ecc_get_status), 318 + 
SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, 319 + macronix_set_read_retry)), 357 320 SPINAND_INFO("MX35UF2G24AD-Z4I8", 358 321 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe4, 0x03), 359 322 NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), ··· 365 324 &update_cache_variants), 366 325 SPINAND_HAS_QE_BIT, 367 326 SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, 368 - macronix_ecc_get_status)), 327 + macronix_ecc_get_status), 328 + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, 329 + macronix_set_read_retry)), 369 330 SPINAND_INFO("MX35UF2GE4AD", 370 331 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6, 0x03), 371 332 NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), ··· 378 335 SPINAND_HAS_QE_BIT, 379 336 SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, 380 337 macronix_ecc_get_status), 381 - SPINAND_CONT_READ(macronix_set_cont_read)), 338 + SPINAND_CONT_READ(macronix_set_cont_read), 339 + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, 340 + macronix_set_read_retry)), 382 341 SPINAND_INFO("MX35UF2GE4AC", 383 342 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2, 0x01), 384 343 NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), ··· 411 366 &update_cache_variants), 412 367 SPINAND_HAS_QE_BIT, 413 368 SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, 414 - macronix_ecc_get_status)), 369 + macronix_ecc_get_status), 370 + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, 371 + macronix_set_read_retry)), 415 372 SPINAND_INFO("MX35UF1GE4AD", 416 373 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96, 0x03), 417 374 NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), ··· 424 377 SPINAND_HAS_QE_BIT, 425 378 SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, 426 379 macronix_ecc_get_status), 427 - SPINAND_CONT_READ(macronix_set_cont_read)), 380 + SPINAND_CONT_READ(macronix_set_cont_read), 381 + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, 382 + macronix_set_read_retry)), 428 383 SPINAND_INFO("MX35UF1GE4AC", 429 384 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92, 0x01), 430 385 
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+134 -1
drivers/mtd/nand/spi/micron.c
··· 9 9 #include <linux/device.h> 10 10 #include <linux/kernel.h> 11 11 #include <linux/mtd/spinand.h> 12 + #include <linux/spi/spi-mem.h> 13 + #include <linux/string.h> 12 14 13 15 #define SPINAND_MFR_MICRON 0x2c 14 16 ··· 29 27 #define MICRON_DIE_SELECT_REG 0xD0 30 28 31 29 #define MICRON_SELECT_DIE(x) ((x) << 6) 30 + 31 + #define MICRON_MT29F2G01ABAGD_CFG_OTP_STATE BIT(7) 32 + #define MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK \ 33 + (CFG_OTP_ENABLE | MICRON_MT29F2G01ABAGD_CFG_OTP_STATE) 32 34 33 35 static SPINAND_OP_VARIANTS(quadio_read_cache_variants, 34 36 SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), ··· 174 168 return -EINVAL; 175 169 } 176 170 171 + static int mt29f2g01abagd_otp_is_locked(struct spinand_device *spinand) 172 + { 173 + size_t bufsize = spinand_otp_page_size(spinand); 174 + size_t retlen; 175 + u8 *buf; 176 + int ret; 177 + 178 + buf = kmalloc(bufsize, GFP_KERNEL); 179 + if (!buf) 180 + return -ENOMEM; 181 + 182 + ret = spinand_upd_cfg(spinand, 183 + MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK, 184 + MICRON_MT29F2G01ABAGD_CFG_OTP_STATE); 185 + if (ret) 186 + goto free_buf; 187 + 188 + ret = spinand_user_otp_read(spinand, 0, bufsize, &retlen, buf); 189 + 190 + if (spinand_upd_cfg(spinand, MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK, 191 + 0)) { 192 + dev_warn(&spinand_to_mtd(spinand)->dev, 193 + "Can not disable OTP mode\n"); 194 + ret = -EIO; 195 + } 196 + 197 + if (ret) 198 + goto free_buf; 199 + 200 + /* If all zeros, then the OTP area is locked. 
*/ 201 + if (mem_is_zero(buf, bufsize)) 202 + ret = 1; 203 + 204 + free_buf: 205 + kfree(buf); 206 + return ret; 207 + } 208 + 209 + static int mt29f2g01abagd_otp_info(struct spinand_device *spinand, size_t len, 210 + struct otp_info *buf, size_t *retlen, 211 + bool user) 212 + { 213 + int locked; 214 + 215 + if (len < sizeof(*buf)) 216 + return -EINVAL; 217 + 218 + locked = mt29f2g01abagd_otp_is_locked(spinand); 219 + if (locked < 0) 220 + return locked; 221 + 222 + buf->locked = locked; 223 + buf->start = 0; 224 + buf->length = user ? spinand_user_otp_size(spinand) : 225 + spinand_fact_otp_size(spinand); 226 + 227 + *retlen = sizeof(*buf); 228 + return 0; 229 + } 230 + 231 + static int mt29f2g01abagd_fact_otp_info(struct spinand_device *spinand, 232 + size_t len, struct otp_info *buf, 233 + size_t *retlen) 234 + { 235 + return mt29f2g01abagd_otp_info(spinand, len, buf, retlen, false); 236 + } 237 + 238 + static int mt29f2g01abagd_user_otp_info(struct spinand_device *spinand, 239 + size_t len, struct otp_info *buf, 240 + size_t *retlen) 241 + { 242 + return mt29f2g01abagd_otp_info(spinand, len, buf, retlen, true); 243 + } 244 + 245 + static int mt29f2g01abagd_otp_lock(struct spinand_device *spinand, loff_t from, 246 + size_t len) 247 + { 248 + struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true); 249 + struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0); 250 + u8 status; 251 + int ret; 252 + 253 + ret = spinand_upd_cfg(spinand, 254 + MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK, 255 + MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK); 256 + if (!ret) 257 + return ret; 258 + 259 + ret = spi_mem_exec_op(spinand->spimem, &write_op); 260 + if (!ret) 261 + goto out; 262 + 263 + ret = spi_mem_exec_op(spinand->spimem, &exec_op); 264 + if (!ret) 265 + goto out; 266 + 267 + ret = spinand_wait(spinand, 268 + SPINAND_WRITE_INITIAL_DELAY_US, 269 + SPINAND_WRITE_POLL_DELAY_US, 270 + &status); 271 + if (!ret && (status & STATUS_PROG_FAILED)) 272 + ret = -EIO; 273 + 274 + out: 275 + if 
(spinand_upd_cfg(spinand, MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK, 0)) { 276 + dev_warn(&spinand_to_mtd(spinand)->dev, 277 + "Can not disable OTP mode\n"); 278 + ret = -EIO; 279 + } 280 + 281 + return ret; 282 + } 283 + 284 + static const struct spinand_user_otp_ops mt29f2g01abagd_user_otp_ops = { 285 + .info = mt29f2g01abagd_user_otp_info, 286 + .lock = mt29f2g01abagd_otp_lock, 287 + .read = spinand_user_otp_read, 288 + .write = spinand_user_otp_write, 289 + }; 290 + 291 + static const struct spinand_fact_otp_ops mt29f2g01abagd_fact_otp_ops = { 292 + .info = mt29f2g01abagd_fact_otp_info, 293 + .read = spinand_fact_otp_read, 294 + }; 295 + 177 296 static const struct spinand_info micron_spinand_table[] = { 178 297 /* M79A 2Gb 3.3V */ 179 298 SPINAND_INFO("MT29F2G01ABAGD", ··· 310 179 &x4_update_cache_variants), 311 180 0, 312 181 SPINAND_ECCINFO(&micron_8_ooblayout, 313 - micron_8_ecc_get_status)), 182 + micron_8_ecc_get_status), 183 + SPINAND_USER_OTP_INFO(12, 2, &mt29f2g01abagd_user_otp_ops), 184 + SPINAND_FACT_OTP_INFO(2, 0, &mt29f2g01abagd_fact_otp_ops)), 314 185 /* M79A 2Gb 1.8V */ 315 186 SPINAND_INFO("MT29F2G01ABBGD", 316 187 SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
+362
drivers/mtd/nand/spi/otp.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (c) 2025, SaluteDevices. All Rights Reserved. 4 + * 5 + * Author: Martin Kurbanov <mmkurbanov@salutedevices.com> 6 + */ 7 + 8 + #include <linux/mtd/mtd.h> 9 + #include <linux/mtd/spinand.h> 10 + 11 + /** 12 + * spinand_otp_page_size() - Get SPI-NAND OTP page size 13 + * @spinand: the spinand device 14 + * 15 + * Return: the OTP page size. 16 + */ 17 + size_t spinand_otp_page_size(struct spinand_device *spinand) 18 + { 19 + struct nand_device *nand = spinand_to_nand(spinand); 20 + 21 + return nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); 22 + } 23 + 24 + static size_t spinand_otp_size(struct spinand_device *spinand, 25 + const struct spinand_otp_layout *layout) 26 + { 27 + return layout->npages * spinand_otp_page_size(spinand); 28 + } 29 + 30 + /** 31 + * spinand_fact_otp_size() - Get SPI-NAND factory OTP area size 32 + * @spinand: the spinand device 33 + * 34 + * Return: the OTP size. 35 + */ 36 + size_t spinand_fact_otp_size(struct spinand_device *spinand) 37 + { 38 + return spinand_otp_size(spinand, &spinand->fact_otp->layout); 39 + } 40 + 41 + /** 42 + * spinand_user_otp_size() - Get SPI-NAND user OTP area size 43 + * @spinand: the spinand device 44 + * 45 + * Return: the OTP size. 
46 + */ 47 + size_t spinand_user_otp_size(struct spinand_device *spinand) 48 + { 49 + return spinand_otp_size(spinand, &spinand->user_otp->layout); 50 + } 51 + 52 + static int spinand_otp_check_bounds(struct spinand_device *spinand, loff_t ofs, 53 + size_t len, 54 + const struct spinand_otp_layout *layout) 55 + { 56 + if (ofs < 0 || ofs + len > spinand_otp_size(spinand, layout)) 57 + return -EINVAL; 58 + 59 + return 0; 60 + } 61 + 62 + static int spinand_user_otp_check_bounds(struct spinand_device *spinand, 63 + loff_t ofs, size_t len) 64 + { 65 + return spinand_otp_check_bounds(spinand, ofs, len, 66 + &spinand->user_otp->layout); 67 + } 68 + 69 + static int spinand_otp_rw(struct spinand_device *spinand, loff_t ofs, 70 + size_t len, size_t *retlen, u8 *buf, bool is_write, 71 + const struct spinand_otp_layout *layout) 72 + { 73 + struct nand_page_io_req req = {}; 74 + unsigned long long page; 75 + size_t copied = 0; 76 + size_t otp_pagesize = spinand_otp_page_size(spinand); 77 + int ret; 78 + 79 + if (!len) 80 + return 0; 81 + 82 + ret = spinand_otp_check_bounds(spinand, ofs, len, layout); 83 + if (ret) 84 + return ret; 85 + 86 + ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, CFG_OTP_ENABLE); 87 + if (ret) 88 + return ret; 89 + 90 + page = ofs; 91 + req.dataoffs = do_div(page, otp_pagesize); 92 + req.pos.page = page + layout->start_page; 93 + req.type = is_write ? 
NAND_PAGE_WRITE : NAND_PAGE_READ; 94 + req.mode = MTD_OPS_RAW; 95 + req.databuf.in = buf; 96 + 97 + while (copied < len) { 98 + req.datalen = min_t(unsigned int, 99 + otp_pagesize - req.dataoffs, 100 + len - copied); 101 + 102 + if (is_write) 103 + ret = spinand_write_page(spinand, &req); 104 + else 105 + ret = spinand_read_page(spinand, &req); 106 + 107 + if (ret < 0) 108 + break; 109 + 110 + req.databuf.in += req.datalen; 111 + req.pos.page++; 112 + req.dataoffs = 0; 113 + copied += req.datalen; 114 + } 115 + 116 + *retlen = copied; 117 + 118 + if (spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0)) { 119 + dev_warn(&spinand_to_mtd(spinand)->dev, 120 + "Can not disable OTP mode\n"); 121 + ret = -EIO; 122 + } 123 + 124 + return ret; 125 + } 126 + 127 + /** 128 + * spinand_fact_otp_read() - Read from OTP area 129 + * @spinand: the spinand device 130 + * @ofs: the offset to read 131 + * @len: the number of data bytes to read 132 + * @retlen: the pointer to variable to store the number of read bytes 133 + * @buf: the buffer to store the read data 134 + * 135 + * Return: 0 on success, an error code otherwise. 136 + */ 137 + int spinand_fact_otp_read(struct spinand_device *spinand, loff_t ofs, 138 + size_t len, size_t *retlen, u8 *buf) 139 + { 140 + return spinand_otp_rw(spinand, ofs, len, retlen, buf, false, 141 + &spinand->fact_otp->layout); 142 + } 143 + 144 + /** 145 + * spinand_user_otp_read() - Read from OTP area 146 + * @spinand: the spinand device 147 + * @ofs: the offset to read 148 + * @len: the number of data bytes to read 149 + * @retlen: the pointer to variable to store the number of read bytes 150 + * @buf: the buffer to store the read data 151 + * 152 + * Return: 0 on success, an error code otherwise. 
153 + */ 154 + int spinand_user_otp_read(struct spinand_device *spinand, loff_t ofs, 155 + size_t len, size_t *retlen, u8 *buf) 156 + { 157 + return spinand_otp_rw(spinand, ofs, len, retlen, buf, false, 158 + &spinand->user_otp->layout); 159 + } 160 + 161 + /** 162 + * spinand_user_otp_write() - Write to OTP area 163 + * @spinand: the spinand device 164 + * @ofs: the offset to write to 165 + * @len: the number of bytes to write 166 + * @retlen: the pointer to variable to store the number of written bytes 167 + * @buf: the buffer with data to write 168 + * 169 + * Return: 0 on success, an error code otherwise. 170 + */ 171 + int spinand_user_otp_write(struct spinand_device *spinand, loff_t ofs, 172 + size_t len, size_t *retlen, const u8 *buf) 173 + { 174 + return spinand_otp_rw(spinand, ofs, len, retlen, (u8 *)buf, true, 175 + &spinand->user_otp->layout); 176 + } 177 + 178 + static int spinand_mtd_otp_info(struct mtd_info *mtd, size_t len, 179 + size_t *retlen, struct otp_info *buf, 180 + bool is_fact) 181 + { 182 + struct spinand_device *spinand = mtd_to_spinand(mtd); 183 + int ret; 184 + 185 + *retlen = 0; 186 + 187 + mutex_lock(&spinand->lock); 188 + 189 + if (is_fact) 190 + ret = spinand->fact_otp->ops->info(spinand, len, buf, retlen); 191 + else 192 + ret = spinand->user_otp->ops->info(spinand, len, buf, retlen); 193 + 194 + mutex_unlock(&spinand->lock); 195 + 196 + return ret; 197 + } 198 + 199 + static int spinand_mtd_fact_otp_info(struct mtd_info *mtd, size_t len, 200 + size_t *retlen, struct otp_info *buf) 201 + { 202 + return spinand_mtd_otp_info(mtd, len, retlen, buf, true); 203 + } 204 + 205 + static int spinand_mtd_user_otp_info(struct mtd_info *mtd, size_t len, 206 + size_t *retlen, struct otp_info *buf) 207 + { 208 + return spinand_mtd_otp_info(mtd, len, retlen, buf, false); 209 + } 210 + 211 + static int spinand_mtd_otp_read(struct mtd_info *mtd, loff_t ofs, size_t len, 212 + size_t *retlen, u8 *buf, bool is_fact) 213 + { 214 + struct spinand_device 
*spinand = mtd_to_spinand(mtd); 215 + int ret; 216 + 217 + *retlen = 0; 218 + 219 + if (!len) 220 + return 0; 221 + 222 + ret = spinand_otp_check_bounds(spinand, ofs, len, 223 + is_fact ? &spinand->fact_otp->layout : 224 + &spinand->user_otp->layout); 225 + if (ret) 226 + return ret; 227 + 228 + mutex_lock(&spinand->lock); 229 + 230 + if (is_fact) 231 + ret = spinand->fact_otp->ops->read(spinand, ofs, len, retlen, 232 + buf); 233 + else 234 + ret = spinand->user_otp->ops->read(spinand, ofs, len, retlen, 235 + buf); 236 + 237 + mutex_unlock(&spinand->lock); 238 + 239 + return ret; 240 + } 241 + 242 + static int spinand_mtd_fact_otp_read(struct mtd_info *mtd, loff_t ofs, 243 + size_t len, size_t *retlen, u8 *buf) 244 + { 245 + return spinand_mtd_otp_read(mtd, ofs, len, retlen, buf, true); 246 + } 247 + 248 + static int spinand_mtd_user_otp_read(struct mtd_info *mtd, loff_t ofs, 249 + size_t len, size_t *retlen, u8 *buf) 250 + { 251 + return spinand_mtd_otp_read(mtd, ofs, len, retlen, buf, false); 252 + } 253 + 254 + static int spinand_mtd_user_otp_write(struct mtd_info *mtd, loff_t ofs, 255 + size_t len, size_t *retlen, const u8 *buf) 256 + { 257 + struct spinand_device *spinand = mtd_to_spinand(mtd); 258 + const struct spinand_user_otp_ops *ops = spinand->user_otp->ops; 259 + int ret; 260 + 261 + *retlen = 0; 262 + 263 + if (!len) 264 + return 0; 265 + 266 + ret = spinand_user_otp_check_bounds(spinand, ofs, len); 267 + if (ret) 268 + return ret; 269 + 270 + mutex_lock(&spinand->lock); 271 + ret = ops->write(spinand, ofs, len, retlen, buf); 272 + mutex_unlock(&spinand->lock); 273 + 274 + return ret; 275 + } 276 + 277 + static int spinand_mtd_user_otp_erase(struct mtd_info *mtd, loff_t ofs, 278 + size_t len) 279 + { 280 + struct spinand_device *spinand = mtd_to_spinand(mtd); 281 + const struct spinand_user_otp_ops *ops = spinand->user_otp->ops; 282 + int ret; 283 + 284 + if (!len) 285 + return 0; 286 + 287 + ret = spinand_user_otp_check_bounds(spinand, ofs, len); 288 
+ if (ret) 289 + return ret; 290 + 291 + mutex_lock(&spinand->lock); 292 + ret = ops->erase(spinand, ofs, len); 293 + mutex_unlock(&spinand->lock); 294 + 295 + return ret; 296 + } 297 + 298 + static int spinand_mtd_user_otp_lock(struct mtd_info *mtd, loff_t ofs, 299 + size_t len) 300 + { 301 + struct spinand_device *spinand = mtd_to_spinand(mtd); 302 + const struct spinand_user_otp_ops *ops = spinand->user_otp->ops; 303 + int ret; 304 + 305 + if (!len) 306 + return 0; 307 + 308 + ret = spinand_user_otp_check_bounds(spinand, ofs, len); 309 + if (ret) 310 + return ret; 311 + 312 + mutex_lock(&spinand->lock); 313 + ret = ops->lock(spinand, ofs, len); 314 + mutex_unlock(&spinand->lock); 315 + 316 + return ret; 317 + } 318 + 319 + /** 320 + * spinand_set_mtd_otp_ops() - Setup OTP methods 321 + * @spinand: the spinand device 322 + * 323 + * Setup OTP methods. 324 + * 325 + * Return: 0 on success, a negative error code otherwise. 326 + */ 327 + int spinand_set_mtd_otp_ops(struct spinand_device *spinand) 328 + { 329 + struct mtd_info *mtd = spinand_to_mtd(spinand); 330 + const struct spinand_fact_otp_ops *fact_ops = spinand->fact_otp->ops; 331 + const struct spinand_user_otp_ops *user_ops = spinand->user_otp->ops; 332 + 333 + if (!user_ops && !fact_ops) 334 + return -EINVAL; 335 + 336 + if (user_ops) { 337 + if (user_ops->info) 338 + mtd->_get_user_prot_info = spinand_mtd_user_otp_info; 339 + 340 + if (user_ops->read) 341 + mtd->_read_user_prot_reg = spinand_mtd_user_otp_read; 342 + 343 + if (user_ops->write) 344 + mtd->_write_user_prot_reg = spinand_mtd_user_otp_write; 345 + 346 + if (user_ops->lock) 347 + mtd->_lock_user_prot_reg = spinand_mtd_user_otp_lock; 348 + 349 + if (user_ops->erase) 350 + mtd->_erase_user_prot_reg = spinand_mtd_user_otp_erase; 351 + } 352 + 353 + if (fact_ops) { 354 + if (fact_ops->info) 355 + mtd->_get_fact_prot_info = spinand_mtd_fact_otp_info; 356 + 357 + if (fact_ops->read) 358 + mtd->_read_fact_prot_reg = spinand_mtd_fact_otp_read; 359 + } 
360 + 361 + return 0; 362 + }
+25 -52
drivers/mtd/spi-nor/core.c
··· 7 7 * Copyright (C) 2014, Freescale Semiconductor, Inc. 8 8 */ 9 9 10 - #include <linux/err.h> 11 - #include <linux/errno.h> 10 + #include <linux/cleanup.h> 12 11 #include <linux/delay.h> 13 12 #include <linux/device.h> 13 + #include <linux/err.h> 14 + #include <linux/errno.h> 14 15 #include <linux/math64.h> 15 16 #include <linux/module.h> 16 17 #include <linux/mtd/mtd.h> 17 18 #include <linux/mtd/spi-nor.h> 18 19 #include <linux/mutex.h> 19 - #include <linux/of_platform.h> 20 + #include <linux/of.h> 20 21 #include <linux/regulator/consumer.h> 21 22 #include <linux/sched/task_stack.h> 22 23 #include <linux/sizes.h> ··· 640 639 static int spi_nor_rww_start_rdst(struct spi_nor *nor) 641 640 { 642 641 struct spi_nor_rww *rww = &nor->rww; 643 - int ret = -EAGAIN; 644 642 645 - mutex_lock(&nor->lock); 643 + guard(mutex)(&nor->lock); 646 644 647 645 if (rww->ongoing_io || rww->ongoing_rd) 648 - goto busy; 646 + return -EAGAIN; 649 647 650 648 rww->ongoing_io = true; 651 649 rww->ongoing_rd = true; 652 - ret = 0; 653 650 654 - busy: 655 - mutex_unlock(&nor->lock); 656 - return ret; 651 + return 0; 657 652 } 658 653 659 654 static void spi_nor_rww_end_rdst(struct spi_nor *nor) 660 655 { 661 656 struct spi_nor_rww *rww = &nor->rww; 662 657 663 - mutex_lock(&nor->lock); 658 + guard(mutex)(&nor->lock); 664 659 665 660 rww->ongoing_io = false; 666 661 rww->ongoing_rd = false; 667 - 668 - mutex_unlock(&nor->lock); 669 662 } 670 663 671 664 static int spi_nor_lock_rdst(struct spi_nor *nor) ··· 1207 1212 static bool spi_nor_rww_start_io(struct spi_nor *nor) 1208 1213 { 1209 1214 struct spi_nor_rww *rww = &nor->rww; 1210 - bool start = false; 1211 1215 1212 - mutex_lock(&nor->lock); 1216 + guard(mutex)(&nor->lock); 1213 1217 1214 1218 if (rww->ongoing_io) 1215 - goto busy; 1219 + return false; 1216 1220 1217 1221 rww->ongoing_io = true; 1218 - start = true; 1219 1222 1220 - busy: 1221 - mutex_unlock(&nor->lock); 1222 - return start; 1223 + return true; 1223 1224 } 1224 1225 
1225 1226 static void spi_nor_rww_end_io(struct spi_nor *nor) 1226 1227 { 1227 - mutex_lock(&nor->lock); 1228 + guard(mutex)(&nor->lock); 1228 1229 nor->rww.ongoing_io = false; 1229 - mutex_unlock(&nor->lock); 1230 1230 } 1231 1231 1232 1232 static int spi_nor_lock_device(struct spi_nor *nor) ··· 1244 1254 static bool spi_nor_rww_start_exclusive(struct spi_nor *nor) 1245 1255 { 1246 1256 struct spi_nor_rww *rww = &nor->rww; 1247 - bool start = false; 1248 1257 1249 1258 mutex_lock(&nor->lock); 1250 1259 1251 1260 if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe) 1252 - goto busy; 1261 + return false; 1253 1262 1254 1263 rww->ongoing_io = true; 1255 1264 rww->ongoing_rd = true; 1256 1265 rww->ongoing_pe = true; 1257 - start = true; 1258 1266 1259 - busy: 1260 - mutex_unlock(&nor->lock); 1261 - return start; 1267 + return true; 1262 1268 } 1263 1269 1264 1270 static void spi_nor_rww_end_exclusive(struct spi_nor *nor) 1265 1271 { 1266 1272 struct spi_nor_rww *rww = &nor->rww; 1267 1273 1268 - mutex_lock(&nor->lock); 1274 + guard(mutex)(&nor->lock); 1269 1275 rww->ongoing_io = false; 1270 1276 rww->ongoing_rd = false; 1271 1277 rww->ongoing_pe = false; 1272 - mutex_unlock(&nor->lock); 1273 1278 } 1274 1279 1275 1280 int spi_nor_prep_and_lock(struct spi_nor *nor) ··· 1301 1316 { 1302 1317 struct spi_nor_rww *rww = &nor->rww; 1303 1318 unsigned int used_banks = 0; 1304 - bool started = false; 1305 1319 u8 first, last; 1306 1320 int bank; 1307 1321 1308 - mutex_lock(&nor->lock); 1322 + guard(mutex)(&nor->lock); 1309 1323 1310 1324 if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe) 1311 - goto busy; 1325 + return false; 1312 1326 1313 1327 spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last); 1314 1328 for (bank = first; bank <= last; bank++) { 1315 1329 if (rww->used_banks & BIT(bank)) 1316 - goto busy; 1330 + return false; 1317 1331 1318 1332 used_banks |= BIT(bank); 1319 1333 } 1320 1334 1321 1335 rww->used_banks |= used_banks; 
1322 1336 rww->ongoing_pe = true; 1323 - started = true; 1324 1337 1325 - busy: 1326 - mutex_unlock(&nor->lock); 1327 - return started; 1338 + return true; 1328 1339 } 1329 1340 1330 1341 static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len) ··· 1329 1348 u8 first, last; 1330 1349 int bank; 1331 1350 1332 - mutex_lock(&nor->lock); 1351 + guard(mutex)(&nor->lock); 1333 1352 1334 1353 spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last); 1335 1354 for (bank = first; bank <= last; bank++) 1336 1355 rww->used_banks &= ~BIT(bank); 1337 1356 1338 1357 rww->ongoing_pe = false; 1339 - 1340 - mutex_unlock(&nor->lock); 1341 1358 } 1342 1359 1343 1360 static int spi_nor_prep_and_lock_pe(struct spi_nor *nor, loff_t start, size_t len) ··· 1372 1393 { 1373 1394 struct spi_nor_rww *rww = &nor->rww; 1374 1395 unsigned int used_banks = 0; 1375 - bool started = false; 1376 1396 u8 first, last; 1377 1397 int bank; 1378 1398 1379 - mutex_lock(&nor->lock); 1399 + guard(mutex)(&nor->lock); 1380 1400 1381 1401 if (rww->ongoing_io || rww->ongoing_rd) 1382 - goto busy; 1402 + return false; 1383 1403 1384 1404 spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last); 1385 1405 for (bank = first; bank <= last; bank++) { 1386 1406 if (rww->used_banks & BIT(bank)) 1387 - goto busy; 1407 + return false; 1388 1408 1389 1409 used_banks |= BIT(bank); 1390 1410 } ··· 1391 1413 rww->used_banks |= used_banks; 1392 1414 rww->ongoing_io = true; 1393 1415 rww->ongoing_rd = true; 1394 - started = true; 1395 1416 1396 - busy: 1397 - mutex_unlock(&nor->lock); 1398 - return started; 1417 + return true; 1399 1418 } 1400 1419 1401 1420 static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len) ··· 1401 1426 u8 first, last; 1402 1427 int bank; 1403 1428 1404 - mutex_lock(&nor->lock); 1429 + guard(mutex)(&nor->lock); 1405 1430 1406 1431 spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last); 1407 1432 for 
(bank = first; bank <= last; bank++) ··· 1409 1434 1410 1435 rww->ongoing_io = false; 1411 1436 rww->ongoing_rd = false; 1412 - 1413 - mutex_unlock(&nor->lock); 1414 1437 } 1415 1438 1416 1439 static int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t len)
+31
drivers/mtd/spi-nor/macronix.c
··· 45 45 return 0; 46 46 } 47 47 48 + static int 49 + macronix_qpp4b_post_sfdp_fixups(struct spi_nor *nor) 50 + { 51 + /* PP_1_1_4_4B is supported but missing in 4BAIT. */ 52 + struct spi_nor_flash_parameter *params = nor->params; 53 + 54 + params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4; 55 + spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4], 56 + SPINOR_OP_PP_1_1_4_4B, SNOR_PROTO_1_1_4); 57 + 58 + return 0; 59 + } 60 + 48 61 static const struct spi_nor_fixups mx25l25635_fixups = { 49 62 .post_bfpt = mx25l25635_post_bfpt_fixups, 63 + .post_sfdp = macronix_qpp4b_post_sfdp_fixups, 64 + }; 65 + 66 + static const struct spi_nor_fixups macronix_qpp4b_fixups = { 67 + .post_sfdp = macronix_qpp4b_post_sfdp_fixups, 50 68 }; 51 69 52 70 static const struct flash_info macronix_nor_parts[] = { ··· 120 102 .size = SZ_64M, 121 103 .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, 122 104 .fixup_flags = SPI_NOR_4B_OPCODES, 105 + .fixups = &macronix_qpp4b_fixups, 123 106 }, { 124 107 .id = SNOR_ID(0xc2, 0x20, 0x1b), 125 108 .name = "mx66l1g45g", 126 109 .size = SZ_128M, 127 110 .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, 111 + .fixups = &macronix_qpp4b_fixups, 112 + }, { 113 + /* MX66L2G45G */ 114 + .id = SNOR_ID(0xc2, 0x20, 0x1c), 115 + .fixups = &macronix_qpp4b_fixups, 128 116 }, { 129 117 .id = SNOR_ID(0xc2, 0x23, 0x14), 130 118 .name = "mx25v8035f", ··· 172 148 .size = SZ_64M, 173 149 .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, 174 150 .fixup_flags = SPI_NOR_4B_OPCODES, 151 + .fixups = &macronix_qpp4b_fixups, 175 152 }, { 176 153 .id = SNOR_ID(0xc2, 0x25, 0x3a), 177 154 .name = "mx66u51235f", 178 155 .size = SZ_64M, 179 156 .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, 180 157 .fixup_flags = SPI_NOR_4B_OPCODES, 158 + .fixups = &macronix_qpp4b_fixups, 159 + }, { 160 + /* MX66U1G45G */ 161 + .id = SNOR_ID(0xc2, 0x25, 0x3b), 162 + .fixups = &macronix_qpp4b_fixups, 181 163 }, { 182 164 .id = 
SNOR_ID(0xc2, 0x25, 0x3c), 183 165 .name = "mx66u2g45g", 184 166 .size = SZ_256M, 185 167 .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, 186 168 .fixup_flags = SPI_NOR_4B_OPCODES, 169 + .fixups = &macronix_qpp4b_fixups, 187 170 }, { 188 171 .id = SNOR_ID(0xc2, 0x26, 0x18), 189 172 .name = "mx25l12855e",
+1
drivers/mtd/spi-nor/otp.c
··· 6 6 */ 7 7 8 8 #include <linux/log2.h> 9 + #include <linux/math64.h> 9 10 #include <linux/mtd/mtd.h> 10 11 #include <linux/mtd/spi-nor.h> 11 12
+1
drivers/mtd/spi-nor/swp.c
··· 5 5 * Copyright (C) 2005, Intec Automation Inc. 6 6 * Copyright (C) 2014, Freescale Semiconductor, Inc. 7 7 */ 8 + #include <linux/math64.h> 8 9 #include <linux/mtd/mtd.h> 9 10 #include <linux/mtd/spi-nor.h> 10 11
+88
drivers/mtd/spi-nor/winbond.c
··· 10 10 11 11 #define WINBOND_NOR_OP_RDEAR 0xc8 /* Read Extended Address Register */ 12 12 #define WINBOND_NOR_OP_WREAR 0xc5 /* Write Extended Address Register */ 13 + #define WINBOND_NOR_OP_SELDIE 0xc2 /* Select active die */ 13 14 14 15 #define WINBOND_NOR_WREAR_OP(buf) \ 15 16 SPI_MEM_OP(SPI_MEM_OP_CMD(WINBOND_NOR_OP_WREAR, 0), \ 17 + SPI_MEM_OP_NO_ADDR, \ 18 + SPI_MEM_OP_NO_DUMMY, \ 19 + SPI_MEM_OP_DATA_OUT(1, buf, 0)) 20 + 21 + #define WINBOND_NOR_SELDIE_OP(buf) \ 22 + SPI_MEM_OP(SPI_MEM_OP_CMD(WINBOND_NOR_OP_SELDIE, 0), \ 16 23 SPI_MEM_OP_NO_ADDR, \ 17 24 SPI_MEM_OP_NO_DUMMY, \ 18 25 SPI_MEM_OP_DATA_OUT(1, buf, 0)) ··· 71 64 72 65 static const struct spi_nor_fixups w25q256_fixups = { 73 66 .post_bfpt = w25q256_post_bfpt_fixups, 67 + }; 68 + 69 + /** 70 + * winbond_nor_select_die() - Set active die. 71 + * @nor: pointer to 'struct spi_nor'. 72 + * @die: die to set active. 73 + * 74 + * Certain Winbond chips feature more than a single die. This is mostly hidden 75 + * to the user, except that some chips may experience time deviation when 76 + * modifying the status bits between dies, which in some corner cases may 77 + * produce problematic races. Being able to explicitly select a die to check its 78 + * state in this case may be useful. 79 + * 80 + * Return: 0 on success, -errno otherwise. 
81 + */ 82 + static int winbond_nor_select_die(struct spi_nor *nor, u8 die) 83 + { 84 + int ret; 85 + 86 + nor->bouncebuf[0] = die; 87 + 88 + if (nor->spimem) { 89 + struct spi_mem_op op = WINBOND_NOR_SELDIE_OP(nor->bouncebuf); 90 + 91 + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 92 + 93 + ret = spi_mem_exec_op(nor->spimem, &op); 94 + } else { 95 + ret = spi_nor_controller_ops_write_reg(nor, 96 + WINBOND_NOR_OP_SELDIE, 97 + nor->bouncebuf, 1); 98 + } 99 + 100 + if (ret) 101 + dev_dbg(nor->dev, "error %d selecting die %d\n", ret, die); 102 + 103 + return ret; 104 + } 105 + 106 + static int winbond_nor_multi_die_ready(struct spi_nor *nor) 107 + { 108 + int ret, i; 109 + 110 + for (i = 0; i < nor->params->n_dice; i++) { 111 + ret = winbond_nor_select_die(nor, i); 112 + if (ret) 113 + return ret; 114 + 115 + ret = spi_nor_sr_ready(nor); 116 + if (ret <= 0) 117 + return ret; 118 + } 119 + 120 + return 1; 121 + } 122 + 123 + static int 124 + winbond_nor_multi_die_post_sfdp_fixups(struct spi_nor *nor) 125 + { 126 + /* 127 + * SFDP supports dice numbers, but this information is only available in 128 + * optional additional tables which are not provided by these chips. 129 + * Dice number has an impact though, because these devices need extra 130 + * care when reading the busy bit. 
131 + */ 132 + nor->params->n_dice = nor->params->size / SZ_64M; 133 + nor->params->ready = winbond_nor_multi_die_ready; 134 + 135 + return 0; 136 + } 137 + 138 + static const struct spi_nor_fixups winbond_nor_multi_die_fixups = { 139 + .post_sfdp = winbond_nor_multi_die_post_sfdp_fixups, 74 140 }; 75 141 76 142 static const struct flash_info winbond_nor_parts[] = { ··· 227 147 .size = SZ_64M, 228 148 .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, 229 149 }, { 150 + /* W25Q01JV */ 151 + .id = SNOR_ID(0xef, 0x40, 0x21), 152 + .fixups = &winbond_nor_multi_die_fixups, 153 + }, { 230 154 .id = SNOR_ID(0xef, 0x50, 0x12), 231 155 .name = "w25q20bw", 232 156 .size = SZ_256K, ··· 305 221 }, { 306 222 .id = SNOR_ID(0xef, 0x70, 0x19), 307 223 .name = "w25q256jvm", 224 + }, { 225 + /* W25Q02JV */ 226 + .id = SNOR_ID(0xef, 0x70, 0x22), 227 + .fixups = &winbond_nor_multi_die_fixups, 308 228 }, { 309 229 .id = SNOR_ID(0xef, 0x71, 0x19), 310 230 .name = "w25m512jv",
+5 -1
include/linux/mtd/nand-qpic-common.h
··· 108 108 #define ECC_FORCE_CLK_OPEN BIT(30) 109 109 110 110 /* NAND_DEV_CMD1 bits */ 111 - #define READ_ADDR 0 111 + #define READ_ADDR_MASK GENMASK(7, 0) 112 112 113 113 /* NAND_DEV_CMD_VLD bits */ 114 114 #define READ_START_VLD BIT(0) ··· 119 119 120 120 /* NAND_EBI2_ECC_BUF_CFG bits */ 121 121 #define NUM_STEPS 0 122 + #define NUM_STEPS_MASK GENMASK(9, 0) 122 123 123 124 /* NAND_ERASED_CW_DETECT_CFG bits */ 124 125 #define ERASED_CW_ECC_MASK 1 ··· 140 139 141 140 /* NAND_READ_LOCATION_n bits */ 142 141 #define READ_LOCATION_OFFSET 0 142 + #define READ_LOCATION_OFFSET_MASK GENMASK(9, 0) 143 143 #define READ_LOCATION_SIZE 16 144 + #define READ_LOCATION_SIZE_MASK GENMASK(25, 16) 144 145 #define READ_LOCATION_LAST 31 146 + #define READ_LOCATION_LAST_MASK BIT(31) 145 147 146 148 /* Version Mask */ 147 149 #define NAND_VERSION_MAJOR_MASK 0xf0000000
+1 -1
include/linux/mtd/nand.h
··· 21 21 * @oobsize: OOB area size 22 22 * @pages_per_eraseblock: number of pages per eraseblock 23 23 * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number) 24 - * @max_bad_eraseblocks_per_lun: maximum number of eraseblocks per LUN 24 + * @max_bad_eraseblocks_per_lun: maximum number of bad eraseblocks per LUN 25 25 * @planes_per_lun: number of planes per LUN 26 26 * @luns_per_target: number of LUN per target (target is a synonym for die) 27 27 * @ntargets: total number of targets exposed by the NAND device
+127 -2
include/linux/mtd/spinand.h
··· 375 375 }; 376 376 377 377 /** 378 + * struct spinand_otp_layout - structure to describe the SPI NAND OTP area 379 + * @npages: number of pages in the OTP 380 + * @start_page: start page of the user/factory OTP area. 381 + */ 382 + struct spinand_otp_layout { 383 + unsigned int npages; 384 + unsigned int start_page; 385 + }; 386 + 387 + /** 388 + * struct spinand_fact_otp_ops - SPI NAND OTP methods for factory area 389 + * @info: get the OTP area information 390 + * @read: read from the SPI NAND OTP area 391 + */ 392 + struct spinand_fact_otp_ops { 393 + int (*info)(struct spinand_device *spinand, size_t len, 394 + struct otp_info *buf, size_t *retlen); 395 + int (*read)(struct spinand_device *spinand, loff_t from, size_t len, 396 + size_t *retlen, u8 *buf); 397 + }; 398 + 399 + /** 400 + * struct spinand_user_otp_ops - SPI NAND OTP methods for user area 401 + * @info: get the OTP area information 402 + * @lock: lock an OTP region 403 + * @erase: erase an OTP region 404 + * @read: read from the SPI NAND OTP area 405 + * @write: write to the SPI NAND OTP area 406 + */ 407 + struct spinand_user_otp_ops { 408 + int (*info)(struct spinand_device *spinand, size_t len, 409 + struct otp_info *buf, size_t *retlen); 410 + int (*lock)(struct spinand_device *spinand, loff_t from, size_t len); 411 + int (*erase)(struct spinand_device *spinand, loff_t from, size_t len); 412 + int (*read)(struct spinand_device *spinand, loff_t from, size_t len, 413 + size_t *retlen, u8 *buf); 414 + int (*write)(struct spinand_device *spinand, loff_t from, size_t len, 415 + size_t *retlen, const u8 *buf); 416 + }; 417 + 418 + /** 419 + * struct spinand_fact_otp - SPI NAND OTP grouping structure for factory area 420 + * @layout: OTP region layout 421 + * @ops: OTP access ops 422 + */ 423 + struct spinand_fact_otp { 424 + const struct spinand_otp_layout layout; 425 + const struct spinand_fact_otp_ops *ops; 426 + }; 427 + 428 + /** 429 + * struct spinand_user_otp - SPI NAND OTP grouping 
structure for user area 430 + * @layout: OTP region layout 431 + * @ops: OTP access ops 432 + */ 433 + struct spinand_user_otp { 434 + const struct spinand_otp_layout layout; 435 + const struct spinand_user_otp_ops *ops; 436 + }; 437 + 438 + /** 378 439 * struct spinand_info - Structure used to describe SPI NAND chips 379 440 * @model: model name 380 441 * @devid: device ID ··· 450 389 * @select_target: function used to select a target/die. Required only for 451 390 * multi-die chips 452 391 * @set_cont_read: enable/disable continuous cached reads 392 + * @fact_otp: SPI NAND factory OTP info. 393 + * @user_otp: SPI NAND user OTP info. 394 + * @read_retries: the number of read retry modes supported 395 + * @set_read_retry: enable/disable read retry for data recovery 453 396 * 454 397 * Each SPI NAND manufacturer driver should have a spinand_info table 455 398 * describing all the chips supported by the driver. ··· 474 409 unsigned int target); 475 410 int (*set_cont_read)(struct spinand_device *spinand, 476 411 bool enable); 412 + struct spinand_fact_otp fact_otp; 413 + struct spinand_user_otp user_otp; 414 + unsigned int read_retries; 415 + int (*set_read_retry)(struct spinand_device *spinand, 416 + unsigned int read_retry); 477 417 }; 478 418 479 419 #define SPINAND_ID(__method, ...) 
\ ··· 502 432 } 503 433 504 434 #define SPINAND_SELECT_TARGET(__func) \ 505 - .select_target = __func, 435 + .select_target = __func 506 436 507 437 #define SPINAND_CONT_READ(__set_cont_read) \ 508 - .set_cont_read = __set_cont_read, 438 + .set_cont_read = __set_cont_read 439 + 440 + #define SPINAND_FACT_OTP_INFO(__npages, __start_page, __ops) \ 441 + .fact_otp = { \ 442 + .layout = { \ 443 + .npages = __npages, \ 444 + .start_page = __start_page, \ 445 + }, \ 446 + .ops = __ops, \ 447 + } 448 + 449 + #define SPINAND_USER_OTP_INFO(__npages, __start_page, __ops) \ 450 + .user_otp = { \ 451 + .layout = { \ 452 + .npages = __npages, \ 453 + .start_page = __start_page, \ 454 + }, \ 455 + .ops = __ops, \ 456 + } 457 + 458 + #define SPINAND_READ_RETRY(__read_retries, __set_read_retry) \ 459 + .read_retries = __read_retries, \ 460 + .set_read_retry = __set_read_retry 509 461 510 462 #define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \ 511 463 __flags, ...) \ ··· 579 487 * actually relevant to enable this feature. 580 488 * @set_cont_read: Enable/disable the continuous read feature 581 489 * @priv: manufacturer private data 490 + * @fact_otp: SPI NAND factory OTP info. 491 + * @user_otp: SPI NAND user OTP info. 
492 + * @read_retries: the number of read retry modes supported 493 + * @set_read_retry: Enable/disable the read retry feature 582 494 */ 583 495 struct spinand_device { 584 496 struct nand_device base; ··· 615 519 bool cont_read_possible; 616 520 int (*set_cont_read)(struct spinand_device *spinand, 617 521 bool enable); 522 + 523 + const struct spinand_fact_otp *fact_otp; 524 + const struct spinand_user_otp *user_otp; 525 + 526 + unsigned int read_retries; 527 + int (*set_read_retry)(struct spinand_device *spinand, 528 + unsigned int retry_mode); 618 529 }; 619 530 620 531 /** ··· 690 587 int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val); 691 588 int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val); 692 589 int spinand_select_target(struct spinand_device *spinand, unsigned int target); 590 + 591 + int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us, 592 + unsigned long poll_delay_us, u8 *s); 593 + 594 + int spinand_read_page(struct spinand_device *spinand, 595 + const struct nand_page_io_req *req); 596 + 597 + int spinand_write_page(struct spinand_device *spinand, 598 + const struct nand_page_io_req *req); 599 + 600 + size_t spinand_otp_page_size(struct spinand_device *spinand); 601 + size_t spinand_fact_otp_size(struct spinand_device *spinand); 602 + size_t spinand_user_otp_size(struct spinand_device *spinand); 603 + 604 + int spinand_fact_otp_read(struct spinand_device *spinand, loff_t ofs, 605 + size_t len, size_t *retlen, u8 *buf); 606 + int spinand_user_otp_read(struct spinand_device *spinand, loff_t ofs, 607 + size_t len, size_t *retlen, u8 *buf); 608 + int spinand_user_otp_write(struct spinand_device *spinand, loff_t ofs, 609 + size_t len, size_t *retlen, const u8 *buf); 610 + 611 + int spinand_set_mtd_otp_ops(struct spinand_device *spinand); 693 612 694 613 #endif /* __LINUX_MTD_SPINAND_H */