Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'spi-nor/for-5.9' of https://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux into mtd/next

SPI NOR core changes:
- Disable Quad Mode in spi_nor_restore().
- Don't abort BFPT parsing when QER reserved value is used.
- Add support for, and update the capabilities of, a few flashes.
- Drop s70fl01gs flash: it does not support RDSR(05h) which
is critical for erase/write.
- Merge the SPIMEM DTR bits in spi-nor/next to avoid conflicts
during the release cycle.

SPI NOR controller drivers changes:
- Move the cadence-quadspi driver to spi-mem. The series was
taken through the SPI tree. Merge it also in spi-nor/next
to avoid conflicts during the release cycle.
- intel-spi:
- Add new PCI IDs.
- Ignore the Write Disable command, the controller doesn't
support it.
- Fix performance regression.

+348 -400
-11
drivers/mtd/spi-nor/controllers/Kconfig
··· 9 9 and support for the SPI flash memory controller (SPI) for 10 10 the host firmware. The implementation only supports SPI NOR. 11 11 12 - config SPI_CADENCE_QUADSPI 13 - tristate "Cadence Quad SPI controller" 14 - depends on OF && (ARM || ARM64 || COMPILE_TEST) 15 - help 16 - Enable support for the Cadence Quad SPI Flash controller. 17 - 18 - Cadence QSPI is a specialized controller for connecting an SPI 19 - Flash over 1/2/4-bit wide bus. Enable this option if you have a 20 - device with a Cadence QSPI controller and want to access the 21 - Flash as an MTD device. 22 - 23 12 config SPI_HISI_SFC 24 13 tristate "Hisilicon FMC SPI NOR Flash Controller(SFC)" 25 14 depends on ARCH_HISI || COMPILE_TEST
-1
drivers/mtd/spi-nor/controllers/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o 3 - obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o 4 3 obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o 5 4 obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o 6 5 obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
+220 -341
drivers/mtd/spi-nor/controllers/cadence-quadspi.c drivers/spi/spi-cadence-quadspi.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Driver for Cadence QSPI Controller 4 - * 5 - * Copyright Altera Corporation (C) 2012-2014. All rights reserved. 6 - */ 2 + // 3 + // Driver for Cadence QSPI Controller 4 + // 5 + // Copyright Altera Corporation (C) 2012-2014. All rights reserved. 6 + // Copyright Intel Corporation (C) 2019-2020. All rights reserved. 7 + // Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com 8 + 7 9 #include <linux/clk.h> 8 10 #include <linux/completion.h> 9 11 #include <linux/delay.h> ··· 19 17 #include <linux/jiffies.h> 20 18 #include <linux/kernel.h> 21 19 #include <linux/module.h> 22 - #include <linux/mtd/mtd.h> 23 - #include <linux/mtd/partitions.h> 24 - #include <linux/mtd/spi-nor.h> 25 20 #include <linux/of_device.h> 26 21 #include <linux/of.h> 27 22 #include <linux/platform_device.h> ··· 26 27 #include <linux/reset.h> 27 28 #include <linux/sched.h> 28 29 #include <linux/spi/spi.h> 30 + #include <linux/spi/spi-mem.h> 29 31 #include <linux/timer.h> 30 32 31 33 #define CQSPI_NAME "cadence-qspi" ··· 34 34 35 35 /* Quirks */ 36 36 #define CQSPI_NEEDS_WR_DELAY BIT(0) 37 + #define CQSPI_DISABLE_DAC_MODE BIT(1) 37 38 38 - /* Capabilities mask */ 39 - #define CQSPI_BASE_HWCAPS_MASK \ 40 - (SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST | \ 41 - SNOR_HWCAPS_READ_1_1_2 | SNOR_HWCAPS_READ_1_1_4 | \ 42 - SNOR_HWCAPS_PP) 39 + /* Capabilities */ 40 + #define CQSPI_SUPPORTS_OCTAL BIT(0) 43 41 44 42 struct cqspi_st; 45 43 46 44 struct cqspi_flash_pdata { 47 - struct spi_nor nor; 48 45 struct cqspi_st *cqspi; 49 46 u32 clk_rate; 50 47 u32 read_delay; ··· 53 56 u8 addr_width; 54 57 u8 data_width; 55 58 u8 cs; 56 - bool registered; 57 - bool use_direct_mode; 58 59 }; 59 60 60 61 struct cqspi_st { ··· 65 70 void __iomem *ahb_base; 66 71 resource_size_t ahb_size; 67 72 struct completion transfer_complete; 68 - struct mutex bus_mutex; 69 73 70 74 struct dma_chan *rx_chan; 71 75 struct completion rx_dma_complete; 72 76 
dma_addr_t mmap_phys_base; 73 77 74 78 int current_cs; 75 - int current_page_size; 76 - int current_erase_size; 77 - int current_addr_width; 78 79 unsigned long master_ref_clk_hz; 79 80 bool is_decoded_cs; 80 81 u32 fifo_depth; ··· 78 87 bool rclk_en; 79 88 u32 trigger_address; 80 89 u32 wr_delay; 90 + bool use_direct_mode; 81 91 struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT]; 82 92 }; 83 93 ··· 277 285 return IRQ_HANDLED; 278 286 } 279 287 280 - static unsigned int cqspi_calc_rdreg(struct spi_nor *nor) 288 + static unsigned int cqspi_calc_rdreg(struct cqspi_flash_pdata *f_pdata) 281 289 { 282 - struct cqspi_flash_pdata *f_pdata = nor->priv; 283 290 u32 rdreg = 0; 284 291 285 292 rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB; ··· 345 354 return cqspi_wait_idle(cqspi); 346 355 } 347 356 348 - static int cqspi_command_read(struct spi_nor *nor, u8 opcode, 349 - u8 *rxbuf, size_t n_rx) 357 + static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata, 358 + const struct spi_mem_op *op) 350 359 { 351 - struct cqspi_flash_pdata *f_pdata = nor->priv; 352 360 struct cqspi_st *cqspi = f_pdata->cqspi; 353 361 void __iomem *reg_base = cqspi->iobase; 362 + u8 *rxbuf = op->data.buf.in; 363 + u8 opcode = op->cmd.opcode; 364 + size_t n_rx = op->data.nbytes; 354 365 unsigned int rdreg; 355 366 unsigned int reg; 356 367 size_t read_len; 357 368 int status; 358 369 359 370 if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) { 360 - dev_err(nor->dev, 371 + dev_err(&cqspi->pdev->dev, 361 372 "Invalid input argument, len %zu rxbuf 0x%p\n", 362 373 n_rx, rxbuf); 363 374 return -EINVAL; ··· 367 374 368 375 reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB; 369 376 370 - rdreg = cqspi_calc_rdreg(nor); 377 + rdreg = cqspi_calc_rdreg(f_pdata); 371 378 writel(rdreg, reg_base + CQSPI_REG_RD_INSTR); 372 379 373 380 reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB); ··· 396 403 return 0; 397 404 } 398 405 399 - static int cqspi_command_write(struct spi_nor *nor, const u8 
opcode, 400 - const u8 *txbuf, size_t n_tx) 406 + static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata, 407 + const struct spi_mem_op *op) 401 408 { 402 - struct cqspi_flash_pdata *f_pdata = nor->priv; 403 409 struct cqspi_st *cqspi = f_pdata->cqspi; 404 410 void __iomem *reg_base = cqspi->iobase; 411 + const u8 opcode = op->cmd.opcode; 412 + const u8 *txbuf = op->data.buf.out; 413 + size_t n_tx = op->data.nbytes; 405 414 unsigned int reg; 406 415 unsigned int data; 407 416 size_t write_len; 408 - int ret; 409 417 410 418 if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) { 411 - dev_err(nor->dev, 419 + dev_err(&cqspi->pdev->dev, 412 420 "Invalid input argument, cmdlen %zu txbuf 0x%p\n", 413 421 n_tx, txbuf); 414 422 return -EINVAL; 415 423 } 416 424 417 425 reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB; 426 + 427 + if (op->addr.nbytes) { 428 + reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB); 429 + reg |= ((op->addr.nbytes - 1) & 430 + CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) 431 + << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB; 432 + 433 + writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS); 434 + } 435 + 418 436 if (n_tx) { 419 437 reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB); 420 438 reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK) ··· 443 439 writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER); 444 440 } 445 441 } 446 - ret = cqspi_exec_flash_cmd(cqspi, reg); 447 - return ret; 448 - } 449 - 450 - static int cqspi_command_write_addr(struct spi_nor *nor, 451 - const u8 opcode, const unsigned int addr) 452 - { 453 - struct cqspi_flash_pdata *f_pdata = nor->priv; 454 - struct cqspi_st *cqspi = f_pdata->cqspi; 455 - void __iomem *reg_base = cqspi->iobase; 456 - unsigned int reg; 457 - 458 - reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB; 459 - reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB); 460 - reg |= ((nor->addr_width - 1) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) 461 - << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB; 462 - 463 - writel(addr, reg_base + CQSPI_REG_CMDADDRESS); 464 
442 465 443 return cqspi_exec_flash_cmd(cqspi, reg); 466 444 } 467 445 468 - static int cqspi_read_setup(struct spi_nor *nor) 446 + static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata, 447 + const struct spi_mem_op *op) 469 448 { 470 - struct cqspi_flash_pdata *f_pdata = nor->priv; 471 449 struct cqspi_st *cqspi = f_pdata->cqspi; 472 450 void __iomem *reg_base = cqspi->iobase; 473 451 unsigned int dummy_clk = 0; 474 452 unsigned int reg; 475 453 476 - reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB; 477 - reg |= cqspi_calc_rdreg(nor); 454 + reg = op->cmd.opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB; 455 + reg |= cqspi_calc_rdreg(f_pdata); 478 456 479 457 /* Setup dummy clock cycles */ 480 - dummy_clk = nor->read_dummy; 458 + dummy_clk = op->dummy.nbytes * 8; 481 459 if (dummy_clk > CQSPI_DUMMY_CLKS_MAX) 482 460 dummy_clk = CQSPI_DUMMY_CLKS_MAX; 483 461 484 - if (dummy_clk / 8) { 485 - reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB); 486 - /* Set mode bits high to ensure chip doesn't enter XIP */ 487 - writel(0xFF, reg_base + CQSPI_REG_MODE_BIT); 488 - 489 - /* Need to subtract the mode byte (8 clocks). 
*/ 490 - if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD) 491 - dummy_clk -= 8; 492 - 493 - if (dummy_clk) 494 - reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK) 495 - << CQSPI_REG_RD_INSTR_DUMMY_LSB; 496 - } 462 + if (dummy_clk) 463 + reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK) 464 + << CQSPI_REG_RD_INSTR_DUMMY_LSB; 497 465 498 466 writel(reg, reg_base + CQSPI_REG_RD_INSTR); 499 467 500 468 /* Set address width */ 501 469 reg = readl(reg_base + CQSPI_REG_SIZE); 502 470 reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; 503 - reg |= (nor->addr_width - 1); 471 + reg |= (op->addr.nbytes - 1); 504 472 writel(reg, reg_base + CQSPI_REG_SIZE); 505 473 return 0; 506 474 } 507 475 508 - static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf, 509 - loff_t from_addr, const size_t n_rx) 476 + static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata, 477 + u8 *rxbuf, loff_t from_addr, 478 + const size_t n_rx) 510 479 { 511 - struct cqspi_flash_pdata *f_pdata = nor->priv; 512 480 struct cqspi_st *cqspi = f_pdata->cqspi; 481 + struct device *dev = &cqspi->pdev->dev; 513 482 void __iomem *reg_base = cqspi->iobase; 514 483 void __iomem *ahb_base = cqspi->ahb_base; 515 484 unsigned int remaining = n_rx; ··· 505 528 506 529 while (remaining > 0) { 507 530 if (!wait_for_completion_timeout(&cqspi->transfer_complete, 508 - msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) 531 + msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) 509 532 ret = -ETIMEDOUT; 510 533 511 534 bytes_to_read = cqspi_get_rd_sram_level(cqspi); 512 535 513 536 if (ret && bytes_to_read == 0) { 514 - dev_err(nor->dev, "Indirect read timeout, no bytes\n"); 537 + dev_err(dev, "Indirect read timeout, no bytes\n"); 515 538 goto failrd; 516 539 } 517 540 ··· 547 570 ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD, 548 571 CQSPI_REG_INDIRECTRD_DONE_MASK, 0); 549 572 if (ret) { 550 - dev_err(nor->dev, 551 - "Indirect read completion error (%i)\n", ret); 573 + dev_err(dev, "Indirect read completion error 
(%i)\n", ret); 552 574 goto failrd; 553 575 } 554 576 ··· 569 593 return ret; 570 594 } 571 595 572 - static int cqspi_write_setup(struct spi_nor *nor) 596 + static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata, 597 + const struct spi_mem_op *op) 573 598 { 574 599 unsigned int reg; 575 - struct cqspi_flash_pdata *f_pdata = nor->priv; 576 600 struct cqspi_st *cqspi = f_pdata->cqspi; 577 601 void __iomem *reg_base = cqspi->iobase; 578 602 579 603 /* Set opcode. */ 580 - reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB; 604 + reg = op->cmd.opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB; 581 605 writel(reg, reg_base + CQSPI_REG_WR_INSTR); 582 - reg = cqspi_calc_rdreg(nor); 606 + reg = cqspi_calc_rdreg(f_pdata); 583 607 writel(reg, reg_base + CQSPI_REG_RD_INSTR); 584 608 585 609 reg = readl(reg_base + CQSPI_REG_SIZE); 586 610 reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; 587 - reg |= (nor->addr_width - 1); 611 + reg |= (op->addr.nbytes - 1); 588 612 writel(reg, reg_base + CQSPI_REG_SIZE); 589 613 return 0; 590 614 } 591 615 592 - static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr, 593 - const u8 *txbuf, const size_t n_tx) 616 + static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata, 617 + loff_t to_addr, const u8 *txbuf, 618 + const size_t n_tx) 594 619 { 595 - const unsigned int page_size = nor->page_size; 596 - struct cqspi_flash_pdata *f_pdata = nor->priv; 597 620 struct cqspi_st *cqspi = f_pdata->cqspi; 621 + struct device *dev = &cqspi->pdev->dev; 598 622 void __iomem *reg_base = cqspi->iobase; 599 623 unsigned int remaining = n_tx; 600 624 unsigned int write_bytes; ··· 624 648 while (remaining > 0) { 625 649 size_t write_words, mod_bytes; 626 650 627 - write_bytes = remaining > page_size ? page_size : remaining; 651 + write_bytes = remaining; 628 652 write_words = write_bytes / 4; 629 653 mod_bytes = write_bytes % 4; 630 654 /* Write 4 bytes at a time then single bytes. 
*/ ··· 641 665 } 642 666 643 667 if (!wait_for_completion_timeout(&cqspi->transfer_complete, 644 - msecs_to_jiffies(CQSPI_TIMEOUT_MS))) { 645 - dev_err(nor->dev, "Indirect write timeout\n"); 668 + msecs_to_jiffies(CQSPI_TIMEOUT_MS))) { 669 + dev_err(dev, "Indirect write timeout\n"); 646 670 ret = -ETIMEDOUT; 647 671 goto failwr; 648 672 } ··· 657 681 ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR, 658 682 CQSPI_REG_INDIRECTWR_DONE_MASK, 0); 659 683 if (ret) { 660 - dev_err(nor->dev, 661 - "Indirect write completion error (%i)\n", ret); 684 + dev_err(dev, "Indirect write completion error (%i)\n", ret); 662 685 goto failwr; 663 686 } 664 687 ··· 681 706 return ret; 682 707 } 683 708 684 - static void cqspi_chipselect(struct spi_nor *nor) 709 + static void cqspi_chipselect(struct cqspi_flash_pdata *f_pdata) 685 710 { 686 - struct cqspi_flash_pdata *f_pdata = nor->priv; 687 711 struct cqspi_st *cqspi = f_pdata->cqspi; 688 712 void __iomem *reg_base = cqspi->iobase; 689 713 unsigned int chip_select = f_pdata->cs; ··· 710 736 writel(reg, reg_base + CQSPI_REG_CONFIG); 711 737 } 712 738 713 - static void cqspi_configure_cs_and_sizes(struct spi_nor *nor) 714 - { 715 - struct cqspi_flash_pdata *f_pdata = nor->priv; 716 - struct cqspi_st *cqspi = f_pdata->cqspi; 717 - void __iomem *iobase = cqspi->iobase; 718 - unsigned int reg; 719 - 720 - /* configure page size and block size. 
*/ 721 - reg = readl(iobase + CQSPI_REG_SIZE); 722 - reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB); 723 - reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB); 724 - reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; 725 - reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB); 726 - reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB); 727 - reg |= (nor->addr_width - 1); 728 - writel(reg, iobase + CQSPI_REG_SIZE); 729 - 730 - /* configure the chip select */ 731 - cqspi_chipselect(nor); 732 - 733 - /* Store the new configuration of the controller */ 734 - cqspi->current_page_size = nor->page_size; 735 - cqspi->current_erase_size = nor->mtd.erasesize; 736 - cqspi->current_addr_width = nor->addr_width; 737 - } 738 - 739 739 static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz, 740 740 const unsigned int ns_val) 741 741 { ··· 721 773 return ticks; 722 774 } 723 775 724 - static void cqspi_delay(struct spi_nor *nor) 776 + static void cqspi_delay(struct cqspi_flash_pdata *f_pdata) 725 777 { 726 - struct cqspi_flash_pdata *f_pdata = nor->priv; 727 778 struct cqspi_st *cqspi = f_pdata->cqspi; 728 779 void __iomem *iobase = cqspi->iobase; 729 780 const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz; ··· 806 859 writel(reg, reg_base + CQSPI_REG_CONFIG); 807 860 } 808 861 809 - static void cqspi_configure(struct spi_nor *nor) 862 + static void cqspi_configure(struct cqspi_flash_pdata *f_pdata, 863 + unsigned long sclk) 810 864 { 811 - struct cqspi_flash_pdata *f_pdata = nor->priv; 812 865 struct cqspi_st *cqspi = f_pdata->cqspi; 813 - const unsigned int sclk = f_pdata->clk_rate; 814 866 int switch_cs = (cqspi->current_cs != f_pdata->cs); 815 867 int switch_ck = (cqspi->sclk != sclk); 816 - 817 - if ((cqspi->current_page_size != nor->page_size) || 818 - (cqspi->current_erase_size != nor->mtd.erasesize) || 819 - (cqspi->current_addr_width != nor->addr_width)) 820 - switch_cs = 1; 821 868 822 869 if (switch_cs || switch_ck) 823 870 
cqspi_controller_enable(cqspi, 0); ··· 819 878 /* Switch chip select. */ 820 879 if (switch_cs) { 821 880 cqspi->current_cs = f_pdata->cs; 822 - cqspi_configure_cs_and_sizes(nor); 881 + cqspi_chipselect(f_pdata); 823 882 } 824 883 825 884 /* Setup baudrate divisor and delays */ 826 885 if (switch_ck) { 827 886 cqspi->sclk = sclk; 828 887 cqspi_config_baudrate_div(cqspi); 829 - cqspi_delay(nor); 888 + cqspi_delay(f_pdata); 830 889 cqspi_readdata_capture(cqspi, !cqspi->rclk_en, 831 890 f_pdata->read_delay); 832 891 } ··· 835 894 cqspi_controller_enable(cqspi, 1); 836 895 } 837 896 838 - static int cqspi_set_protocol(struct spi_nor *nor, const int read) 897 + static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata, 898 + const struct spi_mem_op *op) 839 899 { 840 - struct cqspi_flash_pdata *f_pdata = nor->priv; 841 - 842 900 f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE; 843 901 f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE; 844 902 f_pdata->data_width = CQSPI_INST_TYPE_SINGLE; 845 903 846 - if (read) { 847 - switch (nor->read_proto) { 848 - case SNOR_PROTO_1_1_1: 904 + if (op->data.dir == SPI_MEM_DATA_IN) { 905 + switch (op->data.buswidth) { 906 + case 1: 849 907 f_pdata->data_width = CQSPI_INST_TYPE_SINGLE; 850 908 break; 851 - case SNOR_PROTO_1_1_2: 909 + case 2: 852 910 f_pdata->data_width = CQSPI_INST_TYPE_DUAL; 853 911 break; 854 - case SNOR_PROTO_1_1_4: 912 + case 4: 855 913 f_pdata->data_width = CQSPI_INST_TYPE_QUAD; 856 914 break; 857 - case SNOR_PROTO_1_1_8: 915 + case 8: 858 916 f_pdata->data_width = CQSPI_INST_TYPE_OCTAL; 859 917 break; 860 918 default: ··· 861 921 } 862 922 } 863 923 864 - cqspi_configure(nor); 865 - 866 924 return 0; 867 925 } 868 926 869 - static ssize_t cqspi_write(struct spi_nor *nor, loff_t to, 870 - size_t len, const u_char *buf) 927 + static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata, 928 + const struct spi_mem_op *op) 871 929 { 872 - struct cqspi_flash_pdata *f_pdata = nor->priv; 873 930 struct cqspi_st *cqspi = 
f_pdata->cqspi; 931 + loff_t to = op->addr.val; 932 + size_t len = op->data.nbytes; 933 + const u_char *buf = op->data.buf.out; 874 934 int ret; 875 935 876 - ret = cqspi_set_protocol(nor, 0); 936 + ret = cqspi_set_protocol(f_pdata, op); 877 937 if (ret) 878 938 return ret; 879 939 880 - ret = cqspi_write_setup(nor); 940 + ret = cqspi_write_setup(f_pdata, op); 881 941 if (ret) 882 942 return ret; 883 943 884 - if (f_pdata->use_direct_mode) { 944 + if (cqspi->use_direct_mode && ((to + len) <= cqspi->ahb_size)) { 885 945 memcpy_toio(cqspi->ahb_base + to, buf, len); 886 - ret = cqspi_wait_idle(cqspi); 887 - } else { 888 - ret = cqspi_indirect_write_execute(nor, to, buf, len); 946 + return cqspi_wait_idle(cqspi); 889 947 } 890 - if (ret) 891 - return ret; 892 948 893 - return len; 949 + return cqspi_indirect_write_execute(f_pdata, to, buf, len); 894 950 } 895 951 896 952 static void cqspi_rx_dma_callback(void *param) ··· 896 960 complete(&cqspi->rx_dma_complete); 897 961 } 898 962 899 - static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf, 900 - loff_t from, size_t len) 963 + static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata, 964 + u_char *buf, loff_t from, size_t len) 901 965 { 902 - struct cqspi_flash_pdata *f_pdata = nor->priv; 903 966 struct cqspi_st *cqspi = f_pdata->cqspi; 967 + struct device *dev = &cqspi->pdev->dev; 904 968 enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; 905 969 dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from; 906 970 int ret = 0; ··· 913 977 return 0; 914 978 } 915 979 916 - dma_dst = dma_map_single(nor->dev, buf, len, DMA_FROM_DEVICE); 917 - if (dma_mapping_error(nor->dev, dma_dst)) { 918 - dev_err(nor->dev, "dma mapping failed\n"); 980 + dma_dst = dma_map_single(dev, buf, len, DMA_FROM_DEVICE); 981 + if (dma_mapping_error(dev, dma_dst)) { 982 + dev_err(dev, "dma mapping failed\n"); 919 983 return -ENOMEM; 920 984 } 921 985 tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, 
dma_dst, dma_src, 922 986 len, flags); 923 987 if (!tx) { 924 - dev_err(nor->dev, "device_prep_dma_memcpy error\n"); 988 + dev_err(dev, "device_prep_dma_memcpy error\n"); 925 989 ret = -EIO; 926 990 goto err_unmap; 927 991 } ··· 933 997 934 998 ret = dma_submit_error(cookie); 935 999 if (ret) { 936 - dev_err(nor->dev, "dma_submit_error %d\n", cookie); 1000 + dev_err(dev, "dma_submit_error %d\n", cookie); 937 1001 ret = -EIO; 938 1002 goto err_unmap; 939 1003 } ··· 942 1006 if (!wait_for_completion_timeout(&cqspi->rx_dma_complete, 943 1007 msecs_to_jiffies(len))) { 944 1008 dmaengine_terminate_sync(cqspi->rx_chan); 945 - dev_err(nor->dev, "DMA wait_for_completion_timeout\n"); 1009 + dev_err(dev, "DMA wait_for_completion_timeout\n"); 946 1010 ret = -ETIMEDOUT; 947 1011 goto err_unmap; 948 1012 } 949 1013 950 1014 err_unmap: 951 - dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE); 1015 + dma_unmap_single(dev, dma_dst, len, DMA_FROM_DEVICE); 952 1016 953 1017 return ret; 954 1018 } 955 1019 956 - static ssize_t cqspi_read(struct spi_nor *nor, loff_t from, 957 - size_t len, u_char *buf) 1020 + static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata, 1021 + const struct spi_mem_op *op) 958 1022 { 959 - struct cqspi_flash_pdata *f_pdata = nor->priv; 960 - int ret; 961 - 962 - ret = cqspi_set_protocol(nor, 1); 963 - if (ret) 964 - return ret; 965 - 966 - ret = cqspi_read_setup(nor); 967 - if (ret) 968 - return ret; 969 - 970 - if (f_pdata->use_direct_mode) 971 - ret = cqspi_direct_read_execute(nor, buf, from, len); 972 - else 973 - ret = cqspi_indirect_read_execute(nor, buf, from, len); 974 - if (ret) 975 - return ret; 976 - 977 - return len; 978 - } 979 - 980 - static int cqspi_erase(struct spi_nor *nor, loff_t offs) 981 - { 982 - int ret; 983 - 984 - ret = cqspi_set_protocol(nor, 0); 985 - if (ret) 986 - return ret; 987 - 988 - /* Send write enable, then erase commands. 
*/ 989 - ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN, NULL, 0); 990 - if (ret) 991 - return ret; 992 - 993 - /* Set up command buffer. */ 994 - ret = cqspi_command_write_addr(nor, nor->erase_opcode, offs); 995 - if (ret) 996 - return ret; 997 - 998 - return 0; 999 - } 1000 - 1001 - static int cqspi_prep(struct spi_nor *nor) 1002 - { 1003 - struct cqspi_flash_pdata *f_pdata = nor->priv; 1004 1023 struct cqspi_st *cqspi = f_pdata->cqspi; 1024 + loff_t from = op->addr.val; 1025 + size_t len = op->data.nbytes; 1026 + u_char *buf = op->data.buf.in; 1027 + int ret; 1005 1028 1006 - mutex_lock(&cqspi->bus_mutex); 1029 + ret = cqspi_set_protocol(f_pdata, op); 1030 + if (ret) 1031 + return ret; 1007 1032 1008 - return 0; 1033 + ret = cqspi_read_setup(f_pdata, op); 1034 + if (ret) 1035 + return ret; 1036 + 1037 + if (cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size)) 1038 + return cqspi_direct_read_execute(f_pdata, buf, from, len); 1039 + 1040 + return cqspi_indirect_read_execute(f_pdata, buf, from, len); 1009 1041 } 1010 1042 1011 - static void cqspi_unprep(struct spi_nor *nor) 1043 + static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op) 1012 1044 { 1013 - struct cqspi_flash_pdata *f_pdata = nor->priv; 1014 - struct cqspi_st *cqspi = f_pdata->cqspi; 1045 + struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master); 1046 + struct cqspi_flash_pdata *f_pdata; 1015 1047 1016 - mutex_unlock(&cqspi->bus_mutex); 1048 + f_pdata = &cqspi->f_pdata[mem->spi->chip_select]; 1049 + cqspi_configure(f_pdata, mem->spi->max_speed_hz); 1050 + 1051 + if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) { 1052 + if (!op->addr.nbytes) 1053 + return cqspi_command_read(f_pdata, op); 1054 + 1055 + return cqspi_read(f_pdata, op); 1056 + } 1057 + 1058 + if (!op->addr.nbytes || !op->data.buf.out) 1059 + return cqspi_command_write(f_pdata, op); 1060 + 1061 + return cqspi_write(f_pdata, op); 1017 1062 } 1018 1063 1019 - static int 
cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len) 1064 + static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) 1020 1065 { 1021 1066 int ret; 1022 1067 1023 - ret = cqspi_set_protocol(nor, 0); 1024 - if (!ret) 1025 - ret = cqspi_command_read(nor, opcode, buf, len); 1026 - 1027 - return ret; 1028 - } 1029 - 1030 - static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, 1031 - size_t len) 1032 - { 1033 - int ret; 1034 - 1035 - ret = cqspi_set_protocol(nor, 0); 1036 - if (!ret) 1037 - ret = cqspi_command_write(nor, opcode, buf, len); 1068 + ret = cqspi_mem_process(mem, op); 1069 + if (ret) 1070 + dev_err(&mem->spi->dev, "operation failed with %d\n", ret); 1038 1071 1039 1072 return ret; 1040 1073 } ··· 1045 1140 return 0; 1046 1141 } 1047 1142 1048 - static int cqspi_of_get_pdata(struct platform_device *pdev) 1143 + static int cqspi_of_get_pdata(struct cqspi_st *cqspi) 1049 1144 { 1050 - struct device_node *np = pdev->dev.of_node; 1051 - struct cqspi_st *cqspi = platform_get_drvdata(pdev); 1145 + struct device *dev = &cqspi->pdev->dev; 1146 + struct device_node *np = dev->of_node; 1052 1147 1053 1148 cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs"); 1054 1149 1055 1150 if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) { 1056 - dev_err(&pdev->dev, "couldn't determine fifo-depth\n"); 1151 + dev_err(dev, "couldn't determine fifo-depth\n"); 1057 1152 return -ENXIO; 1058 1153 } 1059 1154 1060 1155 if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) { 1061 - dev_err(&pdev->dev, "couldn't determine fifo-width\n"); 1156 + dev_err(dev, "couldn't determine fifo-width\n"); 1062 1157 return -ENXIO; 1063 1158 } 1064 1159 1065 1160 if (of_property_read_u32(np, "cdns,trigger-address", 1066 1161 &cqspi->trigger_address)) { 1067 - dev_err(&pdev->dev, "couldn't determine trigger-address\n"); 1162 + dev_err(dev, "couldn't determine trigger-address\n"); 1068 1163 return 
-ENXIO; 1069 1164 } 1070 1165 ··· 1107 1202 cqspi_controller_enable(cqspi, 1); 1108 1203 } 1109 1204 1110 - static void cqspi_request_mmap_dma(struct cqspi_st *cqspi) 1205 + static int cqspi_request_mmap_dma(struct cqspi_st *cqspi) 1111 1206 { 1112 1207 dma_cap_mask_t mask; 1113 1208 ··· 1116 1211 1117 1212 cqspi->rx_chan = dma_request_chan_by_mask(&mask); 1118 1213 if (IS_ERR(cqspi->rx_chan)) { 1119 - dev_err(&cqspi->pdev->dev, "No Rx DMA available\n"); 1214 + int ret = PTR_ERR(cqspi->rx_chan); 1215 + 1216 + if (ret != -EPROBE_DEFER) 1217 + dev_err(&cqspi->pdev->dev, "No Rx DMA available\n"); 1120 1218 cqspi->rx_chan = NULL; 1219 + return ret; 1121 1220 } 1122 1221 init_completion(&cqspi->rx_dma_complete); 1222 + 1223 + return 0; 1123 1224 } 1124 1225 1125 - static const struct spi_nor_controller_ops cqspi_controller_ops = { 1126 - .prepare = cqspi_prep, 1127 - .unprepare = cqspi_unprep, 1128 - .read_reg = cqspi_read_reg, 1129 - .write_reg = cqspi_write_reg, 1130 - .read = cqspi_read, 1131 - .write = cqspi_write, 1132 - .erase = cqspi_erase, 1226 + static const struct spi_controller_mem_ops cqspi_mem_ops = { 1227 + .exec_op = cqspi_exec_mem_op, 1133 1228 }; 1134 1229 1135 - static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np) 1230 + static int cqspi_setup_flash(struct cqspi_st *cqspi) 1136 1231 { 1137 1232 struct platform_device *pdev = cqspi->pdev; 1138 1233 struct device *dev = &pdev->dev; 1139 - const struct cqspi_driver_platdata *ddata; 1140 - struct spi_nor_hwcaps hwcaps; 1234 + struct device_node *np = dev->of_node; 1141 1235 struct cqspi_flash_pdata *f_pdata; 1142 - struct spi_nor *nor; 1143 - struct mtd_info *mtd; 1144 1236 unsigned int cs; 1145 - int i, ret; 1146 - 1147 - ddata = of_device_get_match_data(dev); 1148 - if (!ddata) { 1149 - dev_err(dev, "Couldn't find driver data\n"); 1150 - return -EINVAL; 1151 - } 1152 - hwcaps.mask = ddata->hwcaps_mask; 1237 + int ret; 1153 1238 1154 1239 /* Get flash device data */ 1155 1240 
for_each_available_child_of_node(dev->of_node, np) { 1156 1241 ret = of_property_read_u32(np, "reg", &cs); 1157 1242 if (ret) { 1158 1243 dev_err(dev, "Couldn't determine chip select.\n"); 1159 - goto err; 1244 + return ret; 1160 1245 } 1161 1246 1162 1247 if (cs >= CQSPI_MAX_CHIPSELECT) { 1163 - ret = -EINVAL; 1164 1248 dev_err(dev, "Chip select %d out of range.\n", cs); 1165 - goto err; 1249 + return -EINVAL; 1166 1250 } 1167 1251 1168 1252 f_pdata = &cqspi->f_pdata[cs]; ··· 1160 1266 1161 1267 ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np); 1162 1268 if (ret) 1163 - goto err; 1164 - 1165 - nor = &f_pdata->nor; 1166 - mtd = &nor->mtd; 1167 - 1168 - mtd->priv = nor; 1169 - 1170 - nor->dev = dev; 1171 - spi_nor_set_flash_node(nor, np); 1172 - nor->priv = f_pdata; 1173 - nor->controller_ops = &cqspi_controller_ops; 1174 - 1175 - mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d", 1176 - dev_name(dev), cs); 1177 - if (!mtd->name) { 1178 - ret = -ENOMEM; 1179 - goto err; 1180 - } 1181 - 1182 - ret = spi_nor_scan(nor, NULL, &hwcaps); 1183 - if (ret) 1184 - goto err; 1185 - 1186 - ret = mtd_device_register(mtd, NULL, 0); 1187 - if (ret) 1188 - goto err; 1189 - 1190 - f_pdata->registered = true; 1191 - 1192 - if (mtd->size <= cqspi->ahb_size) { 1193 - f_pdata->use_direct_mode = true; 1194 - dev_dbg(nor->dev, "using direct mode for %s\n", 1195 - mtd->name); 1196 - 1197 - if (!cqspi->rx_chan) 1198 - cqspi_request_mmap_dma(cqspi); 1199 - } 1269 + return ret; 1200 1270 } 1201 1271 1202 1272 return 0; 1203 - 1204 - err: 1205 - for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++) 1206 - if (cqspi->f_pdata[i].registered) 1207 - mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd); 1208 - return ret; 1209 1273 } 1210 1274 1211 1275 static int cqspi_probe(struct platform_device *pdev) 1212 1276 { 1213 - struct device_node *np = pdev->dev.of_node; 1277 + const struct cqspi_driver_platdata *ddata; 1278 + struct reset_control *rstc, *rstc_ocp; 1214 1279 struct device *dev = &pdev->dev; 1280 
+ struct spi_master *master; 1281 + struct resource *res_ahb; 1215 1282 struct cqspi_st *cqspi; 1216 1283 struct resource *res; 1217 - struct resource *res_ahb; 1218 - struct reset_control *rstc, *rstc_ocp; 1219 - const struct cqspi_driver_platdata *ddata; 1220 1284 int ret; 1221 1285 int irq; 1222 1286 1223 - cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL); 1224 - if (!cqspi) 1287 + master = spi_alloc_master(&pdev->dev, sizeof(*cqspi)); 1288 + if (!master) { 1289 + dev_err(&pdev->dev, "spi_alloc_master failed\n"); 1225 1290 return -ENOMEM; 1291 + } 1292 + master->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL; 1293 + master->mem_ops = &cqspi_mem_ops; 1294 + master->dev.of_node = pdev->dev.of_node; 1226 1295 1227 - mutex_init(&cqspi->bus_mutex); 1296 + cqspi = spi_master_get_devdata(master); 1297 + 1228 1298 cqspi->pdev = pdev; 1229 - platform_set_drvdata(pdev, cqspi); 1230 1299 1231 1300 /* Obtain configuration from OF. */ 1232 - ret = cqspi_of_get_pdata(pdev); 1301 + ret = cqspi_of_get_pdata(cqspi); 1233 1302 if (ret) { 1234 1303 dev_err(dev, "Cannot get mandatory OF data.\n"); 1235 - return -ENODEV; 1304 + ret = -ENODEV; 1305 + goto probe_master_put; 1236 1306 } 1237 1307 1238 1308 /* Obtain QSPI clock. */ 1239 1309 cqspi->clk = devm_clk_get(dev, NULL); 1240 1310 if (IS_ERR(cqspi->clk)) { 1241 1311 dev_err(dev, "Cannot claim QSPI clock.\n"); 1242 - return PTR_ERR(cqspi->clk); 1312 + ret = PTR_ERR(cqspi->clk); 1313 + goto probe_master_put; 1243 1314 } 1244 1315 1245 1316 /* Obtain and remap controller address. */ ··· 1212 1353 cqspi->iobase = devm_ioremap_resource(dev, res); 1213 1354 if (IS_ERR(cqspi->iobase)) { 1214 1355 dev_err(dev, "Cannot remap controller address.\n"); 1215 - return PTR_ERR(cqspi->iobase); 1356 + ret = PTR_ERR(cqspi->iobase); 1357 + goto probe_master_put; 1216 1358 } 1217 1359 1218 1360 /* Obtain and remap AHB address. 
*/ ··· 1221 1361 cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb); 1222 1362 if (IS_ERR(cqspi->ahb_base)) { 1223 1363 dev_err(dev, "Cannot remap AHB address.\n"); 1224 - return PTR_ERR(cqspi->ahb_base); 1364 + ret = PTR_ERR(cqspi->ahb_base); 1365 + goto probe_master_put; 1225 1366 } 1226 1367 cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start; 1227 1368 cqspi->ahb_size = resource_size(res_ahb); ··· 1231 1370 1232 1371 /* Obtain IRQ line. */ 1233 1372 irq = platform_get_irq(pdev, 0); 1234 - if (irq < 0) 1235 - return -ENXIO; 1373 + if (irq < 0) { 1374 + ret = -ENXIO; 1375 + goto probe_master_put; 1376 + } 1236 1377 1237 1378 pm_runtime_enable(dev); 1238 1379 ret = pm_runtime_get_sync(dev); 1239 1380 if (ret < 0) { 1240 1381 pm_runtime_put_noidle(dev); 1241 - return ret; 1382 + goto probe_master_put; 1242 1383 } 1243 1384 1244 1385 ret = clk_prepare_enable(cqspi->clk); ··· 1253 1390 rstc = devm_reset_control_get_optional_exclusive(dev, "qspi"); 1254 1391 if (IS_ERR(rstc)) { 1255 1392 dev_err(dev, "Cannot get QSPI reset.\n"); 1256 - return PTR_ERR(rstc); 1393 + goto probe_reset_failed; 1257 1394 } 1258 1395 1259 1396 rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp"); 1260 1397 if (IS_ERR(rstc_ocp)) { 1261 1398 dev_err(dev, "Cannot get QSPI OCP reset.\n"); 1262 - return PTR_ERR(rstc_ocp); 1399 + goto probe_reset_failed; 1263 1400 } 1264 1401 1265 1402 reset_control_assert(rstc); ··· 1270 1407 1271 1408 cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk); 1272 1409 ddata = of_device_get_match_data(dev); 1273 - if (ddata && (ddata->quirks & CQSPI_NEEDS_WR_DELAY)) 1274 - cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC, 1275 - cqspi->master_ref_clk_hz); 1410 + if (ddata) { 1411 + if (ddata->quirks & CQSPI_NEEDS_WR_DELAY) 1412 + cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC, 1413 + cqspi->master_ref_clk_hz); 1414 + if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL) 1415 + master->mode_bits |= SPI_RX_OCTAL; 1416 + if (!(ddata->quirks & 
CQSPI_DISABLE_DAC_MODE)) 1417 + cqspi->use_direct_mode = true; 1418 + } 1276 1419 1277 1420 ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0, 1278 1421 pdev->name, cqspi); 1279 1422 if (ret) { 1280 1423 dev_err(dev, "Cannot request IRQ.\n"); 1281 - goto probe_irq_failed; 1424 + goto probe_reset_failed; 1282 1425 } 1283 1426 1284 1427 cqspi_wait_idle(cqspi); ··· 1292 1423 cqspi->current_cs = -1; 1293 1424 cqspi->sclk = 0; 1294 1425 1295 - ret = cqspi_setup_flash(cqspi, np); 1426 + ret = cqspi_setup_flash(cqspi); 1296 1427 if (ret) { 1297 - dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret); 1428 + dev_err(dev, "failed to setup flash parameters %d\n", ret); 1298 1429 goto probe_setup_failed; 1299 1430 } 1300 1431 1301 - return ret; 1432 + if (cqspi->use_direct_mode) { 1433 + ret = cqspi_request_mmap_dma(cqspi); 1434 + if (ret == -EPROBE_DEFER) 1435 + goto probe_setup_failed; 1436 + } 1437 + 1438 + ret = devm_spi_register_master(dev, master); 1439 + if (ret) { 1440 + dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret); 1441 + goto probe_setup_failed; 1442 + } 1443 + 1444 + return 0; 1302 1445 probe_setup_failed: 1303 1446 cqspi_controller_enable(cqspi, 0); 1304 - probe_irq_failed: 1447 + probe_reset_failed: 1305 1448 clk_disable_unprepare(cqspi->clk); 1306 1449 probe_clk_failed: 1307 1450 pm_runtime_put_sync(dev); 1308 1451 pm_runtime_disable(dev); 1452 + probe_master_put: 1453 + spi_master_put(master); 1309 1454 return ret; 1310 1455 } 1311 1456 1312 1457 static int cqspi_remove(struct platform_device *pdev) 1313 1458 { 1314 1459 struct cqspi_st *cqspi = platform_get_drvdata(pdev); 1315 - int i; 1316 - 1317 - for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++) 1318 - if (cqspi->f_pdata[i].registered) 1319 - mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd); 1320 1460 1321 1461 cqspi_controller_enable(cqspi, 0); 1322 1462 ··· 1368 1490 #endif 1369 1491 1370 1492 static const struct cqspi_driver_platdata cdns_qspi = { 1371 - .hwcaps_mask = 
CQSPI_BASE_HWCAPS_MASK, 1493 + .quirks = CQSPI_DISABLE_DAC_MODE, 1372 1494 }; 1373 1495 1374 1496 static const struct cqspi_driver_platdata k2g_qspi = { 1375 - .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK, 1376 1497 .quirks = CQSPI_NEEDS_WR_DELAY, 1377 1498 }; 1378 1499 1379 1500 static const struct cqspi_driver_platdata am654_ospi = { 1380 - .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK | SNOR_HWCAPS_READ_1_1_8, 1501 + .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, 1381 1502 .quirks = CQSPI_NEEDS_WR_DELAY, 1382 1503 }; 1383 1504 ··· 1415 1538 MODULE_ALIAS("platform:" CQSPI_NAME); 1416 1539 MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>"); 1417 1540 MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>"); 1541 + MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>"); 1542 + MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
+2
drivers/mtd/spi-nor/controllers/intel-spi-pci.c
··· 68 68 { PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&bxt_info }, 69 69 { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info }, 70 70 { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info }, 71 + { PCI_VDEVICE(INTEL, 0x1bca), (unsigned long)&bxt_info }, 71 72 { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info }, 73 + { PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info }, 72 74 { PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info }, 73 75 { PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info }, 74 76 { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
+11 -2
drivers/mtd/spi-nor/controllers/intel-spi.c
··· 292 292 u32 val; 293 293 294 294 return readl_poll_timeout(ispi->base + HSFSTS_CTL, val, 295 - !(val & HSFSTS_CTL_SCIP), 40, 295 + !(val & HSFSTS_CTL_SCIP), 0, 296 296 INTEL_SPI_TIMEOUT * 1000); 297 297 } 298 298 ··· 301 301 u32 val; 302 302 303 303 return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val, 304 - !(val & SSFSTS_CTL_SCIP), 40, 304 + !(val & SSFSTS_CTL_SCIP), 0, 305 305 INTEL_SPI_TIMEOUT * 1000); 306 306 } 307 307 ··· 611 611 ispi->atomic_preopcode = opcode; 612 612 return 0; 613 613 } 614 + 615 + /* 616 + * We hope that HW sequencer will do the right thing automatically and 617 + * with the SW sequencer we cannot use preopcode anyway, so just ignore 618 + * the Write Disable operation and pretend it was completed 619 + * successfully. 620 + */ 621 + if (opcode == SPINOR_OP_WRDI) 622 + return 0; 614 623 615 624 writel(0, ispi->base + FADDR); 616 625
+38 -19
drivers/mtd/spi-nor/core.c
··· 1907 1907 } 1908 1908 1909 1909 /** 1910 - * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status 1911 - * Register 1. 1910 + * spi_nor_sr1_bit6_quad_enable() - Set/Unset the Quad Enable BIT(6) in the 1911 + * Status Register 1. 1912 1912 * @nor: pointer to a 'struct spi_nor' 1913 + * @enable: true to enable Quad mode, false to disable Quad mode. 1913 1914 * 1914 1915 * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories. 1915 1916 * 1916 1917 * Return: 0 on success, -errno otherwise. 1917 1918 */ 1918 - int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor) 1919 + int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor, bool enable) 1919 1920 { 1920 1921 int ret; 1921 1922 ··· 1924 1923 if (ret) 1925 1924 return ret; 1926 1925 1927 - if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6) 1926 + if ((enable && (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)) || 1927 + (!enable && !(nor->bouncebuf[0] & SR1_QUAD_EN_BIT6))) 1928 1928 return 0; 1929 1929 1930 - nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6; 1930 + if (enable) 1931 + nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6; 1932 + else 1933 + nor->bouncebuf[0] &= ~SR1_QUAD_EN_BIT6; 1931 1934 1932 1935 return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]); 1933 1936 } 1934 1937 1935 1938 /** 1936 - * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status 1937 - * Register 2. 1939 + * spi_nor_sr2_bit1_quad_enable() - set/unset the Quad Enable BIT(1) in the 1940 + * Status Register 2. 1938 1941 * @nor: pointer to a 'struct spi_nor'. 1942 + * @enable: true to enable Quad mode, false to disable Quad mode. 1939 1943 * 1940 1944 * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories. 1941 1945 * 1942 1946 * Return: 0 on success, -errno otherwise. 
1943 1947 */ 1944 - int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor) 1948 + int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor, bool enable) 1945 1949 { 1946 1950 int ret; 1947 1951 1948 1952 if (nor->flags & SNOR_F_NO_READ_CR) 1949 - return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1); 1953 + return spi_nor_write_16bit_cr_and_check(nor, 1954 + enable ? SR2_QUAD_EN_BIT1 : 0); 1950 1955 1951 1956 ret = spi_nor_read_cr(nor, nor->bouncebuf); 1952 1957 if (ret) 1953 1958 return ret; 1954 1959 1955 - if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1) 1960 + if ((enable && (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)) || 1961 + (!enable && !(nor->bouncebuf[0] & SR2_QUAD_EN_BIT1))) 1956 1962 return 0; 1957 1963 1958 - nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1; 1964 + if (enable) 1965 + nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1; 1966 + else 1967 + nor->bouncebuf[0] &= ~SR2_QUAD_EN_BIT1; 1959 1968 1960 1969 return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]); 1961 1970 } 1962 1971 1963 1972 /** 1964 - * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2. 1973 + * spi_nor_sr2_bit7_quad_enable() - set/unset QE bit in Status Register 2. 1965 1974 * @nor: pointer to a 'struct spi_nor' 1975 + * @enable: true to enable Quad mode, false to disable Quad mode. 1966 1976 * 1967 1977 * Set the Quad Enable (QE) bit in the Status Register 2. 1968 1978 * ··· 1983 1971 * 1984 1972 * Return: 0 on success, -errno otherwise. 1985 1973 */ 1986 - int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor) 1974 + int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor, bool enable) 1987 1975 { 1988 1976 u8 *sr2 = nor->bouncebuf; 1989 1977 int ret; ··· 1993 1981 ret = spi_nor_read_sr2(nor, sr2); 1994 1982 if (ret) 1995 1983 return ret; 1996 - if (*sr2 & SR2_QUAD_EN_BIT7) 1984 + if ((enable && (*sr2 & SR2_QUAD_EN_BIT7)) || 1985 + (!enable && !(*sr2 & SR2_QUAD_EN_BIT7))) 1997 1986 return 0; 1998 1987 1999 1988 /* Update the Quad Enable bit. 
*/ 2000 - *sr2 |= SR2_QUAD_EN_BIT7; 1989 + if (enable) 1990 + *sr2 |= SR2_QUAD_EN_BIT7; 1991 + else 1992 + *sr2 &= ~SR2_QUAD_EN_BIT7; 2001 1993 2002 1994 ret = spi_nor_write_sr2(nor, sr2); 2003 1995 if (ret) ··· 2914 2898 } 2915 2899 2916 2900 /** 2917 - * spi_nor_quad_enable() - enable Quad I/O if needed. 2901 + * spi_nor_quad_enable() - enable/disable Quad I/O if needed. 2918 2902 * @nor: pointer to a 'struct spi_nor' 2903 + * @enable: true to enable Quad mode. false to disable Quad mode. 2919 2904 * 2920 2905 * Return: 0 on success, -errno otherwise. 2921 2906 */ 2922 - static int spi_nor_quad_enable(struct spi_nor *nor) 2907 + static int spi_nor_quad_enable(struct spi_nor *nor, bool enable) 2923 2908 { 2924 2909 if (!nor->params->quad_enable) 2925 2910 return 0; ··· 2929 2912 spi_nor_get_protocol_width(nor->write_proto) == 4)) 2930 2913 return 0; 2931 2914 2932 - return nor->params->quad_enable(nor); 2915 + return nor->params->quad_enable(nor, enable); 2933 2916 } 2934 2917 2935 2918 /** ··· 2953 2936 { 2954 2937 int err; 2955 2938 2956 - err = spi_nor_quad_enable(nor); 2939 + err = spi_nor_quad_enable(nor, true); 2957 2940 if (err) { 2958 2941 dev_dbg(nor->dev, "quad mode not supported\n"); 2959 2942 return err; ··· 3000 2983 if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) && 3001 2984 nor->flags & SNOR_F_BROKEN_RESET) 3002 2985 nor->params->set_4byte_addr_mode(nor, false); 2986 + 2987 + spi_nor_quad_enable(nor, false); 3003 2988 } 3004 2989 EXPORT_SYMBOL_GPL(spi_nor_restore); 3005 2990
+5 -5
drivers/mtd/spi-nor/core.h
··· 198 198 * higher index in the array, the higher priority. 199 199 * @erase_map: the erase map parsed from the SFDP Sector Map Parameter 200 200 * Table. 201 - * @quad_enable: enables SPI NOR quad mode. 201 + * @quad_enable: enables/disables SPI NOR Quad mode. 202 202 * @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode. 203 203 * @convert_addr: converts an absolute address into something the flash 204 204 * will understand. Particularly useful when pagesize is ··· 219 219 220 220 struct spi_nor_erase_map erase_map; 221 221 222 - int (*quad_enable)(struct spi_nor *nor); 222 + int (*quad_enable)(struct spi_nor *nor, bool enable); 223 223 int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable); 224 224 u32 (*convert_addr)(struct spi_nor *nor, u32 addr); 225 225 int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps); ··· 406 406 int spi_nor_wait_till_ready(struct spi_nor *nor); 407 407 int spi_nor_lock_and_prep(struct spi_nor *nor); 408 408 void spi_nor_unlock_and_unprep(struct spi_nor *nor); 409 - int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor); 410 - int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor); 411 - int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor); 409 + int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor, bool enable); 410 + int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor, bool enable); 411 + int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor, bool enable); 412 412 413 413 int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr); 414 414 ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
+6
drivers/mtd/spi-nor/macronix.c
··· 52 52 { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) }, 53 53 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, 54 54 { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, 55 + { "mx25r1635f", INFO(0xc22815, 0, 64 * 1024, 32, 56 + SECT_4K | SPI_NOR_DUAL_READ | 57 + SPI_NOR_QUAD_READ) }, 55 58 { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64, 56 59 SECT_4K | SPI_NOR_DUAL_READ | 57 60 SPI_NOR_QUAD_READ) }, ··· 87 84 SPI_NOR_QUAD_READ) }, 88 85 { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, 89 86 SPI_NOR_QUAD_READ) }, 87 + { "mx66u2g45g", INFO(0xc2253c, 0, 64 * 1024, 4096, 88 + SECT_4K | SPI_NOR_DUAL_READ | 89 + SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, 90 90 }; 91 91 92 92 static void macronix_default_init(struct spi_nor *nor)
+2 -2
drivers/mtd/spi-nor/micron-st.c
··· 71 71 SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | 72 72 NO_CHIP_ERASE) }, 73 73 { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, 74 - SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | 75 - NO_CHIP_ERASE) }, 74 + SECT_4K | USE_FSR | SPI_NOR_DUAL_READ | 75 + SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, 76 76 77 77 { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) }, 78 78 { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
+2 -1
drivers/mtd/spi-nor/sfdp.c
··· 598 598 break; 599 599 600 600 default: 601 - return -EINVAL; 601 + dev_dbg(nor->dev, "BFPT QER reserved value used\n"); 602 + break; 602 603 } 603 604 604 605 /* Stop here if not JESD216 rev C or later. */
+2 -2
drivers/mtd/spi-nor/spansion.c
··· 64 64 { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, 65 65 SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) 66 66 .fixups = &s25fs_s_fixups, }, 67 - { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) }, 68 67 { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) }, 69 68 { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, 70 69 { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, ··· 83 84 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, 84 85 { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, 85 86 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, 86 - { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, 87 + { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, 88 + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, 87 89 { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, 88 90 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, 89 91 { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
+3 -1
drivers/mtd/spi-nor/winbond.c
··· 64 64 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | 65 65 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, 66 66 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, 67 - { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, 67 + { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, 68 + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, 68 69 { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, 69 70 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | 70 71 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, 72 + { "w25q64jvm", INFO(0xef7017, 0, 64 * 1024, 128, SECT_4K) }, 71 73 { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256, 72 74 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | 73 75 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+11
drivers/spi/Kconfig
··· 200 200 This selects the Cadence SPI controller master driver 201 201 used by Xilinx Zynq and ZynqMP. 202 202 203 + config SPI_CADENCE_QUADSPI 204 + tristate "Cadence Quad SPI controller" 205 + depends on OF && (ARM || ARM64 || COMPILE_TEST) 206 + help 207 + Enable support for the Cadence Quad SPI Flash controller. 208 + 209 + Cadence QSPI is a specialized controller for connecting an SPI 210 + Flash over 1/2/4-bit wide bus. Enable this option if you have a 211 + device with a Cadence QSPI controller and want to access the 212 + Flash as an MTD device. 213 + 203 214 config SPI_CLPS711X 204 215 tristate "CLPS711X host SPI controller" 205 216 depends on ARCH_CLPS711X || COMPILE_TEST
+1
drivers/spi/Makefile
··· 31 31 obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o 32 32 obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o 33 33 obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o 34 + obj-$(CONFIG_SPI_CADENCE_QUADSPI) += spi-cadence-quadspi.o 34 35 obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o 35 36 obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o 36 37 obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
+6
drivers/spi/atmel-quadspi.c
··· 285 285 op->dummy.nbytes == 0) 286 286 return false; 287 287 288 + /* DTR ops not supported. */ 289 + if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr) 290 + return false; 291 + if (op->cmd.nbytes != 1) 292 + return false; 293 + 288 294 return true; 289 295 } 290 296
+10 -6
drivers/spi/spi-mem.c
··· 156 156 op->data.dir == SPI_MEM_DATA_OUT)) 157 157 return false; 158 158 159 + if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr) 160 + return false; 161 + 162 + if (op->cmd.nbytes != 1) 163 + return false; 164 + 159 165 return true; 160 166 } 161 167 EXPORT_SYMBOL_GPL(spi_mem_default_supports_op); ··· 176 170 177 171 static int spi_mem_check_op(const struct spi_mem_op *op) 178 172 { 179 - if (!op->cmd.buswidth) 173 + if (!op->cmd.buswidth || !op->cmd.nbytes) 180 174 return -EINVAL; 181 175 182 176 if ((op->addr.nbytes && !op->addr.buswidth) || ··· 312 306 return ret; 313 307 } 314 308 315 - tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes + 316 - op->dummy.nbytes; 309 + tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes; 317 310 318 311 /* 319 312 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so ··· 327 322 328 323 tmpbuf[0] = op->cmd.opcode; 329 324 xfers[xferpos].tx_buf = tmpbuf; 330 - xfers[xferpos].len = sizeof(op->cmd.opcode); 325 + xfers[xferpos].len = op->cmd.nbytes; 331 326 xfers[xferpos].tx_nbits = op->cmd.buswidth; 332 327 spi_message_add_tail(&xfers[xferpos], &msg); 333 328 xferpos++; ··· 429 424 return ctlr->mem_ops->adjust_op_size(mem, op); 430 425 431 426 if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) { 432 - len = sizeof(op->cmd.opcode) + op->addr.nbytes + 433 - op->dummy.nbytes; 427 + len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes; 434 428 435 429 if (len > spi_max_transfer_size(mem->spi)) 436 430 return -EINVAL;
+8 -2
drivers/spi/spi-mtk-nor.c
··· 195 195 } 196 196 } 197 197 198 - len = MTK_NOR_PRG_MAX_SIZE - sizeof(op->cmd.opcode) - op->addr.nbytes - 198 + len = MTK_NOR_PRG_MAX_SIZE - op->cmd.nbytes - op->addr.nbytes - 199 199 op->dummy.nbytes; 200 200 if (op->data.nbytes > len) 201 201 op->data.nbytes = len; ··· 211 211 if (op->cmd.buswidth != 1) 212 212 return false; 213 213 214 + /* DTR ops not supported. */ 215 + if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr) 216 + return false; 217 + if (op->cmd.nbytes != 1) 218 + return false; 219 + 214 220 if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) { 215 221 if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) 216 222 return true; ··· 225 219 (op->dummy.buswidth == 0) && 226 220 (op->data.buswidth == 1); 227 221 } 228 - len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes; 222 + len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes; 229 223 if ((len > MTK_NOR_PRG_MAX_SIZE) || 230 224 ((op->data.nbytes) && (len == MTK_NOR_PRG_MAX_SIZE))) 231 225 return false;
+2 -1
drivers/spi/spi-mxic.c
··· 356 356 int nio = 1, i, ret; 357 357 u32 ss_ctrl; 358 358 u8 addr[8]; 359 + u8 opcode = op->cmd.opcode; 359 360 360 361 ret = mxic_spi_set_freq(mxic, mem->spi->max_speed_hz); 361 362 if (ret) ··· 394 393 writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_ASSERT, 395 394 mxic->regs + HC_CFG); 396 395 397 - ret = mxic_spi_data_xfer(mxic, &op->cmd.opcode, NULL, 1); 396 + ret = mxic_spi_data_xfer(mxic, &opcode, NULL, 1); 398 397 if (ret) 399 398 goto out; 400 399
+6 -5
drivers/spi/spi-zynq-qspi.c
··· 527 527 struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master); 528 528 int err = 0, i; 529 529 u8 *tmpbuf; 530 + u8 opcode = op->cmd.opcode; 530 531 531 532 dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n", 532 - op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, 533 + opcode, op->cmd.buswidth, op->addr.buswidth, 533 534 op->dummy.buswidth, op->data.buswidth); 534 535 535 536 zynq_qspi_chipselect(mem->spi, true); 536 537 zynq_qspi_config_op(xqspi, mem->spi); 537 538 538 - if (op->cmd.opcode) { 539 + if (op->cmd.nbytes) { 539 540 reinit_completion(&xqspi->data_completion); 540 - xqspi->txbuf = (u8 *)&op->cmd.opcode; 541 + xqspi->txbuf = &opcode; 541 542 xqspi->rxbuf = NULL; 542 - xqspi->tx_bytes = sizeof(op->cmd.opcode); 543 - xqspi->rx_bytes = sizeof(op->cmd.opcode); 543 + xqspi->tx_bytes = op->cmd.nbytes; 544 + xqspi->rx_bytes = op->cmd.nbytes; 544 545 zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true); 545 546 zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET, 546 547 ZYNQ_QSPI_IXR_RXTX_MASK);
+13 -1
include/linux/spi/spi-mem.h
··· 17 17 { \ 18 18 .buswidth = __buswidth, \ 19 19 .opcode = __opcode, \ 20 + .nbytes = 1, \ 20 21 } 21 22 22 23 #define SPI_MEM_OP_ADDR(__nbytes, __val, __buswidth) \ ··· 70 69 71 70 /** 72 71 * struct spi_mem_op - describes a SPI memory operation 72 + * @cmd.nbytes: number of opcode bytes (only 1 or 2 are valid). The opcode is 73 + * sent MSB-first. 73 74 * @cmd.buswidth: number of IO lines used to transmit the command 74 75 * @cmd.opcode: operation opcode 76 + * @cmd.dtr: whether the command opcode should be sent in DTR mode or not 75 77 * @addr.nbytes: number of address bytes to send. Can be zero if the operation 76 78 * does not need to send an address 77 79 * @addr.buswidth: number of IO lines used to transmit the address cycles 80 + * @addr.dtr: whether the address should be sent in DTR mode or not 78 81 * @addr.val: address value. This value is always sent MSB first on the bus. 79 82 * Note that only @addr.nbytes are taken into account in this 80 83 * address value, so users should make sure the value fits in the ··· 86 81 * @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can 87 82 * be zero if the operation does not require dummy bytes 88 83 * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes 84 + * @dummy.dtr: whether the dummy bytes should be sent in DTR mode or not 89 85 * @data.buswidth: number of IO lanes used to send/receive the data 86 + * @data.dtr: whether the data should be sent in DTR mode or not 90 87 * @data.dir: direction of the transfer 91 88 * @data.nbytes: number of data bytes to send/receive. 
Can be zero if the 92 89 * operation does not involve transferring data ··· 97 90 */ 98 91 struct spi_mem_op { 99 92 struct { 93 + u8 nbytes; 100 94 u8 buswidth; 101 - u8 opcode; 95 + u8 dtr : 1; 96 + u16 opcode; 102 97 } cmd; 103 98 104 99 struct { 105 100 u8 nbytes; 106 101 u8 buswidth; 102 + u8 dtr : 1; 107 103 u64 val; 108 104 } addr; 109 105 110 106 struct { 111 107 u8 nbytes; 112 108 u8 buswidth; 109 + u8 dtr : 1; 113 110 } dummy; 114 111 115 112 struct { 116 113 u8 buswidth; 114 + u8 dtr : 1; 117 115 enum spi_mem_data_dir dir; 118 116 unsigned int nbytes; 119 117 union {