Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at nocache-cleanup 2244 lines 61 kB view raw
1// SPDX-License-Identifier: GPL-2.0-only 2// 3// Driver for Cadence QSPI Controller 4// 5// Copyright Altera Corporation (C) 2012-2014. All rights reserved. 6// Copyright Intel Corporation (C) 2019-2020. All rights reserved. 7// Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com 8 9#include <linux/clk.h> 10#include <linux/completion.h> 11#include <linux/delay.h> 12#include <linux/dma-mapping.h> 13#include <linux/dmaengine.h> 14#include <linux/err.h> 15#include <linux/errno.h> 16#include <linux/firmware/xlnx-zynqmp.h> 17#include <linux/interrupt.h> 18#include <linux/io.h> 19#include <linux/iopoll.h> 20#include <linux/jiffies.h> 21#include <linux/kernel.h> 22#include <linux/log2.h> 23#include <linux/module.h> 24#include <linux/of.h> 25#include <linux/platform_device.h> 26#include <linux/pm_runtime.h> 27#include <linux/reset.h> 28#include <linux/sched.h> 29#include <linux/spi/spi.h> 30#include <linux/spi/spi-mem.h> 31#include <linux/timer.h> 32 33#define CQSPI_NAME "cadence-qspi" 34#define CQSPI_MAX_CHIPSELECT 4 35 36static_assert(CQSPI_MAX_CHIPSELECT <= SPI_DEVICE_CS_CNT_MAX); 37 38/* Quirks */ 39#define CQSPI_NEEDS_WR_DELAY BIT(0) 40#define CQSPI_DISABLE_DAC_MODE BIT(1) 41#define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2) 42#define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3) 43#define CQSPI_SLOW_SRAM BIT(4) 44#define CQSPI_NEEDS_APB_AHB_HAZARD_WAR BIT(5) 45#define CQSPI_RD_NO_IRQ BIT(6) 46#define CQSPI_DMA_SET_MASK BIT(7) 47#define CQSPI_SUPPORT_DEVICE_RESET BIT(8) 48#define CQSPI_DISABLE_STIG_MODE BIT(9) 49#define CQSPI_DISABLE_RUNTIME_PM BIT(10) 50 51/* Capabilities */ 52#define CQSPI_SUPPORTS_OCTAL BIT(0) 53#define CQSPI_SUPPORTS_QUAD BIT(1) 54 55#define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0) 56 57enum { 58 CLK_QSPI_APB = 0, 59 CLK_QSPI_AHB, 60 CLK_QSPI_NUM, 61}; 62 63struct cqspi_st; 64 65struct cqspi_flash_pdata { 66 struct cqspi_st *cqspi; 67 u32 clk_rate; 68 u32 read_delay; 69 u32 tshsl_ns; 70 u32 tsd2d_ns; 71 u32 tchsh_ns; 72 u32 tslch_ns; 73 u8 cs; 74}; 75 76struct cqspi_st { 77 struct platform_device *pdev; 78 struct spi_controller *host; 79 struct clk *clk; 80 struct clk *clks[CLK_QSPI_NUM]; 81 unsigned int sclk; 82 83 void __iomem *iobase; 84 void __iomem *ahb_base; 85 resource_size_t ahb_size; 86 struct completion transfer_complete; 87 88 struct dma_chan *rx_chan; 89 struct completion rx_dma_complete; 90 dma_addr_t mmap_phys_base; 91 92 int current_cs; 93 unsigned long master_ref_clk_hz; 94 bool is_decoded_cs; 95 u32 fifo_depth; 96 u32 fifo_width; 97 u32 num_chipselect; 98 bool rclk_en; 99 u32 trigger_address; 100 u32 wr_delay; 101 bool use_direct_mode; 102 bool use_direct_mode_wr; 103 struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT]; 104 bool use_dma_read; 105 u32 pd_dev_id; 106 bool wr_completion; 107 bool slow_sram; 108 bool apb_ahb_hazard; 109 110 bool is_jh7110; /* Flag for StarFive JH7110 SoC */ 111 bool disable_stig_mode; 112 refcount_t refcount; 113 refcount_t inflight_ops; 114 115 const struct cqspi_driver_platdata *ddata; 116}; 117 118struct cqspi_driver_platdata { 119 u32 hwcaps_mask; 120 u16 quirks; 121 int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata, 122 u_char *rxbuf, loff_t from_addr, size_t n_rx); 123 u32 (*get_dma_status)(struct cqspi_st *cqspi); 124 int (*jh7110_clk_init)(struct platform_device *pdev, 125 struct cqspi_st *cqspi); 126}; 127 128/* Operation timeout value */ 129#define CQSPI_TIMEOUT_MS 500 130#define CQSPI_READ_TIMEOUT_MS 10 131#define CQSPI_BUSYWAIT_TIMEOUT_US 500 132 133/* Runtime_pm autosuspend delay */ 134#define CQSPI_AUTOSUSPEND_TIMEOUT 2000 135 136#define CQSPI_DUMMY_CLKS_PER_BYTE 8 137#define CQSPI_DUMMY_BYTES_MAX 4 138#define CQSPI_DUMMY_CLKS_MAX 31 139 140#define CQSPI_STIG_DATA_LEN_MAX 8 141 142/* Register map */ 143#define CQSPI_REG_CONFIG 0x00 144#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0) 145#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL BIT(7) 146#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9) 147#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10 148#define CQSPI_REG_CONFIG_DMA_MASK BIT(15) 149#define CQSPI_REG_CONFIG_BAUD_LSB 19 150#define CQSPI_REG_CONFIG_DTR_PROTO BIT(24) 151#define CQSPI_REG_CONFIG_DUAL_OPCODE BIT(30) 152#define CQSPI_REG_CONFIG_IDLE_LSB 31 153#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF 154#define CQSPI_REG_CONFIG_BAUD_MASK 0xF 155#define CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK BIT(5) 156#define CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK BIT(6) 157 158#define CQSPI_REG_RD_INSTR 0x04 159#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0 160#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8 161#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12 162#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16 163#define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20 164#define CQSPI_REG_RD_INSTR_DUMMY_LSB 24 165#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3 166#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3 167#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3 168#define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F 169 170#define CQSPI_REG_WR_INSTR 0x08 171#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0 172#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12 173#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16 174 175#define CQSPI_REG_DELAY 0x0C 176#define CQSPI_REG_DELAY_TSLCH_LSB 0 177#define CQSPI_REG_DELAY_TCHSH_LSB 8 178#define CQSPI_REG_DELAY_TSD2D_LSB 16 179#define CQSPI_REG_DELAY_TSHSL_LSB 24 180#define CQSPI_REG_DELAY_TSLCH_MASK 0xFF 181#define CQSPI_REG_DELAY_TCHSH_MASK 0xFF 182#define CQSPI_REG_DELAY_TSD2D_MASK 0xFF 183#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF 184 185#define CQSPI_REG_READCAPTURE 0x10 186#define CQSPI_REG_READCAPTURE_BYPASS_LSB 0 187#define CQSPI_REG_READCAPTURE_DELAY_LSB 1 188#define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF 189 190#define CQSPI_REG_SIZE 0x14 191#define CQSPI_REG_SIZE_ADDRESS_LSB 0 192#define CQSPI_REG_SIZE_PAGE_LSB 4 193#define CQSPI_REG_SIZE_BLOCK_LSB 16 194#define CQSPI_REG_SIZE_ADDRESS_MASK 0xF 195#define CQSPI_REG_SIZE_PAGE_MASK 0xFFF 196#define CQSPI_REG_SIZE_BLOCK_MASK 0x3F 197 198#define CQSPI_REG_SRAMPARTITION 0x18 199#define CQSPI_REG_INDIRECTTRIGGER 0x1C 200 201#define CQSPI_REG_DMA 0x20 202#define CQSPI_REG_DMA_SINGLE_LSB 0 203#define CQSPI_REG_DMA_BURST_LSB 8 204#define CQSPI_REG_DMA_SINGLE_MASK 0xFF 205#define CQSPI_REG_DMA_BURST_MASK 0xFF 206 207#define CQSPI_REG_REMAP 0x24 208#define CQSPI_REG_MODE_BIT 0x28 209 210#define CQSPI_REG_SDRAMLEVEL 0x2C 211#define CQSPI_REG_SDRAMLEVEL_RD_LSB 0 212#define CQSPI_REG_SDRAMLEVEL_WR_LSB 16 213#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF 214#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF 215 216#define CQSPI_REG_WR_COMPLETION_CTRL 0x38 217#define CQSPI_REG_WR_DISABLE_AUTO_POLL BIT(14) 218 219#define CQSPI_REG_IRQSTATUS 0x40 220#define CQSPI_REG_IRQMASK 0x44 221 222#define CQSPI_REG_INDIRECTRD 0x60 223#define CQSPI_REG_INDIRECTRD_START_MASK BIT(0) 224#define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1) 225#define CQSPI_REG_INDIRECTRD_DONE_MASK BIT(5) 226 227#define CQSPI_REG_INDIRECTRDWATERMARK 0x64 228#define CQSPI_REG_INDIRECTRDSTARTADDR 0x68 229#define CQSPI_REG_INDIRECTRDBYTES 0x6C 230 231#define CQSPI_REG_CMDCTRL 0x90 232#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0) 233#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1) 234#define CQSPI_REG_CMDCTRL_DUMMY_LSB 7 235#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12 236#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15 237#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16 238#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19 239#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20 240#define CQSPI_REG_CMDCTRL_RD_EN_LSB 23 241#define CQSPI_REG_CMDCTRL_OPCODE_LSB 24 242#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7 243#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3 244#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7 245#define CQSPI_REG_CMDCTRL_DUMMY_MASK 0x1F 246 247#define CQSPI_REG_INDIRECTWR 0x70 248#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0) 249#define CQSPI_REG_INDIRECTWR_CANCEL_MASK BIT(1) 250#define CQSPI_REG_INDIRECTWR_DONE_MASK BIT(5) 251 252#define CQSPI_REG_INDIRECTWRWATERMARK 0x74 253#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78 254#define CQSPI_REG_INDIRECTWRBYTES 0x7C 255 256#define CQSPI_REG_INDTRIG_ADDRRANGE 0x80 257 258#define CQSPI_REG_CMDADDRESS 0x94 259#define CQSPI_REG_CMDREADDATALOWER 0xA0 260#define CQSPI_REG_CMDREADDATAUPPER 0xA4 261#define CQSPI_REG_CMDWRITEDATALOWER 0xA8 262#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC 263 264#define CQSPI_REG_POLLING_STATUS 0xB0 265#define CQSPI_REG_POLLING_STATUS_DUMMY_LSB 16 266 267#define CQSPI_REG_OP_EXT_LOWER 0xE0 268#define CQSPI_REG_OP_EXT_READ_LSB 24 269#define CQSPI_REG_OP_EXT_WRITE_LSB 16 270#define CQSPI_REG_OP_EXT_STIG_LSB 0 271 272#define CQSPI_REG_VERSAL_DMA_SRC_ADDR 0x1000 273 274#define CQSPI_REG_VERSAL_DMA_DST_ADDR 0x1800 275#define CQSPI_REG_VERSAL_DMA_DST_SIZE 0x1804 276 277#define CQSPI_REG_VERSAL_DMA_DST_CTRL 0x180C 278 279#define CQSPI_REG_VERSAL_DMA_DST_I_STS 0x1814 280#define CQSPI_REG_VERSAL_DMA_DST_I_EN 0x1818 281#define CQSPI_REG_VERSAL_DMA_DST_I_DIS 0x181C 282#define CQSPI_REG_VERSAL_DMA_DST_DONE_MASK BIT(1) 283 284#define CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB 0x1828 285 286#define CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL 0xF43FFA00 287#define CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL 0x6 288 289/* Interrupt status bits */ 290#define CQSPI_REG_IRQ_MODE_ERR BIT(0) 291#define CQSPI_REG_IRQ_UNDERFLOW BIT(1) 292#define CQSPI_REG_IRQ_IND_COMP BIT(2) 293#define CQSPI_REG_IRQ_IND_RD_REJECT BIT(3) 294#define CQSPI_REG_IRQ_WR_PROTECTED_ERR BIT(4) 295#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR BIT(5) 296#define CQSPI_REG_IRQ_WATERMARK BIT(6) 297#define CQSPI_REG_IRQ_IND_SRAM_FULL BIT(12) 298 299#define CQSPI_IRQ_MASK_RD (CQSPI_REG_IRQ_WATERMARK | \ 300 CQSPI_REG_IRQ_IND_SRAM_FULL | \ 301 CQSPI_REG_IRQ_IND_COMP) 302 303#define CQSPI_IRQ_MASK_RD_SLOW_SRAM (CQSPI_REG_IRQ_WATERMARK | \ 304 CQSPI_REG_IRQ_IND_COMP) 305 306#define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \ 307 CQSPI_REG_IRQ_WATERMARK | \ 308 CQSPI_REG_IRQ_UNDERFLOW) 309 310#define CQSPI_IRQ_STATUS_MASK 0x1FFFF 311#define CQSPI_DMA_UNALIGN 0x3 312 313#define CQSPI_REG_VERSAL_DMA_VAL 0x602 314 315static int cqspi_wait_for_bit(const struct cqspi_driver_platdata *ddata, 316 void __iomem *reg, const u32 mask, bool clr, 317 bool busywait) 318{ 319 u64 timeout_us = CQSPI_TIMEOUT_MS * USEC_PER_MSEC; 320 u32 val; 321 322 if (busywait) { 323 int ret = readl_relaxed_poll_timeout(reg, val, 324 (((clr ? ~val : val) & mask) == mask), 325 0, CQSPI_BUSYWAIT_TIMEOUT_US); 326 327 if (ret != -ETIMEDOUT) 328 return ret; 329 330 timeout_us -= CQSPI_BUSYWAIT_TIMEOUT_US; 331 } 332 333 return readl_relaxed_poll_timeout(reg, val, 334 (((clr ? ~val : val) & mask) == mask), 335 10, timeout_us); 336} 337 338static bool cqspi_is_idle(struct cqspi_st *cqspi) 339{ 340 u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 341 342 return reg & BIT(CQSPI_REG_CONFIG_IDLE_LSB); 343} 344 345static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi) 346{ 347 u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL); 348 349 reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB; 350 return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK; 351} 352 353static u32 cqspi_get_versal_dma_status(struct cqspi_st *cqspi) 354{ 355 u32 dma_status; 356 357 dma_status = readl(cqspi->iobase + 358 CQSPI_REG_VERSAL_DMA_DST_I_STS); 359 writel(dma_status, cqspi->iobase + 360 CQSPI_REG_VERSAL_DMA_DST_I_STS); 361 362 return dma_status & CQSPI_REG_VERSAL_DMA_DST_DONE_MASK; 363} 364 365static irqreturn_t cqspi_irq_handler(int this_irq, void *dev) 366{ 367 struct cqspi_st *cqspi = dev; 368 const struct cqspi_driver_platdata *ddata = cqspi->ddata; 369 unsigned int irq_status; 370 371 /* Read interrupt status */ 372 irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS); 373 374 /* Clear interrupt */ 375 writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS); 376 377 if (cqspi->use_dma_read && ddata && ddata->get_dma_status) { 378 if (ddata->get_dma_status(cqspi)) { 379 complete(&cqspi->transfer_complete); 380 return IRQ_HANDLED; 381 } 382 } 383 384 else if (!cqspi->slow_sram) 385 irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR; 386 else 387 irq_status &= CQSPI_IRQ_MASK_RD_SLOW_SRAM | CQSPI_IRQ_MASK_WR; 388 389 if (irq_status) 390 complete(&cqspi->transfer_complete); 391 392 return IRQ_HANDLED; 393} 394 395static unsigned int cqspi_calc_rdreg(const struct spi_mem_op *op) 396{ 397 u32 rdreg = 0; 398 399 rdreg |= CQSPI_OP_WIDTH(op->cmd) << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB; 400 rdreg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB; 401 rdreg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB; 402 403 return rdreg; 404} 405 406static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op) 407{ 408 unsigned int dummy_clk; 409 410 if (!op->dummy.nbytes) 411 return 0; 412 413 dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth); 414 if (op->cmd.dtr) 415 dummy_clk /= 2; 416 417 return dummy_clk; 418} 419 420static int cqspi_wait_idle(struct cqspi_st *cqspi) 421{ 422 const unsigned int poll_idle_retry = 3; 423 unsigned int count = 0; 424 unsigned long timeout; 425 426 timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS); 427 while (1) { 428 /* 429 * Read few times in succession to ensure the controller 430 * is indeed idle, that is, the bit does not transition 431 * low again. 432 */ 433 if (cqspi_is_idle(cqspi)) 434 count++; 435 else 436 count = 0; 437 438 if (count >= poll_idle_retry) 439 return 0; 440 441 if (time_after(jiffies, timeout)) { 442 /* Timeout, in busy mode. */ 443 dev_err(&cqspi->pdev->dev, 444 "QSPI is still busy after %dms timeout.\n", 445 CQSPI_TIMEOUT_MS); 446 return -ETIMEDOUT; 447 } 448 449 cpu_relax(); 450 } 451} 452 453static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg) 454{ 455 void __iomem *reg_base = cqspi->iobase; 456 int ret; 457 458 /* Write the CMDCTRL without start execution. */ 459 writel(reg, reg_base + CQSPI_REG_CMDCTRL); 460 /* Start execute */ 461 reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK; 462 writel(reg, reg_base + CQSPI_REG_CMDCTRL); 463 464 /* Polling for completion. */ 465 ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_CMDCTRL, 466 CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1, true); 467 if (ret) { 468 dev_err(&cqspi->pdev->dev, 469 "Flash command execution timed out.\n"); 470 return ret; 471 } 472 473 /* Polling QSPI idle status. */ 474 return cqspi_wait_idle(cqspi); 475} 476 477static int cqspi_setup_opcode_ext(struct cqspi_flash_pdata *f_pdata, 478 const struct spi_mem_op *op, 479 unsigned int shift) 480{ 481 struct cqspi_st *cqspi = f_pdata->cqspi; 482 void __iomem *reg_base = cqspi->iobase; 483 unsigned int reg; 484 u8 ext; 485 486 if (op->cmd.nbytes != 2) 487 return -EINVAL; 488 489 /* Opcode extension is the LSB. */ 490 ext = op->cmd.opcode & 0xff; 491 492 reg = readl(reg_base + CQSPI_REG_OP_EXT_LOWER); 493 reg &= ~(0xff << shift); 494 reg |= ext << shift; 495 writel(reg, reg_base + CQSPI_REG_OP_EXT_LOWER); 496 497 return 0; 498} 499 500static int cqspi_enable_dtr(struct cqspi_flash_pdata *f_pdata, 501 const struct spi_mem_op *op, unsigned int shift) 502{ 503 struct cqspi_st *cqspi = f_pdata->cqspi; 504 void __iomem *reg_base = cqspi->iobase; 505 unsigned int reg; 506 int ret; 507 508 reg = readl(reg_base + CQSPI_REG_CONFIG); 509 510 /* 511 * We enable dual byte opcode here. The callers have to set up the 512 * extension opcode based on which type of operation it is. 513 */ 514 if (op->cmd.dtr) { 515 reg |= CQSPI_REG_CONFIG_DTR_PROTO; 516 reg |= CQSPI_REG_CONFIG_DUAL_OPCODE; 517 518 /* Set up command opcode extension. */ 519 ret = cqspi_setup_opcode_ext(f_pdata, op, shift); 520 if (ret) 521 return ret; 522 } else { 523 unsigned int mask = CQSPI_REG_CONFIG_DTR_PROTO | CQSPI_REG_CONFIG_DUAL_OPCODE; 524 /* Shortcut if DTR is already disabled. */ 525 if ((reg & mask) == 0) 526 return 0; 527 reg &= ~mask; 528 } 529 530 writel(reg, reg_base + CQSPI_REG_CONFIG); 531 532 return cqspi_wait_idle(cqspi); 533} 534 535static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata, 536 const struct spi_mem_op *op) 537{ 538 struct cqspi_st *cqspi = f_pdata->cqspi; 539 void __iomem *reg_base = cqspi->iobase; 540 u8 *rxbuf = op->data.buf.in; 541 u8 opcode; 542 size_t n_rx = op->data.nbytes; 543 unsigned int rdreg; 544 unsigned int reg; 545 unsigned int dummy_clk; 546 size_t read_len; 547 int status; 548 549 status = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB); 550 if (status) 551 return status; 552 553 if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) { 554 dev_err(&cqspi->pdev->dev, 555 "Invalid input argument, len %zu rxbuf 0x%p\n", 556 n_rx, rxbuf); 557 return -EINVAL; 558 } 559 560 if (op->cmd.dtr) 561 opcode = op->cmd.opcode >> 8; 562 else 563 opcode = op->cmd.opcode; 564 565 reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB; 566 567 rdreg = cqspi_calc_rdreg(op); 568 writel(rdreg, reg_base + CQSPI_REG_RD_INSTR); 569 570 dummy_clk = cqspi_calc_dummy(op); 571 if (dummy_clk > CQSPI_DUMMY_CLKS_MAX) 572 return -EOPNOTSUPP; 573 574 if (dummy_clk) 575 reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK) 576 << CQSPI_REG_CMDCTRL_DUMMY_LSB; 577 578 reg |= BIT(CQSPI_REG_CMDCTRL_RD_EN_LSB); 579 580 /* 0 means 1 byte. */ 581 reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK) 582 << CQSPI_REG_CMDCTRL_RD_BYTES_LSB); 583 584 /* setup ADDR BIT field */ 585 if (op->addr.nbytes) { 586 reg |= BIT(CQSPI_REG_CMDCTRL_ADDR_EN_LSB); 587 reg |= ((op->addr.nbytes - 1) & 588 CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) 589 << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB; 590 591 writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS); 592 } 593 594 status = cqspi_exec_flash_cmd(cqspi, reg); 595 if (status) 596 return status; 597 598 reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER); 599 600 /* Put the read value into rx_buf */ 601 read_len = (n_rx > 4) ? 4 : n_rx; 602 memcpy(rxbuf, &reg, read_len); 603 rxbuf += read_len; 604 605 if (n_rx > 4) { 606 reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER); 607 608 read_len = n_rx - read_len; 609 memcpy(rxbuf, &reg, read_len); 610 } 611 612 /* Reset CMD_CTRL Reg once command read completes */ 613 writel(0, reg_base + CQSPI_REG_CMDCTRL); 614 615 return 0; 616} 617 618static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata, 619 const struct spi_mem_op *op) 620{ 621 struct cqspi_st *cqspi = f_pdata->cqspi; 622 void __iomem *reg_base = cqspi->iobase; 623 u8 opcode; 624 const u8 *txbuf = op->data.buf.out; 625 size_t n_tx = op->data.nbytes; 626 unsigned int reg; 627 unsigned int data; 628 size_t write_len; 629 int ret; 630 631 ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB); 632 if (ret) 633 return ret; 634 635 if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) { 636 dev_err(&cqspi->pdev->dev, 637 "Invalid input argument, cmdlen %zu txbuf 0x%p\n", 638 n_tx, txbuf); 639 return -EINVAL; 640 } 641 642 reg = cqspi_calc_rdreg(op); 643 writel(reg, reg_base + CQSPI_REG_RD_INSTR); 644 645 if (op->cmd.dtr) 646 opcode = op->cmd.opcode >> 8; 647 else 648 opcode = op->cmd.opcode; 649 650 reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB; 651 652 if (op->addr.nbytes) { 653 reg |= BIT(CQSPI_REG_CMDCTRL_ADDR_EN_LSB); 654 reg |= ((op->addr.nbytes - 1) & 655 CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) 656 << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB; 657 658 writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS); 659 } 660 661 if (n_tx) { 662 reg |= BIT(CQSPI_REG_CMDCTRL_WR_EN_LSB); 663 reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK) 664 << CQSPI_REG_CMDCTRL_WR_BYTES_LSB; 665 data = 0; 666 write_len = (n_tx > 4) ? 4 : n_tx; 667 memcpy(&data, txbuf, write_len); 668 txbuf += write_len; 669 writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER); 670 671 if (n_tx > 4) { 672 data = 0; 673 write_len = n_tx - 4; 674 memcpy(&data, txbuf, write_len); 675 writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER); 676 } 677 } 678 679 ret = cqspi_exec_flash_cmd(cqspi, reg); 680 681 /* Reset CMD_CTRL Reg once command write completes */ 682 writel(0, reg_base + CQSPI_REG_CMDCTRL); 683 684 return ret; 685} 686 687static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata, 688 const struct spi_mem_op *op) 689{ 690 struct cqspi_st *cqspi = f_pdata->cqspi; 691 void __iomem *reg_base = cqspi->iobase; 692 unsigned int dummy_clk = 0; 693 unsigned int reg; 694 int ret; 695 u8 opcode; 696 697 ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_READ_LSB); 698 if (ret) 699 return ret; 700 701 if (op->cmd.dtr) 702 opcode = op->cmd.opcode >> 8; 703 else 704 opcode = op->cmd.opcode; 705 706 reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB; 707 reg |= cqspi_calc_rdreg(op); 708 709 /* Setup dummy clock cycles */ 710 dummy_clk = cqspi_calc_dummy(op); 711 712 if (dummy_clk > CQSPI_DUMMY_CLKS_MAX) 713 return -EOPNOTSUPP; 714 715 if (dummy_clk) 716 reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK) 717 << CQSPI_REG_RD_INSTR_DUMMY_LSB; 718 719 writel(reg, reg_base + CQSPI_REG_RD_INSTR); 720 721 /* Set address width */ 722 reg = readl(reg_base + CQSPI_REG_SIZE); 723 reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; 724 reg |= (op->addr.nbytes - 1); 725 writel(reg, reg_base + CQSPI_REG_SIZE); 726 readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */ 727 return 0; 728} 729 730static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata, 731 u8 *rxbuf, loff_t from_addr, 732 const size_t n_rx) 733{ 734 struct cqspi_st *cqspi = f_pdata->cqspi; 735 bool use_irq = !(cqspi->ddata && cqspi->ddata->quirks & CQSPI_RD_NO_IRQ); 736 struct device *dev = &cqspi->pdev->dev; 737 void __iomem *reg_base = cqspi->iobase; 738 void __iomem *ahb_base = cqspi->ahb_base; 739 unsigned int remaining = n_rx; 740 unsigned int mod_bytes = n_rx % 4; 741 unsigned int bytes_to_read = 0; 742 u8 *rxbuf_end = rxbuf + n_rx; 743 int ret = 0; 744 745 if (!refcount_read(&cqspi->refcount)) 746 return -ENODEV; 747 748 writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR); 749 writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES); 750 751 /* Clear all interrupts. */ 752 writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS); 753 754 /* 755 * On SoCFPGA platform reading the SRAM is slow due to 756 * hardware limitation and causing read interrupt storm to CPU, 757 * so enabling only watermark interrupt to disable all read 758 * interrupts later as we want to run "bytes to read" loop with 759 * all the read interrupts disabled for max performance. 760 */ 761 762 if (use_irq && cqspi->slow_sram) 763 writel(CQSPI_IRQ_MASK_RD_SLOW_SRAM, reg_base + CQSPI_REG_IRQMASK); 764 else if (use_irq) 765 writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK); 766 else 767 writel(0, reg_base + CQSPI_REG_IRQMASK); 768 769 reinit_completion(&cqspi->transfer_complete); 770 writel(CQSPI_REG_INDIRECTRD_START_MASK, 771 reg_base + CQSPI_REG_INDIRECTRD); 772 readl(reg_base + CQSPI_REG_INDIRECTRD); /* Flush posted write. */ 773 774 while (remaining > 0) { 775 ret = 0; 776 if (use_irq && 777 !wait_for_completion_timeout(&cqspi->transfer_complete, 778 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) 779 ret = -ETIMEDOUT; 780 781 /* 782 * Prevent lost interrupt and race condition by reinitializing early. 783 * A spurious wakeup and another wait cycle can occur here, 784 * which is preferable to waiting until timeout if interrupt is lost. 785 */ 786 if (use_irq) 787 reinit_completion(&cqspi->transfer_complete); 788 789 bytes_to_read = cqspi_get_rd_sram_level(cqspi); 790 791 if (ret && bytes_to_read == 0) { 792 dev_err(dev, "Indirect read timeout, no bytes\n"); 793 goto failrd; 794 } 795 796 while (bytes_to_read != 0) { 797 unsigned int word_remain = round_down(remaining, 4); 798 799 bytes_to_read *= cqspi->fifo_width; 800 bytes_to_read = bytes_to_read > remaining ? 801 remaining : bytes_to_read; 802 bytes_to_read = round_down(bytes_to_read, 4); 803 /* Read 4 byte word chunks then single bytes */ 804 if (bytes_to_read) { 805 ioread32_rep(ahb_base, rxbuf, 806 (bytes_to_read / 4)); 807 } else if (!word_remain && mod_bytes) { 808 unsigned int temp = ioread32(ahb_base); 809 810 bytes_to_read = mod_bytes; 811 memcpy(rxbuf, &temp, min((unsigned int) 812 (rxbuf_end - rxbuf), 813 bytes_to_read)); 814 } 815 rxbuf += bytes_to_read; 816 remaining -= bytes_to_read; 817 bytes_to_read = cqspi_get_rd_sram_level(cqspi); 818 } 819 } 820 821 /* Check indirect done status */ 822 ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_INDIRECTRD, 823 CQSPI_REG_INDIRECTRD_DONE_MASK, 0, true); 824 if (ret) { 825 dev_err(dev, "Indirect read completion error (%i)\n", ret); 826 goto failrd; 827 } 828 829 /* Disable interrupt */ 830 writel(0, reg_base + CQSPI_REG_IRQMASK); 831 832 /* Clear indirect completion status */ 833 writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD); 834 835 return 0; 836 837failrd: 838 /* Disable interrupt */ 839 writel(0, reg_base + CQSPI_REG_IRQMASK); 840 841 /* Cancel the indirect read */ 842 writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK, 843 reg_base + CQSPI_REG_INDIRECTRD); 844 return ret; 845} 846 847static void cqspi_device_reset(struct cqspi_st *cqspi) 848{ 849 u32 reg; 850 851 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 852 reg |= CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK; 853 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); 854 /* 855 * NOTE: Delay timing implementation is derived from 856 * spi_nor_hw_reset() 857 */ 858 writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG); 859 usleep_range(1, 5); 860 writel(reg | CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG); 861 usleep_range(100, 150); 862 writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG); 863 usleep_range(1000, 1200); 864} 865 866static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable) 867{ 868 void __iomem *reg_base = cqspi->iobase; 869 unsigned int reg; 870 871 reg = readl(reg_base + CQSPI_REG_CONFIG); 872 873 if (enable) 874 reg |= CQSPI_REG_CONFIG_ENABLE_MASK; 875 else 876 reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK; 877 878 writel(reg, reg_base + CQSPI_REG_CONFIG); 879} 880 881static int cqspi_versal_indirect_read_dma(struct cqspi_flash_pdata *f_pdata, 882 u_char *rxbuf, loff_t from_addr, 883 size_t n_rx) 884{ 885 struct cqspi_st *cqspi = f_pdata->cqspi; 886 struct device *dev = &cqspi->pdev->dev; 887 void __iomem *reg_base = cqspi->iobase; 888 u32 reg, bytes_to_dma; 889 loff_t addr = from_addr; 890 void *buf = rxbuf; 891 dma_addr_t dma_addr; 892 u8 bytes_rem; 893 int ret = 0; 894 895 bytes_rem = n_rx % 4; 896 bytes_to_dma = (n_rx - bytes_rem); 897 898 if (!bytes_to_dma) 899 goto nondmard; 900 901 ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_DMA); 902 if (ret) 903 return ret; 904 905 cqspi_controller_enable(cqspi, 0); 906 907 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 908 reg |= CQSPI_REG_CONFIG_DMA_MASK; 909 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); 910 911 cqspi_controller_enable(cqspi, 1); 912 913 dma_addr = dma_map_single(dev, rxbuf, bytes_to_dma, DMA_FROM_DEVICE); 914 if (dma_mapping_error(dev, dma_addr)) { 915 dev_err(dev, "dma mapping failed\n"); 916 return -ENOMEM; 917 } 918 919 writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR); 920 writel(bytes_to_dma, reg_base + CQSPI_REG_INDIRECTRDBYTES); 921 writel(CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL, 922 reg_base + CQSPI_REG_INDTRIG_ADDRRANGE); 923 924 /* Clear all interrupts. */ 925 writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS); 926 927 /* Enable DMA done interrupt */ 928 writel(CQSPI_REG_VERSAL_DMA_DST_DONE_MASK, 929 reg_base + CQSPI_REG_VERSAL_DMA_DST_I_EN); 930 931 /* Default DMA periph configuration */ 932 writel(CQSPI_REG_VERSAL_DMA_VAL, reg_base + CQSPI_REG_DMA); 933 934 /* Configure DMA Dst address */ 935 writel(lower_32_bits(dma_addr), 936 reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR); 937 writel(upper_32_bits(dma_addr), 938 reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB); 939 940 /* Configure DMA Src address */ 941 writel(cqspi->trigger_address, reg_base + 942 CQSPI_REG_VERSAL_DMA_SRC_ADDR); 943 944 /* Set DMA destination size */ 945 writel(bytes_to_dma, reg_base + CQSPI_REG_VERSAL_DMA_DST_SIZE); 946 947 /* Set DMA destination control */ 948 writel(CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL, 949 reg_base + CQSPI_REG_VERSAL_DMA_DST_CTRL); 950 951 writel(CQSPI_REG_INDIRECTRD_START_MASK, 952 reg_base + CQSPI_REG_INDIRECTRD); 953 954 reinit_completion(&cqspi->transfer_complete); 955 956 if (!wait_for_completion_timeout(&cqspi->transfer_complete, 957 msecs_to_jiffies(max_t(size_t, bytes_to_dma, 500)))) { 958 ret = -ETIMEDOUT; 959 goto failrd; 960 } 961 962 /* Disable DMA interrupt */ 963 writel(0x0, cqspi->iobase + CQSPI_REG_VERSAL_DMA_DST_I_DIS); 964 965 /* Clear indirect completion status */ 966 writel(CQSPI_REG_INDIRECTRD_DONE_MASK, 967 cqspi->iobase + CQSPI_REG_INDIRECTRD); 968 dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE); 969 970 cqspi_controller_enable(cqspi, 0); 971 972 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 973 reg &= ~CQSPI_REG_CONFIG_DMA_MASK; 974 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); 975 976 cqspi_controller_enable(cqspi, 1); 977 978 ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, 979 PM_OSPI_MUX_SEL_LINEAR); 980 if (ret) 981 return ret; 982 983nondmard: 984 if (bytes_rem) { 985 addr += bytes_to_dma; 986 buf += bytes_to_dma; 987 ret = cqspi_indirect_read_execute(f_pdata, buf, addr, 988 bytes_rem); 989 if (ret) 990 return ret; 991 } 992 993 return 0; 994 995failrd: 996 /* Disable DMA interrupt */ 997 writel(0x0, reg_base + CQSPI_REG_VERSAL_DMA_DST_I_DIS); 998 999 /* Cancel the indirect read */ 1000 writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK, 1001 reg_base + CQSPI_REG_INDIRECTRD); 1002 1003 dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE); 1004 1005 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 1006 reg &= ~CQSPI_REG_CONFIG_DMA_MASK; 1007 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); 1008 1009 zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_LINEAR); 1010 1011 return ret; 1012} 1013 1014static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata, 1015 const struct spi_mem_op *op) 1016{ 1017 unsigned int reg; 1018 int ret; 1019 struct cqspi_st *cqspi = f_pdata->cqspi; 1020 void __iomem *reg_base = cqspi->iobase; 1021 u8 opcode; 1022 1023 ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_WRITE_LSB); 1024 if (ret) 1025 return ret; 1026 1027 if (op->cmd.dtr) 1028 opcode = op->cmd.opcode >> 8; 1029 else 1030 opcode = op->cmd.opcode; 1031 1032 /* Set opcode. */ 1033 reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB; 1034 reg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB; 1035 reg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB; 1036 writel(reg, reg_base + CQSPI_REG_WR_INSTR); 1037 reg = cqspi_calc_rdreg(op); 1038 writel(reg, reg_base + CQSPI_REG_RD_INSTR); 1039 1040 /* 1041 * SPI NAND flashes require the address of the status register to be 1042 * passed in the Read SR command. Also, some SPI NOR flashes like the 1043 * cypress Semper flash expect a 4-byte dummy address in the Read SR 1044 * command in DTR mode. 1045 * 1046 * But this controller does not support address phase in the Read SR 1047 * command when doing auto-HW polling. So, disable write completion 1048 * polling on the controller's side. spinand and spi-nor will take 1049 * care of polling the status register. 1050 */ 1051 if (cqspi->wr_completion) { 1052 reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL); 1053 reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL; 1054 writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL); 1055 /* 1056 * DAC mode require auto polling as flash needs to be polled 1057 * for write completion in case of bubble in SPI transaction 1058 * due to slow CPU/DMA master. 1059 */ 1060 cqspi->use_direct_mode_wr = false; 1061 } 1062 1063 reg = readl(reg_base + CQSPI_REG_SIZE); 1064 reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; 1065 reg |= (op->addr.nbytes - 1); 1066 writel(reg, reg_base + CQSPI_REG_SIZE); 1067 readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */ 1068 return 0; 1069} 1070 1071static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata, 1072 loff_t to_addr, const u8 *txbuf, 1073 const size_t n_tx) 1074{ 1075 struct cqspi_st *cqspi = f_pdata->cqspi; 1076 struct device *dev = &cqspi->pdev->dev; 1077 void __iomem *reg_base = cqspi->iobase; 1078 unsigned int remaining = n_tx; 1079 unsigned int write_bytes; 1080 int ret; 1081 1082 if (!refcount_read(&cqspi->refcount)) 1083 return -ENODEV; 1084 1085 writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR); 1086 writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES); 1087 1088 /* Clear all interrupts. */ 1089 writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS); 1090 1091 writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK); 1092 1093 reinit_completion(&cqspi->transfer_complete); 1094 writel(CQSPI_REG_INDIRECTWR_START_MASK, 1095 reg_base + CQSPI_REG_INDIRECTWR); 1096 readl(reg_base + CQSPI_REG_INDIRECTWR); /* Flush posted write. */ 1097 1098 /* 1099 * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access 1100 * Controller programming sequence, couple of cycles of 1101 * QSPI_REF_CLK delay is required for the above bit to 1102 * be internally synchronized by the QSPI module. Provide 5 1103 * cycles of delay. 1104 */ 1105 if (cqspi->wr_delay) 1106 ndelay(cqspi->wr_delay); 1107 1108 /* 1109 * If a hazard exists between the APB and AHB interfaces, perform a 1110 * dummy readback from the controller to ensure synchronization. 1111 */ 1112 if (cqspi->apb_ahb_hazard) 1113 readl(reg_base + CQSPI_REG_INDIRECTWR); 1114 1115 while (remaining > 0) { 1116 size_t write_words, mod_bytes; 1117 1118 write_bytes = remaining; 1119 write_words = write_bytes / 4; 1120 mod_bytes = write_bytes % 4; 1121 /* Write 4 bytes at a time then single bytes. */ 1122 if (write_words) { 1123 iowrite32_rep(cqspi->ahb_base, txbuf, write_words); 1124 txbuf += (write_words * 4); 1125 } 1126 if (mod_bytes) { 1127 unsigned int temp = 0xFFFFFFFF; 1128 1129 memcpy(&temp, txbuf, mod_bytes); 1130 iowrite32(temp, cqspi->ahb_base); 1131 txbuf += mod_bytes; 1132 } 1133 1134 if (!wait_for_completion_timeout(&cqspi->transfer_complete, 1135 msecs_to_jiffies(CQSPI_TIMEOUT_MS))) { 1136 dev_err(dev, "Indirect write timeout\n"); 1137 ret = -ETIMEDOUT; 1138 goto failwr; 1139 } 1140 1141 remaining -= write_bytes; 1142 1143 if (remaining > 0) 1144 reinit_completion(&cqspi->transfer_complete); 1145 } 1146 1147 /* Check indirect done status */ 1148 ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_INDIRECTWR, 1149 CQSPI_REG_INDIRECTWR_DONE_MASK, 0, false); 1150 if (ret) { 1151 dev_err(dev, "Indirect write completion error (%i)\n", ret); 1152 goto failwr; 1153 } 1154 1155 /* Disable interrupt. */ 1156 writel(0, reg_base + CQSPI_REG_IRQMASK); 1157 1158 /* Clear indirect completion status */ 1159 writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR); 1160 1161 cqspi_wait_idle(cqspi); 1162 1163 return 0; 1164 1165failwr: 1166 /* Disable interrupt. */ 1167 writel(0, reg_base + CQSPI_REG_IRQMASK); 1168 1169 /* Cancel the indirect write */ 1170 writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK, 1171 reg_base + CQSPI_REG_INDIRECTWR); 1172 return ret; 1173} 1174 1175static void cqspi_chipselect(struct cqspi_flash_pdata *f_pdata) 1176{ 1177 struct cqspi_st *cqspi = f_pdata->cqspi; 1178 void __iomem *reg_base = cqspi->iobase; 1179 unsigned int chip_select = f_pdata->cs; 1180 unsigned int reg; 1181 1182 reg = readl(reg_base + CQSPI_REG_CONFIG); 1183 if (cqspi->is_decoded_cs) { 1184 reg |= CQSPI_REG_CONFIG_DECODE_MASK; 1185 } else { 1186 reg &= ~CQSPI_REG_CONFIG_DECODE_MASK; 1187 1188 /* Convert CS if without decoder. 1189 * CS0 to 4b'1110 1190 * CS1 to 4b'1101 1191 * CS2 to 4b'1011 1192 * CS3 to 4b'0111 1193 */ 1194 chip_select = 0xF & ~BIT(chip_select); 1195 } 1196 1197 reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK 1198 << CQSPI_REG_CONFIG_CHIPSELECT_LSB); 1199 reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK) 1200 << CQSPI_REG_CONFIG_CHIPSELECT_LSB; 1201 writel(reg, reg_base + CQSPI_REG_CONFIG); 1202} 1203 1204static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz, 1205 const unsigned int ns_val) 1206{ 1207 unsigned int ticks; 1208 1209 ticks = ref_clk_hz / 1000; /* kHz */ 1210 ticks = DIV_ROUND_UP(ticks * ns_val, 1000000); 1211 1212 return ticks; 1213} 1214 1215static void cqspi_delay(struct cqspi_flash_pdata *f_pdata) 1216{ 1217 struct cqspi_st *cqspi = f_pdata->cqspi; 1218 void __iomem *iobase = cqspi->iobase; 1219 const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz; 1220 unsigned int tshsl, tchsh, tslch, tsd2d; 1221 unsigned int reg; 1222 unsigned int tsclk; 1223 1224 /* calculate the number of ref ticks for one sclk tick */ 1225 tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk); 1226 1227 tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns); 1228 /* this particular value must be at least one sclk */ 1229 if (tshsl < tsclk) 1230 tshsl = tsclk; 1231 1232 tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns); 1233 tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns); 1234 tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns); 1235 1236 reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK) 1237 << CQSPI_REG_DELAY_TSHSL_LSB; 1238 reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK) 1239 << CQSPI_REG_DELAY_TCHSH_LSB; 1240 reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK) 1241 << CQSPI_REG_DELAY_TSLCH_LSB; 1242 reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK) 1243 << CQSPI_REG_DELAY_TSD2D_LSB; 1244 writel(reg, iobase + CQSPI_REG_DELAY); 1245} 1246 1247static void cqspi_config_baudrate_div(struct cqspi_st *cqspi) 1248{ 1249 const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz; 1250 void __iomem *reg_base = cqspi->iobase; 1251 u32 reg, div; 1252 1253 /* Recalculate the baudrate divisor based on QSPI specification. */ 1254 div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1; 1255 1256 /* Maximum baud divisor */ 1257 if (div > CQSPI_REG_CONFIG_BAUD_MASK) { 1258 div = CQSPI_REG_CONFIG_BAUD_MASK; 1259 dev_warn(&cqspi->pdev->dev, 1260 "Unable to adjust clock <= %d hz. Reduced to %d hz\n", 1261 cqspi->sclk, ref_clk_hz/((div+1)*2)); 1262 } 1263 1264 reg = readl(reg_base + CQSPI_REG_CONFIG); 1265 reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB); 1266 reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB; 1267 writel(reg, reg_base + CQSPI_REG_CONFIG); 1268} 1269 1270static void cqspi_readdata_capture(struct cqspi_st *cqspi, 1271 const bool bypass, 1272 const unsigned int delay) 1273{ 1274 void __iomem *reg_base = cqspi->iobase; 1275 unsigned int reg; 1276 1277 reg = readl(reg_base + CQSPI_REG_READCAPTURE); 1278 1279 if (bypass) 1280 reg |= BIT(CQSPI_REG_READCAPTURE_BYPASS_LSB); 1281 else 1282 reg &= ~BIT(CQSPI_REG_READCAPTURE_BYPASS_LSB); 1283 1284 reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK 1285 << CQSPI_REG_READCAPTURE_DELAY_LSB); 1286 1287 reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK) 1288 << CQSPI_REG_READCAPTURE_DELAY_LSB; 1289 1290 writel(reg, reg_base + CQSPI_REG_READCAPTURE); 1291} 1292 1293static void cqspi_configure(struct cqspi_flash_pdata *f_pdata, 1294 unsigned long sclk) 1295{ 1296 struct cqspi_st *cqspi = f_pdata->cqspi; 1297 int switch_cs = (cqspi->current_cs != f_pdata->cs); 1298 int switch_ck = (cqspi->sclk != sclk); 1299 1300 if (switch_cs || switch_ck) 1301 cqspi_controller_enable(cqspi, 0); 1302 1303 /* Switch chip select. */ 1304 if (switch_cs) { 1305 cqspi->current_cs = f_pdata->cs; 1306 cqspi_chipselect(f_pdata); 1307 } 1308 1309 /* Setup baudrate divisor and delays */ 1310 if (switch_ck) { 1311 cqspi->sclk = sclk; 1312 cqspi_config_baudrate_div(cqspi); 1313 cqspi_delay(f_pdata); 1314 cqspi_readdata_capture(cqspi, !cqspi->rclk_en, 1315 f_pdata->read_delay); 1316 } 1317 1318 if (switch_cs || switch_ck) 1319 cqspi_controller_enable(cqspi, 1); 1320} 1321 1322static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata, 1323 const struct spi_mem_op *op) 1324{ 1325 struct cqspi_st *cqspi = f_pdata->cqspi; 1326 loff_t to = op->addr.val; 1327 size_t len = op->data.nbytes; 1328 const u_char *buf = op->data.buf.out; 1329 int ret; 1330 1331 ret = cqspi_write_setup(f_pdata, op); 1332 if (ret) 1333 return ret; 1334 1335 /* 1336 * Some flashes like the Cypress Semper flash expect a dummy 4-byte 1337 * address (all 0s) with the read status register command in DTR mode. 1338 * But this controller does not support sending dummy address bytes to 1339 * the flash when it is polling the write completion register in DTR 1340 * mode. So, we can not use direct mode when in DTR mode for writing 1341 * data. 1342 */ 1343 if (!op->cmd.dtr && cqspi->use_direct_mode && 1344 cqspi->use_direct_mode_wr && ((to + len) <= cqspi->ahb_size)) { 1345 memcpy_toio(cqspi->ahb_base + to, buf, len); 1346 return cqspi_wait_idle(cqspi); 1347 } 1348 1349 return cqspi_indirect_write_execute(f_pdata, to, buf, len); 1350} 1351 1352static void cqspi_rx_dma_callback(void *param) 1353{ 1354 struct cqspi_st *cqspi = param; 1355 1356 complete(&cqspi->rx_dma_complete); 1357} 1358 1359static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata, 1360 u_char *buf, loff_t from, size_t len) 1361{ 1362 struct cqspi_st *cqspi = f_pdata->cqspi; 1363 struct device *dev = &cqspi->pdev->dev; 1364 enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; 1365 dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from; 1366 int ret = 0; 1367 struct dma_async_tx_descriptor *tx; 1368 dma_cookie_t cookie; 1369 dma_addr_t dma_dst; 1370 struct device *ddev; 1371 1372 if (!cqspi->rx_chan || !virt_addr_valid(buf)) { 1373 memcpy_fromio(buf, cqspi->ahb_base + from, len); 1374 return 0; 1375 } 1376 1377 ddev = cqspi->rx_chan->device->dev; 1378 dma_dst = dma_map_single(ddev, buf, len, DMA_FROM_DEVICE); 1379 if (dma_mapping_error(ddev, dma_dst)) { 1380 dev_err(dev, "dma mapping failed\n"); 1381 return -ENOMEM; 1382 } 1383 tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src, 1384 len, flags); 1385 if (!tx) { 1386 dev_err(dev, "device_prep_dma_memcpy error\n"); 1387 ret = -EIO; 1388 goto err_unmap; 1389 } 1390 1391 tx->callback = cqspi_rx_dma_callback; 1392 tx->callback_param = cqspi; 1393 cookie = tx->tx_submit(tx); 1394 reinit_completion(&cqspi->rx_dma_complete); 1395 1396 ret = dma_submit_error(cookie); 1397 if (ret) { 1398 dev_err(dev, "dma_submit_error %d\n", cookie); 1399 ret = -EIO; 1400 goto err_unmap; 1401 } 1402 1403 dma_async_issue_pending(cqspi->rx_chan); 1404 if (!wait_for_completion_timeout(&cqspi->rx_dma_complete, 1405 msecs_to_jiffies(max_t(size_t, len, 500)))) { 1406 dmaengine_terminate_sync(cqspi->rx_chan); 1407 dev_err(dev, "DMA wait_for_completion_timeout\n"); 1408 ret = -ETIMEDOUT; 1409 goto err_unmap; 1410 } 1411 1412err_unmap: 1413 dma_unmap_single(ddev, dma_dst, len, DMA_FROM_DEVICE); 1414 1415 return ret; 1416} 1417 1418static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata, 1419 const struct spi_mem_op *op) 1420{ 1421 struct cqspi_st *cqspi = f_pdata->cqspi; 1422 const struct cqspi_driver_platdata *ddata = cqspi->ddata; 1423 loff_t from = op->addr.val; 1424 size_t len = op->data.nbytes; 1425 u_char *buf = op->data.buf.in; 1426 u64 dma_align = (u64)(uintptr_t)buf; 1427 int ret; 1428 1429 ret = cqspi_read_setup(f_pdata, op); 1430 if (ret) 1431 return ret; 1432 1433 if (cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size)) 1434 return cqspi_direct_read_execute(f_pdata, buf, from, len); 1435 1436 if (cqspi->use_dma_read && ddata && ddata->indirect_read_dma && 1437 virt_addr_valid(buf) && ((dma_align & CQSPI_DMA_UNALIGN) == 0)) 1438 return ddata->indirect_read_dma(f_pdata, buf, from, len); 1439 1440 return cqspi_indirect_read_execute(f_pdata, buf, from, len); 1441} 1442 1443static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op) 1444{ 1445 struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller); 1446 struct cqspi_flash_pdata *f_pdata; 1447 1448 f_pdata = &cqspi->f_pdata[spi_get_chipselect(mem->spi, 0)]; 1449 cqspi_configure(f_pdata, op->max_freq); 1450 1451 if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) { 1452 /* 1453 * Performing reads in DAC mode forces to read minimum 4 bytes 1454 * which is unsupported on some flash devices during register 1455 * reads, prefer STIG mode for such small reads. 1456 */ 1457 if (!op->addr.nbytes || 1458 (op->data.nbytes <= CQSPI_STIG_DATA_LEN_MAX && 1459 !cqspi->disable_stig_mode)) 1460 return cqspi_command_read(f_pdata, op); 1461 1462 return cqspi_read(f_pdata, op); 1463 } 1464 1465 if (!op->addr.nbytes || !op->data.buf.out) 1466 return cqspi_command_write(f_pdata, op); 1467 1468 return cqspi_write(f_pdata, op); 1469} 1470 1471static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) 1472{ 1473 int ret; 1474 struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller); 1475 struct device *dev = &cqspi->pdev->dev; 1476 const struct cqspi_driver_platdata *ddata = of_device_get_match_data(dev); 1477 1478 if (refcount_read(&cqspi->inflight_ops) == 0) 1479 return -ENODEV; 1480 1481 if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) { 1482 ret = pm_runtime_resume_and_get(dev); 1483 if (ret) { 1484 dev_err(&mem->spi->dev, "resume failed with %d\n", ret); 1485 return ret; 1486 } 1487 } 1488 1489 if (!refcount_read(&cqspi->refcount)) 1490 return -EBUSY; 1491 1492 refcount_inc(&cqspi->inflight_ops); 1493 1494 if (!refcount_read(&cqspi->refcount)) { 1495 if (refcount_read(&cqspi->inflight_ops)) 1496 refcount_dec(&cqspi->inflight_ops); 1497 return -EBUSY; 1498 } 1499 1500 ret = cqspi_mem_process(mem, op); 1501 1502 if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) 1503 pm_runtime_put_autosuspend(dev); 1504 1505 if (ret) 1506 dev_err(&mem->spi->dev, "operation failed with %d\n", ret); 1507 1508 if (refcount_read(&cqspi->inflight_ops) > 1) 1509 refcount_dec(&cqspi->inflight_ops); 1510 1511 return ret; 1512} 1513 1514static bool cqspi_supports_mem_op(struct spi_mem *mem, 1515 const struct spi_mem_op *op) 1516{ 1517 bool all_true, all_false; 1518 1519 /* 1520 * op->dummy.dtr is required for converting nbytes into ncycles. 1521 * Also, don't check the dtr field of the op phase having zero nbytes. 1522 */ 1523 all_true = op->cmd.dtr && 1524 (!op->addr.nbytes || op->addr.dtr) && 1525 (!op->dummy.nbytes || op->dummy.dtr) && 1526 (!op->data.nbytes || op->data.dtr); 1527 1528 all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr && 1529 !op->data.dtr; 1530 1531 if (all_true) { 1532 /* Right now we only support 8-8-8 DTR mode. */ 1533 if (op->cmd.nbytes && op->cmd.buswidth != 8) 1534 return false; 1535 if (op->addr.nbytes && op->addr.buswidth != 8) 1536 return false; 1537 if (op->data.nbytes && op->data.buswidth != 8) 1538 return false; 1539 } else if (!all_false) { 1540 /* Mixed DTR modes are not supported. */ 1541 return false; 1542 } 1543 1544 return spi_mem_default_supports_op(mem, op); 1545} 1546 1547static int cqspi_of_get_flash_pdata(struct platform_device *pdev, 1548 struct cqspi_flash_pdata *f_pdata, 1549 struct device_node *np) 1550{ 1551 if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) { 1552 dev_err(&pdev->dev, "couldn't determine read-delay\n"); 1553 return -ENXIO; 1554 } 1555 1556 if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) { 1557 dev_err(&pdev->dev, "couldn't determine tshsl-ns\n"); 1558 return -ENXIO; 1559 } 1560 1561 if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) { 1562 dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n"); 1563 return -ENXIO; 1564 } 1565 1566 if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) { 1567 dev_err(&pdev->dev, "couldn't determine tchsh-ns\n"); 1568 return -ENXIO; 1569 } 1570 1571 if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) { 1572 dev_err(&pdev->dev, "couldn't determine tslch-ns\n"); 1573 return -ENXIO; 1574 } 1575 1576 if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) { 1577 dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n"); 1578 return -ENXIO; 1579 } 1580 1581 return 0; 1582} 1583 1584static int cqspi_of_get_pdata(struct cqspi_st *cqspi) 1585{ 1586 struct device *dev = &cqspi->pdev->dev; 1587 struct device_node *np = dev->of_node; 1588 u32 id[2]; 1589 1590 cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs"); 1591 1592 if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) { 1593 /* Zero signals FIFO depth should be runtime detected. */ 1594 cqspi->fifo_depth = 0; 1595 } 1596 1597 if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) { 1598 dev_err(dev, "couldn't determine fifo-width\n"); 1599 return -ENXIO; 1600 } 1601 1602 if (of_property_read_u32(np, "cdns,trigger-address", 1603 &cqspi->trigger_address)) { 1604 dev_err(dev, "couldn't determine trigger-address\n"); 1605 return -ENXIO; 1606 } 1607 1608 if (of_property_read_u32(np, "num-cs", &cqspi->num_chipselect)) 1609 cqspi->num_chipselect = CQSPI_MAX_CHIPSELECT; 1610 1611 cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en"); 1612 1613 if (!of_property_read_u32_array(np, "power-domains", id, 1614 ARRAY_SIZE(id))) 1615 cqspi->pd_dev_id = id[1]; 1616 1617 return 0; 1618} 1619 1620static void cqspi_controller_init(struct cqspi_st *cqspi) 1621{ 1622 u32 reg; 1623 1624 /* Configure the remap address register, no remap */ 1625 writel(0, cqspi->iobase + CQSPI_REG_REMAP); 1626 1627 /* Disable all interrupts. */ 1628 writel(0, cqspi->iobase + CQSPI_REG_IRQMASK); 1629 1630 /* Configure the SRAM split to 1:1 . */ 1631 writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION); 1632 1633 /* Load indirect trigger address. */ 1634 writel(cqspi->trigger_address, 1635 cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER); 1636 1637 /* Program read watermark -- 1/2 of the FIFO. */ 1638 writel(cqspi->fifo_depth * cqspi->fifo_width / 2, 1639 cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK); 1640 /* Program write watermark -- 1/8 of the FIFO. */ 1641 writel(cqspi->fifo_depth * cqspi->fifo_width / 8, 1642 cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK); 1643 1644 /* Disable direct access controller */ 1645 if (!cqspi->use_direct_mode) { 1646 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 1647 reg &= ~CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL; 1648 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); 1649 } 1650 1651 /* Enable DMA interface */ 1652 if (cqspi->use_dma_read) { 1653 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 1654 reg |= CQSPI_REG_CONFIG_DMA_MASK; 1655 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); 1656 } 1657} 1658 1659static void cqspi_controller_detect_fifo_depth(struct cqspi_st *cqspi) 1660{ 1661 struct device *dev = &cqspi->pdev->dev; 1662 u32 reg, fifo_depth; 1663 1664 /* 1665 * Bits N-1:0 are writable while bits 31:N are read as zero, with 2^N 1666 * the FIFO depth. 1667 */ 1668 writel(U32_MAX, cqspi->iobase + CQSPI_REG_SRAMPARTITION); 1669 reg = readl(cqspi->iobase + CQSPI_REG_SRAMPARTITION); 1670 fifo_depth = reg + 1; 1671 1672 /* FIFO depth of zero means no value from devicetree was provided. */ 1673 if (cqspi->fifo_depth == 0) { 1674 cqspi->fifo_depth = fifo_depth; 1675 dev_dbg(dev, "using FIFO depth of %u\n", fifo_depth); 1676 } else if (fifo_depth != cqspi->fifo_depth) { 1677 dev_warn(dev, "detected FIFO depth (%u) different from config (%u)\n", 1678 fifo_depth, cqspi->fifo_depth); 1679 } 1680} 1681 1682static int cqspi_request_mmap_dma(struct cqspi_st *cqspi) 1683{ 1684 dma_cap_mask_t mask; 1685 1686 dma_cap_zero(mask); 1687 dma_cap_set(DMA_MEMCPY, mask); 1688 1689 cqspi->rx_chan = dma_request_chan_by_mask(&mask); 1690 if (IS_ERR(cqspi->rx_chan)) { 1691 int ret = PTR_ERR(cqspi->rx_chan); 1692 1693 cqspi->rx_chan = NULL; 1694 if (ret == -ENODEV) { 1695 /* DMA support is not mandatory */ 1696 dev_info(&cqspi->pdev->dev, "No Rx DMA available\n"); 1697 return 0; 1698 } 1699 1700 return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n"); 1701 } 1702 init_completion(&cqspi->rx_dma_complete); 1703 1704 return 0; 1705} 1706 1707static const char *cqspi_get_name(struct spi_mem *mem) 1708{ 1709 struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller); 1710 struct device *dev = &cqspi->pdev->dev; 1711 1712 return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev), 1713 spi_get_chipselect(mem->spi, 0)); 1714} 1715 1716static const struct spi_controller_mem_ops cqspi_mem_ops = { 1717 .exec_op = cqspi_exec_mem_op, 1718 .get_name = cqspi_get_name, 1719 .supports_op = cqspi_supports_mem_op, 1720}; 1721 1722static const struct spi_controller_mem_caps cqspi_mem_caps = { 1723 .dtr = true, 1724 .per_op_freq = true, 1725}; 1726 1727static int cqspi_setup_flash(struct cqspi_st *cqspi) 1728{ 1729 struct platform_device *pdev = cqspi->pdev; 1730 struct device *dev = &pdev->dev; 1731 struct cqspi_flash_pdata *f_pdata; 1732 int ret, cs, max_cs = -1; 1733 1734 /* Get flash device data */ 1735 for_each_available_child_of_node_scoped(dev->of_node, np) { 1736 ret = of_property_read_u32(np, "reg", &cs); 1737 if (ret) { 1738 dev_err(dev, "Couldn't determine chip select.\n"); 1739 return ret; 1740 } 1741 1742 if (cs >= cqspi->num_chipselect) { 1743 dev_err(dev, "Chip select %d out of range.\n", cs); 1744 return -EINVAL; 1745 } 1746 1747 max_cs = max_t(int, cs, max_cs); 1748 1749 f_pdata = &cqspi->f_pdata[cs]; 1750 f_pdata->cqspi = cqspi; 1751 f_pdata->cs = cs; 1752 1753 ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np); 1754 if (ret) 1755 return ret; 1756 } 1757 1758 if (max_cs < 0) { 1759 dev_err(dev, "No flash device declared\n"); 1760 return -ENODEV; 1761 } 1762 1763 cqspi->num_chipselect = max_cs + 1; 1764 return 0; 1765} 1766 1767static int cqspi_jh7110_clk_init(struct platform_device *pdev, struct cqspi_st *cqspi) 1768{ 1769 static struct clk_bulk_data qspiclk[] = { 1770 { .id = "apb" }, 1771 { .id = "ahb" }, 1772 }; 1773 1774 int ret = 0; 1775 1776 ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(qspiclk), qspiclk); 1777 if (ret) { 1778 dev_err(&pdev->dev, "%s: failed to get qspi clocks\n", __func__); 1779 return ret; 1780 } 1781 1782 cqspi->clks[CLK_QSPI_APB] = qspiclk[0].clk; 1783 cqspi->clks[CLK_QSPI_AHB] = qspiclk[1].clk; 1784 1785 ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_APB]); 1786 if (ret) { 1787 dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_APB\n", __func__); 1788 return ret; 1789 } 1790 1791 ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_AHB]); 1792 if (ret) { 1793 dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_AHB\n", __func__); 1794 goto disable_apb_clk; 1795 } 1796 1797 cqspi->is_jh7110 = true; 1798 1799 return 0; 1800 1801disable_apb_clk: 1802 clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]); 1803 1804 return ret; 1805} 1806 1807static void cqspi_jh7110_disable_clk(struct platform_device *pdev, struct cqspi_st *cqspi) 1808{ 1809 clk_disable_unprepare(cqspi->clks[CLK_QSPI_AHB]); 1810 clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]); 1811} 1812static int cqspi_probe(struct platform_device *pdev) 1813{ 1814 const struct cqspi_driver_platdata *ddata; 1815 struct reset_control *rstc, *rstc_ocp, *rstc_ref; 1816 struct device *dev = &pdev->dev; 1817 struct spi_controller *host; 1818 struct resource *res_ahb; 1819 struct cqspi_st *cqspi; 1820 int ret; 1821 int irq; 1822 1823 host = devm_spi_alloc_host(&pdev->dev, sizeof(*cqspi)); 1824 if (!host) 1825 return -ENOMEM; 1826 1827 host->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL; 1828 host->mem_ops = &cqspi_mem_ops; 1829 host->mem_caps = &cqspi_mem_caps; 1830 host->dev.of_node = pdev->dev.of_node; 1831 1832 cqspi = spi_controller_get_devdata(host); 1833 1834 cqspi->pdev = pdev; 1835 cqspi->host = host; 1836 cqspi->is_jh7110 = false; 1837 cqspi->ddata = ddata = of_device_get_match_data(dev); 1838 platform_set_drvdata(pdev, cqspi); 1839 1840 /* Obtain configuration from OF. */ 1841 ret = cqspi_of_get_pdata(cqspi); 1842 if (ret) { 1843 dev_err(dev, "Cannot get mandatory OF data.\n"); 1844 return -ENODEV; 1845 } 1846 1847 /* Obtain QSPI clock. */ 1848 cqspi->clk = devm_clk_get(dev, NULL); 1849 if (IS_ERR(cqspi->clk)) { 1850 dev_err(dev, "Cannot claim QSPI clock.\n"); 1851 ret = PTR_ERR(cqspi->clk); 1852 return ret; 1853 } 1854 1855 /* Obtain and remap controller address. */ 1856 cqspi->iobase = devm_platform_ioremap_resource(pdev, 0); 1857 if (IS_ERR(cqspi->iobase)) { 1858 dev_err(dev, "Cannot remap controller address.\n"); 1859 ret = PTR_ERR(cqspi->iobase); 1860 return ret; 1861 } 1862 1863 /* Obtain and remap AHB address. */ 1864 cqspi->ahb_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res_ahb); 1865 if (IS_ERR(cqspi->ahb_base)) { 1866 dev_err(dev, "Cannot remap AHB address.\n"); 1867 ret = PTR_ERR(cqspi->ahb_base); 1868 return ret; 1869 } 1870 cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start; 1871 cqspi->ahb_size = resource_size(res_ahb); 1872 1873 init_completion(&cqspi->transfer_complete); 1874 1875 /* Obtain IRQ line. */ 1876 irq = platform_get_irq(pdev, 0); 1877 if (irq < 0) 1878 return -ENXIO; 1879 1880 ret = pm_runtime_set_active(dev); 1881 if (ret) 1882 return ret; 1883 1884 1885 ret = clk_prepare_enable(cqspi->clk); 1886 if (ret) { 1887 dev_err(dev, "Cannot enable QSPI clock.\n"); 1888 goto probe_clk_failed; 1889 } 1890 1891 /* Obtain QSPI reset control */ 1892 rstc = devm_reset_control_get_optional_exclusive(dev, "qspi"); 1893 if (IS_ERR(rstc)) { 1894 ret = PTR_ERR(rstc); 1895 dev_err(dev, "Cannot get QSPI reset.\n"); 1896 goto probe_reset_failed; 1897 } 1898 1899 rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp"); 1900 if (IS_ERR(rstc_ocp)) { 1901 ret = PTR_ERR(rstc_ocp); 1902 dev_err(dev, "Cannot get QSPI OCP reset.\n"); 1903 goto probe_reset_failed; 1904 } 1905 1906 if (of_device_is_compatible(pdev->dev.of_node, "starfive,jh7110-qspi")) { 1907 rstc_ref = devm_reset_control_get_optional_exclusive(dev, "rstc_ref"); 1908 if (IS_ERR(rstc_ref)) { 1909 ret = PTR_ERR(rstc_ref); 1910 dev_err(dev, "Cannot get QSPI REF reset.\n"); 1911 goto probe_reset_failed; 1912 } 1913 reset_control_assert(rstc_ref); 1914 reset_control_deassert(rstc_ref); 1915 } 1916 1917 reset_control_assert(rstc); 1918 reset_control_deassert(rstc); 1919 1920 reset_control_assert(rstc_ocp); 1921 reset_control_deassert(rstc_ocp); 1922 1923 cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk); 1924 host->max_speed_hz = cqspi->master_ref_clk_hz; 1925 1926 /* write completion is supported by default */ 1927 cqspi->wr_completion = true; 1928 1929 if (ddata) { 1930 if (ddata->quirks & CQSPI_NEEDS_WR_DELAY) 1931 cqspi->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC, 1932 cqspi->master_ref_clk_hz); 1933 if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL) 1934 host->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL; 1935 if (ddata->hwcaps_mask & CQSPI_SUPPORTS_QUAD) 1936 host->mode_bits |= SPI_TX_QUAD; 1937 if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) { 1938 cqspi->use_direct_mode = true; 1939 cqspi->use_direct_mode_wr = true; 1940 } 1941 if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA) 1942 cqspi->use_dma_read = true; 1943 if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION) 1944 cqspi->wr_completion = false; 1945 if (ddata->quirks & CQSPI_SLOW_SRAM) 1946 cqspi->slow_sram = true; 1947 if (ddata->quirks & CQSPI_NEEDS_APB_AHB_HAZARD_WAR) 1948 cqspi->apb_ahb_hazard = true; 1949 1950 if (ddata->jh7110_clk_init) { 1951 ret = cqspi_jh7110_clk_init(pdev, cqspi); 1952 if (ret) 1953 goto probe_reset_failed; 1954 } 1955 if (ddata->quirks & CQSPI_DISABLE_STIG_MODE) 1956 cqspi->disable_stig_mode = true; 1957 1958 if (ddata->quirks & CQSPI_DMA_SET_MASK) { 1959 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 1960 if (ret) 1961 goto probe_reset_failed; 1962 } 1963 } 1964 1965 refcount_set(&cqspi->refcount, 1); 1966 refcount_set(&cqspi->inflight_ops, 1); 1967 1968 ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0, 1969 pdev->name, cqspi); 1970 if (ret) { 1971 dev_err(dev, "Cannot request IRQ.\n"); 1972 goto probe_reset_failed; 1973 } 1974 1975 cqspi_wait_idle(cqspi); 1976 cqspi_controller_enable(cqspi, 0); 1977 cqspi_controller_detect_fifo_depth(cqspi); 1978 cqspi_controller_init(cqspi); 1979 cqspi_controller_enable(cqspi, 1); 1980 cqspi->current_cs = -1; 1981 cqspi->sclk = 0; 1982 1983 if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) { 1984 pm_runtime_enable(dev); 1985 pm_runtime_set_autosuspend_delay(dev, CQSPI_AUTOSUSPEND_TIMEOUT); 1986 pm_runtime_use_autosuspend(dev); 1987 pm_runtime_get_noresume(dev); 1988 } 1989 1990 ret = cqspi_setup_flash(cqspi); 1991 if (ret) { 1992 dev_err(dev, "failed to setup flash parameters %d\n", ret); 1993 goto probe_setup_failed; 1994 } 1995 1996 host->num_chipselect = cqspi->num_chipselect; 1997 1998 if (ddata && (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET)) 1999 cqspi_device_reset(cqspi); 2000 2001 if (cqspi->use_direct_mode) { 2002 ret = cqspi_request_mmap_dma(cqspi); 2003 if (ret == -EPROBE_DEFER) { 2004 dev_err_probe(&pdev->dev, ret, "Failed to request mmap DMA\n"); 2005 goto probe_setup_failed; 2006 } 2007 } 2008 2009 ret = spi_register_controller(host); 2010 if (ret) { 2011 dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret); 2012 goto probe_setup_failed; 2013 } 2014 2015 if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) { 2016 pm_runtime_mark_last_busy(dev); 2017 pm_runtime_put_autosuspend(dev); 2018 } 2019 2020 return 0; 2021probe_setup_failed: 2022 if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) 2023 pm_runtime_disable(dev); 2024 cqspi_controller_enable(cqspi, 0); 2025probe_reset_failed: 2026 if (cqspi->is_jh7110) 2027 cqspi_jh7110_disable_clk(pdev, cqspi); 2028 2029 if (pm_runtime_get_sync(&pdev->dev) >= 0) 2030 clk_disable_unprepare(cqspi->clk); 2031probe_clk_failed: 2032 return ret; 2033} 2034 2035static void cqspi_remove(struct platform_device *pdev) 2036{ 2037 const struct cqspi_driver_platdata *ddata; 2038 struct cqspi_st *cqspi = platform_get_drvdata(pdev); 2039 struct device *dev = &pdev->dev; 2040 2041 ddata = of_device_get_match_data(dev); 2042 2043 refcount_set(&cqspi->refcount, 0); 2044 2045 if (!refcount_dec_and_test(&cqspi->inflight_ops)) 2046 cqspi_wait_idle(cqspi); 2047 2048 spi_unregister_controller(cqspi->host); 2049 cqspi_controller_enable(cqspi, 0); 2050 2051 if (cqspi->rx_chan) 2052 dma_release_channel(cqspi->rx_chan); 2053 2054 if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) 2055 if (pm_runtime_get_sync(&pdev->dev) >= 0) 2056 clk_disable(cqspi->clk); 2057 2058 if (cqspi->is_jh7110) 2059 cqspi_jh7110_disable_clk(pdev, cqspi); 2060 2061 if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) { 2062 pm_runtime_put_sync(&pdev->dev); 2063 pm_runtime_disable(&pdev->dev); 2064 } 2065} 2066 2067static int cqspi_runtime_suspend(struct device *dev) 2068{ 2069 struct cqspi_st *cqspi = dev_get_drvdata(dev); 2070 2071 cqspi_controller_enable(cqspi, 0); 2072 clk_disable_unprepare(cqspi->clk); 2073 return 0; 2074} 2075 2076static int cqspi_runtime_resume(struct device *dev) 2077{ 2078 struct cqspi_st *cqspi = dev_get_drvdata(dev); 2079 2080 clk_prepare_enable(cqspi->clk); 2081 cqspi_wait_idle(cqspi); 2082 cqspi_controller_enable(cqspi, 0); 2083 cqspi_controller_init(cqspi); 2084 cqspi_controller_enable(cqspi, 1); 2085 2086 cqspi->current_cs = -1; 2087 cqspi->sclk = 0; 2088 return 0; 2089} 2090 2091static int cqspi_suspend(struct device *dev) 2092{ 2093 struct cqspi_st *cqspi = dev_get_drvdata(dev); 2094 int ret; 2095 2096 ret = spi_controller_suspend(cqspi->host); 2097 if (ret) 2098 return ret; 2099 2100 return pm_runtime_force_suspend(dev); 2101} 2102 2103static int cqspi_resume(struct device *dev) 2104{ 2105 struct cqspi_st *cqspi = dev_get_drvdata(dev); 2106 int ret; 2107 2108 ret = pm_runtime_force_resume(dev); 2109 if (ret) { 2110 dev_err(dev, "pm_runtime_force_resume failed on resume\n"); 2111 return ret; 2112 } 2113 2114 return spi_controller_resume(cqspi->host); 2115} 2116 2117static const struct dev_pm_ops cqspi_dev_pm_ops = { 2118 RUNTIME_PM_OPS(cqspi_runtime_suspend, cqspi_runtime_resume, NULL) 2119 SYSTEM_SLEEP_PM_OPS(cqspi_suspend, cqspi_resume) 2120}; 2121 2122static const struct cqspi_driver_platdata cdns_qspi = { 2123 .quirks = CQSPI_DISABLE_DAC_MODE, 2124}; 2125 2126static const struct cqspi_driver_platdata k2g_qspi = { 2127 .quirks = CQSPI_NEEDS_WR_DELAY, 2128}; 2129 2130static const struct cqspi_driver_platdata am654_ospi = { 2131 .hwcaps_mask = CQSPI_SUPPORTS_OCTAL | CQSPI_SUPPORTS_QUAD, 2132 .quirks = CQSPI_NEEDS_WR_DELAY, 2133}; 2134 2135static const struct cqspi_driver_platdata intel_lgm_qspi = { 2136 .quirks = CQSPI_DISABLE_DAC_MODE, 2137}; 2138 2139static const struct cqspi_driver_platdata socfpga_qspi = { 2140 .quirks = CQSPI_DISABLE_DAC_MODE 2141 | CQSPI_NO_SUPPORT_WR_COMPLETION 2142 | CQSPI_SLOW_SRAM 2143 | CQSPI_DISABLE_STIG_MODE 2144 | CQSPI_DISABLE_RUNTIME_PM, 2145}; 2146 2147static const struct cqspi_driver_platdata versal_ospi = { 2148 .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, 2149 .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA 2150 | CQSPI_DMA_SET_MASK, 2151 .indirect_read_dma = cqspi_versal_indirect_read_dma, 2152 .get_dma_status = cqspi_get_versal_dma_status, 2153}; 2154 2155static const struct cqspi_driver_platdata versal2_ospi = { 2156 .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, 2157 .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA 2158 | CQSPI_DMA_SET_MASK 2159 | CQSPI_SUPPORT_DEVICE_RESET, 2160 .indirect_read_dma = cqspi_versal_indirect_read_dma, 2161 .get_dma_status = cqspi_get_versal_dma_status, 2162}; 2163 2164static const struct cqspi_driver_platdata jh7110_qspi = { 2165 .quirks = CQSPI_DISABLE_DAC_MODE, 2166 .jh7110_clk_init = cqspi_jh7110_clk_init, 2167}; 2168 2169static const struct cqspi_driver_platdata pensando_cdns_qspi = { 2170 .quirks = CQSPI_NEEDS_APB_AHB_HAZARD_WAR | CQSPI_DISABLE_DAC_MODE, 2171}; 2172 2173static const struct cqspi_driver_platdata mobileye_eyeq5_ospi = { 2174 .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, 2175 .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION | 2176 CQSPI_RD_NO_IRQ, 2177}; 2178 2179static const struct of_device_id cqspi_dt_ids[] = { 2180 { 2181 .compatible = "cdns,qspi-nor", 2182 .data = &cdns_qspi, 2183 }, 2184 { 2185 .compatible = "ti,k2g-qspi", 2186 .data = &k2g_qspi, 2187 }, 2188 { 2189 .compatible = "ti,am654-ospi", 2190 .data = &am654_ospi, 2191 }, 2192 { 2193 .compatible = "intel,lgm-qspi", 2194 .data = &intel_lgm_qspi, 2195 }, 2196 { 2197 .compatible = "xlnx,versal-ospi-1.0", 2198 .data = &versal_ospi, 2199 }, 2200 { 2201 .compatible = "intel,socfpga-qspi", 2202 .data = &socfpga_qspi, 2203 }, 2204 { 2205 .compatible = "starfive,jh7110-qspi", 2206 .data = &jh7110_qspi, 2207 }, 2208 { 2209 .compatible = "amd,pensando-elba-qspi", 2210 .data = &pensando_cdns_qspi, 2211 }, 2212 { 2213 .compatible = "mobileye,eyeq5-ospi", 2214 .data = &mobileye_eyeq5_ospi, 2215 }, 2216 { 2217 .compatible = "amd,versal2-ospi", 2218 .data = &versal2_ospi, 2219 }, 2220 { /* end of table */ } 2221}; 2222 2223MODULE_DEVICE_TABLE(of, cqspi_dt_ids); 2224 2225static struct platform_driver cqspi_platform_driver = { 2226 .probe = cqspi_probe, 2227 .remove = cqspi_remove, 2228 .driver = { 2229 .name = CQSPI_NAME, 2230 .pm = pm_ptr(&cqspi_dev_pm_ops), 2231 .of_match_table = cqspi_dt_ids, 2232 }, 2233}; 2234 2235module_platform_driver(cqspi_platform_driver); 2236 2237MODULE_DESCRIPTION("Cadence QSPI Controller Driver"); 2238MODULE_LICENSE("GPL v2"); 2239MODULE_ALIAS("platform:" CQSPI_NAME); 2240MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>"); 2241MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>"); 2242MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>"); 2243MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>"); 2244MODULE_AUTHOR("Pratyush Yadav <p.yadav@ti.com>");