spi: cadence-quadspi: Improve CQSPI_SLOW_SRAM quirk if flash is slow

The CQSPI_SLOW_SRAM quirk on the Stratix10 platform reduces the interrupt
count, but it also causes timeouts if a small block is read or if the
flash device is slower than, or equal in speed to, the SRAM's read
operations. Adding the CQSPI_REG_IRQ_IND_COMP interrupt resolves the
problem for small reads, and no longer disabling interrupts mid-transfer
resolves the issue with lost interrupts.
This marginally increases the IRQ count; tests show only a few percent
more serviced interrupts.

Test:
$ dd if=/dev/mtd0 of=/dev/null bs=1M count=64
Results from the Stratix10 platform with mt25qu02g flash.
FIFO size in all tests: 128

Serviced interrupt call counts:
Without CQSPI_SLOW_SRAM quirk: 16 668 850
With CQSPI_SLOW_SRAM quirk: 204 176
With CQSPI_SLOW_SRAM and this commit: 224 528

Signed-off-by: Mateusz Litwin <mateusz.litwin@nokia.com>
Link: https://patch.msgid.link/20251218-cqspi_indirect_read_improve-v2-2-396079972f2a@nokia.com
Signed-off-by: Mark Brown <broonie@kernel.org>

authored by Mateusz Litwin and committed by Mark Brown 5bfbbf0a d67396c9

+5 -14
+5 -14
drivers/spi/spi-cadence-quadspi.c
··· 300 CQSPI_REG_IRQ_IND_SRAM_FULL | \ 301 CQSPI_REG_IRQ_IND_COMP) 302 303 #define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \ 304 CQSPI_REG_IRQ_WATERMARK | \ 305 CQSPI_REG_IRQ_UNDERFLOW) ··· 384 else if (!cqspi->slow_sram) 385 irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR; 386 else 387 - irq_status &= CQSPI_REG_IRQ_WATERMARK | CQSPI_IRQ_MASK_WR; 388 389 if (irq_status) 390 complete(&cqspi->transfer_complete); ··· 760 */ 761 762 if (use_irq && cqspi->slow_sram) 763 - writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK); 764 else if (use_irq) 765 writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK); 766 else ··· 777 !wait_for_completion_timeout(&cqspi->transfer_complete, 778 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) 779 ret = -ETIMEDOUT; 780 - 781 - /* 782 - * Disable all read interrupts until 783 - * we are out of "bytes to read" 784 - */ 785 - if (cqspi->slow_sram) 786 - writel(0x0, reg_base + CQSPI_REG_IRQMASK); 787 788 /* 789 * Prevent lost interrupt and race condition by reinitializing early. ··· 815 rxbuf += bytes_to_read; 816 remaining -= bytes_to_read; 817 bytes_to_read = cqspi_get_rd_sram_level(cqspi); 818 - } 819 - 820 - if (use_irq && remaining > 0) { 821 - if (cqspi->slow_sram) 822 - writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK); 823 } 824 } 825
··· 300 CQSPI_REG_IRQ_IND_SRAM_FULL | \ 301 CQSPI_REG_IRQ_IND_COMP) 302 303 + #define CQSPI_IRQ_MASK_RD_SLOW_SRAM (CQSPI_REG_IRQ_WATERMARK | \ 304 + CQSPI_REG_IRQ_IND_COMP) 305 + 306 #define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \ 307 CQSPI_REG_IRQ_WATERMARK | \ 308 CQSPI_REG_IRQ_UNDERFLOW) ··· 381 else if (!cqspi->slow_sram) 382 irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR; 383 else 384 + irq_status &= CQSPI_IRQ_MASK_RD_SLOW_SRAM | CQSPI_IRQ_MASK_WR; 385 386 if (irq_status) 387 complete(&cqspi->transfer_complete); ··· 757 */ 758 759 if (use_irq && cqspi->slow_sram) 760 + writel(CQSPI_IRQ_MASK_RD_SLOW_SRAM, reg_base + CQSPI_REG_IRQMASK); 761 else if (use_irq) 762 writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK); 763 else ··· 774 !wait_for_completion_timeout(&cqspi->transfer_complete, 775 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) 776 ret = -ETIMEDOUT; 777 778 /* 779 * Prevent lost interrupt and race condition by reinitializing early. ··· 819 rxbuf += bytes_to_read; 820 remaining -= bytes_to_read; 821 bytes_to_read = cqspi_get_rd_sram_level(cqspi); 822 } 823 } 824