Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mtd: spi-nor: intel-spi: Convert to SPI MEM

The preferred way to implement SPI-NOR controller drivers is through the SPI
subsystem utilizing the SPI MEM core functions. This converts the
Intel SPI flash controller driver over to SPI MEM by moving the driver
from the SPI-NOR subsystem to the SPI subsystem and in one go makes it use the
SPI MEM functions. The driver name will be changed from intel-spi to
spi-intel to match the convention used in the SPI subsystem.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Reviewed-by: Mauro Lima <mauro.lima@eclypsium.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Acked-by: Lee Jones <lee.jones@linaro.org>
Acked-by: Pratyush Yadav <p.yadav@ti.com>
Reviewed-by: Tudor Ambarus <tudor.ambarus@microchip.com>
Link: https://lore.kernel.org/r/20220209122706.42439-3-mika.westerberg@linux.intel.com
Signed-off-by: Mark Brown <broonie@kernel.org>

authored by

Mika Westerberg and committed by
Mark Brown
e23e5a05 cd149eff

+1321 -1064
-36
drivers/mtd/spi-nor/controllers/Kconfig
··· 26 26 SPIFI is a specialized controller for connecting serial SPI 27 27 Flash. Enable this option if you have a device with a SPIFI 28 28 controller and want to access the Flash as a mtd device. 29 - 30 - config SPI_INTEL_SPI 31 - tristate 32 - 33 - config SPI_INTEL_SPI_PCI 34 - tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)" 35 - depends on X86 && PCI 36 - select SPI_INTEL_SPI 37 - help 38 - This enables PCI support for the Intel PCH/PCU SPI controller in 39 - master mode. This controller is present in modern Intel hardware 40 - and is used to hold BIOS and other persistent settings. Using 41 - this driver it is possible to upgrade BIOS directly from Linux. 42 - 43 - Say N here unless you know what you are doing. Overwriting the 44 - SPI flash may render the system unbootable. 45 - 46 - To compile this driver as a module, choose M here: the module 47 - will be called intel-spi-pci. 48 - 49 - config SPI_INTEL_SPI_PLATFORM 50 - tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)" 51 - depends on X86 52 - select SPI_INTEL_SPI 53 - help 54 - This enables platform support for the Intel PCH/PCU SPI 55 - controller in master mode. This controller is present in modern 56 - Intel hardware and is used to hold BIOS and other persistent 57 - settings. Using this driver it is possible to upgrade BIOS 58 - directly from Linux. 59 - 60 - Say N here unless you know what you are doing. Overwriting the 61 - SPI flash may render the system unbootable. 62 - 63 - To compile this driver as a module, choose M here: the module 64 - will be called intel-spi-platform.
-3
drivers/mtd/spi-nor/controllers/Makefile
··· 2 2 obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o 3 3 obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o 4 4 obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o 5 - obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o 6 - obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o 7 - obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o
+3 -17
drivers/mtd/spi-nor/controllers/intel-spi-pci.c drivers/spi/spi-intel-pci.c
··· 2 2 /* 3 3 * Intel PCH/PCU SPI flash PCI driver. 4 4 * 5 - * Copyright (C) 2016, Intel Corporation 5 + * Copyright (C) 2016 - 2022, Intel Corporation 6 6 * Author: Mika Westerberg <mika.westerberg@linux.intel.com> 7 7 */ 8 8 9 - #include <linux/ioport.h> 10 - #include <linux/kernel.h> 11 9 #include <linux/module.h> 12 10 #include <linux/pci.h> 13 11 14 - #include "intel-spi.h" 12 + #include "spi-intel.h" 15 13 16 14 #define BCR 0xdc 17 15 #define BCR_WPD BIT(0) ··· 44 46 const struct pci_device_id *id) 45 47 { 46 48 struct intel_spi_boardinfo *info; 47 - struct intel_spi *ispi; 48 49 int ret; 49 50 50 51 ret = pcim_enable_device(pdev); ··· 56 59 return -ENOMEM; 57 60 58 61 info->data = pdev; 59 - ispi = intel_spi_probe(&pdev->dev, &pdev->resource[0], info); 60 - if (IS_ERR(ispi)) 61 - return PTR_ERR(ispi); 62 - 63 - pci_set_drvdata(pdev, ispi); 64 - return 0; 65 - } 66 - 67 - static void intel_spi_pci_remove(struct pci_dev *pdev) 68 - { 69 - intel_spi_remove(pci_get_drvdata(pdev)); 62 + return intel_spi_probe(&pdev->dev, &pdev->resource[0], info); 70 63 } 71 64 72 65 static const struct pci_device_id intel_spi_pci_ids[] = { ··· 85 98 .name = "intel-spi", 86 99 .id_table = intel_spi_pci_ids, 87 100 .probe = intel_spi_pci_probe, 88 - .remove = intel_spi_pci_remove, 89 101 }; 90 102 91 103 module_pci_driver(intel_spi_pci_driver);
+3 -18
drivers/mtd/spi-nor/controllers/intel-spi-platform.c drivers/spi/spi-intel-platform.c
··· 2 2 /* 3 3 * Intel PCH/PCU SPI flash platform driver. 4 4 * 5 - * Copyright (C) 2016, Intel Corporation 5 + * Copyright (C) 2016 - 2022, Intel Corporation 6 6 * Author: Mika Westerberg <mika.westerberg@linux.intel.com> 7 7 */ 8 8 9 - #include <linux/ioport.h> 10 9 #include <linux/module.h> 11 10 #include <linux/platform_device.h> 12 11 13 - #include "intel-spi.h" 12 + #include "spi-intel.h" 14 13 15 14 static int intel_spi_platform_probe(struct platform_device *pdev) 16 15 { 17 16 struct intel_spi_boardinfo *info; 18 - struct intel_spi *ispi; 19 17 struct resource *mem; 20 18 21 19 info = dev_get_platdata(&pdev->dev); ··· 21 23 return -EINVAL; 22 24 23 25 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 24 - ispi = intel_spi_probe(&pdev->dev, mem, info); 25 - if (IS_ERR(ispi)) 26 - return PTR_ERR(ispi); 27 - 28 - platform_set_drvdata(pdev, ispi); 29 - return 0; 30 - } 31 - 32 - static int intel_spi_platform_remove(struct platform_device *pdev) 33 - { 34 - struct intel_spi *ispi = platform_get_drvdata(pdev); 35 - 36 - return intel_spi_remove(ispi); 26 + return intel_spi_probe(&pdev->dev, mem, info); 37 27 } 38 28 39 29 static struct platform_driver intel_spi_platform_driver = { 40 30 .probe = intel_spi_platform_probe, 41 - .remove = intel_spi_platform_remove, 42 31 .driver = { 43 32 .name = "intel-spi", 44 33 },
-965
drivers/mtd/spi-nor/controllers/intel-spi.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Intel PCH/PCU SPI flash driver. 4 - * 5 - * Copyright (C) 2016, Intel Corporation 6 - * Author: Mika Westerberg <mika.westerberg@linux.intel.com> 7 - */ 8 - 9 - #include <linux/err.h> 10 - #include <linux/io.h> 11 - #include <linux/iopoll.h> 12 - #include <linux/module.h> 13 - #include <linux/sched.h> 14 - #include <linux/sizes.h> 15 - #include <linux/mtd/mtd.h> 16 - #include <linux/mtd/partitions.h> 17 - #include <linux/mtd/spi-nor.h> 18 - 19 - #include "intel-spi.h" 20 - 21 - /* Offsets are from @ispi->base */ 22 - #define BFPREG 0x00 23 - 24 - #define HSFSTS_CTL 0x04 25 - #define HSFSTS_CTL_FSMIE BIT(31) 26 - #define HSFSTS_CTL_FDBC_SHIFT 24 27 - #define HSFSTS_CTL_FDBC_MASK (0x3f << HSFSTS_CTL_FDBC_SHIFT) 28 - 29 - #define HSFSTS_CTL_FCYCLE_SHIFT 17 30 - #define HSFSTS_CTL_FCYCLE_MASK (0x0f << HSFSTS_CTL_FCYCLE_SHIFT) 31 - /* HW sequencer opcodes */ 32 - #define HSFSTS_CTL_FCYCLE_READ (0x00 << HSFSTS_CTL_FCYCLE_SHIFT) 33 - #define HSFSTS_CTL_FCYCLE_WRITE (0x02 << HSFSTS_CTL_FCYCLE_SHIFT) 34 - #define HSFSTS_CTL_FCYCLE_ERASE (0x03 << HSFSTS_CTL_FCYCLE_SHIFT) 35 - #define HSFSTS_CTL_FCYCLE_ERASE_64K (0x04 << HSFSTS_CTL_FCYCLE_SHIFT) 36 - #define HSFSTS_CTL_FCYCLE_RDID (0x06 << HSFSTS_CTL_FCYCLE_SHIFT) 37 - #define HSFSTS_CTL_FCYCLE_WRSR (0x07 << HSFSTS_CTL_FCYCLE_SHIFT) 38 - #define HSFSTS_CTL_FCYCLE_RDSR (0x08 << HSFSTS_CTL_FCYCLE_SHIFT) 39 - 40 - #define HSFSTS_CTL_FGO BIT(16) 41 - #define HSFSTS_CTL_FLOCKDN BIT(15) 42 - #define HSFSTS_CTL_FDV BIT(14) 43 - #define HSFSTS_CTL_SCIP BIT(5) 44 - #define HSFSTS_CTL_AEL BIT(2) 45 - #define HSFSTS_CTL_FCERR BIT(1) 46 - #define HSFSTS_CTL_FDONE BIT(0) 47 - 48 - #define FADDR 0x08 49 - #define DLOCK 0x0c 50 - #define FDATA(n) (0x10 + ((n) * 4)) 51 - 52 - #define FRACC 0x50 53 - 54 - #define FREG(n) (0x54 + ((n) * 4)) 55 - #define FREG_BASE_MASK 0x3fff 56 - #define FREG_LIMIT_SHIFT 16 57 - #define FREG_LIMIT_MASK (0x03fff << FREG_LIMIT_SHIFT) 58 - 59 - /* 
Offset is from @ispi->pregs */ 60 - #define PR(n) ((n) * 4) 61 - #define PR_WPE BIT(31) 62 - #define PR_LIMIT_SHIFT 16 63 - #define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT) 64 - #define PR_RPE BIT(15) 65 - #define PR_BASE_MASK 0x3fff 66 - 67 - /* Offsets are from @ispi->sregs */ 68 - #define SSFSTS_CTL 0x00 69 - #define SSFSTS_CTL_FSMIE BIT(23) 70 - #define SSFSTS_CTL_DS BIT(22) 71 - #define SSFSTS_CTL_DBC_SHIFT 16 72 - #define SSFSTS_CTL_SPOP BIT(11) 73 - #define SSFSTS_CTL_ACS BIT(10) 74 - #define SSFSTS_CTL_SCGO BIT(9) 75 - #define SSFSTS_CTL_COP_SHIFT 12 76 - #define SSFSTS_CTL_FRS BIT(7) 77 - #define SSFSTS_CTL_DOFRS BIT(6) 78 - #define SSFSTS_CTL_AEL BIT(4) 79 - #define SSFSTS_CTL_FCERR BIT(3) 80 - #define SSFSTS_CTL_FDONE BIT(2) 81 - #define SSFSTS_CTL_SCIP BIT(0) 82 - 83 - #define PREOP_OPTYPE 0x04 84 - #define OPMENU0 0x08 85 - #define OPMENU1 0x0c 86 - 87 - #define OPTYPE_READ_NO_ADDR 0 88 - #define OPTYPE_WRITE_NO_ADDR 1 89 - #define OPTYPE_READ_WITH_ADDR 2 90 - #define OPTYPE_WRITE_WITH_ADDR 3 91 - 92 - /* CPU specifics */ 93 - #define BYT_PR 0x74 94 - #define BYT_SSFSTS_CTL 0x90 95 - #define BYT_BCR 0xfc 96 - #define BYT_BCR_WPD BIT(0) 97 - #define BYT_FREG_NUM 5 98 - #define BYT_PR_NUM 5 99 - 100 - #define LPT_PR 0x74 101 - #define LPT_SSFSTS_CTL 0x90 102 - #define LPT_FREG_NUM 5 103 - #define LPT_PR_NUM 5 104 - 105 - #define BXT_PR 0x84 106 - #define BXT_SSFSTS_CTL 0xa0 107 - #define BXT_FREG_NUM 12 108 - #define BXT_PR_NUM 6 109 - 110 - #define CNL_PR 0x84 111 - #define CNL_FREG_NUM 6 112 - #define CNL_PR_NUM 5 113 - 114 - #define LVSCC 0xc4 115 - #define UVSCC 0xc8 116 - #define ERASE_OPCODE_SHIFT 8 117 - #define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) 118 - #define ERASE_64K_OPCODE_SHIFT 16 119 - #define ERASE_64K_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) 120 - 121 - #define INTEL_SPI_TIMEOUT 5000 /* ms */ 122 - #define INTEL_SPI_FIFO_SZ 64 123 - 124 - /** 125 - * struct intel_spi - Driver private data 126 - * @dev: Device pointer 127 - * 
@info: Pointer to board specific info 128 - * @nor: SPI NOR layer structure 129 - * @base: Beginning of MMIO space 130 - * @pregs: Start of protection registers 131 - * @sregs: Start of software sequencer registers 132 - * @nregions: Maximum number of regions 133 - * @pr_num: Maximum number of protected range registers 134 - * @locked: Is SPI setting locked 135 - * @swseq_reg: Use SW sequencer in register reads/writes 136 - * @swseq_erase: Use SW sequencer in erase operation 137 - * @erase_64k: 64k erase supported 138 - * @atomic_preopcode: Holds preopcode when atomic sequence is requested 139 - * @opcodes: Opcodes which are supported. This are programmed by BIOS 140 - * before it locks down the controller. 141 - */ 142 - struct intel_spi { 143 - struct device *dev; 144 - const struct intel_spi_boardinfo *info; 145 - struct spi_nor nor; 146 - void __iomem *base; 147 - void __iomem *pregs; 148 - void __iomem *sregs; 149 - size_t nregions; 150 - size_t pr_num; 151 - bool locked; 152 - bool swseq_reg; 153 - bool swseq_erase; 154 - bool erase_64k; 155 - u8 atomic_preopcode; 156 - u8 opcodes[8]; 157 - }; 158 - 159 - static bool writeable; 160 - module_param(writeable, bool, 0); 161 - MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)"); 162 - 163 - static void intel_spi_dump_regs(struct intel_spi *ispi) 164 - { 165 - u32 value; 166 - int i; 167 - 168 - dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG)); 169 - 170 - value = readl(ispi->base + HSFSTS_CTL); 171 - dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value); 172 - if (value & HSFSTS_CTL_FLOCKDN) 173 - dev_dbg(ispi->dev, "-> Locked\n"); 174 - 175 - dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR)); 176 - dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK)); 177 - 178 - for (i = 0; i < 16; i++) 179 - dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n", 180 - i, readl(ispi->base + FDATA(i))); 181 - 182 - dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + 
FRACC)); 183 - 184 - for (i = 0; i < ispi->nregions; i++) 185 - dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i, 186 - readl(ispi->base + FREG(i))); 187 - for (i = 0; i < ispi->pr_num; i++) 188 - dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i, 189 - readl(ispi->pregs + PR(i))); 190 - 191 - if (ispi->sregs) { 192 - value = readl(ispi->sregs + SSFSTS_CTL); 193 - dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value); 194 - dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n", 195 - readl(ispi->sregs + PREOP_OPTYPE)); 196 - dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", 197 - readl(ispi->sregs + OPMENU0)); 198 - dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", 199 - readl(ispi->sregs + OPMENU1)); 200 - } 201 - 202 - if (ispi->info->type == INTEL_SPI_BYT) 203 - dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR)); 204 - 205 - dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC)); 206 - dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC)); 207 - 208 - dev_dbg(ispi->dev, "Protected regions:\n"); 209 - for (i = 0; i < ispi->pr_num; i++) { 210 - u32 base, limit; 211 - 212 - value = readl(ispi->pregs + PR(i)); 213 - if (!(value & (PR_WPE | PR_RPE))) 214 - continue; 215 - 216 - limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT; 217 - base = value & PR_BASE_MASK; 218 - 219 - dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n", 220 - i, base << 12, (limit << 12) | 0xfff, 221 - value & PR_WPE ? 'W' : '.', 222 - value & PR_RPE ? 
'R' : '.'); 223 - } 224 - 225 - dev_dbg(ispi->dev, "Flash regions:\n"); 226 - for (i = 0; i < ispi->nregions; i++) { 227 - u32 region, base, limit; 228 - 229 - region = readl(ispi->base + FREG(i)); 230 - base = region & FREG_BASE_MASK; 231 - limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT; 232 - 233 - if (base >= limit || (i > 0 && limit == 0)) 234 - dev_dbg(ispi->dev, " %02d disabled\n", i); 235 - else 236 - dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n", 237 - i, base << 12, (limit << 12) | 0xfff); 238 - } 239 - 240 - dev_dbg(ispi->dev, "Using %cW sequencer for register access\n", 241 - ispi->swseq_reg ? 'S' : 'H'); 242 - dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n", 243 - ispi->swseq_erase ? 'S' : 'H'); 244 - } 245 - 246 - /* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */ 247 - static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size) 248 - { 249 - size_t bytes; 250 - int i = 0; 251 - 252 - if (size > INTEL_SPI_FIFO_SZ) 253 - return -EINVAL; 254 - 255 - while (size > 0) { 256 - bytes = min_t(size_t, size, 4); 257 - memcpy_fromio(buf, ispi->base + FDATA(i), bytes); 258 - size -= bytes; 259 - buf += bytes; 260 - i++; 261 - } 262 - 263 - return 0; 264 - } 265 - 266 - /* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */ 267 - static int intel_spi_write_block(struct intel_spi *ispi, const void *buf, 268 - size_t size) 269 - { 270 - size_t bytes; 271 - int i = 0; 272 - 273 - if (size > INTEL_SPI_FIFO_SZ) 274 - return -EINVAL; 275 - 276 - while (size > 0) { 277 - bytes = min_t(size_t, size, 4); 278 - memcpy_toio(ispi->base + FDATA(i), buf, bytes); 279 - size -= bytes; 280 - buf += bytes; 281 - i++; 282 - } 283 - 284 - return 0; 285 - } 286 - 287 - static int intel_spi_wait_hw_busy(struct intel_spi *ispi) 288 - { 289 - u32 val; 290 - 291 - return readl_poll_timeout(ispi->base + HSFSTS_CTL, val, 292 - !(val & HSFSTS_CTL_SCIP), 0, 293 - INTEL_SPI_TIMEOUT * 1000); 294 - } 295 - 296 - static int 
intel_spi_wait_sw_busy(struct intel_spi *ispi) 297 - { 298 - u32 val; 299 - 300 - return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val, 301 - !(val & SSFSTS_CTL_SCIP), 0, 302 - INTEL_SPI_TIMEOUT * 1000); 303 - } 304 - 305 - static bool intel_spi_set_writeable(struct intel_spi *ispi) 306 - { 307 - if (!ispi->info->set_writeable) 308 - return false; 309 - 310 - return ispi->info->set_writeable(ispi->base, ispi->info->data); 311 - } 312 - 313 - static int intel_spi_init(struct intel_spi *ispi) 314 - { 315 - u32 opmenu0, opmenu1, lvscc, uvscc, val; 316 - int i; 317 - 318 - switch (ispi->info->type) { 319 - case INTEL_SPI_BYT: 320 - ispi->sregs = ispi->base + BYT_SSFSTS_CTL; 321 - ispi->pregs = ispi->base + BYT_PR; 322 - ispi->nregions = BYT_FREG_NUM; 323 - ispi->pr_num = BYT_PR_NUM; 324 - ispi->swseq_reg = true; 325 - break; 326 - 327 - case INTEL_SPI_LPT: 328 - ispi->sregs = ispi->base + LPT_SSFSTS_CTL; 329 - ispi->pregs = ispi->base + LPT_PR; 330 - ispi->nregions = LPT_FREG_NUM; 331 - ispi->pr_num = LPT_PR_NUM; 332 - ispi->swseq_reg = true; 333 - break; 334 - 335 - case INTEL_SPI_BXT: 336 - ispi->sregs = ispi->base + BXT_SSFSTS_CTL; 337 - ispi->pregs = ispi->base + BXT_PR; 338 - ispi->nregions = BXT_FREG_NUM; 339 - ispi->pr_num = BXT_PR_NUM; 340 - ispi->erase_64k = true; 341 - break; 342 - 343 - case INTEL_SPI_CNL: 344 - ispi->sregs = NULL; 345 - ispi->pregs = ispi->base + CNL_PR; 346 - ispi->nregions = CNL_FREG_NUM; 347 - ispi->pr_num = CNL_PR_NUM; 348 - break; 349 - 350 - default: 351 - return -EINVAL; 352 - } 353 - 354 - /* Try to disable write protection if user asked to do so */ 355 - if (writeable && !intel_spi_set_writeable(ispi)) { 356 - dev_warn(ispi->dev, "can't disable chip write protection\n"); 357 - writeable = false; 358 - } 359 - 360 - /* Disable #SMI generation from HW sequencer */ 361 - val = readl(ispi->base + HSFSTS_CTL); 362 - val &= ~HSFSTS_CTL_FSMIE; 363 - writel(val, ispi->base + HSFSTS_CTL); 364 - 365 - /* 366 - * Determine whether erase 
operation should use HW or SW sequencer. 367 - * 368 - * The HW sequencer has a predefined list of opcodes, with only the 369 - * erase opcode being programmable in LVSCC and UVSCC registers. 370 - * If these registers don't contain a valid erase opcode, erase 371 - * cannot be done using HW sequencer. 372 - */ 373 - lvscc = readl(ispi->base + LVSCC); 374 - uvscc = readl(ispi->base + UVSCC); 375 - if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK)) 376 - ispi->swseq_erase = true; 377 - /* SPI controller on Intel BXT supports 64K erase opcode */ 378 - if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase) 379 - if (!(lvscc & ERASE_64K_OPCODE_MASK) || 380 - !(uvscc & ERASE_64K_OPCODE_MASK)) 381 - ispi->erase_64k = false; 382 - 383 - if (ispi->sregs == NULL && (ispi->swseq_reg || ispi->swseq_erase)) { 384 - dev_err(ispi->dev, "software sequencer not supported, but required\n"); 385 - return -EINVAL; 386 - } 387 - 388 - /* 389 - * Some controllers can only do basic operations using hardware 390 - * sequencer. All other operations are supposed to be carried out 391 - * using software sequencer. 392 - */ 393 - if (ispi->swseq_reg) { 394 - /* Disable #SMI generation from SW sequencer */ 395 - val = readl(ispi->sregs + SSFSTS_CTL); 396 - val &= ~SSFSTS_CTL_FSMIE; 397 - writel(val, ispi->sregs + SSFSTS_CTL); 398 - } 399 - 400 - /* Check controller's lock status */ 401 - val = readl(ispi->base + HSFSTS_CTL); 402 - ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN); 403 - 404 - if (ispi->locked && ispi->sregs) { 405 - /* 406 - * BIOS programs allowed opcodes and then locks down the 407 - * register. So read back what opcodes it decided to support. 408 - * That's the set we are going to support as well. 
409 - */ 410 - opmenu0 = readl(ispi->sregs + OPMENU0); 411 - opmenu1 = readl(ispi->sregs + OPMENU1); 412 - 413 - if (opmenu0 && opmenu1) { 414 - for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) { 415 - ispi->opcodes[i] = opmenu0 >> i * 8; 416 - ispi->opcodes[i + 4] = opmenu1 >> i * 8; 417 - } 418 - } 419 - } 420 - 421 - intel_spi_dump_regs(ispi); 422 - 423 - return 0; 424 - } 425 - 426 - static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype) 427 - { 428 - int i; 429 - int preop; 430 - 431 - if (ispi->locked) { 432 - for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) 433 - if (ispi->opcodes[i] == opcode) 434 - return i; 435 - 436 - return -EINVAL; 437 - } 438 - 439 - /* The lock is off, so just use index 0 */ 440 - writel(opcode, ispi->sregs + OPMENU0); 441 - preop = readw(ispi->sregs + PREOP_OPTYPE); 442 - writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE); 443 - 444 - return 0; 445 - } 446 - 447 - static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, size_t len) 448 - { 449 - u32 val, status; 450 - int ret; 451 - 452 - val = readl(ispi->base + HSFSTS_CTL); 453 - val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK); 454 - 455 - switch (opcode) { 456 - case SPINOR_OP_RDID: 457 - val |= HSFSTS_CTL_FCYCLE_RDID; 458 - break; 459 - case SPINOR_OP_WRSR: 460 - val |= HSFSTS_CTL_FCYCLE_WRSR; 461 - break; 462 - case SPINOR_OP_RDSR: 463 - val |= HSFSTS_CTL_FCYCLE_RDSR; 464 - break; 465 - default: 466 - return -EINVAL; 467 - } 468 - 469 - if (len > INTEL_SPI_FIFO_SZ) 470 - return -EINVAL; 471 - 472 - val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT; 473 - val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; 474 - val |= HSFSTS_CTL_FGO; 475 - writel(val, ispi->base + HSFSTS_CTL); 476 - 477 - ret = intel_spi_wait_hw_busy(ispi); 478 - if (ret) 479 - return ret; 480 - 481 - status = readl(ispi->base + HSFSTS_CTL); 482 - if (status & HSFSTS_CTL_FCERR) 483 - return -EIO; 484 - else if (status & HSFSTS_CTL_AEL) 485 - return -EACCES; 486 - 487 - return 
0; 488 - } 489 - 490 - static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len, 491 - int optype) 492 - { 493 - u32 val = 0, status; 494 - u8 atomic_preopcode; 495 - int ret; 496 - 497 - ret = intel_spi_opcode_index(ispi, opcode, optype); 498 - if (ret < 0) 499 - return ret; 500 - 501 - if (len > INTEL_SPI_FIFO_SZ) 502 - return -EINVAL; 503 - 504 - /* 505 - * Always clear it after each SW sequencer operation regardless 506 - * of whether it is successful or not. 507 - */ 508 - atomic_preopcode = ispi->atomic_preopcode; 509 - ispi->atomic_preopcode = 0; 510 - 511 - /* Only mark 'Data Cycle' bit when there is data to be transferred */ 512 - if (len > 0) 513 - val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; 514 - val |= ret << SSFSTS_CTL_COP_SHIFT; 515 - val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE; 516 - val |= SSFSTS_CTL_SCGO; 517 - if (atomic_preopcode) { 518 - u16 preop; 519 - 520 - switch (optype) { 521 - case OPTYPE_WRITE_NO_ADDR: 522 - case OPTYPE_WRITE_WITH_ADDR: 523 - /* Pick matching preopcode for the atomic sequence */ 524 - preop = readw(ispi->sregs + PREOP_OPTYPE); 525 - if ((preop & 0xff) == atomic_preopcode) 526 - ; /* Do nothing */ 527 - else if ((preop >> 8) == atomic_preopcode) 528 - val |= SSFSTS_CTL_SPOP; 529 - else 530 - return -EINVAL; 531 - 532 - /* Enable atomic sequence */ 533 - val |= SSFSTS_CTL_ACS; 534 - break; 535 - 536 - default: 537 - return -EINVAL; 538 - } 539 - 540 - } 541 - writel(val, ispi->sregs + SSFSTS_CTL); 542 - 543 - ret = intel_spi_wait_sw_busy(ispi); 544 - if (ret) 545 - return ret; 546 - 547 - status = readl(ispi->sregs + SSFSTS_CTL); 548 - if (status & SSFSTS_CTL_FCERR) 549 - return -EIO; 550 - else if (status & SSFSTS_CTL_AEL) 551 - return -EACCES; 552 - 553 - return 0; 554 - } 555 - 556 - static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, 557 - size_t len) 558 - { 559 - struct intel_spi *ispi = nor->priv; 560 - int ret; 561 - 562 - /* Address of the first chip */ 563 - 
writel(0, ispi->base + FADDR); 564 - 565 - if (ispi->swseq_reg) 566 - ret = intel_spi_sw_cycle(ispi, opcode, len, 567 - OPTYPE_READ_NO_ADDR); 568 - else 569 - ret = intel_spi_hw_cycle(ispi, opcode, len); 570 - 571 - if (ret) 572 - return ret; 573 - 574 - return intel_spi_read_block(ispi, buf, len); 575 - } 576 - 577 - static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, 578 - size_t len) 579 - { 580 - struct intel_spi *ispi = nor->priv; 581 - int ret; 582 - 583 - /* 584 - * This is handled with atomic operation and preop code in Intel 585 - * controller so we only verify that it is available. If the 586 - * controller is not locked, program the opcode to the PREOP 587 - * register for later use. 588 - * 589 - * When hardware sequencer is used there is no need to program 590 - * any opcodes (it handles them automatically as part of a command). 591 - */ 592 - if (opcode == SPINOR_OP_WREN) { 593 - u16 preop; 594 - 595 - if (!ispi->swseq_reg) 596 - return 0; 597 - 598 - preop = readw(ispi->sregs + PREOP_OPTYPE); 599 - if ((preop & 0xff) != opcode && (preop >> 8) != opcode) { 600 - if (ispi->locked) 601 - return -EINVAL; 602 - writel(opcode, ispi->sregs + PREOP_OPTYPE); 603 - } 604 - 605 - /* 606 - * This enables atomic sequence on next SW sycle. Will 607 - * be cleared after next operation. 608 - */ 609 - ispi->atomic_preopcode = opcode; 610 - return 0; 611 - } 612 - 613 - /* 614 - * We hope that HW sequencer will do the right thing automatically and 615 - * with the SW sequencer we cannot use preopcode anyway, so just ignore 616 - * the Write Disable operation and pretend it was completed 617 - * successfully. 
618 - */ 619 - if (opcode == SPINOR_OP_WRDI) 620 - return 0; 621 - 622 - writel(0, ispi->base + FADDR); 623 - 624 - /* Write the value beforehand */ 625 - ret = intel_spi_write_block(ispi, buf, len); 626 - if (ret) 627 - return ret; 628 - 629 - if (ispi->swseq_reg) 630 - return intel_spi_sw_cycle(ispi, opcode, len, 631 - OPTYPE_WRITE_NO_ADDR); 632 - return intel_spi_hw_cycle(ispi, opcode, len); 633 - } 634 - 635 - static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len, 636 - u_char *read_buf) 637 - { 638 - struct intel_spi *ispi = nor->priv; 639 - size_t block_size, retlen = 0; 640 - u32 val, status; 641 - ssize_t ret; 642 - 643 - /* 644 - * Atomic sequence is not expected with HW sequencer reads. Make 645 - * sure it is cleared regardless. 646 - */ 647 - if (WARN_ON_ONCE(ispi->atomic_preopcode)) 648 - ispi->atomic_preopcode = 0; 649 - 650 - switch (nor->read_opcode) { 651 - case SPINOR_OP_READ: 652 - case SPINOR_OP_READ_FAST: 653 - case SPINOR_OP_READ_4B: 654 - case SPINOR_OP_READ_FAST_4B: 655 - break; 656 - default: 657 - return -EINVAL; 658 - } 659 - 660 - while (len > 0) { 661 - block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ); 662 - 663 - /* Read cannot cross 4K boundary */ 664 - block_size = min_t(loff_t, from + block_size, 665 - round_up(from + 1, SZ_4K)) - from; 666 - 667 - writel(from, ispi->base + FADDR); 668 - 669 - val = readl(ispi->base + HSFSTS_CTL); 670 - val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); 671 - val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; 672 - val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT; 673 - val |= HSFSTS_CTL_FCYCLE_READ; 674 - val |= HSFSTS_CTL_FGO; 675 - writel(val, ispi->base + HSFSTS_CTL); 676 - 677 - ret = intel_spi_wait_hw_busy(ispi); 678 - if (ret) 679 - return ret; 680 - 681 - status = readl(ispi->base + HSFSTS_CTL); 682 - if (status & HSFSTS_CTL_FCERR) 683 - ret = -EIO; 684 - else if (status & HSFSTS_CTL_AEL) 685 - ret = -EACCES; 686 - 687 - if (ret < 0) { 688 - 
dev_err(ispi->dev, "read error: %llx: %#x\n", from, 689 - status); 690 - return ret; 691 - } 692 - 693 - ret = intel_spi_read_block(ispi, read_buf, block_size); 694 - if (ret) 695 - return ret; 696 - 697 - len -= block_size; 698 - from += block_size; 699 - retlen += block_size; 700 - read_buf += block_size; 701 - } 702 - 703 - return retlen; 704 - } 705 - 706 - static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len, 707 - const u_char *write_buf) 708 - { 709 - struct intel_spi *ispi = nor->priv; 710 - size_t block_size, retlen = 0; 711 - u32 val, status; 712 - ssize_t ret; 713 - 714 - /* Not needed with HW sequencer write, make sure it is cleared */ 715 - ispi->atomic_preopcode = 0; 716 - 717 - while (len > 0) { 718 - block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ); 719 - 720 - /* Write cannot cross 4K boundary */ 721 - block_size = min_t(loff_t, to + block_size, 722 - round_up(to + 1, SZ_4K)) - to; 723 - 724 - writel(to, ispi->base + FADDR); 725 - 726 - val = readl(ispi->base + HSFSTS_CTL); 727 - val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); 728 - val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; 729 - val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT; 730 - val |= HSFSTS_CTL_FCYCLE_WRITE; 731 - 732 - ret = intel_spi_write_block(ispi, write_buf, block_size); 733 - if (ret) { 734 - dev_err(ispi->dev, "failed to write block\n"); 735 - return ret; 736 - } 737 - 738 - /* Start the write now */ 739 - val |= HSFSTS_CTL_FGO; 740 - writel(val, ispi->base + HSFSTS_CTL); 741 - 742 - ret = intel_spi_wait_hw_busy(ispi); 743 - if (ret) { 744 - dev_err(ispi->dev, "timeout\n"); 745 - return ret; 746 - } 747 - 748 - status = readl(ispi->base + HSFSTS_CTL); 749 - if (status & HSFSTS_CTL_FCERR) 750 - ret = -EIO; 751 - else if (status & HSFSTS_CTL_AEL) 752 - ret = -EACCES; 753 - 754 - if (ret < 0) { 755 - dev_err(ispi->dev, "write error: %llx: %#x\n", to, 756 - status); 757 - return ret; 758 - } 759 - 760 - len -= block_size; 761 - to += 
block_size; 762 - retlen += block_size; 763 - write_buf += block_size; 764 - } 765 - 766 - return retlen; 767 - } 768 - 769 - static int intel_spi_erase(struct spi_nor *nor, loff_t offs) 770 - { 771 - size_t erase_size, len = nor->mtd.erasesize; 772 - struct intel_spi *ispi = nor->priv; 773 - u32 val, status, cmd; 774 - int ret; 775 - 776 - /* If the hardware can do 64k erase use that when possible */ 777 - if (len >= SZ_64K && ispi->erase_64k) { 778 - cmd = HSFSTS_CTL_FCYCLE_ERASE_64K; 779 - erase_size = SZ_64K; 780 - } else { 781 - cmd = HSFSTS_CTL_FCYCLE_ERASE; 782 - erase_size = SZ_4K; 783 - } 784 - 785 - if (ispi->swseq_erase) { 786 - while (len > 0) { 787 - writel(offs, ispi->base + FADDR); 788 - 789 - ret = intel_spi_sw_cycle(ispi, nor->erase_opcode, 790 - 0, OPTYPE_WRITE_WITH_ADDR); 791 - if (ret) 792 - return ret; 793 - 794 - offs += erase_size; 795 - len -= erase_size; 796 - } 797 - 798 - return 0; 799 - } 800 - 801 - /* Not needed with HW sequencer erase, make sure it is cleared */ 802 - ispi->atomic_preopcode = 0; 803 - 804 - while (len > 0) { 805 - writel(offs, ispi->base + FADDR); 806 - 807 - val = readl(ispi->base + HSFSTS_CTL); 808 - val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); 809 - val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; 810 - val |= cmd; 811 - val |= HSFSTS_CTL_FGO; 812 - writel(val, ispi->base + HSFSTS_CTL); 813 - 814 - ret = intel_spi_wait_hw_busy(ispi); 815 - if (ret) 816 - return ret; 817 - 818 - status = readl(ispi->base + HSFSTS_CTL); 819 - if (status & HSFSTS_CTL_FCERR) 820 - return -EIO; 821 - else if (status & HSFSTS_CTL_AEL) 822 - return -EACCES; 823 - 824 - offs += erase_size; 825 - len -= erase_size; 826 - } 827 - 828 - return 0; 829 - } 830 - 831 - static bool intel_spi_is_protected(const struct intel_spi *ispi, 832 - unsigned int base, unsigned int limit) 833 - { 834 - int i; 835 - 836 - for (i = 0; i < ispi->pr_num; i++) { 837 - u32 pr_base, pr_limit, pr_value; 838 - 839 - pr_value = 
readl(ispi->pregs + PR(i)); 840 - if (!(pr_value & (PR_WPE | PR_RPE))) 841 - continue; 842 - 843 - pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT; 844 - pr_base = pr_value & PR_BASE_MASK; 845 - 846 - if (pr_base >= base && pr_limit <= limit) 847 - return true; 848 - } 849 - 850 - return false; 851 - } 852 - 853 - /* 854 - * There will be a single partition holding all enabled flash regions. We 855 - * call this "BIOS". 856 - */ 857 - static void intel_spi_fill_partition(struct intel_spi *ispi, 858 - struct mtd_partition *part) 859 - { 860 - u64 end; 861 - int i; 862 - 863 - memset(part, 0, sizeof(*part)); 864 - 865 - /* Start from the mandatory descriptor region */ 866 - part->size = 4096; 867 - part->name = "BIOS"; 868 - 869 - /* 870 - * Now try to find where this partition ends based on the flash 871 - * region registers. 872 - */ 873 - for (i = 1; i < ispi->nregions; i++) { 874 - u32 region, base, limit; 875 - 876 - region = readl(ispi->base + FREG(i)); 877 - base = region & FREG_BASE_MASK; 878 - limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT; 879 - 880 - if (base >= limit || limit == 0) 881 - continue; 882 - 883 - /* 884 - * If any of the regions have protection bits set, make the 885 - * whole partition read-only to be on the safe side. 886 - * 887 - * Also if the user did not ask the chip to be writeable 888 - * mask the bit too. 
889 - */ 890 - if (!writeable || intel_spi_is_protected(ispi, base, limit)) 891 - part->mask_flags |= MTD_WRITEABLE; 892 - 893 - end = (limit << 12) + 4096; 894 - if (end > part->size) 895 - part->size = end; 896 - } 897 - } 898 - 899 - static const struct spi_nor_controller_ops intel_spi_controller_ops = { 900 - .read_reg = intel_spi_read_reg, 901 - .write_reg = intel_spi_write_reg, 902 - .read = intel_spi_read, 903 - .write = intel_spi_write, 904 - .erase = intel_spi_erase, 905 - }; 906 - 907 - struct intel_spi *intel_spi_probe(struct device *dev, 908 - struct resource *mem, const struct intel_spi_boardinfo *info) 909 - { 910 - const struct spi_nor_hwcaps hwcaps = { 911 - .mask = SNOR_HWCAPS_READ | 912 - SNOR_HWCAPS_READ_FAST | 913 - SNOR_HWCAPS_PP, 914 - }; 915 - struct mtd_partition part; 916 - struct intel_spi *ispi; 917 - int ret; 918 - 919 - if (!info || !mem) 920 - return ERR_PTR(-EINVAL); 921 - 922 - ispi = devm_kzalloc(dev, sizeof(*ispi), GFP_KERNEL); 923 - if (!ispi) 924 - return ERR_PTR(-ENOMEM); 925 - 926 - ispi->base = devm_ioremap_resource(dev, mem); 927 - if (IS_ERR(ispi->base)) 928 - return ERR_CAST(ispi->base); 929 - 930 - ispi->dev = dev; 931 - ispi->info = info; 932 - 933 - ret = intel_spi_init(ispi); 934 - if (ret) 935 - return ERR_PTR(ret); 936 - 937 - ispi->nor.dev = ispi->dev; 938 - ispi->nor.priv = ispi; 939 - ispi->nor.controller_ops = &intel_spi_controller_ops; 940 - 941 - ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps); 942 - if (ret) { 943 - dev_info(dev, "failed to locate the chip\n"); 944 - return ERR_PTR(ret); 945 - } 946 - 947 - intel_spi_fill_partition(ispi, &part); 948 - 949 - ret = mtd_device_register(&ispi->nor.mtd, &part, 1); 950 - if (ret) 951 - return ERR_PTR(ret); 952 - 953 - return ispi; 954 - } 955 - EXPORT_SYMBOL_GPL(intel_spi_probe); 956 - 957 - int intel_spi_remove(struct intel_spi *ispi) 958 - { 959 - return mtd_device_unregister(&ispi->nor.mtd); 960 - } 961 - EXPORT_SYMBOL_GPL(intel_spi_remove); 962 - 963 - 
MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver"); 964 - MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); 965 - MODULE_LICENSE("GPL v2");
-21
drivers/mtd/spi-nor/controllers/intel-spi.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0-only */ 2 - /* 3 - * Intel PCH/PCU SPI flash driver. 4 - * 5 - * Copyright (C) 2016, Intel Corporation 6 - * Author: Mika Westerberg <mika.westerberg@linux.intel.com> 7 - */ 8 - 9 - #ifndef INTEL_SPI_H 10 - #define INTEL_SPI_H 11 - 12 - #include <linux/platform_data/x86/intel-spi.h> 13 - 14 - struct intel_spi; 15 - struct resource; 16 - 17 - struct intel_spi *intel_spi_probe(struct device *dev, 18 - struct resource *mem, const struct intel_spi_boardinfo *info); 19 - int intel_spi_remove(struct intel_spi *ispi); 20 - 21 - #endif /* INTEL_SPI_H */
+39
drivers/spi/Kconfig
··· 427 427 To compile this driver as a module, choose M here: the module 428 428 will be called spi-ingenic. 429 429 430 + config SPI_INTEL 431 + tristate 432 + 433 + config SPI_INTEL_PCI 434 + tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)" 435 + depends on PCI 436 + depends on X86 || COMPILE_TEST 437 + depends on SPI_MEM 438 + select SPI_INTEL 439 + help 440 + This enables PCI support for the Intel PCH/PCU SPI controller in 441 + master mode. This controller is present in modern Intel hardware 442 + and is used to hold BIOS and other persistent settings. Using 443 + this driver it is possible to upgrade BIOS directly from Linux. 444 + 445 + Say N here unless you know what you are doing. Overwriting the 446 + SPI flash may render the system unbootable. 447 + 448 + To compile this driver as a module, choose M here: the module 449 + will be called spi-intel-pci. 450 + 451 + config SPI_INTEL_PLATFORM 452 + tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)" 453 + depends on X86 || COMPILE_TEST 454 + depends on SPI_MEM 455 + select SPI_INTEL 456 + help 457 + This enables platform support for the Intel PCH/PCU SPI 458 + controller in master mode. This controller is present in modern 459 + Intel hardware and is used to hold BIOS and other persistent 460 + settings. Using this driver it is possible to upgrade BIOS 461 + directly from Linux. 462 + 463 + Say N here unless you know what you are doing. Overwriting the 464 + SPI flash may render the system unbootable. 465 + 466 + To compile this driver as a module, choose M here: the module 467 + will be called spi-intel-platform. 468 + 430 469 config SPI_JCORE 431 470 tristate "J-Core SPI Master" 432 471 depends on OF && (SUPERH || COMPILE_TEST)
+3
drivers/spi/Makefile
··· 61 61 obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o 62 62 obj-$(CONFIG_SPI_IMX) += spi-imx.o 63 63 obj-$(CONFIG_SPI_INGENIC) += spi-ingenic.o 64 + obj-$(CONFIG_SPI_INTEL) += spi-intel.o 65 + obj-$(CONFIG_SPI_INTEL_PCI) += spi-intel-pci.o 66 + obj-$(CONFIG_SPI_INTEL_PLATFORM) += spi-intel-platform.o 64 67 obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o 65 68 obj-$(CONFIG_SPI_JCORE) += spi-jcore.o 66 69 obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
+1250
drivers/spi/spi-intel.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Intel PCH/PCU SPI flash driver. 4 + * 5 + * Copyright (C) 2016 - 2022, Intel Corporation 6 + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> 7 + */ 8 + 9 + #include <linux/iopoll.h> 10 + #include <linux/module.h> 11 + 12 + #include <linux/mtd/partitions.h> 13 + #include <linux/mtd/spi-nor.h> 14 + 15 + #include <linux/spi/flash.h> 16 + #include <linux/spi/spi.h> 17 + #include <linux/spi/spi-mem.h> 18 + 19 + #include "spi-intel.h" 20 + 21 + /* Offsets are from @ispi->base */ 22 + #define BFPREG 0x00 23 + 24 + #define HSFSTS_CTL 0x04 25 + #define HSFSTS_CTL_FSMIE BIT(31) 26 + #define HSFSTS_CTL_FDBC_SHIFT 24 27 + #define HSFSTS_CTL_FDBC_MASK (0x3f << HSFSTS_CTL_FDBC_SHIFT) 28 + 29 + #define HSFSTS_CTL_FCYCLE_SHIFT 17 30 + #define HSFSTS_CTL_FCYCLE_MASK (0x0f << HSFSTS_CTL_FCYCLE_SHIFT) 31 + /* HW sequencer opcodes */ 32 + #define HSFSTS_CTL_FCYCLE_READ (0x00 << HSFSTS_CTL_FCYCLE_SHIFT) 33 + #define HSFSTS_CTL_FCYCLE_WRITE (0x02 << HSFSTS_CTL_FCYCLE_SHIFT) 34 + #define HSFSTS_CTL_FCYCLE_ERASE (0x03 << HSFSTS_CTL_FCYCLE_SHIFT) 35 + #define HSFSTS_CTL_FCYCLE_ERASE_64K (0x04 << HSFSTS_CTL_FCYCLE_SHIFT) 36 + #define HSFSTS_CTL_FCYCLE_RDID (0x06 << HSFSTS_CTL_FCYCLE_SHIFT) 37 + #define HSFSTS_CTL_FCYCLE_WRSR (0x07 << HSFSTS_CTL_FCYCLE_SHIFT) 38 + #define HSFSTS_CTL_FCYCLE_RDSR (0x08 << HSFSTS_CTL_FCYCLE_SHIFT) 39 + 40 + #define HSFSTS_CTL_FGO BIT(16) 41 + #define HSFSTS_CTL_FLOCKDN BIT(15) 42 + #define HSFSTS_CTL_FDV BIT(14) 43 + #define HSFSTS_CTL_SCIP BIT(5) 44 + #define HSFSTS_CTL_AEL BIT(2) 45 + #define HSFSTS_CTL_FCERR BIT(1) 46 + #define HSFSTS_CTL_FDONE BIT(0) 47 + 48 + #define FADDR 0x08 49 + #define DLOCK 0x0c 50 + #define FDATA(n) (0x10 + ((n) * 4)) 51 + 52 + #define FRACC 0x50 53 + 54 + #define FREG(n) (0x54 + ((n) * 4)) 55 + #define FREG_BASE_MASK 0x3fff 56 + #define FREG_LIMIT_SHIFT 16 57 + #define FREG_LIMIT_MASK (0x03fff << FREG_LIMIT_SHIFT) 58 + 59 + /* Offset is from @ispi->pregs */ 
60 + #define PR(n) ((n) * 4) 61 + #define PR_WPE BIT(31) 62 + #define PR_LIMIT_SHIFT 16 63 + #define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT) 64 + #define PR_RPE BIT(15) 65 + #define PR_BASE_MASK 0x3fff 66 + 67 + /* Offsets are from @ispi->sregs */ 68 + #define SSFSTS_CTL 0x00 69 + #define SSFSTS_CTL_FSMIE BIT(23) 70 + #define SSFSTS_CTL_DS BIT(22) 71 + #define SSFSTS_CTL_DBC_SHIFT 16 72 + #define SSFSTS_CTL_SPOP BIT(11) 73 + #define SSFSTS_CTL_ACS BIT(10) 74 + #define SSFSTS_CTL_SCGO BIT(9) 75 + #define SSFSTS_CTL_COP_SHIFT 12 76 + #define SSFSTS_CTL_FRS BIT(7) 77 + #define SSFSTS_CTL_DOFRS BIT(6) 78 + #define SSFSTS_CTL_AEL BIT(4) 79 + #define SSFSTS_CTL_FCERR BIT(3) 80 + #define SSFSTS_CTL_FDONE BIT(2) 81 + #define SSFSTS_CTL_SCIP BIT(0) 82 + 83 + #define PREOP_OPTYPE 0x04 84 + #define OPMENU0 0x08 85 + #define OPMENU1 0x0c 86 + 87 + #define OPTYPE_READ_NO_ADDR 0 88 + #define OPTYPE_WRITE_NO_ADDR 1 89 + #define OPTYPE_READ_WITH_ADDR 2 90 + #define OPTYPE_WRITE_WITH_ADDR 3 91 + 92 + /* CPU specifics */ 93 + #define BYT_PR 0x74 94 + #define BYT_SSFSTS_CTL 0x90 95 + #define BYT_FREG_NUM 5 96 + #define BYT_PR_NUM 5 97 + 98 + #define LPT_PR 0x74 99 + #define LPT_SSFSTS_CTL 0x90 100 + #define LPT_FREG_NUM 5 101 + #define LPT_PR_NUM 5 102 + 103 + #define BXT_PR 0x84 104 + #define BXT_SSFSTS_CTL 0xa0 105 + #define BXT_FREG_NUM 12 106 + #define BXT_PR_NUM 6 107 + 108 + #define CNL_PR 0x84 109 + #define CNL_FREG_NUM 6 110 + #define CNL_PR_NUM 5 111 + 112 + #define LVSCC 0xc4 113 + #define UVSCC 0xc8 114 + #define ERASE_OPCODE_SHIFT 8 115 + #define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) 116 + #define ERASE_64K_OPCODE_SHIFT 16 117 + #define ERASE_64K_OPCODE_MASK (0xff << ERASE_64K_OPCODE_SHIFT) 118 + 119 + #define INTEL_SPI_TIMEOUT 5000 /* ms */ 120 + #define INTEL_SPI_FIFO_SZ 64 121 + 122 + /** 123 + * struct intel_spi - Driver private data 124 + * @dev: Device pointer 125 + * @info: Pointer to board specific info 126 + * @base: Beginning of MMIO space 127 + * @pregs: 
Start of protection registers 128 + * @sregs: Start of software sequencer registers 129 + * @master: Pointer to the SPI controller structure 130 + * @nregions: Maximum number of regions 131 + * @pr_num: Maximum number of protected range registers 132 + * @locked: Is SPI setting locked 133 + * @swseq_reg: Use SW sequencer in register reads/writes 134 + * @swseq_erase: Use SW sequencer in erase operation 135 + * @atomic_preopcode: Holds preopcode when atomic sequence is requested 136 + * @opcodes: Opcodes which are supported. This are programmed by BIOS 137 + * before it locks down the controller. 138 + * @mem_ops: Pointer to SPI MEM ops supported by the controller 139 + */ 140 + struct intel_spi { 141 + struct device *dev; 142 + const struct intel_spi_boardinfo *info; 143 + void __iomem *base; 144 + void __iomem *pregs; 145 + void __iomem *sregs; 146 + struct spi_controller *master; 147 + size_t nregions; 148 + size_t pr_num; 149 + bool locked; 150 + bool swseq_reg; 151 + bool swseq_erase; 152 + u8 atomic_preopcode; 153 + u8 opcodes[8]; 154 + const struct intel_spi_mem_op *mem_ops; 155 + }; 156 + 157 + struct intel_spi_mem_op { 158 + struct spi_mem_op mem_op; 159 + u32 replacement_op; 160 + int (*exec_op)(struct intel_spi *ispi, 161 + const struct intel_spi_mem_op *iop, 162 + const struct spi_mem_op *op); 163 + }; 164 + 165 + static bool writeable; 166 + module_param(writeable, bool, 0); 167 + MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)"); 168 + 169 + static void intel_spi_dump_regs(struct intel_spi *ispi) 170 + { 171 + u32 value; 172 + int i; 173 + 174 + dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG)); 175 + 176 + value = readl(ispi->base + HSFSTS_CTL); 177 + dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value); 178 + if (value & HSFSTS_CTL_FLOCKDN) 179 + dev_dbg(ispi->dev, "-> Locked\n"); 180 + 181 + dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR)); 182 + dev_dbg(ispi->dev, "DLOCK=0x%08x\n", 
readl(ispi->base + DLOCK)); 183 + 184 + for (i = 0; i < 16; i++) 185 + dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n", 186 + i, readl(ispi->base + FDATA(i))); 187 + 188 + dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC)); 189 + 190 + for (i = 0; i < ispi->nregions; i++) 191 + dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i, 192 + readl(ispi->base + FREG(i))); 193 + for (i = 0; i < ispi->pr_num; i++) 194 + dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i, 195 + readl(ispi->pregs + PR(i))); 196 + 197 + if (ispi->sregs) { 198 + value = readl(ispi->sregs + SSFSTS_CTL); 199 + dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value); 200 + dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n", 201 + readl(ispi->sregs + PREOP_OPTYPE)); 202 + dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", 203 + readl(ispi->sregs + OPMENU0)); 204 + dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", 205 + readl(ispi->sregs + OPMENU1)); 206 + } 207 + 208 + dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC)); 209 + dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC)); 210 + 211 + dev_dbg(ispi->dev, "Protected regions:\n"); 212 + for (i = 0; i < ispi->pr_num; i++) { 213 + u32 base, limit; 214 + 215 + value = readl(ispi->pregs + PR(i)); 216 + if (!(value & (PR_WPE | PR_RPE))) 217 + continue; 218 + 219 + limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT; 220 + base = value & PR_BASE_MASK; 221 + 222 + dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n", 223 + i, base << 12, (limit << 12) | 0xfff, 224 + value & PR_WPE ? 'W' : '.', value & PR_RPE ? 
'R' : '.'); 225 + } 226 + 227 + dev_dbg(ispi->dev, "Flash regions:\n"); 228 + for (i = 0; i < ispi->nregions; i++) { 229 + u32 region, base, limit; 230 + 231 + region = readl(ispi->base + FREG(i)); 232 + base = region & FREG_BASE_MASK; 233 + limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT; 234 + 235 + if (base >= limit || (i > 0 && limit == 0)) 236 + dev_dbg(ispi->dev, " %02d disabled\n", i); 237 + else 238 + dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n", 239 + i, base << 12, (limit << 12) | 0xfff); 240 + } 241 + 242 + dev_dbg(ispi->dev, "Using %cW sequencer for register access\n", 243 + ispi->swseq_reg ? 'S' : 'H'); 244 + dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n", 245 + ispi->swseq_erase ? 'S' : 'H'); 246 + } 247 + 248 + /* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */ 249 + static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size) 250 + { 251 + size_t bytes; 252 + int i = 0; 253 + 254 + if (size > INTEL_SPI_FIFO_SZ) 255 + return -EINVAL; 256 + 257 + while (size > 0) { 258 + bytes = min_t(size_t, size, 4); 259 + memcpy_fromio(buf, ispi->base + FDATA(i), bytes); 260 + size -= bytes; 261 + buf += bytes; 262 + i++; 263 + } 264 + 265 + return 0; 266 + } 267 + 268 + /* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */ 269 + static int intel_spi_write_block(struct intel_spi *ispi, const void *buf, 270 + size_t size) 271 + { 272 + size_t bytes; 273 + int i = 0; 274 + 275 + if (size > INTEL_SPI_FIFO_SZ) 276 + return -EINVAL; 277 + 278 + while (size > 0) { 279 + bytes = min_t(size_t, size, 4); 280 + memcpy_toio(ispi->base + FDATA(i), buf, bytes); 281 + size -= bytes; 282 + buf += bytes; 283 + i++; 284 + } 285 + 286 + return 0; 287 + } 288 + 289 + static int intel_spi_wait_hw_busy(struct intel_spi *ispi) 290 + { 291 + u32 val; 292 + 293 + return readl_poll_timeout(ispi->base + HSFSTS_CTL, val, 294 + !(val & HSFSTS_CTL_SCIP), 0, 295 + INTEL_SPI_TIMEOUT * 1000); 296 + } 297 + 298 + static int 
intel_spi_wait_sw_busy(struct intel_spi *ispi) 299 + { 300 + u32 val; 301 + 302 + return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val, 303 + !(val & SSFSTS_CTL_SCIP), 0, 304 + INTEL_SPI_TIMEOUT * 1000); 305 + } 306 + 307 + static bool intel_spi_set_writeable(struct intel_spi *ispi) 308 + { 309 + if (!ispi->info->set_writeable) 310 + return false; 311 + 312 + return ispi->info->set_writeable(ispi->base, ispi->info->data); 313 + } 314 + 315 + static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype) 316 + { 317 + int i; 318 + int preop; 319 + 320 + if (ispi->locked) { 321 + for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) 322 + if (ispi->opcodes[i] == opcode) 323 + return i; 324 + 325 + return -EINVAL; 326 + } 327 + 328 + /* The lock is off, so just use index 0 */ 329 + writel(opcode, ispi->sregs + OPMENU0); 330 + preop = readw(ispi->sregs + PREOP_OPTYPE); 331 + writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE); 332 + 333 + return 0; 334 + } 335 + 336 + static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, size_t len) 337 + { 338 + u32 val, status; 339 + int ret; 340 + 341 + val = readl(ispi->base + HSFSTS_CTL); 342 + val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK); 343 + 344 + switch (opcode) { 345 + case SPINOR_OP_RDID: 346 + val |= HSFSTS_CTL_FCYCLE_RDID; 347 + break; 348 + case SPINOR_OP_WRSR: 349 + val |= HSFSTS_CTL_FCYCLE_WRSR; 350 + break; 351 + case SPINOR_OP_RDSR: 352 + val |= HSFSTS_CTL_FCYCLE_RDSR; 353 + break; 354 + default: 355 + return -EINVAL; 356 + } 357 + 358 + if (len > INTEL_SPI_FIFO_SZ) 359 + return -EINVAL; 360 + 361 + val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT; 362 + val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; 363 + val |= HSFSTS_CTL_FGO; 364 + writel(val, ispi->base + HSFSTS_CTL); 365 + 366 + ret = intel_spi_wait_hw_busy(ispi); 367 + if (ret) 368 + return ret; 369 + 370 + status = readl(ispi->base + HSFSTS_CTL); 371 + if (status & HSFSTS_CTL_FCERR) 372 + return -EIO; 373 + else if (status & 
HSFSTS_CTL_AEL) 374 + return -EACCES; 375 + 376 + return 0; 377 + } 378 + 379 + static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len, 380 + int optype) 381 + { 382 + u32 val = 0, status; 383 + u8 atomic_preopcode; 384 + int ret; 385 + 386 + ret = intel_spi_opcode_index(ispi, opcode, optype); 387 + if (ret < 0) 388 + return ret; 389 + 390 + if (len > INTEL_SPI_FIFO_SZ) 391 + return -EINVAL; 392 + 393 + /* 394 + * Always clear it after each SW sequencer operation regardless 395 + * of whether it is successful or not. 396 + */ 397 + atomic_preopcode = ispi->atomic_preopcode; 398 + ispi->atomic_preopcode = 0; 399 + 400 + /* Only mark 'Data Cycle' bit when there is data to be transferred */ 401 + if (len > 0) 402 + val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; 403 + val |= ret << SSFSTS_CTL_COP_SHIFT; 404 + val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE; 405 + val |= SSFSTS_CTL_SCGO; 406 + if (atomic_preopcode) { 407 + u16 preop; 408 + 409 + switch (optype) { 410 + case OPTYPE_WRITE_NO_ADDR: 411 + case OPTYPE_WRITE_WITH_ADDR: 412 + /* Pick matching preopcode for the atomic sequence */ 413 + preop = readw(ispi->sregs + PREOP_OPTYPE); 414 + if ((preop & 0xff) == atomic_preopcode) 415 + ; /* Do nothing */ 416 + else if ((preop >> 8) == atomic_preopcode) 417 + val |= SSFSTS_CTL_SPOP; 418 + else 419 + return -EINVAL; 420 + 421 + /* Enable atomic sequence */ 422 + val |= SSFSTS_CTL_ACS; 423 + break; 424 + 425 + default: 426 + return -EINVAL; 427 + } 428 + } 429 + writel(val, ispi->sregs + SSFSTS_CTL); 430 + 431 + ret = intel_spi_wait_sw_busy(ispi); 432 + if (ret) 433 + return ret; 434 + 435 + status = readl(ispi->sregs + SSFSTS_CTL); 436 + if (status & SSFSTS_CTL_FCERR) 437 + return -EIO; 438 + else if (status & SSFSTS_CTL_AEL) 439 + return -EACCES; 440 + 441 + return 0; 442 + } 443 + 444 + static int intel_spi_read_reg(struct intel_spi *ispi, 445 + const struct intel_spi_mem_op *iop, 446 + const struct spi_mem_op *op) 447 + { 448 + size_t 
nbytes = op->data.nbytes; 449 + u8 opcode = op->cmd.opcode; 450 + int ret; 451 + 452 + /* Address of the first chip */ 453 + writel(0, ispi->base + FADDR); 454 + 455 + if (ispi->swseq_reg) 456 + ret = intel_spi_sw_cycle(ispi, opcode, nbytes, 457 + OPTYPE_READ_NO_ADDR); 458 + else 459 + ret = intel_spi_hw_cycle(ispi, opcode, nbytes); 460 + 461 + if (ret) 462 + return ret; 463 + 464 + return intel_spi_read_block(ispi, op->data.buf.in, nbytes); 465 + } 466 + 467 + static int intel_spi_write_reg(struct intel_spi *ispi, 468 + const struct intel_spi_mem_op *iop, 469 + const struct spi_mem_op *op) 470 + { 471 + size_t nbytes = op->data.nbytes; 472 + u8 opcode = op->cmd.opcode; 473 + int ret; 474 + 475 + /* 476 + * This is handled with atomic operation and preop code in Intel 477 + * controller so we only verify that it is available. If the 478 + * controller is not locked, program the opcode to the PREOP 479 + * register for later use. 480 + * 481 + * When hardware sequencer is used there is no need to program 482 + * any opcodes (it handles them automatically as part of a command). 483 + */ 484 + if (opcode == SPINOR_OP_WREN) { 485 + u16 preop; 486 + 487 + if (!ispi->swseq_reg) 488 + return 0; 489 + 490 + preop = readw(ispi->sregs + PREOP_OPTYPE); 491 + if ((preop & 0xff) != opcode && (preop >> 8) != opcode) { 492 + if (ispi->locked) 493 + return -EINVAL; 494 + writel(opcode, ispi->sregs + PREOP_OPTYPE); 495 + } 496 + 497 + /* 498 + * This enables atomic sequence on next SW cycle. Will 499 + * be cleared after next operation. 500 + */ 501 + ispi->atomic_preopcode = opcode; 502 + return 0; 503 + } 504 + 505 + /* 506 + * We hope that HW sequencer will do the right thing automatically and 507 + * with the SW sequencer we cannot use preopcode anyway, so just ignore 508 + * the Write Disable operation and pretend it was completed 509 + * successfully. 
510 + */ 511 + if (opcode == SPINOR_OP_WRDI) 512 + return 0; 513 + 514 + writel(0, ispi->base + FADDR); 515 + 516 + /* Write the value beforehand */ 517 + ret = intel_spi_write_block(ispi, op->data.buf.out, nbytes); 518 + if (ret) 519 + return ret; 520 + 521 + if (ispi->swseq_reg) 522 + return intel_spi_sw_cycle(ispi, opcode, nbytes, 523 + OPTYPE_WRITE_NO_ADDR); 524 + return intel_spi_hw_cycle(ispi, opcode, nbytes); 525 + } 526 + 527 + static int intel_spi_read(struct intel_spi *ispi, 528 + const struct intel_spi_mem_op *iop, 529 + const struct spi_mem_op *op) 530 + { 531 + void *read_buf = op->data.buf.in; 532 + size_t block_size, nbytes = op->data.nbytes; 533 + u32 addr = op->addr.val; 534 + u32 val, status; 535 + int ret; 536 + 537 + /* 538 + * Atomic sequence is not expected with HW sequencer reads. Make 539 + * sure it is cleared regardless. 540 + */ 541 + if (WARN_ON_ONCE(ispi->atomic_preopcode)) 542 + ispi->atomic_preopcode = 0; 543 + 544 + while (nbytes > 0) { 545 + block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ); 546 + 547 + /* Read cannot cross 4K boundary */ 548 + block_size = min_t(loff_t, addr + block_size, 549 + round_up(addr + 1, SZ_4K)) - addr; 550 + 551 + writel(addr, ispi->base + FADDR); 552 + 553 + val = readl(ispi->base + HSFSTS_CTL); 554 + val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); 555 + val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; 556 + val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT; 557 + val |= HSFSTS_CTL_FCYCLE_READ; 558 + val |= HSFSTS_CTL_FGO; 559 + writel(val, ispi->base + HSFSTS_CTL); 560 + 561 + ret = intel_spi_wait_hw_busy(ispi); 562 + if (ret) 563 + return ret; 564 + 565 + status = readl(ispi->base + HSFSTS_CTL); 566 + if (status & HSFSTS_CTL_FCERR) 567 + ret = -EIO; 568 + else if (status & HSFSTS_CTL_AEL) 569 + ret = -EACCES; 570 + 571 + if (ret < 0) { 572 + dev_err(ispi->dev, "read error: %x: %#x\n", addr, status); 573 + return ret; 574 + } 575 + 576 + ret = intel_spi_read_block(ispi, read_buf, 
block_size); 577 + if (ret) 578 + return ret; 579 + 580 + nbytes -= block_size; 581 + addr += block_size; 582 + read_buf += block_size; 583 + } 584 + 585 + return 0; 586 + } 587 + 588 + static int intel_spi_write(struct intel_spi *ispi, 589 + const struct intel_spi_mem_op *iop, 590 + const struct spi_mem_op *op) 591 + { 592 + size_t block_size, nbytes = op->data.nbytes; 593 + const void *write_buf = op->data.buf.out; 594 + u32 addr = op->addr.val; 595 + u32 val, status; 596 + int ret; 597 + 598 + /* Not needed with HW sequencer write, make sure it is cleared */ 599 + ispi->atomic_preopcode = 0; 600 + 601 + while (nbytes > 0) { 602 + block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ); 603 + 604 + /* Write cannot cross 4K boundary */ 605 + block_size = min_t(loff_t, addr + block_size, 606 + round_up(addr + 1, SZ_4K)) - addr; 607 + 608 + writel(addr, ispi->base + FADDR); 609 + 610 + val = readl(ispi->base + HSFSTS_CTL); 611 + val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); 612 + val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; 613 + val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT; 614 + val |= HSFSTS_CTL_FCYCLE_WRITE; 615 + 616 + ret = intel_spi_write_block(ispi, write_buf, block_size); 617 + if (ret) { 618 + dev_err(ispi->dev, "failed to write block\n"); 619 + return ret; 620 + } 621 + 622 + /* Start the write now */ 623 + val |= HSFSTS_CTL_FGO; 624 + writel(val, ispi->base + HSFSTS_CTL); 625 + 626 + ret = intel_spi_wait_hw_busy(ispi); 627 + if (ret) { 628 + dev_err(ispi->dev, "timeout\n"); 629 + return ret; 630 + } 631 + 632 + status = readl(ispi->base + HSFSTS_CTL); 633 + if (status & HSFSTS_CTL_FCERR) 634 + ret = -EIO; 635 + else if (status & HSFSTS_CTL_AEL) 636 + ret = -EACCES; 637 + 638 + if (ret < 0) { 639 + dev_err(ispi->dev, "write error: %x: %#x\n", addr, status); 640 + return ret; 641 + } 642 + 643 + nbytes -= block_size; 644 + addr += block_size; 645 + write_buf += block_size; 646 + } 647 + 648 + return 0; 649 + } 650 + 651 + 
static int intel_spi_erase(struct intel_spi *ispi, 652 + const struct intel_spi_mem_op *iop, 653 + const struct spi_mem_op *op) 654 + { 655 + u8 opcode = op->cmd.opcode; 656 + u32 addr = op->addr.val; 657 + u32 val, status; 658 + int ret; 659 + 660 + writel(addr, ispi->base + FADDR); 661 + 662 + if (ispi->swseq_erase) 663 + return intel_spi_sw_cycle(ispi, opcode, 0, 664 + OPTYPE_WRITE_WITH_ADDR); 665 + 666 + /* Not needed with HW sequencer erase, make sure it is cleared */ 667 + ispi->atomic_preopcode = 0; 668 + 669 + val = readl(ispi->base + HSFSTS_CTL); 670 + val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); 671 + val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; 672 + val |= HSFSTS_CTL_FGO; 673 + val |= iop->replacement_op; 674 + writel(val, ispi->base + HSFSTS_CTL); 675 + 676 + ret = intel_spi_wait_hw_busy(ispi); 677 + if (ret) 678 + return ret; 679 + 680 + status = readl(ispi->base + HSFSTS_CTL); 681 + if (status & HSFSTS_CTL_FCERR) 682 + return -EIO; 683 + if (status & HSFSTS_CTL_AEL) 684 + return -EACCES; 685 + 686 + return 0; 687 + } 688 + 689 + static bool intel_spi_cmp_mem_op(const struct intel_spi_mem_op *iop, 690 + const struct spi_mem_op *op) 691 + { 692 + if (iop->mem_op.cmd.nbytes != op->cmd.nbytes || 693 + iop->mem_op.cmd.buswidth != op->cmd.buswidth || 694 + iop->mem_op.cmd.dtr != op->cmd.dtr || 695 + iop->mem_op.cmd.opcode != op->cmd.opcode) 696 + return false; 697 + 698 + if (iop->mem_op.addr.nbytes != op->addr.nbytes || 699 + iop->mem_op.addr.dtr != op->addr.dtr) 700 + return false; 701 + 702 + if (iop->mem_op.data.dir != op->data.dir || 703 + iop->mem_op.data.dtr != op->data.dtr) 704 + return false; 705 + 706 + if (iop->mem_op.data.dir != SPI_MEM_NO_DATA) { 707 + if (iop->mem_op.data.buswidth != op->data.buswidth) 708 + return false; 709 + } 710 + 711 + return true; 712 + } 713 + 714 + static const struct intel_spi_mem_op * 715 + intel_spi_match_mem_op(struct intel_spi *ispi, const struct spi_mem_op *op) 716 + { 717 + const 
struct intel_spi_mem_op *iop; 718 + 719 + for (iop = ispi->mem_ops; iop->mem_op.cmd.opcode; iop++) { 720 + if (intel_spi_cmp_mem_op(iop, op)) 721 + break; 722 + } 723 + 724 + return iop->mem_op.cmd.opcode ? iop : NULL; 725 + } 726 + 727 + static bool intel_spi_supports_mem_op(struct spi_mem *mem, 728 + const struct spi_mem_op *op) 729 + { 730 + struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master); 731 + const struct intel_spi_mem_op *iop; 732 + 733 + iop = intel_spi_match_mem_op(ispi, op); 734 + if (!iop) { 735 + dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode); 736 + return false; 737 + } 738 + 739 + /* 740 + * For software sequencer check that the opcode is actually 741 + * present in the opmenu if it is locked. 742 + */ 743 + if (ispi->swseq_reg && ispi->locked) { 744 + int i; 745 + 746 + /* Check if it is in the locked opcodes list */ 747 + for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) { 748 + if (ispi->opcodes[i] == op->cmd.opcode) 749 + return true; 750 + } 751 + 752 + dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode); 753 + return false; 754 + } 755 + 756 + return true; 757 + } 758 + 759 + static int intel_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) 760 + { 761 + struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master); 762 + const struct intel_spi_mem_op *iop; 763 + 764 + iop = intel_spi_match_mem_op(ispi, op); 765 + if (!iop) 766 + return -EOPNOTSUPP; 767 + 768 + return iop->exec_op(ispi, iop, op); 769 + } 770 + 771 + static const char *intel_spi_get_name(struct spi_mem *mem) 772 + { 773 + const struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master); 774 + 775 + /* 776 + * Return name of the flash controller device to be compatible 777 + * with the MTD version. 
778 + */ 779 + return dev_name(ispi->dev); 780 + } 781 + 782 + static const struct spi_controller_mem_ops intel_spi_mem_ops = { 783 + .supports_op = intel_spi_supports_mem_op, 784 + .exec_op = intel_spi_exec_mem_op, 785 + .get_name = intel_spi_get_name, 786 + }; 787 + 788 + #define INTEL_SPI_OP_ADDR(__nbytes) \ 789 + { \ 790 + .nbytes = __nbytes, \ 791 + } 792 + 793 + #define INTEL_SPI_OP_NO_DATA \ 794 + { \ 795 + .dir = SPI_MEM_NO_DATA, \ 796 + } 797 + 798 + #define INTEL_SPI_OP_DATA_IN(__buswidth) \ 799 + { \ 800 + .dir = SPI_MEM_DATA_IN, \ 801 + .buswidth = __buswidth, \ 802 + } 803 + 804 + #define INTEL_SPI_OP_DATA_OUT(__buswidth) \ 805 + { \ 806 + .dir = SPI_MEM_DATA_OUT, \ 807 + .buswidth = __buswidth, \ 808 + } 809 + 810 + #define INTEL_SPI_MEM_OP(__cmd, __addr, __data, __exec_op) \ 811 + { \ 812 + .mem_op = { \ 813 + .cmd = __cmd, \ 814 + .addr = __addr, \ 815 + .data = __data, \ 816 + }, \ 817 + .exec_op = __exec_op, \ 818 + } 819 + 820 + #define INTEL_SPI_MEM_OP_REPL(__cmd, __addr, __data, __exec_op, __repl) \ 821 + { \ 822 + .mem_op = { \ 823 + .cmd = __cmd, \ 824 + .addr = __addr, \ 825 + .data = __data, \ 826 + }, \ 827 + .exec_op = __exec_op, \ 828 + .replacement_op = __repl, \ 829 + } 830 + 831 + /* 832 + * The controller handles pretty much everything internally based on the 833 + * SFDP data but we want to make sure we only support the operations 834 + * actually possible. Only check buswidth and transfer direction, the 835 + * core validates data. 
836 + */ 837 + #define INTEL_SPI_GENERIC_OPS \ 838 + /* Status register operations */ \ 839 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1), \ 840 + SPI_MEM_OP_NO_ADDR, \ 841 + INTEL_SPI_OP_DATA_IN(1), \ 842 + intel_spi_read_reg), \ 843 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1), \ 844 + SPI_MEM_OP_NO_ADDR, \ 845 + INTEL_SPI_OP_DATA_IN(1), \ 846 + intel_spi_read_reg), \ 847 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1), \ 848 + SPI_MEM_OP_NO_ADDR, \ 849 + INTEL_SPI_OP_DATA_OUT(1), \ 850 + intel_spi_write_reg), \ 851 + /* Normal read */ \ 852 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \ 853 + INTEL_SPI_OP_ADDR(3), \ 854 + INTEL_SPI_OP_DATA_IN(1), \ 855 + intel_spi_read), \ 856 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \ 857 + INTEL_SPI_OP_ADDR(3), \ 858 + INTEL_SPI_OP_DATA_IN(2), \ 859 + intel_spi_read), \ 860 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \ 861 + INTEL_SPI_OP_ADDR(3), \ 862 + INTEL_SPI_OP_DATA_IN(4), \ 863 + intel_spi_read), \ 864 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \ 865 + INTEL_SPI_OP_ADDR(4), \ 866 + INTEL_SPI_OP_DATA_IN(1), \ 867 + intel_spi_read), \ 868 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \ 869 + INTEL_SPI_OP_ADDR(4), \ 870 + INTEL_SPI_OP_DATA_IN(2), \ 871 + intel_spi_read), \ 872 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \ 873 + INTEL_SPI_OP_ADDR(4), \ 874 + INTEL_SPI_OP_DATA_IN(4), \ 875 + intel_spi_read), \ 876 + /* Fast read */ \ 877 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \ 878 + INTEL_SPI_OP_ADDR(3), \ 879 + INTEL_SPI_OP_DATA_IN(1), \ 880 + intel_spi_read), \ 881 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \ 882 + INTEL_SPI_OP_ADDR(3), \ 883 + INTEL_SPI_OP_DATA_IN(2), \ 884 + intel_spi_read), \ 885 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \ 886 + INTEL_SPI_OP_ADDR(3), \ 887 + INTEL_SPI_OP_DATA_IN(4), \ 888 + intel_spi_read), \ 889 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), 
\ 890 + INTEL_SPI_OP_ADDR(4), \ 891 + INTEL_SPI_OP_DATA_IN(1), \ 892 + intel_spi_read), \ 893 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \ 894 + INTEL_SPI_OP_ADDR(4), \ 895 + INTEL_SPI_OP_DATA_IN(2), \ 896 + intel_spi_read), \ 897 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \ 898 + INTEL_SPI_OP_ADDR(4), \ 899 + INTEL_SPI_OP_DATA_IN(4), \ 900 + intel_spi_read), \ 901 + /* Read with 4-byte address opcode */ \ 902 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \ 903 + INTEL_SPI_OP_ADDR(4), \ 904 + INTEL_SPI_OP_DATA_IN(1), \ 905 + intel_spi_read), \ 906 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \ 907 + INTEL_SPI_OP_ADDR(4), \ 908 + INTEL_SPI_OP_DATA_IN(2), \ 909 + intel_spi_read), \ 910 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \ 911 + INTEL_SPI_OP_ADDR(4), \ 912 + INTEL_SPI_OP_DATA_IN(4), \ 913 + intel_spi_read), \ 914 + /* Fast read with 4-byte address opcode */ \ 915 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \ 916 + INTEL_SPI_OP_ADDR(4), \ 917 + INTEL_SPI_OP_DATA_IN(1), \ 918 + intel_spi_read), \ 919 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \ 920 + INTEL_SPI_OP_ADDR(4), \ 921 + INTEL_SPI_OP_DATA_IN(2), \ 922 + intel_spi_read), \ 923 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \ 924 + INTEL_SPI_OP_ADDR(4), \ 925 + INTEL_SPI_OP_DATA_IN(4), \ 926 + intel_spi_read), \ 927 + /* Write operations */ \ 928 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1), \ 929 + INTEL_SPI_OP_ADDR(3), \ 930 + INTEL_SPI_OP_DATA_OUT(1), \ 931 + intel_spi_write), \ 932 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1), \ 933 + INTEL_SPI_OP_ADDR(4), \ 934 + INTEL_SPI_OP_DATA_OUT(1), \ 935 + intel_spi_write), \ 936 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP_4B, 1), \ 937 + INTEL_SPI_OP_ADDR(4), \ 938 + INTEL_SPI_OP_DATA_OUT(1), \ 939 + intel_spi_write), \ 940 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1), \ 941 + SPI_MEM_OP_NO_ADDR, \ 942 + 
SPI_MEM_OP_NO_DATA, \ 943 + intel_spi_write_reg), \ 944 + INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1), \ 945 + SPI_MEM_OP_NO_ADDR, \ 946 + SPI_MEM_OP_NO_DATA, \ 947 + intel_spi_write_reg), \ 948 + /* Erase operations */ \ 949 + INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1), \ 950 + INTEL_SPI_OP_ADDR(3), \ 951 + SPI_MEM_OP_NO_DATA, \ 952 + intel_spi_erase, \ 953 + HSFSTS_CTL_FCYCLE_ERASE), \ 954 + INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1), \ 955 + INTEL_SPI_OP_ADDR(4), \ 956 + SPI_MEM_OP_NO_DATA, \ 957 + intel_spi_erase, \ 958 + HSFSTS_CTL_FCYCLE_ERASE), \ 959 + INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K_4B, 1), \ 960 + INTEL_SPI_OP_ADDR(4), \ 961 + SPI_MEM_OP_NO_DATA, \ 962 + intel_spi_erase, \ 963 + HSFSTS_CTL_FCYCLE_ERASE) \ 964 + 965 + static const struct intel_spi_mem_op generic_mem_ops[] = { 966 + INTEL_SPI_GENERIC_OPS, 967 + { }, 968 + }; 969 + 970 + static const struct intel_spi_mem_op erase_64k_mem_ops[] = { 971 + INTEL_SPI_GENERIC_OPS, 972 + /* 64k sector erase operations */ 973 + INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1), 974 + INTEL_SPI_OP_ADDR(3), 975 + SPI_MEM_OP_NO_DATA, 976 + intel_spi_erase, 977 + HSFSTS_CTL_FCYCLE_ERASE_64K), 978 + INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1), 979 + INTEL_SPI_OP_ADDR(4), 980 + SPI_MEM_OP_NO_DATA, 981 + intel_spi_erase, 982 + HSFSTS_CTL_FCYCLE_ERASE_64K), 983 + INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE_4B, 1), 984 + INTEL_SPI_OP_ADDR(4), 985 + SPI_MEM_OP_NO_DATA, 986 + intel_spi_erase, 987 + HSFSTS_CTL_FCYCLE_ERASE_64K), 988 + { }, 989 + }; 990 + 991 + static int intel_spi_init(struct intel_spi *ispi) 992 + { 993 + u32 opmenu0, opmenu1, lvscc, uvscc, val; 994 + bool erase_64k = false; 995 + int i; 996 + 997 + switch (ispi->info->type) { 998 + case INTEL_SPI_BYT: 999 + ispi->sregs = ispi->base + BYT_SSFSTS_CTL; 1000 + ispi->pregs = ispi->base + BYT_PR; 1001 + ispi->nregions = BYT_FREG_NUM; 1002 + ispi->pr_num = BYT_PR_NUM; 1003 + ispi->swseq_reg 
= true; 1004 + break; 1005 + 1006 + case INTEL_SPI_LPT: 1007 + ispi->sregs = ispi->base + LPT_SSFSTS_CTL; 1008 + ispi->pregs = ispi->base + LPT_PR; 1009 + ispi->nregions = LPT_FREG_NUM; 1010 + ispi->pr_num = LPT_PR_NUM; 1011 + ispi->swseq_reg = true; 1012 + break; 1013 + 1014 + case INTEL_SPI_BXT: 1015 + ispi->sregs = ispi->base + BXT_SSFSTS_CTL; 1016 + ispi->pregs = ispi->base + BXT_PR; 1017 + ispi->nregions = BXT_FREG_NUM; 1018 + ispi->pr_num = BXT_PR_NUM; 1019 + erase_64k = true; 1020 + break; 1021 + 1022 + case INTEL_SPI_CNL: 1023 + ispi->sregs = NULL; 1024 + ispi->pregs = ispi->base + CNL_PR; 1025 + ispi->nregions = CNL_FREG_NUM; 1026 + ispi->pr_num = CNL_PR_NUM; 1027 + break; 1028 + 1029 + default: 1030 + return -EINVAL; 1031 + } 1032 + 1033 + /* Try to disable write protection if user asked to do so */ 1034 + if (writeable && !intel_spi_set_writeable(ispi)) { 1035 + dev_warn(ispi->dev, "can't disable chip write protection\n"); 1036 + writeable = false; 1037 + } 1038 + 1039 + /* Disable #SMI generation from HW sequencer */ 1040 + val = readl(ispi->base + HSFSTS_CTL); 1041 + val &= ~HSFSTS_CTL_FSMIE; 1042 + writel(val, ispi->base + HSFSTS_CTL); 1043 + 1044 + /* 1045 + * Determine whether erase operation should use HW or SW sequencer. 1046 + * 1047 + * The HW sequencer has a predefined list of opcodes, with only the 1048 + * erase opcode being programmable in LVSCC and UVSCC registers. 1049 + * If these registers don't contain a valid erase opcode, erase 1050 + * cannot be done using HW sequencer. 
1051 + */ 1052 + lvscc = readl(ispi->base + LVSCC); 1053 + uvscc = readl(ispi->base + UVSCC); 1054 + if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK)) 1055 + ispi->swseq_erase = true; 1056 + /* SPI controller on Intel BXT supports 64K erase opcode */ 1057 + if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase) 1058 + if (!(lvscc & ERASE_64K_OPCODE_MASK) || 1059 + !(uvscc & ERASE_64K_OPCODE_MASK)) 1060 + erase_64k = false; 1061 + 1062 + if (!ispi->sregs && (ispi->swseq_reg || ispi->swseq_erase)) { 1063 + dev_err(ispi->dev, "software sequencer not supported, but required\n"); 1064 + return -EINVAL; 1065 + } 1066 + 1067 + /* 1068 + * Some controllers can only do basic operations using hardware 1069 + * sequencer. All other operations are supposed to be carried out 1070 + * using software sequencer. 1071 + */ 1072 + if (ispi->swseq_reg) { 1073 + /* Disable #SMI generation from SW sequencer */ 1074 + val = readl(ispi->sregs + SSFSTS_CTL); 1075 + val &= ~SSFSTS_CTL_FSMIE; 1076 + writel(val, ispi->sregs + SSFSTS_CTL); 1077 + } 1078 + 1079 + /* Check controller's lock status */ 1080 + val = readl(ispi->base + HSFSTS_CTL); 1081 + ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN); 1082 + 1083 + if (ispi->locked && ispi->sregs) { 1084 + /* 1085 + * BIOS programs allowed opcodes and then locks down the 1086 + * register. So read back what opcodes it decided to support. 1087 + * That's the set we are going to support as well. 
1088 + */ 1089 + opmenu0 = readl(ispi->sregs + OPMENU0); 1090 + opmenu1 = readl(ispi->sregs + OPMENU1); 1091 + 1092 + if (opmenu0 && opmenu1) { 1093 + for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) { 1094 + ispi->opcodes[i] = opmenu0 >> i * 8; 1095 + ispi->opcodes[i + 4] = opmenu1 >> i * 8; 1096 + } 1097 + } 1098 + } 1099 + 1100 + if (erase_64k) { 1101 + dev_dbg(ispi->dev, "Using erase_64k memory operations"); 1102 + ispi->mem_ops = erase_64k_mem_ops; 1103 + } else { 1104 + dev_dbg(ispi->dev, "Using generic memory operations"); 1105 + ispi->mem_ops = generic_mem_ops; 1106 + } 1107 + 1108 + intel_spi_dump_regs(ispi); 1109 + return 0; 1110 + } 1111 + 1112 + static bool intel_spi_is_protected(const struct intel_spi *ispi, 1113 + unsigned int base, unsigned int limit) 1114 + { 1115 + int i; 1116 + 1117 + for (i = 0; i < ispi->pr_num; i++) { 1118 + u32 pr_base, pr_limit, pr_value; 1119 + 1120 + pr_value = readl(ispi->pregs + PR(i)); 1121 + if (!(pr_value & (PR_WPE | PR_RPE))) 1122 + continue; 1123 + 1124 + pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT; 1125 + pr_base = pr_value & PR_BASE_MASK; 1126 + 1127 + if (pr_base >= base && pr_limit <= limit) 1128 + return true; 1129 + } 1130 + 1131 + return false; 1132 + } 1133 + 1134 + /* 1135 + * There will be a single partition holding all enabled flash regions. We 1136 + * call this "BIOS". 1137 + */ 1138 + static void intel_spi_fill_partition(struct intel_spi *ispi, 1139 + struct mtd_partition *part) 1140 + { 1141 + u64 end; 1142 + int i; 1143 + 1144 + memset(part, 0, sizeof(*part)); 1145 + 1146 + /* Start from the mandatory descriptor region */ 1147 + part->size = 4096; 1148 + part->name = "BIOS"; 1149 + 1150 + /* 1151 + * Now try to find where this partition ends based on the flash 1152 + * region registers. 
1153 + */ 1154 + for (i = 1; i < ispi->nregions; i++) { 1155 + u32 region, base, limit; 1156 + 1157 + region = readl(ispi->base + FREG(i)); 1158 + base = region & FREG_BASE_MASK; 1159 + limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT; 1160 + 1161 + if (base >= limit || limit == 0) 1162 + continue; 1163 + 1164 + /* 1165 + * If any of the regions have protection bits set, make the 1166 + * whole partition read-only to be on the safe side. 1167 + * 1168 + * Also if the user did not ask the chip to be writeable 1169 + * mask the bit too. 1170 + */ 1171 + if (!writeable || intel_spi_is_protected(ispi, base, limit)) 1172 + part->mask_flags |= MTD_WRITEABLE; 1173 + 1174 + end = (limit << 12) + 4096; 1175 + if (end > part->size) 1176 + part->size = end; 1177 + } 1178 + } 1179 + 1180 + static int intel_spi_populate_chip(struct intel_spi *ispi) 1181 + { 1182 + struct flash_platform_data *pdata; 1183 + struct spi_board_info chip; 1184 + 1185 + pdata = devm_kzalloc(ispi->dev, sizeof(*pdata), GFP_KERNEL); 1186 + if (!pdata) 1187 + return -ENOMEM; 1188 + 1189 + pdata->nr_parts = 1; 1190 + pdata->parts = devm_kcalloc(ispi->dev, sizeof(*pdata->parts), 1191 + pdata->nr_parts, GFP_KERNEL); 1192 + if (!pdata->parts) 1193 + return -ENOMEM; 1194 + 1195 + intel_spi_fill_partition(ispi, pdata->parts); 1196 + 1197 + memset(&chip, 0, sizeof(chip)); 1198 + snprintf(chip.modalias, 8, "spi-nor"); 1199 + chip.platform_data = pdata; 1200 + 1201 + return spi_new_device(ispi->master, &chip) ? 0 : -ENODEV; 1202 + } 1203 + 1204 + /** 1205 + * intel_spi_probe() - Probe the Intel SPI flash controller 1206 + * @dev: Pointer to the parent device 1207 + * @mem: MMIO resource 1208 + * @info: Platform specific information 1209 + * 1210 + * Probes Intel SPI flash controller and creates the flash chip device. 1211 + * Returns %0 on success and negative errno in case of failure. 
1212 + */ 1213 + int intel_spi_probe(struct device *dev, struct resource *mem, 1214 + const struct intel_spi_boardinfo *info) 1215 + { 1216 + struct spi_controller *master; 1217 + struct intel_spi *ispi; 1218 + int ret; 1219 + 1220 + master = devm_spi_alloc_master(dev, sizeof(*ispi)); 1221 + if (!master) 1222 + return -ENOMEM; 1223 + 1224 + master->mem_ops = &intel_spi_mem_ops; 1225 + 1226 + ispi = spi_master_get_devdata(master); 1227 + 1228 + ispi->base = devm_ioremap_resource(dev, mem); 1229 + if (IS_ERR(ispi->base)) 1230 + return PTR_ERR(ispi->base); 1231 + 1232 + ispi->dev = dev; 1233 + ispi->master = master; 1234 + ispi->info = info; 1235 + 1236 + ret = intel_spi_init(ispi); 1237 + if (ret) 1238 + return ret; 1239 + 1240 + ret = devm_spi_register_master(dev, master); 1241 + if (ret) 1242 + return ret; 1243 + 1244 + return intel_spi_populate_chip(ispi); 1245 + } 1246 + EXPORT_SYMBOL_GPL(intel_spi_probe); 1247 + 1248 + MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver"); 1249 + MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); 1250 + MODULE_LICENSE("GPL v2");
+19
drivers/spi/spi-intel.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Intel PCH/PCU SPI flash driver. 4 + * 5 + * Copyright (C) 2016 - 2022, Intel Corporation 6 + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> 7 + */ 8 + 9 + #ifndef SPI_INTEL_H 10 + #define SPI_INTEL_H 11 + 12 + #include <linux/platform_data/x86/spi-intel.h> 13 + 14 + struct resource; 15 + 16 + int intel_spi_probe(struct device *dev, struct resource *mem, 17 + const struct intel_spi_boardinfo *info); 18 + 19 + #endif /* SPI_INTEL_H */
+1 -1
include/linux/mfd/lpc_ich.h
··· 8 8 #ifndef LPC_ICH_H 9 9 #define LPC_ICH_H 10 10 11 - #include <linux/platform_data/x86/intel-spi.h> 11 + #include <linux/platform_data/x86/spi-intel.h> 12 12 13 13 /* GPIO resources */ 14 14 #define ICH_RES_GPIO 0
+3 -3
include/linux/platform_data/x86/intel-spi.h include/linux/platform_data/x86/spi-intel.h
··· 6 6 * Author: Mika Westerberg <mika.westerberg@linux.intel.com> 7 7 */ 8 8 9 - #ifndef INTEL_SPI_PDATA_H 10 - #define INTEL_SPI_PDATA_H 9 + #ifndef SPI_INTEL_PDATA_H 10 + #define SPI_INTEL_PDATA_H 11 11 12 12 enum intel_spi_type { 13 13 INTEL_SPI_BYT = 1, ··· 28 28 void *data; 29 29 }; 30 30 31 - #endif /* INTEL_SPI_PDATA_H */ 31 + #endif /* SPI_INTEL_PDATA_H */