Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Snapshot at tag v5.16 — 968 lines, 24 kB (raw view).
1// SPDX-License-Identifier: GPL-2.0-only 2/* 3 * Intel PCH/PCU SPI flash driver. 4 * 5 * Copyright (C) 2016, Intel Corporation 6 * Author: Mika Westerberg <mika.westerberg@linux.intel.com> 7 */ 8 9#include <linux/err.h> 10#include <linux/io.h> 11#include <linux/iopoll.h> 12#include <linux/module.h> 13#include <linux/sched.h> 14#include <linux/sizes.h> 15#include <linux/mtd/mtd.h> 16#include <linux/mtd/partitions.h> 17#include <linux/mtd/spi-nor.h> 18 19#include "intel-spi.h" 20 21/* Offsets are from @ispi->base */ 22#define BFPREG 0x00 23 24#define HSFSTS_CTL 0x04 25#define HSFSTS_CTL_FSMIE BIT(31) 26#define HSFSTS_CTL_FDBC_SHIFT 24 27#define HSFSTS_CTL_FDBC_MASK (0x3f << HSFSTS_CTL_FDBC_SHIFT) 28 29#define HSFSTS_CTL_FCYCLE_SHIFT 17 30#define HSFSTS_CTL_FCYCLE_MASK (0x0f << HSFSTS_CTL_FCYCLE_SHIFT) 31/* HW sequencer opcodes */ 32#define HSFSTS_CTL_FCYCLE_READ (0x00 << HSFSTS_CTL_FCYCLE_SHIFT) 33#define HSFSTS_CTL_FCYCLE_WRITE (0x02 << HSFSTS_CTL_FCYCLE_SHIFT) 34#define HSFSTS_CTL_FCYCLE_ERASE (0x03 << HSFSTS_CTL_FCYCLE_SHIFT) 35#define HSFSTS_CTL_FCYCLE_ERASE_64K (0x04 << HSFSTS_CTL_FCYCLE_SHIFT) 36#define HSFSTS_CTL_FCYCLE_RDID (0x06 << HSFSTS_CTL_FCYCLE_SHIFT) 37#define HSFSTS_CTL_FCYCLE_WRSR (0x07 << HSFSTS_CTL_FCYCLE_SHIFT) 38#define HSFSTS_CTL_FCYCLE_RDSR (0x08 << HSFSTS_CTL_FCYCLE_SHIFT) 39 40#define HSFSTS_CTL_FGO BIT(16) 41#define HSFSTS_CTL_FLOCKDN BIT(15) 42#define HSFSTS_CTL_FDV BIT(14) 43#define HSFSTS_CTL_SCIP BIT(5) 44#define HSFSTS_CTL_AEL BIT(2) 45#define HSFSTS_CTL_FCERR BIT(1) 46#define HSFSTS_CTL_FDONE BIT(0) 47 48#define FADDR 0x08 49#define DLOCK 0x0c 50#define FDATA(n) (0x10 + ((n) * 4)) 51 52#define FRACC 0x50 53 54#define FREG(n) (0x54 + ((n) * 4)) 55#define FREG_BASE_MASK 0x3fff 56#define FREG_LIMIT_SHIFT 16 57#define FREG_LIMIT_MASK (0x03fff << FREG_LIMIT_SHIFT) 58 59/* Offset is from @ispi->pregs */ 60#define PR(n) ((n) * 4) 61#define PR_WPE BIT(31) 62#define PR_LIMIT_SHIFT 16 63#define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT) 64#define 
PR_RPE BIT(15) 65#define PR_BASE_MASK 0x3fff 66 67/* Offsets are from @ispi->sregs */ 68#define SSFSTS_CTL 0x00 69#define SSFSTS_CTL_FSMIE BIT(23) 70#define SSFSTS_CTL_DS BIT(22) 71#define SSFSTS_CTL_DBC_SHIFT 16 72#define SSFSTS_CTL_SPOP BIT(11) 73#define SSFSTS_CTL_ACS BIT(10) 74#define SSFSTS_CTL_SCGO BIT(9) 75#define SSFSTS_CTL_COP_SHIFT 12 76#define SSFSTS_CTL_FRS BIT(7) 77#define SSFSTS_CTL_DOFRS BIT(6) 78#define SSFSTS_CTL_AEL BIT(4) 79#define SSFSTS_CTL_FCERR BIT(3) 80#define SSFSTS_CTL_FDONE BIT(2) 81#define SSFSTS_CTL_SCIP BIT(0) 82 83#define PREOP_OPTYPE 0x04 84#define OPMENU0 0x08 85#define OPMENU1 0x0c 86 87#define OPTYPE_READ_NO_ADDR 0 88#define OPTYPE_WRITE_NO_ADDR 1 89#define OPTYPE_READ_WITH_ADDR 2 90#define OPTYPE_WRITE_WITH_ADDR 3 91 92/* CPU specifics */ 93#define BYT_PR 0x74 94#define BYT_SSFSTS_CTL 0x90 95#define BYT_BCR 0xfc 96#define BYT_BCR_WPD BIT(0) 97#define BYT_FREG_NUM 5 98#define BYT_PR_NUM 5 99 100#define LPT_PR 0x74 101#define LPT_SSFSTS_CTL 0x90 102#define LPT_FREG_NUM 5 103#define LPT_PR_NUM 5 104 105#define BXT_PR 0x84 106#define BXT_SSFSTS_CTL 0xa0 107#define BXT_FREG_NUM 12 108#define BXT_PR_NUM 6 109 110#define CNL_PR 0x84 111#define CNL_FREG_NUM 6 112#define CNL_PR_NUM 5 113 114#define LVSCC 0xc4 115#define UVSCC 0xc8 116#define ERASE_OPCODE_SHIFT 8 117#define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) 118#define ERASE_64K_OPCODE_SHIFT 16 119#define ERASE_64K_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) 120 121#define INTEL_SPI_TIMEOUT 5000 /* ms */ 122#define INTEL_SPI_FIFO_SZ 64 123 124/** 125 * struct intel_spi - Driver private data 126 * @dev: Device pointer 127 * @info: Pointer to board specific info 128 * @nor: SPI NOR layer structure 129 * @base: Beginning of MMIO space 130 * @pregs: Start of protection registers 131 * @sregs: Start of software sequencer registers 132 * @nregions: Maximum number of regions 133 * @pr_num: Maximum number of protected range registers 134 * @writeable: Is the chip writeable 135 * @locked: 
Is SPI setting locked 136 * @swseq_reg: Use SW sequencer in register reads/writes 137 * @swseq_erase: Use SW sequencer in erase operation 138 * @erase_64k: 64k erase supported 139 * @atomic_preopcode: Holds preopcode when atomic sequence is requested 140 * @opcodes: Opcodes which are supported. This are programmed by BIOS 141 * before it locks down the controller. 142 */ 143struct intel_spi { 144 struct device *dev; 145 const struct intel_spi_boardinfo *info; 146 struct spi_nor nor; 147 void __iomem *base; 148 void __iomem *pregs; 149 void __iomem *sregs; 150 size_t nregions; 151 size_t pr_num; 152 bool writeable; 153 bool locked; 154 bool swseq_reg; 155 bool swseq_erase; 156 bool erase_64k; 157 u8 atomic_preopcode; 158 u8 opcodes[8]; 159}; 160 161static bool writeable; 162module_param(writeable, bool, 0); 163MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)"); 164 165static void intel_spi_dump_regs(struct intel_spi *ispi) 166{ 167 u32 value; 168 int i; 169 170 dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG)); 171 172 value = readl(ispi->base + HSFSTS_CTL); 173 dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value); 174 if (value & HSFSTS_CTL_FLOCKDN) 175 dev_dbg(ispi->dev, "-> Locked\n"); 176 177 dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR)); 178 dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK)); 179 180 for (i = 0; i < 16; i++) 181 dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n", 182 i, readl(ispi->base + FDATA(i))); 183 184 dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC)); 185 186 for (i = 0; i < ispi->nregions; i++) 187 dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i, 188 readl(ispi->base + FREG(i))); 189 for (i = 0; i < ispi->pr_num; i++) 190 dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i, 191 readl(ispi->pregs + PR(i))); 192 193 if (ispi->sregs) { 194 value = readl(ispi->sregs + SSFSTS_CTL); 195 dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value); 196 dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n", 
197 readl(ispi->sregs + PREOP_OPTYPE)); 198 dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", 199 readl(ispi->sregs + OPMENU0)); 200 dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", 201 readl(ispi->sregs + OPMENU1)); 202 } 203 204 if (ispi->info->type == INTEL_SPI_BYT) 205 dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR)); 206 207 dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC)); 208 dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC)); 209 210 dev_dbg(ispi->dev, "Protected regions:\n"); 211 for (i = 0; i < ispi->pr_num; i++) { 212 u32 base, limit; 213 214 value = readl(ispi->pregs + PR(i)); 215 if (!(value & (PR_WPE | PR_RPE))) 216 continue; 217 218 limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT; 219 base = value & PR_BASE_MASK; 220 221 dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n", 222 i, base << 12, (limit << 12) | 0xfff, 223 value & PR_WPE ? 'W' : '.', 224 value & PR_RPE ? 'R' : '.'); 225 } 226 227 dev_dbg(ispi->dev, "Flash regions:\n"); 228 for (i = 0; i < ispi->nregions; i++) { 229 u32 region, base, limit; 230 231 region = readl(ispi->base + FREG(i)); 232 base = region & FREG_BASE_MASK; 233 limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT; 234 235 if (base >= limit || (i > 0 && limit == 0)) 236 dev_dbg(ispi->dev, " %02d disabled\n", i); 237 else 238 dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n", 239 i, base << 12, (limit << 12) | 0xfff); 240 } 241 242 dev_dbg(ispi->dev, "Using %cW sequencer for register access\n", 243 ispi->swseq_reg ? 'S' : 'H'); 244 dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n", 245 ispi->swseq_erase ? 
'S' : 'H'); 246} 247 248/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */ 249static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size) 250{ 251 size_t bytes; 252 int i = 0; 253 254 if (size > INTEL_SPI_FIFO_SZ) 255 return -EINVAL; 256 257 while (size > 0) { 258 bytes = min_t(size_t, size, 4); 259 memcpy_fromio(buf, ispi->base + FDATA(i), bytes); 260 size -= bytes; 261 buf += bytes; 262 i++; 263 } 264 265 return 0; 266} 267 268/* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */ 269static int intel_spi_write_block(struct intel_spi *ispi, const void *buf, 270 size_t size) 271{ 272 size_t bytes; 273 int i = 0; 274 275 if (size > INTEL_SPI_FIFO_SZ) 276 return -EINVAL; 277 278 while (size > 0) { 279 bytes = min_t(size_t, size, 4); 280 memcpy_toio(ispi->base + FDATA(i), buf, bytes); 281 size -= bytes; 282 buf += bytes; 283 i++; 284 } 285 286 return 0; 287} 288 289static int intel_spi_wait_hw_busy(struct intel_spi *ispi) 290{ 291 u32 val; 292 293 return readl_poll_timeout(ispi->base + HSFSTS_CTL, val, 294 !(val & HSFSTS_CTL_SCIP), 0, 295 INTEL_SPI_TIMEOUT * 1000); 296} 297 298static int intel_spi_wait_sw_busy(struct intel_spi *ispi) 299{ 300 u32 val; 301 302 return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val, 303 !(val & SSFSTS_CTL_SCIP), 0, 304 INTEL_SPI_TIMEOUT * 1000); 305} 306 307static int intel_spi_init(struct intel_spi *ispi) 308{ 309 u32 opmenu0, opmenu1, lvscc, uvscc, val; 310 int i; 311 312 switch (ispi->info->type) { 313 case INTEL_SPI_BYT: 314 ispi->sregs = ispi->base + BYT_SSFSTS_CTL; 315 ispi->pregs = ispi->base + BYT_PR; 316 ispi->nregions = BYT_FREG_NUM; 317 ispi->pr_num = BYT_PR_NUM; 318 ispi->swseq_reg = true; 319 320 if (writeable) { 321 /* Disable write protection */ 322 val = readl(ispi->base + BYT_BCR); 323 if (!(val & BYT_BCR_WPD)) { 324 val |= BYT_BCR_WPD; 325 writel(val, ispi->base + BYT_BCR); 326 val = readl(ispi->base + BYT_BCR); 327 } 328 329 ispi->writeable = !!(val & BYT_BCR_WPD); 330 } 331 332 
break; 333 334 case INTEL_SPI_LPT: 335 ispi->sregs = ispi->base + LPT_SSFSTS_CTL; 336 ispi->pregs = ispi->base + LPT_PR; 337 ispi->nregions = LPT_FREG_NUM; 338 ispi->pr_num = LPT_PR_NUM; 339 ispi->swseq_reg = true; 340 break; 341 342 case INTEL_SPI_BXT: 343 ispi->sregs = ispi->base + BXT_SSFSTS_CTL; 344 ispi->pregs = ispi->base + BXT_PR; 345 ispi->nregions = BXT_FREG_NUM; 346 ispi->pr_num = BXT_PR_NUM; 347 ispi->erase_64k = true; 348 break; 349 350 case INTEL_SPI_CNL: 351 ispi->sregs = NULL; 352 ispi->pregs = ispi->base + CNL_PR; 353 ispi->nregions = CNL_FREG_NUM; 354 ispi->pr_num = CNL_PR_NUM; 355 break; 356 357 default: 358 return -EINVAL; 359 } 360 361 /* Disable #SMI generation from HW sequencer */ 362 val = readl(ispi->base + HSFSTS_CTL); 363 val &= ~HSFSTS_CTL_FSMIE; 364 writel(val, ispi->base + HSFSTS_CTL); 365 366 /* 367 * Determine whether erase operation should use HW or SW sequencer. 368 * 369 * The HW sequencer has a predefined list of opcodes, with only the 370 * erase opcode being programmable in LVSCC and UVSCC registers. 371 * If these registers don't contain a valid erase opcode, erase 372 * cannot be done using HW sequencer. 373 */ 374 lvscc = readl(ispi->base + LVSCC); 375 uvscc = readl(ispi->base + UVSCC); 376 if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK)) 377 ispi->swseq_erase = true; 378 /* SPI controller on Intel BXT supports 64K erase opcode */ 379 if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase) 380 if (!(lvscc & ERASE_64K_OPCODE_MASK) || 381 !(uvscc & ERASE_64K_OPCODE_MASK)) 382 ispi->erase_64k = false; 383 384 if (ispi->sregs == NULL && (ispi->swseq_reg || ispi->swseq_erase)) { 385 dev_err(ispi->dev, "software sequencer not supported, but required\n"); 386 return -EINVAL; 387 } 388 389 /* 390 * Some controllers can only do basic operations using hardware 391 * sequencer. All other operations are supposed to be carried out 392 * using software sequencer. 
393 */ 394 if (ispi->swseq_reg) { 395 /* Disable #SMI generation from SW sequencer */ 396 val = readl(ispi->sregs + SSFSTS_CTL); 397 val &= ~SSFSTS_CTL_FSMIE; 398 writel(val, ispi->sregs + SSFSTS_CTL); 399 } 400 401 /* Check controller's lock status */ 402 val = readl(ispi->base + HSFSTS_CTL); 403 ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN); 404 405 if (ispi->locked && ispi->sregs) { 406 /* 407 * BIOS programs allowed opcodes and then locks down the 408 * register. So read back what opcodes it decided to support. 409 * That's the set we are going to support as well. 410 */ 411 opmenu0 = readl(ispi->sregs + OPMENU0); 412 opmenu1 = readl(ispi->sregs + OPMENU1); 413 414 if (opmenu0 && opmenu1) { 415 for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) { 416 ispi->opcodes[i] = opmenu0 >> i * 8; 417 ispi->opcodes[i + 4] = opmenu1 >> i * 8; 418 } 419 } 420 } 421 422 intel_spi_dump_regs(ispi); 423 424 return 0; 425} 426 427static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype) 428{ 429 int i; 430 int preop; 431 432 if (ispi->locked) { 433 for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) 434 if (ispi->opcodes[i] == opcode) 435 return i; 436 437 return -EINVAL; 438 } 439 440 /* The lock is off, so just use index 0 */ 441 writel(opcode, ispi->sregs + OPMENU0); 442 preop = readw(ispi->sregs + PREOP_OPTYPE); 443 writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE); 444 445 return 0; 446} 447 448static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, size_t len) 449{ 450 u32 val, status; 451 int ret; 452 453 val = readl(ispi->base + HSFSTS_CTL); 454 val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK); 455 456 switch (opcode) { 457 case SPINOR_OP_RDID: 458 val |= HSFSTS_CTL_FCYCLE_RDID; 459 break; 460 case SPINOR_OP_WRSR: 461 val |= HSFSTS_CTL_FCYCLE_WRSR; 462 break; 463 case SPINOR_OP_RDSR: 464 val |= HSFSTS_CTL_FCYCLE_RDSR; 465 break; 466 default: 467 return -EINVAL; 468 } 469 470 if (len > INTEL_SPI_FIFO_SZ) 471 return -EINVAL; 472 473 
val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT; 474 val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; 475 val |= HSFSTS_CTL_FGO; 476 writel(val, ispi->base + HSFSTS_CTL); 477 478 ret = intel_spi_wait_hw_busy(ispi); 479 if (ret) 480 return ret; 481 482 status = readl(ispi->base + HSFSTS_CTL); 483 if (status & HSFSTS_CTL_FCERR) 484 return -EIO; 485 else if (status & HSFSTS_CTL_AEL) 486 return -EACCES; 487 488 return 0; 489} 490 491static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len, 492 int optype) 493{ 494 u32 val = 0, status; 495 u8 atomic_preopcode; 496 int ret; 497 498 ret = intel_spi_opcode_index(ispi, opcode, optype); 499 if (ret < 0) 500 return ret; 501 502 if (len > INTEL_SPI_FIFO_SZ) 503 return -EINVAL; 504 505 /* 506 * Always clear it after each SW sequencer operation regardless 507 * of whether it is successful or not. 508 */ 509 atomic_preopcode = ispi->atomic_preopcode; 510 ispi->atomic_preopcode = 0; 511 512 /* Only mark 'Data Cycle' bit when there is data to be transferred */ 513 if (len > 0) 514 val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; 515 val |= ret << SSFSTS_CTL_COP_SHIFT; 516 val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE; 517 val |= SSFSTS_CTL_SCGO; 518 if (atomic_preopcode) { 519 u16 preop; 520 521 switch (optype) { 522 case OPTYPE_WRITE_NO_ADDR: 523 case OPTYPE_WRITE_WITH_ADDR: 524 /* Pick matching preopcode for the atomic sequence */ 525 preop = readw(ispi->sregs + PREOP_OPTYPE); 526 if ((preop & 0xff) == atomic_preopcode) 527 ; /* Do nothing */ 528 else if ((preop >> 8) == atomic_preopcode) 529 val |= SSFSTS_CTL_SPOP; 530 else 531 return -EINVAL; 532 533 /* Enable atomic sequence */ 534 val |= SSFSTS_CTL_ACS; 535 break; 536 537 default: 538 return -EINVAL; 539 } 540 541 } 542 writel(val, ispi->sregs + SSFSTS_CTL); 543 544 ret = intel_spi_wait_sw_busy(ispi); 545 if (ret) 546 return ret; 547 548 status = readl(ispi->sregs + SSFSTS_CTL); 549 if (status & SSFSTS_CTL_FCERR) 550 return -EIO; 551 else if (status & 
SSFSTS_CTL_AEL) 552 return -EACCES; 553 554 return 0; 555} 556 557static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, 558 size_t len) 559{ 560 struct intel_spi *ispi = nor->priv; 561 int ret; 562 563 /* Address of the first chip */ 564 writel(0, ispi->base + FADDR); 565 566 if (ispi->swseq_reg) 567 ret = intel_spi_sw_cycle(ispi, opcode, len, 568 OPTYPE_READ_NO_ADDR); 569 else 570 ret = intel_spi_hw_cycle(ispi, opcode, len); 571 572 if (ret) 573 return ret; 574 575 return intel_spi_read_block(ispi, buf, len); 576} 577 578static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, 579 size_t len) 580{ 581 struct intel_spi *ispi = nor->priv; 582 int ret; 583 584 /* 585 * This is handled with atomic operation and preop code in Intel 586 * controller so we only verify that it is available. If the 587 * controller is not locked, program the opcode to the PREOP 588 * register for later use. 589 * 590 * When hardware sequencer is used there is no need to program 591 * any opcodes (it handles them automatically as part of a command). 592 */ 593 if (opcode == SPINOR_OP_WREN) { 594 u16 preop; 595 596 if (!ispi->swseq_reg) 597 return 0; 598 599 preop = readw(ispi->sregs + PREOP_OPTYPE); 600 if ((preop & 0xff) != opcode && (preop >> 8) != opcode) { 601 if (ispi->locked) 602 return -EINVAL; 603 writel(opcode, ispi->sregs + PREOP_OPTYPE); 604 } 605 606 /* 607 * This enables atomic sequence on next SW sycle. Will 608 * be cleared after next operation. 609 */ 610 ispi->atomic_preopcode = opcode; 611 return 0; 612 } 613 614 /* 615 * We hope that HW sequencer will do the right thing automatically and 616 * with the SW sequencer we cannot use preopcode anyway, so just ignore 617 * the Write Disable operation and pretend it was completed 618 * successfully. 
619 */ 620 if (opcode == SPINOR_OP_WRDI) 621 return 0; 622 623 writel(0, ispi->base + FADDR); 624 625 /* Write the value beforehand */ 626 ret = intel_spi_write_block(ispi, buf, len); 627 if (ret) 628 return ret; 629 630 if (ispi->swseq_reg) 631 return intel_spi_sw_cycle(ispi, opcode, len, 632 OPTYPE_WRITE_NO_ADDR); 633 return intel_spi_hw_cycle(ispi, opcode, len); 634} 635 636static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len, 637 u_char *read_buf) 638{ 639 struct intel_spi *ispi = nor->priv; 640 size_t block_size, retlen = 0; 641 u32 val, status; 642 ssize_t ret; 643 644 /* 645 * Atomic sequence is not expected with HW sequencer reads. Make 646 * sure it is cleared regardless. 647 */ 648 if (WARN_ON_ONCE(ispi->atomic_preopcode)) 649 ispi->atomic_preopcode = 0; 650 651 switch (nor->read_opcode) { 652 case SPINOR_OP_READ: 653 case SPINOR_OP_READ_FAST: 654 case SPINOR_OP_READ_4B: 655 case SPINOR_OP_READ_FAST_4B: 656 break; 657 default: 658 return -EINVAL; 659 } 660 661 while (len > 0) { 662 block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ); 663 664 /* Read cannot cross 4K boundary */ 665 block_size = min_t(loff_t, from + block_size, 666 round_up(from + 1, SZ_4K)) - from; 667 668 writel(from, ispi->base + FADDR); 669 670 val = readl(ispi->base + HSFSTS_CTL); 671 val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); 672 val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; 673 val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT; 674 val |= HSFSTS_CTL_FCYCLE_READ; 675 val |= HSFSTS_CTL_FGO; 676 writel(val, ispi->base + HSFSTS_CTL); 677 678 ret = intel_spi_wait_hw_busy(ispi); 679 if (ret) 680 return ret; 681 682 status = readl(ispi->base + HSFSTS_CTL); 683 if (status & HSFSTS_CTL_FCERR) 684 ret = -EIO; 685 else if (status & HSFSTS_CTL_AEL) 686 ret = -EACCES; 687 688 if (ret < 0) { 689 dev_err(ispi->dev, "read error: %llx: %#x\n", from, 690 status); 691 return ret; 692 } 693 694 ret = intel_spi_read_block(ispi, read_buf, block_size); 695 
if (ret) 696 return ret; 697 698 len -= block_size; 699 from += block_size; 700 retlen += block_size; 701 read_buf += block_size; 702 } 703 704 return retlen; 705} 706 707static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len, 708 const u_char *write_buf) 709{ 710 struct intel_spi *ispi = nor->priv; 711 size_t block_size, retlen = 0; 712 u32 val, status; 713 ssize_t ret; 714 715 /* Not needed with HW sequencer write, make sure it is cleared */ 716 ispi->atomic_preopcode = 0; 717 718 while (len > 0) { 719 block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ); 720 721 /* Write cannot cross 4K boundary */ 722 block_size = min_t(loff_t, to + block_size, 723 round_up(to + 1, SZ_4K)) - to; 724 725 writel(to, ispi->base + FADDR); 726 727 val = readl(ispi->base + HSFSTS_CTL); 728 val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); 729 val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; 730 val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT; 731 val |= HSFSTS_CTL_FCYCLE_WRITE; 732 733 ret = intel_spi_write_block(ispi, write_buf, block_size); 734 if (ret) { 735 dev_err(ispi->dev, "failed to write block\n"); 736 return ret; 737 } 738 739 /* Start the write now */ 740 val |= HSFSTS_CTL_FGO; 741 writel(val, ispi->base + HSFSTS_CTL); 742 743 ret = intel_spi_wait_hw_busy(ispi); 744 if (ret) { 745 dev_err(ispi->dev, "timeout\n"); 746 return ret; 747 } 748 749 status = readl(ispi->base + HSFSTS_CTL); 750 if (status & HSFSTS_CTL_FCERR) 751 ret = -EIO; 752 else if (status & HSFSTS_CTL_AEL) 753 ret = -EACCES; 754 755 if (ret < 0) { 756 dev_err(ispi->dev, "write error: %llx: %#x\n", to, 757 status); 758 return ret; 759 } 760 761 len -= block_size; 762 to += block_size; 763 retlen += block_size; 764 write_buf += block_size; 765 } 766 767 return retlen; 768} 769 770static int intel_spi_erase(struct spi_nor *nor, loff_t offs) 771{ 772 size_t erase_size, len = nor->mtd.erasesize; 773 struct intel_spi *ispi = nor->priv; 774 u32 val, status, cmd; 775 int ret; 776 
777 /* If the hardware can do 64k erase use that when possible */ 778 if (len >= SZ_64K && ispi->erase_64k) { 779 cmd = HSFSTS_CTL_FCYCLE_ERASE_64K; 780 erase_size = SZ_64K; 781 } else { 782 cmd = HSFSTS_CTL_FCYCLE_ERASE; 783 erase_size = SZ_4K; 784 } 785 786 if (ispi->swseq_erase) { 787 while (len > 0) { 788 writel(offs, ispi->base + FADDR); 789 790 ret = intel_spi_sw_cycle(ispi, nor->erase_opcode, 791 0, OPTYPE_WRITE_WITH_ADDR); 792 if (ret) 793 return ret; 794 795 offs += erase_size; 796 len -= erase_size; 797 } 798 799 return 0; 800 } 801 802 /* Not needed with HW sequencer erase, make sure it is cleared */ 803 ispi->atomic_preopcode = 0; 804 805 while (len > 0) { 806 writel(offs, ispi->base + FADDR); 807 808 val = readl(ispi->base + HSFSTS_CTL); 809 val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); 810 val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; 811 val |= cmd; 812 val |= HSFSTS_CTL_FGO; 813 writel(val, ispi->base + HSFSTS_CTL); 814 815 ret = intel_spi_wait_hw_busy(ispi); 816 if (ret) 817 return ret; 818 819 status = readl(ispi->base + HSFSTS_CTL); 820 if (status & HSFSTS_CTL_FCERR) 821 return -EIO; 822 else if (status & HSFSTS_CTL_AEL) 823 return -EACCES; 824 825 offs += erase_size; 826 len -= erase_size; 827 } 828 829 return 0; 830} 831 832static bool intel_spi_is_protected(const struct intel_spi *ispi, 833 unsigned int base, unsigned int limit) 834{ 835 int i; 836 837 for (i = 0; i < ispi->pr_num; i++) { 838 u32 pr_base, pr_limit, pr_value; 839 840 pr_value = readl(ispi->pregs + PR(i)); 841 if (!(pr_value & (PR_WPE | PR_RPE))) 842 continue; 843 844 pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT; 845 pr_base = pr_value & PR_BASE_MASK; 846 847 if (pr_base >= base && pr_limit <= limit) 848 return true; 849 } 850 851 return false; 852} 853 854/* 855 * There will be a single partition holding all enabled flash regions. We 856 * call this "BIOS". 
857 */ 858static void intel_spi_fill_partition(struct intel_spi *ispi, 859 struct mtd_partition *part) 860{ 861 u64 end; 862 int i; 863 864 memset(part, 0, sizeof(*part)); 865 866 /* Start from the mandatory descriptor region */ 867 part->size = 4096; 868 part->name = "BIOS"; 869 870 /* 871 * Now try to find where this partition ends based on the flash 872 * region registers. 873 */ 874 for (i = 1; i < ispi->nregions; i++) { 875 u32 region, base, limit; 876 877 region = readl(ispi->base + FREG(i)); 878 base = region & FREG_BASE_MASK; 879 limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT; 880 881 if (base >= limit || limit == 0) 882 continue; 883 884 /* 885 * If any of the regions have protection bits set, make the 886 * whole partition read-only to be on the safe side. 887 */ 888 if (intel_spi_is_protected(ispi, base, limit)) 889 ispi->writeable = false; 890 891 end = (limit << 12) + 4096; 892 if (end > part->size) 893 part->size = end; 894 } 895} 896 897static const struct spi_nor_controller_ops intel_spi_controller_ops = { 898 .read_reg = intel_spi_read_reg, 899 .write_reg = intel_spi_write_reg, 900 .read = intel_spi_read, 901 .write = intel_spi_write, 902 .erase = intel_spi_erase, 903}; 904 905struct intel_spi *intel_spi_probe(struct device *dev, 906 struct resource *mem, const struct intel_spi_boardinfo *info) 907{ 908 const struct spi_nor_hwcaps hwcaps = { 909 .mask = SNOR_HWCAPS_READ | 910 SNOR_HWCAPS_READ_FAST | 911 SNOR_HWCAPS_PP, 912 }; 913 struct mtd_partition part; 914 struct intel_spi *ispi; 915 int ret; 916 917 if (!info || !mem) 918 return ERR_PTR(-EINVAL); 919 920 ispi = devm_kzalloc(dev, sizeof(*ispi), GFP_KERNEL); 921 if (!ispi) 922 return ERR_PTR(-ENOMEM); 923 924 ispi->base = devm_ioremap_resource(dev, mem); 925 if (IS_ERR(ispi->base)) 926 return ERR_CAST(ispi->base); 927 928 ispi->dev = dev; 929 ispi->info = info; 930 ispi->writeable = info->writeable; 931 932 ret = intel_spi_init(ispi); 933 if (ret) 934 return ERR_PTR(ret); 935 936 
ispi->nor.dev = ispi->dev; 937 ispi->nor.priv = ispi; 938 ispi->nor.controller_ops = &intel_spi_controller_ops; 939 940 ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps); 941 if (ret) { 942 dev_info(dev, "failed to locate the chip\n"); 943 return ERR_PTR(ret); 944 } 945 946 intel_spi_fill_partition(ispi, &part); 947 948 /* Prevent writes if not explicitly enabled */ 949 if (!ispi->writeable || !writeable) 950 ispi->nor.mtd.flags &= ~MTD_WRITEABLE; 951 952 ret = mtd_device_register(&ispi->nor.mtd, &part, 1); 953 if (ret) 954 return ERR_PTR(ret); 955 956 return ispi; 957} 958EXPORT_SYMBOL_GPL(intel_spi_probe); 959 960int intel_spi_remove(struct intel_spi *ispi) 961{ 962 return mtd_device_unregister(&ispi->nor.mtd); 963} 964EXPORT_SYMBOL_GPL(intel_spi_remove); 965 966MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver"); 967MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); 968MODULE_LICENSE("GPL v2");