Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

spi: dw: Put the driver entities naming in order

Mostly due to a long driver history its methods and macro names look a
bit messy. In particular, that concerns the code prefixes. The
biggest part of the driver functions and macros have got the dw_spi/DW_SPI
prefixes. But there are some entities which have been just
"spi_/SPI_"-prefixed. Especially that concerns the CSR and their fields
macro definitions. It makes the code harder to comprehend since such
methods and macros can be easily confused with the global SPI-subsystem
exports. In this case the only possible way to more or less quickly
distinguish one naming space from another is either by context or by the
argument type, which most of the times isn't that easy anyway. In addition
to that a new DW SSI IP-core support has been added in the framework of
commit e539f435cb9c ("spi: dw: Add support for DesignWare DWC_ssi"), which
introduced a new set of macro-prefixes to describe CTRLR0-specific fields
and worsened the situation. Finally, there are methods with
no DW SPI driver-reference prefix at all, which makes the code reading even
harder. So in order to ease the driver hacking let's bring the code naming
to a common base:
1) Each method is supposed to have "dw_spi_" prefix so to be easily
distinguished from the kernel API, e.g. SPI-subsystem methods and macros.
(Exception is the local implementation of the readl/writel methods since
being just the regspace accessors.)
2) Each generically used macro should have DW_SPI_-prefix thus being
easily comprehended as the local driver definition.
3) DW APB SSI and DW SSI specific macros should have prefixes as DW_PSSI_
and DW_HSSI_ respectively so referring to the system buses they support
(APB and AHB similarly to the DT clocks naming like pclk, hclk).

Signed-off-by: Serge Semin <Sergey.Semin@baikalelectronics.ru>
Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Link: https://lore.kernel.org/r/20211115181917.7521-4-Sergey.Semin@baikalelectronics.ru
Signed-off-by: Mark Brown <broonie@kernel.org>

authored by

Serge Semin and committed by
Mark Brown
725b0e3e 21b6b380

+206 -204
+4 -4
drivers/spi/spi-dw-bt1.c
··· 123 123 len = min_t(size_t, len, dwsbt1->map_len - offs); 124 124 125 125 /* Collect the controller configuration required by the operation */ 126 - cfg.tmode = SPI_TMOD_EPROMREAD; 126 + cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD; 127 127 cfg.dfs = 8; 128 128 cfg.ndf = 4; 129 129 cfg.freq = mem->spi->max_speed_hz; ··· 131 131 /* Make sure the corresponding CS is de-asserted on transmission */ 132 132 dw_spi_set_cs(mem->spi, false); 133 133 134 - spi_enable_chip(dws, 0); 134 + dw_spi_enable_chip(dws, 0); 135 135 136 136 dw_spi_update_config(dws, mem->spi, &cfg); 137 137 138 - spi_umask_intr(dws, SPI_INT_RXFI); 138 + dw_spi_umask_intr(dws, DW_SPI_INT_RXFI); 139 139 140 - spi_enable_chip(dws, 1); 140 + dw_spi_enable_chip(dws, 1); 141 141 142 142 /* 143 143 * Enable the transparent mode of the System Boot Controller.
+70 -68
drivers/spi/spi-dw-core.c
··· 24 24 #endif 25 25 26 26 /* Slave spi_device related */ 27 - struct chip_data { 27 + struct dw_spi_chip_data { 28 28 u32 cr0; 29 29 u32 rx_sample_dly; /* RX sample delay */ 30 30 }; ··· 109 109 EXPORT_SYMBOL_NS_GPL(dw_spi_set_cs, SPI_DW_CORE); 110 110 111 111 /* Return the max entries we can fill into tx fifo */ 112 - static inline u32 tx_max(struct dw_spi *dws) 112 + static inline u32 dw_spi_tx_max(struct dw_spi *dws) 113 113 { 114 114 u32 tx_room, rxtx_gap; 115 115 ··· 129 129 } 130 130 131 131 /* Return the max entries we should read out of rx fifo */ 132 - static inline u32 rx_max(struct dw_spi *dws) 132 + static inline u32 dw_spi_rx_max(struct dw_spi *dws) 133 133 { 134 134 return min_t(u32, dws->rx_len, dw_readl(dws, DW_SPI_RXFLR)); 135 135 } 136 136 137 137 static void dw_writer(struct dw_spi *dws) 138 138 { 139 - u32 max = tx_max(dws); 139 + u32 max = dw_spi_tx_max(dws); 140 140 u32 txw = 0; 141 141 142 142 while (max--) { ··· 157 157 158 158 static void dw_reader(struct dw_spi *dws) 159 159 { 160 - u32 max = rx_max(dws); 160 + u32 max = dw_spi_rx_max(dws); 161 161 u32 rxw; 162 162 163 163 while (max--) { ··· 186 186 else 187 187 irq_status = dw_readl(dws, DW_SPI_ISR); 188 188 189 - if (irq_status & SPI_INT_RXOI) { 189 + if (irq_status & DW_SPI_INT_RXOI) { 190 190 dev_err(&dws->master->dev, "RX FIFO overflow detected\n"); 191 191 ret = -EIO; 192 192 } 193 193 194 - if (irq_status & SPI_INT_RXUI) { 194 + if (irq_status & DW_SPI_INT_RXUI) { 195 195 dev_err(&dws->master->dev, "RX FIFO underflow detected\n"); 196 196 ret = -EIO; 197 197 } 198 198 199 - if (irq_status & SPI_INT_TXOI) { 199 + if (irq_status & DW_SPI_INT_TXOI) { 200 200 dev_err(&dws->master->dev, "TX FIFO overflow detected\n"); 201 201 ret = -EIO; 202 202 } 203 203 204 204 /* Generically handle the erroneous situation */ 205 205 if (ret) { 206 - spi_reset_chip(dws); 206 + dw_spi_reset_chip(dws); 207 207 if (dws->master->cur_msg) 208 208 dws->master->cur_msg->status = ret; 209 209 } ··· 230 230 
*/ 231 231 dw_reader(dws); 232 232 if (!dws->rx_len) { 233 - spi_mask_intr(dws, 0xff); 233 + dw_spi_mask_intr(dws, 0xff); 234 234 spi_finalize_current_transfer(dws->master); 235 235 } else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) { 236 236 dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1); ··· 241 241 * disabled after the data transmission is finished so not to 242 242 * have the TXE IRQ flood at the final stage of the transfer. 243 243 */ 244 - if (irq_status & SPI_INT_TXEI) { 244 + if (irq_status & DW_SPI_INT_TXEI) { 245 245 dw_writer(dws); 246 246 if (!dws->tx_len) 247 - spi_mask_intr(dws, SPI_INT_TXEI); 247 + dw_spi_mask_intr(dws, DW_SPI_INT_TXEI); 248 248 } 249 249 250 250 return IRQ_HANDLED; ··· 260 260 return IRQ_NONE; 261 261 262 262 if (!master->cur_msg) { 263 - spi_mask_intr(dws, 0xff); 263 + dw_spi_mask_intr(dws, 0xff); 264 264 return IRQ_HANDLED; 265 265 } 266 266 ··· 271 271 { 272 272 u32 cr0 = 0; 273 273 274 - if (!(dws->caps & DW_SPI_CAP_DWC_SSI)) { 274 + if (!(dws->caps & DW_SPI_CAP_DWC_HSSI)) { 275 275 /* CTRLR0[ 5: 4] Frame Format */ 276 - cr0 |= SPI_FRF_MOTO_SPI << SPI_FRF_OFFSET; 276 + cr0 |= DW_SPI_CTRLR0_FRF_MOTO_SPI << DW_PSSI_CTRLR0_FRF_OFFSET; 277 277 278 278 /* 279 279 * SPI mode (SCPOL|SCPH) 280 280 * CTRLR0[ 6] Serial Clock Phase 281 281 * CTRLR0[ 7] Serial Clock Polarity 282 282 */ 283 - cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET; 284 - cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET; 283 + cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << DW_PSSI_CTRLR0_SCOL_OFFSET; 284 + cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << DW_PSSI_CTRLR0_SCPH_OFFSET; 285 285 286 286 /* CTRLR0[11] Shift Register Loop */ 287 - cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << SPI_SRL_OFFSET; 287 + cr0 |= ((spi->mode & SPI_LOOP) ? 
1 : 0) << DW_PSSI_CTRLR0_SRL_OFFSET; 288 288 } else { 289 289 /* CTRLR0[ 7: 6] Frame Format */ 290 - cr0 |= SPI_FRF_MOTO_SPI << DWC_SSI_CTRLR0_FRF_OFFSET; 290 + cr0 |= DW_SPI_CTRLR0_FRF_MOTO_SPI << DW_HSSI_CTRLR0_FRF_OFFSET; 291 291 292 292 /* 293 293 * SPI mode (SCPOL|SCPH) 294 294 * CTRLR0[ 8] Serial Clock Phase 295 295 * CTRLR0[ 9] Serial Clock Polarity 296 296 */ 297 - cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << DWC_SSI_CTRLR0_SCPOL_OFFSET; 298 - cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << DWC_SSI_CTRLR0_SCPH_OFFSET; 297 + cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << DW_HSSI_CTRLR0_SCPOL_OFFSET; 298 + cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << DW_HSSI_CTRLR0_SCPH_OFFSET; 299 299 300 300 /* CTRLR0[13] Shift Register Loop */ 301 - cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << DWC_SSI_CTRLR0_SRL_OFFSET; 301 + cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << DW_HSSI_CTRLR0_SRL_OFFSET; 302 302 303 303 if (dws->caps & DW_SPI_CAP_KEEMBAY_MST) 304 - cr0 |= DWC_SSI_CTRLR0_KEEMBAY_MST; 304 + cr0 |= DW_HSSI_CTRLR0_KEEMBAY_MST; 305 305 } 306 306 307 307 return cr0; ··· 310 310 void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi, 311 311 struct dw_spi_cfg *cfg) 312 312 { 313 - struct chip_data *chip = spi_get_ctldata(spi); 313 + struct dw_spi_chip_data *chip = spi_get_ctldata(spi); 314 314 u32 cr0 = chip->cr0; 315 315 u32 speed_hz; 316 316 u16 clk_div; ··· 318 318 /* CTRLR0[ 4/3: 0] or CTRLR0[ 20: 16] Data Frame Size */ 319 319 cr0 |= (cfg->dfs - 1) << dws->dfs_offset; 320 320 321 - if (!(dws->caps & DW_SPI_CAP_DWC_SSI)) 321 + if (!(dws->caps & DW_SPI_CAP_DWC_HSSI)) 322 322 /* CTRLR0[ 9:8] Transfer Mode */ 323 - cr0 |= cfg->tmode << SPI_TMOD_OFFSET; 323 + cr0 |= cfg->tmode << DW_PSSI_CTRLR0_TMOD_OFFSET; 324 324 else 325 325 /* CTRLR0[11:10] Transfer Mode */ 326 - cr0 |= cfg->tmode << DWC_SSI_CTRLR0_TMOD_OFFSET; 326 + cr0 |= cfg->tmode << DW_HSSI_CTRLR0_TMOD_OFFSET; 327 327 328 328 dw_writel(dws, DW_SPI_CTRLR0, cr0); 329 329 330 - if (cfg->tmode == SPI_TMOD_EPROMREAD || 
cfg->tmode == SPI_TMOD_RO) 330 + if (cfg->tmode == DW_SPI_CTRLR0_TMOD_EPROMREAD || 331 + cfg->tmode == DW_SPI_CTRLR0_TMOD_RO) 331 332 dw_writel(dws, DW_SPI_CTRLR1, cfg->ndf ? cfg->ndf - 1 : 0); 332 333 333 334 /* Note DW APB SSI clock divider doesn't support odd numbers */ ··· 336 335 speed_hz = dws->max_freq / clk_div; 337 336 338 337 if (dws->current_freq != speed_hz) { 339 - spi_set_clk(dws, clk_div); 338 + dw_spi_set_clk(dws, clk_div); 340 339 dws->current_freq = speed_hz; 341 340 } 342 341 ··· 364 363 365 364 dws->transfer_handler = dw_spi_transfer_handler; 366 365 367 - imask = SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI | 368 - SPI_INT_RXFI; 369 - spi_umask_intr(dws, imask); 366 + imask = DW_SPI_INT_TXEI | DW_SPI_INT_TXOI | 367 + DW_SPI_INT_RXUI | DW_SPI_INT_RXOI | DW_SPI_INT_RXFI; 368 + dw_spi_umask_intr(dws, imask); 370 369 } 371 370 372 371 /* ··· 406 405 } 407 406 408 407 static int dw_spi_transfer_one(struct spi_controller *master, 409 - struct spi_device *spi, struct spi_transfer *transfer) 408 + struct spi_device *spi, 409 + struct spi_transfer *transfer) 410 410 { 411 411 struct dw_spi *dws = spi_controller_get_devdata(master); 412 412 struct dw_spi_cfg cfg = { 413 - .tmode = SPI_TMOD_TR, 413 + .tmode = DW_SPI_CTRLR0_TMOD_TR, 414 414 .dfs = transfer->bits_per_word, 415 415 .freq = transfer->speed_hz, 416 416 }; ··· 427 425 /* Ensure the data above is visible for all CPUs */ 428 426 smp_mb(); 429 427 430 - spi_enable_chip(dws, 0); 428 + dw_spi_enable_chip(dws, 0); 431 429 432 430 dw_spi_update_config(dws, spi, &cfg); 433 431 ··· 438 436 dws->dma_mapped = master->cur_msg_mapped; 439 437 440 438 /* For poll mode just disable all interrupts */ 441 - spi_mask_intr(dws, 0xff); 439 + dw_spi_mask_intr(dws, 0xff); 442 440 443 441 if (dws->dma_mapped) { 444 442 ret = dws->dma_ops->dma_setup(dws, transfer); ··· 446 444 return ret; 447 445 } 448 446 449 - spi_enable_chip(dws, 1); 447 + dw_spi_enable_chip(dws, 1); 450 448 451 449 if 
(dws->dma_mapped) 452 450 return dws->dma_ops->dma_transfer(dws, transfer); ··· 459 457 } 460 458 461 459 static void dw_spi_handle_err(struct spi_controller *master, 462 - struct spi_message *msg) 460 + struct spi_message *msg) 463 461 { 464 462 struct dw_spi *dws = spi_controller_get_devdata(master); 465 463 466 464 if (dws->dma_mapped) 467 465 dws->dma_ops->dma_stop(dws); 468 466 469 - spi_reset_chip(dws); 467 + dw_spi_reset_chip(dws); 470 468 } 471 469 472 470 static int dw_spi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op) 473 471 { 474 472 if (op->data.dir == SPI_MEM_DATA_IN) 475 - op->data.nbytes = clamp_val(op->data.nbytes, 0, SPI_NDF_MASK + 1); 473 + op->data.nbytes = clamp_val(op->data.nbytes, 0, DW_SPI_NDF_MASK + 1); 476 474 477 475 return 0; 478 476 } ··· 500 498 if (op->data.dir == SPI_MEM_DATA_OUT) 501 499 len += op->data.nbytes; 502 500 503 - if (len <= SPI_BUF_SIZE) { 501 + if (len <= DW_SPI_BUF_SIZE) { 504 502 out = dws->buf; 505 503 } else { 506 504 out = kzalloc(len, GFP_KERNEL); ··· 514 512 * single buffer in order to speed the data transmission up. 
515 513 */ 516 514 for (i = 0; i < op->cmd.nbytes; ++i) 517 - out[i] = SPI_GET_BYTE(op->cmd.opcode, op->cmd.nbytes - i - 1); 515 + out[i] = DW_SPI_GET_BYTE(op->cmd.opcode, op->cmd.nbytes - i - 1); 518 516 for (j = 0; j < op->addr.nbytes; ++i, ++j) 519 - out[i] = SPI_GET_BYTE(op->addr.val, op->addr.nbytes - j - 1); 517 + out[i] = DW_SPI_GET_BYTE(op->addr.val, op->addr.nbytes - j - 1); 520 518 for (j = 0; j < op->dummy.nbytes; ++i, ++j) 521 519 out[i] = 0x0; 522 520 ··· 589 587 entries = readl_relaxed(dws->regs + DW_SPI_RXFLR); 590 588 if (!entries) { 591 589 sts = readl_relaxed(dws->regs + DW_SPI_RISR); 592 - if (sts & SPI_INT_RXOI) { 590 + if (sts & DW_SPI_INT_RXOI) { 593 591 dev_err(&dws->master->dev, "FIFO overflow on Rx\n"); 594 592 return -EIO; 595 593 } ··· 605 603 606 604 static inline bool dw_spi_ctlr_busy(struct dw_spi *dws) 607 605 { 608 - return dw_readl(dws, DW_SPI_SR) & SR_BUSY; 606 + return dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_BUSY; 609 607 } 610 608 611 609 static int dw_spi_wait_mem_op_done(struct dw_spi *dws) 612 610 { 613 - int retry = SPI_WAIT_RETRIES; 611 + int retry = DW_SPI_WAIT_RETRIES; 614 612 struct spi_delay delay; 615 613 unsigned long ns, us; 616 614 u32 nents; ··· 640 638 641 639 static void dw_spi_stop_mem_op(struct dw_spi *dws, struct spi_device *spi) 642 640 { 643 - spi_enable_chip(dws, 0); 641 + dw_spi_enable_chip(dws, 0); 644 642 dw_spi_set_cs(spi, true); 645 - spi_enable_chip(dws, 1); 643 + dw_spi_enable_chip(dws, 1); 646 644 } 647 645 648 646 /* ··· 675 673 cfg.dfs = 8; 676 674 cfg.freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_mem_freq); 677 675 if (op->data.dir == SPI_MEM_DATA_IN) { 678 - cfg.tmode = SPI_TMOD_EPROMREAD; 676 + cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD; 679 677 cfg.ndf = op->data.nbytes; 680 678 } else { 681 - cfg.tmode = SPI_TMOD_TO; 679 + cfg.tmode = DW_SPI_CTRLR0_TMOD_TO; 682 680 } 683 681 684 - spi_enable_chip(dws, 0); 682 + dw_spi_enable_chip(dws, 0); 685 683 686 684 dw_spi_update_config(dws, mem->spi, 
&cfg); 687 685 688 - spi_mask_intr(dws, 0xff); 686 + dw_spi_mask_intr(dws, 0xff); 689 687 690 - spi_enable_chip(dws, 1); 688 + dw_spi_enable_chip(dws, 1); 691 689 692 690 /* 693 691 * DW APB SSI controller has very nasty peculiarities. First originally ··· 770 768 static int dw_spi_setup(struct spi_device *spi) 771 769 { 772 770 struct dw_spi *dws = spi_controller_get_devdata(spi->controller); 773 - struct chip_data *chip; 771 + struct dw_spi_chip_data *chip; 774 772 775 773 /* Only alloc on first setup */ 776 774 chip = spi_get_ctldata(spi); ··· 778 776 struct dw_spi *dws = spi_controller_get_devdata(spi->controller); 779 777 u32 rx_sample_dly_ns; 780 778 781 - chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); 779 + chip = kzalloc(sizeof(*chip), GFP_KERNEL); 782 780 if (!chip) 783 781 return -ENOMEM; 784 782 spi_set_ctldata(spi, chip); ··· 805 803 806 804 static void dw_spi_cleanup(struct spi_device *spi) 807 805 { 808 - struct chip_data *chip = spi_get_ctldata(spi); 806 + struct dw_spi_chip_data *chip = spi_get_ctldata(spi); 809 807 810 808 kfree(chip); 811 809 spi_set_ctldata(spi, NULL); 812 810 } 813 811 814 812 /* Restart the controller, disable all interrupts, clean rx fifo */ 815 - static void spi_hw_init(struct device *dev, struct dw_spi *dws) 813 + static void dw_spi_hw_init(struct device *dev, struct dw_spi *dws) 816 814 { 817 - spi_reset_chip(dws); 815 + dw_spi_reset_chip(dws); 818 816 819 817 /* 820 818 * Try to detect the FIFO depth if not set by interface driver, ··· 839 837 * writability. Note DWC SSI controller also has the extended DFS, but 840 838 * with zero offset. 
841 839 */ 842 - if (!(dws->caps & DW_SPI_CAP_DWC_SSI)) { 840 + if (!(dws->caps & DW_SPI_CAP_DWC_HSSI)) { 843 841 u32 cr0, tmp = dw_readl(dws, DW_SPI_CTRLR0); 844 842 845 - spi_enable_chip(dws, 0); 843 + dw_spi_enable_chip(dws, 0); 846 844 dw_writel(dws, DW_SPI_CTRLR0, 0xffffffff); 847 845 cr0 = dw_readl(dws, DW_SPI_CTRLR0); 848 846 dw_writel(dws, DW_SPI_CTRLR0, tmp); 849 - spi_enable_chip(dws, 1); 847 + dw_spi_enable_chip(dws, 1); 850 848 851 - if (!(cr0 & SPI_DFS_MASK)) { 849 + if (!(cr0 & DW_PSSI_CTRLR0_DFS_MASK)) { 852 850 dws->caps |= DW_SPI_CAP_DFS32; 853 - dws->dfs_offset = SPI_DFS32_OFFSET; 851 + dws->dfs_offset = DW_PSSI_CTRLR0_DFS32_OFFSET; 854 852 dev_dbg(dev, "Detected 32-bits max data frame size\n"); 855 853 } 856 854 } else { ··· 880 878 spi_controller_set_devdata(master, dws); 881 879 882 880 /* Basic HW init */ 883 - spi_hw_init(dev, dws); 881 + dw_spi_hw_init(dev, dws); 884 882 885 883 ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev), 886 884 master); ··· 941 939 err_dma_exit: 942 940 if (dws->dma_ops && dws->dma_ops->dma_exit) 943 941 dws->dma_ops->dma_exit(dws); 944 - spi_enable_chip(dws, 0); 942 + dw_spi_enable_chip(dws, 0); 945 943 free_irq(dws->irq, master); 946 944 err_free_master: 947 945 spi_controller_put(master); ··· 958 956 if (dws->dma_ops && dws->dma_ops->dma_exit) 959 957 dws->dma_ops->dma_exit(dws); 960 958 961 - spi_shutdown_chip(dws); 959 + dw_spi_shutdown_chip(dws); 962 960 963 961 free_irq(dws->irq, dws->master); 964 962 } ··· 972 970 if (ret) 973 971 return ret; 974 972 975 - spi_shutdown_chip(dws); 973 + dw_spi_shutdown_chip(dws); 976 974 return 0; 977 975 } 978 976 EXPORT_SYMBOL_NS_GPL(dw_spi_suspend_host, SPI_DW_CORE); 979 977 980 978 int dw_spi_resume_host(struct dw_spi *dws) 981 979 { 982 - spi_hw_init(&dws->master->dev, dws); 980 + dw_spi_hw_init(&dws->master->dev, dws); 983 981 return spi_controller_resume(dws->master); 984 982 } 985 983 EXPORT_SYMBOL_NS_GPL(dw_spi_resume_host, SPI_DW_CORE);
+25 -25
drivers/spi/spi-dw-dma.c
··· 18 18 19 19 #include "spi-dw.h" 20 20 21 - #define RX_BUSY 0 22 - #define RX_BURST_LEVEL 16 23 - #define TX_BUSY 1 24 - #define TX_BURST_LEVEL 16 21 + #define DW_SPI_RX_BUSY 0 22 + #define DW_SPI_RX_BURST_LEVEL 16 23 + #define DW_SPI_TX_BUSY 1 24 + #define DW_SPI_TX_BURST_LEVEL 16 25 25 26 26 static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param) 27 27 { ··· 46 46 if (!ret && caps.max_burst) 47 47 max_burst = caps.max_burst; 48 48 else 49 - max_burst = RX_BURST_LEVEL; 49 + max_burst = DW_SPI_RX_BURST_LEVEL; 50 50 51 51 dws->rxburst = min(max_burst, def_burst); 52 52 dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1); ··· 55 55 if (!ret && caps.max_burst) 56 56 max_burst = caps.max_burst; 57 57 else 58 - max_burst = TX_BURST_LEVEL; 58 + max_burst = DW_SPI_TX_BURST_LEVEL; 59 59 60 60 /* 61 61 * Having a Rx DMA channel serviced with higher priority than a Tx DMA ··· 227 227 228 228 static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws) 229 229 { 230 - return !(dw_readl(dws, DW_SPI_SR) & SR_TF_EMPT); 230 + return !(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_TF_EMPT); 231 231 } 232 232 233 233 static int dw_spi_dma_wait_tx_done(struct dw_spi *dws, 234 234 struct spi_transfer *xfer) 235 235 { 236 - int retry = SPI_WAIT_RETRIES; 236 + int retry = DW_SPI_WAIT_RETRIES; 237 237 struct spi_delay delay; 238 238 u32 nents; 239 239 ··· 260 260 { 261 261 struct dw_spi *dws = arg; 262 262 263 - clear_bit(TX_BUSY, &dws->dma_chan_busy); 264 - if (test_bit(RX_BUSY, &dws->dma_chan_busy)) 263 + clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy); 264 + if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy)) 265 265 return; 266 266 267 267 complete(&dws->dma_completion); ··· 305 305 return ret; 306 306 } 307 307 308 - set_bit(TX_BUSY, &dws->dma_chan_busy); 308 + set_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy); 309 309 310 310 return 0; 311 311 } 312 312 313 313 static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws) 314 314 { 315 - return !!(dw_readl(dws, DW_SPI_SR) & 
SR_RF_NOT_EMPT); 315 + return !!(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_RF_NOT_EMPT); 316 316 } 317 317 318 318 static int dw_spi_dma_wait_rx_done(struct dw_spi *dws) 319 319 { 320 - int retry = SPI_WAIT_RETRIES; 320 + int retry = DW_SPI_WAIT_RETRIES; 321 321 struct spi_delay delay; 322 322 unsigned long ns, us; 323 323 u32 nents; ··· 361 361 { 362 362 struct dw_spi *dws = arg; 363 363 364 - clear_bit(RX_BUSY, &dws->dma_chan_busy); 365 - if (test_bit(TX_BUSY, &dws->dma_chan_busy)) 364 + clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy); 365 + if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy)) 366 366 return; 367 367 368 368 complete(&dws->dma_completion); ··· 406 406 return ret; 407 407 } 408 408 409 - set_bit(RX_BUSY, &dws->dma_chan_busy); 409 + set_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy); 410 410 411 411 return 0; 412 412 } ··· 431 431 } 432 432 433 433 /* Set the DMA handshaking interface */ 434 - dma_ctrl = SPI_DMA_TDMAE; 434 + dma_ctrl = DW_SPI_DMACR_TDMAE; 435 435 if (xfer->rx_buf) 436 - dma_ctrl |= SPI_DMA_RDMAE; 436 + dma_ctrl |= DW_SPI_DMACR_RDMAE; 437 437 dw_writel(dws, DW_SPI_DMACR, dma_ctrl); 438 438 439 439 /* Set the interrupt mask */ 440 - imr = SPI_INT_TXOI; 440 + imr = DW_SPI_INT_TXOI; 441 441 if (xfer->rx_buf) 442 - imr |= SPI_INT_RXUI | SPI_INT_RXOI; 443 - spi_umask_intr(dws, imr); 442 + imr |= DW_SPI_INT_RXUI | DW_SPI_INT_RXOI; 443 + dw_spi_umask_intr(dws, imr); 444 444 445 445 reinit_completion(&dws->dma_completion); 446 446 ··· 616 616 617 617 static void dw_spi_dma_stop(struct dw_spi *dws) 618 618 { 619 - if (test_bit(TX_BUSY, &dws->dma_chan_busy)) { 619 + if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy)) { 620 620 dmaengine_terminate_sync(dws->txchan); 621 - clear_bit(TX_BUSY, &dws->dma_chan_busy); 621 + clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy); 622 622 } 623 - if (test_bit(RX_BUSY, &dws->dma_chan_busy)) { 623 + if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy)) { 624 624 dmaengine_terminate_sync(dws->rxchan); 625 - clear_bit(RX_BUSY, 
&dws->dma_chan_busy); 625 + clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy); 626 626 } 627 627 } 628 628
+10 -10
drivers/spi/spi-dw-mmio.c
··· 196 196 return 0; 197 197 } 198 198 199 - static int dw_spi_dw_apb_init(struct platform_device *pdev, 200 - struct dw_spi_mmio *dwsmmio) 199 + static int dw_spi_pssi_init(struct platform_device *pdev, 200 + struct dw_spi_mmio *dwsmmio) 201 201 { 202 202 dw_spi_dma_setup_generic(&dwsmmio->dws); 203 203 204 204 return 0; 205 205 } 206 206 207 - static int dw_spi_dwc_ssi_init(struct platform_device *pdev, 208 - struct dw_spi_mmio *dwsmmio) 207 + static int dw_spi_hssi_init(struct platform_device *pdev, 208 + struct dw_spi_mmio *dwsmmio) 209 209 { 210 - dwsmmio->dws.caps = DW_SPI_CAP_DWC_SSI; 210 + dwsmmio->dws.caps = DW_SPI_CAP_DWC_HSSI; 211 211 212 212 dw_spi_dma_setup_generic(&dwsmmio->dws); 213 213 ··· 217 217 static int dw_spi_keembay_init(struct platform_device *pdev, 218 218 struct dw_spi_mmio *dwsmmio) 219 219 { 220 - dwsmmio->dws.caps = DW_SPI_CAP_KEEMBAY_MST | DW_SPI_CAP_DWC_SSI; 220 + dwsmmio->dws.caps = DW_SPI_CAP_KEEMBAY_MST | DW_SPI_CAP_DWC_HSSI; 221 221 222 222 return 0; 223 223 } ··· 342 342 } 343 343 344 344 static const struct of_device_id dw_spi_mmio_of_match[] = { 345 - { .compatible = "snps,dw-apb-ssi", .data = dw_spi_dw_apb_init}, 345 + { .compatible = "snps,dw-apb-ssi", .data = dw_spi_pssi_init}, 346 346 { .compatible = "mscc,ocelot-spi", .data = dw_spi_mscc_ocelot_init}, 347 347 { .compatible = "mscc,jaguar2-spi", .data = dw_spi_mscc_jaguar2_init}, 348 348 { .compatible = "amazon,alpine-dw-apb-ssi", .data = dw_spi_alpine_init}, 349 - { .compatible = "renesas,rzn1-spi", .data = dw_spi_dw_apb_init}, 350 - { .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_dwc_ssi_init}, 349 + { .compatible = "renesas,rzn1-spi", .data = dw_spi_pssi_init}, 350 + { .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_hssi_init}, 351 351 { .compatible = "intel,keembay-ssi", .data = dw_spi_keembay_init}, 352 352 { .compatible = "microchip,sparx5-spi", dw_spi_mscc_sparx5_init}, 353 353 { .compatible = "canaan,k210-spi", dw_spi_canaan_k210_init}, ··· 357 357 358 358 
#ifdef CONFIG_ACPI 359 359 static const struct acpi_device_id dw_spi_mmio_acpi_match[] = { 360 - {"HISI0173", (kernel_ulong_t)dw_spi_dw_apb_init}, 360 + {"HISI0173", (kernel_ulong_t)dw_spi_pssi_init}, 361 361 {}, 362 362 }; 363 363 MODULE_DEVICE_TABLE(acpi, dw_spi_mmio_acpi_match);
+29 -30
drivers/spi/spi-dw-pci.c
··· 24 24 #define CLK_SPI_CDIV_MASK 0x00000e00 25 25 #define CLK_SPI_DISABLE_OFFSET 8 26 26 27 - struct spi_pci_desc { 27 + struct dw_spi_pci_desc { 28 28 int (*setup)(struct dw_spi *); 29 29 u16 num_cs; 30 30 u16 bus_num; 31 31 u32 max_freq; 32 32 }; 33 33 34 - static int spi_mid_init(struct dw_spi *dws) 34 + static int dw_spi_pci_mid_init(struct dw_spi *dws) 35 35 { 36 36 void __iomem *clk_reg; 37 37 u32 clk_cdiv; ··· 53 53 return 0; 54 54 } 55 55 56 - static int spi_generic_init(struct dw_spi *dws) 56 + static int dw_spi_pci_generic_init(struct dw_spi *dws) 57 57 { 58 58 dw_spi_dma_setup_generic(dws); 59 59 60 60 return 0; 61 61 } 62 62 63 - static struct spi_pci_desc spi_pci_mid_desc_1 = { 64 - .setup = spi_mid_init, 63 + static struct dw_spi_pci_desc dw_spi_pci_mid_desc_1 = { 64 + .setup = dw_spi_pci_mid_init, 65 65 .num_cs = 5, 66 66 .bus_num = 0, 67 67 }; 68 68 69 - static struct spi_pci_desc spi_pci_mid_desc_2 = { 70 - .setup = spi_mid_init, 69 + static struct dw_spi_pci_desc dw_spi_pci_mid_desc_2 = { 70 + .setup = dw_spi_pci_mid_init, 71 71 .num_cs = 2, 72 72 .bus_num = 1, 73 73 }; 74 74 75 - static struct spi_pci_desc spi_pci_ehl_desc = { 76 - .setup = spi_generic_init, 75 + static struct dw_spi_pci_desc dw_spi_pci_ehl_desc = { 76 + .setup = dw_spi_pci_generic_init, 77 77 .num_cs = 2, 78 78 .bus_num = -1, 79 79 .max_freq = 100000000, 80 80 }; 81 81 82 - static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 82 + static int dw_spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 83 83 { 84 + struct dw_spi_pci_desc *desc = (struct dw_spi_pci_desc *)ent->driver_data; 84 85 struct dw_spi *dws; 85 - struct spi_pci_desc *desc = (struct spi_pci_desc *)ent->driver_data; 86 86 int pci_bar = 0; 87 87 int ret; 88 88 ··· 150 150 return ret; 151 151 } 152 152 153 - static void spi_pci_remove(struct pci_dev *pdev) 153 + static void dw_spi_pci_remove(struct pci_dev *pdev) 154 154 { 155 155 struct dw_spi *dws = 
pci_get_drvdata(pdev); 156 156 ··· 162 162 } 163 163 164 164 #ifdef CONFIG_PM_SLEEP 165 - static int spi_suspend(struct device *dev) 165 + static int dw_spi_pci_suspend(struct device *dev) 166 166 { 167 167 struct dw_spi *dws = dev_get_drvdata(dev); 168 168 169 169 return dw_spi_suspend_host(dws); 170 170 } 171 171 172 - static int spi_resume(struct device *dev) 172 + static int dw_spi_pci_resume(struct device *dev) 173 173 { 174 174 struct dw_spi *dws = dev_get_drvdata(dev); 175 175 ··· 177 177 } 178 178 #endif 179 179 180 - static SIMPLE_DEV_PM_OPS(dw_spi_pm_ops, spi_suspend, spi_resume); 180 + static SIMPLE_DEV_PM_OPS(dw_spi_pci_pm_ops, dw_spi_pci_suspend, dw_spi_pci_resume); 181 181 182 - static const struct pci_device_id pci_ids[] = { 182 + static const struct pci_device_id dw_spi_pci_ids[] = { 183 183 /* Intel MID platform SPI controller 0 */ 184 184 /* 185 185 * The access to the device 8086:0801 is disabled by HW, since it's 186 186 * exclusively used by SCU to communicate with MSIC. 
187 187 */ 188 188 /* Intel MID platform SPI controller 1 */ 189 - { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc_1}, 189 + { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&dw_spi_pci_mid_desc_1}, 190 190 /* Intel MID platform SPI controller 2 */ 191 - { PCI_VDEVICE(INTEL, 0x0812), (kernel_ulong_t)&spi_pci_mid_desc_2}, 191 + { PCI_VDEVICE(INTEL, 0x0812), (kernel_ulong_t)&dw_spi_pci_mid_desc_2}, 192 192 /* Intel Elkhart Lake PSE SPI controllers */ 193 - { PCI_VDEVICE(INTEL, 0x4b84), (kernel_ulong_t)&spi_pci_ehl_desc}, 194 - { PCI_VDEVICE(INTEL, 0x4b85), (kernel_ulong_t)&spi_pci_ehl_desc}, 195 - { PCI_VDEVICE(INTEL, 0x4b86), (kernel_ulong_t)&spi_pci_ehl_desc}, 196 - { PCI_VDEVICE(INTEL, 0x4b87), (kernel_ulong_t)&spi_pci_ehl_desc}, 193 + { PCI_VDEVICE(INTEL, 0x4b84), (kernel_ulong_t)&dw_spi_pci_ehl_desc}, 194 + { PCI_VDEVICE(INTEL, 0x4b85), (kernel_ulong_t)&dw_spi_pci_ehl_desc}, 195 + { PCI_VDEVICE(INTEL, 0x4b86), (kernel_ulong_t)&dw_spi_pci_ehl_desc}, 196 + { PCI_VDEVICE(INTEL, 0x4b87), (kernel_ulong_t)&dw_spi_pci_ehl_desc}, 197 197 {}, 198 198 }; 199 - MODULE_DEVICE_TABLE(pci, pci_ids); 199 + MODULE_DEVICE_TABLE(pci, dw_spi_pci_ids); 200 200 201 - static struct pci_driver dw_spi_driver = { 201 + static struct pci_driver dw_spi_pci_driver = { 202 202 .name = DRIVER_NAME, 203 - .id_table = pci_ids, 204 - .probe = spi_pci_probe, 205 - .remove = spi_pci_remove, 203 + .id_table = dw_spi_pci_ids, 204 + .probe = dw_spi_pci_probe, 205 + .remove = dw_spi_pci_remove, 206 206 .driver = { 207 - .pm = &dw_spi_pm_ops, 207 + .pm = &dw_spi_pci_pm_ops, 208 208 }, 209 209 }; 210 - 211 - module_pci_driver(dw_spi_driver); 210 + module_pci_driver(dw_spi_pci_driver); 212 211 213 212 MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>"); 214 213 MODULE_DESCRIPTION("PCI interface driver for DW SPI Core");
+68 -67
drivers/spi/spi-dw.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef DW_SPI_HEADER_H 3 - #define DW_SPI_HEADER_H 2 + #ifndef __SPI_DW_H__ 3 + #define __SPI_DW_H__ 4 4 5 5 #include <linux/bits.h> 6 6 #include <linux/completion.h> ··· 11 11 #include <linux/spi/spi-mem.h> 12 12 #include <linux/bitfield.h> 13 13 14 - /* Register offsets */ 14 + /* Register offsets (Generic for both DWC APB SSI and DWC SSI IP-cores) */ 15 15 #define DW_SPI_CTRLR0 0x00 16 16 #define DW_SPI_CTRLR1 0x04 17 17 #define DW_SPI_SSIENR 0x08 ··· 40 40 #define DW_SPI_RX_SAMPLE_DLY 0xf0 41 41 #define DW_SPI_CS_OVERRIDE 0xf4 42 42 43 - /* Bit fields in CTRLR0 */ 44 - #define SPI_DFS_OFFSET 0 45 - #define SPI_DFS_MASK GENMASK(3, 0) 46 - #define SPI_DFS32_OFFSET 16 43 + /* Bit fields in CTRLR0 (DWC APB SSI) */ 44 + #define DW_PSSI_CTRLR0_DFS_OFFSET 0 45 + #define DW_PSSI_CTRLR0_DFS_MASK GENMASK(3, 0) 46 + #define DW_PSSI_CTRLR0_DFS32_OFFSET 16 47 47 48 - #define SPI_FRF_OFFSET 4 49 - #define SPI_FRF_MOTO_SPI 0x0 50 - #define SPI_FRF_TI_SSP 0x1 51 - #define SPI_FRF_NS_MICROWIRE 0x2 52 - #define SPI_FRF_RESV 0x3 48 + #define DW_PSSI_CTRLR0_FRF_OFFSET 4 49 + #define DW_SPI_CTRLR0_FRF_MOTO_SPI 0x0 50 + #define DW_SPI_CTRLR0_FRF_TI_SSP 0x1 51 + #define DW_SPI_CTRLR0_FRF_NS_MICROWIRE 0x2 52 + #define DW_SPI_CTRLR0_FRF_RESV 0x3 53 53 54 - #define SPI_MODE_OFFSET 6 55 - #define SPI_SCPH_OFFSET 6 56 - #define SPI_SCOL_OFFSET 7 54 + #define DW_PSSI_CTRLR0_MODE_OFFSET 6 55 + #define DW_PSSI_CTRLR0_SCPH_OFFSET 6 56 + #define DW_PSSI_CTRLR0_SCOL_OFFSET 7 57 57 58 - #define SPI_TMOD_OFFSET 8 59 - #define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET) 60 - #define SPI_TMOD_TR 0x0 /* xmit & recv */ 61 - #define SPI_TMOD_TO 0x1 /* xmit only */ 62 - #define SPI_TMOD_RO 0x2 /* recv only */ 63 - #define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */ 58 + #define DW_PSSI_CTRLR0_TMOD_OFFSET 8 59 + #define DW_PSSI_CTRLR0_TMOD_MASK (0x3 << DW_PSSI_CTRLR0_TMOD_OFFSET) 60 + #define DW_SPI_CTRLR0_TMOD_TR 0x0 /* xmit & recv */ 61 + #define 
DW_SPI_CTRLR0_TMOD_TO 0x1 /* xmit only */ 62 + #define DW_SPI_CTRLR0_TMOD_RO 0x2 /* recv only */ 63 + #define DW_SPI_CTRLR0_TMOD_EPROMREAD 0x3 /* eeprom read mode */ 64 64 65 - #define SPI_SLVOE_OFFSET 10 66 - #define SPI_SRL_OFFSET 11 67 - #define SPI_CFS_OFFSET 12 65 + #define DW_PSSI_CTRLR0_SLVOE_OFFSET 10 66 + #define DW_PSSI_CTRLR0_SRL_OFFSET 11 67 + #define DW_PSSI_CTRLR0_CFS_OFFSET 12 68 68 69 - /* Bit fields in CTRLR0 based on DWC_ssi_databook.pdf v1.01a */ 70 - #define DWC_SSI_CTRLR0_SRL_OFFSET 13 71 - #define DWC_SSI_CTRLR0_TMOD_OFFSET 10 72 - #define DWC_SSI_CTRLR0_TMOD_MASK GENMASK(11, 10) 73 - #define DWC_SSI_CTRLR0_SCPOL_OFFSET 9 74 - #define DWC_SSI_CTRLR0_SCPH_OFFSET 8 75 - #define DWC_SSI_CTRLR0_FRF_OFFSET 6 76 - #define DWC_SSI_CTRLR0_DFS_OFFSET 0 69 + /* Bit fields in CTRLR0 (DWC SSI with AHB interface) */ 70 + #define DW_HSSI_CTRLR0_SRL_OFFSET 13 71 + #define DW_HSSI_CTRLR0_TMOD_OFFSET 10 72 + #define DW_HSSI_CTRLR0_TMOD_MASK GENMASK(11, 10) 73 + #define DW_HSSI_CTRLR0_SCPOL_OFFSET 9 74 + #define DW_HSSI_CTRLR0_SCPH_OFFSET 8 75 + #define DW_HSSI_CTRLR0_FRF_OFFSET 6 76 + #define DW_HSSI_CTRLR0_DFS_OFFSET 0 77 77 78 78 /* 79 79 * For Keem Bay, CTRLR0[31] is used to select controller mode. 
80 80 * 0: SSI is slave 81 81 * 1: SSI is master 82 82 */ 83 - #define DWC_SSI_CTRLR0_KEEMBAY_MST BIT(31) 83 + #define DW_HSSI_CTRLR0_KEEMBAY_MST BIT(31) 84 84 85 85 /* Bit fields in CTRLR1 */ 86 - #define SPI_NDF_MASK GENMASK(15, 0) 86 + #define DW_SPI_NDF_MASK GENMASK(15, 0) 87 87 88 88 /* Bit fields in SR, 7 bits */ 89 - #define SR_MASK 0x7f /* cover 7 bits */ 90 - #define SR_BUSY (1 << 0) 91 - #define SR_TF_NOT_FULL (1 << 1) 92 - #define SR_TF_EMPT (1 << 2) 93 - #define SR_RF_NOT_EMPT (1 << 3) 94 - #define SR_RF_FULL (1 << 4) 95 - #define SR_TX_ERR (1 << 5) 96 - #define SR_DCOL (1 << 6) 89 + #define DW_SPI_SR_MASK 0x7f /* cover 7 bits */ 90 + #define DW_SPI_SR_BUSY (1 << 0) 91 + #define DW_SPI_SR_TF_NOT_FULL (1 << 1) 92 + #define DW_SPI_SR_TF_EMPT (1 << 2) 93 + #define DW_SPI_SR_RF_NOT_EMPT (1 << 3) 94 + #define DW_SPI_SR_RF_FULL (1 << 4) 95 + #define DW_SPI_SR_TX_ERR (1 << 5) 96 + #define DW_SPI_SR_DCOL (1 << 6) 97 97 98 98 /* Bit fields in ISR, IMR, RISR, 7 bits */ 99 - #define SPI_INT_TXEI (1 << 0) 100 - #define SPI_INT_TXOI (1 << 1) 101 - #define SPI_INT_RXUI (1 << 2) 102 - #define SPI_INT_RXOI (1 << 3) 103 - #define SPI_INT_RXFI (1 << 4) 104 - #define SPI_INT_MSTI (1 << 5) 99 + #define DW_SPI_INT_TXEI (1 << 0) 100 + #define DW_SPI_INT_TXOI (1 << 1) 101 + #define DW_SPI_INT_RXUI (1 << 2) 102 + #define DW_SPI_INT_RXOI (1 << 3) 103 + #define DW_SPI_INT_RXFI (1 << 4) 104 + #define DW_SPI_INT_MSTI (1 << 5) 105 105 106 106 /* Bit fields in DMACR */ 107 - #define SPI_DMA_RDMAE (1 << 0) 108 - #define SPI_DMA_TDMAE (1 << 1) 107 + #define DW_SPI_DMACR_RDMAE (1 << 0) 108 + #define DW_SPI_DMACR_TDMAE (1 << 1) 109 109 110 - #define SPI_WAIT_RETRIES 5 111 - #define SPI_BUF_SIZE \ 110 + /* Mem/DMA operations helpers */ 111 + #define DW_SPI_WAIT_RETRIES 5 112 + #define DW_SPI_BUF_SIZE \ 112 113 (sizeof_field(struct spi_mem_op, cmd.opcode) + \ 113 114 sizeof_field(struct spi_mem_op, addr.val) + 256) 114 - #define SPI_GET_BYTE(_val, _idx) \ 115 + #define 
DW_SPI_GET_BYTE(_val, _idx) \ 115 116 ((_val) >> (BITS_PER_BYTE * (_idx)) & 0xff) 116 117 117 118 /* DW SPI capabilities */ 118 119 #define DW_SPI_CAP_CS_OVERRIDE BIT(0) 119 120 #define DW_SPI_CAP_KEEMBAY_MST BIT(1) 120 - #define DW_SPI_CAP_DWC_SSI BIT(2) 121 + #define DW_SPI_CAP_DWC_HSSI BIT(2) 121 122 #define DW_SPI_CAP_DFS32 BIT(3) 122 123 123 124 /* Slave spi_transfer/spi_mem_op related */ ··· 163 162 unsigned int tx_len; 164 163 void *rx; 165 164 unsigned int rx_len; 166 - u8 buf[SPI_BUF_SIZE]; 165 + u8 buf[DW_SPI_BUF_SIZE]; 167 166 int dma_mapped; 168 167 u8 n_bytes; /* current is a 1/2 bytes op */ 169 168 irqreturn_t (*transfer_handler)(struct dw_spi *dws); ··· 225 224 } 226 225 } 227 226 228 - static inline void spi_enable_chip(struct dw_spi *dws, int enable) 227 + static inline void dw_spi_enable_chip(struct dw_spi *dws, int enable) 229 228 { 230 229 dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0)); 231 230 } 232 231 233 - static inline void spi_set_clk(struct dw_spi *dws, u16 div) 232 + static inline void dw_spi_set_clk(struct dw_spi *dws, u16 div) 234 233 { 235 234 dw_writel(dws, DW_SPI_BAUDR, div); 236 235 } 237 236 238 237 /* Disable IRQ bits */ 239 - static inline void spi_mask_intr(struct dw_spi *dws, u32 mask) 238 + static inline void dw_spi_mask_intr(struct dw_spi *dws, u32 mask) 240 239 { 241 240 u32 new_mask; 242 241 ··· 245 244 } 246 245 247 246 /* Enable IRQ bits */ 248 - static inline void spi_umask_intr(struct dw_spi *dws, u32 mask) 247 + static inline void dw_spi_umask_intr(struct dw_spi *dws, u32 mask) 249 248 { 250 249 u32 new_mask; 251 250 ··· 258 257 * and CS, then re-enables the controller back. Transmit and receive FIFO 259 258 * buffers are cleared when the device is disabled. 
260 259 */ 261 - static inline void spi_reset_chip(struct dw_spi *dws) 260 + static inline void dw_spi_reset_chip(struct dw_spi *dws) 262 261 { 263 - spi_enable_chip(dws, 0); 264 - spi_mask_intr(dws, 0xff); 262 + dw_spi_enable_chip(dws, 0); 263 + dw_spi_mask_intr(dws, 0xff); 265 264 dw_readl(dws, DW_SPI_ICR); 266 265 dw_writel(dws, DW_SPI_SER, 0); 267 - spi_enable_chip(dws, 1); 266 + dw_spi_enable_chip(dws, 1); 268 267 } 269 268 270 - static inline void spi_shutdown_chip(struct dw_spi *dws) 269 + static inline void dw_spi_shutdown_chip(struct dw_spi *dws) 271 270 { 272 - spi_enable_chip(dws, 0); 273 - spi_set_clk(dws, 0); 271 + dw_spi_enable_chip(dws, 0); 272 + dw_spi_set_clk(dws, 0); 274 273 } 275 274 276 275 extern void dw_spi_set_cs(struct spi_device *spi, bool enable); ··· 294 293 295 294 #endif /* !CONFIG_SPI_DW_DMA */ 296 295 297 - #endif /* DW_SPI_HEADER_H */ 296 + #endif /* __SPI_DW_H__ */