Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

spi: sh: Switch to using core message queue

We deprecated open coding of the transfer queue back in 2017 so it's high
time we finished up converting drivers to use the standard message queue
code. The SH driver is fairly straightforward so convert to use
transfer_one_message(), it looks like the driver would be a good fit for
transfer_one() with a little bit of updating but this smaller change seems
safer.

I'm not actually clear how the driver worked robustly previously, it
clears SSA and CR1 when queueing a transfer which looks like it would
interfere with any running transfer. This clearing has been moved to the
start of the message transfer function.

I'm also unclear how exactly the chip select is managed with this driver.

Signed-off-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20220610154649.1707851-1-broonie@kernel.org
Signed-off-by: Mark Brown <broonie@kernel.org>

+26 -64
drivers/spi/spi-sh.c
··· 73 73 void __iomem *addr; 74 74 int irq; 75 75 struct spi_master *master; 76 - struct list_head queue; 77 - struct work_struct ws; 78 76 unsigned long cr1; 79 77 wait_queue_head_t wait; 80 - spinlock_t lock; 81 78 int width; 82 79 }; 83 80 ··· 268 271 return 0; 269 272 } 270 273 271 - static void spi_sh_work(struct work_struct *work) 274 + static int spi_sh_transfer_one_message(struct spi_controller *ctlr, 275 + struct spi_message *mesg) 272 276 { 273 - struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws); 274 - struct spi_message *mesg; 277 + struct spi_sh_data *ss = spi_controller_get_devdata(ctlr); 275 278 struct spi_transfer *t; 276 - unsigned long flags; 277 279 int ret; 278 280 279 281 pr_debug("%s: enter\n", __func__); 280 282 281 - spin_lock_irqsave(&ss->lock, flags); 282 - while (!list_empty(&ss->queue)) { 283 - mesg = list_entry(ss->queue.next, struct spi_message, queue); 284 - list_del_init(&mesg->queue); 283 + spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1); 285 284 286 - spin_unlock_irqrestore(&ss->lock, flags); 287 - list_for_each_entry(t, &mesg->transfers, transfer_list) { 288 - pr_debug("tx_buf = %p, rx_buf = %p\n", 289 - t->tx_buf, t->rx_buf); 290 - pr_debug("len = %d, delay.value = %d\n", 291 - t->len, t->delay.value); 285 + list_for_each_entry(t, &mesg->transfers, transfer_list) { 286 + pr_debug("tx_buf = %p, rx_buf = %p\n", 287 + t->tx_buf, t->rx_buf); 288 + pr_debug("len = %d, delay.value = %d\n", 289 + t->len, t->delay.value); 292 290 293 - if (t->tx_buf) { 294 - ret = spi_sh_send(ss, mesg, t); 295 - if (ret < 0) 296 - goto error; 297 - } 298 - if (t->rx_buf) { 299 - ret = spi_sh_receive(ss, mesg, t); 300 - if (ret < 0) 301 - goto error; 302 - } 303 - mesg->actual_length += t->len; 291 + if (t->tx_buf) { 292 + ret = spi_sh_send(ss, mesg, t); 293 + if (ret < 0) 294 + goto error; 304 295 } 305 - spin_lock_irqsave(&ss->lock, flags); 306 - 307 - mesg->status = 0; 308 - if (mesg->complete) 309 - mesg->complete(mesg->context); 296 + 
if (t->rx_buf) { 297 + ret = spi_sh_receive(ss, mesg, t); 298 + if (ret < 0) 299 + goto error; 300 + } 301 + mesg->actual_length += t->len; 310 302 } 303 + 304 + mesg->status = 0; 305 + spi_finalize_current_message(ctlr); 311 306 312 307 clear_fifo(ss); 313 308 spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1); ··· 310 321 311 322 clear_fifo(ss); 312 323 313 - spin_unlock_irqrestore(&ss->lock, flags); 314 - 315 - return; 324 + return 0; 316 325 317 326 error: 318 327 mesg->status = ret; 328 + spi_finalize_current_message(ctlr); 319 329 if (mesg->complete) 320 330 mesg->complete(mesg->context); 321 331 ··· 322 334 SPI_SH_CR1); 323 335 clear_fifo(ss); 324 336 337 + return ret; 325 338 } 326 339 327 340 static int spi_sh_setup(struct spi_device *spi) ··· 340 351 /* 1/8 clock */ 341 352 spi_sh_write(ss, spi_sh_read(ss, SPI_SH_CR2) | 0x07, SPI_SH_CR2); 342 353 udelay(10); 343 - 344 - return 0; 345 - } 346 - 347 - static int spi_sh_transfer(struct spi_device *spi, struct spi_message *mesg) 348 - { 349 - struct spi_sh_data *ss = spi_master_get_devdata(spi->master); 350 - unsigned long flags; 351 - 352 - pr_debug("%s: enter\n", __func__); 353 - pr_debug("\tmode = %02x\n", spi->mode); 354 - 355 - spin_lock_irqsave(&ss->lock, flags); 356 - 357 - mesg->actual_length = 0; 358 - mesg->status = -EINPROGRESS; 359 - 360 - spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1); 361 - 362 - list_add_tail(&mesg->queue, &ss->queue); 363 - schedule_work(&ss->ws); 364 - 365 - spin_unlock_irqrestore(&ss->lock, flags); 366 354 367 355 return 0; 368 356 } ··· 382 416 struct spi_sh_data *ss = platform_get_drvdata(pdev); 383 417 384 418 spi_unregister_master(ss->master); 385 - flush_work(&ss->ws); 386 419 free_irq(ss->irq, ss); 387 420 388 421 return 0; ··· 432 467 dev_err(&pdev->dev, "ioremap error.\n"); 433 468 return -ENOMEM; 434 469 } 435 - INIT_LIST_HEAD(&ss->queue); 436 - spin_lock_init(&ss->lock); 437 - INIT_WORK(&ss->ws, spi_sh_work); 438 470 init_waitqueue_head(&ss->wait); 439 471 440 472 ret = 
request_irq(irq, spi_sh_irq, 0, "spi_sh", ss); ··· 443 481 master->num_chipselect = 2; 444 482 master->bus_num = pdev->id; 445 483 master->setup = spi_sh_setup; 446 - master->transfer = spi_sh_transfer; 484 + master->transfer_one_message = spi_sh_transfer_one_message; 447 485 master->cleanup = spi_sh_cleanup; 448 486 449 487 ret = spi_register_master(master);