mei: vsc: Unregister interrupt handler for system suspend

Unregister the MEI VSC interrupt handler before system suspend and
re-register it at system resume time. This mirrors the implementation of other
MEI devices.

This patch fixes the bug that causes a continuous stream of MEI VSC errors
after system resume.

Fixes: 386a766c4169 ("mei: Add MEI hardware support for IVSC device")
Cc: stable@vger.kernel.org # for 6.8
Reported-by: Dominik Brodowski <linux@dominikbrodowski.net>
Signed-off-by: Wentong Wu <wentong.wu@intel.com>
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Acked-by: Tomas Winkler <tomas.winkler@intel.com>
Link: https://lore.kernel.org/r/20240403051341.3534650-2-wentong.wu@intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by Sakari Ailus and committed by Greg Kroah-Hartman f6085a96 e3dc66d9

Changed files
+78 -26
drivers
+16 -1
drivers/misc/mei/platform-vsc.c
··· 400 400 static int mei_vsc_suspend(struct device *dev) 401 401 { 402 402 struct mei_device *mei_dev = dev_get_drvdata(dev); 403 + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); 403 404 404 405 mei_stop(mei_dev); 406 + 407 + mei_disable_interrupts(mei_dev); 408 + 409 + vsc_tp_free_irq(hw->tp); 405 410 406 411 return 0; 407 412 } ··· 414 409 static int mei_vsc_resume(struct device *dev) 415 410 { 416 411 struct mei_device *mei_dev = dev_get_drvdata(dev); 412 + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); 417 413 int ret; 414 + 415 + ret = vsc_tp_request_irq(hw->tp); 416 + if (ret) 417 + return ret; 418 418 419 419 ret = mei_restart(mei_dev); 420 420 if (ret) 421 - return ret; 421 + goto err_free; 422 422 423 423 /* start timer if stopped in suspend */ 424 424 schedule_delayed_work(&mei_dev->timer_work, HZ); 425 425 426 426 return 0; 427 + 428 + err_free: 429 + vsc_tp_free_irq(hw->tp); 430 + 431 + return ret; 427 432 } 428 433 429 434 static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);
+59 -25
drivers/misc/mei/vsc-tp.c
··· 94 94 {} 95 95 }; 96 96 97 + static irqreturn_t vsc_tp_isr(int irq, void *data) 98 + { 99 + struct vsc_tp *tp = data; 100 + 101 + atomic_inc(&tp->assert_cnt); 102 + 103 + wake_up(&tp->xfer_wait); 104 + 105 + return IRQ_WAKE_THREAD; 106 + } 107 + 108 + static irqreturn_t vsc_tp_thread_isr(int irq, void *data) 109 + { 110 + struct vsc_tp *tp = data; 111 + 112 + if (tp->event_notify) 113 + tp->event_notify(tp->event_notify_context); 114 + 115 + return IRQ_HANDLED; 116 + } 117 + 97 118 /* wakeup firmware and wait for response */ 98 119 static int vsc_tp_wakeup_request(struct vsc_tp *tp) 99 120 { ··· 405 384 EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP); 406 385 407 386 /** 387 + * vsc_tp_request_irq - request irq for vsc_tp device 388 + * @tp: vsc_tp device handle 389 + */ 390 + int vsc_tp_request_irq(struct vsc_tp *tp) 391 + { 392 + struct spi_device *spi = tp->spi; 393 + struct device *dev = &spi->dev; 394 + int ret; 395 + 396 + irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY); 397 + ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr, 398 + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 399 + dev_name(dev), tp); 400 + if (ret) 401 + return ret; 402 + 403 + return 0; 404 + } 405 + EXPORT_SYMBOL_NS_GPL(vsc_tp_request_irq, VSC_TP); 406 + 407 + /** 408 + * vsc_tp_free_irq - free irq for vsc_tp device 409 + * @tp: vsc_tp device handle 410 + */ 411 + void vsc_tp_free_irq(struct vsc_tp *tp) 412 + { 413 + free_irq(tp->spi->irq, tp); 414 + } 415 + EXPORT_SYMBOL_NS_GPL(vsc_tp_free_irq, VSC_TP); 416 + 417 + /** 408 418 * vsc_tp_intr_synchronize - synchronize vsc_tp interrupt 409 419 * @tp: vsc_tp device handle 410 420 */ ··· 464 412 disable_irq(tp->spi->irq); 465 413 } 466 414 EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP); 467 - 468 - static irqreturn_t vsc_tp_isr(int irq, void *data) 469 - { 470 - struct vsc_tp *tp = data; 471 - 472 - atomic_inc(&tp->assert_cnt); 473 - 474 - wake_up(&tp->xfer_wait); 475 - 476 - return IRQ_WAKE_THREAD; 477 - } 478 - 479 
- static irqreturn_t vsc_tp_thread_isr(int irq, void *data) 480 - { 481 - struct vsc_tp *tp = data; 482 - 483 - if (tp->event_notify) 484 - tp->event_notify(tp->event_notify_context); 485 - 486 - return IRQ_HANDLED; 487 - } 488 415 489 416 static int vsc_tp_match_any(struct acpi_device *adev, void *data) 490 417 { ··· 521 490 tp->spi = spi; 522 491 523 492 irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY); 524 - ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr, 525 - vsc_tp_thread_isr, 526 - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 527 - dev_name(dev), tp); 493 + ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr, 494 + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 495 + dev_name(dev), tp); 528 496 if (ret) 529 497 return ret; 530 498 ··· 552 522 err_destroy_lock: 553 523 mutex_destroy(&tp->mutex); 554 524 525 + free_irq(spi->irq, tp); 526 + 555 527 return ret; 556 528 } 557 529 ··· 564 532 platform_device_unregister(tp->pdev); 565 533 566 534 mutex_destroy(&tp->mutex); 535 + 536 + free_irq(spi->irq, tp); 567 537 } 568 538 569 539 static const struct acpi_device_id vsc_tp_acpi_ids[] = {
+3
drivers/misc/mei/vsc-tp.h
··· 37 37 int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb, 38 38 void *context); 39 39 40 + int vsc_tp_request_irq(struct vsc_tp *tp); 41 + void vsc_tp_free_irq(struct vsc_tp *tp); 42 + 40 43 void vsc_tp_intr_enable(struct vsc_tp *tp); 41 44 void vsc_tp_intr_disable(struct vsc_tp *tp); 42 45 void vsc_tp_intr_synchronize(struct vsc_tp *tp);