Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

accel/ivpu: Replace wake_thread with kfifo

Use a kfifo to pass IRQ sources to the IRQ thread, so that the IRQ
thread can be shared by multiple IRQ types.

Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Reviewed-by: Wachowski, Karol <karol.wachowski@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240515113006.457472-4-jacek.lawrynowicz@linux.intel.com

+46 -22
+17 -2
drivers/accel/ivpu/ivpu_drv.c
··· 320 320 321 321 timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot); 322 322 while (1) { 323 - ivpu_ipc_irq_handler(vdev, NULL); 323 + ivpu_ipc_irq_handler(vdev); 324 324 ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0); 325 325 if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout)) 326 326 break; ··· 449 449 static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg) 450 450 { 451 451 struct ivpu_device *vdev = arg; 452 + u8 irq_src; 452 453 453 - return ivpu_ipc_irq_thread_handler(vdev); 454 + if (kfifo_is_empty(&vdev->hw->irq.fifo)) 455 + return IRQ_NONE; 456 + 457 + while (kfifo_get(&vdev->hw->irq.fifo, &irq_src)) { 458 + switch (irq_src) { 459 + case IVPU_HW_IRQ_SRC_IPC: 460 + ivpu_ipc_irq_thread_handler(vdev); 461 + break; 462 + default: 463 + ivpu_err_ratelimited(vdev, "Unknown IRQ source: %u\n", irq_src); 464 + break; 465 + } 466 + } 467 + 468 + return IRQ_HANDLED; 454 469 } 455 470 456 471 static int ivpu_irq_init(struct ivpu_device *vdev)
+6 -3
drivers/accel/ivpu/ivpu_hw.c
··· 263 263 264 264 void ivpu_irq_handlers_init(struct ivpu_device *vdev) 265 265 { 266 + INIT_KFIFO(vdev->hw->irq.fifo); 267 + 266 268 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 267 269 vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_37xx; 268 270 else ··· 278 276 279 277 void ivpu_hw_irq_enable(struct ivpu_device *vdev) 280 278 { 279 + kfifo_reset(&vdev->hw->irq.fifo); 281 280 ivpu_hw_ip_irq_enable(vdev); 282 281 ivpu_hw_btrs_irq_enable(vdev); 283 282 } ··· 291 288 292 289 irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr) 293 290 { 294 - bool ip_handled, btrs_handled, wake_thread = false; 295 291 struct ivpu_device *vdev = ptr; 292 + bool ip_handled, btrs_handled; 296 293 297 294 ivpu_hw_btrs_global_int_disable(vdev); 298 295 299 296 btrs_handled = ivpu_hw_btrs_irq_handler(vdev, irq); 300 297 if (!ivpu_hw_is_idle((vdev)) || !btrs_handled) 301 - ip_handled = ivpu_hw_ip_irq_handler(vdev, irq, &wake_thread); 298 + ip_handled = ivpu_hw_ip_irq_handler(vdev, irq); 302 299 else 303 300 ip_handled = false; 304 301 305 302 /* Re-enable global interrupts to re-trigger MSI for pending interrupts */ 306 303 ivpu_hw_btrs_global_int_enable(vdev); 307 304 308 - if (wake_thread) 305 + if (!kfifo_is_empty(&vdev->hw->irq.fifo)) 309 306 return IRQ_WAKE_THREAD; 310 307 if (ip_handled || btrs_handled) 311 308 return IRQ_HANDLED;
+10 -3
drivers/accel/ivpu/ivpu_hw.h
··· 6 6 #ifndef __IVPU_HW_H__ 7 7 #define __IVPU_HW_H__ 8 8 9 + #include <linux/kfifo.h> 10 + 9 11 #include "ivpu_drv.h" 10 12 #include "ivpu_hw_btrs.h" 11 13 #include "ivpu_hw_ip.h" 14 + 15 + #define IVPU_HW_IRQ_FIFO_LENGTH 1024 16 + 17 + #define IVPU_HW_IRQ_SRC_IPC 1 12 18 13 19 struct ivpu_addr_range { 14 20 resource_size_t start; ··· 24 18 struct ivpu_hw_info { 25 19 struct { 26 20 bool (*btrs_irq_handler)(struct ivpu_device *vdev, int irq); 27 - bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq, bool *wake_thread); 21 + bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq); 22 + DECLARE_KFIFO(fifo, u8, IVPU_HW_IRQ_FIFO_LENGTH); 28 23 } irq; 29 24 struct { 30 25 struct ivpu_addr_range global; ··· 68 61 return vdev->hw->irq.btrs_irq_handler(vdev, irq); 69 62 } 70 63 71 - static inline u32 ivpu_hw_ip_irq_handler(struct ivpu_device *vdev, int irq, bool *wake_thread) 64 + static inline u32 ivpu_hw_ip_irq_handler(struct ivpu_device *vdev, int irq) 72 65 { 73 - return vdev->hw->irq.ip_irq_handler(vdev, irq, wake_thread); 66 + return vdev->hw->irq.ip_irq_handler(vdev, irq); 74 67 } 75 68 76 69 static inline void ivpu_hw_range_init(struct ivpu_addr_range *range, u64 start, u64 size)
+4 -4
drivers/accel/ivpu/ivpu_hw_ip.c
··· 1066 1066 } 1067 1067 1068 1068 /* Handler for IRQs from NPU core */ 1069 - bool ivpu_hw_ip_irq_handler_37xx(struct ivpu_device *vdev, int irq, bool *wake_thread) 1069 + bool ivpu_hw_ip_irq_handler_37xx(struct ivpu_device *vdev, int irq) 1070 1070 { 1071 1071 u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_37XX; 1072 1072 ··· 1079 1079 ivpu_mmu_irq_evtq_handler(vdev); 1080 1080 1081 1081 if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status)) 1082 - ivpu_ipc_irq_handler(vdev, wake_thread); 1082 + ivpu_ipc_irq_handler(vdev); 1083 1083 1084 1084 if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status)) 1085 1085 ivpu_dbg(vdev, IRQ, "MMU sync complete\n"); ··· 1100 1100 } 1101 1101 1102 1102 /* Handler for IRQs from NPU core */ 1103 - bool ivpu_hw_ip_irq_handler_40xx(struct ivpu_device *vdev, int irq, bool *wake_thread) 1103 + bool ivpu_hw_ip_irq_handler_40xx(struct ivpu_device *vdev, int irq) 1104 1104 { 1105 1105 u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_40XX; 1106 1106 ··· 1113 1113 ivpu_mmu_irq_evtq_handler(vdev); 1114 1114 1115 1115 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status)) 1116 - ivpu_ipc_irq_handler(vdev, wake_thread); 1116 + ivpu_ipc_irq_handler(vdev); 1117 1117 1118 1118 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status)) 1119 1119 ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
+2 -2
drivers/accel/ivpu/ivpu_hw_ip.h
··· 22 22 void ivpu_hw_ip_diagnose_failure(struct ivpu_device *vdev); 23 23 u32 ivpu_hw_ip_ipc_rx_count_get(struct ivpu_device *vdev); 24 24 void ivpu_hw_ip_irq_clear(struct ivpu_device *vdev); 25 - bool ivpu_hw_ip_irq_handler_37xx(struct ivpu_device *vdev, int irq, bool *wake_thread); 26 - bool ivpu_hw_ip_irq_handler_40xx(struct ivpu_device *vdev, int irq, bool *wake_thread); 25 + bool ivpu_hw_ip_irq_handler_37xx(struct ivpu_device *vdev, int irq); 26 + bool ivpu_hw_ip_irq_handler_40xx(struct ivpu_device *vdev, int irq); 27 27 void ivpu_hw_ip_db_set(struct ivpu_device *vdev, u32 db_id); 28 28 u32 ivpu_hw_ip_ipc_rx_addr_get(struct ivpu_device *vdev); 29 29 void ivpu_hw_ip_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr);
+5 -6
drivers/accel/ivpu/ivpu_ipc.c
··· 378 378 return false; 379 379 } 380 380 381 - void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread) 381 + void ivpu_ipc_irq_handler(struct ivpu_device *vdev) 382 382 { 383 383 struct ivpu_ipc_info *ipc = vdev->ipc; 384 384 struct ivpu_ipc_consumer *cons; ··· 442 442 } 443 443 } 444 444 445 - if (wake_thread) 446 - *wake_thread = !list_empty(&ipc->cb_msg_list); 445 + if (!list_empty(&ipc->cb_msg_list)) 446 + if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_IPC)) 447 + ivpu_err_ratelimited(vdev, "IRQ FIFO full\n"); 447 448 } 448 449 449 - irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev) 450 + void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev) 450 451 { 451 452 struct ivpu_ipc_info *ipc = vdev->ipc; 452 453 struct ivpu_ipc_rx_msg *rx_msg, *r; ··· 463 462 rx_msg->callback(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg); 464 463 ivpu_ipc_rx_msg_del(vdev, rx_msg); 465 464 } 466 - 467 - return IRQ_HANDLED; 468 465 } 469 466 470 467 int ivpu_ipc_init(struct ivpu_device *vdev)
+2 -2
drivers/accel/ivpu/ivpu_ipc.h
··· 89 89 void ivpu_ipc_disable(struct ivpu_device *vdev); 90 90 void ivpu_ipc_reset(struct ivpu_device *vdev); 91 91 92 - void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread); 93 - irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev); 92 + void ivpu_ipc_irq_handler(struct ivpu_device *vdev); 93 + void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev); 94 94 95 95 void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, 96 96 u32 channel, ivpu_ipc_rx_callback_t callback);