/* Linux kernel header include/linux/irq_work.h, as of tag v5.7 */
1/* SPDX-License-Identifier: GPL-2.0 */ 2#ifndef _LINUX_IRQ_WORK_H 3#define _LINUX_IRQ_WORK_H 4 5#include <linux/llist.h> 6 7/* 8 * An entry can be in one of four states: 9 * 10 * free NULL, 0 -> {claimed} : free to be used 11 * claimed NULL, 3 -> {pending} : claimed to be enqueued 12 * pending next, 3 -> {busy} : queued, pending callback 13 * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed 14 */ 15 16#define IRQ_WORK_PENDING BIT(0) 17#define IRQ_WORK_BUSY BIT(1) 18 19/* Doesn't want IPI, wait for tick: */ 20#define IRQ_WORK_LAZY BIT(2) 21/* Run hard IRQ context, even on RT */ 22#define IRQ_WORK_HARD_IRQ BIT(3) 23 24#define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY) 25 26struct irq_work { 27 atomic_t flags; 28 struct llist_node llnode; 29 void (*func)(struct irq_work *); 30}; 31 32static inline 33void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *)) 34{ 35 atomic_set(&work->flags, 0); 36 work->func = func; 37} 38 39#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { \ 40 .flags = ATOMIC_INIT(0), \ 41 .func = (_f) \ 42} 43 44 45bool irq_work_queue(struct irq_work *work); 46bool irq_work_queue_on(struct irq_work *work, int cpu); 47 48void irq_work_tick(void); 49void irq_work_sync(struct irq_work *work); 50 51#ifdef CONFIG_IRQ_WORK 52#include <asm/irq_work.h> 53 54void irq_work_run(void); 55bool irq_work_needs_cpu(void); 56#else 57static inline bool irq_work_needs_cpu(void) { return false; } 58static inline void irq_work_run(void) { } 59#endif 60 61#endif /* _LINUX_IRQ_WORK_H */