Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rv: Add rtapp_sleep monitor

Add a monitor for checking that real-time tasks do not go to sleep in a
manner that may cause undesirable latency.

Also change the RV Kconfig entry from
"depends on TRACING"
to
"select TRACING"
to avoid the following recursive dependency:

error: recursive dependency detected!
symbol TRACING is selected by PREEMPTIRQ_TRACEPOINTS
symbol PREEMPTIRQ_TRACEPOINTS depends on TRACE_IRQFLAGS
symbol TRACE_IRQFLAGS is selected by RV_MON_SLEEP
symbol RV_MON_SLEEP depends on RV
symbol RV depends on TRACING

Cc: John Ogness <john.ogness@linutronix.de>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://lore.kernel.org/75bc5bcc741d153aa279c95faf778dff35c5c8ad.1752088709.git.namcao@linutronix.de
Reviewed-by: Gabriele Monaco <gmonaco@redhat.com>
Signed-off-by: Nam Cao <namcao@linutronix.de>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>

authored by

Nam Cao and committed by
Steven Rostedt (Google)
f74f8bb2 9162620e

+556 -1
+2 -1
kernel/trace/rv/Kconfig
··· 20 20 21 21 menuconfig RV 22 22 bool "Runtime Verification" 23 - depends on TRACING 23 + select TRACING 24 24 help 25 25 Enable the kernel runtime verification infrastructure. RV is a 26 26 lightweight (yet rigorous) method that complements classical ··· 43 43 source "kernel/trace/rv/monitors/sncid/Kconfig" 44 44 source "kernel/trace/rv/monitors/rtapp/Kconfig" 45 45 source "kernel/trace/rv/monitors/pagefault/Kconfig" 46 + source "kernel/trace/rv/monitors/sleep/Kconfig" 46 47 # Add new monitors here 47 48 48 49 config RV_REACTORS
+1
kernel/trace/rv/Makefile
··· 14 14 obj-$(CONFIG_RV_MON_SNCID) += monitors/sncid/sncid.o 15 15 obj-$(CONFIG_RV_MON_RTAPP) += monitors/rtapp/rtapp.o 16 16 obj-$(CONFIG_RV_MON_PAGEFAULT) += monitors/pagefault/pagefault.o 17 + obj-$(CONFIG_RV_MON_SLEEP) += monitors/sleep/sleep.o 17 18 # Add new monitors here 18 19 obj-$(CONFIG_RV_REACTORS) += rv_reactors.o 19 20 obj-$(CONFIG_RV_REACT_PRINTK) += reactor_printk.o
+22
kernel/trace/rv/monitors/sleep/Kconfig
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + # 3 + config RV_MON_SLEEP 4 + depends on RV 5 + select RV_LTL_MONITOR 6 + depends on HAVE_SYSCALL_TRACEPOINTS 7 + depends on RV_MON_RTAPP 8 + select TRACE_IRQFLAGS 9 + default y 10 + select LTL_MON_EVENTS_ID 11 + bool "sleep monitor" 12 + help 13 + Monitor that real-time tasks do not sleep in a manner that may 14 + cause undesirable latency. 15 + 16 + If you are developing a real-time system and not entirely sure whether 17 + the applications are designed correctly for real-time, you want to say 18 + Y here. 19 + 20 + Enabling this monitor may have performance impact (due to select 21 + TRACE_IRQFLAGS). Therefore, you probably should say N for 22 + production kernel.
+237
kernel/trace/rv/monitors/sleep/sleep.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/ftrace.h> 3 + #include <linux/tracepoint.h> 4 + #include <linux/init.h> 5 + #include <linux/irqflags.h> 6 + #include <linux/kernel.h> 7 + #include <linux/module.h> 8 + #include <linux/rv.h> 9 + #include <linux/sched/deadline.h> 10 + #include <linux/sched/rt.h> 11 + #include <rv/instrumentation.h> 12 + 13 + #define MODULE_NAME "sleep" 14 + 15 + #include <trace/events/syscalls.h> 16 + #include <trace/events/sched.h> 17 + #include <trace/events/lock.h> 18 + #include <uapi/linux/futex.h> 19 + #include <rv_trace.h> 20 + #include <monitors/rtapp/rtapp.h> 21 + 22 + #include "sleep.h" 23 + #include <rv/ltl_monitor.h> 24 + 25 + static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon) 26 + { 27 + /* 28 + * This includes "actual" real-time tasks and also PI-boosted 29 + * tasks. A task being PI-boosted means it is blocking an "actual" 30 + * real-task, therefore it should also obey the monitor's rule, 31 + * otherwise the "actual" real-task may be delayed. 
32 + */ 33 + ltl_atom_set(mon, LTL_RT, rt_or_dl_task(task)); 34 + } 35 + 36 + static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation) 37 + { 38 + ltl_atom_set(mon, LTL_SLEEP, false); 39 + ltl_atom_set(mon, LTL_WAKE, false); 40 + ltl_atom_set(mon, LTL_ABORT_SLEEP, false); 41 + ltl_atom_set(mon, LTL_WOKEN_BY_HARDIRQ, false); 42 + ltl_atom_set(mon, LTL_WOKEN_BY_NMI, false); 43 + ltl_atom_set(mon, LTL_WOKEN_BY_EQUAL_OR_HIGHER_PRIO, false); 44 + 45 + if (task_creation) { 46 + ltl_atom_set(mon, LTL_KTHREAD_SHOULD_STOP, false); 47 + ltl_atom_set(mon, LTL_NANOSLEEP_CLOCK_MONOTONIC, false); 48 + ltl_atom_set(mon, LTL_NANOSLEEP_CLOCK_TAI, false); 49 + ltl_atom_set(mon, LTL_NANOSLEEP_TIMER_ABSTIME, false); 50 + ltl_atom_set(mon, LTL_CLOCK_NANOSLEEP, false); 51 + ltl_atom_set(mon, LTL_FUTEX_WAIT, false); 52 + ltl_atom_set(mon, LTL_FUTEX_LOCK_PI, false); 53 + ltl_atom_set(mon, LTL_BLOCK_ON_RT_MUTEX, false); 54 + } 55 + 56 + if (task->flags & PF_KTHREAD) { 57 + ltl_atom_set(mon, LTL_KERNEL_THREAD, true); 58 + 59 + /* kernel tasks do not do syscall */ 60 + ltl_atom_set(mon, LTL_FUTEX_WAIT, false); 61 + ltl_atom_set(mon, LTL_FUTEX_LOCK_PI, false); 62 + ltl_atom_set(mon, LTL_NANOSLEEP_CLOCK_MONOTONIC, false); 63 + ltl_atom_set(mon, LTL_NANOSLEEP_CLOCK_TAI, false); 64 + ltl_atom_set(mon, LTL_NANOSLEEP_TIMER_ABSTIME, false); 65 + ltl_atom_set(mon, LTL_CLOCK_NANOSLEEP, false); 66 + 67 + if (strstarts(task->comm, "migration/")) 68 + ltl_atom_set(mon, LTL_TASK_IS_MIGRATION, true); 69 + else 70 + ltl_atom_set(mon, LTL_TASK_IS_MIGRATION, false); 71 + 72 + if (strstarts(task->comm, "rcu")) 73 + ltl_atom_set(mon, LTL_TASK_IS_RCU, true); 74 + else 75 + ltl_atom_set(mon, LTL_TASK_IS_RCU, false); 76 + } else { 77 + ltl_atom_set(mon, LTL_KTHREAD_SHOULD_STOP, false); 78 + ltl_atom_set(mon, LTL_KERNEL_THREAD, false); 79 + ltl_atom_set(mon, LTL_TASK_IS_RCU, false); 80 + ltl_atom_set(mon, LTL_TASK_IS_MIGRATION, false); 81 + } 82 + 83 + } 84 + 85 + static 
void handle_sched_set_state(void *data, struct task_struct *task, int state) 86 + { 87 + if (state & TASK_INTERRUPTIBLE) 88 + ltl_atom_pulse(task, LTL_SLEEP, true); 89 + else if (state == TASK_RUNNING) 90 + ltl_atom_pulse(task, LTL_ABORT_SLEEP, true); 91 + } 92 + 93 + static void handle_sched_wakeup(void *data, struct task_struct *task) 94 + { 95 + ltl_atom_pulse(task, LTL_WAKE, true); 96 + } 97 + 98 + static void handle_sched_waking(void *data, struct task_struct *task) 99 + { 100 + if (this_cpu_read(hardirq_context)) { 101 + ltl_atom_pulse(task, LTL_WOKEN_BY_HARDIRQ, true); 102 + } else if (in_task()) { 103 + if (current->prio <= task->prio) 104 + ltl_atom_pulse(task, LTL_WOKEN_BY_EQUAL_OR_HIGHER_PRIO, true); 105 + } else if (in_nmi()) { 106 + ltl_atom_pulse(task, LTL_WOKEN_BY_NMI, true); 107 + } 108 + } 109 + 110 + static void handle_contention_begin(void *data, void *lock, unsigned int flags) 111 + { 112 + if (flags & LCB_F_RT) 113 + ltl_atom_update(current, LTL_BLOCK_ON_RT_MUTEX, true); 114 + } 115 + 116 + static void handle_contention_end(void *data, void *lock, int ret) 117 + { 118 + ltl_atom_update(current, LTL_BLOCK_ON_RT_MUTEX, false); 119 + } 120 + 121 + static void handle_sys_enter(void *data, struct pt_regs *regs, long id) 122 + { 123 + struct ltl_monitor *mon; 124 + unsigned long args[6]; 125 + int op, cmd; 126 + 127 + mon = ltl_get_monitor(current); 128 + 129 + switch (id) { 130 + case __NR_clock_nanosleep: 131 + #ifdef __NR_clock_nanosleep_time64 132 + case __NR_clock_nanosleep_time64: 133 + #endif 134 + syscall_get_arguments(current, regs, args); 135 + ltl_atom_set(mon, LTL_NANOSLEEP_CLOCK_MONOTONIC, args[0] == CLOCK_MONOTONIC); 136 + ltl_atom_set(mon, LTL_NANOSLEEP_CLOCK_TAI, args[0] == CLOCK_TAI); 137 + ltl_atom_set(mon, LTL_NANOSLEEP_TIMER_ABSTIME, args[1] == TIMER_ABSTIME); 138 + ltl_atom_update(current, LTL_CLOCK_NANOSLEEP, true); 139 + break; 140 + 141 + case __NR_futex: 142 + #ifdef __NR_futex_time64 143 + case __NR_futex_time64: 144 + 
#endif 145 + syscall_get_arguments(current, regs, args); 146 + op = args[1]; 147 + cmd = op & FUTEX_CMD_MASK; 148 + 149 + switch (cmd) { 150 + case FUTEX_LOCK_PI: 151 + case FUTEX_LOCK_PI2: 152 + ltl_atom_update(current, LTL_FUTEX_LOCK_PI, true); 153 + break; 154 + case FUTEX_WAIT: 155 + case FUTEX_WAIT_BITSET: 156 + case FUTEX_WAIT_REQUEUE_PI: 157 + ltl_atom_update(current, LTL_FUTEX_WAIT, true); 158 + break; 159 + } 160 + break; 161 + } 162 + } 163 + 164 + static void handle_sys_exit(void *data, struct pt_regs *regs, long ret) 165 + { 166 + struct ltl_monitor *mon = ltl_get_monitor(current); 167 + 168 + ltl_atom_set(mon, LTL_FUTEX_LOCK_PI, false); 169 + ltl_atom_set(mon, LTL_FUTEX_WAIT, false); 170 + ltl_atom_set(mon, LTL_NANOSLEEP_CLOCK_MONOTONIC, false); 171 + ltl_atom_set(mon, LTL_NANOSLEEP_CLOCK_TAI, false); 172 + ltl_atom_set(mon, LTL_NANOSLEEP_TIMER_ABSTIME, false); 173 + ltl_atom_update(current, LTL_CLOCK_NANOSLEEP, false); 174 + } 175 + 176 + static void handle_kthread_stop(void *data, struct task_struct *task) 177 + { 178 + /* FIXME: this could race with other tracepoint handlers */ 179 + ltl_atom_update(task, LTL_KTHREAD_SHOULD_STOP, true); 180 + } 181 + 182 + static int enable_sleep(void) 183 + { 184 + int retval; 185 + 186 + retval = ltl_monitor_init(); 187 + if (retval) 188 + return retval; 189 + 190 + rv_attach_trace_probe("rtapp_sleep", sched_waking, handle_sched_waking); 191 + rv_attach_trace_probe("rtapp_sleep", sched_wakeup, handle_sched_wakeup); 192 + rv_attach_trace_probe("rtapp_sleep", sched_set_state_tp, handle_sched_set_state); 193 + rv_attach_trace_probe("rtapp_sleep", contention_begin, handle_contention_begin); 194 + rv_attach_trace_probe("rtapp_sleep", contention_end, handle_contention_end); 195 + rv_attach_trace_probe("rtapp_sleep", sched_kthread_stop, handle_kthread_stop); 196 + rv_attach_trace_probe("rtapp_sleep", sys_enter, handle_sys_enter); 197 + rv_attach_trace_probe("rtapp_sleep", sys_exit, handle_sys_exit); 198 + return 0; 199 + 
} 200 + 201 + static void disable_sleep(void) 202 + { 203 + rv_detach_trace_probe("rtapp_sleep", sched_waking, handle_sched_waking); 204 + rv_detach_trace_probe("rtapp_sleep", sched_wakeup, handle_sched_wakeup); 205 + rv_detach_trace_probe("rtapp_sleep", sched_set_state_tp, handle_sched_set_state); 206 + rv_detach_trace_probe("rtapp_sleep", contention_begin, handle_contention_begin); 207 + rv_detach_trace_probe("rtapp_sleep", contention_end, handle_contention_end); 208 + rv_detach_trace_probe("rtapp_sleep", sched_kthread_stop, handle_kthread_stop); 209 + rv_detach_trace_probe("rtapp_sleep", sys_enter, handle_sys_enter); 210 + rv_detach_trace_probe("rtapp_sleep", sys_exit, handle_sys_exit); 211 + 212 + ltl_monitor_destroy(); 213 + } 214 + 215 + static struct rv_monitor rv_sleep = { 216 + .name = "sleep", 217 + .description = "Monitor that RT tasks do not undesirably sleep", 218 + .enable = enable_sleep, 219 + .disable = disable_sleep, 220 + }; 221 + 222 + static int __init register_sleep(void) 223 + { 224 + return rv_register_monitor(&rv_sleep, &rv_rtapp); 225 + } 226 + 227 + static void __exit unregister_sleep(void) 228 + { 229 + rv_unregister_monitor(&rv_sleep); 230 + } 231 + 232 + module_init(register_sleep); 233 + module_exit(unregister_sleep); 234 + 235 + MODULE_LICENSE("GPL"); 236 + MODULE_AUTHOR("Nam Cao <namcao@linutronix.de>"); 237 + MODULE_DESCRIPTION("sleep: Monitor that RT tasks do not undesirably sleep");
+257
kernel/trace/rv/monitors/sleep/sleep.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + /* 4 + * C implementation of Buchi automaton, automatically generated by 5 + * tools/verification/rvgen from the linear temporal logic specification. 6 + * For further information, see kernel documentation: 7 + * Documentation/trace/rv/linear_temporal_logic.rst 8 + */ 9 + 10 + #include <linux/rv.h> 11 + 12 + #define MONITOR_NAME sleep 13 + 14 + enum ltl_atom { 15 + LTL_ABORT_SLEEP, 16 + LTL_BLOCK_ON_RT_MUTEX, 17 + LTL_CLOCK_NANOSLEEP, 18 + LTL_FUTEX_LOCK_PI, 19 + LTL_FUTEX_WAIT, 20 + LTL_KERNEL_THREAD, 21 + LTL_KTHREAD_SHOULD_STOP, 22 + LTL_NANOSLEEP_CLOCK_MONOTONIC, 23 + LTL_NANOSLEEP_CLOCK_TAI, 24 + LTL_NANOSLEEP_TIMER_ABSTIME, 25 + LTL_RT, 26 + LTL_SLEEP, 27 + LTL_TASK_IS_MIGRATION, 28 + LTL_TASK_IS_RCU, 29 + LTL_WAKE, 30 + LTL_WOKEN_BY_EQUAL_OR_HIGHER_PRIO, 31 + LTL_WOKEN_BY_HARDIRQ, 32 + LTL_WOKEN_BY_NMI, 33 + LTL_NUM_ATOM 34 + }; 35 + static_assert(LTL_NUM_ATOM <= RV_MAX_LTL_ATOM); 36 + 37 + static const char *ltl_atom_str(enum ltl_atom atom) 38 + { 39 + static const char *const names[] = { 40 + "ab_sl", 41 + "bl_on_rt_mu", 42 + "cl_na", 43 + "fu_lo_pi", 44 + "fu_wa", 45 + "ker_th", 46 + "kth_sh_st", 47 + "na_cl_mo", 48 + "na_cl_ta", 49 + "na_ti_ab", 50 + "rt", 51 + "sl", 52 + "ta_mi", 53 + "ta_rc", 54 + "wak", 55 + "wo_eq_hi_pr", 56 + "wo_ha", 57 + "wo_nm", 58 + }; 59 + 60 + return names[atom]; 61 + } 62 + 63 + enum ltl_buchi_state { 64 + S0, 65 + S1, 66 + S2, 67 + S3, 68 + S4, 69 + S5, 70 + S6, 71 + S7, 72 + RV_NUM_BA_STATES 73 + }; 74 + static_assert(RV_NUM_BA_STATES <= RV_MAX_BA_STATES); 75 + 76 + static void ltl_start(struct task_struct *task, struct ltl_monitor *mon) 77 + { 78 + bool task_is_migration = test_bit(LTL_TASK_IS_MIGRATION, mon->atoms); 79 + bool task_is_rcu = test_bit(LTL_TASK_IS_RCU, mon->atoms); 80 + bool val40 = task_is_rcu || task_is_migration; 81 + bool futex_lock_pi = test_bit(LTL_FUTEX_LOCK_PI, mon->atoms); 82 + bool val41 = futex_lock_pi || val40; 83 + bool block_on_rt_mutex = 
test_bit(LTL_BLOCK_ON_RT_MUTEX, mon->atoms); 84 + bool val5 = block_on_rt_mutex || val41; 85 + bool kthread_should_stop = test_bit(LTL_KTHREAD_SHOULD_STOP, mon->atoms); 86 + bool abort_sleep = test_bit(LTL_ABORT_SLEEP, mon->atoms); 87 + bool val32 = abort_sleep || kthread_should_stop; 88 + bool woken_by_nmi = test_bit(LTL_WOKEN_BY_NMI, mon->atoms); 89 + bool val33 = woken_by_nmi || val32; 90 + bool woken_by_hardirq = test_bit(LTL_WOKEN_BY_HARDIRQ, mon->atoms); 91 + bool val34 = woken_by_hardirq || val33; 92 + bool woken_by_equal_or_higher_prio = test_bit(LTL_WOKEN_BY_EQUAL_OR_HIGHER_PRIO, 93 + mon->atoms); 94 + bool val14 = woken_by_equal_or_higher_prio || val34; 95 + bool wake = test_bit(LTL_WAKE, mon->atoms); 96 + bool val13 = !wake; 97 + bool kernel_thread = test_bit(LTL_KERNEL_THREAD, mon->atoms); 98 + bool nanosleep_clock_tai = test_bit(LTL_NANOSLEEP_CLOCK_TAI, mon->atoms); 99 + bool nanosleep_clock_monotonic = test_bit(LTL_NANOSLEEP_CLOCK_MONOTONIC, mon->atoms); 100 + bool val24 = nanosleep_clock_monotonic || nanosleep_clock_tai; 101 + bool nanosleep_timer_abstime = test_bit(LTL_NANOSLEEP_TIMER_ABSTIME, mon->atoms); 102 + bool val25 = nanosleep_timer_abstime && val24; 103 + bool clock_nanosleep = test_bit(LTL_CLOCK_NANOSLEEP, mon->atoms); 104 + bool val18 = clock_nanosleep && val25; 105 + bool futex_wait = test_bit(LTL_FUTEX_WAIT, mon->atoms); 106 + bool val9 = futex_wait || val18; 107 + bool val11 = val9 || kernel_thread; 108 + bool sleep = test_bit(LTL_SLEEP, mon->atoms); 109 + bool val2 = !sleep; 110 + bool rt = test_bit(LTL_RT, mon->atoms); 111 + bool val1 = !rt; 112 + bool val3 = val1 || val2; 113 + 114 + if (val3) 115 + __set_bit(S0, mon->states); 116 + if (val11 && val13) 117 + __set_bit(S1, mon->states); 118 + if (val11 && val14) 119 + __set_bit(S4, mon->states); 120 + if (val5) 121 + __set_bit(S5, mon->states); 122 + } 123 + 124 + static void 125 + ltl_possible_next_states(struct ltl_monitor *mon, unsigned int state, unsigned long *next) 126 + { 127 
+ bool task_is_migration = test_bit(LTL_TASK_IS_MIGRATION, mon->atoms); 128 + bool task_is_rcu = test_bit(LTL_TASK_IS_RCU, mon->atoms); 129 + bool val40 = task_is_rcu || task_is_migration; 130 + bool futex_lock_pi = test_bit(LTL_FUTEX_LOCK_PI, mon->atoms); 131 + bool val41 = futex_lock_pi || val40; 132 + bool block_on_rt_mutex = test_bit(LTL_BLOCK_ON_RT_MUTEX, mon->atoms); 133 + bool val5 = block_on_rt_mutex || val41; 134 + bool kthread_should_stop = test_bit(LTL_KTHREAD_SHOULD_STOP, mon->atoms); 135 + bool abort_sleep = test_bit(LTL_ABORT_SLEEP, mon->atoms); 136 + bool val32 = abort_sleep || kthread_should_stop; 137 + bool woken_by_nmi = test_bit(LTL_WOKEN_BY_NMI, mon->atoms); 138 + bool val33 = woken_by_nmi || val32; 139 + bool woken_by_hardirq = test_bit(LTL_WOKEN_BY_HARDIRQ, mon->atoms); 140 + bool val34 = woken_by_hardirq || val33; 141 + bool woken_by_equal_or_higher_prio = test_bit(LTL_WOKEN_BY_EQUAL_OR_HIGHER_PRIO, 142 + mon->atoms); 143 + bool val14 = woken_by_equal_or_higher_prio || val34; 144 + bool wake = test_bit(LTL_WAKE, mon->atoms); 145 + bool val13 = !wake; 146 + bool kernel_thread = test_bit(LTL_KERNEL_THREAD, mon->atoms); 147 + bool nanosleep_clock_tai = test_bit(LTL_NANOSLEEP_CLOCK_TAI, mon->atoms); 148 + bool nanosleep_clock_monotonic = test_bit(LTL_NANOSLEEP_CLOCK_MONOTONIC, mon->atoms); 149 + bool val24 = nanosleep_clock_monotonic || nanosleep_clock_tai; 150 + bool nanosleep_timer_abstime = test_bit(LTL_NANOSLEEP_TIMER_ABSTIME, mon->atoms); 151 + bool val25 = nanosleep_timer_abstime && val24; 152 + bool clock_nanosleep = test_bit(LTL_CLOCK_NANOSLEEP, mon->atoms); 153 + bool val18 = clock_nanosleep && val25; 154 + bool futex_wait = test_bit(LTL_FUTEX_WAIT, mon->atoms); 155 + bool val9 = futex_wait || val18; 156 + bool val11 = val9 || kernel_thread; 157 + bool sleep = test_bit(LTL_SLEEP, mon->atoms); 158 + bool val2 = !sleep; 159 + bool rt = test_bit(LTL_RT, mon->atoms); 160 + bool val1 = !rt; 161 + bool val3 = val1 || val2; 162 + 163 + switch 
(state) { 164 + case S0: 165 + if (val3) 166 + __set_bit(S0, next); 167 + if (val11 && val13) 168 + __set_bit(S1, next); 169 + if (val11 && val14) 170 + __set_bit(S4, next); 171 + if (val5) 172 + __set_bit(S5, next); 173 + break; 174 + case S1: 175 + if (val11 && val13) 176 + __set_bit(S1, next); 177 + if (val13 && val3) 178 + __set_bit(S2, next); 179 + if (val14 && val3) 180 + __set_bit(S3, next); 181 + if (val11 && val14) 182 + __set_bit(S4, next); 183 + if (val13 && val5) 184 + __set_bit(S6, next); 185 + if (val14 && val5) 186 + __set_bit(S7, next); 187 + break; 188 + case S2: 189 + if (val11 && val13) 190 + __set_bit(S1, next); 191 + if (val13 && val3) 192 + __set_bit(S2, next); 193 + if (val14 && val3) 194 + __set_bit(S3, next); 195 + if (val11 && val14) 196 + __set_bit(S4, next); 197 + if (val13 && val5) 198 + __set_bit(S6, next); 199 + if (val14 && val5) 200 + __set_bit(S7, next); 201 + break; 202 + case S3: 203 + if (val3) 204 + __set_bit(S0, next); 205 + if (val11 && val13) 206 + __set_bit(S1, next); 207 + if (val11 && val14) 208 + __set_bit(S4, next); 209 + if (val5) 210 + __set_bit(S5, next); 211 + break; 212 + case S4: 213 + if (val3) 214 + __set_bit(S0, next); 215 + if (val11 && val13) 216 + __set_bit(S1, next); 217 + if (val11 && val14) 218 + __set_bit(S4, next); 219 + if (val5) 220 + __set_bit(S5, next); 221 + break; 222 + case S5: 223 + if (val3) 224 + __set_bit(S0, next); 225 + if (val11 && val13) 226 + __set_bit(S1, next); 227 + if (val11 && val14) 228 + __set_bit(S4, next); 229 + if (val5) 230 + __set_bit(S5, next); 231 + break; 232 + case S6: 233 + if (val11 && val13) 234 + __set_bit(S1, next); 235 + if (val13 && val3) 236 + __set_bit(S2, next); 237 + if (val14 && val3) 238 + __set_bit(S3, next); 239 + if (val11 && val14) 240 + __set_bit(S4, next); 241 + if (val13 && val5) 242 + __set_bit(S6, next); 243 + if (val14 && val5) 244 + __set_bit(S7, next); 245 + break; 246 + case S7: 247 + if (val3) 248 + __set_bit(S0, next); 249 + if (val11 && val13) 
250 + __set_bit(S1, next); 251 + if (val11 && val14) 252 + __set_bit(S4, next); 253 + if (val5) 254 + __set_bit(S5, next); 255 + break; 256 + } 257 + }
+14
kernel/trace/rv/monitors/sleep/sleep_trace.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + /* 4 + * Snippet to be included in rv_trace.h 5 + */ 6 + 7 + #ifdef CONFIG_RV_MON_SLEEP 8 + DEFINE_EVENT(event_ltl_monitor_id, event_sleep, 9 + TP_PROTO(struct task_struct *task, char *states, char *atoms, char *next), 10 + TP_ARGS(task, states, atoms, next)); 11 + DEFINE_EVENT(error_ltl_monitor_id, error_sleep, 12 + TP_PROTO(struct task_struct *task), 13 + TP_ARGS(task)); 14 + #endif /* CONFIG_RV_MON_SLEEP */
+1
kernel/trace/rv/rv_trace.h
··· 173 173 TP_printk("%s[%d]: violation detected", __get_str(comm), __entry->pid) 174 174 ); 175 175 #include <monitors/pagefault/pagefault_trace.h> 176 + #include <monitors/sleep/sleep_trace.h> 176 177 // Add new monitors based on CONFIG_LTL_MON_EVENTS_ID here 177 178 #endif /* CONFIG_LTL_MON_EVENTS_ID */ 178 179 #endif /* _TRACE_RV_H */
+22
tools/verification/models/rtapp/sleep.ltl
··· 1 + RULE = always ((RT and SLEEP) imply (RT_FRIENDLY_SLEEP or ALLOWLIST)) 2 + 3 + RT_FRIENDLY_SLEEP = (RT_VALID_SLEEP_REASON or KERNEL_THREAD) 4 + and ((not WAKE) until RT_FRIENDLY_WAKE) 5 + 6 + RT_VALID_SLEEP_REASON = FUTEX_WAIT 7 + or RT_FRIENDLY_NANOSLEEP 8 + 9 + RT_FRIENDLY_NANOSLEEP = CLOCK_NANOSLEEP 10 + and NANOSLEEP_TIMER_ABSTIME 11 + and (NANOSLEEP_CLOCK_MONOTONIC or NANOSLEEP_CLOCK_TAI) 12 + 13 + RT_FRIENDLY_WAKE = WOKEN_BY_EQUAL_OR_HIGHER_PRIO 14 + or WOKEN_BY_HARDIRQ 15 + or WOKEN_BY_NMI 16 + or ABORT_SLEEP 17 + or KTHREAD_SHOULD_STOP 18 + 19 + ALLOWLIST = BLOCK_ON_RT_MUTEX 20 + or FUTEX_LOCK_PI 21 + or TASK_IS_RCU 22 + or TASK_IS_MIGRATION