/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PSI_TYPES_H
#define _LINUX_PSI_TYPES_H

#include <linux/kthread.h>
#include <linux/seqlock.h>
#include <linux/types.h>
#include <linux/kref.h>
#include <linux/wait.h>

#ifdef CONFIG_PSI

/* Tracked task states */
enum psi_task_count {
	NR_IOWAIT,
	NR_MEMSTALL,
	NR_RUNNING,
	/*
	 * For IO and CPU stalls the presence of running/oncpu tasks
	 * in the domain means a partial rather than a full stall.
	 * For memory it's not so simple because of page reclaimers:
	 * they are running/oncpu while representing a stall. To tell
	 * whether a domain has productivity left or not, we need to
	 * distinguish between regular running (i.e. productive)
	 * threads and memstall ones.
	 */
	NR_MEMSTALL_RUNNING,
	NR_PSI_TASK_COUNTS = 4,
};

/* Task state bitmasks */
#define TSK_IOWAIT		(1 << NR_IOWAIT)
#define TSK_MEMSTALL		(1 << NR_MEMSTALL)
#define TSK_RUNNING		(1 << NR_RUNNING)
#define TSK_MEMSTALL_RUNNING	(1 << NR_MEMSTALL_RUNNING)

/* Only one task can be scheduled, no corresponding task count */
#define TSK_ONCPU		(1 << NR_PSI_TASK_COUNTS)

/* Resources that workloads could be stalled on */
enum psi_res {
	PSI_IO,
	PSI_MEM,
	PSI_CPU,
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	PSI_IRQ,
#endif
	NR_PSI_RESOURCES,
};

/*
 * Pressure states for each resource:
 *
 * SOME: Stalled tasks & working tasks
 * FULL: Stalled tasks & no working tasks
 */
enum psi_states {
	PSI_IO_SOME,
	PSI_IO_FULL,
	PSI_MEM_SOME,
	PSI_MEM_FULL,
	PSI_CPU_SOME,
	PSI_CPU_FULL,
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	PSI_IRQ_FULL,
#endif
	/* Only per-CPU, to weigh the CPU in the global average: */
	PSI_NONIDLE,
	NR_PSI_STATES,
};

/* Use one bit in the state mask to track TSK_ONCPU */
#define PSI_ONCPU		(1 << NR_PSI_STATES)

/* Flag whether to re-arm avgs_work, see details in get_recent_times() */
#define PSI_STATE_RESCHEDULE	(1 << (NR_PSI_STATES + 1))

enum psi_aggregators {
	PSI_AVGS = 0,
	PSI_POLL,
	NR_PSI_AGGREGATORS,
};

struct psi_group_cpu {
	/* 1st cacheline updated by the scheduler */

	/* States of the tasks belonging to this group */
	unsigned int tasks[NR_PSI_TASK_COUNTS]
			____cacheline_aligned_in_smp;

	/* Aggregate pressure state derived from the tasks */
	u32 state_mask;

	/* Period time sampling buckets for each state of interest (ns) */
	u32 times[NR_PSI_STATES];

	/* Time of last task change in this group (rq_clock) */
	u64 state_start;

	/* 2nd cacheline updated by the aggregator */

	/* Delta detection against the sampling buckets */
	u32 times_prev[NR_PSI_AGGREGATORS][NR_PSI_STATES]
			____cacheline_aligned_in_smp;
};

/* PSI growth tracking window */
struct psi_window {
	/* Window size in ns */
	u64 size;

	/* Start time of the current window in ns */
	u64 start_time;

	/* Value at the start of the window */
	u64 start_value;

	/* Value growth in the previous window */
	u64 prev_growth;
};

struct psi_trigger {
	/* PSI state being monitored by the trigger */
	enum psi_states state;

	/* User-specified threshold in ns */
	u64 threshold;

	/* List node inside triggers list */
	struct list_head node;

	/* Backpointer needed during trigger destruction */
	struct psi_group *group;

	/* Wait queue for polling */
	wait_queue_head_t event_wait;

	/* Kernfs file for cgroup triggers */
	struct kernfs_open_file *of;

	/* Pending event flag */
	int event;

	/* Tracking window */
	struct psi_window win;

	/*
	 * Time last event was generated. Used for rate-limiting
	 * events to one per window
	 */
	u64 last_event_time;

	/* Deferred event(s) from previous ratelimit window */
	bool pending_event;

	/* Trigger type - PSI_AVGS for unprivileged, PSI_POLL for RT */
	enum psi_aggregators aggregator;
};

struct psi_group {
	struct psi_group *parent;
	bool enabled;

	/* Protects data used by the aggregator */
	struct mutex avgs_lock;

	/* Per-cpu task state & time tracking */
	struct psi_group_cpu __percpu *pcpu;

	/* Running pressure averages */
	u64 avg_total[NR_PSI_STATES - 1];
	u64 avg_last_update;
	u64 avg_next_update;

	/* Aggregator work control */
	struct delayed_work avgs_work;

	/* Unprivileged triggers against N*PSI_FREQ windows */
	struct list_head avg_triggers;
	u32 avg_nr_triggers[NR_PSI_STATES - 1];

	/* Total stall times and sampled pressure averages */
	u64 total[NR_PSI_AGGREGATORS][NR_PSI_STATES - 1];
	unsigned long avg[NR_PSI_STATES - 1][3];

	/* Monitor RT polling work control */
	struct task_struct __rcu *rtpoll_task;
	struct timer_list rtpoll_timer;
	wait_queue_head_t rtpoll_wait;
	atomic_t rtpoll_wakeup;
	atomic_t rtpoll_scheduled;

	/* Protects data used by the monitor */
	struct mutex rtpoll_trigger_lock;

	/* Configured RT polling triggers */
	struct list_head rtpoll_triggers;
	u32 rtpoll_nr_triggers[NR_PSI_STATES - 1];
	u32 rtpoll_states;
	u64 rtpoll_min_period;

	/* Total stall times at the start of RT polling monitor activation */
	u64 rtpoll_total[NR_PSI_STATES - 1];
	u64 rtpoll_next_update;
	u64 rtpoll_until;
};

#else /* CONFIG_PSI */

#define NR_PSI_RESOURCES	0

struct psi_group { };

#endif /* CONFIG_PSI */

#endif /* _LINUX_PSI_TYPES_H */
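For context, the following is a minimal standalone sketch (not part of the header, and not the kernel's verbatim code) of how the SOME/FULL semantics documented above can be derived from the per-CPU tasks[] counters in struct psi_group_cpu. It loosely mirrors the state derivation done in kernel/sched/psi.c; the helper name derive_state_mask is hypothetical, the enum values are re-declared locally so the example compiles on its own, and the CPU_SOME condition simplifies away the kernel's PSI_ONCPU bookkeeping.

/* Illustrative userspace mirror of the task-count -> state_mask derivation. */
#include <stdio.h>

enum { NR_IOWAIT, NR_MEMSTALL, NR_RUNNING, NR_MEMSTALL_RUNNING, NR_TASK_COUNTS };
enum { IO_SOME, IO_FULL, MEM_SOME, MEM_FULL, CPU_SOME, CPU_FULL, NONIDLE };

/*
 * SOME: stalled tasks coexist with productive (working) tasks.
 * FULL: stalled tasks and no productive tasks at all.
 *
 * Hypothetical helper: the kernel additionally tracks which task is on the
 * CPU (PSI_ONCPU) and uses that for CPU_SOME/CPU_FULL; that part is
 * approximated here by assuming at most one task can be on the CPU.
 */
static unsigned int derive_state_mask(const unsigned int tasks[NR_TASK_COUNTS])
{
	unsigned int mask = 0;

	if (tasks[NR_IOWAIT]) {
		mask |= 1u << IO_SOME;
		if (!tasks[NR_RUNNING])
			mask |= 1u << IO_FULL;
	}
	if (tasks[NR_MEMSTALL]) {
		mask |= 1u << MEM_SOME;
		/* Every "running" task is a reclaimer: no productivity left. */
		if (tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING])
			mask |= 1u << MEM_FULL;
	}
	if (tasks[NR_RUNNING] > 1)	/* more runnable tasks than the one on-CPU */
		mask |= 1u << CPU_SOME;
	if (tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] || tasks[NR_RUNNING])
		mask |= 1u << NONIDLE;	/* weigh this CPU in the global average */

	return mask;
}

int main(void)
{
	/* One memstall reclaimer currently running, one task waiting on IO. */
	unsigned int tasks[NR_TASK_COUNTS] = { 1, 1, 1, 1 };

	printf("state_mask = %#x\n", derive_state_mask(tasks));
	return 0;
}

Userspace consumes the aggregated results of these types through the documented PSI interface (Documentation/accounting/psi.rst): reading /proc/pressure/{io,memory,cpu} returns the avg[] and total[] values, and writing a line such as "some 150000 1000000" registers a psi_trigger (threshold and window given in usecs, stored as ns in the struct) that can then be poll()ed for POLLPRI events.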