Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * DAMON-based page reclamation
4 *
5 * Author: SeongJae Park <sj@kernel.org>
6 */
7
8#define pr_fmt(fmt) "damon-reclaim: " fmt
9
10#include <linux/damon.h>
11#include <linux/ioport.h>
12#include <linux/module.h>
13#include <linux/sched.h>
14#include <linux/workqueue.h>
15
16#include "modules-common.h"
17
18#ifdef MODULE_PARAM_PREFIX
19#undef MODULE_PARAM_PREFIX
20#endif
21#define MODULE_PARAM_PREFIX "damon_reclaim."
22
23/*
24 * Enable or disable DAMON_RECLAIM.
25 *
26 * You can enable DAMON_RCLAIM by setting the value of this parameter as ``Y``.
27 * Setting it as ``N`` disables DAMON_RECLAIM. Note that DAMON_RECLAIM could
28 * do no real monitoring and reclamation due to the watermarks-based activation
29 * condition. Refer to below descriptions for the watermarks parameter for
30 * this.
31 */
32static bool enabled __read_mostly;
33
34/*
35 * Make DAMON_RECLAIM reads the input parameters again, except ``enabled``.
36 *
37 * Input parameters that updated while DAMON_RECLAIM is running are not applied
38 * by default. Once this parameter is set as ``Y``, DAMON_RECLAIM reads values
39 * of parametrs except ``enabled`` again. Once the re-reading is done, this
40 * parameter is set as ``N``. If invalid parameters are found while the
41 * re-reading, DAMON_RECLAIM will be disabled.
42 */
43static bool commit_inputs __read_mostly;
44module_param(commit_inputs, bool, 0600);
45
46/*
47 * Time threshold for cold memory regions identification in microseconds.
48 *
49 * If a memory region is not accessed for this or longer time, DAMON_RECLAIM
50 * identifies the region as cold, and reclaims. 120 seconds by default.
51 */
52static unsigned long min_age __read_mostly = 120000000;
53module_param(min_age, ulong, 0600);
54
/*
 * Quotas limiting the aggressiveness of the page-out scheme.  Each field is
 * exposed as a writable module parameter by the macro below.
 */
static struct damos_quota damon_reclaim_quota = {
	/* use up to 10 ms time, reclaim up to 128 MiB per 1 sec by default */
	.ms = 10,
	.sz = 128 * 1024 * 1024,
	.reset_interval = 1000,
	/* Within the quota, page out older regions first. */
	.weight_sz = 0,
	.weight_nr_accesses = 0,
	.weight_age = 1
};
DEFINE_DAMON_MODULES_DAMOS_QUOTAS(damon_reclaim_quota);
66
/*
 * Free memory rate watermarks for automatic (de)activation of the scheme.
 * Each field is exposed as a writable module parameter by the macro below.
 */
static struct damos_watermarks damon_reclaim_wmarks = {
	.metric = DAMOS_WMARK_FREE_MEM_RATE,
	.interval = 5000000, /* 5 seconds */
	.high = 500,	/* 50 percent */
	.mid = 400,	/* 40 percent */
	.low = 200,	/* 20 percent */
};
DEFINE_DAMON_MODULES_WMARKS_PARAMS(damon_reclaim_wmarks);
75
/*
 * Monitoring intervals and region-count bounds for the DAMON context of this
 * module.  Each field is exposed as a module parameter by the macro below.
 */
static struct damon_attrs damon_reclaim_mon_attrs = {
	.sample_interval = 5000,	/* 5 ms */
	.aggr_interval = 100000,	/* 100 ms */
	.ops_update_interval = 0,
	.min_nr_regions = 10,
	.max_nr_regions = 1000,
};
DEFINE_DAMON_MODULES_MON_ATTRS_PARAMS(damon_reclaim_mon_attrs);
84
85/*
86 * Start of the target memory region in physical address.
87 *
88 * The start physical address of memory region that DAMON_RECLAIM will do work
89 * against. By default, biggest System RAM is used as the region.
90 */
91static unsigned long monitor_region_start __read_mostly;
92module_param(monitor_region_start, ulong, 0600);
93
94/*
95 * End of the target memory region in physical address.
96 *
97 * The end physical address of memory region that DAMON_RECLAIM will do work
98 * against. By default, biggest System RAM is used as the region.
99 */
100static unsigned long monitor_region_end __read_mostly;
101module_param(monitor_region_end, ulong, 0600);
102
103/*
104 * PID of the DAMON thread
105 *
106 * If DAMON_RECLAIM is enabled, this becomes the PID of the worker thread.
107 * Else, -1.
108 */
109static int kdamond_pid __read_mostly = -1;
110module_param(kdamond_pid, int, 0400);
111
/* Scheme statistics, copied from the scheme after each aggregation. */
static struct damos_stat damon_reclaim_stat;
DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_reclaim_stat,
		reclaim_tried_regions, reclaimed_regions, quota_exceeds);
115
/* DAMON context and monitoring target, constructed by damon_reclaim_init() */
static struct damon_ctx *ctx;
static struct damon_target *target;
118
119static struct damos *damon_reclaim_new_scheme(void)
120{
121 struct damos_access_pattern pattern = {
122 /* Find regions having PAGE_SIZE or larger size */
123 .min_sz_region = PAGE_SIZE,
124 .max_sz_region = ULONG_MAX,
125 /* and not accessed at all */
126 .min_nr_accesses = 0,
127 .max_nr_accesses = 0,
128 /* for min_age or more micro-seconds */
129 .min_age_region = min_age /
130 damon_reclaim_mon_attrs.aggr_interval,
131 .max_age_region = UINT_MAX,
132 };
133
134 return damon_new_scheme(
135 &pattern,
136 /* page out those, as soon as found */
137 DAMOS_PAGEOUT,
138 /* under the quota. */
139 &damon_reclaim_quota,
140 /* (De)activate this according to the watermarks. */
141 &damon_reclaim_wmarks);
142}
143
144static int damon_reclaim_apply_parameters(void)
145{
146 struct damos *scheme;
147 int err = 0;
148
149 err = damon_set_attrs(ctx, &damon_reclaim_mon_attrs);
150 if (err)
151 return err;
152
153 /* Will be freed by next 'damon_set_schemes()' below */
154 scheme = damon_reclaim_new_scheme();
155 if (!scheme)
156 return -ENOMEM;
157 damon_set_schemes(ctx, &scheme, 1);
158
159 return damon_set_region_biggest_system_ram_default(target,
160 &monitor_region_start,
161 &monitor_region_end);
162}
163
164static int damon_reclaim_turn(bool on)
165{
166 int err;
167
168 if (!on) {
169 err = damon_stop(&ctx, 1);
170 if (!err)
171 kdamond_pid = -1;
172 return err;
173 }
174
175 err = damon_reclaim_apply_parameters();
176 if (err)
177 return err;
178
179 err = damon_start(&ctx, 1, true);
180 if (err)
181 return err;
182 kdamond_pid = ctx->kdamond->pid;
183 return 0;
184}
185
186static struct delayed_work damon_reclaim_timer;
187static void damon_reclaim_timer_fn(struct work_struct *work)
188{
189 static bool last_enabled;
190 bool now_enabled;
191
192 now_enabled = enabled;
193 if (last_enabled != now_enabled) {
194 if (!damon_reclaim_turn(now_enabled))
195 last_enabled = now_enabled;
196 else
197 enabled = last_enabled;
198 }
199}
static DECLARE_DELAYED_WORK(damon_reclaim_timer, damon_reclaim_timer_fn);

/* Set once module initialization completed and workqueues can be used. */
static bool damon_reclaim_initialized;
203
204static int damon_reclaim_enabled_store(const char *val,
205 const struct kernel_param *kp)
206{
207 int rc = param_set_bool(val, kp);
208
209 if (rc < 0)
210 return rc;
211
212 /* system_wq might not initialized yet */
213 if (!damon_reclaim_initialized)
214 return rc;
215
216 schedule_delayed_work(&damon_reclaim_timer, 0);
217 return 0;
218}
219
/* Use the custom setter so 'enabled' updates take effect asynchronously. */
static const struct kernel_param_ops enabled_param_ops = {
	.set = damon_reclaim_enabled_store,
	.get = param_get_bool,
};

module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
MODULE_PARM_DESC(enabled,
	"Enable or disable DAMON_RECLAIM (default: disabled)");
228
229static int damon_reclaim_handle_commit_inputs(void)
230{
231 int err;
232
233 if (!commit_inputs)
234 return 0;
235
236 err = damon_reclaim_apply_parameters();
237 commit_inputs = false;
238 return err;
239}
240
241static int damon_reclaim_after_aggregation(struct damon_ctx *c)
242{
243 struct damos *s;
244
245 /* update the stats parameter */
246 damon_for_each_scheme(s, c)
247 damon_reclaim_stat = s->stat;
248
249 return damon_reclaim_handle_commit_inputs();
250}
251
/*
 * DAMON callback that runs after each watermarks check.  Only handles any
 * pending parameter commit request.
 */
static int damon_reclaim_after_wmarks_check(struct damon_ctx *c)
{
	return damon_reclaim_handle_commit_inputs();
}
256
257static int __init damon_reclaim_init(void)
258{
259 ctx = damon_new_ctx();
260 if (!ctx)
261 return -ENOMEM;
262
263 if (damon_select_ops(ctx, DAMON_OPS_PADDR)) {
264 damon_destroy_ctx(ctx);
265 return -EINVAL;
266 }
267
268 ctx->callback.after_wmarks_check = damon_reclaim_after_wmarks_check;
269 ctx->callback.after_aggregation = damon_reclaim_after_aggregation;
270
271 target = damon_new_target();
272 if (!target) {
273 damon_destroy_ctx(ctx);
274 return -ENOMEM;
275 }
276 damon_add_target(ctx, target);
277
278 schedule_delayed_work(&damon_reclaim_timer, 0);
279
280 damon_reclaim_initialized = true;
281 return 0;
282}
283
284module_init(damon_reclaim_init);