1 // SPDX-License-Identifier: GPL-2.0
3 * DAMON-based page reclamation
5 * Author: SeongJae Park <sj@kernel.org>
8 #define pr_fmt(fmt) "damon-reclaim: " fmt
10 #include <linux/damon.h>
11 #include <linux/ioport.h>
12 #include <linux/module.h>
13 #include <linux/sched.h>
14 #include <linux/workqueue.h>
16 #include "modules-common.h"
18 #ifdef MODULE_PARAM_PREFIX
19 #undef MODULE_PARAM_PREFIX
21 #define MODULE_PARAM_PREFIX "damon_reclaim."
24 * Enable or disable DAMON_RECLAIM.
26 * You can enable DAMON_RECLAIM by setting the value of this parameter as ``Y``.
27 * Setting it as ``N`` disables DAMON_RECLAIM. Note that DAMON_RECLAIM could
28 * do no real monitoring and reclamation due to the watermarks-based activation
29 * condition. Refer to below descriptions for the watermarks parameter for
/* Exposed through module_param_cb(enabled, ...) below so that each write schedules the turn-on/off worker. */
32 static bool enabled __read_mostly;
35 * Make DAMON_RECLAIM read the input parameters again, except ``enabled``.
37 * Input parameters that are updated while DAMON_RECLAIM is running are not
38 * applied by default. Once this parameter is set as ``Y``, DAMON_RECLAIM reads
39 * values of parameters except ``enabled`` again. Once the re-reading is done,
40 * this parameter is set as ``N``. If invalid parameters are found while the
41 * re-reading, DAMON_RECLAIM will be disabled.
/* Consumed (and cleared) from the DAMON callback path; see damon_reclaim_handle_commit_inputs() below. */
43 static bool commit_inputs __read_mostly;
44 module_param(commit_inputs, bool, 0600);
47 * Time threshold for cold memory region identification in microseconds.
49 * If a memory region is not accessed for this or longer time, DAMON_RECLAIM
50 * identifies the region as cold, and reclaims. 120 seconds by default.
/* 120000000 usecs == 120 seconds; converted to aggregation intervals in damon_reclaim_new_scheme(). */
52 static unsigned long min_age __read_mostly = 120000000;
53 module_param(min_age, ulong, 0600);
/*
 * Reclamation quota limits: caps how much work the scheme may do per
 * reset_interval.  .sz is in bytes; per the comment below, the interval is
 * one second (reset_interval == 1000, presumably in milliseconds -- confirm
 * against struct damos_quota's field documentation).
 */
55 static struct damos_quota damon_reclaim_quota = {
56 /* use up to 10 ms time, reclaim up to 128 MiB per 1 sec by default */
58 .sz = 128 * 1024 * 1024,
59 .reset_interval = 1000,
60 /* Within the quota, page out older regions first. */
62 .weight_nr_accesses = 0,
/* Expose the quota fields as module parameters with the damon_reclaim. prefix. */
65 DEFINE_DAMON_MODULES_DAMOS_QUOTAS(damon_reclaim_quota);
/*
 * Free-memory-rate watermarks that (de)activate the scheme.  The inline
 * comments indicate the values are in per-thousand (500 == 50 percent),
 * checked every .interval microseconds.
 */
67 static struct damos_watermarks damon_reclaim_wmarks = {
68 .metric = DAMOS_WMARK_FREE_MEM_RATE,
69 .interval = 5000000, /* 5 seconds */
70 .high = 500, /* 50 percent */
71 .mid = 400, /* 40 percent */
72 .low = 200, /* 20 percent */
/* Expose the watermark fields as module parameters. */
74 DEFINE_DAMON_MODULES_WMARKS_PARAMS(damon_reclaim_wmarks);
/*
 * Monitoring attributes: sampling/aggregation cadence and the region-count
 * bound DAMON uses while this module is running.  Intervals are in
 * microseconds per the inline comments.
 */
76 static struct damon_attrs damon_reclaim_mon_attrs = {
77 .sample_interval = 5000, /* 5 ms */
78 .aggr_interval = 100000, /* 100 ms */
/* 0 presumably means "never update the operations set" -- confirm against struct damon_attrs docs. */
79 .ops_update_interval = 0,
81 .max_nr_regions = 1000,
/* Expose the monitoring attributes as module parameters. */
83 DEFINE_DAMON_MODULES_MON_ATTRS_PARAMS(damon_reclaim_mon_attrs);
86 * Start of the target memory region in physical address.
88 * The start physical address of memory region that DAMON_RECLAIM will do work
89 * against. By default, biggest System RAM is used as the region.
/* 0 by default; filled in by damon_set_region_biggest_system_ram_default() when unset. */
91 static unsigned long monitor_region_start __read_mostly;
92 module_param(monitor_region_start, ulong, 0600);
95 * End of the target memory region in physical address.
97 * The end physical address of memory region that DAMON_RECLAIM will do work
98 * against. By default, biggest System RAM is used as the region.
/* 0 by default; filled in by damon_set_region_biggest_system_ram_default() when unset. */
100 static unsigned long monitor_region_end __read_mostly;
101 module_param(monitor_region_end, ulong, 0600);
104 * PID of the DAMON thread
106 * If DAMON_RECLAIM is enabled, this becomes the PID of the worker thread.
/* -1 while disabled; set from ctx->kdamond->pid in damon_reclaim_turn().  0400: read-only from userspace. */
109 static int kdamond_pid __read_mostly = -1;
110 module_param(kdamond_pid, int, 0400);
/* Scheme statistics snapshot, refreshed in damon_reclaim_after_aggregation(). */
112 static struct damos_stat damon_reclaim_stat;
/* Expose the stat fields as read-only module parameters under the given names. */
113 DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_reclaim_stat,
114 reclaim_tried_regions, reclaimed_regions, quota_exceeds);
/* Single module-lifetime monitoring context and its one physical-address-space target. */
116 static struct damon_ctx *ctx;
117 static struct damon_target *target;
/*
 * Allocate a DAMOS scheme that pages out cold regions.
 *
 * The access pattern selects regions of any size that were not accessed at
 * all (nr_accesses == 0) for at least min_age microseconds; the scheme is
 * throttled by damon_reclaim_quota and (de)activated by
 * damon_reclaim_wmarks.  Returns the new scheme, presumably NULL on
 * allocation failure -- confirm against damon_new_scheme().
 */
119 static struct damos *damon_reclaim_new_scheme(void)
121 struct damos_access_pattern pattern = {
122 /* Find regions having PAGE_SIZE or larger size */
123 .min_sz_region = PAGE_SIZE,
124 .max_sz_region = ULONG_MAX,
125 /* and not accessed at all */
126 .min_nr_accesses = 0,
127 .max_nr_accesses = 0,
128 /* for min_age or more micro-seconds */
/* Convert min_age (usecs) into the number of aggregation intervals DAMON tracks age in. */
129 .min_age_region = min_age /
130 damon_reclaim_mon_attrs.aggr_interval,
131 .max_age_region = UINT_MAX,
134 return damon_new_scheme(
136 /* page out those, as soon as found */
138 /* under the quota. */
139 &damon_reclaim_quota,
140 /* (De)activate this according to the watermarks. */
141 &damon_reclaim_wmarks);
/*
 * Push the current module parameter values into the monitoring context:
 * monitoring attributes, a freshly built reclamation scheme, and the target
 * region (defaulting to the biggest System RAM block when the region
 * parameters are unset).  Returns 0 on success, negative error code
 * otherwise.
 */
144 static int damon_reclaim_apply_parameters(void)
146 struct damos *scheme;
149 err = damon_set_attrs(ctx, &damon_reclaim_mon_attrs);
153 /* Will be freed by next 'damon_set_schemes()' below */
154 scheme = damon_reclaim_new_scheme();
/* Hand ownership of 'scheme' to the context. */
157 damon_set_schemes(ctx, &scheme, 1);
/* May update monitor_region_start/end in place when they were left at 0. */
159 return damon_set_region_biggest_system_ram_default(target,
160 &monitor_region_start,
161 &monitor_region_end);
/*
 * Turn DAMON_RECLAIM on or off.  'off' stops the running context via
 * damon_stop(); 'on' re-applies the parameters, starts the context, and
 * records the worker thread's PID in the kdamond_pid parameter.  Returns 0
 * on success, negative error code otherwise.
 */
164 static int damon_reclaim_turn(bool on)
/* Turning off: stop the (single) running context. */
169 err = damon_stop(&ctx, 1);
/* Turning on: refresh all parameters before (re)starting. */
175 err = damon_reclaim_apply_parameters();
179 err = damon_start(&ctx, 1, true);
/* Export the worker's PID for userspace observability. */
182 kdamond_pid = ctx->kdamond->pid;
/* Forward declaration; defined via DECLARE_DELAYED_WORK() below. */
186 static struct delayed_work damon_reclaim_timer;
/*
 * Deferred worker that reconciles the 'enabled' parameter with the actual
 * on/off state.  If the parameter changed since the last run, try to turn
 * DAMON_RECLAIM accordingly; on failure, roll 'enabled' back to the last
 * successfully applied value.
 */
187 static void damon_reclaim_timer_fn(struct work_struct *work)
/* Last value of 'enabled' that was successfully applied. */
189 static bool last_enabled;
192 now_enabled = enabled;
193 if (last_enabled != now_enabled) {
/* damon_reclaim_turn() returning 0 means the transition succeeded. */
194 if (!damon_reclaim_turn(now_enabled))
195 last_enabled = now_enabled;
/* On failure this reverts the userspace-visible parameter. */
197 enabled = last_enabled;
200 static DECLARE_DELAYED_WORK(damon_reclaim_timer, damon_reclaim_timer_fn);
/* Set once damon_reclaim_init() has run; guards against too-early param writes. */
202 static bool damon_reclaim_initialized;
/*
 * 'set' handler for the 'enabled' parameter: store the boolean, then kick
 * the worker that actually turns DAMON_RECLAIM on/off.  Writes arriving
 * before module init only store the value; init schedules the worker itself.
 */
204 static int damon_reclaim_enabled_store(const char *val,
205 const struct kernel_param *kp)
207 int rc = param_set_bool(val, kp)
212 /* system_wq might not be initialized yet */
213 if (!damon_reclaim_initialized)
/* Apply the change asynchronously from workqueue context. */
216 schedule_delayed_work(&damon_reclaim_timer, 0);
/* Custom param ops: plain boolean read, write hooks the turn-on/off worker. */
220 static const struct kernel_param_ops enabled_param_ops = {
221 .set = damon_reclaim_enabled_store,
222 .get = param_get_bool,
/* Register 'enabled' with the custom ops; 0600: root read/write. */
225 module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
226 MODULE_PARM_DESC(enabled,
227 "Enable or disable DAMON_RECLAIM (default: disabled)");
/*
 * If userspace requested a parameter re-read via 'commit_inputs', re-apply
 * all parameters and clear the flag.  Returns the result of the re-apply
 * (0 on success); called from DAMON callback context.
 */
229 static int damon_reclaim_handle_commit_inputs(void)
236 err = damon_reclaim_apply_parameters();
/* Acknowledge the request whether or not it succeeded; userspace sees it flip back to N. */
237 commit_inputs = false;
/*
 * DAMON 'after_aggregation' callback: snapshot the scheme statistics into
 * the module-parameter-visible damon_reclaim_stat, then process any pending
 * commit_inputs request.
 */
241 static int damon_reclaim_after_aggregation(struct damon_ctx *c)
245 /* update the stats parameter */
/* There is a single scheme, so the loop effectively copies that one's stat. */
246 damon_for_each_scheme(s, c)
247 damon_reclaim_stat = s->stat;
249 return damon_reclaim_handle_commit_inputs();
/*
 * DAMON 'after_wmarks_check' callback: lets commit_inputs take effect even
 * while the scheme is deactivated by the watermarks.
 */
252 static int damon_reclaim_after_wmarks_check(struct damon_ctx *c)
254 return damon_reclaim_handle_commit_inputs();
/*
 * Module init: build the monitoring context for the physical address space,
 * install the DAMON callbacks, create and register the single target, then
 * schedule the worker so an 'enabled=Y' given at load time takes effect.
 * Context/target teardown on the visible error paths uses
 * damon_destroy_ctx().
 */
257 static int __init damon_reclaim_init(void)
259 ctx = damon_new_ctx();
/* This module works on the physical address space only. */
263 if (damon_select_ops(ctx, DAMON_OPS_PADDR)) {
264 damon_destroy_ctx(ctx);
268 ctx->callback.after_wmarks_check = damon_reclaim_after_wmarks_check;
269 ctx->callback.after_aggregation = damon_reclaim_after_aggregation;
271 target = damon_new_target();
273 damon_destroy_ctx(ctx);
276 damon_add_target(ctx, target);
/* Pick up an 'enabled' value written before init completed. */
278 schedule_delayed_work(&damon_reclaim_timer, 0);
280 damon_reclaim_initialized = true;
284 module_init(damon_reclaim_init);