1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Memory merging support.
4 *
5 * This code enables dynamic sharing of identical pages found in different
6 * memory areas, even if they are not shared by fork()
7 *
8 * Copyright (C) 2008-2009 Red Hat, Inc.
9 * Authors:
10 * Izik Eidus
11 * Andrea Arcangeli
12 * Chris Wright
13 * Hugh Dickins
14 */
15
16#include <linux/errno.h>
17#include <linux/mm.h>
18#include <linux/mm_inline.h>
19#include <linux/fs.h>
20#include <linux/mman.h>
21#include <linux/sched.h>
22#include <linux/sched/mm.h>
23#include <linux/sched/cputime.h>
24#include <linux/rwsem.h>
25#include <linux/pagemap.h>
26#include <linux/rmap.h>
27#include <linux/spinlock.h>
28#include <linux/xxhash.h>
29#include <linux/delay.h>
30#include <linux/kthread.h>
31#include <linux/wait.h>
32#include <linux/slab.h>
33#include <linux/rbtree.h>
34#include <linux/memory.h>
35#include <linux/mmu_notifier.h>
36#include <linux/swap.h>
37#include <linux/ksm.h>
38#include <linux/hashtable.h>
39#include <linux/freezer.h>
40#include <linux/oom.h>
41#include <linux/numa.h>
42#include <linux/pagewalk.h>
43
44#include <asm/tlbflush.h>
45#include "internal.h"
46#include "mm_slot.h"
47
48#define CREATE_TRACE_POINTS
49#include <trace/events/ksm.h>
50
51#ifdef CONFIG_NUMA
52#define NUMA(x) (x)
53#define DO_NUMA(x) do { (x); } while (0)
54#else
55#define NUMA(x) (0)
56#define DO_NUMA(x) do { } while (0)
57#endif
58
59typedef u8 rmap_age_t;
60
61/**
62 * DOC: Overview
63 *
64 * A few notes about the KSM scanning process,
65 * to make it easier to understand the data structures below:
66 *
67 * In order to reduce excessive scanning, KSM sorts the memory pages by their
68 * contents into a data structure that holds pointers to the pages' locations.
69 *
70 * Since the contents of the pages may change at any moment, KSM cannot just
71 * insert the pages into a normal sorted tree and expect it to find anything.
72 * Therefore KSM uses two data structures - the stable and the unstable tree.
73 *
74 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
75 * by their contents. Because each such page is write-protected, searching on
76 * this tree is fully assured to be working (except when pages are unmapped),
77 * and therefore this tree is called the stable tree.
78 *
79 * The stable tree node includes information required for reverse
80 * mapping from a KSM page to virtual addresses that map this page.
81 *
82 * In order to avoid large latencies of the rmap walks on KSM pages,
83 * KSM maintains two types of nodes in the stable tree:
84 *
85 * * the regular nodes that keep the reverse mapping structures in a
86 * linked list
87 * * the "chains" that link nodes ("dups") that represent the same
88 * write protected memory content, but each "dup" corresponds to a
89 * different KSM page copy of that content
90 *
91 * Internally, the regular nodes, "dups" and "chains" are represented
92 * using the same struct ksm_stable_node structure.
93 *
94 * In addition to the stable tree, KSM uses a second data structure called the
95 * unstable tree: this tree holds pointers to pages which have been found to
96 * be "unchanged for a period of time". The unstable tree sorts these pages
97 * by their contents, but since they are not write-protected, KSM cannot rely
98 * upon the unstable tree to work correctly - the unstable tree is liable to
99 * be corrupted as its contents are modified, and so it is called unstable.
100 *
101 * KSM solves this problem with several techniques:
102 *
103 * 1) The unstable tree is flushed every time KSM completes scanning all
104 * memory areas, and then the tree is rebuilt again from the beginning.
105 * 2) KSM will only insert into the unstable tree pages whose hash value
106 * has not changed since the previous scan of all memory areas.
107 * 3) The unstable tree is a red-black tree - so its balancing is based on the
108 * colors of the nodes and not on their contents, assuring that even when
109 * the tree gets "corrupted" it won't get out of balance, so scanning time
110 * remains the same (also, searching and inserting nodes in an rbtree uses
111 * the same algorithm, so we have no overhead when we flush and rebuild).
112 * 4) KSM never flushes the stable tree, which means that even if it were to
113 * take 10 attempts to find a page in the unstable tree, once it is found,
114 * it is secured in the stable tree. (When we scan a new page, we first
115 * compare it against the stable tree, and then against the unstable tree.)
116 *
117 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
118 * stable trees and multiple unstable trees: one of each for each NUMA node.
119 */
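
/*
 * A simplified sketch (pseudo-C, not the exact code) of how one candidate
 * page moves through the two trees on every scan pass; the real logic is
 * in cmp_and_merge_page() further down, and this omits the zero-page and
 * max_page_sharing handling:
 *
 *	kfolio = stable_tree_search(page);
 *	if (kfolio) {
 *		try_to_merge_with_ksm_page(rmap_item, page, &kfolio->page);
 *		return;				// joined an existing KSM page
 *	}
 *	checksum = calc_checksum(page);
 *	if (checksum != rmap_item->oldchecksum) {
 *		rmap_item->oldchecksum = checksum;	// still volatile: retry later
 *		return;
 *	}
 *	tree_rmap_item = unstable_tree_search_insert(rmap_item, page, &tree_page);
 *	if (tree_rmap_item)
 *		try_to_merge_two_pages(rmap_item, page, tree_rmap_item, tree_page);
 */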
120
121/**
122 * struct ksm_mm_slot - ksm information per mm that is being scanned
123 * @slot: hash lookup from mm to mm_slot
124 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
125 */
126struct ksm_mm_slot {
127 struct mm_slot slot;
128 struct ksm_rmap_item *rmap_list;
129};
130
131/**
132 * struct ksm_scan - cursor for scanning
133 * @mm_slot: the current mm_slot we are scanning
134 * @address: the next address inside that to be scanned
135 * @rmap_list: link to the next rmap to be scanned in the rmap_list
136 * @seqnr: count of completed full scans (needed when removing unstable node)
137 *
138 * There is only the one ksm_scan instance of this cursor structure.
139 */
140struct ksm_scan {
141 struct ksm_mm_slot *mm_slot;
142 unsigned long address;
143 struct ksm_rmap_item **rmap_list;
144 unsigned long seqnr;
145};
146
147/**
148 * struct ksm_stable_node - node of the stable rbtree
149 * @node: rb node of this ksm page in the stable tree
150 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
151 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
152 * @list: linked into migrate_nodes, pending placement in the proper node tree
153 * @hlist: hlist head of rmap_items using this ksm page
154 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
155 * @chain_prune_time: time of the last full garbage collection
156 * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
157 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
158 */
159struct ksm_stable_node {
160 union {
161 struct rb_node node; /* when node of stable tree */
162 struct { /* when listed for migration */
163 struct list_head *head;
164 struct {
165 struct hlist_node hlist_dup;
166 struct list_head list;
167 };
168 };
169 };
170 struct hlist_head hlist;
171 union {
172 unsigned long kpfn;
173 unsigned long chain_prune_time;
174 };
175 /*
176 * STABLE_NODE_CHAIN can be any negative number in
177 * rmap_hlist_len negative range, but better not -1 to be able
178 * to reliably detect underflows.
179 */
180#define STABLE_NODE_CHAIN -1024
181 int rmap_hlist_len;
182#ifdef CONFIG_NUMA
183 int nid;
184#endif
185};
186
187/**
188 * struct ksm_rmap_item - reverse mapping item for virtual addresses
189 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
190 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
191 * @nid: NUMA node id of unstable tree in which linked (may not match page)
192 * @mm: the memory structure this rmap_item is pointing into
193 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
194 * @oldchecksum: previous checksum of the page at that virtual address
195 * @node: rb node of this rmap_item in the unstable tree
196 * @head: pointer to stable_node heading this list in the stable tree
197 * @hlist: link into hlist of rmap_items hanging off that stable_node
198 * @age: number of scan iterations since creation
199 * @remaining_skips: how many scans to skip
200 */
201struct ksm_rmap_item {
202 struct ksm_rmap_item *rmap_list;
203 union {
204 struct anon_vma *anon_vma; /* when stable */
205#ifdef CONFIG_NUMA
206 int nid; /* when node of unstable tree */
207#endif
208 };
209 struct mm_struct *mm;
210 unsigned long address; /* + low bits used for flags below */
211 unsigned int oldchecksum; /* when unstable */
212 rmap_age_t age;
213 rmap_age_t remaining_skips;
214 union {
215 struct rb_node node; /* when node of unstable tree */
216 struct { /* when listed from stable tree */
217 struct ksm_stable_node *head;
218 struct hlist_node hlist;
219 };
220 };
221};
222
223#define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */
224#define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */
225#define STABLE_FLAG 0x200 /* is listed from the stable tree */
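
/*
 * Illustration (simplified): an rmap_item sitting in the unstable tree
 * typically encodes its state in the low bits of @address, roughly as
 *
 *	rmap_item->address = (addr & PAGE_MASK) | UNSTABLE_FLAG |
 *			     (ksm_scan.seqnr & SEQNR_MASK);
 *
 * so (address & PAGE_MASK) recovers the virtual address, while the low
 * bits record which tree the item is in and during which scan it was
 * inserted (used when deciding whether a stale node still needs rb_erase).
 */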
226
227/* The stable and unstable tree heads */
228static struct rb_root one_stable_tree[1] = { RB_ROOT };
229static struct rb_root one_unstable_tree[1] = { RB_ROOT };
230static struct rb_root *root_stable_tree = one_stable_tree;
231static struct rb_root *root_unstable_tree = one_unstable_tree;
232
233/* Recently migrated nodes of stable tree, pending proper placement */
234static LIST_HEAD(migrate_nodes);
235#define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)
236
237#define MM_SLOTS_HASH_BITS 10
238static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
239
240static struct ksm_mm_slot ksm_mm_head = {
241 .slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node),
242};
243static struct ksm_scan ksm_scan = {
244 .mm_slot = &ksm_mm_head,
245};
246
247static struct kmem_cache *rmap_item_cache;
248static struct kmem_cache *stable_node_cache;
249static struct kmem_cache *mm_slot_cache;
250
251/* Default number of pages to scan per batch */
252#define DEFAULT_PAGES_TO_SCAN 100
253
254/* The number of pages scanned */
255static unsigned long ksm_pages_scanned;
256
257/* The number of nodes in the stable tree */
258static unsigned long ksm_pages_shared;
259
260/* The number of page slots additionally sharing those nodes */
261static unsigned long ksm_pages_sharing;
262
263/* The number of nodes in the unstable tree */
264static unsigned long ksm_pages_unshared;
265
266/* The number of rmap_items in use: to calculate pages_volatile */
267static unsigned long ksm_rmap_items;
268
269/* The number of stable_node chains */
270static unsigned long ksm_stable_node_chains;
271
272/* The number of stable_node dups linked to the stable_node chains */
273static unsigned long ksm_stable_node_dups;
274
275/* Delay in pruning stale stable_node_dups in the stable_node_chains */
276static unsigned int ksm_stable_node_chains_prune_millisecs = 2000;
277
278/* Maximum number of page slots sharing a stable node */
279static int ksm_max_page_sharing = 256;
280
281/* Number of pages ksmd should scan in one batch */
282static unsigned int ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
283
284/* Milliseconds ksmd should sleep between batches */
285static unsigned int ksm_thread_sleep_millisecs = 20;
286
287/* Checksum of an empty (zeroed) page */
288static unsigned int zero_checksum __read_mostly;
289
290/* Whether to merge empty (zeroed) pages with actual zero pages */
291static bool ksm_use_zero_pages __read_mostly;
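
/*
 * Most of the tunables above are exported via sysfs as files under
 * /sys/kernel/mm/ksm/ (see the CONFIG_SYSFS code later in this file).
 * A minimal userspace sketch of adjusting them, assuming root and a
 * kernel built with CONFIG_KSM; ksm_knob_write() is only an illustrative
 * helper, not part of any API:
 *
 *	#include <stdio.h>
 *
 *	static int ksm_knob_write(const char *knob, const char *val)
 *	{
 *		char path[128];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", knob);
 *		f = fopen(path, "w");
 *		if (!f)
 *			return -1;
 *		fputs(val, f);
 *		return fclose(f);
 *	}
 *
 *	// e.g. ksm_knob_write("pages_to_scan", "100");
 *	//      ksm_knob_write("sleep_millisecs", "20");
 *	//      ksm_knob_write("run", "1");		// start ksmd
 */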
292
293/* Skip pages that couldn't be de-duplicated previously */
294/* Default to true at least temporarily, for testing */
295static bool ksm_smart_scan = true;
296
297/* The number of zero pages which is placed by KSM */
298atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);
299
300/* The number of pages that have been skipped due to "smart scanning" */
301static unsigned long ksm_pages_skipped;
302
303/* Don't scan more than max pages per batch. */
304static unsigned long ksm_advisor_max_pages_to_scan = 30000;
305
306/* Min CPU for scanning pages per scan */
307#define KSM_ADVISOR_MIN_CPU 10
308
309/* Max CPU for scanning pages per scan */
310static unsigned int ksm_advisor_max_cpu = 70;
311
312/* Target scan time in seconds to analyze all KSM candidate pages. */
313static unsigned long ksm_advisor_target_scan_time = 200;
314
315/* Exponentially weighted moving average. */
316#define EWMA_WEIGHT 30
317
318/**
319 * struct advisor_ctx - metadata for KSM advisor
320 * @start_scan: start time of the current scan
321 * @scan_time: scan time of previous scan
322 * @change: change in percent to pages_to_scan parameter
323 * @cpu_time: cpu time consumed by the ksmd thread in the previous scan
324 */
325struct advisor_ctx {
326 ktime_t start_scan;
327 unsigned long scan_time;
328 unsigned long change;
329 unsigned long long cpu_time;
330};
331static struct advisor_ctx advisor_ctx;
332
333/* Define the different advisors */
334enum ksm_advisor_type {
335 KSM_ADVISOR_NONE,
336 KSM_ADVISOR_SCAN_TIME,
337};
338static enum ksm_advisor_type ksm_advisor;
339
340#ifdef CONFIG_SYSFS
341/*
342 * Only called through the sysfs control interface:
343 */
344
345/* At least scan this many pages per batch. */
346static unsigned long ksm_advisor_min_pages_to_scan = 500;
347
348static void set_advisor_defaults(void)
349{
350 if (ksm_advisor == KSM_ADVISOR_NONE) {
351 ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
352 } else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) {
353 advisor_ctx = (const struct advisor_ctx){ 0 };
354 ksm_thread_pages_to_scan = ksm_advisor_min_pages_to_scan;
355 }
356}
357#endif /* CONFIG_SYSFS */
358
359static inline void advisor_start_scan(void)
360{
361 if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
362 advisor_ctx.start_scan = ktime_get();
363}
364
365/*
366 * Use previous scan time if available, otherwise use current scan time as an
367 * approximation for the previous scan time.
368 */
369static inline unsigned long prev_scan_time(struct advisor_ctx *ctx,
370 unsigned long scan_time)
371{
372 return ctx->scan_time ? ctx->scan_time : scan_time;
373}
374
375/* Calculate exponential weighted moving average */
376static unsigned long ewma(unsigned long prev, unsigned long curr)
377{
378 return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100;
379}
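
/*
 * For example, with EWMA_WEIGHT 30, ewma(1000, 2000) =
 * (70 * 1000 + 30 * 2000) / 100 = 1300: a new sample only pulls the
 * average 30% of the way toward itself, which smooths out spikes in the
 * measured scan times.
 */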
380
381/*
382 * The scan time advisor is based on the current scan rate and the target
383 * scan rate.
384 *
385 * new_pages_to_scan = pages_to_scan * (scan_time / target_scan_time)
386 *
387 * To avoid perturbations it calculates a change factor of previous changes.
388 * A new change factor is calculated for each iteration and it uses an
389 * exponentially weighted moving average. The new pages_to_scan value is
390 * multiplied with that change factor:
391 *
392 * new_pages_to_scan *= change factor
393 *
394 * The new_pages_to_scan value is limited by the cpu min and max values. It
395 * calculates the cpu percent for the last scan and calculates the new
396 * estimated cpu percent cost for the next scan. That value is capped by the
397 * cpu min and max setting.
398 *
399 * In addition the new pages_to_scan value is capped by the max and min
400 * limits.
401 */
402static void scan_time_advisor(void)
403{
404 unsigned int cpu_percent;
405 unsigned long cpu_time;
406 unsigned long cpu_time_diff;
407 unsigned long cpu_time_diff_ms;
408 unsigned long pages;
409 unsigned long per_page_cost;
410 unsigned long factor;
411 unsigned long change;
412 unsigned long last_scan_time;
413 unsigned long scan_time;
414
415 /* Convert scan time to seconds */
416 scan_time = div_s64(ktime_ms_delta(ktime_get(), advisor_ctx.start_scan),
417 MSEC_PER_SEC);
418 scan_time = scan_time ? scan_time : 1;
419
420 /* Calculate CPU consumption of ksmd background thread */
421 cpu_time = task_sched_runtime(current);
422 cpu_time_diff = cpu_time - advisor_ctx.cpu_time;
423 cpu_time_diff_ms = cpu_time_diff / 1000 / 1000;
424
425 cpu_percent = (cpu_time_diff_ms * 100) / (scan_time * 1000);
426 cpu_percent = cpu_percent ? cpu_percent : 1;
427 last_scan_time = prev_scan_time(&advisor_ctx, scan_time);
428
429 /* Calculate scan time as percentage of target scan time */
430 factor = ksm_advisor_target_scan_time * 100 / scan_time;
431 factor = factor ? factor : 1;
432
433 /*
434 * Calculate scan time as percentage of last scan time and use
435 * exponentially weighted average to smooth it
436 */
437 change = scan_time * 100 / last_scan_time;
438 change = change ? change : 1;
439 change = ewma(advisor_ctx.change, change);
440
441 /* Calculate new scan rate based on target scan rate. */
442 pages = ksm_thread_pages_to_scan * 100 / factor;
443 /* Update pages_to_scan by weighted change percentage. */
444 pages = pages * change / 100;
445
446 /* Cap new pages_to_scan value */
447 per_page_cost = ksm_thread_pages_to_scan / cpu_percent;
448 per_page_cost = per_page_cost ? per_page_cost : 1;
449
450 pages = min(pages, per_page_cost * ksm_advisor_max_cpu);
451 pages = max(pages, per_page_cost * KSM_ADVISOR_MIN_CPU);
452 pages = min(pages, ksm_advisor_max_pages_to_scan);
453
454 /* Update advisor context */
455 advisor_ctx.change = change;
456 advisor_ctx.scan_time = scan_time;
457 advisor_ctx.cpu_time = cpu_time;
458
459 ksm_thread_pages_to_scan = pages;
460 trace_ksm_advisor(scan_time, pages, cpu_percent);
461}
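
/*
 * A rough worked example with made-up numbers: with the default 200s
 * target and an actual scan time of 500s, factor = 200 * 100 / 500 = 40,
 * so the raw value becomes pages_to_scan * 100 / 40, i.e. 2.5x the old
 * pages_to_scan. If the previous scan also took 500s, the EWMA change
 * factor stays at 100% and leaves that 2.5x intact; the result is then
 * clamped by the estimated CPU cost per page (min/max CPU percent) and
 * by ksm_advisor_max_pages_to_scan.
 */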
462
463static void advisor_stop_scan(void)
464{
465 if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
466 scan_time_advisor();
467}
468
469#ifdef CONFIG_NUMA
470/* Zeroed when merging across nodes is not allowed */
471static unsigned int ksm_merge_across_nodes = 1;
472static int ksm_nr_node_ids = 1;
473#else
474#define ksm_merge_across_nodes 1U
475#define ksm_nr_node_ids 1
476#endif
477
478#define KSM_RUN_STOP 0
479#define KSM_RUN_MERGE 1
480#define KSM_RUN_UNMERGE 2
481#define KSM_RUN_OFFLINE 4
482static unsigned long ksm_run = KSM_RUN_STOP;
483static void wait_while_offlining(void);
484
485static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
486static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
487static DEFINE_MUTEX(ksm_thread_mutex);
488static DEFINE_SPINLOCK(ksm_mmlist_lock);
489
490static int __init ksm_slab_init(void)
491{
492 rmap_item_cache = KMEM_CACHE(ksm_rmap_item, 0);
493 if (!rmap_item_cache)
494 goto out;
495
496 stable_node_cache = KMEM_CACHE(ksm_stable_node, 0);
497 if (!stable_node_cache)
498 goto out_free1;
499
500 mm_slot_cache = KMEM_CACHE(ksm_mm_slot, 0);
501 if (!mm_slot_cache)
502 goto out_free2;
503
504 return 0;
505
506out_free2:
507 kmem_cache_destroy(stable_node_cache);
508out_free1:
509 kmem_cache_destroy(rmap_item_cache);
510out:
511 return -ENOMEM;
512}
513
514static void __init ksm_slab_free(void)
515{
516 kmem_cache_destroy(mm_slot_cache);
517 kmem_cache_destroy(stable_node_cache);
518 kmem_cache_destroy(rmap_item_cache);
519 mm_slot_cache = NULL;
520}
521
522static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain)
523{
524 return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
525}
526
527static __always_inline bool is_stable_node_dup(struct ksm_stable_node *dup)
528{
529 return dup->head == STABLE_NODE_DUP_HEAD;
530}
531
532static inline void stable_node_chain_add_dup(struct ksm_stable_node *dup,
533 struct ksm_stable_node *chain)
534{
535 VM_BUG_ON(is_stable_node_dup(dup));
536 dup->head = STABLE_NODE_DUP_HEAD;
537 VM_BUG_ON(!is_stable_node_chain(chain));
538 hlist_add_head(&dup->hlist_dup, &chain->hlist);
539 ksm_stable_node_dups++;
540}
541
542static inline void __stable_node_dup_del(struct ksm_stable_node *dup)
543{
544 VM_BUG_ON(!is_stable_node_dup(dup));
545 hlist_del(&dup->hlist_dup);
546 ksm_stable_node_dups--;
547}
548
549static inline void stable_node_dup_del(struct ksm_stable_node *dup)
550{
551 VM_BUG_ON(is_stable_node_chain(dup));
552 if (is_stable_node_dup(dup))
553 __stable_node_dup_del(dup);
554 else
555 rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
556#ifdef CONFIG_DEBUG_VM
557 dup->head = NULL;
558#endif
559}
560
561static inline struct ksm_rmap_item *alloc_rmap_item(void)
562{
563 struct ksm_rmap_item *rmap_item;
564
565 rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
566 __GFP_NORETRY | __GFP_NOWARN);
567 if (rmap_item)
568 ksm_rmap_items++;
569 return rmap_item;
570}
571
572static inline void free_rmap_item(struct ksm_rmap_item *rmap_item)
573{
574 ksm_rmap_items--;
575 rmap_item->mm->ksm_rmap_items--;
576 rmap_item->mm = NULL; /* debug safety */
577 kmem_cache_free(rmap_item_cache, rmap_item);
578}
579
580static inline struct ksm_stable_node *alloc_stable_node(void)
581{
582 /*
583 * The allocation can take too long with GFP_KERNEL when memory is under
584 * pressure, which may lead to hung task warnings. Adding __GFP_HIGH
585 * grants access to memory reserves, helping to avoid this problem.
586 */
587 return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
588}
589
590static inline void free_stable_node(struct ksm_stable_node *stable_node)
591{
592 VM_BUG_ON(stable_node->rmap_hlist_len &&
593 !is_stable_node_chain(stable_node));
594 kmem_cache_free(stable_node_cache, stable_node);
595}
596
597/*
598 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
599 * page tables after it has passed through ksm_exit() - which, if necessary,
600 * takes mmap_lock briefly to serialize against them. ksm_exit() does not set
601 * a special flag: they can just back out as soon as mm_users goes to zero.
602 * ksm_test_exit() is used throughout to make this test for exit: in some
603 * places for correctness, in some places just to avoid unnecessary work.
604 */
605static inline bool ksm_test_exit(struct mm_struct *mm)
606{
607 return atomic_read(&mm->mm_users) == 0;
608}
609
610/*
611 * We use break_ksm to break COW on a ksm page by triggering unsharing,
612 * such that the ksm page will get replaced by an exclusive anonymous page.
613 *
614 * We take great care only to touch a ksm page, in a VM_MERGEABLE vma,
615 * in case the application has unmapped and remapped mm,addr meanwhile.
616 * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
617 * mmap of /dev/mem, where we would not want to touch it.
618 *
619 * FAULT_FLAG_REMOTE/FOLL_REMOTE are because we do this outside the context
620 * of the process that owns 'vma'. We also do not want to enforce
621 * protection keys here anyway.
622 */
623static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma)
624{
625 vm_fault_t ret = 0;
626
627 if (lock_vma)
628 vma_start_write(vma);
629
630 do {
631 bool ksm_page = false;
632 struct folio_walk fw;
633 struct folio *folio;
634
635 cond_resched();
636 folio = folio_walk_start(&fw, vma, addr,
637 FW_MIGRATION | FW_ZEROPAGE);
638 if (folio) {
639 /* Small folio implies FW_LEVEL_PTE. */
640 if (!folio_test_large(folio) &&
641 (folio_test_ksm(folio) || is_ksm_zero_pte(fw.pte)))
642 ksm_page = true;
643 folio_walk_end(&fw, vma);
644 }
645
646 if (!ksm_page)
647 return 0;
648 ret = handle_mm_fault(vma, addr,
649 FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
650 NULL);
651 } while (!(ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
652 /*
653 * We must loop until we no longer find a KSM page because
654 * handle_mm_fault() may back out if there's any difficulty e.g. if
655 * pte accessed bit gets updated concurrently.
656 *
657 * VM_FAULT_SIGBUS could occur if we race with truncation of the
658 * backing file, which also invalidates anonymous pages: that's
659 * okay, that truncation will have unmapped the KSM page for us.
660 *
661 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
662 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
663 * current task has TIF_MEMDIE set, and will be OOM killed on return
664 * to user; and ksmd, having no mm, would never be chosen for that.
665 *
666 * But if the mm is in a limited mem_cgroup, then the fault may fail
667 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
668 * even ksmd can fail in this way - though it's usually breaking ksm
669 * just to undo a merge it made a moment before, so unlikely to oom.
670 *
671 * That's a pity: we might therefore have more kernel pages allocated
672 * than we're counting as nodes in the stable tree; but ksm_do_scan
673 * will retry to break_cow on each pass, so should recover the page
674 * in due course. The important thing is to not let VM_MERGEABLE
675 * be cleared while any such pages might remain in the area.
676 */
677 return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
678}
679
680static bool ksm_compatible(const struct file *file, vm_flags_t vm_flags)
681{
682 if (vm_flags & (VM_SHARED | VM_MAYSHARE | VM_SPECIAL |
683 VM_HUGETLB | VM_DROPPABLE))
684 return false; /* just ignore the advice */
685
686 if (file_is_dax(file))
687 return false;
688
689#ifdef VM_SAO
690 if (vm_flags & VM_SAO)
691 return false;
692#endif
693#ifdef VM_SPARC_ADI
694 if (vm_flags & VM_SPARC_ADI)
695 return false;
696#endif
697
698 return true;
699}
700
701static bool vma_ksm_compatible(struct vm_area_struct *vma)
702{
703 return ksm_compatible(vma->vm_file, vma->vm_flags);
704}
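
/*
 * A VMA is only considered here after userspace has opted in, either with
 * madvise(MADV_MERGEABLE) or with prctl(PR_SET_MEMORY_MERGE). A minimal,
 * illustrative userspace sketch (alloc_mergeable() is a made-up helper):
 *
 *	#include <stddef.h>
 *	#include <sys/mman.h>
 *
 *	static void *alloc_mergeable(size_t len)
 *	{
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (p == MAP_FAILED)
 *			return NULL;
 *		if (madvise(p, len, MADV_MERGEABLE)) {	// sets VM_MERGEABLE
 *			munmap(p, len);
 *			return NULL;
 *		}
 *		return p;
 *	}
 */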
705
706static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
707 unsigned long addr)
708{
709 struct vm_area_struct *vma;
710 if (ksm_test_exit(mm))
711 return NULL;
712 vma = vma_lookup(mm, addr);
713 if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
714 return NULL;
715 return vma;
716}
717
718static void break_cow(struct ksm_rmap_item *rmap_item)
719{
720 struct mm_struct *mm = rmap_item->mm;
721 unsigned long addr = rmap_item->address;
722 struct vm_area_struct *vma;
723
724 /*
725 * It is not an accident that whenever we want to break COW
726 * to undo, we also need to drop a reference to the anon_vma.
727 */
728 put_anon_vma(rmap_item->anon_vma);
729
730 mmap_read_lock(mm);
731 vma = find_mergeable_vma(mm, addr);
732 if (vma)
733 break_ksm(vma, addr, false);
734 mmap_read_unlock(mm);
735}
736
737static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)
738{
739 struct mm_struct *mm = rmap_item->mm;
740 unsigned long addr = rmap_item->address;
741 struct vm_area_struct *vma;
742 struct page *page = NULL;
743 struct folio_walk fw;
744 struct folio *folio;
745
746 mmap_read_lock(mm);
747 vma = find_mergeable_vma(mm, addr);
748 if (!vma)
749 goto out;
750
751 folio = folio_walk_start(&fw, vma, addr, 0);
752 if (folio) {
753 if (!folio_is_zone_device(folio) &&
754 folio_test_anon(folio)) {
755 folio_get(folio);
756 page = fw.page;
757 }
758 folio_walk_end(&fw, vma);
759 }
760out:
761 if (page) {
762 flush_anon_page(vma, page, addr);
763 flush_dcache_page(page);
764 }
765 mmap_read_unlock(mm);
766 return page;
767}
768
769/*
770 * This helper is used for getting right index into array of tree roots.
771 * When merge_across_nodes knob is set to 1, there are only two rb-trees for
772 * stable and unstable pages from all nodes with roots in index 0. Otherwise,
773 * every node has its own stable and unstable tree.
774 */
775static inline int get_kpfn_nid(unsigned long kpfn)
776{
777 return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
778}
779
780static struct ksm_stable_node *alloc_stable_node_chain(struct ksm_stable_node *dup,
781 struct rb_root *root)
782{
783 struct ksm_stable_node *chain = alloc_stable_node();
784 VM_BUG_ON(is_stable_node_chain(dup));
785 if (likely(chain)) {
786 INIT_HLIST_HEAD(&chain->hlist);
787 chain->chain_prune_time = jiffies;
788 chain->rmap_hlist_len = STABLE_NODE_CHAIN;
789#if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
790 chain->nid = NUMA_NO_NODE; /* debug */
791#endif
792 ksm_stable_node_chains++;
793
794 /*
795 * Put the stable node chain in the first dimension of
796 * the stable tree and at the same time remove the old
797 * stable node.
798 */
799 rb_replace_node(&dup->node, &chain->node, root);
800
801 /*
802 * Move the old stable node to the second dimension
803 * queued in the hlist_dup. The invariant is that all
804 * dup stable_nodes in the chain->hlist point to pages
805 * that are write protected and have the exact same
806 * content.
807 */
808 stable_node_chain_add_dup(dup, chain);
809 }
810 return chain;
811}
812
813static inline void free_stable_node_chain(struct ksm_stable_node *chain,
814 struct rb_root *root)
815{
816 rb_erase(&chain->node, root);
817 free_stable_node(chain);
818 ksm_stable_node_chains--;
819}
820
821static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
822{
823 struct ksm_rmap_item *rmap_item;
824
825 /* check it's not STABLE_NODE_CHAIN or negative */
826 BUG_ON(stable_node->rmap_hlist_len < 0);
827
828 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
829 if (rmap_item->hlist.next) {
830 ksm_pages_sharing--;
831 trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm);
832 } else {
833 ksm_pages_shared--;
834 }
835
836 rmap_item->mm->ksm_merging_pages--;
837
838 VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
839 stable_node->rmap_hlist_len--;
840 put_anon_vma(rmap_item->anon_vma);
841 rmap_item->address &= PAGE_MASK;
842 cond_resched();
843 }
844
845 /*
846 * We need the second aligned pointer of the migrate_nodes
847 * list_head to stay clear from the rb_parent_color union
848 * (aligned and different than any node) and also different
849 * from &migrate_nodes. This will verify that future list.h changes
850 * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it.
851 */
852 BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
853 BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
854
855 trace_ksm_remove_ksm_page(stable_node->kpfn);
856 if (stable_node->head == &migrate_nodes)
857 list_del(&stable_node->list);
858 else
859 stable_node_dup_del(stable_node);
860 free_stable_node(stable_node);
861}
862
863enum ksm_get_folio_flags {
864 KSM_GET_FOLIO_NOLOCK,
865 KSM_GET_FOLIO_LOCK,
866 KSM_GET_FOLIO_TRYLOCK
867};
868
869/*
870 * ksm_get_folio: checks if the page indicated by the stable node
871 * is still its ksm page, despite having held no reference to it.
872 * In which case we can trust the content of the page, and it
873 * returns the gotten page; but if the page has now been zapped,
874 * remove the stale node from the stable tree and return NULL.
875 * But beware, the stable node's page might be being migrated.
876 *
877 * You would expect the stable_node to hold a reference to the ksm page.
878 * But if it increments the page's count, swapping out has to wait for
879 * ksmd to come around again before it can free the page, which may take
880 * seconds or even minutes: much too unresponsive. So instead we use a
881 * "keyhole reference": access to the ksm page from the stable node peeps
882 * out through its keyhole to see if that page still holds the right key,
883 * pointing back to this stable node. This relies on freeing a PageAnon
884 * page to reset its page->mapping to NULL, and relies on no other use of
885 * a page to put something that might look like our key in page->mapping
886 * while that page is on its way to being freed; but it is an anomaly to bear in mind.
887 */
888static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node,
889 enum ksm_get_folio_flags flags)
890{
891 struct folio *folio;
892 void *expected_mapping;
893 unsigned long kpfn;
894
895 expected_mapping = (void *)((unsigned long)stable_node |
896 FOLIO_MAPPING_KSM);
897again:
898 kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
899 folio = pfn_folio(kpfn);
900 if (READ_ONCE(folio->mapping) != expected_mapping)
901 goto stale;
902
903 /*
904 * We cannot do anything with the page while its refcount is 0.
905 * Usually 0 means free, or tail of a higher-order page: in which
906 * case this node is no longer referenced, and should be freed;
907 * however, it might mean that the page is under page_ref_freeze().
908 * The __remove_mapping() case is easy, again the node is now stale;
909 * the same is in reuse_ksm_page() case; but if page is swapcache
910 * in folio_migrate_mapping(), it might still be our page,
911 * in which case it's essential to keep the node.
912 */
913 while (!folio_try_get(folio)) {
914 /*
915 * Another check for folio->mapping != expected_mapping
916 * would work here too. We have chosen to test the
917 * swapcache flag to optimize the common case, when the
918 * folio is or is about to be freed: the swapcache flag
919 * is cleared (under spin_lock_irq) in the ref_freeze
920 * section of __remove_mapping(); but anon folio->mapping
921 * is reset to NULL later, in free_pages_prepare().
922 */
923 if (!folio_test_swapcache(folio))
924 goto stale;
925 cpu_relax();
926 }
927
928 if (READ_ONCE(folio->mapping) != expected_mapping) {
929 folio_put(folio);
930 goto stale;
931 }
932
933 if (flags == KSM_GET_FOLIO_TRYLOCK) {
934 if (!folio_trylock(folio)) {
935 folio_put(folio);
936 return ERR_PTR(-EBUSY);
937 }
938 } else if (flags == KSM_GET_FOLIO_LOCK)
939 folio_lock(folio);
940
941 if (flags != KSM_GET_FOLIO_NOLOCK) {
942 if (READ_ONCE(folio->mapping) != expected_mapping) {
943 folio_unlock(folio);
944 folio_put(folio);
945 goto stale;
946 }
947 }
948 return folio;
949
950stale:
951 /*
952 * We come here from above when folio->mapping or the swapcache flag
953 * suggests that the node is stale; but it might be under migration.
954 * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(),
955 * before checking whether node->kpfn has been changed.
956 */
957 smp_rmb();
958 if (READ_ONCE(stable_node->kpfn) != kpfn)
959 goto again;
960 remove_node_from_stable_tree(stable_node);
961 return NULL;
962}
963
964/*
965 * Removing rmap_item from stable or unstable tree.
966 * This function will clean the information from the stable/unstable tree.
967 */
968static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item)
969{
970 if (rmap_item->address & STABLE_FLAG) {
971 struct ksm_stable_node *stable_node;
972 struct folio *folio;
973
974 stable_node = rmap_item->head;
975 folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
976 if (!folio)
977 goto out;
978
979 hlist_del(&rmap_item->hlist);
980 folio_unlock(folio);
981 folio_put(folio);
982
983 if (!hlist_empty(&stable_node->hlist))
984 ksm_pages_sharing--;
985 else
986 ksm_pages_shared--;
987
988 rmap_item->mm->ksm_merging_pages--;
989
990 VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
991 stable_node->rmap_hlist_len--;
992
993 put_anon_vma(rmap_item->anon_vma);
994 rmap_item->head = NULL;
995 rmap_item->address &= PAGE_MASK;
996
997 } else if (rmap_item->address & UNSTABLE_FLAG) {
998 unsigned char age;
999 /*
1000 * Usually ksmd can and must skip the rb_erase, because
1001 * root_unstable_tree was already reset to RB_ROOT.
1002 * But be careful when an mm is exiting: do the rb_erase
1003 * if this rmap_item was inserted by this scan, rather
1004 * than left over from before.
1005 */
1006 age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
1007 BUG_ON(age > 1);
1008 if (!age)
1009 rb_erase(&rmap_item->node,
1010 root_unstable_tree + NUMA(rmap_item->nid));
1011 ksm_pages_unshared--;
1012 rmap_item->address &= PAGE_MASK;
1013 }
1014out:
1015 cond_resched(); /* we're called from many long loops */
1016}
1017
1018static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list)
1019{
1020 while (*rmap_list) {
1021 struct ksm_rmap_item *rmap_item = *rmap_list;
1022 *rmap_list = rmap_item->rmap_list;
1023 remove_rmap_item_from_tree(rmap_item);
1024 free_rmap_item(rmap_item);
1025 }
1026}
1027
1028/*
1029 * Though it's very tempting to unmerge rmap_items from stable tree rather
1030 * than check every pte of a given vma, the locking doesn't quite work for
1031 * that - an rmap_item is assigned to the stable tree after inserting ksm
1032 * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
1033 * rmap_items from parent to child at fork time (so as not to waste time
1034 * if exit comes before the next scan reaches it).
1035 *
1036 * Similarly, although we'd like to remove rmap_items (so updating counts
1037 * and freeing memory) when unmerging an area, it's easier to leave that
1038 * to the next pass of ksmd - consider, for example, how ksmd might be
1039 * in cmp_and_merge_page on one of the rmap_items we would be removing.
1040 */
1041static int unmerge_ksm_pages(struct vm_area_struct *vma,
1042 unsigned long start, unsigned long end, bool lock_vma)
1043{
1044 unsigned long addr;
1045 int err = 0;
1046
1047 for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
1048 if (ksm_test_exit(vma->vm_mm))
1049 break;
1050 if (signal_pending(current))
1051 err = -ERESTARTSYS;
1052 else
1053 err = break_ksm(vma, addr, lock_vma);
1054 }
1055 return err;
1056}
1057
1058static inline
1059struct ksm_stable_node *folio_stable_node(const struct folio *folio)
1060{
1061 return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
1062}
1063
1064static inline struct ksm_stable_node *page_stable_node(struct page *page)
1065{
1066 return folio_stable_node(page_folio(page));
1067}
1068
1069static inline void folio_set_stable_node(struct folio *folio,
1070 struct ksm_stable_node *stable_node)
1071{
1072 VM_WARN_ON_FOLIO(folio_test_anon(folio) && PageAnonExclusive(&folio->page), folio);
1073 folio->mapping = (void *)((unsigned long)stable_node | FOLIO_MAPPING_KSM);
1074}
1075
1076#ifdef CONFIG_SYSFS
1077/*
1078 * Only called through the sysfs control interface:
1079 */
1080static int remove_stable_node(struct ksm_stable_node *stable_node)
1081{
1082 struct folio *folio;
1083 int err;
1084
1085 folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
1086 if (!folio) {
1087 /*
1088 * ksm_get_folio did remove_node_from_stable_tree itself.
1089 */
1090 return 0;
1091 }
1092
1093 /*
1094 * Page could be still mapped if this races with __mmput() running in
1095 * between ksm_exit() and exit_mmap(). Just refuse to let
1096 * merge_across_nodes/max_page_sharing be switched.
1097 */
1098 err = -EBUSY;
1099 if (!folio_mapped(folio)) {
1100 /*
1101 * The stable node did not yet appear stale to ksm_get_folio(),
1102 * since that allows for an unmapped ksm folio to be recognized
1103 * right up until it is freed; but the node is safe to remove.
1104 * This folio might be in an LRU cache waiting to be freed,
1105 * or it might be in the swapcache (perhaps under writeback),
1106 * or it might have been removed from swapcache a moment ago.
1107 */
1108 folio_set_stable_node(folio, NULL);
1109 remove_node_from_stable_tree(stable_node);
1110 err = 0;
1111 }
1112
1113 folio_unlock(folio);
1114 folio_put(folio);
1115 return err;
1116}
1117
1118static int remove_stable_node_chain(struct ksm_stable_node *stable_node,
1119 struct rb_root *root)
1120{
1121 struct ksm_stable_node *dup;
1122 struct hlist_node *hlist_safe;
1123
1124 if (!is_stable_node_chain(stable_node)) {
1125 VM_BUG_ON(is_stable_node_dup(stable_node));
1126 if (remove_stable_node(stable_node))
1127 return true;
1128 else
1129 return false;
1130 }
1131
1132 hlist_for_each_entry_safe(dup, hlist_safe,
1133 &stable_node->hlist, hlist_dup) {
1134 VM_BUG_ON(!is_stable_node_dup(dup));
1135 if (remove_stable_node(dup))
1136 return true;
1137 }
1138 BUG_ON(!hlist_empty(&stable_node->hlist));
1139 free_stable_node_chain(stable_node, root);
1140 return false;
1141}
1142
1143static int remove_all_stable_nodes(void)
1144{
1145 struct ksm_stable_node *stable_node, *next;
1146 int nid;
1147 int err = 0;
1148
1149 for (nid = 0; nid < ksm_nr_node_ids; nid++) {
1150 while (root_stable_tree[nid].rb_node) {
1151 stable_node = rb_entry(root_stable_tree[nid].rb_node,
1152 struct ksm_stable_node, node);
1153 if (remove_stable_node_chain(stable_node,
1154 root_stable_tree + nid)) {
1155 err = -EBUSY;
1156 break; /* proceed to next nid */
1157 }
1158 cond_resched();
1159 }
1160 }
1161 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
1162 if (remove_stable_node(stable_node))
1163 err = -EBUSY;
1164 cond_resched();
1165 }
1166 return err;
1167}
1168
1169static int unmerge_and_remove_all_rmap_items(void)
1170{
1171 struct ksm_mm_slot *mm_slot;
1172 struct mm_slot *slot;
1173 struct mm_struct *mm;
1174 struct vm_area_struct *vma;
1175 int err = 0;
1176
1177 spin_lock(&ksm_mmlist_lock);
1178 slot = list_entry(ksm_mm_head.slot.mm_node.next,
1179 struct mm_slot, mm_node);
1180 ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
1181 spin_unlock(&ksm_mmlist_lock);
1182
1183 for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head;
1184 mm_slot = ksm_scan.mm_slot) {
1185 VMA_ITERATOR(vmi, mm_slot->slot.mm, 0);
1186
1187 mm = mm_slot->slot.mm;
1188 mmap_read_lock(mm);
1189
1190 /*
1191 * Exit right away if mm is exiting to avoid lockdep issue in
1192 * the maple tree
1193 */
1194 if (ksm_test_exit(mm))
1195 goto mm_exiting;
1196
1197 for_each_vma(vmi, vma) {
1198 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
1199 continue;
1200 err = unmerge_ksm_pages(vma,
1201 vma->vm_start, vma->vm_end, false);
1202 if (err)
1203 goto error;
1204 }
1205
1206mm_exiting:
1207 remove_trailing_rmap_items(&mm_slot->rmap_list);
1208 mmap_read_unlock(mm);
1209
1210 spin_lock(&ksm_mmlist_lock);
1211 slot = list_entry(mm_slot->slot.mm_node.next,
1212 struct mm_slot, mm_node);
1213 ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
1214 if (ksm_test_exit(mm)) {
1215 hash_del(&mm_slot->slot.hash);
1216 list_del(&mm_slot->slot.mm_node);
1217 spin_unlock(&ksm_mmlist_lock);
1218
1219 mm_slot_free(mm_slot_cache, mm_slot);
1220 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
1221 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
1222 mmdrop(mm);
1223 } else
1224 spin_unlock(&ksm_mmlist_lock);
1225 }
1226
1227 /* Clean up stable nodes, but don't worry if some are still busy */
1228 remove_all_stable_nodes();
1229 ksm_scan.seqnr = 0;
1230 return 0;
1231
1232error:
1233 mmap_read_unlock(mm);
1234 spin_lock(&ksm_mmlist_lock);
1235 ksm_scan.mm_slot = &ksm_mm_head;
1236 spin_unlock(&ksm_mmlist_lock);
1237 return err;
1238}
1239#endif /* CONFIG_SYSFS */
1240
1241static u32 calc_checksum(struct page *page)
1242{
1243 u32 checksum;
1244 void *addr = kmap_local_page(page);
1245 checksum = xxhash(addr, PAGE_SIZE, 0);
1246 kunmap_local(addr);
1247 return checksum;
1248}
1249
1250static int write_protect_page(struct vm_area_struct *vma, struct folio *folio,
1251 pte_t *orig_pte)
1252{
1253 struct mm_struct *mm = vma->vm_mm;
1254 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0);
1255 int swapped;
1256 int err = -EFAULT;
1257 struct mmu_notifier_range range;
1258 bool anon_exclusive;
1259 pte_t entry;
1260
1261 if (WARN_ON_ONCE(folio_test_large(folio)))
1262 return err;
1263
1264 pvmw.address = page_address_in_vma(folio, folio_page(folio, 0), vma);
1265 if (pvmw.address == -EFAULT)
1266 goto out;
1267
1268 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
1269 pvmw.address + PAGE_SIZE);
1270 mmu_notifier_invalidate_range_start(&range);
1271
1272 if (!page_vma_mapped_walk(&pvmw))
1273 goto out_mn;
1274 if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
1275 goto out_unlock;
1276
1277 entry = ptep_get(pvmw.pte);
1278 /*
1279 * Handle PFN swap PTEs, such as device-exclusive ones, that actually
1280 * map pages: give up just like the next folio_walk would.
1281 */
1282 if (unlikely(!pte_present(entry)))
1283 goto out_unlock;
1284
1285 anon_exclusive = PageAnonExclusive(&folio->page);
1286 if (pte_write(entry) || pte_dirty(entry) ||
1287 anon_exclusive || mm_tlb_flush_pending(mm)) {
1288 swapped = folio_test_swapcache(folio);
1289 flush_cache_page(vma, pvmw.address, folio_pfn(folio));
1290 /*
1291 * Ok this is tricky: when get_user_pages_fast() runs it doesn't
1292 * take any lock, therefore the check that we are going to make
1293 * with the pagecount against the mapcount is racy and
1294 * O_DIRECT can happen right after the check.
1295 * So we clear the pte and flush the tlb before the check;
1296 * this assures us that no O_DIRECT can happen after the check
1297 * or in the middle of the check.
1298 *
1299 * No need to notify as we are downgrading page table to read
1300 * only not changing it to point to a new page.
1301 *
1302 * See Documentation/mm/mmu_notifier.rst
1303 */
1304 entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
1305 /*
1306 * Check that no O_DIRECT or similar I/O is in progress on the
1307 * page
1308 */
1309 if (folio_mapcount(folio) + 1 + swapped != folio_ref_count(folio)) {
1310 set_pte_at(mm, pvmw.address, pvmw.pte, entry);
1311 goto out_unlock;
1312 }
1313
1314 /* See folio_try_share_anon_rmap_pte(): clear PTE first. */
1315 if (anon_exclusive &&
1316 folio_try_share_anon_rmap_pte(folio, &folio->page)) {
1317 set_pte_at(mm, pvmw.address, pvmw.pte, entry);
1318 goto out_unlock;
1319 }
1320
1321 if (pte_dirty(entry))
1322 folio_mark_dirty(folio);
1323 entry = pte_mkclean(entry);
1324
1325 if (pte_write(entry))
1326 entry = pte_wrprotect(entry);
1327
1328 set_pte_at(mm, pvmw.address, pvmw.pte, entry);
1329 }
1330 *orig_pte = entry;
1331 err = 0;
1332
1333out_unlock:
1334 page_vma_mapped_walk_done(&pvmw);
1335out_mn:
1336 mmu_notifier_invalidate_range_end(&range);
1337out:
1338 return err;
1339}
1340
1341/**
1342 * replace_page - replace page in vma by new ksm page
1343 * @vma: vma that holds the pte pointing to page
1344 * @page: the page we are replacing by kpage
1345 * @kpage: the ksm page we replace page by
1346 * @orig_pte: the original value of the pte
1347 *
1348 * Returns 0 on success, -EFAULT on failure.
1349 */
1350static int replace_page(struct vm_area_struct *vma, struct page *page,
1351 struct page *kpage, pte_t orig_pte)
1352{
1353 struct folio *kfolio = page_folio(kpage);
1354 struct mm_struct *mm = vma->vm_mm;
1355 struct folio *folio = page_folio(page);
1356 pmd_t *pmd;
1357 pmd_t pmde;
1358 pte_t *ptep;
1359 pte_t newpte;
1360 spinlock_t *ptl;
1361 unsigned long addr;
1362 int err = -EFAULT;
1363 struct mmu_notifier_range range;
1364
1365 addr = page_address_in_vma(folio, page, vma);
1366 if (addr == -EFAULT)
1367 goto out;
1368
1369 pmd = mm_find_pmd(mm, addr);
1370 if (!pmd)
1371 goto out;
1372 /*
1373 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
1374 * without holding anon_vma lock for write. So when looking for a
1375 * genuine pmde (in which to find pte), test present and !THP together.
1376 */
1377 pmde = pmdp_get_lockless(pmd);
1378 if (!pmd_present(pmde) || pmd_trans_huge(pmde))
1379 goto out;
1380
1381 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
1382 addr + PAGE_SIZE);
1383 mmu_notifier_invalidate_range_start(&range);
1384
1385 ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
1386 if (!ptep)
1387 goto out_mn;
1388 if (!pte_same(ptep_get(ptep), orig_pte)) {
1389 pte_unmap_unlock(ptep, ptl);
1390 goto out_mn;
1391 }
1392 VM_BUG_ON_PAGE(PageAnonExclusive(page), page);
1393 VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage),
1394 kfolio);
1395
1396 /*
1397 * No need to check ksm_use_zero_pages here: we can only have a
1398 * zero_page here if ksm_use_zero_pages was enabled already.
1399 */
1400 if (!is_zero_pfn(page_to_pfn(kpage))) {
1401 folio_get(kfolio);
1402 folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE);
1403 newpte = mk_pte(kpage, vma->vm_page_prot);
1404 } else {
1405 /*
1406 * Use pte_mkdirty to mark the zero page mapped by KSM, and then
1407 * we can easily track all KSM-placed zero pages by checking if
1408 * the dirty bit in zero page's PTE is set.
1409 */
1410 newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
1411 ksm_map_zero_page(mm);
1412 /*
1413 * We're replacing an anonymous page with a zero page, which is
1414 * not anonymous. We need to do proper accounting otherwise we
1415 * will get wrong values in /proc, and a BUG message in dmesg
1416 * when tearing down the mm.
1417 */
1418 dec_mm_counter(mm, MM_ANONPAGES);
1419 }
1420
1421 flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep)));
1422 /*
1423 * No need to notify as we are replacing a read only page with another
1424 * read only page with the same content.
1425 *
1426 * See Documentation/mm/mmu_notifier.rst
1427 */
1428 ptep_clear_flush(vma, addr, ptep);
1429 set_pte_at(mm, addr, ptep, newpte);
1430
1431 folio_remove_rmap_pte(folio, page, vma);
1432 if (!folio_mapped(folio))
1433 folio_free_swap(folio);
1434 folio_put(folio);
1435
1436 pte_unmap_unlock(ptep, ptl);
1437 err = 0;
1438out_mn:
1439 mmu_notifier_invalidate_range_end(&range);
1440out:
1441 return err;
1442}
1443
1444/*
1445 * try_to_merge_one_page - take two pages and merge them into one
1446 * @vma: the vma that holds the pte pointing to page
1447 * @page: the PageAnon page that we want to replace with kpage
1448 * @kpage: the KSM page that we want to map instead of page,
1449 * or NULL the first time when we want to use page as kpage.
1450 *
1451 * This function returns 0 if the pages were merged, -EFAULT otherwise.
1452 */
1453static int try_to_merge_one_page(struct vm_area_struct *vma,
1454 struct page *page, struct page *kpage)
1455{
1456 struct folio *folio = page_folio(page);
1457 pte_t orig_pte = __pte(0);
1458 int err = -EFAULT;
1459
1460 if (page == kpage) /* ksm page forked */
1461 return 0;
1462
1463 if (!folio_test_anon(folio))
1464 goto out;
1465
1466 /*
1467 * We need the folio lock to read a stable swapcache flag in
1468 * write_protect_page(). We trylock because we don't want to wait
1469 * here - we prefer to continue scanning and merging different
1470 * pages, then come back to this page when it is unlocked.
1471 */
1472 if (!folio_trylock(folio))
1473 goto out;
1474
1475 if (folio_test_large(folio)) {
1476 if (split_huge_page(page))
1477 goto out_unlock;
1478 folio = page_folio(page);
1479 }
1480
1481 /*
1482 * If this anonymous page is mapped only here, its pte may need
1483 * to be write-protected. If it's mapped elsewhere, all of its
1484 * ptes are necessarily already write-protected. But in either
1485 * case, we need to lock and check page_count is not raised.
1486 */
1487 if (write_protect_page(vma, folio, &orig_pte) == 0) {
1488 if (!kpage) {
1489 /*
1490 * While we hold folio lock, upgrade folio from
1491 * anon to a NULL stable_node with the KSM flag set:
1492 * stable_tree_insert() will update stable_node.
1493 */
1494 folio_set_stable_node(folio, NULL);
1495 folio_mark_accessed(folio);
1496 /*
1497 * Page reclaim just frees a clean folio with no dirty
1498 * ptes: make sure that the ksm page would be swapped.
1499 */
1500 if (!folio_test_dirty(folio))
1501 folio_mark_dirty(folio);
1502 err = 0;
1503 } else if (pages_identical(page, kpage))
1504 err = replace_page(vma, page, kpage, orig_pte);
1505 }
1506
1507out_unlock:
1508 folio_unlock(folio);
1509out:
1510 return err;
1511}
1512
1513/*
1514 * This function returns 0 if the pages were merged or if they are
1515 * no longer merging candidates (e.g., VMA stale), -EFAULT otherwise.
1516 */
1517static int try_to_merge_with_zero_page(struct ksm_rmap_item *rmap_item,
1518 struct page *page)
1519{
1520 struct mm_struct *mm = rmap_item->mm;
1521 int err = -EFAULT;
1522
1523 /*
1524 * Same checksum as an empty page. We attempt to merge it with the
1525 * appropriate zero page if the user enabled this via sysfs.
1526 */
1527 if (ksm_use_zero_pages && (rmap_item->oldchecksum == zero_checksum)) {
1528 struct vm_area_struct *vma;
1529
1530 mmap_read_lock(mm);
1531 vma = find_mergeable_vma(mm, rmap_item->address);
1532 if (vma) {
1533 err = try_to_merge_one_page(vma, page,
1534 ZERO_PAGE(rmap_item->address));
1535 trace_ksm_merge_one_page(
1536 page_to_pfn(ZERO_PAGE(rmap_item->address)),
1537 rmap_item, mm, err);
1538 } else {
1539 /*
1540 * If the vma is out of date, we do not need to
1541 * continue.
1542 */
1543 err = 0;
1544 }
1545 mmap_read_unlock(mm);
1546 }
1547
1548 return err;
1549}
1550
1551/*
1552 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
1553 * but no new kernel page is allocated: kpage must already be a ksm page.
1554 *
1555 * This function returns 0 if the pages were merged, -EFAULT otherwise.
1556 */
1557static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item,
1558 struct page *page, struct page *kpage)
1559{
1560 struct mm_struct *mm = rmap_item->mm;
1561 struct vm_area_struct *vma;
1562 int err = -EFAULT;
1563
1564 mmap_read_lock(mm);
1565 vma = find_mergeable_vma(mm, rmap_item->address);
1566 if (!vma)
1567 goto out;
1568
1569 err = try_to_merge_one_page(vma, page, kpage);
1570 if (err)
1571 goto out;
1572
1573 /* Unstable nid is in union with stable anon_vma: remove first */
1574 remove_rmap_item_from_tree(rmap_item);
1575
1576 /* Must get reference to anon_vma while still holding mmap_lock */
1577 rmap_item->anon_vma = vma->anon_vma;
1578 get_anon_vma(vma->anon_vma);
1579out:
1580 mmap_read_unlock(mm);
1581 trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page),
1582 rmap_item, mm, err);
1583 return err;
1584}
1585
1586/*
1587 * try_to_merge_two_pages - take two identical pages and prepare them
1588 * to be merged into one page.
1589 *
1590 * This function returns the kpage if we successfully merged two identical
1591 * pages into one ksm page, NULL otherwise.
1592 *
1593 * Note that this function upgrades page to ksm page: if one of the pages
1594 * is already a ksm page, try_to_merge_with_ksm_page should be used.
1595 */
1596static struct folio *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item,
1597 struct page *page,
1598 struct ksm_rmap_item *tree_rmap_item,
1599 struct page *tree_page)
1600{
1601 int err;
1602
1603 err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
1604 if (!err) {
1605 err = try_to_merge_with_ksm_page(tree_rmap_item,
1606 tree_page, page);
1607 /*
1608 * If that fails, we have a ksm page with only one pte
1609 * pointing to it: so break it.
1610 */
1611 if (err)
1612 break_cow(rmap_item);
1613 }
1614 return err ? NULL : page_folio(page);
1615}
1616
1617static __always_inline
1618bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset)
1619{
1620 VM_BUG_ON(stable_node->rmap_hlist_len < 0);
1621 /*
1622 * Check that at least one mapping still exists, otherwise
1623 * there's not much point in merging and sharing with this
1624 * stable_node, as the underlying tree_page of the other
1625 * sharer is going to be freed soon.
1626 */
1627 return stable_node->rmap_hlist_len &&
1628 stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
1629}
1630
1631static __always_inline
1632bool is_page_sharing_candidate(struct ksm_stable_node *stable_node)
1633{
1634 return __is_page_sharing_candidate(stable_node, 0);
1635}
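
/*
 * For example, with the default ksm_max_page_sharing of 256, a dup whose
 * rmap_hlist_len is 255 still passes is_page_sharing_candidate() (255 + 0
 * < 256), but __is_page_sharing_candidate(dup, 1) fails (255 + 1 is not
 * < 256): it can take the merge currently underway but no further one.
 * That is what the offset argument is used for in stable_node_dup() below.
 */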
1636
1637static struct folio *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
1638 struct ksm_stable_node **_stable_node,
1639 struct rb_root *root,
1640 bool prune_stale_stable_nodes)
1641{
1642 struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node;
1643 struct hlist_node *hlist_safe;
1644 struct folio *folio, *tree_folio = NULL;
1645 int found_rmap_hlist_len;
1646
1647 if (!prune_stale_stable_nodes ||
1648 time_before(jiffies, stable_node->chain_prune_time +
1649 msecs_to_jiffies(
1650 ksm_stable_node_chains_prune_millisecs)))
1651 prune_stale_stable_nodes = false;
1652 else
1653 stable_node->chain_prune_time = jiffies;
1654
1655 hlist_for_each_entry_safe(dup, hlist_safe,
1656 &stable_node->hlist, hlist_dup) {
1657 cond_resched();
1658 /*
1659 * We must walk all stable_node_dup to prune the stale
1660 * stable nodes during lookup.
1661 *
1662 * ksm_get_folio can drop the nodes from the
1663 * stable_node->hlist if they point to freed pages
1664 * (that's why we do a _safe walk). The "dup"
1665 * stable_node parameter itself will be freed from
1666 * under us if it returns NULL.
1667 */
1668 folio = ksm_get_folio(dup, KSM_GET_FOLIO_NOLOCK);
1669 if (!folio)
1670 continue;
1671 /* Pick the best candidate if possible. */
1672 if (!found || (is_page_sharing_candidate(dup) &&
1673 (!is_page_sharing_candidate(found) ||
1674 dup->rmap_hlist_len > found_rmap_hlist_len))) {
1675 if (found)
1676 folio_put(tree_folio);
1677 found = dup;
1678 found_rmap_hlist_len = found->rmap_hlist_len;
1679 tree_folio = folio;
1680 /* skip put_page for found candidate */
1681 if (!prune_stale_stable_nodes &&
1682 is_page_sharing_candidate(found))
1683 break;
1684 continue;
1685 }
1686 folio_put(folio);
1687 }
1688
1689 if (found) {
1690 if (hlist_is_singular_node(&found->hlist_dup, &stable_node->hlist)) {
1691 /*
1692 * If there's not just one entry it would
1693 * corrupt memory, better BUG_ON. In KSM
1694 * context with no lock held it's not even
1695 * fatal.
1696 */
1697 BUG_ON(stable_node->hlist.first->next);
1698
1699 /*
1700 * There's just one entry and it is below the
1701 * deduplication limit so drop the chain.
1702 */
1703 rb_replace_node(&stable_node->node, &found->node,
1704 root);
1705 free_stable_node(stable_node);
1706 ksm_stable_node_chains--;
1707 ksm_stable_node_dups--;
1708 /*
1709 * NOTE: the caller depends on the stable_node
1710 * to be equal to stable_node_dup if the chain
1711 * was collapsed.
1712 */
1713 *_stable_node = found;
1714 /*
1715 * Just for robustness, as stable_node is
1716 * otherwise left as a stable pointer, the
1717 * compiler shall optimize it away at build
1718 * time.
1719 */
1720 stable_node = NULL;
1721 } else if (stable_node->hlist.first != &found->hlist_dup &&
1722 __is_page_sharing_candidate(found, 1)) {
1723 /*
1724 * If the found stable_node dup can accept one
1725 * more future merge (in addition to the one
1726 * that is underway) and is not at the head of
1727 * the chain, put it there so next search will
1728 * be quicker in the !prune_stale_stable_nodes
1729 * case.
1730 *
1731 * NOTE: it would be inaccurate to use nr > 1
1732 * instead of checking the hlist.first pointer
1733 * directly, because in the
1734 * prune_stale_stable_nodes case "nr" isn't
1735 * the position of the found dup in the chain,
1736 * but the total number of dups in the chain.
1737 */
1738 hlist_del(&found->hlist_dup);
1739 hlist_add_head(&found->hlist_dup,
1740 &stable_node->hlist);
1741 }
1742 } else {
1743		/* Its hlist must be empty if no dup was found. */
1744 free_stable_node_chain(stable_node, root);
1745 }
1746
1747 *_stable_node_dup = found;
1748 return tree_folio;
1749}
1750
1751/*
1752 * As with ksm_get_folio, this function can free *_stable_node and
1753 * *_stable_node_dup if the returned folio is NULL.
1754 *
1755 * It can also free and overwrite *_stable_node with the found
1756 * stable_node_dup if the chain is collapsed (in which case
1757 * *_stable_node will be equal to *_stable_node_dup, as if the chain
1758 * had never existed). It's up to the caller to verify that the
1759 * returned folio is not NULL before dereferencing *_stable_node or
1760 * *_stable_node_dup.
1761 *
1762 * *_stable_node_dup is really a second output parameter of this function
1763 * and will be overwritten in all cases; the caller doesn't need to initialize it.
1764 */
1765static struct folio *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
1766 struct ksm_stable_node **_stable_node,
1767 struct rb_root *root,
1768 bool prune_stale_stable_nodes)
1769{
1770 struct ksm_stable_node *stable_node = *_stable_node;
1771
1772 if (!is_stable_node_chain(stable_node)) {
1773 *_stable_node_dup = stable_node;
1774 return ksm_get_folio(stable_node, KSM_GET_FOLIO_NOLOCK);
1775 }
1776 return stable_node_dup(_stable_node_dup, _stable_node, root,
1777 prune_stale_stable_nodes);
1778}
1779
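/*
 * Convenience wrappers around __stable_node_chain(): chain_prune() also
 * prunes stale dups and may collapse the chain (used by stable_tree_search()),
 * while chain() only looks up the best dup (used by stable_tree_insert()).
 */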
1780static __always_inline struct folio *chain_prune(struct ksm_stable_node **s_n_d,
1781 struct ksm_stable_node **s_n,
1782 struct rb_root *root)
1783{
1784 return __stable_node_chain(s_n_d, s_n, root, true);
1785}
1786
1787static __always_inline struct folio *chain(struct ksm_stable_node **s_n_d,
1788 struct ksm_stable_node **s_n,
1789 struct rb_root *root)
1790{
1791 return __stable_node_chain(s_n_d, s_n, root, false);
1792}
1793
1794/*
1795 * stable_tree_search - search for page inside the stable tree
1796 *
1797 * This function checks if there is a page inside the stable tree
1798 * with identical content to the page that we are scanning right now.
1799 *
1800 * This function returns the folio of the matching stable tree node if found,
1801 * ERR_PTR(-EBUSY) if the stable node's folio is being migrated, NULL otherwise.
1802 */
1803static struct folio *stable_tree_search(struct page *page)
1804{
1805 int nid;
1806 struct rb_root *root;
1807 struct rb_node **new;
1808 struct rb_node *parent;
1809 struct ksm_stable_node *stable_node, *stable_node_dup;
1810 struct ksm_stable_node *page_node;
1811 struct folio *folio;
1812
1813 folio = page_folio(page);
1814 page_node = folio_stable_node(folio);
1815 if (page_node && page_node->head != &migrate_nodes) {
1816 /* ksm page forked */
1817 folio_get(folio);
1818 return folio;
1819 }
1820
1821 nid = get_kpfn_nid(folio_pfn(folio));
1822 root = root_stable_tree + nid;
1823again:
1824 new = &root->rb_node;
1825 parent = NULL;
1826
1827 while (*new) {
1828 struct folio *tree_folio;
1829 int ret;
1830
1831 cond_resched();
1832 stable_node = rb_entry(*new, struct ksm_stable_node, node);
1833 tree_folio = chain_prune(&stable_node_dup, &stable_node, root);
1834 if (!tree_folio) {
1835 /*
1836 * If we walked over a stale stable_node,
1837 * ksm_get_folio() will call rb_erase() and it
1838 * may rebalance the tree from under us. So
1839 * restart the search from scratch. Returning
1840 * NULL would be safe too, but we'd generate
1841 * false negative insertions just because some
1842 * stable_node was stale.
1843 */
1844 goto again;
1845 }
1846
1847 ret = memcmp_pages(page, &tree_folio->page);
1848 folio_put(tree_folio);
1849
1850 parent = *new;
1851 if (ret < 0)
1852 new = &parent->rb_left;
1853 else if (ret > 0)
1854 new = &parent->rb_right;
1855 else {
1856 if (page_node) {
1857 VM_BUG_ON(page_node->head != &migrate_nodes);
1858 /*
1859 * If the mapcount of our migrated KSM folio is
1860 * at most 1, we can merge it with another
1861 * KSM folio where we know that we have space
1862 * for one more mapping without exceeding the
1863 * ksm_max_page_sharing limit: see
1864 * chain_prune(). This way, we can avoid adding
1865 * this stable node to the chain.
1866 */
1867 if (folio_mapcount(folio) > 1)
1868 goto chain_append;
1869 }
1870
1871 if (!is_page_sharing_candidate(stable_node_dup)) {
1872 /*
1873 * If the stable_node is a chain and
1874 * we got a payload match in memcmp
1875 * but we cannot merge the scanned
1876 * page in any of the existing
1877 * stable_node dups because they're
1878				 * all full, we need to wait for the
1879 * scanned page to find itself a match
1880 * in the unstable tree to create a
1881 * brand new KSM page to add later to
1882 * the dups of this stable_node.
1883 */
1884 return NULL;
1885 }
1886
1887 /*
1888 * Lock and unlock the stable_node's page (which
1889 * might already have been migrated) so that page
1890 * migration is sure to notice its raised count.
1891 * It would be more elegant to return stable_node
1892 * than kpage, but that involves more changes.
1893 */
1894 tree_folio = ksm_get_folio(stable_node_dup,
1895 KSM_GET_FOLIO_TRYLOCK);
1896
1897 if (PTR_ERR(tree_folio) == -EBUSY)
1898 return ERR_PTR(-EBUSY);
1899
1900 if (unlikely(!tree_folio))
1901 /*
1902 * The tree may have been rebalanced,
1903 * so re-evaluate parent and new.
1904 */
1905 goto again;
1906 folio_unlock(tree_folio);
1907
1908 if (get_kpfn_nid(stable_node_dup->kpfn) !=
1909 NUMA(stable_node_dup->nid)) {
1910 folio_put(tree_folio);
1911 goto replace;
1912 }
1913 return tree_folio;
1914 }
1915 }
1916
1917 if (!page_node)
1918 return NULL;
1919
1920 list_del(&page_node->list);
1921 DO_NUMA(page_node->nid = nid);
1922 rb_link_node(&page_node->node, parent, new);
1923 rb_insert_color(&page_node->node, root);
1924out:
1925 if (is_page_sharing_candidate(page_node)) {
1926 folio_get(folio);
1927 return folio;
1928 } else
1929 return NULL;
1930
1931replace:
1932 /*
1933 * If stable_node was a chain and chain_prune collapsed it,
1934 * stable_node has been updated to be the new regular
1935 * stable_node. A collapse of the chain is indistinguishable
1936 * from the case there was no chain in the stable
1937 * rbtree. Otherwise stable_node is the chain and
1938 * stable_node_dup is the dup to replace.
1939 */
1940 if (stable_node_dup == stable_node) {
1941 VM_BUG_ON(is_stable_node_chain(stable_node_dup));
1942 VM_BUG_ON(is_stable_node_dup(stable_node_dup));
1943 /* there is no chain */
1944 if (page_node) {
1945 VM_BUG_ON(page_node->head != &migrate_nodes);
1946 list_del(&page_node->list);
1947 DO_NUMA(page_node->nid = nid);
1948 rb_replace_node(&stable_node_dup->node,
1949 &page_node->node,
1950 root);
1951 if (is_page_sharing_candidate(page_node))
1952 folio_get(folio);
1953 else
1954 folio = NULL;
1955 } else {
1956 rb_erase(&stable_node_dup->node, root);
1957 folio = NULL;
1958 }
1959 } else {
1960 VM_BUG_ON(!is_stable_node_chain(stable_node));
1961 __stable_node_dup_del(stable_node_dup);
1962 if (page_node) {
1963 VM_BUG_ON(page_node->head != &migrate_nodes);
1964 list_del(&page_node->list);
1965 DO_NUMA(page_node->nid = nid);
1966 stable_node_chain_add_dup(page_node, stable_node);
1967 if (is_page_sharing_candidate(page_node))
1968 folio_get(folio);
1969 else
1970 folio = NULL;
1971 } else {
1972 folio = NULL;
1973 }
1974 }
1975 stable_node_dup->head = &migrate_nodes;
1976 list_add(&stable_node_dup->list, stable_node_dup->head);
1977 return folio;
1978
1979chain_append:
1980 /*
1981 * If stable_node was a chain and chain_prune collapsed it,
1982 * stable_node has been updated to be the new regular
1983 * stable_node. A collapse of the chain is indistinguishable
1984 * from the case there was no chain in the stable
1985 * rbtree. Otherwise stable_node is the chain and
1986 * stable_node_dup is the dup to replace.
1987 */
1988 if (stable_node_dup == stable_node) {
1989 VM_BUG_ON(is_stable_node_dup(stable_node_dup));
1990 /* chain is missing so create it */
1991 stable_node = alloc_stable_node_chain(stable_node_dup,
1992 root);
1993 if (!stable_node)
1994 return NULL;
1995 }
1996 /*
1997 * Add this stable_node dup that was
1998 * migrated to the stable_node chain
1999 * of the current nid for this page
2000 * content.
2001 */
2002 VM_BUG_ON(!is_stable_node_dup(stable_node_dup));
2003 VM_BUG_ON(page_node->head != &migrate_nodes);
2004 list_del(&page_node->list);
2005 DO_NUMA(page_node->nid = nid);
2006 stable_node_chain_add_dup(page_node, stable_node);
2007 goto out;
2008}
2009
2010/*
2011 * stable_tree_insert - insert stable tree node pointing to new ksm page
2012 * into the stable tree.
2013 *
2014 * This function returns the stable tree node just allocated on success,
2015 * NULL otherwise.
2016 */
2017static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)
2018{
2019 int nid;
2020 unsigned long kpfn;
2021 struct rb_root *root;
2022 struct rb_node **new;
2023 struct rb_node *parent;
2024 struct ksm_stable_node *stable_node, *stable_node_dup;
2025 bool need_chain = false;
2026
2027 kpfn = folio_pfn(kfolio);
2028 nid = get_kpfn_nid(kpfn);
2029 root = root_stable_tree + nid;
2030again:
2031 parent = NULL;
2032 new = &root->rb_node;
2033
2034 while (*new) {
2035 struct folio *tree_folio;
2036 int ret;
2037
2038 cond_resched();
2039 stable_node = rb_entry(*new, struct ksm_stable_node, node);
2040 tree_folio = chain(&stable_node_dup, &stable_node, root);
2041 if (!tree_folio) {
2042 /*
2043 * If we walked over a stale stable_node,
2044 * ksm_get_folio() will call rb_erase() and it
2045 * may rebalance the tree from under us. So
2046 * restart the search from scratch. Returning
2047 * NULL would be safe too, but we'd generate
2048 * false negative insertions just because some
2049 * stable_node was stale.
2050 */
2051 goto again;
2052 }
2053
2054 ret = memcmp_pages(&kfolio->page, &tree_folio->page);
2055 folio_put(tree_folio);
2056
2057 parent = *new;
2058 if (ret < 0)
2059 new = &parent->rb_left;
2060 else if (ret > 0)
2061 new = &parent->rb_right;
2062 else {
2063 need_chain = true;
2064 break;
2065 }
2066 }
2067
2068 stable_node_dup = alloc_stable_node();
2069 if (!stable_node_dup)
2070 return NULL;
2071
2072 INIT_HLIST_HEAD(&stable_node_dup->hlist);
2073 stable_node_dup->kpfn = kpfn;
2074 stable_node_dup->rmap_hlist_len = 0;
2075 DO_NUMA(stable_node_dup->nid = nid);
2076 if (!need_chain) {
2077 rb_link_node(&stable_node_dup->node, parent, new);
2078 rb_insert_color(&stable_node_dup->node, root);
2079 } else {
2080 if (!is_stable_node_chain(stable_node)) {
2081 struct ksm_stable_node *orig = stable_node;
2082 /* chain is missing so create it */
2083 stable_node = alloc_stable_node_chain(orig, root);
2084 if (!stable_node) {
2085 free_stable_node(stable_node_dup);
2086 return NULL;
2087 }
2088 }
2089 stable_node_chain_add_dup(stable_node_dup, stable_node);
2090 }
2091
2092 folio_set_stable_node(kfolio, stable_node_dup);
2093
2094 return stable_node_dup;
2095}
2096
2097/*
2098 * unstable_tree_search_insert - search for identical page,
2099 * else insert rmap_item into the unstable tree.
2100 *
2101 * This function searches for a page in the unstable tree identical to the
2102 * page currently being scanned; and if no identical page is found in the
2103 * tree, we insert rmap_item as a new object into the unstable tree.
2104 *
2105 * This function returns pointer to rmap_item found to be identical
2106 * to the currently scanned page, NULL otherwise.
2107 *
2108 * This function does both searching and inserting, because they share
2109 * the same walking algorithm in an rbtree.
2110 */
2111static
2112struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item,
2113 struct page *page,
2114 struct page **tree_pagep)
2115{
2116 struct rb_node **new;
2117 struct rb_root *root;
2118 struct rb_node *parent = NULL;
2119 int nid;
2120
2121 nid = get_kpfn_nid(page_to_pfn(page));
2122 root = root_unstable_tree + nid;
2123 new = &root->rb_node;
2124
2125 while (*new) {
2126 struct ksm_rmap_item *tree_rmap_item;
2127 struct page *tree_page;
2128 int ret;
2129
2130 cond_resched();
2131 tree_rmap_item = rb_entry(*new, struct ksm_rmap_item, node);
2132 tree_page = get_mergeable_page(tree_rmap_item);
2133 if (!tree_page)
2134 return NULL;
2135
2136 /*
2137 * Don't substitute a ksm page for a forked page.
2138 */
2139 if (page == tree_page) {
2140 put_page(tree_page);
2141 return NULL;
2142 }
2143
2144 ret = memcmp_pages(page, tree_page);
2145
2146 parent = *new;
2147 if (ret < 0) {
2148 put_page(tree_page);
2149 new = &parent->rb_left;
2150 } else if (ret > 0) {
2151 put_page(tree_page);
2152 new = &parent->rb_right;
2153 } else if (!ksm_merge_across_nodes &&
2154 page_to_nid(tree_page) != nid) {
2155 /*
2156 * If tree_page has been migrated to another NUMA node,
2157 * it will be flushed out and put in the right unstable
2158 * tree next time: only merge with it when across_nodes.
2159 */
2160 put_page(tree_page);
2161 return NULL;
2162 } else {
2163 *tree_pagep = tree_page;
2164 return tree_rmap_item;
2165 }
2166 }
2167
2168 rmap_item->address |= UNSTABLE_FLAG;
2169 rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
2170 DO_NUMA(rmap_item->nid = nid);
2171 rb_link_node(&rmap_item->node, parent, new);
2172 rb_insert_color(&rmap_item->node, root);
2173
2174 ksm_pages_unshared++;
2175 return NULL;
2176}
2177
2178/*
2179 * stable_tree_append - add another rmap_item to the linked list of
2180 * rmap_items hanging off a given node of the stable tree, all sharing
2181 * the same ksm page.
2182 */
2183static void stable_tree_append(struct ksm_rmap_item *rmap_item,
2184 struct ksm_stable_node *stable_node,
2185 bool max_page_sharing_bypass)
2186{
2187 /*
2188 * rmap won't find this mapping if we don't insert the
2189 * rmap_item in the right stable_node
2190 * duplicate. page_migration could break later if rmap breaks,
2191	 * so we might as well crash here. We really only need to check for
2192	 * rmap_hlist_len == STABLE_NODE_CHAIN, but we might as well check for
2193	 * any other negative value too: an underflow detected here for the
2194	 * first time (and not when decreasing rmap_hlist_len) would be a
2195	 * sign of memory corruption in the stable_node.
2196 */
2197 BUG_ON(stable_node->rmap_hlist_len < 0);
2198
2199 stable_node->rmap_hlist_len++;
2200 if (!max_page_sharing_bypass)
2201		/* possibly non-fatal but unexpected overflow, only warn */
2202 WARN_ON_ONCE(stable_node->rmap_hlist_len >
2203 ksm_max_page_sharing);
2204
2205 rmap_item->head = stable_node;
2206 rmap_item->address |= STABLE_FLAG;
2207 hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
2208
2209 if (rmap_item->hlist.next)
2210 ksm_pages_sharing++;
2211 else
2212 ksm_pages_shared++;
2213
2214 rmap_item->mm->ksm_merging_pages++;
2215}
2216
2217/*
2218 * cmp_and_merge_page - first see if page can be merged into the stable tree;
2219 * if not, compare checksum to previous and if it's the same, see if page can
2220 * be inserted into the unstable tree, or merged with a page already there and
2221 * both transferred to the stable tree.
2222 *
2223 * @page: the page that we are searching for an identical page to.
2224 * @rmap_item: the reverse mapping into the virtual address of this page
2225 */
2226static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item)
2227{
2228 struct ksm_rmap_item *tree_rmap_item;
2229 struct page *tree_page = NULL;
2230 struct ksm_stable_node *stable_node;
2231 struct folio *kfolio;
2232 unsigned int checksum;
2233 int err;
2234 bool max_page_sharing_bypass = false;
2235
2236 stable_node = page_stable_node(page);
2237 if (stable_node) {
2238 if (stable_node->head != &migrate_nodes &&
2239 get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
2240 NUMA(stable_node->nid)) {
2241 stable_node_dup_del(stable_node);
2242 stable_node->head = &migrate_nodes;
2243 list_add(&stable_node->list, stable_node->head);
2244 }
2245 if (stable_node->head != &migrate_nodes &&
2246 rmap_item->head == stable_node)
2247 return;
2248 /*
2249 * If it's a KSM fork, allow it to go over the sharing limit
2250 * without warnings.
2251 */
2252 if (!is_page_sharing_candidate(stable_node))
2253 max_page_sharing_bypass = true;
2254 } else {
2255 remove_rmap_item_from_tree(rmap_item);
2256
2257 /*
2258 * If the hash value of the page has changed from the last time
2259 * we calculated it, this page is changing frequently: therefore we
2260 * don't want to insert it in the unstable tree, and we don't want
2261 * to waste our time searching for something identical to it there.
2262 */
2263 checksum = calc_checksum(page);
2264 if (rmap_item->oldchecksum != checksum) {
2265 rmap_item->oldchecksum = checksum;
2266 return;
2267 }
2268
2269 if (!try_to_merge_with_zero_page(rmap_item, page))
2270 return;
2271 }
2272
2273 /* Start by searching for the folio in the stable tree */
2274 kfolio = stable_tree_search(page);
2275 if (&kfolio->page == page && rmap_item->head == stable_node) {
2276 folio_put(kfolio);
2277 return;
2278 }
2279
2280 remove_rmap_item_from_tree(rmap_item);
2281
2282 if (kfolio) {
2283 if (kfolio == ERR_PTR(-EBUSY))
2284 return;
2285
2286 err = try_to_merge_with_ksm_page(rmap_item, page, &kfolio->page);
2287 if (!err) {
2288 /*
2289 * The page was successfully merged:
2290 * add its rmap_item to the stable tree.
2291 */
2292 folio_lock(kfolio);
2293 stable_tree_append(rmap_item, folio_stable_node(kfolio),
2294 max_page_sharing_bypass);
2295 folio_unlock(kfolio);
2296 }
2297 folio_put(kfolio);
2298 return;
2299 }
2300
2301 tree_rmap_item =
2302 unstable_tree_search_insert(rmap_item, page, &tree_page);
2303 if (tree_rmap_item) {
2304 bool split;
2305
2306 kfolio = try_to_merge_two_pages(rmap_item, page,
2307 tree_rmap_item, tree_page);
2308 /*
2309 * If both pages we tried to merge belong to the same compound
2310 * page, then we actually ended up increasing the reference
2311 * count of the same compound page twice, and split_huge_page
2312 * failed.
2313 * Here we set a flag if that happened, and we use it later to
2314 * try split_huge_page again. Since we call put_page right
2315 * afterwards, the reference count will be correct and
2316 * split_huge_page should succeed.
2317 */
2318 split = PageTransCompound(page)
2319 && compound_head(page) == compound_head(tree_page);
2320 put_page(tree_page);
2321 if (kfolio) {
2322 /*
2323 * The pages were successfully merged: insert new
2324 * node in the stable tree and add both rmap_items.
2325 */
2326 folio_lock(kfolio);
2327 stable_node = stable_tree_insert(kfolio);
2328 if (stable_node) {
2329 stable_tree_append(tree_rmap_item, stable_node,
2330 false);
2331 stable_tree_append(rmap_item, stable_node,
2332 false);
2333 }
2334 folio_unlock(kfolio);
2335
2336 /*
2337 * If we fail to insert the page into the stable tree,
2338 * we will have 2 virtual addresses that are pointing
2339 * to a ksm page left outside the stable tree,
2340 * in which case we need to break_cow on both.
2341 */
2342 if (!stable_node) {
2343 break_cow(tree_rmap_item);
2344 break_cow(rmap_item);
2345 }
2346 } else if (split) {
2347 /*
2348 * We are here if we tried to merge two pages and
2349 * failed because they both belonged to the same
2350 * compound page. We will split the page now, but no
2351 * merging will take place.
2352 * We do not want to add the cost of a full lock; if
2353 * the page is locked, it is better to skip it and
2354 * perhaps try again later.
2355 */
2356 if (!trylock_page(page))
2357 return;
2358 split_huge_page(page);
2359 unlock_page(page);
2360 }
2361 }
2362}
2363
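/*
 * Find or create the rmap_item for this address in the mm_slot's rmap_list,
 * first removing and freeing any stale rmap_items for lower addresses that
 * are no longer covered by the VMAs being scanned.
 */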
2364static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
2365 struct ksm_rmap_item **rmap_list,
2366 unsigned long addr)
2367{
2368 struct ksm_rmap_item *rmap_item;
2369
2370 while (*rmap_list) {
2371 rmap_item = *rmap_list;
2372 if ((rmap_item->address & PAGE_MASK) == addr)
2373 return rmap_item;
2374 if (rmap_item->address > addr)
2375 break;
2376 *rmap_list = rmap_item->rmap_list;
2377 remove_rmap_item_from_tree(rmap_item);
2378 free_rmap_item(rmap_item);
2379 }
2380
2381 rmap_item = alloc_rmap_item();
2382 if (rmap_item) {
2383 /* It has already been zeroed */
2384 rmap_item->mm = mm_slot->slot.mm;
2385 rmap_item->mm->ksm_rmap_items++;
2386 rmap_item->address = addr;
2387 rmap_item->rmap_list = *rmap_list;
2388 *rmap_list = rmap_item;
2389 }
2390 return rmap_item;
2391}
2392
2393/*
2394 * Calculate the skip count for a given rmap_item age. The age reflects how
2395 * many times de-duplication of this page has already been tried without
2396 * success; the smaller the age, the fewer scans the page is skipped for.
2397 *
2398 * @age: rmap_item age of page
2399 */
2400static unsigned int skip_age(rmap_age_t age)
2401{
2402 if (age <= 3)
2403 return 1;
2404 if (age <= 5)
2405 return 2;
2406 if (age <= 8)
2407 return 4;
2408
2409 return 8;
2410}
2411
2412/*
2413 * Determines if a page should be skipped for the current scan.
2414 *
2415 * @folio: folio containing the page to check
2416 * @rmap_item: associated rmap_item of page
2417 */
2418static bool should_skip_rmap_item(struct folio *folio,
2419 struct ksm_rmap_item *rmap_item)
2420{
2421 rmap_age_t age;
2422
2423 if (!ksm_smart_scan)
2424 return false;
2425
2426 /*
2427	 * Never skip pages that are already KSM; cmp_and_merge_page()
2428 * will essentially ignore them, but we still have to process them
2429 * properly.
2430 */
2431 if (folio_test_ksm(folio))
2432 return false;
2433
2434 age = rmap_item->age;
2435 if (age != U8_MAX)
2436 rmap_item->age++;
2437
2438 /*
2439 * Smaller ages are not skipped, they need to get a chance to go
2440 * through the different phases of the KSM merging.
2441 */
2442 if (age < 3)
2443 return false;
2444
2445 /*
2446 * Are we still allowed to skip? If not, then don't skip it
2447 * and determine how much more often we are allowed to skip next.
2448 */
2449 if (!rmap_item->remaining_skips) {
2450 rmap_item->remaining_skips = skip_age(age);
2451 return false;
2452 }
2453
2454 /* Skip this page */
2455 ksm_pages_skipped++;
2456 rmap_item->remaining_skips--;
2457 remove_rmap_item_from_tree(rmap_item);
2458 return true;
2459}
2460
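/*
 * Advance the ksm_scan cursor to the next page worth scanning: walk the
 * VM_MERGEABLE VMAs of the current mm and return the rmap_item for the next
 * anonymous page found (with *page holding a reference), moving on to the
 * next mm_slot once this mm is exhausted. Returns NULL when a full pass
 * over all registered mms has completed.
 */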
2461static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
2462{
2463 struct mm_struct *mm;
2464 struct ksm_mm_slot *mm_slot;
2465 struct mm_slot *slot;
2466 struct vm_area_struct *vma;
2467 struct ksm_rmap_item *rmap_item;
2468 struct vma_iterator vmi;
2469 int nid;
2470
2471 if (list_empty(&ksm_mm_head.slot.mm_node))
2472 return NULL;
2473
2474 mm_slot = ksm_scan.mm_slot;
2475 if (mm_slot == &ksm_mm_head) {
2476 advisor_start_scan();
2477 trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items);
2478
2479 /*
2480 * A number of pages can hang around indefinitely in per-cpu
2481 * LRU cache, raised page count preventing write_protect_page
2482 * from merging them. Though it doesn't really matter much,
2483 * it is puzzling to see some stuck in pages_volatile until
2484 * other activity jostles them out, and they also prevented
2485 * LTP's KSM test from succeeding deterministically; so drain
2486 * them here (here rather than on entry to ksm_do_scan(),
2487 * so we don't IPI too often when pages_to_scan is set low).
2488 */
2489 lru_add_drain_all();
2490
2491 /*
2492 * Whereas stale stable_nodes on the stable_tree itself
2493 * get pruned in the regular course of stable_tree_search(),
2494 * those moved out to the migrate_nodes list can accumulate:
2495 * so prune them once before each full scan.
2496 */
2497 if (!ksm_merge_across_nodes) {
2498 struct ksm_stable_node *stable_node, *next;
2499 struct folio *folio;
2500
2501 list_for_each_entry_safe(stable_node, next,
2502 &migrate_nodes, list) {
2503 folio = ksm_get_folio(stable_node,
2504 KSM_GET_FOLIO_NOLOCK);
2505 if (folio)
2506 folio_put(folio);
2507 cond_resched();
2508 }
2509 }
2510
2511 for (nid = 0; nid < ksm_nr_node_ids; nid++)
2512 root_unstable_tree[nid] = RB_ROOT;
2513
2514 spin_lock(&ksm_mmlist_lock);
2515 slot = list_entry(mm_slot->slot.mm_node.next,
2516 struct mm_slot, mm_node);
2517 mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
2518 ksm_scan.mm_slot = mm_slot;
2519 spin_unlock(&ksm_mmlist_lock);
2520 /*
2521 * Although we tested list_empty() above, a racing __ksm_exit
2522 * of the last mm on the list may have removed it since then.
2523 */
2524 if (mm_slot == &ksm_mm_head)
2525 return NULL;
2526next_mm:
2527 ksm_scan.address = 0;
2528 ksm_scan.rmap_list = &mm_slot->rmap_list;
2529 }
2530
2531 slot = &mm_slot->slot;
2532 mm = slot->mm;
2533 vma_iter_init(&vmi, mm, ksm_scan.address);
2534
2535 mmap_read_lock(mm);
2536 if (ksm_test_exit(mm))
2537 goto no_vmas;
2538
2539 for_each_vma(vmi, vma) {
2540 if (!(vma->vm_flags & VM_MERGEABLE))
2541 continue;
2542 if (ksm_scan.address < vma->vm_start)
2543 ksm_scan.address = vma->vm_start;
2544 if (!vma->anon_vma)
2545 ksm_scan.address = vma->vm_end;
2546
2547 while (ksm_scan.address < vma->vm_end) {
2548 struct page *tmp_page = NULL;
2549 struct folio_walk fw;
2550 struct folio *folio;
2551
2552 if (ksm_test_exit(mm))
2553 break;
2554
2555 folio = folio_walk_start(&fw, vma, ksm_scan.address, 0);
2556 if (folio) {
2557 if (!folio_is_zone_device(folio) &&
2558 folio_test_anon(folio)) {
2559 folio_get(folio);
2560 tmp_page = fw.page;
2561 }
2562 folio_walk_end(&fw, vma);
2563 }
2564
2565 if (tmp_page) {
2566 flush_anon_page(vma, tmp_page, ksm_scan.address);
2567 flush_dcache_page(tmp_page);
2568 rmap_item = get_next_rmap_item(mm_slot,
2569 ksm_scan.rmap_list, ksm_scan.address);
2570 if (rmap_item) {
2571 ksm_scan.rmap_list =
2572 &rmap_item->rmap_list;
2573
2574 if (should_skip_rmap_item(folio, rmap_item)) {
2575 folio_put(folio);
2576 goto next_page;
2577 }
2578
2579 ksm_scan.address += PAGE_SIZE;
2580 *page = tmp_page;
2581 } else {
2582 folio_put(folio);
2583 }
2584 mmap_read_unlock(mm);
2585 return rmap_item;
2586 }
2587next_page:
2588 ksm_scan.address += PAGE_SIZE;
2589 cond_resched();
2590 }
2591 }
2592
2593 if (ksm_test_exit(mm)) {
2594no_vmas:
2595 ksm_scan.address = 0;
2596 ksm_scan.rmap_list = &mm_slot->rmap_list;
2597 }
2598 /*
2599 * Nuke all the rmap_items that are above this current rmap:
2600 * because there were no VM_MERGEABLE vmas with such addresses.
2601 */
2602 remove_trailing_rmap_items(ksm_scan.rmap_list);
2603
2604 spin_lock(&ksm_mmlist_lock);
2605 slot = list_entry(mm_slot->slot.mm_node.next,
2606 struct mm_slot, mm_node);
2607 ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
2608 if (ksm_scan.address == 0) {
2609 /*
2610 * We've completed a full scan of all vmas, holding mmap_lock
2611 * throughout, and found no VM_MERGEABLE: so do the same as
2612 * __ksm_exit does to remove this mm from all our lists now.
2613 * This applies either when cleaning up after __ksm_exit
2614 * (but beware: we can reach here even before __ksm_exit),
2615 * or when all VM_MERGEABLE areas have been unmapped (and
2616 * mmap_lock then protects against race with MADV_MERGEABLE).
2617 */
2618 hash_del(&mm_slot->slot.hash);
2619 list_del(&mm_slot->slot.mm_node);
2620 spin_unlock(&ksm_mmlist_lock);
2621
2622 mm_slot_free(mm_slot_cache, mm_slot);
2623 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
2624 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
2625 mmap_read_unlock(mm);
2626 mmdrop(mm);
2627 } else {
2628 mmap_read_unlock(mm);
2629 /*
2630		 * mmap_read_unlock(mm) first because once
2631		 * spin_unlock(&ksm_mmlist_lock) has run, the "mm" may
2632 * already have been freed under us by __ksm_exit()
2633 * because the "mm_slot" is still hashed and
2634 * ksm_scan.mm_slot doesn't point to it anymore.
2635 */
2636 spin_unlock(&ksm_mmlist_lock);
2637 }
2638
2639 /* Repeat until we've completed scanning the whole list */
2640 mm_slot = ksm_scan.mm_slot;
2641 if (mm_slot != &ksm_mm_head)
2642 goto next_mm;
2643
2644 advisor_stop_scan();
2645
2646 trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items);
2647 ksm_scan.seqnr++;
2648 return NULL;
2649}
2650
2651/**
2652 * ksm_do_scan - the ksm scanner main worker function.
2653 * @scan_npages: number of pages we want to scan before we return.
2654 */
2655static void ksm_do_scan(unsigned int scan_npages)
2656{
2657 struct ksm_rmap_item *rmap_item;
2658 struct page *page;
2659
2660 while (scan_npages-- && likely(!freezing(current))) {
2661 cond_resched();
2662 rmap_item = scan_get_next_rmap_item(&page);
2663 if (!rmap_item)
2664 return;
2665 cmp_and_merge_page(page, rmap_item);
2666 put_page(page);
2667 ksm_pages_scanned++;
2668 }
2669}
2670
2671static int ksmd_should_run(void)
2672{
2673 return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node);
2674}
2675
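/*
 * Main loop of the ksmd kernel thread: scan batches of pages_to_scan pages
 * while merging is enabled, sleeping between batches; otherwise sleep until
 * woken by registration of a new mm or a change of /sys/kernel/mm/ksm/run.
 */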
2676static int ksm_scan_thread(void *nothing)
2677{
2678 unsigned int sleep_ms;
2679
2680 set_freezable();
2681 set_user_nice(current, 5);
2682
2683 while (!kthread_should_stop()) {
2684 mutex_lock(&ksm_thread_mutex);
2685 wait_while_offlining();
2686 if (ksmd_should_run())
2687 ksm_do_scan(ksm_thread_pages_to_scan);
2688 mutex_unlock(&ksm_thread_mutex);
2689
2690 if (ksmd_should_run()) {
2691 sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
2692 wait_event_freezable_timeout(ksm_iter_wait,
2693 sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
2694 msecs_to_jiffies(sleep_ms));
2695 } else {
2696 wait_event_freezable(ksm_thread_wait,
2697 ksmd_should_run() || kthread_should_stop());
2698 }
2699 }
2700 return 0;
2701}
2702
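/*
 * Per-VMA helpers for process-wide KSM (MMF_VM_MERGE_ANY): decide whether a
 * VMA is eligible for VM_MERGEABLE, set the flag on eligible VMAs, and clear
 * it again (unmerging any merged pages first) when opting back out.
 */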
2703static bool __ksm_should_add_vma(const struct file *file, vm_flags_t vm_flags)
2704{
2705 if (vm_flags & VM_MERGEABLE)
2706 return false;
2707
2708 return ksm_compatible(file, vm_flags);
2709}
2710
2711static void __ksm_add_vma(struct vm_area_struct *vma)
2712{
2713 if (__ksm_should_add_vma(vma->vm_file, vma->vm_flags))
2714 vm_flags_set(vma, VM_MERGEABLE);
2715}
2716
2717static int __ksm_del_vma(struct vm_area_struct *vma)
2718{
2719 int err;
2720
2721 if (!(vma->vm_flags & VM_MERGEABLE))
2722 return 0;
2723
2724 if (vma->anon_vma) {
2725 err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true);
2726 if (err)
2727 return err;
2728 }
2729
2730 vm_flags_clear(vma, VM_MERGEABLE);
2731 return 0;
2732}
2733/**
2734 * ksm_vma_flags - Update VMA flags to mark as mergeable if compatible
2735 *
2736 * @mm: Proposed VMA's mm_struct
2737 * @file: Proposed VMA's file-backed mapping, if any.
2738 * @vm_flags: Proposed VMA's flags.
2739 *
2740 * Returns: @vm_flags possibly updated to mark mergeable.
2741 */
2742vm_flags_t ksm_vma_flags(const struct mm_struct *mm, const struct file *file,
2743 vm_flags_t vm_flags)
2744{
2745 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags) &&
2746 __ksm_should_add_vma(file, vm_flags))
2747 vm_flags |= VM_MERGEABLE;
2748
2749 return vm_flags;
2750}
2751
2752static void ksm_add_vmas(struct mm_struct *mm)
2753{
2754 struct vm_area_struct *vma;
2755
2756 VMA_ITERATOR(vmi, mm, 0);
2757 for_each_vma(vmi, vma)
2758 __ksm_add_vma(vma);
2759}
2760
2761static int ksm_del_vmas(struct mm_struct *mm)
2762{
2763 struct vm_area_struct *vma;
2764 int err;
2765
2766 VMA_ITERATOR(vmi, mm, 0);
2767 for_each_vma(vmi, vma) {
2768 err = __ksm_del_vma(vma);
2769 if (err)
2770 return err;
2771 }
2772 return 0;
2773}
2774
2775/**
2776 * ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all
2777 * compatible VMAs
2778 *
2779 * @mm: Pointer to mm
2780 *
2781 * Returns 0 on success, otherwise error code
2782 */
2783int ksm_enable_merge_any(struct mm_struct *mm)
2784{
2785 int err;
2786
2787 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2788 return 0;
2789
2790 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
2791 err = __ksm_enter(mm);
2792 if (err)
2793 return err;
2794 }
2795
2796 set_bit(MMF_VM_MERGE_ANY, &mm->flags);
2797 ksm_add_vmas(mm);
2798
2799 return 0;
2800}
2801
2802/**
2803 * ksm_disable_merge_any - Disable merging on all compatible VMAs of the mm,
2804 * previously enabled via ksm_enable_merge_any().
2805 *
2806 * Disabling merging implies unmerging any merged pages, like setting
2807 * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and
2808 * merging on all compatible VMAs remains enabled.
2809 *
2810 * @mm: Pointer to mm
2811 *
2812 * Returns 0 on success, otherwise error code
2813 */
2814int ksm_disable_merge_any(struct mm_struct *mm)
2815{
2816 int err;
2817
2818 if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2819 return 0;
2820
2821 err = ksm_del_vmas(mm);
2822 if (err) {
2823 ksm_add_vmas(mm);
2824 return err;
2825 }
2826
2827 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
2828 return 0;
2829}
2830
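/*
 * Disable KSM for this mm and unmerge any already merged pages, whether
 * merging was enabled per-VMA (MADV_MERGEABLE) or process-wide.
 * The caller must hold mmap_lock in write mode.
 */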
2831int ksm_disable(struct mm_struct *mm)
2832{
2833 mmap_assert_write_locked(mm);
2834
2835 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags))
2836 return 0;
2837 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2838 return ksm_disable_merge_any(mm);
2839 return ksm_del_vmas(mm);
2840}
2841
2842int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
2843 unsigned long end, int advice, vm_flags_t *vm_flags)
2844{
2845 struct mm_struct *mm = vma->vm_mm;
2846 int err;
2847
2848 switch (advice) {
2849 case MADV_MERGEABLE:
2850 if (vma->vm_flags & VM_MERGEABLE)
2851 return 0;
2852 if (!vma_ksm_compatible(vma))
2853 return 0;
2854
2855 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
2856 err = __ksm_enter(mm);
2857 if (err)
2858 return err;
2859 }
2860
2861 *vm_flags |= VM_MERGEABLE;
2862 break;
2863
2864 case MADV_UNMERGEABLE:
2865 if (!(*vm_flags & VM_MERGEABLE))
2866 return 0; /* just ignore the advice */
2867
2868 if (vma->anon_vma) {
2869 err = unmerge_ksm_pages(vma, start, end, true);
2870 if (err)
2871 return err;
2872 }
2873
2874 *vm_flags &= ~VM_MERGEABLE;
2875 break;
2876 }
2877
2878 return 0;
2879}
2880EXPORT_SYMBOL_GPL(ksm_madvise);
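/*
 * Usage sketch (illustrative only, not compiled here): userspace typically
 * opts an anonymous region in or out of KSM with madvise(2), assuming "addr"
 * and "len" describe such a mapping:
 *
 *	madvise(addr, len, MADV_MERGEABLE);	// let ksmd merge this range
 *	madvise(addr, len, MADV_UNMERGEABLE);	// break COW and opt out
 *
 * or enables merging process-wide with prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0),
 * which reaches ksm_enable_merge_any() above.
 */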
2881
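/*
 * Register an mm with ksmd for the first time: allocate and hash its mm_slot,
 * insert it into the scan list relative to the cursor, take a reference on
 * the mm, and wake ksmd if the list was previously empty.
 */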
2882int __ksm_enter(struct mm_struct *mm)
2883{
2884 struct ksm_mm_slot *mm_slot;
2885 struct mm_slot *slot;
2886 int needs_wakeup;
2887
2888 mm_slot = mm_slot_alloc(mm_slot_cache);
2889 if (!mm_slot)
2890 return -ENOMEM;
2891
2892 slot = &mm_slot->slot;
2893
2894 /* Check ksm_run too? Would need tighter locking */
2895 needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node);
2896
2897 spin_lock(&ksm_mmlist_lock);
2898 mm_slot_insert(mm_slots_hash, mm, slot);
2899 /*
2900 * When KSM_RUN_MERGE (or KSM_RUN_STOP),
2901 * insert just behind the scanning cursor, to let the area settle
2902 * down a little; when fork is followed by immediate exec, we don't
2903 * want ksmd to waste time setting up and tearing down an rmap_list.
2904 *
2905 * But when KSM_RUN_UNMERGE, it's important to insert ahead of its
2906 * scanning cursor, otherwise KSM pages in newly forked mms will be
2907 * missed: then we might as well insert at the end of the list.
2908 */
2909 if (ksm_run & KSM_RUN_UNMERGE)
2910 list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node);
2911 else
2912 list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node);
2913 spin_unlock(&ksm_mmlist_lock);
2914
2915 set_bit(MMF_VM_MERGEABLE, &mm->flags);
2916 mmgrab(mm);
2917
2918 if (needs_wakeup)
2919 wake_up_interruptible(&ksm_thread_wait);
2920
2921 trace_ksm_enter(mm);
2922 return 0;
2923}
2924
2925void __ksm_exit(struct mm_struct *mm)
2926{
2927 struct ksm_mm_slot *mm_slot;
2928 struct mm_slot *slot;
2929 int easy_to_free = 0;
2930
2931 /*
2932 * This process is exiting: if it's straightforward (as is the
2933 * case when ksmd was never running), free mm_slot immediately.
2934 * But if it's at the cursor or has rmap_items linked to it, use
2935 * mmap_lock to synchronize with any break_cows before pagetables
2936 * are freed, and leave the mm_slot on the list for ksmd to free.
2937 * Beware: ksm may already have noticed it exiting and freed the slot.
2938 */
2939
2940 spin_lock(&ksm_mmlist_lock);
2941 slot = mm_slot_lookup(mm_slots_hash, mm);
2942 mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
2943 if (mm_slot && ksm_scan.mm_slot != mm_slot) {
2944 if (!mm_slot->rmap_list) {
2945 hash_del(&slot->hash);
2946 list_del(&slot->mm_node);
2947 easy_to_free = 1;
2948 } else {
2949 list_move(&slot->mm_node,
2950 &ksm_scan.mm_slot->slot.mm_node);
2951 }
2952 }
2953 spin_unlock(&ksm_mmlist_lock);
2954
2955 if (easy_to_free) {
2956 mm_slot_free(mm_slot_cache, mm_slot);
2957 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
2958 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
2959 mmdrop(mm);
2960 } else if (mm_slot) {
2961 mmap_write_lock(mm);
2962 mmap_write_unlock(mm);
2963 }
2964
2965 trace_ksm_exit(mm);
2966}
2967
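/*
 * Called on the swapin path: a swapcache folio that was (or might have been)
 * a KSM page cannot simply be mapped writable into this VMA. Return the
 * folio itself if it can be reused as-is, a freshly allocated and copied
 * folio otherwise, NULL if the allocation fails, or an ERR_PTR() on
 * hwpoison/copy failure.
 */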
2968struct folio *ksm_might_need_to_copy(struct folio *folio,
2969 struct vm_area_struct *vma, unsigned long addr)
2970{
2971 struct page *page = folio_page(folio, 0);
2972 struct anon_vma *anon_vma = folio_anon_vma(folio);
2973 struct folio *new_folio;
2974
2975 if (folio_test_large(folio))
2976 return folio;
2977
2978 if (folio_test_ksm(folio)) {
2979 if (folio_stable_node(folio) &&
2980 !(ksm_run & KSM_RUN_UNMERGE))
2981 return folio; /* no need to copy it */
2982 } else if (!anon_vma) {
2983 return folio; /* no need to copy it */
2984 } else if (folio->index == linear_page_index(vma, addr) &&
2985 anon_vma->root == vma->anon_vma->root) {
2986 return folio; /* still no need to copy it */
2987 }
2988 if (PageHWPoison(page))
2989 return ERR_PTR(-EHWPOISON);
2990 if (!folio_test_uptodate(folio))
2991 return folio; /* let do_swap_page report the error */
2992
2993 new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);
2994 if (new_folio &&
2995 mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {
2996 folio_put(new_folio);
2997 new_folio = NULL;
2998 }
2999 if (new_folio) {
3000 if (copy_mc_user_highpage(folio_page(new_folio, 0), page,
3001 addr, vma)) {
3002 folio_put(new_folio);
3003 return ERR_PTR(-EHWPOISON);
3004 }
3005 folio_set_dirty(new_folio);
3006 __folio_mark_uptodate(new_folio);
3007 __folio_set_locked(new_folio);
3008#ifdef CONFIG_SWAP
3009 count_vm_event(KSM_SWPIN_COPY);
3010#endif
3011 }
3012
3013 return new_folio;
3014}
3015
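/*
 * rmap walk for a KSM folio: visit every virtual address mapping this folio
 * by iterating the stable node's rmap_items and each rmap_item's anon_vma
 * interval tree; a second pass picks up VMAs forked since ksmd last scanned.
 */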
3016void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
3017{
3018 struct ksm_stable_node *stable_node;
3019 struct ksm_rmap_item *rmap_item;
3020 int search_new_forks = 0;
3021
3022 VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio);
3023
3024 /*
3025 * Rely on the page lock to protect against concurrent modifications
3026 * to that page's node of the stable tree.
3027 */
3028 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3029
3030 stable_node = folio_stable_node(folio);
3031 if (!stable_node)
3032 return;
3033again:
3034 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
3035 struct anon_vma *anon_vma = rmap_item->anon_vma;
3036 struct anon_vma_chain *vmac;
3037 struct vm_area_struct *vma;
3038
3039 cond_resched();
3040 if (!anon_vma_trylock_read(anon_vma)) {
3041 if (rwc->try_lock) {
3042 rwc->contended = true;
3043 return;
3044 }
3045 anon_vma_lock_read(anon_vma);
3046 }
3047 anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
3048 0, ULONG_MAX) {
3049 unsigned long addr;
3050
3051 cond_resched();
3052 vma = vmac->vma;
3053
3054			/* Ignore the stable/unstable/seqnr flags */
3055 addr = rmap_item->address & PAGE_MASK;
3056
3057 if (addr < vma->vm_start || addr >= vma->vm_end)
3058 continue;
3059 /*
3060 * Initially we examine only the vma which covers this
3061 * rmap_item; but later, if there is still work to do,
3062 * we examine covering vmas in other mms: in case they
3063 * were forked from the original since ksmd passed.
3064 */
3065 if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
3066 continue;
3067
3068 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
3069 continue;
3070
3071 if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) {
3072 anon_vma_unlock_read(anon_vma);
3073 return;
3074 }
3075 if (rwc->done && rwc->done(folio)) {
3076 anon_vma_unlock_read(anon_vma);
3077 return;
3078 }
3079 }
3080 anon_vma_unlock_read(anon_vma);
3081 }
3082 if (!search_new_forks++)
3083 goto again;
3084}
3085
3086#ifdef CONFIG_MEMORY_FAILURE
3087/*
3088 * Collect processes when the error hits a KSM page.
3089 */
3090void collect_procs_ksm(const struct folio *folio, const struct page *page,
3091 struct list_head *to_kill, int force_early)
3092{
3093 struct ksm_stable_node *stable_node;
3094 struct ksm_rmap_item *rmap_item;
3095 struct vm_area_struct *vma;
3096 struct task_struct *tsk;
3097
3098 stable_node = folio_stable_node(folio);
3099 if (!stable_node)
3100 return;
3101 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
3102 struct anon_vma *av = rmap_item->anon_vma;
3103
3104 anon_vma_lock_read(av);
3105 rcu_read_lock();
3106 for_each_process(tsk) {
3107 struct anon_vma_chain *vmac;
3108 unsigned long addr;
3109 struct task_struct *t =
3110 task_early_kill(tsk, force_early);
3111 if (!t)
3112 continue;
3113 anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0,
3114 ULONG_MAX)
3115 {
3116 vma = vmac->vma;
3117 if (vma->vm_mm == t->mm) {
3118 addr = rmap_item->address & PAGE_MASK;
3119 add_to_kill_ksm(t, page, vma, to_kill,
3120 addr);
3121 }
3122 }
3123 }
3124 rcu_read_unlock();
3125 anon_vma_unlock_read(av);
3126 }
3127}
3128#endif
3129
3130#ifdef CONFIG_MIGRATION
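/*
 * Transfer the stable node from the folio being migrated to its replacement:
 * update kpfn to the new folio and clear the old folio's stable node pointer.
 */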
3131void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
3132{
3133 struct ksm_stable_node *stable_node;
3134
3135 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3136 VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio);
3137 VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio);
3138
3139 stable_node = folio_stable_node(folio);
3140 if (stable_node) {
3141 VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio);
3142 stable_node->kpfn = folio_pfn(newfolio);
3143 /*
3144 * newfolio->mapping was set in advance; now we need smp_wmb()
3145 * to make sure that the new stable_node->kpfn is visible
3146 * to ksm_get_folio() before it can see that folio->mapping
3147 * has gone stale (or that the swapcache flag has been cleared).
3148 */
3149 smp_wmb();
3150 folio_set_stable_node(folio, NULL);
3151 }
3152}
3153#endif /* CONFIG_MIGRATION */
3154
3155#ifdef CONFIG_MEMORY_HOTREMOVE
3156static void wait_while_offlining(void)
3157{
3158 while (ksm_run & KSM_RUN_OFFLINE) {
3159 mutex_unlock(&ksm_thread_mutex);
3160 wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
3161 TASK_UNINTERRUPTIBLE);
3162 mutex_lock(&ksm_thread_mutex);
3163 }
3164}
3165
3166static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node,
3167 unsigned long start_pfn,
3168 unsigned long end_pfn)
3169{
3170 if (stable_node->kpfn >= start_pfn &&
3171 stable_node->kpfn < end_pfn) {
3172 /*
3173 * Don't ksm_get_folio, page has already gone:
3174 * which is why we keep kpfn instead of page*
3175 */
3176 remove_node_from_stable_tree(stable_node);
3177 return true;
3178 }
3179 return false;
3180}
3181
3182static bool stable_node_chain_remove_range(struct ksm_stable_node *stable_node,
3183 unsigned long start_pfn,
3184 unsigned long end_pfn,
3185 struct rb_root *root)
3186{
3187 struct ksm_stable_node *dup;
3188 struct hlist_node *hlist_safe;
3189
3190 if (!is_stable_node_chain(stable_node)) {
3191 VM_BUG_ON(is_stable_node_dup(stable_node));
3192 return stable_node_dup_remove_range(stable_node, start_pfn,
3193 end_pfn);
3194 }
3195
3196 hlist_for_each_entry_safe(dup, hlist_safe,
3197 &stable_node->hlist, hlist_dup) {
3198 VM_BUG_ON(!is_stable_node_dup(dup));
3199 stable_node_dup_remove_range(dup, start_pfn, end_pfn);
3200 }
3201 if (hlist_empty(&stable_node->hlist)) {
3202 free_stable_node_chain(stable_node, root);
3203 return true; /* notify caller that tree was rebalanced */
3204 } else
3205 return false;
3206}
3207
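/*
 * After a memory offline, prune every stable node (and chain dup) whose kpfn
 * falls in the offlined range from the stable trees and the migrate_nodes
 * list, so that ksm_get_folio() never touches a struct page that is gone.
 */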
3208static void ksm_check_stable_tree(unsigned long start_pfn,
3209 unsigned long end_pfn)
3210{
3211 struct ksm_stable_node *stable_node, *next;
3212 struct rb_node *node;
3213 int nid;
3214
3215 for (nid = 0; nid < ksm_nr_node_ids; nid++) {
3216 node = rb_first(root_stable_tree + nid);
3217 while (node) {
3218 stable_node = rb_entry(node, struct ksm_stable_node, node);
3219 if (stable_node_chain_remove_range(stable_node,
3220 start_pfn, end_pfn,
3221 root_stable_tree +
3222 nid))
3223 node = rb_first(root_stable_tree + nid);
3224 else
3225 node = rb_next(node);
3226 cond_resched();
3227 }
3228 }
3229 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
3230 if (stable_node->kpfn >= start_pfn &&
3231 stable_node->kpfn < end_pfn)
3232 remove_node_from_stable_tree(stable_node);
3233 cond_resched();
3234 }
3235}
3236
3237static int ksm_memory_callback(struct notifier_block *self,
3238 unsigned long action, void *arg)
3239{
3240 struct memory_notify *mn = arg;
3241
3242 switch (action) {
3243 case MEM_GOING_OFFLINE:
3244 /*
3245 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
3246 * and remove_all_stable_nodes() while memory is going offline:
3247 * it is unsafe for them to touch the stable tree at this time.
3248 * But unmerge_ksm_pages(), rmap lookups and other entry points
3249 * which do not need the ksm_thread_mutex are all safe.
3250 */
3251 mutex_lock(&ksm_thread_mutex);
3252 ksm_run |= KSM_RUN_OFFLINE;
3253 mutex_unlock(&ksm_thread_mutex);
3254 break;
3255
3256 case MEM_OFFLINE:
3257 /*
3258 * Most of the work is done by page migration; but there might
3259 * be a few stable_nodes left over, still pointing to struct
3260 * pages which have been offlined: prune those from the tree,
3261 * otherwise ksm_get_folio() might later try to access a
3262 * non-existent struct page.
3263 */
3264 ksm_check_stable_tree(mn->start_pfn,
3265 mn->start_pfn + mn->nr_pages);
3266 fallthrough;
3267 case MEM_CANCEL_OFFLINE:
3268 mutex_lock(&ksm_thread_mutex);
3269 ksm_run &= ~KSM_RUN_OFFLINE;
3270 mutex_unlock(&ksm_thread_mutex);
3271
3272 smp_mb(); /* wake_up_bit advises this */
3273 wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE));
3274 break;
3275 }
3276 return NOTIFY_OK;
3277}
3278#else
3279static void wait_while_offlining(void)
3280{
3281}
3282#endif /* CONFIG_MEMORY_HOTREMOVE */
3283
3284#ifdef CONFIG_PROC_FS
3285/*
3286 * The process is mergeable only if at least one of its VMAs is
3287 * currently marked VM_MERGEABLE.
3288 *
3289 * The mmap lock must be held in read mode.
3290 */
3291bool ksm_process_mergeable(struct mm_struct *mm)
3292{
3293 struct vm_area_struct *vma;
3294
3295 mmap_assert_locked(mm);
3296 VMA_ITERATOR(vmi, mm, 0);
3297 for_each_vma(vmi, vma)
3298 if (vma->vm_flags & VM_MERGEABLE)
3299 return true;
3300
3301 return false;
3302}
3303
3304long ksm_process_profit(struct mm_struct *mm)
3305{
3306 return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
3307 mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
3308}
3309#endif /* CONFIG_PROC_FS */
3310
3311#ifdef CONFIG_SYSFS
3312/*
3313 * This all compiles without CONFIG_SYSFS, but is a waste of space.
3314 */
3315
3316#define KSM_ATTR_RO(_name) \
3317 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
3318#define KSM_ATTR(_name) \
3319 static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
3320
3321static ssize_t sleep_millisecs_show(struct kobject *kobj,
3322 struct kobj_attribute *attr, char *buf)
3323{
3324 return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs);
3325}
3326
3327static ssize_t sleep_millisecs_store(struct kobject *kobj,
3328 struct kobj_attribute *attr,
3329 const char *buf, size_t count)
3330{
3331 unsigned int msecs;
3332 int err;
3333
3334 err = kstrtouint(buf, 10, &msecs);
3335 if (err)
3336 return -EINVAL;
3337
3338 ksm_thread_sleep_millisecs = msecs;
3339 wake_up_interruptible(&ksm_iter_wait);
3340
3341 return count;
3342}
3343KSM_ATTR(sleep_millisecs);
3344
3345static ssize_t pages_to_scan_show(struct kobject *kobj,
3346 struct kobj_attribute *attr, char *buf)
3347{
3348 return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan);
3349}
3350
3351static ssize_t pages_to_scan_store(struct kobject *kobj,
3352 struct kobj_attribute *attr,
3353 const char *buf, size_t count)
3354{
3355 unsigned int nr_pages;
3356 int err;
3357
3358 if (ksm_advisor != KSM_ADVISOR_NONE)
3359 return -EINVAL;
3360
3361 err = kstrtouint(buf, 10, &nr_pages);
3362 if (err)
3363 return -EINVAL;
3364
3365 ksm_thread_pages_to_scan = nr_pages;
3366
3367 return count;
3368}
3369KSM_ATTR(pages_to_scan);
3370
3371static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
3372 char *buf)
3373{
3374 return sysfs_emit(buf, "%lu\n", ksm_run);
3375}
3376
3377static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
3378 const char *buf, size_t count)
3379{
3380 unsigned int flags;
3381 int err;
3382
3383 err = kstrtouint(buf, 10, &flags);
3384 if (err)
3385 return -EINVAL;
3386 if (flags > KSM_RUN_UNMERGE)
3387 return -EINVAL;
3388
3389 /*
3390 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
3391 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
3392 * breaking COW to free the pages_shared (but leaves mm_slots
3393 * on the list for when ksmd may be set running again).
3394 */
3395
3396 mutex_lock(&ksm_thread_mutex);
3397 wait_while_offlining();
3398 if (ksm_run != flags) {
3399 ksm_run = flags;
3400 if (flags & KSM_RUN_UNMERGE) {
3401 set_current_oom_origin();
3402 err = unmerge_and_remove_all_rmap_items();
3403 clear_current_oom_origin();
3404 if (err) {
3405 ksm_run = KSM_RUN_STOP;
3406 count = err;
3407 }
3408 }
3409 }
3410 mutex_unlock(&ksm_thread_mutex);
3411
3412 if (flags & KSM_RUN_MERGE)
3413 wake_up_interruptible(&ksm_thread_wait);
3414
3415 return count;
3416}
3417KSM_ATTR(run);
3418
3419#ifdef CONFIG_NUMA
3420static ssize_t merge_across_nodes_show(struct kobject *kobj,
3421 struct kobj_attribute *attr, char *buf)
3422{
3423 return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes);
3424}
3425
3426static ssize_t merge_across_nodes_store(struct kobject *kobj,
3427 struct kobj_attribute *attr,
3428 const char *buf, size_t count)
3429{
3430 int err;
3431 unsigned long knob;
3432
3433 err = kstrtoul(buf, 10, &knob);
3434 if (err)
3435 return err;
3436 if (knob > 1)
3437 return -EINVAL;
3438
3439 mutex_lock(&ksm_thread_mutex);
3440 wait_while_offlining();
3441 if (ksm_merge_across_nodes != knob) {
3442 if (ksm_pages_shared || remove_all_stable_nodes())
3443 err = -EBUSY;
3444 else if (root_stable_tree == one_stable_tree) {
3445 struct rb_root *buf;
3446 /*
3447 * This is the first time that we switch away from the
3448 * default of merging across nodes: must now allocate
3449 * a buffer to hold as many roots as may be needed.
3450 * Allocate stable and unstable together:
3451 * MAXSMP NODES_SHIFT 10 will use 16kB.
3452 */
3453 buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
3454 GFP_KERNEL);
3455			/* RB_ROOT is NULL, i.e. zero: kcalloc gives us empty roots */
3456 if (!buf)
3457 err = -ENOMEM;
3458 else {
3459 root_stable_tree = buf;
3460 root_unstable_tree = buf + nr_node_ids;
3461 /* Stable tree is empty but not the unstable */
3462 root_unstable_tree[0] = one_unstable_tree[0];
3463 }
3464 }
3465 if (!err) {
3466 ksm_merge_across_nodes = knob;
3467 ksm_nr_node_ids = knob ? 1 : nr_node_ids;
3468 }
3469 }
3470 mutex_unlock(&ksm_thread_mutex);
3471
3472 return err ? err : count;
3473}
3474KSM_ATTR(merge_across_nodes);
3475#endif
3476
3477static ssize_t use_zero_pages_show(struct kobject *kobj,
3478 struct kobj_attribute *attr, char *buf)
3479{
3480 return sysfs_emit(buf, "%u\n", ksm_use_zero_pages);
3481}
3482static ssize_t use_zero_pages_store(struct kobject *kobj,
3483 struct kobj_attribute *attr,
3484 const char *buf, size_t count)
3485{
3486 int err;
3487 bool value;
3488
3489 err = kstrtobool(buf, &value);
3490 if (err)
3491 return -EINVAL;
3492
3493 ksm_use_zero_pages = value;
3494
3495 return count;
3496}
3497KSM_ATTR(use_zero_pages);
3498
3499static ssize_t max_page_sharing_show(struct kobject *kobj,
3500 struct kobj_attribute *attr, char *buf)
3501{
3502 return sysfs_emit(buf, "%u\n", ksm_max_page_sharing);
3503}
3504
3505static ssize_t max_page_sharing_store(struct kobject *kobj,
3506 struct kobj_attribute *attr,
3507 const char *buf, size_t count)
3508{
3509 int err;
3510 int knob;
3511
3512 err = kstrtoint(buf, 10, &knob);
3513 if (err)
3514 return err;
3515 /*
3516 * When a KSM page is created it is shared by 2 mappings. This
3517 * being a signed comparison, it implicitly verifies it's not
3518 * negative.
3519 */
3520 if (knob < 2)
3521 return -EINVAL;
3522
3523 if (READ_ONCE(ksm_max_page_sharing) == knob)
3524 return count;
3525
3526 mutex_lock(&ksm_thread_mutex);
3527 wait_while_offlining();
3528 if (ksm_max_page_sharing != knob) {
3529 if (ksm_pages_shared || remove_all_stable_nodes())
3530 err = -EBUSY;
3531 else
3532 ksm_max_page_sharing = knob;
3533 }
3534 mutex_unlock(&ksm_thread_mutex);
3535
3536 return err ? err : count;
3537}
3538KSM_ATTR(max_page_sharing);
3539
3540static ssize_t pages_scanned_show(struct kobject *kobj,
3541 struct kobj_attribute *attr, char *buf)
3542{
3543 return sysfs_emit(buf, "%lu\n", ksm_pages_scanned);
3544}
3545KSM_ATTR_RO(pages_scanned);
3546
3547static ssize_t pages_shared_show(struct kobject *kobj,
3548 struct kobj_attribute *attr, char *buf)
3549{
3550 return sysfs_emit(buf, "%lu\n", ksm_pages_shared);
3551}
3552KSM_ATTR_RO(pages_shared);
3553
3554static ssize_t pages_sharing_show(struct kobject *kobj,
3555 struct kobj_attribute *attr, char *buf)
3556{
3557 return sysfs_emit(buf, "%lu\n", ksm_pages_sharing);
3558}
3559KSM_ATTR_RO(pages_sharing);
3560
3561static ssize_t pages_unshared_show(struct kobject *kobj,
3562 struct kobj_attribute *attr, char *buf)
3563{
3564 return sysfs_emit(buf, "%lu\n", ksm_pages_unshared);
3565}
3566KSM_ATTR_RO(pages_unshared);
3567
3568static ssize_t pages_volatile_show(struct kobject *kobj,
3569 struct kobj_attribute *attr, char *buf)
3570{
3571 long ksm_pages_volatile;
3572
3573 ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
3574 - ksm_pages_sharing - ksm_pages_unshared;
3575 /*
3576 * It was not worth any locking to calculate that statistic,
3577 * but it might therefore sometimes be negative: conceal that.
3578 */
3579 if (ksm_pages_volatile < 0)
3580 ksm_pages_volatile = 0;
3581 return sysfs_emit(buf, "%ld\n", ksm_pages_volatile);
3582}
3583KSM_ATTR_RO(pages_volatile);
3584
3585static ssize_t pages_skipped_show(struct kobject *kobj,
3586 struct kobj_attribute *attr, char *buf)
3587{
3588 return sysfs_emit(buf, "%lu\n", ksm_pages_skipped);
3589}
3590KSM_ATTR_RO(pages_skipped);
3591
3592static ssize_t ksm_zero_pages_show(struct kobject *kobj,
3593 struct kobj_attribute *attr, char *buf)
3594{
3595 return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages));
3596}
3597KSM_ATTR_RO(ksm_zero_pages);
3598
3599static ssize_t general_profit_show(struct kobject *kobj,
3600 struct kobj_attribute *attr, char *buf)
3601{
3602 long general_profit;
3603
3604 general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
3605 ksm_rmap_items * sizeof(struct ksm_rmap_item);
3606
3607 return sysfs_emit(buf, "%ld\n", general_profit);
3608}
3609KSM_ATTR_RO(general_profit);
3610
3611static ssize_t stable_node_dups_show(struct kobject *kobj,
3612 struct kobj_attribute *attr, char *buf)
3613{
3614 return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups);
3615}
3616KSM_ATTR_RO(stable_node_dups);
3617
3618static ssize_t stable_node_chains_show(struct kobject *kobj,
3619 struct kobj_attribute *attr, char *buf)
3620{
3621 return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains);
3622}
3623KSM_ATTR_RO(stable_node_chains);
3624
3625static ssize_t
3626stable_node_chains_prune_millisecs_show(struct kobject *kobj,
3627 struct kobj_attribute *attr,
3628 char *buf)
3629{
3630 return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs);
3631}
3632
3633static ssize_t
3634stable_node_chains_prune_millisecs_store(struct kobject *kobj,
3635 struct kobj_attribute *attr,
3636 const char *buf, size_t count)
3637{
3638 unsigned int msecs;
3639 int err;
3640
3641 err = kstrtouint(buf, 10, &msecs);
3642 if (err)
3643 return -EINVAL;
3644
3645 ksm_stable_node_chains_prune_millisecs = msecs;
3646
3647 return count;
3648}
3649KSM_ATTR(stable_node_chains_prune_millisecs);
3650
3651static ssize_t full_scans_show(struct kobject *kobj,
3652 struct kobj_attribute *attr, char *buf)
3653{
3654 return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr);
3655}
3656KSM_ATTR_RO(full_scans);
3657
static ssize_t smart_scan_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_smart_scan);
}

static ssize_t smart_scan_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	int err;
	bool value;

	err = kstrtobool(buf, &value);
	if (err)
		return -EINVAL;

	ksm_smart_scan = value;
	return count;
}
KSM_ATTR(smart_scan);

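/*
 * advisor_mode selects between no advisor ("none") and the scan-time
 * advisor ("scan-time"), which tunes pages_to_scan so that a full scan
 * completes in roughly advisor_target_scan_time seconds.  The active
 * mode is shown in brackets, as is usual for sysfs selectors.
 */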
static ssize_t advisor_mode_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
		output = "none [scan-time]";
	else
		output = "[none] scan-time";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t advisor_mode_store(struct kobject *kobj,
				  struct kobj_attribute *attr, const char *buf,
				  size_t count)
{
	enum ksm_advisor_type curr_advisor = ksm_advisor;

	if (sysfs_streq("scan-time", buf))
		ksm_advisor = KSM_ADVISOR_SCAN_TIME;
	else if (sysfs_streq("none", buf))
		ksm_advisor = KSM_ADVISOR_NONE;
	else
		return -EINVAL;

	/* Set advisor default values */
	if (curr_advisor != ksm_advisor)
		set_advisor_defaults();

	return count;
}
KSM_ATTR(advisor_mode);

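/*
 * Bounds used by the scan-time advisor: the maximum CPU percentage
 * ksmd may consume, and the minimum/maximum pages_to_scan values the
 * advisor is allowed to choose.
 */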
static ssize_t advisor_max_cpu_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_advisor_max_cpu);
}

static ssize_t advisor_max_cpu_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;

	ksm_advisor_max_cpu = value;
	return count;
}
KSM_ATTR(advisor_max_cpu);

static ssize_t advisor_min_pages_to_scan_show(struct kobject *kobj,
					      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_advisor_min_pages_to_scan);
}

static ssize_t advisor_min_pages_to_scan_store(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;

	ksm_advisor_min_pages_to_scan = value;
	return count;
}
KSM_ATTR(advisor_min_pages_to_scan);

static ssize_t advisor_max_pages_to_scan_show(struct kobject *kobj,
					      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_advisor_max_pages_to_scan);
}

static ssize_t advisor_max_pages_to_scan_store(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;

	ksm_advisor_max_pages_to_scan = value;
	return count;
}
KSM_ATTR(advisor_max_pages_to_scan);

static ssize_t advisor_target_scan_time_show(struct kobject *kobj,
					     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_advisor_target_scan_time);
}

static ssize_t advisor_target_scan_time_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;
	if (value < 1)
		return -EINVAL;

	ksm_advisor_target_scan_time = value;
	return count;
}
KSM_ATTR(advisor_target_scan_time);

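/*
 * All of the attributes above are exposed as files under
 * /sys/kernel/mm/ksm/, via the "ksm" attribute group registered on
 * mm_kobj in ksm_init() below.
 */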
static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&pages_scanned_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
	&pages_volatile_attr.attr,
	&pages_skipped_attr.attr,
	&ksm_zero_pages_attr.attr,
	&full_scans_attr.attr,
#ifdef CONFIG_NUMA
	&merge_across_nodes_attr.attr,
#endif
	&max_page_sharing_attr.attr,
	&stable_node_chains_attr.attr,
	&stable_node_dups_attr.attr,
	&stable_node_chains_prune_millisecs_attr.attr,
	&use_zero_pages_attr.attr,
	&general_profit_attr.attr,
	&smart_scan_attr.attr,
	&advisor_mode_attr.attr,
	&advisor_max_cpu_attr.attr,
	&advisor_min_pages_to_scan_attr.attr,
	&advisor_max_pages_to_scan_attr.attr,
	&advisor_target_scan_time_attr.attr,
	NULL,
};

static const struct attribute_group ksm_attr_group = {
	.attrs = ksm_attrs,
	.name = "ksm",
};
#endif /* CONFIG_SYSFS */

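/*
 * Boot-time setup: seed the zero-page checksum, create the slab
 * caches, start the ksmd kthread and register the sysfs interface.
 */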
static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

	/* The correct value depends on page size and endianness */
	zero_checksum = calc_checksum(ZERO_PAGE(0));
	/* Default to false for backwards compatibility */
	ksm_use_zero_pages = false;

	err = ksm_slab_init();
	if (err)
		goto out;

	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		pr_err("ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free;
	}

#ifdef CONFIG_SYSFS
	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		pr_err("ksm: register sysfs failed\n");
		kthread_stop(ksm_thread);
		goto out_free;
	}
#else
	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */

#endif /* CONFIG_SYSFS */

#ifdef CONFIG_MEMORY_HOTREMOVE
	/* There is no significance to this priority 100 */
	hotplug_memory_notifier(ksm_memory_callback, KSM_CALLBACK_PRI);
#endif
	return 0;

out_free:
	ksm_slab_free();
out:
	return err;
}
subsys_initcall(ksm_init);