Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Simple NUMA memory policy for the Linux kernel.
4 *
5 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
6 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
7 *
8 * NUMA policy allows the user to give hints in which node(s) memory should
9 * be allocated.
10 *
11 * Support the following memory policies per VMA and per process:
12 *
13 * The VMA policy has priority over the process policy for a page fault.
14 *
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter
20 * is used.
21 *
22 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
27 *
28 * preferred Try a specific node first before normal fallback.
29 * As a special case NUMA_NO_NODE here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
32 * process policy.
33 *
34 * preferred many Try a set of nodes first before normal fallback. This is
35 * similar to preferred without the special case.
36 *
37 * default Allocate on the local node first, or when on a VMA
38 * use the process policy. This is what Linux always did
39 * in a NUMA aware kernel and still does by, ahem, default.
40 *
41 * The process policy is applied for most non-interrupt memory allocations
42 * in that process' context. Interrupts ignore the policies and always
43 * try to allocate on the local CPU. The VMA policy is only applied for memory
44 * allocations for a VMA in the VM.
45 *
46 * Currently there are a few corner cases in swapping where the policy
47 * is not applied, but the majority should be handled. When process policy
48 * is used it is not remembered over swap outs/swap ins.
49 *
50 * Only the highest zone in the zone hierarchy gets policied. Allocations
51 * requesting a lower zone just use the default policy. This implies that
52 * on systems with highmem, kernel lowmem allocations don't get policied.
53 * Same with GFP_DMA allocations.
54 *
55 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
56 * all users and remembered even when nobody has memory mapped.
57 */
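/*
 * Illustrative sketch (not part of this file): from user space the policies
 * above are selected with set_mempolicy(2) or mbind(2), for example through
 * the <numaif.h> wrappers shipped with libnuma. The snippet assumes a
 * machine with at least two NUMA nodes and omits error handling.
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes = 0x3;	// nodes 0 and 1
 *	// interleave all future allocations of this task across nodes 0-1
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8);
 */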
58
59/* Notebook:
60 fix mmap readahead to honour policy and enable policy for any page cache
61 object
62 statistics for bigpages
63 global policy for page cache? currently it uses process policy. Requires
64 first item above.
65 handle mremap for shared memory (currently ignored for the policy)
66 grows down?
67 make bind policy root only? It can trigger oom much faster and the
68 kernel is not always grateful with that.
69*/
70
71#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72
73#include <linux/mempolicy.h>
74#include <linux/pagewalk.h>
75#include <linux/highmem.h>
76#include <linux/hugetlb.h>
77#include <linux/kernel.h>
78#include <linux/sched.h>
79#include <linux/sched/mm.h>
80#include <linux/sched/numa_balancing.h>
81#include <linux/sched/task.h>
82#include <linux/nodemask.h>
83#include <linux/cpuset.h>
84#include <linux/slab.h>
85#include <linux/string.h>
86#include <linux/export.h>
87#include <linux/nsproxy.h>
88#include <linux/interrupt.h>
89#include <linux/init.h>
90#include <linux/compat.h>
91#include <linux/ptrace.h>
92#include <linux/swap.h>
93#include <linux/seq_file.h>
94#include <linux/proc_fs.h>
95#include <linux/migrate.h>
96#include <linux/ksm.h>
97#include <linux/rmap.h>
98#include <linux/security.h>
99#include <linux/syscalls.h>
100#include <linux/ctype.h>
101#include <linux/mm_inline.h>
102#include <linux/mmu_notifier.h>
103#include <linux/printk.h>
104#include <linux/swapops.h>
105
106#include <asm/tlbflush.h>
107#include <asm/tlb.h>
108#include <linux/uaccess.h>
109
110#include "internal.h"
111
112/* Internal flags */
113#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
114#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
115
116static struct kmem_cache *policy_cache;
117static struct kmem_cache *sn_cache;
118
119/* Highest zone. A specific allocation for a zone below that is not
120 policied. */
121enum zone_type policy_zone = 0;
122
123/*
124 * run-time system-wide default policy => local allocation
125 */
126static struct mempolicy default_policy = {
127 .refcnt = ATOMIC_INIT(1), /* never free it */
128 .mode = MPOL_LOCAL,
129};
130
131static struct mempolicy preferred_node_policy[MAX_NUMNODES];
132
133/**
134 * numa_map_to_online_node - Find closest online node
135 * @node: Node id to start the search
136 *
137 * Lookup the next closest node by distance if @node is not online.
138 *
139 * Return: this @node if it is online, otherwise the closest node by distance
140 */
141int numa_map_to_online_node(int node)
142{
143 int min_dist = INT_MAX, dist, n, min_node;
144
145 if (node == NUMA_NO_NODE || node_online(node))
146 return node;
147
148 min_node = node;
149 for_each_online_node(n) {
150 dist = node_distance(node, n);
151 if (dist < min_dist) {
152 min_dist = dist;
153 min_node = n;
154 }
155 }
156
157 return min_node;
158}
159EXPORT_SYMBOL_GPL(numa_map_to_online_node);
160
161struct mempolicy *get_task_policy(struct task_struct *p)
162{
163 struct mempolicy *pol = p->mempolicy;
164 int node;
165
166 if (pol)
167 return pol;
168
169 node = numa_node_id();
170 if (node != NUMA_NO_NODE) {
171 pol = &preferred_node_policy[node];
172 /* preferred_node_policy is not initialised early in boot */
173 if (pol->mode)
174 return pol;
175 }
176
177 return &default_policy;
178}
179
180static const struct mempolicy_operations {
181 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
182 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
183} mpol_ops[MPOL_MAX];
184
185static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
186{
187 return pol->flags & MPOL_MODE_FLAGS;
188}
189
190static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
191 const nodemask_t *rel)
192{
193 nodemask_t tmp;
194 nodes_fold(tmp, *orig, nodes_weight(*rel));
195 nodes_onto(*ret, tmp, *rel);
196}
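/*
 * Worked example (illustrative): for MPOL_F_RELATIVE_NODES, with a user
 * mask of {0,1} and a cpuset mems_allowed of {4,5,6}, nodes_fold() wraps
 * the user mask into the three available slots ({0,1} stays {0,1}) and
 * nodes_onto() maps slot i to the i-th set bit of mems_allowed, so the
 * resulting mask is {4,5}.
 */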
197
198static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
199{
200 if (nodes_empty(*nodes))
201 return -EINVAL;
202 pol->nodes = *nodes;
203 return 0;
204}
205
206static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
207{
208 if (nodes_empty(*nodes))
209 return -EINVAL;
210
211 nodes_clear(pol->nodes);
212 node_set(first_node(*nodes), pol->nodes);
213 return 0;
214}
215
216/*
217 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
218 * any, for the new policy. mpol_new() has already validated the nodes
219 * parameter with respect to the policy mode and flags.
220 *
221 * Must be called holding task's alloc_lock to protect task's mems_allowed
222 * and mempolicy. May also be called holding the mmap_lock for write.
223 */
224static int mpol_set_nodemask(struct mempolicy *pol,
225 const nodemask_t *nodes, struct nodemask_scratch *nsc)
226{
227 int ret;
228
229 /*
230 * Default (pol==NULL) and local memory policies are not
231 * subject to any remapping. They also do not need any special
232 * constructor.
233 */
234 if (!pol || pol->mode == MPOL_LOCAL)
235 return 0;
236
237 /* Check N_MEMORY */
238 nodes_and(nsc->mask1,
239 cpuset_current_mems_allowed, node_states[N_MEMORY]);
240
241 VM_BUG_ON(!nodes);
242
243 if (pol->flags & MPOL_F_RELATIVE_NODES)
244 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
245 else
246 nodes_and(nsc->mask2, *nodes, nsc->mask1);
247
248 if (mpol_store_user_nodemask(pol))
249 pol->w.user_nodemask = *nodes;
250 else
251 pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
252
253 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
254 return ret;
255}
256
257/*
258 * This function just creates a new policy, does some checks and simple
259 * initialization. You must invoke mpol_set_nodemask() to set nodes.
260 */
261static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
262 nodemask_t *nodes)
263{
264 struct mempolicy *policy;
265
266 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
267 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
268
269 if (mode == MPOL_DEFAULT) {
270 if (nodes && !nodes_empty(*nodes))
271 return ERR_PTR(-EINVAL);
272 return NULL;
273 }
274 VM_BUG_ON(!nodes);
275
276 /*
277 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
278 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
279 * All other modes require a valid pointer to a non-empty nodemask.
280 */
281 if (mode == MPOL_PREFERRED) {
282 if (nodes_empty(*nodes)) {
283 if (((flags & MPOL_F_STATIC_NODES) ||
284 (flags & MPOL_F_RELATIVE_NODES)))
285 return ERR_PTR(-EINVAL);
286
287 mode = MPOL_LOCAL;
288 }
289 } else if (mode == MPOL_LOCAL) {
290 if (!nodes_empty(*nodes) ||
291 (flags & MPOL_F_STATIC_NODES) ||
292 (flags & MPOL_F_RELATIVE_NODES))
293 return ERR_PTR(-EINVAL);
294 } else if (nodes_empty(*nodes))
295 return ERR_PTR(-EINVAL);
296 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
297 if (!policy)
298 return ERR_PTR(-ENOMEM);
299 atomic_set(&policy->refcnt, 1);
300 policy->mode = mode;
301 policy->flags = flags;
302 policy->home_node = NUMA_NO_NODE;
303
304 return policy;
305}
306
307/* Slow path of a mpol destructor. */
308void __mpol_put(struct mempolicy *p)
309{
310 if (!atomic_dec_and_test(&p->refcnt))
311 return;
312 kmem_cache_free(policy_cache, p);
313}
314
315static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
316{
317}
318
319static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
320{
321 nodemask_t tmp;
322
323 if (pol->flags & MPOL_F_STATIC_NODES)
324 nodes_and(tmp, pol->w.user_nodemask, *nodes);
325 else if (pol->flags & MPOL_F_RELATIVE_NODES)
326 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
327 else {
328 nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
329 *nodes);
330 pol->w.cpuset_mems_allowed = *nodes;
331 }
332
333 if (nodes_empty(tmp))
334 tmp = *nodes;
335
336 pol->nodes = tmp;
337}
338
339static void mpol_rebind_preferred(struct mempolicy *pol,
340 const nodemask_t *nodes)
341{
342 pol->w.cpuset_mems_allowed = *nodes;
343}
344
345/*
346 * mpol_rebind_policy - Migrate a policy to a different set of nodes
347 *
348 * Per-vma policies are protected by mmap_lock. Allocations using per-task
349 * policies are protected by task->mems_allowed_seq to prevent a premature
350 * OOM/allocation failure due to parallel nodemask modification.
351 */
352static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
353{
354 if (!pol || pol->mode == MPOL_LOCAL)
355 return;
356 if (!mpol_store_user_nodemask(pol) &&
357 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
358 return;
359
360 mpol_ops[pol->mode].rebind(pol, newmask);
361}
362
363/*
364 * Wrapper for mpol_rebind_policy() that just requires task
365 * pointer, and updates task mempolicy.
366 *
367 * Called with task's alloc_lock held.
368 */
369
370void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
371{
372 mpol_rebind_policy(tsk->mempolicy, new);
373}
374
375/*
376 * Rebind each vma in mm to new nodemask.
377 *
378 * Call holding a reference to mm. Takes mm->mmap_lock during call.
379 */
380
381void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
382{
383 struct vm_area_struct *vma;
384 VMA_ITERATOR(vmi, mm, 0);
385
386 mmap_write_lock(mm);
387 for_each_vma(vmi, vma)
388 mpol_rebind_policy(vma->vm_policy, new);
389 mmap_write_unlock(mm);
390}
391
392static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
393 [MPOL_DEFAULT] = {
394 .rebind = mpol_rebind_default,
395 },
396 [MPOL_INTERLEAVE] = {
397 .create = mpol_new_nodemask,
398 .rebind = mpol_rebind_nodemask,
399 },
400 [MPOL_PREFERRED] = {
401 .create = mpol_new_preferred,
402 .rebind = mpol_rebind_preferred,
403 },
404 [MPOL_BIND] = {
405 .create = mpol_new_nodemask,
406 .rebind = mpol_rebind_nodemask,
407 },
408 [MPOL_LOCAL] = {
409 .rebind = mpol_rebind_default,
410 },
411 [MPOL_PREFERRED_MANY] = {
412 .create = mpol_new_nodemask,
413 .rebind = mpol_rebind_preferred,
414 },
415};
416
417static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
418 unsigned long flags);
419
420struct queue_pages {
421 struct list_head *pagelist;
422 unsigned long flags;
423 nodemask_t *nmask;
424 unsigned long start;
425 unsigned long end;
426 struct vm_area_struct *first;
427};
428
429/*
430 * Check if the folio's nid is in qp->nmask.
431 *
432 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
433 * not in qp->nmask instead.
434 */
435static inline bool queue_folio_required(struct folio *folio,
436 struct queue_pages *qp)
437{
438 int nid = folio_nid(folio);
439 unsigned long flags = qp->flags;
440
441 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
442}
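/*
 * Worked example (illustrative): with qp->nmask = {0,1} and MPOL_MF_INVERT
 * set (as do_mbind() does), a folio on node 0 is not queued because it is
 * already on an allowed node, while a folio on node 3 is queued for
 * migration. Without MPOL_MF_INVERT the test is the opposite.
 */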
443
444/*
445 * queue_folios_pmd() has three possible return values:
446 * 0 - folios are placed on the right node or queued successfully, or
447 * a special page was met, i.e. the huge zero page.
448 * 1 - there is an unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
449 * specified.
450 * -EIO - a migration entry was found, or only MPOL_MF_STRICT was specified
451 * and an existing folio was already on a node that does not follow
452 * the policy.
453 */
454static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
455 unsigned long end, struct mm_walk *walk)
456 __releases(ptl)
457{
458 int ret = 0;
459 struct folio *folio;
460 struct queue_pages *qp = walk->private;
461 unsigned long flags;
462
463 if (unlikely(is_pmd_migration_entry(*pmd))) {
464 ret = -EIO;
465 goto unlock;
466 }
467 folio = pfn_folio(pmd_pfn(*pmd));
468 if (is_huge_zero_page(&folio->page)) {
469 walk->action = ACTION_CONTINUE;
470 goto unlock;
471 }
472 if (!queue_folio_required(folio, qp))
473 goto unlock;
474
475 flags = qp->flags;
476 /* go to folio migration */
477 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
478 if (!vma_migratable(walk->vma) ||
479 migrate_folio_add(folio, qp->pagelist, flags)) {
480 ret = 1;
481 goto unlock;
482 }
483 } else
484 ret = -EIO;
485unlock:
486 spin_unlock(ptl);
487 return ret;
488}
489
490/*
491 * Scan through pages checking if pages follow certain conditions,
492 * and move them to the pagelist if they do.
493 *
494 * queue_folios_pte_range() has three possible return values:
495 * 0 - folios are placed on the right node or queued successfully, or
496 * a special page was met, i.e. the zero page.
497 * 1 - there is an unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
498 * specified.
499 * -EIO - only MPOL_MF_STRICT was specified and an existing folio was already
500 * on a node that does not follow the policy.
501 */
502static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
503 unsigned long end, struct mm_walk *walk)
504{
505 struct vm_area_struct *vma = walk->vma;
506 struct folio *folio;
507 struct queue_pages *qp = walk->private;
508 unsigned long flags = qp->flags;
509 bool has_unmovable = false;
510 pte_t *pte, *mapped_pte;
511 pte_t ptent;
512 spinlock_t *ptl;
513
514 ptl = pmd_trans_huge_lock(pmd, vma);
515 if (ptl)
516 return queue_folios_pmd(pmd, ptl, addr, end, walk);
517
518 mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
519 if (!pte) {
520 walk->action = ACTION_AGAIN;
521 return 0;
522 }
523 for (; addr != end; pte++, addr += PAGE_SIZE) {
524 ptent = ptep_get(pte);
525 if (!pte_present(ptent))
526 continue;
527 folio = vm_normal_folio(vma, addr, ptent);
528 if (!folio || folio_is_zone_device(folio))
529 continue;
530 /*
531 * vm_normal_folio() filters out zero pages, but there might
532 * still be reserved folios to skip, perhaps in a VDSO.
533 */
534 if (folio_test_reserved(folio))
535 continue;
536 if (!queue_folio_required(folio, qp))
537 continue;
538 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
539 /* MPOL_MF_STRICT must be specified if we get here */
540 if (!vma_migratable(vma)) {
541 has_unmovable = true;
542 break;
543 }
544
545 /*
546 * Do not abort immediately since there may be
547 * temporarily off-LRU pages in the range. We still
548 * need to migrate the other LRU pages.
549 */
550 if (migrate_folio_add(folio, qp->pagelist, flags))
551 has_unmovable = true;
552 } else
553 break;
554 }
555 pte_unmap_unlock(mapped_pte, ptl);
556 cond_resched();
557
558 if (has_unmovable)
559 return 1;
560
561 return addr != end ? -EIO : 0;
562}
563
564static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
565 unsigned long addr, unsigned long end,
566 struct mm_walk *walk)
567{
568 int ret = 0;
569#ifdef CONFIG_HUGETLB_PAGE
570 struct queue_pages *qp = walk->private;
571 unsigned long flags = (qp->flags & MPOL_MF_VALID);
572 struct folio *folio;
573 spinlock_t *ptl;
574 pte_t entry;
575
576 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
577 entry = huge_ptep_get(pte);
578 if (!pte_present(entry))
579 goto unlock;
580 folio = pfn_folio(pte_pfn(entry));
581 if (!queue_folio_required(folio, qp))
582 goto unlock;
583
584 if (flags == MPOL_MF_STRICT) {
585 /*
586 * STRICT alone means only detecting misplaced folio and no
587 * need to further check other vma.
588 */
589 ret = -EIO;
590 goto unlock;
591 }
592
593 if (!vma_migratable(walk->vma)) {
594 /*
595 * Must be STRICT with MOVE*, otherwise .test_walk() would have
596 * stopped walking the current vma.
597 * Detect the misplaced folio but allow migrating folios which
598 * have been queued.
599 */
600 ret = 1;
601 goto unlock;
602 }
603
604 /*
605 * With MPOL_MF_MOVE, we try to migrate only unshared folios. If it
606 * is shared it is likely not worth migrating.
607 *
608 * To check if the folio is shared, ideally we want to make sure
609 * every page is mapped to the same process. Doing that is very
610 * expensive, so check the estimated mapcount of the folio instead.
611 */
612 if (flags & (MPOL_MF_MOVE_ALL) ||
613 (flags & MPOL_MF_MOVE && folio_estimated_sharers(folio) == 1 &&
614 !hugetlb_pmd_shared(pte))) {
615 if (!isolate_hugetlb(folio, qp->pagelist) &&
616 (flags & MPOL_MF_STRICT))
617 /*
618 * Failed to isolate folio but allow migrating pages
619 * which have been queued.
620 */
621 ret = 1;
622 }
623unlock:
624 spin_unlock(ptl);
625#else
626 BUG();
627#endif
628 return ret;
629}
630
631#ifdef CONFIG_NUMA_BALANCING
632/*
633 * This is used to mark a range of virtual addresses as inaccessible.
634 * These are later cleared by a NUMA hinting fault. Depending on these
635 * faults, pages may be migrated for better NUMA placement.
636 *
637 * This is assuming that NUMA faults are handled using PROT_NONE. If
638 * an architecture makes a different choice, it will need further
639 * changes to the core.
640 */
641unsigned long change_prot_numa(struct vm_area_struct *vma,
642 unsigned long addr, unsigned long end)
643{
644 struct mmu_gather tlb;
645 long nr_updated;
646
647 tlb_gather_mmu(&tlb, vma->vm_mm);
648
649 nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
650 if (nr_updated > 0)
651 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
652
653 tlb_finish_mmu(&tlb);
654
655 return nr_updated;
656}
657#else
658static unsigned long change_prot_numa(struct vm_area_struct *vma,
659 unsigned long addr, unsigned long end)
660{
661 return 0;
662}
663#endif /* CONFIG_NUMA_BALANCING */
664
665static int queue_pages_test_walk(unsigned long start, unsigned long end,
666 struct mm_walk *walk)
667{
668 struct vm_area_struct *next, *vma = walk->vma;
669 struct queue_pages *qp = walk->private;
670 unsigned long endvma = vma->vm_end;
671 unsigned long flags = qp->flags;
672
673 /* range check first */
674 VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
675
676 if (!qp->first) {
677 qp->first = vma;
678 if (!(flags & MPOL_MF_DISCONTIG_OK) &&
679 (qp->start < vma->vm_start))
680 /* hole at head side of range */
681 return -EFAULT;
682 }
683 next = find_vma(vma->vm_mm, vma->vm_end);
684 if (!(flags & MPOL_MF_DISCONTIG_OK) &&
685 ((vma->vm_end < qp->end) &&
686 (!next || vma->vm_end < next->vm_start)))
687 /* hole at middle or tail of range */
688 return -EFAULT;
689
690 /*
691 * Need to check MPOL_MF_STRICT to return -EIO if possible
692 * regardless of vma_migratable
693 */
694 if (!vma_migratable(vma) &&
695 !(flags & MPOL_MF_STRICT))
696 return 1;
697
698 if (endvma > end)
699 endvma = end;
700
701 if (flags & MPOL_MF_LAZY) {
702 /* Similar to task_numa_work, skip inaccessible VMAs */
703 if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
704 !(vma->vm_flags & VM_MIXEDMAP))
705 change_prot_numa(vma, start, endvma);
706 return 1;
707 }
708
709 /* queue pages from current vma */
710 if (flags & MPOL_MF_VALID)
711 return 0;
712 return 1;
713}
714
715static const struct mm_walk_ops queue_pages_walk_ops = {
716 .hugetlb_entry = queue_folios_hugetlb,
717 .pmd_entry = queue_folios_pte_range,
718 .test_walk = queue_pages_test_walk,
719};
720
721/*
722 * Walk through page tables and collect pages to be migrated.
723 *
724 * If pages found in a given range are on a set of nodes (determined by
725 * @nodes and @flags), they are isolated and queued to the pagelist which
726 * is passed via @private.
727 *
728 * queue_pages_range() has three possible return values:
729 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
730 * specified.
731 * 0 - queue pages successfully or no misplaced page.
732 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
733 * memory range specified by nodemask and maxnode points outside
734 * your accessible address space (-EFAULT)
735 */
736static int
737queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
738 nodemask_t *nodes, unsigned long flags,
739 struct list_head *pagelist)
740{
741 int err;
742 struct queue_pages qp = {
743 .pagelist = pagelist,
744 .flags = flags,
745 .nmask = nodes,
746 .start = start,
747 .end = end,
748 .first = NULL,
749 };
750
751 err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
752
753 if (!qp.first)
754 /* whole range in hole */
755 err = -EFAULT;
756
757 return err;
758}
759
760/*
761 * Apply policy to a single VMA
762 * This must be called with the mmap_lock held for writing.
763 */
764static int vma_replace_policy(struct vm_area_struct *vma,
765 struct mempolicy *pol)
766{
767 int err;
768 struct mempolicy *old;
769 struct mempolicy *new;
770
771 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
772 vma->vm_start, vma->vm_end, vma->vm_pgoff,
773 vma->vm_ops, vma->vm_file,
774 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
775
776 new = mpol_dup(pol);
777 if (IS_ERR(new))
778 return PTR_ERR(new);
779
780 if (vma->vm_ops && vma->vm_ops->set_policy) {
781 err = vma->vm_ops->set_policy(vma, new);
782 if (err)
783 goto err_out;
784 }
785
786 old = vma->vm_policy;
787 vma->vm_policy = new; /* protected by mmap_lock */
788 mpol_put(old);
789
790 return 0;
791 err_out:
792 mpol_put(new);
793 return err;
794}
795
796/* Split or merge the VMA (if required) and apply the new policy */
797static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
798 struct vm_area_struct **prev, unsigned long start,
799 unsigned long end, struct mempolicy *new_pol)
800{
801 struct vm_area_struct *merged;
802 unsigned long vmstart, vmend;
803 pgoff_t pgoff;
804 int err;
805
806 vmend = min(end, vma->vm_end);
807 if (start > vma->vm_start) {
808 *prev = vma;
809 vmstart = start;
810 } else {
811 vmstart = vma->vm_start;
812 }
813
814 if (mpol_equal(vma_policy(vma), new_pol)) {
815 *prev = vma;
816 return 0;
817 }
818
819 pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT);
820 merged = vma_merge(vmi, vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags,
821 vma->anon_vma, vma->vm_file, pgoff, new_pol,
822 vma->vm_userfaultfd_ctx, anon_vma_name(vma));
823 if (merged) {
824 *prev = merged;
825 return vma_replace_policy(merged, new_pol);
826 }
827
828 if (vma->vm_start != vmstart) {
829 err = split_vma(vmi, vma, vmstart, 1);
830 if (err)
831 return err;
832 }
833
834 if (vma->vm_end != vmend) {
835 err = split_vma(vmi, vma, vmend, 0);
836 if (err)
837 return err;
838 }
839
840 *prev = vma;
841 return vma_replace_policy(vma, new_pol);
842}
843
844/* Set the process memory policy */
845static long do_set_mempolicy(unsigned short mode, unsigned short flags,
846 nodemask_t *nodes)
847{
848 struct mempolicy *new, *old;
849 NODEMASK_SCRATCH(scratch);
850 int ret;
851
852 if (!scratch)
853 return -ENOMEM;
854
855 new = mpol_new(mode, flags, nodes);
856 if (IS_ERR(new)) {
857 ret = PTR_ERR(new);
858 goto out;
859 }
860
861 task_lock(current);
862 ret = mpol_set_nodemask(new, nodes, scratch);
863 if (ret) {
864 task_unlock(current);
865 mpol_put(new);
866 goto out;
867 }
868
869 old = current->mempolicy;
870 current->mempolicy = new;
871 if (new && new->mode == MPOL_INTERLEAVE)
872 current->il_prev = MAX_NUMNODES-1;
873 task_unlock(current);
874 mpol_put(old);
875 ret = 0;
876out:
877 NODEMASK_SCRATCH_FREE(scratch);
878 return ret;
879}
880
881/*
882 * Return nodemask for policy for get_mempolicy() query
883 *
884 * Called with task's alloc_lock held
885 */
886static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
887{
888 nodes_clear(*nodes);
889 if (p == &default_policy)
890 return;
891
892 switch (p->mode) {
893 case MPOL_BIND:
894 case MPOL_INTERLEAVE:
895 case MPOL_PREFERRED:
896 case MPOL_PREFERRED_MANY:
897 *nodes = p->nodes;
898 break;
899 case MPOL_LOCAL:
900 /* return empty node mask for local allocation */
901 break;
902 default:
903 BUG();
904 }
905}
906
907static int lookup_node(struct mm_struct *mm, unsigned long addr)
908{
909 struct page *p = NULL;
910 int ret;
911
912 ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
913 if (ret > 0) {
914 ret = page_to_nid(p);
915 put_page(p);
916 }
917 return ret;
918}
919
920/* Retrieve NUMA policy */
921static long do_get_mempolicy(int *policy, nodemask_t *nmask,
922 unsigned long addr, unsigned long flags)
923{
924 int err;
925 struct mm_struct *mm = current->mm;
926 struct vm_area_struct *vma = NULL;
927 struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
928
929 if (flags &
930 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
931 return -EINVAL;
932
933 if (flags & MPOL_F_MEMS_ALLOWED) {
934 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
935 return -EINVAL;
936 *policy = 0; /* just so it's initialized */
937 task_lock(current);
938 *nmask = cpuset_current_mems_allowed;
939 task_unlock(current);
940 return 0;
941 }
942
943 if (flags & MPOL_F_ADDR) {
944 /*
945 * Do NOT fall back to task policy if the
946 * vma/shared policy at addr is NULL. We
947 * want to return MPOL_DEFAULT in this case.
948 */
949 mmap_read_lock(mm);
950 vma = vma_lookup(mm, addr);
951 if (!vma) {
952 mmap_read_unlock(mm);
953 return -EFAULT;
954 }
955 if (vma->vm_ops && vma->vm_ops->get_policy)
956 pol = vma->vm_ops->get_policy(vma, addr);
957 else
958 pol = vma->vm_policy;
959 } else if (addr)
960 return -EINVAL;
961
962 if (!pol)
963 pol = &default_policy; /* indicates default behavior */
964
965 if (flags & MPOL_F_NODE) {
966 if (flags & MPOL_F_ADDR) {
967 /*
968 * Take a refcount on the mpol, because we are about to
969 * drop the mmap_lock, after which only "pol" remains
970 * valid, "vma" is stale.
971 */
972 pol_refcount = pol;
973 vma = NULL;
974 mpol_get(pol);
975 mmap_read_unlock(mm);
976 err = lookup_node(mm, addr);
977 if (err < 0)
978 goto out;
979 *policy = err;
980 } else if (pol == current->mempolicy &&
981 pol->mode == MPOL_INTERLEAVE) {
982 *policy = next_node_in(current->il_prev, pol->nodes);
983 } else {
984 err = -EINVAL;
985 goto out;
986 }
987 } else {
988 *policy = pol == &default_policy ? MPOL_DEFAULT :
989 pol->mode;
990 /*
991 * Internal mempolicy flags must be masked off before exposing
992 * the policy to userspace.
993 */
994 *policy |= (pol->flags & MPOL_MODE_FLAGS);
995 }
996
997 err = 0;
998 if (nmask) {
999 if (mpol_store_user_nodemask(pol)) {
1000 *nmask = pol->w.user_nodemask;
1001 } else {
1002 task_lock(current);
1003 get_policy_nodemask(pol, nmask);
1004 task_unlock(current);
1005 }
1006 }
1007
1008 out:
1009 mpol_cond_put(pol);
1010 if (vma)
1011 mmap_read_unlock(mm);
1012 if (pol_refcount)
1013 mpol_put(pol_refcount);
1014 return err;
1015}
1016
1017#ifdef CONFIG_MIGRATION
1018static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1019 unsigned long flags)
1020{
1021 /*
1022 * We try to migrate only unshared folios. If it is shared it
1023 * is likely not worth migrating.
1024 *
1025 * To check if the folio is shared, ideally we want to make sure
1026 * every page is mapped to the same process. Doing that is very
1027 * expensive, so check the estimated mapcount of the folio instead.
1028 */
1029 if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) {
1030 if (folio_isolate_lru(folio)) {
1031 list_add_tail(&folio->lru, foliolist);
1032 node_stat_mod_folio(folio,
1033 NR_ISOLATED_ANON + folio_is_file_lru(folio),
1034 folio_nr_pages(folio));
1035 } else if (flags & MPOL_MF_STRICT) {
1036 /*
1037 * A non-movable folio may reach here. And there may be
1038 * temporary off LRU folios or non-LRU movable folios.
1039 * Treat them as unmovable folios since they can't be
1040 * isolated, so they can't be moved at the moment. It
1041 * should return -EIO for this case too.
1042 */
1043 return -EIO;
1044 }
1045 }
1046
1047 return 0;
1048}
1049
1050/*
1051 * Migrate pages from one node to a target node.
1052 * Returns error or the number of pages not migrated.
1053 */
1054static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1055 int flags)
1056{
1057 nodemask_t nmask;
1058 struct vm_area_struct *vma;
1059 LIST_HEAD(pagelist);
1060 int err = 0;
1061 struct migration_target_control mtc = {
1062 .nid = dest,
1063 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1064 };
1065
1066 nodes_clear(nmask);
1067 node_set(source, nmask);
1068
1069 /*
1070 * This does not "check" the range but isolates all pages that
1071 * need migration. Between passing in the full user address
1072 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1073 */
1074 vma = find_vma(mm, 0);
1075 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1076 queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
1077 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1078
1079 if (!list_empty(&pagelist)) {
1080 err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1081 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1082 if (err)
1083 putback_movable_pages(&pagelist);
1084 }
1085
1086 return err;
1087}
1088
1089/*
1090 * Move pages between the two nodesets so as to preserve the physical
1091 * layout as much as possible.
1092 *
1093 * Returns the number of pages that could not be moved.
1094 */
1095int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1096 const nodemask_t *to, int flags)
1097{
1098 int busy = 0;
1099 int err = 0;
1100 nodemask_t tmp;
1101
1102 lru_cache_disable();
1103
1104 mmap_read_lock(mm);
1105
1106 /*
1107 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1108 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1109 * bit in 'tmp', and return that <source, dest> pair for migration.
1110 * The pair of nodemasks 'to' and 'from' define the map.
1111 *
1112 * If no pair of bits is found that way, fall back to picking some
1113 * pair of 'source' and 'dest' bits that are not the same. If the
1114 * 'source' and 'dest' bits are the same, this represents a node
1115 * that will be migrating to itself, so no pages need move.
1116 *
1117 * If no bits are left in 'tmp', or if all remaining bits left
1118 * in 'tmp' correspond to the same bit in 'to', return false
1119 * (nothing left to migrate).
1120 *
1121 * This lets us pick a pair of nodes to migrate between, such that
1122 * if possible the dest node is not already occupied by some other
1123 * source node, minimizing the risk of overloading the memory on a
1124 * node that would happen if we migrated incoming memory to a node
1125 * before migrating outgoing memory from that same node.
1126 *
1127 * A single scan of tmp is sufficient. As we go, we remember the
1128 * most recent <s, d> pair that moved (s != d). If we find a pair
1129 * that not only moved, but what's better, moved to an empty slot
1130 * (d is not set in tmp), then we break out with that pair.
1131 * Otherwise, when we finish scanning tmp, we at least have the
1132 * most recent <s, d> pair that moved. If we get all the way through
1133 * the scan of tmp without finding any node that moved, much less
1134 * moved to an empty node, then there is nothing left worth migrating.
1135 */
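/*
 * Worked example (illustrative): migrating {0,1,2} -> {2,3,4} with equal
 * weights remaps 0->2, 1->3 and 2->4. The scan below first picks <1,3>
 * (node 3 is empty), then <2,4>, and only then <0,2>, so node 2 is
 * drained before any pages are moved onto it.
 */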
1136
1137 tmp = *from;
1138 while (!nodes_empty(tmp)) {
1139 int s, d;
1140 int source = NUMA_NO_NODE;
1141 int dest = 0;
1142
1143 for_each_node_mask(s, tmp) {
1144
1145 /*
1146 * do_migrate_pages() tries to maintain the relative
1147 * node relationship of the pages established between
1148 * threads and memory areas.
1149 *
1150 * However, if the number of source nodes is not equal to
1151 * the number of destination nodes, we cannot preserve
1152 * this relative node relationship. In that case, skip
1153 * copying memory from a node that is in the destination
1154 * mask.
1155 *
1156 * Example: [2,3,4] -> [3,4,5] moves everything.
1157 * [0-7] -> [3,4,5] moves only 0,1,2,6,7.
1158 */
1159
1160 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1161 (node_isset(s, *to)))
1162 continue;
1163
1164 d = node_remap(s, *from, *to);
1165 if (s == d)
1166 continue;
1167
1168 source = s; /* Node moved. Memorize */
1169 dest = d;
1170
1171 /* dest not in remaining from nodes? */
1172 if (!node_isset(dest, tmp))
1173 break;
1174 }
1175 if (source == NUMA_NO_NODE)
1176 break;
1177
1178 node_clear(source, tmp);
1179 err = migrate_to_node(mm, source, dest, flags);
1180 if (err > 0)
1181 busy += err;
1182 if (err < 0)
1183 break;
1184 }
1185 mmap_read_unlock(mm);
1186
1187 lru_cache_enable();
1188 if (err < 0)
1189 return err;
1190 return busy;
1191
1192}
1193
1194/*
1195 * Allocate a new page for page migration based on vma policy.
1196 * Start by assuming the page is mapped by the same vma that contains @start.
1197 * Search forward from there, if not. N.B., this assumes that the
1198 * list of pages handed to migrate_pages()--which is how we get here--
1199 * is in virtual address order.
1200 */
1201static struct folio *new_folio(struct folio *src, unsigned long start)
1202{
1203 struct vm_area_struct *vma;
1204 unsigned long address;
1205 VMA_ITERATOR(vmi, current->mm, start);
1206 gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
1207
1208 for_each_vma(vmi, vma) {
1209 address = page_address_in_vma(&src->page, vma);
1210 if (address != -EFAULT)
1211 break;
1212 }
1213
1214 if (folio_test_hugetlb(src)) {
1215 return alloc_hugetlb_folio_vma(folio_hstate(src),
1216 vma, address);
1217 }
1218
1219 if (folio_test_large(src))
1220 gfp = GFP_TRANSHUGE;
1221
1222 /*
1223 * if !vma, vma_alloc_folio() will use task or system default policy
1224 */
1225 return vma_alloc_folio(gfp, folio_order(src), vma, address,
1226 folio_test_large(src));
1227}
1228#else
1229
1230static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1231 unsigned long flags)
1232{
1233 return -EIO;
1234}
1235
1236int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1237 const nodemask_t *to, int flags)
1238{
1239 return -ENOSYS;
1240}
1241
1242static struct folio *new_folio(struct folio *src, unsigned long start)
1243{
1244 return NULL;
1245}
1246#endif
1247
1248static long do_mbind(unsigned long start, unsigned long len,
1249 unsigned short mode, unsigned short mode_flags,
1250 nodemask_t *nmask, unsigned long flags)
1251{
1252 struct mm_struct *mm = current->mm;
1253 struct vm_area_struct *vma, *prev;
1254 struct vma_iterator vmi;
1255 struct mempolicy *new;
1256 unsigned long end;
1257 int err;
1258 int ret;
1259 LIST_HEAD(pagelist);
1260
1261 if (flags & ~(unsigned long)MPOL_MF_VALID)
1262 return -EINVAL;
1263 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1264 return -EPERM;
1265
1266 if (start & ~PAGE_MASK)
1267 return -EINVAL;
1268
1269 if (mode == MPOL_DEFAULT)
1270 flags &= ~MPOL_MF_STRICT;
1271
1272 len = PAGE_ALIGN(len);
1273 end = start + len;
1274
1275 if (end < start)
1276 return -EINVAL;
1277 if (end == start)
1278 return 0;
1279
1280 new = mpol_new(mode, mode_flags, nmask);
1281 if (IS_ERR(new))
1282 return PTR_ERR(new);
1283
1284 if (flags & MPOL_MF_LAZY)
1285 new->flags |= MPOL_F_MOF;
1286
1287 /*
1288 * If we are using the default policy then operation
1289 * on discontinuous address spaces is okay after all
1290 */
1291 if (!new)
1292 flags |= MPOL_MF_DISCONTIG_OK;
1293
1294 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1295 start, start + len, mode, mode_flags,
1296 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1297
1298 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1299
1300 lru_cache_disable();
1301 }
1302 {
1303 NODEMASK_SCRATCH(scratch);
1304 if (scratch) {
1305 mmap_write_lock(mm);
1306 err = mpol_set_nodemask(new, nmask, scratch);
1307 if (err)
1308 mmap_write_unlock(mm);
1309 } else
1310 err = -ENOMEM;
1311 NODEMASK_SCRATCH_FREE(scratch);
1312 }
1313 if (err)
1314 goto mpol_out;
1315
1316 ret = queue_pages_range(mm, start, end, nmask,
1317 flags | MPOL_MF_INVERT, &pagelist);
1318
1319 if (ret < 0) {
1320 err = ret;
1321 goto up_out;
1322 }
1323
1324 vma_iter_init(&vmi, mm, start);
1325 prev = vma_prev(&vmi);
1326 for_each_vma_range(vmi, vma, end) {
1327 err = mbind_range(&vmi, vma, &prev, start, end, new);
1328 if (err)
1329 break;
1330 }
1331
1332 if (!err) {
1333 int nr_failed = 0;
1334
1335 if (!list_empty(&pagelist)) {
1336 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1337 nr_failed = migrate_pages(&pagelist, new_folio, NULL,
1338 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
1339 if (nr_failed)
1340 putback_movable_pages(&pagelist);
1341 }
1342
1343 if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1344 err = -EIO;
1345 } else {
1346up_out:
1347 if (!list_empty(&pagelist))
1348 putback_movable_pages(&pagelist);
1349 }
1350
1351 mmap_write_unlock(mm);
1352mpol_out:
1353 mpol_put(new);
1354 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1355 lru_cache_enable();
1356 return err;
1357}
1358
1359/*
1360 * User space interface with variable sized bitmaps for nodelists.
1361 */
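/*
 * Illustrative sketch (not part of this file): the nodemask is passed from
 * user space as a plain bit array of unsigned longs, with maxnode giving
 * its length in bits. A caller selecting nodes 1 and 3 might build it as:
 *
 *	unsigned long mask = (1UL << 1) | (1UL << 3);
 *	// hand &mask and maxnode = sizeof(mask) * 8 to mbind()/set_mempolicy()
 */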
1362static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1363 unsigned long maxnode)
1364{
1365 unsigned long nlongs = BITS_TO_LONGS(maxnode);
1366 int ret;
1367
1368 if (in_compat_syscall())
1369 ret = compat_get_bitmap(mask,
1370 (const compat_ulong_t __user *)nmask,
1371 maxnode);
1372 else
1373 ret = copy_from_user(mask, nmask,
1374 nlongs * sizeof(unsigned long));
1375
1376 if (ret)
1377 return -EFAULT;
1378
1379 if (maxnode % BITS_PER_LONG)
1380 mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1381
1382 return 0;
1383}
1384
1385/* Copy a node mask from user space. */
1386static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1387 unsigned long maxnode)
1388{
1389 --maxnode;
1390 nodes_clear(*nodes);
1391 if (maxnode == 0 || !nmask)
1392 return 0;
1393 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1394 return -EINVAL;
1395
1396 /*
1397 * When the user specified more nodes than supported, just check
1398 * if the non-supported part is all zero, one word at a time,
1399 * starting at the end.
1400 */
1401 while (maxnode > MAX_NUMNODES) {
1402 unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1403 unsigned long t;
1404
1405 if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
1406 return -EFAULT;
1407
1408 if (maxnode - bits >= MAX_NUMNODES) {
1409 maxnode -= bits;
1410 } else {
1411 maxnode = MAX_NUMNODES;
1412 t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1413 }
1414 if (t)
1415 return -EINVAL;
1416 }
1417
1418 return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
1419}
1420
1421/* Copy a kernel node mask to user space */
1422static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1423 nodemask_t *nodes)
1424{
1425 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1426 unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1427 bool compat = in_compat_syscall();
1428
1429 if (compat)
1430 nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
1431
1432 if (copy > nbytes) {
1433 if (copy > PAGE_SIZE)
1434 return -EINVAL;
1435 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1436 return -EFAULT;
1437 copy = nbytes;
1438 maxnode = nr_node_ids;
1439 }
1440
1441 if (compat)
1442 return compat_put_bitmap((compat_ulong_t __user *)mask,
1443 nodes_addr(*nodes), maxnode);
1444
1445 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1446}
1447
1448/* Basic parameter sanity check used by both mbind() and set_mempolicy() */
1449static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
1450{
1451 *flags = *mode & MPOL_MODE_FLAGS;
1452 *mode &= ~MPOL_MODE_FLAGS;
1453
1454 if ((unsigned int)(*mode) >= MPOL_MAX)
1455 return -EINVAL;
1456 if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
1457 return -EINVAL;
1458 if (*flags & MPOL_F_NUMA_BALANCING) {
1459 if (*mode != MPOL_BIND)
1460 return -EINVAL;
1461 *flags |= (MPOL_F_MOF | MPOL_F_MORON);
1462 }
1463 return 0;
1464}
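/*
 * Illustrative sketch (not part of this file): the mode flags checked above
 * are OR-ed into the mode argument by user space, e.g.
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes = 1UL << 2;
 *	// bind to node 2 and keep the mask fixed across cpuset remaps;
 *	// MPOL_F_STATIC_NODES is (1 << 15) from the uapi <linux/mempolicy.h>
 *	// if the libnuma headers in use do not define it.
 *	set_mempolicy(MPOL_BIND | MPOL_F_STATIC_NODES, &nodes, sizeof(nodes) * 8);
 */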
1465
1466static long kernel_mbind(unsigned long start, unsigned long len,
1467 unsigned long mode, const unsigned long __user *nmask,
1468 unsigned long maxnode, unsigned int flags)
1469{
1470 unsigned short mode_flags;
1471 nodemask_t nodes;
1472 int lmode = mode;
1473 int err;
1474
1475 start = untagged_addr(start);
1476 err = sanitize_mpol_flags(&lmode, &mode_flags);
1477 if (err)
1478 return err;
1479
1480 err = get_nodes(&nodes, nmask, maxnode);
1481 if (err)
1482 return err;
1483
1484 return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
1485}
1486
1487SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
1488 unsigned long, home_node, unsigned long, flags)
1489{
1490 struct mm_struct *mm = current->mm;
1491 struct vm_area_struct *vma, *prev;
1492 struct mempolicy *new, *old;
1493 unsigned long end;
1494 int err = -ENOENT;
1495 VMA_ITERATOR(vmi, mm, start);
1496
1497 start = untagged_addr(start);
1498 if (start & ~PAGE_MASK)
1499 return -EINVAL;
1500 /*
1501 * flags is reserved for future extensions, if any.
1502 */
1503 if (flags != 0)
1504 return -EINVAL;
1505
1506 /*
1507 * Check home_node is online to avoid accessing uninitialized
1508 * NODE_DATA.
1509 */
1510 if (home_node >= MAX_NUMNODES || !node_online(home_node))
1511 return -EINVAL;
1512
1513 len = PAGE_ALIGN(len);
1514 end = start + len;
1515
1516 if (end < start)
1517 return -EINVAL;
1518 if (end == start)
1519 return 0;
1520 mmap_write_lock(mm);
1521 prev = vma_prev(&vmi);
1522 for_each_vma_range(vmi, vma, end) {
1523 /*
1524 * If any vma in the range has a policy other than MPOL_BIND
1525 * or MPOL_PREFERRED_MANY we return an error. We don't reset
1526 * the home node for vmas we have already updated before.
1527 */
1528 old = vma_policy(vma);
1529 if (!old)
1530 continue;
1531 if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
1532 err = -EOPNOTSUPP;
1533 break;
1534 }
1535 new = mpol_dup(old);
1536 if (IS_ERR(new)) {
1537 err = PTR_ERR(new);
1538 break;
1539 }
1540
1541 new->home_node = home_node;
1542 err = mbind_range(&vmi, vma, &prev, start, end, new);
1543 mpol_put(new);
1544 if (err)
1545 break;
1546 }
1547 mmap_write_unlock(mm);
1548 return err;
1549}
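/*
 * Illustrative sketch (not part of this file): user space reaches the
 * syscall above via syscall(2); __NR_set_mempolicy_home_node is assumed to
 * be provided by recent kernel headers, and p/len to describe a mapping
 * that already has an MPOL_BIND or MPOL_PREFERRED_MANY policy.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	// prefer node 1 as the home node for the range
 *	syscall(__NR_set_mempolicy_home_node, (unsigned long)p, len, 1, 0);
 */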
1550
1551SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1552 unsigned long, mode, const unsigned long __user *, nmask,
1553 unsigned long, maxnode, unsigned int, flags)
1554{
1555 return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1556}
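/*
 * Illustrative sketch (not part of this file): binding one mapping to a
 * single node with mbind(2) and asking the kernel to move pages already
 * allocated elsewhere. Assumes node 0 exists; error handling omitted.
 *
 *	#include <sys/mman.h>
 *	#include <numaif.h>
 *
 *	size_t len = 1 << 20;
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long node0 = 1UL << 0;
 *	mbind(p, len, MPOL_BIND, &node0, sizeof(node0) * 8,
 *	      MPOL_MF_MOVE | MPOL_MF_STRICT);
 */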
1557
1558/* Set the process memory policy */
1559static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1560 unsigned long maxnode)
1561{
1562 unsigned short mode_flags;
1563 nodemask_t nodes;
1564 int lmode = mode;
1565 int err;
1566
1567 err = sanitize_mpol_flags(&lmode, &mode_flags);
1568 if (err)
1569 return err;
1570
1571 err = get_nodes(&nodes, nmask, maxnode);
1572 if (err)
1573 return err;
1574
1575 return do_set_mempolicy(lmode, mode_flags, &nodes);
1576}
1577
1578SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1579 unsigned long, maxnode)
1580{
1581 return kernel_set_mempolicy(mode, nmask, maxnode);
1582}
1583
1584static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1585 const unsigned long __user *old_nodes,
1586 const unsigned long __user *new_nodes)
1587{
1588 struct mm_struct *mm = NULL;
1589 struct task_struct *task;
1590 nodemask_t task_nodes;
1591 int err;
1592 nodemask_t *old;
1593 nodemask_t *new;
1594 NODEMASK_SCRATCH(scratch);
1595
1596 if (!scratch)
1597 return -ENOMEM;
1598
1599 old = &scratch->mask1;
1600 new = &scratch->mask2;
1601
1602 err = get_nodes(old, old_nodes, maxnode);
1603 if (err)
1604 goto out;
1605
1606 err = get_nodes(new, new_nodes, maxnode);
1607 if (err)
1608 goto out;
1609
1610 /* Find the mm_struct */
1611 rcu_read_lock();
1612 task = pid ? find_task_by_vpid(pid) : current;
1613 if (!task) {
1614 rcu_read_unlock();
1615 err = -ESRCH;
1616 goto out;
1617 }
1618 get_task_struct(task);
1619
1620 err = -EINVAL;
1621
1622 /*
1623 * Check if this process has the right to modify the specified process.
1624 * Use the regular "ptrace_may_access()" checks.
1625 */
1626 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1627 rcu_read_unlock();
1628 err = -EPERM;
1629 goto out_put;
1630 }
1631 rcu_read_unlock();
1632
1633 task_nodes = cpuset_mems_allowed(task);
1634 /* Is the user allowed to access the target nodes? */
1635 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1636 err = -EPERM;
1637 goto out_put;
1638 }
1639
1640 task_nodes = cpuset_mems_allowed(current);
1641 nodes_and(*new, *new, task_nodes);
1642 if (nodes_empty(*new))
1643 goto out_put;
1644
1645 err = security_task_movememory(task);
1646 if (err)
1647 goto out_put;
1648
1649 mm = get_task_mm(task);
1650 put_task_struct(task);
1651
1652 if (!mm) {
1653 err = -EINVAL;
1654 goto out;
1655 }
1656
1657 err = do_migrate_pages(mm, old, new,
1658 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1659
1660 mmput(mm);
1661out:
1662 NODEMASK_SCRATCH_FREE(scratch);
1663
1664 return err;
1665
1666out_put:
1667 put_task_struct(task);
1668 goto out;
1669
1670}
1671
1672SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1673 const unsigned long __user *, old_nodes,
1674 const unsigned long __user *, new_nodes)
1675{
1676 return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1677}
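/*
 * Illustrative sketch (not part of this file): a minimal user-space caller
 * of migrate_pages(2) through the <numaif.h> wrapper. Assumes nodes 0 and 1
 * exist and the caller targets itself; error handling omitted.
 *
 *	#include <numaif.h>
 *
 *	unsigned long from = 1UL << 0;	// move pages currently on node 0
 *	unsigned long to   = 1UL << 1;	// ... over to node 1
 *	// pid 0 means the calling process; maxnode is the bit length of the masks
 *	migrate_pages(0, sizeof(from) * 8, &from, &to);
 */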
1678
1679
1680/* Retrieve NUMA policy */
1681static int kernel_get_mempolicy(int __user *policy,
1682 unsigned long __user *nmask,
1683 unsigned long maxnode,
1684 unsigned long addr,
1685 unsigned long flags)
1686{
1687 int err;
1688 int pval;
1689 nodemask_t nodes;
1690
1691 if (nmask != NULL && maxnode < nr_node_ids)
1692 return -EINVAL;
1693
1694 addr = untagged_addr(addr);
1695
1696 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1697
1698 if (err)
1699 return err;
1700
1701 if (policy && put_user(pval, policy))
1702 return -EFAULT;
1703
1704 if (nmask)
1705 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1706
1707 return err;
1708}
1709
1710SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1711 unsigned long __user *, nmask, unsigned long, maxnode,
1712 unsigned long, addr, unsigned long, flags)
1713{
1714 return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1715}
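/*
 * Illustrative sketch (not part of this file): asking which node currently
 * backs a given address with get_mempolicy(2). Assumes p points at a page
 * that has already been faulted in; error handling omitted.
 *
 *	#include <numaif.h>
 *
 *	int node = -1;
 *	get_mempolicy(&node, NULL, 0, p, MPOL_F_NODE | MPOL_F_ADDR);
 *	// on success, node holds the NUMA node id of the page backing p
 */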
1716
1717bool vma_migratable(struct vm_area_struct *vma)
1718{
1719 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1720 return false;
1721
1722 /*
1723 * DAX device mappings require predictable access latency, so avoid
1724 * incurring periodic faults.
1725 */
1726 if (vma_is_dax(vma))
1727 return false;
1728
1729 if (is_vm_hugetlb_page(vma) &&
1730 !hugepage_migration_supported(hstate_vma(vma)))
1731 return false;
1732
1733 /*
1734 * Migration allocates pages in the highest zone. If we cannot
1735 * do so then migration (at least from node to node) is not
1736 * possible.
1737 */
1738 if (vma->vm_file &&
1739 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1740 < policy_zone)
1741 return false;
1742 return true;
1743}
1744
1745struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1746 unsigned long addr)
1747{
1748 struct mempolicy *pol = NULL;
1749
1750 if (vma) {
1751 if (vma->vm_ops && vma->vm_ops->get_policy) {
1752 pol = vma->vm_ops->get_policy(vma, addr);
1753 } else if (vma->vm_policy) {
1754 pol = vma->vm_policy;
1755
1756 /*
1757 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1758 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1759 * count on these policies which will be dropped by
1760 * mpol_cond_put() later
1761 */
1762 if (mpol_needs_cond_ref(pol))
1763 mpol_get(pol);
1764 }
1765 }
1766
1767 return pol;
1768}
1769
1770/*
1771 * get_vma_policy(@vma, @addr)
1772 * @vma: virtual memory area whose policy is sought
1773 * @addr: address in @vma for shared policy lookup
1774 *
1775 * Returns effective policy for a VMA at specified address.
1776 * Falls back to current->mempolicy or system default policy, as necessary.
1777 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1778 * count--added by the get_policy() vm_op, as appropriate--to protect against
1779 * freeing by another task. It is the caller's responsibility to free the
1780 * extra reference for shared policies.
1781 */
1782static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1783 unsigned long addr)
1784{
1785 struct mempolicy *pol = __get_vma_policy(vma, addr);
1786
1787 if (!pol)
1788 pol = get_task_policy(current);
1789
1790 return pol;
1791}
1792
1793bool vma_policy_mof(struct vm_area_struct *vma)
1794{
1795 struct mempolicy *pol;
1796
1797 if (vma->vm_ops && vma->vm_ops->get_policy) {
1798 bool ret = false;
1799
1800 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1801 if (pol && (pol->flags & MPOL_F_MOF))
1802 ret = true;
1803 mpol_cond_put(pol);
1804
1805 return ret;
1806 }
1807
1808 pol = vma->vm_policy;
1809 if (!pol)
1810 pol = get_task_policy(current);
1811
1812 return pol->flags & MPOL_F_MOF;
1813}
1814
1815bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1816{
1817 enum zone_type dynamic_policy_zone = policy_zone;
1818
1819 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1820
1821 /*
1822 * if policy->nodes has movable memory only,
1823 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1824 *
1825 * policy->nodes is intersected with node_states[N_MEMORY],
1826 * so if the following test fails, it implies that
1827 * policy->nodes has movable memory only.
1828 */
1829 if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1830 dynamic_policy_zone = ZONE_MOVABLE;
1831
1832 return zone >= dynamic_policy_zone;
1833}
1834
1835/*
1836 * Return a nodemask representing a mempolicy for filtering nodes for
1837 * page allocation
1838 */
1839nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1840{
1841 int mode = policy->mode;
1842
1843 /* Lower zones don't get a nodemask applied for MPOL_BIND */
1844 if (unlikely(mode == MPOL_BIND) &&
1845 apply_policy_zone(policy, gfp_zone(gfp)) &&
1846 cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1847 return &policy->nodes;
1848
1849 if (mode == MPOL_PREFERRED_MANY)
1850 return &policy->nodes;
1851
1852 return NULL;
1853}
1854
1855/*
1856 * Return the preferred node id for 'prefer' mempolicy, and return
1857 * the given id for all other policies.
1858 *
1859 * policy_node() is always coupled with policy_nodemask(), which
1860 * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1861 */
1862static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1863{
1864 if (policy->mode == MPOL_PREFERRED) {
1865 nd = first_node(policy->nodes);
1866 } else {
1867 /*
1868 * __GFP_THISNODE shouldn't even be used with the bind policy
1869 * because we might easily break the expectation to stay on the
1870 * requested node and not break the policy.
1871 */
1872 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1873 }
1874
1875 if ((policy->mode == MPOL_BIND ||
1876 policy->mode == MPOL_PREFERRED_MANY) &&
1877 policy->home_node != NUMA_NO_NODE)
1878 return policy->home_node;
1879
1880 return nd;
1881}
1882
1883/* Do dynamic interleaving for a process */
1884static unsigned interleave_nodes(struct mempolicy *policy)
1885{
1886 unsigned next;
1887 struct task_struct *me = current;
1888
1889 next = next_node_in(me->il_prev, policy->nodes);
1890 if (next < MAX_NUMNODES)
1891 me->il_prev = next;
1892 return next;
1893}
1894
1895/*
1896 * Depending on the memory policy provide a node from which to allocate the
1897 * next slab entry.
1898 */
1899unsigned int mempolicy_slab_node(void)
1900{
1901 struct mempolicy *policy;
1902 int node = numa_mem_id();
1903
1904 if (!in_task())
1905 return node;
1906
1907 policy = current->mempolicy;
1908 if (!policy)
1909 return node;
1910
1911 switch (policy->mode) {
1912 case MPOL_PREFERRED:
1913 return first_node(policy->nodes);
1914
1915 case MPOL_INTERLEAVE:
1916 return interleave_nodes(policy);
1917
1918 case MPOL_BIND:
1919 case MPOL_PREFERRED_MANY:
1920 {
1921 struct zoneref *z;
1922
1923 /*
1924 * Follow bind policy behavior and start allocation at the
1925 * first node.
1926 */
1927 struct zonelist *zonelist;
1928 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1929 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1930 z = first_zones_zonelist(zonelist, highest_zoneidx,
1931 &policy->nodes);
1932 return z->zone ? zone_to_nid(z->zone) : node;
1933 }
1934 case MPOL_LOCAL:
1935 return node;
1936
1937 default:
1938 BUG();
1939 }
1940}
1941
1942/*
1943 * Do static interleaving for a VMA with known offset @n. Returns the n'th
1944 * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1945 * number of present nodes.
1946 */
1947static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1948{
1949 nodemask_t nodemask = pol->nodes;
1950 unsigned int target, nnodes;
1951 int i;
1952 int nid;
1953 /*
1954 * The barrier will stabilize the nodemask in a register or on
1955 * the stack so that it will stop changing under the code.
1956 *
1957 * Between first_node() and next_node(), pol->nodes could be changed
1958 * by other threads. So we copy pol->nodes onto the local stack.
1959 */
1960 barrier();
1961
1962 nnodes = nodes_weight(nodemask);
1963 if (!nnodes)
1964 return numa_node_id();
1965 target = (unsigned int)n % nnodes;
1966 nid = first_node(nodemask);
1967 for (i = 0; i < target; i++)
1968 nid = next_node(nid, nodemask);
1969 return nid;
1970}
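/*
 * Worked example (illustrative): with pol->nodes = {0,2,5} and n = 7,
 * nnodes = 3 and target = 7 % 3 = 1, so the walk above returns the second
 * set node, i.e. node 2.
 */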
1971
1972/* Determine a node number for interleave */
1973static inline unsigned interleave_nid(struct mempolicy *pol,
1974 struct vm_area_struct *vma, unsigned long addr, int shift)
1975{
1976 if (vma) {
1977 unsigned long off;
1978
1979 /*
1980 * for small pages, there is no difference between
1981 * shift and PAGE_SHIFT, so the bit-shift is safe.
1982 * for huge pages, since vm_pgoff is in units of small
1983 * pages, we need to shift off the always 0 bits to get
1984 * a useful offset.
1985 */
1986 BUG_ON(shift < PAGE_SHIFT);
1987 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1988 off += (addr - vma->vm_start) >> shift;
1989 return offset_il_node(pol, off);
1990 } else
1991 return interleave_nodes(pol);
1992}
1993
1994#ifdef CONFIG_HUGETLBFS
1995/*
1996 * huge_node(@vma, @addr, @gfp_flags, @mpol)
1997 * @vma: virtual memory area whose policy is sought
1998 * @addr: address in @vma for shared policy lookup and interleave policy
1999 * @gfp_flags: for requested zone
2000 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2001 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
2002 *
2003 * Returns a nid suitable for a huge page allocation and a pointer
2004 * to the struct mempolicy for conditional unref after allocation.
2005 * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2006 * to the mempolicy's @nodemask for filtering the zonelist.
2007 *
2008 * Must be protected by read_mems_allowed_begin()
2009 */
2010int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2011 struct mempolicy **mpol, nodemask_t **nodemask)
2012{
2013 int nid;
2014 int mode;
2015
2016 *mpol = get_vma_policy(vma, addr);
2017 *nodemask = NULL;
2018 mode = (*mpol)->mode;
2019
2020 if (unlikely(mode == MPOL_INTERLEAVE)) {
2021 nid = interleave_nid(*mpol, vma, addr,
2022 huge_page_shift(hstate_vma(vma)));
2023 } else {
2024 nid = policy_node(gfp_flags, *mpol, numa_node_id());
2025 if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
2026 *nodemask = &(*mpol)->nodes;
2027 }
2028 return nid;
2029}
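
/*
 * Sketch of the expected calling pattern (editorial illustration; the real
 * caller lives in the hugetlb allocation path, and the gfp mask shown is a
 * placeholder for whatever the caller uses):
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	int nid;
 *
 *	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 *	... allocate a huge page on @nid, filtered by @nodemask if non-NULL ...
 *	mpol_cond_put(mpol);
 */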
2030
2031/*
2032 * init_nodemask_of_mempolicy
2033 *
2034 * If the current task's mempolicy is "default" [NULL], return 'false'
2035 * to indicate default policy. Otherwise, extract the policy nodemask
2036 * for 'bind', 'interleave', 'preferred' or 'preferred (many)' policy into
2037 * the argument nodemask, or initialize the argument nodemask to contain
2038 * the single local node for 'local' policy, and return 'true' to indicate
2039 * the presence of a non-default mempolicy.
2040 *
2041 * We don't bother with reference counting the mempolicy [mpol_get/put]
2042 * because the current task is examining its own mempolicy and a task's
2043 * mempolicy is only ever changed by the task itself.
2044 *
2045 * N.B., it is the caller's responsibility to free a returned nodemask.
2046 */
2047bool init_nodemask_of_mempolicy(nodemask_t *mask)
2048{
2049 struct mempolicy *mempolicy;
2050
2051 if (!(mask && current->mempolicy))
2052 return false;
2053
2054 task_lock(current);
2055 mempolicy = current->mempolicy;
2056 switch (mempolicy->mode) {
2057 case MPOL_PREFERRED:
2058 case MPOL_PREFERRED_MANY:
2059 case MPOL_BIND:
2060 case MPOL_INTERLEAVE:
2061 *mask = mempolicy->nodes;
2062 break;
2063
2064 case MPOL_LOCAL:
2065 init_nodemask_of_node(mask, numa_node_id());
2066 break;
2067
2068 default:
2069 BUG();
2070 }
2071 task_unlock(current);
2072
2073 return true;
2074}
2075#endif
2076
2077/*
2078 * mempolicy_in_oom_domain
2079 *
2080 * If tsk's mempolicy is "bind", check for intersection between mask and
2081 * the policy nodemask. Otherwise, return true for all other policies
2082 * including "interleave", as a tsk with "interleave" policy may have
2083 * memory allocated from all nodes in the system.
2084 *
2085 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2086 */
2087bool mempolicy_in_oom_domain(struct task_struct *tsk,
2088 const nodemask_t *mask)
2089{
2090 struct mempolicy *mempolicy;
2091 bool ret = true;
2092
2093 if (!mask)
2094 return ret;
2095
2096 task_lock(tsk);
2097 mempolicy = tsk->mempolicy;
2098 if (mempolicy && mempolicy->mode == MPOL_BIND)
2099 ret = nodes_intersects(mempolicy->nodes, *mask);
2100 task_unlock(tsk);
2101
2102 return ret;
2103}
2104
2105/* Allocate a page in interleaved policy.
2106 Own path because it needs to do special accounting. */
2107static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2108 unsigned nid)
2109{
2110 struct page *page;
2111
2112 page = __alloc_pages(gfp, order, nid, NULL);
2113	/* skip NUMA_INTERLEAVE_HIT counter update if NUMA stats are disabled */
2114 if (!static_branch_likely(&vm_numa_stat_key))
2115 return page;
2116 if (page && page_to_nid(page) == nid) {
2117 preempt_disable();
2118 __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2119 preempt_enable();
2120 }
2121 return page;
2122}
2123
2124static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2125 int nid, struct mempolicy *pol)
2126{
2127 struct page *page;
2128 gfp_t preferred_gfp;
2129
2130 /*
2131	 * This is a two-pass approach. The first pass only tries the
2132	 * preferred nodes, but skips direct reclaim and allows the
2133	 * allocation to fail, while the second pass tries all the
2134	 * nodes in the system.
2135 */
2136 preferred_gfp = gfp | __GFP_NOWARN;
2137 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2138 page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
2139 if (!page)
2140 page = __alloc_pages(gfp, order, nid, NULL);
2141
2142 return page;
2143}
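
/*
 * Editorial note: for gfp == GFP_KERNEL the first pass above effectively uses
 * (GFP_KERNEL | __GFP_NOWARN) & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL), i.e.
 * it may fail quickly without reclaiming, before the second, unrestricted
 * pass is allowed to fall back to any node.
 */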
2144
2145/**
2146 * vma_alloc_folio - Allocate a folio for a VMA.
2147 * @gfp: GFP flags.
2148 * @order: Order of the folio.
2149 * @vma: Pointer to VMA or NULL if not available.
2150 * @addr: Virtual address of the allocation. Must be inside @vma.
2151 * @hugepage: For hugepages, try only the preferred node if possible.
2152 *
2153 * Allocate a folio for a specific address in @vma, using the appropriate
2154 * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock
2155 * of the mm_struct of the VMA to prevent it from going away. Should be
2156 * used for all allocations for folios that will be mapped into user space.
2157 *
2158 * Return: The folio on success or NULL if allocation fails.
2159 */
2160struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
2161 unsigned long addr, bool hugepage)
2162{
2163 struct mempolicy *pol;
2164 int node = numa_node_id();
2165 struct folio *folio;
2166 int preferred_nid;
2167 nodemask_t *nmask;
2168
2169 pol = get_vma_policy(vma, addr);
2170
2171 if (pol->mode == MPOL_INTERLEAVE) {
2172 struct page *page;
2173 unsigned nid;
2174
2175 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2176 mpol_cond_put(pol);
2177 gfp |= __GFP_COMP;
2178 page = alloc_page_interleave(gfp, order, nid);
2179 if (page && order > 1)
2180 prep_transhuge_page(page);
2181 folio = (struct folio *)page;
2182 goto out;
2183 }
2184
2185 if (pol->mode == MPOL_PREFERRED_MANY) {
2186 struct page *page;
2187
2188 node = policy_node(gfp, pol, node);
2189 gfp |= __GFP_COMP;
2190 page = alloc_pages_preferred_many(gfp, order, node, pol);
2191 mpol_cond_put(pol);
2192 if (page && order > 1)
2193 prep_transhuge_page(page);
2194 folio = (struct folio *)page;
2195 goto out;
2196 }
2197
2198 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2199 int hpage_node = node;
2200
2201 /*
2202		 * For hugepage allocations with a non-interleave policy that
2203		 * allows the current node (or another explicitly preferred
2204		 * node), we only try to allocate from the current/preferred
2205		 * node and don't fall back to other nodes, as the cost of
2206		 * remote accesses would likely offset THP benefits.
2207 *
2208 * If the policy is interleave or does not allow the current
2209 * node in its nodemask, we allocate the standard way.
2210 */
2211 if (pol->mode == MPOL_PREFERRED)
2212 hpage_node = first_node(pol->nodes);
2213
2214 nmask = policy_nodemask(gfp, pol);
2215 if (!nmask || node_isset(hpage_node, *nmask)) {
2216 mpol_cond_put(pol);
2217 /*
2218 * First, try to allocate THP only on local node, but
2219 * don't reclaim unnecessarily, just compact.
2220 */
2221 folio = __folio_alloc_node(gfp | __GFP_THISNODE |
2222 __GFP_NORETRY, order, hpage_node);
2223
2224 /*
2225			 * If hugepage allocations are configured to always use
2226			 * synchronous compaction, or the vma has been madvised
2227			 * to prefer hugepage backing, retry allowing remote
2228			 * memory with both reclaim and compaction as well.
2229 */
2230 if (!folio && (gfp & __GFP_DIRECT_RECLAIM))
2231 folio = __folio_alloc(gfp, order, hpage_node,
2232 nmask);
2233
2234 goto out;
2235 }
2236 }
2237
2238 nmask = policy_nodemask(gfp, pol);
2239 preferred_nid = policy_node(gfp, pol, node);
2240 folio = __folio_alloc(gfp, order, preferred_nid, nmask);
2241 mpol_cond_put(pol);
2242out:
2243 return folio;
2244}
2245EXPORT_SYMBOL(vma_alloc_folio);
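
/*
 * Minimal usage sketch (editorial illustration, not a caller in this file):
 * allocating an order-0 folio for a user mapping in a fault handler, with
 * mmap_lock held as required by the comment above.
 *
 *	struct folio *folio;
 *
 *	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
 *	if (!folio)
 *		return VM_FAULT_OOM;
 */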
2246
2247/**
2248 * alloc_pages - Allocate pages.
2249 * @gfp: GFP flags.
2250 * @order: Power of two of number of pages to allocate.
2251 *
2252 * Allocate 1 << @order contiguous pages. The physical address of the
2253 * first page is naturally aligned (e.g. an order-3 allocation will be aligned
2254 * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current
2255 * process is honoured when in process context.
2256 *
2257 * Context: Can be called from any context, provided the appropriate GFP
2258 * flags are used.
2259 * Return: The page on success or NULL if allocation fails.
2260 */
2261struct page *alloc_pages(gfp_t gfp, unsigned order)
2262{
2263 struct mempolicy *pol = &default_policy;
2264 struct page *page;
2265
2266 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2267 pol = get_task_policy(current);
2268
2269 /*
2270 * No reference counting needed for current->mempolicy
2271 * nor system default_policy
2272 */
2273 if (pol->mode == MPOL_INTERLEAVE)
2274 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2275 else if (pol->mode == MPOL_PREFERRED_MANY)
2276 page = alloc_pages_preferred_many(gfp, order,
2277 policy_node(gfp, pol, numa_node_id()), pol);
2278 else
2279 page = __alloc_pages(gfp, order,
2280 policy_node(gfp, pol, numa_node_id()),
2281 policy_nodemask(gfp, pol));
2282
2283 return page;
2284}
2285EXPORT_SYMBOL(alloc_pages);
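
/*
 * Minimal usage sketch (editorial illustration): an order-2 allocation is
 * four contiguous pages, naturally aligned as described above.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page) {
 *		void *addr = page_address(page);
 *		...
 *		__free_pages(page, 2);
 *	}
 */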
2286
2287struct folio *folio_alloc(gfp_t gfp, unsigned order)
2288{
2289 struct page *page = alloc_pages(gfp | __GFP_COMP, order);
2290
2291 if (page && order > 1)
2292 prep_transhuge_page(page);
2293 return (struct folio *)page;
2294}
2295EXPORT_SYMBOL(folio_alloc);
2296
2297static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2298 struct mempolicy *pol, unsigned long nr_pages,
2299 struct page **page_array)
2300{
2301 int nodes;
2302 unsigned long nr_pages_per_node;
2303 int delta;
2304 int i;
2305 unsigned long nr_allocated;
2306 unsigned long total_allocated = 0;
2307
2308 nodes = nodes_weight(pol->nodes);
2309 nr_pages_per_node = nr_pages / nodes;
2310 delta = nr_pages - nodes * nr_pages_per_node;
2311
2312 for (i = 0; i < nodes; i++) {
2313 if (delta) {
2314 nr_allocated = __alloc_pages_bulk(gfp,
2315 interleave_nodes(pol), NULL,
2316 nr_pages_per_node + 1, NULL,
2317 page_array);
2318 delta--;
2319 } else {
2320 nr_allocated = __alloc_pages_bulk(gfp,
2321 interleave_nodes(pol), NULL,
2322 nr_pages_per_node, NULL, page_array);
2323 }
2324
2325 page_array += nr_allocated;
2326 total_allocated += nr_allocated;
2327 }
2328
2329 return total_allocated;
2330}
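
/*
 * Worked example (editorial note): for nr_pages = 10 over 3 interleave
 * nodes, nr_pages_per_node is 3 and delta is 1, so the first node visited
 * receives 4 pages and the remaining two receive 3 pages each.
 */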
2331
2332static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2333 struct mempolicy *pol, unsigned long nr_pages,
2334 struct page **page_array)
2335{
2336 gfp_t preferred_gfp;
2337 unsigned long nr_allocated = 0;
2338
2339 preferred_gfp = gfp | __GFP_NOWARN;
2340 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2341
2342 nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2343 nr_pages, NULL, page_array);
2344
2345 if (nr_allocated < nr_pages)
2346 nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
2347 nr_pages - nr_allocated, NULL,
2348 page_array + nr_allocated);
2349 return nr_allocated;
2350}
2351
2352/* Bulk page allocation and the mempolicy need to be considered at the
2353 * same time in some situations, such as vmalloc.
2354 *
2355 * This can speed up memory allocation, especially for interleaved
2356 * allocations.
2357 */
2358unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2359 unsigned long nr_pages, struct page **page_array)
2360{
2361 struct mempolicy *pol = &default_policy;
2362
2363 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2364 pol = get_task_policy(current);
2365
2366 if (pol->mode == MPOL_INTERLEAVE)
2367 return alloc_pages_bulk_array_interleave(gfp, pol,
2368 nr_pages, page_array);
2369
2370 if (pol->mode == MPOL_PREFERRED_MANY)
2371 return alloc_pages_bulk_array_preferred_many(gfp,
2372 numa_node_id(), pol, nr_pages, page_array);
2373
2374 return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
2375 policy_nodemask(gfp, pol), nr_pages, NULL,
2376 page_array);
2377}
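
/*
 * Usage sketch (editorial illustration) of how a vmalloc-style caller might
 * fill a pre-allocated page array under the current task's policy; the array
 * allocation shown is an assumption, not taken from this file:
 *
 *	struct page **pages;
 *	unsigned long got;
 *
 *	pages = kvcalloc(nr, sizeof(*pages), GFP_KERNEL);
 *	if (pages) {
 *		got = alloc_pages_bulk_array_mempolicy(GFP_KERNEL, nr, pages);
 *		... 'got' may be less than 'nr'; handle the shortfall ...
 *	}
 */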
2378
2379int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2380{
2381 struct mempolicy *pol = mpol_dup(vma_policy(src));
2382
2383 if (IS_ERR(pol))
2384 return PTR_ERR(pol);
2385 dst->vm_policy = pol;
2386 return 0;
2387}
2388
2389/*
2390 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2391 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2392 * with the mems_allowed returned by cpuset_mems_allowed(). This
2393 * keeps mempolicies cpuset relative after its cpuset moves. See
2394 * also update_nodemask() in kernel/cpuset.c.
2395 *
2396 * current's mempolicy may be rebound by another task (the task that changes
2397 * the cpuset's mems), so we needn't do the rebind work for the current task.
2398 */
2399
2400/* Slow path of a mempolicy duplicate */
2401struct mempolicy *__mpol_dup(struct mempolicy *old)
2402{
2403 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2404
2405 if (!new)
2406 return ERR_PTR(-ENOMEM);
2407
2408 /* task's mempolicy is protected by alloc_lock */
2409 if (old == current->mempolicy) {
2410 task_lock(current);
2411 *new = *old;
2412 task_unlock(current);
2413 } else
2414 *new = *old;
2415
2416 if (current_cpuset_is_being_rebound()) {
2417 nodemask_t mems = cpuset_mems_allowed(current);
2418 mpol_rebind_policy(new, &mems);
2419 }
2420 atomic_set(&new->refcnt, 1);
2421 return new;
2422}
2423
2424/* Slow path of a mempolicy comparison */
2425bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2426{
2427 if (!a || !b)
2428 return false;
2429 if (a->mode != b->mode)
2430 return false;
2431 if (a->flags != b->flags)
2432 return false;
2433 if (a->home_node != b->home_node)
2434 return false;
2435 if (mpol_store_user_nodemask(a))
2436 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2437 return false;
2438
2439 switch (a->mode) {
2440 case MPOL_BIND:
2441 case MPOL_INTERLEAVE:
2442 case MPOL_PREFERRED:
2443 case MPOL_PREFERRED_MANY:
2444 return !!nodes_equal(a->nodes, b->nodes);
2445 case MPOL_LOCAL:
2446 return true;
2447 default:
2448 BUG();
2449 return false;
2450 }
2451}
2452
2453/*
2454 * Shared memory backing store policy support.
2455 *
2456 * Remember policies even when nobody has shared memory mapped.
2457 * The policies are kept in a red-black tree linked from the inode.
2458 * They are protected by the sp->lock rwlock, which should be held
2459 * for any accesses to the tree.
2460 */
2461
2462/*
2463 * Look up the first element intersecting [start, end). Caller holds sp->lock
2464 * for reading or for writing.
2465 */
2466static struct sp_node *
2467sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2468{
2469 struct rb_node *n = sp->root.rb_node;
2470
2471 while (n) {
2472 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2473
2474 if (start >= p->end)
2475 n = n->rb_right;
2476 else if (end <= p->start)
2477 n = n->rb_left;
2478 else
2479 break;
2480 }
2481 if (!n)
2482 return NULL;
2483 for (;;) {
2484 struct sp_node *w = NULL;
2485 struct rb_node *prev = rb_prev(n);
2486 if (!prev)
2487 break;
2488 w = rb_entry(prev, struct sp_node, nd);
2489 if (w->end <= start)
2490 break;
2491 n = prev;
2492 }
2493 return rb_entry(n, struct sp_node, nd);
2494}
2495
2496/*
2497 * Insert a new shared policy into the list. Caller holds sp->lock for
2498 * writing.
2499 */
2500static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2501{
2502 struct rb_node **p = &sp->root.rb_node;
2503 struct rb_node *parent = NULL;
2504 struct sp_node *nd;
2505
2506 while (*p) {
2507 parent = *p;
2508 nd = rb_entry(parent, struct sp_node, nd);
2509 if (new->start < nd->start)
2510 p = &(*p)->rb_left;
2511 else if (new->end > nd->end)
2512 p = &(*p)->rb_right;
2513 else
2514 BUG();
2515 }
2516 rb_link_node(&new->nd, parent, p);
2517 rb_insert_color(&new->nd, &sp->root);
2518 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2519 new->policy ? new->policy->mode : 0);
2520}
2521
2522/* Find shared policy intersecting idx */
2523struct mempolicy *
2524mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2525{
2526 struct mempolicy *pol = NULL;
2527 struct sp_node *sn;
2528
2529 if (!sp->root.rb_node)
2530 return NULL;
2531 read_lock(&sp->lock);
2532 sn = sp_lookup(sp, idx, idx+1);
2533 if (sn) {
2534 mpol_get(sn->policy);
2535 pol = sn->policy;
2536 }
2537 read_unlock(&sp->lock);
2538 return pol;
2539}
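
/*
 * Editorial note: a non-NULL return has had mpol_get() called on it above,
 * and shared policies carry MPOL_F_SHARED, so the caller must eventually
 * drop the reference, e.g. with mpol_cond_put().
 */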
2540
2541static void sp_free(struct sp_node *n)
2542{
2543 mpol_put(n->policy);
2544 kmem_cache_free(sn_cache, n);
2545}
2546
2547/**
2548 * mpol_misplaced - check whether current page node is valid in policy
2549 *
2550 * @page: page to be checked
2551 * @vma: vm area where page mapped
2552 * @addr: virtual address where page mapped
2553 *
2554 * Look up the current policy node id for vma, addr and "compare to" the page's
2555 * node id. Policy determination "mimics" alloc_page_vma().
2556 * Called from fault path where we know the vma and faulting address.
2557 *
2558 * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2559 * policy, or a suitable node ID to allocate a replacement page from.
2560 */
2561int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2562{
2563 struct mempolicy *pol;
2564 struct zoneref *z;
2565 int curnid = page_to_nid(page);
2566 unsigned long pgoff;
2567 int thiscpu = raw_smp_processor_id();
2568 int thisnid = cpu_to_node(thiscpu);
2569 int polnid = NUMA_NO_NODE;
2570 int ret = NUMA_NO_NODE;
2571
2572 pol = get_vma_policy(vma, addr);
2573 if (!(pol->flags & MPOL_F_MOF))
2574 goto out;
2575
2576 switch (pol->mode) {
2577 case MPOL_INTERLEAVE:
2578 pgoff = vma->vm_pgoff;
2579 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2580 polnid = offset_il_node(pol, pgoff);
2581 break;
2582
2583 case MPOL_PREFERRED:
2584 if (node_isset(curnid, pol->nodes))
2585 goto out;
2586 polnid = first_node(pol->nodes);
2587 break;
2588
2589 case MPOL_LOCAL:
2590 polnid = numa_node_id();
2591 break;
2592
2593 case MPOL_BIND:
2594 /* Optimize placement among multiple nodes via NUMA balancing */
2595 if (pol->flags & MPOL_F_MORON) {
2596 if (node_isset(thisnid, pol->nodes))
2597 break;
2598 goto out;
2599 }
2600 fallthrough;
2601
2602 case MPOL_PREFERRED_MANY:
2603 /*
2604		 * Use the current page if it is in the policy nodemask,
2605		 * else select the nearest allowed node, if any.
2606		 * If there are no allowed nodes, use the current one [!misplaced].
2607 */
2608 if (node_isset(curnid, pol->nodes))
2609 goto out;
2610 z = first_zones_zonelist(
2611 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2612 gfp_zone(GFP_HIGHUSER),
2613 &pol->nodes);
2614 polnid = zone_to_nid(z->zone);
2615 break;
2616
2617 default:
2618 BUG();
2619 }
2620
2621 /* Migrate the page towards the node whose CPU is referencing it */
2622 if (pol->flags & MPOL_F_MORON) {
2623 polnid = thisnid;
2624
2625 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2626 goto out;
2627 }
2628
2629 if (curnid != polnid)
2630 ret = polnid;
2631out:
2632 mpol_cond_put(pol);
2633
2634 return ret;
2635}
2636
2637/*
2638 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2639 * dropped after task->mempolicy is set to NULL so that any allocation done as
2640 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2641 * policy.
2642 */
2643void mpol_put_task_policy(struct task_struct *task)
2644{
2645 struct mempolicy *pol;
2646
2647 task_lock(task);
2648 pol = task->mempolicy;
2649 task->mempolicy = NULL;
2650 task_unlock(task);
2651 mpol_put(pol);
2652}
2653
2654static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2655{
2656	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2657 rb_erase(&n->nd, &sp->root);
2658 sp_free(n);
2659}
2660
2661static void sp_node_init(struct sp_node *node, unsigned long start,
2662 unsigned long end, struct mempolicy *pol)
2663{
2664 node->start = start;
2665 node->end = end;
2666 node->policy = pol;
2667}
2668
2669static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2670 struct mempolicy *pol)
2671{
2672 struct sp_node *n;
2673 struct mempolicy *newpol;
2674
2675 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2676 if (!n)
2677 return NULL;
2678
2679 newpol = mpol_dup(pol);
2680 if (IS_ERR(newpol)) {
2681 kmem_cache_free(sn_cache, n);
2682 return NULL;
2683 }
2684 newpol->flags |= MPOL_F_SHARED;
2685 sp_node_init(n, start, end, newpol);
2686
2687 return n;
2688}
2689
2690/* Replace a policy range. */
2691static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2692 unsigned long end, struct sp_node *new)
2693{
2694 struct sp_node *n;
2695 struct sp_node *n_new = NULL;
2696 struct mempolicy *mpol_new = NULL;
2697 int ret = 0;
2698
2699restart:
2700 write_lock(&sp->lock);
2701 n = sp_lookup(sp, start, end);
2702 /* Take care of old policies in the same range. */
2703 while (n && n->start < end) {
2704 struct rb_node *next = rb_next(&n->nd);
2705 if (n->start >= start) {
2706 if (n->end <= end)
2707 sp_delete(sp, n);
2708 else
2709 n->start = end;
2710 } else {
2711 /* Old policy spanning whole new range. */
2712 if (n->end > end) {
2713 if (!n_new)
2714 goto alloc_new;
2715
2716 *mpol_new = *n->policy;
2717 atomic_set(&mpol_new->refcnt, 1);
2718 sp_node_init(n_new, end, n->end, mpol_new);
2719 n->end = start;
2720 sp_insert(sp, n_new);
2721 n_new = NULL;
2722 mpol_new = NULL;
2723 break;
2724 } else
2725 n->end = start;
2726 }
2727 if (!next)
2728 break;
2729 n = rb_entry(next, struct sp_node, nd);
2730 }
2731 if (new)
2732 sp_insert(sp, new);
2733 write_unlock(&sp->lock);
2734 ret = 0;
2735
2736err_out:
2737 if (mpol_new)
2738 mpol_put(mpol_new);
2739 if (n_new)
2740 kmem_cache_free(sn_cache, n_new);
2741
2742 return ret;
2743
2744alloc_new:
2745 write_unlock(&sp->lock);
2746 ret = -ENOMEM;
2747 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2748 if (!n_new)
2749 goto err_out;
2750 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2751 if (!mpol_new)
2752 goto err_out;
2753 atomic_set(&mpol_new->refcnt, 1);
2754 goto restart;
2755}
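
/*
 * Editorial note on the pattern above: the replacement node and mempolicy
 * are allocated with sp->lock dropped (GFP_KERNEL may sleep), and the whole
 * lookup is then restarted, since the tree may have changed in the meantime.
 */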
2756
2757/**
2758 * mpol_shared_policy_init - initialize shared policy for inode
2759 * @sp: pointer to inode shared policy
2760 * @mpol: struct mempolicy to install
2761 *
2762 * Install non-NULL @mpol in inode's shared policy rb-tree.
2763 * On entry, the current task has a reference on a non-NULL @mpol.
2764 * This must be released on exit.
2765 * This is called from get_inode() calls, so we can use GFP_KERNEL.
2766 */
2767void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2768{
2769 int ret;
2770
2771 sp->root = RB_ROOT; /* empty tree == default mempolicy */
2772 rwlock_init(&sp->lock);
2773
2774 if (mpol) {
2775 struct vm_area_struct pvma;
2776 struct mempolicy *new;
2777 NODEMASK_SCRATCH(scratch);
2778
2779 if (!scratch)
2780 goto put_mpol;
2781 /* contextualize the tmpfs mount point mempolicy */
2782 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2783 if (IS_ERR(new))
2784 goto free_scratch; /* no valid nodemask intersection */
2785
2786 task_lock(current);
2787 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2788 task_unlock(current);
2789 if (ret)
2790 goto put_new;
2791
2792 /* Create pseudo-vma that contains just the policy */
2793 vma_init(&pvma, NULL);
2794 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2795 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2796
2797put_new:
2798 mpol_put(new); /* drop initial ref */
2799free_scratch:
2800 NODEMASK_SCRATCH_FREE(scratch);
2801put_mpol:
2802 mpol_put(mpol); /* drop our incoming ref on sb mpol */
2803 }
2804}
2805
2806int mpol_set_shared_policy(struct shared_policy *info,
2807 struct vm_area_struct *vma, struct mempolicy *npol)
2808{
2809 int err;
2810 struct sp_node *new = NULL;
2811 unsigned long sz = vma_pages(vma);
2812
2813 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2814 vma->vm_pgoff,
2815 sz, npol ? npol->mode : -1,
2816 npol ? npol->flags : -1,
2817 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
2818
2819 if (npol) {
2820 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2821 if (!new)
2822 return -ENOMEM;
2823 }
2824 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2825 if (err && new)
2826 sp_free(new);
2827 return err;
2828}
2829
2830/* Free a backing policy store on inode delete. */
2831void mpol_free_shared_policy(struct shared_policy *p)
2832{
2833 struct sp_node *n;
2834 struct rb_node *next;
2835
2836 if (!p->root.rb_node)
2837 return;
2838 write_lock(&p->lock);
2839 next = rb_first(&p->root);
2840 while (next) {
2841 n = rb_entry(next, struct sp_node, nd);
2842 next = rb_next(&n->nd);
2843 sp_delete(p, n);
2844 }
2845 write_unlock(&p->lock);
2846}
2847
2848#ifdef CONFIG_NUMA_BALANCING
2849static int __initdata numabalancing_override;
2850
2851static void __init check_numabalancing_enable(void)
2852{
2853 bool numabalancing_default = false;
2854
2855 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2856 numabalancing_default = true;
2857
2858 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2859 if (numabalancing_override)
2860 set_numabalancing_state(numabalancing_override == 1);
2861
2862 if (num_online_nodes() > 1 && !numabalancing_override) {
2863 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2864 numabalancing_default ? "Enabling" : "Disabling");
2865 set_numabalancing_state(numabalancing_default);
2866 }
2867}
2868
2869static int __init setup_numabalancing(char *str)
2870{
2871 int ret = 0;
2872 if (!str)
2873 goto out;
2874
2875 if (!strcmp(str, "enable")) {
2876 numabalancing_override = 1;
2877 ret = 1;
2878 } else if (!strcmp(str, "disable")) {
2879 numabalancing_override = -1;
2880 ret = 1;
2881 }
2882out:
2883 if (!ret)
2884 pr_warn("Unable to parse numa_balancing=\n");
2885
2886 return ret;
2887}
2888__setup("numa_balancing=", setup_numabalancing);
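
/*
 * Example (editorial note): automatic NUMA balancing can be forced on or off
 * from the kernel command line with "numa_balancing=enable" or
 * "numa_balancing=disable"; any other value is rejected by the parser above.
 */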
2889#else
2890static inline void __init check_numabalancing_enable(void)
2891{
2892}
2893#endif /* CONFIG_NUMA_BALANCING */
2894
2895/* assumes fs == KERNEL_DS */
2896void __init numa_policy_init(void)
2897{
2898 nodemask_t interleave_nodes;
2899 unsigned long largest = 0;
2900 int nid, prefer = 0;
2901
2902 policy_cache = kmem_cache_create("numa_policy",
2903 sizeof(struct mempolicy),
2904 0, SLAB_PANIC, NULL);
2905
2906 sn_cache = kmem_cache_create("shared_policy_node",
2907 sizeof(struct sp_node),
2908 0, SLAB_PANIC, NULL);
2909
2910 for_each_node(nid) {
2911 preferred_node_policy[nid] = (struct mempolicy) {
2912 .refcnt = ATOMIC_INIT(1),
2913 .mode = MPOL_PREFERRED,
2914 .flags = MPOL_F_MOF | MPOL_F_MORON,
2915 .nodes = nodemask_of_node(nid),
2916 };
2917 }
2918
2919 /*
2920 * Set interleaving policy for system init. Interleaving is only
2921	 * enabled across suitably sized nodes (default is >= 16MB); otherwise
2922	 * fall back to the largest node if they're all smaller.
2923 */
2924 nodes_clear(interleave_nodes);
2925 for_each_node_state(nid, N_MEMORY) {
2926 unsigned long total_pages = node_present_pages(nid);
2927
2928 /* Preserve the largest node */
2929 if (largest < total_pages) {
2930 largest = total_pages;
2931 prefer = nid;
2932 }
2933
2934 /* Interleave this node? */
2935 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2936 node_set(nid, interleave_nodes);
2937 }
2938
2939 /* All too small, use the largest */
2940 if (unlikely(nodes_empty(interleave_nodes)))
2941 node_set(prefer, interleave_nodes);
2942
2943 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2944 pr_err("%s: interleaving failed\n", __func__);
2945
2946 check_numabalancing_enable();
2947}
2948
2949/* Reset policy of current process to default */
2950void numa_default_policy(void)
2951{
2952 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2953}
2954
2955/*
2956 * Parse and format mempolicy from/to strings
2957 */
2958
2959static const char * const policy_modes[] =
2960{
2961 [MPOL_DEFAULT] = "default",
2962 [MPOL_PREFERRED] = "prefer",
2963 [MPOL_BIND] = "bind",
2964 [MPOL_INTERLEAVE] = "interleave",
2965 [MPOL_LOCAL] = "local",
2966 [MPOL_PREFERRED_MANY] = "prefer (many)",
2967};
2968
2969
2970#ifdef CONFIG_TMPFS
2971/**
2972 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2973 * @str: string containing mempolicy to parse
2974 * @mpol: pointer to struct mempolicy pointer, returned on success.
2975 *
2976 * Format of input:
2977 * <mode>[=<flags>][:<nodelist>]
2978 *
2979 * Return: %0 on success, else %1
2980 */
2981int mpol_parse_str(char *str, struct mempolicy **mpol)
2982{
2983 struct mempolicy *new = NULL;
2984 unsigned short mode_flags;
2985 nodemask_t nodes;
2986 char *nodelist = strchr(str, ':');
2987 char *flags = strchr(str, '=');
2988 int err = 1, mode;
2989
2990 if (flags)
2991 *flags++ = '\0'; /* terminate mode string */
2992
2993 if (nodelist) {
2994 /* NUL-terminate mode or flags string */
2995 *nodelist++ = '\0';
2996 if (nodelist_parse(nodelist, nodes))
2997 goto out;
2998 if (!nodes_subset(nodes, node_states[N_MEMORY]))
2999 goto out;
3000 } else
3001 nodes_clear(nodes);
3002
3003 mode = match_string(policy_modes, MPOL_MAX, str);
3004 if (mode < 0)
3005 goto out;
3006
3007 switch (mode) {
3008 case MPOL_PREFERRED:
3009 /*
3010		 * Insist on a nodelist of one node only: later we use
3011		 * first_node(nodes) to grab that single node, so here the
3012		 * nodelist (or nodes) cannot be empty.
3013 */
3014 if (nodelist) {
3015 char *rest = nodelist;
3016 while (isdigit(*rest))
3017 rest++;
3018 if (*rest)
3019 goto out;
3020 if (nodes_empty(nodes))
3021 goto out;
3022 }
3023 break;
3024 case MPOL_INTERLEAVE:
3025 /*
3026 * Default to online nodes with memory if no nodelist
3027 */
3028 if (!nodelist)
3029 nodes = node_states[N_MEMORY];
3030 break;
3031 case MPOL_LOCAL:
3032 /*
3033 * Don't allow a nodelist; mpol_new() checks flags
3034 */
3035 if (nodelist)
3036 goto out;
3037 break;
3038 case MPOL_DEFAULT:
3039 /*
3040		 * Insist on an empty nodelist
3041 */
3042 if (!nodelist)
3043 err = 0;
3044 goto out;
3045 case MPOL_PREFERRED_MANY:
3046 case MPOL_BIND:
3047 /*
3048 * Insist on a nodelist
3049 */
3050 if (!nodelist)
3051 goto out;
3052 }
3053
3054 mode_flags = 0;
3055 if (flags) {
3056 /*
3057 * Currently, we only support two mutually exclusive
3058 * mode flags.
3059 */
3060 if (!strcmp(flags, "static"))
3061 mode_flags |= MPOL_F_STATIC_NODES;
3062 else if (!strcmp(flags, "relative"))
3063 mode_flags |= MPOL_F_RELATIVE_NODES;
3064 else
3065 goto out;
3066 }
3067
3068 new = mpol_new(mode, mode_flags, &nodes);
3069 if (IS_ERR(new))
3070 goto out;
3071
3072 /*
3073 * Save nodes for mpol_to_str() to show the tmpfs mount options
3074 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3075 */
3076 if (mode != MPOL_PREFERRED) {
3077 new->nodes = nodes;
3078 } else if (nodelist) {
3079 nodes_clear(new->nodes);
3080 node_set(first_node(nodes), new->nodes);
3081 } else {
3082 new->mode = MPOL_LOCAL;
3083 }
3084
3085 /*
3086 * Save nodes for contextualization: this will be used to "clone"
3087 * the mempolicy in a specific context [cpuset] at a later time.
3088 */
3089 new->w.user_nodemask = nodes;
3090
3091 err = 0;
3092
3093out:
3094 /* Restore string for error message */
3095 if (nodelist)
3096 *--nodelist = ':';
3097 if (flags)
3098 *--flags = '=';
3099 if (!err)
3100 *mpol = new;
3101 return err;
3102}
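
/*
 * Example strings accepted by mpol_parse_str() above (editorial note), e.g.
 * as tmpfs "mpol=" mount options, assuming the named nodes have memory:
 *
 *	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
 *	"bind=static:1,3"	MPOL_BIND with MPOL_F_STATIC_NODES
 *	"prefer:2"		MPOL_PREFERRED, a single node only
 *	"local"			MPOL_LOCAL, no nodelist allowed
 */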
3103#endif /* CONFIG_TMPFS */
3104
3105/**
3106 * mpol_to_str - format a mempolicy structure for printing
3107 * @buffer: to contain formatted mempolicy string
3108 * @maxlen: length of @buffer
3109 * @pol: pointer to mempolicy to be formatted
3110 *
3111 * Convert @pol into a string. If @buffer is too short, truncate the string.
3112 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3113 * longest flag, "relative", and to display at least a few node ids.
3114 */
3115void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
3116{
3117 char *p = buffer;
3118 nodemask_t nodes = NODE_MASK_NONE;
3119 unsigned short mode = MPOL_DEFAULT;
3120 unsigned short flags = 0;
3121
3122 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3123 mode = pol->mode;
3124 flags = pol->flags;
3125 }
3126
3127 switch (mode) {
3128 case MPOL_DEFAULT:
3129 case MPOL_LOCAL:
3130 break;
3131 case MPOL_PREFERRED:
3132 case MPOL_PREFERRED_MANY:
3133 case MPOL_BIND:
3134 case MPOL_INTERLEAVE:
3135 nodes = pol->nodes;
3136 break;
3137 default:
3138 WARN_ON_ONCE(1);
3139 snprintf(p, maxlen, "unknown");
3140 return;
3141 }
3142
3143 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
3144
3145 if (flags & MPOL_MODE_FLAGS) {
3146 p += snprintf(p, buffer + maxlen - p, "=");
3147
3148 /*
3149 * Currently, the only defined flags are mutually exclusive
3150 */
3151 if (flags & MPOL_F_STATIC_NODES)
3152 p += snprintf(p, buffer + maxlen - p, "static");
3153 else if (flags & MPOL_F_RELATIVE_NODES)
3154 p += snprintf(p, buffer + maxlen - p, "relative");
3155 }
3156
3157 if (!nodes_empty(nodes))
3158 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3159 nodemask_pr_args(&nodes));
3160}
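
/*
 * Example outputs (editorial note): mpol_to_str() emits the same format that
 * mpol_parse_str() accepts, e.g. "interleave:0-3", "bind=static:1,3" or
 * "local".
 */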