/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
typedef void free_folio_t(struct folio *folio, unsigned long private);

struct migration_target_control;

/*
 * Return values from address_space_operations.migratepage():
 *   - negative errno on page migration failure;
 *   - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0
#define MIGRATEPAGE_UNMAP		1

/**
 * struct movable_operations - Driver page migration
 * @isolate_page:
 *	The VM calls this function to prepare the page to be moved. The page
 *	is locked and the driver should not unlock it. The driver should
 *	return ``true`` if the page is movable and ``false`` if it is not
 *	currently movable. After this function returns, the VM uses the
 *	page->lru field, so the driver must preserve any information which
 *	is usually stored here.
 *
 * @migrate_page:
 *	After isolation, the VM calls this function with the isolated
 *	@src page. The driver should copy the contents of the @src page to
 *	the @dst page and set up the fields of the @dst page. Both pages
 *	are locked.
 *	If page migration is successful, the driver should return
 *	MIGRATEPAGE_SUCCESS.
 *	If the driver cannot migrate the page at the moment, it can return
 *	-EAGAIN. The VM interprets this as a temporary migration failure and
 *	will retry it later. Any other error value is a permanent migration
 *	failure and migration will not be retried.
 *	The driver must not touch the @src->lru field while in the
 *	migrate_page() function. It may write to @dst->lru.
 *
 * @putback_page:
 *	If migration fails on the isolated page, the VM informs the driver
 *	that the page is no longer a candidate for migration by calling
 *	this function. The driver should put the isolated page back into
 *	its own data structure.
 */
struct movable_operations {
	bool (*isolate_page)(struct page *, isolate_mode_t);
	int (*migrate_page)(struct page *dst, struct page *src,
			enum migrate_mode);
	void (*putback_page)(struct page *);
};
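/*
 * Illustrative sketch, not part of the upstream header: a minimal
 * movable_operations implementation following the contract documented
 * above. All demo_* names are hypothetical; a real user (zsmalloc, the
 * memory balloon, ...) would also migrate its own per-page metadata.
 * Assumes <linux/highmem.h> for copy_highpage(). Registration would go
 * through set_movable_ops(), declared below.
 */
static bool demo_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* The page arrives locked and must be left locked. */
	return true;	/* this sketch treats every page as movable */
}

static int demo_migrate_page(struct page *dst, struct page *src,
			     enum migrate_mode mode)
{
	/* Both pages are locked; @src->lru must not be touched here. */
	copy_highpage(dst, src);
	/* A real driver would repoint its own references from @src to @dst. */
	return MIGRATEPAGE_SUCCESS;
}

static void demo_putback_page(struct page *page)
{
	/* Migration failed: return the page to the driver's own lists. */
}

static const struct movable_operations demo_movable_ops = {
	.isolate_page	= demo_isolate_page,
	.migrate_page	= demo_migrate_page,
	.putback_page	= demo_putback_page,
};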
/* Defined in mm/debug.c: */
extern const char *migrate_reason_names[MR_TYPES];

#ifdef CONFIG_MIGRATION

void putback_movable_pages(struct list_head *l);
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode);
int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
		unsigned long private, enum migrate_mode mode, int reason,
		unsigned int *ret_succeeded);
struct folio *alloc_migration_target(struct folio *src, unsigned long private);
bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode);
bool isolate_folio_to_list(struct folio *folio, struct list_head *list);

int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct folio *dst, struct folio *src);
void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
		__releases(ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count);
int set_movable_ops(const struct movable_operations *ops, enum pagetype type);

#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_folio_t new,
		free_folio_t free, unsigned long private,
		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
	{ return -ENOSYS; }
static inline struct folio *alloc_migration_target(struct folio *src,
		unsigned long private)
	{ return NULL; }
static inline bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
	{ return false; }
static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
	{ return false; }

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct folio *dst, struct folio *src)
{
	return -ENOSYS;
}
static inline int set_movable_ops(const struct movable_operations *ops, enum pagetype type)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_NUMA_BALANCING
int migrate_misplaced_folio_prepare(struct folio *folio,
		struct vm_area_struct *vma, int node);
int migrate_misplaced_folio(struct folio *folio, int node);
#else
static inline int migrate_misplaced_folio_prepare(struct folio *folio,
		struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
static inline int migrate_misplaced_folio(struct folio *folio, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where an unsigned long is only 32 bits
 * and might not have enough bits to store both the physical address and the
 * flags below. So far there is enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
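/*
 * Illustrative sketch, not part of the upstream header: how one entry of the
 * src/dst arrays used by the migrate_vma/migrate_device_*() API below is
 * typically consumed and filled. demo_prepare_entry() and @dpage (a
 * driver-allocated destination page) are hypothetical.
 */
static inline bool demo_prepare_entry(const unsigned long *src,
				      unsigned long *dst, unsigned long i,
				      struct page *dpage)
{
	struct page *spage = migrate_pfn_to_page(src[i]);

	/* Skip holes and entries the core refused to migrate. */
	if (!spage || !(src[i] & MIGRATE_PFN_MIGRATE)) {
		dst[i] = 0;
		return false;
	}
	/* migrate_pfn() shifts the PFN into place and sets MIGRATE_PFN_VALID. */
	dst[i] = migrate_pfn(page_to_pfn(dpage));
	return true;	/* caller should now copy spage's data into dpage */
}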
enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
	MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,
};

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both the src and dst arrays must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The caller must not modify the src array after migrate_vma_setup()
	 * returns, nor the dst array after migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;

	/*
	 * Set to the owner value also stored in page_pgmap(page)->owner
	 * for migrating out of device private memory. The flags also need to
	 * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
	 * The caller should always set this field when using mmu notifier
	 * callbacks to avoid device MMU invalidations for device private
	 * pages that are not being migrated.
	 */
	void			*pgmap_owner;
	unsigned long		flags;

	/*
	 * Set to vmf->page if this is being called to migrate a page as part
	 * of a migrate_to_ram() callback.
	 */
	struct page		*fault_page;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
int migrate_device_range(unsigned long *src_pfns, unsigned long start,
			unsigned long npages);
int migrate_device_pfns(unsigned long *src_pfns, unsigned long npages);
void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
			unsigned long npages);
void migrate_device_finalize(unsigned long *src_pfns,
			unsigned long *dst_pfns, unsigned long npages);

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */
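/*
 * Illustrative sketch, not part of the upstream header (it would live in
 * driver code, not after the include guard): the usual three-phase
 * migrate_vma flow, loosely modeled on lib/test_hmm.c. demo_migrate_range()
 * and the fixed-size on-stack arrays are simplifications for brevity.
 * Assumes <linux/highmem.h> and <linux/pagemap.h> for copy_highpage() and
 * lock_page().
 */
static int demo_migrate_range(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, void *owner)
{
	unsigned long src[16] = { 0 }, dst[16] = { 0 };
	struct migrate_vma args = {
		.vma		= vma,
		.src		= src,
		.dst		= dst,
		.start		= start,
		.end		= end,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
		.pgmap_owner	= owner,
	};
	unsigned long i;
	int ret;

	if (((end - start) >> PAGE_SHIFT) > ARRAY_SIZE(src))
		return -EINVAL;	/* arrays must cover the whole range */

	/* Phase 1: collect the pages in the range and unmap/isolate them. */
	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	/* Phase 2: allocate destinations, copy data, switch the mappings. */
	for (i = 0; i < args.npages; i++) {
		struct page *dpage, *spage = migrate_pfn_to_page(args.src[i]);

		if (!spage || !(args.src[i] & MIGRATE_PFN_MIGRATE))
			continue;
		dpage = alloc_page(GFP_HIGHUSER_MOVABLE);
		if (!dpage)
			continue;	/* this entry simply fails to migrate */
		lock_page(dpage);	/* destination pages must be locked */
		copy_highpage(dpage, spage);
		args.dst[i] = migrate_pfn(page_to_pfn(dpage));
		if (args.src[i] & MIGRATE_PFN_WRITE)
			args.dst[i] |= MIGRATE_PFN_WRITE;
	}
	migrate_vma_pages(&args);

	/* Phase 3: restore CPU access and release source and dst pages. */
	migrate_vma_finalize(&args);
	return 0;
}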