Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/hmm: remove hmm_mirror and related

The only two users of this are now converted to use mmu_interval_notifier,
delete all the code and update hmm.rst.

Link: https://lore.kernel.org/r/20191112202231.3856-14-jgg@ziepe.ca
Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
Tested-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>

+32 -538
+22 -79
Documentation/vm/hmm.rst
··· 147 147 Address space mirroring's main objective is to allow duplication of a range of 148 148 CPU page table into a device page table; HMM helps keep both synchronized. A 149 149 device driver that wants to mirror a process address space must start with the 150 - registration of an hmm_mirror struct:: 150 + registration of a mmu_interval_notifier:: 151 151 152 - int hmm_mirror_register(struct hmm_mirror *mirror, 153 - struct mm_struct *mm); 152 + mni->ops = &driver_ops; 153 + int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni, 154 + unsigned long start, unsigned long length, 155 + struct mm_struct *mm); 154 156 155 - The mirror struct has a set of callbacks that are used 156 - to propagate CPU page tables:: 157 - 158 - struct hmm_mirror_ops { 159 - /* release() - release hmm_mirror 160 - * 161 - * @mirror: pointer to struct hmm_mirror 162 - * 163 - * This is called when the mm_struct is being released. The callback 164 - * must ensure that all access to any pages obtained from this mirror 165 - * is halted before the callback returns. All future access should 166 - * fault. 167 - */ 168 - void (*release)(struct hmm_mirror *mirror); 169 - 170 - /* sync_cpu_device_pagetables() - synchronize page tables 171 - * 172 - * @mirror: pointer to struct hmm_mirror 173 - * @update: update information (see struct mmu_notifier_range) 174 - * Return: -EAGAIN if update.blockable false and callback need to 175 - * block, 0 otherwise. 176 - * 177 - * This callback ultimately originates from mmu_notifiers when the CPU 178 - * page table is updated. The device driver must update its page table 179 - * in response to this callback. The update argument tells what action 180 - * to perform. 181 - * 182 - * The device driver must not return from this callback until the device 183 - * page tables are completely updated (TLBs flushed, etc); this is a 184 - * synchronous call. 
185 - */ 186 - int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror, 187 - const struct hmm_update *update); 188 - }; 189 - 190 - The device driver must perform the update action to the range (mark range 191 - read only, or fully unmap, etc.). The device must complete the update before 192 - the driver callback returns. 157 + During the driver_ops->invalidate() callback the device driver must perform 158 + the update action to the range (mark range read only, or fully unmap, 159 + etc.). The device must complete the update before the driver callback returns. 193 160 194 161 When the device driver wants to populate a range of virtual addresses, it can 195 162 use:: ··· 183 216 struct hmm_range range; 184 217 ... 185 218 219 + range.notifier = &mni; 186 220 range.start = ...; 187 221 range.end = ...; 188 222 range.pfns = ...; 189 223 range.flags = ...; 190 224 range.values = ...; 191 225 range.pfn_shift = ...; 192 - hmm_range_register(&range, mirror); 193 226 194 - /* 195 - * Just wait for range to be valid, safe to ignore return value as we 196 - * will use the return value of hmm_range_fault() below under the 197 - * mmap_sem to ascertain the validity of the range. 
198 - */ 199 - hmm_range_wait_until_valid(&range, TIMEOUT_IN_MSEC); 227 + if (!mmget_not_zero(mni->notifier.mm)) 228 + return -EFAULT; 200 229 201 230 again: 231 + range.notifier_seq = mmu_interval_read_begin(&mni); 202 232 down_read(&mm->mmap_sem); 203 233 ret = hmm_range_fault(&range, HMM_RANGE_SNAPSHOT); 204 234 if (ret) { 205 235 up_read(&mm->mmap_sem); 206 - if (ret == -EBUSY) { 207 - /* 208 - * No need to check hmm_range_wait_until_valid() return value 209 - * on retry we will get proper error with hmm_range_fault() 210 - */ 211 - hmm_range_wait_until_valid(&range, TIMEOUT_IN_MSEC); 212 - goto again; 213 - } 214 - hmm_range_unregister(&range); 236 + if (ret == -EBUSY) 237 + goto again; 215 238 return ret; 216 239 } 240 + up_read(&mm->mmap_sem); 241 + 217 242 take_lock(driver->update); 218 - if (!hmm_range_valid(&range)) { 243 + if (mmu_interval_read_retry(&mni, range.notifier_seq)) { 219 244 release_lock(driver->update); 220 - up_read(&mm->mmap_sem); 221 245 goto again; 222 246 } 223 247 224 - // Use pfns array content to update device page table 248 + /* Use pfns array content to update device page table, 249 + * under the update lock */ 225 250 226 - hmm_range_unregister(&range); 227 251 release_lock(driver->update); 228 - up_read(&mm->mmap_sem); 229 252 return 0; 230 253 } 231 254 232 255 The driver->update lock is the same lock that the driver takes inside its 233 - sync_cpu_device_pagetables() callback. That lock must be held before calling 234 - hmm_range_valid() to avoid any race with a concurrent CPU page table update. 235 - 236 - HMM implements all this on top of the mmu_notifier API because we wanted a 237 - simpler API and also to be able to perform optimizations latter on like doing 238 - concurrent device updates in multi-devices scenario. 239 - 240 - HMM also serves as an impedance mismatch between how CPU page table updates 241 - are done (by CPU write to the page table and TLB flushes) and how devices 242 - update their own page table. 
Device updates are a multi-step process. First, 243 - appropriate commands are written to a buffer, then this buffer is scheduled for 244 - execution on the device. It is only once the device has executed commands in 245 - the buffer that the update is done. Creating and scheduling the update command 246 - buffer can happen concurrently for multiple devices. Waiting for each device to 247 - report commands as executed is serialized (there is no point in doing this 248 - concurrently). 249 - 256 + invalidate() callback. That lock must be held before calling 257 + mmu_interval_read_retry() to avoid any race with a concurrent CPU page table 258 + update. 250 259 251 260 Leverage default_flags and pfn_flags_mask 252 261 =========================================
+2 -181
include/linux/hmm.h
··· 68 68 #include <linux/completion.h> 69 69 #include <linux/mmu_notifier.h> 70 70 71 - 72 - /* 73 - * struct hmm - HMM per mm struct 74 - * 75 - * @mm: mm struct this HMM struct is bound to 76 - * @lock: lock protecting ranges list 77 - * @ranges: list of range being snapshotted 78 - * @mirrors: list of mirrors for this mm 79 - * @mmu_notifier: mmu notifier to track updates to CPU page table 80 - * @mirrors_sem: read/write semaphore protecting the mirrors list 81 - * @wq: wait queue for user waiting on a range invalidation 82 - * @notifiers: count of active mmu notifiers 83 - */ 84 - struct hmm { 85 - struct mmu_notifier mmu_notifier; 86 - spinlock_t ranges_lock; 87 - struct list_head ranges; 88 - struct list_head mirrors; 89 - struct rw_semaphore mirrors_sem; 90 - wait_queue_head_t wq; 91 - long notifiers; 92 - }; 93 - 94 71 /* 95 72 * hmm_pfn_flag_e - HMM flag enums 96 73 * ··· 120 143 /* 121 144 * struct hmm_range - track invalidation lock on virtual address range 122 145 * 123 - * @notifier: an optional mmu_interval_notifier 124 - * @notifier_seq: when notifier is used this is the result of 125 - * mmu_interval_read_begin() 146 + * @notifier: a mmu_interval_notifier that includes the start/end 147 + * @notifier_seq: result of mmu_interval_read_begin() 126 148 * @hmm: the core HMM structure this range is active against 127 149 * @vma: the vm area struct for the range 128 150 * @list: all range lock are on a list ··· 138 162 struct hmm_range { 139 163 struct mmu_interval_notifier *notifier; 140 164 unsigned long notifier_seq; 141 - struct hmm *hmm; 142 - struct list_head list; 143 165 unsigned long start; 144 166 unsigned long end; 145 167 uint64_t *pfns; ··· 146 172 uint64_t default_flags; 147 173 uint64_t pfn_flags_mask; 148 174 uint8_t pfn_shift; 149 - bool valid; 150 175 }; 151 - 152 - /* 153 - * hmm_range_wait_until_valid() - wait for range to be valid 154 - * @range: range affected by invalidation to wait on 155 - * @timeout: time out for wait in ms (ie 
abort wait after that period of time) 156 - * Return: true if the range is valid, false otherwise. 157 - */ 158 - static inline bool hmm_range_wait_until_valid(struct hmm_range *range, 159 - unsigned long timeout) 160 - { 161 - return wait_event_timeout(range->hmm->wq, range->valid, 162 - msecs_to_jiffies(timeout)) != 0; 163 - } 164 - 165 - /* 166 - * hmm_range_valid() - test if a range is valid or not 167 - * @range: range 168 - * Return: true if the range is valid, false otherwise. 169 - */ 170 - static inline bool hmm_range_valid(struct hmm_range *range) 171 - { 172 - return range->valid; 173 - } 174 176 175 177 /* 176 178 * hmm_device_entry_to_page() - return struct page pointed to by a device entry ··· 218 268 } 219 269 220 270 /* 221 - * Mirroring: how to synchronize device page table with CPU page table. 222 - * 223 - * A device driver that is participating in HMM mirroring must always 224 - * synchronize with CPU page table updates. For this, device drivers can either 225 - * directly use mmu_notifier APIs or they can use the hmm_mirror API. Device 226 - * drivers can decide to register one mirror per device per process, or just 227 - * one mirror per process for a group of devices. The pattern is: 228 - * 229 - * int device_bind_address_space(..., struct mm_struct *mm, ...) 230 - * { 231 - * struct device_address_space *das; 232 - * 233 - * // Device driver specific initialization, and allocation of das 234 - * // which contains an hmm_mirror struct as one of its fields. 235 - * ... 236 - * 237 - * ret = hmm_mirror_register(&das->mirror, mm, &device_mirror_ops); 238 - * if (ret) { 239 - * // Cleanup on error 240 - * return ret; 241 - * } 242 - * 243 - * // Other device driver specific initialization 244 - * ... 245 - * } 246 - * 247 - * Once an hmm_mirror is registered for an address space, the device driver 248 - * will get callbacks through sync_cpu_device_pagetables() operation (see 249 - * hmm_mirror_ops struct). 
250 - * 251 - * Device driver must not free the struct containing the hmm_mirror struct 252 - * before calling hmm_mirror_unregister(). The expected usage is to do that when 253 - * the device driver is unbinding from an address space. 254 - * 255 - * 256 - * void device_unbind_address_space(struct device_address_space *das) 257 - * { 258 - * // Device driver specific cleanup 259 - * ... 260 - * 261 - * hmm_mirror_unregister(&das->mirror); 262 - * 263 - * // Other device driver specific cleanup, and now das can be freed 264 - * ... 265 - * } 266 - */ 267 - 268 - struct hmm_mirror; 269 - 270 - /* 271 - * struct hmm_mirror_ops - HMM mirror device operations callback 272 - * 273 - * @update: callback to update range on a device 274 - */ 275 - struct hmm_mirror_ops { 276 - /* release() - release hmm_mirror 277 - * 278 - * @mirror: pointer to struct hmm_mirror 279 - * 280 - * This is called when the mm_struct is being released. The callback 281 - * must ensure that all access to any pages obtained from this mirror 282 - * is halted before the callback returns. All future access should 283 - * fault. 284 - */ 285 - void (*release)(struct hmm_mirror *mirror); 286 - 287 - /* sync_cpu_device_pagetables() - synchronize page tables 288 - * 289 - * @mirror: pointer to struct hmm_mirror 290 - * @update: update information (see struct mmu_notifier_range) 291 - * Return: -EAGAIN if mmu_notifier_range_blockable(update) is false 292 - * and callback needs to block, 0 otherwise. 293 - * 294 - * This callback ultimately originates from mmu_notifiers when the CPU 295 - * page table is updated. The device driver must update its page table 296 - * in response to this callback. The update argument tells what action 297 - * to perform. 298 - * 299 - * The device driver must not return from this callback until the device 300 - * page tables are completely updated (TLBs flushed, etc); this is a 301 - * synchronous call. 
302 - */ 303 - int (*sync_cpu_device_pagetables)( 304 - struct hmm_mirror *mirror, 305 - const struct mmu_notifier_range *update); 306 - }; 307 - 308 - /* 309 - * struct hmm_mirror - mirror struct for a device driver 310 - * 311 - * @hmm: pointer to struct hmm (which is unique per mm_struct) 312 - * @ops: device driver callback for HMM mirror operations 313 - * @list: for list of mirrors of a given mm 314 - * 315 - * Each address space (mm_struct) being mirrored by a device must register one 316 - * instance of an hmm_mirror struct with HMM. HMM will track the list of all 317 - * mirrors for each mm_struct. 318 - */ 319 - struct hmm_mirror { 320 - struct hmm *hmm; 321 - const struct hmm_mirror_ops *ops; 322 - struct list_head list; 323 - }; 324 - 325 - /* 326 271 * Retry fault if non-blocking, drop mmap_sem and return -EAGAIN in that case. 327 272 */ 328 273 #define HMM_FAULT_ALLOW_RETRY (1 << 0) ··· 226 381 #define HMM_FAULT_SNAPSHOT (1 << 1) 227 382 228 383 #ifdef CONFIG_HMM_MIRROR 229 - int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm); 230 - void hmm_mirror_unregister(struct hmm_mirror *mirror); 231 - 232 384 /* 233 385 * Please see Documentation/vm/hmm.rst for how to use the range API. 
234 386 */ 235 - int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror); 236 - void hmm_range_unregister(struct hmm_range *range); 237 - 238 387 long hmm_range_fault(struct hmm_range *range, unsigned int flags); 239 388 240 389 long hmm_range_dma_map(struct hmm_range *range, ··· 240 401 dma_addr_t *daddrs, 241 402 bool dirty); 242 403 #else 243 - int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm) 244 - { 245 - return -EOPNOTSUPP; 246 - } 247 - 248 - void hmm_mirror_unregister(struct hmm_mirror *mirror) 249 - { 250 - } 251 - 252 - int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror) 253 - { 254 - return -EOPNOTSUPP; 255 - } 256 - 257 - void hmm_range_unregister(struct hmm_range *range) 258 - { 259 - } 260 - 261 404 static inline long hmm_range_fault(struct hmm_range *range, unsigned int flags) 262 405 { 263 406 return -EOPNOTSUPP;
-1
mm/Kconfig
··· 675 675 config HMM_MIRROR 676 676 bool 677 677 depends on MMU 678 - depends on MMU_NOTIFIER 679 678 680 679 config DEVICE_PRIVATE 681 680 bool "Unaddressable device memory (GPU memory, ...)"
+8 -277
mm/hmm.c
··· 26 26 #include <linux/mmu_notifier.h> 27 27 #include <linux/memory_hotplug.h> 28 28 29 - static struct mmu_notifier *hmm_alloc_notifier(struct mm_struct *mm) 30 - { 31 - struct hmm *hmm; 32 - 33 - hmm = kzalloc(sizeof(*hmm), GFP_KERNEL); 34 - if (!hmm) 35 - return ERR_PTR(-ENOMEM); 36 - 37 - init_waitqueue_head(&hmm->wq); 38 - INIT_LIST_HEAD(&hmm->mirrors); 39 - init_rwsem(&hmm->mirrors_sem); 40 - INIT_LIST_HEAD(&hmm->ranges); 41 - spin_lock_init(&hmm->ranges_lock); 42 - hmm->notifiers = 0; 43 - return &hmm->mmu_notifier; 44 - } 45 - 46 - static void hmm_free_notifier(struct mmu_notifier *mn) 47 - { 48 - struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier); 49 - 50 - WARN_ON(!list_empty(&hmm->ranges)); 51 - WARN_ON(!list_empty(&hmm->mirrors)); 52 - kfree(hmm); 53 - } 54 - 55 - static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm) 56 - { 57 - struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier); 58 - struct hmm_mirror *mirror; 59 - 60 - /* 61 - * Since hmm_range_register() holds the mmget() lock hmm_release() is 62 - * prevented as long as a range exists. 63 - */ 64 - WARN_ON(!list_empty_careful(&hmm->ranges)); 65 - 66 - down_read(&hmm->mirrors_sem); 67 - list_for_each_entry(mirror, &hmm->mirrors, list) { 68 - /* 69 - * Note: The driver is not allowed to trigger 70 - * hmm_mirror_unregister() from this thread. 
71 - */ 72 - if (mirror->ops->release) 73 - mirror->ops->release(mirror); 74 - } 75 - up_read(&hmm->mirrors_sem); 76 - } 77 - 78 - static void notifiers_decrement(struct hmm *hmm) 79 - { 80 - unsigned long flags; 81 - 82 - spin_lock_irqsave(&hmm->ranges_lock, flags); 83 - hmm->notifiers--; 84 - if (!hmm->notifiers) { 85 - struct hmm_range *range; 86 - 87 - list_for_each_entry(range, &hmm->ranges, list) { 88 - if (range->valid) 89 - continue; 90 - range->valid = true; 91 - } 92 - wake_up_all(&hmm->wq); 93 - } 94 - spin_unlock_irqrestore(&hmm->ranges_lock, flags); 95 - } 96 - 97 - static int hmm_invalidate_range_start(struct mmu_notifier *mn, 98 - const struct mmu_notifier_range *nrange) 99 - { 100 - struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier); 101 - struct hmm_mirror *mirror; 102 - struct hmm_range *range; 103 - unsigned long flags; 104 - int ret = 0; 105 - 106 - spin_lock_irqsave(&hmm->ranges_lock, flags); 107 - hmm->notifiers++; 108 - list_for_each_entry(range, &hmm->ranges, list) { 109 - if (nrange->end < range->start || nrange->start >= range->end) 110 - continue; 111 - 112 - range->valid = false; 113 - } 114 - spin_unlock_irqrestore(&hmm->ranges_lock, flags); 115 - 116 - if (mmu_notifier_range_blockable(nrange)) 117 - down_read(&hmm->mirrors_sem); 118 - else if (!down_read_trylock(&hmm->mirrors_sem)) { 119 - ret = -EAGAIN; 120 - goto out; 121 - } 122 - 123 - list_for_each_entry(mirror, &hmm->mirrors, list) { 124 - int rc; 125 - 126 - rc = mirror->ops->sync_cpu_device_pagetables(mirror, nrange); 127 - if (rc) { 128 - if (WARN_ON(mmu_notifier_range_blockable(nrange) || 129 - rc != -EAGAIN)) 130 - continue; 131 - ret = -EAGAIN; 132 - break; 133 - } 134 - } 135 - up_read(&hmm->mirrors_sem); 136 - 137 - out: 138 - if (ret) 139 - notifiers_decrement(hmm); 140 - return ret; 141 - } 142 - 143 - static void hmm_invalidate_range_end(struct mmu_notifier *mn, 144 - const struct mmu_notifier_range *nrange) 145 - { 146 - struct hmm *hmm = container_of(mn, 
struct hmm, mmu_notifier); 147 - 148 - notifiers_decrement(hmm); 149 - } 150 - 151 - static const struct mmu_notifier_ops hmm_mmu_notifier_ops = { 152 - .release = hmm_release, 153 - .invalidate_range_start = hmm_invalidate_range_start, 154 - .invalidate_range_end = hmm_invalidate_range_end, 155 - .alloc_notifier = hmm_alloc_notifier, 156 - .free_notifier = hmm_free_notifier, 157 - }; 158 - 159 - /* 160 - * hmm_mirror_register() - register a mirror against an mm 161 - * 162 - * @mirror: new mirror struct to register 163 - * @mm: mm to register against 164 - * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments 165 - * 166 - * To start mirroring a process address space, the device driver must register 167 - * an HMM mirror struct. 168 - * 169 - * The caller cannot unregister the hmm_mirror while any ranges are 170 - * registered. 171 - * 172 - * Callers using this function must put a call to mmu_notifier_synchronize() 173 - * in their module exit functions. 174 - */ 175 - int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm) 176 - { 177 - struct mmu_notifier *mn; 178 - 179 - lockdep_assert_held_write(&mm->mmap_sem); 180 - 181 - /* Sanity check */ 182 - if (!mm || !mirror || !mirror->ops) 183 - return -EINVAL; 184 - 185 - mn = mmu_notifier_get_locked(&hmm_mmu_notifier_ops, mm); 186 - if (IS_ERR(mn)) 187 - return PTR_ERR(mn); 188 - mirror->hmm = container_of(mn, struct hmm, mmu_notifier); 189 - 190 - down_write(&mirror->hmm->mirrors_sem); 191 - list_add(&mirror->list, &mirror->hmm->mirrors); 192 - up_write(&mirror->hmm->mirrors_sem); 193 - 194 - return 0; 195 - } 196 - EXPORT_SYMBOL(hmm_mirror_register); 197 - 198 - /* 199 - * hmm_mirror_unregister() - unregister a mirror 200 - * 201 - * @mirror: mirror struct to unregister 202 - * 203 - * Stop mirroring a process address space, and cleanup. 
204 - */ 205 - void hmm_mirror_unregister(struct hmm_mirror *mirror) 206 - { 207 - struct hmm *hmm = mirror->hmm; 208 - 209 - down_write(&hmm->mirrors_sem); 210 - list_del(&mirror->list); 211 - up_write(&hmm->mirrors_sem); 212 - mmu_notifier_put(&hmm->mmu_notifier); 213 - } 214 - EXPORT_SYMBOL(hmm_mirror_unregister); 215 - 216 29 struct hmm_vma_walk { 217 30 struct hmm_range *range; 218 31 struct dev_pagemap *pgmap; ··· 598 785 *pfns = range->values[HMM_PFN_NONE]; 599 786 } 600 787 601 - /* 602 - * hmm_range_register() - start tracking change to CPU page table over a range 603 - * @range: range 604 - * @mm: the mm struct for the range of virtual address 605 - * 606 - * Return: 0 on success, -EFAULT if the address space is no longer valid 607 - * 608 - * Track updates to the CPU page table see include/linux/hmm.h 609 - */ 610 - int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror) 611 - { 612 - struct hmm *hmm = mirror->hmm; 613 - unsigned long flags; 614 - 615 - range->valid = false; 616 - range->hmm = NULL; 617 - 618 - if ((range->start & (PAGE_SIZE - 1)) || (range->end & (PAGE_SIZE - 1))) 619 - return -EINVAL; 620 - if (range->start >= range->end) 621 - return -EINVAL; 622 - 623 - /* Prevent hmm_release() from running while the range is valid */ 624 - if (!mmget_not_zero(hmm->mmu_notifier.mm)) 625 - return -EFAULT; 626 - 627 - /* Initialize range to track CPU page table updates. */ 628 - spin_lock_irqsave(&hmm->ranges_lock, flags); 629 - 630 - range->hmm = hmm; 631 - list_add(&range->list, &hmm->ranges); 632 - 633 - /* 634 - * If there are any concurrent notifiers we have to wait for them for 635 - * the range to be valid (see hmm_range_wait_until_valid()). 
636 - */ 637 - if (!hmm->notifiers) 638 - range->valid = true; 639 - spin_unlock_irqrestore(&hmm->ranges_lock, flags); 640 - 641 - return 0; 642 - } 643 - EXPORT_SYMBOL(hmm_range_register); 644 - 645 - /* 646 - * hmm_range_unregister() - stop tracking change to CPU page table over a range 647 - * @range: range 648 - * 649 - * Range struct is used to track updates to the CPU page table after a call to 650 - * hmm_range_register(). See include/linux/hmm.h for how to use it. 651 - */ 652 - void hmm_range_unregister(struct hmm_range *range) 653 - { 654 - struct hmm *hmm = range->hmm; 655 - unsigned long flags; 656 - 657 - spin_lock_irqsave(&hmm->ranges_lock, flags); 658 - list_del_init(&range->list); 659 - spin_unlock_irqrestore(&hmm->ranges_lock, flags); 660 - 661 - /* Drop reference taken by hmm_range_register() */ 662 - mmput(hmm->mmu_notifier.mm); 663 - 664 - /* 665 - * The range is now invalid and the ref on the hmm is dropped, so 666 - * poison the pointer. Leave other fields in place, for the caller's 667 - * use. 
668 - */ 669 - range->valid = false; 670 - memset(&range->hmm, POISON_INUSE, sizeof(range->hmm)); 671 - } 672 - EXPORT_SYMBOL(hmm_range_unregister); 673 - 674 - static bool needs_retry(struct hmm_range *range) 675 - { 676 - if (range->notifier) 677 - return mmu_interval_check_retry(range->notifier, 678 - range->notifier_seq); 679 - return !range->valid; 680 - } 681 - 682 788 static const struct mm_walk_ops hmm_walk_ops = { 683 789 .pud_entry = hmm_vma_walk_pud, 684 790 .pmd_entry = hmm_vma_walk_pmd, ··· 638 906 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP; 639 907 unsigned long start = range->start, end; 640 908 struct hmm_vma_walk hmm_vma_walk; 641 - struct mm_struct *mm; 909 + struct mm_struct *mm = range->notifier->mm; 642 910 struct vm_area_struct *vma; 643 911 int ret; 644 - 645 - if (range->notifier) 646 - mm = range->notifier->mm; 647 - else 648 - mm = range->hmm->mmu_notifier.mm; 649 912 650 913 lockdep_assert_held(&mm->mmap_sem); 651 914 652 915 do { 653 916 /* If range is no longer valid force retry. */ 654 - if (needs_retry(range)) 917 + if (mmu_interval_check_retry(range->notifier, 918 + range->notifier_seq)) 655 919 return -EBUSY; 656 920 657 921 vma = find_vma(mm, start); ··· 680 952 start = hmm_vma_walk.last; 681 953 682 954 /* Keep trying while the range is valid. */ 683 - } while (ret == -EBUSY && !needs_retry(range)); 955 + } while (ret == -EBUSY && 956 + !mmu_interval_check_retry(range->notifier, 957 + range->notifier_seq)); 684 958 685 959 if (ret) { 686 960 unsigned long i; ··· 740 1010 continue; 741 1011 742 1012 /* Check if range is being invalidated */ 743 - if (needs_retry(range)) { 1013 + if (mmu_interval_check_retry(range->notifier, 1014 + range->notifier_seq)) { 744 1015 ret = -EBUSY; 745 1016 goto unmap; 746 1017 }