Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/xe/userptr: Unmap userptrs in the mmu notifier

If userptr pages are freed after a call to the xe mmu notifier,
the device will not be blocked out from theoretically accessing
these pages unless they are also unmapped from the iommu, and
this violates some aspects of the iommu-imposed security.

Ensure that userptrs are unmapped in the mmu notifier to
mitigate this. A naive attempt would try to free the sg table, but
the sg table itself may be accessed by a concurrent bind
operation, so settle for only unmapping.

v3:
- Update lockdep asserts.
- Fix a typo (Matthew Auld)

Fixes: 81e058a3e7fd ("drm/xe: Introduce helper to populate userptr")
Cc: Oak Zeng <oak.zeng@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: <stable@vger.kernel.org> # v6.10+
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Acked-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250304173342.22009-4-thomas.hellstrom@linux.intel.com
(cherry picked from commit ba767b9d01a2c552d76cf6f46b125d50ec4147a6)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>

Authored by Thomas Hellström and committed by Rodrigo Vivi
333b8906 0a98219b

+52 -9
+42 -9
drivers/gpu/drm/xe/xe_hmm.c
··· 150 150 DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING); 151 151 } 152 152 153 + static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma) 154 + { 155 + struct xe_userptr *userptr = &uvma->userptr; 156 + struct xe_vm *vm = xe_vma_vm(&uvma->vma); 157 + 158 + lockdep_assert_held_write(&vm->lock); 159 + lockdep_assert_held(&vm->userptr.notifier_lock); 160 + 161 + mutex_lock(&userptr->unmap_mutex); 162 + xe_assert(vm->xe, !userptr->mapped); 163 + userptr->mapped = true; 164 + mutex_unlock(&userptr->unmap_mutex); 165 + } 166 + 167 + void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma) 168 + { 169 + struct xe_userptr *userptr = &uvma->userptr; 170 + struct xe_vma *vma = &uvma->vma; 171 + bool write = !xe_vma_read_only(vma); 172 + struct xe_vm *vm = xe_vma_vm(vma); 173 + struct xe_device *xe = vm->xe; 174 + 175 + if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) && 176 + !lockdep_is_held_type(&vm->lock, 0) && 177 + !(vma->gpuva.flags & XE_VMA_DESTROYED)) { 178 + /* Don't unmap in exec critical section. */ 179 + xe_vm_assert_held(vm); 180 + /* Don't unmap while mapping the sg. */ 181 + lockdep_assert_held(&vm->lock); 182 + } 183 + 184 + mutex_lock(&userptr->unmap_mutex); 185 + if (userptr->sg && userptr->mapped) 186 + dma_unmap_sgtable(xe->drm.dev, userptr->sg, 187 + write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0); 188 + userptr->mapped = false; 189 + mutex_unlock(&userptr->unmap_mutex); 190 + } 191 + 153 192 /** 154 193 * xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr 155 194 * @uvma: the userptr vma which hold the scatter gather table ··· 200 161 void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma) 201 162 { 202 163 struct xe_userptr *userptr = &uvma->userptr; 203 - struct xe_vma *vma = &uvma->vma; 204 - bool write = !xe_vma_read_only(vma); 205 - struct xe_vm *vm = xe_vma_vm(vma); 206 - struct xe_device *xe = vm->xe; 207 - struct device *dev = xe->drm.dev; 208 164 209 - xe_assert(xe, userptr->sg); 210 - dma_unmap_sgtable(dev, userptr->sg, 211 - write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0); 212 - 165 + xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg); 166 + xe_hmm_userptr_unmap(uvma); 213 167 sg_free_table(userptr->sg); 214 168 userptr->sg = NULL; 215 169 } ··· 329 297 330 298 xe_mark_range_accessed(&hmm_range, write); 331 299 userptr->sg = &userptr->sgt; 300 + xe_hmm_userptr_set_mapped(uvma); 332 301 userptr->notifier_seq = hmm_range.notifier_seq; 333 302 up_read(&vm->userptr.notifier_lock); 334 303 kvfree(pfns);
+2
drivers/gpu/drm/xe/xe_hmm.h
··· 13 13 int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked); 14 14 15 15 void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma); 16 + 17 + void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma); 16 18 #endif
+4
drivers/gpu/drm/xe/xe_vm.c
··· 620 620 err = xe_vm_invalidate_vma(vma); 621 621 XE_WARN_ON(err); 622 622 } 623 + 624 + xe_hmm_userptr_unmap(uvma); 623 625 } 624 626 625 627 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni, ··· 1041 1039 INIT_LIST_HEAD(&userptr->invalidate_link); 1042 1040 INIT_LIST_HEAD(&userptr->repin_link); 1043 1041 vma->gpuva.gem.offset = bo_offset_or_userptr; 1042 + mutex_init(&userptr->unmap_mutex); 1044 1043 1045 1044 err = mmu_interval_notifier_insert(&userptr->notifier, 1046 1045 current->mm, ··· 1083 1080 * them anymore 1084 1081 */ 1085 1082 mmu_interval_notifier_remove(&userptr->notifier); 1083 + mutex_destroy(&userptr->unmap_mutex); 1086 1084 xe_vm_put(vm); 1087 1085 } else if (xe_vma_is_null(vma)) { 1088 1086 xe_vm_put(vm);
+4
drivers/gpu/drm/xe/xe_vm_types.h
··· 59 59 struct sg_table *sg; 60 60 /** @notifier_seq: notifier sequence number */ 61 61 unsigned long notifier_seq; 62 + /** @unmap_mutex: Mutex protecting dma-unmapping */ 63 + struct mutex unmap_mutex; 62 64 /** 63 65 * @initial_bind: user pointer has been bound at least once. 64 66 * write: vm->userptr.notifier_lock in read mode and vm->resv held. 65 67 * read: vm->userptr.notifier_lock in write mode or vm->resv held. 66 68 */ 67 69 bool initial_bind; 70 + /** @mapped: Whether the @sgt sg-table is dma-mapped. Protected by @unmap_mutex. */ 71 + bool mapped; 68 72 #if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) 69 73 u32 divisor; 70 74 #endif