Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/ttm: Convert vm callbacks to helpers

The default TTM fault handler may not be completely sufficient
(vmwgfx needs to do some bookkeeping, control the write protection and also
needs to restrict the number of prefaults).

Also make it possible to replicate ttm_bo_vm_reserve() functionality for,
for example, mkwrite handlers.

So turn the TTM vm code into helpers: ttm_bo_vm_fault_reserved(),
ttm_bo_vm_open(), ttm_bo_vm_close() and ttm_bo_vm_reserve(). Also provide
a default TTM fault handler for other drivers to use.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/332900/?series=67217&rev=1
Signed-off-by: Christian König <christian.koenig@amd.com>

authored by

Thomas Hellstrom and committed by
Christian König
caa478af 8951000f

+119 -63
+105 -63
drivers/gpu/drm/ttm/ttm_bo_vm.c
··· 42 42 #include <linux/uaccess.h> 43 43 #include <linux/mem_encrypt.h> 44 44 45 - #define TTM_BO_VM_NUM_PREFAULT 16 46 - 47 45 static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, 48 46 struct vm_fault *vmf) 49 47 { ··· 104 106 + page_offset; 105 107 } 106 108 107 - static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) 109 + /** 110 + * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback 111 + * @bo: The buffer object 112 + * @vmf: The fault structure handed to the callback 113 + * 114 + * vm callbacks like fault() and *_mkwrite() allow for the mm_sem to be dropped 115 + * during long waits, and after the wait the callback will be restarted. This 116 + * is to allow other threads using the same virtual memory space concurrent 117 + * access to map(), unmap() completely unrelated buffer objects. TTM buffer 118 + * object reservations sometimes wait for GPU and should therefore be 119 + * considered long waits. This function reserves the buffer object interruptibly 120 + * taking this into account. Starvation is avoided by the vm system not 121 + * allowing too many repeated restarts. 122 + * This function is intended to be used in customized fault() and _mkwrite() 123 + * handlers. 124 + * 125 + * Return: 126 + * 0 on success and the bo was reserved. 127 + * VM_FAULT_RETRY if blocking wait. 128 + * VM_FAULT_NOPAGE if blocking wait and retrying was not allowed. 
129 + */ 130 + vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, 131 + struct vm_fault *vmf) 108 132 { 109 - struct vm_area_struct *vma = vmf->vma; 110 - struct ttm_buffer_object *bo = vma->vm_private_data; 111 - struct ttm_bo_device *bdev = bo->bdev; 112 - unsigned long page_offset; 113 - unsigned long page_last; 114 - unsigned long pfn; 115 - struct ttm_tt *ttm = NULL; 116 - struct page *page; 117 - int err; 118 - int i; 119 - vm_fault_t ret = VM_FAULT_NOPAGE; 120 - unsigned long address = vmf->address; 121 - struct ttm_mem_type_manager *man = 122 - &bdev->man[bo->mem.mem_type]; 123 - struct vm_area_struct cvma; 124 - 125 133 /* 126 134 * Work around locking order reversal in fault / nopfn 127 135 * between mmap_sem and bo_reserve: Perform a trylock operation ··· 154 150 return VM_FAULT_NOPAGE; 155 151 } 156 152 153 + return 0; 154 + } 155 + EXPORT_SYMBOL(ttm_bo_vm_reserve); 156 + 157 + /** 158 + * ttm_bo_vm_fault_reserved - TTM fault helper 159 + * @vmf: The struct vm_fault given as argument to the fault callback 160 + * @prot: The page protection to be used for this memory area. 161 + * @num_prefault: Maximum number of prefault pages. The caller may want to 162 + * specify this based on madvice settings and the size of the GPU object 163 + * backed by the memory. 164 + * 165 + * This function inserts one or more page table entries pointing to the 166 + * memory backing the buffer object, and then returns a return code 167 + * instructing the caller to retry the page access. 
168 + * 169 + * Return: 170 + * VM_FAULT_NOPAGE on success or pending signal 171 + * VM_FAULT_SIGBUS on unspecified error 172 + * VM_FAULT_OOM on out-of-memory 173 + * VM_FAULT_RETRY if retryable wait 174 + */ 175 + vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, 176 + pgprot_t prot, 177 + pgoff_t num_prefault) 178 + { 179 + struct vm_area_struct *vma = vmf->vma; 180 + struct vm_area_struct cvma = *vma; 181 + struct ttm_buffer_object *bo = vma->vm_private_data; 182 + struct ttm_bo_device *bdev = bo->bdev; 183 + unsigned long page_offset; 184 + unsigned long page_last; 185 + unsigned long pfn; 186 + struct ttm_tt *ttm = NULL; 187 + struct page *page; 188 + int err; 189 + pgoff_t i; 190 + vm_fault_t ret = VM_FAULT_NOPAGE; 191 + unsigned long address = vmf->address; 192 + struct ttm_mem_type_manager *man = 193 + &bdev->man[bo->mem.mem_type]; 194 + 157 195 /* 158 196 * Refuse to fault imported pages. This should be handled 159 197 * (if at all) by redirecting mmap to the exporter. 160 198 */ 161 - if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) { 162 - ret = VM_FAULT_SIGBUS; 163 - goto out_unlock; 164 - } 199 + if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) 200 + return VM_FAULT_SIGBUS; 165 201 166 202 if (bdev->driver->fault_reserve_notify) { 167 203 struct dma_fence *moving = dma_fence_get(bo->moving); ··· 212 168 break; 213 169 case -EBUSY: 214 170 case -ERESTARTSYS: 215 - ret = VM_FAULT_NOPAGE; 216 - goto out_unlock; 171 + return VM_FAULT_NOPAGE; 217 172 default: 218 - ret = VM_FAULT_SIGBUS; 219 - goto out_unlock; 173 + return VM_FAULT_SIGBUS; 220 174 } 221 175 222 176 if (bo->moving != moving) { ··· 230 188 * move. 231 189 */ 232 190 ret = ttm_bo_vm_fault_idle(bo, vmf); 233 - if (unlikely(ret != 0)) { 234 - if (ret == VM_FAULT_RETRY && 235 - !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { 236 - /* The BO has already been unreserved. 
*/ 237 - return ret; 238 - } 239 - 240 - goto out_unlock; 241 - } 191 + if (unlikely(ret != 0)) 192 + return ret; 242 193 243 194 err = ttm_mem_io_lock(man, true); 244 - if (unlikely(err != 0)) { 245 - ret = VM_FAULT_NOPAGE; 246 - goto out_unlock; 247 - } 195 + if (unlikely(err != 0)) 196 + return VM_FAULT_NOPAGE; 248 197 err = ttm_mem_io_reserve_vm(bo); 249 198 if (unlikely(err != 0)) { 250 199 ret = VM_FAULT_SIGBUS; ··· 252 219 goto out_io_unlock; 253 220 } 254 221 255 - /* 256 - * Make a local vma copy to modify the page_prot member 257 - * and vm_flags if necessary. The vma parameter is protected 258 - * by mmap_sem in write mode. 259 - */ 260 - cvma = *vma; 261 - cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags); 262 - 263 - if (bo->mem.bus.is_iomem) { 264 - cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, 265 - cvma.vm_page_prot); 266 - } else { 222 + cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot); 223 + if (!bo->mem.bus.is_iomem) { 267 224 struct ttm_operation_ctx ctx = { 268 225 .interruptible = false, 269 226 .no_wait_gpu = false, ··· 262 239 }; 263 240 264 241 ttm = bo->ttm; 265 - cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, 266 - cvma.vm_page_prot); 267 - 268 - /* Allocate all page at once, most common usage */ 269 - if (ttm_tt_populate(ttm, &ctx)) { 242 + if (ttm_tt_populate(bo->ttm, &ctx)) { 270 243 ret = VM_FAULT_OOM; 271 244 goto out_io_unlock; 272 245 } 246 + } else { 247 + /* Iomem should not be marked encrypted */ 248 + cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot); 273 249 } 274 250 275 251 /* 276 252 * Speculatively prefault a number of pages. Only error on 277 253 * first page. 
278 254 */ 279 - for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) { 255 + for (i = 0; i < num_prefault; ++i) { 280 256 if (bo->mem.bus.is_iomem) { 281 - /* Iomem should not be marked encrypted */ 282 - cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot); 283 257 pfn = ttm_bo_io_mem_pfn(bo, page_offset); 284 258 } else { 285 259 page = ttm->pages[page_offset]; ··· 312 292 ret = VM_FAULT_NOPAGE; 313 293 out_io_unlock: 314 294 ttm_mem_io_unlock(man); 315 - out_unlock: 295 + return ret; 296 + } 297 + EXPORT_SYMBOL(ttm_bo_vm_fault_reserved); 298 + 299 + static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) 300 + { 301 + struct vm_area_struct *vma = vmf->vma; 302 + pgprot_t prot; 303 + struct ttm_buffer_object *bo = vma->vm_private_data; 304 + vm_fault_t ret; 305 + 306 + ret = ttm_bo_vm_reserve(bo, vmf); 307 + if (ret) 308 + return ret; 309 + 310 + prot = vm_get_page_prot(vma->vm_flags); 311 + ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT); 312 + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) 313 + return ret; 314 + 316 315 dma_resv_unlock(bo->base.resv); 316 + 317 317 return ret; 318 318 } 319 319 320 - static void ttm_bo_vm_open(struct vm_area_struct *vma) 320 + void ttm_bo_vm_open(struct vm_area_struct *vma) 321 321 { 322 322 struct ttm_buffer_object *bo = vma->vm_private_data; 323 323 ··· 345 305 346 306 ttm_bo_get(bo); 347 307 } 308 + EXPORT_SYMBOL(ttm_bo_vm_open); 348 309 349 - static void ttm_bo_vm_close(struct vm_area_struct *vma) 310 + void ttm_bo_vm_close(struct vm_area_struct *vma) 350 311 { 351 312 struct ttm_buffer_object *bo = vma->vm_private_data; 352 313 353 314 ttm_bo_put(bo); 354 315 vma->vm_private_data = NULL; 355 316 } 317 + EXPORT_SYMBOL(ttm_bo_vm_close); 356 318 357 319 static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, 358 320 unsigned long offset,
+14
include/drm/ttm/ttm_bo_api.h
··· 727 727 { 728 728 return bo->base.dev != NULL; 729 729 } 730 + 731 + /* Default number of pre-faulted pages in the TTM fault handler */ 732 + #define TTM_BO_VM_NUM_PREFAULT 16 733 + 734 + vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, 735 + struct vm_fault *vmf); 736 + 737 + vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, 738 + pgprot_t prot, 739 + pgoff_t num_prefault); 740 + 741 + void ttm_bo_vm_open(struct vm_area_struct *vma); 742 + 743 + void ttm_bo_vm_close(struct vm_area_struct *vma); 730 744 #endif