Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/ttm: add bo as parameter to the ttm_tt_create callback

Instead of calculating the size in bytes just to recalculate the number
of pages from it pass the BO directly to the function.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Christian König and committed by Alex Deucher
dde5da23 5d951098

+75 -87
+4 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 982 982 .destroy = &amdgpu_ttm_backend_destroy, 983 983 }; 984 984 985 - static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev, 986 - unsigned long size, uint32_t page_flags) 985 + static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, 986 + uint32_t page_flags) 987 987 { 988 988 struct amdgpu_device *adev; 989 989 struct amdgpu_ttm_tt *gtt; 990 990 991 - adev = amdgpu_ttm_adev(bdev); 991 + adev = amdgpu_ttm_adev(bo->bdev); 992 992 993 993 gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); 994 994 if (gtt == NULL) { 995 995 return NULL; 996 996 } 997 997 gtt->ttm.ttm.func = &amdgpu_backend_func; 998 - if (ttm_sg_tt_init(&gtt->ttm, bdev, size, page_flags)) { 998 + if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) { 999 999 kfree(gtt); 1000 1000 return NULL; 1001 1001 }
+3 -3
drivers/gpu/drm/ast/ast_ttm.c
··· 199 199 }; 200 200 201 201 202 - static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev, 203 - unsigned long size, uint32_t page_flags) 202 + static struct ttm_tt *ast_ttm_tt_create(struct ttm_buffer_object *bo, 203 + uint32_t page_flags) 204 204 { 205 205 struct ttm_tt *tt; 206 206 ··· 208 208 if (tt == NULL) 209 209 return NULL; 210 210 tt->func = &ast_tt_backend_func; 211 - if (ttm_tt_init(tt, bdev, size, page_flags)) { 211 + if (ttm_tt_init(tt, bo, page_flags)) { 212 212 kfree(tt); 213 213 return NULL; 214 214 }
+2 -3
drivers/gpu/drm/bochs/bochs_mm.c
··· 176 176 .destroy = &bochs_ttm_backend_destroy, 177 177 }; 178 178 179 - static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev, 180 - unsigned long size, 179 + static struct ttm_tt *bochs_ttm_tt_create(struct ttm_buffer_object *bo, 181 180 uint32_t page_flags) 182 181 { 183 182 struct ttm_tt *tt; ··· 185 186 if (tt == NULL) 186 187 return NULL; 187 188 tt->func = &bochs_tt_backend_func; 188 - if (ttm_tt_init(tt, bdev, size, page_flags)) { 189 + if (ttm_tt_init(tt, bo, page_flags)) { 189 190 kfree(tt); 190 191 return NULL; 191 192 }
+3 -3
drivers/gpu/drm/cirrus/cirrus_ttm.c
··· 199 199 }; 200 200 201 201 202 - static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev, 203 - unsigned long size, uint32_t page_flags) 202 + static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_buffer_object *bo, 203 + uint32_t page_flags) 204 204 { 205 205 struct ttm_tt *tt; 206 206 ··· 208 208 if (tt == NULL) 209 209 return NULL; 210 210 tt->func = &cirrus_tt_backend_func; 211 - if (ttm_tt_init(tt, bdev, size, page_flags)) { 211 + if (ttm_tt_init(tt, bo, page_flags)) { 212 212 kfree(tt); 213 213 return NULL; 214 214 }
+2 -3
drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
··· 200 200 .destroy = &hibmc_ttm_backend_destroy, 201 201 }; 202 202 203 - static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev, 204 - unsigned long size, 203 + static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_buffer_object *bo, 205 204 u32 page_flags) 206 205 { 207 206 struct ttm_tt *tt; ··· 212 213 return NULL; 213 214 } 214 215 tt->func = &hibmc_tt_backend_func; 215 - ret = ttm_tt_init(tt, bdev, size, page_flags); 216 + ret = ttm_tt_init(tt, bo, page_flags); 216 217 if (ret) { 217 218 DRM_ERROR("failed to initialize ttm_tt: %d\n", ret); 218 219 kfree(tt);
+3 -3
drivers/gpu/drm/mgag200/mgag200_ttm.c
··· 199 199 }; 200 200 201 201 202 - static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev, 203 - unsigned long size, uint32_t page_flags) 202 + static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_buffer_object *bo, 203 + uint32_t page_flags) 204 204 { 205 205 struct ttm_tt *tt; 206 206 ··· 208 208 if (tt == NULL) 209 209 return NULL; 210 210 tt->func = &mgag200_tt_backend_func; 211 - if (ttm_tt_init(tt, bdev, size, page_flags)) { 211 + if (ttm_tt_init(tt, bo, page_flags)) { 212 212 kfree(tt); 213 213 return NULL; 214 214 }
+4 -6
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 604 604 } 605 605 606 606 static struct ttm_tt * 607 - nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, 608 - uint32_t page_flags) 607 + nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) 609 608 { 610 609 #if IS_ENABLED(CONFIG_AGP) 611 - struct nouveau_drm *drm = nouveau_bdev(bdev); 610 + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 612 611 613 612 if (drm->agp.bridge) { 614 - return ttm_agp_tt_create(bdev, drm->agp.bridge, size, 615 - page_flags); 613 + return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags); 616 614 } 617 615 #endif 618 616 619 - return nouveau_sgdma_create_ttm(bdev, size, page_flags); 617 + return nouveau_sgdma_create_ttm(bo, page_flags); 620 618 } 621 619 622 620 static int
+3 -4
drivers/gpu/drm/nouveau/nouveau_sgdma.c
··· 82 82 }; 83 83 84 84 struct ttm_tt * 85 - nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev, 86 - unsigned long size, uint32_t page_flags) 85 + nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags) 87 86 { 88 - struct nouveau_drm *drm = nouveau_bdev(bdev); 87 + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 89 88 struct nouveau_sgdma_be *nvbe; 90 89 91 90 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL); ··· 96 97 else 97 98 nvbe->ttm.ttm.func = &nv50_sgdma_backend; 98 99 99 - if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags)) 100 + if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) 100 101 /* 101 102 * A failing ttm_dma_tt_init() will call ttm_tt_destroy() 102 103 * and thus our nouveau_sgdma_destroy() hook, so we don't need
+2 -2
drivers/gpu/drm/nouveau/nouveau_ttm.h
··· 12 12 extern const struct ttm_mem_type_manager_func nouveau_gart_manager; 13 13 extern const struct ttm_mem_type_manager_func nv04_gart_manager; 14 14 15 - struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *, 16 - unsigned long size, u32 page_flags); 15 + struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, 16 + u32 page_flags); 17 17 18 18 int nouveau_ttm_init(struct nouveau_drm *drm); 19 19 void nouveau_ttm_fini(struct nouveau_drm *drm);
+4 -4
drivers/gpu/drm/qxl/qxl_ttm.c
··· 291 291 .destroy = &qxl_ttm_backend_destroy, 292 292 }; 293 293 294 - static struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev, 295 - unsigned long size, uint32_t page_flags) 294 + static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo, 295 + uint32_t page_flags) 296 296 { 297 297 struct qxl_device *qdev; 298 298 struct qxl_ttm_tt *gtt; 299 299 300 - qdev = qxl_get_qdev(bdev); 300 + qdev = qxl_get_qdev(bo->bdev); 301 301 gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL); 302 302 if (gtt == NULL) 303 303 return NULL; 304 304 gtt->ttm.ttm.func = &qxl_backend_func; 305 305 gtt->qdev = qdev; 306 - if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) { 306 + if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) { 307 307 kfree(gtt); 308 308 return NULL; 309 309 }
+6 -6
drivers/gpu/drm/radeon/radeon_ttm.c
··· 686 686 .destroy = &radeon_ttm_backend_destroy, 687 687 }; 688 688 689 - static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev, 690 - unsigned long size, uint32_t page_flags) 689 + static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo, 690 + uint32_t page_flags) 691 691 { 692 692 struct radeon_device *rdev; 693 693 struct radeon_ttm_tt *gtt; 694 694 695 - rdev = radeon_get_rdev(bdev); 695 + rdev = radeon_get_rdev(bo->bdev); 696 696 #if IS_ENABLED(CONFIG_AGP) 697 697 if (rdev->flags & RADEON_IS_AGP) { 698 - return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge, 699 - size, page_flags); 698 + return ttm_agp_tt_create(bo, rdev->ddev->agp->bridge, 699 + page_flags); 700 700 } 701 701 #endif 702 702 ··· 706 706 } 707 707 gtt->ttm.ttm.func = &radeon_backend_func; 708 708 gtt->rdev = rdev; 709 - if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) { 709 + if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) { 710 710 kfree(gtt); 711 711 return NULL; 712 712 }
+3 -3
drivers/gpu/drm/ttm/ttm_agp_backend.c
··· 110 110 .destroy = ttm_agp_destroy, 111 111 }; 112 112 113 - struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev, 113 + struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo, 114 114 struct agp_bridge_data *bridge, 115 - unsigned long size, uint32_t page_flags) 115 + uint32_t page_flags) 116 116 { 117 117 struct ttm_agp_backend *agp_be; 118 118 ··· 124 124 agp_be->bridge = bridge; 125 125 agp_be->ttm.func = &ttm_agp_func; 126 126 127 - if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags)) { 127 + if (ttm_tt_init(&agp_be->ttm, bo, page_flags)) { 128 128 kfree(agp_be); 129 129 return NULL; 130 130 }
+14 -15
drivers/gpu/drm/ttm/ttm_tt.c
··· 73 73 return -EINVAL; 74 74 } 75 75 76 - bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, 77 - page_flags); 76 + bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags); 78 77 if (unlikely(bo->ttm == NULL)) 79 78 return -ENOMEM; 80 79 ··· 236 237 ttm->func->destroy(ttm); 237 238 } 238 239 239 - void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_bo_device *bdev, 240 - unsigned long size, uint32_t page_flags) 240 + void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_buffer_object *bo, 241 + uint32_t page_flags) 241 242 { 242 - ttm->bdev = bdev; 243 - ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 243 + ttm->bdev = bo->bdev; 244 + ttm->num_pages = bo->num_pages; 244 245 ttm->caching_state = tt_cached; 245 246 ttm->page_flags = page_flags; 246 247 ttm->state = tt_unpopulated; 247 248 ttm->swap_storage = NULL; 248 249 } 249 250 250 - int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, 251 - unsigned long size, uint32_t page_flags) 251 + int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, 252 + uint32_t page_flags) 252 253 { 253 - ttm_tt_init_fields(ttm, bdev, size, page_flags); 254 + ttm_tt_init_fields(ttm, bo, page_flags); 254 255 255 256 if (ttm_tt_alloc_page_directory(ttm)) { 256 257 ttm_tt_destroy(ttm); ··· 268 269 } 269 270 EXPORT_SYMBOL(ttm_tt_fini); 270 271 271 - int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, 272 - unsigned long size, uint32_t page_flags) 272 + int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, 273 + uint32_t page_flags) 273 274 { 274 275 struct ttm_tt *ttm = &ttm_dma->ttm; 275 276 276 - ttm_tt_init_fields(ttm, bdev, size, page_flags); 277 + ttm_tt_init_fields(ttm, bo, page_flags); 277 278 278 279 INIT_LIST_HEAD(&ttm_dma->pages_list); 279 280 if (ttm_dma_tt_alloc_page_directory(ttm_dma)) { ··· 285 286 } 286 287 EXPORT_SYMBOL(ttm_dma_tt_init); 287 288 288 - int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, 289 - unsigned long size, uint32_t page_flags) 289 + int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, 290 + uint32_t page_flags) 290 291 { 291 292 struct ttm_tt *ttm = &ttm_dma->ttm; 292 293 int ret; 293 294 294 - ttm_tt_init_fields(ttm, bdev, size, page_flags); 295 + ttm_tt_init_fields(ttm, bo, page_flags); 295 296 296 297 INIT_LIST_HEAD(&ttm_dma->pages_list); 297 298 if (page_flags & TTM_PAGE_FLAG_SG)
+3 -4
drivers/gpu/drm/virtio/virtgpu_ttm.c
··· 322 322 .destroy = &virtio_gpu_ttm_backend_destroy, 323 323 }; 324 324 325 - static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_bo_device *bdev, 326 - unsigned long size, 325 + static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo, 327 326 uint32_t page_flags) 328 327 { 329 328 struct virtio_gpu_device *vgdev; 330 329 struct virtio_gpu_ttm_tt *gtt; 331 330 332 - vgdev = virtio_gpu_get_vgdev(bdev); 331 + vgdev = virtio_gpu_get_vgdev(bo->bdev); 333 332 gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL); 334 333 if (gtt == NULL) 335 334 return NULL; 336 335 gtt->ttm.ttm.func = &virtio_gpu_backend_func; 337 336 gtt->vgdev = vgdev; 338 - if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags)) { 337 + if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) { 339 338 kfree(gtt); 340 339 return NULL; 341 340 }
+5 -5
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
··· 693 693 .destroy = vmw_ttm_destroy, 694 694 }; 695 695 696 - static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev, 697 - unsigned long size, uint32_t page_flags) 696 + static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo, 697 + uint32_t page_flags) 698 698 { 699 699 struct vmw_ttm_tt *vmw_be; 700 700 int ret; ··· 704 704 return NULL; 705 705 706 706 vmw_be->dma_ttm.ttm.func = &vmw_ttm_func; 707 - vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); 707 + vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev); 708 708 vmw_be->mob = NULL; 709 709 710 710 if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) 711 - ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags); 711 + ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags); 712 712 else 713 - ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags); 713 + ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags); 714 714 if (unlikely(ret != 0)) 715 715 goto out_no_init; 716 716
+2 -3
drivers/staging/vboxvideo/vbox_ttm.c
··· 193 193 .destroy = &vbox_ttm_backend_destroy, 194 194 }; 195 195 196 - static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev, 197 - unsigned long size, 196 + static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo, 198 197 u32 page_flags) 199 198 { 200 199 struct ttm_tt *tt; ··· 203 204 return NULL; 204 205 205 206 tt->func = &vbox_tt_backend_func; 206 - if (ttm_tt_init(tt, bdev, size, page_flags)) { 207 + if (ttm_tt_init(tt, bo, page_flags)) { 207 208 kfree(tt); 208 209 return NULL; 209 210 }
+2 -4
include/drm/ttm/ttm_bo_driver.h
··· 225 225 /** 226 226 * ttm_tt_create 227 227 * 228 - * @bdev: pointer to a struct ttm_bo_device: 229 - * @size: Size of the data needed backing. 228 + * @bo: The buffer object to create the ttm for. 230 229 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. 231 230 * 232 231 * Create a struct ttm_tt to back data with system memory pages. ··· 233 234 * Returns: 234 235 * NULL: Out of memory. 235 236 */ 236 - struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev, 237 - unsigned long size, 237 + struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo, 238 238 uint32_t page_flags); 239 239 240 240 /**
+10 -12
include/drm/ttm/ttm_tt.h
··· 150 150 * ttm_tt_init 151 151 * 152 152 * @ttm: The struct ttm_tt. 153 - * @bdev: pointer to a struct ttm_bo_device: 154 - * @size: Size of the data needed backing. 153 + * @bo: The buffer object we create the ttm for. 155 154 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. 156 155 * 157 156 * Create a struct ttm_tt to back data with system memory pages. ··· 158 159 * Returns: 159 160 * NULL: Out of memory. 160 161 */ 161 - int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, 162 - unsigned long size, uint32_t page_flags); 163 - int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, 164 - unsigned long size, uint32_t page_flags); 165 - int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, 166 - unsigned long size, uint32_t page_flags); 162 + int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, 163 + uint32_t page_flags); 164 + int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, 165 + uint32_t page_flags); 166 + int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, 167 + uint32_t page_flags); 167 168 168 169 /** 169 170 * ttm_tt_fini ··· 253 254 /** 254 255 * ttm_agp_tt_create 255 256 * 256 - * @bdev: Pointer to a struct ttm_bo_device. 257 + * @bo: Buffer object we allocate the ttm for. 257 258 * @bridge: The agp bridge this device is sitting on. 258 - * @size: Size of the data needed backing. 259 259 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. 260 260 * 261 261 * ··· 262 264 * for TT memory. This function uses the linux agpgart interface to 263 265 * bind and unbind memory backing a ttm_tt. 264 266 */ 265 - struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev, 267 + struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo, 266 268 struct agp_bridge_data *bridge, 267 - unsigned long size, uint32_t page_flags); 269 + uint32_t page_flags); 268 270 int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); 269 271 void ttm_agp_tt_unpopulate(struct ttm_tt *ttm); 270 272 #endif