Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/nouveau: initial changes to support multiple VMAs per buffer object

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

+79 -25
+71 -25
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 49 49 DRM_ERROR("bo %p still attached to GEM object\n", bo); 50 50 51 51 nv10_mem_put_tile_region(dev, nvbo->tile, NULL); 52 - if (nvbo->vma.node) { 53 - nouveau_vm_unmap(&nvbo->vma); 54 - nouveau_vm_put(&nvbo->vma); 55 - } 52 + nouveau_bo_vma_del(nvbo, &nvbo->vma); 56 53 kfree(nvbo); 57 54 } 58 55 ··· 100 103 return -ENOMEM; 101 104 INIT_LIST_HEAD(&nvbo->head); 102 105 INIT_LIST_HEAD(&nvbo->entry); 106 + INIT_LIST_HEAD(&nvbo->vma_list); 103 107 nvbo->tile_mode = tile_mode; 104 108 nvbo->tile_flags = tile_flags; 105 109 nvbo->bo.bdev = &dev_priv->ttm.bdev; ··· 112 114 } 113 115 114 116 nouveau_bo_fixup_align(nvbo, flags, &align, &size); 115 - align >>= PAGE_SHIFT; 117 + nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; 118 + nouveau_bo_placement_set(nvbo, flags, 0); 116 119 117 120 if (dev_priv->chan_vm) { 118 - ret = nouveau_vm_get(dev_priv->chan_vm, size, nvbo->page_shift, 119 - NV_MEM_ACCESS_RW, &nvbo->vma); 121 + ret = nouveau_bo_vma_add(nvbo, dev_priv->chan_vm, &nvbo->vma); 120 122 if (ret) { 121 123 kfree(nvbo); 122 124 return ret; 123 125 } 124 126 } 125 127 126 - nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; 127 - nouveau_bo_placement_set(nvbo, flags, 0); 128 - 129 128 nvbo->channel = chan; 130 129 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, 131 - ttm_bo_type_device, &nvbo->placement, align, 0, 132 - false, NULL, size, nouveau_bo_del_ttm); 130 + ttm_bo_type_device, &nvbo->placement, 131 + align >> PAGE_SHIFT, 0, false, NULL, size, 132 + nouveau_bo_del_ttm); 133 133 if (ret) { 134 134 /* ttm will call nouveau_bo_del_ttm if it fails.. 
*/ 135 135 return ret; ··· 814 818 { 815 819 struct nouveau_mem *node = new_mem->mm_node; 816 820 struct nouveau_bo *nvbo = nouveau_bo(bo); 817 - struct nouveau_vma *vma = &nvbo->vma; 821 + struct nouveau_vma *vma; 818 822 819 - if (!vma->vm) 820 - return; 821 - 822 - if (new_mem->mem_type == TTM_PL_VRAM) { 823 - nouveau_vm_map(&nvbo->vma, new_mem->mm_node); 824 - } else 825 - if (new_mem->mem_type == TTM_PL_TT && 826 - nvbo->page_shift == nvbo->vma.vm->spg_shift) { 827 - nouveau_vm_map_sg(&nvbo->vma, 0, new_mem-> 828 - num_pages << PAGE_SHIFT, node, node->pages); 829 - } else { 830 - nouveau_vm_unmap(&nvbo->vma); 823 + list_for_each_entry(vma, &nvbo->vma_list, head) { 824 + if (new_mem->mem_type == TTM_PL_VRAM) { 825 + nouveau_vm_map(vma, new_mem->mm_node); 826 + } else 827 + if (new_mem->mem_type == TTM_PL_TT && 828 + nvbo->page_shift == vma->vm->spg_shift) { 829 + nouveau_vm_map_sg(vma, 0, new_mem-> 830 + num_pages << PAGE_SHIFT, 831 + node, node->pages); 832 + } else { 833 + nouveau_vm_unmap(vma); 834 + } 831 835 } 832 836 } 833 837 ··· 1073 1077 .io_mem_free = &nouveau_ttm_io_mem_free, 1074 1078 }; 1075 1079 1080 + struct nouveau_vma * 1081 + nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm) 1082 + { 1083 + struct nouveau_vma *vma; 1084 + list_for_each_entry(vma, &nvbo->vma_list, head) { 1085 + if (vma->vm == vm) 1086 + return vma; 1087 + } 1088 + 1089 + return NULL; 1090 + } 1091 + 1092 + int 1093 + nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm, 1094 + struct nouveau_vma *vma) 1095 + { 1096 + const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT; 1097 + struct nouveau_mem *node = nvbo->bo.mem.mm_node; 1098 + int ret; 1099 + 1100 + ret = nouveau_vm_get(vm, size, nvbo->page_shift, 1101 + NV_MEM_ACCESS_RW, vma); 1102 + if (ret) 1103 + return ret; 1104 + 1105 + if (nvbo->bo.mem.mem_type == TTM_PL_VRAM) 1106 + nouveau_vm_map(vma, nvbo->bo.mem.mm_node); 1107 + else 1108 + if (nvbo->bo.mem.mem_type == TTM_PL_TT) 1109 + 
nouveau_vm_map_sg(vma, 0, size, node, node->pages); 1110 + 1111 + list_add_tail(&vma->head, &nvbo->vma_list); 1112 + return 0; 1113 + } 1114 + 1115 + void 1116 + nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma) 1117 + { 1118 + if (vma->node) { 1119 + if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) { 1120 + spin_lock(&nvbo->bo.bdev->fence_lock); 1121 + ttm_bo_wait(&nvbo->bo, false, false, false); 1122 + spin_unlock(&nvbo->bo.bdev->fence_lock); 1123 + nouveau_vm_unmap(vma); 1124 + } 1125 + 1126 + nouveau_vm_put(vma); 1127 + list_del(&vma->head); 1128 + } 1129 + }
+7
drivers/gpu/drm/nouveau/nouveau_drv.h
··· 116 116 struct nouveau_channel *channel; 117 117 118 118 struct nouveau_vma vma; 119 + struct list_head vma_list; 119 120 unsigned page_shift; 120 121 121 122 uint32_t tile_mode; ··· 1283 1282 extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *); 1284 1283 extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, 1285 1284 bool no_wait_reserve, bool no_wait_gpu); 1285 + 1286 + extern struct nouveau_vma * 1287 + nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *); 1288 + extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *, 1289 + struct nouveau_vma *); 1290 + extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *); 1286 1291 1287 1292 /* nouveau_fence.c */ 1288 1293 struct nouveau_fence;
+1
drivers/gpu/drm/nouveau/nouveau_vm.h
··· 41 41 }; 42 42 43 43 struct nouveau_vma { 44 + struct list_head head; 44 45 struct nouveau_vm *vm; 45 46 struct nouveau_mm_node *node; 46 47 u64 offset;