Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/nouveau: don't use ttm bo->offset v3

Store ttm bo->offset in struct nouveau_bo instead.

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/372932/
Signed-off-by: Christian König <christian.koenig@amd.com>

Authored by Nirmoy Das and committed by Christian König.
0dc9b286 0b17fc08

+36 -24
+3 -3
drivers/gpu/drm/nouveau/dispnv04/crtc.c
··· 845 845 fb = nouveau_framebuffer(crtc->primary->fb); 846 846 } 847 847 848 - nv_crtc->fb.offset = fb->nvbo->bo.offset; 848 + nv_crtc->fb.offset = fb->nvbo->offset; 849 849 850 850 if (nv_crtc->lut.depth != drm_fb->format->depth) { 851 851 nv_crtc->lut.depth = drm_fb->format->depth; ··· 1013 1013 nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); 1014 1014 1015 1015 nouveau_bo_unmap(cursor); 1016 - nv_crtc->cursor.offset = nv_crtc->cursor.nvbo->bo.offset; 1016 + nv_crtc->cursor.offset = nv_crtc->cursor.nvbo->offset; 1017 1017 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset); 1018 1018 nv_crtc->cursor.show(nv_crtc, true); 1019 1019 out: ··· 1191 1191 /* Initialize a page flip struct */ 1192 1192 *s = (struct nv04_page_flip_state) 1193 1193 { { }, event, crtc, fb->format->cpp[0] * 8, fb->pitches[0], 1194 - new_bo->bo.offset }; 1194 + new_bo->offset }; 1195 1195 1196 1196 /* Keep vblanks on during flip, for the target crtc of this flip */ 1197 1197 drm_crtc_vblank_get(crtc);
+2 -1
drivers/gpu/drm/nouveau/dispnv04/disp.c
··· 151 151 continue; 152 152 153 153 if (nv_crtc->cursor.set_offset) 154 - nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset); 154 + nv_crtc->cursor.set_offset(nv_crtc, 155 + nv_crtc->cursor.nvbo->offset); 155 156 nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, 156 157 nv_crtc->cursor_saved_y); 157 158 }
+3 -3
drivers/gpu/drm/nouveau/dispnv04/overlay.c
··· 150 150 nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0); 151 151 152 152 nvif_wr32(dev, NV_PVIDEO_BASE(flip), 0); 153 - nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset); 153 + nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->offset); 154 154 nvif_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w); 155 155 nvif_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x); 156 156 nvif_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w); ··· 172 172 if (format & NV_PVIDEO_FORMAT_PLANAR) { 173 173 nvif_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0); 174 174 nvif_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip), 175 - nv_fb->nvbo->bo.offset + fb->offsets[1]); 175 + nv_fb->nvbo->offset + fb->offsets[1]); 176 176 } 177 177 nvif_wr32(dev, NV_PVIDEO_FORMAT(flip), format | fb->pitches[0]); 178 178 nvif_wr32(dev, NV_PVIDEO_STOP, 0); ··· 396 396 397 397 for (i = 0; i < 2; i++) { 398 398 nvif_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i, 399 - nv_fb->nvbo->bo.offset); 399 + nv_fb->nvbo->offset); 400 400 nvif_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i, 401 401 fb->pitches[0]); 402 402 nvif_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0);
+1 -1
drivers/gpu/drm/nouveau/dispnv50/base507c.c
··· 275 275 276 276 ret = nv50_dmac_create(&drm->client.device, &disp->disp->object, 277 277 &oclass, head, &args, sizeof(args), 278 - disp->sync->bo.offset, &wndw->wndw); 278 + disp->sync->offset, &wndw->wndw); 279 279 if (ret) { 280 280 NV_ERROR(drm, "base%04x allocation failed: %d\n", oclass, ret); 281 281 return ret;
+1 -1
drivers/gpu/drm/nouveau/dispnv50/core507d.c
··· 100 100 101 101 ret = nv50_dmac_create(&drm->client.device, &disp->disp->object, 102 102 &oclass, 0, &args, sizeof(args), 103 - disp->sync->bo.offset, &core->chan); 103 + disp->sync->offset, &core->chan); 104 104 if (ret) { 105 105 NV_ERROR(drm, "core%04x allocation failed: %d\n", oclass, ret); 106 106 return ret;
+1 -1
drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
··· 186 186 187 187 ret = nv50_dmac_create(&drm->client.device, &disp->disp->object, 188 188 &oclass, 0, &args, sizeof(args), 189 - disp->sync->bo.offset, &wndw->wndw); 189 + disp->sync->offset, &wndw->wndw); 190 190 if (ret) { 191 191 NV_ERROR(drm, "ovly%04x allocation failed: %d\n", oclass, ret); 192 192 return ret;
+1 -1
drivers/gpu/drm/nouveau/dispnv50/wndw.c
··· 511 511 } 512 512 513 513 asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv); 514 - asyw->image.offset[0] = fb->nvbo->bo.offset; 514 + asyw->image.offset[0] = fb->nvbo->offset; 515 515 516 516 if (wndw->func->prepare) { 517 517 asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
+1 -1
drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
··· 298 298 299 299 ret = nv50_dmac_create(&drm->client.device, &disp->disp->object, 300 300 &oclass, 0, &args, sizeof(args), 301 - disp->sync->bo.offset, &wndw->wndw); 301 + disp->sync->offset, &wndw->wndw); 302 302 if (ret) { 303 303 NV_ERROR(drm, "qndw%04x allocation failed: %d\n", oclass, ret); 304 304 return ret;
+4 -4
drivers/gpu/drm/nouveau/nouveau_abi16.c
··· 558 558 if (drm->agp.bridge) { 559 559 args.target = NV_DMA_V0_TARGET_AGP; 560 560 args.access = NV_DMA_V0_ACCESS_RDWR; 561 - args.start += drm->agp.base + chan->ntfy->bo.offset; 562 - args.limit += drm->agp.base + chan->ntfy->bo.offset; 561 + args.start += drm->agp.base + chan->ntfy->offset; 562 + args.limit += drm->agp.base + chan->ntfy->offset; 563 563 } else { 564 564 args.target = NV_DMA_V0_TARGET_VM; 565 565 args.access = NV_DMA_V0_ACCESS_RDWR; 566 - args.start += chan->ntfy->bo.offset; 567 - args.limit += chan->ntfy->bo.offset; 566 + args.start += chan->ntfy->offset; 567 + args.limit += chan->ntfy->offset; 568 568 } 569 569 570 570 client->route = NVDRM_OBJECT_ABI16;
+8
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 1317 1317 nouveau_vma_unmap(vma); 1318 1318 } 1319 1319 } 1320 + 1321 + if (new_reg) { 1322 + if (new_reg->mm_node) 1323 + nvbo->offset = (new_reg->start << PAGE_SHIFT); 1324 + else 1325 + nvbo->offset = 0; 1326 + } 1327 + 1320 1328 } 1321 1329 1322 1330 static int
+3
drivers/gpu/drm/nouveau/nouveau_bo.h
··· 24 24 int pbbo_index; 25 25 bool validate_mapped; 26 26 27 + /* GPU address space is independent of CPU word size */ 28 + uint64_t offset; 29 + 27 30 struct list_head vma_list; 28 31 29 32 unsigned contig:1;
+1 -1
drivers/gpu/drm/nouveau/nouveau_chan.c
··· 162 162 * pushbuf lives in, this is because the GEM code requires that 163 163 * we be able to call out to other (indirect) push buffers 164 164 */ 165 - chan->push.addr = chan->push.buffer->bo.offset; 165 + chan->push.addr = chan->push.buffer->offset; 166 166 167 167 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) { 168 168 ret = nouveau_vma_new(chan->push.buffer, chan->vmm,
+1 -1
drivers/gpu/drm/nouveau/nouveau_dmem.c
··· 90 90 struct nouveau_dmem_chunk *chunk = page->zone_device_data; 91 91 unsigned long idx = page_to_pfn(page) - chunk->pfn_first; 92 92 93 - return (idx << PAGE_SHIFT) + chunk->bo->bo.offset; 93 + return (idx << PAGE_SHIFT) + chunk->bo->offset; 94 94 } 95 95 96 96 static void nouveau_dmem_page_free(struct page *page)
+1 -1
drivers/gpu/drm/nouveau/nouveau_fbcon.c
··· 393 393 394 394 /* To allow resizeing without swapping buffers */ 395 395 NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n", 396 - fb->base.width, fb->base.height, fb->nvbo->bo.offset, nvbo); 396 + fb->base.width, fb->base.height, fb->nvbo->offset, nvbo); 397 397 398 398 vga_switcheroo_client_fb_set(dev->pdev, info); 399 399 return 0;
+5 -5
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 232 232 rep->domain = NOUVEAU_GEM_DOMAIN_GART; 233 233 else 234 234 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; 235 - rep->offset = nvbo->bo.offset; 235 + rep->offset = nvbo->offset; 236 236 if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) { 237 237 vma = nouveau_vma_find(nvbo, vmm); 238 238 if (!vma) ··· 516 516 } 517 517 518 518 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { 519 - if (nvbo->bo.offset == b->presumed.offset && 519 + if (nvbo->offset == b->presumed.offset && 520 520 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && 521 521 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || 522 522 (nvbo->bo.mem.mem_type == TTM_PL_TT && ··· 527 527 b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART; 528 528 else 529 529 b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM; 530 - b->presumed.offset = nvbo->bo.offset; 530 + b->presumed.offset = nvbo->offset; 531 531 b->presumed.valid = 0; 532 532 relocs++; 533 533 } ··· 805 805 struct nouveau_bo *nvbo = (void *)(unsigned long) 806 806 bo[push[i].bo_index].user_priv; 807 807 808 - OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2); 808 + OUT_RING(chan, (nvbo->offset + push[i].offset) | 2); 809 809 OUT_RING(chan, 0); 810 810 } 811 811 } else { ··· 840 840 } 841 841 842 842 OUT_RING(chan, 0x20000000 | 843 - (nvbo->bo.offset + push[i].offset)); 843 + (nvbo->offset + push[i].offset)); 844 844 OUT_RING(chan, 0); 845 845 for (j = 0; j < NOUVEAU_DMA_SKIPS; j++) 846 846 OUT_RING(chan, 0);