Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/nvd0/disp: add support for page flipping

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

+267 -65
+4 -1
drivers/gpu/drm/nouveau/nouveau_display.c
··· 466 466 467 467 /* Emit a page flip */ 468 468 if (dev_priv->card_type >= NV_50) { 469 - ret = nv50_display_flip_next(crtc, fb, chan); 469 + if (dev_priv->card_type >= NV_D0) 470 + ret = nvd0_display_flip_next(crtc, fb, chan, 0); 471 + else 472 + ret = nv50_display_flip_next(crtc, fb, chan); 470 473 if (ret) { 471 474 nouveau_channel_put(&chan); 472 475 goto fail_unreserve;
+4
drivers/gpu/drm/nouveau/nouveau_drv.h
··· 1353 1353 extern void nvd0_display_destroy(struct drm_device *); 1354 1354 extern int nvd0_display_init(struct drm_device *); 1355 1355 extern void nvd0_display_fini(struct drm_device *); 1356 + struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int crtc); 1357 + void nvd0_display_flip_stop(struct drm_crtc *); 1358 + int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *, 1359 + struct nouveau_channel *, u32 swap_interval); 1356 1360 1357 1361 /* nv04_crtc.c */ 1358 1362 extern int nv04_crtc_create(struct drm_device *, int index);
+14 -9
drivers/gpu/drm/nouveau/nouveau_object.c
··· 723 723 nv_wo32(chan->ramin, 0x020c, 0x000000ff); 724 724 725 725 /* map display semaphore buffers into channel's vm */ 726 - if (dev_priv->card_type >= NV_D0) 727 - return 0; 726 + for (i = 0; i < dev->mode_config.num_crtc; i++) { 727 + struct nouveau_bo *bo; 728 + if (dev_priv->card_type >= NV_D0) 729 + bo = nvd0_display_crtc_sema(dev, i); 730 + else 731 + bo = nv50_display(dev)->crtc[i].sem.bo; 728 732 729 - for (i = 0; i < 2; i++) { 730 - struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i]; 731 - 732 - ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm, 733 - &chan->dispc_vma[i]); 733 + ret = nouveau_bo_vma_add(bo, chan->vm, &chan->dispc_vma[i]); 734 734 if (ret) 735 735 return ret; 736 736 } ··· 879 879 880 880 NV_DEBUG(dev, "ch%d\n", chan->id); 881 881 882 - if (dev_priv->card_type >= NV_50 && dev_priv->card_type <= NV_C0) { 882 + if (dev_priv->card_type >= NV_D0) { 883 + for (i = 0; i < dev->mode_config.num_crtc; i++) { 884 + struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i); 885 + nouveau_bo_vma_del(bo, &chan->dispc_vma[i]); 886 + } 887 + } else 888 + if (dev_priv->card_type >= NV_50) { 883 889 struct nv50_display *disp = nv50_display(dev); 884 - 885 890 for (i = 0; i < dev->mode_config.num_crtc; i++) { 886 891 struct nv50_display_crtc *dispc = &disp->crtc[i]; 887 892 nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
+1 -1
drivers/gpu/drm/nouveau/nouveau_state.c
··· 1244 1244 getparam->value = 1; 1245 1245 break; 1246 1246 case NOUVEAU_GETPARAM_HAS_PAGEFLIP: 1247 - getparam->value = dev_priv->card_type < NV_D0; 1247 + getparam->value = 1; 1248 1248 break; 1249 1249 case NOUVEAU_GETPARAM_GRAPH_UNITS: 1250 1250 /* NV40 and NV50 versions are quite different, but register
+244 -54
drivers/gpu/drm/nouveau/nvd0_display.c
··· 39 39 #define EVO_SYNC(c) (0x01 + (c)) 40 40 #define EVO_CURS(c) (0x0d + (c)) 41 41 42 + struct evo { 43 + int idx; 44 + dma_addr_t handle; 45 + u32 *ptr; 46 + struct { 47 + struct nouveau_bo *bo; 48 + u32 offset; 49 + u16 value; 50 + } sem; 51 + }; 52 + 42 53 struct nvd0_display { 43 54 struct nouveau_gpuobj *mem; 44 - struct { 45 - dma_addr_t handle; 46 - u32 *ptr; 47 - } evo[3]; 55 + struct evo evo[3]; 48 56 49 57 struct tasklet_struct tasklet; 50 58 u32 modeset; ··· 205 197 nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000); 206 198 } 207 199 200 + static bool 201 + evo_sync_wait(void *data) 202 + { 203 + return nouveau_bo_rd32(data, 0) != 0x00000000; 204 + } 205 + 206 + static int 207 + evo_sync(struct drm_device *dev, int ch) 208 + { 209 + struct nvd0_display *disp = nvd0_display(dev); 210 + struct evo *evo = &disp->evo[ch]; 211 + u32 *push; 212 + 213 + nouveau_bo_wr32(evo->sem.bo, 0, 0x00000000); 214 + 215 + push = evo_wait(dev, ch, 8); 216 + if (push) { 217 + evo_mthd(push, 0x0084, 1); 218 + evo_data(push, 0x80000000); 219 + evo_mthd(push, 0x0080, 2); 220 + evo_data(push, 0x00000000); 221 + evo_data(push, 0x00000000); 222 + evo_kick(push, dev, ch); 223 + if (nv_wait_cb(dev, evo_sync_wait, evo->sem.bo)) 224 + return 0; 225 + } 226 + 227 + return -EBUSY; 228 + } 229 + 230 + /****************************************************************************** 231 + * Sync channel (aka. page flipping) 232 + *****************************************************************************/ 233 + struct nouveau_bo * 234 + nvd0_display_crtc_sema(struct drm_device *dev, int crtc) 235 + { 236 + struct nvd0_display *disp = nvd0_display(dev); 237 + struct evo *evo = &disp->evo[EVO_SYNC(crtc)]; 238 + return evo->sem.bo; 239 + } 240 + 241 + void 242 + nvd0_display_flip_stop(struct drm_crtc *crtc) 243 + { 244 + struct nvd0_display *disp = nvd0_display(crtc->dev); 245 + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 246 + struct evo *evo = &disp->evo[EVO_SYNC(nv_crtc->index)]; 247 + u32 *push; 248 + 249 + push = evo_wait(crtc->dev, evo->idx, 8); 250 + if (push) { 251 + evo_mthd(push, 0x0084, 1); 252 + evo_data(push, 0x00000000); 253 + evo_mthd(push, 0x0094, 1); 254 + evo_data(push, 0x00000000); 255 + evo_mthd(push, 0x00c0, 1); 256 + evo_data(push, 0x00000000); 257 + evo_mthd(push, 0x0080, 1); 258 + evo_data(push, 0x00000000); 259 + evo_kick(push, crtc->dev, evo->idx); 260 + } 261 + } 262 + 263 + int 264 + nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, 265 + struct nouveau_channel *chan, u32 swap_interval) 266 + { 267 + struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); 268 + struct nvd0_display *disp = nvd0_display(crtc->dev); 269 + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 270 + struct evo *evo = &disp->evo[EVO_SYNC(nv_crtc->index)]; 271 + u64 offset; 272 + u32 *push; 273 + int ret; 274 + 275 + swap_interval <<= 4; 276 + if (swap_interval == 0) 277 + swap_interval |= 0x100; 278 + 279 + push = evo_wait(crtc->dev, evo->idx, 128); 280 + if (unlikely(push == NULL)) 281 + return -EBUSY; 282 + 283 + /* synchronise with the rendering channel, if necessary */ 284 + if (likely(chan)) { 285 + ret = RING_SPACE(chan, 10); 286 + if (ret) 287 + return ret; 288 + 289 + offset = chan->dispc_vma[nv_crtc->index].offset; 290 + offset += evo->sem.offset; 291 + 292 + BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4); 293 + OUT_RING (chan, upper_32_bits(offset)); 294 + OUT_RING (chan, lower_32_bits(offset)); 295 + OUT_RING (chan, 0xf00d0000 | evo->sem.value); 296 + OUT_RING (chan, 0x1002); 297 + BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4); 298 + OUT_RING (chan, upper_32_bits(offset)); 299 + OUT_RING (chan, lower_32_bits(offset ^ 0x10)); 300 + OUT_RING (chan, 0x74b1e000); 301 + OUT_RING (chan, 0x1001); 302 + FIRE_RING (chan); 303 + } else { 304 + nouveau_bo_wr32(evo->sem.bo, evo->sem.offset / 4, 305 + 0xf00d0000 | evo->sem.value); 306 + evo_sync(crtc->dev, EVO_MASTER); 307 + } 308 + 309 + /* queue the flip */ 310 + evo_mthd(push, 0x0100, 1); 311 + evo_data(push, 0xfffe0000); 312 + evo_mthd(push, 0x0084, 1); 313 + evo_data(push, swap_interval); 314 + if (!(swap_interval & 0x00000100)) { 315 + evo_mthd(push, 0x00e0, 1); 316 + evo_data(push, 0x40000000); 317 + } 318 + evo_mthd(push, 0x0088, 4); 319 + evo_data(push, evo->sem.offset); 320 + evo_data(push, 0xf00d0000 | evo->sem.value); 321 + evo_data(push, 0x74b1e000); 322 + evo_data(push, NvEvoSync); 323 + evo_mthd(push, 0x00a0, 2); 324 + evo_data(push, 0x00000000); 325 + evo_data(push, 0x00000000); 326 + evo_mthd(push, 0x00c0, 1); 327 + evo_data(push, nv_fb->r_dma); 328 + evo_mthd(push, 0x0110, 2); 329 + evo_data(push, 0x00000000); 330 + evo_data(push, 0x00000000); 331 + evo_mthd(push, 0x0400, 5); 332 + evo_data(push, nv_fb->nvbo->bo.offset >> 8); 333 + evo_data(push, 0); 334 + evo_data(push, (fb->height << 16) | fb->width); 335 + evo_data(push, nv_fb->r_pitch); 336 + evo_data(push, nv_fb->r_format); 337 + evo_mthd(push, 0x0080, 1); 338 + evo_data(push, 0x00000000); 339 + evo_kick(push, crtc->dev, evo->idx); 340 + 341 + evo->sem.offset ^= 0x10; 342 + evo->sem.value++; 343 + return 0; 344 + } 345 + 208 346 /****************************************************************************** 209 347 * CRTC 210 348 *****************************************************************************/ ··· 397 243 { 398 244 struct drm_display_mode *omode, *umode = &nv_crtc->base.mode; 399 245 struct drm_device *dev = nv_crtc->base.dev; 246 + struct drm_crtc *crtc = &nv_crtc->base; 400 247 struct nouveau_connector *nv_connector; 401 248 int mode = DRM_MODE_SCALE_NONE; 402 249 u32 oX, oY, *push; ··· 463 308 break; 464 309 } 465 310 466 - push = evo_wait(dev, EVO_MASTER, 16); 311 + push = evo_wait(dev, EVO_MASTER, 8); 467 312 if (push) { 468 313 evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3); 469 314 evo_data(push, (oY << 16) | oX); ··· 473 318 evo_data(push, 0x00000000); 474 319 evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1); 475 320 evo_data(push, (umode->vdisplay << 16) | umode->hdisplay); 476 - if (update) { 477 - evo_mthd(push, 0x0080, 1); 478 - evo_data(push, 0x00000000); 479 - } 480 321 evo_kick(push, dev, EVO_MASTER); 322 + if (update) { 323 + nvd0_display_flip_stop(crtc); 324 + nvd0_display_flip_next(crtc, crtc->fb, NULL, 1); 325 + } 481 326 } 482 327 483 328 return 0; ··· 551 396 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 552 397 u32 *push; 553 398 399 + nvd0_display_flip_stop(crtc); 400 + 554 401 push = evo_wait(crtc->dev, EVO_MASTER, 2); 555 402 if (push) { 556 403 evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); ··· 589 432 evo_kick(push, crtc->dev, EVO_MASTER); 590 433 } 591 434 592 - nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true); 435 + nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, false); 436 + nvd0_display_flip_next(crtc, crtc->fb, NULL, 1); 593 437 } 594 438 595 439 static bool ··· 682 524 evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2); 683 525 evo_data(push, syncs); 684 526 evo_data(push, magic); 527 + evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2); 528 + evo_data(push, 0x00000311); 529 + evo_data(push, 0x00000100); 685 530 evo_kick(push, crtc->dev, EVO_MASTER); 686 531 } 687 532 ··· 711 550 if (ret) 712 551 return ret; 713 552 553 + nvd0_display_flip_stop(crtc); 714 554 nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true); 555 + nvd0_display_flip_next(crtc, crtc->fb, NULL, 1); 715 556 return 0; 716 557 } 717 558 ··· 723 560 enum mode_set_atomic state) 724 561 { 725 562 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 563 + nvd0_display_flip_stop(crtc); 726 564 nvd0_crtc_set_image(nv_crtc, fb, x, y, true); 727 565 return 0; 728 566 } ··· 839 675 .gamma_set = nvd0_crtc_gamma_set, 840 676 .set_config = drm_crtc_helper_set_config, 841 677 .destroy = nvd0_crtc_destroy, 678 + .page_flip = nouveau_crtc_page_flip, 842 679 }; 843 680 844 681 static void ··· 1737 1572 int i; 1738 1573 1739 1574 for (i = 0; i < 3; i++) { 1740 - pci_free_consistent(pdev, PAGE_SIZE, disp->evo[i].ptr, 1741 - disp->evo[i].handle); 1575 + struct evo *evo = &disp->evo[i]; 1576 + nouveau_bo_unmap(evo->sem.bo); 1577 + nouveau_bo_ref(NULL, &evo->sem.bo); 1578 + pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle); 1742 1579 } 1743 1580 1744 1581 nouveau_gpuobj_ref(NULL, &disp->mem); ··· 1821 1654 if (ret) 1822 1655 goto out; 1823 1656 1824 - nv_wo32(disp->mem, 0x1000, 0x00000049); 1825 - nv_wo32(disp->mem, 0x1004, (disp->mem->vinst + 0x2000) >> 8); 1826 - nv_wo32(disp->mem, 0x1008, (disp->mem->vinst + 0x2fff) >> 8); 1827 - nv_wo32(disp->mem, 0x100c, 0x00000000); 1828 - nv_wo32(disp->mem, 0x1010, 0x00000000); 1829 - nv_wo32(disp->mem, 0x1014, 0x00000000); 1830 - nv_wo32(disp->mem, 0x0000, NvEvoSync); 1831 - nv_wo32(disp->mem, 0x0004, (0x1000 << 9) | 0x00000001); 1832 - 1833 - nv_wo32(disp->mem, 0x1020, 0x00000049); 1834 - nv_wo32(disp->mem, 0x1024, 0x00000000); 1835 - nv_wo32(disp->mem, 0x1028, (dev_priv->vram_size - 1) >> 8); 1836 - nv_wo32(disp->mem, 0x102c, 0x00000000); 1837 - nv_wo32(disp->mem, 0x1030, 0x00000000); 1838 - nv_wo32(disp->mem, 0x1034, 0x00000000); 1839 - nv_wo32(disp->mem, 0x0008, NvEvoVRAM); 1840 - nv_wo32(disp->mem, 0x000c, (0x1020 << 9) | 0x00000001); 1841 - 1842 - nv_wo32(disp->mem, 0x1040, 0x00000009); 1843 - nv_wo32(disp->mem, 0x1044, 0x00000000); 1844 - nv_wo32(disp->mem, 0x1048, (dev_priv->vram_size - 1) >> 8); 1845 - nv_wo32(disp->mem, 0x104c, 0x00000000); 1846 - nv_wo32(disp->mem, 0x1050, 0x00000000); 1847 - nv_wo32(disp->mem, 0x1054, 0x00000000); 1848 - nv_wo32(disp->mem, 0x0010, NvEvoVRAM_LP); 1849 - nv_wo32(disp->mem, 0x0014, (0x1040 << 9) | 0x00000001); 1850 - 1851 - nv_wo32(disp->mem, 0x1060, 0x0fe00009); 1852 - nv_wo32(disp->mem, 0x1064, 0x00000000); 1853 - nv_wo32(disp->mem, 0x1068, (dev_priv->vram_size - 1) >> 8); 1854 - nv_wo32(disp->mem, 0x106c, 0x00000000); 1855 - nv_wo32(disp->mem, 0x1070, 0x00000000); 1856 - nv_wo32(disp->mem, 0x1074, 0x00000000); 1857 - nv_wo32(disp->mem, 0x0018, NvEvoFB32); 1858 - nv_wo32(disp->mem, 0x001c, (0x1060 << 9) | 0x00000001); 1859 - 1860 - pinstmem->flush(dev); 1861 - 1862 - /* push buffers for evo channels */ 1657 + /* create evo dma channels */ 1863 1658 for (i = 0; i < 3; i++) { 1864 - disp->evo[i].ptr = pci_alloc_consistent(pdev, PAGE_SIZE, 1865 - &disp->evo[i].handle); 1866 - if (!disp->evo[i].ptr) { 1659 + struct evo *evo = &disp->evo[i]; 1660 + u32 dmao = 0x1000 + (i * 0x100); 1661 + u32 hash = 0x0000 + (i * 0x040); 1662 + u64 offset; 1663 + 1664 + evo->idx = i; 1665 + evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle); 1666 + if (!evo->ptr) { 1867 1667 ret = -ENOMEM; 1868 1668 goto out; 1869 1669 } 1670 + 1671 + ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, 1672 + 0, 0x0000, &evo->sem.bo); 1673 + if (!ret) { 1674 + ret = nouveau_bo_pin(evo->sem.bo, TTM_PL_FLAG_VRAM); 1675 + if (!ret) 1676 + ret = nouveau_bo_map(evo->sem.bo); 1677 + if (ret) 1678 + nouveau_bo_ref(NULL, &evo->sem.bo); 1679 + offset = evo->sem.bo->bo.offset; 1680 + } 1681 + 1682 + if (ret) 1683 + goto out; 1684 + 1685 + nv_wo32(disp->mem, dmao + 0x00, 0x00000049); 1686 + nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8); 1687 + nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8); 1688 + nv_wo32(disp->mem, dmao + 0x0c, 0x00000000); 1689 + nv_wo32(disp->mem, dmao + 0x10, 0x00000000); 1690 + nv_wo32(disp->mem, dmao + 0x14, 0x00000000); 1691 + nv_wo32(disp->mem, hash + 0x00, NvEvoSync); 1692 + nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) | 1693 + ((dmao + 0x00) << 9)); 1694 + 1695 + nv_wo32(disp->mem, dmao + 0x20, 0x00000049); 1696 + nv_wo32(disp->mem, dmao + 0x24, 0x00000000); 1697 + nv_wo32(disp->mem, dmao + 0x28, (dev_priv->vram_size - 1) >> 8); 1698 + nv_wo32(disp->mem, dmao + 0x2c, 0x00000000); 1699 + nv_wo32(disp->mem, dmao + 0x30, 0x00000000); 1700 + nv_wo32(disp->mem, dmao + 0x34, 0x00000000); 1701 + nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM); 1702 + nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) | 1703 + ((dmao + 0x20) << 9)); 1704 + 1705 + nv_wo32(disp->mem, dmao + 0x40, 0x00000009); 1706 + nv_wo32(disp->mem, dmao + 0x44, 0x00000000); 1707 + nv_wo32(disp->mem, dmao + 0x48, (dev_priv->vram_size - 1) >> 8); 1708 + nv_wo32(disp->mem, dmao + 0x4c, 0x00000000); 1709 + nv_wo32(disp->mem, dmao + 0x50, 0x00000000); 1710 + nv_wo32(disp->mem, dmao + 0x54, 0x00000000); 1711 + nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP); 1712 + nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) | 1713 + ((dmao + 0x40) << 9)); 1714 + 1715 + nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009); 1716 + nv_wo32(disp->mem, dmao + 0x64, 0x00000000); 1717 + nv_wo32(disp->mem, dmao + 0x68, (dev_priv->vram_size - 1) >> 8); 1718 + nv_wo32(disp->mem, dmao + 0x6c, 0x00000000); 1719 + nv_wo32(disp->mem, dmao + 0x70, 0x00000000); 1720 + nv_wo32(disp->mem, dmao + 0x74, 0x00000000); 1721 + nv_wo32(disp->mem, hash + 0x18, NvEvoFB32); 1722 + nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) | 1723 + ((dmao + 0x60) << 9)); 1870 1724 } 1725 + 1726 + pinstmem->flush(dev); 1871 1727 1872 1728 out: 1873 1729 if (ret)