Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (40 commits)
drm/radeon/kms: i2c s/sprintf/snprintf/g for safety
drm/radeon/kms: fix i2c pad masks on rs4xx
drm/ttm: Fix up a theoretical deadlock
drm/radeon/kms: fix tiling info on evergreen
drm/radeon/kms: fix alignment when allocating buffers
drm/vmwgfx: Fix up an error path during bo creation
drm/radeon/kms: register an i2c adapter name for the dp aux bus
drm/radeon/kms/atom: add proper external encoders support
drm/radeon/kms/atom: cleanup and unify DVO handling
drm/radeon/kms: properly power up/down the eDP panel as needed (v4)
drm/radeon/kms/atom: set sane defaults in atombios_get_encoder_mode()
drm/radeon/kms: turn the backlight off explicitly for dpms
drm/radeon/kms: fix typo in r600 cs checker
drm: radeon: fix error value sign
drm/radeon/kms: fix and unify tiled buffer alignment checking for r6xx/7xx
nouveau: Acknowledge HPD irq in handler, not bottom half
drm/nouveau: Fix a few confusions between "chipset" and "card_type".
drm/nouveau: don't expose backlight control when available through ACPI
drm/nouveau/pm: improve memtiming mappings
drm/nouveau: Make PCIE GART size depend on the available RAMIN space.
...

+1020 -423
+9
drivers/gpu/drm/nouveau/nouveau_backlight.c
··· 31 */ 32 33 #include <linux/backlight.h> 34 35 #include "drmP.h" 36 #include "nouveau_drv.h" ··· 136 int nouveau_backlight_init(struct drm_device *dev) 137 { 138 struct drm_nouveau_private *dev_priv = dev->dev_private; 139 140 switch (dev_priv->card_type) { 141 case NV_40:
··· 31 */ 32 33 #include <linux/backlight.h> 34 + #include <linux/acpi.h> 35 36 #include "drmP.h" 37 #include "nouveau_drv.h" ··· 135 int nouveau_backlight_init(struct drm_device *dev) 136 { 137 struct drm_nouveau_private *dev_priv = dev->dev_private; 138 + 139 + #ifdef CONFIG_ACPI 140 + if (acpi_video_backlight_support()) { 141 + NV_INFO(dev, "ACPI backlight interface available, " 142 + "not registering our own\n"); 143 + return 0; 144 + } 145 + #endif 146 147 switch (dev_priv->card_type) { 148 case NV_40:
+1 -1
drivers/gpu/drm/nouveau/nouveau_bios.c
··· 6829 struct drm_nouveau_private *dev_priv = dev->dev_private; 6830 unsigned htotal; 6831 6832 - if (dev_priv->chipset >= NV_50) { 6833 if (NVReadVgaCrtc(dev, 0, 0x00) == 0 && 6834 NVReadVgaCrtc(dev, 0, 0x1a) == 0) 6835 return false;
··· 6829 struct drm_nouveau_private *dev_priv = dev->dev_private; 6830 unsigned htotal; 6831 6832 + if (dev_priv->card_type >= NV_50) { 6833 if (NVReadVgaCrtc(dev, 0, 0x00) == 0 && 6834 NVReadVgaCrtc(dev, 0, 0x1a) == 0) 6835 return false;
+38 -5
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 143 nvbo->no_vm = no_vm; 144 nvbo->tile_mode = tile_mode; 145 nvbo->tile_flags = tile_flags; 146 147 - nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size); 148 align >>= PAGE_SHIFT; 149 150 nouveau_bo_placement_set(nvbo, flags, 0); ··· 178 pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags; 179 } 180 181 void 182 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) 183 { ··· 217 pl->busy_placement = nvbo->busy_placements; 218 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, 219 type | busy, flags); 220 } 221 222 int ··· 554 stride = 16 * 4; 555 height = amount / stride; 556 557 - if (new_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) { 558 ret = RING_SPACE(chan, 8); 559 if (ret) 560 return ret; ··· 576 BEGIN_RING(chan, NvSubM2MF, 0x0200, 1); 577 OUT_RING (chan, 1); 578 } 579 - if (old_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) { 580 ret = RING_SPACE(chan, 8); 581 if (ret) 582 return ret; ··· 784 if (dev_priv->card_type == NV_50) { 785 ret = nv50_mem_vm_bind_linear(dev, 786 offset + dev_priv->vm_vram_base, 787 - new_mem->size, nvbo->tile_flags, 788 offset); 789 if (ret) 790 return ret; ··· 926 * nothing to do here. 927 */ 928 if (bo->mem.mem_type != TTM_PL_VRAM) { 929 - if (dev_priv->card_type < NV_50 || !nvbo->tile_flags) 930 return 0; 931 } 932
··· 143 nvbo->no_vm = no_vm; 144 nvbo->tile_mode = tile_mode; 145 nvbo->tile_flags = tile_flags; 146 + nvbo->bo.bdev = &dev_priv->ttm.bdev; 147 148 + nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo), 149 + &align, &size); 150 align >>= PAGE_SHIFT; 151 152 nouveau_bo_placement_set(nvbo, flags, 0); ··· 176 pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags; 177 } 178 179 + static void 180 + set_placement_range(struct nouveau_bo *nvbo, uint32_t type) 181 + { 182 + struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 183 + 184 + if (dev_priv->card_type == NV_10 && 185 + nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) { 186 + /* 187 + * Make sure that the color and depth buffers are handled 188 + * by independent memory controller units. Up to a 9x 189 + * speed up when alpha-blending and depth-test are enabled 190 + * at the same time. 191 + */ 192 + int vram_pages = dev_priv->vram_size >> PAGE_SHIFT; 193 + 194 + if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { 195 + nvbo->placement.fpfn = vram_pages / 2; 196 + nvbo->placement.lpfn = ~0; 197 + } else { 198 + nvbo->placement.fpfn = 0; 199 + nvbo->placement.lpfn = vram_pages / 2; 200 + } 201 + } 202 + } 203 + 204 void 205 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) 206 { ··· 190 pl->busy_placement = nvbo->busy_placements; 191 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, 192 type | busy, flags); 193 + 194 + set_placement_range(nvbo, type); 195 } 196 197 int ··· 525 stride = 16 * 4; 526 height = amount / stride; 527 528 + if (new_mem->mem_type == TTM_PL_VRAM && 529 + nouveau_bo_tile_layout(nvbo)) { 530 ret = RING_SPACE(chan, 8); 531 if (ret) 532 return ret; ··· 546 BEGIN_RING(chan, NvSubM2MF, 0x0200, 1); 547 OUT_RING (chan, 1); 548 } 549 + if (old_mem->mem_type == TTM_PL_VRAM && 550 + nouveau_bo_tile_layout(nvbo)) { 551 ret = RING_SPACE(chan, 8); 552 if (ret) 553 return ret; ··· 753 if (dev_priv->card_type == NV_50) { 754 ret = nv50_mem_vm_bind_linear(dev, 755 offset + dev_priv->vm_vram_base, 756 + new_mem->size, 757 + nouveau_bo_tile_layout(nvbo), 758 offset); 759 if (ret) 760 return ret; ··· 894 * nothing to do here. 895 */ 896 if (bo->mem.mem_type != TTM_PL_VRAM) { 897 + if (dev_priv->card_type < NV_50 || 898 + !nouveau_bo_tile_layout(nvbo)) 899 return 0; 900 } 901
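To put numbers on the split that set_placement_range() enforces (illustrative figures, not from the patch): on an NV1x board with 128 MiB of VRAM and 4 KiB pages, vram_pages is 32768, so a NOUVEAU_GEM_TILE_ZETA buffer is constrained to pages 16384 and up while color buffers stay below page 16384, which is what lands depth and color on different memory controller units.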
+30 -47
drivers/gpu/drm/nouveau/nouveau_connector.c
··· 281 nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); 282 if (!nv_encoder && !nouveau_tv_disable) 283 nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); 284 - if (nv_encoder) { 285 struct drm_encoder *encoder = to_drm_encoder(nv_encoder); 286 struct drm_encoder_helper_funcs *helper = 287 encoder->helper_private; ··· 641 return ret; 642 } 643 644 static int 645 nouveau_connector_mode_valid(struct drm_connector *connector, 646 struct drm_display_mode *mode) 647 { 648 - struct drm_nouveau_private *dev_priv = connector->dev->dev_private; 649 struct nouveau_connector *nv_connector = nouveau_connector(connector); 650 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; 651 struct drm_encoder *encoder = to_drm_encoder(nv_encoder); ··· 680 max_clock = 400000; 681 break; 682 case OUTPUT_TMDS: 683 - if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) || 684 - !nv_encoder->dcb->duallink_possible) 685 - max_clock = 165000; 686 - else 687 - max_clock = 330000; 688 break; 689 case OUTPUT_ANALOG: 690 max_clock = nv_encoder->dcb->crtconf.maxfreq; ··· 722 return to_drm_encoder(nv_connector->detected_encoder); 723 724 return NULL; 725 - } 726 - 727 - void 728 - nouveau_connector_set_polling(struct drm_connector *connector) 729 - { 730 - struct drm_device *dev = connector->dev; 731 - struct drm_nouveau_private *dev_priv = dev->dev_private; 732 - struct drm_crtc *crtc; 733 - bool spare_crtc = false; 734 - 735 - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 736 - spare_crtc |= !crtc->enabled; 737 - 738 - connector->polled = 0; 739 - 740 - switch (connector->connector_type) { 741 - case DRM_MODE_CONNECTOR_VGA: 742 - case DRM_MODE_CONNECTOR_TV: 743 - if (dev_priv->card_type >= NV_50 || 744 - (nv_gf4_disp_arch(dev) && spare_crtc)) 745 - connector->polled = DRM_CONNECTOR_POLL_CONNECT; 746 - break; 747 - 748 - case DRM_MODE_CONNECTOR_DVII: 749 - case DRM_MODE_CONNECTOR_DVID: 750 - case DRM_MODE_CONNECTOR_HDMIA: 751 - case DRM_MODE_CONNECTOR_DisplayPort: 752 - case DRM_MODE_CONNECTOR_eDP: 753 - if (dev_priv->card_type >= NV_50) 754 - connector->polled = DRM_CONNECTOR_POLL_HPD; 755 - else if (connector->connector_type == DRM_MODE_CONNECTOR_DVID || 756 - spare_crtc) 757 - connector->polled = DRM_CONNECTOR_POLL_CONNECT; 758 - break; 759 - 760 - default: 761 - break; 762 - } 763 } 764 765 static const struct drm_connector_helper_funcs ··· 849 dev->mode_config.scaling_mode_property, 850 nv_connector->scaling_mode); 851 } 852 /* fall-through */ 853 case DCB_CONNECTOR_TV_0: 854 case DCB_CONNECTOR_TV_1: ··· 866 dev->mode_config.dithering_mode_property, 867 nv_connector->use_dithering ? 868 DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF); 869 break; 870 } 871 - 872 - nouveau_connector_set_polling(connector); 873 874 drm_sysfs_connector_add(connector); 875 dcb->drm = connector;
··· 281 nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); 282 if (!nv_encoder && !nouveau_tv_disable) 283 nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); 284 + if (nv_encoder && force) { 285 struct drm_encoder *encoder = to_drm_encoder(nv_encoder); 286 struct drm_encoder_helper_funcs *helper = 287 encoder->helper_private; ··· 641 return ret; 642 } 643 644 + static unsigned 645 + get_tmds_link_bandwidth(struct drm_connector *connector) 646 + { 647 + struct nouveau_connector *nv_connector = nouveau_connector(connector); 648 + struct drm_nouveau_private *dev_priv = connector->dev->dev_private; 649 + struct dcb_entry *dcb = nv_connector->detected_encoder->dcb; 650 + 651 + if (dcb->location != DCB_LOC_ON_CHIP || 652 + dev_priv->chipset >= 0x46) 653 + return 165000; 654 + else if (dev_priv->chipset >= 0x40) 655 + return 155000; 656 + else if (dev_priv->chipset >= 0x18) 657 + return 135000; 658 + else 659 + return 112000; 660 + } 661 + 662 static int 663 nouveau_connector_mode_valid(struct drm_connector *connector, 664 struct drm_display_mode *mode) 665 { 666 struct nouveau_connector *nv_connector = nouveau_connector(connector); 667 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; 668 struct drm_encoder *encoder = to_drm_encoder(nv_encoder); ··· 663 max_clock = 400000; 664 break; 665 case OUTPUT_TMDS: 666 + max_clock = get_tmds_link_bandwidth(connector); 667 + if (nouveau_duallink && nv_encoder->dcb->duallink_possible) 668 + max_clock *= 2; 669 break; 670 case OUTPUT_ANALOG: 671 max_clock = nv_encoder->dcb->crtconf.maxfreq; ··· 707 return to_drm_encoder(nv_connector->detected_encoder); 708 709 return NULL; 710 } 711 712 static const struct drm_connector_helper_funcs ··· 872 dev->mode_config.scaling_mode_property, 873 nv_connector->scaling_mode); 874 } 875 + connector->polled = DRM_CONNECTOR_POLL_CONNECT; 876 /* fall-through */ 877 case DCB_CONNECTOR_TV_0: 878 case DCB_CONNECTOR_TV_1: ··· 888 dev->mode_config.dithering_mode_property, 889 nv_connector->use_dithering ? 890 DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF); 891 + 892 + if (dcb->type != DCB_CONNECTOR_LVDS) { 893 + if (dev_priv->card_type >= NV_50) 894 + connector->polled = DRM_CONNECTOR_POLL_HPD; 895 + else 896 + connector->polled = DRM_CONNECTOR_POLL_CONNECT; 897 + } 898 break; 899 } 900 901 drm_sysfs_connector_add(connector); 902 dcb->drm = connector;
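Worked example for the new TMDS limit (derived from the hunk above, figures illustrative): an on-chip TMDS encoder on a chipset in the 0x40..0x45 range gets 155000 kHz from get_tmds_link_bandwidth(); if nouveau_duallink is set and the DCB entry is dual-link capable, mode_valid allows 2 * 155000 = 310000 kHz, while chipsets >= 0x46 (or off-chip encoders) start from the full single-link 165000 kHz.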
-3
drivers/gpu/drm/nouveau/nouveau_connector.h
··· 52 struct drm_connector * 53 nouveau_connector_create(struct drm_device *, int index); 54 55 - void 56 - nouveau_connector_set_polling(struct drm_connector *); 57 - 58 int 59 nouveau_connector_bpp(struct drm_connector *); 60
··· 52 struct drm_connector * 53 nouveau_connector_create(struct drm_device *, int index); 54 55 int 56 nouveau_connector_bpp(struct drm_connector *); 57
+17 -38
drivers/gpu/drm/nouveau/nouveau_drv.h
··· 100 int pin_refcnt; 101 }; 102 103 static inline struct nouveau_bo * 104 nouveau_bo(struct ttm_buffer_object *bo) 105 { ··· 307 void (*destroy_context)(struct nouveau_channel *); 308 int (*load_context)(struct nouveau_channel *); 309 int (*unload_context)(struct drm_device *); 310 }; 311 312 struct nouveau_pgraph_object_method { ··· 340 void (*destroy_context)(struct nouveau_channel *); 341 int (*load_context)(struct nouveau_channel *); 342 int (*unload_context)(struct drm_device *); 343 344 void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, 345 uint32_t size, uint32_t pitch); ··· 490 }; 491 492 struct nv04_crtc_reg { 493 - unsigned char MiscOutReg; /* */ 494 uint8_t CRTC[0xa0]; 495 uint8_t CR58[0x10]; 496 uint8_t Sequencer[5]; 497 uint8_t Graphics[9]; 498 uint8_t Attribute[21]; 499 - unsigned char DAC[768]; /* Internal Colorlookuptable */ 500 501 /* PCRTC regs */ 502 uint32_t fb_start; ··· 544 }; 545 546 struct nv04_mode_state { 547 - uint32_t bpp; 548 - uint32_t width; 549 - uint32_t height; 550 - uint32_t interlace; 551 - uint32_t repaint0; 552 - uint32_t repaint1; 553 - uint32_t screen; 554 - uint32_t scale; 555 - uint32_t dither; 556 - uint32_t extra; 557 - uint32_t fifo; 558 - uint32_t pixel; 559 - uint32_t horiz; 560 - int arbitration0; 561 - int arbitration1; 562 - uint32_t pll; 563 - uint32_t pllB; 564 - uint32_t vpll; 565 - uint32_t vpll2; 566 - uint32_t vpllB; 567 - uint32_t vpll2B; 568 uint32_t pllsel; 569 uint32_t sel_clk; 570 - uint32_t general; 571 - uint32_t crtcOwner; 572 - uint32_t head; 573 - uint32_t head2; 574 - uint32_t cursorConfig; 575 - uint32_t cursor0; 576 - uint32_t cursor1; 577 - uint32_t cursor2; 578 - uint32_t timingH; 579 - uint32_t timingV; 580 - uint32_t displayV; 581 - uint32_t crtcSync; 582 - 583 - struct nv04_crtc_reg crtc_reg[2]; 584 }; 585 586 enum nouveau_card_type { ··· 583 struct workqueue_struct *wq; 584 struct work_struct irq_work; 585 struct work_struct hpd_work; 586 587 struct list_head vbl_waiting; 588 ··· 1022 extern void nv50_fifo_destroy_context(struct nouveau_channel *); 1023 extern int nv50_fifo_load_context(struct nouveau_channel *); 1024 extern int nv50_fifo_unload_context(struct drm_device *); 1025 1026 /* nvc0_fifo.c */ 1027 extern int nvc0_fifo_init(struct drm_device *); ··· 1100 extern int nv50_graph_unload_context(struct drm_device *); 1101 extern void nv50_graph_context_switch(struct drm_device *); 1102 extern int nv50_grctx_init(struct nouveau_grctx *); 1103 1104 /* nvc0_graph.c */ 1105 extern int nvc0_graph_init(struct drm_device *); ··· 1219 extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); 1220 extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); 1221 extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); 1222 - extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *); 1223 1224 /* nouveau_fence.c */ 1225 struct nouveau_fence;
··· 100 int pin_refcnt; 101 }; 102 103 + #define nouveau_bo_tile_layout(nvbo) \ 104 + ((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) 105 + 106 static inline struct nouveau_bo * 107 nouveau_bo(struct ttm_buffer_object *bo) 108 { ··· 304 void (*destroy_context)(struct nouveau_channel *); 305 int (*load_context)(struct nouveau_channel *); 306 int (*unload_context)(struct drm_device *); 307 + void (*tlb_flush)(struct drm_device *dev); 308 }; 309 310 struct nouveau_pgraph_object_method { ··· 336 void (*destroy_context)(struct nouveau_channel *); 337 int (*load_context)(struct nouveau_channel *); 338 int (*unload_context)(struct drm_device *); 339 + void (*tlb_flush)(struct drm_device *dev); 340 341 void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, 342 uint32_t size, uint32_t pitch); ··· 485 }; 486 487 struct nv04_crtc_reg { 488 + unsigned char MiscOutReg; 489 uint8_t CRTC[0xa0]; 490 uint8_t CR58[0x10]; 491 uint8_t Sequencer[5]; 492 uint8_t Graphics[9]; 493 uint8_t Attribute[21]; 494 + unsigned char DAC[768]; 495 496 /* PCRTC regs */ 497 uint32_t fb_start; ··· 539 }; 540 541 struct nv04_mode_state { 542 + struct nv04_crtc_reg crtc_reg[2]; 543 uint32_t pllsel; 544 uint32_t sel_clk; 545 }; 546 547 enum nouveau_card_type { ··· 612 struct workqueue_struct *wq; 613 struct work_struct irq_work; 614 struct work_struct hpd_work; 615 + 616 + struct { 617 + spinlock_t lock; 618 + uint32_t hpd0_bits; 619 + uint32_t hpd1_bits; 620 + } hpd_state; 621 622 struct list_head vbl_waiting; 623 ··· 1045 extern void nv50_fifo_destroy_context(struct nouveau_channel *); 1046 extern int nv50_fifo_load_context(struct nouveau_channel *); 1047 extern int nv50_fifo_unload_context(struct drm_device *); 1048 + extern void nv50_fifo_tlb_flush(struct drm_device *dev); 1049 1050 /* nvc0_fifo.c */ 1051 extern int nvc0_fifo_init(struct drm_device *); ··· 1122 extern int nv50_graph_unload_context(struct drm_device *); 1123 extern void nv50_graph_context_switch(struct drm_device *); 1124 extern int nv50_grctx_init(struct nouveau_grctx *); 1125 + extern void nv50_graph_tlb_flush(struct drm_device *dev); 1126 + extern void nv86_graph_tlb_flush(struct drm_device *dev); 1127 1128 /* nvc0_graph.c */ 1129 extern int nvc0_graph_init(struct drm_device *); ··· 1239 extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); 1240 extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); 1241 extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); 1242 1243 /* nouveau_fence.c */ 1244 struct nouveau_fence;
+6 -1
drivers/gpu/drm/nouveau/nouveau_fence.c
··· 249 { 250 struct drm_nouveau_private *dev_priv = dev->dev_private; 251 struct nouveau_semaphore *sema; 252 253 if (!USE_SEMA(dev)) 254 return NULL; ··· 258 if (!sema) 259 goto fail; 260 261 spin_lock(&dev_priv->fence.lock); 262 sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0); 263 if (sema->mem) 264 - sema->mem = drm_mm_get_block(sema->mem, 4, 0); 265 spin_unlock(&dev_priv->fence.lock); 266 267 if (!sema->mem)
··· 249 { 250 struct drm_nouveau_private *dev_priv = dev->dev_private; 251 struct nouveau_semaphore *sema; 252 + int ret; 253 254 if (!USE_SEMA(dev)) 255 return NULL; ··· 257 if (!sema) 258 goto fail; 259 260 + ret = drm_mm_pre_get(&dev_priv->fence.heap); 261 + if (ret) 262 + goto fail; 263 + 264 spin_lock(&dev_priv->fence.lock); 265 sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0); 266 if (sema->mem) 267 + sema->mem = drm_mm_get_block_atomic(sema->mem, 4, 0); 268 spin_unlock(&dev_priv->fence.lock); 269 270 if (!sema->mem)
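The fence fix is the standard pattern for carving a block out of a drm_mm while holding a spinlock. A minimal sketch of that pattern against the old drm_mm API used here (mm, lock, size and align are hypothetical placeholders):

	struct drm_mm_node *node;

	/* sleeping context: let drm_mm pre-allocate its internal free nodes */
	if (drm_mm_pre_get(&mm))
		return -ENOMEM;

	spin_lock(&lock);
	node = drm_mm_search_free(&mm, size, align, 0);
	if (node)
		/* must not sleep under the lock, so claim the block atomically */
		node = drm_mm_get_block_atomic(node, size, align);
	spin_unlock(&lock);

Plain drm_mm_get_block() may allocate memory (and thus sleep) while splitting a free node, which is what the original code was doing inside the spinlock.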
+21 -15
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 107 } 108 109 static bool 110 - nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) { 111 - switch (tile_flags) { 112 - case 0x0000: 113 - case 0x1800: 114 - case 0x2800: 115 - case 0x4800: 116 - case 0x7000: 117 - case 0x7400: 118 - case 0x7a00: 119 - case 0xe000: 120 - break; 121 - default: 122 - NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags); 123 - return false; 124 } 125 126 - return true; 127 } 128 129 int
··· 107 } 108 109 static bool 110 + nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) 111 + { 112 + struct drm_nouveau_private *dev_priv = dev->dev_private; 113 + 114 + if (dev_priv->card_type >= NV_50) { 115 + switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) { 116 + case 0x0000: 117 + case 0x1800: 118 + case 0x2800: 119 + case 0x4800: 120 + case 0x7000: 121 + case 0x7400: 122 + case 0x7a00: 123 + case 0xe000: 124 + return true; 125 + } 126 + } else { 127 + if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)) 128 + return true; 129 } 130 131 + NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags); 132 + return false; 133 } 134 135 int
+4 -4
drivers/gpu/drm/nouveau/nouveau_hw.c
··· 519 520 struct pll_lims pll_lim; 521 struct nouveau_pll_vals pv; 522 - uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; 523 524 - if (get_pll_limits(dev, pllreg, &pll_lim)) 525 return; 526 - nouveau_hw_get_pllvals(dev, pllreg, &pv); 527 528 if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m && 529 pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n && ··· 536 pv.M1 = pll_lim.vco1.max_m; 537 pv.N1 = pll_lim.vco1.min_n; 538 pv.log2P = pll_lim.max_usable_log2p; 539 - nouveau_hw_setpll(dev, pllreg, &pv); 540 } 541 542 /*
··· 519 520 struct pll_lims pll_lim; 521 struct nouveau_pll_vals pv; 522 + enum pll_types pll = head ? PLL_VPLL1 : PLL_VPLL0; 523 524 + if (get_pll_limits(dev, pll, &pll_lim)) 525 return; 526 + nouveau_hw_get_pllvals(dev, pll, &pv); 527 528 if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m && 529 pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n && ··· 536 pv.M1 = pll_lim.vco1.max_m; 537 pv.N1 = pll_lim.vco1.min_n; 538 pv.log2P = pll_lim.max_usable_log2p; 539 + nouveau_hw_setpll(dev, pll_lim.reg, &pv); 540 } 541 542 /*
+19
drivers/gpu/drm/nouveau/nouveau_hw.h
··· 416 } 417 418 static inline void 419 nv_show_cursor(struct drm_device *dev, int head, bool show) 420 { 421 struct drm_nouveau_private *dev_priv = dev->dev_private;
··· 416 } 417 418 static inline void 419 + nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset) 420 + { 421 + struct drm_nouveau_private *dev_priv = dev->dev_private; 422 + 423 + NVWriteCRTC(dev, head, NV_PCRTC_START, offset); 424 + 425 + if (dev_priv->card_type == NV_04) { 426 + /* 427 + * Hilarious, the 24th bit doesn't want to stick to 428 + * PCRTC_START... 429 + */ 430 + int cre_heb = NVReadVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX); 431 + 432 + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX, 433 + (cre_heb & ~0x40) | ((offset >> 18) & 0x40)); 434 + } 435 + } 436 + 437 + static inline void 438 nv_show_cursor(struct drm_device *dev, int head, bool show) 439 { 440 struct drm_nouveau_private *dev_priv = dev->dev_private;
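The CRE_HEB dance in nv_set_crtc_base() is just relocating one address bit: (offset >> 18) & 0x40 extracts bit 24 of the framebuffer offset and drops it at bit 6 of the extended-start byte (0x01000000 >> 18 == 0x40), the one bit that NV_PCRTC_START will not hold on NV04 hardware.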
+1 -1
drivers/gpu/drm/nouveau/nouveau_i2c.c
··· 256 if (index >= DCB_MAX_NUM_I2C_ENTRIES) 257 return NULL; 258 259 - if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) { 260 uint32_t reg = 0xe500, val; 261 262 if (i2c->port_type == 6) {
··· 256 if (index >= DCB_MAX_NUM_I2C_ENTRIES) 257 return NULL; 258 259 + if (dev_priv->card_type >= NV_50 && (i2c->entry & 0x00000100)) { 260 uint32_t reg = 0xe500, val; 261 262 if (i2c->port_type == 6) {
+23 -19
drivers/gpu/drm/nouveau/nouveau_irq.c
··· 42 #include "nouveau_connector.h" 43 #include "nv50_display.h" 44 45 void 46 nouveau_irq_preinstall(struct drm_device *dev) 47 { ··· 60 if (dev_priv->card_type >= NV_50) { 61 INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); 62 INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh); 63 INIT_LIST_HEAD(&dev_priv->vbl_waiting); 64 } 65 } ··· 210 } 211 212 if (status & NV_PFIFO_INTR_DMA_PUSHER) { 213 - u32 get = nv_rd32(dev, 0x003244); 214 - u32 put = nv_rd32(dev, 0x003240); 215 u32 push = nv_rd32(dev, 0x003220); 216 u32 state = nv_rd32(dev, 0x003228); 217 ··· 221 u32 ib_get = nv_rd32(dev, 0x003334); 222 u32 ib_put = nv_rd32(dev, 0x003330); 223 224 - NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x " 225 "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " 226 "State 0x%08x Push 0x%08x\n", 227 - chid, ho_get, get, ho_put, put, ib_get, ib_put, 228 - state, push); 229 230 /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ 231 nv_wr32(dev, 0x003364, 0x00000000); 232 - if (get != put || ho_get != ho_put) { 233 - nv_wr32(dev, 0x003244, put); 234 nv_wr32(dev, 0x003328, ho_put); 235 } else 236 if (ib_get != ib_put) { ··· 241 } else { 242 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x " 243 "Put 0x%08x State 0x%08x Push 0x%08x\n", 244 - chid, get, put, state, push); 245 246 - if (get != put) 247 - nv_wr32(dev, 0x003244, put); 248 } 249 250 nv_wr32(dev, 0x003228, 0x00000000); ··· 276 } 277 278 if (status) { 279 - NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", 280 - status, chid); 281 nv_wr32(dev, NV03_PFIFO_INTR_0, status); 282 status = 0; 283 } ··· 553 554 if (unhandled) 555 nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap); 556 - } 557 - 558 - static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20); 559 - 560 - static int nouveau_ratelimit(void) 561 - { 562 - return __ratelimit(&nouveau_ratelimit_state); 563 } 564 565
··· 42 #include "nouveau_connector.h" 43 #include "nv50_display.h" 44 45 + static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20); 46 + 47 + static int nouveau_ratelimit(void) 48 + { 49 + return __ratelimit(&nouveau_ratelimit_state); 50 + } 51 + 52 void 53 nouveau_irq_preinstall(struct drm_device *dev) 54 { ··· 53 if (dev_priv->card_type >= NV_50) { 54 INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); 55 INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh); 56 + spin_lock_init(&dev_priv->hpd_state.lock); 57 INIT_LIST_HEAD(&dev_priv->vbl_waiting); 58 } 59 } ··· 202 } 203 204 if (status & NV_PFIFO_INTR_DMA_PUSHER) { 205 + u32 dma_get = nv_rd32(dev, 0x003244); 206 + u32 dma_put = nv_rd32(dev, 0x003240); 207 u32 push = nv_rd32(dev, 0x003220); 208 u32 state = nv_rd32(dev, 0x003228); 209 ··· 213 u32 ib_get = nv_rd32(dev, 0x003334); 214 u32 ib_put = nv_rd32(dev, 0x003330); 215 216 + if (nouveau_ratelimit()) 217 + NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x " 218 "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " 219 "State 0x%08x Push 0x%08x\n", 220 + chid, ho_get, dma_get, ho_put, 221 + dma_put, ib_get, ib_put, state, 222 + push); 223 224 /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ 225 nv_wr32(dev, 0x003364, 0x00000000); 226 + if (dma_get != dma_put || ho_get != ho_put) { 227 + nv_wr32(dev, 0x003244, dma_put); 228 nv_wr32(dev, 0x003328, ho_put); 229 } else 230 if (ib_get != ib_put) { ··· 231 } else { 232 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x " 233 "Put 0x%08x State 0x%08x Push 0x%08x\n", 234 + chid, dma_get, dma_put, state, push); 235 236 + if (dma_get != dma_put) 237 + nv_wr32(dev, 0x003244, dma_put); 238 } 239 240 nv_wr32(dev, 0x003228, 0x00000000); ··· 266 } 267 268 if (status) { 269 + if (nouveau_ratelimit()) 270 + NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", 271 + status, chid); 272 nv_wr32(dev, NV03_PFIFO_INTR_0, status); 273 status = 0; 274 } ··· 542 543 if (unhandled) 544 nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap); 545 } 546 547
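For reference, DEFINE_RATELIMIT_STATE(3 * HZ, 20) permits at most 20 of these messages per 3-second window; __ratelimit() returns nonzero while the budget lasts, so a wedged DMA pusher interrupting in a tight loop can no longer flood the log.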
+27 -22
drivers/gpu/drm/nouveau/nouveau_mem.c
··· 33 #include "drmP.h" 34 #include "drm.h" 35 #include "drm_sarea.h" 36 - #include "nouveau_drv.h" 37 38 - #define MIN(a,b) a < b ? a : b 39 40 /* 41 * NV10-NV40 tiling helpers ··· 175 } 176 } 177 } 178 - dev_priv->engine.instmem.flush(dev); 179 180 - nv50_vm_flush(dev, 5); 181 - nv50_vm_flush(dev, 0); 182 - nv50_vm_flush(dev, 4); 183 nv50_vm_flush(dev, 6); 184 return 0; 185 } ··· 208 pte++; 209 } 210 } 211 - dev_priv->engine.instmem.flush(dev); 212 213 - nv50_vm_flush(dev, 5); 214 - nv50_vm_flush(dev, 0); 215 - nv50_vm_flush(dev, 4); 216 nv50_vm_flush(dev, 6); 217 } 218 ··· 651 void 652 nouveau_mem_timing_init(struct drm_device *dev) 653 { 654 struct drm_nouveau_private *dev_priv = dev->dev_private; 655 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 656 struct nouveau_pm_memtimings *memtimings = &pm->memtimings; ··· 718 tUNK_19 = 1; 719 tUNK_20 = 0; 720 tUNK_21 = 0; 721 - switch (MIN(recordlen,21)) { 722 - case 21: 723 tUNK_21 = entry[21]; 724 - case 20: 725 tUNK_20 = entry[20]; 726 - case 19: 727 tUNK_19 = entry[19]; 728 - case 18: 729 tUNK_18 = entry[18]; 730 default: 731 tUNK_0 = entry[0]; ··· 755 timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); 756 if(recordlen > 19) { 757 timing->reg_100228 += (tUNK_19 - 1) << 24; 758 - } else { 759 timing->reg_100228 += tUNK_12 << 24; 760 - } 761 762 /* XXX: reg_10022c */ 763 764 timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | 765 tUNK_13 << 8 | tUNK_13); 766 767 /* XXX: +6? */ 768 timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC); 769 - if(tUNK_10 > tUNK_11) { 770 - timing->reg_100234 += tUNK_10 << 16; 771 - } else { 772 - timing->reg_100234 += tUNK_11 << 16; 773 } 774 775 - /* XXX; reg_100238, reg_10023c */ 776 NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, 777 timing->reg_100220, timing->reg_100224, 778 timing->reg_100228, timing->reg_10022c);
··· 33 #include "drmP.h" 34 #include "drm.h" 35 #include "drm_sarea.h" 36 37 + #include "nouveau_drv.h" 38 + #include "nouveau_pm.h" 39 40 /* 41 * NV10-NV40 tiling helpers ··· 175 } 176 } 177 } 178 179 + dev_priv->engine.instmem.flush(dev); 180 + dev_priv->engine.fifo.tlb_flush(dev); 181 + dev_priv->engine.graph.tlb_flush(dev); 182 nv50_vm_flush(dev, 6); 183 return 0; 184 } ··· 209 pte++; 210 } 211 } 212 213 + dev_priv->engine.instmem.flush(dev); 214 + dev_priv->engine.fifo.tlb_flush(dev); 215 + dev_priv->engine.graph.tlb_flush(dev); 216 nv50_vm_flush(dev, 6); 217 } 218 ··· 653 void 654 nouveau_mem_timing_init(struct drm_device *dev) 655 { 656 + /* cards < NVC0 only */ 657 struct drm_nouveau_private *dev_priv = dev->dev_private; 658 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 659 struct nouveau_pm_memtimings *memtimings = &pm->memtimings; ··· 719 tUNK_19 = 1; 720 tUNK_20 = 0; 721 tUNK_21 = 0; 722 + switch (min(recordlen, 22)) { 723 + case 22: 724 tUNK_21 = entry[21]; 725 + case 21: 726 tUNK_20 = entry[20]; 727 + case 20: 728 tUNK_19 = entry[19]; 729 + case 19: 730 tUNK_18 = entry[18]; 731 default: 732 tUNK_0 = entry[0]; ··· 756 timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); 757 if(recordlen > 19) { 758 timing->reg_100228 += (tUNK_19 - 1) << 24; 759 + }/* I cannot back-up this else-statement right now 760 + else { 761 timing->reg_100228 += tUNK_12 << 24; 762 + }*/ 763 764 /* XXX: reg_10022c */ 765 + timing->reg_10022c = tUNK_2 - 1; 766 767 timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | 768 tUNK_13 << 8 | tUNK_13); 769 770 /* XXX: +6? */ 771 timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC); 772 + timing->reg_100234 += max(tUNK_10,tUNK_11) << 16; 773 + 774 + /* XXX; reg_100238, reg_10023c 775 + * reg: 0x00?????? 776 + * reg_10023c: 777 + * 0 for pre-NV50 cards 778 + * 0x????0202 for NV50+ cards (empirical evidence) */ 779 + if(dev_priv->card_type >= NV_50) { 780 + timing->reg_10023c = 0x202; 781 } 782 783 NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, 784 timing->reg_100220, timing->reg_100224, 785 timing->reg_100228, timing->reg_10022c);
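Note the off-by-one the switch rewrite fixes: reading entry[21] requires a record of at least 22 bytes, so the dispatch is now min(recordlen, 22) with case 22 guarding entry[21], case 21 guarding entry[20], and so on, whereas the old MIN(recordlen,21) version let a 21-byte record index one byte past its end.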
+1 -1
drivers/gpu/drm/nouveau/nouveau_object.c
··· 129 if (ramin == NULL) { 130 spin_unlock(&dev_priv->ramin_lock); 131 nouveau_gpuobj_ref(NULL, &gpuobj); 132 - return ret; 133 } 134 135 ramin = drm_mm_get_block_atomic(ramin, size, align);
··· 129 if (ramin == NULL) { 130 spin_unlock(&dev_priv->ramin_lock); 131 nouveau_gpuobj_ref(NULL, &gpuobj); 132 + return -ENOMEM; 133 } 134 135 ramin = drm_mm_get_block_atomic(ramin, size, align);
+6 -1
drivers/gpu/drm/nouveau/nouveau_pm.c
··· 284 } 285 } 286 287 static ssize_t 288 nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) 289 { ··· 396 static const struct attribute_group hwmon_attrgroup = { 397 .attrs = hwmon_attributes, 398 }; 399 400 static int 401 nouveau_hwmon_init(struct drm_device *dev) 402 { 403 struct drm_nouveau_private *dev_priv = dev->dev_private; 404 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 405 struct device *hwmon_dev; ··· 428 } 429 430 pm->hwmon = hwmon_dev; 431 - 432 return 0; 433 } 434 435 static void 436 nouveau_hwmon_fini(struct drm_device *dev) 437 { 438 struct drm_nouveau_private *dev_priv = dev->dev_private; 439 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 440 ··· 443 sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); 444 hwmon_device_unregister(pm->hwmon); 445 } 446 } 447 448 int
··· 284 } 285 } 286 287 + #ifdef CONFIG_HWMON 288 static ssize_t 289 nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) 290 { ··· 395 static const struct attribute_group hwmon_attrgroup = { 396 .attrs = hwmon_attributes, 397 }; 398 + #endif 399 400 static int 401 nouveau_hwmon_init(struct drm_device *dev) 402 { 403 + #ifdef CONFIG_HWMON 404 struct drm_nouveau_private *dev_priv = dev->dev_private; 405 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 406 struct device *hwmon_dev; ··· 425 } 426 427 pm->hwmon = hwmon_dev; 428 + #endif 429 return 0; 430 } 431 432 static void 433 nouveau_hwmon_fini(struct drm_device *dev) 434 { 435 + #ifdef CONFIG_HWMON 436 struct drm_nouveau_private *dev_priv = dev->dev_private; 437 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 438 ··· 439 sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); 440 hwmon_device_unregister(pm->hwmon); 441 } 442 + #endif 443 } 444 445 int
+44 -27
drivers/gpu/drm/nouveau/nouveau_ramht.c
··· 153 return -ENOMEM; 154 } 155 156 static void 157 - nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle) 158 { 159 struct drm_device *dev = chan->dev; 160 struct drm_nouveau_private *dev_priv = dev->dev_private; 161 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; 162 struct nouveau_gpuobj *ramht = chan->ramht->gpuobj; 163 - struct nouveau_ramht_entry *entry, *tmp; 164 u32 co, ho; 165 166 - list_for_each_entry_safe(entry, tmp, &chan->ramht->entries, head) { 167 - if (entry->channel != chan || entry->handle != handle) 168 - continue; 169 - 170 - nouveau_gpuobj_ref(NULL, &entry->gpuobj); 171 - list_del(&entry->head); 172 - kfree(entry); 173 - break; 174 - } 175 - 176 co = ho = nouveau_ramht_hash_handle(chan, handle); 177 do { 178 if (nouveau_ramht_entry_valid(dev, ramht, co) && ··· 200 nv_wo32(ramht, co + 0, 0x00000000); 201 nv_wo32(ramht, co + 4, 0x00000000); 202 instmem->flush(dev); 203 - return; 204 } 205 206 co += 8; ··· 210 211 NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", 212 chan->id, handle); 213 } 214 215 void 216 nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) 217 { 218 - struct nouveau_ramht *ramht = chan->ramht; 219 - unsigned long flags; 220 221 - spin_lock_irqsave(&ramht->lock, flags); 222 - nouveau_ramht_remove_locked(chan, handle); 223 - spin_unlock_irqrestore(&ramht->lock, flags); 224 } 225 226 struct nouveau_gpuobj * ··· 286 nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr, 287 struct nouveau_channel *chan) 288 { 289 - struct nouveau_ramht_entry *entry, *tmp; 290 struct nouveau_ramht *ramht; 291 - unsigned long flags; 292 293 if (ref) 294 kref_get(&ref->refcount); 295 296 ramht = *ptr; 297 if (ramht) { 298 - spin_lock_irqsave(&ramht->lock, flags); 299 - list_for_each_entry_safe(entry, tmp, &ramht->entries, head) { 300 - if (entry->channel != chan) 301 - continue; 302 - 303 - nouveau_ramht_remove_locked(chan, entry->handle); 304 } 305 - spin_unlock_irqrestore(&ramht->lock, flags); 306 307 kref_put(&ramht->refcount, nouveau_ramht_del); 308 }
··· 153 return -ENOMEM; 154 } 155 156 + static struct nouveau_ramht_entry * 157 + nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle) 158 + { 159 + struct nouveau_ramht *ramht = chan ? chan->ramht : NULL; 160 + struct nouveau_ramht_entry *entry; 161 + unsigned long flags; 162 + 163 + if (!ramht) 164 + return NULL; 165 + 166 + spin_lock_irqsave(&ramht->lock, flags); 167 + list_for_each_entry(entry, &ramht->entries, head) { 168 + if (entry->channel == chan && 169 + (!handle || entry->handle == handle)) { 170 + list_del(&entry->head); 171 + spin_unlock_irqrestore(&ramht->lock, flags); 172 + 173 + return entry; 174 + } 175 + } 176 + spin_unlock_irqrestore(&ramht->lock, flags); 177 + 178 + return NULL; 179 + } 180 + 181 static void 182 + nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle) 183 { 184 struct drm_device *dev = chan->dev; 185 struct drm_nouveau_private *dev_priv = dev->dev_private; 186 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; 187 struct nouveau_gpuobj *ramht = chan->ramht->gpuobj; 188 + unsigned long flags; 189 u32 co, ho; 190 191 + spin_lock_irqsave(&chan->ramht->lock, flags); 192 co = ho = nouveau_ramht_hash_handle(chan, handle); 193 do { 194 if (nouveau_ramht_entry_valid(dev, ramht, co) && ··· 184 nv_wo32(ramht, co + 0, 0x00000000); 185 nv_wo32(ramht, co + 4, 0x00000000); 186 instmem->flush(dev); 187 + goto out; 188 } 189 190 co += 8; ··· 194 195 NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", 196 chan->id, handle); 197 + out: 198 + spin_unlock_irqrestore(&chan->ramht->lock, flags); 199 } 200 201 void 202 nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) 203 { 204 + struct nouveau_ramht_entry *entry; 205 206 + entry = nouveau_ramht_remove_entry(chan, handle); 207 + if (!entry) 208 + return; 209 + 210 + nouveau_ramht_remove_hash(chan, entry->handle); 211 + nouveau_gpuobj_ref(NULL, &entry->gpuobj); 212 + kfree(entry); 213 } 214 215 struct nouveau_gpuobj * ··· 265 nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr, 266 struct nouveau_channel *chan) 267 { 268 + struct nouveau_ramht_entry *entry; 269 struct nouveau_ramht *ramht; 270 271 if (ref) 272 kref_get(&ref->refcount); 273 274 ramht = *ptr; 275 if (ramht) { 276 + while ((entry = nouveau_ramht_remove_entry(chan, 0))) { 277 + nouveau_ramht_remove_hash(chan, entry->handle); 278 + nouveau_gpuobj_ref(NULL, &entry->gpuobj); 279 + kfree(entry); 280 } 281 282 kref_put(&ramht->refcount, nouveau_ramht_del); 283 }
+9 -5
drivers/gpu/drm/nouveau/nouveau_sgdma.c
··· 120 dev_priv->engine.instmem.flush(nvbe->dev); 121 122 if (dev_priv->card_type == NV_50) { 123 - nv50_vm_flush(dev, 5); /* PGRAPH */ 124 - nv50_vm_flush(dev, 0); /* PFIFO */ 125 } 126 127 nvbe->bound = true; ··· 162 dev_priv->engine.instmem.flush(nvbe->dev); 163 164 if (dev_priv->card_type == NV_50) { 165 - nv50_vm_flush(dev, 5); 166 - nv50_vm_flush(dev, 0); 167 } 168 169 nvbe->bound = false; ··· 224 int i, ret; 225 226 if (dev_priv->card_type < NV_50) { 227 - aper_size = (64 * 1024 * 1024); 228 obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; 229 obj_size += 8; /* ctxdma header */ 230 } else {
··· 120 dev_priv->engine.instmem.flush(nvbe->dev); 121 122 if (dev_priv->card_type == NV_50) { 123 + dev_priv->engine.fifo.tlb_flush(dev); 124 + dev_priv->engine.graph.tlb_flush(dev); 125 } 126 127 nvbe->bound = true; ··· 162 dev_priv->engine.instmem.flush(nvbe->dev); 163 164 if (dev_priv->card_type == NV_50) { 165 + dev_priv->engine.fifo.tlb_flush(dev); 166 + dev_priv->engine.graph.tlb_flush(dev); 167 } 168 169 nvbe->bound = false; ··· 224 int i, ret; 225 226 if (dev_priv->card_type < NV_50) { 227 + if(dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024) 228 + aper_size = 64 * 1024 * 1024; 229 + else 230 + aper_size = 512 * 1024 * 1024; 231 + 232 obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; 233 obj_size += 8; /* ctxdma header */ 234 } else {
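The aperture choice translates directly into RAMIN cost for the page table: assuming the 4 KiB NV_CTXDMA_PAGE_SHIFT used elsewhere in this file, the old 64 MiB GART needs (64 MiB >> 12) * 4 + 8 = 64 KiB + 8 bytes of ctxdma, while the new 512 MiB GART needs 512 KiB + 8, which is why the larger size is only used when at least 2 MiB of RAMIN is reserved.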
+15 -2
drivers/gpu/drm/nouveau/nouveau_state.c
··· 354 engine->graph.destroy_context = nv50_graph_destroy_context; 355 engine->graph.load_context = nv50_graph_load_context; 356 engine->graph.unload_context = nv50_graph_unload_context; 357 engine->fifo.channels = 128; 358 engine->fifo.init = nv50_fifo_init; 359 engine->fifo.takedown = nv50_fifo_takedown; ··· 374 engine->fifo.destroy_context = nv50_fifo_destroy_context; 375 engine->fifo.load_context = nv50_fifo_load_context; 376 engine->fifo.unload_context = nv50_fifo_unload_context; 377 engine->display.early_init = nv50_display_early_init; 378 engine->display.late_takedown = nv50_display_late_takedown; 379 engine->display.create = nv50_display_create; ··· 1051 case NOUVEAU_GETPARAM_PTIMER_TIME: 1052 getparam->value = dev_priv->engine.timer.read(dev); 1053 break; 1054 case NOUVEAU_GETPARAM_GRAPH_UNITS: 1055 /* NV40 and NV50 versions are quite different, but register 1056 * address is the same. User is supposed to know the card ··· 1064 } 1065 /* FALLTHRU */ 1066 default: 1067 - NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); 1068 return -EINVAL; 1069 } 1070 ··· 1079 1080 switch (setparam->param) { 1081 default: 1082 - NV_ERROR(dev, "unknown parameter %lld\n", setparam->param); 1083 return -EINVAL; 1084 } 1085
··· 354 engine->graph.destroy_context = nv50_graph_destroy_context; 355 engine->graph.load_context = nv50_graph_load_context; 356 engine->graph.unload_context = nv50_graph_unload_context; 357 + if (dev_priv->chipset != 0x86) 358 + engine->graph.tlb_flush = nv50_graph_tlb_flush; 359 + else { 360 + /* from what i can see nvidia do this on every 361 + * pre-NVA3 board except NVAC, but, we've only 362 + * ever seen problems on NV86 363 + */ 364 + engine->graph.tlb_flush = nv86_graph_tlb_flush; 365 + } 366 engine->fifo.channels = 128; 367 engine->fifo.init = nv50_fifo_init; 368 engine->fifo.takedown = nv50_fifo_takedown; ··· 365 engine->fifo.destroy_context = nv50_fifo_destroy_context; 366 engine->fifo.load_context = nv50_fifo_load_context; 367 engine->fifo.unload_context = nv50_fifo_unload_context; 368 + engine->fifo.tlb_flush = nv50_fifo_tlb_flush; 369 engine->display.early_init = nv50_display_early_init; 370 engine->display.late_takedown = nv50_display_late_takedown; 371 engine->display.create = nv50_display_create; ··· 1041 case NOUVEAU_GETPARAM_PTIMER_TIME: 1042 getparam->value = dev_priv->engine.timer.read(dev); 1043 break; 1044 + case NOUVEAU_GETPARAM_HAS_BO_USAGE: 1045 + getparam->value = 1; 1046 + break; 1047 case NOUVEAU_GETPARAM_GRAPH_UNITS: 1048 /* NV40 and NV50 versions are quite different, but register 1049 * address is the same. User is supposed to know the card ··· 1051 } 1052 /* FALLTHRU */ 1053 default: 1054 + NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param); 1055 return -EINVAL; 1056 } 1057 ··· 1066 1067 switch (setparam->param) { 1068 default: 1069 + NV_DEBUG(dev, "unknown parameter %lld\n", setparam->param); 1070 return -EINVAL; 1071 } 1072
+1 -1
drivers/gpu/drm/nouveau/nouveau_temp.c
··· 191 int offset = sensor->offset_mult / sensor->offset_div; 192 int core_temp; 193 194 - if (dev_priv->chipset >= 0x50) { 195 core_temp = nv_rd32(dev, 0x20008); 196 } else { 197 core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff;
··· 191 int offset = sensor->offset_mult / sensor->offset_div; 192 int core_temp; 193 194 + if (dev_priv->card_type >= NV_50) { 195 core_temp = nv_rd32(dev, 0x20008); 196 } else { 197 core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff;
+1 -6
drivers/gpu/drm/nouveau/nv04_crtc.c
··· 158 { 159 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 160 struct drm_device *dev = crtc->dev; 161 - struct drm_connector *connector; 162 unsigned char seq1 = 0, crtc17 = 0; 163 unsigned char crtc1A; 164 ··· 212 NVVgaSeqReset(dev, nv_crtc->index, false); 213 214 NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A); 215 - 216 - /* Update connector polling modes */ 217 - list_for_each_entry(connector, &dev->mode_config.connector_list, head) 218 - nouveau_connector_set_polling(connector); 219 } 220 221 static bool ··· 826 /* Update the framebuffer location. */ 827 regp->fb_start = nv_crtc->fb.offset & ~3; 828 regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8); 829 - NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_START, regp->fb_start); 830 831 /* Update the arbitration parameters. */ 832 nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel,
··· 158 { 159 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 160 struct drm_device *dev = crtc->dev; 161 unsigned char seq1 = 0, crtc17 = 0; 162 unsigned char crtc1A; 163 ··· 213 NVVgaSeqReset(dev, nv_crtc->index, false); 214 215 NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A); 216 } 217 218 static bool ··· 831 /* Update the framebuffer location. */ 832 regp->fb_start = nv_crtc->fb.offset & ~3; 833 regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8); 834 + nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start); 835 836 /* Update the arbitration parameters. */ 837 nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel,
+7 -6
drivers/gpu/drm/nouveau/nv04_dfp.c
··· 185 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 186 struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); 187 188 - /* For internal panels and gpu scaling on DVI we need the native mode */ 189 - if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { 190 - if (!nv_connector->native_mode) 191 - return false; 192 nv_encoder->mode = *nv_connector->native_mode; 193 adjusted_mode->clock = nv_connector->native_mode->clock; 194 - } else { 195 - nv_encoder->mode = *adjusted_mode; 196 } 197 198 return true;
··· 185 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 186 struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); 187 188 + if (!nv_connector->native_mode || 189 + nv_connector->scaling_mode == DRM_MODE_SCALE_NONE || 190 + mode->hdisplay > nv_connector->native_mode->hdisplay || 191 + mode->vdisplay > nv_connector->native_mode->vdisplay) { 192 + nv_encoder->mode = *adjusted_mode; 193 + 194 + } else { 195 nv_encoder->mode = *nv_connector->native_mode; 196 adjusted_mode->clock = nv_connector->native_mode->clock; 197 } 198 199 return true;
+9
drivers/gpu/drm/nouveau/nv04_pm.c
··· 76 reg += 4; 77 78 nouveau_hw_setpll(dev, reg, &state->calc); 79 kfree(state); 80 } 81
··· 76 reg += 4; 77 78 nouveau_hw_setpll(dev, reg, &state->calc); 79 + 80 + if (dev_priv->card_type < NV_30 && reg == NV_PRAMDAC_MPLL_COEFF) { 81 + if (dev_priv->card_type == NV_20) 82 + nv_mask(dev, 0x1002c4, 0, 1 << 20); 83 + 84 + /* Reset the DLLs */ 85 + nv_mask(dev, 0x1002c0, 0, 1 << 8); 86 + } 87 + 88 kfree(state); 89 } 90
+10 -6
drivers/gpu/drm/nouveau/nv50_calc.c
··· 51 int *N, int *fN, int *M, int *P) 52 { 53 fixed20_12 fb_div, a, b; 54 55 - *P = pll->vco1.maxfreq / clk; 56 if (*P > pll->max_p) 57 *P = pll->max_p; 58 if (*P < pll->min_p) 59 *P = pll->min_p; 60 61 - /* *M = ceil(refclk / pll->vco.max_inputfreq); */ 62 - a.full = dfixed_const(pll->refclk); 63 - b.full = dfixed_const(pll->vco1.max_inputfreq); 64 a.full = dfixed_div(a, b); 65 - a.full = dfixed_ceil(a); 66 *M = dfixed_trunc(a); 67 68 /* fb_div = (vco * *M) / refclk; */ 69 fb_div.full = dfixed_const(clk * *P); 70 fb_div.full = dfixed_mul(fb_div, a); 71 - a.full = dfixed_const(pll->refclk); 72 fb_div.full = dfixed_div(fb_div, a); 73 74 /* *N = floor(fb_div); */
··· 51 int *N, int *fN, int *M, int *P) 52 { 53 fixed20_12 fb_div, a, b; 54 + u32 refclk = pll->refclk / 10; 55 + u32 max_vco_freq = pll->vco1.maxfreq / 10; 56 + u32 max_vco_inputfreq = pll->vco1.max_inputfreq / 10; 57 + clk /= 10; 58 59 + *P = max_vco_freq / clk; 60 if (*P > pll->max_p) 61 *P = pll->max_p; 62 if (*P < pll->min_p) 63 *P = pll->min_p; 64 65 + /* *M = floor((refclk + max_vco_inputfreq) / max_vco_inputfreq); */ 66 + a.full = dfixed_const(refclk + max_vco_inputfreq); 67 + b.full = dfixed_const(max_vco_inputfreq); 68 a.full = dfixed_div(a, b); 69 + a.full = dfixed_floor(a); 70 *M = dfixed_trunc(a); 71 72 /* fb_div = (vco * *M) / refclk; */ 73 fb_div.full = dfixed_const(clk * *P); 74 fb_div.full = dfixed_mul(fb_div, a); 75 + a.full = dfixed_const(refclk); 76 fb_div.full = dfixed_div(fb_div, a); 77 78 /* *N = floor(fb_div); */
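Read back as plain math (with the divide-by-10 prescale undone), the routine now computes P = clamp(floor(max_vco_freq / clk), min_p, max_p), M = floor((refclk + max_vco_inputfreq) / max_vco_inputfreq) and N = floor(clk * P * M / refclk), so the dividers satisfy clk ~= refclk * N / (M * P) up to the fractional part returned in fN. The /10 scaling is presumably there so that kHz-scale intermediates such as clk * P and its product with M stay within the 20 integer bits of the fixed20_12 format.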
+2 -2
drivers/gpu/drm/nouveau/nv50_crtc.c
··· 546 } 547 548 nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base; 549 - nv_crtc->fb.tile_flags = fb->nvbo->tile_flags; 550 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; 551 if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { 552 ret = RING_SPACE(evo, 2); ··· 578 fb->nvbo->tile_mode); 579 } 580 if (dev_priv->chipset == 0x50) 581 - OUT_RING(evo, (fb->nvbo->tile_flags << 8) | format); 582 else 583 OUT_RING(evo, format); 584
··· 546 } 547 548 nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base; 549 + nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); 550 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; 551 if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { 552 ret = RING_SPACE(evo, 2); ··· 578 fb->nvbo->tile_mode); 579 } 580 if (dev_priv->chipset == 0x50) 581 + OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format); 582 else 583 OUT_RING(evo, format); 584
+26 -9
drivers/gpu/drm/nouveau/nv50_display.c
··· 1032 struct drm_connector *connector; 1033 const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; 1034 uint32_t unplug_mask, plug_mask, change_mask; 1035 - uint32_t hpd0, hpd1 = 0; 1036 1037 - hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050); 1038 if (dev_priv->chipset >= 0x90) 1039 - hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070); 1040 1041 plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16); 1042 unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000); ··· 1085 helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); 1086 } 1087 1088 - nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054)); 1089 - if (dev_priv->chipset >= 0x90) 1090 - nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074)); 1091 - 1092 drm_helper_hpd_irq_event(dev); 1093 } 1094 ··· 1095 uint32_t delayed = 0; 1096 1097 if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) { 1098 - if (!work_pending(&dev_priv->hpd_work)) 1099 - queue_work(dev_priv->wq, &dev_priv->hpd_work); 1100 } 1101 1102 while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
··· 1032 struct drm_connector *connector; 1033 const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; 1034 uint32_t unplug_mask, plug_mask, change_mask; 1035 + uint32_t hpd0, hpd1; 1036 1037 + spin_lock_irq(&dev_priv->hpd_state.lock); 1038 + hpd0 = dev_priv->hpd_state.hpd0_bits; 1039 + dev_priv->hpd_state.hpd0_bits = 0; 1040 + hpd1 = dev_priv->hpd_state.hpd1_bits; 1041 + dev_priv->hpd_state.hpd1_bits = 0; 1042 + spin_unlock_irq(&dev_priv->hpd_state.lock); 1043 + 1044 + hpd0 &= nv_rd32(dev, 0xe050); 1045 if (dev_priv->chipset >= 0x90) 1046 + hpd1 &= nv_rd32(dev, 0xe070); 1047 1048 plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16); 1049 unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000); ··· 1078 helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); 1079 } 1080 1081 drm_helper_hpd_irq_event(dev); 1082 } 1083 ··· 1092 uint32_t delayed = 0; 1093 1094 if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) { 1095 + uint32_t hpd0_bits, hpd1_bits = 0; 1096 + 1097 + hpd0_bits = nv_rd32(dev, 0xe054); 1098 + nv_wr32(dev, 0xe054, hpd0_bits); 1099 + 1100 + if (dev_priv->chipset >= 0x90) { 1101 + hpd1_bits = nv_rd32(dev, 0xe074); 1102 + nv_wr32(dev, 0xe074, hpd1_bits); 1103 + } 1104 + 1105 + spin_lock(&dev_priv->hpd_state.lock); 1106 + dev_priv->hpd_state.hpd0_bits |= hpd0_bits; 1107 + dev_priv->hpd_state.hpd1_bits |= hpd1_bits; 1108 + spin_unlock(&dev_priv->hpd_state.lock); 1109 + 1110 + queue_work(dev_priv->wq, &dev_priv->hpd_work); 1111 } 1112 1113 while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
+5
drivers/gpu/drm/nouveau/nv50_fifo.c
··· 464 return 0; 465 } 466
··· 464 return 0; 465 } 466 467 + void 468 + nv50_fifo_tlb_flush(struct drm_device *dev) 469 + { 470 + nv50_vm_flush(dev, 5); 471 + }
+52
drivers/gpu/drm/nouveau/nv50_graph.c
··· 402 { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ 403 {} 404 };
··· 402 { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ 403 {} 404 }; 405 + 406 + void 407 + nv50_graph_tlb_flush(struct drm_device *dev) 408 + { 409 + nv50_vm_flush(dev, 0); 410 + } 411 + 412 + void 413 + nv86_graph_tlb_flush(struct drm_device *dev) 414 + { 415 + struct drm_nouveau_private *dev_priv = dev->dev_private; 416 + struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; 417 + bool idle, timeout = false; 418 + unsigned long flags; 419 + u64 start; 420 + u32 tmp; 421 + 422 + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 423 + nv_mask(dev, 0x400500, 0x00000001, 0x00000000); 424 + 425 + start = ptimer->read(dev); 426 + do { 427 + idle = true; 428 + 429 + for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) { 430 + if ((tmp & 7) == 1) 431 + idle = false; 432 + } 433 + 434 + for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) { 435 + if ((tmp & 7) == 1) 436 + idle = false; 437 + } 438 + 439 + for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) { 440 + if ((tmp & 7) == 1) 441 + idle = false; 442 + } 443 + } while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000)); 444 + 445 + if (timeout) { 446 + NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: " 447 + "0x%08x 0x%08x 0x%08x 0x%08x\n", 448 + nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380), 449 + nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388)); 450 + } 451 + 452 + nv50_vm_flush(dev, 0); 453 + 454 + nv_mask(dev, 0x400500, 0x00000001, 0x00000001); 455 + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 456 + }
-1
drivers/gpu/drm/nouveau/nv50_instmem.c
··· 402 } 403 dev_priv->engine.instmem.flush(dev); 404 405 - nv50_vm_flush(dev, 4); 406 nv50_vm_flush(dev, 6); 407 408 gpuobj->im_bound = 1;
··· 402 } 403 dev_priv->engine.instmem.flush(dev); 404 405 nv50_vm_flush(dev, 6); 406 407 gpuobj->im_bound = 1;
+30 -1
drivers/gpu/drm/radeon/evergreen.c
··· 1650 } 1651 } 1652 1653 - rdev->config.evergreen.tile_config = gb_addr_config; 1654 WREG32(GB_BACKEND_MAP, gb_backend_map); 1655 WREG32(GB_ADDR_CONFIG, gb_addr_config); 1656 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
··· 1650 } 1651 } 1652 1653 + /* setup tiling info dword. gb_addr_config is not adequate since it does 1654 + * not have bank info, so create a custom tiling dword. 1655 + * bits 3:0 num_pipes 1656 + * bits 7:4 num_banks 1657 + * bits 11:8 group_size 1658 + * bits 15:12 row_size 1659 + */ 1660 + rdev->config.evergreen.tile_config = 0; 1661 + switch (rdev->config.evergreen.max_tile_pipes) { 1662 + case 1: 1663 + default: 1664 + rdev->config.evergreen.tile_config |= (0 << 0); 1665 + break; 1666 + case 2: 1667 + rdev->config.evergreen.tile_config |= (1 << 0); 1668 + break; 1669 + case 4: 1670 + rdev->config.evergreen.tile_config |= (2 << 0); 1671 + break; 1672 + case 8: 1673 + rdev->config.evergreen.tile_config |= (3 << 0); 1674 + break; 1675 + } 1676 + rdev->config.evergreen.tile_config |= 1677 + ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; 1678 + rdev->config.evergreen.tile_config |= 1679 + ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8; 1680 + rdev->config.evergreen.tile_config |= 1681 + ((gb_addr_config & 0x30000000) >> 28) << 12; 1682 + 1683 WREG32(GB_BACKEND_MAP, gb_backend_map); 1684 WREG32(GB_ADDR_CONFIG, gb_addr_config); 1685 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
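As a consumer's-eye view of the packed dword, a hypothetical decoder might look like the fragment below (field names invented here; only the pipe encoding is spelled out by the switch above, the bank/group/row fields are carried through raw):

	/* hypothetical decode of rdev->config.evergreen.tile_config */
	unsigned num_pipes  = 1u << (tile_config & 0xf);  /* bits 3:0, log2-encoded */
	unsigned bank_code  = (tile_config >> 4) & 0xf;   /* bits 7:4, raw NOOFBANK field */
	unsigned group_code = (tile_config >> 8) & 0xf;   /* bits 11:8, raw BURSTLENGTH field */
	unsigned row_code   = (tile_config >> 12) & 0xf;  /* bits 15:12, from GB_ADDR_CONFIG */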
+1 -1
drivers/gpu/drm/radeon/evergreen_blit_kms.c
··· 459 obj_size += evergreen_ps_size * 4; 460 obj_size = ALIGN(obj_size, 256); 461 462 - r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM, 463 &rdev->r600_blit.shader_obj); 464 if (r) { 465 DRM_ERROR("evergreen failed to allocate shader\n");
··· 459 obj_size += evergreen_ps_size * 4; 460 obj_size = ALIGN(obj_size, 256); 461 462 + r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 463 &rdev->r600_blit.shader_obj); 464 if (r) { 465 DRM_ERROR("evergreen failed to allocate shader\n");
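The new PAGE_SIZE argument here (and in the r600.c and r600_blit_kms.c hunks below) is the byte-alignment parameter that the "fix alignment when allocating buffers" commit threads through radeon_bo_create(): callers that previously relied on implicit page alignment now request it explicitly, and buffers with stricter hardware requirements can pass a larger value.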
+1 -1
drivers/gpu/drm/radeon/r600.c
··· 2718 /* Allocate ring buffer */ 2719 if (rdev->ih.ring_obj == NULL) { 2720 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, 2721 - true, 2722 RADEON_GEM_DOMAIN_GTT, 2723 &rdev->ih.ring_obj); 2724 if (r) {
··· 2718 /* Allocate ring buffer */ 2719 if (rdev->ih.ring_obj == NULL) { 2720 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, 2721 + PAGE_SIZE, true, 2722 RADEON_GEM_DOMAIN_GTT, 2723 &rdev->ih.ring_obj); 2724 if (r) {
+1 -1
drivers/gpu/drm/radeon/r600_blit_kms.c
··· 501 obj_size += r6xx_ps_size * 4; 502 obj_size = ALIGN(obj_size, 256); 503 504 - r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM, 505 &rdev->r600_blit.shader_obj); 506 if (r) { 507 DRM_ERROR("r600 failed to allocate shader\n");
··· 501 obj_size += r6xx_ps_size * 4; 502 obj_size = ALIGN(obj_size, 256); 503 504 + r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 505 &rdev->r600_blit.shader_obj); 506 if (r) { 507 DRM_ERROR("r600 failed to allocate shader\n");
+194 -117
drivers/gpu/drm/radeon/r600_cs.c
··· 50 u32 nsamples; 51 u32 cb_color_base_last[8]; 52 struct radeon_bo *cb_color_bo[8]; 53 u32 cb_color_bo_offset[8]; 54 struct radeon_bo *cb_color_frag_bo[8]; 55 struct radeon_bo *cb_color_tile_bo[8]; ··· 68 u32 db_depth_size; 69 u32 db_offset; 70 struct radeon_bo *db_bo; 71 }; 72 73 static inline int r600_bpe_from_format(u32 *bpe, u32 format) ··· 142 return 0; 143 } 144 145 static void r600_cs_track_init(struct r600_cs_track *track) 146 { 147 int i; ··· 217 track->cb_color_info[i] = 0; 218 track->cb_color_bo[i] = NULL; 219 track->cb_color_bo_offset[i] = 0xFFFFFFFF; 220 } 221 track->cb_target_mask = 0xFFFFFFFF; 222 track->cb_shader_mask = 0xFFFFFFFF; 223 track->db_bo = NULL; 224 /* assume the biggest format and that htile is enabled */ 225 track->db_depth_info = 7 | (1 << 25); 226 track->db_depth_view = 0xFFFFC000; ··· 234 static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) 235 { 236 struct r600_cs_track *track = p->track; 237 - u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align; 238 volatile u32 *ib = p->ib->ptr; 239 unsigned array_mode; 240 ··· 252 i, track->cb_color_info[i]); 253 return -EINVAL; 254 } 255 - /* pitch is the number of 8x8 tiles per row */ 256 - pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1; 257 slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; 258 slice_tile_max *= 64; 259 - height = slice_tile_max / (pitch * 8); 260 if (height > 8192) 261 height = 8192; 262 array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]); 263 switch (array_mode) { 264 case V_0280A0_ARRAY_LINEAR_GENERAL: 265 - /* technically height & 0x7 */ 266 break; 267 case V_0280A0_ARRAY_LINEAR_ALIGNED: 268 - pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8; 269 - if (!IS_ALIGNED(pitch, pitch_align)) { 270 - dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", 271 - __func__, __LINE__, pitch); 272 - return -EINVAL; 273 - } 274 - if (!IS_ALIGNED(height, 8)) { 275 - dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", 276 - __func__, __LINE__, height); 277 - return -EINVAL; 278 - } 279 break; 280 case V_0280A0_ARRAY_1D_TILED_THIN1: 281 - pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe * track->nsamples))) / 8; 282 - if (!IS_ALIGNED(pitch, pitch_align)) { 283 - dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", 284 - __func__, __LINE__, pitch); 285 - return -EINVAL; 286 - } 287 /* avoid breaking userspace */ 288 if (height > 7) 289 height &= ~0x7; 290 - if (!IS_ALIGNED(height, 8)) { 291 - dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", 292 - __func__, __LINE__, height); 293 - return -EINVAL; 294 - } 295 break; 296 case V_0280A0_ARRAY_2D_TILED_THIN1: 297 - pitch_align = max((u32)track->nbanks, 298 - (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks)) / 8; 299 - if (!IS_ALIGNED(pitch, pitch_align)) { 300 - dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", 301 - __func__, __LINE__, pitch); 302 - return -EINVAL; 303 - } 304 - if (!IS_ALIGNED((height / 8), track->npipes)) { 305 - dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", 306 - __func__, __LINE__, height); 307 - return -EINVAL; 308 - } 309 break; 310 default: 311 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, ··· 293 track->cb_color_info[i]); 294 return -EINVAL; 295 } 296 /* check offset */ 297 - tmp = height * pitch * 8 * bpe; 298 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { 299 if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { 300 /* the initial DDX does bad things with the CB size occasionally */ 301 /* it rounds up height too far for slice tile max but the BO is smaller */ 302 - tmp = (height - 7) * 8 * bpe; 303 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { 304 dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); 305 return -EINVAL; ··· 325 return -EINVAL; 326 } 327 } 328 - if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) { 329 - dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]); 330 - return -EINVAL; 331 - } 332 /* limit max tile */ 333 - tmp = (height * pitch * 8) >> 6; 334 if (tmp < slice_tile_max) 335 slice_tile_max = tmp; 336 - tmp = S_028060_PITCH_TILE_MAX(pitch - 1) | 337 S_028060_SLICE_TILE_MAX(slice_tile_max - 1); 338 ib[track->cb_color_size_idx[i]] = tmp; 339 return 0; ··· 371 /* Check depth buffer */ 372 if (G_028800_STENCIL_ENABLE(track->db_depth_control) || 373 G_028800_Z_ENABLE(track->db_depth_control)) { 374 - u32 nviews, bpe, ntiles, pitch, pitch_align, height, size, slice_tile_max; 375 if (track->db_bo == NULL) { 376 dev_warn(p->dev, "z/stencil with no depth buffer\n"); 377 return -EINVAL; ··· 419 ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); 420 } else { 421 size = radeon_bo_size(track->db_bo); 422 - pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1; 423 slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 424 slice_tile_max *= 64; 425 - height = slice_tile_max / (pitch * 8); 426 if (height > 8192) 427 height = 8192; 428 - switch (G_028010_ARRAY_MODE(track->db_depth_info)) { 429 case V_028010_ARRAY_1D_TILED_THIN1: 430 - pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8); 431 - if (!IS_ALIGNED(pitch, pitch_align)) { 432 - dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", 433 - __func__, __LINE__, pitch); 434 - return -EINVAL; 435 - } 436 /* don't break userspace */ 437 height &= ~0x7; 438 - if (!IS_ALIGNED(height, 8)) { 439 - dev_warn(p->dev, "%s:%d db height (%d) invalid\n", 440 - __func__, __LINE__, height); 441 - return -EINVAL; 442 - } 443 break; 444 case V_028010_ARRAY_2D_TILED_THIN1: 445 - pitch_align = max((u32)track->nbanks, 446 - (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8; 447 - if (!IS_ALIGNED(pitch, pitch_align)) { 448 - dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", 449 - __func__, __LINE__, pitch); 450 - return -EINVAL; 451 - } 452 - if (!IS_ALIGNED((height / 8), track->npipes)) { 453 - dev_warn(p->dev, "%s:%d db height (%d) invalid\n", 454 - __func__, __LINE__, height); 455 - return -EINVAL; 456 - } 457 break; 458 default: 459 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, ··· 454 track->db_depth_info); 455 return -EINVAL; 456 } 457 - if (!IS_ALIGNED(track->db_offset, track->group_size)) { 458 - dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->db_offset); 459 return -EINVAL; 460 } 461 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 462 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; 463 tmp = ntiles * bpe * 64 * nviews; 464 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { 465 - dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n", 466 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, 467 radeon_bo_size(track->db_bo)); 468 return -EINVAL; ··· 1025 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1026 track->cb_color_base_last[tmp] = ib[idx]; 1027 track->cb_color_bo[tmp] = reloc->robj; 1028 break; 1029 case DB_DEPTH_BASE: 1030 r = r600_cs_packet_next_reloc(p, &reloc); ··· 1037 track->db_offset = radeon_get_ib_value(p, idx) << 8; 1038 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1039 track->db_bo = reloc->robj; 1040 break; 1041 case DB_HTILE_DATA_BASE: 1042 case SQ_PGM_START_FS: ··· 1159 static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, 1160 struct radeon_bo *texture, 1161 struct radeon_bo *mipmap, 1162 u32 tiling_flags) 1163 { 1164 struct r600_cs_track *track = p->track; 1165 u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0; 1166 - u32 word0, word1, l0_size, mipmap_size, pitch, pitch_align; 1167 1168 /* on legacy kernel we don't perform advanced check */ 1169 if (p->rdev == NULL) 1170 return 0; 1171 1172 word0 = radeon_get_ib_value(p, idx + 0); 1173 if (tiling_flags & RADEON_TILING_MACRO) ··· 1210 return -EINVAL; 1211 } 1212 1213 - pitch = G_038000_PITCH(word0) + 1; 1214 - switch (G_038000_TILE_MODE(word0)) { 1215 - case V_038000_ARRAY_LINEAR_GENERAL: 1216 - pitch_align = 1; 1217 - /* XXX check height align */ 1218 - break; 1219 - case V_038000_ARRAY_LINEAR_ALIGNED: 1220 - pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8; 1221 - if (!IS_ALIGNED(pitch, pitch_align)) { 1222 - dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", 1223 - __func__, __LINE__, pitch); 1224 - return -EINVAL; 1225 - } 1226 - /* XXX check height align */ 1227 - break; 1228 - case V_038000_ARRAY_1D_TILED_THIN1: 1229 - pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8; 1230 - if (!IS_ALIGNED(pitch, pitch_align)) { 1231 - dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", 1232 - __func__, __LINE__, pitch); 1233 - return -EINVAL; 1234 - } 1235 - /* XXX check height align */ 1236 - break; 1237 - case V_038000_ARRAY_2D_TILED_THIN1: 1238 - pitch_align = max((u32)track->nbanks, 1239 - (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8; 1240 - if (!IS_ALIGNED(pitch, pitch_align)) { 1241 - dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", 1242 - __func__, __LINE__, pitch); 1243 - return -EINVAL; 1244 - } 1245 - /* XXX check height align */ 1246 - break; 1247 - default: 1248 - dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, 1249 - G_038000_TILE_MODE(word0), word0); 1250 return -EINVAL; 1251 } 1252 - /* XXX check offset align */ 1253 1254 word0 = radeon_get_ib_value(p, idx + 4); 1255 word1 = radeon_get_ib_value(p, idx + 5); ··· 1476 mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1477 mipmap = reloc->robj; 1478 r = r600_check_texture_resource(p, idx+(i*7)+1, 1479 - texture, mipmap, reloc->lobj.tiling_flags); 1480 if (r) 1481 return r; 1482 ib[idx+1+(i*7)+2] += base_offset;
··· 50 u32 nsamples; 51 u32 cb_color_base_last[8]; 52 struct radeon_bo *cb_color_bo[8]; 53 + u64 cb_color_bo_mc[8]; 54 u32 cb_color_bo_offset[8]; 55 struct radeon_bo *cb_color_frag_bo[8]; 56 struct radeon_bo *cb_color_tile_bo[8]; ··· 67 u32 db_depth_size; 68 u32 db_offset; 69 struct radeon_bo *db_bo; 70 + u64 db_bo_mc; 71 }; 72 73 static inline int r600_bpe_from_format(u32 *bpe, u32 format) ··· 140 return 0; 141 } 142 143 + struct array_mode_checker { 144 + int array_mode; 145 + u32 group_size; 146 + u32 nbanks; 147 + u32 npipes; 148 + u32 nsamples; 149 + u32 bpe; 150 + }; 151 + 152 + /* returns alignment in pixels for pitch/height/depth and bytes for base */ 153 + static inline int r600_get_array_mode_alignment(struct array_mode_checker *values, 154 + u32 *pitch_align, 155 + u32 *height_align, 156 + u32 *depth_align, 157 + u64 *base_align) 158 + { 159 + u32 tile_width = 8; 160 + u32 tile_height = 8; 161 + u32 macro_tile_width = values->nbanks; 162 + u32 macro_tile_height = values->npipes; 163 + u32 tile_bytes = tile_width * tile_height * values->bpe * values->nsamples; 164 + u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes; 165 + 166 + switch (values->array_mode) { 167 + case ARRAY_LINEAR_GENERAL: 168 + /* technically tile_width/_height for pitch/height */ 169 + *pitch_align = 1; /* tile_width */ 170 + *height_align = 1; /* tile_height */ 171 + *depth_align = 1; 172 + *base_align = 1; 173 + break; 174 + case ARRAY_LINEAR_ALIGNED: 175 + *pitch_align = max((u32)64, (u32)(values->group_size / values->bpe)); 176 + *height_align = tile_height; 177 + *depth_align = 1; 178 + *base_align = values->group_size; 179 + break; 180 + case ARRAY_1D_TILED_THIN1: 181 + *pitch_align = max((u32)tile_width, 182 + (u32)(values->group_size / 183 + (tile_height * values->bpe * values->nsamples))); 184 + *height_align = tile_height; 185 + *depth_align = 1; 186 + *base_align = values->group_size; 187 + break; 188 + case ARRAY_2D_TILED_THIN1: 189 + *pitch_align = max((u32)macro_tile_width, 190 + (u32)(((values->group_size / tile_height) / 191 + (values->bpe * values->nsamples)) * 192 + values->nbanks)) * tile_width; 193 + *height_align = macro_tile_height * tile_height; 194 + *depth_align = 1; 195 + *base_align = max(macro_tile_bytes, 196 + (*pitch_align) * values->bpe * (*height_align) * values->nsamples); 197 + break; 198 + default: 199 + return -EINVAL; 200 + } 201 + 202 + return 0; 203 + } 204 + 205 static void r600_cs_track_init(struct r600_cs_track *track) 206 { 207 int i; ··· 153 track->cb_color_info[i] = 0; 154 track->cb_color_bo[i] = NULL; 155 track->cb_color_bo_offset[i] = 0xFFFFFFFF; 156 + track->cb_color_bo_mc[i] = 0xFFFFFFFF; 157 } 158 track->cb_target_mask = 0xFFFFFFFF; 159 track->cb_shader_mask = 0xFFFFFFFF; 160 track->db_bo = NULL; 161 + track->db_bo_mc = 0xFFFFFFFF; 162 /* assume the biggest format and that htile is enabled */ 163 track->db_depth_info = 7 | (1 << 25); 164 track->db_depth_view = 0xFFFFC000; ··· 168 static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) 169 { 170 struct r600_cs_track *track = p->track; 171 + u32 bpe = 0, slice_tile_max, size, tmp; 172 + u32 height, height_align, pitch, pitch_align, depth_align; 173 + u64 base_offset, base_align; 174 + struct array_mode_checker array_check; 175 volatile u32 *ib = p->ib->ptr; 176 unsigned array_mode; 177 ··· 183 i, track->cb_color_info[i]); 184 return -EINVAL; 185 } 186 + /* pitch in pixels */ 187 + pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8; 188 slice_tile_max = 
G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; 189 slice_tile_max *= 64; 190 + height = slice_tile_max / pitch; 191 if (height > 8192) 192 height = 8192; 193 array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]); 194 + 195 + base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i]; 196 + array_check.array_mode = array_mode; 197 + array_check.group_size = track->group_size; 198 + array_check.nbanks = track->nbanks; 199 + array_check.npipes = track->npipes; 200 + array_check.nsamples = track->nsamples; 201 + array_check.bpe = bpe; 202 + if (r600_get_array_mode_alignment(&array_check, 203 + &pitch_align, &height_align, &depth_align, &base_align)) { 204 + dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, 205 + G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, 206 + track->cb_color_info[i]); 207 + return -EINVAL; 208 + } 209 switch (array_mode) { 210 case V_0280A0_ARRAY_LINEAR_GENERAL: 211 break; 212 case V_0280A0_ARRAY_LINEAR_ALIGNED: 213 break; 214 case V_0280A0_ARRAY_1D_TILED_THIN1: 215 /* avoid breaking userspace */ 216 if (height > 7) 217 height &= ~0x7; 218 break; 219 case V_0280A0_ARRAY_2D_TILED_THIN1: 220 break; 221 default: 222 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, ··· 244 track->cb_color_info[i]); 245 return -EINVAL; 246 } 247 + 248 + if (!IS_ALIGNED(pitch, pitch_align)) { 249 + dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", 250 + __func__, __LINE__, pitch); 251 + return -EINVAL; 252 + } 253 + if (!IS_ALIGNED(height, height_align)) { 254 + dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", 255 + __func__, __LINE__, height); 256 + return -EINVAL; 257 + } 258 + if (!IS_ALIGNED(base_offset, base_align)) { 259 + dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); 260 + return -EINVAL; 261 + } 262 + 263 /* check offset */ 264 + tmp = height * pitch * bpe; 265 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { 266 if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { 267 /* the initial DDX does bad things with the CB size occasionally */ 268 /* it rounds up height too far for slice tile max but the BO is smaller */ 269 + tmp = (height - 7) * pitch * bpe; 270 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { 271 dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); 272 return -EINVAL; ··· 260 return -EINVAL; 261 } 262 } 263 /* limit max tile */ 264 + tmp = (height * pitch) >> 6; 265 if (tmp < slice_tile_max) 266 slice_tile_max = tmp; 267 + tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) | 268 S_028060_SLICE_TILE_MAX(slice_tile_max - 1); 269 ib[track->cb_color_size_idx[i]] = tmp; 270 return 0; ··· 310 /* Check depth buffer */ 311 if (G_028800_STENCIL_ENABLE(track->db_depth_control) || 312 G_028800_Z_ENABLE(track->db_depth_control)) { 313 + u32 nviews, bpe, ntiles, size, slice_tile_max; 314 + u32 height, height_align, pitch, pitch_align, depth_align; 315 + u64 base_offset, base_align; 316 + struct array_mode_checker array_check; 317 + int array_mode; 318 + 319 if (track->db_bo == NULL) { 320 dev_warn(p->dev, "z/stencil with no depth buffer\n"); 321 return -EINVAL; ··· 353 ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); 354 } else { 355 size = radeon_bo_size(track->db_bo); 356 + /* pitch in pixels */ 357 + pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8; 358 slice_tile_max = 
G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 359 slice_tile_max *= 64; 360 + height = slice_tile_max / pitch; 361 if (height > 8192) 362 height = 8192; 363 + base_offset = track->db_bo_mc + track->db_offset; 364 + array_mode = G_028010_ARRAY_MODE(track->db_depth_info); 365 + array_check.array_mode = array_mode; 366 + array_check.group_size = track->group_size; 367 + array_check.nbanks = track->nbanks; 368 + array_check.npipes = track->npipes; 369 + array_check.nsamples = track->nsamples; 370 + array_check.bpe = bpe; 371 + if (r600_get_array_mode_alignment(&array_check, 372 + &pitch_align, &height_align, &depth_align, &base_align)) { 373 + dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, 374 + G_028010_ARRAY_MODE(track->db_depth_info), 375 + track->db_depth_info); 376 + return -EINVAL; 377 + } 378 + switch (array_mode) { 379 case V_028010_ARRAY_1D_TILED_THIN1: 380 /* don't break userspace */ 381 height &= ~0x7; 382 break; 383 case V_028010_ARRAY_2D_TILED_THIN1: 384 break; 385 default: 386 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, ··· 395 track->db_depth_info); 396 return -EINVAL; 397 } 398 + 399 + if (!IS_ALIGNED(pitch, pitch_align)) { 400 + dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", 401 + __func__, __LINE__, pitch); 402 return -EINVAL; 403 } 404 + if (!IS_ALIGNED(height, height_align)) { 405 + dev_warn(p->dev, "%s:%d db height (%d) invalid\n", 406 + __func__, __LINE__, height); 407 + return -EINVAL; 408 + } 409 + if (!IS_ALIGNED(base_offset, base_align)) { 410 + dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); 411 + return -EINVAL; 412 + } 413 + 414 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 415 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; 416 tmp = ntiles * bpe * 64 * nviews; 417 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { 418 + dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n", 419 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, 420 radeon_bo_size(track->db_bo)); 421 return -EINVAL; ··· 954 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 955 track->cb_color_base_last[tmp] = ib[idx]; 956 track->cb_color_bo[tmp] = reloc->robj; 957 + track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset; 958 break; 959 case DB_DEPTH_BASE: 960 r = r600_cs_packet_next_reloc(p, &reloc); ··· 965 track->db_offset = radeon_get_ib_value(p, idx) << 8; 966 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 967 track->db_bo = reloc->robj; 968 + track->db_bo_mc = reloc->lobj.gpu_offset; 969 break; 970 case DB_HTILE_DATA_BASE: 971 case SQ_PGM_START_FS: ··· 1086 static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, 1087 struct radeon_bo *texture, 1088 struct radeon_bo *mipmap, 1089 + u64 base_offset, 1090 + u64 mip_offset, 1091 u32 tiling_flags) 1092 { 1093 struct r600_cs_track *track = p->track; 1094 u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0; 1095 + u32 word0, word1, l0_size, mipmap_size; 1096 + u32 height_align, pitch, pitch_align, depth_align; 1097 + u64 base_align; 1098 + struct array_mode_checker array_check; 1099 1100 /* on legacy kernel we don't perform advanced check */ 1101 if (p->rdev == NULL) 1102 return 0; 1103 + 1104 + /* convert to bytes */ 1105 + base_offset <<= 8; 1106 + mip_offset <<= 8; 1107 1108 word0 = radeon_get_ib_value(p, idx + 0); 1109 if (tiling_flags & RADEON_TILING_MACRO) ··· 1128 return -EINVAL; 1129 } 1130 1131 + /* pitch in texels */ 1132 + pitch = 
(G_038000_PITCH(word0) + 1) * 8; 1133 + array_check.array_mode = G_038000_TILE_MODE(word0); 1134 + array_check.group_size = track->group_size; 1135 + array_check.nbanks = track->nbanks; 1136 + array_check.npipes = track->npipes; 1137 + array_check.nsamples = 1; 1138 + array_check.bpe = bpe; 1139 + if (r600_get_array_mode_alignment(&array_check, 1140 + &pitch_align, &height_align, &depth_align, &base_align)) { 1141 + dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", 1142 + __func__, __LINE__, G_038000_TILE_MODE(word0)); 1143 return -EINVAL; 1144 } 1145 + 1146 + /* XXX check height as well... */ 1147 + 1148 + if (!IS_ALIGNED(pitch, pitch_align)) { 1149 + dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", 1150 + __func__, __LINE__, pitch); 1151 + return -EINVAL; 1152 + } 1153 + if (!IS_ALIGNED(base_offset, base_align)) { 1154 + dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n", 1155 + __func__, __LINE__, base_offset); 1156 + return -EINVAL; 1157 + } 1158 + if (!IS_ALIGNED(mip_offset, base_align)) { 1159 + dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n", 1160 + __func__, __LINE__, mip_offset); 1161 + return -EINVAL; 1162 + } 1163 1164 word0 = radeon_get_ib_value(p, idx + 4); 1165 word1 = radeon_get_ib_value(p, idx + 5); ··· 1402 mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1403 mipmap = reloc->robj; 1404 r = r600_check_texture_resource(p, idx+(i*7)+1, 1405 + texture, mipmap, 1406 + base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2), 1407 + mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3), 1408 + reloc->lobj.tiling_flags); 1409 if (r) 1410 return r; 1411 ib[idx+1+(i*7)+2] += base_offset;
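Note: the new r600_get_array_mode_alignment() helper above replaces the per-call-site alignment math in the CB, DB and texture checkers. As a quick sanity check of the 2D-tiled case, here is a standalone user-space restatement of the same arithmetic; the surface parameters (group size 256, 8 banks, 2 pipes, 4 bytes per element, 1 sample) are illustrative assumptions, not values read from any particular ASIC:

    #include <stdio.h>

    /* restates the ARRAY_2D_TILED_THIN1 branch of r600_get_array_mode_alignment() */
    int main(void)
    {
        unsigned group_size = 256, nbanks = 8, npipes = 2, nsamples = 1, bpe = 4;
        unsigned tile_width = 8, tile_height = 8;
        unsigned macro_tile_width = nbanks, macro_tile_height = npipes;
        unsigned pitch_align, height_align;

        /* max() of macro tile width vs. group-derived width, in tiles,
         * then scaled to pixels by tile_width */
        pitch_align = ((group_size / tile_height) / (bpe * nsamples)) * nbanks;
        if (pitch_align < macro_tile_width)
            pitch_align = macro_tile_width;
        pitch_align *= tile_width;

        height_align = macro_tile_height * tile_height;

        /* prints: pitch_align=512 px, height_align=16 px */
        printf("pitch_align=%u px, height_align=%u px\n", pitch_align, height_align);
        return 0;
    }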
+6
drivers/gpu/drm/radeon/r600d.h
··· 51 #define PTE_READABLE (1 << 5) 52 #define PTE_WRITEABLE (1 << 6) 53 54 /* Registers */ 55 #define ARB_POP 0x2418 56 #define ENABLE_TC128 (1 << 30)
··· 51 #define PTE_READABLE (1 << 5) 52 #define PTE_WRITEABLE (1 << 6) 53 54 + /* tiling bits */ 55 + #define ARRAY_LINEAR_GENERAL 0x00000000 56 + #define ARRAY_LINEAR_ALIGNED 0x00000001 57 + #define ARRAY_1D_TILED_THIN1 0x00000002 58 + #define ARRAY_2D_TILED_THIN1 0x00000004 59 + 60 /* Registers */ 61 #define ARB_POP 0x2418 62 #define ENABLE_TC128 (1 << 30)
+4
drivers/gpu/drm/radeon/radeon.h
··· 1262 (rdev->family == CHIP_RS400) || \ 1263 (rdev->family == CHIP_RS480)) 1264 #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) 1265 #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) 1266 #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) 1267 #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
··· 1262 (rdev->family == CHIP_RS400) || \ 1263 (rdev->family == CHIP_RS480)) 1264 #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) 1265 + #define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \ 1266 + (rdev->family == CHIP_RS690) || \ 1267 + (rdev->family == CHIP_RS740) || \ 1268 + (rdev->family >= CHIP_R600)) 1269 #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) 1270 #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) 1271 #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
+2 -2
drivers/gpu/drm/radeon/radeon_benchmark.c
··· 41 42 size = bsize; 43 n = 1024; 44 - r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj); 45 if (r) { 46 goto out_cleanup; 47 } ··· 53 if (r) { 54 goto out_cleanup; 55 } 56 - r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj); 57 if (r) { 58 goto out_cleanup; 59 }
··· 41 42 size = bsize; 43 n = 1024; 44 + r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, sdomain, &sobj); 45 if (r) { 46 goto out_cleanup; 47 } ··· 53 if (r) { 54 goto out_cleanup; 55 } 56 + r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, ddomain, &dobj); 57 if (r) { 58 goto out_cleanup; 59 }
+13
drivers/gpu/drm/radeon/radeon_combios.c
··· 571 } 572 573 if (clk_mask && data_mask) { 574 i2c.mask_clk_mask = clk_mask; 575 i2c.mask_data_mask = data_mask; 576 i2c.a_clk_mask = clk_mask; ··· 580 i2c.en_data_mask = data_mask; 581 i2c.y_clk_mask = clk_mask; 582 i2c.y_data_mask = data_mask; 583 } else { 584 i2c.mask_clk_mask = RADEON_GPIO_EN_1; 585 i2c.mask_data_mask = RADEON_GPIO_EN_0; 586 i2c.a_clk_mask = RADEON_GPIO_A_1;
··· 571 } 572 573 if (clk_mask && data_mask) { 574 + /* system specific masks */ 575 i2c.mask_clk_mask = clk_mask; 576 i2c.mask_data_mask = data_mask; 577 i2c.a_clk_mask = clk_mask; ··· 579 i2c.en_data_mask = data_mask; 580 i2c.y_clk_mask = clk_mask; 581 i2c.y_data_mask = data_mask; 582 + } else if ((ddc_line == RADEON_GPIOPAD_MASK) || 583 + (ddc_line == RADEON_MDGPIO_MASK)) { 584 + /* default gpiopad masks */ 585 + i2c.mask_clk_mask = (0x20 << 8); 586 + i2c.mask_data_mask = 0x80; 587 + i2c.a_clk_mask = (0x20 << 8); 588 + i2c.a_data_mask = 0x80; 589 + i2c.en_clk_mask = (0x20 << 8); 590 + i2c.en_data_mask = 0x80; 591 + i2c.y_clk_mask = (0x20 << 8); 592 + i2c.y_data_mask = 0x80; 593 } else { 594 + /* default masks for ddc pads */ 595 i2c.mask_clk_mask = RADEON_GPIO_EN_1; 596 i2c.mask_data_mask = RADEON_GPIO_EN_0; 597 i2c.a_clk_mask = RADEON_GPIO_A_1;
+18
drivers/gpu/drm/radeon/radeon_connectors.c
··· 1008 static int radeon_dp_get_modes(struct drm_connector *connector) 1009 { 1010 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1011 int ret; 1012 1013 ret = radeon_ddc_get_modes(radeon_connector); 1014 return ret; 1015 } 1016 ··· 1041 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1042 /* eDP is always DP */ 1043 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; 1044 if (radeon_dp_getdpcd(radeon_connector)) 1045 ret = connector_status_connected; 1046 } else { 1047 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); 1048 if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
··· 1008 static int radeon_dp_get_modes(struct drm_connector *connector) 1009 { 1010 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1011 + struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; 1012 int ret; 1013 1014 + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1015 + if (!radeon_dig_connector->edp_on) 1016 + atombios_set_edp_panel_power(connector, 1017 + ATOM_TRANSMITTER_ACTION_POWER_ON); 1018 + } 1019 ret = radeon_ddc_get_modes(radeon_connector); 1020 + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1021 + if (!radeon_dig_connector->edp_on) 1022 + atombios_set_edp_panel_power(connector, 1023 + ATOM_TRANSMITTER_ACTION_POWER_OFF); 1024 + } 1025 + 1026 return ret; 1027 } 1028 ··· 1029 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1030 /* eDP is always DP */ 1031 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; 1032 + if (!radeon_dig_connector->edp_on) 1033 + atombios_set_edp_panel_power(connector, 1034 + ATOM_TRANSMITTER_ACTION_POWER_ON); 1035 if (radeon_dp_getdpcd(radeon_connector)) 1036 ret = connector_status_connected; 1037 + if (!radeon_dig_connector->edp_on) 1038 + atombios_set_edp_panel_power(connector, 1039 + ATOM_TRANSMITTER_ACTION_POWER_OFF); 1040 } else { 1041 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); 1042 if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
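Note: both hunks above follow the same pattern: power the eDP panel up only for the duration of the DDC/AUX transaction when it was not already on, then restore the previous state. A minimal sketch of that bracketing; edp_powered_call() and the panel_power()/read_edid() stubs are invented for illustration and do not exist in the driver:

    #include <stdbool.h>
    #include <stdio.h>

    static bool edp_on; /* mirrors radeon_dig_connector->edp_on */

    static void panel_power(bool on) { printf("panel %s\n", on ? "on" : "off"); }
    static int read_edid(void) { printf("DDC transaction\n"); return 0; }

    static int edp_powered_call(int (*fn)(void))
    {
        bool was_on = edp_on;
        int ret;

        if (!was_on)
            panel_power(true);  /* POWER_ON only if the panel was off */
        ret = fn();
        if (!was_on)
            panel_power(false); /* restore the prior state afterwards */
        return ret;
    }

    int main(void) { return edp_powered_call(read_edid); }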
+1 -1
drivers/gpu/drm/radeon/radeon_device.c
··· 180 int r; 181 182 if (rdev->wb.wb_obj == NULL) { 183 - r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, 184 RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); 185 if (r) { 186 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
··· 180 int r; 181 182 if (rdev->wb.wb_obj == NULL) { 183 + r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, 184 RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); 185 if (r) { 186 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
+307 -47
drivers/gpu/drm/radeon/radeon_encoders.c
··· 176 return false; 177 } 178 } 179 void 180 radeon_link_encoder_connector(struct drm_device *dev) 181 { ··· 225 radeon_connector = to_radeon_connector(connector); 226 if (radeon_encoder->active_device & radeon_connector->devices) 227 return connector; 228 } 229 return NULL; 230 } ··· 448 449 } 450 451 void 452 - atombios_external_tmds_setup(struct drm_encoder *encoder, int action) 453 { 454 struct drm_device *dev = encoder->dev; 455 struct radeon_device *rdev = dev->dev_private; 456 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 457 - ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION args; 458 - int index = 0; 459 460 memset(&args, 0, sizeof(args)); 461 462 - index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); 463 464 - args.sXTmdsEncoder.ucEnable = action; 465 466 - if (radeon_encoder->pixel_clock > 165000) 467 - args.sXTmdsEncoder.ucMisc = PANEL_ENCODER_MISC_DUAL; 468 469 - /*if (pScrn->rgbBits == 8)*/ 470 - args.sXTmdsEncoder.ucMisc |= (1 << 1); 471 - 472 - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 473 - 474 - } 475 - 476 - static void 477 - atombios_ddia_setup(struct drm_encoder *encoder, int action) 478 - { 479 - struct drm_device *dev = encoder->dev; 480 - struct radeon_device *rdev = dev->dev_private; 481 - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 482 - DVO_ENCODER_CONTROL_PS_ALLOCATION args; 483 - int index = 0; 484 - 485 - memset(&args, 0, sizeof(args)); 486 - 487 - index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); 488 - 489 - args.sDVOEncoder.ucAction = action; 490 - args.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 491 - 492 - if (radeon_encoder->pixel_clock > 165000) 493 - args.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute = PANEL_ENCODER_MISC_DUAL; 494 495 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 496 - 497 } 498 499 union lvds_encoder_control { ··· 551 if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) 552 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; 553 if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) 554 - args.v1.ucMisc |= (1 << 1); 555 } else { 556 if (dig->linkb) 557 args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; 558 if (radeon_encoder->pixel_clock > 165000) 559 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; 560 /*if (pScrn->rgbBits == 8) */ 561 - args.v1.ucMisc |= (1 << 1); 562 } 563 break; 564 case 2: ··· 614 int 615 atombios_get_encoder_mode(struct drm_encoder *encoder) 616 { 617 struct drm_device *dev = encoder->dev; 618 struct radeon_device *rdev = dev->dev_private; 619 struct drm_connector *connector; ··· 622 struct radeon_connector_atom_dig *dig_connector; 623 624 connector = radeon_get_connector_for_encoder(encoder); 625 - if (!connector) 626 - return 0; 627 - 628 radeon_connector = to_radeon_connector(connector); 629 630 switch (connector->connector_type) { ··· 865 memset(&args, 0, sizeof(args)); 866 867 switch (radeon_encoder->encoder_id) { 868 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 869 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 870 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: ··· 1012 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1013 } 1014 1015 static void 1016 atombios_yuv_setup(struct drm_encoder *encoder, bool enable) 1017 { ··· 1154 struct drm_device *dev = encoder->dev; 1155 struct radeon_device *rdev = dev->dev_private; 1156 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1157 DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; 1158 int index = 0; 1159 bool is_dig = 
false; ··· 1177 break; 1178 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 1179 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1180 - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1181 index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); 1182 break; 1183 case ENCODER_OBJECT_ID_INTERNAL_LVDS: 1184 index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); ··· 1221 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { 1222 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1223 1224 dp_link_train(encoder, connector); 1225 if (ASIC_IS_DCE4(rdev)) 1226 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); 1227 } 1228 break; 1229 case DRM_MODE_DPMS_STANDBY: 1230 case DRM_MODE_DPMS_SUSPEND: 1231 case DRM_MODE_DPMS_OFF: 1232 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); 1233 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { 1234 if (ASIC_IS_DCE4(rdev)) 1235 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); 1236 } 1237 break; 1238 } 1239 } else { 1240 switch (mode) { 1241 case DRM_MODE_DPMS_ON: 1242 args.ucAction = ATOM_ENABLE; 1243 break; 1244 case DRM_MODE_DPMS_STANDBY: 1245 case DRM_MODE_DPMS_SUSPEND: 1246 case DRM_MODE_DPMS_OFF: 1247 args.ucAction = ATOM_DISABLE; 1248 break; 1249 } 1250 - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1251 } 1252 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 1253 1254 } ··· 1432 break; 1433 default: 1434 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); 1435 - break; 1436 } 1437 1438 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); ··· 1547 struct drm_device *dev = encoder->dev; 1548 struct radeon_device *rdev = dev->dev_private; 1549 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1550 1551 radeon_encoder->pixel_clock = adjusted_mode->clock; 1552 ··· 1591 } 1592 break; 1593 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1594 - atombios_ddia_setup(encoder, ATOM_ENABLE); 1595 - break; 1596 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 1597 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1598 - atombios_external_tmds_setup(encoder, ATOM_ENABLE); 1599 break; 1600 case ENCODER_OBJECT_ID_INTERNAL_DAC1: 1601 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: ··· 1608 } 1609 break; 1610 } 1611 atombios_apply_encoder_quirks(encoder, adjusted_mode); 1612 1613 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { ··· 1789 } 1790 break; 1791 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1792 - atombios_ddia_setup(encoder, ATOM_DISABLE); 1793 - break; 1794 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 1795 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1796 - atombios_external_tmds_setup(encoder, ATOM_DISABLE); 1797 break; 1798 case ENCODER_OBJECT_ID_INTERNAL_DAC1: 1799 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: ··· 1812 } 1813 radeon_encoder->active_device = 0; 1814 } 1815 1816 static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = { 1817 .dpms = radeon_atom_encoder_dpms, ··· 1969 radeon_encoder->devices = supported_device; 1970 radeon_encoder->rmx_type = RMX_OFF; 1971 radeon_encoder->underscan_type = UNDERSCAN_OFF; 1972 1973 switch (radeon_encoder->encoder_id) { 1974 case ENCODER_OBJECT_ID_INTERNAL_LVDS: ··· 2011 radeon_encoder->rmx_type = RMX_FULL; 2012 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); 2013 radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); 2014 } else { 2015 
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); 2016 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); ··· 2021 radeon_encoder->underscan_type = UNDERSCAN_AUTO; 2022 } 2023 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); 2024 break; 2025 } 2026 }
··· 176 return false; 177 } 178 } 179 + 180 void 181 radeon_link_encoder_connector(struct drm_device *dev) 182 { ··· 224 radeon_connector = to_radeon_connector(connector); 225 if (radeon_encoder->active_device & radeon_connector->devices) 226 return connector; 227 + } 228 + return NULL; 229 + } 230 + 231 + struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder) 232 + { 233 + struct drm_device *dev = encoder->dev; 234 + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 235 + struct drm_encoder *other_encoder; 236 + struct radeon_encoder *other_radeon_encoder; 237 + 238 + if (radeon_encoder->is_ext_encoder) 239 + return NULL; 240 + 241 + list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { 242 + if (other_encoder == encoder) 243 + continue; 244 + other_radeon_encoder = to_radeon_encoder(other_encoder); 245 + if (other_radeon_encoder->is_ext_encoder && 246 + (radeon_encoder->devices & other_radeon_encoder->devices)) 247 + return other_encoder; 248 } 249 return NULL; 250 } ··· 426 427 } 428 429 + union dvo_encoder_control { 430 + ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds; 431 + DVO_ENCODER_CONTROL_PS_ALLOCATION dvo; 432 + DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3; 433 + }; 434 + 435 void 436 + atombios_dvo_setup(struct drm_encoder *encoder, int action) 437 { 438 struct drm_device *dev = encoder->dev; 439 struct radeon_device *rdev = dev->dev_private; 440 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 441 + union dvo_encoder_control args; 442 + int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); 443 444 memset(&args, 0, sizeof(args)); 445 446 + if (ASIC_IS_DCE3(rdev)) { 447 + /* DCE3+ */ 448 + args.dvo_v3.ucAction = action; 449 + args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 450 + args.dvo_v3.ucDVOConfig = 0; /* XXX */ 451 + } else if (ASIC_IS_DCE2(rdev)) { 452 + /* DCE2 (pre-DCE3 R6xx, RS600/690/740) */ 453 + args.dvo.sDVOEncoder.ucAction = action; 454 + args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 455 + /* DFP1, CRT1, TV1 depending on the type of port */ 456 + args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX; 457 458 + if (radeon_encoder->pixel_clock > 165000) 459 + args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL; 460 + } else { 461 + /* R4xx, R5xx */ 462 + args.ext_tmds.sXTmdsEncoder.ucEnable = action; 463 464 + if (radeon_encoder->pixel_clock > 165000) 465 + args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL; 466 467 + /*if (pScrn->rgbBits == 8)*/ 468 + args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB; 469 + } 470 471 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 472 } 473 474 union lvds_encoder_control { ··· 532 if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) 533 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; 534 if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) 535 + args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; 536 } else { 537 if (dig->linkb) 538 args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; 539 if (radeon_encoder->pixel_clock > 165000) 540 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; 541 /*if (pScrn->rgbBits == 8) */ 542 + args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; 543 } 544 break; 545 case 2: ··· 595 int 596 atombios_get_encoder_mode(struct drm_encoder *encoder) 597 { 598 + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 599 struct drm_device *dev = encoder->dev; 600 struct radeon_device *rdev = dev->dev_private; 601
struct drm_connector *connector; ··· 602 struct radeon_connector_atom_dig *dig_connector; 603 604 connector = radeon_get_connector_for_encoder(encoder); 605 + if (!connector) { 606 + switch (radeon_encoder->encoder_id) { 607 + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 608 + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 609 + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 610 + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 611 + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 612 + return ATOM_ENCODER_MODE_DVI; 613 + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 614 + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 615 + default: 616 + return ATOM_ENCODER_MODE_CRT; 617 + } 618 + } 619 radeon_connector = to_radeon_connector(connector); 620 621 switch (connector->connector_type) { ··· 834 memset(&args, 0, sizeof(args)); 835 836 switch (radeon_encoder->encoder_id) { 837 + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 838 + index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); 839 + break; 840 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 841 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 842 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: ··· 978 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 979 } 980 981 + void 982 + atombios_set_edp_panel_power(struct drm_connector *connector, int action) 983 + { 984 + struct radeon_connector *radeon_connector = to_radeon_connector(connector); 985 + struct drm_device *dev = radeon_connector->base.dev; 986 + struct radeon_device *rdev = dev->dev_private; 987 + union dig_transmitter_control args; 988 + int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); 989 + uint8_t frev, crev; 990 + 991 + if (connector->connector_type != DRM_MODE_CONNECTOR_eDP) 992 + return; 993 + 994 + if (!ASIC_IS_DCE4(rdev)) 995 + return; 996 + 997 + if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) && 998 + (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) 999 + return; 1000 + 1001 + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) 1002 + return; 1003 + 1004 + memset(&args, 0, sizeof(args)); 1005 + 1006 + args.v1.ucAction = action; 1007 + 1008 + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1009 + } 1010 + 1011 + union external_encoder_control { 1012 + EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1; 1013 + }; 1014 + 1015 + static void 1016 + atombios_external_encoder_setup(struct drm_encoder *encoder, 1017 + struct drm_encoder *ext_encoder, 1018 + int action) 1019 + { 1020 + struct drm_device *dev = encoder->dev; 1021 + struct radeon_device *rdev = dev->dev_private; 1022 + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1023 + union external_encoder_control args; 1024 + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1025 + int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl); 1026 + u8 frev, crev; 1027 + int dp_clock = 0; 1028 + int dp_lane_count = 0; 1029 + int connector_object_id = 0; 1030 + 1031 + if (connector) { 1032 + struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1033 + struct radeon_connector_atom_dig *dig_connector = 1034 + radeon_connector->con_priv; 1035 + 1036 + dp_clock = dig_connector->dp_clock; 1037 + dp_lane_count = dig_connector->dp_lane_count; 1038 + connector_object_id = 1039 + (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; 1040 + } 1041 + 1042 + memset(&args, 0, sizeof(args)); 1043 + 1044 + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) 1045 +
return; 1046 + 1047 + switch (frev) { 1048 + case 1: 1049 + /* no params on frev 1 */ 1050 + break; 1051 + case 2: 1052 + switch (crev) { 1053 + case 1: 1054 + case 2: 1055 + args.v1.sDigEncoder.ucAction = action; 1056 + args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 1057 + args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); 1058 + 1059 + if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) { 1060 + if (dp_clock == 270000) 1061 + args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; 1062 + args.v1.sDigEncoder.ucLaneNum = dp_lane_count; 1063 + } else if (radeon_encoder->pixel_clock > 165000) 1064 + args.v1.sDigEncoder.ucLaneNum = 8; 1065 + else 1066 + args.v1.sDigEncoder.ucLaneNum = 4; 1067 + break; 1068 + default: 1069 + DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); 1070 + return; 1071 + } 1072 + break; 1073 + default: 1074 + DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); 1075 + return; 1076 + } 1077 + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1078 + } 1079 + 1080 static void 1081 atombios_yuv_setup(struct drm_encoder *encoder, bool enable) 1082 { ··· 1021 struct drm_device *dev = encoder->dev; 1022 struct radeon_device *rdev = dev->dev_private; 1023 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1024 + struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); 1025 DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; 1026 int index = 0; 1027 bool is_dig = false; ··· 1043 break; 1044 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 1045 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1046 index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); 1047 + break; 1048 + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1049 + if (ASIC_IS_DCE3(rdev)) 1050 + is_dig = true; 1051 + else 1052 + index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); 1053 break; 1054 case ENCODER_OBJECT_ID_INTERNAL_LVDS: 1055 index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); ··· 1082 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { 1083 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1084 1085 + if (connector && 1086 + (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { 1087 + struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1088 + struct radeon_connector_atom_dig *radeon_dig_connector = 1089 + radeon_connector->con_priv; 1090 + atombios_set_edp_panel_power(connector, 1091 + ATOM_TRANSMITTER_ACTION_POWER_ON); 1092 + radeon_dig_connector->edp_on = true; 1093 + } 1094 dp_link_train(encoder, connector); 1095 if (ASIC_IS_DCE4(rdev)) 1096 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); 1097 } 1098 + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 1099 + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); 1100 break; 1101 case DRM_MODE_DPMS_STANDBY: 1102 case DRM_MODE_DPMS_SUSPEND: 1103 case DRM_MODE_DPMS_OFF: 1104 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); 1105 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { 1106 + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1107 + 1108 if (ASIC_IS_DCE4(rdev)) 1109 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); 1110 + if (connector && 1111 + (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { 1112 + struct radeon_connector *radeon_connector = to_radeon_connector(connector); 
1113 + struct radeon_connector_atom_dig *radeon_dig_connector = 1114 + radeon_connector->con_priv; 1115 + atombios_set_edp_panel_power(connector, 1116 + ATOM_TRANSMITTER_ACTION_POWER_OFF); 1117 + radeon_dig_connector->edp_on = false; 1118 + } 1119 } 1120 + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 1121 + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); 1122 break; 1123 } 1124 } else { 1125 switch (mode) { 1126 case DRM_MODE_DPMS_ON: 1127 args.ucAction = ATOM_ENABLE; 1128 + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1129 + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 1130 + args.ucAction = ATOM_LCD_BLON; 1131 + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1132 + } 1133 break; 1134 case DRM_MODE_DPMS_STANDBY: 1135 case DRM_MODE_DPMS_SUSPEND: 1136 case DRM_MODE_DPMS_OFF: 1137 args.ucAction = ATOM_DISABLE; 1138 + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1139 + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 1140 + args.ucAction = ATOM_LCD_BLOFF; 1141 + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1142 + } 1143 break; 1144 } 1145 } 1146 + 1147 + if (ext_encoder) { 1148 + int action; 1149 + 1150 + switch (mode) { 1151 + case DRM_MODE_DPMS_ON: 1152 + default: 1153 + action = ATOM_ENABLE; 1154 + break; 1155 + case DRM_MODE_DPMS_STANDBY: 1156 + case DRM_MODE_DPMS_SUSPEND: 1157 + case DRM_MODE_DPMS_OFF: 1158 + action = ATOM_DISABLE; 1159 + break; 1160 + } 1161 + atombios_external_encoder_setup(encoder, ext_encoder, action); 1162 + } 1163 + 1164 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 1165 1166 } ··· 1242 break; 1243 default: 1244 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); 1245 + return; 1246 } 1247 1248 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); ··· 1357 struct drm_device *dev = encoder->dev; 1358 struct radeon_device *rdev = dev->dev_private; 1359 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1360 + struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); 1361 1362 radeon_encoder->pixel_clock = adjusted_mode->clock; 1363 ··· 1400 } 1401 break; 1402 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1403 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 1404 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1405 + atombios_dvo_setup(encoder, ATOM_ENABLE); 1406 break; 1407 case ENCODER_OBJECT_ID_INTERNAL_DAC1: 1408 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: ··· 1419 } 1420 break; 1421 } 1422 + 1423 + if (ext_encoder) { 1424 + atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); 1425 + } 1426 + 1427 atombios_apply_encoder_quirks(encoder, adjusted_mode); 1428 1429 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { ··· 1595 } 1596 break; 1597 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1598 case ENCODER_OBJECT_ID_INTERNAL_DVO1: 1599 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1600 + atombios_dvo_setup(encoder, ATOM_DISABLE); 1601 break; 1602 case ENCODER_OBJECT_ID_INTERNAL_DAC1: 1603 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: ··· 1620 } 1621 radeon_encoder->active_device = 0; 1622 } 1623 + 1624 + /* these are handled by the primary encoders */ 1625 + static void radeon_atom_ext_prepare(struct drm_encoder *encoder) 1626 + { 1627 + 1628 + } 1629 + 1630 + static void radeon_atom_ext_commit(struct drm_encoder *encoder) 1631 + { 1632 + 1633 + } 1634 + 1635 + static 
void 1636 + radeon_atom_ext_mode_set(struct drm_encoder *encoder, 1637 + struct drm_display_mode *mode, 1638 + struct drm_display_mode *adjusted_mode) 1639 + { 1640 + 1641 + } 1642 + 1643 + static void radeon_atom_ext_disable(struct drm_encoder *encoder) 1644 + { 1645 + 1646 + } 1647 + 1648 + static void 1649 + radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode) 1650 + { 1651 + 1652 + } 1653 + 1654 + static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder, 1655 + struct drm_display_mode *mode, 1656 + struct drm_display_mode *adjusted_mode) 1657 + { 1658 + return true; 1659 + } 1660 + 1661 + static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = { 1662 + .dpms = radeon_atom_ext_dpms, 1663 + .mode_fixup = radeon_atom_ext_mode_fixup, 1664 + .prepare = radeon_atom_ext_prepare, 1665 + .mode_set = radeon_atom_ext_mode_set, 1666 + .commit = radeon_atom_ext_commit, 1667 + .disable = radeon_atom_ext_disable, 1668 + /* no detect for TMDS/LVDS yet */ 1669 + }; 1670 1671 static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = { 1672 .dpms = radeon_atom_encoder_dpms, ··· 1730 radeon_encoder->devices = supported_device; 1731 radeon_encoder->rmx_type = RMX_OFF; 1732 radeon_encoder->underscan_type = UNDERSCAN_OFF; 1733 + radeon_encoder->is_ext_encoder = false; 1734 1735 switch (radeon_encoder->encoder_id) { 1736 case ENCODER_OBJECT_ID_INTERNAL_LVDS: ··· 1771 radeon_encoder->rmx_type = RMX_FULL; 1772 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); 1773 radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); 1774 + } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { 1775 + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); 1776 + radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); 1777 } else { 1778 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); 1779 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); ··· 1778 radeon_encoder->underscan_type = UNDERSCAN_AUTO; 1779 } 1780 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); 1781 + break; 1782 + case ENCODER_OBJECT_ID_SI170B: 1783 + case ENCODER_OBJECT_ID_CH7303: 1784 + case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: 1785 + case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: 1786 + case ENCODER_OBJECT_ID_TITFP513: 1787 + case ENCODER_OBJECT_ID_VT1623: 1788 + case ENCODER_OBJECT_ID_HDMI_SI1930: 1789 + /* these are handled by the primary encoders */ 1790 + radeon_encoder->is_ext_encoder = true; 1791 + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 1792 + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); 1793 + else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) 1794 + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); 1795 + else 1796 + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); 1797 + drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs); 1798 break; 1799 } 1800 }
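Note: the external encoder support above pairs an internal encoder with an external one through the shared devices bitmask (see radeon_atom_get_external_encoder()). A toy restatement of that matching rule; the encoder names, struct layout and bit values here are invented for illustration:

    #include <stdio.h>

    struct enc { const char *name; unsigned devices; int is_ext; };

    int main(void)
    {
        struct enc list[] = {
            { "UNIPHY (internal)", 0x1, 0 },
            { "SI170B (external)", 0x1, 1 },
            { "DAC1 (internal)",   0x4, 0 },
        };
        struct enc *primary = &list[0];

        /* an internal encoder drives the external encoder that shares
         * at least one device bit with it */
        for (int i = 0; i < 3; i++) {
            if (&list[i] == primary || !list[i].is_ext)
                continue;
            if (primary->devices & list[i].devices)
                printf("%s drives %s\n", primary->name, list[i].name);
        }
        return 0;
    }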
+2 -2
drivers/gpu/drm/radeon/radeon_gart.c
··· 79 80 if (rdev->gart.table.vram.robj == NULL) { 81 r = radeon_bo_create(rdev, NULL, rdev->gart.table_size, 82 - true, RADEON_GEM_DOMAIN_VRAM, 83 - &rdev->gart.table.vram.robj); 84 if (r) { 85 return r; 86 }
··· 79 80 if (rdev->gart.table.vram.robj == NULL) { 81 r = radeon_bo_create(rdev, NULL, rdev->gart.table_size, 82 + PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 83 + &rdev->gart.table.vram.robj); 84 if (r) { 85 return r; 86 }
+1 -1
drivers/gpu/drm/radeon/radeon_gem.c
··· 67 if (alignment < PAGE_SIZE) { 68 alignment = PAGE_SIZE; 69 } 70 - r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj); 71 if (r) { 72 if (r != -ERESTARTSYS) 73 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
··· 67 if (alignment < PAGE_SIZE) { 68 alignment = PAGE_SIZE; 69 } 70 + r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj); 71 if (r) { 72 if (r != -ERESTARTSYS) 73 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
+6 -2
drivers/gpu/drm/radeon/radeon_i2c.c
··· 896 ((rdev->family <= CHIP_RS480) || 897 ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) { 898 /* set the radeon hw i2c adapter */ 899 - sprintf(i2c->adapter.name, "Radeon i2c hw bus %s", name); 900 i2c->adapter.algo = &radeon_i2c_algo; 901 ret = i2c_add_adapter(&i2c->adapter); 902 if (ret) { ··· 906 } 907 } else { 908 /* set the radeon bit adapter */ 909 - sprintf(i2c->adapter.name, "Radeon i2c bit bus %s", name); 910 i2c->adapter.algo_data = &i2c->algo.bit; 911 i2c->algo.bit.pre_xfer = pre_xfer; 912 i2c->algo.bit.post_xfer = post_xfer; ··· 948 i2c->rec = *rec; 949 i2c->adapter.owner = THIS_MODULE; 950 i2c->dev = dev; 951 i2c_set_adapdata(&i2c->adapter, i2c); 952 i2c->adapter.algo_data = &i2c->algo.dp; 953 i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
··· 896 ((rdev->family <= CHIP_RS480) || 897 ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) { 898 /* set the radeon hw i2c adapter */ 899 + snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), 900 + "Radeon i2c hw bus %s", name); 901 i2c->adapter.algo = &radeon_i2c_algo; 902 ret = i2c_add_adapter(&i2c->adapter); 903 if (ret) { ··· 905 } 906 } else { 907 /* set the radeon bit adapter */ 908 + snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), 909 + "Radeon i2c bit bus %s", name); 910 i2c->adapter.algo_data = &i2c->algo.bit; 911 i2c->algo.bit.pre_xfer = pre_xfer; 912 i2c->algo.bit.post_xfer = post_xfer; ··· 946 i2c->rec = *rec; 947 i2c->adapter.owner = THIS_MODULE; 948 i2c->dev = dev; 949 + snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), 950 + "Radeon aux bus %s", name); 951 i2c_set_adapdata(&i2c->adapter, i2c); 952 i2c->adapter.algo_data = &i2c->algo.dp; 953 i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
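Note: the sprintf-to-snprintf conversion above matters because i2c_adapter.name is a fixed-size char array; snprintf truncates rather than overflowing and always NUL-terminates. A user-space sketch (the 48-byte size is an assumption standing in for the real field):

    #include <stdio.h>

    int main(void)
    {
        char name[48]; /* stand-in for the fixed-size i2c_adapter.name */
        const char *bus = "VGA_DDC";

        /* never writes more than sizeof(name) bytes, including the NUL */
        snprintf(name, sizeof(name), "Radeon i2c bit bus %s", bus);
        printf("%s\n", name);
        return 0;
    }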
+2 -2
drivers/gpu/drm/radeon/radeon_irq.c
··· 76 default: 77 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 78 crtc); 79 - return EINVAL; 80 } 81 } else { 82 switch (crtc) { ··· 89 default: 90 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 91 crtc); 92 - return EINVAL; 93 } 94 } 95
··· 76 default: 77 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 78 crtc); 79 + return -EINVAL; 80 } 81 } else { 82 switch (crtc) { ··· 89 default: 90 DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 91 crtc); 92 + return -EINVAL; 93 } 94 } 95
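Note: the sign fix above matters because the vblank hook's return value is interpreted as a negative errno; a bare positive EINVAL looks like success to any caller that tests ret < 0. A minimal illustration:

    #include <errno.h>
    #include <stdio.h>

    static int enable_vblank(int crtc)
    {
        if (crtc > 1)
            return -EINVAL; /* negative errno, as callers expect */
        return 0;
    }

    int main(void)
    {
        /* with a bare positive EINVAL this branch would never be taken */
        if (enable_vblank(2) < 0)
            printf("vblank enable rejected\n");
        return 0;
    }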
+1 -1
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
··· 670 671 if (rdev->is_atom_bios) { 672 radeon_encoder->pixel_clock = adjusted_mode->clock; 673 - atombios_external_tmds_setup(encoder, ATOM_ENABLE); 674 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); 675 } else { 676 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
··· 670 671 if (rdev->is_atom_bios) { 672 radeon_encoder->pixel_clock = adjusted_mode->clock; 673 + atombios_dvo_setup(encoder, ATOM_ENABLE); 674 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); 675 } else { 676 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+4 -1
drivers/gpu/drm/radeon/radeon_mode.h
··· 375 int hdmi_config_offset; 376 int hdmi_audio_workaround; 377 int hdmi_buffer_status; 378 }; 379 380 struct radeon_connector_atom_dig { ··· 386 u8 dp_sink_type; 387 int dp_clock; 388 int dp_lane_count; 389 }; 390 391 struct radeon_gpio_rec { ··· 525 struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); 526 struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); 527 struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); 528 - extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action); 529 extern void atombios_digital_setup(struct drm_encoder *encoder, int action); 530 extern int atombios_get_encoder_mode(struct drm_encoder *encoder); 531 extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); 532 533 extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
··· 375 int hdmi_config_offset; 376 int hdmi_audio_workaround; 377 int hdmi_buffer_status; 378 + bool is_ext_encoder; 379 }; 380 381 struct radeon_connector_atom_dig { ··· 385 u8 dp_sink_type; 386 int dp_clock; 387 int dp_lane_count; 388 + bool edp_on; 389 }; 390 391 struct radeon_gpio_rec { ··· 523 struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); 524 struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); 525 struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); 526 + extern void atombios_dvo_setup(struct drm_encoder *encoder, int action); 527 extern void atombios_digital_setup(struct drm_encoder *encoder, int action); 528 extern int atombios_get_encoder_mode(struct drm_encoder *encoder); 529 + extern void atombios_set_edp_panel_power(struct drm_connector *connector, int action); 530 extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); 531 532 extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
+4 -3
drivers/gpu/drm/radeon/radeon_object.c
··· 86 } 87 88 int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, 89 - unsigned long size, bool kernel, u32 domain, 90 - struct radeon_bo **bo_ptr) 91 { 92 struct radeon_bo *bo; 93 enum ttm_bo_type type; 94 int r; 95 96 if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { ··· 116 /* Kernel allocation are uninterruptible */ 117 mutex_lock(&rdev->vram_mutex); 118 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, 119 - &bo->placement, 0, 0, !kernel, NULL, size, 120 &radeon_ttm_bo_destroy); 121 mutex_unlock(&rdev->vram_mutex); 122 if (unlikely(r != 0)) {
··· 86 } 87 88 int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, 89 + unsigned long size, int byte_align, bool kernel, u32 domain, 90 + struct radeon_bo **bo_ptr) 91 { 92 struct radeon_bo *bo; 93 enum ttm_bo_type type; 94 + int page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; 95 int r; 96 97 if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { ··· 115 /* Kernel allocation are uninterruptible */ 116 mutex_lock(&rdev->vram_mutex); 117 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, 118 + &bo->placement, page_align, 0, !kernel, NULL, size, 119 &radeon_ttm_bo_destroy); 120 mutex_unlock(&rdev->vram_mutex); 121 if (unlikely(r != 0)) {
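Note: the new byte_align argument is reduced to whole pages before it reaches ttm_bo_init(), so any request up to one page collapses to a single-page alignment. A sketch of the arithmetic under the common 4 KiB page assumption (PAGE_SIZE/PAGE_SHIFT values are illustrative):

    #include <stdio.h>

    #define PAGE_SIZE  4096UL
    #define PAGE_SHIFT 12

    static unsigned long roundup_to(unsigned long x, unsigned long to)
    {
        return ((x + to - 1) / to) * to; /* same result as the kernel's roundup() */
    }

    int main(void)
    {
        unsigned long aligns[] = { 1, 256, 4096, 8192 };

        /* prints 1, 1, 1, 2 pages respectively */
        for (int i = 0; i < 4; i++)
            printf("byte_align=%lu -> page_align=%lu\n", aligns[i],
                   roundup_to(aligns[i], PAGE_SIZE) >> PAGE_SHIFT);
        return 0;
    }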
+4 -3
drivers/gpu/drm/radeon/radeon_object.h
··· 137 }
138
139 extern int radeon_bo_create(struct radeon_device *rdev,
140 - struct drm_gem_object *gobj, unsigned long size,
141 - bool kernel, u32 domain,
142 - struct radeon_bo **bo_ptr);
143 extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
144 extern void radeon_bo_kunmap(struct radeon_bo *bo);
145 extern void radeon_bo_unref(struct radeon_bo **bo);
··· 137 }
138
139 extern int radeon_bo_create(struct radeon_device *rdev,
140 + struct drm_gem_object *gobj, unsigned long size,
141 + int byte_align,
142 + bool kernel, u32 domain,
143 + struct radeon_bo **bo_ptr);
144 extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
145 extern void radeon_bo_kunmap(struct radeon_bo *bo);
146 extern void radeon_bo_unref(struct radeon_bo **bo);
+3 -3
drivers/gpu/drm/radeon/radeon_ring.c
··· 176 INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
177 /* Allocate 1M object buffer */
178 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
179 - true, RADEON_GEM_DOMAIN_GTT,
180 - &rdev->ib_pool.robj);
181 if (r) {
182 DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
183 return r;
··· 332 rdev->cp.ring_size = ring_size;
333 /* Allocate ring buffer */
334 if (rdev->cp.ring_obj == NULL) {
335 - r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
336 RADEON_GEM_DOMAIN_GTT,
337 &rdev->cp.ring_obj);
338 if (r) {
··· 176 INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
177 /* Allocate 1M object buffer */
178 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
179 + PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
180 + &rdev->ib_pool.robj);
181 if (r) {
182 DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
183 return r;
··· 332 rdev->cp.ring_size = ring_size;
333 /* Allocate ring buffer */
334 if (rdev->cp.ring_obj == NULL) {
335 + r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, PAGE_SIZE, true,
336 RADEON_GEM_DOMAIN_GTT,
337 &rdev->cp.ring_obj);
338 if (r) {
+2 -2
drivers/gpu/drm/radeon/radeon_test.c
··· 52 goto out_cleanup;
53 }
54
55 - r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
56 &vram_obj);
57 if (r) {
58 DRM_ERROR("Failed to create VRAM object\n");
··· 71 void **gtt_start, **gtt_end;
72 void **vram_start, **vram_end;
73
74 - r = radeon_bo_create(rdev, NULL, size, true,
75 RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
76 if (r) {
77 DRM_ERROR("Failed to create GTT object %d\n", i);
··· 52 goto out_cleanup;
53 }
54
55 + r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
56 &vram_obj);
57 if (r) {
58 DRM_ERROR("Failed to create VRAM object\n");
··· 71 void **gtt_start, **gtt_end;
72 void **vram_start, **vram_end;
73
74 + r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true,
75 RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
76 if (r) {
77 DRM_ERROR("Failed to create GTT object %d\n", i);
+1 -1
drivers/gpu/drm/radeon/radeon_ttm.c
··· 529 DRM_ERROR("Failed initializing VRAM heap.\n");
530 return r;
531 }
532 - r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
533 RADEON_GEM_DOMAIN_VRAM,
534 &rdev->stollen_vga_memory);
535 if (r) {
··· 529 DRM_ERROR("Failed initializing VRAM heap.\n");
530 return r;
531 }
532 + r = radeon_bo_create(rdev, NULL, 256 * 1024, PAGE_SIZE, true,
533 RADEON_GEM_DOMAIN_VRAM,
534 &rdev->stollen_vga_memory);
535 if (r) {
+2 -2
drivers/gpu/drm/radeon/rv770.c
··· 915
916 if (rdev->vram_scratch.robj == NULL) {
917 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
918 - true, RADEON_GEM_DOMAIN_VRAM,
919 - &rdev->vram_scratch.robj);
920 if (r) {
921 return r;
922 }
··· 915
916 if (rdev->vram_scratch.robj == NULL) {
917 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
918 + PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
919 + &rdev->vram_scratch.robj);
920 if (r) {
921 return r;
922 }
+11
drivers/gpu/drm/ttm/ttm_bo.c
··· 224 int ret;
225
226 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
227 if (use_sequence && bo->seq_valid &&
228 (sequence - bo->val_seq < (1 << 31))) {
229 return -EAGAIN;
··· 244 }
245
246 if (use_sequence) {
247 bo->val_seq = sequence;
248 bo->seq_valid = true;
249 } else {
··· 224 int ret;
225
226 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
227 + /**
228 + * Deadlock avoidance for multi-bo reserving.
229 + */
230 if (use_sequence && bo->seq_valid &&
231 (sequence - bo->val_seq < (1 << 31))) {
232 return -EAGAIN;
··· 241 }
242
243 if (use_sequence) {
244 + /**
245 + * Wake up waiters that may need to recheck for deadlock,
246 + * if we decreased the sequence number.
247 + */
248 + if (unlikely((bo->val_seq - sequence < (1 << 31))
249 + || !bo->seq_valid))
250 + wake_up_all(&bo->event_queue);
251 +
252 bo->val_seq = sequence;
253 bo->seq_valid = true;
254 } else {
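The back-off test relies on unsigned wraparound: sequence - bo->val_seq < (1 << 31) reads as "the incoming sequence is not older than the recorded one" even when the 32-bit counter wraps, and the added wake_up_all() makes sleepers re-run that test whenever the recorded sequence can move backwards. A standalone check of the comparison (the function name is ours, not TTM's):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static bool seq_not_older(uint32_t a, uint32_t b)
	{
		/* Mirrors (sequence - bo->val_seq < (1 << 31)): true when a is
		 * at most 2^31 - 1 steps ahead of b, even across wraparound. */
		return (uint32_t)(a - b) < (1u << 31);
	}

	int main(void)
	{
		printf("%d\n", seq_not_older(5, 3));           /* 1: 5 is newer */
		printf("%d\n", seq_not_older(3, 5));           /* 0: 3 is older */
		printf("%d\n", seq_not_older(1, 0xfffffffeu)); /* 1: wrapped, still newer */
		return 0;
	}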
+8 -6
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 862 &vmw_vram_sys_placement, true,
863 &vmw_user_dmabuf_destroy);
864 if (unlikely(ret != 0))
865 - return ret;
866
867 tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
868 ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
··· 870 false,
871 ttm_buffer_type,
872 &vmw_user_dmabuf_release, NULL);
873 - if (unlikely(ret != 0)) {
874 - ttm_bo_unref(&tmp);
875 - } else {
876 rep->handle = vmw_user_bo->base.hash.key;
877 rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
878 rep->cur_gmr_id = vmw_user_bo->base.hash.key;
879 rep->cur_gmr_offset = 0;
880 }
881 - ttm_bo_unref(&tmp);
882
883 ttm_read_unlock(&vmaster->lock);
884
885 - return 0;
886 }
887
888 int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
··· 862 &vmw_vram_sys_placement, true,
863 &vmw_user_dmabuf_destroy);
864 if (unlikely(ret != 0))
865 + goto out_no_dmabuf;
866
867 tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
868 ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
··· 870 false,
871 ttm_buffer_type,
872 &vmw_user_dmabuf_release, NULL);
873 + if (unlikely(ret != 0))
874 + goto out_no_base_object;
875 + else {
876 rep->handle = vmw_user_bo->base.hash.key;
877 rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
878 rep->cur_gmr_id = vmw_user_bo->base.hash.key;
879 rep->cur_gmr_offset = 0;
880 }
881
882 + out_no_base_object:
883 + ttm_bo_unref(&tmp);
884 + out_no_dmabuf:
885 ttm_read_unlock(&vmaster->lock);
886
887 + return ret;
888 }
889
890 int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
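Before the fix, the ioctl returned 0 even when ttm_base_object_init() failed; the rewrite switches to the usual kernel goto-unwind so each label releases exactly what was acquired and the real error is propagated. A self-contained toy showing the same shape, with hypothetical alloc_a/alloc_b/release_a helpers standing in for vmw_dmabuf_init, ttm_base_object_init and ttm_bo_unref:

	#include <errno.h>
	#include <stdio.h>

	static int alloc_a(void) { return 0; }        /* stand-in: always succeeds */
	static int alloc_b(void) { return -EINVAL; }  /* stand-in: forced failure */
	static void release_a(void) { puts("release_a"); }

	static int create_user_bo(void)
	{
		int ret;

		ret = alloc_a();
		if (ret != 0)
			goto out_no_a;

		ret = alloc_b();
		if (ret != 0)
			goto out_no_b;

		return 0;       /* success: both resources handed off */

	out_no_b:
		release_a();    /* undo only what was acquired */
	out_no_a:
		return ret;     /* propagate the real error, not 0 */
	}

	int main(void)
	{
		printf("create_user_bo() = %d\n", create_user_bo());
		return 0;
	}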
+7
include/drm/nouveau_drm.h
··· 80 #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
81 #define NOUVEAU_GETPARAM_GRAPH_UNITS 13
82 #define NOUVEAU_GETPARAM_PTIMER_TIME 14
83 struct drm_nouveau_getparam {
84 uint64_t param;
85 uint64_t value;
··· 95 #define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
96 #define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
97 #define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
98
99 struct drm_nouveau_gem_info {
100 uint32_t handle;
··· 80 #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
81 #define NOUVEAU_GETPARAM_GRAPH_UNITS 13
82 #define NOUVEAU_GETPARAM_PTIMER_TIME 14
83 + #define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
84 struct drm_nouveau_getparam {
85 uint64_t param;
86 uint64_t value;
··· 94 #define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
95 #define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
96 #define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
97 +
98 + #define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
99 + #define NOUVEAU_GEM_TILE_16BPP 0x00000001
100 + #define NOUVEAU_GEM_TILE_32BPP 0x00000002
101 + #define NOUVEAU_GEM_TILE_ZETA 0x00000004
102 + #define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008
103
104 struct drm_nouveau_gem_info {
105 uint32_t handle;
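The mask value pins the tiling layout to bits 8-15 of tile_flags while the low bits remain individual booleans. A standalone decode of that split; the bits-8..15 reading follows directly from the mask value, while the sample flags value and the output format are illustrative:

	#include <stdint.h>
	#include <stdio.h>

	#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
	#define NOUVEAU_GEM_TILE_ZETA        0x00000004
	#define NOUVEAU_GEM_TILE_NONCONTIG   0x00000008

	int main(void)
	{
		uint32_t tile_flags = 0x00002200 | NOUVEAU_GEM_TILE_ZETA;

		/* layout=0x22 zeta=1 noncontig=0 for the sample value above */
		printf("layout=0x%02x zeta=%d noncontig=%d\n",
		       (unsigned)((tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8),
		       !!(tile_flags & NOUVEAU_GEM_TILE_ZETA),
		       !!(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG));
		return 0;
	}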