Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
drm/radeon/kms: pll tweaks for r7xx
drm/nouveau: fix allocation of notifier object
drm/nouveau: fix notifier memory corruption bug
drm/nouveau: fix pinning of notifier block
drm/nouveau: populate ttm_alloced with false, when it's not
drm/nouveau: fix nv30 pcie boards
drm/nouveau: split ramin_lock into two locks, one hardirq safe
drm/radeon/kms: adjust evergreen display watermark setup
drm/radeon/kms: add connectors even if i2c fails
drm/radeon/kms: fix bad shift in atom iio table parser

16 files changed, 102 insertions(+), 92 deletions(-)
+1 -1
drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -83,7 +83,7 @@
 		return ret;
 
 	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
-	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
+	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
				     &chan->m2mf_ntfy);
 	if (ret)
 		return ret;
+3
drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -682,6 +682,9 @@
 	/* For PFIFO and PGRAPH. */
 	spinlock_t context_switch_lock;
 
+	/* VM/PRAMIN flush, legacy PRAMIN aperture */
+	spinlock_t vm_lock;
+
 	/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
 	struct nouveau_ramht  *ramht;
 	struct nouveau_gpuobj *ramfc;
+2 -2
drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -181,13 +181,13 @@
 		OUT_RING  (chan, 0);
 	}
 
-	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
+	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
 	FIRE_RING(chan);
 	mutex_unlock(&chan->mutex);
 
 	ret = -EBUSY;
 	for (i = 0; i < 100000; i++) {
-		if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) {
+		if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
 			ret = 0;
 			break;
 		}
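nouveau_bo_wr32()/rd32() index the mapped notifier BO as an array of 32-bit words, while chan->m2mf_ntfy is a byte offset into it, hence the /4 above. A standalone sketch of the indexing (the helper names below are stand-ins, not the driver's API):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the bo accessors: they take a *word* index. */
static uint32_t bo_rd32(const uint32_t *map, unsigned word) { return map[word]; }
static void bo_wr32(uint32_t *map, unsigned word, uint32_t v) { map[word] = v; }

int main(void)
{
	uint32_t notifier[1024] = { 0 };	/* a pretend 4 KiB notifier block */
	uint32_t m2mf_ntfy = 0xfe0;		/* byte offset within the block */

	/* The status dword sits 3 words into the notifier, i.e. at word
	 * index m2mf_ntfy/4 + 3.  Using the byte offset directly as a
	 * word index (the old code) points roughly 4x too far into the
	 * mapping and scribbles over unrelated memory. */
	bo_wr32(notifier, m2mf_ntfy / 4 + 3, 0xffffffff);
	printf("status = 0x%08x\n", (unsigned)bo_rd32(notifier, m2mf_ntfy / 4 + 3));
	return 0;
}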
+1 -1
drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -398,7 +398,7 @@
 		dma_bits = 40;
 	} else
 	if (drm_pci_device_is_pcie(dev) &&
-	    dev_priv->chipset != 0x40 &&
+	    dev_priv->chipset  > 0x40 &&
 	    dev_priv->chipset != 0x45) {
 		if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
 			dma_bits = 39;
+7 -4
drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -35,19 +35,22 @@
 {
 	struct drm_device *dev = chan->dev;
 	struct nouveau_bo *ntfy = NULL;
-	uint32_t flags;
+	uint32_t flags, ttmpl;
 	int ret;
 
-	if (nouveau_vram_notify)
+	if (nouveau_vram_notify) {
 		flags = NOUVEAU_GEM_DOMAIN_VRAM;
-	else
+		ttmpl = TTM_PL_FLAG_VRAM;
+	} else {
 		flags = NOUVEAU_GEM_DOMAIN_GART;
+		ttmpl = TTM_PL_FLAG_TT;
+	}
 
 	ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
 	if (ret)
 		return ret;
 
-	ret = nouveau_bo_pin(ntfy, flags);
+	ret = nouveau_bo_pin(ntfy, ttmpl);
 	if (ret)
 		goto out_err;
 
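nouveau_gem_new() takes NOUVEAU_GEM_DOMAIN_* flags, while nouveau_bo_pin() expects TTM_PL_FLAG_* placement flags; the two are different bit namespaces, so reusing `flags` for both could pin the notifier in the wrong memory type. A minimal sketch of the mapping the patch open-codes (illustrative only, assuming the notifier lives either in VRAM or in GART):

/* Illustrative helper, not the driver's code: translate the GEM domain
 * chosen for the notifier into the TTM placement nouveau_bo_pin() wants. */
static u32 notifier_ttm_placement(u32 gem_domain)
{
	if (gem_domain & NOUVEAU_GEM_DOMAIN_VRAM)
		return TTM_PL_FLAG_VRAM;
	return TTM_PL_FLAG_TT;		/* GART-backed notifier */
}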
+6 -4
drivers/gpu/drm/nouveau/nouveau_object.c
@@ -1039,19 +1039,20 @@
 {
 	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
 	struct drm_device *dev = gpuobj->dev;
+	unsigned long flags;
 
 	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
 		u64  ptr = gpuobj->vinst + offset;
 		u32 base = ptr >> 16;
 		u32  val;
 
-		spin_lock(&dev_priv->ramin_lock);
+		spin_lock_irqsave(&dev_priv->vm_lock, flags);
 		if (dev_priv->ramin_base != base) {
 			dev_priv->ramin_base = base;
 			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
 		}
 		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
-		spin_unlock(&dev_priv->ramin_lock);
+		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 		return val;
 	}
 
@@ -1064,18 +1065,19 @@
 {
 	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
 	struct drm_device *dev = gpuobj->dev;
+	unsigned long flags;
 
 	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
 		u64  ptr = gpuobj->vinst + offset;
 		u32 base = ptr >> 16;
 
-		spin_lock(&dev_priv->ramin_lock);
+		spin_lock_irqsave(&dev_priv->vm_lock, flags);
 		if (dev_priv->ramin_base != base) {
 			dev_priv->ramin_base = base;
 			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
 		}
 		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
-		spin_unlock(&dev_priv->ramin_lock);
+		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 		return;
 	}
 
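These accessors can also be reached from interrupt context, which is why the new vm_lock is taken with spin_lock_irqsave(): a plain spin_lock() on a lock that is also acquired in hardirq context can deadlock if the interrupt fires on the same CPU while the lock is held. A kernel-style sketch of the pattern (not the driver's actual code):

static DEFINE_SPINLOCK(vm_lock);	/* sketch of the new hardirq-safe lock */

static u32 read_through_window(void __iomem *mmio, u64 ptr)
{
	unsigned long flags;
	u32 val;

	/* Disable local interrupts while holding the lock so a hardirq
	 * user of vm_lock cannot spin on a lock its own CPU already owns. */
	spin_lock_irqsave(&vm_lock, flags);
	/* ... reprogram the PRAMIN window, then read through it ... */
	val = readl(mmio + (ptr & 0xffff));
	spin_unlock_irqrestore(&vm_lock, flags);
	return val;
}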
+3 -2
drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -55,6 +55,7 @@
 				be->func->clear(be);
 				return -EFAULT;
 			}
+			nvbe->ttm_alloced[nvbe->nr_pages] = false;
 		}
 
 		nvbe->nr_pages++;
@@ -428,7 +429,7 @@
 	u32 aper_size, align;
 	int ret;
 
-	if (dev_priv->card_type >= NV_50 || drm_pci_device_is_pcie(dev))
+	if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev))
 		aper_size = 512 * 1024 * 1024;
 	else
 		aper_size = 64 * 1024 * 1024;
@@ -458,7 +459,7 @@
 		dev_priv->gart_info.func = &nv50_sgdma_backend;
 	} else
 	if (drm_pci_device_is_pcie(dev) &&
-	    dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
+	    dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
 		if (nv44_graph_class(dev)) {
 			dev_priv->gart_info.func = &nv44_sgdma_backend;
 			align = 512 * 1024;
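The chipset check is the "fix nv30 pcie boards" change: nv3x chips can sit on PCIe boards, but they must not be put on the nv4x PCIE-GART path, so "!= 0x40" becomes "> 0x40" here and in nouveau_mem.c above. A toy version of the predicate (runnable on its own; names are mine):

#include <stdbool.h>
#include <stdio.h>

static bool use_nv4x_pcie_gart(unsigned chipset, bool is_pcie)
{
	return is_pcie && chipset > 0x40 && chipset != 0x45;
}

int main(void)
{
	printf("nv34 on pcie: %d\n", use_nv4x_pcie_gart(0x34, true));	/* 0: falls back to the non-PCIE GART setup */
	printf("nv44 on pcie: %d\n", use_nv4x_pcie_gart(0x44, true));	/* 1 */
	printf("nv45 on pcie: %d\n", use_nv4x_pcie_gart(0x45, true));	/* 0 */
	return 0;
}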
+1
drivers/gpu/drm/nouveau/nouveau_state.c
@@ -608,6 +608,7 @@
 	spin_lock_init(&dev_priv->channels.lock);
 	spin_lock_init(&dev_priv->tile.lock);
 	spin_lock_init(&dev_priv->context_switch_lock);
+	spin_lock_init(&dev_priv->vm_lock);
 
 	/* Make the CRTCs and I2C buses accessible */
 	ret = engine->display.early_init(dev);
+6 -4
drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -404,23 +404,25 @@
 nv50_instmem_flush(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&dev_priv->ramin_lock);
+	spin_lock_irqsave(&dev_priv->vm_lock, flags);
 	nv_wr32(dev, 0x00330c, 0x00000001);
 	if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
-	spin_unlock(&dev_priv->ramin_lock);
+	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 }
 
 void
 nv84_instmem_flush(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&dev_priv->ramin_lock);
+	spin_lock_irqsave(&dev_priv->vm_lock, flags);
 	nv_wr32(dev, 0x070000, 0x00000001);
 	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
-	spin_unlock(&dev_priv->ramin_lock);
+	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 }
 
+3 -2
drivers/gpu/drm/nouveau/nv50_vm.c
@@ -174,10 +174,11 @@
 nv50_vm_flush_engine(struct drm_device *dev, int engine)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&dev_priv->ramin_lock);
+	spin_lock_irqsave(&dev_priv->vm_lock, flags);
 	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
 	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
 		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
-	spin_unlock(&dev_priv->ramin_lock);
+	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 }
+3 -2
drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -104,11 +104,13 @@
 	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
 	struct drm_device *dev = vm->dev;
 	struct nouveau_vm_pgd *vpgd;
+	unsigned long flags;
 	u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5;
 
 	pinstmem->flush(vm->dev);
 
-	spin_lock(&dev_priv->ramin_lock);
+	spin_lock_irqsave(&dev_priv->vm_lock, flags);
 	list_for_each_entry(vpgd, &vm->pgd_list, head) {
 		/* looks like maybe a "free flush slots" counter, the
 		 * faster you write to 0x100cbc to more it decreases
@@ -126,5 +128,5 @@
 				 nv_rd32(dev, 0x100c80), engine);
 		}
 	}
-	spin_unlock(&dev_priv->ramin_lock);
+	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 }
+3 -3
drivers/gpu/drm/radeon/atom.c
@@ -135,7 +135,7 @@
 		case ATOM_IIO_MOVE_INDEX:
 			temp &=
 			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
-			      CU8(base + 2));
+			      CU8(base + 3));
 			temp |=
 			    ((index >> CU8(base + 2)) &
 			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
@@ -145,7 +145,7 @@
 		case ATOM_IIO_MOVE_DATA:
 			temp &=
 			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
-			      CU8(base + 2));
+			      CU8(base + 3));
 			temp |=
 			    ((data >> CU8(base + 2)) &
 			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
@@ -155,7 +155,7 @@
 		case ATOM_IIO_MOVE_ATTR:
 			temp &=
 			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
-			      CU8(base + 2));
+			      CU8(base + 3));
 			temp |=
 			    ((ctx->
 			      io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
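The IIO MOVE_* ops copy a bit field: per the parser, CU8(base + 1) is the field width, CU8(base + 2) the source shift, and CU8(base + 3) the destination shift (the OR's trailing shift, truncated above, is also CU8(base + 3)). The bug was that the clear used the source shift, so the wrong bits of temp were masked before the OR. A self-contained sketch of the intended operation (names are mine, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Copy a 'width'-bit field from bit 'src_shift' of src into bit
 * 'dst_shift' of temp -- what the fixed parser does. */
static uint32_t iio_move(uint32_t temp, uint32_t src,
			 unsigned width, unsigned src_shift, unsigned dst_shift)
{
	uint32_t mask = 0xFFFFFFFF >> (32 - width);

	temp &= ~(mask << dst_shift);			/* clear at the destination */
	temp |= ((src >> src_shift) & mask) << dst_shift;
	return temp;
}

int main(void)
{
	/* move the low 4 bits of 0xAB into bits 8..11 of 0xFFFF0000 */
	printf("0x%08x\n", (unsigned)iio_move(0xFFFF0000, 0xAB, 4, 0, 8));	/* 0xffff0b00 */
	return 0;
}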
+1 -5
drivers/gpu/drm/radeon/atombios_crtc.c
@@ -532,10 +532,7 @@
 		else
 			pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
 
-		if ((rdev->family == CHIP_R600) ||
-		    (rdev->family == CHIP_RV610) ||
-		    (rdev->family == CHIP_RV630) ||
-		    (rdev->family == CHIP_RV670))
+		if (rdev->family < CHIP_RV770)
 			pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
 	} else {
 		pll->flags |= RADEON_PLL_LEGACY;
@@ -562,7 +559,6 @@
 	if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
 		if (ss_enabled) {
 			if (ss->refdiv) {
-				pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
 				pll->flags |= RADEON_PLL_USE_REF_DIV;
 				pll->reference_div = ss->refdiv;
 				if (ASIC_IS_AVIVO(rdev))
+44 -45
drivers/gpu/drm/radeon/evergreen.c
@@ -353,7 +353,7 @@
 			      struct drm_display_mode *mode,
 			      struct drm_display_mode *other_mode)
 {
-	u32 tmp = 0;
+	u32 tmp;
 	/*
 	 * Line Buffer Setup
 	 * There are 3 line buffers, each one shared by 2 display controllers.
@@ -363,64 +363,63 @@
 	 * first display controller
 	 *  0 - first half of lb (3840 * 2)
 	 *  1 - first 3/4 of lb (5760 * 2)
-	 *  2 - whole lb (7680 * 2)
+	 *  2 - whole lb (7680 * 2), other crtc must be disabled
 	 *  3 - first 1/4 of lb (1920 * 2)
 	 * second display controller
 	 *  4 - second half of lb (3840 * 2)
 	 *  5 - second 3/4 of lb (5760 * 2)
-	 *  6 - whole lb (7680 * 2)
+	 *  6 - whole lb (7680 * 2), other crtc must be disabled
 	 *  7 - last 1/4 of lb (1920 * 2)
 	 */
-	if (mode && other_mode) {
-		if (mode->hdisplay > other_mode->hdisplay) {
-			if (mode->hdisplay > 2560)
-				tmp = 1; /* 3/4 */
-			else
-				tmp = 0; /* 1/2 */
-		} else if (other_mode->hdisplay > mode->hdisplay) {
-			if (other_mode->hdisplay > 2560)
-				tmp = 3; /* 1/4 */
-			else
-				tmp = 0; /* 1/2 */
-		} else
+	/* this can get tricky if we have two large displays on a paired group
+	 * of crtcs.  Ideally for multiple large displays we'd assign them to
+	 * non-linked crtcs for maximum line buffer allocation.
+	 */
+	if (radeon_crtc->base.enabled && mode) {
+		if (other_mode)
 			tmp = 0; /* 1/2 */
-	} else if (mode)
-		tmp = 2; /* whole */
-	else if (other_mode)
-		tmp = 3; /* 1/4 */
+		else
+			tmp = 2; /* whole */
+	} else
+		tmp = 0;
 
 	/* second controller of the pair uses second half of the lb */
 	if (radeon_crtc->crtc_id % 2)
 		tmp += 4;
 	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
 
-	switch (tmp) {
-	case 0:
-	case 4:
-	default:
-		if (ASIC_IS_DCE5(rdev))
-			return 4096 * 2;
-		else
-			return 3840 * 2;
-	case 1:
-	case 5:
-		if (ASIC_IS_DCE5(rdev))
-			return 6144 * 2;
-		else
-			return 5760 * 2;
-	case 2:
-	case 6:
-		if (ASIC_IS_DCE5(rdev))
-			return 8192 * 2;
-		else
-			return 7680 * 2;
-	case 3:
-	case 7:
-		if (ASIC_IS_DCE5(rdev))
-			return 2048 * 2;
-		else
-			return 1920 * 2;
+	if (radeon_crtc->base.enabled && mode) {
+		switch (tmp) {
+		case 0:
+		case 4:
+		default:
+			if (ASIC_IS_DCE5(rdev))
+				return 4096 * 2;
+			else
+				return 3840 * 2;
+		case 1:
+		case 5:
+			if (ASIC_IS_DCE5(rdev))
+				return 6144 * 2;
+			else
+				return 5760 * 2;
+		case 2:
+		case 6:
+			if (ASIC_IS_DCE5(rdev))
+				return 8192 * 2;
+			else
+				return 7680 * 2;
+		case 3:
+		case 7:
+			if (ASIC_IS_DCE5(rdev))
+				return 2048 * 2;
+			else
+				return 1920 * 2;
+		}
 	}
+
+	/* controller not enabled, so no lb used */
+	return 0;
 }
 
 static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
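The line-buffer sizing above reduces to: an enabled crtc gets half of the shared line buffer when its partner in the pair is also driving a mode, the whole buffer when it is alone, and a disabled crtc reports 0 so the watermark code that consumes this value doesn't assume buffer space it doesn't have. A simplified, runnable model of that decision (non-DCE5 sizes, in the driver's pixel units):

#include <stdbool.h>
#include <stdio.h>

static unsigned lb_size_in_pixels(bool enabled, bool partner_active)
{
	if (!enabled)
		return 0;			/* controller not enabled, no lb used */
	return partner_active ? 3840 * 2	/* half of the shared lb */
			      : 7680 * 2;	/* whole lb to itself */
}

int main(void)
{
	printf("single head : %u\n", lb_size_in_pixels(true, false));	/* 15360 */
	printf("paired heads: %u\n", lb_size_in_pixels(true, true));	/* 7680 */
	printf("disabled    : %u\n", lb_size_in_pixels(false, true));	/* 0 */
	return 0;
}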
+12 -17
drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1199,7 +1199,7 @@
 	if (router->ddc_valid || router->cd_valid) {
 		radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
 		if (!radeon_connector->router_bus)
-			goto failed;
+			DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
 	}
 	switch (connector_type) {
 	case DRM_MODE_CONNECTOR_VGA:
@@ -1208,7 +1208,7 @@
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 		}
 		radeon_connector->dac_load_detect = true;
 		drm_connector_attach_property(&radeon_connector->base,
@@ -1226,7 +1226,7 @@
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 		}
 		radeon_connector->dac_load_detect = true;
 		drm_connector_attach_property(&radeon_connector->base,
@@ -1249,7 +1249,7 @@
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 		}
 		subpixel_order = SubPixelHorizontalRGB;
 		drm_connector_attach_property(&radeon_connector->base,
@@ -1290,7 +1290,7 @@
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 		}
 		drm_connector_attach_property(&radeon_connector->base,
 					      rdev->mode_info.coherent_mode_property,
@@ -1329,10 +1329,10 @@
 			else
 				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
 			if (!radeon_dig_connector->dp_i2c_bus)
-				goto failed;
+				DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
 			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 		}
 		subpixel_order = SubPixelHorizontalRGB;
 		drm_connector_attach_property(&radeon_connector->base,
@@ -1381,7 +1381,7 @@
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 		}
 		drm_connector_attach_property(&radeon_connector->base,
 					      dev->mode_config.scaling_mode_property,
@@ -1457,7 +1457,7 @@
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 		}
 		radeon_connector->dac_load_detect = true;
 		drm_connector_attach_property(&radeon_connector->base,
@@ -1475,7 +1475,7 @@
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 		}
 		radeon_connector->dac_load_detect = true;
 		drm_connector_attach_property(&radeon_connector->base,
@@ -1493,7 +1493,7 @@
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 		}
 		if (connector_type == DRM_MODE_CONNECTOR_DVII) {
 			radeon_connector->dac_load_detect = true;
@@ -1538,7 +1538,7 @@
 		if (i2c_bus->valid) {
 			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
 			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
 		}
 		drm_connector_attach_property(&radeon_connector->base,
 					      dev->mode_config.scaling_mode_property,
@@ -1567,9 +1567,4 @@
 			radeon_legacy_backlight_init(radeon_encoder, connector);
 		}
 	}
-	return;
-
-failed:
-	drm_connector_cleanup(connector);
-	kfree(connector);
 }
+6
drivers/gpu/drm/radeon/radeon_i2c.c
@@ -1096,6 +1096,9 @@
 	if (!radeon_connector->router.ddc_valid)
 		return;
 
+	if (!radeon_connector->router_bus)
+		return;
+
 	radeon_i2c_get_byte(radeon_connector->router_bus,
 			    radeon_connector->router.i2c_addr,
 			    0x3, &val);
@@ -1122,6 +1125,9 @@
 	u8 val;
 
 	if (!radeon_connector->router.cd_valid)
+		return;
+
+	if (!radeon_connector->router_bus)
 		return;
 
 	radeon_i2c_get_byte(radeon_connector->router_bus,