Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (29 commits)
drm/nouveau: bail out of auxch transaction if we repeatedly receive defers
drm/nv50: implement gpio set/get routines
drm/nv50: parse/use some more de-magiced parts of gpio table entries
drm/nouveau: store raw gpio table entry in bios gpio structs
drm/nv40: Init some tiling-related PGRAPH state.
drm/nv50: Add NVA3 support in ctxprog/ctxvals generator.
drm/nv50: another dodgy DP hack
drm/nv50: punt hotplug irq handling out to workqueue
drm/nv50: preserve an unknown SOR_MODECTRL value for DP encoders
drm/nv50: Allow using the NVA3 new compute class.
drm/nv50: cleanup properly if PDISPLAY init fails
drm/nouveau: fixup the init failure paths some more
drm/nv50: fix instmem init on IGPs if stolen mem crosses 4GiB mark
drm/nv40: add LVDS table quirk for Dell Latitude D620
drm/nv40: rework lvds table parsing
drm/nouveau: detect vram amount once, and save the value
drm/nouveau: remove some unused members from drm_nouveau_private
drm/nouveau: Make use of TTM busy_placements.
drm/nv50: add more 0x100c80 flushy magic
drm/nv50: fix fbcon when framebuffer above 4GiB mark
...

+506 -296
+2
drivers/gpu/drm/drm_edid.c
··· 85 86 /* Envision Peripherals, Inc. EN-7100e */ 87 { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH }, 88 89 /* Funai Electronics PM36B */ 90 { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
··· 85 86 /* Envision Peripherals, Inc. EN-7100e */ 87 { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH }, 88 + /* Envision EN2028 */ 89 + { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 }, 90 91 /* Funai Electronics PM36B */ 92 { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
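
The quirk key added above pairs a three-letter PNP vendor ID with a 16-bit product code (8232 == 0x2028, matching the EN2028 model name). As a standalone illustration of where those two values come from in a raw EDID block, here is a minimal sketch assuming the standard EDID header layout (manufacturer ID big-endian in bytes 8-9, three 5-bit letters; product code little-endian in bytes 10-11); the function name is hypothetical and not part of drm_edid.c.

#include <stdint.h>
#include <stdio.h>

/* Illustrative decode of the two values a drm_edid quirk matches on. */
static void edid_vendor_product(const uint8_t *edid)
{
	uint16_t mfg  = (edid[8] << 8) | edid[9];	/* big-endian packed letters */
	uint16_t prod = edid[10] | (edid[11] << 8);	/* little-endian product code */
	char vendor[4];

	vendor[0] = '@' + ((mfg >> 10) & 0x1f);		/* 1 => 'A' */
	vendor[1] = '@' + ((mfg >> 5) & 0x1f);
	vendor[2] = '@' + (mfg & 0x1f);
	vendor[3] = '\0';

	printf("vendor %s, product %u (0x%04x)\n", vendor, prod, prod);
}
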
+1 -1
drivers/gpu/drm/nouveau/Makefile
··· 22 nv50_cursor.o nv50_display.o nv50_fbcon.o \ 23 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ 24 nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ 25 - nv17_gpio.o 26 27 nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o 28 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
··· 22 nv50_cursor.o nv50_display.o nv50_fbcon.o \ 23 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ 24 nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ 25 + nv17_gpio.o nv50_gpio.o 26 27 nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o 28 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
+59 -68
drivers/gpu/drm/nouveau/nouveau_bios.c
··· 2573 * each GPIO according to various values listed in each entry 2574 */ 2575 2576 - const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; 2577 const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c }; 2578 - const uint8_t *gpio_table = &bios->data[bios->dcb.gpio_table_ptr]; 2579 - const uint8_t *gpio_entry; 2580 int i; 2581 2582 if (!iexec->execute) 2583 return 1; 2584 2585 - if (bios->dcb.version != 0x40) { 2586 - NV_ERROR(bios->dev, "DCB table not version 4.0\n"); 2587 - return 0; 2588 - } 2589 2590 - if (!bios->dcb.gpio_table_ptr) { 2591 - NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n"); 2592 - return 0; 2593 - } 2594 2595 - gpio_entry = gpio_table + gpio_table[1]; 2596 - for (i = 0; i < gpio_table[2]; i++, gpio_entry += gpio_table[3]) { 2597 - uint32_t entry = ROM32(gpio_entry[0]), r, s, v; 2598 - int line = (entry & 0x0000001f); 2599 2600 - BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, entry); 2601 - 2602 - if ((entry & 0x0000ff00) == 0x0000ff00) 2603 - continue; 2604 - 2605 - r = nv50_gpio_reg[line >> 3]; 2606 - s = (line & 0x07) << 2; 2607 - v = bios_rd32(bios, r) & ~(0x00000003 << s); 2608 - if (entry & 0x01000000) 2609 - v |= (((entry & 0x60000000) >> 29) ^ 2) << s; 2610 - else 2611 - v |= (((entry & 0x18000000) >> 27) ^ 2) << s; 2612 - bios_wr32(bios, r, v); 2613 - 2614 - r = nv50_gpio_ctl[line >> 4]; 2615 - s = (line & 0x0f); 2616 v = bios_rd32(bios, r) & ~(0x00010001 << s); 2617 - switch ((entry & 0x06000000) >> 25) { 2618 case 1: 2619 v |= (0x00000001 << s); 2620 break; ··· 3184 struct nvbios *bios = &dev_priv->vbios; 3185 unsigned int outputset = (dcbent->or == 4) ? 1 : 0; 3186 uint16_t scriptptr = 0, clktable; 3187 - uint8_t clktableptr = 0; 3188 3189 /* 3190 * For now we assume version 3.0 table - g80 support will need some ··· 3202 scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]); 3203 break; 3204 case LVDS_RESET: 3205 if (dcbent->lvdsconf.use_straps_for_mode) { 3206 if (bios->fp.dual_link) 3207 - clktableptr += 2; 3208 - if (bios->fp.BITbit1) 3209 - clktableptr++; 3210 } else { 3211 /* using EDID */ 3212 - uint8_t fallback = bios->data[bios->fp.lvdsmanufacturerpointer + 4]; 3213 - int fallbackcmpval = (dcbent->or == 4) ? 4 : 1; 3214 3215 if (bios->fp.dual_link) { 3216 - clktableptr += 2; 3217 - fallbackcmpval *= 2; 3218 } 3219 - if (fallbackcmpval & fallback) 3220 - clktableptr++; 3221 } 3222 3223 - /* adding outputset * 8 may not be correct */ 3224 - clktable = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 15 + clktableptr * 2 + outputset * 8]); 3225 if (!clktable) { 3226 NV_ERROR(dev, "Pixel clock comparison table not found\n"); 3227 return -ENOENT; ··· 3626 *if_is_24bit = bios->data[lvdsofs] & 16; 3627 break; 3628 case 0x30: 3629 - /* 3630 - * My money would be on there being a 24 bit interface bit in 3631 - * this table, but I have no example of a laptop bios with a 3632 - * 24 bit panel to confirm that. 
Hence we shout loudly if any 3633 - * bit other than bit 0 is set (I've not even seen bit 1) 3634 - */ 3635 - if (bios->data[lvdsofs] > 1) 3636 - NV_ERROR(dev, 3637 - "You have a very unusual laptop display; please report it\n"); 3638 /* 3639 * No sign of the "power off for reset" or "reset for panel 3640 * on" bits, but it's safer to assume we should 3641 */ 3642 bios->fp.power_off_for_reset = true; 3643 bios->fp.reset_after_pclk_change = true; 3644 /* 3645 * It's ok lvdsofs is wrong for nv4x edid case; dual_link is 3646 - * over-written, and BITbit1 isn't used 3647 */ 3648 - bios->fp.dual_link = bios->data[lvdsofs] & 1; 3649 - bios->fp.BITbit1 = bios->data[lvdsofs] & 2; 3650 - bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10; 3651 - break; 3652 - case 0x40: 3653 bios->fp.dual_link = bios->data[lvdsofs] & 1; 3654 bios->fp.if_is_24bit = bios->data[lvdsofs] & 2; 3655 bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4]; 3656 bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10; 3657 break; 3658 } 3659 3660 /* set dual_link flag for EDID case */ ··· 5068 gpio->tag = tag; 5069 gpio->line = line; 5070 gpio->invert = flags != 4; 5071 } 5072 5073 static void 5074 parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset) 5075 { 5076 struct dcb_gpio_entry *gpio; 5077 - uint32_t ent = ROM32(bios->data[offset]); 5078 - uint8_t line = ent & 0x1f, 5079 - tag = ent >> 8 & 0xff; 5080 5081 - if (tag == 0xff) 5082 return; 5083 5084 gpio = new_gpio_entry(bios); 5085 - 5086 - /* Currently unused, we may need more fields parsed at some 5087 - * point. */ 5088 - gpio->tag = tag; 5089 - gpio->line = line; 5090 } 5091 5092 static void
··· 2573 * each GPIO according to various values listed in each entry 2574 */ 2575 2576 + struct drm_nouveau_private *dev_priv = bios->dev->dev_private; 2577 const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c }; 2578 int i; 2579 + 2580 + if (dev_priv->card_type != NV_50) { 2581 + NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n"); 2582 + return -ENODEV; 2583 + } 2584 2585 if (!iexec->execute) 2586 return 1; 2587 2588 + for (i = 0; i < bios->dcb.gpio.entries; i++) { 2589 + struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i]; 2590 + uint32_t r, s, v; 2591 2592 + BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry); 2593 2594 + nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default); 2595 2596 + /* The NVIDIA binary driver doesn't appear to actually do 2597 + * any of this, my VBIOS does however. 2598 + */ 2599 + /* Not a clue, needs de-magicing */ 2600 + r = nv50_gpio_ctl[gpio->line >> 4]; 2601 + s = (gpio->line & 0x0f); 2602 v = bios_rd32(bios, r) & ~(0x00010001 << s); 2603 + switch ((gpio->entry & 0x06000000) >> 25) { 2604 case 1: 2605 v |= (0x00000001 << s); 2606 break; ··· 3198 struct nvbios *bios = &dev_priv->vbios; 3199 unsigned int outputset = (dcbent->or == 4) ? 1 : 0; 3200 uint16_t scriptptr = 0, clktable; 3201 3202 /* 3203 * For now we assume version 3.0 table - g80 support will need some ··· 3217 scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]); 3218 break; 3219 case LVDS_RESET: 3220 + clktable = bios->fp.lvdsmanufacturerpointer + 15; 3221 + if (dcbent->or == 4) 3222 + clktable += 8; 3223 + 3224 if (dcbent->lvdsconf.use_straps_for_mode) { 3225 if (bios->fp.dual_link) 3226 + clktable += 4; 3227 + if (bios->fp.if_is_24bit) 3228 + clktable += 2; 3229 } else { 3230 /* using EDID */ 3231 + int cmpval_24bit = (dcbent->or == 4) ? 4 : 1; 3232 3233 if (bios->fp.dual_link) { 3234 + clktable += 4; 3235 + cmpval_24bit <<= 1; 3236 } 3237 + 3238 + if (bios->fp.strapless_is_24bit & cmpval_24bit) 3239 + clktable += 2; 3240 } 3241 3242 + clktable = ROM16(bios->data[clktable]); 3243 if (!clktable) { 3244 NV_ERROR(dev, "Pixel clock comparison table not found\n"); 3245 return -ENOENT; ··· 3638 *if_is_24bit = bios->data[lvdsofs] & 16; 3639 break; 3640 case 0x30: 3641 + case 0x40: 3642 /* 3643 * No sign of the "power off for reset" or "reset for panel 3644 * on" bits, but it's safer to assume we should 3645 */ 3646 bios->fp.power_off_for_reset = true; 3647 bios->fp.reset_after_pclk_change = true; 3648 + 3649 /* 3650 * It's ok lvdsofs is wrong for nv4x edid case; dual_link is 3651 + * over-written, and if_is_24bit isn't used 3652 */ 3653 bios->fp.dual_link = bios->data[lvdsofs] & 1; 3654 bios->fp.if_is_24bit = bios->data[lvdsofs] & 2; 3655 bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4]; 3656 bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10; 3657 break; 3658 + } 3659 + 3660 + /* Dell Latitude D620 reports a too-high value for the dual-link 3661 + * transition freq, causing us to program the panel incorrectly. 3662 + * 3663 + * It doesn't appear the VBIOS actually uses its transition freq 3664 + * (90000kHz), instead it uses the "Number of LVDS channels" field 3665 + * out of the panel ID structure (http://www.spwg.org/). 
3666 + * 3667 + * For the moment, a quirk will do :) 3668 + */ 3669 + if ((dev->pdev->device == 0x01d7) && 3670 + (dev->pdev->subsystem_vendor == 0x1028) && 3671 + (dev->pdev->subsystem_device == 0x01c2)) { 3672 + bios->fp.duallink_transition_clk = 80000; 3673 } 3674 3675 /* set dual_link flag for EDID case */ ··· 5077 gpio->tag = tag; 5078 gpio->line = line; 5079 gpio->invert = flags != 4; 5080 + gpio->entry = ent; 5081 } 5082 5083 static void 5084 parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset) 5085 { 5086 + uint32_t entry = ROM32(bios->data[offset]); 5087 struct dcb_gpio_entry *gpio; 5088 5089 + if ((entry & 0x0000ff00) == 0x0000ff00) 5090 return; 5091 5092 gpio = new_gpio_entry(bios); 5093 + gpio->tag = (entry & 0x0000ff00) >> 8; 5094 + gpio->line = (entry & 0x0000001f) >> 0; 5095 + gpio->state_default = (entry & 0x01000000) >> 24; 5096 + gpio->state[0] = (entry & 0x18000000) >> 27; 5097 + gpio->state[1] = (entry & 0x60000000) >> 29; 5098 + gpio->entry = entry; 5099 } 5100 5101 static void
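
For reference, the DCB 4.0 GPIO entry layout that the new parse_dcb40_gpio_entry() relies on, rewritten as a standalone sketch: bits 0-4 give the GPIO line, bits 8-15 the function tag (0xff marking an unused slot), bit 24 the default state, and bits 27-28 / 29-30 the two state values later consumed by nv50_gpio_set(). The struct and function names below are illustrative, not the driver's.

#include <stdint.h>

struct gpio_fields {
	uint8_t tag, line, state_default, state[2];
};

/* Decode one 32-bit DCB 4.0 GPIO entry; returns -1 for an unused slot. */
static int decode_dcb40_gpio(uint32_t entry, struct gpio_fields *g)
{
	if ((entry & 0x0000ff00) == 0x0000ff00)
		return -1;

	g->line          = (entry & 0x0000001f) >> 0;
	g->tag           = (entry & 0x0000ff00) >> 8;
	g->state_default = (entry & 0x01000000) >> 24;
	g->state[0]      = (entry & 0x18000000) >> 27;
	g->state[1]      = (entry & 0x60000000) >> 29;
	return 0;
}
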
+3 -1
drivers/gpu/drm/nouveau/nouveau_bios.h
··· 49 enum dcb_gpio_tag tag; 50 int line; 51 bool invert; 52 }; 53 54 struct dcb_gpio_table { ··· 270 bool reset_after_pclk_change; 271 bool dual_link; 272 bool link_c_increment; 273 - bool BITbit1; 274 bool if_is_24bit; 275 int duallink_transition_clk; 276 uint8_t strapless_is_24bit;
··· 49 enum dcb_gpio_tag tag; 50 int line; 51 bool invert; 52 + uint32_t entry; 53 + uint8_t state_default; 54 + uint8_t state[2]; 55 }; 56 57 struct dcb_gpio_table { ··· 267 bool reset_after_pclk_change; 268 bool dual_link; 269 bool link_c_increment; 270 bool if_is_24bit; 271 int duallink_transition_clk; 272 uint8_t strapless_is_24bit;
+35 -30
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 72 * many small buffers. 73 */ 74 if (dev_priv->card_type == NV_50) { 75 - uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15; 76 int i; 77 78 switch (tile_flags) { ··· 154 155 nvbo->placement.fpfn = 0; 156 nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0; 157 - nouveau_bo_placement_set(nvbo, flags); 158 159 nvbo->channel = chan; 160 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, ··· 173 return 0; 174 } 175 176 - void 177 - nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype) 178 { 179 - int n = 0; 180 181 - if (memtype & TTM_PL_FLAG_VRAM) 182 - nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING; 183 - if (memtype & TTM_PL_FLAG_TT) 184 - nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; 185 - if (memtype & TTM_PL_FLAG_SYSTEM) 186 - nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; 187 - nvbo->placement.placement = nvbo->placements; 188 - nvbo->placement.busy_placement = nvbo->placements; 189 - nvbo->placement.num_placement = n; 190 - nvbo->placement.num_busy_placement = n; 191 192 - if (nvbo->pin_refcnt) { 193 - while (n--) 194 - nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT; 195 - } 196 } 197 198 int ··· 207 { 208 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 209 struct ttm_buffer_object *bo = &nvbo->bo; 210 - int ret, i; 211 212 if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { 213 NV_ERROR(nouveau_bdev(bo->bdev)->dev, ··· 223 if (ret) 224 goto out; 225 226 - nouveau_bo_placement_set(nvbo, memtype); 227 - for (i = 0; i < nvbo->placement.num_placement; i++) 228 - nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 229 230 ret = ttm_bo_validate(bo, &nvbo->placement, false, false); 231 if (ret == 0) { ··· 250 { 251 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 252 struct ttm_buffer_object *bo = &nvbo->bo; 253 - int ret, i; 254 255 if (--nvbo->pin_refcnt) 256 return 0; ··· 259 if (ret) 260 return ret; 261 262 - for (i = 0; i < nvbo->placement.num_placement; i++) 263 - nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 264 265 ret = ttm_bo_validate(bo, &nvbo->placement, false, false); 266 if (ret == 0) { ··· 400 man->io_addr = NULL; 401 man->io_offset = drm_get_resource_start(dev, 1); 402 man->io_size = drm_get_resource_len(dev, 1); 403 - if (man->io_size > nouveau_mem_fb_amount(dev)) 404 - man->io_size = nouveau_mem_fb_amount(dev); 405 406 man->gpu_offset = dev_priv->vm_vram_base; 407 break; ··· 444 445 switch (bo->mem.mem_type) { 446 case TTM_PL_VRAM: 447 - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT); 448 break; 449 default: 450 - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM); 451 break; 452 } 453
··· 72 * many small buffers. 73 */ 74 if (dev_priv->card_type == NV_50) { 75 + uint32_t block_size = dev_priv->vram_size >> 15; 76 int i; 77 78 switch (tile_flags) { ··· 154 155 nvbo->placement.fpfn = 0; 156 nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0; 157 + nouveau_bo_placement_set(nvbo, flags, 0); 158 159 nvbo->channel = chan; 160 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, ··· 173 return 0; 174 } 175 176 + static void 177 + set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags) 178 { 179 + *n = 0; 180 181 + if (type & TTM_PL_FLAG_VRAM) 182 + pl[(*n)++] = TTM_PL_FLAG_VRAM | flags; 183 + if (type & TTM_PL_FLAG_TT) 184 + pl[(*n)++] = TTM_PL_FLAG_TT | flags; 185 + if (type & TTM_PL_FLAG_SYSTEM) 186 + pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags; 187 + } 188 189 + void 190 + nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) 191 + { 192 + struct ttm_placement *pl = &nvbo->placement; 193 + uint32_t flags = TTM_PL_MASK_CACHING | 194 + (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0); 195 + 196 + pl->placement = nvbo->placements; 197 + set_placement_list(nvbo->placements, &pl->num_placement, 198 + type, flags); 199 + 200 + pl->busy_placement = nvbo->busy_placements; 201 + set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, 202 + type | busy, flags); 203 } 204 205 int ··· 200 { 201 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 202 struct ttm_buffer_object *bo = &nvbo->bo; 203 + int ret; 204 205 if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { 206 NV_ERROR(nouveau_bdev(bo->bdev)->dev, ··· 216 if (ret) 217 goto out; 218 219 + nouveau_bo_placement_set(nvbo, memtype, 0); 220 221 ret = ttm_bo_validate(bo, &nvbo->placement, false, false); 222 if (ret == 0) { ··· 245 { 246 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 247 struct ttm_buffer_object *bo = &nvbo->bo; 248 + int ret; 249 250 if (--nvbo->pin_refcnt) 251 return 0; ··· 254 if (ret) 255 return ret; 256 257 + nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); 258 259 ret = ttm_bo_validate(bo, &nvbo->placement, false, false); 260 if (ret == 0) { ··· 396 man->io_addr = NULL; 397 man->io_offset = drm_get_resource_start(dev, 1); 398 man->io_size = drm_get_resource_len(dev, 1); 399 + if (man->io_size > dev_priv->vram_size) 400 + man->io_size = dev_priv->vram_size; 401 402 man->gpu_offset = dev_priv->vm_vram_base; 403 break; ··· 440 441 switch (bo->mem.mem_type) { 442 case TTM_PL_VRAM: 443 + nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 444 + TTM_PL_FLAG_SYSTEM); 445 break; 446 default: 447 + nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0); 448 break; 449 } 450
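
The second argument added to nouveau_bo_placement_set() feeds TTM's busy_placements: the preferred list is built from "type" alone, while the busy list is built from "type | busy", so under memory pressure TTM may satisfy the request from the fallback domain instead of stalling. A hypothetical caller (not from this patch) that prefers VRAM but tolerates GART might look like the sketch below, assuming the usual nouveau/TTM headers.

/* Hypothetical helper, for illustration only. */
static int nouveau_bo_prefer_vram(struct nouveau_bo *nvbo)
{
	/* preferred: VRAM; acceptable when VRAM is contended: GART */
	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, TTM_PL_FLAG_TT);
	return ttm_bo_validate(&nvbo->bo, &nvbo->placement, false, false);
}
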
-2
drivers/gpu/drm/nouveau/nouveau_channel.c
··· 142 GFP_KERNEL); 143 if (!dev_priv->fifos[channel]) 144 return -ENOMEM; 145 - dev_priv->fifo_alloc_count++; 146 chan = dev_priv->fifos[channel]; 147 INIT_LIST_HEAD(&chan->nvsw.vbl_wait); 148 INIT_LIST_HEAD(&chan->fence.pending); ··· 320 iounmap(chan->user); 321 322 dev_priv->fifos[chan->id] = NULL; 323 - dev_priv->fifo_alloc_count--; 324 kfree(chan); 325 } 326
··· 142 GFP_KERNEL); 143 if (!dev_priv->fifos[channel]) 144 return -ENOMEM; 145 chan = dev_priv->fifos[channel]; 146 INIT_LIST_HEAD(&chan->nvsw.vbl_wait); 147 INIT_LIST_HEAD(&chan->fence.pending); ··· 321 iounmap(chan->user); 322 323 dev_priv->fifos[chan->id] = NULL; 324 kfree(chan); 325 } 326
+2 -3
drivers/gpu/drm/nouveau/nouveau_debugfs.c
··· 137 { 138 struct drm_info_node *node = (struct drm_info_node *) m->private; 139 struct drm_minor *minor = node->minor; 140 - struct drm_device *dev = minor->dev; 141 142 - seq_printf(m, "VRAM total: %dKiB\n", 143 - (int)(nouveau_mem_fb_amount(dev) >> 10)); 144 return 0; 145 } 146
··· 137 { 138 struct drm_info_node *node = (struct drm_info_node *) m->private; 139 struct drm_minor *minor = node->minor; 140 + struct drm_nouveau_private *dev_priv = minor->dev->dev_private; 141 142 + seq_printf(m, "VRAM total: %dKiB\n", (int)(dev_priv->vram_size >> 10)); 143 return 0; 144 } 145
+7 -1
drivers/gpu/drm/nouveau/nouveau_dp.c
··· 483 ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT); 484 ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT); 485 486 - for (;;) { 487 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000); 488 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl); 489 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000); ··· 500 if ((stat & NV50_AUXCH_STAT_REPLY_AUX) != 501 NV50_AUXCH_STAT_REPLY_AUX_DEFER) 502 break; 503 } 504 505 if (cmd & 1) {
··· 483 ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT); 484 ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT); 485 486 + for (i = 0; i < 16; i++) { 487 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000); 488 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl); 489 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000); ··· 500 if ((stat & NV50_AUXCH_STAT_REPLY_AUX) != 501 NV50_AUXCH_STAT_REPLY_AUX_DEFER) 502 break; 503 + } 504 + 505 + if (i == 16) { 506 + NV_ERROR(dev, "auxch DEFER too many times, bailing\n"); 507 + ret = -EREMOTEIO; 508 + goto out; 509 } 510 511 if (cmd & 1) {
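
The change above replaces an unbounded for (;;) with a 16-iteration budget and a distinct error once the sink keeps answering DEFER. The same shape, reduced to a standalone sketch (the names and the always-deferring stub are hypothetical):

#include <errno.h>

#define AUX_DEFER_RETRIES 16

static int auxch_try_once(void)
{
	return 1;	/* stub: pretend the sink replies DEFER every time */
}

static int auxch_transaction(void)
{
	int i;

	for (i = 0; i < AUX_DEFER_RETRIES; i++) {
		if (!auxch_try_once())
			return 0;	/* got a real reply */
	}
	return -EREMOTEIO;		/* deferred too many times, bail */
}
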
+19 -21
drivers/gpu/drm/nouveau/nouveau_drv.h
··· 76 struct ttm_buffer_object bo; 77 struct ttm_placement placement; 78 u32 placements[3]; 79 struct ttm_bo_kmap_obj kmap; 80 struct list_head head; 81 ··· 520 521 struct workqueue_struct *wq; 522 struct work_struct irq_work; 523 524 struct list_head vbl_waiting; 525 ··· 535 536 struct fb_info *fbdev_info; 537 538 - int fifo_alloc_count; 539 struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; 540 541 struct nouveau_engine engine; ··· 554 uint32_t ramro_offset; 555 uint32_t ramro_size; 556 557 - /* base physical addresses */ 558 - uint64_t fb_phys; 559 - uint64_t fb_available_size; 560 - uint64_t fb_mappable_pages; 561 - uint64_t fb_aper_free; 562 - 563 struct { 564 enum { 565 NOUVEAU_GART_NONE = 0, ··· 567 struct nouveau_gpuobj *sg_ctxdma; 568 struct page *sg_dummy_page; 569 dma_addr_t sg_dummy_bus; 570 - 571 - /* nottm hack */ 572 - struct drm_ttm_backend *sg_be; 573 - unsigned long sg_handle; 574 } gart_info; 575 576 /* nv10-nv40 tiling regions */ ··· 574 struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR]; 575 spinlock_t lock; 576 } tile; 577 578 /* G8x/G9x virtual address space */ 579 uint64_t vm_gart_base; ··· 593 uint64_t vm_end; 594 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; 595 int vm_vram_pt_nr; 596 - uint64_t vram_sys_base; 597 - 598 - /* the mtrr covering the FB */ 599 - int fb_mtrr; 600 601 struct mem_block *ramin_heap; 602 ··· 611 uint32_t dac_users[4]; 612 613 struct nouveau_suspend_resume { 614 - uint32_t fifo_mode; 615 - uint32_t graph_ctx_control; 616 - uint32_t graph_state; 617 uint32_t *ramin_copy; 618 - uint64_t ramin_size; 619 } susres; 620 621 struct backlight_device *backlight; ··· 710 struct drm_file *, int tail); 711 extern void nouveau_mem_takedown(struct mem_block **heap); 712 extern void nouveau_mem_free_block(struct mem_block *); 713 - extern uint64_t nouveau_mem_fb_amount(struct drm_device *); 714 extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap); 715 extern int nouveau_mem_init(struct drm_device *); 716 extern int nouveau_mem_init_agp(struct drm_device *); ··· 1117 extern int nouveau_bo_unpin(struct nouveau_bo *); 1118 extern int nouveau_bo_map(struct nouveau_bo *); 1119 extern void nouveau_bo_unmap(struct nouveau_bo *); 1120 - extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t memtype); 1121 extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); 1122 extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); 1123 extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); ··· 1161 /* nv17_gpio.c */ 1162 int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); 1163 int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); 1164 1165 #ifndef ioread32_native 1166 #ifdef __BIG_ENDIAN
··· 76 struct ttm_buffer_object bo; 77 struct ttm_placement placement; 78 u32 placements[3]; 79 + u32 busy_placements[3]; 80 struct ttm_bo_kmap_obj kmap; 81 struct list_head head; 82 ··· 519 520 struct workqueue_struct *wq; 521 struct work_struct irq_work; 522 + struct work_struct hpd_work; 523 524 struct list_head vbl_waiting; 525 ··· 533 534 struct fb_info *fbdev_info; 535 536 struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; 537 538 struct nouveau_engine engine; ··· 553 uint32_t ramro_offset; 554 uint32_t ramro_size; 555 556 struct { 557 enum { 558 NOUVEAU_GART_NONE = 0, ··· 572 struct nouveau_gpuobj *sg_ctxdma; 573 struct page *sg_dummy_page; 574 dma_addr_t sg_dummy_bus; 575 } gart_info; 576 577 /* nv10-nv40 tiling regions */ ··· 583 struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR]; 584 spinlock_t lock; 585 } tile; 586 + 587 + /* VRAM/fb configuration */ 588 + uint64_t vram_size; 589 + uint64_t vram_sys_base; 590 + 591 + uint64_t fb_phys; 592 + uint64_t fb_available_size; 593 + uint64_t fb_mappable_pages; 594 + uint64_t fb_aper_free; 595 + int fb_mtrr; 596 597 /* G8x/G9x virtual address space */ 598 uint64_t vm_gart_base; ··· 592 uint64_t vm_end; 593 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; 594 int vm_vram_pt_nr; 595 596 struct mem_block *ramin_heap; 597 ··· 614 uint32_t dac_users[4]; 615 616 struct nouveau_suspend_resume { 617 uint32_t *ramin_copy; 618 } susres; 619 620 struct backlight_device *backlight; ··· 717 struct drm_file *, int tail); 718 extern void nouveau_mem_takedown(struct mem_block **heap); 719 extern void nouveau_mem_free_block(struct mem_block *); 720 + extern int nouveau_mem_detect(struct drm_device *dev); 721 extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap); 722 extern int nouveau_mem_init(struct drm_device *); 723 extern int nouveau_mem_init_agp(struct drm_device *); ··· 1124 extern int nouveau_bo_unpin(struct nouveau_bo *); 1125 extern int nouveau_bo_map(struct nouveau_bo *); 1126 extern void nouveau_bo_unmap(struct nouveau_bo *); 1127 + extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t type, 1128 + uint32_t busy); 1129 extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); 1130 extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); 1131 extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); ··· 1167 /* nv17_gpio.c */ 1168 int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); 1169 int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); 1170 + 1171 + /* nv50_gpio.c */ 1172 + int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); 1173 + int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); 1174 1175 #ifndef ioread32_native 1176 #ifdef __BIG_ENDIAN
+1
drivers/gpu/drm/nouveau/nouveau_encoder.h
··· 47 48 union { 49 struct { 50 int dpcd_version; 51 int link_nr; 52 int link_bw;
··· 47 48 union { 49 struct { 50 + int mc_unknown; 51 int dpcd_version; 52 int link_nr; 53 int link_bw;
+25 -30
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 180 { 181 struct nouveau_bo *nvbo = gem->driver_private; 182 struct ttm_buffer_object *bo = &nvbo->bo; 183 - uint64_t flags; 184 185 - if (!valid_domains || (!read_domains && !write_domains)) 186 return -EINVAL; 187 188 - if (write_domains) { 189 - if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && 190 - (write_domains & NOUVEAU_GEM_DOMAIN_VRAM)) 191 - flags = TTM_PL_FLAG_VRAM; 192 - else 193 - if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) && 194 - (write_domains & NOUVEAU_GEM_DOMAIN_GART)) 195 - flags = TTM_PL_FLAG_TT; 196 - else 197 - return -EINVAL; 198 - } else { 199 - if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && 200 - (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) && 201 - bo->mem.mem_type == TTM_PL_VRAM) 202 - flags = TTM_PL_FLAG_VRAM; 203 - else 204 - if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) && 205 - (read_domains & NOUVEAU_GEM_DOMAIN_GART) && 206 - bo->mem.mem_type == TTM_PL_TT) 207 - flags = TTM_PL_FLAG_TT; 208 - else 209 - if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && 210 - (read_domains & NOUVEAU_GEM_DOMAIN_VRAM)) 211 - flags = TTM_PL_FLAG_VRAM; 212 - else 213 - flags = TTM_PL_FLAG_TT; 214 - } 215 216 - nouveau_bo_placement_set(nvbo, flags); 217 return 0; 218 } 219
··· 180 { 181 struct nouveau_bo *nvbo = gem->driver_private; 182 struct ttm_buffer_object *bo = &nvbo->bo; 183 + uint32_t domains = valid_domains & 184 + (write_domains ? write_domains : read_domains); 185 + uint32_t pref_flags = 0, valid_flags = 0; 186 187 + if (!domains) 188 return -EINVAL; 189 190 + if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) 191 + valid_flags |= TTM_PL_FLAG_VRAM; 192 193 + if (valid_domains & NOUVEAU_GEM_DOMAIN_GART) 194 + valid_flags |= TTM_PL_FLAG_TT; 195 + 196 + if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) && 197 + bo->mem.mem_type == TTM_PL_VRAM) 198 + pref_flags |= TTM_PL_FLAG_VRAM; 199 + 200 + else if ((domains & NOUVEAU_GEM_DOMAIN_GART) && 201 + bo->mem.mem_type == TTM_PL_TT) 202 + pref_flags |= TTM_PL_FLAG_TT; 203 + 204 + else if (domains & NOUVEAU_GEM_DOMAIN_VRAM) 205 + pref_flags |= TTM_PL_FLAG_VRAM; 206 + 207 + else 208 + pref_flags |= TTM_PL_FLAG_TT; 209 + 210 + nouveau_bo_placement_set(nvbo, pref_flags, valid_flags); 211 + 212 return 0; 213 } 214
+1
drivers/gpu/drm/nouveau/nouveau_irq.c
··· 51 52 if (dev_priv->card_type == NV_50) { 53 INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); 54 INIT_LIST_HEAD(&dev_priv->vbl_waiting); 55 } 56 }
··· 51 52 if (dev_priv->card_type == NV_50) { 53 INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); 54 + INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh); 55 INIT_LIST_HEAD(&dev_priv->vbl_waiting); 56 } 57 }
+75 -47
drivers/gpu/drm/nouveau/nouveau_mem.c
··· 347 return -EBUSY; 348 } 349 350 return 0; 351 } 352 ··· 398 } 399 400 nv_wr32(dev, 0x100c80, 0x00000001); 401 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { 402 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); 403 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); ··· 477 } 478 } 479 480 - /*XXX won't work on BSD because of pci_read_config_dword */ 481 static uint32_t 482 - nouveau_mem_fb_amount_igp(struct drm_device *dev) 483 { 484 struct drm_nouveau_private *dev_priv = dev->dev_private; 485 struct pci_dev *bridge; ··· 512 return 0; 513 } 514 515 - if (dev_priv->flags&NV_NFORCE) { 516 pci_read_config_dword(bridge, 0x7C, &mem); 517 return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; 518 } else 519 - if (dev_priv->flags&NV_NFORCE2) { 520 pci_read_config_dword(bridge, 0x84, &mem); 521 return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; 522 } ··· 526 } 527 528 /* returns the amount of FB ram in bytes */ 529 - uint64_t nouveau_mem_fb_amount(struct drm_device *dev) 530 { 531 struct drm_nouveau_private *dev_priv = dev->dev_private; 532 - uint32_t boot0; 533 534 - switch (dev_priv->card_type) { 535 - case NV_04: 536 - boot0 = nv_rd32(dev, NV03_BOOT_0); 537 - if (boot0 & 0x00000100) 538 - return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024; 539 - 540 - switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) { 541 - case NV04_BOOT_0_RAM_AMOUNT_32MB: 542 - return 32 * 1024 * 1024; 543 - case NV04_BOOT_0_RAM_AMOUNT_16MB: 544 - return 16 * 1024 * 1024; 545 - case NV04_BOOT_0_RAM_AMOUNT_8MB: 546 - return 8 * 1024 * 1024; 547 - case NV04_BOOT_0_RAM_AMOUNT_4MB: 548 - return 4 * 1024 * 1024; 549 - } 550 - break; 551 - case NV_10: 552 - case NV_20: 553 - case NV_30: 554 - case NV_40: 555 - case NV_50: 556 - default: 557 - if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { 558 - return nouveau_mem_fb_amount_igp(dev); 559 - } else { 560 - uint64_t mem; 561 - mem = (nv_rd32(dev, NV04_FIFO_DATA) & 562 - NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> 563 - NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT; 564 - return mem * 1024 * 1024; 565 - } 566 - break; 567 } 568 569 - NV_ERROR(dev, 570 - "Unable to detect video ram size. Please report your setup to " 571 - DRIVER_EMAIL "\n"); 572 - return 0; 573 } 574 575 #if __OS_HAS_AGP ··· 662 spin_lock_init(&dev_priv->ttm.bo_list_lock); 663 spin_lock_init(&dev_priv->tile.lock); 664 665 - dev_priv->fb_available_size = nouveau_mem_fb_amount(dev); 666 - 667 dev_priv->fb_mappable_pages = dev_priv->fb_available_size; 668 if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1)) 669 dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1); 670 dev_priv->fb_mappable_pages >>= PAGE_SHIFT; 671 - 672 - NV_INFO(dev, "%d MiB VRAM\n", (int)(dev_priv->fb_available_size >> 20)); 673 674 /* remove reserved space at end of vram from available amount */ 675 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
··· 347 return -EBUSY; 348 } 349 350 + nv_wr32(dev, 0x100c80, 0x00040001); 351 + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { 352 + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); 353 + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); 354 + return -EBUSY; 355 + } 356 + 357 + nv_wr32(dev, 0x100c80, 0x00060001); 358 + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { 359 + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); 360 + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); 361 + return -EBUSY; 362 + } 363 + 364 return 0; 365 } 366 ··· 384 } 385 386 nv_wr32(dev, 0x100c80, 0x00000001); 387 + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { 388 + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); 389 + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); 390 + return; 391 + } 392 + 393 + nv_wr32(dev, 0x100c80, 0x00040001); 394 + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { 395 + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); 396 + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); 397 + return; 398 + } 399 + 400 + nv_wr32(dev, 0x100c80, 0x00060001); 401 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { 402 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); 403 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); ··· 449 } 450 } 451 452 static uint32_t 453 + nouveau_mem_detect_nv04(struct drm_device *dev) 454 + { 455 + uint32_t boot0 = nv_rd32(dev, NV03_BOOT_0); 456 + 457 + if (boot0 & 0x00000100) 458 + return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024; 459 + 460 + switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) { 461 + case NV04_BOOT_0_RAM_AMOUNT_32MB: 462 + return 32 * 1024 * 1024; 463 + case NV04_BOOT_0_RAM_AMOUNT_16MB: 464 + return 16 * 1024 * 1024; 465 + case NV04_BOOT_0_RAM_AMOUNT_8MB: 466 + return 8 * 1024 * 1024; 467 + case NV04_BOOT_0_RAM_AMOUNT_4MB: 468 + return 4 * 1024 * 1024; 469 + } 470 + 471 + return 0; 472 + } 473 + 474 + static uint32_t 475 + nouveau_mem_detect_nforce(struct drm_device *dev) 476 { 477 struct drm_nouveau_private *dev_priv = dev->dev_private; 478 struct pci_dev *bridge; ··· 463 return 0; 464 } 465 466 + if (dev_priv->flags & NV_NFORCE) { 467 pci_read_config_dword(bridge, 0x7C, &mem); 468 return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; 469 } else 470 + if (dev_priv->flags & NV_NFORCE2) { 471 pci_read_config_dword(bridge, 0x84, &mem); 472 return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; 473 } ··· 477 } 478 479 /* returns the amount of FB ram in bytes */ 480 + int 481 + nouveau_mem_detect(struct drm_device *dev) 482 { 483 struct drm_nouveau_private *dev_priv = dev->dev_private; 484 485 + if (dev_priv->card_type == NV_04) { 486 + dev_priv->vram_size = nouveau_mem_detect_nv04(dev); 487 + } else 488 + if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { 489 + dev_priv->vram_size = nouveau_mem_detect_nforce(dev); 490 + } else { 491 + dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); 492 + dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK; 493 + if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) 494 + dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; 495 } 496 497 + NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); 498 + if (dev_priv->vram_sys_base) { 499 + NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", 500 + dev_priv->vram_sys_base); 501 + } 502 + 503 + if (dev_priv->vram_size) 504 + return 0; 505 + return -ENOMEM; 506 } 507 508 #if __OS_HAS_AGP ··· 631 spin_lock_init(&dev_priv->ttm.bo_list_lock); 632 
spin_lock_init(&dev_priv->tile.lock); 633 634 + dev_priv->fb_available_size = dev_priv->vram_size; 635 dev_priv->fb_mappable_pages = dev_priv->fb_available_size; 636 if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1)) 637 dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1); 638 dev_priv->fb_mappable_pages >>= PAGE_SHIFT; 639 640 /* remove reserved space at end of vram from available amount */ 641 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
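
One arithmetic detail worth noting in nouveau_mem_detect(): the non-IGP path keeps the masked NV04_FIFO_DATA value without shifting it down. Since the RAM-amount field is expressed in MiB and, going by the old code's shift-then-multiply and the MiB print above, sits at bit 20, masking alone already yields the size in bytes, because MiB << 20 == bytes. A standalone check of that identity, with an assumed 0xfff00000 mask:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t fifo_data = 0x10000000;	/* 256 (MiB) in bits 20..31 */
	uint64_t old_way   = (uint64_t)((fifo_data & 0xfff00000) >> 20) * 1024 * 1024;
	uint64_t new_way   = fifo_data & 0xfff00000;

	assert(old_way == new_way);		/* both 256 MiB expressed in bytes */
	return 0;
}
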
+18
drivers/gpu/drm/nouveau/nouveau_sgdma.c
··· 172 } 173 dev_priv->engine.instmem.finish_access(nvbe->dev); 174 175 nvbe->bound = false; 176 return 0; 177 }
··· 172 } 173 dev_priv->engine.instmem.finish_access(nvbe->dev); 174 175 + if (dev_priv->card_type == NV_50) { 176 + nv_wr32(dev, 0x100c80, 0x00050001); 177 + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { 178 + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); 179 + NV_ERROR(dev, "0x100c80 = 0x%08x\n", 180 + nv_rd32(dev, 0x100c80)); 181 + return -EBUSY; 182 + } 183 + 184 + nv_wr32(dev, 0x100c80, 0x00000001); 185 + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { 186 + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); 187 + NV_ERROR(dev, "0x100c80 = 0x%08x\n", 188 + nv_rd32(dev, 0x100c80)); 189 + return -EBUSY; 190 + } 191 + } 192 + 193 nvbe->bound = false; 194 return 0; 195 }
+12 -2
drivers/gpu/drm/nouveau/nouveau_state.c
··· 341 342 gpuobj = NULL; 343 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, 344 - 0, nouveau_mem_fb_amount(dev), 345 NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, 346 &gpuobj); 347 if (ret) ··· 427 goto out; 428 } 429 430 ret = nouveau_gpuobj_early_init(dev); 431 if (ret) 432 goto out_bios; ··· 506 else 507 ret = nv04_display_create(dev); 508 if (ret) 509 - goto out_irq; 510 } 511 512 ret = nouveau_backlight_init(dev); ··· 520 521 return 0; 522 523 out_irq: 524 drm_irq_uninstall(dev); 525 out_fifo: ··· 542 out_gpuobj: 543 nouveau_gpuobj_takedown(dev); 544 out_mem: 545 nouveau_mem_close(dev); 546 out_instmem: 547 engine->instmem.takedown(dev);
··· 341 342 gpuobj = NULL; 343 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, 344 + 0, dev_priv->vram_size, 345 NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, 346 &gpuobj); 347 if (ret) ··· 427 goto out; 428 } 429 430 + ret = nouveau_mem_detect(dev); 431 + if (ret) 432 + goto out_bios; 433 + 434 ret = nouveau_gpuobj_early_init(dev); 435 if (ret) 436 goto out_bios; ··· 502 else 503 ret = nv04_display_create(dev); 504 if (ret) 505 + goto out_channel; 506 } 507 508 ret = nouveau_backlight_init(dev); ··· 516 517 return 0; 518 519 + out_channel: 520 + if (dev_priv->channel) { 521 + nouveau_channel_free(dev_priv->channel); 522 + dev_priv->channel = NULL; 523 + } 524 out_irq: 525 drm_irq_uninstall(dev); 526 out_fifo: ··· 533 out_gpuobj: 534 nouveau_gpuobj_takedown(dev); 535 out_mem: 536 + nouveau_sgdma_takedown(dev); 537 nouveau_mem_close(dev); 538 out_instmem: 539 engine->instmem.takedown(dev);
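
The new out_channel label slots into the usual kernel error-unwind ladder: labels run in reverse order of setup, so a display-creation failure now frees the channel allocated just before it instead of leaking it. Reduced to a generic, standalone sketch (all names hypothetical):

struct ctx {
	void *channel;
	int irq_installed;
};

static int setup_irq(struct ctx *c)     { c->irq_installed = 1; return 0; }
static int setup_channel(struct ctx *c) { c->channel = c; return 0; }
static int setup_display(struct ctx *c) { (void)c; return -1; /* fails */ }
static void teardown_channel(struct ctx *c) { c->channel = NULL; }
static void teardown_irq(struct ctx *c)     { c->irq_installed = 0; }

static int load(struct ctx *c)
{
	int ret;

	ret = setup_irq(c);
	if (ret)
		return ret;

	ret = setup_channel(c);
	if (ret)
		goto out_irq;

	ret = setup_display(c);
	if (ret)
		goto out_channel;	/* the label this patch adds */

	return 0;

out_channel:
	teardown_channel(c);
out_irq:
	teardown_irq(c);
	return ret;
}
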
+1 -1
drivers/gpu/drm/nouveau/nv40_fifo.c
··· 278 default: 279 nv_wr32(dev, 0x2230, 0); 280 nv_wr32(dev, NV40_PFIFO_RAMFC, 281 - ((nouveau_mem_fb_amount(dev) - 512 * 1024 + 282 dev_priv->ramfc_offset) >> 16) | (3 << 16)); 283 break; 284 }
··· 278 default: 279 nv_wr32(dev, 0x2230, 0); 280 nv_wr32(dev, NV40_PFIFO_RAMFC, 281 + ((dev_priv->vram_size - 512 * 1024 + 282 dev_priv->ramfc_offset) >> 16) | (3 << 16)); 283 break; 284 }
+21
drivers/gpu/drm/nouveau/nv40_graph.c
··· 335 nv_wr32(dev, 0x400b38, 0x2ffff800); 336 nv_wr32(dev, 0x400b3c, 0x00006000); 337 338 /* Turn all the tiling regions off. */ 339 for (i = 0; i < pfb->num_tiles; i++) 340 nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
··· 335 nv_wr32(dev, 0x400b38, 0x2ffff800); 336 nv_wr32(dev, 0x400b3c, 0x00006000); 337 338 + /* Tiling related stuff. */ 339 + switch (dev_priv->chipset) { 340 + case 0x44: 341 + case 0x4a: 342 + nv_wr32(dev, 0x400bc4, 0x1003d888); 343 + nv_wr32(dev, 0x400bbc, 0xb7a7b500); 344 + break; 345 + case 0x46: 346 + nv_wr32(dev, 0x400bc4, 0x0000e024); 347 + nv_wr32(dev, 0x400bbc, 0xb7a7b520); 348 + break; 349 + case 0x4c: 350 + case 0x4e: 351 + case 0x67: 352 + nv_wr32(dev, 0x400bc4, 0x1003d888); 353 + nv_wr32(dev, 0x400bbc, 0xb7a7b540); 354 + break; 355 + default: 356 + break; 357 + } 358 + 359 /* Turn all the tiling regions off. */ 360 for (i = 0; i < pfb->num_tiles; i++) 361 nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
+14 -8
drivers/gpu/drm/nouveau/nv50_display.c
··· 143 } 144 145 ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19, 146 - 0, nouveau_mem_fb_amount(dev)); 147 if (ret) { 148 nv50_evo_channel_del(pchan); 149 return ret; ··· 231 /* This used to be in crtc unblank, but seems out of place there. */ 232 nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0); 233 /* RAM is clamped to 256 MiB. */ 234 - ram_amount = nouveau_mem_fb_amount(dev); 235 NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount); 236 if (ram_amount > 256*1024*1024) 237 ram_amount = 256*1024*1024; ··· 529 } 530 531 ret = nv50_display_init(dev); 532 - if (ret) 533 return ret; 534 535 return 0; 536 } ··· 887 nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000); 888 } 889 890 - static void 891 - nv50_display_irq_hotplug(struct drm_device *dev) 892 { 893 - struct drm_nouveau_private *dev_priv = dev->dev_private; 894 struct drm_connector *connector; 895 const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; 896 uint32_t unplug_mask, plug_mask, change_mask; ··· 953 struct drm_nouveau_private *dev_priv = dev->dev_private; 954 uint32_t delayed = 0; 955 956 - while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) 957 - nv50_display_irq_hotplug(dev); 958 959 while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { 960 uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
··· 143 } 144 145 ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19, 146 + 0, dev_priv->vram_size); 147 if (ret) { 148 nv50_evo_channel_del(pchan); 149 return ret; ··· 231 /* This used to be in crtc unblank, but seems out of place there. */ 232 nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0); 233 /* RAM is clamped to 256 MiB. */ 234 + ram_amount = dev_priv->vram_size; 235 NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount); 236 if (ram_amount > 256*1024*1024) 237 ram_amount = 256*1024*1024; ··· 529 } 530 531 ret = nv50_display_init(dev); 532 + if (ret) { 533 + nv50_display_destroy(dev); 534 return ret; 535 + } 536 537 return 0; 538 } ··· 885 nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000); 886 } 887 888 + void 889 + nv50_display_irq_hotplug_bh(struct work_struct *work) 890 { 891 + struct drm_nouveau_private *dev_priv = 892 + container_of(work, struct drm_nouveau_private, hpd_work); 893 + struct drm_device *dev = dev_priv->dev; 894 struct drm_connector *connector; 895 const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; 896 uint32_t unplug_mask, plug_mask, change_mask; ··· 949 struct drm_nouveau_private *dev_priv = dev->dev_private; 950 uint32_t delayed = 0; 951 952 + if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) { 953 + if (!work_pending(&dev_priv->hpd_work)) 954 + queue_work(dev_priv->wq, &dev_priv->hpd_work); 955 + } 956 957 while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { 958 uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
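
Hotplug processing moves out of hard-IRQ context because it may need to sleep (aux/i2c traffic, connector probing); the interrupt handler now only queues work on the driver's workqueue, paired with the INIT_WORK added in nouveau_irq.c above. A condensed kernel-style sketch of that pattern (function names hypothetical, not compilable outside the driver):

static void example_hpd_bh(struct work_struct *work)
{
	struct drm_nouveau_private *dev_priv =
		container_of(work, struct drm_nouveau_private, hpd_work);

	/* process context: sleeping, mutexes and i2c/aux transfers are fine;
	 * the real bottom half walks connectors and re-runs detection here */
}

static void example_irq_path(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* hard-IRQ context: hand the event off and return quickly */
	if (!work_pending(&dev_priv->hpd_work))
		queue_work(dev_priv->wq, &dev_priv->hpd_work);
}
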
+1
drivers/gpu/drm/nouveau/nv50_display.h
··· 37 38 void nv50_display_irq_handler(struct drm_device *dev); 39 void nv50_display_irq_handler_bh(struct work_struct *work); 40 int nv50_display_init(struct drm_device *dev); 41 int nv50_display_create(struct drm_device *dev); 42 int nv50_display_destroy(struct drm_device *dev);
··· 37 38 void nv50_display_irq_handler(struct drm_device *dev); 39 void nv50_display_irq_handler_bh(struct work_struct *work); 40 + void nv50_display_irq_hotplug_bh(struct work_struct *work); 41 int nv50_display_init(struct drm_device *dev); 42 int nv50_display_create(struct drm_device *dev); 43 int nv50_display_destroy(struct drm_device *dev);
+7 -6
drivers/gpu/drm/nouveau/nv50_fbcon.c
··· 157 struct drm_nouveau_private *dev_priv = dev->dev_private; 158 struct nouveau_channel *chan = dev_priv->channel; 159 struct nouveau_gpuobj *eng2d = NULL; 160 int ret, format; 161 162 switch (info->var.bits_per_pixel) { 163 case 8: ··· 251 OUT_RING(chan, info->fix.line_length); 252 OUT_RING(chan, info->var.xres_virtual); 253 OUT_RING(chan, info->var.yres_virtual); 254 - OUT_RING(chan, 0); 255 - OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + 256 - dev_priv->vm_vram_base); 257 BEGIN_RING(chan, NvSub2D, 0x0230, 2); 258 OUT_RING(chan, format); 259 OUT_RING(chan, 1); ··· 260 OUT_RING(chan, info->fix.line_length); 261 OUT_RING(chan, info->var.xres_virtual); 262 OUT_RING(chan, info->var.yres_virtual); 263 - OUT_RING(chan, 0); 264 - OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + 265 - dev_priv->vm_vram_base); 266 267 return 0; 268 }
··· 157 struct drm_nouveau_private *dev_priv = dev->dev_private; 158 struct nouveau_channel *chan = dev_priv->channel; 159 struct nouveau_gpuobj *eng2d = NULL; 160 + uint64_t fb; 161 int ret, format; 162 + 163 + fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base; 164 165 switch (info->var.bits_per_pixel) { 166 case 8: ··· 248 OUT_RING(chan, info->fix.line_length); 249 OUT_RING(chan, info->var.xres_virtual); 250 OUT_RING(chan, info->var.yres_virtual); 251 + OUT_RING(chan, upper_32_bits(fb)); 252 + OUT_RING(chan, lower_32_bits(fb)); 253 BEGIN_RING(chan, NvSub2D, 0x0230, 2); 254 OUT_RING(chan, format); 255 OUT_RING(chan, 1); ··· 258 OUT_RING(chan, info->fix.line_length); 259 OUT_RING(chan, info->var.xres_virtual); 260 OUT_RING(chan, info->var.yres_virtual); 261 + OUT_RING(chan, upper_32_bits(fb)); 262 + OUT_RING(chan, lower_32_bits(fb)); 263 264 return 0; 265 }
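
The fbcon fix is purely about pushing a real high word: the old code hardwired 0 as the upper 32 bits of the framebuffer's VM address, which breaks once it sits above the 4GiB mark. upper_32_bits()/lower_32_bits() are the stock kernel helpers; their standalone equivalent is just:

#include <stdint.h>

static inline uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }
static inline uint32_t lo32(uint64_t v) { return (uint32_t)v; }
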
+76
drivers/gpu/drm/nouveau/nv50_gpio.c
···
··· 1 + /* 2 + * Copyright 2010 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + * Authors: Ben Skeggs 23 + */ 24 + 25 + #include "drmP.h" 26 + #include "nouveau_drv.h" 27 + #include "nouveau_hw.h" 28 + 29 + static int 30 + nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift) 31 + { 32 + const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; 33 + 34 + if (gpio->line > 32) 35 + return -EINVAL; 36 + 37 + *reg = nv50_gpio_reg[gpio->line >> 3]; 38 + *shift = (gpio->line & 7) << 2; 39 + return 0; 40 + } 41 + 42 + int 43 + nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag) 44 + { 45 + struct dcb_gpio_entry *gpio; 46 + uint32_t r, s, v; 47 + 48 + gpio = nouveau_bios_gpio_entry(dev, tag); 49 + if (!gpio) 50 + return -ENOENT; 51 + 52 + if (nv50_gpio_location(gpio, &r, &s)) 53 + return -EINVAL; 54 + 55 + v = nv_rd32(dev, r) >> (s + 2); 56 + return ((v & 1) == (gpio->state[1] & 1)); 57 + } 58 + 59 + int 60 + nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) 61 + { 62 + struct dcb_gpio_entry *gpio; 63 + uint32_t r, s, v; 64 + 65 + gpio = nouveau_bios_gpio_entry(dev, tag); 66 + if (!gpio) 67 + return -ENOENT; 68 + 69 + if (nv50_gpio_location(gpio, &r, &s)) 70 + return -EINVAL; 71 + 72 + v = nv_rd32(dev, r) & ~(0x3 << s); 73 + v |= (gpio->state[state] ^ 2) << s; 74 + nv_wr32(dev, r, v); 75 + return 0; 76 + }
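
A hypothetical caller of the new file, for illustration only (DCB_GPIO_EXAMPLE_TAG is a placeholder, not a real dcb_gpio_tag value from nouveau_bios.h): look up a GPIO by its DCB function tag, drive it, then read it back.

static void example_panel_gpio(struct drm_device *dev)
{
	/* DCB_GPIO_EXAMPLE_TAG stands in for whichever tag the board exposes */
	if (nv50_gpio_set(dev, DCB_GPIO_EXAMPLE_TAG, 1) == 0) {
		int state = nv50_gpio_get(dev, DCB_GPIO_EXAMPLE_TAG);

		NV_DEBUG(dev, "gpio reads back as %d\n", state);
	}
}
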
+4 -3
drivers/gpu/drm/nouveau/nv50_graph.c
··· 410 { 0x5039, false, NULL }, /* m2mf */ 411 { 0x502d, false, NULL }, /* 2d */ 412 { 0x50c0, false, NULL }, /* compute */ 413 { 0x5097, false, NULL }, /* tesla (nv50) */ 414 - { 0x8297, false, NULL }, /* tesla (nv80/nv90) */ 415 - { 0x8397, false, NULL }, /* tesla (nva0) */ 416 - { 0x8597, false, NULL }, /* tesla (nva8) */ 417 {} 418 };
··· 410 { 0x5039, false, NULL }, /* m2mf */ 411 { 0x502d, false, NULL }, /* 2d */ 412 { 0x50c0, false, NULL }, /* compute */ 413 + { 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */ 414 { 0x5097, false, NULL }, /* tesla (nv50) */ 415 + { 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */ 416 + { 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */ 417 + { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ 418 {} 419 };
+15 -4
drivers/gpu/drm/nouveau/nv50_grctx.c
··· 55 #define CP_FLAG_AUTO_LOAD ((2 * 32) + 5) 56 #define CP_FLAG_AUTO_LOAD_NOT_PENDING 0 57 #define CP_FLAG_AUTO_LOAD_PENDING 1 58 #define CP_FLAG_XFER ((2 * 32) + 11) 59 #define CP_FLAG_XFER_IDLE 0 60 #define CP_FLAG_XFER_BUSY 1 61 - #define CP_FLAG_NEWCTX ((2 * 32) + 12) 62 - #define CP_FLAG_NEWCTX_BUSY 0 63 - #define CP_FLAG_NEWCTX_DONE 1 64 #define CP_FLAG_ALWAYS ((2 * 32) + 13) 65 #define CP_FLAG_ALWAYS_FALSE 0 66 #define CP_FLAG_ALWAYS_TRUE 1 ··· 177 case 0x96: 178 case 0x98: 179 case 0xa0: 180 case 0xa5: 181 case 0xa8: 182 case 0xaa: ··· 365 case 0xac: 366 gr_def(ctx, 0x401c00, 0x042500df); 367 break; 368 case 0xa5: 369 case 0xa8: 370 gr_def(ctx, 0x401c00, 0x142500df); ··· 420 break; 421 case 0x84: 422 case 0xa0: 423 case 0xa5: 424 case 0xa8: 425 case 0xaa: ··· 795 case 0xa5: 796 gr_def(ctx, offset + 0x1c, 0x310c0000); 797 break; 798 case 0xa8: 799 case 0xaa: 800 case 0xac: ··· 863 else 864 gr_def(ctx, offset + 0x8, 0x05010202); 865 gr_def(ctx, offset + 0xc, 0x00030201); 866 867 cp_ctx(ctx, base + 0x400, 2); 868 gr_def(ctx, base + 0x404, 0x00000040); ··· 1165 nv50_graph_construct_gene_unk8(ctx); 1166 if (dev_priv->chipset == 0xa0) 1167 xf_emit(ctx, 0x189, 0); 1168 - else if (dev_priv->chipset < 0xa8) 1169 xf_emit(ctx, 0x99, 0); 1170 else if (dev_priv->chipset == 0xaa) 1171 xf_emit(ctx, 0x65, 0); ··· 1205 ctx->ctxvals_pos = offset + 4; 1206 if (dev_priv->chipset == 0xa0) 1207 xf_emit(ctx, 0xa80, 0); 1208 else 1209 xf_emit(ctx, 0xa7a, 0); 1210 xf_emit(ctx, 1, 0x3fffff); ··· 1351 xf_emit(ctx, 0x942, 0); 1352 break; 1353 case 0xa0: 1354 xf_emit(ctx, 0x2042, 0); 1355 break; 1356 case 0xa5:
··· 55 #define CP_FLAG_AUTO_LOAD ((2 * 32) + 5) 56 #define CP_FLAG_AUTO_LOAD_NOT_PENDING 0 57 #define CP_FLAG_AUTO_LOAD_PENDING 1 58 + #define CP_FLAG_NEWCTX ((2 * 32) + 10) 59 + #define CP_FLAG_NEWCTX_BUSY 0 60 + #define CP_FLAG_NEWCTX_DONE 1 61 #define CP_FLAG_XFER ((2 * 32) + 11) 62 #define CP_FLAG_XFER_IDLE 0 63 #define CP_FLAG_XFER_BUSY 1 64 #define CP_FLAG_ALWAYS ((2 * 32) + 13) 65 #define CP_FLAG_ALWAYS_FALSE 0 66 #define CP_FLAG_ALWAYS_TRUE 1 ··· 177 case 0x96: 178 case 0x98: 179 case 0xa0: 180 + case 0xa3: 181 case 0xa5: 182 case 0xa8: 183 case 0xaa: ··· 364 case 0xac: 365 gr_def(ctx, 0x401c00, 0x042500df); 366 break; 367 + case 0xa3: 368 case 0xa5: 369 case 0xa8: 370 gr_def(ctx, 0x401c00, 0x142500df); ··· 418 break; 419 case 0x84: 420 case 0xa0: 421 + case 0xa3: 422 case 0xa5: 423 case 0xa8: 424 case 0xaa: ··· 792 case 0xa5: 793 gr_def(ctx, offset + 0x1c, 0x310c0000); 794 break; 795 + case 0xa3: 796 case 0xa8: 797 case 0xaa: 798 case 0xac: ··· 859 else 860 gr_def(ctx, offset + 0x8, 0x05010202); 861 gr_def(ctx, offset + 0xc, 0x00030201); 862 + if (dev_priv->chipset == 0xa3) 863 + cp_ctx(ctx, base + 0x36c, 1); 864 865 cp_ctx(ctx, base + 0x400, 2); 866 gr_def(ctx, base + 0x404, 0x00000040); ··· 1159 nv50_graph_construct_gene_unk8(ctx); 1160 if (dev_priv->chipset == 0xa0) 1161 xf_emit(ctx, 0x189, 0); 1162 + else if (dev_priv->chipset == 0xa3) 1163 + xf_emit(ctx, 0xd5, 0); 1164 + else if (dev_priv->chipset == 0xa5) 1165 xf_emit(ctx, 0x99, 0); 1166 else if (dev_priv->chipset == 0xaa) 1167 xf_emit(ctx, 0x65, 0); ··· 1197 ctx->ctxvals_pos = offset + 4; 1198 if (dev_priv->chipset == 0xa0) 1199 xf_emit(ctx, 0xa80, 0); 1200 + else if (dev_priv->chipset == 0xa3) 1201 + xf_emit(ctx, 0xa7c, 0); 1202 else 1203 xf_emit(ctx, 0xa7a, 0); 1204 xf_emit(ctx, 1, 0x3fffff); ··· 1341 xf_emit(ctx, 0x942, 0); 1342 break; 1343 case 0xa0: 1344 + case 0xa3: 1345 xf_emit(ctx, 0x2042, 0); 1346 break; 1347 case 0xa5:
+6 -10
drivers/gpu/drm/nouveau/nv50_instmem.c
··· 63 struct drm_nouveau_private *dev_priv = dev->dev_private; 64 struct nouveau_channel *chan; 65 uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size; 66 struct nv50_instmem_priv *priv; 67 int ret, i; 68 - uint32_t v, save_nv001700; 69 70 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 71 if (!priv) ··· 77 for (i = 0x1700; i <= 0x1710; i += 4) 78 priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); 79 80 - if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) 81 - dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; 82 - else 83 - dev_priv->vram_sys_base = 0; 84 - 85 /* Reserve the last MiB of VRAM, we should probably try to avoid 86 * setting up the below tables over the top of the VBIOS image at 87 * some point. 88 */ 89 dev_priv->ramin_rsvd_vram = 1 << 20; 90 - c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram; 91 c_size = 128 << 10; 92 c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200; 93 c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20; ··· 102 dev_priv->vm_gart_size = NV50_VM_BLOCK; 103 104 dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size; 105 - dev_priv->vm_vram_size = nouveau_mem_fb_amount(dev); 106 if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM) 107 dev_priv->vm_vram_size = NV50_VM_MAX_VRAM; 108 dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK); ··· 185 186 i = 0; 187 while (v < dev_priv->vram_sys_base + c_offset + c_size) { 188 - BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v); 189 - BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); 190 v += 0x1000; 191 i += 8; 192 }
··· 63 struct drm_nouveau_private *dev_priv = dev->dev_private; 64 struct nouveau_channel *chan; 65 uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size; 66 + uint32_t save_nv001700; 67 + uint64_t v; 68 struct nv50_instmem_priv *priv; 69 int ret, i; 70 71 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 72 if (!priv) ··· 76 for (i = 0x1700; i <= 0x1710; i += 4) 77 priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); 78 79 /* Reserve the last MiB of VRAM, we should probably try to avoid 80 * setting up the below tables over the top of the VBIOS image at 81 * some point. 82 */ 83 dev_priv->ramin_rsvd_vram = 1 << 20; 84 + c_offset = dev_priv->vram_size - dev_priv->ramin_rsvd_vram; 85 c_size = 128 << 10; 86 c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200; 87 c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20; ··· 106 dev_priv->vm_gart_size = NV50_VM_BLOCK; 107 108 dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size; 109 + dev_priv->vm_vram_size = dev_priv->vram_size; 110 if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM) 111 dev_priv->vm_vram_size = NV50_VM_MAX_VRAM; 112 dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK); ··· 189 190 i = 0; 191 while (v < dev_priv->vram_sys_base + c_offset + c_size) { 192 + BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, lower_32_bits(v)); 193 + BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, upper_32_bits(v)); 194 v += 0x1000; 195 i += 8; 196 }
+24 -1
drivers/gpu/drm/nouveau/nv50_sor.c
··· 211 mode_ctl = 0x0200; 212 break; 213 case OUTPUT_DP: 214 - mode_ctl |= 0x00050000; 215 if (nv_encoder->dcb->sorconf.link & 1) 216 mode_ctl |= 0x00000800; 217 else ··· 274 int 275 nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) 276 { 277 struct nouveau_encoder *nv_encoder = NULL; 278 struct drm_encoder *encoder; 279 bool dum; ··· 319 320 encoder->possible_crtcs = entry->heads; 321 encoder->possible_clones = 0; 322 323 return 0; 324 }
··· 211 mode_ctl = 0x0200; 212 break; 213 case OUTPUT_DP: 214 + mode_ctl |= (nv_encoder->dp.mc_unknown << 16); 215 if (nv_encoder->dcb->sorconf.link & 1) 216 mode_ctl |= 0x00000800; 217 else ··· 274 int 275 nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) 276 { 277 + struct drm_nouveau_private *dev_priv = dev->dev_private; 278 struct nouveau_encoder *nv_encoder = NULL; 279 struct drm_encoder *encoder; 280 bool dum; ··· 318 319 encoder->possible_crtcs = entry->heads; 320 encoder->possible_clones = 0; 321 + 322 + if (nv_encoder->dcb->type == OUTPUT_DP) { 323 + uint32_t mc, or = nv_encoder->or; 324 + 325 + if (dev_priv->chipset < 0x90 || 326 + dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0) 327 + mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or)); 328 + else 329 + mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or)); 330 + 331 + switch ((mc & 0x00000f00) >> 8) { 332 + case 8: 333 + case 9: 334 + nv_encoder->dp.mc_unknown = (mc & 0x000f0000) >> 16; 335 + break; 336 + default: 337 + break; 338 + } 339 + 340 + if (!nv_encoder->dp.mc_unknown) 341 + nv_encoder->dp.mc_unknown = 5; 342 + } 343 344 return 0; 345 }
+5 -2
drivers/gpu/drm/radeon/atom.c
··· 1137 int len, ws, ps, ptr; 1138 unsigned char op; 1139 atom_exec_context ectx; 1140 1141 if (!base) 1142 return -EINVAL; ··· 1170 if (ectx.abort) { 1171 DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n", 1172 base, len, ws, ps, ptr - 1); 1173 - return -EINVAL; 1174 } 1175 1176 if (op < ATOM_OP_CNT && op > 0) ··· 1186 debug_depth--; 1187 SDEBUG("<<\n"); 1188 1189 if (ws) 1190 kfree(ectx.ws); 1191 - return 0; 1192 } 1193 1194 int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
··· 1137 int len, ws, ps, ptr;
1138 unsigned char op;
1139 atom_exec_context ectx;
1140 + int ret = 0;
1141
1142 if (!base)
1143 return -EINVAL;
··· 1169 if (ectx.abort) {
1170 DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
1171 base, len, ws, ps, ptr - 1);
1172 + ret = -EINVAL;
1173 + goto free;
1174 }
1175
1176 if (op < ATOM_OP_CNT && op > 0)
··· 1184 debug_depth--;
1185 SDEBUG("<<\n");
1186
1187 + free:
1188 if (ws)
1189 kfree(ectx.ws);
1190 + return ret;
1191 }
1192
1193 int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
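The atom.c hunk above replaces the early return -EINVAL on an abort with ret = -EINVAL; goto free; so the workspace allocated for the table is released on every exit path. Below is a minimal, self-contained sketch of that single-exit cleanup pattern under hypothetical names (parse_table() and its 64-byte workspace), not the driver code itself.

#include <stdlib.h>
#include <stdio.h>

/* Single-exit cleanup: record the error and jump to a common label instead of
 * returning early past the free(), analogous to kfree(ectx.ws) in the hunk. */
static int parse_table(int abort_requested)
{
	int ret = 0;
	char *ws = malloc(64);		/* workspace, analogous to ectx.ws */

	if (!ws)
		return -1;

	if (abort_requested) {
		ret = -1;		/* previously an early return leaked ws */
		goto free;
	}

	/* ... normal table execution would go here ... */

free:
	free(ws);			/* reached on success and on abort */
	return ret;
}

int main(void)
{
	printf("%d %d\n", parse_table(0), parse_table(1));	/* prints: 0 -1 */
	return 0;
}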
+3 -2
drivers/gpu/drm/radeon/r300.c
··· 325
326 r100_hdp_reset(rdev);
327 /* FIXME: rv380 one pipes ? */
328 - if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
329 /* r300,r350 */
330 rdev->num_gb_pipes = 2;
331 } else {
332 - /* rv350,rv370,rv380 */
333 rdev->num_gb_pipes = 1;
334 }
335 rdev->num_z_pipes = 1;
··· 325
326 r100_hdp_reset(rdev);
327 /* FIXME: rv380 one pipes ? */
328 + if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
329 + (rdev->family == CHIP_R350)) {
330 /* r300,r350 */
331 rdev->num_gb_pipes = 2;
332 } else {
333 + /* rv350,rv370,rv380,r300 AD */
334 rdev->num_gb_pipes = 1;
335 }
336 rdev->num_z_pipes = 1;
+7 -4
drivers/gpu/drm/radeon/radeon_atombios.c
··· 69 struct radeon_i2c_bus_rec i2c;
70 int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
71 struct _ATOM_GPIO_I2C_INFO *i2c_info;
72 - uint16_t data_offset;
73 - int i;
74
75 memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
76 i2c.valid = false;
77
78 - if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
79 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
80
81 - for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
82 gpio = &i2c_info->asGPIO_Info[i];
83
84 if (gpio->sucI2cId.ucAccess == id) {
··· 69 struct radeon_i2c_bus_rec i2c;
70 int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
71 struct _ATOM_GPIO_I2C_INFO *i2c_info;
72 + uint16_t data_offset, size;
73 + int i, num_indices;
74
75 memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
76 i2c.valid = false;
77
78 + if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
79 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
80
81 + num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
82 + sizeof(ATOM_GPIO_I2C_ASSIGMENT);
83 +
84 + for (i = 0; i < num_indices; i++) {
85 gpio = &i2c_info->asGPIO_Info[i];
86
87 if (gpio->sucI2cId.ucAccess == id) {
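The hunk above derives the number of GPIO/I2C entries from the table size reported by atom_parse_data_header() instead of iterating up to a fixed ATOM_MAX_SUPPORTED_DEVICE bound. A minimal sketch of that size-based bounds calculation follows; the struct layouts are simplified stand-ins, not the real ATOM definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for ATOM_COMMON_TABLE_HEADER / ATOM_GPIO_I2C_ASSIGMENT. */
struct table_header { uint16_t usStructureSize; uint8_t ucTableFormatRevision; uint8_t ucTableContentRevision; };
struct gpio_i2c_entry { uint32_t regs[6]; uint8_t ucAccess; uint8_t pad[3]; };

int main(void)
{
	/* Size as a BIOS data table would report it: header plus 3 entries. */
	uint16_t size = sizeof(struct table_header) + 3 * sizeof(struct gpio_i2c_entry);

	/* Entry count comes from the reported size, not a fixed maximum. */
	int num_indices = (size - sizeof(struct table_header)) /
			  sizeof(struct gpio_i2c_entry);

	printf("entries in table: %d\n", num_indices);	/* prints 3 */
	return 0;
}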
+15 -5
drivers/gpu/drm/radeon/radeon_combios.c
··· 760 dac = RBIOS8(dac_info + 0x3) & 0xf;
761 p_dac->ps2_pdac_adj = (bg << 8) | (dac);
762 }
763 - found = 1;
764 }
765
766 if (!found) /* fallback to defaults */
··· 897 bg = RBIOS8(dac_info + 0x10) & 0xf;
898 dac = RBIOS8(dac_info + 0x11) & 0xf;
899 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
900 - found = 1;
901 } else if (rev > 1) {
902 bg = RBIOS8(dac_info + 0xc) & 0xf;
903 dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
··· 912 bg = RBIOS8(dac_info + 0xe) & 0xf;
913 dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
914 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
915 - found = 1;
916 }
917 tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
918 }
··· 931 (bg << 16) | (dac << 20);
932 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
933 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
934 - found = 1;
935 } else {
936 bg = RBIOS8(dac_info + 0x4) & 0xf;
937 dac = RBIOS8(dac_info + 0x5) & 0xf;
··· 941 (bg << 16) | (dac << 20);
942 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
943 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
944 - found = 1;
945 }
946 } else {
947 DRM_INFO("No TV DAC info found in BIOS\n");
··· 760 dac = RBIOS8(dac_info + 0x3) & 0xf;
761 p_dac->ps2_pdac_adj = (bg << 8) | (dac);
762 }
763 + /* if the values are all zeros, use the table */
764 + if (p_dac->ps2_pdac_adj)
765 + found = 1;
766 }
767
768 if (!found) /* fallback to defaults */
··· 895 bg = RBIOS8(dac_info + 0x10) & 0xf;
896 dac = RBIOS8(dac_info + 0x11) & 0xf;
897 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
898 + /* if the values are all zeros, use the table */
899 + if (tv_dac->ps2_tvdac_adj)
900 + found = 1;
901 } else if (rev > 1) {
902 bg = RBIOS8(dac_info + 0xc) & 0xf;
903 dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
··· 908 bg = RBIOS8(dac_info + 0xe) & 0xf;
909 dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
910 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
911 + /* if the values are all zeros, use the table */
912 + if (tv_dac->ps2_tvdac_adj)
913 + found = 1;
914 }
915 tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
916 }
··· 925 (bg << 16) | (dac << 20);
926 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
927 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
928 + /* if the values are all zeros, use the table */
929 + if (tv_dac->ps2_tvdac_adj)
930 + found = 1;
931 } else {
932 bg = RBIOS8(dac_info + 0x4) & 0xf;
933 dac = RBIOS8(dac_info + 0x5) & 0xf;
··· 933 (bg << 16) | (dac << 20);
934 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
935 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
936 + /* if the values are all zeros, use the table */
937 + if (tv_dac->ps2_tvdac_adj)
938 + found = 1;
939 }
940 } else {
941 DRM_INFO("No TV DAC info found in BIOS\n");
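Every hunk above applies the same rule: a DAC adjustment read from the combios table is only trusted when it is non-zero, and an all-zero value leaves found clear so the driver falls back to its defaults. A minimal sketch of that rule follows; default_ps2_pdac_adj() and its return value are hypothetical stand-ins for the fallback path.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's default-table fallback. */
static uint32_t default_ps2_pdac_adj(void)
{
	return 0x00000505;	/* placeholder default, not a real table value */
}

static uint32_t pick_ps2_pdac_adj(uint8_t bg, uint8_t dac)
{
	uint32_t adj = ((uint32_t)bg << 8) | dac;	/* value read from the BIOS */

	if (adj)		/* if the values are all zeros, use the table */
		return adj;
	return default_ps2_pdac_adj();
}

int main(void)
{
	printf("0x%08x\n", pick_ps2_pdac_adj(0, 0));		/* all zeros -> fallback */
	printf("0x%08x\n", pick_ps2_pdac_adj(0x2, 0x3));	/* BIOS value kept */
	return 0;
}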
+1 -1
drivers/gpu/drm/radeon/radeon_connectors.c
··· 315 radeon_encoder = to_radeon_encoder(encoder);
316 if (!radeon_encoder->enc_priv)
317 return 0;
318 - if (rdev->is_atom_bios) {
319 struct radeon_encoder_atom_dac *dac_int;
320 dac_int = radeon_encoder->enc_priv;
321 dac_int->tv_std = val;
··· 315 radeon_encoder = to_radeon_encoder(encoder);
316 if (!radeon_encoder->enc_priv)
317 return 0;
318 + if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) {
319 struct radeon_encoder_atom_dac *dac_int;
320 dac_int = radeon_encoder->enc_priv;
321 dac_int->tv_std = val;
+6 -4
drivers/gpu/drm/radeon/radeon_cp.c
··· 417 return -EBUSY;
418 }
419
420 - static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
421 {
422 uint32_t gb_tile_config, gb_pipe_sel = 0;
423
424 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
··· 437 dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
438 } else {
439 /* R3xx */
440 - if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
441 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
442 dev_priv->num_gb_pipes = 2;
443 } else {
444 - /* R3Vxx */
445 dev_priv->num_gb_pipes = 1;
446 }
447 }
··· 738
739 /* setup the raster pipes */
740 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
741 - radeon_init_pipes(dev_priv);
742
743 /* Reset the CP ring */
744 radeon_do_cp_reset(dev_priv);
··· 417 return -EBUSY;
418 }
419
420 + static void radeon_init_pipes(struct drm_device *dev)
421 {
422 + drm_radeon_private_t *dev_priv = dev->dev_private;
423 uint32_t gb_tile_config, gb_pipe_sel = 0;
424
425 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
··· 436 dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
437 } else {
438 /* R3xx */
439 + if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
440 + dev->pdev->device != 0x4144) ||
441 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
442 dev_priv->num_gb_pipes = 2;
443 } else {
444 + /* RV3xx/R300 AD */
445 dev_priv->num_gb_pipes = 1;
446 }
447 }
··· 736
737 /* setup the raster pipes */
738 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
739 + radeon_init_pipes(dev);
740
741 /* Reset the CP ring */
742 radeon_do_cp_reset(dev_priv);
+7 -14
drivers/gpu/drm/radeon/radeon_encoders.c
··· 317 struct radeon_device *rdev = dev->dev_private;
318 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
319 DAC_ENCODER_CONTROL_PS_ALLOCATION args;
320 - int index = 0, num = 0;
321 struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
322 - enum radeon_tv_std tv_std = TV_STD_NTSC;
323 -
324 - if (dac_info->tv_std)
325 - tv_std = dac_info->tv_std;
326
327 memset(&args, 0, sizeof(args));
328
··· 326 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
327 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
328 index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
329 - num = 1;
330 break;
331 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
332 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
333 index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
334 - num = 2;
335 break;
336 }
337
··· 340 else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
341 args.ucDacStandard = ATOM_DAC1_CV;
342 else {
343 - switch (tv_std) {
344 case TV_STD_PAL:
345 case TV_STD_PAL_M:
346 case TV_STD_SCART_PAL:
··· 371 TV_ENCODER_CONTROL_PS_ALLOCATION args;
372 int index = 0;
373 struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
374 - enum radeon_tv_std tv_std = TV_STD_NTSC;
375 -
376 - if (dac_info->tv_std)
377 - tv_std = dac_info->tv_std;
378
379 memset(&args, 0, sizeof(args));
380
··· 381 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
382 args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
383 else {
384 - switch (tv_std) {
385 case TV_STD_NTSC:
386 args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
387 break;
··· 1548 struct radeon_encoder_atom_dac *
1549 radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
1550 {
1551 struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
1552
1553 if (!dac)
1554 return NULL;
1555
1556 - dac->tv_std = TV_STD_NTSC;
1557 return dac;
1558 }
1559
··· 1633 break;
1634 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
1635 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
1636 drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
1637 break;
1638 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
··· 317 struct radeon_device *rdev = dev->dev_private;
318 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
319 DAC_ENCODER_CONTROL_PS_ALLOCATION args;
320 + int index = 0;
321 struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
322
323 memset(&args, 0, sizeof(args));
324
··· 330 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
331 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
332 index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
333 break;
334 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
335 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
336 index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
337 break;
338 }
339
··· 346 else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
347 args.ucDacStandard = ATOM_DAC1_CV;
348 else {
349 + switch (dac_info->tv_std) {
350 case TV_STD_PAL:
351 case TV_STD_PAL_M:
352 case TV_STD_SCART_PAL:
··· 377 TV_ENCODER_CONTROL_PS_ALLOCATION args;
378 int index = 0;
379 struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
380
381 memset(&args, 0, sizeof(args));
382
··· 391 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
392 args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
393 else {
394 + switch (dac_info->tv_std) {
395 case TV_STD_NTSC:
396 args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
397 break;
··· 1558 struct radeon_encoder_atom_dac *
1559 radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
1560 {
1561 + struct drm_device *dev = radeon_encoder->base.dev;
1562 + struct radeon_device *rdev = dev->dev_private;
1563 struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
1564
1565 if (!dac)
1566 return NULL;
1567
1568 + dac->tv_std = radeon_atombios_get_tv_info(rdev);
1569 return dac;
1570 }
1571
··· 1641 break;
1642 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
1643 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
1644 + radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
1645 drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
1646 break;
1647 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+33 -25
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
··· 830 crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON;
831
832 if (rdev->family == CHIP_R420 ||
833 - rdev->family == CHIP_R423 ||
834 - rdev->family == CHIP_RV410)
835 tv_dac_cntl |= (R420_TV_DAC_RDACPD |
836 R420_TV_DAC_GDACPD |
837 R420_TV_DAC_BDACPD |
··· 907 if (rdev->family != CHIP_R200) {
908 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
909 if (rdev->family == CHIP_R420 ||
910 - rdev->family == CHIP_R423 ||
911 - rdev->family == CHIP_RV410) {
912 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
913 - RADEON_TV_DAC_BGADJ_MASK |
914 - R420_TV_DAC_DACADJ_MASK |
915 - R420_TV_DAC_RDACPD |
916 - R420_TV_DAC_GDACPD |
917 - R420_TV_DAC_BDACPD |
918 - R420_TV_DAC_TVENABLE);
919 } else {
920 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
921 - RADEON_TV_DAC_BGADJ_MASK |
922 - RADEON_TV_DAC_DACADJ_MASK |
923 - RADEON_TV_DAC_RDACPD |
924 - RADEON_TV_DAC_GDACPD |
925 - RADEON_TV_DAC_BDACPD);
926 }
927
928 - /* FIXME TV */
929 - if (tv_dac) {
930 - struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
931 - tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
932 - RADEON_TV_DAC_NHOLD |
933 - RADEON_TV_DAC_STD_PS2 |
934 - tv_dac->ps2_tvdac_adj);
935 } else
936 - tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
937 - RADEON_TV_DAC_NHOLD |
938 - RADEON_TV_DAC_STD_PS2);
939
940 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
941 }
··· 830 crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON;
831
832 if (rdev->family == CHIP_R420 ||
833 + rdev->family == CHIP_R423 ||
834 + rdev->family == CHIP_RV410)
835 tv_dac_cntl |= (R420_TV_DAC_RDACPD |
836 R420_TV_DAC_GDACPD |
837 R420_TV_DAC_BDACPD |
··· 907 if (rdev->family != CHIP_R200) {
908 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
909 if (rdev->family == CHIP_R420 ||
910 + rdev->family == CHIP_R423 ||
911 + rdev->family == CHIP_RV410) {
912 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
913 + RADEON_TV_DAC_BGADJ_MASK |
914 + R420_TV_DAC_DACADJ_MASK |
915 + R420_TV_DAC_RDACPD |
916 + R420_TV_DAC_GDACPD |
917 + R420_TV_DAC_BDACPD |
918 + R420_TV_DAC_TVENABLE);
919 } else {
920 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
921 + RADEON_TV_DAC_BGADJ_MASK |
922 + RADEON_TV_DAC_DACADJ_MASK |
923 + RADEON_TV_DAC_RDACPD |
924 + RADEON_TV_DAC_GDACPD |
925 + RADEON_TV_DAC_BDACPD);
926 }
927
928 + tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD;
929 +
930 + if (is_tv) {
931 + if (tv_dac->tv_std == TV_STD_NTSC ||
932 + tv_dac->tv_std == TV_STD_NTSC_J ||
933 + tv_dac->tv_std == TV_STD_PAL_M ||
934 + tv_dac->tv_std == TV_STD_PAL_60)
935 + tv_dac_cntl |= tv_dac->ntsc_tvdac_adj;
936 + else
937 + tv_dac_cntl |= tv_dac->pal_tvdac_adj;
938 +
939 + if (tv_dac->tv_std == TV_STD_NTSC ||
940 + tv_dac->tv_std == TV_STD_NTSC_J)
941 + tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC;
942 + else
943 + tv_dac_cntl |= RADEON_TV_DAC_STD_PAL;
944 } else
945 + tv_dac_cntl |= (RADEON_TV_DAC_STD_PS2 |
946 + tv_dac->ps2_tvdac_adj);
947
948 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
949 }