Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (95 commits)
drm/radeon/kms: preface warning printk with driver name
drm/radeon/kms: drop unnecessary printks.
drm: fix regression in fb blank handling
drm/radeon/kms: make hibernate work on IGPs
drm/vmwgfx: Optimize memory footprint for DMA buffers.
drm/ttm: Allow system memory as a busy placement.
drm/ttm: Fix race condition in ttm_bo_delayed_delete (v3, final)
drm/nv50: prevent switching off SOR when in use for DVI-over-DP
drm/nv50: fail auxch transaction if reply count not what we expect
drm/nouveau: fix failure path if userspace specifies no valid memtypes
drm/nouveau: report LVDS as disconnected if lid closed
drm/radeon/kms: fix legacy get_engine/memory clock
drm/radeon/kms/atom: atom parser fixes
drm/radeon/kms: clean up atombios pll code
drm/radeon/kms: clean up pll struct
drm/radeon/kms/atom: fix crtc lock ordering
drm/radeon: r6xx/r7xx possible security issue, system ram access
drm/radeon/kms: r600/r700 don't test ib if ib initialization fails
drm/radeon/kms: Forbid creation of framebuffer with no valid GEM object
drm/radeon/kms: r600 handle irq vector ring overflow
...

+959 -521
+1 -2
drivers/gpu/drm/drm_edid.c
···
633		return NULL;
634	}
635	if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
636 -		printk(KERN_WARNING "integrated sync not supported\n");
637 -		return NULL;
638	}
639
640	/* it is incorrect if hsync/vsync width is zero */

···
633		return NULL;
634	}
635	if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
636 +		printk(KERN_WARNING "composite sync not supported\n");
637	}
638
639	/* it is incorrect if hsync/vsync width is zero */
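The drm_edid.c hunk stops treating a non-separate sync type as fatal: the detailed mode is now kept and only a warning is logged (reworded from "integrated" to "composite"). For context, the sync-type field lives in the misc byte of the EDID detailed timing descriptor; a reference sketch of the flag as drm_edid.h defines it (quoted from memory, so verify against the header):

	/* byte 17 ("misc") of an EDID detailed timing descriptor:
	 * bits 4:3 give the sync scheme; 11b means separate H/V sync */
	#define DRM_EDID_PT_SEPARATE_SYNC	(3 << 3)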
+1 -1
drivers/gpu/drm/drm_fb_helper.c
···
389		break;
390	/* Display: Off; HSync: On, VSync: On */
391	case FB_BLANK_NORMAL:
392 -		drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
393		break;
394	/* Display: Off; HSync: Off, VSync: On */
395	case FB_BLANK_HSYNC_SUSPEND:

···
389		break;
390	/* Display: Off; HSync: On, VSync: On */
391	case FB_BLANK_NORMAL:
392 +		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
393		break;
394	/* Display: Off; HSync: Off, VSync: On */
395	case FB_BLANK_HSYNC_SUSPEND:
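The regression here: FB_BLANK_NORMAL ("display off, both syncs on") was passing DRM_MODE_DPMS_ON, i.e. not blanking at all. The full fbdev-to-DPMS mapping this function implements, reconstructed as a sketch from the comments visible in the hunk (only the FB_BLANK_NORMAL arm is verbatim from the patch):

	switch (blank) {
	case FB_BLANK_UNBLANK:		/* Display: On; HSync: On, VSync: On */
		drm_fb_helper_on(info);
		break;
	case FB_BLANK_NORMAL:		/* Display: Off; HSync: On, VSync: On */
		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
		break;
	case FB_BLANK_HSYNC_SUSPEND:	/* Display: Off; HSync: Off, VSync: On */
		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
		break;
	case FB_BLANK_VSYNC_SUSPEND:	/* Display: Off; HSync: On, VSync: Off */
		drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
		break;
	case FB_BLANK_POWERDOWN:	/* Display: Off; HSync: Off, VSync: Off */
		drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
		break;
	}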
+81 -108
drivers/gpu/drm/nouveau/nouveau_bios.c
···
310		struct drm_device *dev = bios->dev;
311
312		/* C51 has misaligned regs on purpose. Marvellous */
313 -	if (reg & 0x2 || (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) {
314 -		NV_ERROR(dev, "========== misaligned reg 0x%08X ==========\n",
315 -			 reg);
316 -		return 0;
317 -	}
318 -	/*
319 -	 * Warn on C51 regs that have not been verified accessible in
320 -	 * mmiotracing
321 -	 */
322	if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
323	    reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
324		NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
325			reg);
326
327 -	/* Trust the init scripts on G80 */
328 -	if (dev_priv->card_type >= NV_50)
329 -		return 1;
330 -
331 -#define WITHIN(x, y, z) ((x >= y) && (x < y + z))
332 -	if (WITHIN(reg, NV_PMC_OFFSET, NV_PMC_SIZE))
333 -		return 1;
334 -	if (WITHIN(reg, NV_PBUS_OFFSET, NV_PBUS_SIZE))
335 -		return 1;
336 -	if (WITHIN(reg, NV_PFIFO_OFFSET, NV_PFIFO_SIZE))
337 -		return 1;
338 -	if (dev_priv->VBIOS.pub.chip_version >= 0x30 &&
339 -	    (WITHIN(reg, 0x4000, 0x600) || reg == 0x00004600))
340 -		return 1;
341 -	if (dev_priv->VBIOS.pub.chip_version >= 0x40 &&
342 -	    WITHIN(reg, 0xc000, 0x48))
343 -		return 1;
344 -	if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0000d204)
345 -		return 1;
346 -	if (dev_priv->VBIOS.pub.chip_version >= 0x40) {
347 -		if (reg == 0x00011014 || reg == 0x00020328)
348 -			return 1;
349 -		if (WITHIN(reg, 0x88000, NV_PBUS_SIZE)) /* new PBUS */
350 -			return 1;
351	}
352 -	if (WITHIN(reg, NV_PFB_OFFSET, NV_PFB_SIZE))
353 -		return 1;
354 -	if (WITHIN(reg, NV_PEXTDEV_OFFSET, NV_PEXTDEV_SIZE))
355 -		return 1;
356 -	if (WITHIN(reg, NV_PCRTC0_OFFSET, NV_PCRTC0_SIZE * 2))
357 -		return 1;
358 -	if (WITHIN(reg, NV_PRAMDAC0_OFFSET, NV_PRAMDAC0_SIZE * 2))
359 -		return 1;
360 -	if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0070fff0)
361 -		return 1;
362 -	if (dev_priv->VBIOS.pub.chip_version == 0x51 &&
363 -	    WITHIN(reg, NV_PRAMIN_OFFSET, NV_PRAMIN_SIZE))
364 -		return 1;
365 -#undef WITHIN
366
367 -	NV_ERROR(dev, "========== unknown reg 0x%08X ==========\n", reg);
368 -
369 -	return 0;
370	}
371
372	static bool
···
3155	}
3156	#ifdef __powerpc__
3157	/* Powerbook specific quirks */
3158 -	if (script == LVDS_RESET && ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0329))
3159 -		nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
3160 -	if ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0189 || (dev->pci_device & 0xffff) == 0x0329) {
3161 -		if (script == LVDS_PANEL_ON) {
3162 -			bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | (1 << 31));
3163 -			bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
3164 -		}
3165 -		if (script == LVDS_PANEL_OFF) {
3166 -			bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) & ~(1 << 31));
3167 -			bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
3168		}
3169	}
3170	#endif
···
5402	parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
5403			  uint32_t conn, uint32_t conf, struct dcb_entry *entry)
5404	{
5405 -	if (conn != 0xf0003f00 && conn != 0xf2247f10 && conn != 0xf2204001 &&
5406 -	    conn != 0xf2204301 && conn != 0xf2204311 && conn != 0xf2208001 &&
5407 -	    conn != 0xf2244001 && conn != 0xf2244301 && conn != 0xf2244311 &&
5408 -	    conn != 0xf4204011 && conn != 0xf4208011 && conn != 0xf4248011 &&
5409 -	    conn != 0xf2045ff2 && conn != 0xf2045f14 && conn != 0xf207df14 &&
5410 -	    conn != 0xf2205004 && conn != 0xf2209004) {
5411 -		NV_ERROR(dev, "Unknown DCB 1.5 entry, please report\n");
5412 -
5413 -		/* cause output setting to fail for !TV, so message is seen */
5414 -		if ((conn & 0xf) != 0x1)
5415 -			dcb->entries = 0;
5416 -
5417		return false;
5418	}
5419 -	/* most of the below is a "best guess" atm */
5420 -	entry->type = conn & 0xf;
5421 -	if (entry->type == 2)
5422 -		/* another way of specifying straps based lvds... */
5423 -		entry->type = OUTPUT_LVDS;
5424 -	if (entry->type == 4) { /* digital */
5425 -		if (conn & 0x10)
5426 -			entry->type = OUTPUT_LVDS;
5427 -		else
5428 -			entry->type = OUTPUT_TMDS;
5429 -	}
5430 -	/* what's in bits 5-13? could be some encoder maker thing, in tv case */
5431 -	entry->i2c_index = (conn >> 14) & 0xf;
5432 -	/* raw heads field is in range 0-1, so move to 1-2 */
5433 -	entry->heads = ((conn >> 18) & 0x7) + 1;
5434 -	entry->location = (conn >> 21) & 0xf;
5435 -	/* unused: entry->bus = (conn >> 25) & 0x7; */
5436 -	/* set or to be same as heads -- hopefully safe enough */
5437 -	entry->or = entry->heads;
5438	entry->duallink_possible = false;
5439
5440	switch (entry->type) {
5441	case OUTPUT_ANALOG:
5442		entry->crtconf.maxfreq = (conf & 0xffff) * 10;
5443		break;
5444 -	case OUTPUT_LVDS:
5445 -		/*
5446 -		 * This is probably buried in conn's unknown bits.
5447 -		 * This will upset EDID-ful models, if they exist
5448 -		 */
5449 -		entry->lvdsconf.use_straps_for_mode = true;
5450 -		entry->lvdsconf.use_power_scripts = true;
5451		break;
5452	case OUTPUT_TMDS:
5453		/*
···
5453		 */
5454		fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
5455		break;
5456 -	case OUTPUT_TV:
5457 -		entry->tvconf.has_component_output = false;
5458		break;
5459	}
5460
···
5533	dcb->entries = newentries;
5534	}
5535
5536 -	static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5537	{
5538		struct bios_parsed_dcb *bdcb = &bios->bdcb;
5539		struct parsed_dcb *dcb;
5540 -	uint16_t dcbptr, i2ctabptr = 0;
5541		uint8_t *dcbtable;
5542		uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
5543		bool configblock = true;
···
5550	dcb->entries = 0;
5551
5552	/* get the offset from 0x36 */
5553 -	dcbptr = ROM16(bios->data[0x36]);
5554
5555	if (dcbptr == 0x0) {
5556 -		NV_WARN(dev, "No output data (DCB) found in BIOS, "
5557 -			     "assuming a CRT output exists\n");
5558 -		/* this situation likely means a really old card, pre DCB */
5559		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
5560
5561 -		if (nv04_tv_identify(dev,
5562 -				     bios->legacy.i2c_indices.tv) >= 0)
5563			fabricate_tv_output(dcb, twoHeads);
5564
5565		return 0;
···
310		struct drm_device *dev = bios->dev;
311
312		/* C51 has misaligned regs on purpose. Marvellous */
313 +	if (reg & 0x2 ||
314 +	    (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51))
315 +		NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
316 +
317 +	/* warn on C51 regs that haven't been verified accessible in tracing */
318	if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
319	    reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
320		NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
321			reg);
322
323 +	if (reg >= (8*1024*1024)) {
324 +		NV_ERROR(dev, "=== reg 0x%08x out of mapped bounds ===\n", reg);
325 +		return 0;
326	}
327
328 +	return 1;
329	}
330
331	static bool
···
3196	}
3197	#ifdef __powerpc__
3198	/* Powerbook specific quirks */
3199 +	if ((dev->pci_device & 0xffff) == 0x0179 ||
3200 +	    (dev->pci_device & 0xffff) == 0x0189 ||
3201 +	    (dev->pci_device & 0xffff) == 0x0329) {
3202 +		if (script == LVDS_RESET) {
3203 +			nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
3204 +
3205 +		} else if (script == LVDS_PANEL_ON) {
3206 +			bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
3207 +				  bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
3208 +				  | (1 << 31));
3209 +			bios_wr32(bios, NV_PCRTC_GPIO_EXT,
3210 +				  bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
3211 +
3212 +		} else if (script == LVDS_PANEL_OFF) {
3213 +			bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
3214 +				  bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
3215 +				  & ~(1 << 31));
3216 +			bios_wr32(bios, NV_PCRTC_GPIO_EXT,
3217 +				  bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
3218		}
3219	}
3220	#endif
···
5434	parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
5435			  uint32_t conn, uint32_t conf, struct dcb_entry *entry)
5436	{
5437 +	switch (conn & 0x0000000f) {
5438 +	case 0:
5439 +		entry->type = OUTPUT_ANALOG;
5440 +		break;
5441 +	case 1:
5442 +		entry->type = OUTPUT_TV;
5443 +		break;
5444 +	case 2:
5445 +	case 3:
5446 +		entry->type = OUTPUT_LVDS;
5447 +		break;
5448 +	case 4:
5449 +		switch ((conn & 0x000000f0) >> 4) {
5450 +		case 0:
5451 +			entry->type = OUTPUT_TMDS;
5452 +			break;
5453 +		case 1:
5454 +			entry->type = OUTPUT_LVDS;
5455 +			break;
5456 +		default:
5457 +			NV_ERROR(dev, "Unknown DCB subtype 4/%d\n",
5458 +				 (conn & 0x000000f0) >> 4);
5459 +			return false;
5460 +		}
5461 +		break;
5462 +	default:
5463 +		NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
5464		return false;
5465	}
5466 +
5467 +	entry->i2c_index = (conn & 0x0003c000) >> 14;
5468 +	entry->heads = ((conn & 0x001c0000) >> 18) + 1;
5469 +	entry->or = entry->heads; /* same as heads, hopefully safe enough */
5470 +	entry->location = (conn & 0x01e00000) >> 21;
5471 +	entry->bus = (conn & 0x0e000000) >> 25;
5472	entry->duallink_possible = false;
5473
5474	switch (entry->type) {
5475	case OUTPUT_ANALOG:
5476		entry->crtconf.maxfreq = (conf & 0xffff) * 10;
5477		break;
5478 +	case OUTPUT_TV:
5479 +		entry->tvconf.has_component_output = false;
5480		break;
5481	case OUTPUT_TMDS:
5482		/*
···
5488		 */
5489		fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
5490		break;
5491 +	case OUTPUT_LVDS:
5492 +		if ((conn & 0x00003f00) != 0x10)
5493 +			entry->lvdsconf.use_straps_for_mode = true;
5494 +		entry->lvdsconf.use_power_scripts = true;
5495 +		break;
5496 +	default:
5497		break;
5498	}
5499
···
5564	dcb->entries = newentries;
5565	}
5566
5567 +	static int
5568 +	parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5569	{
5570 +	struct drm_nouveau_private *dev_priv = dev->dev_private;
5571		struct bios_parsed_dcb *bdcb = &bios->bdcb;
5572		struct parsed_dcb *dcb;
5573 +	uint16_t dcbptr = 0, i2ctabptr = 0;
5574		uint8_t *dcbtable;
5575		uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
5576		bool configblock = true;
···
5579	dcb->entries = 0;
5580
5581	/* get the offset from 0x36 */
5582 +	if (dev_priv->card_type > NV_04) {
5583 +		dcbptr = ROM16(bios->data[0x36]);
5584 +		if (dcbptr == 0x0000)
5585 +			NV_WARN(dev, "No output data (DCB) found in BIOS\n");
5586 +	}
5587
5588 +	/* this situation likely means a really old card, pre DCB */
5589	if (dcbptr == 0x0) {
5590 +		NV_INFO(dev, "Assuming a CRT output exists\n");
5591		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
5592
5593 +		if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
5594			fabricate_tv_output(dcb, twoHeads);
5595
5596		return 0;
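The rewritten parse_dcb15_entry() replaces the old whitelist of known connection words with a structured decode. The bitfield layout it implies, summarized from the masks in the new code (a summary for reference, not an authoritative DCB spec):

	/* DCB 1.5 connection word, as decoded above:
	 *   conn[ 3: 0]  output type (0 analog, 1 TV, 2/3 LVDS, 4 digital)
	 *   conn[ 7: 4]  digital subtype (0 TMDS, 1 LVDS) when type == 4
	 *   conn[13: 8]  LVDS config (compared against 0x10 to choose
	 *                strap-based vs. EDID modes)
	 *   conn[17:14]  i2c bus index
	 *   conn[20:18]  heads - 1
	 *   conn[24:21]  location
	 *   conn[27:25]  bus
	 */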
+2
drivers/gpu/drm/nouveau/nouveau_bo.c
···
469
470	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
471					evict, no_wait, new_mem);
472	nouveau_fence_unref((void *)&fence);
473	return ret;
474	}

···
469
470	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
471					evict, no_wait, new_mem);
472 +	if (nvbo->channel && nvbo->channel != chan)
473 +		ret = nouveau_fence_wait(fence, NULL, false, false);
474	nouveau_fence_unref((void *)&fence);
475	return ret;
476	}
+26 -5
drivers/gpu/drm/nouveau/nouveau_connector.c
···
24	 *
25	 */
26
27	#include "drmP.h"
28	#include "drm_edid.h"
29	#include "drm_crtc_helper.h"
30	#include "nouveau_reg.h"
31	#include "nouveau_drv.h"
32	#include "nouveau_encoder.h"
···
86	static void
87	nouveau_connector_destroy(struct drm_connector *drm_connector)
88	{
89 -	struct nouveau_connector *connector = nouveau_connector(drm_connector);
90 -	struct drm_device *dev = connector->base.dev;
91
92	NV_DEBUG_KMS(dev, "\n");
93
94 -	if (!connector)
95		return;
96
97	drm_sysfs_connector_remove(drm_connector);
98	drm_connector_cleanup(drm_connector);
99	kfree(drm_connector);
···
238	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
239		nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
240	if (nv_encoder && nv_connector->native_mode) {
241		nouveau_connector_set_encoder(connector, nv_encoder);
242		return connector_status_connected;
243	}
244
245	i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
···
263	if (!nv_connector->edid) {
264		NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
265			 drm_get_connector_name(connector));
266 -		return connector_status_disconnected;
267	}
268
269	if (nv_encoder->dcb->type == OUTPUT_DP &&
···
297		return connector_status_connected;
298	}
299
300	nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
301	if (!nv_encoder)
302		nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
···
704	 */
705	if (!nv_connector->edid && !nv_connector->native_mode &&
706	    !dev_priv->VBIOS.pub.fp_no_ddc) {
707 -		nv_connector->edid =
708			(struct edid *)nouveau_bios_embedded_edid(dev);
709	}
710
711	if (!nv_connector->edid)

···
24	 *
25	 */
26
27 +	#include <acpi/button.h>
28 +
29	#include "drmP.h"
30	#include "drm_edid.h"
31	#include "drm_crtc_helper.h"
32 +
33	#include "nouveau_reg.h"
34	#include "nouveau_drv.h"
35	#include "nouveau_encoder.h"
···
83	static void
84	nouveau_connector_destroy(struct drm_connector *drm_connector)
85	{
86 +	struct nouveau_connector *nv_connector =
87 +		nouveau_connector(drm_connector);
88 +	struct drm_device *dev = nv_connector->base.dev;
89
90	NV_DEBUG_KMS(dev, "\n");
91
92 +	if (!nv_connector)
93		return;
94
95 +	kfree(nv_connector->edid);
96	drm_sysfs_connector_remove(drm_connector);
97	drm_connector_cleanup(drm_connector);
98	kfree(drm_connector);
···
233	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
234		nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
235	if (nv_encoder && nv_connector->native_mode) {
236 +	#ifdef CONFIG_ACPI
237 +		if (!nouveau_ignorelid && !acpi_lid_open())
238 +			return connector_status_disconnected;
239 +	#endif
240		nouveau_connector_set_encoder(connector, nv_encoder);
241		return connector_status_connected;
242 +	}
243 +
244 +	/* Cleanup the previous EDID block. */
245 +	if (nv_connector->edid) {
246 +		drm_mode_connector_update_edid_property(connector, NULL);
247 +		kfree(nv_connector->edid);
248 +		nv_connector->edid = NULL;
249	}
250
251	i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
···
247	if (!nv_connector->edid) {
248		NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
249			 drm_get_connector_name(connector));
250 +		goto detect_analog;
251	}
252
253	if (nv_encoder->dcb->type == OUTPUT_DP &&
···
281		return connector_status_connected;
282	}
283
284 +	detect_analog:
285	nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
286	if (!nv_encoder)
287		nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
···
687	 */
688	if (!nv_connector->edid && !nv_connector->native_mode &&
689	    !dev_priv->VBIOS.pub.fp_no_ddc) {
690 +		struct edid *edid =
691			(struct edid *)nouveau_bios_embedded_edid(dev);
692 +		if (edid) {
693 +			nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
694 +			*(nv_connector->edid) = *edid;
695 +		}
696	}
697
698	if (!nv_connector->edid)
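A detail worth noting in this hunk: the VBIOS-embedded EDID is now duplicated into kmalloc()'d memory instead of being pointed at directly, so every EDID the connector holds -- whether it came from DDC or from the BIOS image -- can be released through the same kfree() in nouveau_connector_destroy() and in the re-detect cleanup. The copy idiom from the patch, with its implicit assumptions spelled out:

	nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
	*(nv_connector->edid) = *edid;	/* struct copy: assumes a bare
					 * one-block EDID (no extensions);
					 * note the allocation is not
					 * NULL-checked in the patch */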
+47 -29
drivers/gpu/drm/nouveau/nouveau_dma.c
···
126	chan->dma.cur += nr_dwords;
127	}
128
129 -	static inline bool
130 -	READ_GET(struct nouveau_channel *chan, uint32_t *get)
131	{
132	uint32_t val;
133
134	val = nvchan_rd32(chan, chan->user_get);
135 -	if (val < chan->pushbuf_base ||
136 -	    val > chan->pushbuf_base + (chan->dma.max << 2)) {
137 -		/* meaningless to dma_wait() except to know whether the
138 -		 * GPU has stalled or not
139 -		 */
140 -		*get = val;
141 -		return false;
142	}
143
144 -	*get = (val - chan->pushbuf_base) >> 2;
145 -	return true;
146	}
147
148	int
149	nouveau_dma_wait(struct nouveau_channel *chan, int size)
150	{
151 -	uint32_t get, prev_get = 0, cnt = 0;
152 -	bool get_valid;
153
154	while (chan->dma.free < size) {
155 -		/* reset counter as long as GET is still advancing, this is
156 -		 * to avoid misdetecting a GPU lockup if the GPU happens to
157 -		 * just be processing an operation that takes a long time
158 -		 */
159 -		get_valid = READ_GET(chan, &get);
160 -		if (get != prev_get) {
161 -			prev_get = get;
162 -			cnt = 0;
163 -		}
164 -
165 -		if ((++cnt & 0xff) == 0) {
166 -			DRM_UDELAY(1);
167 -			if (cnt > 100000)
168 -				return -EBUSY;
169 -		}
170
171	/* loop until we have a usable GET pointer. the value
172	 * we read from the GPU may be outside the main ring if
···
182	 * from the SKIPS area, so the code below doesn't have to deal
183	 * with some fun corner cases.
184	 */
185 -		if (!get_valid || get < NOUVEAU_DMA_SKIPS)
186			continue;
187
188		if (get <= chan->dma.cur) {
···
208	 * after processing the currently pending commands.
209	 */
210	OUT_RING(chan, chan->pushbuf_base | 0x20000000);
211	WRITE_PUT(NOUVEAU_DMA_SKIPS);
212
213	/* we're now submitting commands at the start of

···
126	chan->dma.cur += nr_dwords;
127	}
128
129 +	/* Fetch and adjust GPU GET pointer
130 +	 *
131 +	 * Returns:
132 +	 *   value >= 0, the adjusted GET pointer
133 +	 *   -EINVAL if GET pointer currently outside main push buffer
134 +	 *   -EBUSY if timeout exceeded
135 +	 */
136 +	static inline int
137 +	READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
138	{
139	uint32_t val;
140
141	val = nvchan_rd32(chan, chan->user_get);
142 +
143 +	/* reset counter as long as GET is still advancing, this is
144 +	 * to avoid misdetecting a GPU lockup if the GPU happens to
145 +	 * just be processing an operation that takes a long time
146 +	 */
147 +	if (val != *prev_get) {
148 +		*prev_get = val;
149 +		*timeout = 0;
150	}
151
152 +	if ((++*timeout & 0xff) == 0) {
153 +		DRM_UDELAY(1);
154 +		if (*timeout > 100000)
155 +			return -EBUSY;
156 +	}
157 +
158 +	if (val < chan->pushbuf_base ||
159 +	    val > chan->pushbuf_base + (chan->dma.max << 2))
160 +		return -EINVAL;
161 +
162 +	return (val - chan->pushbuf_base) >> 2;
163	}
164
165	int
166	nouveau_dma_wait(struct nouveau_channel *chan, int size)
167	{
168 +	uint32_t prev_get = 0, cnt = 0;
169 +	int get;
170
171	while (chan->dma.free < size) {
172 +		get = READ_GET(chan, &prev_get, &cnt);
173 +		if (unlikely(get == -EBUSY))
174 +			return -EBUSY;
175
176	/* loop until we have a usable GET pointer. the value
177	 * we read from the GPU may be outside the main ring if
···
177	 * from the SKIPS area, so the code below doesn't have to deal
178	 * with some fun corner cases.
179	 */
180 +		if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
181			continue;
182
183		if (get <= chan->dma.cur) {
···
203	 * after processing the currently pending commands.
204	 */
205	OUT_RING(chan, chan->pushbuf_base | 0x20000000);
206 +
207 +	/* wait for GET to depart from the skips area.
208 +	 * prevents writing GET==PUT and causing a race
209 +	 * condition that causes us to think the GPU is
210 +	 * idle when it's not.
211 +	 */
212 +	do {
213 +		get = READ_GET(chan, &prev_get, &cnt);
214 +		if (unlikely(get == -EBUSY))
215 +			return -EBUSY;
216 +		if (unlikely(get == -EINVAL))
217 +			continue;
218 +	} while (get <= NOUVEAU_DMA_SKIPS);
219	WRITE_PUT(NOUVEAU_DMA_SKIPS);
220
221	/* we're now submitting commands at the start of
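The reworked READ_GET() folds the lockup-timeout bookkeeping into the helper and replaces the awkward bool-plus-out-parameter with a tri-state return. Its calling convention, summarized from the hunk above:

	/* READ_GET() return values after the rework:
	 *   >= 0      adjusted GET pointer, in dwords from pushbuf_base
	 *   -EINVAL   raw GET is outside the main push buffer (e.g. while
	 *             the GPU executes a jump target) -- keep polling
	 *   -EBUSY    GET hasn't advanced after ~100,000 polls -- treat
	 *             the GPU as locked up
	 */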
+7 -1
drivers/gpu/drm/nouveau/nouveau_dp.c
···
490	if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
491		NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
492			 nv_rd32(dev, NV50_AUXCH_CTRL(index)));
493 -		return -EBUSY;
494	}
495
496	udelay(400);
···
500	if ((stat & NV50_AUXCH_STAT_REPLY_AUX) !=
501	    NV50_AUXCH_STAT_REPLY_AUX_DEFER)
502		break;
503	}
504
505	if (cmd & 1) {

···
490	if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
491		NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
492			 nv_rd32(dev, NV50_AUXCH_CTRL(index)));
493 +		ret = -EBUSY;
494 +		goto out;
495	}
496
497	udelay(400);
···
499	if ((stat & NV50_AUXCH_STAT_REPLY_AUX) !=
500	    NV50_AUXCH_STAT_REPLY_AUX_DEFER)
501		break;
502	}
503
504 +	if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
505 +		ret = -EREMOTEIO;
506 +		goto out;
507	}
508
509	if (cmd & 1) {
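With the new NV50_AUXCH_STAT_COUNT check, a short aux reply fails the whole transaction with -EREMOTEIO instead of silently returning garbage bytes. DP aux transfers are generally retryable, so a caller might treat a short reply like an AUX defer; a hypothetical sketch (loop shape and the wrapper name are illustrative, not from the patch):

	int i, ret;
	for (i = 0; i < 4; i++) {
		ret = auxch_rd(encoder, address, buf, size);	/* assumed wrapper */
		if (ret != -EREMOTEIO)
			break;		/* not a short/corrupt reply: done */
	}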
+4
drivers/gpu/drm/nouveau/nouveau_drv.c
··· 71 int nouveau_uscript_tmds = -1; 72 module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400); 73 74 MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" 75 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" 76 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
··· 71 int nouveau_uscript_tmds = -1; 72 module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400); 73 74 + MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status"); 75 + int nouveau_ignorelid = 0; 76 + module_param_named(ignorelid, nouveau_ignorelid, int, 0400); 77 + 78 MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" 79 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" 80 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
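This is the opt-out for the lid-based LVDS detection added in nouveau_connector.c: machines with broken ACPI lid reporting can override it at load time using standard module-parameter syntax, e.g. "modprobe nouveau ignorelid=1", or "nouveau.ignorelid=1" on the kernel command line when the driver is built in.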
+3
drivers/gpu/drm/nouveau/nouveau_drv.h
···
509	void __iomem *ramin;
510	uint32_t ramin_size;
511
512	struct workqueue_struct *wq;
513	struct work_struct irq_work;
514
···
677	extern int nouveau_reg_debug;
678	extern char *nouveau_vbios;
679	extern int nouveau_ctxfw;
680
681	/* nouveau_state.c */
682	extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);

···
509	void __iomem *ramin;
510	uint32_t ramin_size;
511
512 +	struct nouveau_bo *vga_ram;
513 +
514	struct workqueue_struct *wq;
515	struct work_struct irq_work;
516
···
675	extern int nouveau_reg_debug;
676	extern char *nouveau_vbios;
677	extern int nouveau_ctxfw;
678 +	extern int nouveau_ignorelid;
679
680	/* nouveau_state.c */
681	extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
+17 -3
drivers/gpu/drm/nouveau/nouveau_gem.c
···
321	else {
322		NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
323			 b->valid_domains);
324		validate_fini(op, NULL);
325		return -EINVAL;
326	}
···
467	static int
468	nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
469					struct drm_nouveau_gem_pushbuf_bo *bo,
470 -				int nr_relocs, uint64_t ptr_relocs,
471 -				int nr_dwords, int first_dword,
472					uint32_t *pushbuf, bool is_iomem)
473	{
474	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
475	struct drm_device *dev = chan->dev;
476 -	int ret = 0, i;
477
478	reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
479	if (IS_ERR(reloc))
···
668		goto out;
669	}
670	pbbo = nouveau_gem_object(gem);
671
672	ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
673			     chan->fence.sequence);

···
321	else {
322		NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
323			 b->valid_domains);
324 +		list_add_tail(&nvbo->entry, &op->both_list);
325		validate_fini(op, NULL);
326		return -EINVAL;
327	}
···
466	static int
467	nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
468					struct drm_nouveau_gem_pushbuf_bo *bo,
469 +				unsigned nr_relocs, uint64_t ptr_relocs,
470 +				unsigned nr_dwords, unsigned first_dword,
471					uint32_t *pushbuf, bool is_iomem)
472	{
473	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
474	struct drm_device *dev = chan->dev;
475 +	int ret = 0;
476 +	unsigned i;
477
478	reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
479	if (IS_ERR(reloc))
···
666		goto out;
667	}
668	pbbo = nouveau_gem_object(gem);
669 +
670 +	if ((req->offset & 3) || req->nr_dwords < 2 ||
671 +	    (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
672 +	    (unsigned long)req->nr_dwords >
673 +	     ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) {
674 +		NV_ERROR(dev, "pb call misaligned or out of bounds: "
675 +			      "%d + %d * 4 > %ld\n",
676 +			 req->offset, req->nr_dwords, pbbo->bo.mem.size);
677 +		ret = -EINVAL;
678 +		drm_gem_object_unreference(gem);
679 +		goto out;
680 +	}
681
682	ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
683			     chan->fence.sequence);
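The new bounds check is deliberately phrased as subtract-then-shift rather than the more obvious "offset + nr_dwords * 4 <= size", because the naive form can wrap with user-controlled values. A worked example on a 32-bit unsigned long, assuming a 64 KiB push buffer:

	/* offset = 0, nr_dwords = 0x40000000 (user-controlled) */
	offset + nr_dwords * 4 > size		/* 0x40000000 * 4 wraps to 0:
						 * check passes, oops */
	nr_dwords > (size - offset) >> 2	/* 0x40000000 > 0x4000:
						 * rejected */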
+7
drivers/gpu/drm/nouveau/nouveau_irq.c
···
483	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
484		if (nouveau_pgraph_intr_swmthd(dev, &trap))
485			unhandled = 1;
486	} else {
487		unhandled = 1;
488	}

···
483	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
484		if (nouveau_pgraph_intr_swmthd(dev, &trap))
485			unhandled = 1;
486 +	} else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
487 +		uint32_t v = nv_rd32(dev, 0x402000);
488 +		nv_wr32(dev, 0x402000, v);
489 +
490 +		/* dump the error anyway for now: it's useful for
491 +		   Gallium development */
492 +		unhandled = 1;
493	} else {
494		unhandled = 1;
495	}
+12 -3
drivers/gpu/drm/nouveau/nouveau_mem.c
···
383	{
384	struct drm_nouveau_private *dev_priv = dev->dev_private;
385
386 -	if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type)
387 -		ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0);
388 -	ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
389
390	ttm_bo_device_release(&dev_priv->ttm.bdev);
391
···
621		return ret;
622	}
623
624	/* GART */
625	#if !defined(__powerpc__) && !defined(__ia64__)
626	if (drm_device_is_agp(dev) && dev->agp) {
···
661	dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
662					 drm_get_resource_len(dev, 1),
663					 DRM_MTRR_WC);
664	return 0;
665	}
666

···
383	{
384	struct drm_nouveau_private *dev_priv = dev->dev_private;
385
386 +	nouveau_bo_unpin(dev_priv->vga_ram);
387 +	nouveau_bo_ref(NULL, &dev_priv->vga_ram);
388
389	ttm_bo_device_release(&dev_priv->ttm.bdev);
390
···
622		return ret;
623	}
624
625 +	ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
626 +			     0, 0, true, true, &dev_priv->vga_ram);
627 +	if (ret == 0)
628 +		ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
629 +	if (ret) {
630 +		NV_WARN(dev, "failed to reserve VGA memory\n");
631 +		nouveau_bo_ref(NULL, &dev_priv->vga_ram);
632 +	}
633 +
634	/* GART */
635	#if !defined(__powerpc__) && !defined(__ia64__)
636	if (drm_device_is_agp(dev) && dev->agp) {
···
653	dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
654					 drm_get_resource_len(dev, 1),
655					 DRM_MTRR_WC);
656 +
657	return 0;
658	}
659
+1
drivers/gpu/drm/nouveau/nouveau_state.c
···
525	engine->mc.takedown(dev);
526
527	mutex_lock(&dev->struct_mutex);
528	ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
529	mutex_unlock(&dev->struct_mutex);
530	nouveau_sgdma_takedown(dev);

···
525	engine->mc.takedown(dev);
526
527	mutex_lock(&dev->struct_mutex);
528 +	ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
529	ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
530	mutex_unlock(&dev->struct_mutex);
531	nouveau_sgdma_takedown(dev);
+1 -1
drivers/gpu/drm/nouveau/nv04_instmem.c
···
30	 * of vram.  For now, only reserve a small piece until we know
31	 * more about what each chipset requires.
32	 */
33 -	switch (dev_priv->chipset & 0xf0) {
34	case 0x40:
35	case 0x47:
36	case 0x49:

···
30	 * of vram.  For now, only reserve a small piece until we know
31	 * more about what each chipset requires.
32	 */
33 +	switch (dev_priv->chipset) {
34	case 0x40:
35	case 0x47:
36	case 0x49:
+21 -1
drivers/gpu/drm/nouveau/nv50_crtc.c
···
432	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
433	struct drm_device *dev = crtc->dev;
434	struct drm_encoder *encoder;
435
436	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
437
···
440	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
441		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
442
443 -		if (drm_helper_encoder_in_use(encoder))
444			continue;
445
446		nv_encoder->disconnect(nv_encoder);
447	}

···
432	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
433	struct drm_device *dev = crtc->dev;
434	struct drm_encoder *encoder;
435 +	uint32_t dac = 0, sor = 0;
436
437	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
438
···
439	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
440		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
441
442 +		if (!drm_helper_encoder_in_use(encoder))
443			continue;
444 +
445 +		if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
446 +		    nv_encoder->dcb->type == OUTPUT_TV)
447 +			dac |= (1 << nv_encoder->or);
448 +		else
449 +			sor |= (1 << nv_encoder->or);
450 +	}
451 +
452 +	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
453 +		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
454 +
455 +		if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
456 +		    nv_encoder->dcb->type == OUTPUT_TV) {
457 +			if (dac & (1 << nv_encoder->or))
458 +				continue;
459 +		} else {
460 +			if (sor & (1 << nv_encoder->or))
461 +				continue;
462 +		}
463
464		nv_encoder->disconnect(nv_encoder);
465	}
+1 -1
drivers/gpu/drm/nouveau/nv50_fifo.c
···
272		return ret;
273	ramfc = chan->ramfc->gpuobj;
274
275 -	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 256,
276				     0, &chan->cache);
277	if (ret)
278		return ret;

···
272		return ret;
273	ramfc = chan->ramfc->gpuobj;
274
275 +	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
276				     0, &chan->cache);
277	if (ret)
278		return ret;
+2 -1
drivers/gpu/drm/nouveau/nv50_graph.c
···
84	nv_wr32(dev, 0x400804, 0xc0000000);
85	nv_wr32(dev, 0x406800, 0xc0000000);
86	nv_wr32(dev, 0x400c04, 0xc0000000);
87 -	nv_wr32(dev, 0x401804, 0xc0000000);
88	nv_wr32(dev, 0x405018, 0xc0000000);
89	nv_wr32(dev, 0x402000, 0xc0000000);
90
···
282		return 0;
283	inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
284
285	nv_wr32(dev, 0x400500, fifo & ~1);
286	nv_wr32(dev, 0x400784, inst);
287	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);

···
84	nv_wr32(dev, 0x400804, 0xc0000000);
85	nv_wr32(dev, 0x406800, 0xc0000000);
86	nv_wr32(dev, 0x400c04, 0xc0000000);
87 +	nv_wr32(dev, 0x401800, 0xc0000000);
88	nv_wr32(dev, 0x405018, 0xc0000000);
89	nv_wr32(dev, 0x402000, 0xc0000000);
90
···
282		return 0;
283	inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
284
285 +	nouveau_wait_for_idle(dev);
286	nv_wr32(dev, 0x400500, fifo & ~1);
287	nv_wr32(dev, 0x400784, inst);
288	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
+13
drivers/gpu/drm/nouveau/nv50_sor.c
···
90	{
91	struct drm_device *dev = encoder->dev;
92	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
93	uint32_t val;
94	int or = nv_encoder->or;
95
96	NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
97
98	/* wait for it to be done */
99	if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),

···
90	{
91	struct drm_device *dev = encoder->dev;
92	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
93 +	struct drm_encoder *enc;
94	uint32_t val;
95	int or = nv_encoder->or;
96
97	NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
98 +
99 +	nv_encoder->last_dpms = mode;
100 +	list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
101 +		struct nouveau_encoder *nvenc = nouveau_encoder(enc);
102 +
103 +		if (nvenc == nv_encoder ||
104 +		    nvenc->dcb->or != nv_encoder->dcb->or)
105 +			continue;
106 +
107 +		if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
108 +			return;
109 +	}
110
111	/* wait for it to be done */
112	if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
+85 -17
drivers/gpu/drm/radeon/atom.c
···
246	case ATOM_WS_ATTRIBUTES:
247		val = gctx->io_attr;
248		break;
249	default:
250		val = ctx->ws[idx];
251	}
···
388	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
389	}
390
391	static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
392				     int *ptr, uint32_t *saved, int print)
393	{
···
510		break;
511	case ATOM_WS_ATTRIBUTES:
512		gctx->io_attr = val;
513		break;
514	default:
515		ctx->ws[idx] = val;
···
709	SDEBUG(" dst: ");
710	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
711	SDEBUG(" src1: ");
712 -	src1 = atom_get_src(ctx, attr, ptr);
713	SDEBUG(" src2: ");
714	src2 = atom_get_src(ctx, attr, ptr);
715	dst &= src1;
···
841	SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
842	}
843
844	static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
845	{
846	uint8_t attr = U8((*ptr)++), shift;
···
882	attr |= atom_def_dst[attr >> 3] << 6;
883	SDEBUG(" dst: ");
884	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
885 -	shift = U8((*ptr)++);
886	SDEBUG(" shift: %d\n", shift);
887	dst <<= shift;
888	SDEBUG(" dst: ");
···
898	attr |= atom_def_dst[attr >> 3] << 6;
899	SDEBUG(" dst: ");
900	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
901 -	shift = U8((*ptr)++);
902	SDEBUG(" shift: %d\n", shift);
903	dst >>= shift;
904	SDEBUG(" dst: ");
···
1001	atom_op_or, ATOM_ARG_FB}, {
1002	atom_op_or, ATOM_ARG_PLL}, {
1003	atom_op_or, ATOM_ARG_MC}, {
1004 -	atom_op_shl, ATOM_ARG_REG}, {
1005 -	atom_op_shl, ATOM_ARG_PS}, {
1006 -	atom_op_shl, ATOM_ARG_WS}, {
1007 -	atom_op_shl, ATOM_ARG_FB}, {
1008 -	atom_op_shl, ATOM_ARG_PLL}, {
1009 -	atom_op_shl, ATOM_ARG_MC}, {
1010 -	atom_op_shr, ATOM_ARG_REG}, {
1011 -	atom_op_shr, ATOM_ARG_PS}, {
1012 -	atom_op_shr, ATOM_ARG_WS}, {
1013 -	atom_op_shr, ATOM_ARG_FB}, {
1014 -	atom_op_shr, ATOM_ARG_PLL}, {
1015 -	atom_op_shr, ATOM_ARG_MC}, {
1016	atom_op_mul, ATOM_ARG_REG}, {
1017	atom_op_mul, ATOM_ARG_PS}, {
1018	atom_op_mul, ATOM_ARG_WS}, {
···
1122
1123	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1124
1125 -	/* reset reg block */
1126 -	ctx->reg_block = 0;
1127	ectx.ctx = ctx;
1128	ectx.ps_shift = ps / 4;
1129	ectx.start = base;
···
1158	void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1159	{
1160	mutex_lock(&ctx->mutex);
1161	atom_execute_table_locked(ctx, index, params);
1162	mutex_unlock(&ctx->mutex);
1163	}

···
246	case ATOM_WS_ATTRIBUTES:
247		val = gctx->io_attr;
248		break;
249 +	case ATOM_WS_REGPTR:
250 +		val = gctx->reg_block;
251 +		break;
252	default:
253		val = ctx->ws[idx];
254	}
···
385	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
386	}
387
388 +	static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
389 +	{
390 +	uint32_t val = 0xCDCDCDCD;
391 +
392 +	switch (align) {
393 +	case ATOM_SRC_DWORD:
394 +		val = U32(*ptr);
395 +		(*ptr) += 4;
396 +		break;
397 +	case ATOM_SRC_WORD0:
398 +	case ATOM_SRC_WORD8:
399 +	case ATOM_SRC_WORD16:
400 +		val = U16(*ptr);
401 +		(*ptr) += 2;
402 +		break;
403 +	case ATOM_SRC_BYTE0:
404 +	case ATOM_SRC_BYTE8:
405 +	case ATOM_SRC_BYTE16:
406 +	case ATOM_SRC_BYTE24:
407 +		val = U8(*ptr);
408 +		(*ptr)++;
409 +		break;
410 +	}
411 +	return val;
412 +	}
413 +
414	static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
415				     int *ptr, uint32_t *saved, int print)
416	{
···
481		break;
482	case ATOM_WS_ATTRIBUTES:
483		gctx->io_attr = val;
484 +		break;
485 +	case ATOM_WS_REGPTR:
486 +		gctx->reg_block = val;
487		break;
488	default:
489		ctx->ws[idx] = val;
···
677	SDEBUG(" dst: ");
678	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
679	SDEBUG(" src1: ");
680 +	src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
681	SDEBUG(" src2: ");
682	src2 = atom_get_src(ctx, attr, ptr);
683	dst &= src1;
···
809	SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
810	}
811
812 +	static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
813 +	{
814 +	uint8_t attr = U8((*ptr)++), shift;
815 +	uint32_t saved, dst;
816 +	int dptr = *ptr;
817 +	attr &= 0x38;
818 +	attr |= atom_def_dst[attr >> 3] << 6;
819 +	SDEBUG(" dst: ");
820 +	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
821 +	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
822 +	SDEBUG(" shift: %d\n", shift);
823 +	dst <<= shift;
824 +	SDEBUG(" dst: ");
825 +	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
826 +	}
827 +
828 +	static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
829 +	{
830 +	uint8_t attr = U8((*ptr)++), shift;
831 +	uint32_t saved, dst;
832 +	int dptr = *ptr;
833 +	attr &= 0x38;
834 +	attr |= atom_def_dst[attr >> 3] << 6;
835 +	SDEBUG(" dst: ");
836 +	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
837 +	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
838 +	SDEBUG(" shift: %d\n", shift);
839 +	dst >>= shift;
840 +	SDEBUG(" dst: ");
841 +	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
842 +	}
843 +
844	static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
845	{
846	uint8_t attr = U8((*ptr)++), shift;
···
818	attr |= atom_def_dst[attr >> 3] << 6;
819	SDEBUG(" dst: ");
820	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
821 +	shift = atom_get_src(ctx, attr, ptr);
822	SDEBUG(" shift: %d\n", shift);
823	dst <<= shift;
824	SDEBUG(" dst: ");
···
834	attr |= atom_def_dst[attr >> 3] << 6;
835	SDEBUG(" dst: ");
836	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
837 +	shift = atom_get_src(ctx, attr, ptr);
838	SDEBUG(" shift: %d\n", shift);
839	dst >>= shift;
840	SDEBUG(" dst: ");
···
937	atom_op_or, ATOM_ARG_FB}, {
938	atom_op_or, ATOM_ARG_PLL}, {
939	atom_op_or, ATOM_ARG_MC}, {
940 +	atom_op_shift_left, ATOM_ARG_REG}, {
941 +	atom_op_shift_left, ATOM_ARG_PS}, {
942 +	atom_op_shift_left, ATOM_ARG_WS}, {
943 +	atom_op_shift_left, ATOM_ARG_FB}, {
944 +	atom_op_shift_left, ATOM_ARG_PLL}, {
945 +	atom_op_shift_left, ATOM_ARG_MC}, {
946 +	atom_op_shift_right, ATOM_ARG_REG}, {
947 +	atom_op_shift_right, ATOM_ARG_PS}, {
948 +	atom_op_shift_right, ATOM_ARG_WS}, {
949 +	atom_op_shift_right, ATOM_ARG_FB}, {
950 +	atom_op_shift_right, ATOM_ARG_PLL}, {
951 +	atom_op_shift_right, ATOM_ARG_MC}, {
952	atom_op_mul, ATOM_ARG_REG}, {
953	atom_op_mul, ATOM_ARG_PS}, {
954	atom_op_mul, ATOM_ARG_WS}, {
···
1058
1059	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1060
1061	ectx.ctx = ctx;
1062	ectx.ps_shift = ps / 4;
1063	ectx.start = base;
···
1096	void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1097	{
1098	mutex_lock(&ctx->mutex);
1099 +	/* reset reg block */
1100 +	ctx->reg_block = 0;
1101 +	/* reset fb window */
1102 +	ctx->fb_base = 0;
1103 +	/* reset io mode */
1104 +	ctx->io_mode = ATOM_IO_MM;
1105	atom_execute_table_locked(ctx, index, params);
1106	mutex_unlock(&ctx->mutex);
1107	}
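The shl/shr split exists because the two opcode families encode their shift count differently, and reading the wrong width desynchronizes the bytecode stream (everything after the bad read is misparsed). In brief, from the handlers above:

	/* operand widths after the fix:
	 *   shl / shr:
	 *       shift = atom_get_src(ctx, attr, ptr);
	 *       -> a full source operand, width taken from the attr byte
	 *   shift_left / shift_right:
	 *       shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
	 *       -> always a single literal byte
	 * the old shared handler read U8() unconditionally, so any wider
	 * operand left *ptr pointing mid-instruction. */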
+1
drivers/gpu/drm/radeon/atom.h
···
91	#define ATOM_WS_AND_MASK	0x45
92	#define ATOM_WS_FB_WINDOW	0x46
93	#define ATOM_WS_ATTRIBUTES	0x47
94
95	#define ATOM_IIO_NOP	0
96	#define ATOM_IIO_START	1

···
91	#define ATOM_WS_AND_MASK	0x45
92	#define ATOM_WS_FB_WINDOW	0x46
93	#define ATOM_WS_ATTRIBUTES	0x47
94 +	#define ATOM_WS_REGPTR	0x48
95
96	#define ATOM_IIO_NOP	0
97	#define ATOM_IIO_START	1
+169 -90
drivers/gpu/drm/radeon/atombios_crtc.c
···
307	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
308	args.ucCRTC = radeon_crtc->crtc_id;
309
310 -	printk("executing set crtc dtd timing\n");
311	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
312	}
313
···
346	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
347	args.ucCRTC = radeon_crtc->crtc_id;
348
349 -	printk("executing set crtc timing\n");
350	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
351	}
352
···
407		}
408	}
409
410 -	void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
411	{
412 -	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
413	struct drm_device *dev = crtc->dev;
414	struct radeon_device *rdev = dev->dev_private;
415	struct drm_encoder *encoder = NULL;
416	struct radeon_encoder *radeon_encoder = NULL;
417 -	uint8_t frev, crev;
418 -	int index;
419 -	SET_PIXEL_CLOCK_PS_ALLOCATION args;
420 -	PIXEL_CLOCK_PARAMETERS *spc1_ptr;
421 -	PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
422 -	PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
423 -	uint32_t pll_clock = mode->clock;
424 -	uint32_t adjusted_clock;
425 -	uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
426 -	struct radeon_pll *pll;
427 -	int pll_flags = 0;
428
429 -	memset(&args, 0, sizeof(args));
430
431	if (ASIC_IS_AVIVO(rdev)) {
432		if ((rdev->family == CHIP_RS600) ||
433		    (rdev->family == CHIP_RS690) ||
434		    (rdev->family == CHIP_RS740))
435 -			pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
436 -				      RADEON_PLL_PREFER_CLOSEST_LOWER);
437
438		if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
439 -			pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
440		else
441 -			pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
442	} else {
443 -		pll_flags |= RADEON_PLL_LEGACY;
444
445		if (mode->clock > 200000) /* range limits??? */
446 -			pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
447		else
448 -			pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
449
450	}
451
452	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
453		if (encoder->crtc == crtc) {
454 -			if (!ASIC_IS_AVIVO(rdev)) {
455 -				if (encoder->encoder_type !=
456 -				    DRM_MODE_ENCODER_DAC)
457 -					pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
458 -				if (encoder->encoder_type ==
459 -				    DRM_MODE_ENCODER_LVDS)
460 -					pll_flags |= RADEON_PLL_USE_REF_DIV;
461 -			}
462			radeon_encoder = to_radeon_encoder(encoder);
463			break;
464		}
465	}
···
467	 * special hw requirements.
468	 */
469	if (ASIC_IS_DCE3(rdev)) {
470 -		ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args;
471
472 -		if (!encoder)
473 -			return;
474 -
475 -		memset(&adjust_pll_args, 0, sizeof(adjust_pll_args));
476 -		adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10);
477 -		adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id;
478 -		adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder);
479
480		index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
481 -		atom_execute_table(rdev->mode_info.atom_context,
482 -				   index, (uint32_t *)&adjust_pll_args);
483 -		adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10;
484 -	} else {
485 -		/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
486 -		if (ASIC_IS_AVIVO(rdev) &&
487 -		    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
488 -			adjusted_clock = mode->clock * 2;
489 -		else
490 -			adjusted_clock = mode->clock;
491	}
492
493	if (radeon_crtc->crtc_id == 0)
494		pll = &rdev->clock.p1pll;
495	else
496		pll = &rdev->clock.p2pll;
497
498	if (ASIC_IS_AVIVO(rdev)) {
499		if (radeon_new_pll)
500			radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
501						 &fb_div, &frac_fb_div,
502 -						 &ref_div, &post_div, pll_flags);
503		else
504			radeon_compute_pll(pll, adjusted_clock, &pll_clock,
505					   &fb_div, &frac_fb_div,
506 -					   &ref_div, &post_div, pll_flags);
507	} else
508		radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
509 -			   &ref_div, &post_div, pll_flags);
510
511	index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
512	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
···
571	case 1:
572		switch (crev) {
573		case 1:
574 -			spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput;
575 -			spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
576 -			spc1_ptr->usRefDiv = cpu_to_le16(ref_div);
577 -			spc1_ptr->usFbDiv = cpu_to_le16(fb_div);
578 -			spc1_ptr->ucFracFbDiv = frac_fb_div;
579 -			spc1_ptr->ucPostDiv = post_div;
580 -			spc1_ptr->ucPpll =
581			    radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
582 -			spc1_ptr->ucCRTC = radeon_crtc->crtc_id;
583 -			spc1_ptr->ucRefDivSrc = 1;
584			break;
585		case 2:
586 -			spc2_ptr =
587 -			    (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput;
588 -			spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
589 -			spc2_ptr->usRefDiv = cpu_to_le16(ref_div);
590 -			spc2_ptr->usFbDiv = cpu_to_le16(fb_div);
591 -			spc2_ptr->ucFracFbDiv = frac_fb_div;
592 -			spc2_ptr->ucPostDiv = post_div;
593 -			spc2_ptr->ucPpll =
594			    radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
595 -			spc2_ptr->ucCRTC = radeon_crtc->crtc_id;
596 -			spc2_ptr->ucRefDivSrc = 1;
597			break;
598		case 3:
599 -			if (!encoder)
600 -				return;
601 -			spc3_ptr =
602 -			    (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput;
603 -			spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
604 -			spc3_ptr->usRefDiv = cpu_to_le16(ref_div);
605 -			spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
606 -			spc3_ptr->ucFracFbDiv = frac_fb_div;
607 -			spc3_ptr->ucPostDiv = post_div;
608 -			spc3_ptr->ucPpll =
609			    radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
610 -			spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2);
611 -			spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id;
612 -			spc3_ptr->ucEncoderMode =
613			    atombios_get_encoder_mode(encoder);
614			break;
615		default:
···
615		return;
616	}
617
618 -	printk("executing set pll\n");
619	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
620	}
621
622 -	int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
623 -				   struct drm_framebuffer *old_fb)
624	{
625	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
626	struct drm_device *dev = crtc->dev;
···
749		return 0;
750	}
751
752	int atombios_crtc_mode_set(struct drm_crtc *crtc,
753				   struct drm_display_mode *mode,
754				   struct drm_display_mode *adjusted_mode,
···
806	else {
807		if (radeon_crtc->crtc_id == 0)
808			atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
809 -		radeon_crtc_set_base(crtc, x, y, old_fb);
810 -		radeon_legacy_atom_set_surface(crtc);
811	}
812	atombios_overscan_setup(crtc, mode, adjusted_mode);
813	atombios_scaler_setup(crtc);
···
825
826	static void atombios_crtc_prepare(struct drm_crtc *crtc)
827	{
828 -	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
829	atombios_lock_crtc(crtc, 1);
830	}
831
832	static void atombios_crtc_commit(struct drm_crtc *crtc)
···
307	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
308	args.ucCRTC = radeon_crtc->crtc_id;
309
310	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
311	}
312
···
347	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
348	args.ucCRTC = radeon_crtc->crtc_id;
349
350	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
351	}
352
···
409		}
410	}
411
412 +	union adjust_pixel_clock {
413 +		ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
414 +	};
415 +
416 +	static u32 atombios_adjust_pll(struct drm_crtc *crtc,
417 +				       struct drm_display_mode *mode,
418 +				       struct radeon_pll *pll)
419	{
420	struct drm_device *dev = crtc->dev;
421	struct radeon_device *rdev = dev->dev_private;
422	struct drm_encoder *encoder = NULL;
423	struct radeon_encoder *radeon_encoder = NULL;
424 +	u32 adjusted_clock = mode->clock;
425
426 +	/* reset the pll flags */
427 +	pll->flags = 0;
428
429	if (ASIC_IS_AVIVO(rdev)) {
430		if ((rdev->family == CHIP_RS600) ||
431		    (rdev->family == CHIP_RS690) ||
432		    (rdev->family == CHIP_RS740))
433 +			pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
434 +				       RADEON_PLL_PREFER_CLOSEST_LOWER);
435
436		if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
437 +			pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
438		else
439 +			pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
440	} else {
441 +		pll->flags |= RADEON_PLL_LEGACY;
442
443		if (mode->clock > 200000) /* range limits??? */
444 +			pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
445		else
446 +			pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
447
448	}
449
450	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
451		if (encoder->crtc == crtc) {
452			radeon_encoder = to_radeon_encoder(encoder);
453 +			if (ASIC_IS_AVIVO(rdev)) {
454 +				/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
455 +				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
456 +					adjusted_clock = mode->clock * 2;
457 +			} else {
458 +				if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
459 +					pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
460 +				if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
461 +					pll->flags |= RADEON_PLL_USE_REF_DIV;
462 +			}
463			break;
464		}
465	}
···
471	 * special hw requirements.
472	 */
473	if (ASIC_IS_DCE3(rdev)) {
474 +		union adjust_pixel_clock args;
475 +		struct radeon_encoder_atom_dig *dig;
476 +		u8 frev, crev;
477 +		int index;
478
479 +		if (!radeon_encoder->enc_priv)
480 +			return adjusted_clock;
481 +		dig = radeon_encoder->enc_priv;
482
483		index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
484 +		atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
485 +				      &crev);
486 +
487 +		memset(&args, 0, sizeof(args));
488 +
489 +		switch (frev) {
490 +		case 1:
491 +			switch (crev) {
492 +			case 1:
493 +			case 2:
494 +				args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
495 +				args.v1.ucTransmitterID = radeon_encoder->encoder_id;
496 +				args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder);
497 +
498 +				atom_execute_table(rdev->mode_info.atom_context,
499 +						   index, (uint32_t *)&args);
500 +				adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
501 +				break;
502 +			default:
503 +				DRM_ERROR("Unknown table version %d %d\n", frev, crev);
504 +				return adjusted_clock;
505 +			}
506 +			break;
507 +		default:
508 +			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
509 +			return adjusted_clock;
510 +		}
511	}
512 +	return adjusted_clock;
513 +	}
514 +
515 +	union set_pixel_clock {
516 +		SET_PIXEL_CLOCK_PS_ALLOCATION base;
517 +		PIXEL_CLOCK_PARAMETERS v1;
518 +		PIXEL_CLOCK_PARAMETERS_V2 v2;
519 +		PIXEL_CLOCK_PARAMETERS_V3 v3;
520 +	};
521 +
522 +	void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
523 +	{
524 +	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
525 +	struct drm_device *dev = crtc->dev;
526 +	struct radeon_device *rdev = dev->dev_private;
527 +	struct drm_encoder *encoder = NULL;
528 +	struct radeon_encoder *radeon_encoder = NULL;
529 +	u8 frev, crev;
530 +	int index;
531 +	union set_pixel_clock args;
532 +	u32 pll_clock = mode->clock;
533 +	u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
534 +	struct radeon_pll *pll;
535 +	u32 adjusted_clock;
536 +
537 +	memset(&args, 0, sizeof(args));
538 +
539 +	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
540 +		if (encoder->crtc == crtc) {
541 +			radeon_encoder = to_radeon_encoder(encoder);
542 +			break;
543 +		}
544 +	}
545 +
546 +	if (!radeon_encoder)
547 +		return;
548
549	if (radeon_crtc->crtc_id == 0)
550		pll = &rdev->clock.p1pll;
551	else
552		pll = &rdev->clock.p2pll;
553
554 +	/* adjust pixel clock as needed */
555 +	adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
556 +
557	if (ASIC_IS_AVIVO(rdev)) {
558		if (radeon_new_pll)
559			radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
560						 &fb_div, &frac_fb_div,
561 +						 &ref_div, &post_div);
562		else
563			radeon_compute_pll(pll, adjusted_clock, &pll_clock,
564					   &fb_div, &frac_fb_div,
565 +					   &ref_div, &post_div);
566	} else
567		radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
568 +			   &ref_div, &post_div);
569
570	index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
571	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
···
520	case 1:
521		switch (crev) {
522		case 1:
523 +			args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
524 +			args.v1.usRefDiv = cpu_to_le16(ref_div);
525 +			args.v1.usFbDiv = cpu_to_le16(fb_div);
526 +			args.v1.ucFracFbDiv = frac_fb_div;
527 +			args.v1.ucPostDiv = post_div;
528 +			args.v1.ucPpll =
529			    radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
530 +			args.v1.ucCRTC = radeon_crtc->crtc_id;
531 +			args.v1.ucRefDivSrc = 1;
532			break;
533		case 2:
534 +			args.v2.usPixelClock = cpu_to_le16(mode->clock / 10);
535 +			args.v2.usRefDiv = cpu_to_le16(ref_div);
536 +			args.v2.usFbDiv = cpu_to_le16(fb_div);
537 +			args.v2.ucFracFbDiv = frac_fb_div;
538 +			args.v2.ucPostDiv = post_div;
539 +			args.v2.ucPpll =
540			    radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
541 +			args.v2.ucCRTC = radeon_crtc->crtc_id;
542 +			args.v2.ucRefDivSrc = 1;
543			break;
544		case 3:
545 +			args.v3.usPixelClock = cpu_to_le16(mode->clock / 10);
546 +			args.v3.usRefDiv = cpu_to_le16(ref_div);
547 +			args.v3.usFbDiv = cpu_to_le16(fb_div);
548 +			args.v3.ucFracFbDiv = frac_fb_div;
549 +			args.v3.ucPostDiv = post_div;
550 +			args.v3.ucPpll =
551			    radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
552 +			args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
553 +			args.v3.ucTransmitterId = radeon_encoder->encoder_id;
554 +			args.v3.ucEncoderMode =
555			    atombios_get_encoder_mode(encoder);
556			break;
557		default:
···
571		return;
572	}
573
574	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
575	}
576
577 +	static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
578 +				       struct drm_framebuffer *old_fb)
579	{
580	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
581	struct drm_device *dev = crtc->dev;
···
706		return 0;
707	}
708
709 +	int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
710 +				   struct drm_framebuffer *old_fb)
711 +	{
712 +	struct drm_device *dev = crtc->dev;
713 +	struct radeon_device *rdev = dev->dev_private;
714 +
715 +	if (ASIC_IS_AVIVO(rdev))
716 +		return avivo_crtc_set_base(crtc, x, y, old_fb);
717 +	else
718 +		return radeon_crtc_set_base(crtc, x, y, old_fb);
719 +	}
720 +
721 +	/* properly set additional regs when using atombios */
722 +	static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
723 +	{
724 +	struct drm_device *dev = crtc->dev;
725 +	struct radeon_device *rdev = dev->dev_private;
726 +	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
727 +	u32 disp_merge_cntl;
728 +
729 +	switch (radeon_crtc->crtc_id) {
730 +	case 0:
731 +		disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
732 +		disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
733 +		WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
734 +		break;
735 +	case 1:
736 +		disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
737 +		disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
738 +		WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
739 +		WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
740 +		WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
741 +		break;
742 +	}
743 +	}
744 +
745	int atombios_crtc_mode_set(struct drm_crtc *crtc,
746				   struct drm_display_mode *mode,
747				   struct drm_display_mode *adjusted_mode,
···
727	else {
728		if (radeon_crtc->crtc_id == 0)
729			atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
730 +		atombios_crtc_set_base(crtc, x, y, old_fb);
731 +		radeon_legacy_atom_fixup(crtc);
732	}
733	atombios_overscan_setup(crtc, mode, adjusted_mode);
734	atombios_scaler_setup(crtc);
···
746
747	static void atombios_crtc_prepare(struct drm_crtc *crtc)
748	{
749	atombios_lock_crtc(crtc, 1);
750 +	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
751	}
752
753	static void atombios_crtc_commit(struct drm_crtc *crtc)
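Both atombios_adjust_pll() and atombios_crtc_set_pll() now follow the same idiom for versioned ATOM command tables: read the table's frev/crev from the command header, then fill only the matching union member. Reduced to its skeleton (field assignments elided):

	union set_pixel_clock args;
	u8 frev, crev;

	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
	memset(&args, 0, sizeof(args));

	switch (frev) {
	case 1:
		switch (crev) {
		case 1: /* fill args.v1 */ break;
		case 2: /* fill args.v2 */ break;
		case 3: /* fill args.v3 */ break;
		default: return;	/* unknown minor revision */
		}
		break;
	default:
		return;			/* unknown major revision */
	}
	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

Because every revision shares one union allocation, the same memset and the same execute call serve all of them.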
+2 -3
drivers/gpu/drm/radeon/r100.c
···
1504		DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1505		return -EINVAL;
1506	}
1507	track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1508	track->immd_dwords = pkt->count - 1;
1509	r = r100_cs_track_check(p->rdev, track);
···
3400	if (rdev->flags & RADEON_IS_AGP) {
3401		r = radeon_agp_init(rdev);
3402		if (r) {
3403 -			printk(KERN_WARNING "[drm] Disabling AGP\n");
3404 -			rdev->flags &= ~RADEON_IS_AGP;
3405 -			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
3406		} else {
3407			rdev->mc.gtt_location = rdev->mc.agp_base;
3408		}

···
1504		DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1505		return -EINVAL;
1506	}
1507 +	track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
1508	track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1509	track->immd_dwords = pkt->count - 1;
1510	r = r100_cs_track_check(p->rdev, track);
···
3399	if (rdev->flags & RADEON_IS_AGP) {
3400		r = radeon_agp_init(rdev);
3401		if (r) {
3402 +			radeon_agp_disable(rdev);
3403		} else {
3404			rdev->mc.gtt_location = rdev->mc.agp_base;
3405		}
+5 -2
drivers/gpu/drm/radeon/r200.c
···
371	case 5:
372	case 6:
373	case 7:
374		track->textures[i].tex_coord_type = 0;
375		break;
376	case 1:
377 -		track->textures[i].tex_coord_type = 1;
378		break;
379	case 2:
380 -		track->textures[i].tex_coord_type = 2;
381		break;
382	}
383	break;

···
371	case 5:
372	case 6:
373	case 7:
374 +		/* 1D/2D */
375		track->textures[i].tex_coord_type = 0;
376		break;
377	case 1:
378 +		/* CUBE */
379 +		track->textures[i].tex_coord_type = 2;
380		break;
381	case 2:
382 +		/* 3D */
383 +		track->textures[i].tex_coord_type = 1;
384		break;
385	}
386	break;
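For reference, the CS tracker's coordinate-type encoding as the fixed code uses it:

	/* tex_coord_type values in the r200 CS tracker:
	 *   0 = 1D/2D textures, 1 = 3D textures, 2 = cube maps
	 * the old code passed the hardware format value through unchanged,
	 * which swapped the cube (1) and 3D (2) cases. */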
+1 -3
drivers/gpu/drm/radeon/r420.c
··· 50 if (rdev->flags & RADEON_IS_AGP) { 51 r = radeon_agp_init(rdev); 52 if (r) { 53 - printk(KERN_WARNING "[drm] Disabling AGP\n"); 54 - rdev->flags &= ~RADEON_IS_AGP; 55 - rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 56 } else { 57 rdev->mc.gtt_location = rdev->mc.agp_base; 58 }
··· 50 if (rdev->flags & RADEON_IS_AGP) { 51 r = radeon_agp_init(rdev); 52 if (r) { 53 + radeon_agp_disable(rdev); 54 } else { 55 rdev->mc.gtt_location = rdev->mc.agp_base; 56 }
+47 -35
drivers/gpu/drm/radeon/r600.c
··· 624 fixed20_12 a; 625 u32 tmp; 626 int chansize, numchan; 627 - int r; 628 629 /* Get VRAM informations */ 630 rdev->mc.vram_is_ddr = true; ··· 666 rdev->mc.real_vram_size = rdev->mc.aper_size; 667 668 if (rdev->flags & RADEON_IS_AGP) { 669 - r = radeon_agp_init(rdev); 670 - if (r) 671 - return r; 672 /* gtt_size is setup by radeon_agp_init */ 673 rdev->mc.gtt_location = rdev->mc.agp_base; 674 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; ··· 1954 /* FIXME: we should wait for ring to be empty */ 1955 r600_cp_stop(rdev); 1956 rdev->cp.ready = false; 1957 r600_wb_disable(rdev); 1958 r600_pcie_gart_disable(rdev); 1959 /* unpin shaders bo */ 1960 - r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 1961 - if (unlikely(r != 0)) 1962 - return r; 1963 - radeon_bo_unpin(rdev->r600_blit.shader_obj); 1964 - radeon_bo_unreserve(rdev->r600_blit.shader_obj); 1965 return 0; 1966 } 1967 ··· 2025 r = radeon_fence_driver_init(rdev); 2026 if (r) 2027 return r; 2028 r = r600_mc_init(rdev); 2029 if (r) 2030 return r; ··· 2064 if (rdev->accel_working) { 2065 r = radeon_ib_pool_init(rdev); 2066 if (r) { 2067 - DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); 2068 rdev->accel_working = false; 2069 - } 2070 - r = r600_ib_test(rdev); 2071 - if (r) { 2072 - DRM_ERROR("radeon: failed testing IB (%d).\n", r); 2073 - rdev->accel_working = false; 2074 } 2075 } 2076 ··· 2202 rb_bufsz = drm_order(ring_size / 4); 2203 ring_size = (1 << rb_bufsz) * 4; 2204 rdev->ih.ring_size = ring_size; 2205 - rdev->ih.align_mask = 4 - 1; 2206 } 2207 2208 - static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size) 2209 { 2210 int r; 2211 2212 - rdev->ih.ring_size = ring_size; 2213 /* Allocate ring buffer */ 2214 if (rdev->ih.ring_obj == NULL) { 2215 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, ··· 2239 return r; 2240 } 2241 } 2242 - rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1; 2243 - rdev->ih.rptr = 0; 2244 - 2245 return 0; 2246 } 2247 ··· 2388 u32 interrupt_cntl, ih_cntl, ih_rb_cntl; 2389 2390 /* allocate ring */ 2391 - ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size); 2392 if (ret) 2393 return ret; 2394 ··· 2451 return ret; 2452 } 2453 2454 - void r600_irq_fini(struct radeon_device *rdev) 2455 { 2456 r600_disable_interrupts(rdev); 2457 r600_rlc_stop(rdev); 2458 r600_ih_ring_fini(rdev); 2459 } 2460 ··· 2474 return -EINVAL; 2475 } 2476 /* don't enable anything if the ih is disabled */ 2477 - if (!rdev->ih.enabled) 2478 return 0; 2479 2480 if (ASIC_IS_DCE3(rdev)) { 2481 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; ··· 2649 wptr = RREG32(IH_RB_WPTR); 2650 2651 if (wptr & RB_OVERFLOW) { 2652 - WARN_ON(1); 2653 - /* XXX deal with overflow */ 2654 - DRM_ERROR("IH RB overflow\n"); 2655 tmp = RREG32(IH_RB_CNTL); 2656 tmp |= IH_WPTR_OVERFLOW_CLEAR; 2657 WREG32(IH_RB_CNTL, tmp); 2658 } 2659 - wptr = wptr & WPTR_OFFSET_MASK; 2660 - 2661 - return wptr; 2662 } 2663 2664 /* r600 IV Ring ··· 2696 u32 wptr = r600_get_ih_wptr(rdev); 2697 u32 rptr = rdev->ih.rptr; 2698 u32 src_id, src_data; 2699 - u32 last_entry = rdev->ih.ring_size - 16; 2700 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2; 2701 unsigned long flags; 2702 bool queue_hotplug = false; 2703 2704 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); 2705 2706 spin_lock_irqsave(&rdev->ih.lock, flags); 2707 ··· 2831 } 2832 2833 /* wptr/rptr are in bytes! 
*/ 2834 - if (rptr == last_entry) 2835 - rptr = 0; 2836 - else 2837 - rptr += 16; 2838 } 2839 /* make sure wptr hasn't changed while processing */ 2840 wptr = r600_get_ih_wptr(rdev);
··· 624 fixed20_12 a; 625 u32 tmp; 626 int chansize, numchan; 627 628 /* Get VRAM informations */ 629 rdev->mc.vram_is_ddr = true; ··· 667 rdev->mc.real_vram_size = rdev->mc.aper_size; 668 669 if (rdev->flags & RADEON_IS_AGP) { 670 /* gtt_size is setup by radeon_agp_init */ 671 rdev->mc.gtt_location = rdev->mc.agp_base; 672 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; ··· 1958 /* FIXME: we should wait for ring to be empty */ 1959 r600_cp_stop(rdev); 1960 rdev->cp.ready = false; 1961 + r600_irq_suspend(rdev); 1962 r600_wb_disable(rdev); 1963 r600_pcie_gart_disable(rdev); 1964 /* unpin shaders bo */ 1965 + if (rdev->r600_blit.shader_obj) { 1966 + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 1967 + if (!r) { 1968 + radeon_bo_unpin(rdev->r600_blit.shader_obj); 1969 + radeon_bo_unreserve(rdev->r600_blit.shader_obj); 1970 + } 1971 + } 1972 return 0; 1973 } ··· 2026 r = radeon_fence_driver_init(rdev); 2027 if (r) 2028 return r; 2029 + if (rdev->flags & RADEON_IS_AGP) { 2030 + r = radeon_agp_init(rdev); 2031 + if (r) 2032 + radeon_agp_disable(rdev); 2033 + } 2034 r = r600_mc_init(rdev); 2035 if (r) 2036 return r; ··· 2060 if (rdev->accel_working) { 2061 r = radeon_ib_pool_init(rdev); 2062 if (r) { 2063 + dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 2064 rdev->accel_working = false; 2065 + } else { 2066 + r = r600_ib_test(rdev); 2067 + if (r) { 2068 + dev_err(rdev->dev, "IB test failed (%d).\n", r); 2069 + rdev->accel_working = false; 2070 + } 2071 } 2072 } ··· 2197 rb_bufsz = drm_order(ring_size / 4); 2198 ring_size = (1 << rb_bufsz) * 4; 2199 rdev->ih.ring_size = ring_size; 2200 + rdev->ih.ptr_mask = rdev->ih.ring_size - 1; 2201 + rdev->ih.rptr = 0; 2202 } 2203 2204 + static int r600_ih_ring_alloc(struct radeon_device *rdev) 2205 { 2206 int r; 2207 2208 /* Allocate ring buffer */ 2209 if (rdev->ih.ring_obj == NULL) { 2210 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, ··· 2234 return r; 2235 } 2236 } 2237 return 0; 2238 } ··· 2386 u32 interrupt_cntl, ih_cntl, ih_rb_cntl; 2387 2388 /* allocate ring */ 2389 + ret = r600_ih_ring_alloc(rdev); 2390 if (ret) 2391 return ret; ··· 2449 return ret; 2450 } 2451 2452 + void r600_irq_suspend(struct radeon_device *rdev) 2453 { 2454 r600_disable_interrupts(rdev); 2455 r600_rlc_stop(rdev); 2456 + } 2457 + 2458 + void r600_irq_fini(struct radeon_device *rdev) 2459 + { 2460 + r600_irq_suspend(rdev); 2461 r600_ih_ring_fini(rdev); 2462 } ··· 2467 return -EINVAL; 2468 } 2469 /* don't enable anything if the ih is disabled */ 2470 + if (!rdev->ih.enabled) { 2471 + r600_disable_interrupts(rdev); 2472 + /* force the active interrupt state to all disabled */ 2473 + r600_disable_interrupt_state(rdev); 2474 return 0; 2475 + } 2476 2477 if (ASIC_IS_DCE3(rdev)) { 2478 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; ··· 2638 wptr = RREG32(IH_RB_WPTR); 2639 2640 if (wptr & RB_OVERFLOW) { 2641 + /* When a ring buffer overflow happens, start parsing interrupts 2642 + * from the last vector not yet overwritten (wptr + 16). Hopefully 2643 + * this should allow us to catch up.
2644 + */ 2645 + dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", 2646 + wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); 2647 + rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; 2648 tmp = RREG32(IH_RB_CNTL); 2649 tmp |= IH_WPTR_OVERFLOW_CLEAR; 2650 WREG32(IH_RB_CNTL, tmp); 2651 } 2652 + return (wptr & rdev->ih.ptr_mask); 2653 } 2654 2655 /* r600 IV Ring ··· 2683 u32 wptr = r600_get_ih_wptr(rdev); 2684 u32 rptr = rdev->ih.rptr; 2685 u32 src_id, src_data; 2686 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2; 2687 unsigned long flags; 2688 bool queue_hotplug = false; 2689 2690 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); 2691 + if (!rdev->ih.enabled) 2692 + return IRQ_NONE; 2693 2694 spin_lock_irqsave(&rdev->ih.lock, flags); 2695 ··· 2817 } 2818 2819 /* wptr/rptr are in bytes! */ 2820 + rptr += 16; 2821 + rptr &= rdev->ih.ptr_mask; 2822 } 2823 /* make sure wptr hasn't changed while processing */ 2824 wptr = r600_get_ih_wptr(rdev);
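Several of the r600.c changes above lean on the IH ring size being a power of two (drm_order() guarantees it), so ring_size - 1 doubles as a byte mask: advancing the read pointer and resuming after an overflow each become a single AND instead of the old explicit wrap test. A minimal standalone illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define IH_RING_SIZE 1024u               /* power of two, as drm_order() ensures */
    #define IH_PTR_MASK  (IH_RING_SIZE - 1)  /* what rdev->ih.ptr_mask now holds */

    int main(void)
    {
        uint32_t rptr = IH_RING_SIZE - 16;   /* the last 16-byte vector before the wrap */

        /* consuming two vectors: the mask replaces the old
         * "if (rptr == last_entry) rptr = 0;" wrap test */
        for (int i = 0; i < 2; i++) {
            rptr += 16;
            rptr &= IH_PTR_MASK;
            printf("rptr = %u\n", (unsigned)rptr);   /* 0, then 16 */
        }

        /* overflow recovery: resume at the oldest vector not yet overwritten */
        uint32_t wptr = 0x12345;
        printf("resume at %u\n", (unsigned)((wptr + 16) & IH_PTR_MASK));
        return 0;
    }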
+8 -6
drivers/gpu/drm/radeon/r600_blit_kms.c
··· 512 { 513 int r; 514 515 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 516 - if (unlikely(r != 0)) { 517 - dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r); 518 - goto out_unref; 519 } 520 - radeon_bo_unpin(rdev->r600_blit.shader_obj); 521 - radeon_bo_unreserve(rdev->r600_blit.shader_obj); 522 - out_unref: 523 radeon_bo_unref(&rdev->r600_blit.shader_obj); 524 } 525
··· 512 { 513 int r; 514 515 + if (rdev->r600_blit.shader_obj == NULL) 516 + return; 517 + /* If we can't reserve the bo, unref should be enough to destroy 518 + * it when it becomes idle. 519 + */ 520 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 521 + if (!r) { 522 + radeon_bo_unpin(rdev->r600_blit.shader_obj); 523 + radeon_bo_unreserve(rdev->r600_blit.shader_obj); 524 } 525 radeon_bo_unref(&rdev->r600_blit.shader_obj); 526 } 527
+83
drivers/gpu/drm/radeon/r600_cs.c
··· 36 typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); 37 static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; 38 39 /** 40 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet 41 * @parser: parser structure holding parsing context. ··· 178 (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32; 179 (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0]; 180 return 0; 181 } 182 183 /** ··· 363 struct radeon_cs_packet *pkt) 364 { 365 struct radeon_cs_reloc *reloc; 366 volatile u32 *ib; 367 unsigned idx; 368 unsigned i; ··· 371 int r; 372 u32 idx_value; 373 374 ib = p->ib->ptr; 375 idx = pkt->idx + 1; 376 idx_value = radeon_get_ib_value(p, idx); ··· 531 for (i = 0; i < pkt->count; i++) { 532 reg = start_reg + (4 * i); 533 switch (reg) { 534 case DB_DEPTH_BASE: 535 case DB_HTILE_DATA_BASE: 536 case CB_COLOR0_BASE: 537 case CB_COLOR1_BASE: 538 case CB_COLOR2_BASE: 539 case CB_COLOR3_BASE: ··· 757 int r600_cs_parse(struct radeon_cs_parser *p) 758 { 759 struct radeon_cs_packet pkt; 760 int r; 761 762 do { 763 r = r600_cs_packet_parse(p, &pkt, p->idx); 764 if (r) { ··· 839 /* initialize parser */ 840 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 841 parser.filp = filp; 842 parser.rdev = NULL; 843 parser.family = family; 844 parser.ib = &fake_ib;
··· 36 typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); 37 static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; 38 39 + struct r600_cs_track { 40 + u32 cb_color0_base_last; 41 + }; 42 + 43 /** 44 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet 45 * @parser: parser structure holding parsing context. ··· 174 (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32; 175 (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0]; 176 return 0; 177 + } 178 + 179 + /** 180 + * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc 181 + * @parser: parser structure holding parsing context. 182 + * 183 + * Check whether the next packet is a relocation packet3 (a NOP carrying 184 + * relocation data); returns 1 if it is, 0 otherwise. 185 + **/ 186 + static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) 187 + { 188 + struct radeon_cs_packet p3reloc; 189 + int r; 190 + 191 + r = r600_cs_packet_parse(p, &p3reloc, p->idx); 192 + if (r) { 193 + return 0; 194 + } 195 + if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { 196 + return 0; 197 + } 198 + return 1; 199 } 200 201 /** ··· 337 struct radeon_cs_packet *pkt) 338 { 339 struct radeon_cs_reloc *reloc; 340 + struct r600_cs_track *track; 341 volatile u32 *ib; 342 unsigned idx; 343 unsigned i; ··· 344 int r; 345 u32 idx_value; 346 347 + track = (struct r600_cs_track *)p->track; 348 ib = p->ib->ptr; 349 idx = pkt->idx + 1; 350 idx_value = radeon_get_ib_value(p, idx); ··· 503 for (i = 0; i < pkt->count; i++) { 504 reg = start_reg + (4 * i); 505 switch (reg) { 506 + /* These registers were added late, and there is userspace 507 + * which does provide relocations for them but sets a 508 + * 0 offset. In order to avoid breaking old userspace 509 + * we detect this and set the address to point to the last 510 + * CB_COLOR0_BASE; note that if userspace doesn't set 511 + * CB_COLOR0_BASE before these registers we will report an 512 + * error. Old userspace always set CB_COLOR0_BASE 513 + * before any of this. 514 + */ 515 + case R_0280E0_CB_COLOR0_FRAG: 516 + case R_0280E4_CB_COLOR1_FRAG: 517 + case R_0280E8_CB_COLOR2_FRAG: 518 + case R_0280EC_CB_COLOR3_FRAG: 519 + case R_0280F0_CB_COLOR4_FRAG: 520 + case R_0280F4_CB_COLOR5_FRAG: 521 + case R_0280F8_CB_COLOR6_FRAG: 522 + case R_0280FC_CB_COLOR7_FRAG: 523 + case R_0280C0_CB_COLOR0_TILE: 524 + case R_0280C4_CB_COLOR1_TILE: 525 + case R_0280C8_CB_COLOR2_TILE: 526 + case R_0280CC_CB_COLOR3_TILE: 527 + case R_0280D0_CB_COLOR4_TILE: 528 + case R_0280D4_CB_COLOR5_TILE: 529 + case R_0280D8_CB_COLOR6_TILE: 530 + case R_0280DC_CB_COLOR7_TILE: 531 + if (!r600_cs_packet_next_is_pkt3_nop(p)) { 532 + if (!track->cb_color0_base_last) { 533 + dev_err(p->dev, "Broken old userspace? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
534 + return -EINVAL; 535 + } 536 + ib[idx+1+i] = track->cb_color0_base_last; 537 + printk_once(KERN_WARNING "radeon: You have old & broken userspace " 538 + "please consider updating mesa & xf86-video-ati\n"); 539 + } else { 540 + r = r600_cs_packet_next_reloc(p, &reloc); 541 + if (r) { 542 + dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 543 + return -EINVAL; 544 + } 545 + ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 546 + } 547 + break; 548 case DB_DEPTH_BASE: 549 case DB_HTILE_DATA_BASE: 550 case CB_COLOR0_BASE: 551 + r = r600_cs_packet_next_reloc(p, &reloc); 552 + if (r) { 553 + DRM_ERROR("bad SET_CONTEXT_REG " 554 + "0x%04X\n", reg); 555 + return -EINVAL; 556 + } 557 + ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 558 + track->cb_color0_base_last = ib[idx+1+i]; 559 + break; 560 case CB_COLOR1_BASE: 561 case CB_COLOR2_BASE: 562 case CB_COLOR3_BASE: ··· 678 int r600_cs_parse(struct radeon_cs_parser *p) 679 { 680 struct radeon_cs_packet pkt; 681 + struct r600_cs_track *track; 682 int r; 683 684 + track = kzalloc(sizeof(*track), GFP_KERNEL); 685 + p->track = track; 686 do { 687 r = r600_cs_packet_parse(p, &pkt, p->idx); 688 if (r) { ··· 757 /* initialize parser */ 758 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 759 parser.filp = filp; 760 + parser.dev = &dev->pdev->dev; 761 parser.rdev = NULL; 762 parser.family = family; 763 parser.ib = &fake_ib;
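Stripped of the packet plumbing, the policy the new FRAG/TILE cases apply is small; the sketch below uses simplified stand-in types for the checker state, with has_reloc playing the role of r600_cs_packet_next_is_pkt3_nop():

    #include <stdint.h>
    #include <stdio.h>

    struct cs_track { uint32_t cb_color0_base_last; };

    static int fixup_frag_or_tile(struct cs_track *track, int has_reloc,
                                  uint32_t reloc_base, uint32_t *value)
    {
        if (!has_reloc) {
            if (!track->cb_color0_base_last)
                return -1;                           /* no base seen yet: reject */
            *value = track->cb_color0_base_last;     /* reuse the last base */
            return 0;
        }
        *value += reloc_base;                        /* normal relocation path */
        return 0;
    }

    int main(void)
    {
        struct cs_track track = { .cb_color0_base_last = 0x1000 };
        uint32_t v = 0;

        if (fixup_frag_or_tile(&track, 0, 0, &v) == 0)
            printf("patched to 0x%04X\n", (unsigned)v);   /* 0x1000 */
        return 0;
    }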
+25
drivers/gpu/drm/radeon/r600d.h
··· 882 #define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) 883 884 #define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 885 #endif
··· 882 #define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) 883 884 #define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 885 + 886 + #define R_0280E0_CB_COLOR0_FRAG 0x0280E0 887 + #define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0) 888 + #define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF) 889 + #define C_0280E0_BASE_256B 0x00000000 890 + #define R_0280E4_CB_COLOR1_FRAG 0x0280E4 891 + #define R_0280E8_CB_COLOR2_FRAG 0x0280E8 892 + #define R_0280EC_CB_COLOR3_FRAG 0x0280EC 893 + #define R_0280F0_CB_COLOR4_FRAG 0x0280F0 894 + #define R_0280F4_CB_COLOR5_FRAG 0x0280F4 895 + #define R_0280F8_CB_COLOR6_FRAG 0x0280F8 896 + #define R_0280FC_CB_COLOR7_FRAG 0x0280FC 897 + #define R_0280C0_CB_COLOR0_TILE 0x0280C0 898 + #define S_0280C0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0) 899 + #define G_0280C0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF) 900 + #define C_0280C0_BASE_256B 0x00000000 901 + #define R_0280C4_CB_COLOR1_TILE 0x0280C4 902 + #define R_0280C8_CB_COLOR2_TILE 0x0280C8 903 + #define R_0280CC_CB_COLOR3_TILE 0x0280CC 904 + #define R_0280D0_CB_COLOR4_TILE 0x0280D0 905 + #define R_0280D4_CB_COLOR5_TILE 0x0280D4 906 + #define R_0280D8_CB_COLOR6_TILE 0x0280D8 907 + #define R_0280DC_CB_COLOR7_TILE 0x0280DC 908 + 909 + 910 #endif
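These S_/G_/C_ triplets follow the driver's usual register-field convention: S_ shifts a value into the field, G_ extracts it, and C_ is the AND mask that clears it (all-zero here because BASE_256B occupies the full 32 bits, which makes the shifts degenerate). A hypothetical narrower field shows the pattern more clearly:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical 8-bit field at bits 4..11, in the same S_/G_/C_ style. */
    #define S_EXAMPLE_FIELD(x)  (((x) & 0xFF) << 4)   /* set: shift into place */
    #define G_EXAMPLE_FIELD(x)  (((x) >> 4) & 0xFF)   /* get: shift back out   */
    #define C_EXAMPLE_FIELD     0xFFFFF00F            /* clear: AND mask       */

    int main(void)
    {
        uint32_t reg = 0xDEADBEEF;

        /* read-modify-write: clear the field, then set a new value */
        reg = (reg & C_EXAMPLE_FIELD) | S_EXAMPLE_FIELD(0x42);
        printf("reg = 0x%08X, field = 0x%02X\n",
               (unsigned)reg, (unsigned)G_EXAMPLE_FIELD(reg));
        return 0;
    }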
+7 -4
drivers/gpu/drm/radeon/radeon.h
··· 410 unsigned wptr_old; 411 unsigned ring_size; 412 uint64_t gpu_addr; 413 - uint32_t align_mask; 414 uint32_t ptr_mask; 415 spinlock_t lock; 416 bool enabled; ··· 464 }; 465 466 struct radeon_cs_parser { 467 struct radeon_device *rdev; 468 struct drm_file *filp; 469 /* chunks */ ··· 847 848 static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) 849 { 850 - if (reg < 0x10000) 851 return readl(((void __iomem *)rdev->rmmio) + reg); 852 else { 853 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); ··· 857 858 static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 859 { 860 - if (reg < 0x10000) 861 writel(v, ((void __iomem *)rdev->rmmio) + reg); 862 else { 863 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); ··· 1017 #define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) 1018 1019 /* Common functions */ 1020 extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); 1021 extern int radeon_modeset_init(struct radeon_device *rdev); 1022 extern void radeon_modeset_fini(struct radeon_device *rdev); ··· 1162 extern void r600_irq_fini(struct radeon_device *rdev); 1163 extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); 1164 extern int r600_irq_set(struct radeon_device *rdev); 1165 - 1166 extern int r600_audio_init(struct radeon_device *rdev); 1167 extern int r600_audio_tmds_index(struct drm_encoder *encoder); 1168 extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
··· 410 unsigned wptr_old; 411 unsigned ring_size; 412 uint64_t gpu_addr; 413 uint32_t ptr_mask; 414 spinlock_t lock; 415 bool enabled; ··· 465 }; 466 467 struct radeon_cs_parser { 468 + struct device *dev; 469 struct radeon_device *rdev; 470 struct drm_file *filp; 471 /* chunks */ ··· 847 848 static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) 849 { 850 + if (reg < rdev->rmmio_size) 851 return readl(((void __iomem *)rdev->rmmio) + reg); 852 else { 853 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); ··· 857 858 static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 859 { 860 + if (reg < rdev->rmmio_size) 861 writel(v, ((void __iomem *)rdev->rmmio) + reg); 862 else { 863 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); ··· 1017 #define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) 1018 1019 /* Common functions */ 1020 + /* AGP */ 1021 + extern void radeon_agp_disable(struct radeon_device *rdev); 1022 extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); 1023 extern int radeon_modeset_init(struct radeon_device *rdev); 1024 extern void radeon_modeset_fini(struct radeon_device *rdev); ··· 1160 extern void r600_irq_fini(struct radeon_device *rdev); 1161 extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); 1162 extern int r600_irq_set(struct radeon_device *rdev); 1163 + extern void r600_irq_suspend(struct radeon_device *rdev); 1164 + /* r600 audio */ 1165 extern int r600_audio_init(struct radeon_device *rdev); 1166 extern int r600_audio_tmds_index(struct drm_encoder *encoder); 1167 extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
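The rreg/wreg change above tightens the direct-access window from a hard-coded 0x10000 to whatever was actually ioremapped (rmmio_size); registers beyond that are reached through the MM_INDEX/MM_DATA pair. A toy model of the split, with a flat array standing in for the device:

    #include <stdint.h>
    #include <stdio.h>

    #define MM_INDEX    0x0000  /* stand-ins for RADEON_MM_INDEX/RADEON_MM_DATA */
    #define MM_DATA     0x0004
    #define RMMIO_SIZE  0x100   /* pretend only 256 bytes are ioremapped */

    static uint32_t regs[0x10000 / 4];  /* the full register file */
    static uint32_t mm_index;

    /* What a readl()/writel() on the mapped window would hit. */
    static uint32_t win_read(uint32_t off)
    {
        return off == MM_DATA ? regs[mm_index / 4] : regs[off / 4];
    }

    static void win_write(uint32_t off, uint32_t v)
    {
        if (off == MM_INDEX)
            mm_index = v;
        else if (off == MM_DATA)
            regs[mm_index / 4] = v;
        else
            regs[off / 4] = v;
    }

    static uint32_t rreg(uint32_t reg)
    {
        if (reg < RMMIO_SIZE)
            return win_read(reg);       /* direct access inside the window */
        win_write(MM_INDEX, reg);       /* indirect: select, then read back */
        return win_read(MM_DATA);
    }

    int main(void)
    {
        regs[0x8000 / 4] = 0xCAFE;      /* a register far beyond the window */
        printf("reg 0x8000 = 0x%X\n", (unsigned)rreg(0x8000));
        return 0;
    }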
+7
drivers/gpu/drm/radeon/radeon_agp.c
··· 133 bool is_v3; 134 int ret; 135 136 /* Acquire AGP. */ 137 if (!rdev->ddev->agp->acquired) { 138 ret = drm_agp_acquire(rdev->ddev);
··· 133 bool is_v3; 134 int ret; 135 136 + if (rdev->ddev->agp->agp_info.aper_size < 32) { 137 + dev_warn(rdev->dev, "AGP aperture too small (%dM), " 138 + "need at least 32M, disabling AGP\n", 139 + rdev->ddev->agp->agp_info.aper_size); 140 + return -EINVAL; 141 + } 142 + 143 /* Acquire AGP. */ 144 if (!rdev->ddev->agp->acquired) { 145 ret = drm_agp_acquire(rdev->ddev);
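radeon_agp_disable() is the helper the r100/r420/r600/rv770 hunks now call instead of open-coding the AGP fallback; its body is not shown in this diff. A plausible shape, inferred purely from the open-coded lines it replaces (warning, flag clear, GART-size reset), would be:

    /* Sketch only, inferred from the removed open-coded fallback; the real
     * helper in radeon_agp.c may do more (e.g. re-route gart callbacks). */
    void radeon_agp_disable(struct radeon_device *rdev)
    {
            dev_warn(rdev->dev, "Disabling AGP\n");
            rdev->flags &= ~RADEON_IS_AGP;
            rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
    }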
+2 -2
drivers/gpu/drm/radeon/radeon_clocks.c
··· 56 else if (post_div == 3) 57 sclk >>= 2; 58 else if (post_div == 4) 59 - sclk >>= 4; 60 61 return sclk; 62 } ··· 86 else if (post_div == 3) 87 mclk >>= 2; 88 else if (post_div == 4) 89 - mclk >>= 4; 90 91 return mclk; 92 }
··· 56 else if (post_div == 3) 57 sclk >>= 2; 58 else if (post_div == 4) 59 + sclk >>= 3; 60 61 return sclk; 62 } ··· 86 else if (post_div == 3) 87 mclk >>= 2; 88 else if (post_div == 4) 89 + mclk >>= 3; 90 91 return mclk; 92 }
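The two one-character fixes above restore the legacy post-divider encoding: field value 3 means divide by 4 (shift 2) and 4 means divide by 8 (shift 3), so the old ">>= 4" was dividing by 16. In isolation (the value-2 case is assumed from the pattern; it sits outside the visible context):

    #include <stdio.h>

    static unsigned apply_post_div(unsigned clk, unsigned post_div)
    {
        switch (post_div) {
        case 2:  return clk >> 1;   /* /2 (assumed, outside the hunk's context) */
        case 3:  return clk >> 2;   /* /4 */
        case 4:  return clk >> 3;   /* /8 -- the old ">>= 4" gave /16 */
        default: return clk;        /* /1 */
        }
    }

    int main(void)
    {
        printf("%u kHz\n", apply_post_div(400000, 4));  /* 50000, not 25000 */
        return 0;
    }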
+1
drivers/gpu/drm/radeon/radeon_cs.c
··· 231 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 232 parser.filp = filp; 233 parser.rdev = rdev; 234 r = radeon_cs_parser_init(&parser, data); 235 if (r) { 236 DRM_ERROR("Failed to initialize parser !\n");
··· 231 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 232 parser.filp = filp; 233 parser.rdev = rdev; 234 + parser.dev = rdev->dev; 235 r = radeon_cs_parser_init(&parser, data); 236 if (r) { 237 DRM_ERROR("Failed to initialize parser !\n");
+1
drivers/gpu/drm/radeon/radeon_device.c
··· 544 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; 545 rdev->asic->gart_set_page = &r100_pci_gart_set_page; 546 } 547 } 548 549 void radeon_check_arguments(struct radeon_device *rdev)
··· 544 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; 545 rdev->asic->gart_set_page = &r100_pci_gart_set_page; 546 } 547 + rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 548 } 549 550 void radeon_check_arguments(struct radeon_device *rdev)
+26 -19
drivers/gpu/drm/radeon/radeon_display.c
··· 357 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 358 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { 359 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; 360 - if (dig->dp_i2c_bus) 361 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); 362 } 363 if (!radeon_connector->ddc_bus) ··· 411 uint32_t *fb_div_p, 412 uint32_t *frac_fb_div_p, 413 uint32_t *ref_div_p, 414 - uint32_t *post_div_p, 415 - int flags) 416 { 417 uint32_t min_ref_div = pll->min_ref_div; 418 uint32_t max_ref_div = pll->max_ref_div; 419 uint32_t min_fractional_feed_div = 0; 420 uint32_t max_fractional_feed_div = 0; 421 uint32_t best_vco = pll->best_vco; ··· 432 DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); 433 freq = freq * 1000; 434 435 - if (flags & RADEON_PLL_USE_REF_DIV) 436 min_ref_div = max_ref_div = pll->reference_div; 437 else { 438 while (min_ref_div < max_ref_div-1) { ··· 447 } 448 } 449 450 - if (flags & RADEON_PLL_USE_FRAC_FB_DIV) { 451 min_fractional_feed_div = pll->min_frac_feedback_div; 452 max_fractional_feed_div = pll->max_frac_feedback_div; 453 } 454 455 - for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) { 456 uint32_t ref_div; 457 458 - if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) 459 continue; 460 461 /* legacy radeons only have a few post_divs */ 462 - if (flags & RADEON_PLL_LEGACY) { 463 if ((post_div == 5) || 464 (post_div == 7) || 465 (post_div == 9) || ··· 509 tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; 510 current_freq = radeon_div(tmp, ref_div * post_div); 511 512 - if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { 513 error = freq - current_freq; 514 error = error < 0 ? 0xffffffff : error; 515 } else ··· 536 best_freq = current_freq; 537 best_error = error; 538 best_vco_diff = vco_diff; 539 - } else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || 540 - ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || 541 - ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || 542 - ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || 543 - ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || 544 - ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { 545 best_post_div = post_div; 546 best_ref_div = ref_div; 547 best_feedback_div = feedback_div; ··· 577 uint32_t *fb_div_p, 578 uint32_t *frac_fb_div_p, 579 uint32_t *ref_div_p, 580 - uint32_t *post_div_p, 581 - int flags) 582 { 583 fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq; 584 fixed20_12 pll_out_max, pll_out_min; ··· 671 radeonfb_remove(dev, fb); 672 673 if (radeon_fb->obj) { 674 - radeon_gem_object_unpin(radeon_fb->obj); 675 mutex_lock(&dev->struct_mutex); 676 drm_gem_object_unreference(radeon_fb->obj); 677 mutex_unlock(&dev->struct_mutex); ··· 718 struct drm_gem_object *obj; 719 720 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); 721 - 722 return radeon_framebuffer_create(dev, mode_cmd, obj); 723 } 724
··· 357 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 358 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { 359 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; 360 + if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || 361 + dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus) 362 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); 363 } 364 if (!radeon_connector->ddc_bus) ··· 410 uint32_t *fb_div_p, 411 uint32_t *frac_fb_div_p, 412 uint32_t *ref_div_p, 413 + uint32_t *post_div_p) 414 { 415 uint32_t min_ref_div = pll->min_ref_div; 416 uint32_t max_ref_div = pll->max_ref_div; 417 + uint32_t min_post_div = pll->min_post_div; 418 + uint32_t max_post_div = pll->max_post_div; 419 uint32_t min_fractional_feed_div = 0; 420 uint32_t max_fractional_feed_div = 0; 421 uint32_t best_vco = pll->best_vco; ··· 430 DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); 431 freq = freq * 1000; 432 433 + if (pll->flags & RADEON_PLL_USE_REF_DIV) 434 min_ref_div = max_ref_div = pll->reference_div; 435 else { 436 while (min_ref_div < max_ref_div-1) { ··· 445 } 446 } 447 448 + if (pll->flags & RADEON_PLL_USE_POST_DIV) 449 + min_post_div = max_post_div = pll->post_div; 450 + 451 + if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { 452 min_fractional_feed_div = pll->min_frac_feedback_div; 453 max_fractional_feed_div = pll->max_frac_feedback_div; 454 } 455 456 + for (post_div = min_post_div; post_div <= max_post_div; ++post_div) { 457 uint32_t ref_div; 458 459 + if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) 460 continue; 461 462 /* legacy radeons only have a few post_divs */ 463 + if (pll->flags & RADEON_PLL_LEGACY) { 464 if ((post_div == 5) || 465 (post_div == 7) || 466 (post_div == 9) || ··· 504 tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; 505 current_freq = radeon_div(tmp, ref_div * post_div); 506 507 + if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { 508 error = freq - current_freq; 509 error = error < 0 ? 
0xffffffff : error; 510 } else ··· 531 best_freq = current_freq; 532 best_error = error; 533 best_vco_diff = vco_diff; 534 + } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || 535 + ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || 536 + ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || 537 + ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || 538 + ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || 539 + ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { 540 best_post_div = post_div; 541 best_ref_div = ref_div; 542 best_feedback_div = feedback_div; ··· 572 uint32_t *fb_div_p, 573 uint32_t *frac_fb_div_p, 574 uint32_t *ref_div_p, 575 + uint32_t *post_div_p) 576 { 577 fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq; 578 fixed20_12 pll_out_max, pll_out_min; ··· 667 radeonfb_remove(dev, fb); 668 669 if (radeon_fb->obj) { 670 mutex_lock(&dev->struct_mutex); 671 drm_gem_object_unreference(radeon_fb->obj); 672 mutex_unlock(&dev->struct_mutex); ··· 715 struct drm_gem_object *obj; 716 717 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); 718 + if (obj == NULL) { 719 + dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, " 720 + "can't create framebuffer\n", mode_cmd->handle); 721 + return NULL; 722 + } 723 return radeon_framebuffer_create(dev, mode_cmd, obj); 724 } 725
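One detail worth isolating from the search loop above: under RADEON_PLL_PREFER_CLOSEST_LOWER, any candidate frequency above the target has its error saturated to 0xffffffff, so only candidates at or below the target can win. A standalone sketch (the non-lower branch, plain distance, is assumed here for contrast):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pll_error(int64_t target, int64_t candidate, int lower_only)
    {
        int64_t error = target - candidate;

        if (lower_only)                     /* RADEON_PLL_PREFER_CLOSEST_LOWER */
            return error < 0 ? 0xffffffffu : (uint32_t)error;
        return (uint32_t)(error < 0 ? -error : error);
    }

    int main(void)
    {
        printf("%u\n", (unsigned)pll_error(154000, 154200, 1));  /* overshoot: 4294967295 */
        printf("%u\n", (unsigned)pll_error(154000, 153800, 1));  /* undershoot: 200 */
        return 0;
    }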
+7 -70
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
··· 339 } 340 } 341 342 - /* properly set crtc bpp when using atombios */ 343 - void radeon_legacy_atom_set_surface(struct drm_crtc *crtc) 344 - { 345 - struct drm_device *dev = crtc->dev; 346 - struct radeon_device *rdev = dev->dev_private; 347 - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 348 - int format; 349 - uint32_t crtc_gen_cntl; 350 - uint32_t disp_merge_cntl; 351 - uint32_t crtc_pitch; 352 - 353 - switch (crtc->fb->bits_per_pixel) { 354 - case 8: 355 - format = 2; 356 - break; 357 - case 15: /* 555 */ 358 - format = 3; 359 - break; 360 - case 16: /* 565 */ 361 - format = 4; 362 - break; 363 - case 24: /* RGB */ 364 - format = 5; 365 - break; 366 - case 32: /* xRGB */ 367 - format = 6; 368 - break; 369 - default: 370 - return; 371 - } 372 - 373 - crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) + 374 - ((crtc->fb->bits_per_pixel * 8) - 1)) / 375 - (crtc->fb->bits_per_pixel * 8)); 376 - crtc_pitch |= crtc_pitch << 16; 377 - 378 - WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch); 379 - 380 - switch (radeon_crtc->crtc_id) { 381 - case 0: 382 - disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL); 383 - disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN; 384 - WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl); 385 - 386 - crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff; 387 - crtc_gen_cntl |= (format << 8); 388 - crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN; 389 - WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl); 390 - break; 391 - case 1: 392 - disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL); 393 - disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN; 394 - WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl); 395 - 396 - crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff; 397 - crtc_gen_cntl |= (format << 8); 398 - WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl); 399 - WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID)); 400 - WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID)); 401 - break; 402 - } 403 - } 404 - 405 int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, 406 struct drm_framebuffer *old_fb) 407 { ··· 692 uint32_t post_divider = 0; 693 uint32_t freq = 0; 694 uint8_t pll_gain; 695 - int pll_flags = RADEON_PLL_LEGACY; 696 bool use_bios_divs = false; 697 /* PLL registers */ 698 uint32_t pll_ref_div = 0; ··· 725 else 726 pll = &rdev->clock.p1pll; 727 728 if (mode->clock > 200000) /* range limits??? */ 729 - pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 730 else 731 - pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 732 733 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 734 if (encoder->crtc == crtc) { ··· 742 } 743 744 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 745 - pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; 746 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { 747 if (!rdev->is_atom_bios) { 748 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ··· 757 } 758 } 759 } 760 - pll_flags |= RADEON_PLL_USE_REF_DIV; 761 } 762 } 763 } ··· 767 if (!use_bios_divs) { 768 radeon_compute_pll(pll, mode->clock, 769 &freq, &feedback_div, &frac_fb_div, 770 - &reference_div, &post_divider, 771 - pll_flags); 772 773 for (post_div = &post_divs[0]; post_div->divider; ++post_div) { 774 if (post_div->divider == post_divider)
··· 339 } 340 } 341 342 int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, 343 struct drm_framebuffer *old_fb) 344 { ··· 755 uint32_t post_divider = 0; 756 uint32_t freq = 0; 757 uint8_t pll_gain; 758 bool use_bios_divs = false; 759 /* PLL registers */ 760 uint32_t pll_ref_div = 0; ··· 789 else 790 pll = &rdev->clock.p1pll; 791 792 + pll->flags = RADEON_PLL_LEGACY; 793 + 794 if (mode->clock > 200000) /* range limits??? */ 795 + pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 796 else 797 + pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 798 799 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 800 if (encoder->crtc == crtc) { ··· 804 } 805 806 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 807 + pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; 808 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { 809 if (!rdev->is_atom_bios) { 810 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); ··· 819 } 820 } 821 } 822 + pll->flags |= RADEON_PLL_USE_REF_DIV; 823 } 824 } 825 } ··· 829 if (!use_bios_divs) { 830 radeon_compute_pll(pll, mode->clock, 831 &freq, &feedback_div, &frac_fb_div, 832 + &reference_div, &post_divider); 833 834 for (post_div = &post_divs[0]; post_div->divider; ++post_div) { 835 if (post_div->divider == post_divider)
+19 -9
drivers/gpu/drm/radeon/radeon_mode.h
··· 125 #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) 126 #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) 127 #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) 128 129 struct radeon_pll { 130 - uint16_t reference_freq; 131 - uint16_t reference_div; 132 uint32_t pll_in_min; 133 uint32_t pll_in_max; 134 uint32_t pll_out_min; 135 uint32_t pll_out_max; 136 - uint16_t xclk; 137 138 uint32_t min_ref_div; 139 uint32_t max_ref_div; 140 uint32_t min_post_div; ··· 151 uint32_t max_feedback_div; 152 uint32_t min_frac_feedback_div; 153 uint32_t max_frac_feedback_div; 154 - uint32_t best_vco; 155 }; 156 157 struct radeon_i2c_chan { ··· 430 uint32_t *fb_div_p, 431 uint32_t *frac_fb_div_p, 432 uint32_t *ref_div_p, 433 - uint32_t *post_div_p, 434 - int flags); 435 436 extern void radeon_compute_pll_avivo(struct radeon_pll *pll, 437 uint64_t freq, ··· 438 uint32_t *fb_div_p, 439 uint32_t *frac_fb_div_p, 440 uint32_t *ref_div_p, 441 - uint32_t *post_div_p, 442 - int flags); 443 444 extern void radeon_setup_encoder_clones(struct drm_device *dev); 445 ··· 464 465 extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, 466 struct drm_framebuffer *old_fb); 467 - extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc); 468 469 extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, 470 struct drm_file *file_priv,
··· 125 #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) 126 #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) 127 #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) 128 + #define RADEON_PLL_USE_POST_DIV (1 << 12) 129 130 struct radeon_pll { 131 + /* reference frequency */ 132 + uint32_t reference_freq; 133 + 134 + /* fixed dividers */ 135 + uint32_t reference_div; 136 + uint32_t post_div; 137 + 138 + /* pll in/out limits */ 139 uint32_t pll_in_min; 140 uint32_t pll_in_max; 141 uint32_t pll_out_min; 142 uint32_t pll_out_max; 143 + uint32_t best_vco; 144 145 + /* divider limits */ 146 uint32_t min_ref_div; 147 uint32_t max_ref_div; 148 uint32_t min_post_div; ··· 143 uint32_t max_feedback_div; 144 uint32_t min_frac_feedback_div; 145 uint32_t max_frac_feedback_div; 146 + 147 + /* flags for the current clock */ 148 + uint32_t flags; 149 + 150 + /* pll id */ 151 + uint32_t id; 152 }; 153 154 struct radeon_i2c_chan { ··· 417 uint32_t *fb_div_p, 418 uint32_t *frac_fb_div_p, 419 uint32_t *ref_div_p, 420 + uint32_t *post_div_p); 421 422 extern void radeon_compute_pll_avivo(struct radeon_pll *pll, 423 uint64_t freq, ··· 426 uint32_t *fb_div_p, 427 uint32_t *frac_fb_div_p, 428 uint32_t *ref_div_p, 429 + uint32_t *post_div_p); 430 431 extern void radeon_setup_encoder_clones(struct drm_device *dev); 432 ··· 453 454 extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, 455 struct drm_framebuffer *old_fb); 456 457 extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, 458 struct drm_file *file_priv,
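Given the handling added in radeon_display.c (min_post_div = max_post_div = pll->post_div), the new RADEON_PLL_USE_POST_DIV flag is presumably meant to be used the way RADEON_PLL_USE_REF_DIV already is: a caller that must keep a fixed post divider pins it before running the search. A hedged usage sketch, reusing the variable names from the legacy CRTC code above:

    /* Sketch only: assumed caller-side usage, mirroring how
     * RADEON_PLL_USE_REF_DIV pins reference_div today. */
    pll->flags |= RADEON_PLL_USE_POST_DIV;
    pll->post_div = 2;              /* the search will try only post_div == 2 */
    radeon_compute_pll(pll, mode->clock, &freq, &feedback_div,
                       &frac_fb_div, &reference_div, &post_divider);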
+2 -1
drivers/gpu/drm/radeon/radeon_object.c
··· 220 221 int radeon_bo_evict_vram(struct radeon_device *rdev) 222 { 223 - if (rdev->flags & RADEON_IS_IGP) { 224 if (rdev->mc.igp_sideport_enabled == false) 225 /* Useless to evict on IGP chips */ 226 return 0;
··· 220 221 int radeon_bo_evict_vram(struct radeon_device *rdev) 222 { 223 + /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */ 224 + if (0 && (rdev->flags & RADEON_IS_IGP)) { 225 if (rdev->mc.igp_sideport_enabled == false) 226 /* Useless to evict on IGP chips */ 227 return 0;
+2
drivers/gpu/drm/radeon/reg_srcs/r200
··· 91 0x22b8 SE_TCL_TEX_CYL_WRAP_CTL 92 0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL 93 0x22c4 SE_TCL_POINT_SPRITE_CNTL 94 0x2648 RE_POINTSIZE 95 0x26c0 RE_TOP_LEFT 96 0x26c4 RE_MISC
··· 91 0x22b8 SE_TCL_TEX_CYL_WRAP_CTL 92 0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL 93 0x22c4 SE_TCL_POINT_SPRITE_CNTL 94 + 0x22d0 SE_PVS_CNTL 95 + 0x22d4 SE_PVS_CONST_CNTL 96 0x2648 RE_POINTSIZE 97 0x26c0 RE_TOP_LEFT 98 0x26c4 RE_MISC
+19 -14
drivers/gpu/drm/radeon/rv770.c
··· 779 fixed20_12 a; 780 u32 tmp; 781 int chansize, numchan; 782 - int r; 783 784 /* Get VRAM informations */ 785 rdev->mc.vram_is_ddr = true; ··· 821 rdev->mc.real_vram_size = rdev->mc.aper_size; 822 823 if (rdev->flags & RADEON_IS_AGP) { 824 - r = radeon_agp_init(rdev); 825 - if (r) 826 - return r; 827 /* gtt_size is setup by radeon_agp_init */ 828 rdev->mc.gtt_location = rdev->mc.agp_base; 829 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; ··· 968 /* FIXME: we should wait for ring to be empty */ 969 r700_cp_stop(rdev); 970 rdev->cp.ready = false; 971 r600_wb_disable(rdev); 972 rv770_pcie_gart_disable(rdev); 973 /* unpin shaders bo */ 974 - r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 975 - if (likely(r == 0)) { 976 - radeon_bo_unpin(rdev->r600_blit.shader_obj); 977 - radeon_bo_unreserve(rdev->r600_blit.shader_obj); 978 } 979 return 0; 980 } ··· 1036 r = radeon_fence_driver_init(rdev); 1037 if (r) 1038 return r; 1039 r = rv770_mc_init(rdev); 1040 if (r) 1041 return r; ··· 1075 if (rdev->accel_working) { 1076 r = radeon_ib_pool_init(rdev); 1077 if (r) { 1078 - DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); 1079 rdev->accel_working = false; 1080 - } 1081 - r = r600_ib_test(rdev); 1082 - if (r) { 1083 - DRM_ERROR("radeon: failed testing IB (%d).\n", r); 1084 - rdev->accel_working = false; 1085 } 1086 } 1087 return 0;
··· 779 fixed20_12 a; 780 u32 tmp; 781 int chansize, numchan; 782 783 /* Get VRAM informations */ 784 rdev->mc.vram_is_ddr = true; ··· 822 rdev->mc.real_vram_size = rdev->mc.aper_size; 823 824 if (rdev->flags & RADEON_IS_AGP) { 825 /* gtt_size is setup by radeon_agp_init */ 826 rdev->mc.gtt_location = rdev->mc.agp_base; 827 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; ··· 972 /* FIXME: we should wait for ring to be empty */ 973 r700_cp_stop(rdev); 974 rdev->cp.ready = false; 975 + r600_irq_suspend(rdev); 976 r600_wb_disable(rdev); 977 rv770_pcie_gart_disable(rdev); 978 /* unpin shaders bo */ 979 + if (rdev->r600_blit.shader_obj) { 980 + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 981 + if (likely(r == 0)) { 982 + radeon_bo_unpin(rdev->r600_blit.shader_obj); 983 + radeon_bo_unreserve(rdev->r600_blit.shader_obj); 984 + } 985 } 986 return 0; 987 } ··· 1037 r = radeon_fence_driver_init(rdev); 1038 if (r) 1039 return r; 1040 + if (rdev->flags & RADEON_IS_AGP) { 1041 + r = radeon_agp_init(rdev); 1042 + if (r) 1043 + radeon_agp_disable(rdev); 1044 + } 1045 r = rv770_mc_init(rdev); 1046 if (r) 1047 return r; ··· 1071 if (rdev->accel_working) { 1072 r = radeon_ib_pool_init(rdev); 1073 if (r) { 1074 + dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 1075 rdev->accel_working = false; 1076 + } else { 1077 + r = r600_ib_test(rdev); 1078 + if (r) { 1079 + dev_err(rdev->dev, "IB test failed (%d).\n", r); 1080 + rdev->accel_working = false; 1081 + } 1082 } 1083 } 1084 return 0;
+37 -32
drivers/gpu/drm/ttm/ttm_bo.c
··· 426 bdev->man[bo->mem.mem_type].gpu_offset; 427 bo->cur_placement = bo->mem.placement; 428 spin_unlock(&bo->lock); 429 - } 430 431 return 0; 432 ··· 524 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) 525 { 526 struct ttm_bo_global *glob = bdev->glob; 527 - struct ttm_buffer_object *entry, *nentry; 528 - struct list_head *list, *next; 529 - int ret; 530 531 spin_lock(&glob->lru_lock); 532 - list_for_each_safe(list, next, &bdev->ddestroy) { 533 - entry = list_entry(list, struct ttm_buffer_object, ddestroy); 534 - nentry = NULL; 535 536 - /* 537 - * Protect the next list entry from destruction while we 538 - * unlock the lru_lock. 539 - */ 540 541 - if (next != &bdev->ddestroy) { 542 - nentry = list_entry(next, struct ttm_buffer_object, 543 - ddestroy); 544 kref_get(&nentry->list_kref); 545 } 546 - kref_get(&entry->list_kref); 547 548 spin_unlock(&glob->lru_lock); 549 ret = ttm_bo_cleanup_refs(entry, remove_all); 550 kref_put(&entry->list_kref, ttm_bo_release_list); 551 552 spin_lock(&glob->lru_lock); 553 - if (nentry) { 554 - bool next_onlist = !list_empty(next); 555 - spin_unlock(&glob->lru_lock); 556 - kref_put(&nentry->list_kref, ttm_bo_release_list); 557 - spin_lock(&glob->lru_lock); 558 - /* 559 - * Someone might have raced us and removed the 560 - * next entry from the list. We don't bother restarting 561 - * list traversal. 562 - */ 563 - 564 - if (!next_onlist) 565 - break; 566 - } 567 - if (ret) 568 break; 569 } 570 - ret = !list_empty(&bdev->ddestroy); 571 - spin_unlock(&glob->lru_lock); 572 573 return ret; 574 } 575 ··· 942 */ 943 ttm_flag_masked(&cur_flags, placement->busy_placement[i], 944 ~TTM_PL_MASK_MEMTYPE); 945 946 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, 947 interruptible, no_wait); ··· 1845 * anyone tries to access a ttm page. 1846 */ 1847 1848 ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage); 1849 out: 1850 ··· 1868 while (ttm_bo_swapout(&bdev->glob->shrink) == 0) 1869 ; 1870 }
··· 426 bdev->man[bo->mem.mem_type].gpu_offset; 427 bo->cur_placement = bo->mem.placement; 428 spin_unlock(&bo->lock); 429 + } else 430 + bo->offset = 0; 431 432 return 0; 433 ··· 523 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) 524 { 525 struct ttm_bo_global *glob = bdev->glob; 526 + struct ttm_buffer_object *entry = NULL; 527 + int ret = 0; 528 529 spin_lock(&glob->lru_lock); 530 + if (list_empty(&bdev->ddestroy)) 531 + goto out_unlock; 532 533 + entry = list_first_entry(&bdev->ddestroy, 534 + struct ttm_buffer_object, ddestroy); 535 + kref_get(&entry->list_kref); 536 537 + for (;;) { 538 + struct ttm_buffer_object *nentry = NULL; 539 + 540 + if (entry->ddestroy.next != &bdev->ddestroy) { 541 + nentry = list_first_entry(&entry->ddestroy, 542 + struct ttm_buffer_object, ddestroy); 543 kref_get(&nentry->list_kref); 544 } 545 546 spin_unlock(&glob->lru_lock); 547 ret = ttm_bo_cleanup_refs(entry, remove_all); 548 kref_put(&entry->list_kref, ttm_bo_release_list); 549 + entry = nentry; 550 + 551 + if (ret || !entry) 552 + goto out; 553 554 spin_lock(&glob->lru_lock); 555 + if (list_empty(&entry->ddestroy)) 556 break; 557 } 558 559 + out_unlock: 560 + spin_unlock(&glob->lru_lock); 561 + out: 562 + if (entry) 563 + kref_put(&entry->list_kref, ttm_bo_release_list); 564 return ret; 565 } 566 ··· 949 */ 950 ttm_flag_masked(&cur_flags, placement->busy_placement[i], 951 ~TTM_PL_MASK_MEMTYPE); 952 + 953 + 954 + if (mem_type == TTM_PL_SYSTEM) { 955 + mem->mem_type = mem_type; 956 + mem->placement = cur_flags; 957 + mem->mm_node = NULL; 958 + return 0; 959 + } 960 961 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, 962 interruptible, no_wait); ··· 1844 * anyone tries to access a ttm page. 1845 */ 1846 1847 + if (bo->bdev->driver->swap_notify) 1848 + bo->bdev->driver->swap_notify(bo); 1849 + 1850 ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage); 1851 out: 1852 ··· 1864 while (ttm_bo_swapout(&bdev->glob->shrink) == 0) 1865 ; 1866 } 1867 + EXPORT_SYMBOL(ttm_bo_swapout_all);
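The rewritten delayed-delete walk above avoids the race by pinning the successor's reference before dropping the LRU lock, and by continuing only if that successor is still on the list once the lock is retaken. A single-threaded toy of the reference dance (the real code uses kref and a spinlock):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int id; int refs; struct node *next; };

    static struct node *get(struct node *n) { if (n) n->refs++; return n; }

    static void put(struct node *n)
    {
        if (n && --n->refs == 0) {
            printf("freed %d\n", n->id);
            free(n);
        }
    }

    int main(void)
    {
        struct node *head = NULL;

        for (int i = 3; i >= 1; i--) {               /* build 1 -> 2 -> 3 */
            struct node *n = malloc(sizeof(*n));
            n->id = i; n->refs = 1; n->next = head;  /* one "list" reference each */
            head = n;
        }

        struct node *cur = get(head);                /* walker's reference */
        while (cur) {
            struct node *next = get(cur->next);      /* pin the successor first */
            /* ...the LRU lock would be dropped here; cleanup may sleep... */
            put(cur);                                /* cleanup drops the list's reference */
            put(cur);                                /* walker drops its own; freed now */
            cur = next;
        }
        return 0;
    }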
+2
drivers/gpu/drm/ttm/ttm_lock.c
··· 288 wake_up_all(&lock->queue); 289 spin_unlock(&lock->lock); 290 } 291 292 static bool __ttm_suspend_lock(struct ttm_lock *lock) 293 { ··· 310 { 311 wait_event(lock->queue, __ttm_suspend_lock(lock)); 312 }
··· 288 wake_up_all(&lock->queue); 289 spin_unlock(&lock->lock); 290 } 291 + EXPORT_SYMBOL(ttm_suspend_unlock); 292 293 static bool __ttm_suspend_lock(struct ttm_lock *lock) 294 { ··· 309 { 310 wait_event(lock->queue, __ttm_suspend_lock(lock)); 311 } 312 + EXPORT_SYMBOL(ttm_suspend_lock);
+24 -1
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
··· 48 .busy_placement = &vram_placement_flags 49 }; 50 51 struct ttm_placement vmw_vram_ne_placement = { 52 .fpfn = 0, 53 .lpfn = 0, ··· 181 return 0; 182 } 183 184 /** 185 * FIXME: We're using the old vmware polling method to sync. 186 * Do this with fences instead. ··· 246 .sync_obj_wait = vmw_sync_obj_wait, 247 .sync_obj_flush = vmw_sync_obj_flush, 248 .sync_obj_unref = vmw_sync_obj_unref, 249 - .sync_obj_ref = vmw_sync_obj_ref 250 };
··· 48 .busy_placement = &vram_placement_flags 49 }; 50 51 + struct ttm_placement vmw_vram_sys_placement = { 52 + .fpfn = 0, 53 + .lpfn = 0, 54 + .num_placement = 1, 55 + .placement = &vram_placement_flags, 56 + .num_busy_placement = 1, 57 + .busy_placement = &sys_placement_flags 58 + }; 59 + 60 struct ttm_placement vmw_vram_ne_placement = { 61 .fpfn = 0, 62 .lpfn = 0, ··· 172 return 0; 173 } 174 175 + static void vmw_move_notify(struct ttm_buffer_object *bo, 176 + struct ttm_mem_reg *new_mem) 177 + { 178 + if (new_mem->mem_type != TTM_PL_SYSTEM) 179 + vmw_dmabuf_gmr_unbind(bo); 180 + } 181 + 182 + static void vmw_swap_notify(struct ttm_buffer_object *bo) 183 + { 184 + vmw_dmabuf_gmr_unbind(bo); 185 + } 186 + 187 /** 188 * FIXME: We're using the old vmware polling method to sync. 189 * Do this with fences instead. ··· 225 .sync_obj_wait = vmw_sync_obj_wait, 226 .sync_obj_flush = vmw_sync_obj_flush, 227 .sync_obj_unref = vmw_sync_obj_unref, 228 + .sync_obj_ref = vmw_sync_obj_ref, 229 + .move_notify = vmw_move_notify, 230 + .swap_notify = vmw_swap_notify 231 };
+62 -1
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 147 148 static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 149 static void vmw_master_init(struct vmw_master *); 150 151 static void vmw_print_capabilities(uint32_t capabilities) 152 { ··· 219 220 dev_priv->dev = dev; 221 dev_priv->vmw_chipset = chipset; 222 mutex_init(&dev_priv->hw_mutex); 223 mutex_init(&dev_priv->cmdbuf_mutex); 224 rwlock_init(&dev_priv->resource_lock); ··· 354 vmw_fb_init(dev_priv); 355 } 356 357 return 0; 358 359 out_no_device: ··· 390 struct vmw_private *dev_priv = vmw_priv(dev); 391 392 DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n"); 393 394 if (!dev_priv->stealth) { 395 vmw_fb_close(dev_priv); ··· 658 drm_put_dev(dev); 659 } 660 661 static struct drm_driver driver = { 662 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 663 DRIVER_MODESET, ··· 748 .name = VMWGFX_DRIVER_NAME, 749 .id_table = vmw_pci_id_list, 750 .probe = vmw_probe, 751 - .remove = vmw_remove 752 }, 753 .name = VMWGFX_DRIVER_NAME, 754 .desc = VMWGFX_DRIVER_DESC,
··· 147 148 static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 149 static void vmw_master_init(struct vmw_master *); 150 + static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 151 + void *ptr); 152 153 static void vmw_print_capabilities(uint32_t capabilities) 154 { ··· 217 218 dev_priv->dev = dev; 219 dev_priv->vmw_chipset = chipset; 220 + dev_priv->last_read_sequence = (uint32_t) -100; 221 mutex_init(&dev_priv->hw_mutex); 222 mutex_init(&dev_priv->cmdbuf_mutex); 223 rwlock_init(&dev_priv->resource_lock); ··· 351 vmw_fb_init(dev_priv); 352 } 353 354 + dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 355 + register_pm_notifier(&dev_priv->pm_nb); 356 + 357 return 0; 358 359 out_no_device: ··· 384 struct vmw_private *dev_priv = vmw_priv(dev); 385 386 DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n"); 387 + 388 + unregister_pm_notifier(&dev_priv->pm_nb); 389 390 if (!dev_priv->stealth) { 391 vmw_fb_close(dev_priv); ··· 650 drm_put_dev(dev); 651 } 652 653 + static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 654 + void *ptr) 655 + { 656 + struct vmw_private *dev_priv = 657 + container_of(nb, struct vmw_private, pm_nb); 658 + struct vmw_master *vmaster = dev_priv->active_master; 659 + 660 + switch (val) { 661 + case PM_HIBERNATION_PREPARE: 662 + case PM_SUSPEND_PREPARE: 663 + ttm_suspend_lock(&vmaster->lock); 664 + 665 + /** 666 + * This empties VRAM and unbinds all GMR bindings. 667 + * Buffer contents is moved to swappable memory. 668 + */ 669 + ttm_bo_swapout_all(&dev_priv->bdev); 670 + break; 671 + case PM_POST_HIBERNATION: 672 + case PM_POST_SUSPEND: 673 + ttm_suspend_unlock(&vmaster->lock); 674 + break; 675 + case PM_RESTORE_PREPARE: 676 + break; 677 + case PM_POST_RESTORE: 678 + break; 679 + default: 680 + break; 681 + } 682 + return 0; 683 + } 684 + 685 + /** 686 + * These might not be needed with the virtual SVGA device. 687 + */ 688 + 689 + int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) 690 + { 691 + pci_save_state(pdev); 692 + pci_disable_device(pdev); 693 + pci_set_power_state(pdev, PCI_D3hot); 694 + return 0; 695 + } 696 + 697 + int vmw_pci_resume(struct pci_dev *pdev) 698 + { 699 + pci_set_power_state(pdev, PCI_D0); 700 + pci_restore_state(pdev); 701 + return pci_enable_device(pdev); 702 + } 703 + 704 static struct drm_driver driver = { 705 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 706 DRIVER_MODESET, ··· 689 .name = VMWGFX_DRIVER_NAME, 690 .id_table = vmw_pci_id_list, 691 .probe = vmw_probe, 692 + .remove = vmw_remove, 693 + .suspend = vmw_pci_suspend, 694 + .resume = vmw_pci_resume 695 }, 696 .name = VMWGFX_DRIVER_NAME, 697 .desc = VMWGFX_DRIVER_DESC,
+4
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 32 #include "drmP.h" 33 #include "vmwgfx_drm.h" 34 #include "drm_hashtab.h" 35 #include "ttm/ttm_bo_driver.h" 36 #include "ttm/ttm_object.h" 37 #include "ttm/ttm_lock.h" ··· 259 260 struct vmw_master *active_master; 261 struct vmw_master fbdev_master; 262 }; 263 264 static inline struct vmw_private *vmw_priv(struct drm_device *dev) ··· 355 struct vmw_dma_buffer *bo); 356 extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, 357 struct vmw_dma_buffer *bo); 358 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, 359 struct drm_file *file_priv); 360 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, ··· 404 405 extern struct ttm_placement vmw_vram_placement; 406 extern struct ttm_placement vmw_vram_ne_placement; 407 extern struct ttm_placement vmw_sys_placement; 408 extern struct ttm_bo_driver vmw_bo_driver; 409 extern int vmw_dma_quiescent(struct drm_device *dev);
··· 32 #include "drmP.h" 33 #include "vmwgfx_drm.h" 34 #include "drm_hashtab.h" 35 + #include "linux/suspend.h" 36 #include "ttm/ttm_bo_driver.h" 37 #include "ttm/ttm_object.h" 38 #include "ttm/ttm_lock.h" ··· 258 259 struct vmw_master *active_master; 260 struct vmw_master fbdev_master; 261 + struct notifier_block pm_nb; 262 }; 263 264 static inline struct vmw_private *vmw_priv(struct drm_device *dev) ··· 353 struct vmw_dma_buffer *bo); 354 extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, 355 struct vmw_dma_buffer *bo); 356 + extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo); 357 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, 358 struct drm_file *file_priv); 359 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, ··· 401 402 extern struct ttm_placement vmw_vram_placement; 403 extern struct ttm_placement vmw_vram_ne_placement; 404 + extern struct ttm_placement vmw_vram_sys_placement; 405 extern struct ttm_placement vmw_sys_placement; 406 extern struct ttm_bo_driver vmw_bo_driver; 407 extern int vmw_dma_quiescent(struct drm_device *dev);
+19
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 490 if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL) 491 return 0; 492 493 ret = vmw_gmr_bind(dev_priv, bo); 494 if (likely(ret == 0 || ret == -ERESTARTSYS)) 495 return ret; 496 497 498 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); 499 return ret;
··· 490 if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL) 491 return 0; 492 493 + /** 494 + * Put BO in VRAM, only if there is space. 495 + */ 496 + 497 + ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false); 498 + if (unlikely(ret == -ERESTARTSYS)) 499 + return ret; 500 + 501 + /** 502 + * Otherwise, set it up as GMR. 503 + */ 504 + 505 + if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL) 506 + return 0; 507 + 508 ret = vmw_gmr_bind(dev_priv, bo); 509 if (likely(ret == 0 || ret == -ERESTARTSYS)) 510 return ret; 511 512 + /** 513 + * If that failed, try VRAM again, this time evicting 514 + * previous contents. 515 + */ 516 517 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); 518 return ret;
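The restructured validation above is a three-tier fallback: take VRAM only if there is free space, otherwise bind a GMR, and only as a last resort force the buffer into VRAM by evicting others. The control flow, with invented stub functions standing in for the real validators:

    #include <stdio.h>

    /* Stubs; the real steps are ttm_bo_validate() with the placements
     * above and vmw_gmr_bind(). */
    static int try_vram_if_free(void) { return -1; }  /* pretend VRAM is full */
    static int try_gmr_bind(void)     { return -1; }  /* pretend no GMR ids   */
    static int try_vram_evict(void)   { return 0;  }  /* last resort: evict   */

    int main(void)
    {
        if (try_vram_if_free() == 0)
            puts("placed in free VRAM");
        else if (try_gmr_bind() == 0)
            puts("bound through a GMR");
        else if (try_vram_evict() == 0)
            puts("placed in VRAM after evicting others");
        return 0;
    }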
-8
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
··· 649 if (unlikely(ret != 0)) 650 goto err_unlock; 651 652 - if (vmw_bo->gmr_bound) { 653 - vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id); 654 - spin_lock(&bo->glob->lru_lock); 655 - ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id); 656 - spin_unlock(&bo->glob->lru_lock); 657 - vmw_bo->gmr_bound = NULL; 658 - } 659 - 660 ret = ttm_bo_validate(bo, &ne_placement, false, false); 661 ttm_bo_unreserve(bo); 662 err_unlock:
··· 649 if (unlikely(ret != 0)) 650 goto err_unlock; 651 652 ret = ttm_bo_validate(bo, &ne_placement, false, false); 653 ttm_bo_unreserve(bo); 654 err_unlock:
+1 -2
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
··· 98 (unsigned int) min, 99 (unsigned int) fifo->capabilities); 100 101 - dev_priv->fence_seq = (uint32_t) -100; 102 - dev_priv->last_read_sequence = (uint32_t) -100; 103 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); 104 105 return vmw_fifo_send_fence(dev_priv, &dummy);
··· 98 (unsigned int) min, 99 (unsigned int) fifo->capabilities); 100 101 + dev_priv->fence_seq = dev_priv->last_read_sequence; 102 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); 103 104 return vmw_fifo_send_fence(dev_priv, &dummy);
+5 -7
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 553 } *cmd; 554 int i, increment = 1; 555 556 - if (!num_clips || 557 - !(dev_priv->fifo.capabilities & 558 - SVGA_FIFO_CAP_SCREEN_OBJECT)) { 559 num_clips = 1; 560 clips = &norect; 561 norect.x1 = norect.y1 = 0; ··· 572 573 for (i = 0; i < num_clips; i++, clips += increment) { 574 cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE); 575 - cmd[i].body.x = cpu_to_le32(clips[i].x1); 576 - cmd[i].body.y = cpu_to_le32(clips[i].y1); 577 - cmd[i].body.width = cpu_to_le32(clips[i].x2 - clips[i].x1); 578 - cmd[i].body.height = cpu_to_le32(clips[i].y2 - clips[i].y1); 579 } 580 581 vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
··· 553 } *cmd; 554 int i, increment = 1; 555 556 + if (!num_clips) { 557 num_clips = 1; 558 clips = &norect; 559 norect.x1 = norect.y1 = 0; ··· 574 575 for (i = 0; i < num_clips; i++, clips += increment) { 576 cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE); 577 + cmd[i].body.x = cpu_to_le32(clips->x1); 578 + cmd[i].body.y = cpu_to_le32(clips->y1); 579 + cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1); 580 + cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1); 581 } 582 583 vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
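The dirty-rect fix above is a classic double-stride bug: the loop already advances clips by increment each iteration, so also indexing with [i] walked the array at twice the stride, skipping rectangles and reading past the end. Isolated:

    #include <stdio.h>

    struct clip { int x1; };

    int main(void)
    {
        struct clip clips[4] = { {0}, {10}, {20}, {30} };
        const struct clip *p = clips;
        int increment = 1;

        /* buggy: pointer advances AND index grows -> stride of 2;
         * only two iterations stay in bounds, and {10} is skipped */
        for (int i = 0; i < 2; i++, p += increment)
            printf("buggy: %d\n", p[i].x1);         /* 0, 20 */

        p = clips;
        for (int i = 0; i < 4; i++, p += increment)
            printf("fixed: %d\n", p->x1);           /* 0, 10, 20, 30 */
        return 0;
    }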
-9
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
··· 104 bool pin, bool interruptible) 105 { 106 struct ttm_buffer_object *bo = &buf->base; 107 - struct ttm_bo_global *glob = bo->glob; 108 struct ttm_placement *overlay_placement = &vmw_vram_placement; 109 int ret; 110 ··· 114 ret = ttm_bo_reserve(bo, interruptible, false, false, 0); 115 if (unlikely(ret != 0)) 116 goto err; 117 - 118 - if (buf->gmr_bound) { 119 - vmw_gmr_unbind(dev_priv, buf->gmr_id); 120 - spin_lock(&glob->lru_lock); 121 - ida_remove(&dev_priv->gmr_ida, buf->gmr_id); 122 - spin_unlock(&glob->lru_lock); 123 - buf->gmr_bound = NULL; 124 - } 125 126 if (pin) 127 overlay_placement = &vmw_vram_ne_placement;
··· 104 bool pin, bool interruptible) 105 { 106 struct ttm_buffer_object *bo = &buf->base; 107 struct ttm_placement *overlay_placement = &vmw_vram_placement; 108 int ret; 109 ··· 115 ret = ttm_bo_reserve(bo, interruptible, false, false, 0); 116 if (unlikely(ret != 0)) 117 goto err; 118 119 if (pin) 120 overlay_placement = &vmw_vram_ne_placement;
+34 -30
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 599 if (unlikely(ret != 0)) 600 goto out_err1; 601 602 user_srf->base.shareable = false; 603 user_srf->base.tfile = NULL; 604 ··· 642 vmw_resource_unreference(&res); 643 return ret; 644 } 645 - 646 - if (srf->flags & (1 << 9) && 647 - srf->num_sizes == 1 && 648 - srf->sizes[0].width == 64 && 649 - srf->sizes[0].height == 64 && 650 - srf->format == SVGA3D_A8R8G8B8) { 651 - 652 - srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL); 653 - /* clear the image */ 654 - if (srf->snooper.image) 655 - memset(srf->snooper.image, 0x00, 64 * 64 * 4); 656 - else 657 - DRM_ERROR("Failed to allocate cursor_image\n"); 658 - 659 - } else { 660 - srf->snooper.image = NULL; 661 - } 662 - srf->snooper.crtc = NULL; 663 664 rep->sid = user_srf->base.hash.key; 665 if (rep->sid == SVGA3D_INVALID_ID) ··· 757 return bo_user_size + page_array_size; 758 } 759 760 - void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) 761 { 762 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); 763 struct ttm_bo_global *glob = bo->glob; 764 struct vmw_private *dev_priv = 765 container_of(bo->bdev, struct vmw_private, bdev); 766 767 - ttm_mem_global_free(glob->mem_glob, bo->acc_size); 768 if (vmw_bo->gmr_bound) { 769 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); 770 spin_lock(&glob->lru_lock); 771 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); 772 spin_unlock(&glob->lru_lock); 773 } 774 kfree(vmw_bo); 775 } 776 ··· 825 static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) 826 { 827 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); 828 - struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma; 829 struct ttm_bo_global *glob = bo->glob; 830 - struct vmw_private *dev_priv = 831 - container_of(bo->bdev, struct vmw_private, bdev); 832 833 ttm_mem_global_free(glob->mem_glob, bo->acc_size); 834 - if (vmw_bo->gmr_bound) { 835 - vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); 836 - spin_lock(&glob->lru_lock); 837 - ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); 838 - spin_unlock(&glob->lru_lock); 839 - } 840 kfree(vmw_user_bo); 841 } 842 ··· 872 } 873 874 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size, 875 - &vmw_vram_placement, true, 876 &vmw_user_dmabuf_destroy); 877 if (unlikely(ret != 0)) 878 return ret;
··· 599 if (unlikely(ret != 0)) 600 goto out_err1; 601 602 + 603 + if (srf->flags & (1 << 9) && 604 + srf->num_sizes == 1 && 605 + srf->sizes[0].width == 64 && 606 + srf->sizes[0].height == 64 && 607 + srf->format == SVGA3D_A8R8G8B8) { 608 + 609 + srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL); 610 + /* clear the image */ 611 + if (srf->snooper.image) { 612 + memset(srf->snooper.image, 0x00, 64 * 64 * 4); 613 + } else { 614 + DRM_ERROR("Failed to allocate cursor_image\n"); 615 + ret = -ENOMEM; 616 + goto out_err1; 617 + } 618 + } else { 619 + srf->snooper.image = NULL; 620 + } 621 + srf->snooper.crtc = NULL; 622 + 623 user_srf->base.shareable = false; 624 user_srf->base.tfile = NULL; 625 ··· 621 vmw_resource_unreference(&res); 622 return ret; 623 } 624 625 rep->sid = user_srf->base.hash.key; 626 if (rep->sid == SVGA3D_INVALID_ID) ··· 754 return bo_user_size + page_array_size; 755 } 756 757 + void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo) 758 { 759 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); 760 struct ttm_bo_global *glob = bo->glob; 761 struct vmw_private *dev_priv = 762 container_of(bo->bdev, struct vmw_private, bdev); 763 764 if (vmw_bo->gmr_bound) { 765 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); 766 spin_lock(&glob->lru_lock); 767 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); 768 spin_unlock(&glob->lru_lock); 769 + vmw_bo->gmr_bound = false; 770 } 771 + } 772 + 773 + void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) 774 + { 775 + struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); 776 + struct ttm_bo_global *glob = bo->glob; 777 + 778 + vmw_dmabuf_gmr_unbind(bo); 779 + ttm_mem_global_free(glob->mem_glob, bo->acc_size); 780 kfree(vmw_bo); 781 } 782 ··· 813 static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) 814 { 815 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); 816 struct ttm_bo_global *glob = bo->glob; 817 818 + vmw_dmabuf_gmr_unbind(bo); 819 ttm_mem_global_free(glob->mem_glob, bo->acc_size); 820 kfree(vmw_user_bo); 821 } 822 ··· 868 } 869 870 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size, 871 + &vmw_vram_sys_placement, true, 872 &vmw_user_dmabuf_destroy); 873 if (unlikely(ret != 0)) 874 return ret;
+5
include/drm/ttm/ttm_bo_driver.h
··· 353 /* notify the driver we are taking a fault on this BO 354 * and have reserved it */ 355 void (*fault_reserve_notify)(struct ttm_buffer_object *bo); 356 }; 357 358 /**
··· 353 /* notify the driver we are taking a fault on this BO 354 * and have reserved it */ 355 void (*fault_reserve_notify)(struct ttm_buffer_object *bo); 356 + 357 + /** 358 + * notify the driver that we're about to swap out this bo 359 + */ 360 + void (*swap_notify) (struct ttm_buffer_object *bo); 361 }; 362 363 /**