Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel

* 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel: (41 commits)
drm/i915: add HAS_BSD check to i915_getparam
drm/i915: Honor sync polarity from VBT panel timing descriptors
drm/i915: Unmask interrupt for render engine on Sandybridge
drm/i915: Fix PIPE_CONTROL command on Sandybridge
drm/i915: Fix up address spaces in slow_kernel_write()
drm/i915: Use non-atomic kmap for slow copy paths
drm/i915: Avoid moving from CPU domain during pwrite
drm/i915: Cleanup after failed initialization of ringbuffers
drm/i915: Reject bind_to_gtt() early if object > aperture
drm/i915: Check error code whilst moving buffer to GTT domain.
drm/i915: Remove spurious warning "Failure to install fence"
drm/i915: Rebind bo if currently bound with incorrect alignment.
drm/i915: Include pitch in set_base debug statement.
drm/i915: Only print "nothing to do" debug message as required.
drm/i915: Propagate error from unbinding an unfenceable object.
drm/i915: Avoid nesting of domain changes when setting display plane
drm/i915: Hold the spinlock whilst resetting unpin_work along error path
drm/i915: Only print a message if there was an error
drm/i915: Clean up leftover bits from hws move to ring structure.
drm/i915: Add CxSR support on Pineview DDR3
...

+2584 -1076
+39 -7
drivers/char/agp/intel-gtt.c
···
 		}
 	}
 
-static int intel_i915_configure(void)
+static int intel_i9xx_configure(void)
 {
 	struct aper_size_info_fixed *current_size;
 	u32 temp;
···
 	return 0;
 }
 
+static int intel_i915_get_gtt_size(void)
+{
+	int size;
+
+	if (IS_G33) {
+		u16 gmch_ctrl;
+
+		/* G33's GTT size defined in gmch_ctrl */
+		pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
+		case G33_PGETBL_SIZE_1M:
+			size = 1024;
+			break;
+		case G33_PGETBL_SIZE_2M:
+			size = 2048;
+			break;
+		default:
+			dev_info(&agp_bridge->dev->dev,
+				 "unknown page table size 0x%x, assuming 512KB\n",
+				 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
+			size = 512;
+		}
+	} else {
+		/* On previous hardware, the GTT size was just what was
+		 * required to map the aperture.
+		 */
+		size = agp_bridge->driver->fetch_size();
+	}
+
+	return KB(size);
+}
+
 /* The intel i915 automatically initializes the agp aperture during POST.
  * Use the memory already set aside for in the GTT.
  */
···
 	struct aper_size_info_fixed *size;
 	int num_entries;
 	u32 temp, temp2;
-	int gtt_map_size = 256 * 1024;
+	int gtt_map_size;
 
 	size = agp_bridge->current_size;
 	page_order = size->page_order;
···
 	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
 	pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
 
-	if (IS_G33)
-		gtt_map_size = 1024 * 1024;	/* 1M on G33 */
+	gtt_map_size = intel_i915_get_gtt_size();
+
 	intel_private.gtt = ioremap(temp2, gtt_map_size);
 	if (!intel_private.gtt)
 		return -ENOMEM;
···
 	.size_type	= FIXED_APER_SIZE,
 	.num_aperture_sizes	= 4,
 	.needs_scratch_page	= true,
-	.configure	= intel_i915_configure,
+	.configure	= intel_i9xx_configure,
 	.fetch_size	= intel_i9xx_fetch_size,
 	.cleanup	= intel_i915_cleanup,
 	.mask_memory	= intel_i810_mask_memory,
···
 	.size_type	= FIXED_APER_SIZE,
 	.num_aperture_sizes	= 4,
 	.needs_scratch_page	= true,
-	.configure	= intel_i915_configure,
+	.configure	= intel_i9xx_configure,
 	.fetch_size	= intel_i9xx_fetch_size,
 	.cleanup	= intel_i915_cleanup,
 	.mask_memory	= intel_i965_mask_memory,
···
 	.size_type	= FIXED_APER_SIZE,
 	.num_aperture_sizes	= 4,
 	.needs_scratch_page	= true,
-	.configure	= intel_i915_configure,
+	.configure	= intel_i9xx_configure,
 	.fetch_size	= intel_i9xx_fetch_size,
 	.cleanup	= intel_i915_cleanup,
 	.mask_memory	= intel_i965_mask_memory,
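The interesting piece above is intel_i915_get_gtt_size(): on G33 the GTT size is no longer implied by the aperture but encoded in the GMCH control word. A standalone sketch of the same decode, runnable in user space; the mask and size bit values are assumptions mirroring the kernel's G33_PGETBL_SIZE_* definitions rather than verified against hardware:

#include <stdio.h>
#include <stdint.h>

/* Assumed to match the kernel's intel-agp register definitions. */
#define G33_PGETBL_SIZE_MASK  (3 << 1)
#define G33_PGETBL_SIZE_1M    (1 << 1)
#define G33_PGETBL_SIZE_2M    (2 << 1)

/* Decode the GTT size (in KB) from a G33 GMCH control word. */
static int g33_gtt_size_kb(uint16_t gmch_ctrl)
{
    switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
    case G33_PGETBL_SIZE_1M:
        return 1024;
    case G33_PGETBL_SIZE_2M:
        return 2048;
    default:
        /* Unknown encoding: fall back to 512KB, as the patch does. */
        return 512;
    }
}

int main(void)
{
    uint16_t samples[] = { G33_PGETBL_SIZE_1M, G33_PGETBL_SIZE_2M, 0 };
    for (int i = 0; i < 3; i++)
        printf("gmch_ctrl=0x%04x -> %d KB\n",
               samples[i], g33_gtt_size_kb(samples[i]));
    return 0;
}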
+1
drivers/gpu/drm/i915/Makefile
···
 	  intel_fb.o \
 	  intel_tv.o \
 	  intel_dvo.o \
+	  intel_ringbuffer.o \
 	  intel_overlay.o \
 	  dvo_ch7xxx.o \
 	  dvo_ch7017.o \
+63 -19
drivers/gpu/drm/i915/i915_debugfs.c
···
 	case ACTIVE_LIST:
 		seq_printf(m, "Active:\n");
 		lock = &dev_priv->mm.active_list_lock;
-		head = &dev_priv->mm.active_list;
+		head = &dev_priv->render_ring.active_list;
 		break;
 	case INACTIVE_LIST:
 		seq_printf(m, "Inactive:\n");
···
 	struct drm_i915_gem_request *gem_request;
 
 	seq_printf(m, "Request:\n");
-	list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
+	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
+			list) {
 		seq_printf(m, "    %d @ %d\n",
 			   gem_request->seqno,
 			   (int) (jiffies - gem_request->emitted_jiffies));
···
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (dev_priv->hw_status_page != NULL) {
+	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev));
+			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
···
 	}
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
-	if (dev_priv->hw_status_page != NULL) {
+	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev));
+			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
···
 	int i;
 	volatile u32 *hws;
 
-	hws = (volatile u32 *)dev_priv->hw_status_page;
+	hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
 	if (hws == NULL)
 		return 0;
···
 
 	spin_lock(&dev_priv->mm.active_list_lock);
 
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
+			list) {
 		obj = &obj_priv->base;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
 			ret = i915_gem_object_get_pages(obj, 0);
···
 	u8 *virt;
 	uint32_t *ptr, off;
 
-	if (!dev_priv->ring.ring_obj) {
+	if (!dev_priv->render_ring.gem_object) {
 		seq_printf(m, "No ringbuffer setup\n");
 		return 0;
 	}
 
-	virt = dev_priv->ring.virtual_start;
+	virt = dev_priv->render_ring.virtual_start;
 
-	for (off = 0; off < dev_priv->ring.Size; off += 4) {
+	for (off = 0; off < dev_priv->render_ring.size; off += 4) {
 		ptr = (uint32_t *)(virt + off);
 		seq_printf(m, "%08x :  %08x\n", off, *ptr);
 	}
···
 
 	seq_printf(m, "RingHead :  %08x\n", head);
 	seq_printf(m, "RingTail :  %08x\n", tail);
-	seq_printf(m, "RingSize :  %08lx\n", dev_priv->ring.Size);
+	seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.size);
 	seq_printf(m, "Acthd :  %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
 
 	return 0;
···
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u16 rgvswctl = I915_READ16(MEMSWCTL);
+	u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 
-	seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3);
-	seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1);
-	seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf,
-		   rgvswctl & 0x3f);
+	seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+	seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+	seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+		   MEMSTAT_VID_SHIFT);
+	seq_printf(m, "Current P-state: %d\n",
+		   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
 
 	return 0;
 }
···
 
 	for (i = 0; i < 16; i++) {
 		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
-		seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq);
+		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
+			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
 	}
 
 	return 0;
···
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 rgvmodectl = I915_READ(MEMMODECTL);
+	u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
+	u16 crstandvid = I915_READ16(CRSTANDVID);
 
 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
 		   "yes" : "no");
···
 		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
 	seq_printf(m, "Starting frequency: P%d\n",
 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
-	seq_printf(m, "Max frequency: P%d\n",
+	seq_printf(m, "Max P-state: P%d\n",
 		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
-	seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
+	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
+	seq_printf(m, "Render standby enabled: %s\n",
+		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
 
 	return 0;
 }
···
 
 	seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" : "disabled");
+
+	return 0;
+}
+
+static int i915_emon_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long temp, chipset, gfx;
+
+	temp = i915_mch_val(dev_priv);
+	chipset = i915_chipset_val(dev_priv);
+	gfx = i915_gfx_val(dev_priv);
+
+	seq_printf(m, "GMCH temp: %ld\n", temp);
+	seq_printf(m, "Chipset power: %ld\n", chipset);
+	seq_printf(m, "GFX power: %ld\n", gfx);
+	seq_printf(m, "Total power: %ld\n", chipset + gfx);
+
+	return 0;
+}
+
+static int i915_gfxec(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
 
 	return 0;
 }
···
 	{"i915_delayfreq_table", i915_delayfreq_table, 0},
 	{"i915_inttoext_table", i915_inttoext_table, 0},
 	{"i915_drpc_info", i915_drpc_info, 0},
+	{"i915_emon_status", i915_emon_status, 0},
+	{"i915_gfxec", i915_gfxec, 0},
 	{"i915_fbc_status", i915_fbc_status, 0},
 	{"i915_sr_status", i915_sr_status, 0},
 };
+582 -129
drivers/gpu/drm/i915/i915_dma.c
···
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 
-/* Really want an OS-independent resettable timer.  Would like to have
- * this loop run for (eg) 3 sec, but have the timer reset every time
- * the head pointer changes, so that EBUSY only happens if the ring
- * actually stalls for (eg) 3 seconds.
- */
-int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
-	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
-	u32 last_acthd = I915_READ(acthd_reg);
-	u32 acthd;
-	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-	int i;
-
-	trace_i915_ring_wait_begin (dev);
-
-	for (i = 0; i < 100000; i++) {
-		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-		acthd = I915_READ(acthd_reg);
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->Size;
-		if (ring->space >= n) {
-			trace_i915_ring_wait_end (dev);
-			return 0;
-		}
-
-		if (dev->primary->master) {
-			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-			if (master_priv->sarea_priv)
-				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-		}
-
-
-		if (ring->head != last_head)
-			i = 0;
-		if (acthd != last_acthd)
-			i = 0;
-
-		last_head = ring->head;
-		last_acthd = acthd;
-		msleep_interruptible(10);
-
-	}
-
-	trace_i915_ring_wait_end (dev);
-	return -EBUSY;
-}
-
-/* As a ringbuffer is only allowed to wrap between instructions, fill
- * the tail with NOOPs.
- */
-int i915_wrap_ring(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	volatile unsigned int *virt;
-	int rem;
-
-	rem = dev_priv->ring.Size - dev_priv->ring.tail;
-	if (dev_priv->ring.space < rem) {
-		int ret = i915_wait_ring(dev, rem, __func__);
-		if (ret)
-			return ret;
-	}
-	dev_priv->ring.space -= rem;
-
-	virt = (unsigned int *)
-		(dev_priv->ring.virtual_start + dev_priv->ring.tail);
-	rem /= 4;
-	while (rem--)
-		*virt++ = MI_NOOP;
-
-	dev_priv->ring.tail = 0;
-
-	return 0;
-}
-
 /**
  * Sets up the hardware status page for devices that need a physical address
  * in the register.
···
 		DRM_ERROR("Can not allocate hardware status page\n");
 		return -ENOMEM;
 	}
-	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+	dev_priv->render_ring.status_page.page_addr
+		= dev_priv->status_page_dmah->vaddr;
 	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
 
-	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
 
 	if (IS_I965G(dev))
 		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
···
 		dev_priv->status_page_dmah = NULL;
 	}
 
-	if (dev_priv->status_gfx_addr) {
-		dev_priv->status_gfx_addr = 0;
+	if (dev_priv->render_ring.status_page.gfx_addr) {
+		dev_priv->render_ring.status_page.gfx_addr = 0;
 		drm_core_ioremapfree(&dev_priv->hws_map, dev);
 	}
···
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
-	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+	struct intel_ring_buffer *ring = &dev_priv->render_ring;
 
 	/*
 	 * We should never lose context on the ring with modesetting
···
 	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
 	ring->space = ring->head - (ring->tail + 8);
 	if (ring->space < 0)
-		ring->space += ring->Size;
+		ring->space += ring->size;
 
 	if (!dev->primary->master)
 		return;
···
 	if (dev->irq_enabled)
 		drm_irq_uninstall(dev);
 
-	if (dev_priv->ring.virtual_start) {
-		drm_core_ioremapfree(&dev_priv->ring.map, dev);
-		dev_priv->ring.virtual_start = NULL;
-		dev_priv->ring.map.handle = NULL;
-		dev_priv->ring.map.size = 0;
-	}
+	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+	if (HAS_BSD(dev))
+		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
 
 	/* Clear the HWS virtual address at teardown */
 	if (I915_NEED_GFX_HWS(dev))
···
 	}
 
 	if (init->ring_size != 0) {
-		if (dev_priv->ring.ring_obj != NULL) {
+		if (dev_priv->render_ring.gem_object != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
 			return -EINVAL;
 		}
 
-		dev_priv->ring.Size = init->ring_size;
+		dev_priv->render_ring.size = init->ring_size;
 
-		dev_priv->ring.map.offset = init->ring_start;
-		dev_priv->ring.map.size = init->ring_size;
-		dev_priv->ring.map.type = 0;
-		dev_priv->ring.map.flags = 0;
-		dev_priv->ring.map.mtrr = 0;
+		dev_priv->render_ring.map.offset = init->ring_start;
+		dev_priv->render_ring.map.size = init->ring_size;
+		dev_priv->render_ring.map.type = 0;
+		dev_priv->render_ring.map.flags = 0;
+		dev_priv->render_ring.map.mtrr = 0;
 
-		drm_core_ioremap_wc(&dev_priv->ring.map, dev);
+		drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
 
-		if (dev_priv->ring.map.handle == NULL) {
+		if (dev_priv->render_ring.map.handle == NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("can not ioremap virtual address for"
 				  " ring buffer\n");
···
 		}
 	}
 
-	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+	dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
 
 	dev_priv->cpp = init->cpp;
 	dev_priv->back_offset = init->back_offset;
···
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
+	struct intel_ring_buffer *ring;
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 
-	if (dev_priv->ring.map.handle == NULL) {
+	ring = &dev_priv->render_ring;
+
+	if (ring->map.handle == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
 	}
 
 	/* Program Hardware Status Page */
-	if (!dev_priv->hw_status_page) {
+	if (!ring->status_page.page_addr) {
 		DRM_ERROR("Can not find hardware status page\n");
 		return -EINVAL;
 	}
 	DRM_DEBUG_DRIVER("hw status page @ %p\n",
-				dev_priv->hw_status_page);
-
-	if (dev_priv->status_gfx_addr != 0)
-		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+				ring->status_page.page_addr);
+	if (ring->status_page.gfx_addr != 0)
+		ring->setup_status_page(dev, ring);
 	else
 		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+
 	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 
 	return 0;
···
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i;
-	RING_LOCALS;
 
-	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
+	if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
 		return -EINVAL;
 
 	BEGIN_LP_RING((dwords+1)&~1);
···
 			       struct drm_clip_rect *boxes,
 			       int i, int DR1, int DR4)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_clip_rect box = boxes[i];
-	RING_LOCALS;
 
 	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
 		DRM_ERROR("Bad box %d,%d..%d,%d\n",
···
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-	RING_LOCALS;
 
 	dev_priv->counter++;
 	if (dev_priv->counter > 0x7FFFFFFFUL)
···
 				     drm_i915_batchbuffer_t * batch,
 				     struct drm_clip_rect *cliprects)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	int nbox = batch->num_cliprects;
 	int i = 0, count;
-	RING_LOCALS;
 
 	if ((batch->start | batch->used) & 0x7) {
 		DRM_ERROR("alignment");
···
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv =
 		dev->primary->master->driver_priv;
-	RING_LOCALS;
 
 	if (!master_priv->sarea_priv)
 		return -EINVAL;
···
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	i915_kernel_lost_context(dev);
-	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
+	return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
+				      dev_priv->render_ring.size - 8);
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
···
 		/* depends on GEM */
 		value = dev_priv->has_gem;
 		break;
+	case I915_PARAM_HAS_BSD:
+		value = HAS_BSD(dev);
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
···
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_hws_addr_t *hws = data;
+	struct intel_ring_buffer *ring = &dev_priv->render_ring;
 
 	if (!I915_NEED_GFX_HWS(dev))
 		return -EINVAL;
···
 
 	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
 
-	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
+	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
 
 	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
 	dev_priv->hws_map.size = 4*1024;
···
 	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
 	if (dev_priv->hws_map.handle == NULL) {
 		i915_dma_cleanup(dev);
-		dev_priv->status_gfx_addr = 0;
+		ring->status_page.gfx_addr = 0;
 		DRM_ERROR("can not ioremap virtual address for"
 				" G33 hw status page\n");
 		return -ENOMEM;
 	}
-	dev_priv->hw_status_page = dev_priv->hws_map.handle;
+	ring->status_page.page_addr = dev_priv->hws_map.handle;
+	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
 
-	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
-			dev_priv->status_gfx_addr);
+			ring->status_page.gfx_addr);
 	DRM_DEBUG_DRIVER("load hws at %p\n",
-			dev_priv->hw_status_page);
+			ring->status_page.page_addr);
 	return 0;
 }
···
 	master->driver_priv = NULL;
 }
 
-static void i915_get_mem_freq(struct drm_device *dev)
+static void i915_pineview_get_mem_freq(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 tmp;
-
-	if (!IS_PINEVIEW(dev))
-		return;
 
 	tmp = I915_READ(CLKCFG);
···
 		dev_priv->mem_freq = 800;
 		break;
 	}
+
+	/* detect pineview DDR3 setting */
+	tmp = I915_READ(CSHRDDR3CTL);
+	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
 }
+
+static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u16 ddrpll, csipll;
+
+	ddrpll = I915_READ16(DDRMPLL1);
+	csipll = I915_READ16(CSIPLL0);
+
+	switch (ddrpll & 0xff) {
+	case 0xc:
+		dev_priv->mem_freq = 800;
+		break;
+	case 0x10:
+		dev_priv->mem_freq = 1066;
+		break;
+	case 0x14:
+		dev_priv->mem_freq = 1333;
+		break;
+	case 0x18:
+		dev_priv->mem_freq = 1600;
+		break;
+	default:
+		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
+				 ddrpll & 0xff);
+		dev_priv->mem_freq = 0;
+		break;
+	}
+
+	dev_priv->r_t = dev_priv->mem_freq;
+
+	switch (csipll & 0x3ff) {
+	case 0x00c:
+		dev_priv->fsb_freq = 3200;
+		break;
+	case 0x00e:
+		dev_priv->fsb_freq = 3733;
+		break;
+	case 0x010:
+		dev_priv->fsb_freq = 4266;
+		break;
+	case 0x012:
+		dev_priv->fsb_freq = 4800;
+		break;
+	case 0x014:
+		dev_priv->fsb_freq = 5333;
+		break;
+	case 0x016:
+		dev_priv->fsb_freq = 5866;
+		break;
+	case 0x018:
+		dev_priv->fsb_freq = 6400;
+		break;
+	default:
+		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
+				 csipll & 0x3ff);
+		dev_priv->fsb_freq = 0;
+		break;
+	}
+
+	if (dev_priv->fsb_freq == 3200) {
+		dev_priv->c_m = 0;
+	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+		dev_priv->c_m = 1;
+	} else {
+		dev_priv->c_m = 2;
+	}
+}
+
+struct v_table {
+	u8 vid;
+	unsigned long vd; /* in .1 mil */
+	unsigned long vm; /* in .1 mil */
+	u8 pvid;
+};
+
+static struct v_table v_table[] = {
+	{ 0, 16125, 15000, 0x7f, },
+	{ 1, 16000, 14875, 0x7e, },
+	{ 2, 15875, 14750, 0x7d, },
+	{ 3, 15750, 14625, 0x7c, },
+	{ 4, 15625, 14500, 0x7b, },
+	{ 5, 15500, 14375, 0x7a, },
+	{ 6, 15375, 14250, 0x79, },
+	{ 7, 15250, 14125, 0x78, },
+	{ 8, 15125, 14000, 0x77, },
+	{ 9, 15000, 13875, 0x76, },
+	{ 10, 14875, 13750, 0x75, },
+	{ 11, 14750, 13625, 0x74, },
+	{ 12, 14625, 13500, 0x73, },
+	{ 13, 14500, 13375, 0x72, },
+	{ 14, 14375, 13250, 0x71, },
+	{ 15, 14250, 13125, 0x70, },
+	{ 16, 14125, 13000, 0x6f, },
+	{ 17, 14000, 12875, 0x6e, },
+	{ 18, 13875, 12750, 0x6d, },
+	{ 19, 13750, 12625, 0x6c, },
+	{ 20, 13625, 12500, 0x6b, },
+	{ 21, 13500, 12375, 0x6a, },
+	{ 22, 13375, 12250, 0x69, },
+	{ 23, 13250, 12125, 0x68, },
+	{ 24, 13125, 12000, 0x67, },
+	{ 25, 13000, 11875, 0x66, },
+	{ 26, 12875, 11750, 0x65, },
+	{ 27, 12750, 11625, 0x64, },
+	{ 28, 12625, 11500, 0x63, },
+	{ 29, 12500, 11375, 0x62, },
+	{ 30, 12375, 11250, 0x61, },
+	{ 31, 12250, 11125, 0x60, },
+	{ 32, 12125, 11000, 0x5f, },
+	{ 33, 12000, 10875, 0x5e, },
+	{ 34, 11875, 10750, 0x5d, },
+	{ 35, 11750, 10625, 0x5c, },
+	{ 36, 11625, 10500, 0x5b, },
+	{ 37, 11500, 10375, 0x5a, },
+	{ 38, 11375, 10250, 0x59, },
+	{ 39, 11250, 10125, 0x58, },
+	{ 40, 11125, 10000, 0x57, },
+	{ 41, 11000, 9875, 0x56, },
+	{ 42, 10875, 9750, 0x55, },
+	{ 43, 10750, 9625, 0x54, },
+	{ 44, 10625, 9500, 0x53, },
+	{ 45, 10500, 9375, 0x52, },
+	{ 46, 10375, 9250, 0x51, },
+	{ 47, 10250, 9125, 0x50, },
+	{ 48, 10125, 9000, 0x4f, },
+	{ 49, 10000, 8875, 0x4e, },
+	{ 50, 9875, 8750, 0x4d, },
+	{ 51, 9750, 8625, 0x4c, },
+	{ 52, 9625, 8500, 0x4b, },
+	{ 53, 9500, 8375, 0x4a, },
+	{ 54, 9375, 8250, 0x49, },
+	{ 55, 9250, 8125, 0x48, },
+	{ 56, 9125, 8000, 0x47, },
+	{ 57, 9000, 7875, 0x46, },
+	{ 58, 8875, 7750, 0x45, },
+	{ 59, 8750, 7625, 0x44, },
+	{ 60, 8625, 7500, 0x43, },
+	{ 61, 8500, 7375, 0x42, },
+	{ 62, 8375, 7250, 0x41, },
+	{ 63, 8250, 7125, 0x40, },
+	{ 64, 8125, 7000, 0x3f, },
+	{ 65, 8000, 6875, 0x3e, },
+	{ 66, 7875, 6750, 0x3d, },
+	{ 67, 7750, 6625, 0x3c, },
+	{ 68, 7625, 6500, 0x3b, },
+	{ 69, 7500, 6375, 0x3a, },
+	{ 70, 7375, 6250, 0x39, },
+	{ 71, 7250, 6125, 0x38, },
+	{ 72, 7125, 6000, 0x37, },
+	{ 73, 7000, 5875, 0x36, },
+	{ 74, 6875, 5750, 0x35, },
+	{ 75, 6750, 5625, 0x34, },
+	{ 76, 6625, 5500, 0x33, },
+	{ 77, 6500, 5375, 0x32, },
+	{ 78, 6375, 5250, 0x31, },
+	{ 79, 6250, 5125, 0x30, },
+	{ 80, 6125, 5000, 0x2f, },
+	{ 81, 6000, 4875, 0x2e, },
+	{ 82, 5875, 4750, 0x2d, },
+	{ 83, 5750, 4625, 0x2c, },
+	{ 84, 5625, 4500, 0x2b, },
+	{ 85, 5500, 4375, 0x2a, },
+	{ 86, 5375, 4250, 0x29, },
+	{ 87, 5250, 4125, 0x28, },
+	{ 88, 5125, 4000, 0x27, },
+	{ 89, 5000, 3875, 0x26, },
+	{ 90, 4875, 3750, 0x25, },
+	{ 91, 4750, 3625, 0x24, },
+	{ 92, 4625, 3500, 0x23, },
+	{ 93, 4500, 3375, 0x22, },
+	{ 94, 4375, 3250, 0x21, },
+	{ 95, 4250, 3125, 0x20, },
+	{ 96, 4125, 3000, 0x1f, },
+	{ 97, 4125, 3000, 0x1e, },
+	{ 98, 4125, 3000, 0x1d, },
+	{ 99, 4125, 3000, 0x1c, },
+	{ 100, 4125, 3000, 0x1b, },
+	{ 101, 4125, 3000, 0x1a, },
+	{ 102, 4125, 3000, 0x19, },
+	{ 103, 4125, 3000, 0x18, },
+	{ 104, 4125, 3000, 0x17, },
+	{ 105, 4125, 3000, 0x16, },
+	{ 106, 4125, 3000, 0x15, },
+	{ 107, 4125, 3000, 0x14, },
+	{ 108, 4125, 3000, 0x13, },
+	{ 109, 4125, 3000, 0x12, },
+	{ 110, 4125, 3000, 0x11, },
+	{ 111, 4125, 3000, 0x10, },
+	{ 112, 4125, 3000, 0x0f, },
+	{ 113, 4125, 3000, 0x0e, },
+	{ 114, 4125, 3000, 0x0d, },
+	{ 115, 4125, 3000, 0x0c, },
+	{ 116, 4125, 3000, 0x0b, },
+	{ 117, 4125, 3000, 0x0a, },
+	{ 118, 4125, 3000, 0x09, },
+	{ 119, 4125, 3000, 0x08, },
+	{ 120, 1125, 0, 0x07, },
+	{ 121, 1000, 0, 0x06, },
+	{ 122, 875, 0, 0x05, },
+	{ 123, 750, 0, 0x04, },
+	{ 124, 625, 0, 0x03, },
+	{ 125, 500, 0, 0x02, },
+	{ 126, 375, 0, 0x01, },
+	{ 127, 0, 0, 0x00, },
+};
+
+struct cparams {
+	int i;
+	int t;
+	int m;
+	int c;
+};
+
+static struct cparams cparams[] = {
+	{ 1, 1333, 301, 28664 },
+	{ 1, 1066, 294, 24460 },
+	{ 1, 800, 294, 25192 },
+	{ 0, 1333, 276, 27605 },
+	{ 0, 1066, 276, 27605 },
+	{ 0, 800, 231, 23784 },
+};
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+	u64 total_count, diff, ret;
+	u32 count1, count2, count3, m = 0, c = 0;
+	unsigned long now = jiffies_to_msecs(jiffies), diff1;
+	int i;
+
+	diff1 = now - dev_priv->last_time1;
+
+	count1 = I915_READ(DMIEC);
+	count2 = I915_READ(DDREC);
+	count3 = I915_READ(CSIEC);
+
+	total_count = count1 + count2 + count3;
+
+	/* FIXME: handle per-counter overflow */
+	if (total_count < dev_priv->last_count1) {
+		diff = ~0UL - dev_priv->last_count1;
+		diff += total_count;
+	} else {
+		diff = total_count - dev_priv->last_count1;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+		if (cparams[i].i == dev_priv->c_m &&
+		    cparams[i].t == dev_priv->r_t) {
+			m = cparams[i].m;
+			c = cparams[i].c;
+			break;
+		}
+	}
+
+	div_u64(diff, diff1);
+	ret = ((m * diff) + c);
+	div_u64(ret, 10);
+
+	dev_priv->last_count1 = total_count;
+	dev_priv->last_time1 = now;
+
+	return ret;
+}
+
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+{
+	unsigned long m, x, b;
+	u32 tsfs;
+
+	tsfs = I915_READ(TSFS);
+
+	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+	x = I915_READ8(TR1);
+
+	b = tsfs & TSFS_INTR_MASK;
+
+	return ((m * x) / 127) - b;
+}
+
+static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+{
+	unsigned long val = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(v_table); i++) {
+		if (v_table[i].pvid == pxvid) {
+			if (IS_MOBILE(dev_priv->dev))
+				val = v_table[i].vm;
+			else
+				val = v_table[i].vd;
+		}
+	}
+
+	return val;
+}
+
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+	struct timespec now, diff1;
+	u64 diff;
+	unsigned long diffms;
+	u32 count;
+
+	getrawmonotonic(&now);
+	diff1 = timespec_sub(now, dev_priv->last_time2);
+
+	/* Don't divide by 0 */
+	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
+	if (!diffms)
+		return;
+
+	count = I915_READ(GFXEC);
+
+	if (count < dev_priv->last_count2) {
+		diff = ~0UL - dev_priv->last_count2;
+		diff += count;
+	} else {
+		diff = count - dev_priv->last_count2;
+	}
+
+	dev_priv->last_count2 = count;
+	dev_priv->last_time2 = now;
+
+	/* More magic constants... */
+	diff = diff * 1181;
+	div_u64(diff, diffms * 10);
+	dev_priv->gfx_power = diff;
+}
+
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+	unsigned long t, corr, state1, corr2, state2;
+	u32 pxvid, ext_v;
+
+	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+	pxvid = (pxvid >> 24) & 0x7f;
+	ext_v = pvid_to_extvid(dev_priv, pxvid);
+
+	state1 = ext_v;
+
+	t = i915_mch_val(dev_priv);
+
+	/* Revel in the empirically derived constants */
+
+	/* Correction factor in 1/100000 units */
+	if (t > 80)
+		corr = ((t * 2349) + 135940);
+	else if (t >= 50)
+		corr = ((t * 964) + 29317);
+	else /* < 50 */
+		corr = ((t * 301) + 1004);
+
+	corr = corr * ((150142 * state1) / 10000 - 78642);
+	corr /= 100000;
+	corr2 = (corr * dev_priv->corr);
+
+	state2 = (corr2 * state1) / 10000;
+	state2 /= 100; /* convert to mW */
+
+	i915_update_gfx_val(dev_priv);
+
+	return dev_priv->gfx_power + state2;
+}
+
+/* Global for IPS driver to get at the current i915 device */
+static struct drm_i915_private *i915_mch_dev;
+/*
+ * Lock protecting IPS related data structures
+ *   - i915_mch_dev
+ *   - dev_priv->max_delay
+ *   - dev_priv->min_delay
+ *   - dev_priv->fmax
+ *   - dev_priv->gpu_busy
+ */
+DEFINE_SPINLOCK(mchdev_lock);
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+	struct drm_i915_private *dev_priv;
+	unsigned long chipset_val, graphics_val, ret = 0;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev)
+		goto out_unlock;
+	dev_priv = i915_mch_dev;
+
+	chipset_val = i915_chipset_val(dev_priv);
+	graphics_val = i915_gfx_val(dev_priv);
+
+	ret = chipset_val + graphics_val;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	if (dev_priv->max_delay > dev_priv->fmax)
+		dev_priv->max_delay--;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	if (dev_priv->max_delay < dev_priv->min_delay)
+		dev_priv->max_delay++;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU business to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = false;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev)
+		goto out_unlock;
+	dev_priv = i915_mch_dev;
+
+	ret = dev_priv->busy;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	dev_priv->max_delay = dev_priv->fstart;
+
+	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+		ret = false;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
 
 /**
  * i915_driver_load - setup chip and create an initial config
···
 	resource_size_t base, size;
 	int ret = 0, mmio_bar;
 	uint32_t agp_size, prealloc_size, prealloc_start;
-
 	/* i915 has 4 more counters */
 	dev->counters += 4;
 	dev->types[6] = _DRM_STAT_IRQ;
···
 		dev_priv->has_gem = 0;
 	}
 
+	if (dev_priv->has_gem == 0 &&
+	    drm_core_check_feature(dev, DRIVER_MODESET)) {
+		DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
+		ret = -ENODEV;
+		goto out_iomapfree;
+	}
+
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 	if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
···
 		goto out_workqueue_free;
 	}
 
-	i915_get_mem_freq(dev);
+	if (IS_PINEVIEW(dev))
+		i915_pineview_get_mem_freq(dev);
+	else if (IS_IRONLAKE(dev))
+		i915_ironlake_get_mem_freq(dev);
 
 	/* On the 945G/GM, the chipset reports the MSI capability on the
 	 * integrated graphics even though the support isn't actually there
···
 
 	spin_lock_init(&dev_priv->user_irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
-	dev_priv->user_irq_refcount = 0;
 	dev_priv->trace_irq_seqno = 0;
 
 	ret = drm_vblank_init(dev, I915_NUM_PIPE);
···
 
 	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
 		    (unsigned long) dev);
+
+	spin_lock(&mchdev_lock);
+	i915_mch_dev = dev_priv;
+	dev_priv->mchdev_lock = &mchdev_lock;
+	spin_unlock(&mchdev_lock);
+
 	return 0;
 
 out_workqueue_free:
···
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	i915_destroy_error_state(dev);
+
+	spin_lock(&mchdev_lock);
+	i915_mch_dev = NULL;
+	spin_unlock(&mchdev_lock);
 
 	destroy_workqueue(dev_priv->wq);
 	del_timer_sync(&dev_priv->hangcheck_timer);
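Much of the new i915_dma.c code (i915_chipset_val(), i915_update_gfx_val()) samples free-running energy counters and has to tolerate them wrapping between samples; the diff open-codes this with a last_count comparison. A minimal user-space sketch of the same wrap-safe delta, assuming a 32-bit counter that wraps at most once between samples:

#include <stdio.h>
#include <stdint.h>

/* Delta of a free-running 32-bit counter, tolerating one wrap. */
static uint32_t counter_delta(uint32_t now, uint32_t last)
{
    if (now < last)               /* counter wrapped since last sample */
        return (UINT32_MAX - last) + now + 1;
    return now - last;            /* same as (uint32_t)(now - last) */
}

int main(void)
{
    printf("%u\n", counter_delta(100, 90));            /* 10 */
    printf("%u\n", counter_delta(5, UINT32_MAX - 4));  /* 10, across a wrap */
    return 0;
}

With unsigned arithmetic the plain subtraction is already correct modulo 2^32, so the explicit branch mainly documents the wrap case.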
+24 -46
drivers/gpu/drm/i915/i915_drv.c
···
 		.subdevice = PCI_ANY_ID,		\
 		.driver_data = (unsigned long) info }
 
-const static struct intel_device_info intel_i830_info = {
+static const struct intel_device_info intel_i830_info = {
 	.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
 };
 
-const static struct intel_device_info intel_845g_info = {
+static const struct intel_device_info intel_845g_info = {
 	.is_i8xx = 1,
 };
 
-const static struct intel_device_info intel_i85x_info = {
+static const struct intel_device_info intel_i85x_info = {
 	.is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
 	.cursor_needs_physical = 1,
 };
 
-const static struct intel_device_info intel_i865g_info = {
+static const struct intel_device_info intel_i865g_info = {
 	.is_i8xx = 1,
 };
 
-const static struct intel_device_info intel_i915g_info = {
+static const struct intel_device_info intel_i915g_info = {
 	.is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
 };
-const static struct intel_device_info intel_i915gm_info = {
+static const struct intel_device_info intel_i915gm_info = {
 	.is_i9xx = 1,  .is_mobile = 1,
 	.cursor_needs_physical = 1,
 };
-const static struct intel_device_info intel_i945g_info = {
+static const struct intel_device_info intel_i945g_info = {
 	.is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
 };
-const static struct intel_device_info intel_i945gm_info = {
+static const struct intel_device_info intel_i945gm_info = {
 	.is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
 };
 
-const static struct intel_device_info intel_i965g_info = {
+static const struct intel_device_info intel_i965g_info = {
 	.is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_i965gm_info = {
+static const struct intel_device_info intel_i965gm_info = {
 	.is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
 	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_g33_info = {
+static const struct intel_device_info intel_g33_info = {
 	.is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_g45_info = {
+static const struct intel_device_info intel_g45_info = {
 	.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_pipe_cxsr = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_gm45_info = {
+static const struct intel_device_info intel_gm45_info = {
 	.is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
 	.has_pipe_cxsr = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_pineview_info = {
+static const struct intel_device_info intel_pineview_info = {
 	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
 	.need_gfx_hws = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_ironlake_d_info = {
+static const struct intel_device_info intel_ironlake_d_info = {
 	.is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_pipe_cxsr = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_ironlake_m_info = {
+static const struct intel_device_info intel_ironlake_m_info = {
 	.is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
 	.need_gfx_hws = 1, .has_rc6 = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_sandybridge_d_info = {
+static const struct intel_device_info intel_sandybridge_d_info = {
 	.is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_hotplug = 1, .is_gen6 = 1,
 };
 
-const static struct intel_device_info intel_sandybridge_m_info = {
+static const struct intel_device_info intel_sandybridge_m_info = {
 	.is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_hotplug = 1, .is_gen6 = 1,
 };
 
-const static struct pci_device_id pciidlist[] = {
+static const struct pci_device_id pciidlist[] = {
 	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
 	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
 	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
···
 	/*
 	 * Clear request list
 	 */
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev, &dev_priv->render_ring);
 
 	if (need_display)
 		i915_save_display(dev);
···
 		}
 	} else {
 		DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+		mutex_unlock(&dev->struct_mutex);
 		return -ENODEV;
 	}
···
 	 * switched away).
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
-			!dev_priv->mm.suspended) {
-		drm_i915_ring_buffer_t *ring = &dev_priv->ring;
-		struct drm_gem_object *obj = ring->ring_obj;
-		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	    !dev_priv->mm.suspended) {
+		struct intel_ring_buffer *ring = &dev_priv->render_ring;
 		dev_priv->mm.suspended = 0;
-
-		/* Stop the ring if it's running. */
-		I915_WRITE(PRB0_CTL, 0);
-		I915_WRITE(PRB0_TAIL, 0);
-		I915_WRITE(PRB0_HEAD, 0);
-
-		/* Initialize the ring. */
-		I915_WRITE(PRB0_START, obj_priv->gtt_offset);
-		I915_WRITE(PRB0_CTL,
-			   ((obj->size - 4096) & RING_NR_PAGES) |
-			   RING_NO_REPORT |
-			   RING_VALID);
-		if (!drm_core_check_feature(dev, DRIVER_MODESET))
-			i915_kernel_lost_context(dev);
-		else {
-			ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-			ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
-			ring->space = ring->head - (ring->tail + 8);
-			if (ring->space < 0)
-				ring->space += ring->Size;
-		}
-
+		ring->init(dev, ring);
 		mutex_unlock(&dev->struct_mutex);
 		drm_irq_uninstall(dev);
 		drm_irq_install(dev);
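The mechanical const static to static const churn reflects the C rule that storage-class specifiers belong at the start of a declaration (C99 deems any other placement obsolescent, and gcc warns about it with -Wold-style-declaration). The table-driven device matching itself is unchanged; a small standalone sketch of the same pattern, with a hypothetical two-entry ID table standing in for pciidlist:

#include <stdio.h>
#include <stdint.h>

struct chip_info {
    const char *name;
    int is_mobile;
};

/* Storage-class specifier first: "static const", not "const static". */
static const struct chip_info i830_info  = { "i830",  1 };
static const struct chip_info i915g_info = { "i915g", 0 };

static const struct { uint16_t device; const struct chip_info *info; } idlist[] = {
    { 0x3577, &i830_info },   /* hypothetical pairing for illustration */
    { 0x2582, &i915g_info },
};

/* Linear scan, as the PCI core does over the driver's id table. */
static const struct chip_info *lookup(uint16_t device)
{
    for (size_t i = 0; i < sizeof(idlist) / sizeof(idlist[0]); i++)
        if (idlist[i].device == device)
            return idlist[i].info;
    return NULL;
}

int main(void)
{
    const struct chip_info *info = lookup(0x3577);
    if (info)
        printf("%s mobile=%d\n", info->name, info->is_mobile);
    return 0;
}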
+116 -93
drivers/gpu/drm/i915/i915_drv.h
···
 
 #include "i915_reg.h"
 #include "intel_bios.h"
+#include "intel_ringbuffer.h"
 #include <linux/io-mapping.h>
 
 /* General customization:
···
 };
 
 #define I915_NUM_PIPE	2
+
+#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
 /* Interface history:
  *
···
 	drm_dma_handle_t *handle;
 	struct drm_gem_object *cur_obj;
 };
-
-typedef struct _drm_i915_ring_buffer {
-	unsigned long Size;
-	u8 *virtual_start;
-	int head;
-	int tail;
-	int space;
-	drm_local_map_t map;
-	struct drm_gem_object *ring_obj;
-} drm_i915_ring_buffer_t;
 
 struct mem_block {
 	struct mem_block *next;
···
 	void __iomem *regs;
 
 	struct pci_dev *bridge_dev;
-	drm_i915_ring_buffer_t ring;
+	struct intel_ring_buffer render_ring;
+	struct intel_ring_buffer bsd_ring;
 
 	drm_dma_handle_t *status_page_dmah;
-	void *hw_status_page;
 	void *seqno_page;
 	dma_addr_t dma_status_page;
 	uint32_t counter;
-	unsigned int status_gfx_addr;
 	unsigned int seqno_gfx_addr;
 	drm_local_map_t hws_map;
-	struct drm_gem_object *hws_obj;
 	struct drm_gem_object *seqno_obj;
 	struct drm_gem_object *pwrctx;
···
 	atomic_t irq_received;
 	/** Protects user_irq_refcount and irq_mask_reg */
 	spinlock_t user_irq_lock;
-	/** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
-	int user_irq_refcount;
 	u32 trace_irq_seqno;
 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 irq_mask_reg;
···
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
 
-	unsigned int fsb_freq, mem_freq;
+	unsigned int fsb_freq, mem_freq, is_ddr3;
 
 	spinlock_t error_lock;
 	struct drm_i915_error_state *first_error;
···
 		 */
 		struct list_head shrink_list;
 
-		/**
-		 * List of objects currently involved in rendering from the
-		 * ringbuffer.
-		 *
-		 * Includes buffers having the contents of their GPU caches
-		 * flushed, not necessarily primitives.  last_rendering_seqno
-		 * represents when the rendering involved will be completed.
-		 *
-		 * A reference is held on the buffer while on this list.
-		 */
 		spinlock_t active_list_lock;
-		struct list_head active_list;
 
 		/**
 		 * List of objects which are not in the ringbuffer but which
···
 
 		/** LRU list of objects with fence regs on them. */
 		struct list_head fence_list;
-
-		/**
-		 * List of breadcrumbs associated with GPU requests currently
-		 * outstanding.
-		 */
-		struct list_head request_list;
 
 		/**
 		 * We leave the user IRQ off as much as possible,
···
 	u8 cur_delay;
 	u8 min_delay;
 	u8 max_delay;
+	u8 fmax;
+	u8 fstart;
+
+	u64 last_count1;
+	unsigned long last_time1;
+	u64 last_count2;
+	struct timespec last_time2;
+	unsigned long gfx_power;
+	int c_m;
+	int r_t;
+	u8 corr;
+	spinlock_t *mchdev_lock;
 
 	enum no_fbc_reason no_fbc_reason;
···
 	 * (has pending rendering), and is not set if it's on inactive (ready
 	 * to be unbound).
 	 */
-	int active;
+	unsigned int active : 1;
 
 	/**
 	 * This is set if the object has been written to since last bound
 	 * to the GTT
 	 */
-	int dirty;
+	unsigned int dirty : 1;
+
+	/**
+	 * Fence register bits (if any) for this object.  Will be set
+	 * as needed when mapped into the GTT.
+	 * Protected by dev->struct_mutex.
+	 *
+	 * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
+	 */
+	int fence_reg : 5;
+
+	/**
+	 * Used for checking the object doesn't appear more than once
+	 * in an execbuffer object list.
+	 */
+	unsigned int in_execbuffer : 1;
+
+	/**
+	 * Advice: are the backing pages purgeable?
+	 */
+	unsigned int madv : 2;
+
+	/**
+	 * Refcount for the pages array. With the current locking scheme, there
+	 * are at most two concurrent users: Binding a bo to the gtt and
+	 * pwrite/pread using physical addresses. So two bits for a maximum
+	 * of two users are enough.
+	 */
+	unsigned int pages_refcount : 2;
+#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3
+
+	/**
+	 * Current tiling mode for the object.
+	 */
+	unsigned int tiling_mode : 2;
+
+	/** How many users have pinned this object in GTT space. The following
+	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
+	 * (via user_pin_count), execbuffer (objects are not allowed multiple
+	 * times for the same batchbuffer), and the framebuffer code. When
+	 * switching/pageflipping, the framebuffer code has at most two buffers
+	 * pinned per crtc.
+	 *
+	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+	 * bits with absolutely no headroom. So use 4 bits. */
+	int pin_count : 4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 
 	/** AGP memory structure for our GTT binding. */
 	DRM_AGP_MEM *agp_mem;
 
 	struct page **pages;
-	int pages_refcount;
 
 	/**
 	 * Current offset of the object in GTT space.
···
 	 */
 	uint32_t gtt_offset;
 
+	/* Which ring is refering to is this object */
+	struct intel_ring_buffer *ring;
+
 	/**
 	 * Fake offset for use by mmap(2)
 	 */
 	uint64_t mmap_offset;
 
-	/**
-	 * Fence register bits (if any) for this object.  Will be set
-	 * as needed when mapped into the GTT.
-	 * Protected by dev->struct_mutex.
-	 */
-	int fence_reg;
-
-	/** How many users have pinned this object in GTT space */
-	int pin_count;
-
 	/** Breadcrumb of last rendering to the buffer. */
 	uint32_t last_rendering_seqno;
 
-	/** Current tiling mode for the object. */
-	uint32_t tiling_mode;
+	/** Current tiling stride for the object, if it's tiled. */
 	uint32_t stride;
 
 	/** Record of address bit 17 of each page at last unbind. */
···
 	struct drm_i915_gem_phys_object *phys_obj;
 
 	/**
-	 * Used for checking the object doesn't appear more than once
-	 * in an execbuffer object list.
-	 */
-	int in_execbuffer;
-
-	/**
-	 * Advice: are the backing pages purgeable?
-	 */
-	int madv;
-
-	/**
 	 * Number of crtcs where this object is currently the fb, but
 	 * will be page flipped away on the next vblank.  When it
 	 * reaches 0, dev_priv->pending_flip_queue will be woken up.
···
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
 struct drm_i915_gem_request {
+	/** On Which ring this request was generated */
+	struct intel_ring_buffer *ring;
+
 	/** GEM sequence number associated with this request. */
 	uint32_t seqno;
···
 			    struct drm_clip_rect *boxes,
 			    int i, int DR1, int DR4);
 extern int i965_reset(struct drm_device *dev, u8 flags);
+extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
+extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+
 
 /* i915_irq.c */
 void i915_hangcheck_elapsed(unsigned long data);
···
 			  struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
-void i915_user_irq_get(struct drm_device *dev);
 void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
-void i915_user_irq_put(struct drm_device *dev);
 extern void i915_enable_interrupt (struct drm_device *dev);
 
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
···
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
 extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
+extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
+		u32 mask);
+extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
+		u32 mask);
 
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
···
 int i915_gem_object_unbind(struct drm_gem_object *obj);
 void i915_gem_release_mmap(struct drm_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev,
+		struct intel_ring_buffer *ring);
 bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
 int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev,
+		struct intel_ring_buffer *ring);
 void i915_gem_retire_work_handler(struct work_struct *work);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
···
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end);
 int i915_gem_idle(struct drm_device *dev);
-uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-			  uint32_t flush_domains);
-int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
+uint32_t i915_add_request(struct drm_device *dev,
+		struct drm_file *file_priv,
+		uint32_t flush_domains,
+		struct intel_ring_buffer *ring);
+int i915_do_wait_request(struct drm_device *dev,
+		uint32_t seqno, int interruptible,
+		struct intel_ring_buffer *ring);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
···
 extern void intel_disable_fbc(struct drm_device *dev);
 extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 extern bool intel_fbc_enabled(struct drm_device *dev);
-
+extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch (struct drm_device *dev);
 extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
···
 * has access to the ring.
 */
 #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {			\
-	if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
+	if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
+			== NULL)					\
 		LOCK_TEST_WITH_RETURN(dev, file_priv);			\
 } while (0)
···
 #define I915_WRITE64(reg, val)	writeq(val, dev_priv->regs + (reg))
 #define I915_READ64(reg)	readq(dev_priv->regs + (reg))
 #define POSTING_READ(reg)	(void)I915_READ(reg)
+#define POSTING_READ16(reg)	(void)I915_READ16(reg)
 
 #define I915_VERBOSE 0
 
-#define RING_LOCALS	volatile unsigned int *ring_virt__;
-
-#define BEGIN_LP_RING(n) do {						\
-	int bytes__ = 4*(n);						\
-	if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n));	\
-	/* a wrap must occur between instructions so pad beforehand */	\
-	if (unlikely (dev_priv->ring.tail + bytes__ > dev_priv->ring.Size)) \
-		i915_wrap_ring(dev);					\
-	if (unlikely (dev_priv->ring.space < bytes__))			\
-		i915_wait_ring(dev, bytes__, __func__);			\
-	ring_virt__ = (unsigned int *)					\
-	        (dev_priv->ring.virtual_start + dev_priv->ring.tail);	\
-	dev_priv->ring.tail += bytes__;					\
-	dev_priv->ring.tail &= dev_priv->ring.Size - 1;			\
-	dev_priv->ring.space -= bytes__;				\
+#define BEGIN_LP_RING(n)  do {						\
+	drm_i915_private_t *dev_priv = dev->dev_private;		\
+	if (I915_VERBOSE)						\
+		DRM_DEBUG("   BEGIN_LP_RING %x\n", (int)(n));		\
+	intel_ring_begin(dev, &dev_priv->render_ring, 4*(n));		\
 } while (0)
 
-#define OUT_RING(n) do {						\
-	if (I915_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));	\
-	*ring_virt__++ = (n);						\
+
+#define OUT_RING(x) do {						\
+	drm_i915_private_t *dev_priv = dev->dev_private;		\
+	if (I915_VERBOSE)						\
+		DRM_DEBUG("   OUT_RING %x\n", (int)(x));		\
+	intel_ring_emit(dev, &dev_priv->render_ring, x);		\
 } while (0)
 
 #define ADVANCE_LP_RING() do {						\
+	drm_i915_private_t *dev_priv = dev->dev_private;		\
 	if (I915_VERBOSE)						\
-		DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->ring.tail);	\
-	I915_WRITE(PRB0_TAIL, dev_priv->ring.tail);			\
+		DRM_DEBUG("ADVANCE_LP_RING %x\n",			\
+				dev_priv->render_ring.tail);		\
+	intel_ring_advance(dev, &dev_priv->render_ring);		\
 } while(0)
···
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
-#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
+#define READ_HWSP(dev_priv, reg)  (((volatile u32 *)\
+			(dev_priv->render_ring.status_page.page_addr))[reg])
 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
 #define I915_GEM_HWS_INDEX		0x20
 #define I915_BREADCRUMB_INDEX		0x21
-
-extern int i915_wrap_ring(struct drm_device * dev);
-extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 #define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)
···
 		     (dev)->pci_device == 0x2A42 ||		\
 		     (dev)->pci_device == 0x2E42)
 
+#define HAS_BSD(dev)		(IS_IRONLAKE(dev) || IS_G4X(dev))
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+329 -566
drivers/gpu/drm/i915/i915_gem.c
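Note: the i915_gem.c hunks below convert the GEM core from the single legacy ring to per-ring bookkeeping, threading a struct intel_ring_buffer through request creation, retiring, and waiting. For orientation, here is a condensed sketch of that interface as implied by the call sites in this diff (add_request, flush, get_gem_seqno, user_irq_get/put, dispatch_gem_execbuffer, plus per-ring active/request lists); the real definition lives in the new intel_ringbuffer.h and carries more fields, so treat the layout here as illustrative only, not as the committed struct.

/* Illustrative sketch only -- reconstructed from the call sites in this
 * diff, not copied from intel_ringbuffer.h; the real struct has more
 * fields and a different layout.
 */
struct intel_ring_buffer {
	struct drm_gem_object *gem_object;	/* backing object of the ring */
	void *virtual_start;			/* CPU mapping of the ring */
	unsigned int size;
	unsigned int tail;
	u32 ring_flag;				/* distinguishes render vs. BSD */

	struct list_head active_list;		/* objects in flight on this ring */
	struct list_head request_list;		/* outstanding gem requests */

	wait_queue_head_t irq_queue;		/* waiters for user interrupts */
	u32 waiting_gem_seqno;
	u32 irq_gem_seqno;
	struct {
		void *page_addr;		/* hardware status page mapping */
	} status_page;

	/* per-ring hooks replacing the old PRB0-only helpers */
	u32 (*add_request)(struct drm_device *dev,
			   struct intel_ring_buffer *ring,
			   struct drm_file *file_priv,
			   u32 flush_domains);
	void (*flush)(struct drm_device *dev,
		      struct intel_ring_buffer *ring,
		      u32 invalidate_domains, u32 flush_domains);
	u32 (*get_gem_seqno)(struct drm_device *dev,
			     struct intel_ring_buffer *ring);
	void (*user_irq_get)(struct drm_device *dev,
			     struct intel_ring_buffer *ring);
	void (*user_irq_put)(struct drm_device *dev,
			     struct intel_ring_buffer *ring);
	int (*dispatch_gem_execbuffer)(struct drm_device *dev,
				       struct intel_ring_buffer *ring,
				       struct drm_i915_gem_execbuffer2 *exec,
				       struct drm_clip_rect *cliprects,
				       uint64_t exec_offset);
};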
··· 35 35 #include <linux/swap.h> 36 36 #include <linux/pci.h> 37 37 38 - #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 39 - 40 38 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); 41 39 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); 42 40 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); ··· 167 169 obj_priv->tiling_mode != I915_TILING_NONE; 168 170 } 169 171 170 - static inline int 172 + static inline void 171 173 slow_shmem_copy(struct page *dst_page, 172 174 int dst_offset, 173 175 struct page *src_page, ··· 176 178 { 177 179 char *dst_vaddr, *src_vaddr; 178 180 179 - dst_vaddr = kmap_atomic(dst_page, KM_USER0); 180 - if (dst_vaddr == NULL) 181 - return -ENOMEM; 182 - 183 - src_vaddr = kmap_atomic(src_page, KM_USER1); 184 - if (src_vaddr == NULL) { 185 - kunmap_atomic(dst_vaddr, KM_USER0); 186 - return -ENOMEM; 187 - } 181 + dst_vaddr = kmap(dst_page); 182 + src_vaddr = kmap(src_page); 188 183 189 184 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length); 190 185 191 - kunmap_atomic(src_vaddr, KM_USER1); 192 - kunmap_atomic(dst_vaddr, KM_USER0); 193 - 194 - return 0; 186 + kunmap(src_page); 187 + kunmap(dst_page); 195 188 } 196 189 197 - static inline int 190 + static inline void 198 191 slow_shmem_bit17_copy(struct page *gpu_page, 199 192 int gpu_offset, 200 193 struct page *cpu_page, ··· 205 216 cpu_page, cpu_offset, length); 206 217 } 207 218 208 - gpu_vaddr = kmap_atomic(gpu_page, KM_USER0); 209 - if (gpu_vaddr == NULL) 210 - return -ENOMEM; 211 - 212 - cpu_vaddr = kmap_atomic(cpu_page, KM_USER1); 213 - if (cpu_vaddr == NULL) { 214 - kunmap_atomic(gpu_vaddr, KM_USER0); 215 - return -ENOMEM; 216 - } 219 + gpu_vaddr = kmap(gpu_page); 220 + cpu_vaddr = kmap(cpu_page); 217 221 218 222 /* Copy the data, XORing A6 with A17 (1). 
The user already knows he's 219 223 * XORing with the other bits (A9 for Y, A9 and A10 for X) ··· 230 248 length -= this_length; 231 249 } 232 250 233 - kunmap_atomic(cpu_vaddr, KM_USER1); 234 - kunmap_atomic(gpu_vaddr, KM_USER0); 235 - 236 - return 0; 251 + kunmap(cpu_page); 252 + kunmap(gpu_page); 237 253 } 238 254 239 255 /** ··· 407 427 page_length = PAGE_SIZE - data_page_offset; 408 428 409 429 if (do_bit17_swizzling) { 410 - ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], 411 - shmem_page_offset, 412 - user_pages[data_page_index], 413 - data_page_offset, 414 - page_length, 415 - 1); 416 - } else { 417 - ret = slow_shmem_copy(user_pages[data_page_index], 418 - data_page_offset, 419 - obj_priv->pages[shmem_page_index], 430 + slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], 420 431 shmem_page_offset, 421 - page_length); 432 + user_pages[data_page_index], 433 + data_page_offset, 434 + page_length, 435 + 1); 436 + } else { 437 + slow_shmem_copy(user_pages[data_page_index], 438 + data_page_offset, 439 + obj_priv->pages[shmem_page_index], 440 + shmem_page_offset, 441 + page_length); 422 442 } 423 - if (ret) 424 - goto fail_put_pages; 425 443 426 444 remain -= page_length; 427 445 data_ptr += page_length; ··· 509 531 * page faults 510 532 */ 511 533 512 - static inline int 534 + static inline void 513 535 slow_kernel_write(struct io_mapping *mapping, 514 536 loff_t gtt_base, int gtt_offset, 515 537 struct page *user_page, int user_offset, 516 538 int length) 517 539 { 518 - char *src_vaddr, *dst_vaddr; 519 - unsigned long unwritten; 540 + char __iomem *dst_vaddr; 541 + char *src_vaddr; 520 542 521 - dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base); 522 - src_vaddr = kmap_atomic(user_page, KM_USER1); 523 - unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset, 524 - src_vaddr + user_offset, 525 - length); 526 - kunmap_atomic(src_vaddr, KM_USER1); 527 - io_mapping_unmap_atomic(dst_vaddr); 528 - if (unwritten) 529 - return -EFAULT; 530 - return 0; 543 + dst_vaddr = io_mapping_map_wc(mapping, gtt_base); 544 + src_vaddr = kmap(user_page); 545 + 546 + memcpy_toio(dst_vaddr + gtt_offset, 547 + src_vaddr + user_offset, 548 + length); 549 + 550 + kunmap(user_page); 551 + io_mapping_unmap(dst_vaddr); 531 552 } 532 553 533 554 static inline int ··· 699 722 if ((data_page_offset + page_length) > PAGE_SIZE) 700 723 page_length = PAGE_SIZE - data_page_offset; 701 724 702 - ret = slow_kernel_write(dev_priv->mm.gtt_mapping, 703 - gtt_page_base, gtt_page_offset, 704 - user_pages[data_page_index], 705 - data_page_offset, 706 - page_length); 707 - 708 - /* If we get a fault while copying data, then (presumably) our 709 - * source page isn't available. Return the error and we'll 710 - * retry in the slow path. 
711 - */ 712 - if (ret) 713 - goto out_unpin_object; 725 + slow_kernel_write(dev_priv->mm.gtt_mapping, 726 + gtt_page_base, gtt_page_offset, 727 + user_pages[data_page_index], 728 + data_page_offset, 729 + page_length); 714 730 715 731 remain -= page_length; 716 732 offset += page_length; ··· 872 902 page_length = PAGE_SIZE - data_page_offset; 873 903 874 904 if (do_bit17_swizzling) { 875 - ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], 876 - shmem_page_offset, 877 - user_pages[data_page_index], 878 - data_page_offset, 879 - page_length, 880 - 0); 881 - } else { 882 - ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], 905 + slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], 883 906 shmem_page_offset, 884 907 user_pages[data_page_index], 885 908 data_page_offset, 886 - page_length); 909 + page_length, 910 + 0); 911 + } else { 912 + slow_shmem_copy(obj_priv->pages[shmem_page_index], 913 + shmem_page_offset, 914 + user_pages[data_page_index], 915 + data_page_offset, 916 + page_length); 887 917 } 888 - if (ret) 889 - goto fail_put_pages; 890 918 891 919 remain -= page_length; 892 920 data_ptr += page_length; ··· 941 973 if (obj_priv->phys_obj) 942 974 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); 943 975 else if (obj_priv->tiling_mode == I915_TILING_NONE && 944 - dev->gtt_total != 0) { 976 + dev->gtt_total != 0 && 977 + obj->write_domain != I915_GEM_DOMAIN_CPU) { 945 978 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); 946 979 if (ret == -EFAULT) { 947 980 ret = i915_gem_gtt_pwrite_slow(dev, obj, args, ··· 1453 1484 } 1454 1485 1455 1486 static void 1456 - i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) 1487 + i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno, 1488 + struct intel_ring_buffer *ring) 1457 1489 { 1458 1490 struct drm_device *dev = obj->dev; 1459 1491 drm_i915_private_t *dev_priv = dev->dev_private; 1460 1492 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1493 + BUG_ON(ring == NULL); 1494 + obj_priv->ring = ring; 1461 1495 1462 1496 /* Add a reference if we're newly entering the active list. */ 1463 1497 if (!obj_priv->active) { ··· 1469 1497 } 1470 1498 /* Move from whatever list we were on to the tail of execution. 
*/ 1471 1499 spin_lock(&dev_priv->mm.active_list_lock); 1472 - list_move_tail(&obj_priv->list, 1473 - &dev_priv->mm.active_list); 1500 + list_move_tail(&obj_priv->list, &ring->active_list); 1474 1501 spin_unlock(&dev_priv->mm.active_list_lock); 1475 1502 obj_priv->last_rendering_seqno = seqno; 1476 1503 } ··· 1522 1551 BUG_ON(!list_empty(&obj_priv->gpu_write_list)); 1523 1552 1524 1553 obj_priv->last_rendering_seqno = 0; 1554 + obj_priv->ring = NULL; 1525 1555 if (obj_priv->active) { 1526 1556 obj_priv->active = 0; 1527 1557 drm_gem_object_unreference(obj); ··· 1532 1560 1533 1561 static void 1534 1562 i915_gem_process_flushing_list(struct drm_device *dev, 1535 - uint32_t flush_domains, uint32_t seqno) 1563 + uint32_t flush_domains, uint32_t seqno, 1564 + struct intel_ring_buffer *ring) 1536 1565 { 1537 1566 drm_i915_private_t *dev_priv = dev->dev_private; 1538 1567 struct drm_i915_gem_object *obj_priv, *next; ··· 1544 1571 struct drm_gem_object *obj = &obj_priv->base; 1545 1572 1546 1573 if ((obj->write_domain & flush_domains) == 1547 - obj->write_domain) { 1574 + obj->write_domain && 1575 + obj_priv->ring->ring_flag == ring->ring_flag) { 1548 1576 uint32_t old_write_domain = obj->write_domain; 1549 1577 1550 1578 obj->write_domain = 0; 1551 1579 list_del_init(&obj_priv->gpu_write_list); 1552 - i915_gem_object_move_to_active(obj, seqno); 1580 + i915_gem_object_move_to_active(obj, seqno, ring); 1553 1581 1554 1582 /* update the fence lru list */ 1555 1583 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { ··· 1567 1593 } 1568 1594 } 1569 1595 1570 - #define PIPE_CONTROL_FLUSH(addr) \ 1571 - OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ 1572 - PIPE_CONTROL_DEPTH_STALL); \ 1573 - OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ 1574 - OUT_RING(0); \ 1575 - OUT_RING(0); \ 1576 - 1577 - /** 1578 - * Creates a new sequence number, emitting a write of it to the status page 1579 - * plus an interrupt, which will trigger i915_user_interrupt_handler. 1580 - * 1581 - * Must be called with struct_lock held. 1582 - * 1583 - * Returned sequence numbers are nonzero on success. 1584 - */ 1585 1596 uint32_t 1586 1597 i915_add_request(struct drm_device *dev, struct drm_file *file_priv, 1587 - uint32_t flush_domains) 1598 + uint32_t flush_domains, struct intel_ring_buffer *ring) 1588 1599 { 1589 1600 drm_i915_private_t *dev_priv = dev->dev_private; 1590 1601 struct drm_i915_file_private *i915_file_priv = NULL; 1591 1602 struct drm_i915_gem_request *request; 1592 1603 uint32_t seqno; 1593 1604 int was_empty; 1594 - RING_LOCALS; 1595 1605 1596 1606 if (file_priv != NULL) 1597 1607 i915_file_priv = file_priv->driver_priv; ··· 1584 1626 if (request == NULL) 1585 1627 return 0; 1586 1628 1587 - /* Grab the seqno we're going to make this request be, and bump the 1588 - * next (skipping 0 so it can be the reserved no-seqno value). 1589 - */ 1590 - seqno = dev_priv->mm.next_gem_seqno; 1591 - dev_priv->mm.next_gem_seqno++; 1592 - if (dev_priv->mm.next_gem_seqno == 0) 1593 - dev_priv->mm.next_gem_seqno++; 1594 - 1595 - if (HAS_PIPE_CONTROL(dev)) { 1596 - u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; 1597 - 1598 - /* 1599 - * Workaround qword write incoherence by flushing the 1600 - * PIPE_NOTIFY buffers out to memory before requesting 1601 - * an interrupt. 
1602 - */ 1603 - BEGIN_LP_RING(32); 1604 - OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | 1605 - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); 1606 - OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); 1607 - OUT_RING(seqno); 1608 - OUT_RING(0); 1609 - PIPE_CONTROL_FLUSH(scratch_addr); 1610 - scratch_addr += 128; /* write to separate cachelines */ 1611 - PIPE_CONTROL_FLUSH(scratch_addr); 1612 - scratch_addr += 128; 1613 - PIPE_CONTROL_FLUSH(scratch_addr); 1614 - scratch_addr += 128; 1615 - PIPE_CONTROL_FLUSH(scratch_addr); 1616 - scratch_addr += 128; 1617 - PIPE_CONTROL_FLUSH(scratch_addr); 1618 - scratch_addr += 128; 1619 - PIPE_CONTROL_FLUSH(scratch_addr); 1620 - OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | 1621 - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | 1622 - PIPE_CONTROL_NOTIFY); 1623 - OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); 1624 - OUT_RING(seqno); 1625 - OUT_RING(0); 1626 - ADVANCE_LP_RING(); 1627 - } else { 1628 - BEGIN_LP_RING(4); 1629 - OUT_RING(MI_STORE_DWORD_INDEX); 1630 - OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 1631 - OUT_RING(seqno); 1632 - 1633 - OUT_RING(MI_USER_INTERRUPT); 1634 - ADVANCE_LP_RING(); 1635 - } 1636 - 1637 - DRM_DEBUG_DRIVER("%d\n", seqno); 1629 + seqno = ring->add_request(dev, ring, file_priv, flush_domains); 1638 1630 1639 1631 request->seqno = seqno; 1632 + request->ring = ring; 1640 1633 request->emitted_jiffies = jiffies; 1641 - was_empty = list_empty(&dev_priv->mm.request_list); 1642 - list_add_tail(&request->list, &dev_priv->mm.request_list); 1634 + was_empty = list_empty(&ring->request_list); 1635 + list_add_tail(&request->list, &ring->request_list); 1636 + 1643 1637 if (i915_file_priv) { 1644 1638 list_add_tail(&request->client_list, 1645 1639 &i915_file_priv->mm.request_list); ··· 1603 1693 * domain we're flushing with our flush. 1604 1694 */ 1605 1695 if (flush_domains != 0) 1606 - i915_gem_process_flushing_list(dev, flush_domains, seqno); 1696 + i915_gem_process_flushing_list(dev, flush_domains, seqno, ring); 1607 1697 1608 1698 if (!dev_priv->mm.suspended) { 1609 1699 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); ··· 1620 1710 * before signalling the CPU 1621 1711 */ 1622 1712 static uint32_t 1623 - i915_retire_commands(struct drm_device *dev) 1713 + i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) 1624 1714 { 1625 - drm_i915_private_t *dev_priv = dev->dev_private; 1626 - uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; 1627 1715 uint32_t flush_domains = 0; 1628 - RING_LOCALS; 1629 1716 1630 1717 /* The sampler always gets flushed on i965 (sigh) */ 1631 1718 if (IS_I965G(dev)) 1632 1719 flush_domains |= I915_GEM_DOMAIN_SAMPLER; 1633 - BEGIN_LP_RING(2); 1634 - OUT_RING(cmd); 1635 - OUT_RING(0); /* noop */ 1636 - ADVANCE_LP_RING(); 1720 + 1721 + ring->flush(dev, ring, 1722 + I915_GEM_DOMAIN_COMMAND, flush_domains); 1637 1723 return flush_domains; 1638 1724 } 1639 1725 ··· 1649 1743 * by the ringbuffer to the flushing/inactive lists as appropriate. 
1650 1744 */ 1651 1745 spin_lock(&dev_priv->mm.active_list_lock); 1652 - while (!list_empty(&dev_priv->mm.active_list)) { 1746 + while (!list_empty(&request->ring->active_list)) { 1653 1747 struct drm_gem_object *obj; 1654 1748 struct drm_i915_gem_object *obj_priv; 1655 1749 1656 - obj_priv = list_first_entry(&dev_priv->mm.active_list, 1750 + obj_priv = list_first_entry(&request->ring->active_list, 1657 1751 struct drm_i915_gem_object, 1658 1752 list); 1659 1753 obj = &obj_priv->base; ··· 1700 1794 } 1701 1795 1702 1796 uint32_t 1703 - i915_get_gem_seqno(struct drm_device *dev) 1797 + i915_get_gem_seqno(struct drm_device *dev, 1798 + struct intel_ring_buffer *ring) 1704 1799 { 1705 - drm_i915_private_t *dev_priv = dev->dev_private; 1706 - 1707 - if (HAS_PIPE_CONTROL(dev)) 1708 - return ((volatile u32 *)(dev_priv->seqno_page))[0]; 1709 - else 1710 - return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); 1800 + return ring->get_gem_seqno(dev, ring); 1711 1801 } 1712 1802 1713 1803 /** 1714 1804 * This function clears the request list as sequence numbers are passed. 1715 1805 */ 1716 1806 void 1717 - i915_gem_retire_requests(struct drm_device *dev) 1807 + i915_gem_retire_requests(struct drm_device *dev, 1808 + struct intel_ring_buffer *ring) 1718 1809 { 1719 1810 drm_i915_private_t *dev_priv = dev->dev_private; 1720 1811 uint32_t seqno; 1721 1812 1722 - if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list)) 1813 + if (!ring->status_page.page_addr 1814 + || list_empty(&ring->request_list)) 1723 1815 return; 1724 1816 1725 - seqno = i915_get_gem_seqno(dev); 1817 + seqno = i915_get_gem_seqno(dev, ring); 1726 1818 1727 - while (!list_empty(&dev_priv->mm.request_list)) { 1819 + while (!list_empty(&ring->request_list)) { 1728 1820 struct drm_i915_gem_request *request; 1729 1821 uint32_t retiring_seqno; 1730 1822 1731 - request = list_first_entry(&dev_priv->mm.request_list, 1823 + request = list_first_entry(&ring->request_list, 1732 1824 struct drm_i915_gem_request, 1733 1825 list); 1734 1826 retiring_seqno = request->seqno; ··· 1744 1840 1745 1841 if (unlikely (dev_priv->trace_irq_seqno && 1746 1842 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { 1747 - i915_user_irq_put(dev); 1843 + 1844 + ring->user_irq_put(dev, ring); 1748 1845 dev_priv->trace_irq_seqno = 0; 1749 1846 } 1750 1847 } ··· 1761 1856 dev = dev_priv->dev; 1762 1857 1763 1858 mutex_lock(&dev->struct_mutex); 1764 - i915_gem_retire_requests(dev); 1859 + i915_gem_retire_requests(dev, &dev_priv->render_ring); 1860 + 1861 + if (HAS_BSD(dev)) 1862 + i915_gem_retire_requests(dev, &dev_priv->bsd_ring); 1863 + 1765 1864 if (!dev_priv->mm.suspended && 1766 - !list_empty(&dev_priv->mm.request_list)) 1865 + (!list_empty(&dev_priv->render_ring.request_list) || 1866 + (HAS_BSD(dev) && 1867 + !list_empty(&dev_priv->bsd_ring.request_list)))) 1767 1868 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); 1768 1869 mutex_unlock(&dev->struct_mutex); 1769 1870 } 1770 1871 1771 1872 int 1772 - i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) 1873 + i915_do_wait_request(struct drm_device *dev, uint32_t seqno, 1874 + int interruptible, struct intel_ring_buffer *ring) 1773 1875 { 1774 1876 drm_i915_private_t *dev_priv = dev->dev_private; 1775 1877 u32 ier; ··· 1787 1875 if (atomic_read(&dev_priv->mm.wedged)) 1788 1876 return -EIO; 1789 1877 1790 - if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { 1878 + if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) { 1791 1879 if 
(HAS_PCH_SPLIT(dev)) 1792 1880 ier = I915_READ(DEIER) | I915_READ(GTIER); 1793 1881 else ··· 1801 1889 1802 1890 trace_i915_gem_request_wait_begin(dev, seqno); 1803 1891 1804 - dev_priv->mm.waiting_gem_seqno = seqno; 1805 - i915_user_irq_get(dev); 1892 + ring->waiting_gem_seqno = seqno; 1893 + ring->user_irq_get(dev, ring); 1806 1894 if (interruptible) 1807 - ret = wait_event_interruptible(dev_priv->irq_queue, 1808 - i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || 1809 - atomic_read(&dev_priv->mm.wedged)); 1895 + ret = wait_event_interruptible(ring->irq_queue, 1896 + i915_seqno_passed( 1897 + ring->get_gem_seqno(dev, ring), seqno) 1898 + || atomic_read(&dev_priv->mm.wedged)); 1810 1899 else 1811 - wait_event(dev_priv->irq_queue, 1812 - i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || 1813 - atomic_read(&dev_priv->mm.wedged)); 1900 + wait_event(ring->irq_queue, 1901 + i915_seqno_passed( 1902 + ring->get_gem_seqno(dev, ring), seqno) 1903 + || atomic_read(&dev_priv->mm.wedged)); 1814 1904 1815 - i915_user_irq_put(dev); 1816 - dev_priv->mm.waiting_gem_seqno = 0; 1905 + ring->user_irq_put(dev, ring); 1906 + ring->waiting_gem_seqno = 0; 1817 1907 1818 1908 trace_i915_gem_request_wait_end(dev, seqno); 1819 1909 } ··· 1824 1910 1825 1911 if (ret && ret != -ERESTARTSYS) 1826 1912 DRM_ERROR("%s returns %d (awaiting %d at %d)\n", 1827 - __func__, ret, seqno, i915_get_gem_seqno(dev)); 1913 + __func__, ret, seqno, ring->get_gem_seqno(dev, ring)); 1828 1914 1829 1915 /* Directly dispatch request retiring. While we have the work queue 1830 1916 * to handle this, the waiter on a request often wants an associated ··· 1832 1918 * a separate wait queue to handle that. 1833 1919 */ 1834 1920 if (ret == 0) 1835 - i915_gem_retire_requests(dev); 1921 + i915_gem_retire_requests(dev, ring); 1836 1922 1837 1923 return ret; 1838 1924 } ··· 1842 1928 * request and object lists appropriately for that event. 1843 1929 */ 1844 1930 static int 1845 - i915_wait_request(struct drm_device *dev, uint32_t seqno) 1931 + i915_wait_request(struct drm_device *dev, uint32_t seqno, 1932 + struct intel_ring_buffer *ring) 1846 1933 { 1847 - return i915_do_wait_request(dev, seqno, 1); 1934 + return i915_do_wait_request(dev, seqno, 1, ring); 1848 1935 } 1849 1936 1850 1937 static void ··· 1854 1939 uint32_t flush_domains) 1855 1940 { 1856 1941 drm_i915_private_t *dev_priv = dev->dev_private; 1857 - uint32_t cmd; 1858 - RING_LOCALS; 1859 - 1860 - #if WATCH_EXEC 1861 - DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, 1862 - invalidate_domains, flush_domains); 1863 - #endif 1864 - trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno, 1865 - invalidate_domains, flush_domains); 1866 - 1867 1942 if (flush_domains & I915_GEM_DOMAIN_CPU) 1868 1943 drm_agp_chipset_flush(dev); 1944 + dev_priv->render_ring.flush(dev, &dev_priv->render_ring, 1945 + invalidate_domains, 1946 + flush_domains); 1869 1947 1870 - if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { 1871 - /* 1872 - * read/write caches: 1873 - * 1874 - * I915_GEM_DOMAIN_RENDER is always invalidated, but is 1875 - * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is 1876 - * also flushed at 2d versus 3d pipeline switches. 1877 - * 1878 - * read-only caches: 1879 - * 1880 - * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if 1881 - * MI_READ_FLUSH is set, and is always flushed on 965. 1882 - * 1883 - * I915_GEM_DOMAIN_COMMAND may not exist? 
1884 - * 1885 - * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is 1886 - * invalidated when MI_EXE_FLUSH is set. 1887 - * 1888 - * I915_GEM_DOMAIN_VERTEX, which exists on 965, is 1889 - * invalidated with every MI_FLUSH. 1890 - * 1891 - * TLBs: 1892 - * 1893 - * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND 1894 - * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and 1895 - * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER 1896 - * are flushed at any MI_FLUSH. 1897 - */ 1948 + if (HAS_BSD(dev)) 1949 + dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring, 1950 + invalidate_domains, 1951 + flush_domains); 1952 + } 1898 1953 1899 - cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; 1900 - if ((invalidate_domains|flush_domains) & 1901 - I915_GEM_DOMAIN_RENDER) 1902 - cmd &= ~MI_NO_WRITE_FLUSH; 1903 - if (!IS_I965G(dev)) { 1904 - /* 1905 - * On the 965, the sampler cache always gets flushed 1906 - * and this bit is reserved. 1907 - */ 1908 - if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) 1909 - cmd |= MI_READ_FLUSH; 1910 - } 1911 - if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) 1912 - cmd |= MI_EXE_FLUSH; 1913 - 1914 - #if WATCH_EXEC 1915 - DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); 1916 - #endif 1917 - BEGIN_LP_RING(2); 1918 - OUT_RING(cmd); 1919 - OUT_RING(MI_NOOP); 1920 - ADVANCE_LP_RING(); 1921 - } 1954 + static void 1955 + i915_gem_flush_ring(struct drm_device *dev, 1956 + uint32_t invalidate_domains, 1957 + uint32_t flush_domains, 1958 + struct intel_ring_buffer *ring) 1959 + { 1960 + if (flush_domains & I915_GEM_DOMAIN_CPU) 1961 + drm_agp_chipset_flush(dev); 1962 + ring->flush(dev, ring, 1963 + invalidate_domains, 1964 + flush_domains); 1922 1965 } 1923 1966 1924 1967 /** ··· 1903 2030 DRM_INFO("%s: object %p wait for seqno %08x\n", 1904 2031 __func__, obj, obj_priv->last_rendering_seqno); 1905 2032 #endif 1906 - ret = i915_wait_request(dev, obj_priv->last_rendering_seqno); 2033 + ret = i915_wait_request(dev, 2034 + obj_priv->last_rendering_seqno, obj_priv->ring); 1907 2035 if (ret != 0) 1908 2036 return ret; 1909 2037 } ··· 2020 2146 { 2021 2147 drm_i915_private_t *dev_priv = dev->dev_private; 2022 2148 bool lists_empty; 2023 - uint32_t seqno; 2149 + uint32_t seqno1, seqno2; 2150 + int ret; 2024 2151 2025 2152 spin_lock(&dev_priv->mm.active_list_lock); 2026 - lists_empty = list_empty(&dev_priv->mm.flushing_list) && 2027 - list_empty(&dev_priv->mm.active_list); 2153 + lists_empty = (list_empty(&dev_priv->mm.flushing_list) && 2154 + list_empty(&dev_priv->render_ring.active_list) && 2155 + (!HAS_BSD(dev) || 2156 + list_empty(&dev_priv->bsd_ring.active_list))); 2028 2157 spin_unlock(&dev_priv->mm.active_list_lock); 2029 2158 2030 2159 if (lists_empty) ··· 2035 2158 2036 2159 /* Flush everything onto the inactive list. 
*/ 2037 2160 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 2038 - seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); 2039 - if (seqno == 0) 2161 + seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, 2162 + &dev_priv->render_ring); 2163 + if (seqno1 == 0) 2040 2164 return -ENOMEM; 2165 + ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring); 2041 2166 2042 - return i915_wait_request(dev, seqno); 2167 + if (HAS_BSD(dev)) { 2168 + seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, 2169 + &dev_priv->bsd_ring); 2170 + if (seqno2 == 0) 2171 + return -ENOMEM; 2172 + 2173 + ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring); 2174 + if (ret) 2175 + return ret; 2176 + } 2177 + 2178 + 2179 + return ret; 2043 2180 } 2044 2181 2045 2182 static int ··· 2066 2175 spin_lock(&dev_priv->mm.active_list_lock); 2067 2176 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 2068 2177 list_empty(&dev_priv->mm.flushing_list) && 2069 - list_empty(&dev_priv->mm.active_list)); 2178 + list_empty(&dev_priv->render_ring.active_list) && 2179 + (!HAS_BSD(dev) 2180 + || list_empty(&dev_priv->bsd_ring.active_list))); 2070 2181 spin_unlock(&dev_priv->mm.active_list_lock); 2071 2182 2072 2183 if (lists_empty) ··· 2088 2195 spin_lock(&dev_priv->mm.active_list_lock); 2089 2196 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 2090 2197 list_empty(&dev_priv->mm.flushing_list) && 2091 - list_empty(&dev_priv->mm.active_list)); 2198 + list_empty(&dev_priv->render_ring.active_list) && 2199 + (!HAS_BSD(dev) 2200 + || list_empty(&dev_priv->bsd_ring.active_list))); 2092 2201 spin_unlock(&dev_priv->mm.active_list_lock); 2093 2202 BUG_ON(!lists_empty); 2094 2203 ··· 2104 2209 struct drm_gem_object *obj; 2105 2210 int ret; 2106 2211 2212 + struct intel_ring_buffer *render_ring = &dev_priv->render_ring; 2213 + struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring; 2107 2214 for (;;) { 2108 - i915_gem_retire_requests(dev); 2215 + i915_gem_retire_requests(dev, render_ring); 2216 + 2217 + if (HAS_BSD(dev)) 2218 + i915_gem_retire_requests(dev, bsd_ring); 2109 2219 2110 2220 /* If there's an inactive buffer available now, grab it 2111 2221 * and be done. ··· 2134 2234 * things, wait for the next to finish and hopefully leave us 2135 2235 * a buffer to evict. 
2136 2236 */ 2137 - if (!list_empty(&dev_priv->mm.request_list)) { 2237 + if (!list_empty(&render_ring->request_list)) { 2138 2238 struct drm_i915_gem_request *request; 2139 2239 2140 - request = list_first_entry(&dev_priv->mm.request_list, 2240 + request = list_first_entry(&render_ring->request_list, 2141 2241 struct drm_i915_gem_request, 2142 2242 list); 2143 2243 2144 - ret = i915_wait_request(dev, request->seqno); 2244 + ret = i915_wait_request(dev, 2245 + request->seqno, request->ring); 2246 + if (ret) 2247 + return ret; 2248 + 2249 + continue; 2250 + } 2251 + 2252 + if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) { 2253 + struct drm_i915_gem_request *request; 2254 + 2255 + request = list_first_entry(&bsd_ring->request_list, 2256 + struct drm_i915_gem_request, 2257 + list); 2258 + 2259 + ret = i915_wait_request(dev, 2260 + request->seqno, request->ring); 2145 2261 if (ret) 2146 2262 return ret; 2147 2263 ··· 2184 2268 if (obj != NULL) { 2185 2269 uint32_t seqno; 2186 2270 2187 - i915_gem_flush(dev, 2271 + i915_gem_flush_ring(dev, 2188 2272 obj->write_domain, 2189 - obj->write_domain); 2190 - seqno = i915_add_request(dev, NULL, obj->write_domain); 2273 + obj->write_domain, 2274 + obj_priv->ring); 2275 + seqno = i915_add_request(dev, NULL, 2276 + obj->write_domain, 2277 + obj_priv->ring); 2191 2278 if (seqno == 0) 2192 2279 return -ENOMEM; 2193 2280 continue; ··· 2217 2298 struct address_space *mapping; 2218 2299 struct inode *inode; 2219 2300 struct page *page; 2301 + 2302 + BUG_ON(obj_priv->pages_refcount 2303 + == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT); 2220 2304 2221 2305 if (obj_priv->pages_refcount++ != 0) 2222 2306 return 0; ··· 2619 2697 return -EINVAL; 2620 2698 } 2621 2699 2700 + /* If the object is bigger than the entire aperture, reject it early 2701 + * before evicting everything in a vain attempt to find space. 2702 + */ 2703 + if (obj->size > dev->gtt_total) { 2704 + DRM_ERROR("Attempting to bind an object larger than the aperture\n"); 2705 + return -E2BIG; 2706 + } 2707 + 2622 2708 search_free: 2623 2709 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, 2624 2710 obj->size, alignment, 0); ··· 2737 2807 { 2738 2808 struct drm_device *dev = obj->dev; 2739 2809 uint32_t old_write_domain; 2810 + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2740 2811 2741 2812 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) 2742 2813 return; ··· 2745 2814 /* Queue the GPU write cache flushing we need. 
*/ 2746 2815 old_write_domain = obj->write_domain; 2747 2816 i915_gem_flush(dev, 0, obj->write_domain); 2748 - (void) i915_add_request(dev, NULL, obj->write_domain); 2817 + (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring); 2749 2818 BUG_ON(obj->write_domain); 2750 2819 2751 2820 trace_i915_gem_object_change_domain(obj, ··· 2885 2954 DRM_INFO("%s: object %p wait for seqno %08x\n", 2886 2955 __func__, obj, obj_priv->last_rendering_seqno); 2887 2956 #endif 2888 - ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0); 2957 + ret = i915_do_wait_request(dev, 2958 + obj_priv->last_rendering_seqno, 2959 + 0, 2960 + obj_priv->ring); 2889 2961 if (ret != 0) 2890 2962 return ret; 2891 2963 } 2892 2964 2965 + i915_gem_object_flush_cpu_write_domain(obj); 2966 + 2893 2967 old_write_domain = obj->write_domain; 2894 2968 old_read_domains = obj->read_domains; 2895 - 2896 - obj->read_domains &= I915_GEM_DOMAIN_GTT; 2897 - 2898 - i915_gem_object_flush_cpu_write_domain(obj); 2899 2969 2900 2970 /* It should now be out of any other write domains, and we can update 2901 2971 * the domain values for our changes. 2902 2972 */ 2903 2973 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 2904 - obj->read_domains |= I915_GEM_DOMAIN_GTT; 2974 + obj->read_domains = I915_GEM_DOMAIN_GTT; 2905 2975 obj->write_domain = I915_GEM_DOMAIN_GTT; 2906 2976 obj_priv->dirty = 1; 2907 2977 ··· 3286 3354 obj_priv->tiling_mode != I915_TILING_NONE; 3287 3355 3288 3356 /* Check fence reg constraints and rebind if necessary */ 3289 - if (need_fence && !i915_gem_object_fence_offset_ok(obj, 3290 - obj_priv->tiling_mode)) 3291 - i915_gem_object_unbind(obj); 3357 + if (need_fence && 3358 + !i915_gem_object_fence_offset_ok(obj, 3359 + obj_priv->tiling_mode)) { 3360 + ret = i915_gem_object_unbind(obj); 3361 + if (ret) 3362 + return ret; 3363 + } 3292 3364 3293 3365 /* Choose the GTT offset for our buffer and put it there. */ 3294 3366 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); ··· 3306 3370 if (need_fence) { 3307 3371 ret = i915_gem_object_get_fence_reg(obj); 3308 3372 if (ret != 0) { 3309 - if (ret != -EBUSY && ret != -ERESTARTSYS) 3310 - DRM_ERROR("Failure to install fence: %d\n", 3311 - ret); 3312 3373 i915_gem_object_unpin(obj); 3313 3374 return ret; 3314 3375 } ··· 3478 3545 return 0; 3479 3546 } 3480 3547 3481 - /** Dispatch a batchbuffer to the ring 3482 - */ 3483 - static int 3484 - i915_dispatch_gem_execbuffer(struct drm_device *dev, 3485 - struct drm_i915_gem_execbuffer2 *exec, 3486 - struct drm_clip_rect *cliprects, 3487 - uint64_t exec_offset) 3488 - { 3489 - drm_i915_private_t *dev_priv = dev->dev_private; 3490 - int nbox = exec->num_cliprects; 3491 - int i = 0, count; 3492 - uint32_t exec_start, exec_len; 3493 - RING_LOCALS; 3494 - 3495 - exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 3496 - exec_len = (uint32_t) exec->batch_len; 3497 - 3498 - trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1); 3499 - 3500 - count = nbox ? 
nbox : 1; 3501 - 3502 - for (i = 0; i < count; i++) { 3503 - if (i < nbox) { 3504 - int ret = i915_emit_box(dev, cliprects, i, 3505 - exec->DR1, exec->DR4); 3506 - if (ret) 3507 - return ret; 3508 - } 3509 - 3510 - if (IS_I830(dev) || IS_845G(dev)) { 3511 - BEGIN_LP_RING(4); 3512 - OUT_RING(MI_BATCH_BUFFER); 3513 - OUT_RING(exec_start | MI_BATCH_NON_SECURE); 3514 - OUT_RING(exec_start + exec_len - 4); 3515 - OUT_RING(0); 3516 - ADVANCE_LP_RING(); 3517 - } else { 3518 - BEGIN_LP_RING(2); 3519 - if (IS_I965G(dev)) { 3520 - OUT_RING(MI_BATCH_BUFFER_START | 3521 - (2 << 6) | 3522 - MI_BATCH_NON_SECURE_I965); 3523 - OUT_RING(exec_start); 3524 - } else { 3525 - OUT_RING(MI_BATCH_BUFFER_START | 3526 - (2 << 6)); 3527 - OUT_RING(exec_start | MI_BATCH_NON_SECURE); 3528 - } 3529 - ADVANCE_LP_RING(); 3530 - } 3531 - } 3532 - 3533 - /* XXX breadcrumb */ 3534 - return 0; 3535 - } 3536 - 3537 3548 /* Throttle our rendering by waiting until the ring has completed our requests 3538 3549 * emitted over 20 msec ago. 3539 3550 * ··· 3506 3629 if (time_after_eq(request->emitted_jiffies, recent_enough)) 3507 3630 break; 3508 3631 3509 - ret = i915_wait_request(dev, request->seqno); 3632 + ret = i915_wait_request(dev, request->seqno, request->ring); 3510 3633 if (ret != 0) 3511 3634 break; 3512 3635 } ··· 3663 3786 uint32_t seqno, flush_domains, reloc_index; 3664 3787 int pin_tries, flips; 3665 3788 3789 + struct intel_ring_buffer *ring = NULL; 3790 + 3666 3791 #if WATCH_EXEC 3667 3792 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", 3668 3793 (int) args->buffers_ptr, args->buffer_count, args->batch_len); 3669 3794 #endif 3795 + if (args->flags & I915_EXEC_BSD) { 3796 + if (!HAS_BSD(dev)) { 3797 + DRM_ERROR("execbuf with wrong flag\n"); 3798 + return -EINVAL; 3799 + } 3800 + ring = &dev_priv->bsd_ring; 3801 + } else { 3802 + ring = &dev_priv->render_ring; 3803 + } 3804 + 3670 3805 3671 3806 if (args->buffer_count < 1) { 3672 3807 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); ··· 3791 3902 if (ret != -ENOSPC || pin_tries >= 1) { 3792 3903 if (ret != -ERESTARTSYS) { 3793 3904 unsigned long long total_size = 0; 3794 - for (i = 0; i < args->buffer_count; i++) 3905 + int num_fences = 0; 3906 + for (i = 0; i < args->buffer_count; i++) { 3907 + obj_priv = object_list[i]->driver_private; 3908 + 3795 3909 total_size += object_list[i]->size; 3796 - DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n", 3910 + num_fences += 3911 + exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE && 3912 + obj_priv->tiling_mode != I915_TILING_NONE; 3913 + } 3914 + DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n", 3797 3915 pinned+1, args->buffer_count, 3798 - total_size, ret); 3916 + total_size, num_fences, 3917 + ret); 3799 3918 DRM_ERROR("%d objects [%d pinned], " 3800 3919 "%d object bytes [%d pinned], " 3801 3920 "%d/%d gtt bytes\n", ··· 3873 3976 i915_gem_flush(dev, 3874 3977 dev->invalidate_domains, 3875 3978 dev->flush_domains); 3876 - if (dev->flush_domains & I915_GEM_GPU_DOMAINS) 3979 + if (dev->flush_domains & I915_GEM_GPU_DOMAINS) { 3877 3980 (void)i915_add_request(dev, file_priv, 3878 - dev->flush_domains); 3981 + dev->flush_domains, 3982 + &dev_priv->render_ring); 3983 + 3984 + if (HAS_BSD(dev)) 3985 + (void)i915_add_request(dev, file_priv, 3986 + dev->flush_domains, 3987 + &dev_priv->bsd_ring); 3988 + } 3879 3989 } 3880 3990 3881 3991 for (i = 0; i < args->buffer_count; i++) { ··· 3919 4015 #endif 3920 4016 3921 4017 /* Exec the batchbuffer */ 3922 - ret = 
i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset); 4018 + ret = ring->dispatch_gem_execbuffer(dev, ring, args, 4019 + cliprects, exec_offset); 3923 4020 if (ret) { 3924 4021 DRM_ERROR("dispatch failed %d\n", ret); 3925 4022 goto err; ··· 3930 4025 * Ensure that the commands in the batch buffer are 3931 4026 * finished before the interrupt fires 3932 4027 */ 3933 - flush_domains = i915_retire_commands(dev); 4028 + flush_domains = i915_retire_commands(dev, ring); 3934 4029 3935 4030 i915_verify_inactive(dev, __FILE__, __LINE__); 3936 4031 ··· 3941 4036 * *some* interrupts representing completion of buffers that we can 3942 4037 * wait on when trying to clear up gtt space). 3943 4038 */ 3944 - seqno = i915_add_request(dev, file_priv, flush_domains); 4039 + seqno = i915_add_request(dev, file_priv, flush_domains, ring); 3945 4040 BUG_ON(seqno == 0); 3946 4041 for (i = 0; i < args->buffer_count; i++) { 3947 4042 struct drm_gem_object *obj = object_list[i]; 4043 + obj_priv = to_intel_bo(obj); 3948 4044 3949 - i915_gem_object_move_to_active(obj, seqno); 4045 + i915_gem_object_move_to_active(obj, seqno, ring); 3950 4046 #if WATCH_LRU 3951 4047 DRM_INFO("%s: move to exec list %p\n", __func__, obj); 3952 4048 #endif ··· 4059 4153 exec2.DR4 = args->DR4; 4060 4154 exec2.num_cliprects = args->num_cliprects; 4061 4155 exec2.cliprects_ptr = args->cliprects_ptr; 4062 - exec2.flags = 0; 4156 + exec2.flags = I915_EXEC_RENDER; 4063 4157 4064 4158 ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); 4065 4159 if (!ret) { ··· 4145 4239 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 4146 4240 int ret; 4147 4241 4242 + BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); 4243 + 4148 4244 i915_verify_inactive(dev, __FILE__, __LINE__); 4245 + 4246 + if (obj_priv->gtt_space != NULL) { 4247 + if (alignment == 0) 4248 + alignment = i915_gem_get_gtt_alignment(obj); 4249 + if (obj_priv->gtt_offset & (alignment - 1)) { 4250 + ret = i915_gem_object_unbind(obj); 4251 + if (ret) 4252 + return ret; 4253 + } 4254 + } 4255 + 4149 4256 if (obj_priv->gtt_space == NULL) { 4150 4257 ret = i915_gem_object_bind_to_gtt(obj, alignment); 4151 4258 if (ret) ··· 4311 4392 struct drm_i915_gem_busy *args = data; 4312 4393 struct drm_gem_object *obj; 4313 4394 struct drm_i915_gem_object *obj_priv; 4395 + drm_i915_private_t *dev_priv = dev->dev_private; 4314 4396 4315 4397 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 4316 4398 if (obj == NULL) { ··· 4326 4406 * actually unmasked, and our working set ends up being larger than 4327 4407 * required. 
4328 4408 */ 4329 - i915_gem_retire_requests(dev); 4409 + i915_gem_retire_requests(dev, &dev_priv->render_ring); 4410 + 4411 + if (HAS_BSD(dev)) 4412 + i915_gem_retire_requests(dev, &dev_priv->bsd_ring); 4330 4413 4331 4414 obj_priv = to_intel_bo(obj); 4332 4415 /* Don't count being on the flushing list against the object being ··· 4496 4573 4497 4574 mutex_lock(&dev->struct_mutex); 4498 4575 4499 - if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) { 4576 + if (dev_priv->mm.suspended || 4577 + (dev_priv->render_ring.gem_object == NULL) || 4578 + (HAS_BSD(dev) && 4579 + dev_priv->bsd_ring.gem_object == NULL)) { 4500 4580 mutex_unlock(&dev->struct_mutex); 4501 4581 return 0; 4502 4582 } ··· 4580 4654 return ret; 4581 4655 } 4582 4656 4583 - static int 4584 - i915_gem_init_hws(struct drm_device *dev) 4585 - { 4586 - drm_i915_private_t *dev_priv = dev->dev_private; 4587 - struct drm_gem_object *obj; 4588 - struct drm_i915_gem_object *obj_priv; 4589 - int ret; 4590 - 4591 - /* If we need a physical address for the status page, it's already 4592 - * initialized at driver load time. 4593 - */ 4594 - if (!I915_NEED_GFX_HWS(dev)) 4595 - return 0; 4596 - 4597 - obj = i915_gem_alloc_object(dev, 4096); 4598 - if (obj == NULL) { 4599 - DRM_ERROR("Failed to allocate status page\n"); 4600 - ret = -ENOMEM; 4601 - goto err; 4602 - } 4603 - obj_priv = to_intel_bo(obj); 4604 - obj_priv->agp_type = AGP_USER_CACHED_MEMORY; 4605 - 4606 - ret = i915_gem_object_pin(obj, 4096); 4607 - if (ret != 0) { 4608 - drm_gem_object_unreference(obj); 4609 - goto err_unref; 4610 - } 4611 - 4612 - dev_priv->status_gfx_addr = obj_priv->gtt_offset; 4613 - 4614 - dev_priv->hw_status_page = kmap(obj_priv->pages[0]); 4615 - if (dev_priv->hw_status_page == NULL) { 4616 - DRM_ERROR("Failed to map status page.\n"); 4617 - memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 4618 - ret = -EINVAL; 4619 - goto err_unpin; 4620 - } 4621 - 4622 - if (HAS_PIPE_CONTROL(dev)) { 4623 - ret = i915_gem_init_pipe_control(dev); 4624 - if (ret) 4625 - goto err_unpin; 4626 - } 4627 - 4628 - dev_priv->hws_obj = obj; 4629 - memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 4630 - if (IS_GEN6(dev)) { 4631 - I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr); 4632 - I915_READ(HWS_PGA_GEN6); /* posting read */ 4633 - } else { 4634 - I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); 4635 - I915_READ(HWS_PGA); /* posting read */ 4636 - } 4637 - DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); 4638 - 4639 - return 0; 4640 - 4641 - err_unpin: 4642 - i915_gem_object_unpin(obj); 4643 - err_unref: 4644 - drm_gem_object_unreference(obj); 4645 - err: 4646 - return 0; 4647 - } 4648 4657 4649 4658 static void 4650 4659 i915_gem_cleanup_pipe_control(struct drm_device *dev) ··· 4598 4737 dev_priv->seqno_page = NULL; 4599 4738 } 4600 4739 4601 - static void 4602 - i915_gem_cleanup_hws(struct drm_device *dev) 4603 - { 4604 - drm_i915_private_t *dev_priv = dev->dev_private; 4605 - struct drm_gem_object *obj; 4606 - struct drm_i915_gem_object *obj_priv; 4607 - 4608 - if (dev_priv->hws_obj == NULL) 4609 - return; 4610 - 4611 - obj = dev_priv->hws_obj; 4612 - obj_priv = to_intel_bo(obj); 4613 - 4614 - kunmap(obj_priv->pages[0]); 4615 - i915_gem_object_unpin(obj); 4616 - drm_gem_object_unreference(obj); 4617 - dev_priv->hws_obj = NULL; 4618 - 4619 - memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 4620 - dev_priv->hw_status_page = NULL; 4621 - 4622 - if (HAS_PIPE_CONTROL(dev)) 4623 - i915_gem_cleanup_pipe_control(dev); 4624 - 4625 
- /* Write high address into HWS_PGA when disabling. */ 4626 - I915_WRITE(HWS_PGA, 0x1ffff000); 4627 - } 4628 - 4629 4740 int 4630 4741 i915_gem_init_ringbuffer(struct drm_device *dev) 4631 4742 { 4632 4743 drm_i915_private_t *dev_priv = dev->dev_private; 4633 - struct drm_gem_object *obj; 4634 - struct drm_i915_gem_object *obj_priv; 4635 - drm_i915_ring_buffer_t *ring = &dev_priv->ring; 4636 4744 int ret; 4637 - u32 head; 4638 4745 4639 - ret = i915_gem_init_hws(dev); 4640 - if (ret != 0) 4641 - return ret; 4746 + dev_priv->render_ring = render_ring; 4642 4747 4643 - obj = i915_gem_alloc_object(dev, 128 * 1024); 4644 - if (obj == NULL) { 4645 - DRM_ERROR("Failed to allocate ringbuffer\n"); 4646 - i915_gem_cleanup_hws(dev); 4647 - return -ENOMEM; 4648 - } 4649 - obj_priv = to_intel_bo(obj); 4650 - 4651 - ret = i915_gem_object_pin(obj, 4096); 4652 - if (ret != 0) { 4653 - drm_gem_object_unreference(obj); 4654 - i915_gem_cleanup_hws(dev); 4655 - return ret; 4748 + if (!I915_NEED_GFX_HWS(dev)) { 4749 + dev_priv->render_ring.status_page.page_addr 4750 + = dev_priv->status_page_dmah->vaddr; 4751 + memset(dev_priv->render_ring.status_page.page_addr, 4752 + 0, PAGE_SIZE); 4656 4753 } 4657 4754 4658 - /* Set up the kernel mapping for the ring. */ 4659 - ring->Size = obj->size; 4660 - 4661 - ring->map.offset = dev->agp->base + obj_priv->gtt_offset; 4662 - ring->map.size = obj->size; 4663 - ring->map.type = 0; 4664 - ring->map.flags = 0; 4665 - ring->map.mtrr = 0; 4666 - 4667 - drm_core_ioremap_wc(&ring->map, dev); 4668 - if (ring->map.handle == NULL) { 4669 - DRM_ERROR("Failed to map ringbuffer.\n"); 4670 - memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); 4671 - i915_gem_object_unpin(obj); 4672 - drm_gem_object_unreference(obj); 4673 - i915_gem_cleanup_hws(dev); 4674 - return -EINVAL; 4675 - } 4676 - ring->ring_obj = obj; 4677 - ring->virtual_start = ring->map.handle; 4678 - 4679 - /* Stop the ring if it's running. */ 4680 - I915_WRITE(PRB0_CTL, 0); 4681 - I915_WRITE(PRB0_TAIL, 0); 4682 - I915_WRITE(PRB0_HEAD, 0); 4683 - 4684 - /* Initialize the ring. 
*/ 4685 - I915_WRITE(PRB0_START, obj_priv->gtt_offset); 4686 - head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 4687 - 4688 - /* G45 ring initialization fails to reset head to zero */ 4689 - if (head != 0) { 4690 - DRM_ERROR("Ring head not reset to zero " 4691 - "ctl %08x head %08x tail %08x start %08x\n", 4692 - I915_READ(PRB0_CTL), 4693 - I915_READ(PRB0_HEAD), 4694 - I915_READ(PRB0_TAIL), 4695 - I915_READ(PRB0_START)); 4696 - I915_WRITE(PRB0_HEAD, 0); 4697 - 4698 - DRM_ERROR("Ring head forced to zero " 4699 - "ctl %08x head %08x tail %08x start %08x\n", 4700 - I915_READ(PRB0_CTL), 4701 - I915_READ(PRB0_HEAD), 4702 - I915_READ(PRB0_TAIL), 4703 - I915_READ(PRB0_START)); 4755 + if (HAS_PIPE_CONTROL(dev)) { 4756 + ret = i915_gem_init_pipe_control(dev); 4757 + if (ret) 4758 + return ret; 4704 4759 } 4705 4760 4706 - I915_WRITE(PRB0_CTL, 4707 - ((obj->size - 4096) & RING_NR_PAGES) | 4708 - RING_NO_REPORT | 4709 - RING_VALID); 4761 + ret = intel_init_ring_buffer(dev, &dev_priv->render_ring); 4762 + if (ret) 4763 + goto cleanup_pipe_control; 4710 4764 4711 - head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 4712 - 4713 - /* If the head is still not zero, the ring is dead */ 4714 - if (head != 0) { 4715 - DRM_ERROR("Ring initialization failed " 4716 - "ctl %08x head %08x tail %08x start %08x\n", 4717 - I915_READ(PRB0_CTL), 4718 - I915_READ(PRB0_HEAD), 4719 - I915_READ(PRB0_TAIL), 4720 - I915_READ(PRB0_START)); 4721 - return -EIO; 4722 - } 4723 - 4724 - /* Update our cache of the ring state */ 4725 - if (!drm_core_check_feature(dev, DRIVER_MODESET)) 4726 - i915_kernel_lost_context(dev); 4727 - else { 4728 - ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 4729 - ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; 4730 - ring->space = ring->head - (ring->tail + 8); 4731 - if (ring->space < 0) 4732 - ring->space += ring->Size; 4733 - } 4734 - 4735 - if (IS_I9XX(dev) && !IS_GEN3(dev)) { 4736 - I915_WRITE(MI_MODE, 4737 - (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); 4765 + if (HAS_BSD(dev)) { 4766 + dev_priv->bsd_ring = bsd_ring; 4767 + ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring); 4768 + if (ret) 4769 + goto cleanup_render_ring; 4738 4770 } 4739 4771 4740 4772 return 0; 4773 + 4774 + cleanup_render_ring: 4775 + intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); 4776 + cleanup_pipe_control: 4777 + if (HAS_PIPE_CONTROL(dev)) 4778 + i915_gem_cleanup_pipe_control(dev); 4779 + return ret; 4741 4780 } 4742 4781 4743 4782 void ··· 4645 4884 { 4646 4885 drm_i915_private_t *dev_priv = dev->dev_private; 4647 4886 4648 - if (dev_priv->ring.ring_obj == NULL) 4649 - return; 4650 - 4651 - drm_core_ioremapfree(&dev_priv->ring.map, dev); 4652 - 4653 - i915_gem_object_unpin(dev_priv->ring.ring_obj); 4654 - drm_gem_object_unreference(dev_priv->ring.ring_obj); 4655 - dev_priv->ring.ring_obj = NULL; 4656 - memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); 4657 - 4658 - i915_gem_cleanup_hws(dev); 4887 + intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); 4888 + if (HAS_BSD(dev)) 4889 + intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); 4890 + if (HAS_PIPE_CONTROL(dev)) 4891 + i915_gem_cleanup_pipe_control(dev); 4659 4892 } 4660 4893 4661 4894 int ··· 4677 4922 } 4678 4923 4679 4924 spin_lock(&dev_priv->mm.active_list_lock); 4680 - BUG_ON(!list_empty(&dev_priv->mm.active_list)); 4925 + BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); 4926 + BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list)); 4681 4927 spin_unlock(&dev_priv->mm.active_list_lock); 4682 4928 4683 4929 
BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 4684 4930 BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); 4685 - BUG_ON(!list_empty(&dev_priv->mm.request_list)); 4931 + BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); 4932 + BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list)); 4686 4933 mutex_unlock(&dev->struct_mutex); 4687 4934 4688 4935 drm_irq_install(dev); ··· 4723 4966 drm_i915_private_t *dev_priv = dev->dev_private; 4724 4967 4725 4968 spin_lock_init(&dev_priv->mm.active_list_lock); 4726 - INIT_LIST_HEAD(&dev_priv->mm.active_list); 4727 4969 INIT_LIST_HEAD(&dev_priv->mm.flushing_list); 4728 4970 INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); 4729 4971 INIT_LIST_HEAD(&dev_priv->mm.inactive_list); 4730 - INIT_LIST_HEAD(&dev_priv->mm.request_list); 4731 4972 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4973 + INIT_LIST_HEAD(&dev_priv->render_ring.active_list); 4974 + INIT_LIST_HEAD(&dev_priv->render_ring.request_list); 4975 + if (HAS_BSD(dev)) { 4976 + INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list); 4977 + INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list); 4978 + } 4732 4979 for (i = 0; i < 16; i++) 4733 4980 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 4734 4981 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 4735 4982 i915_gem_retire_work_handler); 4736 - dev_priv->mm.next_gem_seqno = 1; 4737 - 4738 4983 spin_lock(&shrink_list_lock); 4739 4984 list_add(&dev_priv->mm.shrink_list, &shrink_list); 4740 4985 spin_unlock(&shrink_list_lock); ··· 4968 5209 4969 5210 spin_lock(&dev_priv->mm.active_list_lock); 4970 5211 lists_empty = list_empty(&dev_priv->mm.flushing_list) && 4971 - list_empty(&dev_priv->mm.active_list); 5212 + list_empty(&dev_priv->render_ring.active_list); 5213 + if (HAS_BSD(dev)) 5214 + lists_empty &= list_empty(&dev_priv->bsd_ring.active_list); 4972 5215 spin_unlock(&dev_priv->mm.active_list_lock); 4973 5216 4974 5217 return !lists_empty; ··· 5015 5254 continue; 5016 5255 5017 5256 spin_unlock(&shrink_list_lock); 5257 + i915_gem_retire_requests(dev, &dev_priv->render_ring); 5018 5258 5019 - i915_gem_retire_requests(dev); 5259 + if (HAS_BSD(dev)) 5260 + i915_gem_retire_requests(dev, &dev_priv->bsd_ring); 5020 5261 5021 5262 list_for_each_entry_safe(obj_priv, next_obj, 5022 5263 &dev_priv->mm.inactive_list,
+82 -100
drivers/gpu/drm/i915/i915_irq.c
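Note: the i915_irq.c hunks below drop the global i915_user_irq_get()/i915_user_irq_put() pair (its body is visible in the removed lines further down) in favour of the per-ring user_irq_get/user_irq_put hooks, and export i915_disable_irq()/ironlake_{enable,disable}_graphics_irq() so those hooks can reach the mask registers. A plausible render-ring implementation, mirroring the deleted code -- the actual body lives in intel_ringbuffer.c, and the ring-local refcount field name is an assumption:

/* Sketch under stated assumptions: mirrors the deleted
 * i915_user_irq_get(); ring->user_irq_refcount is an assumed field
 * name standing in for the old dev_priv->user_irq_refcount.
 */
static void render_ring_user_irq_get(struct drm_device *dev,
				     struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_graphics_irq(dev_priv,
						     GT_PIPE_NOTIFY);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}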
··· 53 53 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 54 54 55 55 /** Interrupts that we mask and unmask at runtime. */ 56 - #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) 56 + #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT) 57 57 58 58 #define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\ 59 59 PIPE_VBLANK_INTERRUPT_STATUS) ··· 74 74 } 75 75 } 76 76 77 - static inline void 77 + void 78 78 ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) 79 79 { 80 80 if ((dev_priv->gt_irq_mask_reg & mask) != mask) { ··· 115 115 } 116 116 } 117 117 118 - static inline void 118 + void 119 119 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) 120 120 { 121 121 if ((dev_priv->irq_mask_reg & mask) != mask) { ··· 278 278 { 279 279 drm_i915_private_t *dev_priv = dev->dev_private; 280 280 u32 busy_up, busy_down, max_avg, min_avg; 281 - u16 rgvswctl; 282 281 u8 new_delay = dev_priv->cur_delay; 283 282 284 - I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG); 283 + I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 285 284 busy_up = I915_READ(RCPREVBSYTUPAVG); 286 285 busy_down = I915_READ(RCPREVBSYTDNAVG); 287 286 max_avg = I915_READ(RCBMAXAVG); ··· 299 300 new_delay = dev_priv->min_delay; 300 301 } 301 302 302 - DRM_DEBUG("rps change requested: %d -> %d\n", 303 - dev_priv->cur_delay, new_delay); 304 - 305 - rgvswctl = I915_READ(MEMSWCTL); 306 - if (rgvswctl & MEMCTL_CMD_STS) { 307 - DRM_ERROR("gpu busy, RCS change rejected\n"); 308 - return; /* still busy with another command */ 309 - } 310 - 311 - /* Program the new state */ 312 - rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 313 - (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; 314 - I915_WRITE(MEMSWCTL, rgvswctl); 315 - POSTING_READ(MEMSWCTL); 316 - 317 - rgvswctl |= MEMCTL_CMD_STS; 318 - I915_WRITE(MEMSWCTL, rgvswctl); 319 - 320 - dev_priv->cur_delay = new_delay; 321 - 322 - DRM_DEBUG("rps changed\n"); 303 + if (ironlake_set_drps(dev, new_delay)) 304 + dev_priv->cur_delay = new_delay; 323 305 324 306 return; 325 307 } ··· 311 331 int ret = IRQ_NONE; 312 332 u32 de_iir, gt_iir, de_ier, pch_iir; 313 333 struct drm_i915_master_private *master_priv; 334 + struct intel_ring_buffer *render_ring = &dev_priv->render_ring; 314 335 315 336 /* disable master interrupt before clearing iir */ 316 337 de_ier = I915_READ(DEIER); ··· 335 354 } 336 355 337 356 if (gt_iir & GT_PIPE_NOTIFY) { 338 - u32 seqno = i915_get_gem_seqno(dev); 339 - dev_priv->mm.irq_gem_seqno = seqno; 357 + u32 seqno = render_ring->get_gem_seqno(dev, render_ring); 358 + render_ring->irq_gem_seqno = seqno; 340 359 trace_i915_gem_request_complete(dev, seqno); 341 - DRM_WAKEUP(&dev_priv->irq_queue); 360 + DRM_WAKEUP(&dev_priv->render_ring.irq_queue); 342 361 dev_priv->hangcheck_count = 0; 343 362 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 344 363 } 364 + if (gt_iir & GT_BSD_USER_INTERRUPT) 365 + DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); 366 + 345 367 346 368 if (de_iir & DE_GSE) 347 369 ironlake_opregion_gse_intr(dev); ··· 372 388 } 373 389 374 390 if (de_iir & DE_PCU_EVENT) { 375 - I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS)); 391 + I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 376 392 i915_handle_rps_change(dev); 377 393 } 378 394 ··· 520 536 */ 521 537 bbaddr = 0; 522 538 head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 523 - ring = (u32 *)(dev_priv->ring.virtual_start + head); 539 + ring = (u32 *)(dev_priv->render_ring.virtual_start + head); 524 540 525 - while (--ring >= 
(u32 *)dev_priv->ring.virtual_start) { 541 + while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { 526 542 bbaddr = i915_get_bbaddr(dev, ring); 527 543 if (bbaddr) 528 544 break; 529 545 } 530 546 531 547 if (bbaddr == 0) { 532 - ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size); 533 - while (--ring >= (u32 *)dev_priv->ring.virtual_start) { 548 + ring = (u32 *)(dev_priv->render_ring.virtual_start 549 + + dev_priv->render_ring.size); 550 + while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { 534 551 bbaddr = i915_get_bbaddr(dev, ring); 535 552 if (bbaddr) 536 553 break; ··· 572 587 return; 573 588 } 574 589 575 - error->seqno = i915_get_gem_seqno(dev); 590 + error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring); 576 591 error->eir = I915_READ(EIR); 577 592 error->pgtbl_er = I915_READ(PGTBL_ER); 578 593 error->pipeastat = I915_READ(PIPEASTAT); ··· 600 615 batchbuffer[0] = NULL; 601 616 batchbuffer[1] = NULL; 602 617 count = 0; 603 - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { 618 + list_for_each_entry(obj_priv, 619 + &dev_priv->render_ring.active_list, list) { 620 + 604 621 struct drm_gem_object *obj = &obj_priv->base; 605 622 606 623 if (batchbuffer[0] == NULL && ··· 626 639 error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); 627 640 628 641 /* Record the ringbuffer */ 629 - error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj); 642 + error->ringbuffer = i915_error_object_create(dev, 643 + dev_priv->render_ring.gem_object); 630 644 631 645 /* Record buffers on the active list. */ 632 646 error->active_bo = NULL; ··· 639 651 640 652 if (error->active_bo) { 641 653 int i = 0; 642 - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { 654 + list_for_each_entry(obj_priv, 655 + &dev_priv->render_ring.active_list, list) { 643 656 struct drm_gem_object *obj = &obj_priv->base; 644 657 645 658 error->active_bo[i].size = obj->size; ··· 692 703 i915_error_state_free(dev, error); 693 704 } 694 705 695 - /** 696 - * i915_handle_error - handle an error interrupt 697 - * @dev: drm device 698 - * 699 - * Do some basic checking of regsiter state at error interrupt time and 700 - * dump it to the syslog. Also call i915_capture_error_state() to make 701 - * sure we get a record and make it available in debugfs. Fire a uevent 702 - * so userspace knows something bad happened (should trigger collection 703 - * of a ring dump etc.). 
704 - */ 705 - static void i915_handle_error(struct drm_device *dev, bool wedged) 706 + static void i915_report_and_clear_eir(struct drm_device *dev) 706 707 { 707 708 struct drm_i915_private *dev_priv = dev->dev_private; 708 709 u32 eir = I915_READ(EIR); 709 - u32 pipea_stats = I915_READ(PIPEASTAT); 710 - u32 pipeb_stats = I915_READ(PIPEBSTAT); 711 710 712 - i915_capture_error_state(dev); 711 + if (!eir) 712 + return; 713 713 714 714 printk(KERN_ERR "render error detected, EIR: 0x%08x\n", 715 715 eir); ··· 744 766 } 745 767 746 768 if (eir & I915_ERROR_MEMORY_REFRESH) { 769 + u32 pipea_stats = I915_READ(PIPEASTAT); 770 + u32 pipeb_stats = I915_READ(PIPEBSTAT); 771 + 747 772 printk(KERN_ERR "memory refresh error\n"); 748 773 printk(KERN_ERR "PIPEASTAT: 0x%08x\n", 749 774 pipea_stats); ··· 803 822 I915_WRITE(EMR, I915_READ(EMR) | eir); 804 823 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 805 824 } 825 + } 826 + 827 + /** 828 + * i915_handle_error - handle an error interrupt 829 + * @dev: drm device 830 + * 831 + * Do some basic checking of regsiter state at error interrupt time and 832 + * dump it to the syslog. Also call i915_capture_error_state() to make 833 + * sure we get a record and make it available in debugfs. Fire a uevent 834 + * so userspace knows something bad happened (should trigger collection 835 + * of a ring dump etc.). 836 + */ 837 + static void i915_handle_error(struct drm_device *dev, bool wedged) 838 + { 839 + struct drm_i915_private *dev_priv = dev->dev_private; 840 + 841 + i915_capture_error_state(dev); 842 + i915_report_and_clear_eir(dev); 806 843 807 844 if (wedged) { 808 845 atomic_set(&dev_priv->mm.wedged, 1); ··· 828 829 /* 829 830 * Wakeup waiting processes so they don't hang 830 831 */ 831 - DRM_WAKEUP(&dev_priv->irq_queue); 832 + DRM_WAKEUP(&dev_priv->render_ring.irq_queue); 832 833 } 833 834 834 835 queue_work(dev_priv->wq, &dev_priv->error_work); ··· 847 848 unsigned long irqflags; 848 849 int irq_received; 849 850 int ret = IRQ_NONE; 851 + struct intel_ring_buffer *render_ring = &dev_priv->render_ring; 850 852 851 853 atomic_inc(&dev_priv->irq_received); 852 854 ··· 928 928 } 929 929 930 930 if (iir & I915_USER_INTERRUPT) { 931 - u32 seqno = i915_get_gem_seqno(dev); 932 - dev_priv->mm.irq_gem_seqno = seqno; 931 + u32 seqno = 932 + render_ring->get_gem_seqno(dev, render_ring); 933 + render_ring->irq_gem_seqno = seqno; 933 934 trace_i915_gem_request_complete(dev, seqno); 934 - DRM_WAKEUP(&dev_priv->irq_queue); 935 + DRM_WAKEUP(&dev_priv->render_ring.irq_queue); 935 936 dev_priv->hangcheck_count = 0; 936 937 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 937 938 } 939 + 940 + if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) 941 + DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); 938 942 939 943 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) 940 944 intel_prepare_page_flip(dev, 0); ··· 988 984 { 989 985 drm_i915_private_t *dev_priv = dev->dev_private; 990 986 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 991 - RING_LOCALS; 992 987 993 988 i915_kernel_lost_context(dev); 994 989 ··· 1009 1006 return dev_priv->counter; 1010 1007 } 1011 1008 1012 - void i915_user_irq_get(struct drm_device *dev) 1013 - { 1014 - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1015 - unsigned long irqflags; 1016 - 1017 - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1018 - if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { 1019 - if (HAS_PCH_SPLIT(dev)) 
1020 - ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); 1021 - else 1022 - i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 1023 - } 1024 - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 1025 - } 1026 - 1027 - void i915_user_irq_put(struct drm_device *dev) 1028 - { 1029 - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1030 - unsigned long irqflags; 1031 - 1032 - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1033 - BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); 1034 - if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { 1035 - if (HAS_PCH_SPLIT(dev)) 1036 - ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); 1037 - else 1038 - i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 1039 - } 1040 - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 1041 - } 1042 - 1043 1009 void i915_trace_irq_get(struct drm_device *dev, u32 seqno) 1044 1010 { 1045 1011 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1012 + struct intel_ring_buffer *render_ring = &dev_priv->render_ring; 1046 1013 1047 1014 if (dev_priv->trace_irq_seqno == 0) 1048 - i915_user_irq_get(dev); 1015 + render_ring->user_irq_get(dev, render_ring); 1049 1016 1050 1017 dev_priv->trace_irq_seqno = seqno; 1051 1018 } ··· 1025 1052 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1026 1053 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 1027 1054 int ret = 0; 1055 + struct intel_ring_buffer *render_ring = &dev_priv->render_ring; 1028 1056 1029 1057 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, 1030 1058 READ_BREADCRUMB(dev_priv)); ··· 1039 1065 if (master_priv->sarea_priv) 1040 1066 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 1041 1067 1042 - i915_user_irq_get(dev); 1043 - DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, 1068 + render_ring->user_irq_get(dev, render_ring); 1069 + DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ, 1044 1070 READ_BREADCRUMB(dev_priv) >= irq_nr); 1045 - i915_user_irq_put(dev); 1071 + render_ring->user_irq_put(dev, render_ring); 1046 1072 1047 1073 if (ret == -EBUSY) { 1048 1074 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", ··· 1061 1087 drm_i915_irq_emit_t *emit = data; 1062 1088 int result; 1063 1089 1064 - if (!dev_priv || !dev_priv->ring.virtual_start) { 1090 + if (!dev_priv || !dev_priv->render_ring.virtual_start) { 1065 1091 DRM_ERROR("called with no initialization\n"); 1066 1092 return -EINVAL; 1067 1093 } ··· 1207 1233 return -EINVAL; 1208 1234 } 1209 1235 1210 - struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) { 1236 + struct drm_i915_gem_request * 1237 + i915_get_tail_request(struct drm_device *dev) 1238 + { 1211 1239 drm_i915_private_t *dev_priv = dev->dev_private; 1212 - return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list); 1240 + return list_entry(dev_priv->render_ring.request_list.prev, 1241 + struct drm_i915_gem_request, list); 1213 1242 } 1214 1243 1215 1244 /** ··· 1237 1260 acthd = I915_READ(ACTHD_I965); 1238 1261 1239 1262 /* If all work is done then ACTHD clearly hasn't advanced. 
*/ 1240 - if (list_empty(&dev_priv->mm.request_list) || 1241 - i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) { 1263 + if (list_empty(&dev_priv->render_ring.request_list) || 1264 + i915_seqno_passed(i915_get_gem_seqno(dev, 1265 + &dev_priv->render_ring), 1266 + i915_get_tail_request(dev)->seqno)) { 1242 1267 dev_priv->hangcheck_count = 0; 1243 1268 return; 1244 1269 } ··· 1293 1314 /* enable kind of interrupts always enabled */ 1294 1315 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1295 1316 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1296 - u32 render_mask = GT_PIPE_NOTIFY; 1317 + u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; 1297 1318 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | 1298 1319 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1299 1320 ··· 1307 1328 (void) I915_READ(DEIER); 1308 1329 1309 1330 /* user interrupt should be enabled, but masked initial */ 1310 - dev_priv->gt_irq_mask_reg = 0xffffffff; 1331 + dev_priv->gt_irq_mask_reg = ~render_mask; 1311 1332 dev_priv->gt_irq_enable_reg = render_mask; 1312 1333 1313 1334 I915_WRITE(GTIIR, I915_READ(GTIIR)); ··· 1370 1391 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; 1371 1392 u32 error_mask; 1372 1393 1373 - DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); 1394 + DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); 1395 + 1396 + if (HAS_BSD(dev)) 1397 + DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); 1374 1398 1375 1399 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1376 1400
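The common thread in the i915_irq.c hunks above is that per-engine interrupt state (irq_queue, irq_gem_seqno, user_irq_refcount) moves out of dev_priv and into struct intel_ring_buffer, and the IIR decode then wakes whichever ring owns the asserted bit. A minimal user-space model of that dispatch follows; the struct, the wake() helper and the bit layout are stand-ins, only the shape mirrors the patch:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Stand-in for the driver's per-ring state; the field naming follows the patch. */
struct ring { const char *name; int irq_queue_waiters; };

#define USER_INTERRUPT     (1u << 1)   /* render breadcrumb, like I915_USER_INTERRUPT */
#define BSD_USER_INTERRUPT (1u << 25)  /* like the new I915_BSD_USER_INTERRUPT */

static void wake(struct ring *r) { printf("wake %s (%d waiters)\n", r->name, r->irq_queue_waiters); }

/* Decode an IIR value and wake the matching ring's wait queue. */
static void handle_iir(uint32_t iir, bool has_bsd, struct ring *render, struct ring *bsd)
{
    if (iir & USER_INTERRUPT)
        wake(render);
    if (has_bsd && (iir & BSD_USER_INTERRUPT))
        wake(bsd);
}

int main(void)
{
    struct ring render = { "render ring", 1 }, bsd = { "bsd ring", 0 };
    handle_iir(USER_INTERRUPT | BSD_USER_INTERRUPT, true, &render, &bsd);
    return 0;
}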
+81 -1
drivers/gpu/drm/i915/i915_reg.h
··· 334 334 #define I915_DEBUG_INTERRUPT (1<<2) 335 335 #define I915_USER_INTERRUPT (1<<1) 336 336 #define I915_ASLE_INTERRUPT (1<<0) 337 + #define I915_BSD_USER_INTERRUPT (1<<25) 337 338 #define EIR 0x020b0 338 339 #define EMR 0x020b4 339 340 #define ESR 0x020b8 ··· 369 368 #define BB_ADDR 0x02140 /* 8 bytes */ 370 369 #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ 371 370 371 + /* GEN6 interrupt control */ 372 + #define GEN6_RENDER_HWSTAM 0x2098 373 + #define GEN6_RENDER_IMR 0x20a8 374 + #define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8) 375 + #define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7) 376 + #define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6) 377 + #define GEN6_RENDER_L3_PARITY_ERROR (1 << 5) 378 + #define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4) 379 + #define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3) 380 + #define GEN6_RENDER_SYNC_STATUS (1 << 2) 381 + #define GEN6_RENDER_DEBUG_INTERRUPT (1 << 1) 382 + #define GEN6_RENDER_USER_INTERRUPT (1 << 0) 383 + 384 + #define GEN6_BLITTER_HWSTAM 0x22098 385 + #define GEN6_BLITTER_IMR 0x220a8 386 + #define GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT (1 << 26) 387 + #define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25) 388 + #define GEN6_BLITTER_SYNC_STATUS (1 << 24) 389 + #define GEN6_BLITTER_USER_INTERRUPT (1 << 22) 390 + /* 391 + * BSD (bit stream decoder) instruction and interrupt control register defines 392 + * (G4X and Ironlake only) 393 + */ 394 + 395 + #define BSD_RING_TAIL 0x04030 396 + #define BSD_RING_HEAD 0x04034 397 + #define BSD_RING_START 0x04038 398 + #define BSD_RING_CTL 0x0403c 399 + #define BSD_RING_ACTHD 0x04074 400 + #define BSD_HWS_PGA 0x04080 372 401 373 402 /* 374 403 * Framebuffer compression (915+ only) ··· 836 805 #define DCC_CHANNEL_XOR_DISABLE (1 << 10) 837 806 #define DCC_CHANNEL_XOR_BIT_17 (1 << 9) 838 807 808 + /** Pineview MCH register contains DDR3 setting */ 809 + #define CSHRDDR3CTL 0x101a8 810 + #define CSHRDDR3CTL_DDR3 (1 << 2) 811 + 839 812 /** 965 MCH register controlling DRAM channel configuration */ 840 813 #define C0DRB3 0x10206 841 814 #define C1DRB3 0x10606 ··· 860 825 #define CLKCFG_MEM_667 (2 << 4) 861 826 #define CLKCFG_MEM_800 (3 << 4) 862 827 #define CLKCFG_MEM_MASK (7 << 4) 828 + 829 + #define TR1 0x11006 830 + #define TSFS 0x11020 831 + #define TSFS_SLOPE_MASK 0x0000ff00 832 + #define TSFS_SLOPE_SHIFT 8 833 + #define TSFS_INTR_MASK 0x000000ff 863 834 864 835 #define CRSTANDVID 0x11100 865 836 #define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ ··· 1005 964 #define MEMSTAT_SRC_CTL_STDBY 3 1006 965 #define RCPREVBSYTUPAVG 0x113b8 1007 966 #define RCPREVBSYTDNAVG 0x113bc 967 + #define SDEW 0x1124c 968 + #define CSIEW0 0x11250 969 + #define CSIEW1 0x11254 970 + #define CSIEW2 0x11258 971 + #define PEW 0x1125c 972 + #define DEW 0x11270 973 + #define MCHAFE 0x112c0 974 + #define CSIEC 0x112e0 975 + #define DMIEC 0x112e4 976 + #define DDREC 0x112e8 977 + #define PEG0EC 0x112ec 978 + #define PEG1EC 0x112f0 979 + #define GFXEC 0x112f4 980 + #define RPPREVBSYTUPAVG 0x113b8 981 + #define RPPREVBSYTDNAVG 0x113bc 982 + #define ECR 0x11600 983 + #define ECR_GPFE (1<<31) 984 + #define ECR_IMONE (1<<30) 985 + #define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */ 986 + #define OGW0 0x11608 987 + #define OGW1 0x1160c 988 + #define EG0 0x11610 989 + #define EG1 0x11614 990 + #define EG2 0x11618 991 + #define EG3 0x1161c 992 + #define EG4 0x11620 993 + #define EG5 0x11624 994 + #define EG6 0x11628 995 + #define EG7 0x1162c 996 + #define PXW 0x11664 997 + #define PXWL 0x11680
998 + #define LCFUSE02 0x116c0 999 + #define LCFUSE_HIV_MASK 0x000000ff 1000 + #define CSIPLL0 0x12c10 1001 + #define DDRMPLL1 0x12c20 1008 1002 #define PEG_BAND_GAP_DATA 0x14d68 1009 1003 1010 1004 /* ··· 1131 1055 #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) 1132 1056 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) 1133 1057 #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ 1134 - #define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f 1135 1058 1136 1059 #define PORT_HOTPLUG_STAT 0x61114 1137 1060 #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) ··· 2430 2355 #define GT_PIPE_NOTIFY (1 << 4) 2431 2356 #define GT_SYNC_STATUS (1 << 2) 2432 2357 #define GT_USER_INTERRUPT (1 << 0) 2358 + #define GT_BSD_USER_INTERRUPT (1 << 5) 2359 + 2433 2360 #define GTISR 0x44010 2434 2361 #define GTIMR 0x44014 ··· 2767 2690 #define SDVO_ENCODING (0) 2768 2691 #define TMDS_ENCODING (2 << 10) 2769 2692 #define NULL_PACKET_VSYNC_ENABLE (1 << 9) 2693 + /* CPT */ 2694 + #define HDMI_MODE_SELECT (1 << 9) 2695 + #define DVI_MODE_SELECT (0) 2770 2696 #define SDVOB_BORDER_ENABLE (1 << 7) 2771 2697 #define AUDIO_ENABLE (1 << 6) 2772 2698 #define VSYNC_ACTIVE_HIGH (1 << 4)
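The new GEN6_RENDER_IMR/GEN6_BLITTER_IMR definitions above are interrupt mask registers: a set bit hides the source, so enabling a set of interrupts means writing the complement of the enable set, exactly the gt_irq_mask_reg = ~render_mask change in the irq setup earlier. A tiny sketch of that convention; the bit values are copied from the defines above, while imr_for_enabled() is purely illustrative, not a driver function:

#include <stdio.h>
#include <stdint.h>

#define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1u << 4)
#define GEN6_RENDER_USER_INTERRUPT                (1u << 0)

/* In an IMR-style register, a set bit masks the source off. */
static uint32_t imr_for_enabled(uint32_t enabled)
{
    return ~enabled;
}

int main(void)
{
    uint32_t imr = imr_for_enabled(GEN6_RENDER_USER_INTERRUPT |
                                   GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
    printf("IMR = 0x%08x\n", imr); /* 0xffffffee: everything but bits 0 and 4 masked */
    return 0;
}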
+7 -17
drivers/gpu/drm/i915/i915_trace.h
··· 53 53 __entry->obj, __entry->gtt_offset) 54 54 ); 55 55 56 - TRACE_EVENT(i915_gem_object_clflush, 57 - 58 - TP_PROTO(struct drm_gem_object *obj), 59 - 60 - TP_ARGS(obj), 61 - 62 - TP_STRUCT__entry( 63 - __field(struct drm_gem_object *, obj) 64 - ), 65 - 66 - TP_fast_assign( 67 - __entry->obj = obj; 68 - ), 69 - 70 - TP_printk("obj=%p", __entry->obj) 71 - ); 72 - 73 56 TRACE_EVENT(i915_gem_object_change_domain, 74 57 75 58 TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), ··· 113 130 ), 114 131 115 132 TP_printk("obj=%p", __entry->obj) 133 + ); 134 + 135 + DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, 136 + 137 + TP_PROTO(struct drm_gem_object *obj), 138 + 139 + TP_ARGS(obj) 116 140 ); 117 141 118 142 DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
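This hunk is pure deduplication: the hand-rolled TRACE_EVENT for i915_gem_object_clflush is dropped and re-declared as a DEFINE_EVENT against the existing i915_gem_object event class, so the entry layout, assign step and format string live in one place. The pattern looks roughly like the sketch below; the DECLARE_EVENT_CLASS body is not part of this hunk and is inferred from the deleted TRACE_EVENT:

/* Shared class: one entry layout, one assign step, one printk format. */
DECLARE_EVENT_CLASS(i915_gem_object,
    TP_PROTO(struct drm_gem_object *obj),
    TP_ARGS(obj),
    TP_STRUCT__entry(
        __field(struct drm_gem_object *, obj)
    ),
    TP_fast_assign(
        __entry->obj = obj;
    ),
    TP_printk("obj=%p", __entry->obj)
);

/* Each event becomes a two-line declaration instead of a full TRACE_EVENT copy. */
DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
    TP_PROTO(struct drm_gem_object *obj),
    TP_ARGS(obj)
);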
+10
drivers/gpu/drm/i915/intel_bios.c
··· 95 95 panel_fixed_mode->clock = dvo_timing->clock * 10; 96 96 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; 97 97 98 + if (dvo_timing->hsync_positive) 99 + panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC; 100 + else 101 + panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC; 102 + 103 + if (dvo_timing->vsync_positive) 104 + panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC; 105 + else 106 + panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; 107 + 98 108 /* Some VBTs have bogus h/vtotal values */ 99 109 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) 100 110 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
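The intel_bios.c addition honors the sync-polarity bits of the VBT panel timing descriptor instead of leaving the mode flags unset; each axis picks exactly one of the positive/negative flags. A self-contained version of the mapping follows; the DRM flag values are the standard ones, and the two-bit timing struct is a stand-in for the VBT descriptor:

#include <stdio.h>
#include <stdint.h>

/* DRM mode flag values, as in include/drm/drm_mode.h */
#define DRM_MODE_FLAG_PHSYNC (1 << 0)
#define DRM_MODE_FLAG_NHSYNC (1 << 1)
#define DRM_MODE_FLAG_PVSYNC (1 << 2)
#define DRM_MODE_FLAG_NVSYNC (1 << 3)

struct dvo_timing { unsigned hsync_positive:1, vsync_positive:1; }; /* stand-in */

static uint32_t sync_flags(const struct dvo_timing *t)
{
    uint32_t flags = 0;
    flags |= t->hsync_positive ? DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
    flags |= t->vsync_positive ? DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
    return flags;
}

int main(void)
{
    struct dvo_timing t = { .hsync_positive = 1, .vsync_positive = 0 };
    printf("flags=0x%x\n", sync_flags(&t)); /* PHSYNC | NVSYNC = 0x9 */
    return 0;
}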
+15 -8
drivers/gpu/drm/i915/intel_crt.c
··· 217 217 { 218 218 struct drm_device *dev = connector->dev; 219 219 struct drm_i915_private *dev_priv = dev->dev_private; 220 - u32 hotplug_en; 220 + u32 hotplug_en, orig, stat; 221 + bool ret = false; 221 222 int i, tries = 0; 222 223 223 224 if (HAS_PCH_SPLIT(dev)) ··· 233 232 tries = 2; 234 233 else 235 234 tries = 1; 236 - hotplug_en = I915_READ(PORT_HOTPLUG_EN); 237 - hotplug_en &= CRT_FORCE_HOTPLUG_MASK; 235 + hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN); 236 + hotplug_en &= CRT_HOTPLUG_MASK; 238 237 hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; 239 238 240 239 if (IS_G4X(dev)) ··· 256 255 } while (time_after(timeout, jiffies)); 257 256 } 258 257 259 - if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) != 260 - CRT_HOTPLUG_MONITOR_NONE) 261 - return true; 258 + stat = I915_READ(PORT_HOTPLUG_STAT); 259 + if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE) 260 + ret = true; 262 261 263 - return false; 262 + /* clear the interrupt we just generated, if any */ 263 + I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS); 264 + 265 + /* and put the bits back */ 266 + I915_WRITE(PORT_HOTPLUG_EN, orig); 267 + 268 + return ret; 264 269 } 265 270 266 271 static bool intel_crt_detect_ddc(struct drm_encoder *encoder) ··· 576 569 (1 << INTEL_ANALOG_CLONE_BIT) | 577 570 (1 << INTEL_SDVO_LVDS_CLONE_BIT); 578 571 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 579 - connector->interlace_allowed = 0; 572 + connector->interlace_allowed = 1; 580 573 connector->doublescan_allowed = 0; 581 574 582 575 drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
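The reworked CRT probe is a read/modify/probe/restore sequence: save PORT_HOTPLUG_EN, force a detection cycle, read the status, then ack the interrupt the probe itself raised and write the saved bits back so nothing leaks out of the function. A toy model of that shape, with register I/O reduced to an array; the bit values here are placeholders, not the hardware's:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint32_t regs[2];           /* [0] = enable reg, [1] = status reg (toy model) */
#define HOTPLUG_EN   0
#define HOTPLUG_STAT 1
#define FORCE_DETECT (1u << 3)     /* stand-in bit values */
#define MONITOR_MASK (3u << 8)

static uint32_t rd(int r) { return regs[r]; }
static void wr(int r, uint32_t v) { regs[r] = v; }

static bool probe_crt(void)
{
    uint32_t orig = rd(HOTPLUG_EN);
    bool present;

    wr(HOTPLUG_EN, orig | FORCE_DETECT);     /* kick off a forced detection cycle */
    /* ...the real code polls here until FORCE_DETECT self-clears... */
    present = (rd(HOTPLUG_STAT) & MONITOR_MASK) != 0;

    wr(HOTPLUG_STAT, MONITOR_MASK);          /* ack the interrupt we just generated */
    wr(HOTPLUG_EN, orig);                    /* and put the bits back */
    return present;
}

int main(void)
{
    regs[HOTPLUG_STAT] = 1u << 8;            /* pretend a monitor answered */
    printf("connected: %d\n", probe_crt());
    return 0;
}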
+212 -59
drivers/gpu/drm/i915/intel_display.c
··· 1029 1029 void i8xx_disable_fbc(struct drm_device *dev) 1030 1030 { 1031 1031 struct drm_i915_private *dev_priv = dev->dev_private; 1032 + unsigned long timeout = jiffies + msecs_to_jiffies(1); 1032 1033 u32 fbc_ctl; 1033 1034 1034 1035 if (!I915_HAS_FBC(dev)) 1035 1036 return; 1037 + 1038 + if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN)) 1039 + return; /* Already off, just return */ 1036 1040 1037 1041 /* Disable compression */ 1038 1042 fbc_ctl = I915_READ(FBC_CONTROL); ··· 1044 1040 I915_WRITE(FBC_CONTROL, fbc_ctl); 1045 1041 1046 1042 /* Wait for compressing bit to clear */ 1047 - while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) 1048 - ; /* nothing */ 1043 + while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) { 1044 + if (time_after(jiffies, timeout)) { 1045 + DRM_DEBUG_DRIVER("FBC idle timed out\n"); 1046 + break; 1047 + } 1048 + ; /* do nothing */ 1049 + } 1049 1050 1050 1051 intel_wait_for_vblank(dev); 1051 1052 ··· 1248 1239 return; 1249 1240 1250 1241 out_disable: 1251 - DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); 1252 1242 /* Multiple disables should be harmless */ 1253 - if (intel_fbc_enabled(dev)) 1243 + if (intel_fbc_enabled(dev)) { 1244 + DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); 1254 1245 intel_disable_fbc(dev); 1246 + } 1255 1247 } 1256 1248 1257 1249 static int ··· 1396 1386 Start = obj_priv->gtt_offset; 1397 1387 Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); 1398 1388 1399 - DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); 1389 + DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", 1390 + Start, Offset, x, y, crtc->fb->pitch); 1400 1391 I915_WRITE(dspstride, crtc->fb->pitch); 1401 1392 if (IS_I965G(dev)) { 1402 1393 I915_WRITE(dspbase, Offset); ··· 2356 2345 if (mode->clock * 3 > 27000 * 4) 2357 2346 return MODE_CLOCK_HIGH; 2358 2347 } 2348 + 2349 + drm_mode_set_crtcinfo(adjusted_mode, 0); 2359 2350 return true; 2360 2351 } 2361 2352 ··· 2642 2629 2643 2630 struct cxsr_latency { 2644 2631 int is_desktop; 2632 + int is_ddr3; 2645 2633 unsigned long fsb_freq; 2646 2634 unsigned long mem_freq; 2647 2635 unsigned long display_sr; ··· 2652 2638 }; 2653 2639 2654 2640 static struct cxsr_latency cxsr_latency_table[] = { 2655 - {1, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ 2656 - {1, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ 2657 - {1, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ 2641 + {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ 2642 + {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ 2643 + {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ 2644 + {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ 2645 + {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ 2658 2646 2659 - {1, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ 2660 - {1, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ 2661 - {1, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ 2647 + {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ 2648 + {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ 2649 + {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ 2650 + {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ 2651 + {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ 2662 2652 2663 - {1, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ 2664 - {1, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ 2665 - {1, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ 2653 + {1, 
0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ 2654 + {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ 2655 + {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ 2656 + {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ 2657 + {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ 2666 2658 2667 - {0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ 2668 - {0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ 2669 - {0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ 2659 + {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ 2660 + {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ 2661 + {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ 2662 + {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ 2663 + {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ 2670 2664 2671 - {0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ 2672 - {0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ 2673 - {0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ 2665 + {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ 2666 + {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ 2667 + {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ 2668 + {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ 2669 + {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ 2674 2670 2675 - {0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ 2676 - {0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ 2677 - {0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ 2671 + {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ 2672 + {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ 2673 + {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ 2674 + {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ 2675 + {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ 2678 2676 }; 2679 2677 2680 - static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, 2681 - int mem) 2678 + static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3, 2679 + int fsb, int mem) 2682 2680 { 2683 2681 int i; 2684 2682 struct cxsr_latency *latency; ··· 2701 2675 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { 2702 2676 latency = &cxsr_latency_table[i]; 2703 2677 if (is_desktop == latency->is_desktop && 2678 + is_ddr3 == latency->is_ddr3 && 2704 2679 fsb == latency->fsb_freq && mem == latency->mem_freq) 2705 2680 return latency; 2706 2681 } ··· 2816 2789 struct cxsr_latency *latency; 2817 2790 int sr_clock; 2818 2791 2819 - latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq, 2820 - dev_priv->mem_freq); 2792 + latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, 2793 + dev_priv->fsb_freq, dev_priv->mem_freq); 2821 2794 if (!latency) { 2822 2795 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); 2823 2796 pineview_disable_cxsr(dev); ··· 3799 3772 } 3800 3773 } 3801 3774 3775 + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 3776 + pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 3777 + /* the chip adds 2 halflines automatically */ 3778 + adjusted_mode->crtc_vdisplay -= 1; 3779 + adjusted_mode->crtc_vtotal -= 1; 3780 + adjusted_mode->crtc_vblank_start -= 1; 3781 + adjusted_mode->crtc_vblank_end -= 1; 3782 + adjusted_mode->crtc_vsync_end -= 1; 3783 + adjusted_mode->crtc_vsync_start -= 1; 3784 + } else 3785 + pipeconf &= 
~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ 3786 + 3802 3787 I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | 3803 3788 ((adjusted_mode->crtc_htotal - 1) << 16)); 3804 3789 I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ··· 4475 4436 4476 4437 mutex_lock(&dev->struct_mutex); 4477 4438 4439 + i915_update_gfx_val(dev_priv); 4440 + 4478 4441 if (IS_I945G(dev) || IS_I945GM(dev)) { 4479 4442 DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); 4480 4443 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); ··· 4605 4564 spin_lock_irqsave(&dev->event_lock, flags); 4606 4565 work = intel_crtc->unpin_work; 4607 4566 if (work == NULL || !work->pending) { 4608 - if (work && !work->pending) { 4609 - obj_priv = to_intel_bo(work->pending_flip_obj); 4610 - DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", 4611 - obj_priv, 4612 - atomic_read(&obj_priv->pending_flip)); 4613 - } 4614 4567 spin_unlock_irqrestore(&dev->event_lock, flags); 4615 4568 return; 4616 4569 } ··· 4664 4629 unsigned long flags; 4665 4630 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; 4666 4631 int ret, pipesrc; 4667 - RING_LOCALS; 4668 4632 4669 4633 work = kzalloc(sizeof *work, GFP_KERNEL); 4670 4634 if (work == NULL) 4671 4635 return -ENOMEM; 4672 - 4673 - mutex_lock(&dev->struct_mutex); 4674 4636 4675 4637 work->event = event; 4676 4638 work->dev = crtc->dev; ··· 4678 4646 /* We borrow the event spin lock for protecting unpin_work */ 4679 4647 spin_lock_irqsave(&dev->event_lock, flags); 4680 4648 if (intel_crtc->unpin_work) { 4681 - DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 4682 4649 spin_unlock_irqrestore(&dev->event_lock, flags); 4683 4650 kfree(work); 4684 - mutex_unlock(&dev->struct_mutex); 4651 + 4652 + DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 4685 4653 return -EBUSY; 4686 4654 } 4687 4655 intel_crtc->unpin_work = work; ··· 4690 4658 intel_fb = to_intel_framebuffer(fb); 4691 4659 obj = intel_fb->obj; 4692 4660 4661 + mutex_lock(&dev->struct_mutex); 4693 4662 ret = intel_pin_and_fence_fb_obj(dev, obj); 4694 4663 if (ret != 0) { 4695 - DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", 4696 - to_intel_bo(obj)); 4697 - kfree(work); 4698 - intel_crtc->unpin_work = NULL; 4699 4664 mutex_unlock(&dev->struct_mutex); 4665 + 4666 + spin_lock_irqsave(&dev->event_lock, flags); 4667 + intel_crtc->unpin_work = NULL; 4668 + spin_unlock_irqrestore(&dev->event_lock, flags); 4669 + 4670 + kfree(work); 4671 + 4672 + DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", 4673 + to_intel_bo(obj)); 4700 4674 return ret; 4701 4675 } 4702 4676 ··· 5061 5023 return NULL; 5062 5024 } 5063 5025 5026 + bool ironlake_set_drps(struct drm_device *dev, u8 val) 5027 + { 5028 + struct drm_i915_private *dev_priv = dev->dev_private; 5029 + u16 rgvswctl; 5030 + 5031 + rgvswctl = I915_READ16(MEMSWCTL); 5032 + if (rgvswctl & MEMCTL_CMD_STS) { 5033 + DRM_DEBUG("gpu busy, RCS change rejected\n"); 5034 + return false; /* still busy with another command */ 5035 + } 5036 + 5037 + rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 5038 + (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; 5039 + I915_WRITE16(MEMSWCTL, rgvswctl); 5040 + POSTING_READ16(MEMSWCTL); 5041 + 5042 + rgvswctl |= MEMCTL_CMD_STS; 5043 + I915_WRITE16(MEMSWCTL, rgvswctl); 5044 + 5045 + return true; 5046 + } 5047 + 5064 5048 void ironlake_enable_drps(struct drm_device *dev) 5065 5049 { 5066 5050 struct drm_i915_private *dev_priv = dev->dev_private; 5067 - u32 rgvmodectl = I915_READ(MEMMODECTL), 
rgvswctl; 5051 + u32 rgvmodectl = I915_READ(MEMMODECTL); 5068 5052 u8 fmax, fmin, fstart, vstart; 5069 5053 int i = 0; 5070 5054 ··· 5105 5045 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 5106 5046 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 5107 5047 MEMMODE_FSTART_SHIFT; 5048 + fstart = fmax; 5049 + 5108 5050 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> 5109 5051 PXVFREQ_PX_SHIFT; 5110 5052 5111 - dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */ 5053 + dev_priv->fmax = fstart; /* IPS callback will increase this */ 5054 + dev_priv->fstart = fstart; 5055 + 5056 + dev_priv->max_delay = fmax; 5112 5057 dev_priv->min_delay = fmin; 5113 5058 dev_priv->cur_delay = fstart; 5059 + 5060 + DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin, 5061 + fstart); 5114 5062 5115 5063 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 5116 5064 ··· 5141 5073 } 5142 5074 msleep(1); 5143 5075 5144 - rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 5145 - (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; 5146 - I915_WRITE(MEMSWCTL, rgvswctl); 5147 - POSTING_READ(MEMSWCTL); 5076 + ironlake_set_drps(dev, fstart); 5148 5077 5149 - rgvswctl |= MEMCTL_CMD_STS; 5150 - I915_WRITE(MEMSWCTL, rgvswctl); 5078 + dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + 5079 + I915_READ(0x112e0); 5080 + dev_priv->last_time1 = jiffies_to_msecs(jiffies); 5081 + dev_priv->last_count2 = I915_READ(0x112f4); 5082 + getrawmonotonic(&dev_priv->last_time2); 5151 5083 } 5152 5084 5153 5085 void ironlake_disable_drps(struct drm_device *dev) 5154 5086 { 5155 5087 struct drm_i915_private *dev_priv = dev->dev_private; 5156 - u32 rgvswctl; 5157 - u8 fstart; 5088 + u16 rgvswctl = I915_READ16(MEMSWCTL); 5158 5089 5159 5090 /* Ack interrupts, disable EFC interrupt */ 5160 5091 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); ··· 5163 5096 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); 5164 5097 5165 5098 /* Go back to the starting frequency */ 5166 - fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >> 5167 - MEMMODE_FSTART_SHIFT; 5168 - rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 5169 - (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; 5170 - I915_WRITE(MEMSWCTL, rgvswctl); 5099 + ironlake_set_drps(dev, dev_priv->fstart); 5171 5100 msleep(1); 5172 5101 rgvswctl |= MEMCTL_CMD_STS; 5173 5102 I915_WRITE(MEMSWCTL, rgvswctl); 5174 5103 msleep(1); 5175 5104 5105 + } 5106 + 5107 + static unsigned long intel_pxfreq(u32 vidfreq) 5108 + { 5109 + unsigned long freq; 5110 + int div = (vidfreq & 0x3f0000) >> 16; 5111 + int post = (vidfreq & 0x3000) >> 12; 5112 + int pre = (vidfreq & 0x7); 5113 + 5114 + if (!pre) 5115 + return 0; 5116 + 5117 + freq = ((div * 133333) / ((1<<post) * pre)); 5118 + 5119 + return freq; 5120 + } 5121 + 5122 + void intel_init_emon(struct drm_device *dev) 5123 + { 5124 + struct drm_i915_private *dev_priv = dev->dev_private; 5125 + u32 lcfuse; 5126 + u8 pxw[16]; 5127 + int i; 5128 + 5129 + /* Disable to program */ 5130 + I915_WRITE(ECR, 0); 5131 + POSTING_READ(ECR); 5132 + 5133 + /* Program energy weights for various events */ 5134 + I915_WRITE(SDEW, 0x15040d00); 5135 + I915_WRITE(CSIEW0, 0x007f0000); 5136 + I915_WRITE(CSIEW1, 0x1e220004); 5137 + I915_WRITE(CSIEW2, 0x04000004); 5138 + 5139 + for (i = 0; i < 5; i++) 5140 + I915_WRITE(PEW + (i * 4), 0); 5141 + for (i = 0; i < 3; i++) 5142 + I915_WRITE(DEW + (i * 4), 0); 5143 + 5144 + /* Program P-state weights to account for frequency power adjustment */ 5145 + for (i = 0; i < 
16; i++) { 5146 + u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); 5147 + unsigned long freq = intel_pxfreq(pxvidfreq); 5148 + unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> 5149 + PXVFREQ_PX_SHIFT; 5150 + unsigned long val; 5151 + 5152 + val = vid * vid; 5153 + val *= (freq / 1000); 5154 + val *= 255; 5155 + val /= (127*127*900); 5156 + if (val > 0xff) 5157 + DRM_ERROR("bad pxval: %ld\n", val); 5158 + pxw[i] = val; 5159 + } 5160 + /* Render standby states get 0 weight */ 5161 + pxw[14] = 0; 5162 + pxw[15] = 0; 5163 + 5164 + for (i = 0; i < 4; i++) { 5165 + u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | 5166 + (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); 5167 + I915_WRITE(PXW + (i * 4), val); 5168 + } 5169 + 5170 + /* Adjust magic regs to magic values (more experimental results) */ 5171 + I915_WRITE(OGW0, 0); 5172 + I915_WRITE(OGW1, 0); 5173 + I915_WRITE(EG0, 0x00007f00); 5174 + I915_WRITE(EG1, 0x0000000e); 5175 + I915_WRITE(EG2, 0x000e0000); 5176 + I915_WRITE(EG3, 0x68000300); 5177 + I915_WRITE(EG4, 0x42000000); 5178 + I915_WRITE(EG5, 0x00140031); 5179 + I915_WRITE(EG6, 0); 5180 + I915_WRITE(EG7, 0); 5181 + 5182 + for (i = 0; i < 8; i++) 5183 + I915_WRITE(PXWL + (i * 4), 0); 5184 + 5185 + /* Enable PMON + select events */ 5186 + I915_WRITE(ECR, 0x80000019); 5187 + 5188 + lcfuse = I915_READ(LCFUSE02); 5189 + 5190 + dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); 5176 5191 } 5177 5192 5178 5193 void intel_init_clock_gating(struct drm_device *dev) ··· 5426 5277 dev_priv->display.update_wm = NULL; 5427 5278 } else if (IS_PINEVIEW(dev)) { 5428 5279 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), 5280 + dev_priv->is_ddr3, 5429 5281 dev_priv->fsb_freq, 5430 5282 dev_priv->mem_freq)) { 5431 5283 DRM_INFO("failed to find known CxSR latency " 5432 - "(found fsb freq %d, mem freq %d), " 5284 + "(found ddr%s fsb freq %d, mem freq %d), " 5433 5285 "disabling CxSR\n", 5286 + (dev_priv->is_ddr3 == 1) ? "3": "2", 5434 5287 dev_priv->fsb_freq, dev_priv->mem_freq); 5435 5288 /* Disable CxSR and never update its watermark again */ 5436 5289 pineview_disable_cxsr(dev); ··· 5505 5354 5506 5355 intel_init_clock_gating(dev); 5507 5356 5508 - if (IS_IRONLAKE_M(dev)) 5357 + if (IS_IRONLAKE_M(dev)) { 5509 5358 ironlake_enable_drps(dev); 5359 + intel_init_emon(dev); 5360 + } 5510 5361 5511 5362 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 5512 5363 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
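Among the intel_display.c changes, the CxSR table gains an is_ddr3 column and intel_get_cxsr_latency() now keys on four fields; a miss disables CxSR outright. A trimmed, standalone version of that lookup, keeping one latency column and three rows copied from the table above:

#include <stdio.h>

struct cxsr_latency {
    int is_desktop, is_ddr3;
    unsigned long fsb_freq, mem_freq;
    unsigned long display_sr;      /* only one of the four latency columns kept */
};

/* A few rows copied from the table above. */
static const struct cxsr_latency table[] = {
    {1, 0, 800, 400, 3382},
    {1, 1, 800, 667, 6420},
    {0, 1, 400, 800, 6042},
};

static const struct cxsr_latency *
lookup(int is_desktop, int is_ddr3, unsigned long fsb, unsigned long mem)
{
    for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (table[i].is_desktop == is_desktop && table[i].is_ddr3 == is_ddr3 &&
            table[i].fsb_freq == fsb && table[i].mem_freq == mem)
            return &table[i];
    return NULL; /* caller disables CxSR, as pineview_disable_cxsr() does */
}

int main(void)
{
    const struct cxsr_latency *l = lookup(1, 1, 800, 667);
    printf("display_sr = %lu\n", l ? l->display_sr : 0);
    return 0;
}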
+6 -5
drivers/gpu/drm/i915/intel_dp.c
··· 576 576 struct intel_encoder *intel_encoder; 577 577 struct intel_dp_priv *dp_priv; 578 578 579 - if (!encoder || encoder->crtc != crtc) 579 + if (encoder->crtc != crtc) 580 580 continue; 581 581 582 582 intel_encoder = enc_to_intel_encoder(encoder); ··· 675 675 dp_priv->link_configuration[1] = dp_priv->lane_count; 676 676 677 677 /* 678 - * Check for DPCD version > 1.1, 679 - * enable enahanced frame stuff in that case 678 + * Check for DPCD version > 1.1 and enhanced framing support 680 679 */ 681 - if (dp_priv->dpcd[0] >= 0x11) { 680 + if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) { 682 681 dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 683 682 dp_priv->DP |= DP_ENHANCED_FRAMING; 684 683 } ··· 1207 1208 if (dp_priv->dpcd[0] != 0) 1208 1209 status = connector_status_connected; 1209 1210 } 1211 + DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0], 1212 + dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]); 1210 1213 return status; 1211 1214 } 1212 1215 ··· 1353 1352 struct intel_encoder *intel_encoder = NULL; 1354 1353 1355 1354 list_for_each_entry(encoder, &mode_config->encoder_list, head) { 1356 - if (!encoder || encoder->crtc != crtc) 1355 + if (encoder->crtc != crtc) 1357 1356 continue; 1358 1357 1359 1358 intel_encoder = enc_to_intel_encoder(encoder);
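The enhanced-framing fix requires two conditions rather than one: a DPCD revision of at least 1.1 and the sink actually advertising the capability. A small sketch of the gate; the bit position (bit 7 of DPCD byte 2, the max-lane-count register) follows my reading of the DisplayPort DPCD layout, so treat the constants as illustrative:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define DP_ENHANCED_FRAME_CAP (1 << 7)  /* assumed: bit 7 of DPCD byte 2 */

/* dpcd[0] is the DPCD revision (0x11 == 1.1), dpcd[2] carries the cap bit. */
static bool use_enhanced_framing(const uint8_t dpcd[4])
{
    return dpcd[0] >= 0x11 && (dpcd[2] & DP_ENHANCED_FRAME_CAP);
}

int main(void)
{
    uint8_t old_sink[4] = { 0x11, 0x0a, 0x04, 0x01 };  /* rev 1.1, no cap bit */
    uint8_t new_sink[4] = { 0x11, 0x0a, 0x84, 0x01 };  /* rev 1.1, cap bit set */
    printf("%d %d\n", use_enhanced_framing(old_sink), use_enhanced_framing(new_sink));
    return 0;
}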
+5 -1
drivers/gpu/drm/i915/intel_fb.c
··· 105 105 } 106 106 107 107 /* Flush everything out, we'll be doing GTT only from now on */ 108 - i915_gem_object_set_to_gtt_domain(fbo, 1); 108 + ret = i915_gem_object_set_to_gtt_domain(fbo, 1); 109 + if (ret) { 110 + DRM_ERROR("failed to bind fb: %d.\n", ret); 111 + goto out_unpin; 112 + } 109 113 110 114 info = framebuffer_alloc(0, device); 111 115 if (!info) {
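The intel_fb.c fix is the classic kernel unwind idiom: a return code that used to be ignored now jumps to the existing out_unpin label, so only the resources acquired so far are released. Reduced to a standalone sketch with stand-in pin/bind/unpin functions:

#include <stdio.h>

/* Stand-ins for pin/bind/unpin; bind fails here to exercise the unwind path. */
static int pin(void)    { printf("pin\n"); return 0; }
static int bind(void)   { printf("bind -> -EIO\n"); return -5; }
static void unpin(void) { printf("unpin\n"); }

static int setup_fb(void)
{
    int ret = pin();
    if (ret)
        return ret;

    ret = bind();               /* this return value was ignored before the fix */
    if (ret)
        goto out_unpin;         /* unwind exactly what succeeded so far */

    return 0;

out_unpin:
    unpin();
    return ret;
}

int main(void) { printf("ret=%d\n", setup_fb()); return 0; }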
+4 -1
drivers/gpu/drm/i915/intel_hdmi.c
··· 59 59 SDVO_VSYNC_ACTIVE_HIGH | 60 60 SDVO_HSYNC_ACTIVE_HIGH; 61 61 62 - if (hdmi_priv->has_hdmi_sink) 62 + if (hdmi_priv->has_hdmi_sink) { 63 63 sdvox |= SDVO_AUDIO_ENABLE; 64 + if (HAS_PCH_CPT(dev)) 65 + sdvox |= HDMI_MODE_SELECT; 66 + } 64 67 65 68 if (intel_crtc->pipe == 1) { 66 69 if (HAS_PCH_CPT(dev))
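On CPT the port register carries an explicit HDMI-versus-DVI mode select, so the encoder only sets HDMI_MODE_SELECT (alongside audio) when the sink is HDMI-capable. A sketch of how the bits compose; the two values match the i915_reg.h hunk above, the define names are simplified and the rest is scaffolding:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define AUDIO_ENABLE     (1u << 6)   /* from the reg.h hunk above */
#define HDMI_MODE_SELECT (1u << 9)   /* CPT: clear means DVI mode */

static uint32_t sdvox_bits(bool has_hdmi_sink, bool is_cpt)
{
    uint32_t sdvox = 0;
    if (has_hdmi_sink) {
        sdvox |= AUDIO_ENABLE;        /* audio only travels over HDMI */
        if (is_cpt)
            sdvox |= HDMI_MODE_SELECT; /* tell CPT this port is HDMI, not DVI */
    }
    return sdvox;
}

int main(void)
{
    printf("dvi: 0x%x  hdmi-on-cpt: 0x%x\n",
           sdvox_bits(false, true), sdvox_bits(true, true));
    return 0;
}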
+30 -22
drivers/gpu/drm/i915/intel_overlay.c
··· 211 211 static int intel_overlay_on(struct intel_overlay *overlay) 212 212 { 213 213 struct drm_device *dev = overlay->dev; 214 - drm_i915_private_t *dev_priv = dev->dev_private; 215 214 int ret; 216 - RING_LOCALS; 215 + drm_i915_private_t *dev_priv = dev->dev_private; 217 216 218 217 BUG_ON(overlay->active); 219 218 ··· 226 227 OUT_RING(MI_NOOP); 227 228 ADVANCE_LP_RING(); 228 229 229 - overlay->last_flip_req = i915_add_request(dev, NULL, 0); 230 + overlay->last_flip_req = 231 + i915_add_request(dev, NULL, 0, &dev_priv->render_ring); 230 232 if (overlay->last_flip_req == 0) 231 233 return -ENOMEM; 232 234 233 - ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); 235 + ret = i915_do_wait_request(dev, 236 + overlay->last_flip_req, 1, &dev_priv->render_ring); 234 237 if (ret != 0) 235 238 return ret; 236 239 ··· 249 248 drm_i915_private_t *dev_priv = dev->dev_private; 250 249 u32 flip_addr = overlay->flip_addr; 251 250 u32 tmp; 252 - RING_LOCALS; 253 251 254 252 BUG_ON(!overlay->active); 255 253 ··· 265 265 OUT_RING(flip_addr); 266 266 ADVANCE_LP_RING(); 267 267 268 - overlay->last_flip_req = i915_add_request(dev, NULL, 0); 268 + overlay->last_flip_req = 269 + i915_add_request(dev, NULL, 0, &dev_priv->render_ring); 269 270 } 270 271 271 272 static int intel_overlay_wait_flip(struct intel_overlay *overlay) ··· 275 274 drm_i915_private_t *dev_priv = dev->dev_private; 276 275 int ret; 277 276 u32 tmp; 278 - RING_LOCALS; 279 277 280 278 if (overlay->last_flip_req != 0) { 281 - ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); 279 + ret = i915_do_wait_request(dev, overlay->last_flip_req, 280 + 1, &dev_priv->render_ring); 282 281 if (ret == 0) { 283 282 overlay->last_flip_req = 0; 284 283 ··· 297 296 OUT_RING(MI_NOOP); 298 297 ADVANCE_LP_RING(); 299 298 300 - overlay->last_flip_req = i915_add_request(dev, NULL, 0); 299 + overlay->last_flip_req = 300 + i915_add_request(dev, NULL, 0, &dev_priv->render_ring); 301 301 if (overlay->last_flip_req == 0) 302 302 return -ENOMEM; 303 303 304 - ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); 304 + ret = i915_do_wait_request(dev, overlay->last_flip_req, 305 + 1, &dev_priv->render_ring); 305 306 if (ret != 0) 306 307 return ret; 307 308 ··· 317 314 { 318 315 u32 flip_addr = overlay->flip_addr; 319 316 struct drm_device *dev = overlay->dev; 320 - drm_i915_private_t *dev_priv = dev->dev_private; 317 + drm_i915_private_t *dev_priv = dev->dev_private; 321 318 int ret; 322 - RING_LOCALS; 323 319 324 320 BUG_ON(!overlay->active); 325 321 ··· 338 336 OUT_RING(MI_NOOP); 339 337 ADVANCE_LP_RING(); 340 338 341 - overlay->last_flip_req = i915_add_request(dev, NULL, 0); 339 + overlay->last_flip_req = 340 + i915_add_request(dev, NULL, 0, &dev_priv->render_ring); 342 341 if (overlay->last_flip_req == 0) 343 342 return -ENOMEM; 344 343 345 - ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); 344 + ret = i915_do_wait_request(dev, overlay->last_flip_req, 345 + 1, &dev_priv->render_ring); 346 346 if (ret != 0) 347 347 return ret; 348 348 ··· 358 354 OUT_RING(MI_NOOP); 359 355 ADVANCE_LP_RING(); 360 356 361 - overlay->last_flip_req = i915_add_request(dev, NULL, 0); 357 + overlay->last_flip_req = 358 + i915_add_request(dev, NULL, 0, &dev_priv->render_ring); 362 359 if (overlay->last_flip_req == 0) 363 360 return -ENOMEM; 364 361 365 - ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); 362 + ret = i915_do_wait_request(dev, overlay->last_flip_req, 363 + 1, &dev_priv->render_ring); 366 364 if (ret != 0) 367 365 return ret; 368 366 
··· 396 390 int interruptible) 397 391 { 398 392 struct drm_device *dev = overlay->dev; 399 - drm_i915_private_t *dev_priv = dev->dev_private; 400 393 struct drm_gem_object *obj; 394 + drm_i915_private_t *dev_priv = dev->dev_private; 401 395 u32 flip_addr; 402 396 int ret; 403 - RING_LOCALS; 404 397 405 398 if (overlay->hw_wedged == HW_WEDGED) 406 399 return -EIO; 407 400 408 401 if (overlay->last_flip_req == 0) { 409 - overlay->last_flip_req = i915_add_request(dev, NULL, 0); 402 + overlay->last_flip_req = 403 + i915_add_request(dev, NULL, 0, &dev_priv->render_ring); 410 404 if (overlay->last_flip_req == 0) 411 405 return -ENOMEM; 412 406 } 413 407 414 - ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible); 408 + ret = i915_do_wait_request(dev, overlay->last_flip_req, 409 + interruptible, &dev_priv->render_ring); 415 410 if (ret != 0) 416 411 return ret; 417 412 ··· 436 429 OUT_RING(MI_NOOP); 437 430 ADVANCE_LP_RING(); 438 431 439 - overlay->last_flip_req = i915_add_request(dev, NULL, 0); 432 + overlay->last_flip_req = i915_add_request(dev, NULL, 433 + 0, &dev_priv->render_ring); 440 434 if (overlay->last_flip_req == 0) 441 435 return -ENOMEM; 442 436 443 437 ret = i915_do_wait_request(dev, overlay->last_flip_req, 444 - interruptible); 438 + interruptible, &dev_priv->render_ring); 445 439 if (ret != 0) 446 440 return ret; 447 441
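Every overlay path above now names its target ring explicitly: i915_add_request(dev, NULL, 0, &dev_priv->render_ring) emits a breadcrumb and i915_do_wait_request(..., ring) blocks on it. The recurring emit-then-wait shape, modeled standalone with stand-in functions arranged the same way:

#include <stdio.h>
#include <stdint.h>

struct ring { const char *name; uint32_t next_seqno; };

/* Stand-ins shaped like i915_add_request()/i915_do_wait_request(). */
static uint32_t add_request(struct ring *ring)
{
    printf("emit breadcrumb %u on %s\n", ring->next_seqno, ring->name);
    return ring->next_seqno++;
}

static int wait_request(struct ring *ring, uint32_t seqno)
{
    printf("wait for %u on %s\n", seqno, ring->name);
    return 0;
}

/* The overlay pattern: queue commands, emit a request, block on it. */
static int overlay_sync(struct ring *render_ring)
{
    uint32_t seqno = add_request(render_ring);
    if (seqno == 0)
        return -12; /* -ENOMEM, as the real paths return */
    return wait_request(render_ring, seqno);
}

int main(void)
{
    struct ring render = { "render ring", 1 };
    return overlay_sync(&render);
}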
+849
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 1 + /* 2 + * Copyright © 2008-2010 Intel Corporation 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice (including the next 12 + * paragraph) shall be included in all copies or substantial portions of the 13 + * Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 + * IN THE SOFTWARE. 22 + * 23 + * Authors: 24 + * Eric Anholt <eric@anholt.net> 25 + * Zou Nan hai <nanhai.zou@intel.com> 26 + * Xiang Hai hao<haihao.xiang@intel.com> 27 + * 28 + */ 29 + 30 + #include "drmP.h" 31 + #include "drm.h" 32 + #include "i915_drv.h" 33 + #include "i915_drm.h" 34 + #include "i915_trace.h" 35 + 36 + static void 37 + render_ring_flush(struct drm_device *dev, 38 + struct intel_ring_buffer *ring, 39 + u32 invalidate_domains, 40 + u32 flush_domains) 41 + { 42 + #if WATCH_EXEC 43 + DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, 44 + invalidate_domains, flush_domains); 45 + #endif 46 + u32 cmd; 47 + trace_i915_gem_request_flush(dev, ring->next_seqno, 48 + invalidate_domains, flush_domains); 49 + 50 + if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { 51 + /* 52 + * read/write caches: 53 + * 54 + * I915_GEM_DOMAIN_RENDER is always invalidated, but is 55 + * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is 56 + * also flushed at 2d versus 3d pipeline switches. 57 + * 58 + * read-only caches: 59 + * 60 + * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if 61 + * MI_READ_FLUSH is set, and is always flushed on 965. 62 + * 63 + * I915_GEM_DOMAIN_COMMAND may not exist? 64 + * 65 + * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is 66 + * invalidated when MI_EXE_FLUSH is set. 67 + * 68 + * I915_GEM_DOMAIN_VERTEX, which exists on 965, is 69 + * invalidated with every MI_FLUSH. 70 + * 71 + * TLBs: 72 + * 73 + * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND 74 + * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and 75 + * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER 76 + * are flushed at any MI_FLUSH. 77 + */ 78 + 79 + cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; 80 + if ((invalidate_domains|flush_domains) & 81 + I915_GEM_DOMAIN_RENDER) 82 + cmd &= ~MI_NO_WRITE_FLUSH; 83 + if (!IS_I965G(dev)) { 84 + /* 85 + * On the 965, the sampler cache always gets flushed 86 + * and this bit is reserved. 
87 + */ 88 + if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) 89 + cmd |= MI_READ_FLUSH; 90 + } 91 + if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) 92 + cmd |= MI_EXE_FLUSH; 93 + 94 + #if WATCH_EXEC 95 + DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); 96 + #endif 97 + intel_ring_begin(dev, ring, 8); 98 + intel_ring_emit(dev, ring, cmd); 99 + intel_ring_emit(dev, ring, MI_NOOP); 100 + intel_ring_advance(dev, ring); 101 + } 102 + } 103 + 104 + static unsigned int render_ring_get_head(struct drm_device *dev, 105 + struct intel_ring_buffer *ring) 106 + { 107 + drm_i915_private_t *dev_priv = dev->dev_private; 108 + return I915_READ(PRB0_HEAD) & HEAD_ADDR; 109 + } 110 + 111 + static unsigned int render_ring_get_tail(struct drm_device *dev, 112 + struct intel_ring_buffer *ring) 113 + { 114 + drm_i915_private_t *dev_priv = dev->dev_private; 115 + return I915_READ(PRB0_TAIL) & TAIL_ADDR; 116 + } 117 + 118 + static unsigned int render_ring_get_active_head(struct drm_device *dev, 119 + struct intel_ring_buffer *ring) 120 + { 121 + drm_i915_private_t *dev_priv = dev->dev_private; 122 + u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; 123 + 124 + return I915_READ(acthd_reg); 125 + } 126 + 127 + static void render_ring_advance_ring(struct drm_device *dev, 128 + struct intel_ring_buffer *ring) 129 + { 130 + drm_i915_private_t *dev_priv = dev->dev_private; 131 + I915_WRITE(PRB0_TAIL, ring->tail); 132 + } 133 + 134 + static int init_ring_common(struct drm_device *dev, 135 + struct intel_ring_buffer *ring) 136 + { 137 + u32 head; 138 + drm_i915_private_t *dev_priv = dev->dev_private; 139 + struct drm_i915_gem_object *obj_priv; 140 + obj_priv = to_intel_bo(ring->gem_object); 141 + 142 + /* Stop the ring if it's running. */ 143 + I915_WRITE(ring->regs.ctl, 0); 144 + I915_WRITE(ring->regs.head, 0); 145 + I915_WRITE(ring->regs.tail, 0); 146 + 147 + /* Initialize the ring. 
*/ 148 + I915_WRITE(ring->regs.start, obj_priv->gtt_offset); 149 + head = ring->get_head(dev, ring); 150 + 151 + /* G45 ring initialization fails to reset head to zero */ 152 + if (head != 0) { 153 + DRM_ERROR("%s head not reset to zero " 154 + "ctl %08x head %08x tail %08x start %08x\n", 155 + ring->name, 156 + I915_READ(ring->regs.ctl), 157 + I915_READ(ring->regs.head), 158 + I915_READ(ring->regs.tail), 159 + I915_READ(ring->regs.start)); 160 + 161 + I915_WRITE(ring->regs.head, 0); 162 + 163 + DRM_ERROR("%s head forced to zero " 164 + "ctl %08x head %08x tail %08x start %08x\n", 165 + ring->name, 166 + I915_READ(ring->regs.ctl), 167 + I915_READ(ring->regs.head), 168 + I915_READ(ring->regs.tail), 169 + I915_READ(ring->regs.start)); 170 + } 171 + 172 + I915_WRITE(ring->regs.ctl, 173 + ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) 174 + | RING_NO_REPORT | RING_VALID); 175 + 176 + head = I915_READ(ring->regs.head) & HEAD_ADDR; 177 + /* If the head is still not zero, the ring is dead */ 178 + if (head != 0) { 179 + DRM_ERROR("%s initialization failed " 180 + "ctl %08x head %08x tail %08x start %08x\n", 181 + ring->name, 182 + I915_READ(ring->regs.ctl), 183 + I915_READ(ring->regs.head), 184 + I915_READ(ring->regs.tail), 185 + I915_READ(ring->regs.start)); 186 + return -EIO; 187 + } 188 + 189 + if (!drm_core_check_feature(dev, DRIVER_MODESET)) 190 + i915_kernel_lost_context(dev); 191 + else { 192 + ring->head = ring->get_head(dev, ring); 193 + ring->tail = ring->get_tail(dev, ring); 194 + ring->space = ring->head - (ring->tail + 8); 195 + if (ring->space < 0) 196 + ring->space += ring->size; 197 + } 198 + return 0; 199 + } 200 + 201 + static int init_render_ring(struct drm_device *dev, 202 + struct intel_ring_buffer *ring) 203 + { 204 + drm_i915_private_t *dev_priv = dev->dev_private; 205 + int ret = init_ring_common(dev, ring); 206 + if (IS_I9XX(dev) && !IS_GEN3(dev)) { 207 + I915_WRITE(MI_MODE, 208 + (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); 209 + } 210 + return ret; 211 + } 212 + 213 + #define PIPE_CONTROL_FLUSH(addr) \ 214 + do { \ 215 + OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ 216 + PIPE_CONTROL_DEPTH_STALL | 2); \ 217 + OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ 218 + OUT_RING(0); \ 219 + OUT_RING(0); \ 220 + } while (0) 221 + 222 + /** 223 + * Creates a new sequence number, emitting a write of it to the status page 224 + * plus an interrupt, which will trigger i915_user_interrupt_handler. 225 + * 226 + * Must be called with struct_lock held. 227 + * 228 + * Returned sequence numbers are nonzero on success. 229 + */ 230 + static u32 231 + render_ring_add_request(struct drm_device *dev, 232 + struct intel_ring_buffer *ring, 233 + struct drm_file *file_priv, 234 + u32 flush_domains) 235 + { 236 + u32 seqno; 237 + drm_i915_private_t *dev_priv = dev->dev_private; 238 + seqno = intel_ring_get_seqno(dev, ring); 239 + 240 + if (IS_GEN6(dev)) { 241 + BEGIN_LP_RING(6); 242 + OUT_RING(GFX_OP_PIPE_CONTROL | 3); 243 + OUT_RING(PIPE_CONTROL_QW_WRITE | 244 + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | 245 + PIPE_CONTROL_NOTIFY); 246 + OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); 247 + OUT_RING(seqno); 248 + OUT_RING(0); 249 + OUT_RING(0); 250 + ADVANCE_LP_RING(); 251 + } else if (HAS_PIPE_CONTROL(dev)) { 252 + u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; 253 + 254 + /* 255 + * Workaround qword write incoherence by flushing the 256 + * PIPE_NOTIFY buffers out to memory before requesting 257 + * an interrupt. 
258 + */ 259 + BEGIN_LP_RING(32); 260 + OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | 261 + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); 262 + OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); 263 + OUT_RING(seqno); 264 + OUT_RING(0); 265 + PIPE_CONTROL_FLUSH(scratch_addr); 266 + scratch_addr += 128; /* write to separate cachelines */ 267 + PIPE_CONTROL_FLUSH(scratch_addr); 268 + scratch_addr += 128; 269 + PIPE_CONTROL_FLUSH(scratch_addr); 270 + scratch_addr += 128; 271 + PIPE_CONTROL_FLUSH(scratch_addr); 272 + scratch_addr += 128; 273 + PIPE_CONTROL_FLUSH(scratch_addr); 274 + scratch_addr += 128; 275 + PIPE_CONTROL_FLUSH(scratch_addr); 276 + OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | 277 + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | 278 + PIPE_CONTROL_NOTIFY); 279 + OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); 280 + OUT_RING(seqno); 281 + OUT_RING(0); 282 + ADVANCE_LP_RING(); 283 + } else { 284 + BEGIN_LP_RING(4); 285 + OUT_RING(MI_STORE_DWORD_INDEX); 286 + OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 287 + OUT_RING(seqno); 288 + 289 + OUT_RING(MI_USER_INTERRUPT); 290 + ADVANCE_LP_RING(); 291 + } 292 + return seqno; 293 + } 294 + 295 + static u32 296 + render_ring_get_gem_seqno(struct drm_device *dev, 297 + struct intel_ring_buffer *ring) 298 + { 299 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 300 + if (HAS_PIPE_CONTROL(dev)) 301 + return ((volatile u32 *)(dev_priv->seqno_page))[0]; 302 + else 303 + return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 304 + } 305 + 306 + static void 307 + render_ring_get_user_irq(struct drm_device *dev, 308 + struct intel_ring_buffer *ring) 309 + { 310 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 311 + unsigned long irqflags; 312 + 313 + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 314 + if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) { 315 + if (HAS_PCH_SPLIT(dev)) 316 + ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); 317 + else 318 + i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 319 + } 320 + spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 321 + } 322 + 323 + static void 324 + render_ring_put_user_irq(struct drm_device *dev, 325 + struct intel_ring_buffer *ring) 326 + { 327 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 328 + unsigned long irqflags; 329 + 330 + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 331 + BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0); 332 + if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) { 333 + if (HAS_PCH_SPLIT(dev)) 334 + ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); 335 + else 336 + i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 337 + } 338 + spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 339 + } 340 + 341 + static void render_setup_status_page(struct drm_device *dev, 342 + struct intel_ring_buffer *ring) 343 + { 344 + drm_i915_private_t *dev_priv = dev->dev_private; 345 + if (IS_GEN6(dev)) { 346 + I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr); 347 + I915_READ(HWS_PGA_GEN6); /* posting read */ 348 + } else { 349 + I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); 350 + I915_READ(HWS_PGA); /* posting read */ 351 + } 352 + 353 + } 354 + 355 + void 356 + bsd_ring_flush(struct drm_device *dev, 357 + struct intel_ring_buffer *ring, 358 + u32 invalidate_domains, 359 + u32 flush_domains) 360 + { 361 + intel_ring_begin(dev, ring, 8); 362 + intel_ring_emit(dev, ring, 
MI_FLUSH); 363 + intel_ring_emit(dev, ring, MI_NOOP); 364 + intel_ring_advance(dev, ring); 365 + } 366 + 367 + static inline unsigned int bsd_ring_get_head(struct drm_device *dev, 368 + struct intel_ring_buffer *ring) 369 + { 370 + drm_i915_private_t *dev_priv = dev->dev_private; 371 + return I915_READ(BSD_RING_HEAD) & HEAD_ADDR; 372 + } 373 + 374 + static inline unsigned int bsd_ring_get_tail(struct drm_device *dev, 375 + struct intel_ring_buffer *ring) 376 + { 377 + drm_i915_private_t *dev_priv = dev->dev_private; 378 + return I915_READ(BSD_RING_TAIL) & TAIL_ADDR; 379 + } 380 + 381 + static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev, 382 + struct intel_ring_buffer *ring) 383 + { 384 + drm_i915_private_t *dev_priv = dev->dev_private; 385 + return I915_READ(BSD_RING_ACTHD); 386 + } 387 + 388 + static inline void bsd_ring_advance_ring(struct drm_device *dev, 389 + struct intel_ring_buffer *ring) 390 + { 391 + drm_i915_private_t *dev_priv = dev->dev_private; 392 + I915_WRITE(BSD_RING_TAIL, ring->tail); 393 + } 394 + 395 + static int init_bsd_ring(struct drm_device *dev, 396 + struct intel_ring_buffer *ring) 397 + { 398 + return init_ring_common(dev, ring); 399 + } 400 + 401 + static u32 402 + bsd_ring_add_request(struct drm_device *dev, 403 + struct intel_ring_buffer *ring, 404 + struct drm_file *file_priv, 405 + u32 flush_domains) 406 + { 407 + u32 seqno; 408 + seqno = intel_ring_get_seqno(dev, ring); 409 + intel_ring_begin(dev, ring, 4); 410 + intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); 411 + intel_ring_emit(dev, ring, 412 + I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 413 + intel_ring_emit(dev, ring, seqno); 414 + intel_ring_emit(dev, ring, MI_USER_INTERRUPT); 415 + intel_ring_advance(dev, ring); 416 + 417 + DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); 418 + 419 + return seqno; 420 + } 421 + 422 + static void bsd_setup_status_page(struct drm_device *dev, 423 + struct intel_ring_buffer *ring) 424 + { 425 + drm_i915_private_t *dev_priv = dev->dev_private; 426 + I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr); 427 + I915_READ(BSD_HWS_PGA); 428 + } 429 + 430 + static void 431 + bsd_ring_get_user_irq(struct drm_device *dev, 432 + struct intel_ring_buffer *ring) 433 + { 434 + /* do nothing */ 435 + } 436 + static void 437 + bsd_ring_put_user_irq(struct drm_device *dev, 438 + struct intel_ring_buffer *ring) 439 + { 440 + /* do nothing */ 441 + } 442 + 443 + static u32 444 + bsd_ring_get_gem_seqno(struct drm_device *dev, 445 + struct intel_ring_buffer *ring) 446 + { 447 + return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 448 + } 449 + 450 + static int 451 + bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, 452 + struct intel_ring_buffer *ring, 453 + struct drm_i915_gem_execbuffer2 *exec, 454 + struct drm_clip_rect *cliprects, 455 + uint64_t exec_offset) 456 + { 457 + uint32_t exec_start; 458 + exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 459 + intel_ring_begin(dev, ring, 2); 460 + intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | 461 + (2 << 6) | MI_BATCH_NON_SECURE_I965); 462 + intel_ring_emit(dev, ring, exec_start); 463 + intel_ring_advance(dev, ring); 464 + return 0; 465 + } 466 + 467 + 468 + static int 469 + render_ring_dispatch_gem_execbuffer(struct drm_device *dev, 470 + struct intel_ring_buffer *ring, 471 + struct drm_i915_gem_execbuffer2 *exec, 472 + struct drm_clip_rect *cliprects, 473 + uint64_t exec_offset) 474 + { 475 + drm_i915_private_t *dev_priv = dev->dev_private; 476 + int nbox = exec->num_cliprects; 
477 + int i = 0, count; 478 + uint32_t exec_start, exec_len; 479 + exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 480 + exec_len = (uint32_t) exec->batch_len; 481 + 482 + trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1); 483 + 484 + count = nbox ? nbox : 1; 485 + 486 + for (i = 0; i < count; i++) { 487 + if (i < nbox) { 488 + int ret = i915_emit_box(dev, cliprects, i, 489 + exec->DR1, exec->DR4); 490 + if (ret) 491 + return ret; 492 + } 493 + 494 + if (IS_I830(dev) || IS_845G(dev)) { 495 + intel_ring_begin(dev, ring, 4); 496 + intel_ring_emit(dev, ring, MI_BATCH_BUFFER); 497 + intel_ring_emit(dev, ring, 498 + exec_start | MI_BATCH_NON_SECURE); 499 + intel_ring_emit(dev, ring, exec_start + exec_len - 4); 500 + intel_ring_emit(dev, ring, 0); 501 + } else { 502 + intel_ring_begin(dev, ring, 4); 503 + if (IS_I965G(dev)) { 504 + intel_ring_emit(dev, ring, 505 + MI_BATCH_BUFFER_START | (2 << 6) 506 + | MI_BATCH_NON_SECURE_I965); 507 + intel_ring_emit(dev, ring, exec_start); 508 + } else { 509 + intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START 510 + | (2 << 6)); 511 + intel_ring_emit(dev, ring, exec_start | 512 + MI_BATCH_NON_SECURE); 513 + } 514 + } 515 + intel_ring_advance(dev, ring); 516 + } 517 + 518 + /* XXX breadcrumb */ 519 + return 0; 520 + } 521 + 522 + static void cleanup_status_page(struct drm_device *dev, 523 + struct intel_ring_buffer *ring) 524 + { 525 + drm_i915_private_t *dev_priv = dev->dev_private; 526 + struct drm_gem_object *obj; 527 + struct drm_i915_gem_object *obj_priv; 528 + 529 + obj = ring->status_page.obj; 530 + if (obj == NULL) 531 + return; 532 + obj_priv = to_intel_bo(obj); 533 + 534 + kunmap(obj_priv->pages[0]); 535 + i915_gem_object_unpin(obj); 536 + drm_gem_object_unreference(obj); 537 + ring->status_page.obj = NULL; 538 + 539 + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 540 + } 541 + 542 + static int init_status_page(struct drm_device *dev, 543 + struct intel_ring_buffer *ring) 544 + { 545 + drm_i915_private_t *dev_priv = dev->dev_private; 546 + struct drm_gem_object *obj; 547 + struct drm_i915_gem_object *obj_priv; 548 + int ret; 549 + 550 + obj = i915_gem_alloc_object(dev, 4096); 551 + if (obj == NULL) { 552 + DRM_ERROR("Failed to allocate status page\n"); 553 + ret = -ENOMEM; 554 + goto err; 555 + } 556 + obj_priv = to_intel_bo(obj); 557 + obj_priv->agp_type = AGP_USER_CACHED_MEMORY; 558 + 559 + ret = i915_gem_object_pin(obj, 4096); 560 + if (ret != 0) { 561 + goto err_unref; 562 + } 563 + 564 + ring->status_page.gfx_addr = obj_priv->gtt_offset; 565 + ring->status_page.page_addr = kmap(obj_priv->pages[0]); 566 + if (ring->status_page.page_addr == NULL) { 567 + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 568 + goto err_unpin; 569 + } 570 + ring->status_page.obj = obj; 571 + memset(ring->status_page.page_addr, 0, PAGE_SIZE); 572 + 573 + ring->setup_status_page(dev, ring); 574 + DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", 575 + ring->name, ring->status_page.gfx_addr); 576 + 577 + return 0; 578 + 579 + err_unpin: 580 + i915_gem_object_unpin(obj); 581 + err_unref: 582 + drm_gem_object_unreference(obj); 583 + err: 584 + return ret; 585 + } 586 + 587 + 588 + int intel_init_ring_buffer(struct drm_device *dev, 589 + struct intel_ring_buffer *ring) 590 + { 591 + int ret; 592 + struct drm_i915_gem_object *obj_priv; 593 + struct drm_gem_object *obj; 594 + ring->dev = dev; 595 + 596 + if (I915_NEED_GFX_HWS(dev)) { 597 + ret = init_status_page(dev, ring); 598 + if (ret) 599 + return ret; 600 + } 601 + 602 + 
obj = i915_gem_alloc_object(dev, ring->size); 603 + if (obj == NULL) { 604 + DRM_ERROR("Failed to allocate ringbuffer\n"); 605 + ret = -ENOMEM; 606 + goto cleanup; 607 + } 608 + 609 + ring->gem_object = obj; 610 + 611 + ret = i915_gem_object_pin(obj, ring->alignment); 612 + if (ret != 0) { 613 + drm_gem_object_unreference(obj); 614 + goto cleanup; 615 + } 616 + 617 + obj_priv = to_intel_bo(obj); 618 + ring->map.size = ring->size; 619 + ring->map.offset = dev->agp->base + obj_priv->gtt_offset; 620 + ring->map.type = 0; 621 + ring->map.flags = 0; 622 + ring->map.mtrr = 0; 623 + 624 + drm_core_ioremap_wc(&ring->map, dev); 625 + if (ring->map.handle == NULL) { 626 + DRM_ERROR("Failed to map ringbuffer.\n"); 627 + i915_gem_object_unpin(obj); 628 + drm_gem_object_unreference(obj); 629 + ret = -EINVAL; 630 + goto cleanup; 631 + } 632 + 633 + ring->virtual_start = ring->map.handle; 634 + ret = ring->init(dev, ring); 635 + if (ret != 0) { 636 + intel_cleanup_ring_buffer(dev, ring); 637 + return ret; 638 + } 639 + 640 + if (!drm_core_check_feature(dev, DRIVER_MODESET)) 641 + i915_kernel_lost_context(dev); 642 + else { 643 + ring->head = ring->get_head(dev, ring); 644 + ring->tail = ring->get_tail(dev, ring); 645 + ring->space = ring->head - (ring->tail + 8); 646 + if (ring->space < 0) 647 + ring->space += ring->size; 648 + } 649 + INIT_LIST_HEAD(&ring->active_list); 650 + INIT_LIST_HEAD(&ring->request_list); 651 + return ret; 652 + cleanup: 653 + cleanup_status_page(dev, ring); 654 + return ret; 655 + } 656 + 657 + void intel_cleanup_ring_buffer(struct drm_device *dev, 658 + struct intel_ring_buffer *ring) 659 + { 660 + if (ring->gem_object == NULL) 661 + return; 662 + 663 + drm_core_ioremapfree(&ring->map, dev); 664 + 665 + i915_gem_object_unpin(ring->gem_object); 666 + drm_gem_object_unreference(ring->gem_object); 667 + ring->gem_object = NULL; 668 + cleanup_status_page(dev, ring); 669 + } 670 + 671 + int intel_wrap_ring_buffer(struct drm_device *dev, 672 + struct intel_ring_buffer *ring) 673 + { 674 + unsigned int *virt; 675 + int rem; 676 + rem = ring->size - ring->tail; 677 + 678 + if (ring->space < rem) { 679 + int ret = intel_wait_ring_buffer(dev, ring, rem); 680 + if (ret) 681 + return ret; 682 + } 683 + 684 + virt = (unsigned int *)(ring->virtual_start + ring->tail); 685 + rem /= 4; 686 + while (rem--) 687 + *virt++ = MI_NOOP; 688 + 689 + ring->tail = 0; 690 + 691 + return 0; 692 + } 693 + 694 + int intel_wait_ring_buffer(struct drm_device *dev, 695 + struct intel_ring_buffer *ring, int n) 696 + { 697 + unsigned long end; 698 + 699 + trace_i915_ring_wait_begin (dev); 700 + end = jiffies + 3 * HZ; 701 + do { 702 + ring->head = ring->get_head(dev, ring); 703 + ring->space = ring->head - (ring->tail + 8); 704 + if (ring->space < 0) 705 + ring->space += ring->size; 706 + if (ring->space >= n) { 707 + trace_i915_ring_wait_end (dev); 708 + return 0; 709 + } 710 + 711 + if (dev->primary->master) { 712 + struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 713 + if (master_priv->sarea_priv) 714 + master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 715 + } 716 + 717 + yield(); 718 + } while (!time_after(jiffies, end)); 719 + trace_i915_ring_wait_end (dev); 720 + return -EBUSY; 721 + } 722 + 723 + void intel_ring_begin(struct drm_device *dev, 724 + struct intel_ring_buffer *ring, int n) 725 + { 726 + if (unlikely(ring->tail + n > ring->size)) 727 + intel_wrap_ring_buffer(dev, ring); 728 + if (unlikely(ring->space < n)) 729 + intel_wait_ring_buffer(dev, ring, n); 730 + } 
731 +
732 + void intel_ring_emit(struct drm_device *dev,
733 + struct intel_ring_buffer *ring, u32 data)
734 + {
735 + unsigned int *virt = ring->virtual_start + ring->tail;
736 + *virt = data;
737 + ring->tail += 4;
738 + ring->tail &= ring->size - 1;
739 + ring->space -= 4;
740 + }
741 +
742 + void intel_ring_advance(struct drm_device *dev,
743 + struct intel_ring_buffer *ring)
744 + {
745 + ring->advance_ring(dev, ring);
746 + }
747 +
748 + void intel_fill_struct(struct drm_device *dev,
749 + struct intel_ring_buffer *ring,
750 + void *data,
751 + unsigned int len)
752 + {
753 + BUG_ON((len & (4-1)) != 0); /* len must be a multiple of 4 */
754 + intel_ring_begin(dev, ring, len);
755 + /* read the tail only after intel_ring_begin() may have wrapped it */
756 + memcpy(ring->virtual_start + ring->tail, data, len);
757 + ring->tail += len;
758 + ring->tail &= ring->size - 1;
759 + ring->space -= len;
760 + intel_ring_advance(dev, ring);
761 + }
762 +
763 + u32 intel_ring_get_seqno(struct drm_device *dev,
764 + struct intel_ring_buffer *ring)
765 + {
766 + u32 seqno;
767 + seqno = ring->next_seqno;
768 +
769 + /* reserve 0 for non-seqno */
770 + if (++ring->next_seqno == 0)
771 + ring->next_seqno = 1;
772 + return seqno;
773 + }
774 +
775 + struct intel_ring_buffer render_ring = {
776 + .name = "render ring",
777 + .regs = {
778 + .ctl = PRB0_CTL,
779 + .head = PRB0_HEAD,
780 + .tail = PRB0_TAIL,
781 + .start = PRB0_START
782 + },
783 + .ring_flag = I915_EXEC_RENDER,
784 + .size = 32 * PAGE_SIZE,
785 + .alignment = PAGE_SIZE,
786 + .virtual_start = NULL,
787 + .dev = NULL,
788 + .gem_object = NULL,
789 + .head = 0,
790 + .tail = 0,
791 + .space = 0,
792 + .next_seqno = 1,
793 + .user_irq_refcount = 0,
794 + .irq_gem_seqno = 0,
795 + .waiting_gem_seqno = 0,
796 + .setup_status_page = render_setup_status_page,
797 + .init = init_render_ring,
798 + .get_head = render_ring_get_head,
799 + .get_tail = render_ring_get_tail,
800 + .get_active_head = render_ring_get_active_head,
801 + .advance_ring = render_ring_advance_ring,
802 + .flush = render_ring_flush,
803 + .add_request = render_ring_add_request,
804 + .get_gem_seqno = render_ring_get_gem_seqno,
805 + .user_irq_get = render_ring_get_user_irq,
806 + .user_irq_put = render_ring_put_user_irq,
807 + .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
808 + .status_page = {NULL, 0, NULL},
809 + .map = {0,}
810 + };
811 +
812 + /* ring buffer for bit-stream decoder */
813 +
814 + struct intel_ring_buffer bsd_ring = {
815 + .name = "bsd ring",
816 + .regs = {
817 + .ctl = BSD_RING_CTL,
818 + .head = BSD_RING_HEAD,
819 + .tail = BSD_RING_TAIL,
820 + .start = BSD_RING_START
821 + },
822 + .ring_flag = I915_EXEC_BSD,
823 + .size = 32 * PAGE_SIZE,
824 + .alignment = PAGE_SIZE,
825 + .virtual_start = NULL,
826 + .dev = NULL,
827 + .gem_object = NULL,
828 + .head = 0,
829 + .tail = 0,
830 + .space = 0,
831 + .next_seqno = 1,
832 + .user_irq_refcount = 0,
833 + .irq_gem_seqno = 0,
834 + .waiting_gem_seqno = 0,
835 + .setup_status_page = bsd_setup_status_page,
836 + .init = init_bsd_ring,
837 + .get_head = bsd_ring_get_head,
838 + .get_tail = bsd_ring_get_tail,
839 + .get_active_head = bsd_ring_get_active_head,
840 + .advance_ring = bsd_ring_advance_ring,
841 + .flush = bsd_ring_flush,
842 + .add_request = bsd_ring_add_request,
843 + .get_gem_seqno = bsd_ring_get_gem_seqno,
844 + .user_irq_get = bsd_ring_get_user_irq,
845 + .user_irq_put = bsd_ring_put_user_irq,
846 + .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
847 + .status_page = {NULL, 0, NULL},
848 + .map = {0,}
849 + };
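The three entry points above follow a reserve/write/publish pattern: intel_ring_begin() guarantees n contiguous bytes (padding to the end of the buffer with MI_NOOPs and/or waiting for the consumer), intel_ring_emit() writes one dword and advances the software tail, and intel_ring_advance() publishes the tail to the hardware through the ring's advance_ring hook. A minimal caller sketch, using only the functions added in this patch (emit_two_noops is an illustrative name, not part of the change; note that n is in bytes, so two dwords need 8):

/* Illustrative caller of the new ring API; assumes the i915 context
 * of this file (MI_NOOP, struct intel_ring_buffer). */
static void emit_two_noops(struct drm_device *dev,
                           struct intel_ring_buffer *ring)
{
        /* reserve 8 bytes (two dwords), wrapping/waiting as needed */
        intel_ring_begin(dev, ring, 8);
        intel_ring_emit(dev, ring, MI_NOOP);
        intel_ring_emit(dev, ring, MI_NOOP);
        /* hand the new tail to the hardware via ring->advance_ring */
        intel_ring_advance(dev, ring);
}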
+124
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 1 + #ifndef _INTEL_RINGBUFFER_H_
2 + #define _INTEL_RINGBUFFER_H_
3 +
4 + struct intel_hw_status_page {
5 + void *page_addr;
6 + unsigned int gfx_addr;
7 + struct drm_gem_object *obj;
8 + };
9 +
10 + struct drm_i915_gem_execbuffer2;
11 + struct intel_ring_buffer {
12 + const char *name;
13 + struct ring_regs {
14 + u32 ctl;
15 + u32 head;
16 + u32 tail;
17 + u32 start;
18 + } regs;
19 + unsigned int ring_flag;
20 + unsigned long size;
21 + unsigned int alignment;
22 + void *virtual_start;
23 + struct drm_device *dev;
24 + struct drm_gem_object *gem_object;
25 +
26 + unsigned int head;
27 + unsigned int tail;
28 + unsigned int space;
29 + u32 next_seqno;
30 + struct intel_hw_status_page status_page;
31 +
32 + u32 irq_gem_seqno; /* last seqno seen at irq time */
33 + u32 waiting_gem_seqno;
34 + int user_irq_refcount;
35 + void (*user_irq_get)(struct drm_device *dev,
36 + struct intel_ring_buffer *ring);
37 + void (*user_irq_put)(struct drm_device *dev,
38 + struct intel_ring_buffer *ring);
39 + void (*setup_status_page)(struct drm_device *dev,
40 + struct intel_ring_buffer *ring);
41 +
42 + int (*init)(struct drm_device *dev,
43 + struct intel_ring_buffer *ring);
44 +
45 + unsigned int (*get_head)(struct drm_device *dev,
46 + struct intel_ring_buffer *ring);
47 + unsigned int (*get_tail)(struct drm_device *dev,
48 + struct intel_ring_buffer *ring);
49 + unsigned int (*get_active_head)(struct drm_device *dev,
50 + struct intel_ring_buffer *ring);
51 + void (*advance_ring)(struct drm_device *dev,
52 + struct intel_ring_buffer *ring);
53 + void (*flush)(struct drm_device *dev,
54 + struct intel_ring_buffer *ring,
55 + u32 invalidate_domains,
56 + u32 flush_domains);
57 + u32 (*add_request)(struct drm_device *dev,
58 + struct intel_ring_buffer *ring,
59 + struct drm_file *file_priv,
60 + u32 flush_domains);
61 + u32 (*get_gem_seqno)(struct drm_device *dev,
62 + struct intel_ring_buffer *ring);
63 + int (*dispatch_gem_execbuffer)(struct drm_device *dev,
64 + struct intel_ring_buffer *ring,
65 + struct drm_i915_gem_execbuffer2 *exec,
66 + struct drm_clip_rect *cliprects,
67 + uint64_t exec_offset);
68 +
69 + /**
70 + * List of objects currently involved in rendering from the
71 + * ringbuffer.
72 + *
73 + * Includes buffers having the contents of their GPU caches
74 + * flushed, not necessarily primitives. last_rendering_seqno
75 + * represents when the rendering involved will be completed.
76 + *
77 + * A reference is held on the buffer while on this list.
78 + */
79 + struct list_head active_list;
80 +
81 + /**
82 + * List of breadcrumbs associated with GPU requests currently
83 + * outstanding.
84 + */
85 + struct list_head request_list;
86 +
87 + wait_queue_head_t irq_queue;
88 + drm_local_map_t map;
89 + };
90 +
91 + static inline u32
92 + intel_read_status_page(struct intel_ring_buffer *ring,
93 + int reg)
94 + {
95 + u32 *regs = ring->status_page.page_addr;
96 + return regs[reg];
97 + }
98 +
99 + int intel_init_ring_buffer(struct drm_device *dev,
100 + struct intel_ring_buffer *ring);
101 + void intel_cleanup_ring_buffer(struct drm_device *dev,
102 + struct intel_ring_buffer *ring);
103 + int intel_wait_ring_buffer(struct drm_device *dev,
104 + struct intel_ring_buffer *ring, int n);
105 + int intel_wrap_ring_buffer(struct drm_device *dev,
106 + struct intel_ring_buffer *ring);
107 + void intel_ring_begin(struct drm_device *dev,
108 + struct intel_ring_buffer *ring, int n);
109 + void intel_ring_emit(struct drm_device *dev,
110 + struct intel_ring_buffer *ring, u32 data);
111 + void intel_fill_struct(struct drm_device *dev,
112 + struct intel_ring_buffer *ring,
113 + void *data,
114 + unsigned int len);
115 + void intel_ring_advance(struct drm_device *dev,
116 + struct intel_ring_buffer *ring);
117 +
118 + u32 intel_ring_get_seqno(struct drm_device *dev,
119 + struct intel_ring_buffer *ring);
120 +
121 + extern struct intel_ring_buffer render_ring;
122 + extern struct intel_ring_buffer bsd_ring;
123 +
124 + #endif /* _INTEL_RINGBUFFER_H_ */
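Because render_ring and bsd_ring share this ops-table struct, code that used to hard-code render-ring registers can now be written once against the hooks. A hedged sketch of ring-agnostic seqno polling built from the accessors above (ring_poll_seqno is our own illustrative helper, not part of the patch; the wrap-safe comparison is open-coded rather than relying on any particular i915 helper):

/* Illustrative, kernel-context sketch: poll a ring until the seqno it
 * wrote to its hardware status page has been reached. */
static int ring_poll_seqno(struct drm_device *dev,
                           struct intel_ring_buffer *ring, u32 seqno)
{
        unsigned long end = jiffies + 3 * HZ;

        do {
                /* get_gem_seqno reads the ring's breadcrumb, ultimately
                 * via intel_read_status_page() */
                u32 cur = ring->get_gem_seqno(dev, ring);

                /* wrap-safe "cur >= seqno" comparison */
                if ((s32)(cur - seqno) >= 0)
                        return 0;
                yield();
        } while (!time_after(jiffies, end));

        return -EBUSY;
}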
+1 -1
drivers/gpu/drm/i915/intel_sdvo.c
··· 1479 1479 intel_encoder = enc_to_intel_encoder(encoder);
1480 1480 if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
1481 1481 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1482 - if (connector && encoder == intel_attached_encoder(connector))
1482 + if (encoder == intel_attached_encoder(connector))
1483 1483 return connector;
1484 1484 }
1485 1485 }
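The dropped connector && test was dead code: list_for_each_entry() derives each cursor from the embedded list_head with container_of()-style arithmetic, so connector can never be NULL inside the loop body. Roughly, the macro from include/linux/list.h expands to:

/* Simplified expansion of list_for_each_entry(); the cursor "pos" is
 * always computed from a real list node, never set to NULL. */
#define list_for_each_entry(pos, head, member)                          \
        for (pos = list_entry((head)->next, typeof(*pos), member);      \
             &pos->member != (head);                                    \
             pos = list_entry(pos->member.next, typeof(*pos), member))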
+4 -1
include/drm/i915_drm.h
··· 275 275 #define I915_PARAM_HAS_OVERLAY 7
276 276 #define I915_PARAM_HAS_PAGEFLIPPING 8
277 277 #define I915_PARAM_HAS_EXECBUF2 9
278 + #define I915_PARAM_HAS_BSD 10
278 279
279 280 typedef struct drm_i915_getparam {
280 281 int param;
··· 617 616 __u32 num_cliprects;
618 617 /** This is a struct drm_clip_rect *cliprects */
619 618 __u64 cliprects_ptr;
620 - __u64 flags; /* currently unused */
619 + #define I915_EXEC_RENDER (1<<0)
620 + #define I915_EXEC_BSD (1<<1)
621 + __u64 flags;
621 622 __u64 rsvd1;
622 623 __u64 rsvd2;
623 624 };
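Together these two hunks form the userspace contract for the new ring: probe I915_PARAM_HAS_BSD before depending on it, then select a ring per batch through the previously-unused execbuffer2 flags word. A hedged userspace sketch (plain ioctl() rather than any particular libdrm wrapper; error handling and batch setup elided):

#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Returns non-zero if the kernel advertises the BSD ring. */
static int has_bsd_ring(int fd)
{
        drm_i915_getparam_t gp;
        int value = 0;

        memset(&gp, 0, sizeof(gp));
        gp.param = I915_PARAM_HAS_BSD;
        gp.value = &value;
        if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
                return 0;
        return value;
}

/* Route one already-prepared execbuffer2 submission to the BSD ring. */
static void submit_to_bsd(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
{
        execbuf->flags = I915_EXEC_BSD; /* instead of I915_EXEC_RENDER */
        ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
}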