Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
drm/i915: Move legacy breadcrumb out of the reserved status page area
drm/i915: Filter pci devices based on PCI_CLASS_DISPLAY_VGA
drm/radeon: map registers at load time
drm: Remove infrastructure for supporting i915's vblank swapping.
i915: Remove racy delayed vblank swap ioctl.
i915: Don't whine when pci_enable_msi() fails.
i915: Don't attempt to short-circuit object_wait_rendering by checking domains.
i915: Clean up sarea pointers on leavevt
i915: Save/restore MCHBAR_RENDER_STANDBY on GM965/GM45

+84 -522
+9 -1
drivers/gpu/drm/drm_drv.c
··· 266 for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) { 267 pid = (struct pci_device_id *)&driver->pci_driver.id_table[i]; 268 269 pdev = NULL; 270 - /* pass back in pdev to account for multiple identical cards */ 271 while ((pdev = 272 pci_get_subsys(pid->vendor, pid->device, pid->subvendor, 273 pid->subdevice, pdev)) != NULL) { 274 /* stealth mode requires a manual probe */ 275 pci_dev_get(pdev); 276 drm_get_dev(pdev, pid, driver);
··· 266 for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) { 267 pid = (struct pci_device_id *)&driver->pci_driver.id_table[i]; 268 269 + /* Loop around setting up a DRM device for each PCI device 270 + * matching our ID and device class. If we had the internal 271 + * function that pci_get_subsys and pci_get_class used, we'd 272 + * be able to just pass pid in instead of doing a two-stage 273 + * thing. 274 + */ 275 pdev = NULL; 276 while ((pdev = 277 pci_get_subsys(pid->vendor, pid->device, pid->subvendor, 278 pid->subdevice, pdev)) != NULL) { 279 + if ((pdev->class & pid->class_mask) != pid->class) 280 + continue; 281 + 282 /* stealth mode requires a manual probe */ 283 pci_dev_get(pdev); 284 drm_get_dev(pdev, pid, driver);
-80
drivers/gpu/drm/drm_irq.c
··· 280 281 drm_vblank_cleanup(dev); 282 283 - dev->locked_tasklet_func = NULL; 284 - 285 return 0; 286 } 287 EXPORT_SYMBOL(drm_irq_uninstall); ··· 697 drm_vbl_send_signals(dev, crtc); 698 } 699 EXPORT_SYMBOL(drm_handle_vblank); 700 - 701 - /** 702 - * Tasklet wrapper function. 703 - * 704 - * \param data DRM device in disguise. 705 - * 706 - * Attempts to grab the HW lock and calls the driver callback on success. On 707 - * failure, leave the lock marked as contended so the callback can be called 708 - * from drm_unlock(). 709 - */ 710 - static void drm_locked_tasklet_func(unsigned long data) 711 - { 712 - struct drm_device *dev = (struct drm_device *)data; 713 - unsigned long irqflags; 714 - void (*tasklet_func)(struct drm_device *); 715 - 716 - spin_lock_irqsave(&dev->tasklet_lock, irqflags); 717 - tasklet_func = dev->locked_tasklet_func; 718 - spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); 719 - 720 - if (!tasklet_func || 721 - !drm_lock_take(&dev->lock, 722 - DRM_KERNEL_CONTEXT)) { 723 - return; 724 - } 725 - 726 - dev->lock.lock_time = jiffies; 727 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); 728 - 729 - spin_lock_irqsave(&dev->tasklet_lock, irqflags); 730 - tasklet_func = dev->locked_tasklet_func; 731 - dev->locked_tasklet_func = NULL; 732 - spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); 733 - 734 - if (tasklet_func != NULL) 735 - tasklet_func(dev); 736 - 737 - drm_lock_free(&dev->lock, 738 - DRM_KERNEL_CONTEXT); 739 - } 740 - 741 - /** 742 - * Schedule a tasklet to call back a driver hook with the HW lock held. 743 - * 744 - * \param dev DRM device. 745 - * \param func Driver callback. 746 - * 747 - * This is intended for triggering actions that require the HW lock from an 748 - * interrupt handler. The lock will be grabbed ASAP after the interrupt handler 749 - * completes. Note that the callback may be called from interrupt or process 750 - * context, it must not make any assumptions about this. 
Also, the HW lock will 751 - * be held with the kernel context or any client context. 752 - */ 753 - void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *)) 754 - { 755 - unsigned long irqflags; 756 - static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0); 757 - 758 - if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) || 759 - test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state)) 760 - return; 761 - 762 - spin_lock_irqsave(&dev->tasklet_lock, irqflags); 763 - 764 - if (dev->locked_tasklet_func) { 765 - spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); 766 - return; 767 - } 768 - 769 - dev->locked_tasklet_func = func; 770 - 771 - spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); 772 - 773 - drm_tasklet.data = (unsigned long)dev; 774 - 775 - tasklet_hi_schedule(&drm_tasklet); 776 - } 777 - EXPORT_SYMBOL(drm_locked_tasklet);
··· 280 281 drm_vblank_cleanup(dev); 282 283 return 0; 284 } 285 EXPORT_SYMBOL(drm_irq_uninstall); ··· 699 drm_vbl_send_signals(dev, crtc); 700 } 701 EXPORT_SYMBOL(drm_handle_vblank);
-9
drivers/gpu/drm/drm_lock.c
··· 154 int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) 155 { 156 struct drm_lock *lock = data; 157 - unsigned long irqflags; 158 - void (*tasklet_func)(struct drm_device *); 159 160 if (lock->context == DRM_KERNEL_CONTEXT) { 161 DRM_ERROR("Process %d using kernel context %d\n", 162 task_pid_nr(current), lock->context); 163 return -EINVAL; 164 } 165 - 166 - spin_lock_irqsave(&dev->tasklet_lock, irqflags); 167 - tasklet_func = dev->locked_tasklet_func; 168 - dev->locked_tasklet_func = NULL; 169 - spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); 170 - if (tasklet_func != NULL) 171 - tasklet_func(dev); 172 173 atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); 174
··· 154 int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) 155 { 156 struct drm_lock *lock = data; 157 158 if (lock->context == DRM_KERNEL_CONTEXT) { 159 DRM_ERROR("Process %d using kernel context %d\n", 160 task_pid_nr(current), lock->context); 161 return -EINVAL; 162 } 163 164 atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); 165
-1
drivers/gpu/drm/drm_stub.c
··· 92 93 spin_lock_init(&dev->count_lock); 94 spin_lock_init(&dev->drw_lock); 95 - spin_lock_init(&dev->tasklet_lock); 96 spin_lock_init(&dev->lock.spinlock); 97 init_timer(&dev->timer); 98 mutex_init(&dev->struct_mutex);
··· 92 93 spin_lock_init(&dev->count_lock); 94 spin_lock_init(&dev->drw_lock); 95 spin_lock_init(&dev->lock.spinlock); 96 init_timer(&dev->timer); 97 mutex_init(&dev->struct_mutex);
+8 -8
drivers/gpu/drm/i915/i915_dma.c
··· 154 if (I915_NEED_GFX_HWS(dev)) 155 i915_free_hws(dev); 156 157 return 0; 158 } 159 ··· 445 446 BEGIN_LP_RING(4); 447 OUT_RING(MI_STORE_DWORD_INDEX); 448 - OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); 449 OUT_RING(dev_priv->counter); 450 OUT_RING(0); 451 ADVANCE_LP_RING(); ··· 576 577 BEGIN_LP_RING(4); 578 OUT_RING(MI_STORE_DWORD_INDEX); 579 - OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); 580 OUT_RING(dev_priv->counter); 581 OUT_RING(0); 582 ADVANCE_LP_RING(); ··· 611 struct drm_file *file_priv) 612 { 613 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 614 - u32 *hw_status = dev_priv->hw_status_page; 615 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 616 dev_priv->sarea_priv; 617 drm_i915_batchbuffer_t *batch = data; ··· 636 mutex_unlock(&dev->struct_mutex); 637 638 if (sarea_priv) 639 - sarea_priv->last_dispatch = (int)hw_status[5]; 640 return ret; 641 } 642 ··· 644 struct drm_file *file_priv) 645 { 646 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 647 - u32 *hw_status = dev_priv->hw_status_page; 648 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 649 dev_priv->sarea_priv; 650 drm_i915_cmdbuffer_t *cmdbuf = data; ··· 671 } 672 673 if (sarea_priv) 674 - sarea_priv->last_dispatch = (int)hw_status[5]; 675 return 0; 676 } 677 ··· 850 * be lost or delayed 851 */ 852 if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev)) 853 - if (pci_enable_msi(dev->pdev)) 854 - DRM_ERROR("failed to enable MSI\n"); 855 856 intel_opregion_init(dev); 857
··· 154 if (I915_NEED_GFX_HWS(dev)) 155 i915_free_hws(dev); 156 157 + dev_priv->sarea = NULL; 158 + dev_priv->sarea_priv = NULL; 159 + 160 return 0; 161 } 162 ··· 442 443 BEGIN_LP_RING(4); 444 OUT_RING(MI_STORE_DWORD_INDEX); 445 + OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 446 OUT_RING(dev_priv->counter); 447 OUT_RING(0); 448 ADVANCE_LP_RING(); ··· 573 574 BEGIN_LP_RING(4); 575 OUT_RING(MI_STORE_DWORD_INDEX); 576 + OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 577 OUT_RING(dev_priv->counter); 578 OUT_RING(0); 579 ADVANCE_LP_RING(); ··· 608 struct drm_file *file_priv) 609 { 610 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 611 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 612 dev_priv->sarea_priv; 613 drm_i915_batchbuffer_t *batch = data; ··· 634 mutex_unlock(&dev->struct_mutex); 635 636 if (sarea_priv) 637 + sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 638 return ret; 639 } 640 ··· 642 struct drm_file *file_priv) 643 { 644 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 645 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 646 dev_priv->sarea_priv; 647 drm_i915_cmdbuffer_t *cmdbuf = data; ··· 670 } 671 672 if (sarea_priv) 673 + sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 674 return 0; 675 } 676 ··· 849 * be lost or delayed 850 */ 851 if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev)) 852 + pci_enable_msi(dev->pdev); 853 854 intel_opregion_init(dev); 855
+3 -16
drivers/gpu/drm/i915/i915_drv.h
··· 88 struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ 89 }; 90 91 - typedef struct _drm_i915_vbl_swap { 92 - struct list_head head; 93 - drm_drawable_t drw_id; 94 - unsigned int pipe; 95 - unsigned int sequence; 96 - } drm_i915_vbl_swap_t; 97 - 98 struct opregion_header; 99 struct opregion_acpi; 100 struct opregion_swsci; ··· 139 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; 140 int vblank_pipe; 141 142 - spinlock_t swaps_lock; 143 - drm_i915_vbl_swap_t vbl_swaps; 144 - unsigned int swaps_pending; 145 - 146 struct intel_opregion opregion; 147 148 /* Register state */ ··· 146 u32 saveDSPACNTR; 147 u32 saveDSPBCNTR; 148 u32 saveDSPARB; 149 u32 savePIPEACONF; 150 u32 savePIPEBCONF; 151 u32 savePIPEASRC; ··· 230 u8 saveDACMASK; 231 u8 saveDACDATA[256*3]; /* 256 3-byte colors */ 232 u8 saveCR[37]; 233 - 234 - /** Work task for vblank-related ring access */ 235 - struct work_struct vblank_work; 236 237 struct { 238 struct drm_mm gtt_space; ··· 431 void i915_user_irq_get(struct drm_device *dev); 432 void i915_user_irq_put(struct drm_device *dev); 433 434 - extern void i915_vblank_work_handler(struct work_struct *work); 435 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 436 extern void i915_driver_irq_preinstall(struct drm_device * dev); 437 extern int i915_driver_irq_postinstall(struct drm_device *dev); ··· 608 * The area from dword 0x20 to 0x3ff is available for driver usage. 609 */ 610 #define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) 611 - #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5) 612 #define I915_GEM_HWS_INDEX 0x20 613 614 extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); 615
··· 88 struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ 89 }; 90 91 struct opregion_header; 92 struct opregion_acpi; 93 struct opregion_swsci; ··· 146 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; 147 int vblank_pipe; 148 149 struct intel_opregion opregion; 150 151 /* Register state */ ··· 157 u32 saveDSPACNTR; 158 u32 saveDSPBCNTR; 159 u32 saveDSPARB; 160 + u32 saveRENDERSTANDBY; 161 u32 savePIPEACONF; 162 u32 savePIPEBCONF; 163 u32 savePIPEASRC; ··· 240 u8 saveDACMASK; 241 u8 saveDACDATA[256*3]; /* 256 3-byte colors */ 242 u8 saveCR[37]; 243 244 struct { 245 struct drm_mm gtt_space; ··· 444 void i915_user_irq_get(struct drm_device *dev); 445 void i915_user_irq_put(struct drm_device *dev); 446 447 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 448 extern void i915_driver_irq_preinstall(struct drm_device * dev); 449 extern int i915_driver_irq_postinstall(struct drm_device *dev); ··· 622 * The area from dword 0x20 to 0x3ff is available for driver usage. 623 */ 624 #define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) 625 + #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) 626 #define I915_GEM_HWS_INDEX 0x20 627 + #define I915_BREADCRUMB_INDEX 0x21 628 629 extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); 630
+3 -5
drivers/gpu/drm/i915/i915_gem.c
··· 1455 read_domains, write_domain); 1456 1457 /* Wait on any GPU rendering to the object to be flushed. */ 1458 - if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) { 1459 - ret = i915_gem_object_wait_rendering(obj); 1460 - if (ret) 1461 - return ret; 1462 - } 1463 1464 if (obj_priv->page_cpu_valid == NULL) { 1465 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
··· 1455 read_domains, write_domain); 1456 1457 /* Wait on any GPU rendering to the object to be flushed. */ 1458 + ret = i915_gem_object_wait_rendering(obj); 1459 + if (ret) 1460 + return ret; 1461 1462 if (obj_priv->page_cpu_valid == NULL) { 1463 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
+16 -367
drivers/gpu/drm/i915/i915_irq.c
··· 80 return 0; 81 } 82 83 - /** 84 - * Emit blits for scheduled buffer swaps. 85 - * 86 - * This function will be called with the HW lock held. 87 - * Because this function must grab the ring mutex (dev->struct_mutex), 88 - * it can no longer run at soft irq time. We'll fix this when we do 89 - * the DRI2 swap buffer work. 90 - */ 91 - static void i915_vblank_tasklet(struct drm_device *dev) 92 - { 93 - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 94 - unsigned long irqflags; 95 - struct list_head *list, *tmp, hits, *hit; 96 - int nhits, nrects, slice[2], upper[2], lower[2], i; 97 - unsigned counter[2]; 98 - struct drm_drawable_info *drw; 99 - drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; 100 - u32 cpp = dev_priv->cpp; 101 - u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD | 102 - XY_SRC_COPY_BLT_WRITE_ALPHA | 103 - XY_SRC_COPY_BLT_WRITE_RGB) 104 - : XY_SRC_COPY_BLT_CMD; 105 - u32 src_pitch = sarea_priv->pitch * cpp; 106 - u32 dst_pitch = sarea_priv->pitch * cpp; 107 - u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24); 108 - RING_LOCALS; 109 - 110 - mutex_lock(&dev->struct_mutex); 111 - 112 - if (IS_I965G(dev) && sarea_priv->front_tiled) { 113 - cmd |= XY_SRC_COPY_BLT_DST_TILED; 114 - dst_pitch >>= 2; 115 - } 116 - if (IS_I965G(dev) && sarea_priv->back_tiled) { 117 - cmd |= XY_SRC_COPY_BLT_SRC_TILED; 118 - src_pitch >>= 2; 119 - } 120 - 121 - counter[0] = drm_vblank_count(dev, 0); 122 - counter[1] = drm_vblank_count(dev, 1); 123 - 124 - DRM_DEBUG("\n"); 125 - 126 - INIT_LIST_HEAD(&hits); 127 - 128 - nhits = nrects = 0; 129 - 130 - spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); 131 - 132 - /* Find buffer swaps scheduled for this vertical blank */ 133 - list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { 134 - drm_i915_vbl_swap_t *vbl_swap = 135 - list_entry(list, drm_i915_vbl_swap_t, head); 136 - int pipe = vbl_swap->pipe; 137 - 138 - if ((counter[pipe] - vbl_swap->sequence) > (1<<23)) 139 - continue; 140 - 141 - list_del(list); 
142 - dev_priv->swaps_pending--; 143 - drm_vblank_put(dev, pipe); 144 - 145 - spin_unlock(&dev_priv->swaps_lock); 146 - spin_lock(&dev->drw_lock); 147 - 148 - drw = drm_get_drawable_info(dev, vbl_swap->drw_id); 149 - 150 - list_for_each(hit, &hits) { 151 - drm_i915_vbl_swap_t *swap_cmp = 152 - list_entry(hit, drm_i915_vbl_swap_t, head); 153 - struct drm_drawable_info *drw_cmp = 154 - drm_get_drawable_info(dev, swap_cmp->drw_id); 155 - 156 - /* Make sure both drawables are still 157 - * around and have some rectangles before 158 - * we look inside to order them for the 159 - * blts below. 160 - */ 161 - if (drw_cmp && drw_cmp->num_rects > 0 && 162 - drw && drw->num_rects > 0 && 163 - drw_cmp->rects[0].y1 > drw->rects[0].y1) { 164 - list_add_tail(list, hit); 165 - break; 166 - } 167 - } 168 - 169 - spin_unlock(&dev->drw_lock); 170 - 171 - /* List of hits was empty, or we reached the end of it */ 172 - if (hit == &hits) 173 - list_add_tail(list, hits.prev); 174 - 175 - nhits++; 176 - 177 - spin_lock(&dev_priv->swaps_lock); 178 - } 179 - 180 - if (nhits == 0) { 181 - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 182 - mutex_unlock(&dev->struct_mutex); 183 - return; 184 - } 185 - 186 - spin_unlock(&dev_priv->swaps_lock); 187 - 188 - i915_kernel_lost_context(dev); 189 - 190 - if (IS_I965G(dev)) { 191 - BEGIN_LP_RING(4); 192 - 193 - OUT_RING(GFX_OP_DRAWRECT_INFO_I965); 194 - OUT_RING(0); 195 - OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16)); 196 - OUT_RING(0); 197 - ADVANCE_LP_RING(); 198 - } else { 199 - BEGIN_LP_RING(6); 200 - 201 - OUT_RING(GFX_OP_DRAWRECT_INFO); 202 - OUT_RING(0); 203 - OUT_RING(0); 204 - OUT_RING(sarea_priv->width | sarea_priv->height << 16); 205 - OUT_RING(sarea_priv->width | sarea_priv->height << 16); 206 - OUT_RING(0); 207 - 208 - ADVANCE_LP_RING(); 209 - } 210 - 211 - sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT; 212 - 213 - upper[0] = upper[1] = 0; 214 - slice[0] = max(sarea_priv->pipeA_h / nhits, 1); 215 
- slice[1] = max(sarea_priv->pipeB_h / nhits, 1); 216 - lower[0] = sarea_priv->pipeA_y + slice[0]; 217 - lower[1] = sarea_priv->pipeB_y + slice[0]; 218 - 219 - spin_lock(&dev->drw_lock); 220 - 221 - /* Emit blits for buffer swaps, partitioning both outputs into as many 222 - * slices as there are buffer swaps scheduled in order to avoid tearing 223 - * (based on the assumption that a single buffer swap would always 224 - * complete before scanout starts). 225 - */ 226 - for (i = 0; i++ < nhits; 227 - upper[0] = lower[0], lower[0] += slice[0], 228 - upper[1] = lower[1], lower[1] += slice[1]) { 229 - if (i == nhits) 230 - lower[0] = lower[1] = sarea_priv->height; 231 - 232 - list_for_each(hit, &hits) { 233 - drm_i915_vbl_swap_t *swap_hit = 234 - list_entry(hit, drm_i915_vbl_swap_t, head); 235 - struct drm_clip_rect *rect; 236 - int num_rects, pipe; 237 - unsigned short top, bottom; 238 - 239 - drw = drm_get_drawable_info(dev, swap_hit->drw_id); 240 - 241 - /* The drawable may have been destroyed since 242 - * the vblank swap was queued 243 - */ 244 - if (!drw) 245 - continue; 246 - 247 - rect = drw->rects; 248 - pipe = swap_hit->pipe; 249 - top = upper[pipe]; 250 - bottom = lower[pipe]; 251 - 252 - for (num_rects = drw->num_rects; num_rects--; rect++) { 253 - int y1 = max(rect->y1, top); 254 - int y2 = min(rect->y2, bottom); 255 - 256 - if (y1 >= y2) 257 - continue; 258 - 259 - BEGIN_LP_RING(8); 260 - 261 - OUT_RING(cmd); 262 - OUT_RING(ropcpp | dst_pitch); 263 - OUT_RING((y1 << 16) | rect->x1); 264 - OUT_RING((y2 << 16) | rect->x2); 265 - OUT_RING(sarea_priv->front_offset); 266 - OUT_RING((y1 << 16) | rect->x1); 267 - OUT_RING(src_pitch); 268 - OUT_RING(sarea_priv->back_offset); 269 - 270 - ADVANCE_LP_RING(); 271 - } 272 - } 273 - } 274 - 275 - spin_unlock_irqrestore(&dev->drw_lock, irqflags); 276 - mutex_unlock(&dev->struct_mutex); 277 - 278 - list_for_each_safe(hit, tmp, &hits) { 279 - drm_i915_vbl_swap_t *swap_hit = 280 - list_entry(hit, drm_i915_vbl_swap_t, 
head); 281 - 282 - list_del(hit); 283 - 284 - drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER); 285 - } 286 - } 287 - 288 /* Called from drm generic code, passed a 'crtc', which 289 * we use as a pipe index 290 */ ··· 115 count = (high1 << 8) | low; 116 117 return count; 118 - } 119 - 120 - void 121 - i915_vblank_work_handler(struct work_struct *work) 122 - { 123 - drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 124 - vblank_work); 125 - struct drm_device *dev = dev_priv->dev; 126 - unsigned long irqflags; 127 - 128 - if (dev->lock.hw_lock == NULL) { 129 - i915_vblank_tasklet(dev); 130 - return; 131 - } 132 - 133 - spin_lock_irqsave(&dev->tasklet_lock, irqflags); 134 - dev->locked_tasklet_func = i915_vblank_tasklet; 135 - spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); 136 - 137 - /* Try to get the lock now, if this fails, the lock 138 - * holder will execute the tasklet during unlock 139 - */ 140 - if (!drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) 141 - return; 142 - 143 - dev->lock.lock_time = jiffies; 144 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); 145 - 146 - spin_lock_irqsave(&dev->tasklet_lock, irqflags); 147 - dev->locked_tasklet_func = NULL; 148 - spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); 149 - 150 - i915_vblank_tasklet(dev); 151 - drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT); 152 } 153 154 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) ··· 194 if (iir & I915_ASLE_INTERRUPT) 195 opregion_asle_intr(dev); 196 197 - if (vblank && dev_priv->swaps_pending > 0) 198 - schedule_work(&dev_priv->vblank_work); 199 - 200 return IRQ_HANDLED; 201 } 202 ··· 212 if (dev_priv->sarea_priv) 213 dev_priv->sarea_priv->last_enqueue = dev_priv->counter; 214 215 - BEGIN_LP_RING(6); 216 OUT_RING(MI_STORE_DWORD_INDEX); 217 - OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); 218 OUT_RING(dev_priv->counter); 219 - OUT_RING(0); 220 - OUT_RING(0); 221 OUT_RING(MI_USER_INTERRUPT); 222 ADVANCE_LP_RING(); 223 ··· 452 int i915_vblank_swap(struct 
drm_device *dev, void *data, 453 struct drm_file *file_priv) 454 { 455 - drm_i915_private_t *dev_priv = dev->dev_private; 456 - drm_i915_vblank_swap_t *swap = data; 457 - drm_i915_vbl_swap_t *vbl_swap, *vbl_old; 458 - unsigned int pipe, seqtype, curseq; 459 - unsigned long irqflags; 460 - struct list_head *list; 461 - int ret; 462 - 463 - if (!dev_priv || !dev_priv->sarea_priv) { 464 - DRM_ERROR("%s called with no initialization\n", __func__); 465 - return -EINVAL; 466 - } 467 - 468 - if (dev_priv->sarea_priv->rotation) { 469 - DRM_DEBUG("Rotation not supported\n"); 470 - return -EINVAL; 471 - } 472 - 473 - if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE | 474 - _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) { 475 - DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype); 476 - return -EINVAL; 477 - } 478 - 479 - pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; 480 - 481 - seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); 482 - 483 - if (!(dev_priv->vblank_pipe & (1 << pipe))) { 484 - DRM_ERROR("Invalid pipe %d\n", pipe); 485 - return -EINVAL; 486 - } 487 - 488 - spin_lock_irqsave(&dev->drw_lock, irqflags); 489 - 490 - if (!drm_get_drawable_info(dev, swap->drawable)) { 491 - spin_unlock_irqrestore(&dev->drw_lock, irqflags); 492 - DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable); 493 - return -EINVAL; 494 - } 495 - 496 - spin_unlock_irqrestore(&dev->drw_lock, irqflags); 497 - 498 - /* 499 - * We take the ref here and put it when the swap actually completes 500 - * in the tasklet. 
501 */ 502 - ret = drm_vblank_get(dev, pipe); 503 - if (ret) 504 - return ret; 505 - curseq = drm_vblank_count(dev, pipe); 506 - 507 - if (seqtype == _DRM_VBLANK_RELATIVE) 508 - swap->sequence += curseq; 509 - 510 - if ((curseq - swap->sequence) <= (1<<23)) { 511 - if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) { 512 - swap->sequence = curseq + 1; 513 - } else { 514 - DRM_DEBUG("Missed target sequence\n"); 515 - drm_vblank_put(dev, pipe); 516 - return -EINVAL; 517 - } 518 - } 519 - 520 - vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER); 521 - 522 - if (!vbl_swap) { 523 - DRM_ERROR("Failed to allocate memory to queue swap\n"); 524 - drm_vblank_put(dev, pipe); 525 - return -ENOMEM; 526 - } 527 - 528 - vbl_swap->drw_id = swap->drawable; 529 - vbl_swap->pipe = pipe; 530 - vbl_swap->sequence = swap->sequence; 531 - 532 - spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); 533 - 534 - list_for_each(list, &dev_priv->vbl_swaps.head) { 535 - vbl_old = list_entry(list, drm_i915_vbl_swap_t, head); 536 - 537 - if (vbl_old->drw_id == swap->drawable && 538 - vbl_old->pipe == pipe && 539 - vbl_old->sequence == swap->sequence) { 540 - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 541 - drm_vblank_put(dev, pipe); 542 - drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); 543 - DRM_DEBUG("Already scheduled\n"); 544 - return 0; 545 - } 546 - } 547 - 548 - if (dev_priv->swaps_pending >= 10) { 549 - DRM_DEBUG("Too many swaps queued\n"); 550 - DRM_DEBUG(" pipe 0: %d pipe 1: %d\n", 551 - drm_vblank_count(dev, 0), 552 - drm_vblank_count(dev, 1)); 553 - 554 - list_for_each(list, &dev_priv->vbl_swaps.head) { 555 - vbl_old = list_entry(list, drm_i915_vbl_swap_t, head); 556 - DRM_DEBUG("\tdrw %x pipe %d seq %x\n", 557 - vbl_old->drw_id, vbl_old->pipe, 558 - vbl_old->sequence); 559 - } 560 - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 561 - drm_vblank_put(dev, pipe); 562 - drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); 563 - return -EBUSY; 564 - } 565 
- 566 - list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head); 567 - dev_priv->swaps_pending++; 568 - 569 - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 570 - 571 - return 0; 572 } 573 574 /* drm_dma.h hooks ··· 484 { 485 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 486 int ret, num_pipes = 2; 487 - 488 - spin_lock_init(&dev_priv->swaps_lock); 489 - INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); 490 - INIT_WORK(&dev_priv->vblank_work, i915_vblank_work_handler); 491 - dev_priv->swaps_pending = 0; 492 493 /* Set initial unmasked IRQs to just the selected vblank pipes. */ 494 dev_priv->irq_mask_reg = ~0;
··· 80 return 0; 81 } 82 83 /* Called from drm generic code, passed a 'crtc', which 84 * we use as a pipe index 85 */ ··· 320 count = (high1 << 8) | low; 321 322 return count; 323 } 324 325 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) ··· 433 if (iir & I915_ASLE_INTERRUPT) 434 opregion_asle_intr(dev); 435 436 return IRQ_HANDLED; 437 } 438 ··· 454 if (dev_priv->sarea_priv) 455 dev_priv->sarea_priv->last_enqueue = dev_priv->counter; 456 457 + BEGIN_LP_RING(4); 458 OUT_RING(MI_STORE_DWORD_INDEX); 459 + OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 460 OUT_RING(dev_priv->counter); 461 OUT_RING(MI_USER_INTERRUPT); 462 ADVANCE_LP_RING(); 463 ··· 696 int i915_vblank_swap(struct drm_device *dev, void *data, 697 struct drm_file *file_priv) 698 { 699 + /* The delayed swap mechanism was fundamentally racy, and has been 700 + * removed. The model was that the client requested a delayed flip/swap 701 + * from the kernel, then waited for vblank before continuing to perform 702 + * rendering. The problem was that the kernel might wake the client 703 + * up before it dispatched the vblank swap (since the lock has to be 704 + * held while touching the ringbuffer), in which case the client would 705 + * clear and start the next frame before the swap occurred, and 706 + * flicker would occur in addition to likely missing the vblank. 707 + * 708 + * In the absence of this ioctl, userland falls back to a correct path 709 + * of waiting for a vblank, then dispatching the swap on its own. 710 + * Context switching to userland and back is plenty fast enough for 711 + * meeting the requirements of vblank swapping. 712 */ 713 + return -EINVAL; 714 } 715 716 /* drm_dma.h hooks ··· 830 { 831 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 832 int ret, num_pipes = 2; 833 834 /* Set initial unmasked IRQs to just the selected vblank pipes. */ 835 dev_priv->irq_mask_reg = ~0;
+3
drivers/gpu/drm/i915/i915_reg.h
··· 527 #define C0DRB3 0x10206 528 #define C1DRB3 0x10606 529 530 /* 531 * Overlay regs 532 */
··· 527 #define C0DRB3 0x10206 528 #define C1DRB3 0x10606 529 530 + /** GM965 GM45 render standby register */ 531 + #define MCHBAR_RENDER_STANDBY 0x111B8 532 + 533 /* 534 * Overlay regs 535 */
+9
drivers/gpu/drm/i915/i915_suspend.c
··· 240 241 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); 242 243 /* Display arbitration control */ 244 dev_priv->saveDSPARB = I915_READ(DSPARB); 245 ··· 369 370 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); 371 372 I915_WRITE(DSPARB, dev_priv->saveDSPARB); 373 374 /* Pipe & plane A info */
··· 240 241 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); 242 243 + /* Render Standby */ 244 + if (IS_I965G(dev) && IS_MOBILE(dev)) 245 + dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); 246 + 247 /* Display arbitration control */ 248 dev_priv->saveDSPARB = I915_READ(DSPARB); 249 ··· 365 366 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); 367 368 + /* Render Standby */ 369 + if (IS_I965G(dev) && IS_MOBILE(dev)) 370 + I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); 371 + 372 + /* Display arbitration */ 373 I915_WRITE(DSPARB, dev_priv->saveDSPARB); 374 375 /* Pipe & plane A info */
+9 -6
drivers/gpu/drm/radeon/radeon_cp.c
··· 1751 else 1752 dev_priv->flags |= RADEON_IS_PCI; 1753 1754 DRM_DEBUG("%s card detected\n", 1755 ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI")))); 1756 return ret; ··· 1773 1774 dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; 1775 1776 - ret = drm_addmap(dev, drm_get_resource_start(dev, 2), 1777 - drm_get_resource_len(dev, 2), _DRM_REGISTERS, 1778 - _DRM_READ_ONLY, &dev_priv->mmio); 1779 - if (ret != 0) 1780 - return ret; 1781 - 1782 dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0); 1783 ret = drm_addmap(dev, dev_priv->fb_aper_offset, 1784 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER, ··· 1788 drm_radeon_private_t *dev_priv = dev->dev_private; 1789 1790 DRM_DEBUG("\n"); 1791 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); 1792 1793 dev->dev_private = NULL;
··· 1751 else 1752 dev_priv->flags |= RADEON_IS_PCI; 1753 1754 + ret = drm_addmap(dev, drm_get_resource_start(dev, 2), 1755 + drm_get_resource_len(dev, 2), _DRM_REGISTERS, 1756 + _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio); 1757 + if (ret != 0) 1758 + return ret; 1759 + 1760 DRM_DEBUG("%s card detected\n", 1761 ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI")))); 1762 return ret; ··· 1767 1768 dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; 1769 1770 dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0); 1771 ret = drm_addmap(dev, dev_priv->fb_aper_offset, 1772 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER, ··· 1788 drm_radeon_private_t *dev_priv = dev->dev_private; 1789 1790 DRM_DEBUG("\n"); 1791 + 1792 + drm_rmmap(dev, dev_priv->mmio); 1793 + 1794 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); 1795 1796 dev->dev_private = NULL;
+1 -1
drivers/gpu/drm/radeon/radeon_drv.h
··· 287 unsigned long gart_textures_offset; 288 289 drm_local_map_t *sarea; 290 - drm_local_map_t *mmio; 291 drm_local_map_t *cp_ring; 292 drm_local_map_t *ring_rptr; 293 drm_local_map_t *gart_textures; ··· 317 318 int num_gb_pipes; 319 int track_flush; 320 } drm_radeon_private_t; 321 322 typedef struct drm_radeon_buf_priv {
··· 287 unsigned long gart_textures_offset; 288 289 drm_local_map_t *sarea; 290 drm_local_map_t *cp_ring; 291 drm_local_map_t *ring_rptr; 292 drm_local_map_t *gart_textures; ··· 318 319 int num_gb_pipes; 320 int track_flush; 321 + drm_local_map_t *mmio; 322 } drm_radeon_private_t; 323 324 typedef struct drm_radeon_buf_priv {
-5
include/drm/drmP.h
··· 861 struct timer_list vblank_disable_timer; 862 863 u32 max_vblank_count; /**< size of vblank counter register */ 864 - spinlock_t tasklet_lock; /**< For drm_locked_tasklet */ 865 - void (*locked_tasklet_func)(struct drm_device *dev); 866 867 /*@} */ 868 cycles_t ctx_start; ··· 1147 extern int drm_wait_vblank(struct drm_device *dev, void *data, 1148 struct drm_file *filp); 1149 extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); 1150 - extern void drm_locked_tasklet(struct drm_device *dev, 1151 - void(*func)(struct drm_device *)); 1152 extern u32 drm_vblank_count(struct drm_device *dev, int crtc); 1153 extern void drm_handle_vblank(struct drm_device *dev, int crtc); 1154 extern int drm_vblank_get(struct drm_device *dev, int crtc); ··· 1154 /* Modesetting support */ 1155 extern int drm_modeset_ctl(struct drm_device *dev, void *data, 1156 struct drm_file *file_priv); 1157 - extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*)); 1158 1159 /* AGP/GART support (drm_agpsupport.h) */ 1160 extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
··· 861 struct timer_list vblank_disable_timer; 862 863 u32 max_vblank_count; /**< size of vblank counter register */ 864 865 /*@} */ 866 cycles_t ctx_start; ··· 1149 extern int drm_wait_vblank(struct drm_device *dev, void *data, 1150 struct drm_file *filp); 1151 extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); 1152 extern u32 drm_vblank_count(struct drm_device *dev, int crtc); 1153 extern void drm_handle_vblank(struct drm_device *dev, int crtc); 1154 extern int drm_vblank_get(struct drm_device *dev, int crtc); ··· 1158 /* Modesetting support */ 1159 extern int drm_modeset_ctl(struct drm_device *dev, void *data, 1160 struct drm_file *file_priv); 1161 1162 /* AGP/GART support (drm_agpsupport.h) */ 1163 extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
+23 -23
include/drm/drm_pciids.h
··· 395 {0, 0, 0} 396 397 #define i915_PCI_IDS \ 398 - {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 399 - {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 400 - {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 401 - {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 402 - {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 403 - {0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 404 - {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 405 - {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 406 - {0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 407 - {0x8086, 0x27ae, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 408 - {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 409 - {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 410 - {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 411 - {0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 412 - {0x8086, 0x29b2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 413 - {0x8086, 0x29c2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 414 - {0x8086, 0x29d2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 415 - {0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 416 - {0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 417 - {0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 418 - {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 419 - {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 420 - {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 421 {0, 0, 0}
··· 395 {0, 0, 0} 396 397 #define i915_PCI_IDS \ 398 + {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 399 + {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 400 + {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 401 + {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 402 + {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 403 + {0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 404 + {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 405 + {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 406 + {0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 407 + {0x8086, 0x27ae, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 408 + {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 409 + {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 410 + {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 411 + {0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 412 + {0x8086, 0x29b2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 413 + {0x8086, 0x29c2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 414 + {0x8086, 0x29d2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 415 + {0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 416 + {0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 417 + {0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 418 + {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 419 + {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 420 + {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 421 {0, 0, 0}