Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/vbl rework: rework how the drm deals with vblank.

Other Authors: Michel Dänzer <michel@tungstengraphics.com>
mga: Ian Romanick <idr@us.ibm.com>
via: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>

This re-works the DRM internals to provide a better interface for drivers
to expose vblank on multiple crtcs.

It also includes work done by Michel on making i915 triple buffering and pageflipping work properly.

Signed-off-by: Dave Airlie <airlied@redhat.com>

Authored by Jesse Barnes and committed by Dave Airlie
ac741ab7 2c14f28b

+1373 -474
+17
drivers/char/drm/drm.h
··· 471 471 enum drm_vblank_seq_type { 472 472 _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ 473 473 _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ 474 + _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ 474 475 _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ 475 476 _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ 476 477 _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */ ··· 502 501 union drm_wait_vblank { 503 502 struct drm_wait_vblank_request request; 504 503 struct drm_wait_vblank_reply reply; 504 + }; 505 + 506 + enum drm_modeset_ctl_cmd { 507 + _DRM_PRE_MODESET = 1, 508 + _DRM_POST_MODESET = 2, 509 + }; 510 + 511 + /** 512 + * DRM_IOCTL_MODESET_CTL ioctl argument type 513 + * 514 + * \sa drmModesetCtl(). 515 + */ 516 + struct drm_modeset_ctl { 517 + unsigned long arg; 518 + enum drm_modeset_ctl_cmd cmd; 505 519 }; 506 520 507 521 /** ··· 603 587 #define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client) 604 588 #define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) 605 589 #define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) 590 + #define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl) 606 591 607 592 #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) 608 593 #define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
+74 -17
drivers/char/drm/drmP.h
··· 100 100 #define DRIVER_HAVE_DMA 0x20 101 101 #define DRIVER_HAVE_IRQ 0x40 102 102 #define DRIVER_IRQ_SHARED 0x80 103 - #define DRIVER_IRQ_VBL 0x100 104 103 #define DRIVER_DMA_QUEUE 0x200 105 104 #define DRIVER_FB_DMA 0x400 106 - #define DRIVER_IRQ_VBL2 0x800 107 105 108 106 /***********************************************************************/ 109 107 /** \name Begin the DRM... */ ··· 577 579 int (*context_dtor) (struct drm_device *dev, int context); 578 580 int (*kernel_context_switch) (struct drm_device *dev, int old, 579 581 int new); 580 - void (*kernel_context_switch_unlock) (struct drm_device *dev); 581 - int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence); 582 - int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence); 583 - int (*dri_library_name) (struct drm_device *dev, char *buf); 582 + void (*kernel_context_switch_unlock) (struct drm_device * dev); 583 + /** 584 + * get_vblank_counter - get raw hardware vblank counter 585 + * @dev: DRM device 586 + * @crtc: counter to fetch 587 + * 588 + * Driver callback for fetching a raw hardware vblank counter 589 + * for @crtc. If a device doesn't have a hardware counter, the 590 + * driver can simply return the value of drm_vblank_count and 591 + * make the enable_vblank() and disable_vblank() hooks into no-ops, 592 + * leaving interrupts enabled at all times. 593 + * 594 + * Wraparound handling and loss of events due to modesetting is dealt 595 + * with in the DRM core code. 596 + * 597 + * RETURNS 598 + * Raw vblank counter value. 599 + */ 600 + u32 (*get_vblank_counter) (struct drm_device *dev, int crtc); 601 + 602 + /** 603 + * enable_vblank - enable vblank interrupt events 604 + * @dev: DRM device 605 + * @crtc: which irq to enable 606 + * 607 + * Enable vblank interrupts for @crtc. If the device doesn't have 608 + * a hardware vblank counter, this routine should be a no-op, since 609 + * interrupts will have to stay on to keep the count accurate. 
610 + * 611 + * RETURNS 612 + * Zero on success, appropriate errno if the given @crtc's vblank 613 + * interrupt cannot be enabled. 614 + */ 615 + int (*enable_vblank) (struct drm_device *dev, int crtc); 616 + 617 + /** 618 + * disable_vblank - disable vblank interrupt events 619 + * @dev: DRM device 620 + * @crtc: which irq to enable 621 + * 622 + * Disable vblank interrupts for @crtc. If the device doesn't have 623 + * a hardware vblank counter, this routine should be a no-op, since 624 + * interrupts will have to stay on to keep the count accurate. 625 + */ 626 + void (*disable_vblank) (struct drm_device *dev, int crtc); 627 + int (*dri_library_name) (struct drm_device *dev, char * buf); 584 628 585 629 /** 586 630 * Called by \c drm_device_is_agp. Typically used to determine if a ··· 641 601 642 602 irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); 643 603 void (*irq_preinstall) (struct drm_device *dev); 644 - void (*irq_postinstall) (struct drm_device *dev); 604 + int (*irq_postinstall) (struct drm_device *dev); 645 605 void (*irq_uninstall) (struct drm_device *dev); 646 606 void (*reclaim_buffers) (struct drm_device *dev, 647 607 struct drm_file * file_priv); ··· 770 730 /** \name VBLANK IRQ support */ 771 731 /*@{ */ 772 732 773 - wait_queue_head_t vbl_queue; /**< VBLANK wait queue */ 774 - atomic_t vbl_received; 775 - atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */ 733 + wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */ 734 + atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */ 776 735 spinlock_t vbl_lock; 777 - struct list_head vbl_sigs; /**< signal list to send on VBLANK */ 778 - struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */ 779 - unsigned int vbl_pending; 736 + struct list_head *vbl_sigs; /**< signal list to send on VBLANK */ 737 + atomic_t vbl_signal_pending; /* number of signals pending on all crtcs*/ 738 + atomic_t *vblank_refcount; /* number of 
users of vblank interrupts per crtc */ 739 + u32 *last_vblank; /* protected by dev->vbl_lock, used */ 740 + /* for wraparound handling */ 741 + u32 *vblank_offset; /* used to track how many vblanks */ 742 + int *vblank_enabled; /* so we don't call enable more than 743 + once per disable */ 744 + u32 *vblank_premodeset; /* were lost during modeset */ 745 + struct timer_list vblank_disable_timer; 746 + 747 + unsigned long max_vblank_count; /**< size of vblank counter register */ 780 748 spinlock_t tasklet_lock; /**< For drm_locked_tasklet */ 781 749 void (*locked_tasklet_func)(struct drm_device *dev); 782 750 ··· 804 756 #ifdef __alpha__ 805 757 struct pci_controller *hose; 806 758 #endif 759 + int num_crtcs; /**< Number of CRTCs on this device */ 807 760 struct drm_sg_mem *sg; /**< Scatter gather memory */ 808 761 void *dev_private; /**< device private data */ 809 762 struct drm_sigdata sigdata; /**< For block_all_signals */ ··· 1039 990 extern void drm_driver_irq_postinstall(struct drm_device *dev); 1040 991 extern void drm_driver_irq_uninstall(struct drm_device *dev); 1041 992 1042 - extern int drm_wait_vblank(struct drm_device *dev, void *data, 1043 - struct drm_file *file_priv); 1044 - extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); 1045 - extern void drm_vbl_send_signals(struct drm_device *dev); 993 + extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); 994 + extern int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *filp); 995 + extern int drm_vblank_wait(struct drm_device * dev, unsigned int *vbl_seq); 1046 996 extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*)); 997 + extern u32 drm_vblank_count(struct drm_device *dev, int crtc); 998 + extern void drm_update_vblank_count(struct drm_device *dev, int crtc); 999 + extern void drm_handle_vblank(struct drm_device *dev, int crtc); 1000 + extern int drm_vblank_get(struct drm_device *dev, int crtc); 1001 + extern void 
drm_vblank_put(struct drm_device *dev, int crtc); 1002 + 1003 + /* Modesetting support */ 1004 + extern int drm_modeset_ctl(struct drm_device *dev, void *data, 1005 + struct drm_file *file_priv); 1047 1006 1048 1007 /* AGP/GART support (drm_agpsupport.h) */ 1049 1008 extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
+324 -55
drivers/char/drm/drm_irq.c
··· 71 71 return 0; 72 72 } 73 73 74 + static void vblank_disable_fn(unsigned long arg) 75 + { 76 + struct drm_device *dev = (struct drm_device *)arg; 77 + unsigned long irqflags; 78 + int i; 79 + 80 + for (i = 0; i < dev->num_crtcs; i++) { 81 + spin_lock_irqsave(&dev->vbl_lock, irqflags); 82 + if (atomic_read(&dev->vblank_refcount[i]) == 0 && 83 + dev->vblank_enabled[i]) { 84 + dev->driver->disable_vblank(dev, i); 85 + dev->vblank_enabled[i] = 0; 86 + } 87 + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 88 + } 89 + } 90 + 91 + static void drm_vblank_cleanup(struct drm_device *dev) 92 + { 93 + /* Bail if the driver didn't call drm_vblank_init() */ 94 + if (dev->num_crtcs == 0) 95 + return; 96 + 97 + del_timer(&dev->vblank_disable_timer); 98 + 99 + vblank_disable_fn((unsigned long)dev); 100 + 101 + drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs, 102 + DRM_MEM_DRIVER); 103 + drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs, 104 + DRM_MEM_DRIVER); 105 + drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) * 106 + dev->num_crtcs, DRM_MEM_DRIVER); 107 + drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) * 108 + dev->num_crtcs, DRM_MEM_DRIVER); 109 + drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) * 110 + dev->num_crtcs, DRM_MEM_DRIVER); 111 + drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs, 112 + DRM_MEM_DRIVER); 113 + drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) * 114 + dev->num_crtcs, DRM_MEM_DRIVER); 115 + drm_free(dev->vblank_offset, sizeof(*dev->vblank_offset) * dev->num_crtcs, 116 + DRM_MEM_DRIVER); 117 + 118 + dev->num_crtcs = 0; 119 + } 120 + 121 + int drm_vblank_init(struct drm_device *dev, int num_crtcs) 122 + { 123 + int i, ret = -ENOMEM; 124 + 125 + setup_timer(&dev->vblank_disable_timer, vblank_disable_fn, 126 + (unsigned long)dev); 127 + spin_lock_init(&dev->vbl_lock); 128 + atomic_set(&dev->vbl_signal_pending, 0); 129 + dev->num_crtcs = num_crtcs; 130 
+ 131 + dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs, 132 + DRM_MEM_DRIVER); 133 + if (!dev->vbl_queue) 134 + goto err; 135 + 136 + dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs, 137 + DRM_MEM_DRIVER); 138 + if (!dev->vbl_sigs) 139 + goto err; 140 + 141 + dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs, 142 + DRM_MEM_DRIVER); 143 + if (!dev->_vblank_count) 144 + goto err; 145 + 146 + dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs, 147 + DRM_MEM_DRIVER); 148 + if (!dev->vblank_refcount) 149 + goto err; 150 + 151 + dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int), 152 + DRM_MEM_DRIVER); 153 + if (!dev->vblank_enabled) 154 + goto err; 155 + 156 + dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER); 157 + if (!dev->last_vblank) 158 + goto err; 159 + 160 + dev->vblank_premodeset = drm_calloc(num_crtcs, sizeof(u32), 161 + DRM_MEM_DRIVER); 162 + if (!dev->vblank_premodeset) 163 + goto err; 164 + 165 + dev->vblank_offset = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER); 166 + if (!dev->vblank_offset) 167 + goto err; 168 + 169 + /* Zero per-crtc vblank stuff */ 170 + for (i = 0; i < num_crtcs; i++) { 171 + init_waitqueue_head(&dev->vbl_queue[i]); 172 + INIT_LIST_HEAD(&dev->vbl_sigs[i]); 173 + atomic_set(&dev->_vblank_count[i], 0); 174 + atomic_set(&dev->vblank_refcount[i], 0); 175 + } 176 + 177 + return 0; 178 + 179 + err: 180 + drm_vblank_cleanup(dev); 181 + return ret; 182 + } 183 + EXPORT_SYMBOL(drm_vblank_init); 184 + 74 185 /** 75 186 * Install IRQ handler. 
76 187 * ··· 220 109 221 110 DRM_DEBUG("irq=%d\n", dev->irq); 222 111 223 - if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) { 224 - init_waitqueue_head(&dev->vbl_queue); 225 - 226 - spin_lock_init(&dev->vbl_lock); 227 - 228 - INIT_LIST_HEAD(&dev->vbl_sigs); 229 - INIT_LIST_HEAD(&dev->vbl_sigs2); 230 - 231 - dev->vbl_pending = 0; 232 - } 233 - 234 112 /* Before installing handler */ 235 113 dev->driver->irq_preinstall(dev); 236 114 ··· 237 137 } 238 138 239 139 /* After installing handler */ 240 - dev->driver->irq_postinstall(dev); 140 + ret = dev->driver->irq_postinstall(dev); 141 + if (ret < 0) { 142 + mutex_lock(&dev->struct_mutex); 143 + dev->irq_enabled = 0; 144 + mutex_unlock(&dev->struct_mutex); 145 + } 241 146 242 - return 0; 147 + return ret; 243 148 } 244 149 245 150 /** ··· 274 169 dev->driver->irq_uninstall(dev); 275 170 276 171 free_irq(dev->irq, dev); 172 + 173 + drm_vblank_cleanup(dev); 277 174 278 175 dev->locked_tasklet_func = NULL; 279 176 ··· 321 214 } 322 215 323 216 /** 217 + * drm_vblank_count - retrieve "cooked" vblank counter value 218 + * @dev: DRM device 219 + * @crtc: which counter to retrieve 220 + * 221 + * Fetches the "cooked" vblank count value that represents the number of 222 + * vblank events since the system was booted, including lost events due to 223 + * modesetting activity. 224 + */ 225 + u32 drm_vblank_count(struct drm_device *dev, int crtc) 226 + { 227 + return atomic_read(&dev->_vblank_count[crtc]) + 228 + dev->vblank_offset[crtc]; 229 + } 230 + EXPORT_SYMBOL(drm_vblank_count); 231 + 232 + /** 233 + * drm_update_vblank_count - update the master vblank counter 234 + * @dev: DRM device 235 + * @crtc: counter to update 236 + * 237 + * Call back into the driver to update the appropriate vblank counter 238 + * (specified by @crtc). Deal with wraparound, if it occurred, and 239 + * update the last read value so we can deal with wraparound on the next 240 + * call if necessary. 
241 + */ 242 + void drm_update_vblank_count(struct drm_device *dev, int crtc) 243 + { 244 + unsigned long irqflags; 245 + u32 cur_vblank, diff; 246 + 247 + /* 248 + * Interrupts were disabled prior to this call, so deal with counter 249 + * wrap if needed. 250 + * NOTE! It's possible we lost a full dev->max_vblank_count events 251 + * here if the register is small or we had vblank interrupts off for 252 + * a long time. 253 + */ 254 + cur_vblank = dev->driver->get_vblank_counter(dev, crtc); 255 + spin_lock_irqsave(&dev->vbl_lock, irqflags); 256 + if (cur_vblank < dev->last_vblank[crtc]) { 257 + diff = dev->max_vblank_count - 258 + dev->last_vblank[crtc]; 259 + diff += cur_vblank; 260 + } else { 261 + diff = cur_vblank - dev->last_vblank[crtc]; 262 + } 263 + dev->last_vblank[crtc] = cur_vblank; 264 + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 265 + 266 + atomic_add(diff, &dev->_vblank_count[crtc]); 267 + } 268 + EXPORT_SYMBOL(drm_update_vblank_count); 269 + 270 + /** 271 + * drm_vblank_get - get a reference count on vblank events 272 + * @dev: DRM device 273 + * @crtc: which CRTC to own 274 + * 275 + * Acquire a reference count on vblank events to avoid having them disabled 276 + * while in use. Note callers will probably want to update the master counter 277 + * using drm_update_vblank_count() above before calling this routine so that 278 + * wakeups occur on the right vblank event. 279 + * 280 + * RETURNS 281 + * Zero on success, nonzero on failure. 
282 + */ 283 + int drm_vblank_get(struct drm_device *dev, int crtc) 284 + { 285 + unsigned long irqflags; 286 + int ret = 0; 287 + 288 + spin_lock_irqsave(&dev->vbl_lock, irqflags); 289 + /* Going from 0->1 means we have to enable interrupts again */ 290 + if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 && 291 + !dev->vblank_enabled[crtc]) { 292 + ret = dev->driver->enable_vblank(dev, crtc); 293 + if (ret) 294 + atomic_dec(&dev->vblank_refcount[crtc]); 295 + else 296 + dev->vblank_enabled[crtc] = 1; 297 + } 298 + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 299 + 300 + return ret; 301 + } 302 + EXPORT_SYMBOL(drm_vblank_get); 303 + 304 + /** 305 + * drm_vblank_put - give up ownership of vblank events 306 + * @dev: DRM device 307 + * @crtc: which counter to give up 308 + * 309 + * Release ownership of a given vblank counter, turning off interrupts 310 + * if possible. 311 + */ 312 + void drm_vblank_put(struct drm_device *dev, int crtc) 313 + { 314 + /* Last user schedules interrupt disable */ 315 + if (atomic_dec_and_test(&dev->vblank_refcount[crtc])) 316 + mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ); 317 + } 318 + EXPORT_SYMBOL(drm_vblank_put); 319 + 320 + /** 321 + * drm_modeset_ctl - handle vblank event counter changes across mode switch 322 + * @DRM_IOCTL_ARGS: standard ioctl arguments 323 + * 324 + * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET 325 + * ioctls around modesetting so that any lost vblank events are accounted for. 
326 + */ 327 + int drm_modeset_ctl(struct drm_device *dev, void *data, 328 + struct drm_file *file_priv) 329 + { 330 + struct drm_modeset_ctl *modeset = data; 331 + int crtc, ret = 0; 332 + u32 new; 333 + 334 + crtc = modeset->arg; 335 + if (crtc >= dev->num_crtcs) { 336 + ret = -EINVAL; 337 + goto out; 338 + } 339 + 340 + switch (modeset->cmd) { 341 + case _DRM_PRE_MODESET: 342 + dev->vblank_premodeset[crtc] = 343 + dev->driver->get_vblank_counter(dev, crtc); 344 + break; 345 + case _DRM_POST_MODESET: 346 + new = dev->driver->get_vblank_counter(dev, crtc); 347 + dev->vblank_offset[crtc] = dev->vblank_premodeset[crtc] - new; 348 + break; 349 + default: 350 + ret = -EINVAL; 351 + break; 352 + } 353 + 354 + out: 355 + return ret; 356 + } 357 + 358 + /** 324 359 * Wait for VBLANK. 325 360 * 326 361 * \param inode device inode. ··· 481 232 * 482 233 * If a signal is not requested, then calls vblank_wait(). 483 234 */ 484 - int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv) 235 + int drm_wait_vblank(struct drm_device *dev, void *data, 236 + struct drm_file *file_priv) 485 237 { 486 238 union drm_wait_vblank *vblwait = data; 487 239 struct timeval now; 488 240 int ret = 0; 489 - unsigned int flags, seq; 241 + unsigned int flags, seq, crtc; 490 242 491 243 if ((!dev->irq) || (!dev->irq_enabled)) 492 244 return -EINVAL; ··· 501 251 } 502 252 503 253 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; 254 + crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; 504 255 505 - if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ? 506 - DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL)) 256 + if (crtc >= dev->num_crtcs) 507 257 return -EINVAL; 508 258 509 - seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? 
&dev->vbl_received2 510 - : &dev->vbl_received); 259 + drm_update_vblank_count(dev, crtc); 260 + seq = drm_vblank_count(dev, crtc); 511 261 512 262 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { 513 263 case _DRM_VBLANK_RELATIVE: ··· 526 276 527 277 if (flags & _DRM_VBLANK_SIGNAL) { 528 278 unsigned long irqflags; 529 - struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY) 530 - ? &dev->vbl_sigs2 : &dev->vbl_sigs; 279 + struct list_head *vbl_sigs = &dev->vbl_sigs[crtc]; 531 280 struct drm_vbl_sig *vbl_sig; 532 281 533 282 spin_lock_irqsave(&dev->vbl_lock, irqflags); ··· 547 298 } 548 299 } 549 300 550 - if (dev->vbl_pending >= 100) { 301 + if (atomic_read(&dev->vbl_signal_pending) >= 100) { 551 302 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 552 303 return -EBUSY; 553 304 } 554 305 555 - dev->vbl_pending++; 556 - 557 306 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 558 307 559 - if (! 560 - (vbl_sig = 561 - drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) { 308 + vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig), 309 + DRM_MEM_DRIVER); 310 + if (!vbl_sig) 562 311 return -ENOMEM; 312 + 313 + ret = drm_vblank_get(dev, crtc); 314 + if (ret) { 315 + drm_free(vbl_sig, sizeof(struct drm_vbl_sig), 316 + DRM_MEM_DRIVER); 317 + return ret; 563 318 } 564 319 565 - memset((void *)vbl_sig, 0, sizeof(*vbl_sig)); 320 + atomic_inc(&dev->vbl_signal_pending); 566 321 567 322 vbl_sig->sequence = vblwait->request.sequence; 568 323 vbl_sig->info.si_signo = vblwait->request.signal; ··· 580 327 581 328 vblwait->reply.sequence = seq; 582 329 } else { 583 - if (flags & _DRM_VBLANK_SECONDARY) { 584 - if (dev->driver->vblank_wait2) 585 - ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence); 586 - } else if (dev->driver->vblank_wait) 587 - ret = 588 - dev->driver->vblank_wait(dev, 589 - &vblwait->request.sequence); 330 + unsigned long cur_vblank; 590 331 332 + ret = drm_vblank_get(dev, crtc); 333 + if (ret) 334 + return ret; 335 + 
DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ, 336 + (((cur_vblank = drm_vblank_count(dev, crtc)) 337 + - vblwait->request.sequence) <= (1 << 23))); 338 + drm_vblank_put(dev, crtc); 591 339 do_gettimeofday(&now); 340 + 592 341 vblwait->reply.tval_sec = now.tv_sec; 593 342 vblwait->reply.tval_usec = now.tv_usec; 343 + vblwait->reply.sequence = cur_vblank; 594 344 } 595 345 596 346 done: ··· 604 348 * Send the VBLANK signals. 605 349 * 606 350 * \param dev DRM device. 351 + * \param crtc CRTC where the vblank event occurred 607 352 * 608 353 * Sends a signal for each task in drm_device::vbl_sigs and empties the list. 609 354 * 610 355 * If a signal is not requested, then calls vblank_wait(). 611 356 */ 612 - void drm_vbl_send_signals(struct drm_device * dev) 357 + static void drm_vbl_send_signals(struct drm_device * dev, int crtc) 613 358 { 359 + struct drm_vbl_sig *vbl_sig, *tmp; 360 + struct list_head *vbl_sigs; 361 + unsigned int vbl_seq; 614 362 unsigned long flags; 615 - int i; 616 363 617 364 spin_lock_irqsave(&dev->vbl_lock, flags); 618 365 619 - for (i = 0; i < 2; i++) { 620 - struct drm_vbl_sig *vbl_sig, *tmp; 621 - struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs; 622 - unsigned int vbl_seq = atomic_read(i ? 
&dev->vbl_received2 : 623 - &dev->vbl_received); 366 + vbl_sigs = &dev->vbl_sigs[crtc]; 367 + vbl_seq = drm_vblank_count(dev, crtc); 624 368 625 - list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) { 626 - if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) { 627 - vbl_sig->info.si_code = vbl_seq; 628 - send_sig_info(vbl_sig->info.si_signo, 629 - &vbl_sig->info, vbl_sig->task); 369 + list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) { 370 + if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) { 371 + vbl_sig->info.si_code = vbl_seq; 372 + send_sig_info(vbl_sig->info.si_signo, 373 + &vbl_sig->info, vbl_sig->task); 630 374 631 - list_del(&vbl_sig->head); 375 + list_del(&vbl_sig->head); 632 376 633 - drm_free(vbl_sig, sizeof(*vbl_sig), 634 - DRM_MEM_DRIVER); 635 - 636 - dev->vbl_pending--; 637 - } 638 - } 377 + drm_free(vbl_sig, sizeof(*vbl_sig), 378 + DRM_MEM_DRIVER); 379 + atomic_dec(&dev->vbl_signal_pending); 380 + drm_vblank_put(dev, crtc); 381 + } 639 382 } 640 383 641 384 spin_unlock_irqrestore(&dev->vbl_lock, flags); 642 385 } 643 386 644 - EXPORT_SYMBOL(drm_vbl_send_signals); 387 + /** 388 + * drm_handle_vblank - handle a vblank event 389 + * @dev: DRM device 390 + * @crtc: where this event occurred 391 + * 392 + * Drivers should call this routine in their vblank interrupt handlers to 393 + * update the vblank counter and send any signals that may be pending. 394 + */ 395 + void drm_handle_vblank(struct drm_device *dev, int crtc) 396 + { 397 + drm_update_vblank_count(dev, crtc); 398 + DRM_WAKEUP(&dev->vbl_queue[crtc]); 399 + drm_vbl_send_signals(dev, crtc); 400 + } 401 + EXPORT_SYMBOL(drm_handle_vblank); 645 402 646 403 /** 647 404 * Tasklet wrapper function.
+121 -43
drivers/char/drm/i915_dma.c
··· 415 415 drm_i915_private_t *dev_priv = dev->dev_private; 416 416 RING_LOCALS; 417 417 418 - dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter; 418 + if (++dev_priv->counter > BREADCRUMB_MASK) { 419 + dev_priv->counter = 1; 420 + DRM_DEBUG("Breadcrumb counter wrapped around\n"); 421 + } 419 422 420 - if (dev_priv->counter > 0x7FFFFFFFUL) 421 - dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1; 423 + if (dev_priv->sarea_priv) 424 + dev_priv->sarea_priv->last_enqueue = dev_priv->counter; 422 425 423 426 BEGIN_LP_RING(4); 424 427 OUT_RING(CMD_STORE_DWORD_IDX); ··· 429 426 OUT_RING(dev_priv->counter); 430 427 OUT_RING(0); 431 428 ADVANCE_LP_RING(); 429 + } 430 + 431 + int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush) 432 + { 433 + drm_i915_private_t *dev_priv = dev->dev_private; 434 + uint32_t flush_cmd = CMD_MI_FLUSH; 435 + RING_LOCALS; 436 + 437 + flush_cmd |= flush; 438 + 439 + i915_kernel_lost_context(dev); 440 + 441 + BEGIN_LP_RING(4); 442 + OUT_RING(flush_cmd); 443 + OUT_RING(0); 444 + OUT_RING(0); 445 + OUT_RING(0); 446 + ADVANCE_LP_RING(); 447 + 448 + return 0; 432 449 } 433 450 434 451 static int i915_dispatch_cmdbuffer(struct drm_device * dev, ··· 534 511 return 0; 535 512 } 536 513 537 - static int i915_dispatch_flip(struct drm_device * dev) 514 + static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync) 538 515 { 539 516 drm_i915_private_t *dev_priv = dev->dev_private; 517 + u32 num_pages, current_page, next_page, dspbase; 518 + int shift = 2 * plane, x, y; 540 519 RING_LOCALS; 541 520 542 - DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", 543 - __FUNCTION__, 544 - dev_priv->current_page, 545 - dev_priv->sarea_priv->pf_current_page); 521 + /* Calculate display base offset */ 522 + num_pages = dev_priv->sarea_priv->third_handle ? 
3 : 2; 523 + current_page = (dev_priv->sarea_priv->pf_current_page >> shift) & 0x3; 524 + next_page = (current_page + 1) % num_pages; 546 525 547 - i915_kernel_lost_context(dev); 548 - 549 - BEGIN_LP_RING(2); 550 - OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE); 551 - OUT_RING(0); 552 - ADVANCE_LP_RING(); 553 - 554 - BEGIN_LP_RING(6); 555 - OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); 556 - OUT_RING(0); 557 - if (dev_priv->current_page == 0) { 558 - OUT_RING(dev_priv->back_offset); 559 - dev_priv->current_page = 1; 560 - } else { 561 - OUT_RING(dev_priv->front_offset); 562 - dev_priv->current_page = 0; 526 + switch (next_page) { 527 + default: 528 + case 0: 529 + dspbase = dev_priv->sarea_priv->front_offset; 530 + break; 531 + case 1: 532 + dspbase = dev_priv->sarea_priv->back_offset; 533 + break; 534 + case 2: 535 + dspbase = dev_priv->sarea_priv->third_offset; 536 + break; 563 537 } 564 - OUT_RING(0); 565 - ADVANCE_LP_RING(); 566 538 567 - BEGIN_LP_RING(2); 568 - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); 569 - OUT_RING(0); 570 - ADVANCE_LP_RING(); 539 + if (plane == 0) { 540 + x = dev_priv->sarea_priv->planeA_x; 541 + y = dev_priv->sarea_priv->planeA_y; 542 + } else { 543 + x = dev_priv->sarea_priv->planeB_x; 544 + y = dev_priv->sarea_priv->planeB_y; 545 + } 571 546 572 - dev_priv->sarea_priv->last_enqueue = dev_priv->counter++; 547 + dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp; 548 + 549 + DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page, 550 + dspbase); 573 551 574 552 BEGIN_LP_RING(4); 575 - OUT_RING(CMD_STORE_DWORD_IDX); 576 - OUT_RING(20); 577 - OUT_RING(dev_priv->counter); 578 - OUT_RING(0); 553 + OUT_RING(sync ? 0 : 554 + (MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP : 555 + MI_WAIT_FOR_PLANE_A_FLIP))); 556 + OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) | 557 + (plane ? 
DISPLAY_PLANE_B : DISPLAY_PLANE_A)); 558 + OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp); 559 + OUT_RING(dspbase); 579 560 ADVANCE_LP_RING(); 580 561 581 - dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; 582 - return 0; 562 + dev_priv->sarea_priv->pf_current_page &= ~(0x3 << shift); 563 + dev_priv->sarea_priv->pf_current_page |= next_page << shift; 564 + } 565 + 566 + void i915_dispatch_flip(struct drm_device * dev, int planes, int sync) 567 + { 568 + drm_i915_private_t *dev_priv = dev->dev_private; 569 + int i; 570 + 571 + DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n", 572 + planes, dev_priv->sarea_priv->pf_current_page); 573 + 574 + i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH); 575 + 576 + for (i = 0; i < 2; i++) 577 + if (planes & (1 << i)) 578 + i915_do_dispatch_flip(dev, i, sync); 579 + 580 + i915_emit_breadcrumb(dev); 581 + 583 582 } 584 583 585 584 static int i915_quiescent(struct drm_device * dev) ··· 624 579 struct drm_file *file_priv) 625 580 { 626 581 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 627 - u32 *hw_status = dev_priv->hw_status_page; 628 582 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 629 583 dev_priv->sarea_priv; 630 584 drm_i915_batchbuffer_t *batch = data; ··· 646 602 647 603 ret = i915_dispatch_batchbuffer(dev, batch); 648 604 649 - sarea_priv->last_dispatch = (int)hw_status[5]; 605 + sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 650 606 return ret; 651 607 } 652 608 ··· 654 610 struct drm_file *file_priv) 655 611 { 656 612 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 657 - u32 *hw_status = dev_priv->hw_status_page; 658 613 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 659 614 dev_priv->sarea_priv; 660 615 drm_i915_cmdbuffer_t *cmdbuf = data; ··· 678 635 return ret; 679 636 } 680 637 681 - sarea_priv->last_dispatch = (int)hw_status[5]; 638 + sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 639 + return 0; 640 + } 641 + 642 + 
static int i915_do_cleanup_pageflip(struct drm_device * dev) 643 + { 644 + drm_i915_private_t *dev_priv = dev->dev_private; 645 + int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2; 646 + 647 + DRM_DEBUG("\n"); 648 + 649 + for (i = 0, planes = 0; i < 2; i++) 650 + if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) { 651 + dev_priv->sarea_priv->pf_current_page = 652 + (dev_priv->sarea_priv->pf_current_page & 653 + ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i)); 654 + 655 + planes |= 1 << i; 656 + } 657 + 658 + if (planes) 659 + i915_dispatch_flip(dev, planes, 0); 660 + 682 661 return 0; 683 662 } 684 663 685 664 static int i915_flip_bufs(struct drm_device *dev, void *data, 686 665 struct drm_file *file_priv) 687 666 { 688 - DRM_DEBUG("%s\n", __FUNCTION__); 667 + drm_i915_flip_t *param = data; 668 + 669 + DRM_DEBUG("\n"); 689 670 690 671 LOCK_TEST_WITH_RETURN(dev, file_priv); 691 672 692 - return i915_dispatch_flip(dev); 673 + /* This is really planes */ 674 + if (param->pipes & ~0x3) { 675 + DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n", 676 + param->pipes); 677 + return -EINVAL; 678 + } 679 + 680 + i915_dispatch_flip(dev, param->pipes, 0); 681 + 682 + return 0; 693 683 } 694 684 695 685 static int i915_getparam(struct drm_device *dev, void *data, ··· 883 807 if (!dev_priv) 884 808 return; 885 809 810 + if (drm_getsarea(dev) && dev_priv->sarea_priv) 811 + i915_do_cleanup_pageflip(dev); 886 812 if (dev_priv->agp_heap) 887 813 i915_mem_takedown(&(dev_priv->agp_heap)); 888 814
+36 -9
drivers/char/drm/i915_drm.h
··· 105 105 unsigned int rotated_tiled; 106 106 unsigned int rotated2_tiled; 107 107 108 - int pipeA_x; 109 - int pipeA_y; 110 - int pipeA_w; 111 - int pipeA_h; 112 - int pipeB_x; 113 - int pipeB_y; 114 - int pipeB_w; 115 - int pipeB_h; 108 + int planeA_x; 109 + int planeA_y; 110 + int planeA_w; 111 + int planeA_h; 112 + int planeB_x; 113 + int planeB_y; 114 + int planeB_w; 115 + int planeB_h; 116 + 117 + /* Triple buffering */ 118 + drm_handle_t third_handle; 119 + int third_offset; 120 + int third_size; 121 + unsigned int third_tiled; 122 + 123 + /* buffer object handles for the static buffers. May change 124 + * over the lifetime of the client, though it doesn't in our current 125 + * implementation. 126 + */ 127 + unsigned int front_bo_handle; 128 + unsigned int back_bo_handle; 129 + unsigned int third_bo_handle; 130 + unsigned int depth_bo_handle; 116 131 } drm_i915_sarea_t; 117 132 118 133 /* Flags for perf_boxes ··· 161 146 162 147 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 163 148 #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 164 - #define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP) 149 + #define DRM_IOCTL_I915_FLIP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FLIP, drm_i915_flip_t) 165 150 #define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) 166 151 #define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) 167 152 #define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) ··· 175 160 #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 176 161 #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 177 162 #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 
163 + 164 + /* Asynchronous page flipping: 165 + */ 166 + typedef struct drm_i915_flip { 167 + /* 168 + * This is really talking about planes, and we could rename it 169 + * except for the fact that some of the duplicated i915_drm.h files 170 + * out there check for HAVE_I915_FLIP and so might pick up this 171 + * version. 172 + */ 173 + int pipes; 174 + } drm_i915_flip_t; 178 175 179 176 /* Allow drivers to submit batchbuffers directly to hardware, relying 180 177 * on the security mechanisms provided by hardware.
+4 -4
drivers/char/drm/i915_drv.c
··· 533 533 */ 534 534 .driver_features = 535 535 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ 536 - DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL | 537 - DRIVER_IRQ_VBL2, 536 + DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 538 537 .load = i915_driver_load, 539 538 .unload = i915_driver_unload, 540 539 .lastclose = i915_driver_lastclose, ··· 541 542 .suspend = i915_suspend, 542 543 .resume = i915_resume, 543 544 .device_is_agp = i915_driver_device_is_agp, 544 - .vblank_wait = i915_driver_vblank_wait, 545 - .vblank_wait2 = i915_driver_vblank_wait2, 545 + .get_vblank_counter = i915_get_vblank_counter, 546 + .enable_vblank = i915_enable_vblank, 547 + .disable_vblank = i915_disable_vblank, 546 548 .irq_preinstall = i915_driver_irq_preinstall, 547 549 .irq_postinstall = i915_driver_irq_postinstall, 548 550 .irq_uninstall = i915_driver_irq_uninstall,
+90 -11
drivers/char/drm/i915_drv.h
··· 76 76 typedef struct _drm_i915_vbl_swap { 77 77 struct list_head head; 78 78 drm_drawable_t drw_id; 79 - unsigned int pipe; 79 + unsigned int plane; 80 80 unsigned int sequence; 81 + int flip; 81 82 } drm_i915_vbl_swap_t; 82 83 83 84 typedef struct drm_i915_private { ··· 91 90 drm_dma_handle_t *status_page_dmah; 92 91 void *hw_status_page; 93 92 dma_addr_t dma_status_page; 94 - unsigned long counter; 93 + uint32_t counter; 95 94 unsigned int status_gfx_addr; 96 95 drm_local_map_t hws_map; 97 96 ··· 104 103 105 104 wait_queue_head_t irq_queue; 106 105 atomic_t irq_received; 107 - atomic_t irq_emitted; 106 + atomic_t irq_emited; 108 107 109 108 int tex_lru_log_granularity; 110 109 int allow_batchbuffer; 111 110 struct mem_block *agp_heap; 112 111 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; 113 112 int vblank_pipe; 113 + spinlock_t user_irq_lock; 114 + int user_irq_refcount; 115 + int fence_irq_on; 116 + uint32_t irq_enable_reg; 117 + int irq_enabled; 114 118 115 119 spinlock_t swaps_lock; 116 120 drm_i915_vbl_swap_t vbl_swaps; ··· 222 216 extern int i915_driver_device_is_agp(struct drm_device * dev); 223 217 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 224 218 unsigned long arg); 225 - 219 + extern void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync); 226 220 /* i915_irq.c */ 227 221 extern int i915_irq_emit(struct drm_device *dev, void *data, 228 222 struct drm_file *file_priv); ··· 233 227 extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence); 234 228 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 235 229 extern void i915_driver_irq_preinstall(struct drm_device * dev); 236 - extern void i915_driver_irq_postinstall(struct drm_device * dev); 230 + extern int i915_driver_irq_postinstall(struct drm_device * dev); 237 231 extern void i915_driver_irq_uninstall(struct drm_device * dev); 238 232 extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, 239 233 struct drm_file 
*file_priv); ··· 241 235 struct drm_file *file_priv); 242 236 extern int i915_vblank_swap(struct drm_device *dev, void *data, 243 237 struct drm_file *file_priv); 238 + extern int i915_enable_vblank(struct drm_device *dev, int crtc); 239 + extern void i915_disable_vblank(struct drm_device *dev, int crtc); 240 + extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc); 244 241 245 242 /* i915_mem.c */ 246 243 extern int i915_mem_alloc(struct drm_device *dev, void *data, ··· 388 379 389 380 /* Interrupt bits: 390 381 */ 391 - #define USER_INT_FLAG (1<<1) 392 - #define VSYNC_PIPEB_FLAG (1<<5) 393 - #define VSYNC_PIPEA_FLAG (1<<7) 394 - #define HWB_OOM_FLAG (1<<13) /* binner out of memory */ 382 + #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) 383 + #define I915_DISPLAY_PORT_INTERRUPT (1<<17) 384 + #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) 385 + #define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) 386 + #define I915_HWB_OOM_INTERRUPT (1<<13) /* binner out of memory */ 387 + #define I915_SYNC_STATUS_INTERRUPT (1<<12) 388 + #define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) 389 + #define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10) 390 + #define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9) 391 + #define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8) 392 + #define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7) 393 + #define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6) 394 + #define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5) 395 + #define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4) 396 + #define I915_DEBUG_INTERRUPT (1<<2) 397 + #define I915_USER_INTERRUPT (1<<1) 398 + 395 399 396 400 #define I915REG_HWSTAM 0x02098 397 401 #define I915REG_INT_IDENTITY_R 0x020a4 398 402 #define I915REG_INT_MASK_R 0x020a8 399 403 #define I915REG_INT_ENABLE_R 0x020a0 404 + #define I915REG_INSTPM 0x020c0 405 + 406 + #define PIPEADSL 0x70000 407 + #define PIPEBDSL 0x71000 400 408 401 409 #define I915REG_PIPEASTAT 0x70024 402 410 #define 
I915REG_PIPEBSTAT 0x71024 411 + /* 412 + * The two pipe frame counter registers are not synchronized, so 413 + * reading a stable value is somewhat tricky. The following code 414 + * should work: 415 + * 416 + * do { 417 + * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> 418 + * PIPE_FRAME_HIGH_SHIFT; 419 + * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >> 420 + * PIPE_FRAME_LOW_SHIFT); 421 + * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> 422 + * PIPE_FRAME_HIGH_SHIFT); 423 + * } while (high1 != high2); 424 + * frame = (high1 << 8) | low1; 425 + */ 426 + #define PIPEAFRAMEHIGH 0x70040 427 + #define PIPEBFRAMEHIGH 0x71040 428 + #define PIPE_FRAME_HIGH_MASK 0x0000ffff 429 + #define PIPE_FRAME_HIGH_SHIFT 0 430 + #define PIPEAFRAMEPIXEL 0x70044 431 + #define PIPEBFRAMEPIXEL 0x71044 403 432 404 - #define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17) 405 - #define I915_VBLANK_CLEAR (1UL<<1) 433 + #define PIPE_FRAME_LOW_MASK 0xff000000 434 + #define PIPE_FRAME_LOW_SHIFT 24 435 + /* 436 + * Pixel within the current frame is counted in the PIPEAFRAMEPIXEL register 437 + * and is 24 bits wide. 
438 + */ 439 + #define PIPE_PIXEL_MASK 0x00ffffff 440 + #define PIPE_PIXEL_SHIFT 0 441 + 442 + #define I915_FIFO_UNDERRUN_STATUS (1UL<<31) 443 + #define I915_CRC_ERROR_ENABLE (1UL<<29) 444 + #define I915_CRC_DONE_ENABLE (1UL<<28) 445 + #define I915_GMBUS_EVENT_ENABLE (1UL<<27) 446 + #define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25) 447 + #define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) 448 + #define I915_DPST_EVENT_ENABLE (1UL<<23) 449 + #define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22) 450 + #define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) 451 + #define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) 452 + #define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ 453 + #define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17) 454 + #define I915_OVERLAY_UPDATED_ENABLE (1UL<<16) 455 + #define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) 456 + #define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12) 457 + #define I915_GMBUS_INTERRUPT_STATUS (1UL<<11) 458 + #define I915_VSYNC_INTERRUPT_STATUS (1UL<<9) 459 + #define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) 460 + #define I915_DPST_EVENT_STATUS (1UL<<7) 461 + #define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6) 462 + #define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) 463 + #define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) 464 + #define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ 465 + #define I915_VBLANK_INTERRUPT_STATUS (1UL<<1) 466 + #define I915_OVERLAY_UPDATED_STATUS (1UL<<0) 406 467 407 468 #define SRX_INDEX 0x3c4 408 469 #define SRX_DATA 0x3c5
+443 -150
drivers/char/drm/i915_irq.c
··· 38 38 #define MAX_NOPID ((u32)~0) 39 39 40 40 /** 41 + * i915_get_pipe - return the the pipe associated with a given plane 42 + * @dev: DRM device 43 + * @plane: plane to look for 44 + * 45 + * The Intel Mesa & 2D drivers call the vblank routines with a plane number 46 + * rather than a pipe number, since they may not always be equal. This routine 47 + * maps the given @plane back to a pipe number. 48 + */ 49 + static int 50 + i915_get_pipe(struct drm_device *dev, int plane) 51 + { 52 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 53 + u32 dspcntr; 54 + 55 + dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR); 56 + 57 + return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0; 58 + } 59 + 60 + /** 61 + * i915_get_plane - return the the plane associated with a given pipe 62 + * @dev: DRM device 63 + * @pipe: pipe to look for 64 + * 65 + * The Intel Mesa & 2D drivers call the vblank routines with a plane number 66 + * rather than a plane number, since they may not always be equal. This routine 67 + * maps the given @pipe back to a plane number. 68 + */ 69 + static int 70 + i915_get_plane(struct drm_device *dev, int pipe) 71 + { 72 + if (i915_get_pipe(dev, 0) == pipe) 73 + return 0; 74 + return 1; 75 + } 76 + 77 + /** 78 + * i915_pipe_enabled - check if a pipe is enabled 79 + * @dev: DRM device 80 + * @pipe: pipe to check 81 + * 82 + * Reading certain registers when the pipe is disabled can hang the chip. 83 + * Use this routine to make sure the PLL is running and the pipe is active 84 + * before reading such registers if unsure. 85 + */ 86 + static int 87 + i915_pipe_enabled(struct drm_device *dev, int pipe) 88 + { 89 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 90 + unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF; 91 + 92 + if (I915_READ(pipeconf) & PIPEACONF_ENABLE) 93 + return 1; 94 + 95 + return 0; 96 + } 97 + 98 + /** 99 + * Emit a synchronous flip. 
100 + * 101 + * This function must be called with the drawable spinlock held. 102 + */ 103 + static void 104 + i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw, 105 + int plane) 106 + { 107 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 108 + drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; 109 + u16 x1, y1, x2, y2; 110 + int pf_planes = 1 << plane; 111 + 112 + /* If the window is visible on the other plane, we have to flip on that 113 + * plane as well. 114 + */ 115 + if (plane == 1) { 116 + x1 = sarea_priv->planeA_x; 117 + y1 = sarea_priv->planeA_y; 118 + x2 = x1 + sarea_priv->planeA_w; 119 + y2 = y1 + sarea_priv->planeA_h; 120 + } else { 121 + x1 = sarea_priv->planeB_x; 122 + y1 = sarea_priv->planeB_y; 123 + x2 = x1 + sarea_priv->planeB_w; 124 + y2 = y1 + sarea_priv->planeB_h; 125 + } 126 + 127 + if (x2 > 0 && y2 > 0) { 128 + int i, num_rects = drw->num_rects; 129 + struct drm_clip_rect *rect = drw->rects; 130 + 131 + for (i = 0; i < num_rects; i++) 132 + if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 || 133 + rect[i].x2 <= x1 || rect[i].y2 <= y1)) { 134 + pf_planes = 0x3; 135 + 136 + break; 137 + } 138 + } 139 + 140 + i915_dispatch_flip(dev, pf_planes, 1); 141 + } 142 + 143 + /** 41 144 * Emit blits for scheduled buffer swaps. 42 145 * 43 146 * This function will be called with the HW lock held. 
··· 148 45 static void i915_vblank_tasklet(struct drm_device *dev) 149 46 { 150 47 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 151 - unsigned long irqflags; 152 48 struct list_head *list, *tmp, hits, *hit; 153 - int nhits, nrects, slice[2], upper[2], lower[2], i; 154 - unsigned counter[2] = { atomic_read(&dev->vbl_received), 155 - atomic_read(&dev->vbl_received2) }; 49 + int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages; 50 + unsigned counter[2]; 156 51 struct drm_drawable_info *drw; 157 52 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; 158 - u32 cpp = dev_priv->cpp; 53 + u32 cpp = dev_priv->cpp, offsets[3]; 159 54 u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD | 160 55 XY_SRC_COPY_BLT_WRITE_ALPHA | 161 56 XY_SRC_COPY_BLT_WRITE_RGB) 162 57 : XY_SRC_COPY_BLT_CMD; 163 58 u32 src_pitch = sarea_priv->pitch * cpp; 164 59 u32 dst_pitch = sarea_priv->pitch * cpp; 60 + /* COPY rop (0xcc), map cpp to magic color depth constants */ 165 61 u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24); 166 62 RING_LOCALS; 167 63 ··· 173 71 src_pitch >>= 2; 174 72 } 175 73 74 + counter[0] = drm_vblank_count(dev, 0); 75 + counter[1] = drm_vblank_count(dev, 1); 76 + 176 77 DRM_DEBUG("\n"); 177 78 178 79 INIT_LIST_HEAD(&hits); 179 80 180 81 nhits = nrects = 0; 181 82 182 - spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); 83 + /* No irqsave/restore necessary. This tasklet may be run in an 84 + * interrupt context or normal context, but we don't have to worry 85 + * about getting interrupted by something acquiring the lock, because 86 + * we are the interrupt context thing that acquires the lock. 
87 + */ 88 + spin_lock(&dev_priv->swaps_lock); 183 89 184 90 /* Find buffer swaps scheduled for this vertical blank */ 185 91 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { 186 92 drm_i915_vbl_swap_t *vbl_swap = 187 93 list_entry(list, drm_i915_vbl_swap_t, head); 94 + int pipe = i915_get_pipe(dev, vbl_swap->plane); 188 95 189 - if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23)) 96 + if ((counter[pipe] - vbl_swap->sequence) > (1<<23)) 190 97 continue; 191 98 192 99 list_del(list); 193 100 dev_priv->swaps_pending--; 101 + drm_vblank_put(dev, pipe); 194 102 195 103 spin_unlock(&dev_priv->swaps_lock); 196 104 spin_lock(&dev->drw_lock); ··· 238 126 spin_lock(&dev_priv->swaps_lock); 239 127 } 240 128 241 - if (nhits == 0) { 242 - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 243 - return; 244 - } 245 - 246 129 spin_unlock(&dev_priv->swaps_lock); 130 + 131 + if (nhits == 0) 132 + return; 247 133 248 134 i915_kernel_lost_context(dev); 249 135 250 - if (IS_I965G(dev)) { 251 - BEGIN_LP_RING(4); 252 - 253 - OUT_RING(GFX_OP_DRAWRECT_INFO_I965); 254 - OUT_RING(0); 255 - OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16)); 256 - OUT_RING(0); 257 - ADVANCE_LP_RING(); 258 - } else { 259 - BEGIN_LP_RING(6); 260 - 261 - OUT_RING(GFX_OP_DRAWRECT_INFO); 262 - OUT_RING(0); 263 - OUT_RING(0); 264 - OUT_RING(sarea_priv->width | sarea_priv->height << 16); 265 - OUT_RING(sarea_priv->width | sarea_priv->height << 16); 266 - OUT_RING(0); 267 - 268 - ADVANCE_LP_RING(); 269 - } 270 - 271 - sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT; 272 - 273 136 upper[0] = upper[1] = 0; 274 - slice[0] = max(sarea_priv->pipeA_h / nhits, 1); 275 - slice[1] = max(sarea_priv->pipeB_h / nhits, 1); 276 - lower[0] = sarea_priv->pipeA_y + slice[0]; 277 - lower[1] = sarea_priv->pipeB_y + slice[0]; 137 + slice[0] = max(sarea_priv->planeA_h / nhits, 1); 138 + slice[1] = max(sarea_priv->planeB_h / nhits, 1); 139 + lower[0] = sarea_priv->planeA_y + slice[0]; 140 
+ lower[1] = sarea_priv->planeB_y + slice[0]; 141 + 142 + offsets[0] = sarea_priv->front_offset; 143 + offsets[1] = sarea_priv->back_offset; 144 + offsets[2] = sarea_priv->third_offset; 145 + num_pages = sarea_priv->third_handle ? 3 : 2; 278 146 279 147 spin_lock(&dev->drw_lock); 280 148 ··· 266 174 for (i = 0; i++ < nhits; 267 175 upper[0] = lower[0], lower[0] += slice[0], 268 176 upper[1] = lower[1], lower[1] += slice[1]) { 177 + int init_drawrect = 1; 178 + 269 179 if (i == nhits) 270 180 lower[0] = lower[1] = sarea_priv->height; 271 181 ··· 275 181 drm_i915_vbl_swap_t *swap_hit = 276 182 list_entry(hit, drm_i915_vbl_swap_t, head); 277 183 struct drm_clip_rect *rect; 278 - int num_rects, pipe; 184 + int num_rects, plane, front, back; 279 185 unsigned short top, bottom; 280 186 281 187 drw = drm_get_drawable_info(dev, swap_hit->drw_id); ··· 283 189 if (!drw) 284 190 continue; 285 191 192 + plane = swap_hit->plane; 193 + 194 + if (swap_hit->flip) { 195 + i915_dispatch_vsync_flip(dev, drw, plane); 196 + continue; 197 + } 198 + 199 + if (init_drawrect) { 200 + int width = sarea_priv->width; 201 + int height = sarea_priv->height; 202 + if (IS_I965G(dev)) { 203 + BEGIN_LP_RING(4); 204 + 205 + OUT_RING(GFX_OP_DRAWRECT_INFO_I965); 206 + OUT_RING(0); 207 + OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16)); 208 + OUT_RING(0); 209 + 210 + ADVANCE_LP_RING(); 211 + } else { 212 + BEGIN_LP_RING(6); 213 + 214 + OUT_RING(GFX_OP_DRAWRECT_INFO); 215 + OUT_RING(0); 216 + OUT_RING(0); 217 + OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16)); 218 + OUT_RING(0); 219 + OUT_RING(0); 220 + 221 + ADVANCE_LP_RING(); 222 + } 223 + 224 + sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT; 225 + 226 + init_drawrect = 0; 227 + } 228 + 286 229 rect = drw->rects; 287 - pipe = swap_hit->pipe; 288 - top = upper[pipe]; 289 - bottom = lower[pipe]; 230 + top = upper[plane]; 231 + bottom = lower[plane]; 232 + 233 + front = (dev_priv->sarea_priv->pf_current_page >> 234 + (2 * plane)) & 0x3; 235 + 
back = (front + 1) % num_pages; 290 236 291 237 for (num_rects = drw->num_rects; num_rects--; rect++) { 292 238 int y1 = max(rect->y1, top); ··· 341 207 OUT_RING(ropcpp | dst_pitch); 342 208 OUT_RING((y1 << 16) | rect->x1); 343 209 OUT_RING((y2 << 16) | rect->x2); 344 - OUT_RING(sarea_priv->front_offset); 210 + OUT_RING(offsets[front]); 345 211 OUT_RING((y1 << 16) | rect->x1); 346 212 OUT_RING(src_pitch); 347 - OUT_RING(sarea_priv->back_offset); 213 + OUT_RING(offsets[back]); 348 214 349 215 ADVANCE_LP_RING(); 350 216 } 351 217 } 352 218 } 353 219 354 - spin_unlock_irqrestore(&dev->drw_lock, irqflags); 220 + spin_unlock(&dev->drw_lock); 355 221 356 222 list_for_each_safe(hit, tmp, &hits) { 357 223 drm_i915_vbl_swap_t *swap_hit = ··· 363 229 } 364 230 } 365 231 232 + u32 i915_get_vblank_counter(struct drm_device *dev, int plane) 233 + { 234 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 235 + unsigned long high_frame; 236 + unsigned long low_frame; 237 + u32 high1, high2, low, count; 238 + int pipe; 239 + 240 + pipe = i915_get_pipe(dev, plane); 241 + high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH; 242 + low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; 243 + 244 + if (!i915_pipe_enabled(dev, pipe)) { 245 + printk(KERN_ERR "trying to get vblank count for disabled " 246 + "pipe %d\n", pipe); 247 + return 0; 248 + } 249 + 250 + /* 251 + * High & low register fields aren't synchronized, so make sure 252 + * we get a low value that's stable across two reads of the high 253 + * register. 254 + */ 255 + do { 256 + high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> 257 + PIPE_FRAME_HIGH_SHIFT); 258 + low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >> 259 + PIPE_FRAME_LOW_SHIFT); 260 + high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> 261 + PIPE_FRAME_HIGH_SHIFT); 262 + } while (high1 != high2); 263 + 264 + count = (high1 << 8) | low; 265 + 266 + /* count may be reset by other driver(e.g. 
2D driver), 267 + we have no way to know if it is wrapped or resetted 268 + when count is zero. do a rough guess. 269 + */ 270 + if (count == 0 && dev->last_vblank[pipe] < dev->max_vblank_count/2) 271 + dev->last_vblank[pipe] = 0; 272 + 273 + return count; 274 + } 275 + 366 276 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 367 277 { 368 278 struct drm_device *dev = (struct drm_device *) arg; 369 279 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 370 - u16 temp; 280 + u32 iir; 371 281 u32 pipea_stats, pipeb_stats; 282 + int vblank = 0; 372 283 373 - pipea_stats = I915_READ(I915REG_PIPEASTAT); 374 - pipeb_stats = I915_READ(I915REG_PIPEBSTAT); 375 - 376 - temp = I915_READ16(I915REG_INT_IDENTITY_R); 377 - 378 - temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG); 379 - 380 - DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp); 381 - 382 - if (temp == 0) 284 + iir = I915_READ(I915REG_INT_IDENTITY_R); 285 + if (iir == 0) { 286 + DRM_DEBUG ("iir 0x%08x im 0x%08x ie 0x%08x pipea 0x%08x pipeb 0x%08x\n", 287 + iir, 288 + I915_READ(I915REG_INT_MASK_R), 289 + I915_READ(I915REG_INT_ENABLE_R), 290 + I915_READ(I915REG_PIPEASTAT), 291 + I915_READ(I915REG_PIPEBSTAT)); 383 292 return IRQ_NONE; 293 + } 384 294 385 - I915_WRITE16(I915REG_INT_IDENTITY_R, temp); 386 - (void) I915_READ16(I915REG_INT_IDENTITY_R); 387 - DRM_READMEMORYBARRIER(); 295 + /* 296 + * Clear the PIPE(A|B)STAT regs before the IIR otherwise 297 + * we may get extra interrupts. 
298 + */ 299 + if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) { 300 + pipea_stats = I915_READ(I915REG_PIPEASTAT); 301 + if (pipea_stats & (I915_START_VBLANK_INTERRUPT_STATUS| 302 + I915_VBLANK_INTERRUPT_STATUS)) 303 + { 304 + vblank++; 305 + drm_handle_vblank(dev, i915_get_plane(dev, 0)); 306 + } 307 + I915_WRITE(I915REG_PIPEASTAT, pipea_stats); 308 + } 309 + if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) { 310 + pipeb_stats = I915_READ(I915REG_PIPEBSTAT); 311 + if (pipeb_stats & (I915_START_VBLANK_INTERRUPT_STATUS| 312 + I915_VBLANK_INTERRUPT_STATUS)) 313 + { 314 + vblank++; 315 + drm_handle_vblank(dev, i915_get_plane(dev, 1)); 316 + } 317 + I915_WRITE(I915REG_PIPEBSTAT, pipeb_stats); 318 + } 388 319 389 - dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 320 + if (dev_priv->sarea_priv) 321 + dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 390 322 391 - if (temp & USER_INT_FLAG) 323 + I915_WRITE(I915REG_INT_IDENTITY_R, iir); 324 + (void) I915_READ(I915REG_INT_IDENTITY_R); /* Flush posted write */ 325 + 326 + if (iir & I915_USER_INTERRUPT) { 392 327 DRM_WAKEUP(&dev_priv->irq_queue); 393 - 394 - if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) { 395 - int vblank_pipe = dev_priv->vblank_pipe; 396 - 397 - if ((vblank_pipe & 398 - (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) 399 - == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) { 400 - if (temp & VSYNC_PIPEA_FLAG) 401 - atomic_inc(&dev->vbl_received); 402 - if (temp & VSYNC_PIPEB_FLAG) 403 - atomic_inc(&dev->vbl_received2); 404 - } else if (((temp & VSYNC_PIPEA_FLAG) && 405 - (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) || 406 - ((temp & VSYNC_PIPEB_FLAG) && 407 - (vblank_pipe & DRM_I915_VBLANK_PIPE_B))) 408 - atomic_inc(&dev->vbl_received); 409 - 410 - DRM_WAKEUP(&dev->vbl_queue); 411 - drm_vbl_send_signals(dev); 412 - 328 + } 329 + if (vblank) { 413 330 if (dev_priv->swaps_pending > 0) 414 331 drm_locked_tasklet(dev, i915_vblank_tasklet); 415 - I915_WRITE(I915REG_PIPEASTAT, 416 
- pipea_stats|I915_VBLANK_INTERRUPT_ENABLE| 417 - I915_VBLANK_CLEAR); 418 - I915_WRITE(I915REG_PIPEBSTAT, 419 - pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE| 420 - I915_VBLANK_CLEAR); 421 332 } 422 333 423 334 return IRQ_HANDLED; 424 335 } 425 336 426 - static int i915_emit_irq(struct drm_device * dev) 337 + static int i915_emit_irq(struct drm_device *dev) 427 338 { 428 339 drm_i915_private_t *dev_priv = dev->dev_private; 429 340 RING_LOCALS; ··· 515 336 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); 516 337 } 517 338 518 - dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 339 + if (dev_priv->sarea_priv) 340 + dev_priv->sarea_priv->last_dispatch = 341 + READ_BREADCRUMB(dev_priv); 519 342 return ret; 520 - } 521 - 522 - static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence, 523 - atomic_t *counter) 524 - { 525 - drm_i915_private_t *dev_priv = dev->dev_private; 526 - unsigned int cur_vblank; 527 - int ret = 0; 528 - 529 - if (!dev_priv) { 530 - DRM_ERROR("called with no initialization\n"); 531 - return -EINVAL; 532 - } 533 - 534 - DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 535 - (((cur_vblank = atomic_read(counter)) 536 - - *sequence) <= (1<<23))); 537 - 538 - *sequence = cur_vblank; 539 - 540 - return ret; 541 - } 542 - 543 - 544 - int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence) 545 - { 546 - return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received); 547 - } 548 - 549 - int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence) 550 - { 551 - return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2); 552 343 } 553 344 554 345 /* Needs the lock as it touches the ring. 
··· 563 414 return i915_wait_irq(dev, irqwait->irq_seq); 564 415 } 565 416 417 + int i915_enable_vblank(struct drm_device *dev, int plane) 418 + { 419 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 420 + int pipe = i915_get_pipe(dev, plane); 421 + u32 pipestat_reg = 0; 422 + u32 pipestat; 423 + 424 + switch (pipe) { 425 + case 0: 426 + pipestat_reg = I915REG_PIPEASTAT; 427 + dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 428 + break; 429 + case 1: 430 + pipestat_reg = I915REG_PIPEBSTAT; 431 + dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 432 + break; 433 + default: 434 + DRM_ERROR("tried to enable vblank on non-existent pipe %d\n", 435 + pipe); 436 + break; 437 + } 438 + 439 + if (pipestat_reg) 440 + { 441 + pipestat = I915_READ (pipestat_reg); 442 + /* 443 + * Older chips didn't have the start vblank interrupt, 444 + * but 445 + */ 446 + if (IS_I965G (dev)) 447 + pipestat |= I915_START_VBLANK_INTERRUPT_ENABLE; 448 + else 449 + pipestat |= I915_VBLANK_INTERRUPT_ENABLE; 450 + /* 451 + * Clear any pending status 452 + */ 453 + pipestat |= (I915_START_VBLANK_INTERRUPT_STATUS | 454 + I915_VBLANK_INTERRUPT_STATUS); 455 + I915_WRITE(pipestat_reg, pipestat); 456 + } 457 + I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg); 458 + 459 + return 0; 460 + } 461 + 462 + void i915_disable_vblank(struct drm_device *dev, int plane) 463 + { 464 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 465 + int pipe = i915_get_pipe(dev, plane); 466 + u32 pipestat_reg = 0; 467 + u32 pipestat; 468 + 469 + switch (pipe) { 470 + case 0: 471 + pipestat_reg = I915REG_PIPEASTAT; 472 + dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 473 + break; 474 + case 1: 475 + pipestat_reg = I915REG_PIPEBSTAT; 476 + dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 477 + break; 478 + default: 479 + DRM_ERROR("tried to disable vblank on non-existent pipe %d\n", 480 + pipe); 481 + 
break; 482 + } 483 + 484 + I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg); 485 + if (pipestat_reg) 486 + { 487 + pipestat = I915_READ (pipestat_reg); 488 + pipestat &= ~(I915_START_VBLANK_INTERRUPT_ENABLE | 489 + I915_VBLANK_INTERRUPT_ENABLE); 490 + /* 491 + * Clear any pending status 492 + */ 493 + pipestat |= (I915_START_VBLANK_INTERRUPT_STATUS | 494 + I915_VBLANK_INTERRUPT_STATUS); 495 + I915_WRITE(pipestat_reg, pipestat); 496 + } 497 + } 498 + 566 499 static void i915_enable_interrupt (struct drm_device *dev) 567 500 { 568 501 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 569 - u16 flag; 570 502 571 - flag = 0; 572 - if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A) 573 - flag |= VSYNC_PIPEA_FLAG; 574 - if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B) 575 - flag |= VSYNC_PIPEB_FLAG; 503 + dev_priv->irq_enable_reg |= I915_USER_INTERRUPT; 576 504 577 - I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag); 505 + I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg); 506 + dev_priv->irq_enabled = 1; 578 507 } 579 508 580 509 /* Set the vblank monitor pipe ··· 675 448 676 449 dev_priv->vblank_pipe = pipe->pipe; 677 450 678 - i915_enable_interrupt (dev); 679 - 680 451 return 0; 681 452 } 682 453 ··· 692 467 693 468 flag = I915_READ(I915REG_INT_ENABLE_R); 694 469 pipe->pipe = 0; 695 - if (flag & VSYNC_PIPEA_FLAG) 470 + if (flag & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) 696 471 pipe->pipe |= DRM_I915_VBLANK_PIPE_A; 697 - if (flag & VSYNC_PIPEB_FLAG) 472 + if (flag & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) 698 473 pipe->pipe |= DRM_I915_VBLANK_PIPE_B; 699 474 700 475 return 0; ··· 709 484 drm_i915_private_t *dev_priv = dev->dev_private; 710 485 drm_i915_vblank_swap_t *swap = data; 711 486 drm_i915_vbl_swap_t *vbl_swap; 712 - unsigned int pipe, seqtype, curseq; 487 + unsigned int pipe, seqtype, curseq, plane; 713 488 unsigned long irqflags; 714 489 struct list_head *list; 490 + int ret; 715 491 716 492 if (!dev_priv) { 717 
493 DRM_ERROR("%s called with no initialization\n", __func__); 718 494 return -EINVAL; 719 495 } 720 496 721 - if (dev_priv->sarea_priv->rotation) { 497 + if (!dev_priv->sarea_priv || dev_priv->sarea_priv->rotation) { 722 498 DRM_DEBUG("Rotation not supported\n"); 723 499 return -EINVAL; 724 500 } 725 501 726 502 if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE | 727 - _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) { 503 + _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS | 504 + _DRM_VBLANK_FLIP)) { 728 505 DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype); 729 506 return -EINVAL; 730 507 } 731 508 732 - pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; 509 + plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; 510 + pipe = i915_get_pipe(dev, plane); 733 511 734 512 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); 735 513 ··· 743 515 744 516 spin_lock_irqsave(&dev->drw_lock, irqflags); 745 517 518 + /* It makes no sense to schedule a swap for a drawable that doesn't have 519 + * valid information at this point. E.g. this could mean that the X 520 + * server is too old to push drawable information to the DRM, in which 521 + * case all such swaps would become ineffective. 522 + */ 746 523 if (!drm_get_drawable_info(dev, swap->drawable)) { 747 524 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 748 525 DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable); ··· 756 523 757 524 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 758 525 759 - curseq = atomic_read(pipe ? 
&dev->vbl_received2 : &dev->vbl_received); 526 + drm_update_vblank_count(dev, pipe); 527 + curseq = drm_vblank_count(dev, pipe); 760 528 761 529 if (seqtype == _DRM_VBLANK_RELATIVE) 762 530 swap->sequence += curseq; ··· 771 537 } 772 538 } 773 539 540 + if (swap->seqtype & _DRM_VBLANK_FLIP) { 541 + swap->sequence--; 542 + 543 + if ((curseq - swap->sequence) <= (1<<23)) { 544 + struct drm_drawable_info *drw; 545 + 546 + LOCK_TEST_WITH_RETURN(dev, file_priv); 547 + 548 + spin_lock_irqsave(&dev->drw_lock, irqflags); 549 + 550 + drw = drm_get_drawable_info(dev, swap->drawable); 551 + 552 + if (!drw) { 553 + spin_unlock_irqrestore(&dev->drw_lock, 554 + irqflags); 555 + DRM_DEBUG("Invalid drawable ID %d\n", 556 + swap->drawable); 557 + return -EINVAL; 558 + } 559 + 560 + i915_dispatch_vsync_flip(dev, drw, plane); 561 + 562 + spin_unlock_irqrestore(&dev->drw_lock, irqflags); 563 + 564 + return 0; 565 + } 566 + } 567 + 774 568 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); 775 569 776 570 list_for_each(list, &dev_priv->vbl_swaps.head) { 777 571 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); 778 572 779 573 if (vbl_swap->drw_id == swap->drawable && 780 - vbl_swap->pipe == pipe && 574 + vbl_swap->plane == plane && 781 575 vbl_swap->sequence == swap->sequence) { 576 + vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP); 782 577 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 783 578 DRM_DEBUG("Already scheduled\n"); 784 579 return 0; ··· 830 567 831 568 DRM_DEBUG("\n"); 832 569 570 + ret = drm_vblank_get(dev, pipe); 571 + if (ret) { 572 + drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); 573 + return ret; 574 + } 575 + 833 576 vbl_swap->drw_id = swap->drawable; 834 - vbl_swap->pipe = pipe; 577 + vbl_swap->plane = plane; 835 578 vbl_swap->sequence = swap->sequence; 579 + vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP); 580 + 581 + if (vbl_swap->flip) 582 + swap->sequence++; 836 583 837 584 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); 838 
585 ··· 860 587 { 861 588 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 862 589 863 - I915_WRITE16(I915REG_HWSTAM, 0xfffe); 590 + I915_WRITE16(I915REG_HWSTAM, 0xeffe); 864 591 I915_WRITE16(I915REG_INT_MASK_R, 0x0); 865 592 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); 866 593 } 867 594 868 - void i915_driver_irq_postinstall(struct drm_device * dev) 595 + int i915_driver_irq_postinstall(struct drm_device * dev) 869 596 { 870 597 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 598 + int ret, num_pipes = 2; 871 599 872 600 spin_lock_init(&dev_priv->swaps_lock); 873 601 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); 874 602 dev_priv->swaps_pending = 0; 875 603 876 - if (!dev_priv->vblank_pipe) 877 - dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A; 604 + dev_priv->user_irq_refcount = 0; 605 + dev_priv->irq_enable_reg = 0; 606 + 607 + ret = drm_vblank_init(dev, num_pipes); 608 + if (ret) 609 + return ret; 610 + 611 + dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 612 + 878 613 i915_enable_interrupt(dev); 879 614 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); 615 + 616 + /* 617 + * Initialize the hardware status page IRQ location. 
618 + */ 619 + 620 + I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21)); 621 + return 0; 880 622 } 881 623 882 624 void i915_driver_irq_uninstall(struct drm_device * dev) 883 625 { 884 626 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 885 - u16 temp; 627 + u32 temp; 886 628 887 629 if (!dev_priv) 888 630 return; 889 631 890 - I915_WRITE16(I915REG_HWSTAM, 0xffff); 891 - I915_WRITE16(I915REG_INT_MASK_R, 0xffff); 892 - I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); 632 + dev_priv->irq_enabled = 0; 633 + I915_WRITE(I915REG_HWSTAM, 0xffffffff); 634 + I915_WRITE(I915REG_INT_MASK_R, 0xffffffff); 635 + I915_WRITE(I915REG_INT_ENABLE_R, 0x0); 893 636 894 - temp = I915_READ16(I915REG_INT_IDENTITY_R); 895 - I915_WRITE16(I915REG_INT_IDENTITY_R, temp); 637 + temp = I915_READ(I915REG_PIPEASTAT); 638 + I915_WRITE(I915REG_PIPEASTAT, temp); 639 + temp = I915_READ(I915REG_PIPEBSTAT); 640 + I915_WRITE(I915REG_PIPEBSTAT, temp); 641 + temp = I915_READ(I915REG_INT_IDENTITY_R); 642 + I915_WRITE(I915REG_INT_IDENTITY_R, temp); 896 643 }
+4 -3
drivers/char/drm/mga_drv.c
··· 45 45 static struct drm_driver driver = { 46 46 .driver_features = 47 47 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | 48 - DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 49 - DRIVER_IRQ_VBL, 48 + DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 50 49 .dev_priv_size = sizeof(drm_mga_buf_priv_t), 51 50 .load = mga_driver_load, 52 51 .unload = mga_driver_unload, 53 52 .lastclose = mga_driver_lastclose, 54 53 .dma_quiescent = mga_driver_dma_quiescent, 55 54 .device_is_agp = mga_driver_device_is_agp, 56 - .vblank_wait = mga_driver_vblank_wait, 55 + .get_vblank_counter = mga_get_vblank_counter, 56 + .enable_vblank = mga_enable_vblank, 57 + .disable_vblank = mga_disable_vblank, 57 58 .irq_preinstall = mga_driver_irq_preinstall, 58 59 .irq_postinstall = mga_driver_irq_postinstall, 59 60 .irq_uninstall = mga_driver_irq_uninstall,
+5 -1
drivers/char/drm/mga_drv.h
··· 120 120 u32 clear_cmd; 121 121 u32 maccess; 122 122 123 + atomic_t vbl_received; /**< Number of vblanks received. */ 123 124 wait_queue_head_t fence_queue; 124 125 atomic_t last_fence_retired; 125 126 u32 next_fence_to_post; ··· 182 181 extern int mga_warp_init(drm_mga_private_t * dev_priv); 183 182 184 183 /* mga_irq.c */ 184 + extern int mga_enable_vblank(struct drm_device *dev, int crtc); 185 + extern void mga_disable_vblank(struct drm_device *dev, int crtc); 186 + extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc); 185 187 extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence); 186 188 extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); 187 189 extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS); 188 190 extern void mga_driver_irq_preinstall(struct drm_device * dev); 189 - extern void mga_driver_irq_postinstall(struct drm_device * dev); 191 + extern int mga_driver_irq_postinstall(struct drm_device * dev); 190 192 extern void mga_driver_irq_uninstall(struct drm_device * dev); 191 193 extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, 192 194 unsigned long arg);
+52 -19
drivers/char/drm/mga_irq.c
··· 35 35 #include "mga_drm.h" 36 36 #include "mga_drv.h" 37 37 38 + u32 mga_get_vblank_counter(struct drm_device *dev, int crtc) 39 + { 40 + const drm_mga_private_t *const dev_priv = 41 + (drm_mga_private_t *) dev->dev_private; 42 + 43 + if (crtc != 0) { 44 + return 0; 45 + } 46 + 47 + 48 + return atomic_read(&dev_priv->vbl_received); 49 + } 50 + 51 + 38 52 irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) 39 53 { 40 54 struct drm_device *dev = (struct drm_device *) arg; ··· 61 47 /* VBLANK interrupt */ 62 48 if (status & MGA_VLINEPEN) { 63 49 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR); 64 - atomic_inc(&dev->vbl_received); 65 - DRM_WAKEUP(&dev->vbl_queue); 66 - drm_vbl_send_signals(dev); 50 + atomic_inc(&dev_priv->vbl_received); 51 + drm_handle_vblank(dev, 0); 67 52 handled = 1; 68 53 } 69 54 ··· 91 78 return IRQ_NONE; 92 79 } 93 80 94 - int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) 81 + int mga_enable_vblank(struct drm_device *dev, int crtc) 95 82 { 96 - unsigned int cur_vblank; 97 - int ret = 0; 83 + drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 98 84 99 - /* Assume that the user has missed the current sequence number 100 - * by about a day rather than she wants to wait for years 101 - * using vertical blanks... 85 + if (crtc != 0) { 86 + DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 87 + crtc); 88 + return 0; 89 + } 90 + 91 + MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); 92 + return 0; 93 + } 94 + 95 + 96 + void mga_disable_vblank(struct drm_device *dev, int crtc) 97 + { 98 + if (crtc != 0) { 99 + DRM_ERROR("tried to disable vblank on non-existent crtc %d\n", 100 + crtc); 101 + } 102 + 103 + /* Do *NOT* disable the vertical refresh interrupt. MGA doesn't have 104 + * a nice hardware counter that tracks the number of refreshes when 105 + * the interrupt is disabled, and the kernel doesn't know the refresh 106 + * rate to calculate an estimate. 
102 107 */ 103 - DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 104 - (((cur_vblank = atomic_read(&dev->vbl_received)) 105 - - *sequence) <= (1 << 23))); 106 - 107 - *sequence = cur_vblank; 108 - 109 - return ret; 108 + /* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */ 110 109 } 111 110 112 111 int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence) ··· 150 125 MGA_WRITE(MGA_ICLEAR, ~0); 151 126 } 152 127 153 - void mga_driver_irq_postinstall(struct drm_device * dev) 128 + int mga_driver_irq_postinstall(struct drm_device * dev) 154 129 { 155 130 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 131 + int ret; 132 + 133 + ret = drm_vblank_init(dev, 1); 134 + if (ret) 135 + return ret; 156 136 157 137 DRM_INIT_WAITQUEUE(&dev_priv->fence_queue); 158 138 159 - /* Turn on vertical blank interrupt and soft trap interrupt. */ 160 - MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); 139 + /* Turn on soft trap interrupt. Vertical blank interrupts are enabled 140 + * in mga_enable_vblank. 141 + */ 142 + MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN); 143 + return 0; 161 144 } 162 145 163 146 void mga_driver_irq_uninstall(struct drm_device * dev)
+4 -3
drivers/char/drm/r128_drv.c
··· 43 43 static struct drm_driver driver = { 44 44 .driver_features = 45 45 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 46 - DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 47 - DRIVER_IRQ_VBL, 46 + DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 48 47 .dev_priv_size = sizeof(drm_r128_buf_priv_t), 49 48 .preclose = r128_driver_preclose, 50 49 .lastclose = r128_driver_lastclose, 51 - .vblank_wait = r128_driver_vblank_wait, 50 + .get_vblank_counter = r128_get_vblank_counter, 51 + .enable_vblank = r128_enable_vblank, 52 + .disable_vblank = r128_disable_vblank, 52 53 .irq_preinstall = r128_driver_irq_preinstall, 53 54 .irq_postinstall = r128_driver_irq_postinstall, 54 55 .irq_uninstall = r128_driver_irq_uninstall,
+6 -3
drivers/char/drm/r128_drv.h
··· 97 97 u32 crtc_offset; 98 98 u32 crtc_offset_cntl; 99 99 100 + atomic_t vbl_received; 101 + 100 102 u32 color_fmt; 101 103 unsigned int front_offset; 102 104 unsigned int front_pitch; ··· 151 149 extern int r128_do_cce_idle(drm_r128_private_t * dev_priv); 152 150 extern int r128_do_cleanup_cce(struct drm_device * dev); 153 151 154 - extern int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); 155 - 152 + extern int r128_enable_vblank(struct drm_device *dev, int crtc); 153 + extern void r128_disable_vblank(struct drm_device *dev, int crtc); 154 + extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc); 156 155 extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS); 157 156 extern void r128_driver_irq_preinstall(struct drm_device * dev); 158 - extern void r128_driver_irq_postinstall(struct drm_device * dev); 157 + extern int r128_driver_irq_postinstall(struct drm_device * dev); 159 158 extern void r128_driver_irq_uninstall(struct drm_device * dev); 160 159 extern void r128_driver_lastclose(struct drm_device * dev); 161 160 extern void r128_driver_preclose(struct drm_device * dev,
+36 -21
drivers/char/drm/r128_irq.c
··· 35 35 #include "r128_drm.h" 36 36 #include "r128_drv.h" 37 37 38 + u32 r128_get_vblank_counter(struct drm_device *dev, int crtc) 39 + { 40 + const drm_r128_private_t *dev_priv = dev->dev_private; 41 + 42 + if (crtc != 0) 43 + return 0; 44 + 45 + return atomic_read(&dev_priv->vbl_received); 46 + } 47 + 38 48 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) 39 49 { 40 50 struct drm_device *dev = (struct drm_device *) arg; ··· 56 46 /* VBLANK interrupt */ 57 47 if (status & R128_CRTC_VBLANK_INT) { 58 48 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); 59 - atomic_inc(&dev->vbl_received); 60 - DRM_WAKEUP(&dev->vbl_queue); 61 - drm_vbl_send_signals(dev); 49 + atomic_inc(&dev_priv->vbl_received); 50 + drm_handle_vblank(dev, 0); 62 51 return IRQ_HANDLED; 63 52 } 64 53 return IRQ_NONE; 65 54 } 66 55 67 - int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) 56 + int r128_enable_vblank(struct drm_device *dev, int crtc) 68 57 { 69 - unsigned int cur_vblank; 70 - int ret = 0; 58 + drm_r128_private_t *dev_priv = dev->dev_private; 71 59 72 - /* Assume that the user has missed the current sequence number 73 - * by about a day rather than she wants to wait for years 74 - * using vertical blanks... 
60 + if (crtc != 0) { 61 + DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc); 62 + return -EINVAL; 63 + } 64 + 65 + R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN); 66 + return 0; 67 + } 68 + 69 + void r128_disable_vblank(struct drm_device *dev, int crtc) 70 + { 71 + if (crtc != 0) 72 + DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc); 73 + 74 + /* 75 + * FIXME: implement proper interrupt disable by using the vblank 76 + * counter register (if available) 77 + * 78 + * R128_WRITE(R128_GEN_INT_CNTL, 79 + * R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN); 75 80 */ 76 - DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 77 - (((cur_vblank = atomic_read(&dev->vbl_received)) 78 - - *sequence) <= (1 << 23))); 79 - 80 - *sequence = cur_vblank; 81 - 82 - return ret; 83 81 } 84 82 85 83 void r128_driver_irq_preinstall(struct drm_device * dev) ··· 100 82 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); 101 83 } 102 84 103 - void r128_driver_irq_postinstall(struct drm_device * dev) 85 + int r128_driver_irq_postinstall(struct drm_device * dev) 104 86 { 105 - drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; 106 - 107 - /* Turn on VBL interrupt */ 108 - R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN); 87 + return drm_vblank_init(dev, 1); 109 88 } 110 89 111 90 void r128_driver_irq_uninstall(struct drm_device * dev)
+4 -4
drivers/char/drm/radeon_drv.c
··· 59 59 static struct drm_driver driver = { 60 60 .driver_features = 61 61 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 62 - DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | 63 - DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2, 62 + DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED, 64 63 .dev_priv_size = sizeof(drm_radeon_buf_priv_t), 65 64 .load = radeon_driver_load, 66 65 .firstopen = radeon_driver_firstopen, ··· 68 69 .postclose = radeon_driver_postclose, 69 70 .lastclose = radeon_driver_lastclose, 70 71 .unload = radeon_driver_unload, 71 - .vblank_wait = radeon_driver_vblank_wait, 72 - .vblank_wait2 = radeon_driver_vblank_wait2, 72 + .get_vblank_counter = radeon_get_vblank_counter, 73 + .enable_vblank = radeon_enable_vblank, 74 + .disable_vblank = radeon_disable_vblank, 73 75 .dri_library_name = dri_library_name, 74 76 .irq_preinstall = radeon_driver_irq_preinstall, 75 77 .irq_postinstall = radeon_driver_irq_postinstall,
+14 -5
drivers/char/drm/radeon_drv.h
··· 304 304 305 305 u32 scratch_ages[5]; 306 306 307 + unsigned int crtc_last_cnt; 308 + unsigned int crtc2_last_cnt; 309 + 307 310 /* starting from here on, data is preserved accross an open */ 308 311 uint32_t flags; /* see radeon_chip_flags */ 309 312 unsigned long fb_aper_offset; ··· 377 374 extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); 378 375 379 376 extern void radeon_do_release(struct drm_device * dev); 380 - extern int radeon_driver_vblank_wait(struct drm_device * dev, 381 - unsigned int *sequence); 382 - extern int radeon_driver_vblank_wait2(struct drm_device * dev, 383 - unsigned int *sequence); 377 + extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc); 378 + extern int radeon_enable_vblank(struct drm_device *dev, int crtc); 379 + extern void radeon_disable_vblank(struct drm_device *dev, int crtc); 380 + extern void radeon_do_release(struct drm_device * dev); 384 381 extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS); 385 382 extern void radeon_driver_irq_preinstall(struct drm_device * dev); 386 - extern void radeon_driver_irq_postinstall(struct drm_device * dev); 383 + extern int radeon_driver_irq_postinstall(struct drm_device * dev); 387 384 extern void radeon_driver_irq_uninstall(struct drm_device * dev); 388 385 extern int radeon_vblank_crtc_get(struct drm_device *dev); 389 386 extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value); ··· 560 557 #define GET_SCRATCH( x ) (dev_priv->writeback_works \ 561 558 ? DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(x) ) \ 562 559 : RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) ) 560 + 561 + #define RADEON_CRTC_CRNT_FRAME 0x0214 562 + #define RADEON_CRTC2_CRNT_FRAME 0x0314 563 + 564 + #define RADEON_CRTC_STATUS 0x005c 565 + #define RADEON_CRTC2_STATUS 0x03fc 563 566 564 567 #define RADEON_GEN_INT_CNTL 0x0040 565 568 # define RADEON_CRTC_VBLANK_MASK (1 << 0)
+83 -88
drivers/char/drm/radeon_irq.c
··· 35 35 #include "radeon_drm.h" 36 36 #include "radeon_drv.h" 37 37 38 - static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv, 39 - u32 mask) 38 + static void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state) 40 39 { 41 - u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) & mask; 40 + drm_radeon_private_t *dev_priv = dev->dev_private; 41 + 42 + if (state) 43 + dev_priv->irq_enable_reg |= mask; 44 + else 45 + dev_priv->irq_enable_reg &= ~mask; 46 + 47 + RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); 48 + } 49 + 50 + int radeon_enable_vblank(struct drm_device *dev, int crtc) 51 + { 52 + switch (crtc) { 53 + case 0: 54 + radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1); 55 + break; 56 + case 1: 57 + radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1); 58 + break; 59 + default: 60 + DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 61 + crtc); 62 + return EINVAL; 63 + } 64 + 65 + return 0; 66 + } 67 + 68 + void radeon_disable_vblank(struct drm_device *dev, int crtc) 69 + { 70 + switch (crtc) { 71 + case 0: 72 + radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0); 73 + break; 74 + case 1: 75 + radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0); 76 + break; 77 + default: 78 + DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 79 + crtc); 80 + break; 81 + } 82 + } 83 + 84 + static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv) 85 + { 86 + u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) & 87 + (RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT | 88 + RADEON_CRTC2_VBLANK_STAT); 89 + 42 90 if (irqs) 43 91 RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs); 92 + 44 93 return irqs; 45 94 } 46 95 ··· 121 72 /* Only consider the bits we're interested in - others could be used 122 73 * outside the DRM 123 74 */ 124 - stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK | 125 - RADEON_CRTC_VBLANK_STAT | 126 - RADEON_CRTC2_VBLANK_STAT)); 75 + stat = 
radeon_acknowledge_irqs(dev_priv); 127 76 if (!stat) 128 77 return IRQ_NONE; 129 78 130 79 stat &= dev_priv->irq_enable_reg; 131 80 132 81 /* SW interrupt */ 133 - if (stat & RADEON_SW_INT_TEST) { 82 + if (stat & RADEON_SW_INT_TEST) 134 83 DRM_WAKEUP(&dev_priv->swi_queue); 135 - } 136 84 137 85 /* VBLANK interrupt */ 138 - if (stat & (RADEON_CRTC_VBLANK_STAT|RADEON_CRTC2_VBLANK_STAT)) { 139 - int vblank_crtc = dev_priv->vblank_crtc; 140 - 141 - if ((vblank_crtc & 142 - (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) == 143 - (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) { 144 - if (stat & RADEON_CRTC_VBLANK_STAT) 145 - atomic_inc(&dev->vbl_received); 146 - if (stat & RADEON_CRTC2_VBLANK_STAT) 147 - atomic_inc(&dev->vbl_received2); 148 - } else if (((stat & RADEON_CRTC_VBLANK_STAT) && 149 - (vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) || 150 - ((stat & RADEON_CRTC2_VBLANK_STAT) && 151 - (vblank_crtc & DRM_RADEON_VBLANK_CRTC2))) 152 - atomic_inc(&dev->vbl_received); 153 - 154 - DRM_WAKEUP(&dev->vbl_queue); 155 - drm_vbl_send_signals(dev); 156 - } 86 + if (stat & RADEON_CRTC_VBLANK_STAT) 87 + drm_handle_vblank(dev, 0); 88 + if (stat & RADEON_CRTC2_VBLANK_STAT) 89 + drm_handle_vblank(dev, 1); 157 90 158 91 return IRQ_HANDLED; 159 92 } ··· 175 144 return ret; 176 145 } 177 146 178 - static int radeon_driver_vblank_do_wait(struct drm_device * dev, 179 - unsigned int *sequence, int crtc) 147 + u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc) 180 148 { 181 - drm_radeon_private_t *dev_priv = 182 - (drm_radeon_private_t *) dev->dev_private; 183 - unsigned int cur_vblank; 184 - int ret = 0; 185 - int ack = 0; 186 - atomic_t *counter; 149 + drm_radeon_private_t *dev_priv = dev->dev_private; 150 + u32 crtc_cnt_reg, crtc_status_reg; 151 + 187 152 if (!dev_priv) { 188 153 DRM_ERROR("called with no initialization\n"); 189 154 return -EINVAL; 190 155 } 191 156 192 - if (crtc == DRM_RADEON_VBLANK_CRTC1) { 193 - counter = &dev->vbl_received; 194 - ack |= 
RADEON_CRTC_VBLANK_STAT; 195 - } else if (crtc == DRM_RADEON_VBLANK_CRTC2) { 196 - counter = &dev->vbl_received2; 197 - ack |= RADEON_CRTC2_VBLANK_STAT; 198 - } else 157 + if (crtc == 0) { 158 + crtc_cnt_reg = RADEON_CRTC_CRNT_FRAME; 159 + crtc_status_reg = RADEON_CRTC_STATUS; 160 + } else if (crtc == 1) { 161 + crtc_cnt_reg = RADEON_CRTC2_CRNT_FRAME; 162 + crtc_status_reg = RADEON_CRTC2_STATUS; 163 + } else { 199 164 return -EINVAL; 165 + } 200 166 201 - radeon_acknowledge_irqs(dev_priv, ack); 202 - 203 - dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; 204 - 205 - /* Assume that the user has missed the current sequence number 206 - * by about a day rather than she wants to wait for years 207 - * using vertical blanks... 208 - */ 209 - DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 210 - (((cur_vblank = atomic_read(counter)) 211 - - *sequence) <= (1 << 23))); 212 - 213 - *sequence = cur_vblank; 214 - 215 - return ret; 216 - } 217 - 218 - int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence) 219 - { 220 - return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1); 221 - } 222 - 223 - int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence) 224 - { 225 - return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2); 167 + return RADEON_READ(crtc_cnt_reg) + (RADEON_READ(crtc_status_reg) & 1); 226 168 } 227 169 228 170 /* Needs the lock as it touches the ring. 
··· 238 234 return radeon_wait_irq(dev, irqwait->irq_seq); 239 235 } 240 236 241 - static void radeon_enable_interrupt(struct drm_device *dev) 242 - { 243 - drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; 244 - 245 - dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE; 246 - if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1) 247 - dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK; 248 - 249 - if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2) 250 - dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK; 251 - 252 - RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); 253 - dev_priv->irq_enabled = 1; 254 - } 255 - 256 237 /* drm_dma.h hooks 257 238 */ 258 239 void radeon_driver_irq_preinstall(struct drm_device * dev) ··· 249 260 RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); 250 261 251 262 /* Clear bits if they're already high */ 252 - radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK | 253 - RADEON_CRTC_VBLANK_STAT | 254 - RADEON_CRTC2_VBLANK_STAT)); 263 + radeon_acknowledge_irqs(dev_priv); 255 264 } 256 265 257 - void radeon_driver_irq_postinstall(struct drm_device * dev) 266 + int radeon_driver_irq_postinstall(struct drm_device * dev) 258 267 { 259 268 drm_radeon_private_t *dev_priv = 260 269 (drm_radeon_private_t *) dev->dev_private; 270 + int ret; 261 271 262 272 atomic_set(&dev_priv->swi_emitted, 0); 263 273 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); 264 274 265 - radeon_enable_interrupt(dev); 275 + ret = drm_vblank_init(dev, 2); 276 + if (ret) 277 + return ret; 278 + 279 + dev->max_vblank_count = 0x001fffff; 280 + 281 + radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); 282 + 283 + return 0; 266 284 } 267 285 268 286 void radeon_driver_irq_uninstall(struct drm_device * dev) ··· 311 315 return -EINVAL; 312 316 } 313 317 dev_priv->vblank_crtc = (unsigned int)value; 314 - radeon_enable_interrupt(dev); 315 318 return 0; 316 319 }
+4 -2
drivers/char/drm/via_drv.c
··· 40 40 static struct drm_driver driver = { 41 41 .driver_features = 42 42 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | 43 - DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, 43 + DRIVER_IRQ_SHARED, 44 44 .load = via_driver_load, 45 45 .unload = via_driver_unload, 46 46 .context_dtor = via_final_context, 47 - .vblank_wait = via_driver_vblank_wait, 47 + .get_vblank_counter = via_get_vblank_counter, 48 + .enable_vblank = via_enable_vblank, 49 + .disable_vblank = via_disable_vblank, 48 50 .irq_preinstall = via_driver_irq_preinstall, 49 51 .irq_postinstall = via_driver_irq_postinstall, 50 52 .irq_uninstall = via_driver_irq_uninstall,
+5 -2
drivers/char/drm/via_drv.h
··· 75 75 struct timeval last_vblank; 76 76 int last_vblank_valid; 77 77 unsigned usec_per_vblank; 78 + atomic_t vbl_received; 78 79 drm_via_state_t hc_state; 79 80 char pci_buf[VIA_PCI_BUF_SIZE]; 80 81 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE]; ··· 131 130 extern int via_final_context(struct drm_device * dev, int context); 132 131 133 132 extern int via_do_cleanup_map(struct drm_device * dev); 134 - extern int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); 133 + extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc); 134 + extern int via_enable_vblank(struct drm_device *dev, int crtc); 135 + extern void via_disable_vblank(struct drm_device *dev, int crtc); 135 136 136 137 extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS); 137 138 extern void via_driver_irq_preinstall(struct drm_device * dev); 138 - extern void via_driver_irq_postinstall(struct drm_device * dev); 139 + extern int via_driver_irq_postinstall(struct drm_device * dev); 139 140 extern void via_driver_irq_uninstall(struct drm_device * dev); 140 141 141 142 extern int via_dma_cleanup(struct drm_device * dev);
+47 -34
drivers/char/drm/via_irq.c
··· 92 92 static unsigned time_diff(struct timeval *now, struct timeval *then) 93 93 { 94 94 return (now->tv_usec >= then->tv_usec) ? 95 - now->tv_usec - then->tv_usec : 96 - 1000000 - (then->tv_usec - now->tv_usec); 95 + now->tv_usec - then->tv_usec : 96 + 1000000 - (then->tv_usec - now->tv_usec); 97 + } 98 + 99 + u32 via_get_vblank_counter(struct drm_device *dev, int crtc) 100 + { 101 + drm_via_private_t *dev_priv = dev->dev_private; 102 + if (crtc != 0) 103 + return 0; 104 + 105 + return atomic_read(&dev_priv->vbl_received); 97 106 } 98 107 99 108 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) ··· 117 108 118 109 status = VIA_READ(VIA_REG_INTERRUPT); 119 110 if (status & VIA_IRQ_VBLANK_PENDING) { 120 - atomic_inc(&dev->vbl_received); 121 - if (!(atomic_read(&dev->vbl_received) & 0x0F)) { 111 + atomic_inc(&dev_priv->vbl_received); 112 + if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) { 122 113 do_gettimeofday(&cur_vblank); 123 114 if (dev_priv->last_vblank_valid) { 124 115 dev_priv->usec_per_vblank = ··· 128 119 dev_priv->last_vblank = cur_vblank; 129 120 dev_priv->last_vblank_valid = 1; 130 121 } 131 - if (!(atomic_read(&dev->vbl_received) & 0xFF)) { 122 + if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) { 132 123 DRM_DEBUG("US per vblank is: %u\n", 133 124 dev_priv->usec_per_vblank); 134 125 } 135 - DRM_WAKEUP(&dev->vbl_queue); 136 - drm_vbl_send_signals(dev); 126 + drm_handle_vblank(dev, 0); 137 127 handled = 1; 138 128 } 139 129 ··· 171 163 } 172 164 } 173 165 174 - int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) 166 + int via_enable_vblank(struct drm_device *dev, int crtc) 175 167 { 176 - drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 177 - unsigned int cur_vblank; 178 - int ret = 0; 168 + drm_via_private_t *dev_priv = dev->dev_private; 169 + u32 status; 179 170 180 - DRM_DEBUG("\n"); 181 - if (!dev_priv) { 182 - DRM_ERROR("called with no initialization\n"); 171 + if (crtc != 0) { 172 + 
DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc); 183 173 return -EINVAL; 184 174 } 185 175 186 - viadrv_acknowledge_irqs(dev_priv); 176 + status = VIA_READ(VIA_REG_INTERRUPT); 177 + VIA_WRITE(VIA_REG_INTERRUPT, status & VIA_IRQ_VBLANK_ENABLE); 187 178 188 - /* Assume that the user has missed the current sequence number 189 - * by about a day rather than she wants to wait for years 190 - * using vertical blanks... 191 - */ 179 + VIA_WRITE8(0x83d4, 0x11); 180 + VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); 192 181 193 - DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 194 - (((cur_vblank = atomic_read(&dev->vbl_received)) - 195 - *sequence) <= (1 << 23))); 182 + return 0; 183 + } 196 184 197 - *sequence = cur_vblank; 198 - return ret; 185 + void via_disable_vblank(struct drm_device *dev, int crtc) 186 + { 187 + drm_via_private_t *dev_priv = dev->dev_private; 188 + 189 + VIA_WRITE8(0x83d4, 0x11); 190 + VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30); 191 + 192 + if (crtc != 0) 193 + DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc); 199 194 } 200 195 201 196 static int ··· 303 292 } 304 293 } 305 294 306 - void via_driver_irq_postinstall(struct drm_device * dev) 295 + int via_driver_irq_postinstall(struct drm_device * dev) 307 296 { 308 297 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 309 298 u32 status; 310 299 311 - DRM_DEBUG("\n"); 312 - if (dev_priv) { 313 - status = VIA_READ(VIA_REG_INTERRUPT); 314 - VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL 315 - | dev_priv->irq_enable_mask); 300 + DRM_DEBUG("via_driver_irq_postinstall\n"); 301 + if (!dev_priv) 302 + return -EINVAL; 316 303 317 - /* Some magic, oh for some data sheets ! */ 304 + drm_vblank_init(dev, 1); 305 + status = VIA_READ(VIA_REG_INTERRUPT); 306 + VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL 307 + | dev_priv->irq_enable_mask); 318 308 319 - VIA_WRITE8(0x83d4, 0x11); 320 - VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); 309 + /* Some magic, oh for some data sheets ! 
*/ 310 + VIA_WRITE8(0x83d4, 0x11); 311 + VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); 321 312 322 - } 313 + return 0; 323 314 } 324 315 325 316 void via_driver_irq_uninstall(struct drm_device * dev)