drm: update VIA driver to 2.7.2

Add PCI DMA blitengine to VIA DRM
Add portability code for porting VIA to FreeBSD.
Sync via_drm.h with 3d driver

From: Thomas Hellstrom <unichrome@shipmail.org>, Eric Anholt <anholt@freebsd.org>
Signed-off-by: Dave Airlie <airlied@linux.ie>

Authored by Dave Airlie and committed by Dave Airlie (commit hashes: 92514243, 792d2b9a)

+210 -97
+1 -1
drivers/char/drm/Makefile
··· 18 ffb-objs := ffb_drv.o ffb_context.o 19 sis-objs := sis_drv.o sis_ds.o sis_mm.o 20 savage-objs := savage_drv.o savage_bci.o savage_state.o 21 - via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o 22 23 ifeq ($(CONFIG_COMPAT),y) 24 drm-objs += drm_ioc32.o
··· 18 ffb-objs := ffb_drv.o ffb_context.o 19 sis-objs := sis_drv.o sis_ds.o sis_mm.o 20 savage-objs := savage_drv.o savage_bci.o savage_state.o 21 + via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o 22 23 ifeq ($(CONFIG_COMPAT),y) 24 drm-objs += drm_ioc32.o
+1
drivers/char/drm/drm_os_linux.h
··· 13 #define DRM_ERR(d) -(d) 14 /** Current process ID */ 15 #define DRM_CURRENTPID current->pid 16 #define DRM_UDELAY(d) udelay(d) 17 /** Read a byte from a MMIO region */ 18 #define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset))
··· 13 #define DRM_ERR(d) -(d) 14 /** Current process ID */ 15 #define DRM_CURRENTPID current->pid 16 + #define DRM_SUSER(p) capable(CAP_SYS_ADMIN) 17 #define DRM_UDELAY(d) udelay(d) 18 /** Read a byte from a MMIO region */ 19 #define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset))
+26 -10
drivers/char/drm/via_dma.c
··· 213 dev_priv->dma_wrap = init->size; 214 dev_priv->dma_offset = init->offset; 215 dev_priv->last_pause_ptr = NULL; 216 - dev_priv->hw_addr_ptr = dev_priv->mmio->handle + init->reg_pause_addr; 217 218 via_cmdbuf_start(dev_priv); 219 ··· 234 235 switch (init.func) { 236 case VIA_INIT_DMA: 237 - if (!capable(CAP_SYS_ADMIN)) 238 retcode = DRM_ERR(EPERM); 239 else 240 retcode = via_initialize(dev, dev_priv, &init); 241 break; 242 case VIA_CLEANUP_DMA: 243 - if (!capable(CAP_SYS_ADMIN)) 244 retcode = DRM_ERR(EPERM); 245 else 246 retcode = via_dma_cleanup(dev); ··· 351 return 0; 352 } 353 354 - extern int 355 - via_parse_command_stream(drm_device_t * dev, const uint32_t * buf, 356 - unsigned int size); 357 static int via_dispatch_pci_cmdbuffer(drm_device_t * dev, 358 drm_via_cmdbuffer_t * cmd) 359 { ··· 449 if ((count <= 8) && (count >= 0)) { 450 uint32_t rgtr, ptr; 451 rgtr = *(dev_priv->hw_addr_ptr); 452 - ptr = ((char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) + 453 - dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4 - 454 - CMDBUF_ALIGNMENT_SIZE; 455 if (rgtr <= ptr) { 456 DRM_ERROR 457 ("Command regulator\npaused at count %d, address %x, " ··· 471 && count--) ; 472 473 rgtr = *(dev_priv->hw_addr_ptr); 474 - ptr = ((char *)paused_at - dev_priv->dma_ptr) + 475 dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4; 476 477 ptr_low = (ptr > 3 * CMDBUF_ALIGNMENT_SIZE) ? ··· 723 sizeof(d_siz)); 724 return ret; 725 }
··· 213 dev_priv->dma_wrap = init->size; 214 dev_priv->dma_offset = init->offset; 215 dev_priv->last_pause_ptr = NULL; 216 + dev_priv->hw_addr_ptr = 217 + (volatile uint32_t *)((char *)dev_priv->mmio->handle + 218 + init->reg_pause_addr); 219 220 via_cmdbuf_start(dev_priv); 221 ··· 232 233 switch (init.func) { 234 case VIA_INIT_DMA: 235 + if (!DRM_SUSER(DRM_CURPROC)) 236 retcode = DRM_ERR(EPERM); 237 else 238 retcode = via_initialize(dev, dev_priv, &init); 239 break; 240 case VIA_CLEANUP_DMA: 241 + if (!DRM_SUSER(DRM_CURPROC)) 242 retcode = DRM_ERR(EPERM); 243 else 244 retcode = via_dma_cleanup(dev); ··· 349 return 0; 350 } 351 352 static int via_dispatch_pci_cmdbuffer(drm_device_t * dev, 353 drm_via_cmdbuffer_t * cmd) 354 { ··· 450 if ((count <= 8) && (count >= 0)) { 451 uint32_t rgtr, ptr; 452 rgtr = *(dev_priv->hw_addr_ptr); 453 + ptr = ((volatile char *)dev_priv->last_pause_ptr - 454 + dev_priv->dma_ptr) + dev_priv->dma_offset + 455 + (uint32_t) dev_priv->agpAddr + 4 - CMDBUF_ALIGNMENT_SIZE; 456 if (rgtr <= ptr) { 457 DRM_ERROR 458 ("Command regulator\npaused at count %d, address %x, " ··· 472 && count--) ; 473 474 rgtr = *(dev_priv->hw_addr_ptr); 475 + ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) + 476 dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4; 477 478 ptr_low = (ptr > 3 * CMDBUF_ALIGNMENT_SIZE) ? 
··· 724 sizeof(d_siz)); 725 return ret; 726 } 727 + 728 + drm_ioctl_desc_t via_ioctls[] = { 729 + [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, 1, 0}, 730 + [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, 1, 0}, 731 + [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, 1, 0}, 732 + [DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, 1, 0}, 733 + [DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, 1, 0}, 734 + [DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, 1, 0}, 735 + [DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, 1, 0}, 736 + [DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, 1, 0}, 737 + [DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, 1, 0}, 738 + [DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, 1, 0}, 739 + [DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, 1, 0}, 740 + [DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, 1, 0} 741 + }; 742 + 743 + int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
+42 -16
drivers/char/drm/via_drm.h
··· 75 #define DRM_VIA_CMDBUF_SIZE 0x0b 76 #define NOT_USED 77 #define DRM_VIA_WAIT_IRQ 0x0d 78 79 #define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t) 80 #define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t) ··· 91 #define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \ 92 drm_via_cmdbuf_size_t) 93 #define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t) 94 95 /* Indices into buf.Setup where various bits of state are mirrored per 96 * context and per buffer. These can be fired at the card as a unit, ··· 107 #define VIA_BACK 0x2 108 #define VIA_DEPTH 0x4 109 #define VIA_STENCIL 0x8 110 - #define VIDEO 0 111 - #define AGP 1 112 typedef struct { 113 uint32_t offset; 114 uint32_t size; ··· 200 unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS]; 201 unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */ 202 203 } drm_via_sarea_t; 204 205 typedef struct _drm_via_cmdbuf_size { ··· 223 224 #define VIA_IRQ_FLAGS_MASK 0xF0000000 225 226 struct drm_via_wait_irq_request { 227 unsigned irq; 228 via_irq_seq_type_t type; ··· 245 struct drm_wait_vblank_reply reply; 246 } drm_via_irqwait_t; 247 248 - #ifdef __KERNEL__ 249 250 - int via_fb_init(DRM_IOCTL_ARGS); 251 - int via_mem_alloc(DRM_IOCTL_ARGS); 252 - int via_mem_free(DRM_IOCTL_ARGS); 253 - int via_agp_init(DRM_IOCTL_ARGS); 254 - int via_map_init(DRM_IOCTL_ARGS); 255 - int via_decoder_futex(DRM_IOCTL_ARGS); 256 - int via_dma_init(DRM_IOCTL_ARGS); 257 - int via_cmdbuffer(DRM_IOCTL_ARGS); 258 - int via_flush_ioctl(DRM_IOCTL_ARGS); 259 - int via_pci_cmdbuffer(DRM_IOCTL_ARGS); 260 - int via_cmdbuf_size(DRM_IOCTL_ARGS); 261 - int via_wait_irq(DRM_IOCTL_ARGS); 262 263 - #endif 264 #endif /* _VIA_DRM_H_ */
··· 75 #define DRM_VIA_CMDBUF_SIZE 0x0b 76 #define NOT_USED 77 #define DRM_VIA_WAIT_IRQ 0x0d 78 + #define DRM_VIA_DMA_BLIT 0x0e 79 + #define DRM_VIA_BLIT_SYNC 0x0f 80 81 #define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t) 82 #define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t) ··· 89 #define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \ 90 drm_via_cmdbuf_size_t) 91 #define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t) 92 + #define DRM_IOCTL_VIA_DMA_BLIT DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t) 93 + #define DRM_IOCTL_VIA_BLIT_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t) 94 95 /* Indices into buf.Setup where various bits of state are mirrored per 96 * context and per buffer. These can be fired at the card as a unit, ··· 103 #define VIA_BACK 0x2 104 #define VIA_DEPTH 0x4 105 #define VIA_STENCIL 0x8 106 + #define VIA_MEM_VIDEO 0 /* matches drm constant */ 107 + #define VIA_MEM_AGP 1 /* matches drm constant */ 108 + #define VIA_MEM_SYSTEM 2 109 + #define VIA_MEM_MIXED 3 110 + #define VIA_MEM_UNKNOWN 4 111 + 112 typedef struct { 113 uint32_t offset; 114 uint32_t size; ··· 192 unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS]; 193 unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */ 194 195 + /* Used bt the 3d driver only at this point, for pageflipping: 196 + */ 197 + unsigned int pfCurrentOffset; 198 } drm_via_sarea_t; 199 200 typedef struct _drm_via_cmdbuf_size { ··· 212 213 #define VIA_IRQ_FLAGS_MASK 0xF0000000 214 215 + enum drm_via_irqs { 216 + drm_via_irq_hqv0 = 0, 217 + drm_via_irq_hqv1, 218 + drm_via_irq_dma0_dd, 219 + drm_via_irq_dma0_td, 220 + drm_via_irq_dma1_dd, 221 + drm_via_irq_dma1_td, 222 + drm_via_irq_num 223 + }; 224 + 225 struct drm_via_wait_irq_request { 226 unsigned irq; 227 via_irq_seq_type_t type; ··· 224 struct drm_wait_vblank_reply reply; 
225 } drm_via_irqwait_t; 226 227 + typedef struct drm_via_blitsync { 228 + uint32_t sync_handle; 229 + unsigned engine; 230 + } drm_via_blitsync_t; 231 232 + typedef struct drm_via_dmablit { 233 + uint32_t num_lines; 234 + uint32_t line_length; 235 + 236 + uint32_t fb_addr; 237 + uint32_t fb_stride; 238 239 + unsigned char *mem_addr; 240 + uint32_t mem_stride; 241 + 242 + int bounce_buffer; 243 + int to_fb; 244 + 245 + drm_via_blitsync_t sync; 246 + } drm_via_dmablit_t; 247 + 248 #endif /* _VIA_DRM_H_ */
+4 -17
drivers/char/drm/via_drv.c
··· 38 viadrv_PCI_IDS 39 }; 40 41 - static drm_ioctl_desc_t ioctls[] = { 42 - [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, 1, 0}, 43 - [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, 1, 0}, 44 - [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, 1, 0}, 45 - [DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, 1, 0}, 46 - [DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, 1, 0}, 47 - [DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, 1, 0}, 48 - [DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, 1, 0}, 49 - [DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, 1, 0}, 50 - [DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, 1, 0}, 51 - [DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, 1, 0}, 52 - [DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, 1, 0}, 53 - [DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, 1, 0} 54 - }; 55 - 56 static struct drm_driver driver = { 57 .driver_features = 58 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | 59 DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, 60 .context_ctor = via_init_context, 61 .context_dtor = via_final_context, 62 .vblank_wait = via_driver_vblank_wait, ··· 56 .reclaim_buffers = drm_core_reclaim_buffers, 57 .get_map_ofs = drm_core_get_map_ofs, 58 .get_reg_ofs = drm_core_get_reg_ofs, 59 - .ioctls = ioctls, 60 - .num_ioctls = DRM_ARRAY_SIZE(ioctls), 61 .fops = { 62 .owner = THIS_MODULE, 63 .open = drm_open, ··· 81 82 static int __init via_init(void) 83 { 84 via_init_command_verifier(); 85 return drm_init(&driver); 86 }
··· 38 viadrv_PCI_IDS 39 }; 40 41 static struct drm_driver driver = { 42 .driver_features = 43 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | 44 DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, 45 + .load = via_driver_load, 46 + .unload = via_driver_unload, 47 .context_ctor = via_init_context, 48 .context_dtor = via_final_context, 49 .vblank_wait = via_driver_vblank_wait, ··· 69 .reclaim_buffers = drm_core_reclaim_buffers, 70 .get_map_ofs = drm_core_get_map_ofs, 71 .get_reg_ofs = drm_core_get_reg_ofs, 72 + .ioctls = via_ioctls, 73 .fops = { 74 .owner = THIS_MODULE, 75 .open = drm_open, ··· 95 96 static int __init via_init(void) 97 { 98 + driver.num_ioctls = via_max_ioctl; 99 via_init_command_verifier(); 100 return drm_init(&driver); 101 }
+43 -13
drivers/char/drm/via_drv.h
··· 24 #ifndef _VIA_DRV_H_ 25 #define _VIA_DRV_H_ 26 27 - #define DRIVER_AUTHOR "VIA" 28 29 #define DRIVER_NAME "via" 30 #define DRIVER_DESC "VIA Unichrome / Pro" 31 - #define DRIVER_DATE "20050523" 32 33 #define DRIVER_MAJOR 2 34 - #define DRIVER_MINOR 6 35 - #define DRIVER_PATCHLEVEL 3 36 37 #include "via_verifier.h" 38 39 #define VIA_PCI_BUF_SIZE 60000 40 #define VIA_FIRE_BUF_SIZE 1024 41 - #define VIA_NUM_IRQS 2 42 43 typedef struct drm_via_ring_buffer { 44 - drm_map_t map; 45 char *virtual_start; 46 } drm_via_ring_buffer_t; 47 ··· 58 59 typedef struct drm_via_private { 60 drm_via_sarea_t *sarea_priv; 61 - drm_map_t *sarea; 62 - drm_map_t *fb; 63 - drm_map_t *mmio; 64 unsigned long agpAddr; 65 wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS]; 66 char *dma_ptr; ··· 84 maskarray_t *irq_masks; 85 uint32_t irq_enable_mask; 86 uint32_t irq_pending_mask; 87 } drm_via_private_t; 88 89 /* VIA MMIO register access */ 90 #define VIA_BASE ((dev_priv->mmio)) ··· 101 #define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg) 102 #define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val) 103 104 extern int via_init_context(drm_device_t * dev, int context); 105 extern int via_final_context(drm_device_t * dev, int context); 106 107 extern int via_do_cleanup_map(drm_device_t * dev); 108 - extern int via_map_init(struct inode *inode, struct file *filp, 109 - unsigned int cmd, unsigned long arg); 110 extern int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence); 111 112 extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS); ··· 139 extern void via_init_futex(drm_via_private_t * dev_priv); 140 extern void via_cleanup_futex(drm_via_private_t * dev_priv); 141 extern void via_release_futex(drm_via_private_t * dev_priv, int context); 142 143 - extern int via_parse_command_stream(drm_device_t * dev, const uint32_t * buf, 144 - unsigned int size); 145 146 #endif
··· 24 #ifndef _VIA_DRV_H_ 25 #define _VIA_DRV_H_ 26 27 + #define DRIVER_AUTHOR "Various" 28 29 #define DRIVER_NAME "via" 30 #define DRIVER_DESC "VIA Unichrome / Pro" 31 + #define DRIVER_DATE "20051022" 32 33 #define DRIVER_MAJOR 2 34 + #define DRIVER_MINOR 7 35 + #define DRIVER_PATCHLEVEL 2 36 37 #include "via_verifier.h" 38 39 + #include "via_dmablit.h" 40 + 41 #define VIA_PCI_BUF_SIZE 60000 42 #define VIA_FIRE_BUF_SIZE 1024 43 + #define VIA_NUM_IRQS 4 44 45 typedef struct drm_via_ring_buffer { 46 + drm_local_map_t map; 47 char *virtual_start; 48 } drm_via_ring_buffer_t; 49 ··· 56 57 typedef struct drm_via_private { 58 drm_via_sarea_t *sarea_priv; 59 + drm_local_map_t *sarea; 60 + drm_local_map_t *fb; 61 + drm_local_map_t *mmio; 62 unsigned long agpAddr; 63 wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS]; 64 char *dma_ptr; ··· 82 maskarray_t *irq_masks; 83 uint32_t irq_enable_mask; 84 uint32_t irq_pending_mask; 85 + int *irq_map; 86 + drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES]; 87 } drm_via_private_t; 88 + 89 + enum via_family { 90 + VIA_OTHER = 0, 91 + VIA_PRO_GROUP_A, 92 + }; 93 94 /* VIA MMIO register access */ 95 #define VIA_BASE ((dev_priv->mmio)) ··· 92 #define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg) 93 #define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val) 94 95 + extern drm_ioctl_desc_t via_ioctls[]; 96 + extern int via_max_ioctl; 97 + 98 + extern int via_fb_init(DRM_IOCTL_ARGS); 99 + extern int via_mem_alloc(DRM_IOCTL_ARGS); 100 + extern int via_mem_free(DRM_IOCTL_ARGS); 101 + extern int via_agp_init(DRM_IOCTL_ARGS); 102 + extern int via_map_init(DRM_IOCTL_ARGS); 103 + extern int via_decoder_futex(DRM_IOCTL_ARGS); 104 + extern int via_dma_init(DRM_IOCTL_ARGS); 105 + extern int via_cmdbuffer(DRM_IOCTL_ARGS); 106 + extern int via_flush_ioctl(DRM_IOCTL_ARGS); 107 + extern int via_pci_cmdbuffer(DRM_IOCTL_ARGS); 108 + extern int via_cmdbuf_size(DRM_IOCTL_ARGS); 109 + extern int via_wait_irq(DRM_IOCTL_ARGS); 110 + extern int via_dma_blit_sync( 
DRM_IOCTL_ARGS ); 111 + extern int via_dma_blit( DRM_IOCTL_ARGS ); 112 + 113 + extern int via_driver_load(drm_device_t *dev, unsigned long chipset); 114 + extern int via_driver_unload(drm_device_t *dev); 115 + 116 extern int via_init_context(drm_device_t * dev, int context); 117 extern int via_final_context(drm_device_t * dev, int context); 118 119 extern int via_do_cleanup_map(drm_device_t * dev); 120 extern int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence); 121 122 extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS); ··· 111 extern void via_init_futex(drm_via_private_t * dev_priv); 112 extern void via_cleanup_futex(drm_via_private_t * dev_priv); 113 extern void via_release_futex(drm_via_private_t * dev_priv, int context); 114 + extern int via_driver_irq_wait(drm_device_t * dev, unsigned int irq, 115 + int force_sequence, unsigned int *sequence); 116 117 + extern void via_dmablit_handler(drm_device_t *dev, int engine, int from_irq); 118 + extern void via_init_dmablit(drm_device_t *dev); 119 120 #endif
+44 -9
drivers/char/drm/via_irq.c
··· 50 #define VIA_IRQ_HQV1_ENABLE (1 << 25) 51 #define VIA_IRQ_HQV0_PENDING (1 << 9) 52 #define VIA_IRQ_HQV1_PENDING (1 << 10) 53 54 /* 55 * Device-specific IRQs go here. This type might need to be extended with ··· 70 {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010, 71 0x00000000}, 72 {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010, 73 - 0x00000000} 74 }; 75 static int via_num_pro_group_a = 76 sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t); 77 78 - static maskarray_t via_unichrome_irqs[] = { }; 79 static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t); 80 81 static unsigned time_diff(struct timeval *now, struct timeval *then) 82 { ··· 133 atomic_inc(&cur_irq->irq_received); 134 DRM_WAKEUP(&cur_irq->irq_queue); 135 handled = 1; 136 } 137 cur_irq++; 138 } ··· 190 return ret; 191 } 192 193 - static int 194 via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence, 195 unsigned int *sequence) 196 { ··· 199 drm_via_irq_t *cur_irq = dev_priv->via_irqs; 200 int ret = 0; 201 maskarray_t *masks = dev_priv->irq_masks; 202 203 DRM_DEBUG("%s\n", __FUNCTION__); 204 ··· 208 return DRM_ERR(EINVAL); 209 } 210 211 - if (irq >= dev_priv->num_irqs) { 212 DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, 213 irq); 214 return DRM_ERR(EINVAL); 215 } 216 217 - cur_irq += irq; 218 219 - if (masks[irq][2] && !force_sequence) { 220 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, 221 ((VIA_READ(masks[irq][2]) & masks[irq][3]) == 222 masks[irq][4])); ··· 260 via_pro_group_a_irqs : via_unichrome_irqs; 261 dev_priv->num_irqs = (dev_priv->pro_group_a) ? 
262 via_num_pro_group_a : via_num_unichrome; 263 264 for (i = 0; i < dev_priv->num_irqs; ++i) { 265 atomic_set(&cur_irq->irq_received, 0); ··· 277 278 dev_priv->last_vblank_valid = 0; 279 280 - // Clear VSync interrupt regs 281 status = VIA_READ(VIA_REG_INTERRUPT); 282 VIA_WRITE(VIA_REG_INTERRUPT, status & 283 ~(dev_priv->irq_enable_mask)); ··· 327 328 int via_wait_irq(DRM_IOCTL_ARGS) 329 { 330 - drm_file_t *priv = filp->private_data; 331 - drm_device_t *dev = priv->head->dev; 332 drm_via_irqwait_t __user *argp = (void __user *)data; 333 drm_via_irqwait_t irqwait; 334 struct timeval now;
··· 50 #define VIA_IRQ_HQV1_ENABLE (1 << 25) 51 #define VIA_IRQ_HQV0_PENDING (1 << 9) 52 #define VIA_IRQ_HQV1_PENDING (1 << 10) 53 + #define VIA_IRQ_DMA0_DD_ENABLE (1 << 20) 54 + #define VIA_IRQ_DMA0_TD_ENABLE (1 << 21) 55 + #define VIA_IRQ_DMA1_DD_ENABLE (1 << 22) 56 + #define VIA_IRQ_DMA1_TD_ENABLE (1 << 23) 57 + #define VIA_IRQ_DMA0_DD_PENDING (1 << 4) 58 + #define VIA_IRQ_DMA0_TD_PENDING (1 << 5) 59 + #define VIA_IRQ_DMA1_DD_PENDING (1 << 6) 60 + #define VIA_IRQ_DMA1_TD_PENDING (1 << 7) 61 + 62 63 /* 64 * Device-specific IRQs go here. This type might need to be extended with ··· 61 {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010, 62 0x00000000}, 63 {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010, 64 + 0x00000000}, 65 + {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0, 66 + VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, 67 + {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, 68 + VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, 69 }; 70 static int via_num_pro_group_a = 71 sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t); 72 + static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3}; 73 74 + static maskarray_t via_unichrome_irqs[] = { 75 + {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0, 76 + VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, 77 + {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, 78 + VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008} 79 + }; 80 static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t); 81 + static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1}; 82 83 static unsigned time_diff(struct timeval *now, struct timeval *then) 84 { ··· 113 atomic_inc(&cur_irq->irq_received); 114 DRM_WAKEUP(&cur_irq->irq_queue); 115 handled = 1; 116 + if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) { 117 + via_dmablit_handler(dev, 0, 1); 118 + } else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) { 119 + via_dmablit_handler(dev, 
1, 1); 120 + } 121 } 122 cur_irq++; 123 } ··· 165 return ret; 166 } 167 168 + int 169 via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence, 170 unsigned int *sequence) 171 { ··· 174 drm_via_irq_t *cur_irq = dev_priv->via_irqs; 175 int ret = 0; 176 maskarray_t *masks = dev_priv->irq_masks; 177 + int real_irq; 178 179 DRM_DEBUG("%s\n", __FUNCTION__); 180 ··· 182 return DRM_ERR(EINVAL); 183 } 184 185 + if (irq >= drm_via_irq_num) { 186 DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, 187 irq); 188 return DRM_ERR(EINVAL); 189 } 190 191 + real_irq = dev_priv->irq_map[irq]; 192 193 + if (real_irq < 0) { 194 + DRM_ERROR("%s Video IRQ %d not available on this hardware.\n", 195 + __FUNCTION__, irq); 196 + return DRM_ERR(EINVAL); 197 + } 198 + 199 + cur_irq += real_irq; 200 + 201 + if (masks[real_irq][2] && !force_sequence) { 202 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, 203 ((VIA_READ(masks[irq][2]) & masks[irq][3]) == 204 masks[irq][4])); ··· 226 via_pro_group_a_irqs : via_unichrome_irqs; 227 dev_priv->num_irqs = (dev_priv->pro_group_a) ? 228 via_num_pro_group_a : via_num_unichrome; 229 + dev_priv->irq_map = (dev_priv->pro_group_a) ? 230 + via_irqmap_pro_group_a : via_irqmap_unichrome; 231 232 for (i = 0; i < dev_priv->num_irqs; ++i) { 233 atomic_set(&cur_irq->irq_received, 0); ··· 241 242 dev_priv->last_vblank_valid = 0; 243 244 + /* Clear VSync interrupt regs */ 245 status = VIA_READ(VIA_REG_INTERRUPT); 246 VIA_WRITE(VIA_REG_INTERRUPT, status & 247 ~(dev_priv->irq_enable_mask)); ··· 291 292 int via_wait_irq(DRM_IOCTL_ARGS) 293 { 294 + DRM_DEVICE; 295 drm_via_irqwait_t __user *argp = (void __user *)data; 296 drm_via_irqwait_t irqwait; 297 struct timeval now;
+30 -17
drivers/char/drm/via_map.c
··· 27 28 static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init) 29 { 30 - drm_via_private_t *dev_priv; 31 32 DRM_DEBUG("%s\n", __FUNCTION__); 33 - 34 - dev_priv = drm_alloc(sizeof(drm_via_private_t), DRM_MEM_DRIVER); 35 - if (dev_priv == NULL) 36 - return -ENOMEM; 37 - 38 - memset(dev_priv, 0, sizeof(drm_via_private_t)); 39 40 DRM_GETSAREA(); 41 if (!dev_priv->sarea) { ··· 61 dev_priv->agpAddr = init->agpAddr; 62 63 via_init_futex(dev_priv); 64 - dev_priv->pro_group_a = (dev->pdev->device == 0x3118); 65 66 dev->dev_private = (void *)dev_priv; 67 return 0; ··· 70 71 int via_do_cleanup_map(drm_device_t * dev) 72 { 73 - if (dev->dev_private) { 74 - 75 - drm_via_private_t *dev_priv = dev->dev_private; 76 - 77 - via_dma_cleanup(dev); 78 - 79 - drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER); 80 - dev->dev_private = NULL; 81 - } 82 83 return 0; 84 } ··· 94 95 return -EINVAL; 96 }
··· 27 28 static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init) 29 { 30 + drm_via_private_t *dev_priv = dev->dev_private; 31 32 DRM_DEBUG("%s\n", __FUNCTION__); 33 34 DRM_GETSAREA(); 35 if (!dev_priv->sarea) { ··· 67 dev_priv->agpAddr = init->agpAddr; 68 69 via_init_futex(dev_priv); 70 + 71 + via_init_dmablit(dev); 72 73 dev->dev_private = (void *)dev_priv; 74 return 0; ··· 75 76 int via_do_cleanup_map(drm_device_t * dev) 77 { 78 + via_dma_cleanup(dev); 79 80 return 0; 81 } ··· 107 108 return -EINVAL; 109 } 110 + 111 + int via_driver_load(drm_device_t *dev, unsigned long chipset) 112 + { 113 + drm_via_private_t *dev_priv; 114 + 115 + dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER); 116 + if (dev_priv == NULL) 117 + return DRM_ERR(ENOMEM); 118 + 119 + dev->dev_private = (void *)dev_priv; 120 + 121 + if (chipset == VIA_PRO_GROUP_A) 122 + dev_priv->pro_group_a = 1; 123 + 124 + return 0; 125 + } 126 + 127 + int via_driver_unload(drm_device_t *dev) 128 + { 129 + drm_via_private_t *dev_priv = dev->dev_private; 130 + 131 + drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER); 132 + 133 + return 0; 134 + } 135 +
+8 -8
drivers/char/drm/via_mm.c
··· 199 sizeof(mem)); 200 201 switch (mem.type) { 202 - case VIDEO: 203 if (via_fb_alloc(&mem) < 0) 204 return -EFAULT; 205 DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, 206 sizeof(mem)); 207 return 0; 208 - case AGP: 209 if (via_agp_alloc(&mem) < 0) 210 return -EFAULT; 211 DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, ··· 232 if (block) { 233 fb.offset = block->ofs; 234 fb.free = (unsigned long)block; 235 - if (!add_alloc_set(fb.context, VIDEO, fb.free)) { 236 DRM_DEBUG("adding to allocation set fails\n"); 237 via_mmFreeMem((PMemBlock) fb.free); 238 retval = -1; ··· 269 if (block) { 270 agp.offset = block->ofs; 271 agp.free = (unsigned long)block; 272 - if (!add_alloc_set(agp.context, AGP, agp.free)) { 273 DRM_DEBUG("adding to allocation set fails\n"); 274 via_mmFreeMem((PMemBlock) agp.free); 275 retval = -1; ··· 297 298 switch (mem.type) { 299 300 - case VIDEO: 301 if (via_fb_free(&mem) == 0) 302 return 0; 303 break; 304 - case AGP: 305 if (via_agp_free(&mem) == 0) 306 return 0; 307 break; ··· 329 330 via_mmFreeMem((PMemBlock) fb.free); 331 332 - if (!del_alloc_set(fb.context, VIDEO, fb.free)) { 333 retval = -1; 334 } 335 ··· 352 353 via_mmFreeMem((PMemBlock) agp.free); 354 355 - if (!del_alloc_set(agp.context, AGP, agp.free)) { 356 retval = -1; 357 } 358
··· 199 sizeof(mem)); 200 201 switch (mem.type) { 202 + case VIA_MEM_VIDEO: 203 if (via_fb_alloc(&mem) < 0) 204 return -EFAULT; 205 DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, 206 sizeof(mem)); 207 return 0; 208 + case VIA_MEM_AGP: 209 if (via_agp_alloc(&mem) < 0) 210 return -EFAULT; 211 DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, ··· 232 if (block) { 233 fb.offset = block->ofs; 234 fb.free = (unsigned long)block; 235 + if (!add_alloc_set(fb.context, VIA_MEM_VIDEO, fb.free)) { 236 DRM_DEBUG("adding to allocation set fails\n"); 237 via_mmFreeMem((PMemBlock) fb.free); 238 retval = -1; ··· 269 if (block) { 270 agp.offset = block->ofs; 271 agp.free = (unsigned long)block; 272 + if (!add_alloc_set(agp.context, VIA_MEM_AGP, agp.free)) { 273 DRM_DEBUG("adding to allocation set fails\n"); 274 via_mmFreeMem((PMemBlock) agp.free); 275 retval = -1; ··· 297 298 switch (mem.type) { 299 300 + case VIA_MEM_VIDEO: 301 if (via_fb_free(&mem) == 0) 302 return 0; 303 break; 304 + case VIA_MEM_AGP: 305 if (via_agp_free(&mem) == 0) 306 return 0; 307 break; ··· 329 330 via_mmFreeMem((PMemBlock) fb.free); 331 332 + if (!del_alloc_set(fb.context, VIA_MEM_VIDEO, fb.free)) { 333 retval = -1; 334 } 335 ··· 352 353 via_mmFreeMem((PMemBlock) agp.free); 354 355 + if (!del_alloc_set(agp.context, VIA_MEM_AGP, agp.free)) { 356 retval = -1; 357 } 358
+3 -3
drivers/char/drm/via_verifier.c
··· 237 static __inline__ int 238 eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words) 239 { 240 - if ((*buf - buf_end) >= num_words) { 241 *buf += num_words; 242 return 0; 243 } ··· 249 * Partially stolen from drm_memory.h 250 */ 251 252 - static __inline__ drm_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq, 253 unsigned long offset, 254 unsigned long size, 255 drm_device_t * dev) 256 { 257 struct list_head *list; 258 drm_map_list_t *r_list; 259 - drm_map_t *map = seq->map_cache; 260 261 if (map && map->offset <= offset 262 && (offset + size) <= (map->offset + map->size)) {
··· 237 static __inline__ int 238 eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words) 239 { 240 + if ((buf_end - *buf) >= num_words) { 241 *buf += num_words; 242 return 0; 243 } ··· 249 * Partially stolen from drm_memory.h 250 */ 251 252 + static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq, 253 unsigned long offset, 254 unsigned long size, 255 drm_device_t * dev) 256 { 257 struct list_head *list; 258 drm_map_list_t *r_list; 259 + drm_local_map_t *map = seq->map_cache; 260 261 if (map && map->offset <= offset 262 && (offset + size) <= (map->offset + map->size)) {
+3 -1
drivers/char/drm/via_verifier.h
··· 47 int agp_texture; 48 int multitex; 49 drm_device_t *dev; 50 - drm_map_t *map_cache; 51 uint32_t vertex_count; 52 int agp; 53 const uint32_t *buf_start; ··· 55 56 extern int via_verify_command_stream(const uint32_t * buf, unsigned int size, 57 drm_device_t * dev, int agp); 58 59 #endif
··· 47 int agp_texture; 48 int multitex; 49 drm_device_t *dev; 50 + drm_local_map_t *map_cache; 51 uint32_t vertex_count; 52 int agp; 53 const uint32_t *buf_start; ··· 55 56 extern int via_verify_command_stream(const uint32_t * buf, unsigned int size, 57 drm_device_t * dev, int agp); 58 + extern int via_parse_command_stream(drm_device_t *dev, const uint32_t *buf, 59 + unsigned int size); 60 61 #endif
+5 -2
drivers/char/drm/via_video.c
··· 50 unsigned int i; 51 volatile int *lock; 52 53 for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) { 54 - lock = (int *)XVMCLOCKPTR(dev_priv->sarea_priv, i); 55 if ((_DRM_LOCKING_CONTEXT(*lock) == context)) { 56 if (_DRM_LOCK_IS_HELD(*lock) 57 && (*lock & _DRM_LOCK_CONT)) { ··· 82 if (fx.lock > VIA_NR_XVMC_LOCKS) 83 return -EFAULT; 84 85 - lock = (int *)XVMCLOCKPTR(sAPriv, fx.lock); 86 87 switch (fx.func) { 88 case VIA_FUTEX_WAIT:
··· 50 unsigned int i; 51 volatile int *lock; 52 53 + if (!dev_priv->sarea_priv) 54 + return; 55 + 56 for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) { 57 + lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i); 58 if ((_DRM_LOCKING_CONTEXT(*lock) == context)) { 59 if (_DRM_LOCK_IS_HELD(*lock) 60 && (*lock & _DRM_LOCK_CONT)) { ··· 79 if (fx.lock > VIA_NR_XVMC_LOCKS) 80 return -EFAULT; 81 82 + lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx.lock); 83 84 switch (fx.func) { 85 case VIA_FUTEX_WAIT: