Merge branch 'drm-forlinus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

+2790 -1808
+2 -2
drivers/char/drm/Makefile
··· 3 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 4 5 drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \ 6 - drm_drv.o drm_fops.o drm_init.o drm_ioctl.o drm_irq.o \ 7 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ 8 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ 9 drm_sysfs.o ··· 18 ffb-objs := ffb_drv.o ffb_context.o 19 sis-objs := sis_drv.o sis_ds.o sis_mm.o 20 savage-objs := savage_drv.o savage_bci.o savage_state.o 21 - via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o 22 23 ifeq ($(CONFIG_COMPAT),y) 24 drm-objs += drm_ioc32.o
··· 3 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 4 5 drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \ 6 + drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \ 7 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ 8 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ 9 drm_sysfs.o ··· 18 ffb-objs := ffb_drv.o ffb_context.o 19 sis-objs := sis_drv.o sis_ds.o sis_mm.o 20 savage-objs := savage_drv.o savage_bci.o savage_state.o 21 + via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o 22 23 ifeq ($(CONFIG_COMPAT),y) 24 drm-objs += drm_ioc32.o
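Worth noting against this Makefile churn: drm_init.o does not just move, it is dissolved. The drm_init.h externs (drm_flags, drm_parse_options, drm_cpu_valid) disappear from drmP.h below, drm_cpu_valid resurfaces as a static helper in drm_fops.c, and via gains via_dmablit.o for the new DMA blit engine.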
+11 -12
drivers/char/drm/ati_pcigart.c
··· 52 # define ATI_MAX_PCIGART_PAGES 8192 /**< 32 MB aperture, 4K pages */ 53 # define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */ 54 55 - static unsigned long drm_ati_alloc_pcigart_table(void) 56 { 57 unsigned long address; 58 struct page *page; ··· 72 } 73 74 DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address); 75 - return address; 76 } 77 78 - static void drm_ati_free_pcigart_table(unsigned long address) 79 { 80 struct page *page; 81 int i; 82 DRM_DEBUG("%s\n", __FUNCTION__); 83 84 - page = virt_to_page(address); 85 86 for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++) { 87 __put_page(page); 88 ClearPageReserved(page); 89 } 90 91 - free_pages(address, ATI_PCIGART_TABLE_ORDER); 92 } 93 94 - int drm_ati_pcigart_cleanup(drm_device_t * dev, 95 - drm_ati_pcigart_info * gart_info) 96 { 97 drm_sg_mem_t *entry = dev->sg; 98 unsigned long pages; ··· 135 136 EXPORT_SYMBOL(drm_ati_pcigart_cleanup); 137 138 - int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info * gart_info) 139 { 140 drm_sg_mem_t *entry = dev->sg; 141 - unsigned long address = 0; 142 unsigned long pages; 143 u32 *pci_gart, page_base, bus_address = 0; 144 int i, j, ret = 0; ··· 162 goto done; 163 } 164 165 - bus_address = pci_map_single(dev->pdev, (void *)address, 166 ATI_PCIGART_TABLE_PAGES * 167 PAGE_SIZE, PCI_DMA_TODEVICE); 168 if (bus_address == 0) { ··· 175 address = gart_info->addr; 176 bus_address = gart_info->bus_addr; 177 DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n", 178 - bus_address, address); 179 } 180 181 pci_gart = (u32 *) address; ··· 194 if (entry->busaddr[i] == 0) { 195 DRM_ERROR("unable to map PCIGART pages!\n"); 196 drm_ati_pcigart_cleanup(dev, gart_info); 197 - address = 0; 198 bus_address = 0; 199 goto done; 200 }
··· 52 # define ATI_MAX_PCIGART_PAGES 8192 /**< 32 MB aperture, 4K pages */ 53 # define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */ 54 55 + static void *drm_ati_alloc_pcigart_table(void) 56 { 57 unsigned long address; 58 struct page *page; ··· 72 } 73 74 DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address); 75 + return (void *)address; 76 } 77 78 + static void drm_ati_free_pcigart_table(void *address) 79 { 80 struct page *page; 81 int i; 82 DRM_DEBUG("%s\n", __FUNCTION__); 83 84 + page = virt_to_page((unsigned long)address); 85 86 for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++) { 87 __put_page(page); 88 ClearPageReserved(page); 89 } 90 91 + free_pages((unsigned long)address, ATI_PCIGART_TABLE_ORDER); 92 } 93 94 + int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info) 95 { 96 drm_sg_mem_t *entry = dev->sg; 97 unsigned long pages; ··· 136 137 EXPORT_SYMBOL(drm_ati_pcigart_cleanup); 138 139 + int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info) 140 { 141 drm_sg_mem_t *entry = dev->sg; 142 + void *address = NULL; 143 unsigned long pages; 144 u32 *pci_gart, page_base, bus_address = 0; 145 int i, j, ret = 0; ··· 163 goto done; 164 } 165 166 + bus_address = pci_map_single(dev->pdev, address, 167 ATI_PCIGART_TABLE_PAGES * 168 PAGE_SIZE, PCI_DMA_TODEVICE); 169 if (bus_address == 0) { ··· 176 address = gart_info->addr; 177 bus_address = gart_info->bus_addr; 178 DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n", 179 + bus_address, (unsigned long)address); 180 } 181 182 pci_gart = (u32 *) address; ··· 195 if (entry->busaddr[i] == 0) { 196 DRM_ERROR("unable to map PCIGART pages!\n"); 197 drm_ati_pcigart_cleanup(dev, gart_info); 198 + address = NULL; 199 bus_address = 0; 200 goto done; 201 }
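The PCIGART helpers now pass the table address around as a void * instead of an unsigned long, so callers no longer cast at every use. A minimal sketch of a driver-side caller against the new interface, assuming the DRM_ATI_GART_MAIN location constant and the nonzero-on-success return convention that the radeon driver of this period relied on; neither is shown in this hunk:

static int example_enable_pcigart(drm_device_t *dev)
{
	drm_ati_pcigart_info gart_info;

	memset(&gart_info, 0, sizeof(gart_info));
	gart_info.gart_table_location = DRM_ATI_GART_MAIN;	/* assumed: table lives in system RAM */
	gart_info.is_pcie = 0;
	gart_info.addr = NULL;		/* void * now; filled in by drm_ati_pcigart_init() */
	gart_info.bus_addr = 0;

	if (!drm_ati_pcigart_init(dev, &gart_info))	/* assumed: returns nonzero on success */
		return -ENOMEM;

	/* ... program the chip's GART base register with gart_info.bus_addr ... */
	return 0;
}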
+2 -2
drivers/char/drm/drm.h
··· 90 #define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */ 91 #define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */ 92 93 - #define _DRM_LOCK_HELD 0x80000000 /**< Hardware lock is held */ 94 - #define _DRM_LOCK_CONT 0x40000000 /**< Hardware lock is contended */ 95 #define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD) 96 #define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT) 97 #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
··· 90 #define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */ 91 #define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */ 92 93 + #define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */ 94 + #define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */ 95 #define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD) 96 #define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT) 97 #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
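On the U suffixes: a hexadecimal constant that does not fit in int already takes an unsigned type under both C89 and C99 promotion rules, so the suffix changes nothing on common ABIs; it makes the unsignedness explicit and quiets signed/unsigned warnings when these bits are masked against unsigned lock words. A self-contained userspace illustration of the macros in use:

#include <stdio.h>

#define _DRM_LOCK_HELD 0x80000000U
#define _DRM_LOCK_CONT 0x40000000U
#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))

int main(void)
{
	unsigned int lock = _DRM_LOCK_HELD | 42;	/* lock word: held, owned by context 42 */

	if (_DRM_LOCK_IS_HELD(lock))
		printf("context %u holds the lock\n", _DRM_LOCKING_CONTEXT(lock));
	return 0;
}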
+72 -50
drivers/char/drm/drmP.h
··· 144 /** \name Backward compatibility section */ 145 /*@{*/ 146 147 - #ifndef MODULE_LICENSE 148 - #define MODULE_LICENSE(x) 149 - #endif 150 - 151 - #ifndef preempt_disable 152 - #define preempt_disable() 153 - #define preempt_enable() 154 - #endif 155 - 156 - #ifndef pte_offset_map 157 - #define pte_offset_map pte_offset 158 - #define pte_unmap(pte) 159 - #endif 160 - 161 #define DRM_RPR_ARG(vma) vma, 162 163 #define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT) ··· 272 typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, 273 unsigned long arg); 274 275 typedef struct drm_ioctl_desc { 276 drm_ioctl_t *func; 277 - int auth_needed; 278 - int root_only; 279 } drm_ioctl_desc_t; 280 281 typedef struct drm_devstate { ··· 373 /** File private data */ 374 typedef struct drm_file { 375 int authenticated; 376 int minor; 377 pid_t pid; 378 uid_t uid; ··· 522 typedef struct ati_pcigart_info { 523 int gart_table_location; 524 int is_pcie; 525 - unsigned long addr; 526 dma_addr_t bus_addr; 527 } drm_ati_pcigart_info; 528 529 /** ··· 535 struct drm_device; 536 537 struct drm_driver { 538 - int (*preinit) (struct drm_device *, unsigned long flags); 539 - void (*prerelease) (struct drm_device *, struct file * filp); 540 - void (*pretakedown) (struct drm_device *); 541 - int (*postcleanup) (struct drm_device *); 542 - int (*presetup) (struct drm_device *); 543 - int (*postsetup) (struct drm_device *); 544 int (*dma_ioctl) (DRM_IOCTL_ARGS); 545 - int (*open_helper) (struct drm_device *, drm_file_t *); 546 - void (*free_filp_priv) (struct drm_device *, drm_file_t *); 547 - void (*release) (struct drm_device *, struct file * filp); 548 void (*dma_ready) (struct drm_device *); 549 int (*dma_quiescent) (struct drm_device *); 550 int (*context_ctor) (struct drm_device * dev, int context); ··· 550 int (*kernel_context_switch) (struct drm_device * dev, int old, 551 int new); 552 void (*kernel_context_switch_unlock) (struct drm_device * dev, 553 - drm_lock_t * lock); 554 int (*vblank_wait) (struct drm_device * dev, unsigned int *sequence); 555 556 /** 557 * Called by \c drm_device_is_agp. Typically used to determine if a ··· 569 570 /* these have to be filled in */ 571 572 - int (*postinit) (struct drm_device *, unsigned long flags); 573 - irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); 574 void (*irq_preinstall) (struct drm_device * dev); 575 void (*irq_postinstall) (struct drm_device * dev); 576 void (*irq_uninstall) (struct drm_device * dev); 577 void (*reclaim_buffers) (struct drm_device * dev, struct file * filp); 578 unsigned long (*get_map_ofs) (drm_map_t * map); 579 unsigned long (*get_reg_ofs) (struct drm_device * dev); 580 void (*set_version) (struct drm_device * dev, drm_set_version_t * sv); 581 - int (*version) (drm_version_t * version); 582 u32 driver_features; 583 int dev_priv_size; 584 drm_ioctl_desc_t *ioctls; ··· 750 { 751 return drm_core_check_feature(dev, DRIVER_USE_MTRR); 752 } 753 #else 754 #define drm_core_has_MTRR(dev) (0) 755 #endif 756 757 /******************************************************************/ 758 /** \name Internal function definitions */ 759 /*@{*/ 760 - 761 - /* Misc. 
support (drm_init.h) */ 762 - extern int drm_flags; 763 - extern void drm_parse_options(char *s); 764 - extern int drm_cpu_valid(void); 765 766 /* Driver support (drm_drv.h) */ 767 extern int drm_init(struct drm_driver *driver); ··· 794 unsigned int cmd, unsigned long arg); 795 extern long drm_compat_ioctl(struct file *filp, 796 unsigned int cmd, unsigned long arg); 797 - extern int drm_takedown(drm_device_t * dev); 798 799 /* Device support (drm_fops.h) */ 800 extern int drm_open(struct inode *inode, struct file *filp); 801 extern int drm_stub_open(struct inode *inode, struct file *filp); 802 - extern int drm_flush(struct file *filp); 803 extern int drm_fasync(int fd, struct file *filp, int on); 804 extern int drm_release(struct inode *inode, struct file *filp); 805 ··· 840 unsigned int cmd, unsigned long arg); 841 extern int drm_setversion(struct inode *inode, struct file *filp, 842 unsigned int cmd, unsigned long arg); 843 844 /* Context IOCTL support (drm_context.h) */ 845 extern int drm_resctx(struct inode *inode, struct file *filp, ··· 880 extern int drm_authmagic(struct inode *inode, struct file *filp, 881 unsigned int cmd, unsigned long arg); 882 883 - /* Placeholder for ioctls past */ 884 - extern int drm_noop(struct inode *inode, struct file *filp, 885 - unsigned int cmd, unsigned long arg); 886 - 887 /* Locking IOCTL support (drm_lock.h) */ 888 extern int drm_lock(struct inode *inode, struct file *filp, 889 unsigned int cmd, unsigned long arg); ··· 892 /* Buffer management support (drm_bufs.h) */ 893 extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request); 894 extern int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request); 895 extern int drm_addmap(drm_device_t * dev, unsigned int offset, 896 unsigned int size, drm_map_type_t type, 897 drm_map_flags_t flags, drm_local_map_t ** map_ptr); ··· 928 /* IRQ support (drm_irq.h) */ 929 extern int drm_control(struct inode *inode, struct file *filp, 930 unsigned int cmd, unsigned long arg); 931 - extern int drm_irq_uninstall(drm_device_t * dev); 932 extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); 933 extern void drm_driver_irq_preinstall(drm_device_t * dev); 934 extern void drm_driver_irq_postinstall(drm_device_t * dev); 935 extern void drm_driver_irq_uninstall(drm_device_t * dev); ··· 953 extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t * info); 954 extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp, 955 unsigned int cmd, unsigned long arg); 956 - extern int drm_agp_alloc(struct inode *inode, struct file *filp, 957 unsigned int cmd, unsigned long arg); 958 - extern int drm_agp_free(struct inode *inode, struct file *filp, 959 unsigned int cmd, unsigned long arg); 960 - extern int drm_agp_unbind(struct inode *inode, struct file *filp, 961 unsigned int cmd, unsigned long arg); 962 - extern int drm_agp_bind(struct inode *inode, struct file *filp, 963 unsigned int cmd, unsigned long arg); 964 extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, 965 size_t pages, u32 type); ··· 1015 char *name); 1016 extern void drm_sysfs_destroy(struct drm_sysfs_class *cs); 1017 extern struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, 1018 - dev_t dev, 1019 - struct device *device, 1020 - const char *fmt, ...); 1021 - extern void drm_sysfs_device_remove(dev_t dev); 1022 1023 /* Inline replacements for DRM_IOREMAP macros */ 1024 static __inline__ void drm_core_ioremap(struct drm_map *map,
··· 144 /** \name Backward compatibility section */ 145 /*@{*/ 146 147 #define DRM_RPR_ARG(vma) vma, 148 149 #define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT) ··· 286 typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, 287 unsigned long arg); 288 289 + #define DRM_AUTH 0x1 290 + #define DRM_MASTER 0x2 291 + #define DRM_ROOT_ONLY 0x4 292 + 293 typedef struct drm_ioctl_desc { 294 drm_ioctl_t *func; 295 + int flags; 296 } drm_ioctl_desc_t; 297 298 typedef struct drm_devstate { ··· 384 /** File private data */ 385 typedef struct drm_file { 386 int authenticated; 387 + int master; 388 int minor; 389 pid_t pid; 390 uid_t uid; ··· 532 typedef struct ati_pcigart_info { 533 int gart_table_location; 534 int is_pcie; 535 + void *addr; 536 dma_addr_t bus_addr; 537 + drm_local_map_t mapping; 538 } drm_ati_pcigart_info; 539 540 /** ··· 544 struct drm_device; 545 546 struct drm_driver { 547 + int (*load) (struct drm_device *, unsigned long flags); 548 + int (*firstopen) (struct drm_device *); 549 + int (*open) (struct drm_device *, drm_file_t *); 550 + void (*preclose) (struct drm_device *, struct file * filp); 551 + void (*postclose) (struct drm_device *, drm_file_t *); 552 + void (*lastclose) (struct drm_device *); 553 + int (*unload) (struct drm_device *); 554 int (*dma_ioctl) (DRM_IOCTL_ARGS); 555 void (*dma_ready) (struct drm_device *); 556 int (*dma_quiescent) (struct drm_device *); 557 int (*context_ctor) (struct drm_device * dev, int context); ··· 561 int (*kernel_context_switch) (struct drm_device * dev, int old, 562 int new); 563 void (*kernel_context_switch_unlock) (struct drm_device * dev, 564 + drm_lock_t *lock); 565 int (*vblank_wait) (struct drm_device * dev, unsigned int *sequence); 566 + int (*dri_library_name) (struct drm_device *dev, char *buf); 567 568 /** 569 * Called by \c drm_device_is_agp. 
Typically used to determine if a ··· 579 580 /* these have to be filled in */ 581 582 + irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); 583 void (*irq_preinstall) (struct drm_device * dev); 584 void (*irq_postinstall) (struct drm_device * dev); 585 void (*irq_uninstall) (struct drm_device * dev); 586 void (*reclaim_buffers) (struct drm_device * dev, struct file * filp); 587 + void (*reclaim_buffers_locked) (struct drm_device *dev, 588 + struct file *filp); 589 unsigned long (*get_map_ofs) (drm_map_t * map); 590 unsigned long (*get_reg_ofs) (struct drm_device * dev); 591 void (*set_version) (struct drm_device * dev, drm_set_version_t * sv); 592 + 593 + int major; 594 + int minor; 595 + int patchlevel; 596 + char *name; 597 + char *desc; 598 + char *date; 599 + 600 u32 driver_features; 601 int dev_priv_size; 602 drm_ioctl_desc_t *ioctls; ··· 752 { 753 return drm_core_check_feature(dev, DRIVER_USE_MTRR); 754 } 755 + 756 + #define DRM_MTRR_WC MTRR_TYPE_WRCOMB 757 + 758 + static inline int drm_mtrr_add(unsigned long offset, unsigned long size, 759 + unsigned int flags) 760 + { 761 + return mtrr_add(offset, size, flags, 1); 762 + } 763 + 764 + static inline int drm_mtrr_del(int handle, unsigned long offset, 765 + unsigned long size, unsigned int flags) 766 + { 767 + return mtrr_del(handle, offset, size); 768 + } 769 + 770 #else 771 #define drm_core_has_MTRR(dev) (0) 772 + 773 + #define DRM_MTRR_WC 0 774 + 775 + static inline int drm_mtrr_add(unsigned long offset, unsigned long size, 776 + unsigned int flags) 777 + { 778 + return 0; 779 + } 780 + 781 + static inline int drm_mtrr_del(int handle, unsigned long offset, 782 + unsigned long size, unsigned int flags) 783 + { 784 + return 0; 785 + } 786 #endif 787 788 /******************************************************************/ 789 /** \name Internal function definitions */ 790 /*@{*/ 791 792 /* Driver support (drm_drv.h) */ 793 extern int drm_init(struct drm_driver *driver); ··· 772 unsigned int cmd, unsigned long arg); 773 extern long drm_compat_ioctl(struct file *filp, 774 unsigned int cmd, unsigned long arg); 775 + extern int drm_lastclose(drm_device_t *dev); 776 777 /* Device support (drm_fops.h) */ 778 extern int drm_open(struct inode *inode, struct file *filp); 779 extern int drm_stub_open(struct inode *inode, struct file *filp); 780 extern int drm_fasync(int fd, struct file *filp, int on); 781 extern int drm_release(struct inode *inode, struct file *filp); 782 ··· 819 unsigned int cmd, unsigned long arg); 820 extern int drm_setversion(struct inode *inode, struct file *filp, 821 unsigned int cmd, unsigned long arg); 822 + extern int drm_noop(struct inode *inode, struct file *filp, 823 + unsigned int cmd, unsigned long arg); 824 825 /* Context IOCTL support (drm_context.h) */ 826 extern int drm_resctx(struct inode *inode, struct file *filp, ··· 857 extern int drm_authmagic(struct inode *inode, struct file *filp, 858 unsigned int cmd, unsigned long arg); 859 860 /* Locking IOCTL support (drm_lock.h) */ 861 extern int drm_lock(struct inode *inode, struct file *filp, 862 unsigned int cmd, unsigned long arg); ··· 873 /* Buffer management support (drm_bufs.h) */ 874 extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request); 875 extern int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request); 876 + extern int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request); 877 extern int drm_addmap(drm_device_t * dev, unsigned int offset, 878 unsigned int size, drm_map_type_t type, 879 drm_map_flags_t flags, drm_local_map_t ** 
map_ptr); ··· 908 /* IRQ support (drm_irq.h) */ 909 extern int drm_control(struct inode *inode, struct file *filp, 910 unsigned int cmd, unsigned long arg); 911 extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); 912 + extern int drm_irq_uninstall(drm_device_t * dev); 913 extern void drm_driver_irq_preinstall(drm_device_t * dev); 914 extern void drm_driver_irq_postinstall(drm_device_t * dev); 915 extern void drm_driver_irq_uninstall(drm_device_t * dev); ··· 933 extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t * info); 934 extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp, 935 unsigned int cmd, unsigned long arg); 936 + extern int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request); 937 + extern int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, 938 unsigned int cmd, unsigned long arg); 939 + extern int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request); 940 + extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp, 941 unsigned int cmd, unsigned long arg); 942 + extern int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request); 943 + extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, 944 unsigned int cmd, unsigned long arg); 945 + extern int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request); 946 + extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, 947 unsigned int cmd, unsigned long arg); 948 extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, 949 size_t pages, u32 type); ··· 991 char *name); 992 extern void drm_sysfs_destroy(struct drm_sysfs_class *cs); 993 extern struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, 994 + drm_head_t *head); 995 + extern void drm_sysfs_device_remove(struct class_device *class_dev); 996 997 /* Inline replacements for DRM_IOREMAP macros */ 998 static __inline__ void drm_core_ioremap(struct drm_map *map,
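The net effect of the drmP.h rework: the lifecycle hooks are renamed to describe when they run (preinit/postcleanup become load/unload, presetup becomes firstopen, open_helper becomes open, prerelease becomes preclose, pretakedown becomes lastclose), ioctl permissions collapse into one flags word, and the module version data moves into struct drm_driver itself. A minimal sketch of a driver declaration after this change; every example_* name and the EXAMPLE ioctl number are hypothetical, while the DRM_* macros and drm_core_* helpers are the existing core ones:

#define DRM_IOCTL_EXAMPLE_OP	DRM_IO(DRM_COMMAND_BASE + 0)	/* hypothetical ioctl number */

static int example_load(struct drm_device *dev, unsigned long flags) { return 0; }
static int example_unload(struct drm_device *dev) { return 0; }
static void example_lastclose(struct drm_device *dev) { }
static int example_op(struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg) { return 0; }

static drm_ioctl_desc_t example_ioctls[] = {
	/* authenticated clients only, per the new flags word */
	[DRM_IOCTL_NR(DRM_IOCTL_EXAMPLE_OP)] = {example_op, DRM_AUTH},
};

static struct drm_driver example_driver = {
	.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
	.load = example_load,			/* was .preinit */
	.unload = example_unload,		/* was .postcleanup */
	.lastclose = example_lastclose,		/* was .pretakedown */
	.reclaim_buffers = drm_core_reclaim_buffers,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = example_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(example_ioctls),
	/* identity data replaces the old version() callback */
	.name = "example",
	.desc = "hypothetical example driver",
	.date = "20051102",
	.major = 1,
	.minor = 0,
	.patchlevel = 0,
};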
+87 -46
drivers/char/drm/drm_agpsupport.c
··· 1 /** 2 - * \file drm_agpsupport.h 3 * DRM support for AGP/GART backend 4 * 5 * \author Rickard E. (Rik) Faith <faith@valinux.com> ··· 91 /** 92 * Acquire the AGP device. 93 * 94 - * \param dev DRM device that is to acquire AGP 95 * \return zero on success or a negative number on failure. 96 * 97 * Verifies the AGP device hasn't been acquired before and calls ··· 134 /** 135 * Release the AGP device. 136 * 137 - * \param dev DRM device that is to release AGP 138 * \return zero on success or a negative number on failure. 139 * 140 * Verifies the AGP device has been acquired and calls \c agp_backend_release. ··· 147 dev->agp->acquired = 0; 148 return 0; 149 } 150 - 151 EXPORT_SYMBOL(drm_agp_release); 152 153 int drm_agp_release_ioctl(struct inode *inode, struct file *filp, ··· 207 * Verifies the AGP device is present and has been acquired, allocates the 208 * memory via alloc_agp() and creates a drm_agp_mem entry for it. 209 */ 210 - int drm_agp_alloc(struct inode *inode, struct file *filp, 211 - unsigned int cmd, unsigned long arg) 212 { 213 - drm_file_t *priv = filp->private_data; 214 - drm_device_t *dev = priv->head->dev; 215 - drm_agp_buffer_t request; 216 drm_agp_mem_t *entry; 217 DRM_AGP_MEM *memory; 218 unsigned long pages; 219 u32 type; 220 - drm_agp_buffer_t __user *argp = (void __user *)arg; 221 222 if (!dev->agp || !dev->agp->acquired) 223 return -EINVAL; 224 - if (copy_from_user(&request, argp, sizeof(request))) 225 - return -EFAULT; 226 if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS))) 227 return -ENOMEM; 228 229 memset(entry, 0, sizeof(*entry)); 230 231 - pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE; 232 - type = (u32) request.type; 233 - 234 if (!(memory = drm_alloc_agp(dev, pages, type))) { 235 drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); 236 return -ENOMEM; ··· 238 dev->agp->memory->prev = entry; 239 dev->agp->memory = entry; 240 241 - request.handle = entry->handle; 242 - request.physical = memory->physical; 243 244 if (copy_to_user(argp, &request, sizeof(request))) { 245 dev->agp->memory = entry->next; 246 dev->agp->memory->prev = NULL; 247 - drm_free_agp(memory, pages); 248 drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); 249 return -EFAULT; 250 } 251 return 0; 252 } 253 ··· 307 * Verifies the AGP device is present and acquired, looks-up the AGP memory 308 * entry and passes it to the unbind_agp() function. 309 */ 310 - int drm_agp_unbind(struct inode *inode, struct file *filp, 311 - unsigned int cmd, unsigned long arg) 312 { 313 - drm_file_t *priv = filp->private_data; 314 - drm_device_t *dev = priv->head->dev; 315 - drm_agp_binding_t request; 316 drm_agp_mem_t *entry; 317 int ret; 318 319 if (!dev->agp || !dev->agp->acquired) 320 return -EINVAL; 321 - if (copy_from_user 322 - (&request, (drm_agp_binding_t __user *) arg, sizeof(request))) 323 - return -EFAULT; 324 - if (!(entry = drm_agp_lookup_entry(dev, request.handle))) 325 return -EINVAL; 326 if (!entry->bound) 327 return -EINVAL; ··· 322 if (ret == 0) 323 entry->bound = 0; 324 return ret; 325 } 326 327 /** ··· 352 * is currently bound into the GATT. Looks-up the AGP memory entry and passes 353 * it to bind_agp() function. 
354 */ 355 - int drm_agp_bind(struct inode *inode, struct file *filp, 356 - unsigned int cmd, unsigned long arg) 357 { 358 - drm_file_t *priv = filp->private_data; 359 - drm_device_t *dev = priv->head->dev; 360 - drm_agp_binding_t request; 361 drm_agp_mem_t *entry; 362 int retcode; 363 int page; 364 365 if (!dev->agp || !dev->agp->acquired) 366 return -EINVAL; 367 - if (copy_from_user 368 - (&request, (drm_agp_binding_t __user *) arg, sizeof(request))) 369 - return -EFAULT; 370 - if (!(entry = drm_agp_lookup_entry(dev, request.handle))) 371 return -EINVAL; 372 if (entry->bound) 373 return -EINVAL; 374 - page = (request.offset + PAGE_SIZE - 1) / PAGE_SIZE; 375 if ((retcode = drm_bind_agp(entry->memory, page))) 376 return retcode; 377 entry->bound = dev->agp->base + (page << PAGE_SHIFT); 378 DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n", 379 dev->agp->base, entry->bound); 380 return 0; 381 } 382 383 /** ··· 402 * unbind_agp(). Frees it via free_agp() as well as the entry itself 403 * and unlinks from the doubly linked list it's inserted in. 404 */ 405 - int drm_agp_free(struct inode *inode, struct file *filp, 406 - unsigned int cmd, unsigned long arg) 407 { 408 - drm_file_t *priv = filp->private_data; 409 - drm_device_t *dev = priv->head->dev; 410 - drm_agp_buffer_t request; 411 drm_agp_mem_t *entry; 412 413 if (!dev->agp || !dev->agp->acquired) 414 return -EINVAL; 415 - if (copy_from_user 416 - (&request, (drm_agp_buffer_t __user *) arg, sizeof(request))) 417 - return -EFAULT; 418 - if (!(entry = drm_agp_lookup_entry(dev, request.handle))) 419 return -EINVAL; 420 if (entry->bound) 421 drm_unbind_agp(entry->memory); ··· 425 drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); 426 return 0; 427 } 428 429 /** 430 * Initialize the AGP resources. 431 * 432 * \return pointer to a drm_agp_head structure. 433 * 434 */ 435 drm_agp_head_t *drm_agp_init(drm_device_t * dev) 436 {
··· 1 /** 2 + * \file drm_agpsupport.c 3 * DRM support for AGP/GART backend 4 * 5 * \author Rickard E. (Rik) Faith <faith@valinux.com> ··· 91 /** 92 * Acquire the AGP device. 93 * 94 + * \param dev DRM device that is to acquire AGP. 95 * \return zero on success or a negative number on failure. 96 * 97 * Verifies the AGP device hasn't been acquired before and calls ··· 134 /** 135 * Release the AGP device. 136 * 137 + * \param dev DRM device that is to release AGP. 138 * \return zero on success or a negative number on failure. 139 * 140 * Verifies the AGP device has been acquired and calls \c agp_backend_release. ··· 147 dev->agp->acquired = 0; 148 return 0; 149 } 150 EXPORT_SYMBOL(drm_agp_release); 151 152 int drm_agp_release_ioctl(struct inode *inode, struct file *filp, ··· 208 * Verifies the AGP device is present and has been acquired, allocates the 209 * memory via alloc_agp() and creates a drm_agp_mem entry for it. 210 */ 211 + int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request) 212 { 213 drm_agp_mem_t *entry; 214 DRM_AGP_MEM *memory; 215 unsigned long pages; 216 u32 type; 217 218 if (!dev->agp || !dev->agp->acquired) 219 return -EINVAL; 220 if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS))) 221 return -ENOMEM; 222 223 memset(entry, 0, sizeof(*entry)); 224 225 + pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; 226 + type = (u32) request->type; 227 if (!(memory = drm_alloc_agp(dev, pages, type))) { 228 drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); 229 return -ENOMEM; ··· 247 dev->agp->memory->prev = entry; 248 dev->agp->memory = entry; 249 250 + request->handle = entry->handle; 251 + request->physical = memory->physical; 252 + 253 + return 0; 254 + } 255 + EXPORT_SYMBOL(drm_agp_alloc); 256 + 257 + int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, 258 + unsigned int cmd, unsigned long arg) 259 + { 260 + drm_file_t *priv = filp->private_data; 261 + drm_device_t *dev = priv->head->dev; 262 + drm_agp_buffer_t request; 263 + drm_agp_buffer_t __user *argp = (void __user *)arg; 264 + int err; 265 + 266 + if (copy_from_user(&request, argp, sizeof(request))) 267 + return -EFAULT; 268 + 269 + err = drm_agp_alloc(dev, &request); 270 + if (err) 271 + return err; 272 273 if (copy_to_user(argp, &request, sizeof(request))) { 274 + drm_agp_mem_t *entry = dev->agp->memory; 275 + 276 dev->agp->memory = entry->next; 277 dev->agp->memory->prev = NULL; 278 + drm_free_agp(entry->memory, entry->pages); 279 drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); 280 return -EFAULT; 281 } 282 + 283 return 0; 284 } 285 ··· 293 * Verifies the AGP device is present and acquired, looks-up the AGP memory 294 * entry and passes it to the unbind_agp() function. 
295 */ 296 + int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request) 297 { 298 drm_agp_mem_t *entry; 299 int ret; 300 301 if (!dev->agp || !dev->agp->acquired) 302 return -EINVAL; 303 + if (!(entry = drm_agp_lookup_entry(dev, request->handle))) 304 return -EINVAL; 305 if (!entry->bound) 306 return -EINVAL; ··· 315 if (ret == 0) 316 entry->bound = 0; 317 return ret; 318 + } 319 + EXPORT_SYMBOL(drm_agp_unbind); 320 + 321 + int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, 322 + unsigned int cmd, unsigned long arg) 323 + { 324 + drm_file_t *priv = filp->private_data; 325 + drm_device_t *dev = priv->head->dev; 326 + drm_agp_binding_t request; 327 + 328 + if (copy_from_user 329 + (&request, (drm_agp_binding_t __user *) arg, sizeof(request))) 330 + return -EFAULT; 331 + 332 + return drm_agp_unbind(dev, &request); 333 } 334 335 /** ··· 330 * is currently bound into the GATT. Looks-up the AGP memory entry and passes 331 * it to bind_agp() function. 332 */ 333 + int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request) 334 { 335 drm_agp_mem_t *entry; 336 int retcode; 337 int page; 338 339 if (!dev->agp || !dev->agp->acquired) 340 return -EINVAL; 341 + if (!(entry = drm_agp_lookup_entry(dev, request->handle))) 342 return -EINVAL; 343 if (entry->bound) 344 return -EINVAL; 345 + page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE; 346 if ((retcode = drm_bind_agp(entry->memory, page))) 347 return retcode; 348 entry->bound = dev->agp->base + (page << PAGE_SHIFT); 349 DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n", 350 dev->agp->base, entry->bound); 351 return 0; 352 + } 353 + EXPORT_SYMBOL(drm_agp_bind); 354 + 355 + int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, 356 + unsigned int cmd, unsigned long arg) 357 + { 358 + drm_file_t *priv = filp->private_data; 359 + drm_device_t *dev = priv->head->dev; 360 + drm_agp_binding_t request; 361 + 362 + if (copy_from_user 363 + (&request, (drm_agp_binding_t __user *) arg, sizeof(request))) 364 + return -EFAULT; 365 + 366 + return drm_agp_bind(dev, &request); 367 } 368 369 /** ··· 372 * unbind_agp(). Frees it via free_agp() as well as the entry itself 373 * and unlinks from the doubly linked list it's inserted in. 374 */ 375 + int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request) 376 { 377 drm_agp_mem_t *entry; 378 379 if (!dev->agp || !dev->agp->acquired) 380 return -EINVAL; 381 + if (!(entry = drm_agp_lookup_entry(dev, request->handle))) 382 return -EINVAL; 383 if (entry->bound) 384 drm_unbind_agp(entry->memory); ··· 402 drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); 403 return 0; 404 } 405 + EXPORT_SYMBOL(drm_agp_free); 406 + 407 + int drm_agp_free_ioctl(struct inode *inode, struct file *filp, 408 + unsigned int cmd, unsigned long arg) 409 + { 410 + drm_file_t *priv = filp->private_data; 411 + drm_device_t *dev = priv->head->dev; 412 + drm_agp_buffer_t request; 413 + 414 + if (copy_from_user 415 + (&request, (drm_agp_buffer_t __user *) arg, sizeof(request))) 416 + return -EFAULT; 417 + 418 + return drm_agp_free(dev, &request); 419 + } 420 421 /** 422 * Initialize the AGP resources. 423 * 424 * \return pointer to a drm_agp_head structure. 425 * 426 + * Gets the drm_agp_t structure which is made available by the agpgart module 427 + * via the inter_module_* functions. Creates and initializes a drm_agp_head 428 + * structure. 429 */ 430 drm_agp_head_t *drm_agp_init(drm_device_t * dev) 431 {
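Each AGP ioctl is now a thin copy-in/copy-out shell around an exported core that takes the request structure directly, so in-kernel users (and the 32-bit compat layer) can call the cores without faking a userspace pointer. A minimal sketch of an in-kernel caller under stated assumptions: the function name is hypothetical, and type 0 with offset 0 means ordinary AGP memory bound at the start of the aperture:

static int example_agp_setup(drm_device_t *dev, unsigned long size)
{
	drm_agp_buffer_t alloc;
	drm_agp_binding_t bind;
	int ret;

	memset(&alloc, 0, sizeof(alloc));
	alloc.size = size;		/* bytes; the core rounds up to pages */
	alloc.type = 0;			/* ordinary AGP memory */
	ret = drm_agp_alloc(dev, &alloc);
	if (ret)
		return ret;

	memset(&bind, 0, sizeof(bind));
	bind.handle = alloc.handle;
	bind.offset = 0;		/* byte offset into the aperture */
	ret = drm_agp_bind(dev, &bind);
	if (ret)
		drm_agp_free(dev, &alloc);	/* unwind the allocation on failure */
	return ret;
}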
+33 -16
drivers/char/drm/drm_bufs.c
··· 36 #include <linux/vmalloc.h> 37 #include "drmP.h" 38 39 - unsigned long drm_get_resource_start(drm_device_t * dev, unsigned int resource) 40 { 41 return pci_resource_start(dev->pdev, resource); 42 } 43 - 44 EXPORT_SYMBOL(drm_get_resource_start); 45 46 - unsigned long drm_get_resource_len(drm_device_t * dev, unsigned int resource) 47 { 48 return pci_resource_len(dev->pdev, resource); 49 } 50 51 EXPORT_SYMBOL(drm_get_resource_len); 52 53 - static drm_map_list_t *drm_find_matching_map(drm_device_t * dev, 54 - drm_local_map_t * map) 55 { 56 struct list_head *list; 57 ··· 73 74 #ifdef _LP64 75 static __inline__ unsigned int HandleID(unsigned long lhandle, 76 - drm_device_t * dev) 77 { 78 static unsigned int map32_handle = START_RANGE; 79 unsigned int hash; ··· 154 case _DRM_REGISTERS: 155 case _DRM_FRAME_BUFFER: 156 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) 157 - if (map->offset + map->size < map->offset || 158 map->offset < virt_to_phys(high_memory)) { 159 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 160 return -EINVAL; ··· 300 return -EFAULT; 301 } 302 303 err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags, 304 &maplist); 305 ··· 334 * 335 * \sa drm_addmap 336 */ 337 - int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map) 338 { 339 struct list_head *list; 340 drm_map_list_t *r_list = NULL; ··· 386 387 return 0; 388 } 389 - 390 EXPORT_SYMBOL(drm_rmmap_locked); 391 392 - int drm_rmmap(drm_device_t * dev, drm_local_map_t * map) 393 { 394 int ret; 395 ··· 398 399 return ret; 400 } 401 - 402 EXPORT_SYMBOL(drm_rmmap); 403 404 /* The rmmap ioctl appears to be unnecessary. All mappings are torn down on ··· 548 DRM_DEBUG("count: %d\n", count); 549 DRM_DEBUG("order: %d\n", order); 550 DRM_DEBUG("size: %d\n", size); 551 - DRM_DEBUG("agp_offset: %lu\n", agp_offset); 552 DRM_DEBUG("alignment: %d\n", alignment); 553 DRM_DEBUG("page_order: %d\n", page_order); 554 DRM_DEBUG("total: %d\n", total); ··· 649 } 650 651 dma->buf_count += entry->buf_count; 652 dma->byte_count += byte_count; 653 654 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); ··· 666 atomic_dec(&dev->buf_alloc); 667 return 0; 668 } 669 - 670 EXPORT_SYMBOL(drm_addbufs_agp); 671 #endif /* __OS_HAS_AGP */ 672 ··· 690 691 if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) 692 return -EINVAL; 693 if (!dma) 694 return -EINVAL; 695 696 count = request->count; 697 order = drm_order(request->size); ··· 887 return 0; 888 889 } 890 - 891 EXPORT_SYMBOL(drm_addbufs_pci); 892 893 static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request) ··· 911 912 if (!dma) 913 return -EINVAL; 914 915 count = request->count; 916 order = drm_order(request->size); ··· 1033 } 1034 1035 dma->buf_count += entry->buf_count; 1036 dma->byte_count += byte_count; 1037 1038 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); ··· 1051 return 0; 1052 } 1053 1054 - static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request) 1055 { 1056 drm_device_dma_t *dma = dev->dma; 1057 drm_buf_entry_t *entry; ··· 1073 1074 if (!dma) 1075 return -EINVAL; 1076 1077 count = request->count; 1078 order = drm_order(request->size); ··· 1193 } 1194 1195 dma->buf_count += entry->buf_count; 1196 dma->byte_count += byte_count; 1197 1198 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); ··· 1210 atomic_dec(&dev->buf_alloc); 1211 return 0; 1212 } 1213 1214 /** 1215 * Add buffers for DMA transfers (ioctl). ··· 1593 1594 return order; 1595 } 1596 - 1597 EXPORT_SYMBOL(drm_order);
··· 36 #include <linux/vmalloc.h> 37 #include "drmP.h" 38 39 + unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource) 40 { 41 return pci_resource_start(dev->pdev, resource); 42 } 43 EXPORT_SYMBOL(drm_get_resource_start); 44 45 + unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource) 46 { 47 return pci_resource_len(dev->pdev, resource); 48 } 49 50 EXPORT_SYMBOL(drm_get_resource_len); 51 52 + static drm_map_list_t *drm_find_matching_map(drm_device_t *dev, 53 + drm_local_map_t *map) 54 { 55 struct list_head *list; 56 ··· 74 75 #ifdef _LP64 76 static __inline__ unsigned int HandleID(unsigned long lhandle, 77 + drm_device_t *dev) 78 { 79 static unsigned int map32_handle = START_RANGE; 80 unsigned int hash; ··· 155 case _DRM_REGISTERS: 156 case _DRM_FRAME_BUFFER: 157 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) 158 + if (map->offset + (map->size-1) < map->offset || 159 map->offset < virt_to_phys(high_memory)) { 160 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 161 return -EINVAL; ··· 301 return -EFAULT; 302 } 303 304 + if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP)) 305 + return -EPERM; 306 + 307 err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags, 308 &maplist); 309 ··· 332 * 333 * \sa drm_addmap 334 */ 335 + int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map) 336 { 337 struct list_head *list; 338 drm_map_list_t *r_list = NULL; ··· 384 385 return 0; 386 } 387 EXPORT_SYMBOL(drm_rmmap_locked); 388 389 + int drm_rmmap(drm_device_t *dev, drm_local_map_t *map) 390 { 391 int ret; 392 ··· 397 398 return ret; 399 } 400 EXPORT_SYMBOL(drm_rmmap); 401 402 /* The rmmap ioctl appears to be unnecessary. All mappings are torn down on ··· 548 DRM_DEBUG("count: %d\n", count); 549 DRM_DEBUG("order: %d\n", order); 550 DRM_DEBUG("size: %d\n", size); 551 + DRM_DEBUG("agp_offset: %lx\n", agp_offset); 552 DRM_DEBUG("alignment: %d\n", alignment); 553 DRM_DEBUG("page_order: %d\n", page_order); 554 DRM_DEBUG("total: %d\n", total); ··· 649 } 650 651 dma->buf_count += entry->buf_count; 652 + dma->seg_count += entry->seg_count; 653 + dma->page_count += byte_count >> PAGE_SHIFT; 654 dma->byte_count += byte_count; 655 656 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); ··· 664 atomic_dec(&dev->buf_alloc); 665 return 0; 666 } 667 EXPORT_SYMBOL(drm_addbufs_agp); 668 #endif /* __OS_HAS_AGP */ 669 ··· 689 690 if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) 691 return -EINVAL; 692 + 693 if (!dma) 694 return -EINVAL; 695 + 696 + if (!capable(CAP_SYS_ADMIN)) 697 + return -EPERM; 698 699 count = request->count; 700 order = drm_order(request->size); ··· 882 return 0; 883 884 } 885 EXPORT_SYMBOL(drm_addbufs_pci); 886 887 static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request) ··· 907 908 if (!dma) 909 return -EINVAL; 910 + 911 + if (!capable(CAP_SYS_ADMIN)) 912 + return -EPERM; 913 914 count = request->count; 915 order = drm_order(request->size); ··· 1026 } 1027 1028 dma->buf_count += entry->buf_count; 1029 + dma->seg_count += entry->seg_count; 1030 + dma->page_count += byte_count >> PAGE_SHIFT; 1031 dma->byte_count += byte_count; 1032 1033 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); ··· 1042 return 0; 1043 } 1044 1045 + int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request) 1046 { 1047 drm_device_dma_t *dma = dev->dma; 1048 drm_buf_entry_t *entry; ··· 1064 1065 if (!dma) 1066 return -EINVAL; 1067 + 1068 + if (!capable(CAP_SYS_ADMIN)) 1069 + 
return -EPERM; 1070 1071 count = request->count; 1072 order = drm_order(request->size); ··· 1181 } 1182 1183 dma->buf_count += entry->buf_count; 1184 + dma->seg_count += entry->seg_count; 1185 + dma->page_count += byte_count >> PAGE_SHIFT; 1186 dma->byte_count += byte_count; 1187 1188 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); ··· 1196 atomic_dec(&dev->buf_alloc); 1197 return 0; 1198 } 1199 + EXPORT_SYMBOL(drm_addbufs_fb); 1200 + 1201 1202 /** 1203 * Add buffers for DMA transfers (ioctl). ··· 1577 1578 return order; 1579 } 1580 EXPORT_SYMBOL(drm_order); 1581 + 1582 +
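Two quiet fixes in drm_bufs.c deserve a note: the map-range check now computes offset + (size-1), so a region that ends exactly at the top of the address space no longer wraps to 0 and gets rejected by mistake, and the addbufs paths now keep dma->seg_count and dma->page_count in step with dma->buf_count. A standalone illustration of the wraparound test (size assumed nonzero, as on the kernel path):

#include <limits.h>
#include <stdio.h>

/* mirrors: map->offset + (map->size - 1) < map->offset */
static int range_wraps(unsigned long offset, unsigned long size)
{
	return offset + (size - 1) < offset;
}

int main(void)
{
	/* ends exactly at ULONG_MAX: valid, no wrap */
	printf("%d\n", range_wraps(ULONG_MAX - 0xfff, 0x1000));
	/* crosses the top of the address space: wraps */
	printf("%d\n", range_wraps(ULONG_MAX - 0xfff, 0x2000));
	return 0;
}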
+1 -1
drivers/char/drm/drm_context.c
··· 433 if (ctx.handle != DRM_KERNEL_CONTEXT) { 434 if (dev->driver->context_ctor) 435 if (!dev->driver->context_ctor(dev, ctx.handle)) { 436 - DRM_DEBUG( "Running out of ctxs or memory.\n"); 437 return -ENOMEM; 438 } 439 }
··· 433 if (ctx.handle != DRM_KERNEL_CONTEXT) { 434 if (dev->driver->context_ctor) 435 if (!dev->driver->context_ctor(dev, ctx.handle)) { 436 + DRM_DEBUG("Running out of ctxs or memory.\n"); 437 return -ENOMEM; 438 } 439 }
+2 -2
drivers/char/drm/drm_core.h
··· 24 25 #define CORE_NAME "drm" 26 #define CORE_DESC "DRM shared core routines" 27 - #define CORE_DATE "20040925" 28 29 #define DRM_IF_MAJOR 1 30 #define DRM_IF_MINOR 2 31 32 #define CORE_MAJOR 1 33 #define CORE_MINOR 0 34 - #define CORE_PATCHLEVEL 0
··· 24 25 #define CORE_NAME "drm" 26 #define CORE_DESC "DRM shared core routines" 27 + #define CORE_DATE "20051102" 28 29 #define DRM_IF_MAJOR 1 30 #define DRM_IF_MINOR 2 31 32 #define CORE_MAJOR 1 33 #define CORE_MINOR 0 34 + #define CORE_PATCHLEVEL 1
+71 -67
drivers/char/drm/drm_drv.c
··· 56 57 /** Ioctl table */ 58 static drm_ioctl_desc_t drm_ioctls[] = { 59 - [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0, 0}, 60 - [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0, 0}, 61 - [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0, 0}, 62 - [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, 0, 1}, 63 - [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0, 0}, 64 - [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0, 0}, 65 - [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0, 0}, 66 - [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, 0, 1}, 67 68 - [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, 1, 1}, 69 - [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, 1, 1}, 70 - [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, 1, 1}, 71 - [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, 1, 1}, 72 73 - [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, 1, 1}, 74 - [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, 1, 0}, 75 76 - [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, 1, 1}, 77 - [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, 1, 0}, 78 79 - [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, 1, 1}, 80 - [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, 1, 1}, 81 - [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, 1, 1}, 82 - [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, 1, 0}, 83 - [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, 1, 1}, 84 - [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, 1, 1}, 85 - [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, 1, 0}, 86 87 - [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, 1, 1}, 88 - [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, 1, 1}, 89 90 - [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, 1, 0}, 91 - [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, 1, 0}, 92 93 - [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, 1, 0}, 94 - 95 - [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, 1, 1}, 96 - [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, 1, 1}, 97 - [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, 1, 0}, 98 - [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, 1, 0}, 99 - [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, 1, 0}, 100 /* The DRM_IOCTL_DMA ioctl should be defined by the driver. 
*/ 101 102 - [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, 1, 1}, 103 104 #if __OS_HAS_AGP 105 - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, 1, 1}, 106 - [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, 1, 1}, 107 - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, 1, 1}, 108 - [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, 1, 0}, 109 - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc, 1, 1}, 110 - [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free, 1, 1}, 111 - [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind, 1, 1}, 112 - [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind, 1, 1}, 113 #endif 114 115 - [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, 1, 1}, 116 - [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, 1, 1}, 117 118 - [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0, 0}, 119 }; 120 121 #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( drm_ioctls ) ··· 129 * 130 * \sa drm_device 131 */ 132 - int drm_takedown(drm_device_t * dev) 133 { 134 drm_magic_entry_t *pt, *next; 135 drm_map_list_t *r_list; ··· 138 139 DRM_DEBUG("\n"); 140 141 - if (dev->driver->pretakedown) 142 - dev->driver->pretakedown(dev); 143 - DRM_DEBUG("driver pretakedown completed\n"); 144 145 if (dev->unique) { 146 drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); ··· 233 } 234 up(&dev->struct_sem); 235 236 - DRM_DEBUG("takedown completed\n"); 237 return 0; 238 } 239 ··· 281 /** 282 * Called via cleanup_module() at module unload time. 283 * 284 - * Cleans up all DRM device, calling takedown(). 285 * 286 * \sa drm_init 287 */ ··· 294 return; 295 } 296 297 - drm_takedown(dev); 298 299 if (dev->maplist) { 300 drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS); ··· 317 dev->agp = NULL; 318 } 319 320 - if (dev->driver->postcleanup) 321 - dev->driver->postcleanup(dev); 322 323 drm_put_head(&dev->primary); 324 if (drm_put_dev(dev)) ··· 342 if (head->dev->driver != driver) 343 continue; 344 dev = head->dev; 345 - } 346 - if (dev) { 347 - /* release the pci driver */ 348 - if (dev->pdev) 349 - pci_dev_put(dev->pdev); 350 - drm_cleanup(dev); 351 } 352 DRM_INFO("Module unloaded\n"); 353 } ··· 432 drm_device_t *dev = priv->head->dev; 433 drm_version_t __user *argp = (void __user *)arg; 434 drm_version_t version; 435 - int ret; 436 437 if (copy_from_user(&version, argp, sizeof(version))) 438 return -EFAULT; 439 440 - /* version is a required function to return the personality module version */ 441 - if ((ret = dev->driver->version(&version))) 442 - return ret; 443 444 if (copy_to_user(argp, &version, sizeof(version))) 445 return -EFAULT; ··· 496 if (!func) { 497 DRM_DEBUG("no function\n"); 498 retcode = -EINVAL; 499 - } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN)) || 500 - (ioctl->auth_needed && !priv->authenticated)) { 501 retcode = -EACCES; 502 } else { 503 retcode = func(inode, filp, cmd, arg);
··· 56 57 /** Ioctl table */ 58 static drm_ioctl_desc_t drm_ioctls[] = { 59 + [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0}, 60 + [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0}, 61 + [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0}, 62 + [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY}, 63 + [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0}, 64 + [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0}, 65 + [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0}, 66 + [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, DRM_MASTER|DRM_ROOT_ONLY}, 67 + [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 68 + [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 69 + [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 70 + [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 71 72 + [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 73 + [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, DRM_AUTH}, 74 75 + [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 76 + [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH}, 77 78 + [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 79 + [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 80 + [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 81 + [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH}, 82 + [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 83 + [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 84 + [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, DRM_AUTH}, 85 86 + [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 87 + [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 88 89 + [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, DRM_AUTH}, 90 + [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, DRM_AUTH}, 91 92 + [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, DRM_AUTH}, 93 94 + [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 95 + [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 96 + [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, DRM_AUTH}, 97 + [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, DRM_AUTH}, 98 + [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, DRM_AUTH}, 99 /* The DRM_IOCTL_DMA ioctl should be defined by the driver. 
*/ 100 + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = {NULL, DRM_AUTH}, 101 102 + [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 103 104 #if __OS_HAS_AGP 105 + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 106 + [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 107 + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 108 + [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, DRM_AUTH}, 109 + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 110 + [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 111 + [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 112 + [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 113 #endif 114 115 + [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 116 + [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 117 118 + [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, 119 }; 120 121 #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( drm_ioctls ) ··· 129 * 130 * \sa drm_device 131 */ 132 + int drm_lastclose(drm_device_t * dev) 133 { 134 drm_magic_entry_t *pt, *next; 135 drm_map_list_t *r_list; ··· 138 139 DRM_DEBUG("\n"); 140 141 + if (dev->driver->lastclose) 142 + dev->driver->lastclose(dev); 143 + DRM_DEBUG("driver lastclose completed\n"); 144 145 if (dev->unique) { 146 drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); ··· 233 } 234 up(&dev->struct_sem); 235 236 + DRM_DEBUG("lastclose completed\n"); 237 return 0; 238 } 239 ··· 281 /** 282 * Called via cleanup_module() at module unload time. 283 * 284 + * Cleans up all DRM device, calling drm_lastclose(). 285 * 286 * \sa drm_init 287 */ ··· 294 return; 295 } 296 297 + drm_lastclose(dev); 298 299 if (dev->maplist) { 300 drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS); ··· 317 dev->agp = NULL; 318 } 319 320 + if (dev->driver->unload) 321 + dev->driver->unload(dev); 322 323 drm_put_head(&dev->primary); 324 if (drm_put_dev(dev)) ··· 342 if (head->dev->driver != driver) 343 continue; 344 dev = head->dev; 345 + if (dev) { 346 + /* release the pci driver */ 347 + if (dev->pdev) 348 + pci_dev_put(dev->pdev); 349 + drm_cleanup(dev); 350 + } 351 } 352 DRM_INFO("Module unloaded\n"); 353 } ··· 432 drm_device_t *dev = priv->head->dev; 433 drm_version_t __user *argp = (void __user *)arg; 434 drm_version_t version; 435 + int len; 436 437 if (copy_from_user(&version, argp, sizeof(version))) 438 return -EFAULT; 439 440 + version.version_major = dev->driver->major; 441 + version.version_minor = dev->driver->minor; 442 + version.version_patchlevel = dev->driver->patchlevel; 443 + DRM_COPY(version.name, dev->driver->name); 444 + DRM_COPY(version.date, dev->driver->date); 445 + DRM_COPY(version.desc, dev->driver->desc); 446 447 if (copy_to_user(argp, &version, sizeof(version))) 448 return -EFAULT; ··· 493 if (!func) { 494 DRM_DEBUG("no function\n"); 495 retcode = -EINVAL; 496 + } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || 497 + ((ioctl->flags & DRM_AUTH) && !priv->authenticated) || 498 + ((ioctl->flags & DRM_MASTER) && !priv->master)) { 499 retcode = -EACCES; 500 } else { 501 retcode = func(inode, filp, cmd, arg);
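Since drm_version() now answers directly from the major/minor/patchlevel/name/date/desc fields of struct drm_driver, every personality module loses its version() callback and userspace keeps seeing the same data. A hedged userspace sketch that queries it; the drm.h include path, buffer sizes, and the card0 node are assumptions about the local setup:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "drm.h"	/* drm_version_t, DRM_IOCTL_VERSION; path depends on the install */

int main(void)
{
	drm_version_t v;
	char name[64] = "", date[64] = "", desc[64] = "";
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;
	memset(&v, 0, sizeof(v));
	v.name = name;	v.name_len = sizeof(name) - 1;	/* keep a trailing NUL */
	v.date = date;	v.date_len = sizeof(date) - 1;
	v.desc = desc;	v.desc_len = sizeof(desc) - 1;
	if (ioctl(fd, DRM_IOCTL_VERSION, &v) == 0)
		printf("%s %d.%d.%d (%s): %s\n", name, v.version_major,
		       v.version_minor, v.version_patchlevel, date, desc);
	close(fd);
	return 0;
}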
+180 -137
drivers/char/drm/drm_fops.c
··· 35 */ 36 37 #include "drmP.h" 38 #include <linux/poll.h> 39 40 static int drm_open_helper(struct inode *inode, struct file *filp, ··· 43 44 static int drm_setup(drm_device_t * dev) 45 { 46 int i; 47 int ret; 48 49 - if (dev->driver->presetup) { 50 - ret = dev->driver->presetup(dev); 51 if (ret != 0) 52 return ret; 53 } 54 55 atomic_set(&dev->ioctl_count, 0); 56 atomic_set(&dev->vma_count, 0); ··· 116 * drm_select_queue fails between the time the interrupt is 117 * initialized and the time the queues are initialized. 118 */ 119 - if (dev->driver->postsetup) 120 - dev->driver->postsetup(dev); 121 122 return 0; 123 } ··· 159 160 return retcode; 161 } 162 - 163 EXPORT_SYMBOL(drm_open); 164 165 /** 166 * Release file. ··· 330 * If the hardware lock is held then free it, and take it again for the kernel 331 * context since it's necessary to reclaim buffers. Unlink the file private 332 * data from its list and free it. Decreases the open count and if it reaches 333 - * zero calls takedown(). 334 */ 335 int drm_release(struct inode *inode, struct file *filp) 336 { ··· 343 344 DRM_DEBUG("open_count = %d\n", dev->open_count); 345 346 - if (dev->driver->prerelease) 347 - dev->driver->prerelease(dev, filp); 348 349 /* ======================================================== 350 * Begin inline drm_release ··· 360 DRM_DEBUG("File %p released, freeing lock for context %d\n", 361 filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); 362 363 - if (dev->driver->release) 364 - dev->driver->release(dev, filp); 365 366 drm_lock_free(dev, &dev->lock.hw_lock->lock, 367 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); ··· 370 hardware at this point, possibly 371 processed via a callback to the X 372 server. */ 373 - } else if (dev->driver->release && priv->lock_count 374 && dev->lock.hw_lock) { 375 /* The lock is required to reclaim buffers */ 376 DECLARE_WAITQUEUE(entry, current); ··· 400 __set_current_state(TASK_RUNNING); 401 remove_wait_queue(&dev->lock.lock_queue, &entry); 402 if (!retcode) { 403 - if (dev->driver->release) 404 - dev->driver->release(dev, filp); 405 drm_lock_free(dev, &dev->lock.hw_lock->lock, 406 DRM_KERNEL_CONTEXT); 407 } 408 } 409 410 - if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) 411 - && !dev->driver->release) { 412 dev->driver->reclaim_buffers(dev, filp); 413 } 414 ··· 454 } 455 up(&dev->struct_sem); 456 457 - if (dev->driver->free_filp_priv) 458 - dev->driver->free_filp_priv(dev, priv); 459 - 460 drm_free(priv, sizeof(*priv), DRM_MEM_FILES); 461 462 /* ======================================================== ··· 474 } 475 spin_unlock(&dev->count_lock); 476 unlock_kernel(); 477 - return drm_takedown(dev); 478 } 479 spin_unlock(&dev->count_lock); 480 ··· 482 483 return retcode; 484 } 485 - 486 EXPORT_SYMBOL(drm_release); 487 - 488 - /** 489 - * Called whenever a process opens /dev/drm. 490 - * 491 - * \param inode device inode. 492 - * \param filp file pointer. 493 - * \param dev device. 494 - * \return zero on success or a negative number on failure. 495 - * 496 - * Creates and initializes a drm_file structure for the file private data in \p 497 - * filp and add it into the double linked list in \p dev. 
498 - */ 499 - static int drm_open_helper(struct inode *inode, struct file *filp, 500 - drm_device_t * dev) 501 - { 502 - int minor = iminor(inode); 503 - drm_file_t *priv; 504 - int ret; 505 - 506 - if (filp->f_flags & O_EXCL) 507 - return -EBUSY; /* No exclusive opens */ 508 - if (!drm_cpu_valid()) 509 - return -EINVAL; 510 - 511 - DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor); 512 - 513 - priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES); 514 - if (!priv) 515 - return -ENOMEM; 516 - 517 - memset(priv, 0, sizeof(*priv)); 518 - filp->private_data = priv; 519 - priv->uid = current->euid; 520 - priv->pid = current->pid; 521 - priv->minor = minor; 522 - priv->head = drm_heads[minor]; 523 - priv->ioctl_count = 0; 524 - priv->authenticated = capable(CAP_SYS_ADMIN); 525 - priv->lock_count = 0; 526 - 527 - if (dev->driver->open_helper) { 528 - ret = dev->driver->open_helper(dev, priv); 529 - if (ret < 0) 530 - goto out_free; 531 - } 532 - 533 - down(&dev->struct_sem); 534 - if (!dev->file_last) { 535 - priv->next = NULL; 536 - priv->prev = NULL; 537 - dev->file_first = priv; 538 - dev->file_last = priv; 539 - } else { 540 - priv->next = NULL; 541 - priv->prev = dev->file_last; 542 - dev->file_last->next = priv; 543 - dev->file_last = priv; 544 - } 545 - up(&dev->struct_sem); 546 - 547 - #ifdef __alpha__ 548 - /* 549 - * Default the hose 550 - */ 551 - if (!dev->hose) { 552 - struct pci_dev *pci_dev; 553 - pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL); 554 - if (pci_dev) { 555 - dev->hose = pci_dev->sysdata; 556 - pci_dev_put(pci_dev); 557 - } 558 - if (!dev->hose) { 559 - struct pci_bus *b = pci_bus_b(pci_root_buses.next); 560 - if (b) 561 - dev->hose = b->sysdata; 562 - } 563 - } 564 - #endif 565 - 566 - return 0; 567 - out_free: 568 - drm_free(priv, sizeof(*priv), DRM_MEM_FILES); 569 - filp->private_data = NULL; 570 - return ret; 571 - } 572 - 573 - /** No-op. */ 574 - int drm_flush(struct file *filp) 575 - { 576 - drm_file_t *priv = filp->private_data; 577 - drm_device_t *dev = priv->head->dev; 578 - 579 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", 580 - current->pid, (long)old_encode_dev(priv->head->device), 581 - dev->open_count); 582 - return 0; 583 - } 584 - 585 - EXPORT_SYMBOL(drm_flush); 586 - 587 - /** No-op. */ 588 - int drm_fasync(int fd, struct file *filp, int on) 589 - { 590 - drm_file_t *priv = filp->private_data; 591 - drm_device_t *dev = priv->head->dev; 592 - int retcode; 593 - 594 - DRM_DEBUG("fd = %d, device = 0x%lx\n", fd, 595 - (long)old_encode_dev(priv->head->device)); 596 - retcode = fasync_helper(fd, filp, on, &dev->buf_async); 597 - if (retcode < 0) 598 - return retcode; 599 - return 0; 600 - } 601 - 602 - EXPORT_SYMBOL(drm_fasync); 603 604 /** No-op. */ 605 unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait) 606 { 607 return 0; 608 } 609 - 610 EXPORT_SYMBOL(drm_poll);
··· 35 */ 36 37 #include "drmP.h" 38 + #include "drm_sarea.h" 39 #include <linux/poll.h> 40 41 static int drm_open_helper(struct inode *inode, struct file *filp, ··· 42 43 static int drm_setup(drm_device_t * dev) 44 { 45 + drm_local_map_t *map; 46 int i; 47 int ret; 48 49 + if (dev->driver->firstopen) { 50 + ret = dev->driver->firstopen(dev); 51 if (ret != 0) 52 return ret; 53 } 54 + 55 + /* prebuild the SAREA */ 56 + i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map); 57 + if (i != 0) 58 + return i; 59 60 atomic_set(&dev->ioctl_count, 0); 61 atomic_set(&dev->vma_count, 0); ··· 109 * drm_select_queue fails between the time the interrupt is 110 * initialized and the time the queues are initialized. 111 */ 112 113 return 0; 114 } ··· 154 155 return retcode; 156 } 157 EXPORT_SYMBOL(drm_open); 158 + 159 + /** 160 + * File \c open operation. 161 + * 162 + * \param inode device inode. 163 + * \param filp file pointer. 164 + * 165 + * Puts the dev->fops corresponding to the device minor number into 166 + * \p filp, call the \c open method, and restore the file operations. 167 + */ 168 + int drm_stub_open(struct inode *inode, struct file *filp) 169 + { 170 + drm_device_t *dev = NULL; 171 + int minor = iminor(inode); 172 + int err = -ENODEV; 173 + struct file_operations *old_fops; 174 + 175 + DRM_DEBUG("\n"); 176 + 177 + if (!((minor >= 0) && (minor < drm_cards_limit))) 178 + return -ENODEV; 179 + 180 + if (!drm_heads[minor]) 181 + return -ENODEV; 182 + 183 + if (!(dev = drm_heads[minor]->dev)) 184 + return -ENODEV; 185 + 186 + old_fops = filp->f_op; 187 + filp->f_op = fops_get(&dev->driver->fops); 188 + if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) { 189 + fops_put(filp->f_op); 190 + filp->f_op = fops_get(old_fops); 191 + } 192 + fops_put(old_fops); 193 + 194 + return err; 195 + } 196 + 197 + /** 198 + * Check whether DRI will run on this CPU. 199 + * 200 + * \return non-zero if the DRI will run on this CPU, or zero otherwise. 201 + */ 202 + static int drm_cpu_valid(void) 203 + { 204 + #if defined(__i386__) 205 + if (boot_cpu_data.x86 == 3) 206 + return 0; /* No cmpxchg on a 386 */ 207 + #endif 208 + #if defined(__sparc__) && !defined(__sparc_v9__) 209 + return 0; /* No cmpxchg before v9 sparc. */ 210 + #endif 211 + return 1; 212 + } 213 + 214 + /** 215 + * Called whenever a process opens /dev/drm. 216 + * 217 + * \param inode device inode. 218 + * \param filp file pointer. 219 + * \param dev device. 220 + * \return zero on success or a negative number on failure. 221 + * 222 + * Creates and initializes a drm_file structure for the file private data in \p 223 + * filp and add it into the double linked list in \p dev. 
224 + */ 225 + static int drm_open_helper(struct inode *inode, struct file *filp, 226 + drm_device_t * dev) 227 + { 228 + int minor = iminor(inode); 229 + drm_file_t *priv; 230 + int ret; 231 + 232 + if (filp->f_flags & O_EXCL) 233 + return -EBUSY; /* No exclusive opens */ 234 + if (!drm_cpu_valid()) 235 + return -EINVAL; 236 + 237 + DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor); 238 + 239 + priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES); 240 + if (!priv) 241 + return -ENOMEM; 242 + 243 + memset(priv, 0, sizeof(*priv)); 244 + filp->private_data = priv; 245 + priv->uid = current->euid; 246 + priv->pid = current->pid; 247 + priv->minor = minor; 248 + priv->head = drm_heads[minor]; 249 + priv->ioctl_count = 0; 250 + /* for compatibility root is always authenticated */ 251 + priv->authenticated = capable(CAP_SYS_ADMIN); 252 + priv->lock_count = 0; 253 + 254 + if (dev->driver->open) { 255 + ret = dev->driver->open(dev, priv); 256 + if (ret < 0) 257 + goto out_free; 258 + } 259 + 260 + down(&dev->struct_sem); 261 + if (!dev->file_last) { 262 + priv->next = NULL; 263 + priv->prev = NULL; 264 + dev->file_first = priv; 265 + dev->file_last = priv; 266 + /* first opener automatically becomes master */ 267 + priv->master = 1; 268 + } else { 269 + priv->next = NULL; 270 + priv->prev = dev->file_last; 271 + dev->file_last->next = priv; 272 + dev->file_last = priv; 273 + } 274 + up(&dev->struct_sem); 275 + 276 + #ifdef __alpha__ 277 + /* 278 + * Default the hose 279 + */ 280 + if (!dev->hose) { 281 + struct pci_dev *pci_dev; 282 + pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL); 283 + if (pci_dev) { 284 + dev->hose = pci_dev->sysdata; 285 + pci_dev_put(pci_dev); 286 + } 287 + if (!dev->hose) { 288 + struct pci_bus *b = pci_bus_b(pci_root_buses.next); 289 + if (b) 290 + dev->hose = b->sysdata; 291 + } 292 + } 293 + #endif 294 + 295 + return 0; 296 + out_free: 297 + drm_free(priv, sizeof(*priv), DRM_MEM_FILES); 298 + filp->private_data = NULL; 299 + return ret; 300 + } 301 + 302 + /** No-op. */ 303 + int drm_fasync(int fd, struct file *filp, int on) 304 + { 305 + drm_file_t *priv = filp->private_data; 306 + drm_device_t *dev = priv->head->dev; 307 + int retcode; 308 + 309 + DRM_DEBUG("fd = %d, device = 0x%lx\n", fd, 310 + (long)old_encode_dev(priv->head->device)); 311 + retcode = fasync_helper(fd, filp, on, &dev->buf_async); 312 + if (retcode < 0) 313 + return retcode; 314 + return 0; 315 + } 316 + EXPORT_SYMBOL(drm_fasync); 317 318 /** 319 * Release file. ··· 167 * If the hardware lock is held then free it, and take it again for the kernel 168 * context since it's necessary to reclaim buffers. Unlink the file private 169 * data from its list and free it. Decreases the open count and if it reaches 170 + * zero calls drm_lastclose(). 171 */ 172 int drm_release(struct inode *inode, struct file *filp) 173 { ··· 180 181 DRM_DEBUG("open_count = %d\n", dev->open_count); 182 183 + if (dev->driver->preclose) 184 + dev->driver->preclose(dev, filp); 185 186 /* ======================================================== 187 * Begin inline drm_release ··· 197 DRM_DEBUG("File %p released, freeing lock for context %d\n", 198 filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); 199 200 + if (dev->driver->reclaim_buffers_locked) 201 + dev->driver->reclaim_buffers_locked(dev, filp); 202 203 drm_lock_free(dev, &dev->lock.hw_lock->lock, 204 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); ··· 207 hardware at this point, possibly 208 processed via a callback to the X 209 server. 
*/ 210 + } else if (dev->driver->reclaim_buffers_locked && priv->lock_count 211 && dev->lock.hw_lock) { 212 /* The lock is required to reclaim buffers */ 213 DECLARE_WAITQUEUE(entry, current); ··· 237 __set_current_state(TASK_RUNNING); 238 remove_wait_queue(&dev->lock.lock_queue, &entry); 239 if (!retcode) { 240 + dev->driver->reclaim_buffers_locked(dev, filp); 241 drm_lock_free(dev, &dev->lock.hw_lock->lock, 242 DRM_KERNEL_CONTEXT); 243 } 244 } 245 246 + if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && 247 + !dev->driver->reclaim_buffers_locked) { 248 dev->driver->reclaim_buffers(dev, filp); 249 } 250 ··· 292 } 293 up(&dev->struct_sem); 294 295 + if (dev->driver->postclose) 296 + dev->driver->postclose(dev, priv); 297 drm_free(priv, sizeof(*priv), DRM_MEM_FILES); 298 299 /* ======================================================== ··· 313 } 314 spin_unlock(&dev->count_lock); 315 unlock_kernel(); 316 + return drm_lastclose(dev); 317 } 318 spin_unlock(&dev->count_lock); 319 ··· 321 322 return retcode; 323 } 324 EXPORT_SYMBOL(drm_release); 325 326 /** No-op. */ 327 unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait) 328 { 329 return 0; 330 } 331 EXPORT_SYMBOL(drm_poll);
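Taken together, the drm_fops.c changes rename the driver hook table so each hook says when it runs: presetup becomes firstopen (the postsetup call disappears), open_helper becomes open, prerelease becomes preclose, free_filp_priv becomes postclose, the lock-held release hook becomes reclaim_buffers_locked, and drm_takedown() becomes drm_lastclose(). drm_setup() also pre-builds the SAREA now, the no-op drm_flush() goes away, and drm_stub_open()/drm_cpu_valid() migrate here from drm_stub.c and the soon-to-be-deleted drm_init.c. Below is a minimal sketch of a driver filled in against the new names; everything prefixed foo_ is hypothetical and shown only to make the mapping concrete.

    /* Hypothetical driver fragment mapping old hook names to new ones. */
    static int foo_firstopen(drm_device_t * dev)
    {
            return 0;               /* was ->presetup, called from drm_setup() */
    }

    static int foo_open(drm_device_t * dev, drm_file_t * priv)
    {
            return 0;               /* was ->open_helper */
    }

    static void foo_preclose(drm_device_t * dev, DRMFILE filp)
    {
            /* was ->prerelease: per-fd cleanup before buffer reclaim */
    }

    static void foo_postclose(drm_device_t * dev, drm_file_t * priv)
    {
            /* was ->free_filp_priv: free per-fd state */
    }

    static void foo_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
    {
            /* was ->release: runs with the hardware lock held */
    }

    static void foo_lastclose(drm_device_t * dev)
    {
            /* was ->pretakedown: teardown when open_count reaches zero */
    }

    static struct drm_driver foo_driver = {
            .firstopen = foo_firstopen,
            .open = foo_open,
            .preclose = foo_preclose,
            .postclose = foo_postclose,
            .reclaim_buffers_locked = foo_reclaim_buffers_locked,
            .lastclose = foo_lastclose,
    };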
-53
drivers/char/drm/drm_init.c
··· 1 - /** 2 - * \file drm_init.c 3 - * Setup/Cleanup for DRM 4 - * 5 - * \author Rickard E. (Rik) Faith <faith@valinux.com> 6 - * \author Gareth Hughes <gareth@valinux.com> 7 - */ 8 - 9 - /* 10 - * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com 11 - * 12 - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 13 - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 14 - * All Rights Reserved. 15 - * 16 - * Permission is hereby granted, free of charge, to any person obtaining a 17 - * copy of this software and associated documentation files (the "Software"), 18 - * to deal in the Software without restriction, including without limitation 19 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 20 - * and/or sell copies of the Software, and to permit persons to whom the 21 - * Software is furnished to do so, subject to the following conditions: 22 - * 23 - * The above copyright notice and this permission notice (including the next 24 - * paragraph) shall be included in all copies or substantial portions of the 25 - * Software. 26 - * 27 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 28 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 29 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 30 - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 31 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 32 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 33 - * OTHER DEALINGS IN THE SOFTWARE. 34 - */ 35 - 36 - #include "drmP.h" 37 - 38 - /** 39 - * Check whether DRI will run on this CPU. 40 - * 41 - * \return non-zero if the DRI will run on this CPU, or zero otherwise. 42 - */ 43 - int drm_cpu_valid(void) 44 - { 45 - #if defined(__i386__) 46 - if (boot_cpu_data.x86 == 3) 47 - return 0; /* No cmpxchg on a 386 */ 48 - #endif 49 - #if defined(__sparc__) && !defined(__sparc_v9__) 50 - return 0; /* No cmpxchg before v9 sparc. */ 51 - #endif 52 - return 1; 53 - }
···
+14 -13
drivers/char/drm/drm_ioctl.c
··· 137 138 static int drm_set_busid(drm_device_t * dev) 139 { 140 if (dev->unique != NULL) 141 return EBUSY; 142 143 - dev->unique_len = 20; 144 dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER); 145 if (dev->unique == NULL) 146 return ENOMEM; 147 148 - snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d", 149 dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func); 150 151 dev->devname = 152 drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + ··· 244 { 245 drm_file_t *priv = filp->private_data; 246 drm_device_t *dev = priv->head->dev; 247 - drm_client_t __user *argp = (void __user *)arg; 248 drm_client_t client; 249 drm_file_t *pt; 250 int idx; ··· 267 client.iocs = pt->ioctl_count; 268 up(&dev->struct_sem); 269 270 - if (copy_to_user((drm_client_t __user *) arg, &client, sizeof(client))) 271 return -EFAULT; 272 return 0; 273 } ··· 330 drm_set_version_t retv; 331 int if_version; 332 drm_set_version_t __user *argp = (void __user *)data; 333 - drm_version_t version; 334 335 DRM_COPY_FROM_USER_IOCTL(sv, argp, sizeof(sv)); 336 337 - memset(&version, 0, sizeof(version)); 338 - 339 - dev->driver->version(&version); 340 retv.drm_di_major = DRM_IF_MAJOR; 341 retv.drm_di_minor = DRM_IF_MINOR; 342 - retv.drm_dd_major = version.version_major; 343 - retv.drm_dd_minor = version.version_minor; 344 345 DRM_COPY_TO_USER_IOCTL(argp, retv, sizeof(sv)); 346 ··· 344 if (sv.drm_di_major != DRM_IF_MAJOR || 345 sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR) 346 return EINVAL; 347 - if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_dd_minor); 348 dev->if_version = DRM_MAX(if_version, dev->if_version); 349 if (sv.drm_di_minor >= 1) { 350 /* ··· 355 } 356 357 if (sv.drm_dd_major != -1) { 358 - if (sv.drm_dd_major != version.version_major || 359 sv.drm_dd_minor < 0 360 - || sv.drm_dd_minor > version.version_minor) 361 return EINVAL; 362 363 if (dev->driver->set_version)
··· 137 138 static int drm_set_busid(drm_device_t * dev) 139 { 140 + int len; 141 + 142 if (dev->unique != NULL) 143 return EBUSY; 144 145 + dev->unique_len = 40; 146 dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER); 147 if (dev->unique == NULL) 148 return ENOMEM; 149 150 + len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d", 151 dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func); 152 + 153 + if (len > dev->unique_len) 154 + DRM_ERROR("Unique buffer overflowed\n"); 155 156 dev->devname = 157 drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + ··· 239 { 240 drm_file_t *priv = filp->private_data; 241 drm_device_t *dev = priv->head->dev; 242 + drm_client_t __user *argp = (drm_client_t __user *)arg; 243 drm_client_t client; 244 drm_file_t *pt; 245 int idx; ··· 262 client.iocs = pt->ioctl_count; 263 up(&dev->struct_sem); 264 265 + if (copy_to_user(argp, &client, sizeof(client))) 266 return -EFAULT; 267 return 0; 268 } ··· 325 drm_set_version_t retv; 326 int if_version; 327 drm_set_version_t __user *argp = (void __user *)data; 328 329 DRM_COPY_FROM_USER_IOCTL(sv, argp, sizeof(sv)); 330 331 retv.drm_di_major = DRM_IF_MAJOR; 332 retv.drm_di_minor = DRM_IF_MINOR; 333 + retv.drm_dd_major = dev->driver->major; 334 + retv.drm_dd_minor = dev->driver->minor; 335 336 DRM_COPY_TO_USER_IOCTL(argp, retv, sizeof(sv)); 337 ··· 343 if (sv.drm_di_major != DRM_IF_MAJOR || 344 sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR) 345 return EINVAL; 346 + if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor); 347 dev->if_version = DRM_MAX(if_version, dev->if_version); 348 if (sv.drm_di_minor >= 1) { 349 /* ··· 354 } 355 356 if (sv.drm_dd_major != -1) { 357 + if (sv.drm_dd_major != dev->driver->major || 358 sv.drm_dd_minor < 0 359 + || sv.drm_dd_minor > dev->driver->minor) 360 return EINVAL; 361 362 if (dev->driver->set_version)
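Two real fixes land in drm_ioctl.c: the busid buffer grows from 20 to 40 bytes, and drm_set_busid() now checks the return value of snprintf(), which reports the length the formatted string would have had, so an over-large value signals truncation. The version plumbing also drops the per-driver version() callback in favour of reading dev->driver->major/minor directly, and drm_setversion() now computes if_version from drm_di_minor instead of the mistaken drm_dd_minor. A user-space-flavoured sketch of the truncation-check idiom, with made-up ID values:

    #include <stdio.h>

    int main(void)
    {
            char unique[41];        /* mirrors dev->unique_len = 40, plus NUL */
            int len;

            /* domain/bus/slot/function values here are invented */
            len = snprintf(unique, sizeof(unique), "pci:%04x:%02x:%02x.%d",
                           0, 1, 0, 0);
            if (len >= (int)sizeof(unique))
                    fprintf(stderr, "unique buffer truncated\n");
            else
                    printf("%s\n", unique);
            return 0;
    }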
-1
drivers/char/drm/drm_lock.c
··· 130 /* dev->driver->kernel_context_switch isn't used by any of the x86 131 * drivers but is used by the Sparc driver. 132 */ 133 - 134 if (dev->driver->kernel_context_switch && 135 dev->last_context != lock.context) { 136 dev->driver->kernel_context_switch(dev, dev->last_context,
··· 130 /* dev->driver->kernel_context_switch isn't used by any of the x86 131 * drivers but is used by the Sparc driver. 132 */ 133 if (dev->driver->kernel_context_switch && 134 dev->last_context != lock.context) { 135 dev->driver->kernel_context_switch(dev, dev->last_context,
-8
drivers/char/drm/drm_memory.c
··· 145 return drm_agp_allocate_memory(dev->agp->bridge, pages, type); 146 } 147 148 - EXPORT_SYMBOL(drm_alloc_agp); 149 - 150 /** Wrapper around agp_free_memory() */ 151 int drm_free_agp(DRM_AGP_MEM * handle, int pages) 152 { 153 return drm_agp_free_memory(handle) ? 0 : -EINVAL; 154 } 155 - 156 - EXPORT_SYMBOL(drm_free_agp); 157 158 /** Wrapper around agp_bind_memory() */ 159 int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) ··· 157 return drm_agp_bind_memory(handle, start); 158 } 159 160 - EXPORT_SYMBOL(drm_bind_agp); 161 - 162 /** Wrapper around agp_unbind_memory() */ 163 int drm_unbind_agp(DRM_AGP_MEM * handle) 164 { 165 return drm_agp_unbind_memory(handle); 166 } 167 - 168 - EXPORT_SYMBOL(drm_unbind_agp); 169 #endif /* agp */ 170 #endif /* debug_memory */
··· 145 return drm_agp_allocate_memory(dev->agp->bridge, pages, type); 146 } 147 148 /** Wrapper around agp_free_memory() */ 149 int drm_free_agp(DRM_AGP_MEM * handle, int pages) 150 { 151 return drm_agp_free_memory(handle) ? 0 : -EINVAL; 152 } 153 154 /** Wrapper around agp_bind_memory() */ 155 int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) ··· 161 return drm_agp_bind_memory(handle, start); 162 } 163 164 /** Wrapper around agp_unbind_memory() */ 165 int drm_unbind_agp(DRM_AGP_MEM * handle) 166 { 167 return drm_agp_unbind_memory(handle); 168 } 169 #endif /* agp */ 170 #endif /* debug_memory */
+134 -135
drivers/char/drm/drm_memory_debug.h
··· 1 /** 2 - * \file drm_memory.h 3 * Memory management wrappers for DRM. 4 * 5 * \author Rickard E. (Rik) Faith <faith@valinux.com> ··· 43 unsigned long bytes_freed; 44 } drm_mem_stats_t; 45 46 - static DEFINE_SPINLOCK(DRM(mem_lock)); 47 - static unsigned long DRM(ram_available) = 0; /* In pages */ 48 - static unsigned long DRM(ram_used) = 0; 49 - static drm_mem_stats_t DRM(mem_stats)[] = 50 { 51 - [DRM_MEM_DMA] = { 52 - "dmabufs"},[DRM_MEM_SAREA] = { 53 - "sareas"},[DRM_MEM_DRIVER] = { 54 - "driver"},[DRM_MEM_MAGIC] = { 55 - "magic"},[DRM_MEM_IOCTLS] = { 56 - "ioctltab"},[DRM_MEM_MAPS] = { 57 - "maplist"},[DRM_MEM_VMAS] = { 58 - "vmalist"},[DRM_MEM_BUFS] = { 59 - "buflist"},[DRM_MEM_SEGS] = { 60 - "seglist"},[DRM_MEM_PAGES] = { 61 - "pagelist"},[DRM_MEM_FILES] = { 62 - "files"},[DRM_MEM_QUEUES] = { 63 - "queues"},[DRM_MEM_CMDS] = { 64 - "commands"},[DRM_MEM_MAPPINGS] = { 65 - "mappings"},[DRM_MEM_BUFLISTS] = { 66 - "buflists"},[DRM_MEM_AGPLISTS] = { 67 - "agplist"},[DRM_MEM_SGLISTS] = { 68 - "sglist"},[DRM_MEM_TOTALAGP] = { 69 - "totalagp"},[DRM_MEM_BOUNDAGP] = { 70 - "boundagp"},[DRM_MEM_CTXBITMAP] = { 71 - "ctxbitmap"},[DRM_MEM_CTXLIST] = { 72 - "ctxlist"},[DRM_MEM_STUB] = { 73 - "stub"}, { 74 - NULL, 0,} /* Last entry must be null */ 75 }; 76 77 - void DRM(mem_init) (void) { 78 drm_mem_stats_t *mem; 79 struct sysinfo si; 80 81 - for (mem = DRM(mem_stats); mem->name; ++mem) { 82 mem->succeed_count = 0; 83 mem->free_count = 0; 84 mem->fail_count = 0; ··· 86 } 87 88 si_meminfo(&si); 89 - DRM(ram_available) = si.totalram; 90 - DRM(ram_used) = 0; 91 } 92 93 /* drm_mem_info is called whenever a process reads /dev/drm/mem. */ 94 95 - static int DRM(_mem_info) (char *buf, char **start, off_t offset, 96 int request, int *eof, void *data) { 97 drm_mem_stats_t *pt; 98 int len = 0; ··· 111 " | allocs bytes\n\n"); 112 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", 113 "system", 0, 0, 0, 114 - DRM(ram_available) << (PAGE_SHIFT - 10)); 115 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", 116 - "locked", 0, 0, 0, DRM(ram_used) >> 10); 117 DRM_PROC_PRINT("\n"); 118 - for (pt = DRM(mem_stats); pt->name; pt++) { 119 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n", 120 pt->name, 121 pt->succeed_count, ··· 134 return len - offset; 135 } 136 137 - int DRM(mem_info) (char *buf, char **start, off_t offset, 138 int len, int *eof, void *data) { 139 int ret; 140 141 - spin_lock(&DRM(mem_lock)); 142 - ret = DRM(_mem_info) (buf, start, offset, len, eof, data); 143 - spin_unlock(&DRM(mem_lock)); 144 return ret; 145 } 146 147 - void *DRM(alloc) (size_t size, int area) { 148 void *pt; 149 150 if (!size) { ··· 153 } 154 155 if (!(pt = kmalloc(size, GFP_KERNEL))) { 156 - spin_lock(&DRM(mem_lock)); 157 - ++DRM(mem_stats)[area].fail_count; 158 - spin_unlock(&DRM(mem_lock)); 159 return NULL; 160 } 161 - spin_lock(&DRM(mem_lock)); 162 - ++DRM(mem_stats)[area].succeed_count; 163 - DRM(mem_stats)[area].bytes_allocated += size; 164 - spin_unlock(&DRM(mem_lock)); 165 return pt; 166 } 167 168 - void *DRM(calloc) (size_t nmemb, size_t size, int area) { 169 void *addr; 170 171 - addr = DRM(alloc) (nmemb * size, area); 172 if (addr != NULL) 173 memset((void *)addr, 0, size * nmemb); 174 175 return addr; 176 } 177 178 - void *DRM(realloc) (void *oldpt, size_t oldsize, size_t size, int area) { 179 void *pt; 180 181 - if (!(pt = DRM(alloc) (size, area))) 182 return NULL; 183 if (oldpt && oldsize) { 184 memcpy(pt, oldpt, oldsize); 185 - DRM(free) (oldpt, oldsize, area); 186 } 187 return pt; 188 } 189 190 - void 
DRM(free) (void *pt, size_t size, int area) { 191 int alloc_count; 192 int free_count; 193 ··· 195 DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n"); 196 else 197 kfree(pt); 198 - spin_lock(&DRM(mem_lock)); 199 - DRM(mem_stats)[area].bytes_freed += size; 200 - free_count = ++DRM(mem_stats)[area].free_count; 201 - alloc_count = DRM(mem_stats)[area].succeed_count; 202 - spin_unlock(&DRM(mem_lock)); 203 if (free_count > alloc_count) { 204 DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n", 205 free_count, alloc_count); 206 } 207 } 208 209 - unsigned long DRM(alloc_pages) (int order, int area) { 210 unsigned long address; 211 unsigned long bytes = PAGE_SIZE << order; 212 unsigned long addr; 213 unsigned int sz; 214 215 - spin_lock(&DRM(mem_lock)); 216 - if ((DRM(ram_used) >> PAGE_SHIFT) 217 - > (DRM_RAM_PERCENT * DRM(ram_available)) / 100) { 218 - spin_unlock(&DRM(mem_lock)); 219 return 0; 220 } 221 - spin_unlock(&DRM(mem_lock)); 222 223 address = __get_free_pages(GFP_KERNEL|__GFP_COMP, order); 224 if (!address) { 225 - spin_lock(&DRM(mem_lock)); 226 - ++DRM(mem_stats)[area].fail_count; 227 - spin_unlock(&DRM(mem_lock)); 228 return 0; 229 } 230 - spin_lock(&DRM(mem_lock)); 231 - ++DRM(mem_stats)[area].succeed_count; 232 - DRM(mem_stats)[area].bytes_allocated += bytes; 233 - DRM(ram_used) += bytes; 234 - spin_unlock(&DRM(mem_lock)); 235 236 /* Zero outside the lock */ 237 memset((void *)address, 0, bytes); ··· 245 return address; 246 } 247 248 - void DRM(free_pages) (unsigned long address, int order, int area) { 249 unsigned long bytes = PAGE_SIZE << order; 250 int alloc_count; 251 int free_count; ··· 263 free_pages(address, order); 264 } 265 266 - spin_lock(&DRM(mem_lock)); 267 - free_count = ++DRM(mem_stats)[area].free_count; 268 - alloc_count = DRM(mem_stats)[area].succeed_count; 269 - DRM(mem_stats)[area].bytes_freed += bytes; 270 - DRM(ram_used) -= bytes; 271 - spin_unlock(&DRM(mem_lock)); 272 if (free_count > alloc_count) { 273 DRM_MEM_ERROR(area, 274 "Excess frees: %d frees, %d allocs\n", ··· 276 } 277 } 278 279 - void *DRM(ioremap) (unsigned long offset, unsigned long size, 280 drm_device_t * dev) { 281 void *pt; 282 ··· 287 } 288 289 if (!(pt = drm_ioremap(offset, size, dev))) { 290 - spin_lock(&DRM(mem_lock)); 291 - ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count; 292 - spin_unlock(&DRM(mem_lock)); 293 return NULL; 294 } 295 - spin_lock(&DRM(mem_lock)); 296 - ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count; 297 - DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size; 298 - spin_unlock(&DRM(mem_lock)); 299 return pt; 300 } 301 302 - void *DRM(ioremap_nocache) (unsigned long offset, unsigned long size, 303 drm_device_t * dev) { 304 void *pt; 305 ··· 310 } 311 312 if (!(pt = drm_ioremap_nocache(offset, size, dev))) { 313 - spin_lock(&DRM(mem_lock)); 314 - ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count; 315 - spin_unlock(&DRM(mem_lock)); 316 return NULL; 317 } 318 - spin_lock(&DRM(mem_lock)); 319 - ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count; 320 - DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size; 321 - spin_unlock(&DRM(mem_lock)); 322 return pt; 323 } 324 325 - void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) { 326 int alloc_count; 327 int free_count; 328 ··· 332 else 333 drm_ioremapfree(pt, size, dev); 334 335 - spin_lock(&DRM(mem_lock)); 336 - DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size; 337 - free_count = ++DRM(mem_stats)[DRM_MEM_MAPPINGS].free_count; 338 - alloc_count = DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count; 339 - 
spin_unlock(&DRM(mem_lock)); 340 if (free_count > alloc_count) { 341 DRM_MEM_ERROR(DRM_MEM_MAPPINGS, 342 "Excess frees: %d frees, %d allocs\n", ··· 346 347 #if __OS_HAS_AGP 348 349 - DRM_AGP_MEM *DRM(alloc_agp) (int pages, u32 type) { 350 DRM_AGP_MEM *handle; 351 352 if (!pages) { ··· 354 return NULL; 355 } 356 357 - if ((handle = DRM(agp_allocate_memory) (pages, type))) { 358 - spin_lock(&DRM(mem_lock)); 359 - ++DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count; 360 - DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_allocated 361 += pages << PAGE_SHIFT; 362 - spin_unlock(&DRM(mem_lock)); 363 return handle; 364 } 365 - spin_lock(&DRM(mem_lock)); 366 - ++DRM(mem_stats)[DRM_MEM_TOTALAGP].fail_count; 367 - spin_unlock(&DRM(mem_lock)); 368 return NULL; 369 } 370 371 - int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) { 372 int alloc_count; 373 int free_count; 374 int retval = -EINVAL; ··· 379 return retval; 380 } 381 382 - if (DRM(agp_free_memory) (handle)) { 383 - spin_lock(&DRM(mem_lock)); 384 - free_count = ++DRM(mem_stats)[DRM_MEM_TOTALAGP].free_count; 385 - alloc_count = DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count; 386 - DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_freed 387 += pages << PAGE_SHIFT; 388 - spin_unlock(&DRM(mem_lock)); 389 if (free_count > alloc_count) { 390 DRM_MEM_ERROR(DRM_MEM_TOTALAGP, 391 "Excess frees: %d frees, %d allocs\n", ··· 396 return retval; 397 } 398 399 - int DRM(bind_agp) (DRM_AGP_MEM * handle, unsigned int start) { 400 int retcode = -EINVAL; 401 402 if (!handle) { ··· 405 return retcode; 406 } 407 408 - if (!(retcode = DRM(agp_bind_memory) (handle, start))) { 409 - spin_lock(&DRM(mem_lock)); 410 - ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count; 411 - DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_allocated 412 += handle->page_count << PAGE_SHIFT; 413 - spin_unlock(&DRM(mem_lock)); 414 return retcode; 415 } 416 - spin_lock(&DRM(mem_lock)); 417 - ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].fail_count; 418 - spin_unlock(&DRM(mem_lock)); 419 return retcode; 420 } 421 422 - int DRM(unbind_agp) (DRM_AGP_MEM * handle) { 423 int alloc_count; 424 int free_count; 425 int retcode = -EINVAL; ··· 430 return retcode; 431 } 432 433 - if ((retcode = DRM(agp_unbind_memory) (handle))) 434 return retcode; 435 - spin_lock(&DRM(mem_lock)); 436 - free_count = ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].free_count; 437 - alloc_count = DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count; 438 - DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_freed 439 += handle->page_count << PAGE_SHIFT; 440 - spin_unlock(&DRM(mem_lock)); 441 if (free_count > alloc_count) { 442 DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, 443 "Excess frees: %d frees, %d allocs\n",
··· 1 /** 2 + * \file drm_memory_debug.h 3 * Memory management wrappers for DRM. 4 * 5 * \author Rickard E. (Rik) Faith <faith@valinux.com> ··· 43 unsigned long bytes_freed; 44 } drm_mem_stats_t; 45 46 + static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED; 47 + static unsigned long drm_ram_available = 0; /* In pages */ 48 + static unsigned long drm_ram_used = 0; 49 + static drm_mem_stats_t drm_mem_stats[] = 50 { 51 + [DRM_MEM_DMA] = {"dmabufs"}, 52 + [DRM_MEM_SAREA] = {"sareas"}, 53 + [DRM_MEM_DRIVER] = {"driver"}, 54 + [DRM_MEM_MAGIC] = {"magic"}, 55 + [DRM_MEM_IOCTLS] = {"ioctltab"}, 56 + [DRM_MEM_MAPS] = {"maplist"}, 57 + [DRM_MEM_VMAS] = {"vmalist"}, 58 + [DRM_MEM_BUFS] = {"buflist"}, 59 + [DRM_MEM_SEGS] = {"seglist"}, 60 + [DRM_MEM_PAGES] = {"pagelist"}, 61 + [DRM_MEM_FILES] = {"files"}, 62 + [DRM_MEM_QUEUES] = {"queues"}, 63 + [DRM_MEM_CMDS] = {"commands"}, 64 + [DRM_MEM_MAPPINGS] = {"mappings"}, 65 + [DRM_MEM_BUFLISTS] = {"buflists"}, 66 + [DRM_MEM_AGPLISTS] = {"agplist"}, 67 + [DRM_MEM_SGLISTS] = {"sglist"}, 68 + [DRM_MEM_TOTALAGP] = {"totalagp"}, 69 + [DRM_MEM_BOUNDAGP] = {"boundagp"}, 70 + [DRM_MEM_CTXBITMAP] = {"ctxbitmap"}, 71 + [DRM_MEM_CTXLIST] = {"ctxlist"}, 72 + [DRM_MEM_STUB] = {"stub"}, 73 + {NULL, 0,} /* Last entry must be null */ 74 }; 75 76 + void drm_mem_init (void) { 77 drm_mem_stats_t *mem; 78 struct sysinfo si; 79 80 + for (mem = drm_mem_stats; mem->name; ++mem) { 81 mem->succeed_count = 0; 82 mem->free_count = 0; 83 mem->fail_count = 0; ··· 87 } 88 89 si_meminfo(&si); 90 + drm_ram_available = si.totalram; 91 + drm_ram_used = 0; 92 } 93 94 /* drm_mem_info is called whenever a process reads /dev/drm/mem. */ 95 96 + static int drm__mem_info (char *buf, char **start, off_t offset, 97 int request, int *eof, void *data) { 98 drm_mem_stats_t *pt; 99 int len = 0; ··· 112 " | allocs bytes\n\n"); 113 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", 114 "system", 0, 0, 0, 115 + drm_ram_available << (PAGE_SHIFT - 10)); 116 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", 117 + "locked", 0, 0, 0, drm_ram_used >> 10); 118 DRM_PROC_PRINT("\n"); 119 + for (pt = drm_mem_stats; pt->name; pt++) { 120 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n", 121 pt->name, 122 pt->succeed_count, ··· 135 return len - offset; 136 } 137 138 + int drm_mem_info (char *buf, char **start, off_t offset, 139 int len, int *eof, void *data) { 140 int ret; 141 142 + spin_lock(&drm_mem_lock); 143 + ret = drm__mem_info (buf, start, offset, len, eof, data); 144 + spin_unlock(&drm_mem_lock); 145 return ret; 146 } 147 148 + void *drm_alloc (size_t size, int area) { 149 void *pt; 150 151 if (!size) { ··· 154 } 155 156 if (!(pt = kmalloc(size, GFP_KERNEL))) { 157 + spin_lock(&drm_mem_lock); 158 + ++drm_mem_stats[area].fail_count; 159 + spin_unlock(&drm_mem_lock); 160 return NULL; 161 } 162 + spin_lock(&drm_mem_lock); 163 + ++drm_mem_stats[area].succeed_count; 164 + drm_mem_stats[area].bytes_allocated += size; 165 + spin_unlock(&drm_mem_lock); 166 return pt; 167 } 168 169 + void *drm_calloc (size_t nmemb, size_t size, int area) { 170 void *addr; 171 172 + addr = drm_alloc (nmemb * size, area); 173 if (addr != NULL) 174 memset((void *)addr, 0, size * nmemb); 175 176 return addr; 177 } 178 179 + void *drm_realloc (void *oldpt, size_t oldsize, size_t size, int area) { 180 void *pt; 181 182 + if (!(pt = drm_alloc (size, area))) 183 return NULL; 184 if (oldpt && oldsize) { 185 memcpy(pt, oldpt, oldsize); 186 + drm_free (oldpt, oldsize, area); 187 } 188 return pt; 189 } 190 191 + void drm_free (void 
*pt, size_t size, int area) { 192 int alloc_count; 193 int free_count; 194 ··· 196 DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n"); 197 else 198 kfree(pt); 199 + spin_lock(&drm_mem_lock); 200 + drm_mem_stats[area].bytes_freed += size; 201 + free_count = ++drm_mem_stats[area].free_count; 202 + alloc_count = drm_mem_stats[area].succeed_count; 203 + spin_unlock(&drm_mem_lock); 204 if (free_count > alloc_count) { 205 DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n", 206 free_count, alloc_count); 207 } 208 } 209 210 + unsigned long drm_alloc_pages (int order, int area) { 211 unsigned long address; 212 unsigned long bytes = PAGE_SIZE << order; 213 unsigned long addr; 214 unsigned int sz; 215 216 + spin_lock(&drm_mem_lock); 217 + if ((drm_ram_used >> PAGE_SHIFT) 218 + > (DRM_RAM_PERCENT * drm_ram_available) / 100) { 219 + spin_unlock(&drm_mem_lock); 220 return 0; 221 } 222 + spin_unlock(&drm_mem_lock); 223 224 address = __get_free_pages(GFP_KERNEL|__GFP_COMP, order); 225 if (!address) { 226 + spin_lock(&drm_mem_lock); 227 + ++drm_mem_stats[area].fail_count; 228 + spin_unlock(&drm_mem_lock); 229 return 0; 230 } 231 + spin_lock(&drm_mem_lock); 232 + ++drm_mem_stats[area].succeed_count; 233 + drm_mem_stats[area].bytes_allocated += bytes; 234 + drm_ram_used += bytes; 235 + spin_unlock(&drm_mem_lock); 236 237 /* Zero outside the lock */ 238 memset((void *)address, 0, bytes); ··· 246 return address; 247 } 248 249 + void drm_free_pages (unsigned long address, int order, int area) { 250 unsigned long bytes = PAGE_SIZE << order; 251 int alloc_count; 252 int free_count; ··· 264 free_pages(address, order); 265 } 266 267 + spin_lock(&drm_mem_lock); 268 + free_count = ++drm_mem_stats[area].free_count; 269 + alloc_count = drm_mem_stats[area].succeed_count; 270 + drm_mem_stats[area].bytes_freed += bytes; 271 + drm_ram_used -= bytes; 272 + spin_unlock(&drm_mem_lock); 273 if (free_count > alloc_count) { 274 DRM_MEM_ERROR(area, 275 "Excess frees: %d frees, %d allocs\n", ··· 277 } 278 } 279 280 + void *drm_ioremap (unsigned long offset, unsigned long size, 281 drm_device_t * dev) { 282 void *pt; 283 ··· 288 } 289 290 if (!(pt = drm_ioremap(offset, size, dev))) { 291 + spin_lock(&drm_mem_lock); 292 + ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count; 293 + spin_unlock(&drm_mem_lock); 294 return NULL; 295 } 296 + spin_lock(&drm_mem_lock); 297 + ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count; 298 + drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size; 299 + spin_unlock(&drm_mem_lock); 300 return pt; 301 } 302 303 + void *drm_ioremap_nocache (unsigned long offset, unsigned long size, 304 drm_device_t * dev) { 305 void *pt; 306 ··· 311 } 312 313 if (!(pt = drm_ioremap_nocache(offset, size, dev))) { 314 + spin_lock(&drm_mem_lock); 315 + ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count; 316 + spin_unlock(&drm_mem_lock); 317 return NULL; 318 } 319 + spin_lock(&drm_mem_lock); 320 + ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count; 321 + drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size; 322 + spin_unlock(&drm_mem_lock); 323 return pt; 324 } 325 326 + void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) { 327 int alloc_count; 328 int free_count; 329 ··· 333 else 334 drm_ioremapfree(pt, size, dev); 335 336 + spin_lock(&drm_mem_lock); 337 + drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size; 338 + free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count; 339 + alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count; 340 + spin_unlock(&drm_mem_lock); 341 if (free_count > alloc_count) { 
342 DRM_MEM_ERROR(DRM_MEM_MAPPINGS, 343 "Excess frees: %d frees, %d allocs\n", ··· 347 348 #if __OS_HAS_AGP 349 350 + DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) { 351 DRM_AGP_MEM *handle; 352 353 if (!pages) { ··· 355 return NULL; 356 } 357 358 + if ((handle = drm_agp_allocate_memory (pages, type))) { 359 + spin_lock(&drm_mem_lock); 360 + ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count; 361 + drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated 362 += pages << PAGE_SHIFT; 363 + spin_unlock(&drm_mem_lock); 364 return handle; 365 } 366 + spin_lock(&drm_mem_lock); 367 + ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count; 368 + spin_unlock(&drm_mem_lock); 369 return NULL; 370 } 371 372 + int drm_free_agp (DRM_AGP_MEM * handle, int pages) { 373 int alloc_count; 374 int free_count; 375 int retval = -EINVAL; ··· 380 return retval; 381 } 382 383 + if (drm_agp_free_memory (handle)) { 384 + spin_lock(&drm_mem_lock); 385 + free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count; 386 + alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count; 387 + drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed 388 += pages << PAGE_SHIFT; 389 + spin_unlock(&drm_mem_lock); 390 if (free_count > alloc_count) { 391 DRM_MEM_ERROR(DRM_MEM_TOTALAGP, 392 "Excess frees: %d frees, %d allocs\n", ··· 397 return retval; 398 } 399 400 + int drm_bind_agp (DRM_AGP_MEM * handle, unsigned int start) { 401 int retcode = -EINVAL; 402 403 if (!handle) { ··· 406 return retcode; 407 } 408 409 + if (!(retcode = drm_agp_bind_memory (handle, start))) { 410 + spin_lock(&drm_mem_lock); 411 + ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count; 412 + drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated 413 += handle->page_count << PAGE_SHIFT; 414 + spin_unlock(&drm_mem_lock); 415 return retcode; 416 } 417 + spin_lock(&drm_mem_lock); 418 + ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count; 419 + spin_unlock(&drm_mem_lock); 420 return retcode; 421 } 422 423 + int drm_unbind_agp (DRM_AGP_MEM * handle) { 424 int alloc_count; 425 int free_count; 426 int retcode = -EINVAL; ··· 431 return retcode; 432 } 433 434 + if ((retcode = drm_agp_unbind_memory (handle))) 435 return retcode; 436 + spin_lock(&drm_mem_lock); 437 + free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count; 438 + alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count; 439 + drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed 440 += handle->page_count << PAGE_SHIFT; 441 + spin_unlock(&drm_mem_lock); 442 if (free_count > alloc_count) { 443 DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, 444 "Excess frees: %d frees, %d allocs\n",
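Most of this drm_memory_debug.h hunk is mechanical: the DRM(x) name-mangling macro, a leftover from when the DRM core was compiled separately into every driver, is expanded to plain drm_-prefixed identifiers, and the mem_stats table is unflattened into one designated initializer per line. The sketch below shows roughly how the old macro worked; the exact historical definitions varied by tree, so treat the #defines as an assumption for illustration only.

    #include <stddef.h>

    /* Assumed shape of the old per-build mangling macro (illustrative). */
    #define DRM(x) i810_##x         /* core compiled into a driver */
    /* #define DRM(x) drm_##x */    /* core compiled standalone */

    /* A declaration written as ... */
    void *DRM(alloc) (size_t size, int area);
    /* ... became i810_alloc() (or drm_alloc()) after preprocessing. */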
+1
drivers/char/drm/drm_os_linux.h
··· 13 #define DRM_ERR(d) -(d) 14 /** Current process ID */ 15 #define DRM_CURRENTPID current->pid 16 #define DRM_UDELAY(d) udelay(d) 17 /** Read a byte from a MMIO region */ 18 #define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset))
··· 13 #define DRM_ERR(d) -(d) 14 /** Current process ID */ 15 #define DRM_CURRENTPID current->pid 16 + #define DRM_SUSER(p) capable(CAP_SYS_ADMIN) 17 #define DRM_UDELAY(d) udelay(d) 18 /** Read a byte from a MMIO region */ 19 #define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset))
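The new DRM_SUSER() macro gives the OS-independent parts of shared drivers a portable superuser test; on Linux it ignores its argument and checks CAP_SYS_ADMIN. A hedged sketch of the intended call pattern (the surrounding function is hypothetical):

    /* Hypothetical permission gate in shared driver code. */
    static int foo_privileged_check(void)
    {
            if (!DRM_SUSER(DRM_CURRENTPID))
                    return DRM_ERR(EPERM);  /* -EPERM on Linux */
            return 0;
    }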
+11 -1
drivers/char/drm/drm_pciids.h
··· 46 {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ 47 {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ 48 {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ 49 {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ 50 {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ 51 {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ ··· 70 {0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ 71 {0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ 72 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ 73 {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \ 74 {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \ 75 {0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \ ··· 84 {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ 85 {0x1002, 0x596A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 86 {0x1002, 0x596B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 87 {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \ 88 {0x1002, 0x5c62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 89 {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \ 90 {0x1002, 0x5c64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 91 {0, 0, 0} 92 93 #define r128_PCI_IDS \ ··· 181 182 #define viadrv_PCI_IDS \ 183 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 184 - {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 185 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 186 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 187 {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ··· 199 {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 200 {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 201 {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 202 {0, 0, 0} 203 204 #define savage_PCI_IDS \ ··· 243 {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 244 {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 245 {0, 0, 0}
··· 46 {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ 47 {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ 48 {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ 49 + {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ 50 {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ 51 {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ 52 {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ ··· 69 {0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ 70 {0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ 71 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ 72 + {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ 73 {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \ 74 {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \ 75 {0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \ ··· 82 {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ 83 {0x1002, 0x596A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 84 {0x1002, 0x596B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 85 + {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ 86 {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \ 87 {0x1002, 0x5c62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 88 {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \ 89 {0x1002, 0x5c64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 90 + {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ 91 + {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \ 92 {0, 0, 0} 93 94 #define r128_PCI_IDS \ ··· 176 177 #define viadrv_PCI_IDS \ 178 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 179 + {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \ 180 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 181 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 182 {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ··· 194 {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 195 {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 196 {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 197 + {0, 0, 0} 198 + 199 + #define gamma_PCI_IDS \ 200 + {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 201 {0, 0, 0} 202 203 #define savage_PCI_IDS \ ··· 234 {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 235 {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 236 {0, 0, 0} 237 +
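The ID table gains several Radeons (including the first CHIP_R420 entry, 0x5e4b), tags the VIA 0x3118 with VIA_PRO_GROUP_A, and restores the gamma_PCI_IDS block. The last column of each entry is the pci_device_id driver_data field, which the probe path hands back to the driver. A simplified sketch of how such flags are typically consumed; the foo_* names are hypothetical:

    /* Illustrative probe-time consumer of the driver_data flags column. */
    static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
            unsigned long chip_flags = ent->driver_data;

            if (chip_flags & CHIP_IS_MOBILITY)
                    foo_apply_mobility_quirks(pdev);  /* hypothetical helper */
            if (chip_flags & CHIP_IS_IGP)
                    foo_use_igp_gart(pdev);           /* hypothetical helper */
            return 0;
    }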
+7 -9
drivers/char/drm/drm_proc.c
··· 61 const char *name; /**< file name */ 62 int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/ 63 } drm_proc_list[] = { 64 - { 65 - "name", drm_name_info}, { 66 - "mem", drm_mem_info}, { 67 - "vm", drm_vm_info}, { 68 - "clients", drm_clients_info}, { 69 - "queues", drm_queues_info}, { 70 - "bufs", drm_bufs_info}, 71 #if DRM_DEBUG_CODE 72 - { 73 - "vma", drm_vma_info}, 74 #endif 75 }; 76
··· 61 const char *name; /**< file name */ 62 int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/ 63 } drm_proc_list[] = { 64 + {"name", drm_name_info}, 65 + {"mem", drm_mem_info}, 66 + {"vm", drm_vm_info}, 67 + {"clients", drm_clients_info}, 68 + {"queues", drm_queues_info}, 69 + {"bufs", drm_bufs_info}, 70 #if DRM_DEBUG_CODE 71 + {"vma", drm_vma_info}, 72 #endif 73 }; 74
+10 -53
drivers/char/drm/drm_stub.c
··· 93 94 dev->driver = driver; 95 96 - if (dev->driver->preinit) 97 - if ((retcode = dev->driver->preinit(dev, ent->driver_data))) 98 goto error_out_unreg; 99 100 if (drm_core_has_AGP(dev)) { ··· 124 return 0; 125 126 error_out_unreg: 127 - drm_takedown(dev); 128 return retcode; 129 } 130 131 - /** 132 - * File \c open operation. 133 - * 134 - * \param inode device inode. 135 - * \param filp file pointer. 136 - * 137 - * Puts the dev->fops corresponding to the device minor number into 138 - * \p filp, call the \c open method, and restore the file operations. 139 - */ 140 - int drm_stub_open(struct inode *inode, struct file *filp) 141 - { 142 - drm_device_t *dev = NULL; 143 - int minor = iminor(inode); 144 - int err = -ENODEV; 145 - struct file_operations *old_fops; 146 - 147 - DRM_DEBUG("\n"); 148 - 149 - if (!((minor >= 0) && (minor < drm_cards_limit))) 150 - return -ENODEV; 151 - 152 - if (!drm_heads[minor]) 153 - return -ENODEV; 154 - 155 - if (!(dev = drm_heads[minor]->dev)) 156 - return -ENODEV; 157 - 158 - old_fops = filp->f_op; 159 - filp->f_op = fops_get(&dev->driver->fops); 160 - if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) { 161 - fops_put(filp->f_op); 162 - filp->f_op = fops_get(old_fops); 163 - } 164 - fops_put(old_fops); 165 - 166 - return err; 167 - } 168 169 /** 170 * Get a secondary minor number. ··· 163 goto err_g1; 164 } 165 166 - head->dev_class = drm_sysfs_device_add(drm_class, 167 - MKDEV(DRM_MAJOR, 168 - minor), 169 - &dev->pdev->dev, 170 - "card%d", minor); 171 if (IS_ERR(head->dev_class)) { 172 printk(KERN_ERR 173 "DRM: Error sysfs_device_add.\n"); ··· 217 } 218 if ((ret = drm_get_head(dev, &dev->primary))) 219 goto err_g1; 220 - 221 - /* postinit is a required function to display the signon banner */ 222 - /* drivers add secondary heads here if needed */ 223 - if ((ret = dev->driver->postinit(dev, ent->driver_data))) 224 - goto err_g1; 225 226 return 0; 227 ··· 276 DRM_DEBUG("release secondary minor %d\n", minor); 277 278 drm_proc_cleanup(minor, drm_proc_root, head->dev_root); 279 - drm_sysfs_device_remove(MKDEV(DRM_MAJOR, head->minor)); 280 281 - *head = (drm_head_t) { 282 - .dev = NULL}; 283 284 drm_heads[minor] = NULL; 285
··· 93 94 dev->driver = driver; 95 96 + if (dev->driver->load) 97 + if ((retcode = dev->driver->load(dev, ent->driver_data))) 98 goto error_out_unreg; 99 100 if (drm_core_has_AGP(dev)) { ··· 124 return 0; 125 126 error_out_unreg: 127 + drm_lastclose(dev); 128 return retcode; 129 } 130 131 132 /** 133 * Get a secondary minor number. ··· 200 goto err_g1; 201 } 202 203 + head->dev_class = drm_sysfs_device_add(drm_class, head); 204 if (IS_ERR(head->dev_class)) { 205 printk(KERN_ERR 206 "DRM: Error sysfs_device_add.\n"); ··· 258 } 259 if ((ret = drm_get_head(dev, &dev->primary))) 260 goto err_g1; 261 + 262 + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", 263 + driver->name, driver->major, driver->minor, driver->patchlevel, 264 + driver->date, dev->primary.minor); 265 266 return 0; 267 ··· 318 DRM_DEBUG("release secondary minor %d\n", minor); 319 320 drm_proc_cleanup(minor, drm_proc_root, head->dev_root); 321 + drm_sysfs_device_remove(head->dev_class); 322 323 + *head = (drm_head_t) {.dev = NULL}; 324 325 drm_heads[minor] = NULL; 326
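In drm_stub.c the preinit hook becomes a single ->load call made before AGP/IRQ setup, the per-driver postinit banner is replaced by a core DRM_INFO built from the driver's new static name/major/minor/patchlevel/date fields, and sysfs registration shrinks to drm_sysfs_device_add(drm_class, head). A sketch of a ->load implementation under the new scheme; drm_foo_private_t is a made-up type:

    /* Hypothetical ->load hook: allocate per-device state once at probe.
     * The flags argument is the PCI table's driver_data value. */
    static int foo_driver_load(drm_device_t * dev, unsigned long flags)
    {
            drm_foo_private_t *dev_priv;

            dev_priv = drm_alloc(sizeof(*dev_priv), DRM_MEM_DRIVER);
            if (!dev_priv)
                    return -ENOMEM;
            memset(dev_priv, 0, sizeof(*dev_priv));

            dev->dev_private = (void *)dev_priv;
            return 0;   /* nonzero sends drm_fill_in_dev() to drm_lastclose() */
    }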
+28 -38
drivers/char/drm/drm_sysfs.c
··· 15 #include <linux/device.h> 16 #include <linux/kdev_t.h> 17 #include <linux/err.h> 18 - #include <linux/slab.h> 19 - #include <linux/string.h> 20 21 #include "drm_core.h" 22 #include "drmP.h" ··· 26 #define to_drm_sysfs_class(d) container_of(d, struct drm_sysfs_class, class) 27 28 struct simple_dev { 29 - struct list_head node; 30 dev_t dev; 31 struct class_device class_dev; 32 }; 33 #define to_simple_dev(d) container_of(d, struct simple_dev, class_dev) 34 - 35 - static LIST_HEAD(simple_dev_list); 36 - static DEFINE_SPINLOCK(simple_dev_list_lock); 37 38 static void release_simple_dev(struct class_device *class_dev) 39 { ··· 118 class_unregister(&cs->class); 119 } 120 121 /** 122 * drm_sysfs_device_add - adds a class device to sysfs for a character driver 123 * @cs: pointer to the struct drm_sysfs_class that this device should be registered to. ··· 144 * Note: the struct drm_sysfs_class passed to this function must have previously been 145 * created with a call to drm_sysfs_create(). 146 */ 147 - struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, dev_t dev, 148 - struct device *device, 149 - const char *fmt, ...) 150 { 151 - va_list args; 152 struct simple_dev *s_dev = NULL; 153 - int retval; 154 155 if ((cs == NULL) || (IS_ERR(cs))) { 156 retval = -ENODEV; ··· 162 } 163 memset(s_dev, 0x00, sizeof(*s_dev)); 164 165 - s_dev->dev = dev; 166 - s_dev->class_dev.dev = device; 167 s_dev->class_dev.class = &cs->class; 168 169 - va_start(args, fmt); 170 - vsnprintf(s_dev->class_dev.class_id, BUS_ID_SIZE, fmt, args); 171 - va_end(args); 172 retval = class_device_register(&s_dev->class_dev); 173 if (retval) 174 goto error; 175 176 class_device_create_file(&s_dev->class_dev, &cs->attr); 177 178 - spin_lock(&simple_dev_list_lock); 179 - list_add(&s_dev->node, &simple_dev_list); 180 - spin_unlock(&simple_dev_list_lock); 181 - 182 return &s_dev->class_dev; 183 184 - error: 185 kfree(s_dev); 186 return ERR_PTR(retval); 187 } ··· 190 * This call unregisters and cleans up a class device that was created with a 191 * call to drm_sysfs_device_add() 192 */ 193 - void drm_sysfs_device_remove(dev_t dev) 194 { 195 - struct simple_dev *s_dev = NULL; 196 - int found = 0; 197 198 - spin_lock(&simple_dev_list_lock); 199 - list_for_each_entry(s_dev, &simple_dev_list, node) { 200 - if (s_dev->dev == dev) { 201 - found = 1; 202 - break; 203 - } 204 - } 205 - if (found) { 206 - list_del(&s_dev->node); 207 - spin_unlock(&simple_dev_list_lock); 208 - class_device_unregister(&s_dev->class_dev); 209 - } else { 210 - spin_unlock(&simple_dev_list_lock); 211 - } 212 }
··· 15 #include <linux/device.h> 16 #include <linux/kdev_t.h> 17 #include <linux/err.h> 18 19 #include "drm_core.h" 20 #include "drmP.h" ··· 28 #define to_drm_sysfs_class(d) container_of(d, struct drm_sysfs_class, class) 29 30 struct simple_dev { 31 dev_t dev; 32 struct class_device class_dev; 33 }; 34 #define to_simple_dev(d) container_of(d, struct simple_dev, class_dev) 35 36 static void release_simple_dev(struct class_device *class_dev) 37 { ··· 124 class_unregister(&cs->class); 125 } 126 127 + static ssize_t show_dri(struct class_device *class_device, char *buf) 128 + { 129 + drm_device_t * dev = ((drm_head_t *)class_get_devdata(class_device))->dev; 130 + if (dev->driver->dri_library_name) 131 + return dev->driver->dri_library_name(dev, buf); 132 + return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name); 133 + } 134 + 135 + static struct class_device_attribute class_device_attrs[] = { 136 + __ATTR(dri_library_name, S_IRUGO, show_dri, NULL), 137 + }; 138 + 139 /** 140 * drm_sysfs_device_add - adds a class device to sysfs for a character driver 141 * @cs: pointer to the struct drm_sysfs_class that this device should be registered to. ··· 138 * Note: the struct drm_sysfs_class passed to this function must have previously been 139 * created with a call to drm_sysfs_create(). 140 */ 141 + struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, 142 + drm_head_t *head) 143 { 144 struct simple_dev *s_dev = NULL; 145 + int i, retval; 146 147 if ((cs == NULL) || (IS_ERR(cs))) { 148 retval = -ENODEV; ··· 158 } 159 memset(s_dev, 0x00, sizeof(*s_dev)); 160 161 + s_dev->dev = MKDEV(DRM_MAJOR, head->minor); 162 + s_dev->class_dev.dev = &(head->dev->pdev)->dev; 163 s_dev->class_dev.class = &cs->class; 164 165 + snprintf(s_dev->class_dev.class_id, BUS_ID_SIZE, "card%d", head->minor); 166 retval = class_device_register(&s_dev->class_dev); 167 if (retval) 168 goto error; 169 170 class_device_create_file(&s_dev->class_dev, &cs->attr); 171 + class_set_devdata(&s_dev->class_dev, head); 172 173 + for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) 174 + class_device_create_file(&s_dev->class_dev, &class_device_attrs[i]); 175 return &s_dev->class_dev; 176 177 + error: 178 kfree(s_dev); 179 return ERR_PTR(retval); 180 } ··· 189 * This call unregisters and cleans up a class device that was created with a 190 * call to drm_sysfs_device_add() 191 */ 192 + void drm_sysfs_device_remove(struct class_device *class_dev) 193 { 194 + struct simple_dev *s_dev = to_simple_dev(class_dev); 195 + int i; 196 197 + for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) 198 + class_device_remove_file(&s_dev->class_dev, &class_device_attrs[i]); 199 + class_device_unregister(&s_dev->class_dev); 200 }
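The sysfs code drops its private list of registered devices (class_get_devdata() and to_simple_dev() now recover everything drm_sysfs_device_remove() needs) and exports a per-card dri_library_name attribute so userspace can map a card to the client-side DRI driver it should load. As show_dri() above shows, the default is pci_driver.name; a driver can override it via the new dri_library_name hook. A hypothetical override, with "r300" purely illustrative:

    /* Hypothetical driver override for the new sysfs attribute. */
    static int foo_dri_library_name(drm_device_t * dev, char *buf)
    {
            return snprintf(buf, PAGE_SIZE, "%s\n", "r300");
    }

Userspace would then read the name from the card's class device, e.g. /sys/class/drm/card0/dri_library_name (path inferred from the "card%d" class_id set above).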
+30 -19
drivers/char/drm/i810_dma.c
··· 114 115 static struct file_operations i810_buffer_fops = { 116 .open = drm_open, 117 - .flush = drm_flush, 118 .release = drm_release, 119 .ioctl = drm_ioctl, 120 .mmap = i810_mmap_buffers, ··· 1318 return 0; 1319 } 1320 1321 - void i810_driver_pretakedown(drm_device_t * dev) 1322 { 1323 i810_dma_cleanup(dev); 1324 } 1325 1326 - void i810_driver_prerelease(drm_device_t * dev, DRMFILE filp) 1327 { 1328 if (dev->dev_private) { 1329 drm_i810_private_t *dev_priv = dev->dev_private; ··· 1345 } 1346 } 1347 1348 - void i810_driver_release(drm_device_t * dev, struct file *filp) 1349 { 1350 i810_reclaim_buffers(dev, filp); 1351 } ··· 1357 } 1358 1359 drm_ioctl_desc_t i810_ioctls[] = { 1360 - [DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, 1, 1}, 1361 - [DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, 1, 0}, 1362 - [DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, 1, 0}, 1363 - [DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, 1, 0}, 1364 - [DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, 1, 0}, 1365 - [DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, 1, 0}, 1366 - [DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, 1, 0}, 1367 - [DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, 1, 0}, 1368 - [DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, 1, 0}, 1369 - [DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, 1, 0}, 1370 - [DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, 1, 0}, 1371 - [DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, 1, 0}, 1372 - [DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, 1, 1}, 1373 - [DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, 1, 0}, 1374 - [DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, 1, 0} 1375 }; 1376 1377 int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
··· 114 115 static struct file_operations i810_buffer_fops = { 116 .open = drm_open, 117 .release = drm_release, 118 .ioctl = drm_ioctl, 119 .mmap = i810_mmap_buffers, ··· 1319 return 0; 1320 } 1321 1322 + int i810_driver_load(drm_device_t *dev, unsigned long flags) 1323 + { 1324 + /* i810 has 4 more counters */ 1325 + dev->counters += 4; 1326 + dev->types[6] = _DRM_STAT_IRQ; 1327 + dev->types[7] = _DRM_STAT_PRIMARY; 1328 + dev->types[8] = _DRM_STAT_SECONDARY; 1329 + dev->types[9] = _DRM_STAT_DMA; 1330 + 1331 + return 0; 1332 + } 1333 + 1334 + void i810_driver_lastclose(drm_device_t * dev) 1335 { 1336 i810_dma_cleanup(dev); 1337 } 1338 1339 + void i810_driver_preclose(drm_device_t * dev, DRMFILE filp) 1340 { 1341 if (dev->dev_private) { 1342 drm_i810_private_t *dev_priv = dev->dev_private; ··· 1334 } 1335 } 1336 1337 + void i810_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) 1338 { 1339 i810_reclaim_buffers(dev, filp); 1340 } ··· 1346 } 1347 1348 drm_ioctl_desc_t i810_ioctls[] = { 1349 + [DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1350 + [DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, DRM_AUTH}, 1351 + [DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, DRM_AUTH}, 1352 + [DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, DRM_AUTH}, 1353 + [DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, DRM_AUTH}, 1354 + [DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, DRM_AUTH}, 1355 + [DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, DRM_AUTH}, 1356 + [DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, DRM_AUTH}, 1357 + [DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, DRM_AUTH}, 1358 + [DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, DRM_AUTH}, 1359 + [DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, DRM_AUTH}, 1360 + [DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, DRM_AUTH}, 1361 + [DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1362 + [DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, DRM_AUTH}, 1363 + [DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, DRM_AUTH} 1364 }; 1365 1366 int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
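Beyond the hook renames, the i810 ioctl table switches from the old positional {fn, auth_needed, root_only} triples to an or-able flags word, checked against the per-file state set up in drm_open_helper() (priv->authenticated, priv->master). Roughly how the core dispatcher is expected to gate on the bits, sketched with an assumed field name; the i830 hunks at the end of this series get the identical conversion:

    /* Simplified sketch of the permission gate the flags imply; the
     * ->flags member name is an assumption for illustration. */
    static int foo_check_ioctl_flags(drm_ioctl_desc_t * ioctl, drm_file_t * priv)
    {
            if ((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN))
                    return -EACCES;
            if ((ioctl->flags & DRM_AUTH) && !priv->authenticated)
                    return -EACCES;
            if ((ioctl->flags & DRM_MASTER) && !priv->master)
                    return -EACCES;
            return 0;
    }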
+16 -44
drivers/char/drm/i810_drv.c
··· 38 39 #include "drm_pciids.h" 40 41 - static int postinit(struct drm_device *dev, unsigned long flags) 42 - { 43 - /* i810 has 4 more counters */ 44 - dev->counters += 4; 45 - dev->types[6] = _DRM_STAT_IRQ; 46 - dev->types[7] = _DRM_STAT_PRIMARY; 47 - dev->types[8] = _DRM_STAT_SECONDARY; 48 - dev->types[9] = _DRM_STAT_DMA; 49 - 50 - DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n", 51 - DRIVER_NAME, 52 - DRIVER_MAJOR, 53 - DRIVER_MINOR, 54 - DRIVER_PATCHLEVEL, 55 - DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev) 56 - ); 57 - return 0; 58 - } 59 - 60 - static int version(drm_version_t * version) 61 - { 62 - int len; 63 - 64 - version->version_major = DRIVER_MAJOR; 65 - version->version_minor = DRIVER_MINOR; 66 - version->version_patchlevel = DRIVER_PATCHLEVEL; 67 - DRM_COPY(version->name, DRIVER_NAME); 68 - DRM_COPY(version->date, DRIVER_DATE); 69 - DRM_COPY(version->desc, DRIVER_DESC); 70 - return 0; 71 - } 72 - 73 static struct pci_device_id pciidlist[] = { 74 i810_PCI_IDS 75 }; ··· 47 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | 48 DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE, 49 .dev_priv_size = sizeof(drm_i810_buf_priv_t), 50 - .pretakedown = i810_driver_pretakedown, 51 - .prerelease = i810_driver_prerelease, 52 .device_is_agp = i810_driver_device_is_agp, 53 - .release = i810_driver_release, 54 .dma_quiescent = i810_driver_dma_quiescent, 55 - .reclaim_buffers = i810_reclaim_buffers, 56 .get_map_ofs = drm_core_get_map_ofs, 57 .get_reg_ofs = drm_core_get_reg_ofs, 58 - .postinit = postinit, 59 - .version = version, 60 .ioctls = i810_ioctls, 61 .fops = { 62 .owner = THIS_MODULE, ··· 64 .mmap = drm_mmap, 65 .poll = drm_poll, 66 .fasync = drm_fasync, 67 - } 68 - , 69 .pci_driver = { 70 - .name = DRIVER_NAME, 71 - .id_table = pciidlist, 72 - } 73 - , 74 }; 75 76 static int __init i810_init(void)
··· 38 39 #include "drm_pciids.h" 40 41 static struct pci_device_id pciidlist[] = { 42 i810_PCI_IDS 43 }; ··· 79 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | 80 DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE, 81 .dev_priv_size = sizeof(drm_i810_buf_priv_t), 82 + .load = i810_driver_load, 83 + .lastclose = i810_driver_lastclose, 84 + .preclose = i810_driver_preclose, 85 .device_is_agp = i810_driver_device_is_agp, 86 + .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked, 87 .dma_quiescent = i810_driver_dma_quiescent, 88 .get_map_ofs = drm_core_get_map_ofs, 89 .get_reg_ofs = drm_core_get_reg_ofs, 90 .ioctls = i810_ioctls, 91 .fops = { 92 .owner = THIS_MODULE, ··· 98 .mmap = drm_mmap, 99 .poll = drm_poll, 100 .fasync = drm_fasync, 101 + }, 102 + 103 .pci_driver = { 104 + .name = DRIVER_NAME, 105 + .id_table = pciidlist, 106 + }, 107 + 108 + .name = DRIVER_NAME, 109 + .desc = DRIVER_DESC, 110 + .date = DRIVER_DATE, 111 + .major = DRIVER_MAJOR, 112 + .minor = DRIVER_MINOR, 113 + .patchlevel = DRIVER_PATCHLEVEL, 114 }; 115 116 static int __init i810_init(void)
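With postinit() and version() removed, the same information is supplied declaratively: the name/desc/date/major/minor/patchlevel fields appended to the driver template feed both the core's load-time banner (printed from drm_stub.c above) and the version query that previously round-tripped through the callback. A sketch of the query side under the new layout, simplified and with string copies elided:

    /* Sketch: version data served straight from the static fields. */
    static void foo_fill_version(drm_device_t * dev, drm_version_t * version)
    {
            version->version_major = dev->driver->major;
            version->version_minor = dev->driver->minor;
            version->version_patchlevel = dev->driver->patchlevel;
    }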
+5 -3
drivers/char/drm/i810_drv.h
··· 116 extern void i810_reclaim_buffers(drm_device_t * dev, struct file *filp); 117 118 extern int i810_driver_dma_quiescent(drm_device_t * dev); 119 - extern void i810_driver_release(drm_device_t * dev, struct file *filp); 120 - extern void i810_driver_pretakedown(drm_device_t * dev); 121 - extern void i810_driver_prerelease(drm_device_t * dev, DRMFILE filp); 122 extern int i810_driver_device_is_agp(drm_device_t * dev); 123 124 extern drm_ioctl_desc_t i810_ioctls[];
··· 116 extern void i810_reclaim_buffers(drm_device_t * dev, struct file *filp); 117 118 extern int i810_driver_dma_quiescent(drm_device_t * dev); 119 + extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev, 120 + struct file *filp); 121 + extern int i810_driver_load(struct drm_device *, unsigned long flags); 122 + extern void i810_driver_lastclose(drm_device_t * dev); 123 + extern void i810_driver_preclose(drm_device_t * dev, DRMFILE filp); 124 + extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev, 125 + struct file *filp); 126 extern int i810_driver_device_is_agp(drm_device_t * dev); 127 128 extern drm_ioctl_desc_t i810_ioctls[];
+29 -18
drivers/char/drm/i830_dma.c
··· 116 117 static struct file_operations i830_buffer_fops = { 118 .open = drm_open, 119 - .flush = drm_flush, 120 .release = drm_release, 121 .ioctl = drm_ioctl, 122 .mmap = i830_mmap_buffers, ··· 1516 return 0; 1517 } 1518 1519 - void i830_driver_pretakedown(drm_device_t * dev) 1520 { 1521 i830_dma_cleanup(dev); 1522 } 1523 1524 - void i830_driver_prerelease(drm_device_t * dev, DRMFILE filp) 1525 { 1526 if (dev->dev_private) { 1527 drm_i830_private_t *dev_priv = dev->dev_private; ··· 1543 } 1544 } 1545 1546 - void i830_driver_release(drm_device_t * dev, struct file *filp) 1547 { 1548 i830_reclaim_buffers(dev, filp); 1549 } ··· 1555 } 1556 1557 drm_ioctl_desc_t i830_ioctls[] = { 1558 - [DRM_IOCTL_NR(DRM_I830_INIT)] = {i830_dma_init, 1, 1}, 1559 - [DRM_IOCTL_NR(DRM_I830_VERTEX)] = {i830_dma_vertex, 1, 0}, 1560 - [DRM_IOCTL_NR(DRM_I830_CLEAR)] = {i830_clear_bufs, 1, 0}, 1561 - [DRM_IOCTL_NR(DRM_I830_FLUSH)] = {i830_flush_ioctl, 1, 0}, 1562 - [DRM_IOCTL_NR(DRM_I830_GETAGE)] = {i830_getage, 1, 0}, 1563 - [DRM_IOCTL_NR(DRM_I830_GETBUF)] = {i830_getbuf, 1, 0}, 1564 - [DRM_IOCTL_NR(DRM_I830_SWAP)] = {i830_swap_bufs, 1, 0}, 1565 - [DRM_IOCTL_NR(DRM_I830_COPY)] = {i830_copybuf, 1, 0}, 1566 - [DRM_IOCTL_NR(DRM_I830_DOCOPY)] = {i830_docopy, 1, 0}, 1567 - [DRM_IOCTL_NR(DRM_I830_FLIP)] = {i830_flip_bufs, 1, 0}, 1568 - [DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = {i830_irq_emit, 1, 0}, 1569 - [DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = {i830_irq_wait, 1, 0}, 1570 - [DRM_IOCTL_NR(DRM_I830_GETPARAM)] = {i830_getparam, 1, 0}, 1571 - [DRM_IOCTL_NR(DRM_I830_SETPARAM)] = {i830_setparam, 1, 0} 1572 }; 1573 1574 int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
··· 116 117 static struct file_operations i830_buffer_fops = { 118 .open = drm_open, 119 .release = drm_release, 120 .ioctl = drm_ioctl, 121 .mmap = i830_mmap_buffers, ··· 1517 return 0; 1518 } 1519 1520 + int i830_driver_load(drm_device_t *dev, unsigned long flags) 1521 + { 1522 + /* i830 has 4 more counters */ 1523 + dev->counters += 4; 1524 + dev->types[6] = _DRM_STAT_IRQ; 1525 + dev->types[7] = _DRM_STAT_PRIMARY; 1526 + dev->types[8] = _DRM_STAT_SECONDARY; 1527 + dev->types[9] = _DRM_STAT_DMA; 1528 + 1529 + return 0; 1530 + } 1531 + 1532 + void i830_driver_lastclose(drm_device_t * dev) 1533 { 1534 i830_dma_cleanup(dev); 1535 } 1536 1537 + void i830_driver_preclose(drm_device_t * dev, DRMFILE filp) 1538 { 1539 if (dev->dev_private) { 1540 drm_i830_private_t *dev_priv = dev->dev_private; ··· 1532 } 1533 } 1534 1535 + void i830_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) 1536 { 1537 i830_reclaim_buffers(dev, filp); 1538 } ··· 1544 } 1545 1546 drm_ioctl_desc_t i830_ioctls[] = { 1547 + [DRM_IOCTL_NR(DRM_I830_INIT)] = {i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1548 + [DRM_IOCTL_NR(DRM_I830_VERTEX)] = {i830_dma_vertex, DRM_AUTH}, 1549 + [DRM_IOCTL_NR(DRM_I830_CLEAR)] = {i830_clear_bufs, DRM_AUTH}, 1550 + [DRM_IOCTL_NR(DRM_I830_FLUSH)] = {i830_flush_ioctl, DRM_AUTH}, 1551 + [DRM_IOCTL_NR(DRM_I830_GETAGE)] = {i830_getage, DRM_AUTH}, 1552 + [DRM_IOCTL_NR(DRM_I830_GETBUF)] = {i830_getbuf, DRM_AUTH}, 1553 + [DRM_IOCTL_NR(DRM_I830_SWAP)] = {i830_swap_bufs, DRM_AUTH}, 1554 + [DRM_IOCTL_NR(DRM_I830_COPY)] = {i830_copybuf, DRM_AUTH}, 1555 + [DRM_IOCTL_NR(DRM_I830_DOCOPY)] = {i830_docopy, DRM_AUTH}, 1556 + [DRM_IOCTL_NR(DRM_I830_FLIP)] = {i830_flip_bufs, DRM_AUTH}, 1557 + [DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = {i830_irq_emit, DRM_AUTH}, 1558 + [DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = {i830_irq_wait, DRM_AUTH}, 1559 + [DRM_IOCTL_NR(DRM_I830_GETPARAM)] = {i830_getparam, DRM_AUTH}, 1560 + [DRM_IOCTL_NR(DRM_I830_SETPARAM)] = {i830_setparam, DRM_AUTH} 1561 }; 1562 1563 int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
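i830_driver_load() above moves the statistics-counter setup out of the old postinit() path and into the load hook. A toy model of that counters/types bookkeeping; that six generic slots precede the driver's four is an assumption made for this sketch:

#include <stdio.h>

enum stat_type { STAT_NONE, STAT_IRQ, STAT_PRIMARY, STAT_SECONDARY, STAT_DMA };

struct device_stats {
    int counters;              /* number of live slots */
    enum stat_type types[15];
};

/* Mirrors the shape of i830_driver_load(): claim four extra slots after
 * the generic ones the core is assumed to install first. */
static int driver_load(struct device_stats *dev)
{
    dev->types[6] = STAT_IRQ;
    dev->types[7] = STAT_PRIMARY;
    dev->types[8] = STAT_SECONDARY;
    dev->types[9] = STAT_DMA;
    dev->counters += 4;
    return 0;
}

int main(void)
{
    struct device_stats dev = { .counters = 6 };

    driver_load(&dev);
    printf("%d stat counters installed\n", dev.counters);
    return 0;
}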
+16 -43
drivers/char/drm/i830_drv.c
··· 40 41 #include "drm_pciids.h" 42 43 - static int postinit(struct drm_device *dev, unsigned long flags) 44 - { 45 - dev->counters += 4; 46 - dev->types[6] = _DRM_STAT_IRQ; 47 - dev->types[7] = _DRM_STAT_PRIMARY; 48 - dev->types[8] = _DRM_STAT_SECONDARY; 49 - dev->types[9] = _DRM_STAT_DMA; 50 - 51 - DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n", 52 - DRIVER_NAME, 53 - DRIVER_MAJOR, 54 - DRIVER_MINOR, 55 - DRIVER_PATCHLEVEL, 56 - DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev) 57 - ); 58 - return 0; 59 - } 60 - 61 - static int version(drm_version_t * version) 62 - { 63 - int len; 64 - 65 - version->version_major = DRIVER_MAJOR; 66 - version->version_minor = DRIVER_MINOR; 67 - version->version_patchlevel = DRIVER_PATCHLEVEL; 68 - DRM_COPY(version->name, DRIVER_NAME); 69 - DRM_COPY(version->date, DRIVER_DATE); 70 - DRM_COPY(version->desc, DRIVER_DESC); 71 - return 0; 72 - } 73 - 74 static struct pci_device_id pciidlist[] = { 75 i830_PCI_IDS 76 }; ··· 52 .driver_features |= DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ, 53 #endif 54 .dev_priv_size = sizeof(drm_i830_buf_priv_t), 55 - .pretakedown = i830_driver_pretakedown, 56 - .prerelease = i830_driver_prerelease, 57 .device_is_agp = i830_driver_device_is_agp, 58 - .release = i830_driver_release, 59 .dma_quiescent = i830_driver_dma_quiescent, 60 - .reclaim_buffers = i830_reclaim_buffers, 61 .get_map_ofs = drm_core_get_map_ofs, 62 .get_reg_ofs = drm_core_get_reg_ofs, 63 #if USE_IRQS ··· 66 .irq_uninstall = i830_driver_irq_uninstall, 67 .irq_handler = i830_driver_irq_handler, 68 #endif 69 - .postinit = postinit, 70 - .version = version, 71 .ioctls = i830_ioctls, 72 .fops = { 73 .owner = THIS_MODULE, ··· 75 .mmap = drm_mmap, 76 .poll = drm_poll, 77 .fasync = drm_fasync, 78 - } 79 - , 80 - .pci_driver = { 81 - .name = DRIVER_NAME, 82 - .id_table = pciidlist, 83 - } 84 85 }; 86 87 static int __init i830_init(void)
··· 40 41 #include "drm_pciids.h" 42 43 static struct pci_device_id pciidlist[] = { 44 i830_PCI_IDS 45 }; ··· 83 .driver_features |= DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ, 84 #endif 85 .dev_priv_size = sizeof(drm_i830_buf_priv_t), 86 + .load = i830_driver_load, 87 + .lastclose = i830_driver_lastclose, 88 + .preclose = i830_driver_preclose, 89 .device_is_agp = i830_driver_device_is_agp, 90 + .reclaim_buffers_locked = i830_driver_reclaim_buffers_locked, 91 .dma_quiescent = i830_driver_dma_quiescent, 92 .get_map_ofs = drm_core_get_map_ofs, 93 .get_reg_ofs = drm_core_get_reg_ofs, 94 #if USE_IRQS ··· 97 .irq_uninstall = i830_driver_irq_uninstall, 98 .irq_handler = i830_driver_irq_handler, 99 #endif 100 .ioctls = i830_ioctls, 101 .fops = { 102 .owner = THIS_MODULE, ··· 108 .mmap = drm_mmap, 109 .poll = drm_poll, 110 .fasync = drm_fasync, 111 + }, 112 113 + .pci_driver = { 114 + .name = DRIVER_NAME, 115 + .id_table = pciidlist, 116 + }, 117 + 118 + .name = DRIVER_NAME, 119 + .desc = DRIVER_DESC, 120 + .date = DRIVER_DATE, 121 + .major = DRIVER_MAJOR, 122 + .minor = DRIVER_MINOR, 123 + .patchlevel = DRIVER_PATCHLEVEL, 124 }; 125 126 static int __init i830_init(void)
+5 -3
drivers/char/drm/i830_drv.h
··· 136 extern void i830_driver_irq_preinstall(drm_device_t * dev); 137 extern void i830_driver_irq_postinstall(drm_device_t * dev); 138 extern void i830_driver_irq_uninstall(drm_device_t * dev); 139 - extern void i830_driver_pretakedown(drm_device_t * dev); 140 - extern void i830_driver_release(drm_device_t * dev, struct file *filp); 141 extern int i830_driver_dma_quiescent(drm_device_t * dev); 142 - extern void i830_driver_prerelease(drm_device_t * dev, DRMFILE filp); 143 extern int i830_driver_device_is_agp(drm_device_t * dev); 144 145 #define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
··· 136 extern void i830_driver_irq_preinstall(drm_device_t * dev); 137 extern void i830_driver_irq_postinstall(drm_device_t * dev); 138 extern void i830_driver_irq_uninstall(drm_device_t * dev); 139 + extern int i830_driver_load(struct drm_device *, unsigned long flags); 140 + extern void i830_driver_preclose(drm_device_t * dev, DRMFILE filp); 141 + extern void i830_driver_lastclose(drm_device_t * dev); 142 + extern void i830_driver_reclaim_buffers_locked(drm_device_t * dev, 143 + struct file *filp); 144 extern int i830_driver_dma_quiescent(drm_device_t * dev); 145 extern int i830_driver_device_is_agp(drm_device_t * dev); 146 147 #define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
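Across this series, pretakedown/prerelease/release become lastclose/preclose/reclaim_buffers_locked: names that say when each hook runs rather than what it precedes. A toy model of the release ordering, assuming a core that calls preclose() on every descriptor close and lastclose() only when the last opener leaves:

#include <stdio.h>

struct device { int open_count; };

static void driver_preclose(struct device *d)
{
    (void)d;
    printf("preclose: drop this descriptor's state\n");
}

static void driver_lastclose(struct device *d)
{
    (void)d;
    printf("lastclose: tear down DMA, unmap everything\n");
}

static void core_release(struct device *d)
{
    driver_preclose(d);          /* runs on every close */
    if (--d->open_count == 0)
        driver_lastclose(d);     /* runs only on the final close */
}

int main(void)
{
    struct device dev = { .open_count = 2 };

    core_release(&dev);   /* preclose only */
    core_release(&dev);   /* preclose, then lastclose */
    return 0;
}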
+33 -19
drivers/char/drm/i915_dma.c
··· 1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*- 2 */ 3 - /************************************************************************** 4 - * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. 7 * ··· 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 - **************************************************************************/ 28 29 #include "drmP.h" 30 #include "drm.h" ··· 195 return 0; 196 } 197 198 - static int i915_resume(drm_device_t * dev) 199 { 200 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 201 ··· 252 retcode = i915_dma_cleanup(dev); 253 break; 254 case I915_RESUME_DMA: 255 - retcode = i915_resume(dev); 256 break; 257 default: 258 retcode = -EINVAL; ··· 653 case I915_PARAM_ALLOW_BATCHBUFFER: 654 value = dev_priv->allow_batchbuffer ? 1 : 0; 655 break; 656 default: 657 DRM_ERROR("Unkown parameter %d\n", param.param); 658 return DRM_ERR(EINVAL); ··· 701 return 0; 702 } 703 704 - void i915_driver_pretakedown(drm_device_t * dev) 705 { 706 if (dev->dev_private) { 707 drm_i915_private_t *dev_priv = dev->dev_private; ··· 722 i915_dma_cleanup(dev); 723 } 724 725 - void i915_driver_prerelease(drm_device_t * dev, DRMFILE filp) 726 { 727 if (dev->dev_private) { 728 drm_i915_private_t *dev_priv = dev->dev_private; ··· 731 } 732 733 drm_ioctl_desc_t i915_ioctls[] = { 734 - [DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, 1, 1}, 735 - [DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, 1, 0}, 736 - [DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, 1, 0}, 737 - [DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, 1, 0}, 738 - [DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, 1, 0}, 739 - [DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, 1, 0}, 740 - [DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, 1, 0}, 741 - [DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, 1, 1}, 742 - [DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, 1, 0}, 743 - [DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, 1, 0}, 744 - [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, 1, 1}, 745 - [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, 1, 0} 746 }; 747 748 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
··· 1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*- 2 */ 3 + /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * ··· 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 + */ 29 30 #include "drmP.h" 31 #include "drm.h" ··· 196 return 0; 197 } 198 199 + static int i915_dma_resume(drm_device_t * dev) 200 { 201 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 202 ··· 253 retcode = i915_dma_cleanup(dev); 254 break; 255 case I915_RESUME_DMA: 256 + retcode = i915_dma_resume(dev); 257 break; 258 default: 259 retcode = -EINVAL; ··· 654 case I915_PARAM_ALLOW_BATCHBUFFER: 655 value = dev_priv->allow_batchbuffer ? 1 : 0; 656 break; 657 + case I915_PARAM_LAST_DISPATCH: 658 + value = READ_BREADCRUMB(dev_priv); 659 + break; 660 default: 661 DRM_ERROR("Unkown parameter %d\n", param.param); 662 return DRM_ERR(EINVAL); ··· 699 return 0; 700 } 701 702 + int i915_driver_load(drm_device_t *dev, unsigned long flags) 703 + { 704 + /* i915 has 4 more counters */ 705 + dev->counters += 4; 706 + dev->types[6] = _DRM_STAT_IRQ; 707 + dev->types[7] = _DRM_STAT_PRIMARY; 708 + dev->types[8] = _DRM_STAT_SECONDARY; 709 + dev->types[9] = _DRM_STAT_DMA; 710 + 711 + return 0; 712 + } 713 + 714 + void i915_driver_lastclose(drm_device_t * dev) 715 { 716 if (dev->dev_private) { 717 drm_i915_private_t *dev_priv = dev->dev_private; ··· 708 i915_dma_cleanup(dev); 709 } 710 711 + void i915_driver_preclose(drm_device_t * dev, DRMFILE filp) 712 { 713 if (dev->dev_private) { 714 drm_i915_private_t *dev_priv = dev->dev_private; ··· 717 } 718 719 drm_ioctl_desc_t i915_ioctls[] = { 720 + [DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 721 + [DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH}, 722 + [DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH}, 723 + [DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, DRM_AUTH}, 724 + [DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, DRM_AUTH}, 725 + [DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, DRM_AUTH}, 726 + [DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, DRM_AUTH}, 727 + [DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 728 + [DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH}, 729 + [DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH}, 730 + [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 731 + [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH} 732 }; 733 734 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
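The new I915_PARAM_LAST_DISPATCH case exposes the breadcrumb to user space through the existing getparam ioctl. A hedged user-space sketch; the ioctl number 0x46 and the /dev/dri/card0 path are assumptions for illustration, and real code should use DRM_IOCTL_I915_GETPARAM from the installed i915_drm.h:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Local copies of the interface bits, so the sketch is self-contained. */
#define I915_PARAM_LAST_DISPATCH 3

typedef struct drm_i915_getparam {
    int param;
    int *value;
} drm_i915_getparam_t;

/* 0x46 assumes DRM_COMMAND_BASE (0x40) plus DRM_I915_GETPARAM (0x06);
 * prefer the header's DRM_IOCTL_I915_GETPARAM in real code. */
#define DRM_IOCTL_I915_GETPARAM _IOWR('d', 0x46, drm_i915_getparam_t)

int main(void)
{
    int fd = open("/dev/dri/card0", O_RDWR);   /* path is an assumption */
    int last = 0;
    drm_i915_getparam_t gp = { I915_PARAM_LAST_DISPATCH, &last };

    if (fd < 0 || ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) < 0)
        perror("getparam");
    else
        printf("last dispatched breadcrumb: %d\n", last);
    if (fd >= 0)
        close(fd);
    return 0;
}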
+3 -3
drivers/char/drm/i915_drm.h
··· 1 - /************************************************************************** 2 - * 3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 4 * All Rights Reserved. 5 * ··· 22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 24 * 25 - **************************************************************************/ 26 27 #ifndef _I915_DRM_H_ 28 #define _I915_DRM_H_ ··· 151 */ 152 #define I915_PARAM_IRQ_ACTIVE 1 153 #define I915_PARAM_ALLOW_BATCHBUFFER 2 154 155 typedef struct drm_i915_getparam { 156 int param;
··· 1 + /* 2 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 3 * All Rights Reserved. 4 * ··· 23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * 26 + */ 27 28 #ifndef _I915_DRM_H_ 29 #define _I915_DRM_H_ ··· 152 */ 153 #define I915_PARAM_IRQ_ACTIVE 1 154 #define I915_PARAM_ALLOW_BATCHBUFFER 2 155 + #define I915_PARAM_LAST_DISPATCH 3 156 157 typedef struct drm_i915_getparam { 158 int param;
+23 -43
drivers/char/drm/i915_drv.c
··· 1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- 2 */ 3 - /************************************************************************** 4 * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. ··· 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 - **************************************************************************/ 29 30 #include "drmP.h" 31 #include "drm.h" ··· 34 35 #include "drm_pciids.h" 36 37 - static int postinit(struct drm_device *dev, unsigned long flags) 38 - { 39 - dev->counters += 4; 40 - dev->types[6] = _DRM_STAT_IRQ; 41 - dev->types[7] = _DRM_STAT_PRIMARY; 42 - dev->types[8] = _DRM_STAT_SECONDARY; 43 - dev->types[9] = _DRM_STAT_DMA; 44 - 45 - DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n", 46 - DRIVER_NAME, 47 - DRIVER_MAJOR, 48 - DRIVER_MINOR, 49 - DRIVER_PATCHLEVEL, 50 - DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev) 51 - ); 52 - return 0; 53 - } 54 - 55 - static int version(drm_version_t * version) 56 - { 57 - int len; 58 - 59 - version->version_major = DRIVER_MAJOR; 60 - version->version_minor = DRIVER_MINOR; 61 - version->version_patchlevel = DRIVER_PATCHLEVEL; 62 - DRM_COPY(version->name, DRIVER_NAME); 63 - DRM_COPY(version->date, DRIVER_DATE); 64 - DRM_COPY(version->desc, DRIVER_DESC); 65 - return 0; 66 - } 67 - 68 static struct pci_device_id pciidlist[] = { 69 i915_PCI_IDS 70 }; 71 72 static struct drm_driver driver = { 73 .driver_features = 74 - DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | 75 - DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 76 - .pretakedown = i915_driver_pretakedown, 77 - .prerelease = i915_driver_prerelease, 78 .device_is_agp = i915_driver_device_is_agp, 79 .irq_preinstall = i915_driver_irq_preinstall, 80 .irq_postinstall = i915_driver_irq_postinstall, 81 .irq_uninstall = i915_driver_irq_uninstall, ··· 57 .reclaim_buffers = drm_core_reclaim_buffers, 58 .get_map_ofs = drm_core_get_map_ofs, 59 .get_reg_ofs = drm_core_get_reg_ofs, 60 - .postinit = postinit, 61 - .version = version, 62 .ioctls = i915_ioctls, 63 .fops = { 64 .owner = THIS_MODULE, ··· 69 #ifdef CONFIG_COMPAT 70 .compat_ioctl = i915_compat_ioctl, 71 #endif 72 - }, 73 .pci_driver = { 74 - .name = DRIVER_NAME, 75 - .id_table = pciidlist, 76 - } 77 }; 78 79 static int __init i915_init(void)
··· 1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- 2 */ 3 + /* 4 * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. ··· 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 + */ 29 30 #include "drmP.h" 31 #include "drm.h" ··· 34 35 #include "drm_pciids.h" 36 37 static struct pci_device_id pciidlist[] = { 38 i915_PCI_IDS 39 }; 40 41 static struct drm_driver driver = { 42 + /* don't use mtrr's here, the Xserver or user space app should 43 + * deal with them for intel hardware. 44 + */ 45 .driver_features = 46 + DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ 47 + DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, 48 + .load = i915_driver_load, 49 + .lastclose = i915_driver_lastclose, 50 + .preclose = i915_driver_preclose, 51 .device_is_agp = i915_driver_device_is_agp, 52 + .vblank_wait = i915_driver_vblank_wait, 53 .irq_preinstall = i915_driver_irq_preinstall, 54 .irq_postinstall = i915_driver_irq_postinstall, 55 .irq_uninstall = i915_driver_irq_uninstall, ··· 83 .reclaim_buffers = drm_core_reclaim_buffers, 84 .get_map_ofs = drm_core_get_map_ofs, 85 .get_reg_ofs = drm_core_get_reg_ofs, 86 .ioctls = i915_ioctls, 87 .fops = { 88 .owner = THIS_MODULE, ··· 97 #ifdef CONFIG_COMPAT 98 .compat_ioctl = i915_compat_ioctl, 99 #endif 100 + }, 101 + 102 .pci_driver = { 103 + .name = DRIVER_NAME, 104 + .id_table = pciidlist, 105 + }, 106 + 107 + .name = DRIVER_NAME, 108 + .desc = DRIVER_DESC, 109 + .date = DRIVER_DATE, 110 + .major = DRIVER_MAJOR, 111 + .minor = DRIVER_MINOR, 112 + .patchlevel = DRIVER_PATCHLEVEL, 113 }; 114 115 static int __init i915_init(void)
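With DRIVER_IRQ_VBL set and .vblank_wait pointing at i915_driver_vblank_wait, user space can block until a vertical-blank sequence. A minimal sketch, assuming libdrm's drmWaitVBlank() interface of this era and a device node at /dev/dri/card0:

#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>

int main(void)
{
    int fd = open("/dev/dri/card0", O_RDWR);
    drmVBlank vbl = { 0 };

    vbl.request.type = DRM_VBLANK_RELATIVE; /* count from "now" */
    vbl.request.sequence = 1;               /* the next vblank */

    if (fd < 0 || drmWaitVBlank(fd, &vbl))
        perror("drmWaitVBlank");
    else
        printf("woke at vblank %u\n", vbl.reply.sequence);
    return 0;
}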
+26 -18
drivers/char/drm/i915_drv.h
··· 1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- 2 */ 3 - /************************************************************************** 4 * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. ··· 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 - **************************************************************************/ 29 30 #ifndef _I915_DRV_H_ 31 #define _I915_DRV_H_ ··· 37 38 #define DRIVER_NAME "i915" 39 #define DRIVER_DESC "Intel Graphics" 40 - #define DRIVER_DATE "20040405" 41 42 /* Interface history: 43 * 44 * 1.1: Original. 45 */ 46 #define DRIVER_MAJOR 1 47 - #define DRIVER_MINOR 1 48 #define DRIVER_PATCHLEVEL 0 49 - 50 - /* We use our own dma mechanisms, not the drm template code. However, 51 - * the shared IRQ code is useful to us: 52 - */ 53 - #define __HAVE_PM 1 54 55 typedef struct _drm_i915_ring_buffer { 56 int tail_mask; ··· 94 int tex_lru_log_granularity; 95 int allow_batchbuffer; 96 struct mem_block *agp_heap; 97 } drm_i915_private_t; 98 99 extern drm_ioctl_desc_t i915_ioctls[]; ··· 102 103 /* i915_dma.c */ 104 extern void i915_kernel_lost_context(drm_device_t * dev); 105 - extern void i915_driver_pretakedown(drm_device_t * dev); 106 - extern void i915_driver_prerelease(drm_device_t * dev, DRMFILE filp); 107 extern int i915_driver_device_is_agp(drm_device_t * dev); 108 109 /* i915_irq.c */ 110 extern int i915_irq_emit(DRM_IOCTL_ARGS); 111 extern int i915_irq_wait(DRM_IOCTL_ARGS); 112 113 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 114 extern void i915_driver_irq_preinstall(drm_device_t * dev); 115 extern void i915_driver_irq_postinstall(drm_device_t * dev); ··· 127 extern void i915_mem_release(drm_device_t * dev, 128 DRMFILE filp, struct mem_block *heap); 129 130 - extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 131 - unsigned long arg); 132 - 133 - #define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, reg) 134 - #define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, reg, val) 135 - #define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg) 136 - #define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, reg, val) 137 138 #define I915_VERBOSE 0 139 ··· 194 #define PPCR 0x61204 195 #define PPCR_ON (1<<0) 196 197 #define ADPA 0x61100 198 #define ADPA_DPMS_MASK (~(3<<10)) 199 #define ADPA_DPMS_ON (0<<10) ··· 263 #define ASYNC_FLIP (1<<22) 264 265 #define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) 266 267 #endif
··· 1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- 2 */ 3 + /* 4 * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. ··· 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 + */ 29 30 #ifndef _I915_DRV_H_ 31 #define _I915_DRV_H_ ··· 37 38 #define DRIVER_NAME "i915" 39 #define DRIVER_DESC "Intel Graphics" 40 + #define DRIVER_DATE "20051209" 41 42 /* Interface history: 43 * 44 * 1.1: Original. 45 + * 1.2: Add Power Management 46 + * 1.3: Add vblank support 47 */ 48 #define DRIVER_MAJOR 1 49 + #define DRIVER_MINOR 3 50 #define DRIVER_PATCHLEVEL 0 51 52 typedef struct _drm_i915_ring_buffer { 53 int tail_mask; ··· 97 int tex_lru_log_granularity; 98 int allow_batchbuffer; 99 struct mem_block *agp_heap; 100 + unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; 101 } drm_i915_private_t; 102 103 extern drm_ioctl_desc_t i915_ioctls[]; ··· 104 105 /* i915_dma.c */ 106 extern void i915_kernel_lost_context(drm_device_t * dev); 107 + extern int i915_driver_load(struct drm_device *, unsigned long flags); 108 + extern void i915_driver_lastclose(drm_device_t * dev); 109 + extern void i915_driver_preclose(drm_device_t * dev, DRMFILE filp); 110 extern int i915_driver_device_is_agp(drm_device_t * dev); 111 + extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 112 + unsigned long arg); 113 114 /* i915_irq.c */ 115 extern int i915_irq_emit(DRM_IOCTL_ARGS); 116 extern int i915_irq_wait(DRM_IOCTL_ARGS); 117 118 + extern int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence); 119 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 120 extern void i915_driver_irq_preinstall(drm_device_t * dev); 121 extern void i915_driver_irq_postinstall(drm_device_t * dev); ··· 125 extern void i915_mem_release(drm_device_t * dev, 126 DRMFILE filp, struct mem_block *heap); 127 128 + #define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg)) 129 + #define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val)) 130 + #define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg)) 131 + #define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val)) 132 133 #define I915_VERBOSE 0 134 ··· 195 #define PPCR 0x61204 196 #define PPCR_ON (1<<0) 197 198 + #define DVOB 0x61140 199 + #define DVOB_ON (1<<31) 200 + #define DVOC 0x61160 201 + #define DVOC_ON (1<<31) 202 + #define LVDS 0x61180 203 + #define LVDS_ON (1<<31) 204 + 205 #define ADPA 0x61100 206 #define ADPA_DPMS_MASK (~(3<<10)) 207 #define ADPA_DPMS_ON (0<<10) ··· 257 #define ASYNC_FLIP (1<<22) 258 259 #define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) 260 + 261 + #define READ_BREADCRUMB(dev_priv) (((u32 *)(dev_priv->hw_status_page))[5]) 262 263 #endif
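The new READ_BREADCRUMB() macro just indexes dword 5 of the shared hardware status page, where the ring is expected to store an increasing sequence number as batches retire. A host-side model of that handshake; the writer below stands in for the GPU:

#include <stdint.h>
#include <stdio.h>

#define BREADCRUMB_DWORD 5

static uint32_t hw_status_page[16];   /* stands in for the shared page */

static void gpu_retire(uint32_t seq)  /* what the ring's store would do */
{
    hw_status_page[BREADCRUMB_DWORD] = seq;
}

static uint32_t read_breadcrumb(void)
{
    return hw_status_page[BREADCRUMB_DWORD];
}

int main(void)
{
    gpu_retire(42);
    printf("last dispatch: %u\n", read_breadcrumb());  /* prints 42 */
    return 0;
}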
+39 -9
drivers/char/drm/i915_irq.c
··· 1 - /* i915_dma.c -- DMA support for the I915 -*- linux-c -*- 2 */ 3 - /************************************************************************** 4 - * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. 7 * ··· 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 - **************************************************************************/ 28 29 #include "drmP.h" 30 #include "drm.h" 31 #include "i915_drm.h" 32 #include "i915_drv.h" 33 34 - #define USER_INT_FLAG 0x2 35 #define MAX_NOPID ((u32)~0) 36 - #define READ_BREADCRUMB(dev_priv) (((u32*)(dev_priv->hw_status_page))[5]) 37 38 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 39 { ··· 44 u16 temp; 45 46 temp = I915_READ16(I915REG_INT_IDENTITY_R); 47 - temp &= USER_INT_FLAG; 48 49 DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp); 50 ··· 52 return IRQ_NONE; 53 54 I915_WRITE16(I915REG_INT_IDENTITY_R, temp); 55 - DRM_WAKEUP(&dev_priv->irq_queue); 56 57 return IRQ_HANDLED; 58 } ··· 110 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 111 return ret; 112 } 113 114 /* Needs the lock as it touches the ring. 115 */ ··· 195 { 196 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 197 198 - I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG); 199 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); 200 } 201
··· 1 + /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 + /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * ··· 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 + */ 29 30 #include "drmP.h" 31 #include "drm.h" 32 #include "i915_drm.h" 33 #include "i915_drv.h" 34 35 + #define USER_INT_FLAG (1<<1) 36 + #define VSYNC_PIPEB_FLAG (1<<5) 37 + #define VSYNC_PIPEA_FLAG (1<<7) 38 + 39 #define MAX_NOPID ((u32)~0) 40 41 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 42 { ··· 43 u16 temp; 44 45 temp = I915_READ16(I915REG_INT_IDENTITY_R); 46 + temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG); 47 48 DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp); 49 ··· 51 return IRQ_NONE; 52 53 I915_WRITE16(I915REG_INT_IDENTITY_R, temp); 54 + 55 + if (temp & USER_INT_FLAG) 56 + DRM_WAKEUP(&dev_priv->irq_queue); 57 + 58 + if (temp & VSYNC_PIPEA_FLAG) { 59 + atomic_inc(&dev->vbl_received); 60 + DRM_WAKEUP(&dev->vbl_queue); 61 + drm_vbl_send_signals(dev); 62 + } 63 64 return IRQ_HANDLED; 65 } ··· 101 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 102 return ret; 103 } 104 + 105 + int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence) 106 + { 107 + drm_i915_private_t *dev_priv = dev->dev_private; 108 + unsigned int cur_vblank; 109 + int ret = 0; 110 + 111 + if (!dev_priv) { 112 + DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 113 + return DRM_ERR(EINVAL); 114 + } 115 + 116 + DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 117 + (((cur_vblank = atomic_read(&dev->vbl_received)) 118 + - *sequence) <= (1<<23))); 119 + 120 + *sequence = cur_vblank; 121 + 122 + return ret; 123 + } 124 + 125 126 /* Needs the lock as it touches the ring. 127 */ ··· 165 { 166 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 167 168 + I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | VSYNC_PIPEA_FLAG); 169 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); 170 } 171
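The DRM_WAIT_ON condition in i915_driver_vblank_wait() leans on unsigned wraparound arithmetic: (cur - target) <= (1<<23) becomes true once cur has reached or passed target, even if the 32-bit counter overflows in between. A standalone demonstration of the comparison:

#include <stdio.h>

static int vblank_passed(unsigned int cur, unsigned int target)
{
    /* Window of 2^23 frames: at 60 Hz that is roughly 38 hours, far
     * longer than any sane wait, so false positives are not a concern. */
    return (cur - target) <= (1u << 23);
}

int main(void)
{
    printf("%d\n", vblank_passed(100, 99));        /* 1: already past */
    printf("%d\n", vblank_passed(99, 100));        /* 0: still waiting */
    printf("%d\n", vblank_passed(3, 0xfffffffeu)); /* 1: across the wrap */
    return 0;
}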
+2 -3
drivers/char/drm/i915_mem.c
··· 1 /* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*- 2 */ 3 - /************************************************************************** 4 - * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. 7 * ··· 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 - **************************************************************************/ 28 29 #include "drmP.h" 30 #include "drm.h"
··· 1 /* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*- 2 */ 3 + /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * ··· 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 + */ 29 30 #include "drmP.h" 31 #include "drm.h"
+100 -62
drivers/char/drm/mga_dma.c
··· 44 #define MGA_DEFAULT_USEC_TIMEOUT 10000 45 #define MGA_FREELIST_DEBUG 0 46 47 - static int mga_do_cleanup_dma(drm_device_t * dev); 48 49 /* ================================================================ 50 * Engine control ··· 393 * DMA initialization, cleanup 394 */ 395 396 - int mga_driver_preinit(drm_device_t * dev, unsigned long flags) 397 { 398 drm_mga_private_t *dev_priv; 399 ··· 406 407 dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; 408 dev_priv->chipset = flags; 409 410 return 0; 411 } ··· 448 drm_buf_desc_t req; 449 drm_agp_mode_t mode; 450 drm_agp_info_t info; 451 452 /* Acquire AGP. */ 453 err = drm_agp_acquire(dev); 454 if (err) { 455 - DRM_ERROR("Unable to acquire AGP\n"); 456 return err; 457 } 458 459 err = drm_agp_info(dev, &info); 460 if (err) { 461 - DRM_ERROR("Unable to get AGP info\n"); 462 return err; 463 } 464 ··· 484 } 485 486 /* Allocate and bind AGP memory. */ 487 - dev_priv->agp_pages = agp_size / PAGE_SIZE; 488 - dev_priv->agp_mem = drm_alloc_agp(dev, dev_priv->agp_pages, 0); 489 - if (dev_priv->agp_mem == NULL) { 490 - dev_priv->agp_pages = 0; 491 DRM_ERROR("Unable to allocate %uMB AGP memory\n", 492 dma_bs->agp_size); 493 - return DRM_ERR(ENOMEM); 494 } 495 496 - err = drm_bind_agp(dev_priv->agp_mem, 0); 497 if (err) { 498 - DRM_ERROR("Unable to bind AGP memory\n"); 499 return err; 500 } 501 ··· 515 err = drm_addmap(dev, offset, warp_size, 516 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp); 517 if (err) { 518 - DRM_ERROR("Unable to map WARP microcode\n"); 519 return err; 520 } 521 ··· 523 err = drm_addmap(dev, offset, dma_bs->primary_size, 524 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary); 525 if (err) { 526 - DRM_ERROR("Unable to map primary DMA region\n"); 527 return err; 528 } 529 ··· 531 err = drm_addmap(dev, offset, secondary_size, 532 _DRM_AGP, 0, &dev->agp_buffer_map); 533 if (err) { 534 - DRM_ERROR("Unable to map secondary DMA region\n"); 535 return err; 536 } 537 ··· 543 544 err = drm_addbufs_agp(dev, &req); 545 if (err) { 546 - DRM_ERROR("Unable to add secondary DMA buffers\n"); 547 return err; 548 } 549 550 offset += secondary_size; 551 err = drm_addmap(dev, offset, agp_size - offset, 552 _DRM_AGP, 0, &dev_priv->agp_textures); 553 if (err) { 554 - DRM_ERROR("Unable to map AGP texture region\n"); 555 return err; 556 } 557 ··· 635 err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT, 636 _DRM_READ_ONLY, &dev_priv->warp); 637 if (err != 0) { 638 - DRM_ERROR("Unable to create mapping for WARP microcode\n"); 639 return err; 640 } 641 ··· 655 } 656 657 if (err != 0) { 658 - DRM_ERROR("Unable to allocate primary DMA region\n"); 659 return DRM_ERR(ENOMEM); 660 } 661 ··· 679 } 680 681 if (bin_count == 0) { 682 - DRM_ERROR("Unable to add secondary DMA buffers\n"); 683 return err; 684 } 685 ··· 715 err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size, 716 _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio); 717 if (err) { 718 - DRM_ERROR("Unable to map MMIO region\n"); 719 return err; 720 } 721 ··· 723 _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL, 724 &dev_priv->status); 725 if (err) { 726 - DRM_ERROR("Unable to map status region\n"); 727 return err; 728 } 729 ··· 741 */ 742 743 if (err) { 744 - mga_do_cleanup_dma(dev); 745 } 746 747 /* Not only do we want to try and initialized PCI cards for PCI DMA, ··· 764 DRM_DEVICE; 765 drm_mga_dma_bootstrap_t bootstrap; 766 int err; 767 768 DRM_COPY_FROM_USER_IOCTL(bootstrap, 769 (drm_mga_dma_bootstrap_t __user *) data, 770 sizeof(bootstrap)); 771 772 err = mga_do_dma_bootstrap(dev, &bootstrap); 773 - if (!err) { 774 - static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 }; 775 - const drm_mga_private_t *const dev_priv = 776 - (drm_mga_private_t *) dev->dev_private; 777 - 778 - if (dev_priv->agp_textures != NULL) { 779 - bootstrap.texture_handle = 780 - dev_priv->agp_textures->offset; 781 - bootstrap.texture_size = dev_priv->agp_textures->size; 782 - } else { 783 - bootstrap.texture_handle = 0; 784 - bootstrap.texture_size = 0; 785 - } 786 - 787 - bootstrap.agp_mode = modes[bootstrap.agp_mode & 0x07]; 788 - if (DRM_COPY_TO_USER((void __user *)data, &bootstrap, 789 - sizeof(bootstrap))) { 790 - err = DRM_ERR(EFAULT); 791 - } 792 - } else { 793 - mga_do_cleanup_dma(dev); 794 } 795 796 return err; 797 } ··· 883 884 ret = mga_warp_install_microcode(dev_priv); 885 if (ret < 0) { 886 - DRM_ERROR("failed to install WARP ucode!\n"); 887 return ret; 888 } 889 890 ret = mga_warp_init(dev_priv); 891 if (ret < 0) { 892 - DRM_ERROR("failed to init WARP engine!\n"); 893 return ret; 894 } 895 ··· 934 return 0; 935 } 936 937 - static int mga_do_cleanup_dma(drm_device_t * dev) 938 { 939 int err = 0; 940 DRM_DEBUG("\n"); ··· 962 963 if (dev_priv->used_new_dma_init) { 964 #if __OS_HAS_AGP 965 - if (dev_priv->agp_mem != NULL) { 966 - dev_priv->agp_textures = NULL; 967 - drm_unbind_agp(dev_priv->agp_mem); 968 969 - drm_free_agp(dev_priv->agp_mem, 970 - dev_priv->agp_pages); 971 - dev_priv->agp_pages = 0; 972 - dev_priv->agp_mem = NULL; 973 } 974 975 if ((dev->agp != NULL) && dev->agp->acquired) { 976 err = drm_agp_release(dev); 977 } 978 #endif 979 - dev_priv->used_new_dma_init = 0; 980 } 981 982 dev_priv->warp = NULL; 983 dev_priv->primary = NULL; 984 - dev_priv->mmio = NULL; 985 - dev_priv->status = NULL; 986 dev_priv->sarea = NULL; 987 dev_priv->sarea_priv = NULL; 988 dev->agp_buffer_map = NULL; 989 990 memset(&dev_priv->prim, 0, sizeof(dev_priv->prim)); 991 dev_priv->warp_pipe = 0; ··· 1005 } 1006 } 1007 1008 - return err; 1009 } 1010 1011 int mga_dma_init(DRM_IOCTL_ARGS) ··· 1023 case MGA_INIT_DMA: 1024 err = mga_do_init_dma(dev, &init); 1025 if (err) { 1026 - (void)mga_do_cleanup_dma(dev); 1027 } 1028 return err; 1029 case MGA_CLEANUP_DMA: 1030 - return mga_do_cleanup_dma(dev); 1031 } 1032 1033 return DRM_ERR(EINVAL); ··· 1156 /** 1157 * Called just before the module is unloaded. 1158 */ 1159 - int mga_driver_postcleanup(drm_device_t * dev) 1160 { 1161 drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER); 1162 dev->dev_private = NULL; ··· 1167 /** 1168 * Called when the last opener of the device is closed. 1169 */ 1170 - void mga_driver_pretakedown(drm_device_t * dev) 1171 { 1172 - mga_do_cleanup_dma(dev); 1173 } 1174 1175 int mga_driver_dma_quiescent(drm_device_t * dev)
··· 44 #define MGA_DEFAULT_USEC_TIMEOUT 10000 45 #define MGA_FREELIST_DEBUG 0 46 47 + #define MINIMAL_CLEANUP 0 48 + #define FULL_CLEANUP 1 49 + static int mga_do_cleanup_dma(drm_device_t *dev, int full_cleanup); 50 51 /* ================================================================ 52 * Engine control ··· 391 * DMA initialization, cleanup 392 */ 393 394 + int mga_driver_load(drm_device_t * dev, unsigned long flags) 395 { 396 drm_mga_private_t *dev_priv; 397 ··· 404 405 dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; 406 dev_priv->chipset = flags; 407 + 408 + dev_priv->mmio_base = drm_get_resource_start(dev, 1); 409 + dev_priv->mmio_size = drm_get_resource_len(dev, 1); 410 + 411 + dev->counters += 3; 412 + dev->types[6] = _DRM_STAT_IRQ; 413 + dev->types[7] = _DRM_STAT_PRIMARY; 414 + dev->types[8] = _DRM_STAT_SECONDARY; 415 416 return 0; 417 } ··· 438 drm_buf_desc_t req; 439 drm_agp_mode_t mode; 440 drm_agp_info_t info; 441 + drm_agp_buffer_t agp_req; 442 + drm_agp_binding_t bind_req; 443 444 /* Acquire AGP. */ 445 err = drm_agp_acquire(dev); 446 if (err) { 447 + DRM_ERROR("Unable to acquire AGP: %d\n", err); 448 return err; 449 } 450 451 err = drm_agp_info(dev, &info); 452 if (err) { 453 + DRM_ERROR("Unable to get AGP info: %d\n", err); 454 return err; 455 } 456 ··· 472 } 473 474 /* Allocate and bind AGP memory. */ 475 + agp_req.size = agp_size; 476 + agp_req.type = 0; 477 + err = drm_agp_alloc(dev, &agp_req); 478 + if (err) { 479 + dev_priv->agp_size = 0; 480 DRM_ERROR("Unable to allocate %uMB AGP memory\n", 481 dma_bs->agp_size); 482 + return err; 483 } 484 + 485 + dev_priv->agp_size = agp_size; 486 + dev_priv->agp_handle = agp_req.handle; 487 488 + bind_req.handle = agp_req.handle; 489 + bind_req.offset = 0; 490 + err = drm_agp_bind(dev, &bind_req); 491 if (err) { 492 + DRM_ERROR("Unable to bind AGP memory: %d\n", err); 493 return err; 494 } 495 ··· 497 err = drm_addmap(dev, offset, warp_size, 498 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp); 499 if (err) { 500 + DRM_ERROR("Unable to map WARP microcode: %d\n", err); 501 return err; 502 } 503 ··· 505 err = drm_addmap(dev, offset, dma_bs->primary_size, 506 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary); 507 if (err) { 508 + DRM_ERROR("Unable to map primary DMA region: %d\n", err); 509 return err; 510 } 511 ··· 513 err = drm_addmap(dev, offset, secondary_size, 514 _DRM_AGP, 0, &dev->agp_buffer_map); 515 if (err) { 516 + DRM_ERROR("Unable to map secondary DMA region: %d\n", err); 517 return err; 518 } 519 ··· 525 526 err = drm_addbufs_agp(dev, &req); 527 if (err) { 528 + DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err); 529 return err; 530 + } 531 + 532 + { 533 + drm_map_list_t *_entry; 534 + unsigned long agp_token = 0; 535 + 536 + list_for_each_entry(_entry, &dev->maplist->head, head) { 537 + if (_entry->map == dev->agp_buffer_map) 538 + agp_token = _entry->user_token; 539 + } 540 + if (!agp_token) 541 + return -EFAULT; 542 + 543 + dev->agp_buffer_token = agp_token; 544 } 545 546 offset += secondary_size; 547 err = drm_addmap(dev, offset, agp_size - offset, 548 _DRM_AGP, 0, &dev_priv->agp_textures); 549 if (err) { 550 + DRM_ERROR("Unable to map AGP texture region %d\n", err); 551 return err; 552 } 553 ··· 603 err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT, 604 _DRM_READ_ONLY, &dev_priv->warp); 605 if (err != 0) { 606 + DRM_ERROR("Unable to create mapping for WARP microcode: %d\n", 607 + err); 608 return err; 609 } 610 ··· 622 } 623 624 if (err != 0) { 625 + DRM_ERROR("Unable to allocate primary DMA region: %d\n", err); 626 return DRM_ERR(ENOMEM); 627 } 628 ··· 646 } 647 648 if (bin_count == 0) { 649 + DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err); 650 return err; 651 } 652 ··· 682 err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size, 683 _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio); 684 if (err) { 685 + DRM_ERROR("Unable to map MMIO region: %d\n", err); 686 return err; 687 } 688 ··· 690 _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL, 691 &dev_priv->status); 692 if (err) { 693 + DRM_ERROR("Unable to map status region: %d\n", err); 694 return err; 695 } 696 ··· 708 */ 709 710 if (err) { 711 + mga_do_cleanup_dma(dev, MINIMAL_CLEANUP); 712 } 713 714 /* Not only do we want to try and initialized PCI cards for PCI DMA, ··· 731 DRM_DEVICE; 732 drm_mga_dma_bootstrap_t bootstrap; 733 int err; 734 + static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 }; 735 + const drm_mga_private_t *const dev_priv = 736 + (drm_mga_private_t *) dev->dev_private; 737 738 DRM_COPY_FROM_USER_IOCTL(bootstrap, 739 (drm_mga_dma_bootstrap_t __user *) data, 740 sizeof(bootstrap)); 741 742 err = mga_do_dma_bootstrap(dev, &bootstrap); 743 + if (err) { 744 + mga_do_cleanup_dma(dev, FULL_CLEANUP); 745 + return err; 746 } 747 + 748 + if (dev_priv->agp_textures != NULL) { 749 + bootstrap.texture_handle = dev_priv->agp_textures->offset; 750 + bootstrap.texture_size = dev_priv->agp_textures->size; 751 + } else { 752 + bootstrap.texture_handle = 0; 753 + bootstrap.texture_size = 0; 754 + } 755 + 756 + bootstrap.agp_mode = modes[bootstrap.agp_mode & 0x07]; 757 + DRM_COPY_TO_USER_IOCTL((drm_mga_dma_bootstrap_t __user *)data, 758 + bootstrap, sizeof(bootstrap)); 759 760 return err; 761 } ··· 853 854 ret = mga_warp_install_microcode(dev_priv); 855 if (ret < 0) { 856 + DRM_ERROR("failed to install WARP ucode!: %d\n", ret); 857 return ret; 858 } 859 860 ret = mga_warp_init(dev_priv); 861 if (ret < 0) { 862 + DRM_ERROR("failed to init WARP engine!: %d\n", ret); 863 return ret; 864 } 865 ··· 904 return 0; 905 } 906 907 + static int mga_do_cleanup_dma(drm_device_t *dev, int full_cleanup) 908 { 909 int err = 0; 910 DRM_DEBUG("\n"); ··· 932 933 if (dev_priv->used_new_dma_init) { 934 #if __OS_HAS_AGP 935 + if (dev_priv->agp_handle != 0) { 936 + drm_agp_binding_t unbind_req; 937 + drm_agp_buffer_t free_req; 938 939 + unbind_req.handle = dev_priv->agp_handle; 940 + drm_agp_unbind(dev, &unbind_req); 941 + 942 + free_req.handle = dev_priv->agp_handle; 943 + drm_agp_free(dev, &free_req); 944 + 945 + dev_priv->agp_textures = NULL; 946 + dev_priv->agp_size = 0; 947 + dev_priv->agp_handle = 0; 948 } 949 950 if ((dev->agp != NULL) && dev->agp->acquired) { 951 err = drm_agp_release(dev); 952 } 953 #endif 954 } 955 956 dev_priv->warp = NULL; 957 dev_priv->primary = NULL; 958 dev_priv->sarea = NULL; 959 dev_priv->sarea_priv = NULL; 960 dev->agp_buffer_map = NULL; 961 + 962 + if (full_cleanup) { 963 + dev_priv->mmio = NULL; 964 + dev_priv->status = NULL; 965 + dev_priv->used_new_dma_init = 0; 966 + } 967 968 memset(&dev_priv->prim, 0, sizeof(dev_priv->prim)); 969 dev_priv->warp_pipe = 0; ··· 967 } 968 } 969 970 + return 0; 971 } 972 973 int mga_dma_init(DRM_IOCTL_ARGS) ··· 985 case MGA_INIT_DMA: 986 err = mga_do_init_dma(dev, &init); 987 if (err) { 988 + (void)mga_do_cleanup_dma(dev, FULL_CLEANUP); 989 } 990 return err; 991 case MGA_CLEANUP_DMA: 992 + return mga_do_cleanup_dma(dev, FULL_CLEANUP); 993 } 994 995 return DRM_ERR(EINVAL); ··· 1118 /** 1119 * Called just before the module is unloaded. 1120 */ 1121 + int mga_driver_unload(drm_device_t * dev) 1122 { 1123 drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER); 1124 dev->dev_private = NULL; ··· 1129 /** 1130 * Called when the last opener of the device is closed. 1131 */ 1132 + void mga_driver_lastclose(drm_device_t * dev) 1133 { 1134 + mga_do_cleanup_dma(dev, FULL_CLEANUP); 1135 } 1136 1137 int mga_driver_dma_quiescent(drm_device_t * dev)
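The MGA bootstrap now allocates and binds AGP memory through the same request structures the AGP ioctls use, and teardown is keyed on a nonzero handle instead of a kernel pointer. A sketch of that pattern with stand-in types and stub backends; none of these names are the DRM core's:

#include <stdio.h>

/* Stand-ins for the AGP request structures; only the fields the
 * pattern needs are modeled here. */
typedef struct { unsigned long size; int type; unsigned long handle; } agp_buffer_t;
typedef struct { unsigned long handle; unsigned long offset; } agp_binding_t;

static unsigned long next_handle = 0x1000;

static int agp_alloc(agp_buffer_t *req) { req->handle = next_handle++; return 0; }
static int agp_bind(const agp_binding_t *req) { (void)req; return 0; }
static void agp_unbind(unsigned long h) { printf("unbind %#lx\n", h); }
static void agp_free(unsigned long h) { printf("free %#lx\n", h); }

struct mga_priv { unsigned long agp_handle; unsigned int agp_size; };

static int bootstrap(struct mga_priv *p, unsigned long size)
{
    agp_buffer_t agp_req = { .size = size, .type = 0 };
    agp_binding_t bind_req;

    if (agp_alloc(&agp_req) != 0)
        return -1;
    p->agp_handle = agp_req.handle;   /* the handle, not a pointer,
                                       * is what cleanup is keyed on */
    p->agp_size = (unsigned int)size;

    bind_req.handle = p->agp_handle;
    bind_req.offset = 0;
    return agp_bind(&bind_req);
}

static void cleanup(struct mga_priv *p)
{
    if (p->agp_handle != 0) {         /* mirrors the new cleanup test */
        agp_unbind(p->agp_handle);
        agp_free(p->agp_handle);
        p->agp_handle = 0;
        p->agp_size = 0;
    }
}

int main(void)
{
    struct mga_priv priv = { 0, 0 };

    if (bootstrap(&priv, 2 * 1024 * 1024) == 0)
        cleanup(&priv);
    return 0;
}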
+14 -44
drivers/char/drm/mga_drv.c
··· 38 #include "drm_pciids.h" 39 40 static int mga_driver_device_is_agp(drm_device_t * dev); 41 - static int postinit(struct drm_device *dev, unsigned long flags) 42 - { 43 - drm_mga_private_t *const dev_priv = 44 - (drm_mga_private_t *) dev->dev_private; 45 - 46 - dev_priv->mmio_base = pci_resource_start(dev->pdev, 1); 47 - dev_priv->mmio_size = pci_resource_len(dev->pdev, 1); 48 - 49 - dev->counters += 3; 50 - dev->types[6] = _DRM_STAT_IRQ; 51 - dev->types[7] = _DRM_STAT_PRIMARY; 52 - dev->types[8] = _DRM_STAT_SECONDARY; 53 - 54 - DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n", 55 - DRIVER_NAME, 56 - DRIVER_MAJOR, 57 - DRIVER_MINOR, 58 - DRIVER_PATCHLEVEL, 59 - DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev) 60 - ); 61 - return 0; 62 - } 63 - 64 - static int version(drm_version_t * version) 65 - { 66 - int len; 67 - 68 - version->version_major = DRIVER_MAJOR; 69 - version->version_minor = DRIVER_MINOR; 70 - version->version_patchlevel = DRIVER_PATCHLEVEL; 71 - DRM_COPY(version->name, DRIVER_NAME); 72 - DRM_COPY(version->date, DRIVER_DATE); 73 - DRM_COPY(version->desc, DRIVER_DESC); 74 - return 0; 75 - } 76 77 static struct pci_device_id pciidlist[] = { 78 mga_PCI_IDS ··· 45 46 static struct drm_driver driver = { 47 .driver_features = 48 - DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | 49 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 50 DRIVER_IRQ_VBL, 51 - .preinit = mga_driver_preinit, 52 - .postcleanup = mga_driver_postcleanup, 53 - .pretakedown = mga_driver_pretakedown, 54 .dma_quiescent = mga_driver_dma_quiescent, 55 .device_is_agp = mga_driver_device_is_agp, 56 .vblank_wait = mga_driver_vblank_wait, ··· 61 .reclaim_buffers = drm_core_reclaim_buffers, 62 .get_map_ofs = drm_core_get_map_ofs, 63 .get_reg_ofs = drm_core_get_reg_ofs, 64 - .postinit = postinit, 65 - .version = version, 66 .ioctls = mga_ioctls, 67 .dma_ioctl = mga_dma_buffers, 68 .fops = { ··· 76 #endif 77 }, 78 .pci_driver = { 79 - .name = DRIVER_NAME, 80 - .id_table = pciidlist, 81 - } 82 }; 83 84 static int __init mga_init(void)
··· 38 #include "drm_pciids.h" 39 40 static int mga_driver_device_is_agp(drm_device_t * dev); 41 42 static struct pci_device_id pciidlist[] = { 43 mga_PCI_IDS ··· 80 81 static struct drm_driver driver = { 82 .driver_features = 83 + DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | 84 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 85 DRIVER_IRQ_VBL, 86 + .load = mga_driver_load, 87 + .unload = mga_driver_unload, 88 + .lastclose = mga_driver_lastclose, 89 .dma_quiescent = mga_driver_dma_quiescent, 90 .device_is_agp = mga_driver_device_is_agp, 91 .vblank_wait = mga_driver_vblank_wait, ··· 96 .reclaim_buffers = drm_core_reclaim_buffers, 97 .get_map_ofs = drm_core_get_map_ofs, 98 .get_reg_ofs = drm_core_get_reg_ofs, 99 .ioctls = mga_ioctls, 100 .dma_ioctl = mga_dma_buffers, 101 .fops = { ··· 113 #endif 114 }, 115 .pci_driver = { 116 + .name = DRIVER_NAME, 117 + .id_table = pciidlist, 118 + }, 119 + 120 + .name = DRIVER_NAME, 121 + .desc = DRIVER_DESC, 122 + .date = DRIVER_DATE, 123 + .major = DRIVER_MAJOR, 124 + .minor = DRIVER_MINOR, 125 + .patchlevel = DRIVER_PATCHLEVEL, 126 }; 127 128 static int __init mga_init(void)
+7 -7
drivers/char/drm/mga_drv.h
··· 38 39 #define DRIVER_NAME "mga" 40 #define DRIVER_DESC "Matrox G200/G400" 41 - #define DRIVER_DATE "20050607" 42 43 #define DRIVER_MAJOR 3 44 #define DRIVER_MINOR 2 45 - #define DRIVER_PATCHLEVEL 0 46 47 typedef struct drm_mga_primary_buffer { 48 u8 *start; ··· 144 drm_local_map_t *primary; 145 drm_local_map_t *agp_textures; 146 147 - DRM_AGP_MEM *agp_mem; 148 - unsigned int agp_pages; 149 } drm_mga_private_t; 150 151 extern drm_ioctl_desc_t mga_ioctls[]; 152 extern int mga_max_ioctl; 153 154 /* mga_dma.c */ 155 - extern int mga_driver_preinit(drm_device_t * dev, unsigned long flags); 156 extern int mga_dma_bootstrap(DRM_IOCTL_ARGS); 157 extern int mga_dma_init(DRM_IOCTL_ARGS); 158 extern int mga_dma_flush(DRM_IOCTL_ARGS); 159 extern int mga_dma_reset(DRM_IOCTL_ARGS); 160 extern int mga_dma_buffers(DRM_IOCTL_ARGS); 161 - extern int mga_driver_postcleanup(drm_device_t * dev); 162 - extern void mga_driver_pretakedown(drm_device_t * dev); 163 extern int mga_driver_dma_quiescent(drm_device_t * dev); 164 165 extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
··· 38 39 #define DRIVER_NAME "mga" 40 #define DRIVER_DESC "Matrox G200/G400" 41 + #define DRIVER_DATE "20051102" 42 43 #define DRIVER_MAJOR 3 44 #define DRIVER_MINOR 2 45 + #define DRIVER_PATCHLEVEL 1 46 47 typedef struct drm_mga_primary_buffer { 48 u8 *start; ··· 144 drm_local_map_t *primary; 145 drm_local_map_t *agp_textures; 146 147 + unsigned long agp_handle; 148 + unsigned int agp_size; 149 } drm_mga_private_t; 150 151 extern drm_ioctl_desc_t mga_ioctls[]; 152 extern int mga_max_ioctl; 153 154 /* mga_dma.c */ 155 extern int mga_dma_bootstrap(DRM_IOCTL_ARGS); 156 extern int mga_dma_init(DRM_IOCTL_ARGS); 157 extern int mga_dma_flush(DRM_IOCTL_ARGS); 158 extern int mga_dma_reset(DRM_IOCTL_ARGS); 159 extern int mga_dma_buffers(DRM_IOCTL_ARGS); 160 + extern int mga_driver_load(drm_device_t *dev, unsigned long flags); 161 + extern int mga_driver_unload(drm_device_t * dev); 162 + extern void mga_driver_lastclose(drm_device_t * dev); 163 extern int mga_driver_dma_quiescent(drm_device_t * dev); 164 165 extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
+13 -13
drivers/char/drm/mga_state.c
··· 1127 } 1128 1129 drm_ioctl_desc_t mga_ioctls[] = { 1130 - [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, 1, 1}, 1131 - [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, 1, 0}, 1132 - [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, 1, 0}, 1133 - [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, 1, 0}, 1134 - [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, 1, 0}, 1135 - [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, 1, 0}, 1136 - [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, 1, 0}, 1137 - [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, 1, 0}, 1138 - [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, 1, 0}, 1139 - [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, 1, 0}, 1140 - [DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, 1, 0}, 1141 - [DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, 1, 0}, 1142 - [DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, 1, 1}, 1143 }; 1144 1145 int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
··· 1127 } 1128 1129 drm_ioctl_desc_t mga_ioctls[] = { 1130 + [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1131 + [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, DRM_AUTH}, 1132 + [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, DRM_AUTH}, 1133 + [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, DRM_AUTH}, 1134 + [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, DRM_AUTH}, 1135 + [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, DRM_AUTH}, 1136 + [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, DRM_AUTH}, 1137 + [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, DRM_AUTH}, 1138 + [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, DRM_AUTH}, 1139 + [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, DRM_AUTH}, 1140 + [DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, DRM_AUTH}, 1141 + [DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, DRM_AUTH}, 1142 + [DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1143 }; 1144 1145 int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
+9 -6
drivers/char/drm/r128_cce.c
··· 1 - /* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*- 2 * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com 3 - * 4 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. 5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 6 * All Rights Reserved. ··· 560 if (dev_priv->is_pci) { 561 #endif 562 dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN; 563 - dev_priv->gart_info.addr = dev_priv->gart_info.bus_addr = 0; 564 dev_priv->gart_info.is_pcie = 0; 565 if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { 566 DRM_ERROR("failed to init PCI GART!\n"); ··· 603 drm_core_ioremapfree(dev_priv->cce_ring, dev); 604 if (dev_priv->ring_rptr != NULL) 605 drm_core_ioremapfree(dev_priv->ring_rptr, dev); 606 - if (dev->agp_buffer_map != NULL) 607 drm_core_ioremapfree(dev->agp_buffer_map, dev); 608 } else 609 #endif 610 { 611 if (dev_priv->gart_info.bus_addr) 612 if (!drm_ati_pcigart_cleanup(dev, 613 - &dev_priv-> 614 - gart_info)) 615 DRM_ERROR 616 ("failed to cleanup PCI GART!\n"); 617 }
··· 1 + /* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*- 2 * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com 3 + */ 4 + /* 5 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. 6 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 7 * All Rights Reserved. ··· 559 if (dev_priv->is_pci) { 560 #endif 561 dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN; 562 + dev_priv->gart_info.addr = NULL; 563 + dev_priv->gart_info.bus_addr = 0; 564 dev_priv->gart_info.is_pcie = 0; 565 if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { 566 DRM_ERROR("failed to init PCI GART!\n"); ··· 601 drm_core_ioremapfree(dev_priv->cce_ring, dev); 602 if (dev_priv->ring_rptr != NULL) 603 drm_core_ioremapfree(dev_priv->ring_rptr, dev); 604 + if (dev->agp_buffer_map != NULL) { 605 drm_core_ioremapfree(dev->agp_buffer_map, dev); 606 + dev->agp_buffer_map = NULL; 607 + } 608 } else 609 #endif 610 { 611 if (dev_priv->gart_info.bus_addr) 612 if (!drm_ati_pcigart_cleanup(dev, 613 + &dev_priv->gart_info)) 614 DRM_ERROR 615 ("failed to cleanup PCI GART!\n"); 616 }
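The r128 cleanup path now sets dev->agp_buffer_map to NULL right after freeing it, the standard guard that turns a second cleanup pass into a no-op instead of a double free. The idiom, distilled:

#include <stdio.h>
#include <stdlib.h>

struct map { void *handle; };

static void iounmap_free(struct map **slot)
{
    if (*slot != NULL) {
        free((*slot)->handle);  /* release the mapping itself */
        free(*slot);
        *slot = NULL;           /* make repeat calls harmless */
    }
}

int main(void)
{
    struct map *m = malloc(sizeof(*m));

    m->handle = malloc(64);
    iounmap_free(&m);
    iounmap_free(&m);           /* safe: the slot was cleared */
    printf("map is %s\n", m ? "live" : "released");
    return 0;
}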
+2 -2
drivers/char/drm/r128_drm.h
··· 1 /* r128_drm.h -- Public header for the r128 driver -*- linux-c -*- 2 * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com 3 - * 4 - * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. 5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 6 * All rights reserved. 7 *
··· 1 /* r128_drm.h -- Public header for the r128 driver -*- linux-c -*- 2 * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com 3 + */ 4 + /* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. 5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 6 * All rights reserved. 7 *
+14 -34
drivers/char/drm/r128_drv.c
··· 37 38 #include "drm_pciids.h" 39 40 - static int postinit(struct drm_device *dev, unsigned long flags) 41 - { 42 - DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n", 43 - DRIVER_NAME, 44 - DRIVER_MAJOR, 45 - DRIVER_MINOR, 46 - DRIVER_PATCHLEVEL, 47 - DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev) 48 - ); 49 - return 0; 50 - } 51 - 52 - static int version(drm_version_t * version) 53 - { 54 - int len; 55 - 56 - version->version_major = DRIVER_MAJOR; 57 - version->version_minor = DRIVER_MINOR; 58 - version->version_patchlevel = DRIVER_PATCHLEVEL; 59 - DRM_COPY(version->name, DRIVER_NAME); 60 - DRM_COPY(version->date, DRIVER_DATE); 61 - DRM_COPY(version->desc, DRIVER_DESC); 62 - return 0; 63 - } 64 - 65 static struct pci_device_id pciidlist[] = { 66 r128_PCI_IDS 67 }; ··· 47 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 48 DRIVER_IRQ_VBL, 49 .dev_priv_size = sizeof(drm_r128_buf_priv_t), 50 - .prerelease = r128_driver_prerelease, 51 - .pretakedown = r128_driver_pretakedown, 52 .vblank_wait = r128_driver_vblank_wait, 53 .irq_preinstall = r128_driver_irq_preinstall, 54 .irq_postinstall = r128_driver_irq_postinstall, ··· 57 .reclaim_buffers = drm_core_reclaim_buffers, 58 .get_map_ofs = drm_core_get_map_ofs, 59 .get_reg_ofs = drm_core_get_reg_ofs, 60 - .postinit = postinit, 61 - .version = version, 62 .ioctls = r128_ioctls, 63 .dma_ioctl = r128_cce_buffers, 64 .fops = { ··· 70 #ifdef CONFIG_COMPAT 71 .compat_ioctl = r128_compat_ioctl, 72 #endif 73 - } 74 - , 75 .pci_driver = { 76 - .name = DRIVER_NAME, 77 - .id_table = pciidlist, 78 - } 79 }; 80 81 static int __init r128_init(void)
··· 37 38 #include "drm_pciids.h" 39 40 static struct pci_device_id pciidlist[] = { 41 r128_PCI_IDS 42 }; ··· 72 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 73 DRIVER_IRQ_VBL, 74 .dev_priv_size = sizeof(drm_r128_buf_priv_t), 75 + .preclose = r128_driver_preclose, 76 + .lastclose = r128_driver_lastclose, 77 .vblank_wait = r128_driver_vblank_wait, 78 .irq_preinstall = r128_driver_irq_preinstall, 79 .irq_postinstall = r128_driver_irq_postinstall, ··· 82 .reclaim_buffers = drm_core_reclaim_buffers, 83 .get_map_ofs = drm_core_get_map_ofs, 84 .get_reg_ofs = drm_core_get_reg_ofs, 85 .ioctls = r128_ioctls, 86 .dma_ioctl = r128_cce_buffers, 87 .fops = { ··· 97 #ifdef CONFIG_COMPAT 98 .compat_ioctl = r128_compat_ioctl, 99 #endif 100 + }, 101 + 102 .pci_driver = { 103 + .name = DRIVER_NAME, 104 + .id_table = pciidlist, 105 + }, 106 + 107 + .name = DRIVER_NAME, 108 + .desc = DRIVER_DESC, 109 + .date = DRIVER_DATE, 110 + .major = DRIVER_MAJOR, 111 + .minor = DRIVER_MINOR, 112 + .patchlevel = DRIVER_PATCHLEVEL, 113 }; 114 115 static int __init r128_init(void)
+4 -4
drivers/char/drm/r128_drv.h
··· 1 /* r128_drv.h -- Private header for r128 driver -*- linux-c -*- 2 * Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com 3 - * 4 - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 6 * All rights reserved. 7 * ··· 154 extern void r128_driver_irq_preinstall(drm_device_t * dev); 155 extern void r128_driver_irq_postinstall(drm_device_t * dev); 156 extern void r128_driver_irq_uninstall(drm_device_t * dev); 157 - extern void r128_driver_pretakedown(drm_device_t * dev); 158 - extern void r128_driver_prerelease(drm_device_t * dev, DRMFILE filp); 159 160 extern long r128_compat_ioctl(struct file *filp, unsigned int cmd, 161 unsigned long arg);
··· 1 /* r128_drv.h -- Private header for r128 driver -*- linux-c -*- 2 * Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com 3 + */ 4 + /* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 6 * All rights reserved. 7 * ··· 154 extern void r128_driver_irq_preinstall(drm_device_t * dev); 155 extern void r128_driver_irq_postinstall(drm_device_t * dev); 156 extern void r128_driver_irq_uninstall(drm_device_t * dev); 157 + extern void r128_driver_lastclose(drm_device_t * dev); 158 + extern void r128_driver_preclose(drm_device_t * dev, DRMFILE filp); 159 160 extern long r128_compat_ioctl(struct file *filp, unsigned int cmd, 161 unsigned long arg);
+2 -2
drivers/char/drm/r128_irq.c
··· 1 - /* r128_irq.c -- IRQ handling for radeon -*- linux-c -*- 2 - * 3 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. 4 * 5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
··· 1 + /* r128_irq.c -- IRQ handling for radeon -*- linux-c -*- */ 2 + /* 3 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. 4 * 5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
+21 -21
drivers/char/drm/r128_state.c
··· 1 /* r128_state.c -- State support for r128 -*- linux-c -*- 2 * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com 3 - * 4 - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a ··· 1674 return 0; 1675 } 1676 1677 - void r128_driver_prerelease(drm_device_t * dev, DRMFILE filp) 1678 { 1679 if (dev->dev_private) { 1680 drm_r128_private_t *dev_priv = dev->dev_private; ··· 1684 } 1685 } 1686 1687 - void r128_driver_pretakedown(drm_device_t * dev) 1688 { 1689 r128_do_cleanup_cce(dev); 1690 } 1691 1692 drm_ioctl_desc_t r128_ioctls[] = { 1693 - [DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, 1, 1}, 1694 - [DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, 1, 1}, 1695 - [DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, 1, 1}, 1696 - [DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, 1, 1}, 1697 - [DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, 1, 0}, 1698 - [DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, 1, 0}, 1699 - [DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, 1, 0}, 1700 - [DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, 1, 0}, 1701 - [DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, 1, 0}, 1702 - [DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, 1, 0}, 1703 - [DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, 1, 0}, 1704 - [DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, 1, 0}, 1705 - [DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, 1, 0}, 1706 - [DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, 1, 0}, 1707 - [DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, 1, 0}, 1708 - [DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, 1, 1}, 1709 - [DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, 1, 0}, 1710 }; 1711 1712 int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
··· 1 /* r128_state.c -- State support for r128 -*- linux-c -*- 2 * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com 3 + */ 4 + /* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a ··· 1674 return 0; 1675 } 1676 1677 + void r128_driver_preclose(drm_device_t * dev, DRMFILE filp) 1678 { 1679 if (dev->dev_private) { 1680 drm_r128_private_t *dev_priv = dev->dev_private; ··· 1684 } 1685 } 1686 1687 + void r128_driver_lastclose(drm_device_t * dev) 1688 { 1689 r128_do_cleanup_cce(dev); 1690 } 1691 1692 drm_ioctl_desc_t r128_ioctls[] = { 1693 + [DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1694 + [DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1695 + [DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1696 + [DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1697 + [DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, DRM_AUTH}, 1698 + [DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, DRM_AUTH}, 1699 + [DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, DRM_AUTH}, 1700 + [DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, DRM_AUTH}, 1701 + [DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, DRM_AUTH}, 1702 + [DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, DRM_AUTH}, 1703 + [DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, DRM_AUTH}, 1704 + [DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, DRM_AUTH}, 1705 + [DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, DRM_AUTH}, 1706 + [DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, DRM_AUTH}, 1707 + [DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, DRM_AUTH}, 1708 + [DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1709 + [DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, DRM_AUTH}, 1710 }; 1711 1712 int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
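The interesting part of the r128_state.c hunk is the ioctl table: the old positional {handler, auth_needed, root_only} initializers become a single self-describing flags word, where the old auth column maps to DRM_AUTH and the old root-only column expands to DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY. A simplified sketch of the permission gate those flags drive (condensed from the generic drm_ioctl() of this period; field names may differ slightly):

	static int sketch_ioctl_permitted(drm_ioctl_desc_t *ioctl,
					  drm_file_t *priv)
	{
		if ((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN))
			return 0;
		if ((ioctl->flags & DRM_AUTH) && !priv->authenticated)
			return 0;
		if ((ioctl->flags & DRM_MASTER) && !priv->master)
			return 0;
		return 1;
	}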
+20 -18
drivers/char/drm/r300_cmdbuf.c
··· 52 * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command 53 * buffer, starting with index n. 54 */ 55 - static int r300_emit_cliprects(drm_radeon_private_t * dev_priv, 56 - drm_radeon_kcmd_buffer_t * cmdbuf, int n) 57 { 58 drm_clip_rect_t box; 59 int nr; ··· 216 ADD_RANGE(R300_TX_UNK1_0, 16); 217 ADD_RANGE(R300_TX_SIZE_0, 16); 218 ADD_RANGE(R300_TX_FORMAT_0, 16); 219 /* Texture offset is dangerous and needs more checking */ 220 ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET); 221 ADD_RANGE(R300_TX_UNK4_0, 16); ··· 243 244 /* we expect offsets passed to the framebuffer to be either within video memory or 245 within AGP space */ 246 - static __inline__ int r300_check_offset(drm_radeon_private_t * dev_priv, 247 u32 offset) 248 { 249 /* we realy want to check against end of video aperture ··· 318 * 319 * Note that checks are performed on contents and addresses of the registers 320 */ 321 - static __inline__ int r300_emit_packet0(drm_radeon_private_t * dev_priv, 322 - drm_radeon_kcmd_buffer_t * cmdbuf, 323 drm_r300_cmd_header_t header) 324 { 325 int reg; ··· 364 * the graphics card. 365 * Called by r300_do_cp_cmdbuf. 366 */ 367 - static __inline__ int r300_emit_vpu(drm_radeon_private_t * dev_priv, 368 - drm_radeon_kcmd_buffer_t * cmdbuf, 369 drm_r300_cmd_header_t header) 370 { 371 int sz; ··· 401 * Emit a clear packet from userspace. 402 * Called by r300_emit_packet3. 403 */ 404 - static __inline__ int r300_emit_clear(drm_radeon_private_t * dev_priv, 405 - drm_radeon_kcmd_buffer_t * cmdbuf) 406 { 407 RING_LOCALS; 408 ··· 422 return 0; 423 } 424 425 - static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t * dev_priv, 426 - drm_radeon_kcmd_buffer_t * cmdbuf, 427 u32 header) 428 { 429 int count, i, k; ··· 490 return 0; 491 } 492 493 - static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t * dev_priv, 494 - drm_radeon_kcmd_buffer_t * cmdbuf) 495 { 496 u32 header; 497 int count; ··· 555 * Emit a rendering packet3 from userspace. 556 * Called by r300_do_cp_cmdbuf. 557 */ 558 - static __inline__ int r300_emit_packet3(drm_radeon_private_t * dev_priv, 559 - drm_radeon_kcmd_buffer_t * cmdbuf, 560 drm_r300_cmd_header_t header) 561 { 562 int n; ··· 624 /** 625 * Emit the sequence to pacify R300. 626 */ 627 - static __inline__ void r300_pacify(drm_radeon_private_t * dev_priv) 628 { 629 RING_LOCALS; 630 ··· 658 * commands on the DMA ring buffer. 659 * Called by the ioctl handler function radeon_cp_cmdbuf. 660 */ 661 - int r300_do_cp_cmdbuf(drm_device_t * dev, 662 DRMFILE filp, 663 - drm_file_t * filp_priv, drm_radeon_kcmd_buffer_t * cmdbuf) 664 { 665 drm_radeon_private_t *dev_priv = dev->dev_private; 666 drm_device_dma_t *dma = dev->dma;
··· 52 * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command 53 * buffer, starting with index n. 54 */ 55 + static int r300_emit_cliprects(drm_radeon_private_t *dev_priv, 56 + drm_radeon_kcmd_buffer_t *cmdbuf, int n) 57 { 58 drm_clip_rect_t box; 59 int nr; ··· 216 ADD_RANGE(R300_TX_UNK1_0, 16); 217 ADD_RANGE(R300_TX_SIZE_0, 16); 218 ADD_RANGE(R300_TX_FORMAT_0, 16); 219 + ADD_RANGE(R300_TX_PITCH_0, 16); 220 /* Texture offset is dangerous and needs more checking */ 221 ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET); 222 ADD_RANGE(R300_TX_UNK4_0, 16); ··· 242 243 /* we expect offsets passed to the framebuffer to be either within video memory or 244 within AGP space */ 245 + static __inline__ int r300_check_offset(drm_radeon_private_t *dev_priv, 246 u32 offset) 247 { 248 /* we realy want to check against end of video aperture ··· 317 * 318 * Note that checks are performed on contents and addresses of the registers 319 */ 320 + static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv, 321 + drm_radeon_kcmd_buffer_t *cmdbuf, 322 drm_r300_cmd_header_t header) 323 { 324 int reg; ··· 363 * the graphics card. 364 * Called by r300_do_cp_cmdbuf. 365 */ 366 + static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv, 367 + drm_radeon_kcmd_buffer_t *cmdbuf, 368 drm_r300_cmd_header_t header) 369 { 370 int sz; ··· 400 * Emit a clear packet from userspace. 401 * Called by r300_emit_packet3. 402 */ 403 + static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv, 404 + drm_radeon_kcmd_buffer_t *cmdbuf) 405 { 406 RING_LOCALS; 407 ··· 421 return 0; 422 } 423 424 + static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, 425 + drm_radeon_kcmd_buffer_t *cmdbuf, 426 u32 header) 427 { 428 int count, i, k; ··· 489 return 0; 490 } 491 492 + static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, 493 + drm_radeon_kcmd_buffer_t *cmdbuf) 494 { 495 u32 header; 496 int count; ··· 554 * Emit a rendering packet3 from userspace. 555 * Called by r300_do_cp_cmdbuf. 556 */ 557 + static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv, 558 + drm_radeon_kcmd_buffer_t *cmdbuf, 559 drm_r300_cmd_header_t header) 560 { 561 int n; ··· 623 /** 624 * Emit the sequence to pacify R300. 625 */ 626 + static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv) 627 { 628 RING_LOCALS; 629 ··· 657 * commands on the DMA ring buffer. 658 * Called by the ioctl handler function radeon_cp_cmdbuf. 659 */ 660 + int r300_do_cp_cmdbuf(drm_device_t *dev, 661 DRMFILE filp, 662 + drm_file_t *filp_priv, 663 + drm_radeon_kcmd_buffer_t *cmdbuf) 664 { 665 drm_radeon_private_t *dev_priv = dev->dev_private; 666 drm_device_dma_t *dma = dev->dma;
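The one functional change in r300_cmdbuf.c is the new ADD_RANGE(R300_TX_PITCH_0, 16), which whitelists the sixteen per-texture-unit pitch registers in the r300 command-stream verifier so clients can submit texrect state; the rest of the hunk is pointer-spacing cleanup. An illustrative predicate equivalent to what the new range admits (the function name is hypothetical; the verifier's real table is built by the ADD_RANGE macros):

	/* sixteen consecutive 32-bit registers: 0x4500 .. 0x453c */
	static int is_tx_pitch_reg(u32 reg)
	{
		return reg >= R300_TX_PITCH_0 &&
		       reg <  R300_TX_PITCH_0 + 16 * 4;
	}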
+1
drivers/char/drm/r300_reg.h
··· 797 798 # define R300_TX_FORMAT_YUV_MODE 0x00800000 799 800 #define R300_TX_OFFSET_0 0x4540 801 /* BEGIN: Guess from R200 */ 802 # define R300_TXO_ENDIAN_NO_SWAP (0 << 0)
··· 797 798 # define R300_TX_FORMAT_YUV_MODE 0x00800000 799 800 + #define R300_TX_PITCH_0 0x4500 801 #define R300_TX_OFFSET_0 0x4540 802 /* BEGIN: Guess from R200 */ 803 # define R300_TXO_ENDIAN_NO_SWAP (0 << 0)
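Note that the new block abuts the existing one exactly: 0x4500 plus sixteen 4-byte registers ends at 0x4540, which is R300_TX_OFFSET_0. A per-unit helper, if one were wanted, would be the obvious (hypothetical, not in the header):

	#define R300_TX_PITCH_n(n)	(R300_TX_PITCH_0 + 4 * (n))	/* n = 0..15 */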
+52 -54
drivers/char/drm/radeon_cp.c
··· 1 - /* radeon_cp.c -- CP support for Radeon -*- linux-c -*- 2 - * 3 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. 4 * Copyright 2000 VA Linux Systems, Inc., Fremont, California. 5 * All Rights Reserved. ··· 824 return RADEON_READ(RADEON_CLOCK_CNTL_DATA); 825 } 826 827 - static int RADEON_READ_PCIE(drm_radeon_private_t * dev_priv, int addr) 828 { 829 RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff); 830 return RADEON_READ(RADEON_PCIE_DATA); ··· 1125 | (dev_priv->fb_location >> 16)); 1126 1127 #if __OS_HAS_AGP 1128 - if (!dev_priv->is_pci) { 1129 RADEON_WRITE(RADEON_MC_AGP_LOCATION, 1130 (((dev_priv->gart_vm_start - 1 + 1131 dev_priv->gart_size) & 0xffff0000) | ··· 1152 dev_priv->ring.tail = cur_read_ptr; 1153 1154 #if __OS_HAS_AGP 1155 - if (!dev_priv->is_pci) { 1156 /* set RADEON_AGP_BASE here instead of relying on X from user space */ 1157 RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base); 1158 RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, ··· 1278 /* Enable or disable PCI GART on the chip */ 1279 static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) 1280 { 1281 - u32 tmp = RADEON_READ(RADEON_AIC_CNTL); 1282 1283 if (dev_priv->flags & CHIP_IS_PCIE) { 1284 radeon_set_pciegart(dev_priv, on); 1285 return; 1286 } 1287 1288 if (on) { 1289 RADEON_WRITE(RADEON_AIC_CNTL, ··· 1314 static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) 1315 { 1316 drm_radeon_private_t *dev_priv = dev->dev_private; 1317 DRM_DEBUG("\n"); 1318 1319 - dev_priv->is_pci = init->is_pci; 1320 1321 - if (dev_priv->is_pci && !dev->sg) { 1322 DRM_ERROR("PCI GART memory not allocated!\n"); 1323 - dev->dev_private = (void *)dev_priv; 1324 radeon_do_cleanup_cp(dev); 1325 return DRM_ERR(EINVAL); 1326 } ··· 1333 if (dev_priv->usec_timeout < 1 || 1334 dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) { 1335 DRM_DEBUG("TIMEOUT problem!\n"); 1336 - dev->dev_private = (void *)dev_priv; 1337 radeon_do_cleanup_cp(dev); 1338 return DRM_ERR(EINVAL); 1339 } 1340 1341 - switch (init->func) { 1342 case RADEON_INIT_R200_CP: 1343 dev_priv->microcode_version = UCODE_R200; 1344 break; ··· 1358 if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) && 1359 (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) { 1360 DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode); 1361 - dev->dev_private = (void *)dev_priv; 1362 radeon_do_cleanup_cp(dev); 1363 return DRM_ERR(EINVAL); 1364 } ··· 1420 1421 DRM_GETSAREA(); 1422 1423 - dev_priv->fb_offset = init->fb_offset; 1424 - dev_priv->mmio_offset = init->mmio_offset; 1425 dev_priv->ring_offset = init->ring_offset; 1426 dev_priv->ring_rptr_offset = init->ring_rptr_offset; 1427 dev_priv->buffers_offset = init->buffers_offset; ··· 1427 1428 if (!dev_priv->sarea) { 1429 DRM_ERROR("could not find sarea!\n"); 1430 - dev->dev_private = (void *)dev_priv; 1431 radeon_do_cleanup_cp(dev); 1432 return DRM_ERR(EINVAL); 1433 } 1434 1435 - dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); 1436 - if (!dev_priv->mmio) { 1437 - DRM_ERROR("could not find mmio region!\n"); 1438 - dev->dev_private = (void *)dev_priv; 1439 - radeon_do_cleanup_cp(dev); 1440 - return DRM_ERR(EINVAL); 1441 - } 1442 dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); 1443 if (!dev_priv->cp_ring) { 1444 DRM_ERROR("could not find cp ring region!\n"); 1445 - dev->dev_private = (void *)dev_priv; 1446 radeon_do_cleanup_cp(dev); 1447 return DRM_ERR(EINVAL); 1448 } 1449 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); 1450 if (!dev_priv->ring_rptr) { 1451 DRM_ERROR("could not find ring 
read pointer!\n"); 1452 - dev->dev_private = (void *)dev_priv; 1453 radeon_do_cleanup_cp(dev); 1454 return DRM_ERR(EINVAL); 1455 } ··· 1447 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 1448 if (!dev->agp_buffer_map) { 1449 DRM_ERROR("could not find dma buffer region!\n"); 1450 - dev->dev_private = (void *)dev_priv; 1451 radeon_do_cleanup_cp(dev); 1452 return DRM_ERR(EINVAL); 1453 } ··· 1456 drm_core_findmap(dev, init->gart_textures_offset); 1457 if (!dev_priv->gart_textures) { 1458 DRM_ERROR("could not find GART texture region!\n"); 1459 - dev->dev_private = (void *)dev_priv; 1460 radeon_do_cleanup_cp(dev); 1461 return DRM_ERR(EINVAL); 1462 } ··· 1466 init->sarea_priv_offset); 1467 1468 #if __OS_HAS_AGP 1469 - if (!dev_priv->is_pci) { 1470 drm_core_ioremap(dev_priv->cp_ring, dev); 1471 drm_core_ioremap(dev_priv->ring_rptr, dev); 1472 drm_core_ioremap(dev->agp_buffer_map, dev); ··· 1474 !dev_priv->ring_rptr->handle || 1475 !dev->agp_buffer_map->handle) { 1476 DRM_ERROR("could not find ioremap agp regions!\n"); 1477 - dev->dev_private = (void *)dev_priv; 1478 radeon_do_cleanup_cp(dev); 1479 return DRM_ERR(EINVAL); 1480 } ··· 1514 + RADEON_READ(RADEON_CONFIG_APER_SIZE); 1515 1516 #if __OS_HAS_AGP 1517 - if (!dev_priv->is_pci) 1518 dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset 1519 - dev->agp->base 1520 + dev_priv->gart_vm_start); ··· 1540 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; 1541 1542 #if __OS_HAS_AGP 1543 - if (!dev_priv->is_pci) { 1544 /* Turn off PCI GART */ 1545 radeon_set_pcigart(dev_priv, 0); 1546 } else ··· 1550 if (dev_priv->pcigart_offset) { 1551 dev_priv->gart_info.bus_addr = 1552 dev_priv->pcigart_offset + dev_priv->fb_location; 1553 dev_priv->gart_info.addr = 1554 - (unsigned long)drm_ioremap(dev_priv->gart_info. 
1555 - bus_addr, 1556 - RADEON_PCIGART_TABLE_SIZE, 1557 - dev); 1558 1559 dev_priv->gart_info.is_pcie = 1560 !!(dev_priv->flags & CHIP_IS_PCIE); 1561 dev_priv->gart_info.gart_table_location = 1562 DRM_ATI_GART_FB; 1563 1564 - DRM_DEBUG("Setting phys_pci_gart to %08lX %08lX\n", 1565 dev_priv->gart_info.addr, 1566 dev_priv->pcigart_offset); 1567 } else { 1568 dev_priv->gart_info.gart_table_location = 1569 DRM_ATI_GART_MAIN; 1570 - dev_priv->gart_info.addr = 1571 - dev_priv->gart_info.bus_addr = 0; 1572 if (dev_priv->flags & CHIP_IS_PCIE) { 1573 DRM_ERROR 1574 ("Cannot use PCI Express without GART in FB memory\n"); ··· 1582 1583 if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { 1584 DRM_ERROR("failed to init PCI GART!\n"); 1585 - dev->dev_private = (void *)dev_priv; 1586 radeon_do_cleanup_cp(dev); 1587 return DRM_ERR(ENOMEM); 1588 } ··· 1594 radeon_cp_init_ring_buffer(dev, dev_priv); 1595 1596 dev_priv->last_buf = 0; 1597 - 1598 - dev->dev_private = (void *)dev_priv; 1599 1600 radeon_do_engine_reset(dev); 1601 ··· 1613 drm_irq_uninstall(dev); 1614 1615 #if __OS_HAS_AGP 1616 - if (!dev_priv->is_pci) { 1617 - if (dev_priv->cp_ring != NULL) 1618 drm_core_ioremapfree(dev_priv->cp_ring, dev); 1619 - if (dev_priv->ring_rptr != NULL) 1620 drm_core_ioremapfree(dev_priv->ring_rptr, dev); 1621 if (dev->agp_buffer_map != NULL) { 1622 drm_core_ioremapfree(dev->agp_buffer_map, dev); 1623 dev->agp_buffer_map = NULL; ··· 1629 } else 1630 #endif 1631 { 1632 - if (dev_priv->gart_info.bus_addr) 1633 if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info)) 1634 DRM_ERROR("failed to cleanup PCI GART!\n"); 1635 1636 - if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) { 1637 - drm_ioremapfree((void *)dev_priv->gart_info.addr, 1638 - RADEON_PCIGART_TABLE_SIZE, dev); 1639 dev_priv->gart_info.addr = 0; 1640 } 1641 } 1642 - 1643 /* only clear to the start of flags */ 1644 memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags)); 1645 ··· 1668 DRM_DEBUG("Starting radeon_do_resume_cp()\n"); 1669 1670 #if __OS_HAS_AGP 1671 - if (!dev_priv->is_pci) { 1672 /* Turn off PCI GART */ 1673 radeon_set_pcigart(dev_priv, 0); 1674 } else ··· 2099 return ret; 2100 } 2101 2102 - int radeon_driver_preinit(struct drm_device *dev, unsigned long flags) 2103 { 2104 drm_radeon_private_t *dev_priv; 2105 int ret = 0; ··· 2132 dev_priv->flags |= CHIP_IS_PCIE; 2133 2134 DRM_DEBUG("%s card detected\n", 2135 - ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : "PCI")); 2136 return ret; 2137 } 2138 2139 - int radeon_presetup(struct drm_device *dev) 2140 { 2141 int ret; 2142 drm_local_map_t *map; ··· 2160 return 0; 2161 } 2162 2163 - int radeon_driver_postcleanup(struct drm_device *dev) 2164 { 2165 drm_radeon_private_t *dev_priv = dev->dev_private; 2166 2167 DRM_DEBUG("\n"); 2168 - 2169 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); 2170 2171 dev->dev_private = NULL;
··· 1 + /* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */ 2 + /* 3 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. 4 * Copyright 2000 VA Linux Systems, Inc., Fremont, California. 5 * All Rights Reserved. ··· 824 return RADEON_READ(RADEON_CLOCK_CNTL_DATA); 825 } 826 827 + static int RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr) 828 { 829 RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff); 830 return RADEON_READ(RADEON_PCIE_DATA); ··· 1125 | (dev_priv->fb_location >> 16)); 1126 1127 #if __OS_HAS_AGP 1128 + if (dev_priv->flags & CHIP_IS_AGP) { 1129 RADEON_WRITE(RADEON_MC_AGP_LOCATION, 1130 (((dev_priv->gart_vm_start - 1 + 1131 dev_priv->gart_size) & 0xffff0000) | ··· 1152 dev_priv->ring.tail = cur_read_ptr; 1153 1154 #if __OS_HAS_AGP 1155 + if (dev_priv->flags & CHIP_IS_AGP) { 1156 /* set RADEON_AGP_BASE here instead of relying on X from user space */ 1157 RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base); 1158 RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, ··· 1278 /* Enable or disable PCI GART on the chip */ 1279 static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) 1280 { 1281 + u32 tmp; 1282 1283 if (dev_priv->flags & CHIP_IS_PCIE) { 1284 radeon_set_pciegart(dev_priv, on); 1285 return; 1286 } 1287 + 1288 + tmp = RADEON_READ(RADEON_AIC_CNTL); 1289 1290 if (on) { 1291 RADEON_WRITE(RADEON_AIC_CNTL, ··· 1312 static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) 1313 { 1314 drm_radeon_private_t *dev_priv = dev->dev_private; 1315 + 1316 DRM_DEBUG("\n"); 1317 1318 + if (init->is_pci && (dev_priv->flags & CHIP_IS_AGP)) 1319 + { 1320 + DRM_DEBUG("Forcing AGP card to PCI mode\n"); 1321 + dev_priv->flags &= ~CHIP_IS_AGP; 1322 + } 1323 1324 + if ((!(dev_priv->flags & CHIP_IS_AGP)) && !dev->sg) { 1325 DRM_ERROR("PCI GART memory not allocated!\n"); 1326 radeon_do_cleanup_cp(dev); 1327 return DRM_ERR(EINVAL); 1328 } ··· 1327 if (dev_priv->usec_timeout < 1 || 1328 dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) { 1329 DRM_DEBUG("TIMEOUT problem!\n"); 1330 radeon_do_cleanup_cp(dev); 1331 return DRM_ERR(EINVAL); 1332 } 1333 1334 + switch(init->func) { 1335 case RADEON_INIT_R200_CP: 1336 dev_priv->microcode_version = UCODE_R200; 1337 break; ··· 1353 if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) && 1354 (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) { 1355 DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode); 1356 radeon_do_cleanup_cp(dev); 1357 return DRM_ERR(EINVAL); 1358 } ··· 1416 1417 DRM_GETSAREA(); 1418 1419 dev_priv->ring_offset = init->ring_offset; 1420 dev_priv->ring_rptr_offset = init->ring_rptr_offset; 1421 dev_priv->buffers_offset = init->buffers_offset; ··· 1425 1426 if (!dev_priv->sarea) { 1427 DRM_ERROR("could not find sarea!\n"); 1428 radeon_do_cleanup_cp(dev); 1429 return DRM_ERR(EINVAL); 1430 } 1431 1432 dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); 1433 if (!dev_priv->cp_ring) { 1434 DRM_ERROR("could not find cp ring region!\n"); 1435 radeon_do_cleanup_cp(dev); 1436 return DRM_ERR(EINVAL); 1437 } 1438 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); 1439 if (!dev_priv->ring_rptr) { 1440 DRM_ERROR("could not find ring read pointer!\n"); 1441 radeon_do_cleanup_cp(dev); 1442 return DRM_ERR(EINVAL); 1443 } ··· 1455 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 1456 if (!dev->agp_buffer_map) { 1457 DRM_ERROR("could not find dma buffer region!\n"); 1458 radeon_do_cleanup_cp(dev); 1459 return DRM_ERR(EINVAL); 1460 } ··· 1465 drm_core_findmap(dev, init->gart_textures_offset); 
1466 if (!dev_priv->gart_textures) { 1467 DRM_ERROR("could not find GART texture region!\n"); 1468 radeon_do_cleanup_cp(dev); 1469 return DRM_ERR(EINVAL); 1470 } ··· 1476 init->sarea_priv_offset); 1477 1478 #if __OS_HAS_AGP 1479 + if (dev_priv->flags & CHIP_IS_AGP) { 1480 drm_core_ioremap(dev_priv->cp_ring, dev); 1481 drm_core_ioremap(dev_priv->ring_rptr, dev); 1482 drm_core_ioremap(dev->agp_buffer_map, dev); ··· 1484 !dev_priv->ring_rptr->handle || 1485 !dev->agp_buffer_map->handle) { 1486 DRM_ERROR("could not find ioremap agp regions!\n"); 1487 radeon_do_cleanup_cp(dev); 1488 return DRM_ERR(EINVAL); 1489 } ··· 1525 + RADEON_READ(RADEON_CONFIG_APER_SIZE); 1526 1527 #if __OS_HAS_AGP 1528 + if (dev_priv->flags & CHIP_IS_AGP) 1529 dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset 1530 - dev->agp->base 1531 + dev_priv->gart_vm_start); ··· 1551 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; 1552 1553 #if __OS_HAS_AGP 1554 + if (dev_priv->flags & CHIP_IS_AGP) { 1555 /* Turn off PCI GART */ 1556 radeon_set_pcigart(dev_priv, 0); 1557 } else ··· 1561 if (dev_priv->pcigart_offset) { 1562 dev_priv->gart_info.bus_addr = 1563 dev_priv->pcigart_offset + dev_priv->fb_location; 1564 + dev_priv->gart_info.mapping.offset = 1565 + dev_priv->gart_info.bus_addr; 1566 + dev_priv->gart_info.mapping.size = 1567 + RADEON_PCIGART_TABLE_SIZE; 1568 + 1569 + drm_core_ioremap(&dev_priv->gart_info.mapping, dev); 1570 dev_priv->gart_info.addr = 1571 + dev_priv->gart_info.mapping.handle; 1572 1573 dev_priv->gart_info.is_pcie = 1574 !!(dev_priv->flags & CHIP_IS_PCIE); 1575 dev_priv->gart_info.gart_table_location = 1576 DRM_ATI_GART_FB; 1577 1578 + DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n", 1579 dev_priv->gart_info.addr, 1580 dev_priv->pcigart_offset); 1581 } else { 1582 dev_priv->gart_info.gart_table_location = 1583 DRM_ATI_GART_MAIN; 1584 + dev_priv->gart_info.addr = NULL; 1585 + dev_priv->gart_info.bus_addr = 0; 1586 if (dev_priv->flags & CHIP_IS_PCIE) { 1587 DRM_ERROR 1588 ("Cannot use PCI Express without GART in FB memory\n"); ··· 1590 1591 if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { 1592 DRM_ERROR("failed to init PCI GART!\n"); 1593 radeon_do_cleanup_cp(dev); 1594 return DRM_ERR(ENOMEM); 1595 } ··· 1603 radeon_cp_init_ring_buffer(dev, dev_priv); 1604 1605 dev_priv->last_buf = 0; 1606 1607 radeon_do_engine_reset(dev); 1608 ··· 1624 drm_irq_uninstall(dev); 1625 1626 #if __OS_HAS_AGP 1627 + if (dev_priv->flags & CHIP_IS_AGP) { 1628 + if (dev_priv->cp_ring != NULL) { 1629 drm_core_ioremapfree(dev_priv->cp_ring, dev); 1630 + dev_priv->cp_ring = NULL; 1631 + } 1632 + if (dev_priv->ring_rptr != NULL) { 1633 drm_core_ioremapfree(dev_priv->ring_rptr, dev); 1634 + dev_priv->ring_rptr = NULL; 1635 + } 1636 if (dev->agp_buffer_map != NULL) { 1637 drm_core_ioremapfree(dev->agp_buffer_map, dev); 1638 dev->agp_buffer_map = NULL; ··· 1636 } else 1637 #endif 1638 { 1639 + 1640 + if (dev_priv->gart_info.bus_addr) { 1641 + /* Turn off PCI GART */ 1642 + radeon_set_pcigart(dev_priv, 0); 1643 if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info)) 1644 DRM_ERROR("failed to cleanup PCI GART!\n"); 1645 + } 1646 1647 + if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) 1648 + { 1649 + drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev); 1650 dev_priv->gart_info.addr = 0; 1651 } 1652 } 1653 /* only clear to the start of flags */ 1654 memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags)); 1655 ··· 1672 DRM_DEBUG("Starting radeon_do_resume_cp()\n"); 1673 1674 #if __OS_HAS_AGP 1675 + if 
(dev_priv->flags & CHIP_IS_AGP) { 1676 /* Turn off PCI GART */ 1677 radeon_set_pcigart(dev_priv, 0); 1678 } else ··· 2103 return ret; 2104 } 2105 2106 + int radeon_driver_load(struct drm_device *dev, unsigned long flags) 2107 { 2108 drm_radeon_private_t *dev_priv; 2109 int ret = 0; ··· 2136 dev_priv->flags |= CHIP_IS_PCIE; 2137 2138 DRM_DEBUG("%s card detected\n", 2139 + ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : (((dev_priv->flags & CHIP_IS_PCIE) ? "PCIE" : "PCI")))); 2140 return ret; 2141 } 2142 2143 + /* Create mappings for registers and framebuffer so userland doesn't necessarily 2144 + * have to find them. 2145 + */ 2146 + int radeon_driver_firstopen(struct drm_device *dev) 2147 { 2148 int ret; 2149 drm_local_map_t *map; ··· 2161 return 0; 2162 } 2163 2164 + int radeon_driver_unload(struct drm_device *dev) 2165 { 2166 drm_radeon_private_t *dev_priv = dev->dev_private; 2167 2168 DRM_DEBUG("\n"); 2169 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); 2170 2171 dev->dev_private = NULL;
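The radeon_cp.c rework does four related things: it retires the per-init is_pci field in favor of the persistent CHIP_IS_AGP/CHIP_IS_PCIE flags detected at load time (and lets an AGP card be forced down to PCI mode on request); it maps the in-FB GART table through drm_core_ioremap() and a proper gart_info.mapping instead of a bare drm_ioremap(); it defers the RADEON_AIC_CNTL read in radeon_set_pcigart() until after the PCIE early return; and it renames the lifecycle hooks (preinit/presetup/postcleanup become load/firstopen/unload). The bus-type dispatch that replaces the old is_pci tests follows one pattern throughout; a minimal sketch:

	/* replaces the old "if (dev_priv->is_pci)" style tests */
	if (dev_priv->flags & CHIP_IS_AGP) {
		/* AGP aperture path */
	} else if (dev_priv->flags & CHIP_IS_PCIE) {
		/* PCIE: the GART table must live in FB memory */
	} else {
		/* classic PCI GART */
	}

The cleanup path also now turns the PCI GART off before tearing the table down, and NULLs cp_ring/ring_rptr after freeing them so a repeated cleanup cannot double-free.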
+6
drivers/char/drm/radeon_drm.h
··· 624 int discard; 625 } drm_radeon_indirect_t; 626 627 /* 1.3: An ioctl to get parameters that aren't available to the 3d 628 * client any other way. 629 */ ··· 645 #define RADEON_PARAM_SAREA_HANDLE 9 646 #define RADEON_PARAM_GART_TEX_HANDLE 10 647 #define RADEON_PARAM_SCRATCH_OFFSET 11 648 649 typedef struct drm_radeon_getparam { 650 int param;
··· 624 int discard; 625 } drm_radeon_indirect_t; 626 627 + /* enum for card type parameters */ 628 + #define RADEON_CARD_PCI 0 629 + #define RADEON_CARD_AGP 1 630 + #define RADEON_CARD_PCIE 2 631 + 632 /* 1.3: An ioctl to get parameters that aren't available to the 3d 633 * client any other way. 634 */ ··· 640 #define RADEON_PARAM_SAREA_HANDLE 9 641 #define RADEON_PARAM_GART_TEX_HANDLE 10 642 #define RADEON_PARAM_SCRATCH_OFFSET 11 643 + #define RADEON_PARAM_CARD_TYPE 12 644 645 typedef struct drm_radeon_getparam { 646 int param;
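RADEON_PARAM_CARD_TYPE and the three RADEON_CARD_* values give 3D clients a direct answer about the bus type instead of guessing from PCI IDs. A hedged userspace sketch (raw ioctl shown, error handling elided; DRM_IOCTL_RADEON_GETPARAM and drm_radeon_getparam_t come from this same header):

	drm_radeon_getparam_t gp;
	int card_type;

	gp.param = RADEON_PARAM_CARD_TYPE;
	gp.value = &card_type;
	if (ioctl(fd, DRM_IOCTL_RADEON_GETPARAM, &gp) != 0)
		card_type = RADEON_CARD_PCI;	/* older kernel: parameter unknown */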
+27 -35
drivers/char/drm/radeon_drv.c
··· 42 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers\n"); 43 module_param_named(no_wb, radeon_no_wb, int, 0444); 44 45 - static int postinit(struct drm_device *dev, unsigned long flags) 46 { 47 - DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n", 48 - DRIVER_NAME, 49 - DRIVER_MAJOR, 50 - DRIVER_MINOR, 51 - DRIVER_PATCHLEVEL, 52 - DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev) 53 - ); 54 - return 0; 55 - } 56 57 - static int version(drm_version_t * version) 58 - { 59 - int len; 60 - 61 - version->version_major = DRIVER_MAJOR; 62 - version->version_minor = DRIVER_MINOR; 63 - version->version_patchlevel = DRIVER_PATCHLEVEL; 64 - DRM_COPY(version->name, DRIVER_NAME); 65 - DRM_COPY(version->date, DRIVER_DATE); 66 - DRM_COPY(version->desc, DRIVER_DESC); 67 - return 0; 68 } 69 70 static struct pci_device_id pciidlist[] = { ··· 63 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | 64 DRIVER_IRQ_VBL, 65 .dev_priv_size = sizeof(drm_radeon_buf_priv_t), 66 - .preinit = radeon_driver_preinit, 67 - .presetup = radeon_presetup, 68 - .postcleanup = radeon_driver_postcleanup, 69 - .prerelease = radeon_driver_prerelease, 70 - .pretakedown = radeon_driver_pretakedown, 71 - .open_helper = radeon_driver_open_helper, 72 .vblank_wait = radeon_driver_vblank_wait, 73 .irq_preinstall = radeon_driver_irq_preinstall, 74 .irq_postinstall = radeon_driver_irq_postinstall, 75 .irq_uninstall = radeon_driver_irq_uninstall, 76 .irq_handler = radeon_driver_irq_handler, 77 - .free_filp_priv = radeon_driver_free_filp_priv, 78 .reclaim_buffers = drm_core_reclaim_buffers, 79 .get_map_ofs = drm_core_get_map_ofs, 80 .get_reg_ofs = drm_core_get_reg_ofs, 81 - .postinit = postinit, 82 - .version = version, 83 .ioctls = radeon_ioctls, 84 .dma_ioctl = radeon_cp_buffers, 85 .fops = { ··· 92 #ifdef CONFIG_COMPAT 93 .compat_ioctl = radeon_compat_ioctl, 94 #endif 95 - } 96 - , 97 .pci_driver = { 98 - .name = DRIVER_NAME, 99 - .id_table = pciidlist, 100 - } 101 }; 102 103 static int __init radeon_init(void)
··· 42 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers\n"); 43 module_param_named(no_wb, radeon_no_wb, int, 0444); 44 45 + static int dri_library_name(struct drm_device *dev, char *buf) 46 { 47 + drm_radeon_private_t *dev_priv = dev->dev_private; 48 + int family = dev_priv->flags & CHIP_FAMILY_MASK; 49 50 + return snprintf(buf, PAGE_SIZE, "%s\n", 51 + (family < CHIP_R200) ? "radeon" : 52 + ((family < CHIP_R300) ? "r200" : 53 + "r300")); 54 } 55 56 static struct pci_device_id pciidlist[] = { ··· 77 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | 78 DRIVER_IRQ_VBL, 79 .dev_priv_size = sizeof(drm_radeon_buf_priv_t), 80 + .load = radeon_driver_load, 81 + .firstopen = radeon_driver_firstopen, 82 + .open = radeon_driver_open, 83 + .preclose = radeon_driver_preclose, 84 + .postclose = radeon_driver_postclose, 85 + .lastclose = radeon_driver_lastclose, 86 + .unload = radeon_driver_unload, 87 .vblank_wait = radeon_driver_vblank_wait, 88 + .dri_library_name = dri_library_name, 89 .irq_preinstall = radeon_driver_irq_preinstall, 90 .irq_postinstall = radeon_driver_irq_postinstall, 91 .irq_uninstall = radeon_driver_irq_uninstall, 92 .irq_handler = radeon_driver_irq_handler, 93 .reclaim_buffers = drm_core_reclaim_buffers, 94 .get_map_ofs = drm_core_get_map_ofs, 95 .get_reg_ofs = drm_core_get_reg_ofs, 96 .ioctls = radeon_ioctls, 97 .dma_ioctl = radeon_cp_buffers, 98 .fops = { ··· 107 #ifdef CONFIG_COMPAT 108 .compat_ioctl = radeon_compat_ioctl, 109 #endif 110 + }, 111 + 112 .pci_driver = { 113 + .name = DRIVER_NAME, 114 + .id_table = pciidlist, 115 + }, 116 + 117 + .name = DRIVER_NAME, 118 + .desc = DRIVER_DESC, 119 + .date = DRIVER_DATE, 120 + .major = DRIVER_MAJOR, 121 + .minor = DRIVER_MINOR, 122 + .patchlevel = DRIVER_PATCHLEVEL, 123 }; 124 125 static int __init radeon_init(void)
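Two things happen in radeon_drv.c. First, as in the r128 hunk, the postinit()/version() boilerplate dissolves into static drm_driver fields. Second, a new dri_library_name hook reports which userspace DRI module matches the chip ("radeon", "r200" or "r300") so the DRM core can expose it to userspace. The comparisons rely on the chip-family enum being chronological, which is exactly why the radeon_drv.h hunk below moves CHIP_RV200 ahead of CHIP_R200: RV200 is an r100-class part and must sort below CHIP_R200 for family < CHIP_R200 to select "radeon". A sketch of the plausible consumer side (helper names assumed; the real attribute would live in the DRM sysfs code):

	static ssize_t sketch_show_dri(struct class_device *cd, char *buf)
	{
		struct drm_device *dev = to_drm_dev(cd);	/* hypothetical */

		if (dev->driver->dri_library_name)
			return dev->driver->dri_library_name(dev, buf);
		return snprintf(buf, PAGE_SIZE, "%s\n",
				dev->driver->pci_driver.name);
	}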
+20 -21
drivers/char/drm/radeon_drv.h
··· 38 39 #define DRIVER_NAME "radeon" 40 #define DRIVER_DESC "ATI Radeon" 41 - #define DRIVER_DATE "20050911" 42 43 /* Interface history: 44 * ··· 73 * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color 74 * and GL_EXT_blend_[func|equation]_separate on r200 75 * 1.12- Add R300 CP microcode support - this just loads the CP on r300 76 - * (No 3D support yet - just microcode loading) 77 * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters 78 * - Add hyperz support, add hyperz flags to clear ioctl. 79 * 1.14- Add support for color tiling ··· 88 * R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR 89 * (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6) 90 * 1.19- Add support for gart table in FB memory and PCIE r300 91 */ 92 #define DRIVER_MAJOR 1 93 - #define DRIVER_MINOR 19 94 #define DRIVER_PATCHLEVEL 0 95 - 96 - #define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 ) 97 - #define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) ) 98 99 /* 100 * Radeon chip families ··· 102 CHIP_R100, 103 CHIP_RS100, 104 CHIP_RV100, 105 - CHIP_R200, 106 CHIP_RV200, 107 CHIP_RS200, 108 CHIP_R250, 109 CHIP_RS250, ··· 136 CHIP_HAS_HIERZ = 0x00100000UL, 137 CHIP_IS_PCIE = 0x00200000UL, 138 }; 139 140 typedef struct drm_radeon_freelist { 141 unsigned int age; ··· 247 248 drm_radeon_depth_clear_t depth_clear; 249 250 - unsigned long fb_offset; 251 - unsigned long mmio_offset; 252 unsigned long ring_offset; 253 unsigned long ring_rptr_offset; 254 unsigned long buffers_offset; ··· 273 274 /* starting from here on, data is preserved accross an open */ 275 uint32_t flags; /* see radeon_chip_flags */ 276 - int is_pci; 277 } drm_radeon_private_t; 278 279 typedef struct drm_radeon_buf_priv { ··· 329 extern void radeon_driver_irq_preinstall(drm_device_t * dev); 330 extern void radeon_driver_irq_postinstall(drm_device_t * dev); 331 extern void radeon_driver_irq_uninstall(drm_device_t * dev); 332 - extern void radeon_driver_prerelease(drm_device_t * dev, DRMFILE filp); 333 - extern void radeon_driver_pretakedown(drm_device_t * dev); 334 - extern int radeon_driver_open_helper(drm_device_t * dev, 335 - drm_file_t * filp_priv); 336 - extern void radeon_driver_free_filp_priv(drm_device_t * dev, 337 - drm_file_t * filp_priv); 338 339 - extern int radeon_preinit(struct drm_device *dev, unsigned long flags); 340 - extern int radeon_postinit(struct drm_device *dev, unsigned long flags); 341 - extern int radeon_postcleanup(struct drm_device *dev); 342 - 343 extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, 344 unsigned long arg); 345 ··· 360 */ 361 362 #define RADEON_AGP_COMMAND 0x0f60 363 #define RADEON_AUX_SCISSOR_CNTL 0x26f0 364 # define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24) 365 # define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25) ··· 649 650 #define RADEON_WAIT_UNTIL 0x1720 651 # define RADEON_WAIT_CRTC_PFLIP (1 << 0) 652 # define RADEON_WAIT_2D_IDLECLEAN (1 << 16) 653 # define RADEON_WAIT_3D_IDLECLEAN (1 << 17) 654 # define RADEON_WAIT_HOST_IDLECLEAN (1 << 18) ··· 1105 write = 0; \ 1106 _tab += _i; \ 1107 } \ 1108 - \ 1109 while (_size > 0) { \ 1110 *(ring + write) = *_tab++; \ 1111 write++; \
··· 38 39 #define DRIVER_NAME "radeon" 40 #define DRIVER_DESC "ATI Radeon" 41 + #define DRIVER_DATE "20051229" 42 43 /* Interface history: 44 * ··· 73 * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color 74 * and GL_EXT_blend_[func|equation]_separate on r200 75 * 1.12- Add R300 CP microcode support - this just loads the CP on r300 76 + * (No 3D support yet - just microcode loading). 77 * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters 78 * - Add hyperz support, add hyperz flags to clear ioctl. 79 * 1.14- Add support for color tiling ··· 88 * R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR 89 * (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6) 90 * 1.19- Add support for gart table in FB memory and PCIE r300 91 + * 1.20- Add support for r300 texrect 92 + * 1.21- Add support for card type getparam 93 */ 94 #define DRIVER_MAJOR 1 95 + #define DRIVER_MINOR 21 96 #define DRIVER_PATCHLEVEL 0 97 98 /* 99 * Radeon chip families ··· 103 CHIP_R100, 104 CHIP_RS100, 105 CHIP_RV100, 106 CHIP_RV200, 107 + CHIP_R200, 108 CHIP_RS200, 109 CHIP_R250, 110 CHIP_RS250, ··· 137 CHIP_HAS_HIERZ = 0x00100000UL, 138 CHIP_IS_PCIE = 0x00200000UL, 139 }; 140 + 141 + #define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 ) 142 + #define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) ) 143 144 typedef struct drm_radeon_freelist { 145 unsigned int age; ··· 245 246 drm_radeon_depth_clear_t depth_clear; 247 248 unsigned long ring_offset; 249 unsigned long ring_rptr_offset; 250 unsigned long buffers_offset; ··· 273 274 /* starting from here on, data is preserved accross an open */ 275 uint32_t flags; /* see radeon_chip_flags */ 276 } drm_radeon_private_t; 277 278 typedef struct drm_radeon_buf_priv { ··· 330 extern void radeon_driver_irq_preinstall(drm_device_t * dev); 331 extern void radeon_driver_irq_postinstall(drm_device_t * dev); 332 extern void radeon_driver_irq_uninstall(drm_device_t * dev); 333 334 + extern int radeon_driver_load(struct drm_device *dev, unsigned long flags); 335 + extern int radeon_driver_unload(struct drm_device *dev); 336 + extern int radeon_driver_firstopen(struct drm_device *dev); 337 + extern void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp); 338 + extern void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp); 339 + extern void radeon_driver_lastclose(drm_device_t * dev); 340 + extern int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv); 341 extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, 342 unsigned long arg); 343 ··· 364 */ 365 366 #define RADEON_AGP_COMMAND 0x0f60 367 + #define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config */ 368 + # define RADEON_AGP_ENABLE (1<<8) 369 #define RADEON_AUX_SCISSOR_CNTL 0x26f0 370 # define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24) 371 # define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25) ··· 651 652 #define RADEON_WAIT_UNTIL 0x1720 653 # define RADEON_WAIT_CRTC_PFLIP (1 << 0) 654 + # define RADEON_WAIT_2D_IDLE (1 << 14) 655 + # define RADEON_WAIT_3D_IDLE (1 << 15) 656 # define RADEON_WAIT_2D_IDLECLEAN (1 << 16) 657 # define RADEON_WAIT_3D_IDLECLEAN (1 << 17) 658 # define RADEON_WAIT_HOST_IDLECLEAN (1 << 18) ··· 1105 write = 0; \ 1106 _tab += _i; \ 1107 } \ 1108 while (_size > 0) { \ 1109 *(ring + write) = *_tab++; \ 1110 write++; \
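The interface-history additions explain the version bump: 1.20 for r300 texrect (the R300_TX_PITCH_0 range above) and 1.21 for the card-type getparam. The hunk also relocates GET_RING_HEAD/SET_RING_HEAD next to the chip definitions, drops the now-dead is_pci/fb_offset/mmio_offset fields, and adds RADEON_AGP_COMMAND_PCI_CONFIG plus two WAIT_UNTIL idle bits, presumably for follow-on code. Userspace should gate on the bumped minor before trusting the new getparam; a brief sketch with the libdrm calls of the period:

	drmVersionPtr v = drmGetVersion(fd);
	int has_card_type = v && (v->version_major > 1 ||
				  (v->version_major == 1 &&
				   v->version_minor >= 21));
	drmFreeVersion(v);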
+103 -143
drivers/char/drm/radeon_state.c
··· 1 - /* radeon_state.c -- State support for Radeon -*- linux-c -*- 2 - * 3 * Copyright 2000 VA Linux Systems, Inc., Fremont, California. 4 * All Rights Reserved. 5 * ··· 72 73 case RADEON_EMIT_PP_MISC: 74 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 75 - &data[(RADEON_RB3D_DEPTHOFFSET 76 - - 77 - RADEON_PP_MISC) / 78 - 4])) { 79 DRM_ERROR("Invalid depth buffer offset\n"); 80 return DRM_ERR(EINVAL); 81 } ··· 80 81 case RADEON_EMIT_PP_CNTL: 82 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 83 - &data[(RADEON_RB3D_COLOROFFSET 84 - - 85 - RADEON_PP_CNTL) / 86 - 4])) { 87 DRM_ERROR("Invalid colour buffer offset\n"); 88 return DRM_ERR(EINVAL); 89 } ··· 103 case RADEON_EMIT_PP_TXFILTER_1: 104 case RADEON_EMIT_PP_TXFILTER_2: 105 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 106 - &data[(RADEON_PP_TXOFFSET_0 107 - - 108 - RADEON_PP_TXFILTER_0) / 109 - 4])) { 110 DRM_ERROR("Invalid R100 texture offset\n"); 111 return DRM_ERR(EINVAL); 112 } ··· 117 case R200_EMIT_PP_CUBIC_OFFSETS_5:{ 118 int i; 119 for (i = 0; i < 5; i++) { 120 - if (radeon_check_and_fixup_offset 121 - (dev_priv, filp_priv, &data[i])) { 122 DRM_ERROR 123 ("Invalid R200 cubic texture offset\n"); 124 return DRM_ERR(EINVAL); ··· 231 232 static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * 233 dev_priv, 234 - drm_file_t * filp_priv, 235 - drm_radeon_kcmd_buffer_t *cmdbuf, 236 unsigned int *cmdsz) 237 { 238 u32 *cmd = (u32 *) cmdbuf->buf; ··· 548 {R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"}, 549 {R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"}, 550 {R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"}, 551 - {R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"}, 552 {R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"}, 553 {R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"}, 554 {R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"}, ··· 563 {R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4, 564 "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"}, 565 {R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"}, /* 61 */ 566 - {R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */ 567 {R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"}, 568 {R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"}, 569 {R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"}, ··· 586 {RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"}, 587 {RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"}, 588 {R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"}, 589 - {R200_PP_AFS_0, 32, "R200_PP_AFS_0"}, /* 85 */ 590 {R200_PP_AFS_1, 32, "R200_PP_AFS_1"}, 591 {R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"}, 592 {R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"}, ··· 979 * rendering a quad into just those buffers. Thus, we have to 980 * make sure the 3D engine is configured correctly. 981 */ 982 - if ((dev_priv->microcode_version == UCODE_R200) && 983 - (flags & (RADEON_DEPTH | RADEON_STENCIL))) { 984 985 int tempPP_CNTL; 986 int tempRE_CNTL; ··· 1631 (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset); 1632 dwords = size / 4; 1633 1634 if (microtile) { 1635 /* texture micro tiling in use, minimum texture width is thus 16 bytes. 1636 however, we cannot use blitter directly for texture width < 64 bytes, ··· 1650 from user space. 
*/ 1651 if (tex->height == 1) { 1652 if (tex_width >= 64 || tex_width <= 16) { 1653 - if (DRM_COPY_FROM_USER(buffer, data, 1654 - tex_width * 1655 - sizeof(u32))) { 1656 - DRM_ERROR 1657 - ("EFAULT on pad, %d bytes\n", 1658 - tex_width); 1659 - return DRM_ERR(EFAULT); 1660 - } 1661 } else if (tex_width == 32) { 1662 - if (DRM_COPY_FROM_USER 1663 - (buffer, data, 16)) { 1664 - DRM_ERROR 1665 - ("EFAULT on pad, %d bytes\n", 1666 - tex_width); 1667 - return DRM_ERR(EFAULT); 1668 - } 1669 - if (DRM_COPY_FROM_USER 1670 - (buffer + 8, data + 16, 16)) { 1671 - DRM_ERROR 1672 - ("EFAULT on pad, %d bytes\n", 1673 - tex_width); 1674 - return DRM_ERR(EFAULT); 1675 - } 1676 } 1677 } else if (tex_width >= 64 || tex_width == 16) { 1678 - if (DRM_COPY_FROM_USER(buffer, data, 1679 - dwords * sizeof(u32))) { 1680 - DRM_ERROR("EFAULT on data, %d dwords\n", 1681 - dwords); 1682 - return DRM_ERR(EFAULT); 1683 - } 1684 } else if (tex_width < 16) { 1685 for (i = 0; i < tex->height; i++) { 1686 - if (DRM_COPY_FROM_USER 1687 - (buffer, data, tex_width)) { 1688 - DRM_ERROR 1689 - ("EFAULT on pad, %d bytes\n", 1690 - tex_width); 1691 - return DRM_ERR(EFAULT); 1692 - } 1693 buffer += 4; 1694 data += tex_width; 1695 } ··· 1670 /* TODO: make sure this works when not fitting in one buffer 1671 (i.e. 32bytes x 2048...) */ 1672 for (i = 0; i < tex->height; i += 2) { 1673 - if (DRM_COPY_FROM_USER 1674 - (buffer, data, 16)) { 1675 - DRM_ERROR 1676 - ("EFAULT on pad, %d bytes\n", 1677 - tex_width); 1678 - return DRM_ERR(EFAULT); 1679 - } 1680 data += 16; 1681 - if (DRM_COPY_FROM_USER 1682 - (buffer + 8, data, 16)) { 1683 - DRM_ERROR 1684 - ("EFAULT on pad, %d bytes\n", 1685 - tex_width); 1686 - return DRM_ERR(EFAULT); 1687 - } 1688 data += 16; 1689 - if (DRM_COPY_FROM_USER 1690 - (buffer + 4, data, 16)) { 1691 - DRM_ERROR 1692 - ("EFAULT on pad, %d bytes\n", 1693 - tex_width); 1694 - return DRM_ERR(EFAULT); 1695 - } 1696 data += 16; 1697 - if (DRM_COPY_FROM_USER 1698 - (buffer + 12, data, 16)) { 1699 - DRM_ERROR 1700 - ("EFAULT on pad, %d bytes\n", 1701 - tex_width); 1702 - return DRM_ERR(EFAULT); 1703 - } 1704 data += 16; 1705 buffer += 16; 1706 } ··· 1686 /* Texture image width is larger than the minimum, so we 1687 * can upload it directly. 1688 */ 1689 - if (DRM_COPY_FROM_USER(buffer, data, 1690 - dwords * sizeof(u32))) { 1691 - DRM_ERROR("EFAULT on data, %d dwords\n", 1692 - dwords); 1693 - return DRM_ERR(EFAULT); 1694 - } 1695 } else { 1696 /* Texture image width is less than the minimum, so we 1697 * need to pad out each image scanline to the minimum 1698 * width. 1699 */ 1700 for (i = 0; i < tex->height; i++) { 1701 - if (DRM_COPY_FROM_USER 1702 - (buffer, data, tex_width)) { 1703 - DRM_ERROR 1704 - ("EFAULT on pad, %d bytes\n", 1705 - tex_width); 1706 - return DRM_ERR(EFAULT); 1707 - } 1708 buffer += 8; 1709 data += tex_width; 1710 } 1711 } 1712 } 1713 1714 buf->filp = filp; 1715 buf->used = size; 1716 offset = dev_priv->gart_buffers_offset + buf->offset; ··· 1763 } 1764 1765 static void radeon_apply_surface_regs(int surf_index, 1766 - drm_radeon_private_t * dev_priv) 1767 { 1768 if (!dev_priv->mmio) 1769 return; ··· 1789 * freed, we suddenly need two surfaces to store A and C, which might 1790 * not always be available. 
1791 */ 1792 - static int alloc_surface(drm_radeon_surface_alloc_t * new, 1793 - drm_radeon_private_t * dev_priv, DRMFILE filp) 1794 { 1795 struct radeon_virt_surface *s; 1796 int i; ··· 2099 drm_radeon_tcl_prim_t prim; 2100 2101 LOCK_TEST_WITH_RETURN(dev, filp); 2102 2103 DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); 2104 ··· 2543 return 0; 2544 } 2545 2546 - static __inline__ int radeon_emit_scalars(drm_radeon_private_t * dev_priv, 2547 drm_radeon_cmd_header_t header, 2548 - drm_radeon_kcmd_buffer_t * cmdbuf) 2549 { 2550 int sz = header.scalars.count; 2551 int start = header.scalars.offset; ··· 2565 2566 /* God this is ugly 2567 */ 2568 - static __inline__ int radeon_emit_scalars2(drm_radeon_private_t * dev_priv, 2569 drm_radeon_cmd_header_t header, 2570 - drm_radeon_kcmd_buffer_t * cmdbuf) 2571 { 2572 int sz = header.scalars.count; 2573 int start = ((unsigned int)header.scalars.offset) + 0x100; ··· 2585 return 0; 2586 } 2587 2588 - static __inline__ int radeon_emit_vectors(drm_radeon_private_t * dev_priv, 2589 drm_radeon_cmd_header_t header, 2590 - drm_radeon_kcmd_buffer_t * cmdbuf) 2591 { 2592 int sz = header.vectors.count; 2593 int start = header.vectors.offset; ··· 2632 return 0; 2633 } 2634 2635 - static int radeon_emit_packet3_cliprect(drm_device_t * dev, 2636 - drm_file_t * filp_priv, 2637 drm_radeon_kcmd_buffer_t *cmdbuf, 2638 int orig_nbox) 2639 { ··· 2765 kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER); 2766 if (kbuf == NULL) 2767 return DRM_ERR(ENOMEM); 2768 - if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf, cmdbuf.bufsz)) { 2769 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); 2770 return DRM_ERR(EFAULT); 2771 } ··· 2929 value = dev_priv->gart_vm_start; 2930 break; 2931 case RADEON_PARAM_REGISTER_HANDLE: 2932 - value = dev_priv->mmio_offset; 2933 break; 2934 case RADEON_PARAM_STATUS_HANDLE: 2935 value = dev_priv->ring_rptr_offset; ··· 2951 #endif 2952 case RADEON_PARAM_GART_TEX_HANDLE: 2953 value = dev_priv->gart_textures_offset; 2954 break; 2955 default: 2956 return DRM_ERR(EINVAL); ··· 3023 /* When a client dies: 3024 * - Check for and clean up flipped page state 3025 * - Free any alloced GART memory. 3026 * 3027 * DRM infrastructure takes care of reclaiming dma buffers. 
3028 */ 3029 - void radeon_driver_prerelease(drm_device_t * dev, DRMFILE filp) 3030 { 3031 if (dev->dev_private) { 3032 drm_radeon_private_t *dev_priv = dev->dev_private; ··· 3040 } 3041 } 3042 3043 - void radeon_driver_pretakedown(drm_device_t * dev) 3044 { 3045 radeon_do_release(dev); 3046 } 3047 3048 - int radeon_driver_open_helper(drm_device_t * dev, drm_file_t * filp_priv) 3049 { 3050 drm_radeon_private_t *dev_priv = dev->dev_private; 3051 struct drm_radeon_driver_file_fields *radeon_priv; 3052 3053 radeon_priv = 3054 (struct drm_radeon_driver_file_fields *) 3055 drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES); ··· 3059 return -ENOMEM; 3060 3061 filp_priv->driver_priv = radeon_priv; 3062 if (dev_priv) 3063 radeon_priv->radeon_fb_delta = dev_priv->fb_location; 3064 else ··· 3067 return 0; 3068 } 3069 3070 - void radeon_driver_free_filp_priv(drm_device_t * dev, drm_file_t * filp_priv) 3071 { 3072 struct drm_radeon_driver_file_fields *radeon_priv = 3073 filp_priv->driver_priv; ··· 3076 } 3077 3078 drm_ioctl_desc_t radeon_ioctls[] = { 3079 - [DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, 1, 1}, 3080 - [DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, 1, 1}, 3081 - [DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, 1, 1}, 3082 - [DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = {radeon_cp_reset, 1, 1}, 3083 - [DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = {radeon_cp_idle, 1, 0}, 3084 - [DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = {radeon_cp_resume, 1, 0}, 3085 - [DRM_IOCTL_NR(DRM_RADEON_RESET)] = {radeon_engine_reset, 1, 0}, 3086 - [DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = {radeon_fullscreen, 1, 0}, 3087 - [DRM_IOCTL_NR(DRM_RADEON_SWAP)] = {radeon_cp_swap, 1, 0}, 3088 - [DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = {radeon_cp_clear, 1, 0}, 3089 - [DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = {radeon_cp_vertex, 1, 0}, 3090 - [DRM_IOCTL_NR(DRM_RADEON_INDICES)] = {radeon_cp_indices, 1, 0}, 3091 - [DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = {radeon_cp_texture, 1, 0}, 3092 - [DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = {radeon_cp_stipple, 1, 0}, 3093 - [DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = {radeon_cp_indirect, 1, 1}, 3094 - [DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = {radeon_cp_vertex2, 1, 0}, 3095 - [DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = {radeon_cp_cmdbuf, 1, 0}, 3096 - [DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = {radeon_cp_getparam, 1, 0}, 3097 - [DRM_IOCTL_NR(DRM_RADEON_FLIP)] = {radeon_cp_flip, 1, 0}, 3098 - [DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = {radeon_mem_alloc, 1, 0}, 3099 - [DRM_IOCTL_NR(DRM_RADEON_FREE)] = {radeon_mem_free, 1, 0}, 3100 - [DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = {radeon_mem_init_heap, 1, 1}, 3101 - [DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = {radeon_irq_emit, 1, 0}, 3102 - [DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = {radeon_irq_wait, 1, 0}, 3103 - [DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, 1, 0}, 3104 - [DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] = {radeon_surface_alloc, 1, 0}, 3105 - [DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] = {radeon_surface_free, 1, 0} 3106 }; 3107 3108 int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
··· 1 + /* radeon_state.c -- State support for Radeon -*- linux-c -*- */ 2 + /* 3 * Copyright 2000 VA Linux Systems, Inc., Fremont, California. 4 * All Rights Reserved. 5 * ··· 72 73 case RADEON_EMIT_PP_MISC: 74 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 75 + &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) { 76 DRM_ERROR("Invalid depth buffer offset\n"); 77 return DRM_ERR(EINVAL); 78 } ··· 83 84 case RADEON_EMIT_PP_CNTL: 85 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 86 + &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) { 87 DRM_ERROR("Invalid colour buffer offset\n"); 88 return DRM_ERR(EINVAL); 89 } ··· 109 case RADEON_EMIT_PP_TXFILTER_1: 110 case RADEON_EMIT_PP_TXFILTER_2: 111 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 112 + &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) { 113 DRM_ERROR("Invalid R100 texture offset\n"); 114 return DRM_ERR(EINVAL); 115 } ··· 126 case R200_EMIT_PP_CUBIC_OFFSETS_5:{ 127 int i; 128 for (i = 0; i < 5; i++) { 129 + if (radeon_check_and_fixup_offset(dev_priv, 130 + filp_priv, 131 + &data[i])) { 132 DRM_ERROR 133 ("Invalid R200 cubic texture offset\n"); 134 return DRM_ERR(EINVAL); ··· 239 240 static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * 241 dev_priv, 242 + drm_file_t *filp_priv, 243 + drm_radeon_kcmd_buffer_t * 244 + cmdbuf, 245 unsigned int *cmdsz) 246 { 247 u32 *cmd = (u32 *) cmdbuf->buf; ··· 555 {R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"}, 556 {R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"}, 557 {R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"}, 558 + {R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, 559 + "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"}, 560 {R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"}, 561 {R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"}, 562 {R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"}, ··· 569 {R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4, 570 "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"}, 571 {R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"}, /* 61 */ 572 + {R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */ 573 {R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"}, 574 {R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"}, 575 {R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"}, ··· 592 {RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"}, 593 {RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"}, 594 {R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"}, 595 + {R200_PP_AFS_0, 32, "R200_PP_AFS_0"}, /* 85 */ 596 {R200_PP_AFS_1, 32, "R200_PP_AFS_1"}, 597 {R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"}, 598 {R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"}, ··· 985 * rendering a quad into just those buffers. Thus, we have to 986 * make sure the 3D engine is configured correctly. 987 */ 988 + else if ((dev_priv->microcode_version == UCODE_R200) && 989 + (flags & (RADEON_DEPTH | RADEON_STENCIL))) { 990 991 int tempPP_CNTL; 992 int tempRE_CNTL; ··· 1637 (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset); 1638 dwords = size / 4; 1639 1640 + #define RADEON_COPY_MT(_buf, _data, _width) \ 1641 + do { \ 1642 + if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\ 1643 + DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \ 1644 + return DRM_ERR(EFAULT); \ 1645 + } \ 1646 + } while(0) 1647 + 1648 if (microtile) { 1649 /* texture micro tiling in use, minimum texture width is thus 16 bytes. 1650 however, we cannot use blitter directly for texture width < 64 bytes, ··· 1648 from user space. 
*/ 1649 if (tex->height == 1) { 1650 if (tex_width >= 64 || tex_width <= 16) { 1651 + RADEON_COPY_MT(buffer, data, 1652 + (int)(tex_width * sizeof(u32))); 1653 } else if (tex_width == 32) { 1654 + RADEON_COPY_MT(buffer, data, 16); 1655 + RADEON_COPY_MT(buffer + 8, 1656 + data + 16, 16); 1657 } 1658 } else if (tex_width >= 64 || tex_width == 16) { 1659 + RADEON_COPY_MT(buffer, data, 1660 + (int)(dwords * sizeof(u32))); 1661 } else if (tex_width < 16) { 1662 for (i = 0; i < tex->height; i++) { 1663 + RADEON_COPY_MT(buffer, data, tex_width); 1664 buffer += 4; 1665 data += tex_width; 1666 } ··· 1695 /* TODO: make sure this works when not fitting in one buffer 1696 (i.e. 32bytes x 2048...) */ 1697 for (i = 0; i < tex->height; i += 2) { 1698 + RADEON_COPY_MT(buffer, data, 16); 1699 data += 16; 1700 + RADEON_COPY_MT(buffer + 8, data, 16); 1701 data += 16; 1702 + RADEON_COPY_MT(buffer + 4, data, 16); 1703 data += 16; 1704 + RADEON_COPY_MT(buffer + 12, data, 16); 1705 data += 16; 1706 buffer += 16; 1707 } ··· 1735 /* Texture image width is larger than the minimum, so we 1736 * can upload it directly. 1737 */ 1738 + RADEON_COPY_MT(buffer, data, 1739 + (int)(dwords * sizeof(u32))); 1740 } else { 1741 /* Texture image width is less than the minimum, so we 1742 * need to pad out each image scanline to the minimum 1743 * width. 1744 */ 1745 for (i = 0; i < tex->height; i++) { 1746 + RADEON_COPY_MT(buffer, data, tex_width); 1747 buffer += 8; 1748 data += tex_width; 1749 } 1750 } 1751 } 1752 1753 + #undef RADEON_COPY_MT 1754 buf->filp = filp; 1755 buf->used = size; 1756 offset = dev_priv->gart_buffers_offset + buf->offset; ··· 1821 } 1822 1823 static void radeon_apply_surface_regs(int surf_index, 1824 + drm_radeon_private_t *dev_priv) 1825 { 1826 if (!dev_priv->mmio) 1827 return; ··· 1847 * freed, we suddenly need two surfaces to store A and C, which might 1848 * not always be available. 
1849 */ 1850 + static int alloc_surface(drm_radeon_surface_alloc_t *new, 1851 + drm_radeon_private_t *dev_priv, DRMFILE filp) 1852 { 1853 struct radeon_virt_surface *s; 1854 int i; ··· 2157 drm_radeon_tcl_prim_t prim; 2158 2159 LOCK_TEST_WITH_RETURN(dev, filp); 2160 + 2161 + if (!dev_priv) { 2162 + DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 2163 + return DRM_ERR(EINVAL); 2164 + } 2165 2166 DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); 2167 ··· 2596 return 0; 2597 } 2598 2599 + static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv, 2600 drm_radeon_cmd_header_t header, 2601 + drm_radeon_kcmd_buffer_t *cmdbuf) 2602 { 2603 int sz = header.scalars.count; 2604 int start = header.scalars.offset; ··· 2618 2619 /* God this is ugly 2620 */ 2621 + static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv, 2622 drm_radeon_cmd_header_t header, 2623 + drm_radeon_kcmd_buffer_t *cmdbuf) 2624 { 2625 int sz = header.scalars.count; 2626 int start = ((unsigned int)header.scalars.offset) + 0x100; ··· 2638 return 0; 2639 } 2640 2641 + static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv, 2642 drm_radeon_cmd_header_t header, 2643 + drm_radeon_kcmd_buffer_t *cmdbuf) 2644 { 2645 int sz = header.vectors.count; 2646 int start = header.vectors.offset; ··· 2685 return 0; 2686 } 2687 2688 + static int radeon_emit_packet3_cliprect(drm_device_t *dev, 2689 + drm_file_t *filp_priv, 2690 drm_radeon_kcmd_buffer_t *cmdbuf, 2691 int orig_nbox) 2692 { ··· 2818 kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER); 2819 if (kbuf == NULL) 2820 return DRM_ERR(ENOMEM); 2821 + if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf, 2822 + cmdbuf.bufsz)) { 2823 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); 2824 return DRM_ERR(EFAULT); 2825 } ··· 2981 value = dev_priv->gart_vm_start; 2982 break; 2983 case RADEON_PARAM_REGISTER_HANDLE: 2984 + value = dev_priv->mmio->offset; 2985 break; 2986 case RADEON_PARAM_STATUS_HANDLE: 2987 value = dev_priv->ring_rptr_offset; ··· 3003 #endif 3004 case RADEON_PARAM_GART_TEX_HANDLE: 3005 value = dev_priv->gart_textures_offset; 3006 + break; 3007 + 3008 + case RADEON_PARAM_CARD_TYPE: 3009 + if (dev_priv->flags & CHIP_IS_PCIE) 3010 + value = RADEON_CARD_PCIE; 3011 + else if (dev_priv->flags & CHIP_IS_AGP) 3012 + value = RADEON_CARD_AGP; 3013 + else 3014 + value = RADEON_CARD_PCI; 3015 break; 3016 default: 3017 return DRM_ERR(EINVAL); ··· 3066 /* When a client dies: 3067 * - Check for and clean up flipped page state 3068 * - Free any alloced GART memory. 3069 + * - Free any alloced radeon surfaces. 3070 * 3071 * DRM infrastructure takes care of reclaiming dma buffers. 
3072 */ 3073 + void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp) 3074 { 3075 if (dev->dev_private) { 3076 drm_radeon_private_t *dev_priv = dev->dev_private; ··· 3082 } 3083 } 3084 3085 + void radeon_driver_lastclose(drm_device_t * dev) 3086 { 3087 radeon_do_release(dev); 3088 } 3089 3090 + int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv) 3091 { 3092 drm_radeon_private_t *dev_priv = dev->dev_private; 3093 struct drm_radeon_driver_file_fields *radeon_priv; 3094 3095 + DRM_DEBUG("\n"); 3096 radeon_priv = 3097 (struct drm_radeon_driver_file_fields *) 3098 drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES); ··· 3100 return -ENOMEM; 3101 3102 filp_priv->driver_priv = radeon_priv; 3103 + 3104 if (dev_priv) 3105 radeon_priv->radeon_fb_delta = dev_priv->fb_location; 3106 else ··· 3107 return 0; 3108 } 3109 3110 + void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp_priv) 3111 { 3112 struct drm_radeon_driver_file_fields *radeon_priv = 3113 filp_priv->driver_priv; ··· 3116 } 3117 3118 drm_ioctl_desc_t radeon_ioctls[] = { 3119 + [DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3120 + [DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3121 + [DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3122 + [DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = {radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3123 + [DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = {radeon_cp_idle, DRM_AUTH}, 3124 + [DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = {radeon_cp_resume, DRM_AUTH}, 3125 + [DRM_IOCTL_NR(DRM_RADEON_RESET)] = {radeon_engine_reset, DRM_AUTH}, 3126 + [DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = {radeon_fullscreen, DRM_AUTH}, 3127 + [DRM_IOCTL_NR(DRM_RADEON_SWAP)] = {radeon_cp_swap, DRM_AUTH}, 3128 + [DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = {radeon_cp_clear, DRM_AUTH}, 3129 + [DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = {radeon_cp_vertex, DRM_AUTH}, 3130 + [DRM_IOCTL_NR(DRM_RADEON_INDICES)] = {radeon_cp_indices, DRM_AUTH}, 3131 + [DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = {radeon_cp_texture, DRM_AUTH}, 3132 + [DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = {radeon_cp_stipple, DRM_AUTH}, 3133 + [DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = {radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3134 + [DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = {radeon_cp_vertex2, DRM_AUTH}, 3135 + [DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = {radeon_cp_cmdbuf, DRM_AUTH}, 3136 + [DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = {radeon_cp_getparam, DRM_AUTH}, 3137 + [DRM_IOCTL_NR(DRM_RADEON_FLIP)] = {radeon_cp_flip, DRM_AUTH}, 3138 + [DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = {radeon_mem_alloc, DRM_AUTH}, 3139 + [DRM_IOCTL_NR(DRM_RADEON_FREE)] = {radeon_mem_free, DRM_AUTH}, 3140 + [DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = {radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3141 + [DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = {radeon_irq_emit, DRM_AUTH}, 3142 + [DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = {radeon_irq_wait, DRM_AUTH}, 3143 + [DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, DRM_AUTH}, 3144 + [DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] = {radeon_surface_alloc, DRM_AUTH}, 3145 + [DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] = {radeon_surface_free, DRM_AUTH} 3146 }; 3147 3148 int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
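The rewritten radeon_ioctls[] table above swaps the old positional auth/root integer pairs for named permission flags (DRM_AUTH, DRM_MASTER, DRM_ROOT_ONLY). The sketch below shows how a dispatcher can gate on such flags before calling the handler; it is an illustration only, and the helper name and drm_file_t field names are assumptions rather than the DRM core's exact code. The sis and via ioctl tables later in this diff are converted the same way.

/* Hypothetical sketch: gate an ioctl on the flags carried in its
 * drm_ioctl_desc_t entry. Field names (flags, authenticated, master)
 * are assumptions for illustration. */
static int radeon_check_ioctl_perm_sketch(const drm_ioctl_desc_t *ioctl,
					  const drm_file_t *priv)
{
	if ((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN))
		return DRM_ERR(EACCES);	/* e.g. CP_INIT, INIT_HEAP */
	if ((ioctl->flags & DRM_AUTH) && !priv->authenticated)
		return DRM_ERR(EACCES);	/* client not authenticated */
	if ((ioctl->flags & DRM_MASTER) && !priv->master)
		return DRM_ERR(EACCES);	/* reserved for the DRM master */
	return 0;			/* ok to call ioctl->func */
}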
+47 -38
drivers/char/drm/savage_bci.c
··· 533 dev_priv->first_dma_page = dev_priv->current_dma_page = 0; 534 } 535 536 - /* 537 - * Initalize mappings. On Savage4 and SavageIX the alignment 538 - * and size of the aperture is not suitable for automatic MTRR setup 539 - * in drm_addmap. Therefore we do it manually before the maps are 540 - * initialized. We also need to take care of deleting the MTRRs in 541 - * postcleanup. 542 - */ 543 - int savage_preinit(drm_device_t * dev, unsigned long chipset) 544 { 545 drm_savage_private_t *dev_priv; 546 - unsigned long mmio_base, fb_base, fb_size, aperture_base; 547 - /* fb_rsrc and aper_rsrc aren't really used currently, but still exist 548 - * in case we decide we need information on the BAR for BSD in the 549 - * future. 550 - */ 551 - unsigned int fb_rsrc, aper_rsrc; 552 - int ret = 0; 553 554 dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER); 555 if (dev_priv == NULL) ··· 543 544 memset(dev_priv, 0, sizeof(drm_savage_private_t)); 545 dev->dev_private = (void *)dev_priv; 546 dev_priv->chipset = (enum savage_family)chipset; 547 548 dev_priv->mtrr[0].handle = -1; 549 dev_priv->mtrr[1].handle = -1; ··· 584 dev_priv->mtrr[0].base = fb_base; 585 dev_priv->mtrr[0].size = 0x01000000; 586 dev_priv->mtrr[0].handle = 587 - mtrr_add(dev_priv->mtrr[0].base, 588 - dev_priv->mtrr[0].size, MTRR_TYPE_WRCOMB, 589 - 1); 590 dev_priv->mtrr[1].base = fb_base + 0x02000000; 591 dev_priv->mtrr[1].size = 0x02000000; 592 dev_priv->mtrr[1].handle = 593 - mtrr_add(dev_priv->mtrr[1].base, 594 - dev_priv->mtrr[1].size, MTRR_TYPE_WRCOMB, 595 - 1); 596 dev_priv->mtrr[2].base = fb_base + 0x04000000; 597 dev_priv->mtrr[2].size = 0x04000000; 598 dev_priv->mtrr[2].handle = 599 - mtrr_add(dev_priv->mtrr[2].base, 600 - dev_priv->mtrr[2].size, MTRR_TYPE_WRCOMB, 601 - 1); 602 } else { 603 DRM_ERROR("strange pci_resource_len %08lx\n", 604 drm_get_resource_len(dev, 0)); 605 } 606 - } else if (chipset != S3_SUPERSAVAGE && chipset != S3_SAVAGE2000) { 607 mmio_base = drm_get_resource_start(dev, 0); 608 fb_rsrc = 1; 609 fb_base = drm_get_resource_start(dev, 1); ··· 615 dev_priv->mtrr[0].base = fb_base; 616 dev_priv->mtrr[0].size = 0x08000000; 617 dev_priv->mtrr[0].handle = 618 - mtrr_add(dev_priv->mtrr[0].base, 619 - dev_priv->mtrr[0].size, MTRR_TYPE_WRCOMB, 620 - 1); 621 } else { 622 DRM_ERROR("strange pci_resource_len %08lx\n", 623 drm_get_resource_len(dev, 1)); ··· 653 /* 654 * Delete MTRRs and free device-private data. 
655 */ 656 - int savage_postcleanup(drm_device_t * dev) 657 { 658 drm_savage_private_t *dev_priv = dev->dev_private; 659 int i; 660 661 for (i = 0; i < 3; ++i) 662 if (dev_priv->mtrr[i].handle >= 0) 663 - mtrr_del(dev_priv->mtrr[i].handle, 664 dev_priv->mtrr[i].base, 665 - dev_priv->mtrr[i].size); 666 667 drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER); 668 ··· 1004 * DMA buffer management 1005 */ 1006 1007 - static int savage_bci_get_buffers(DRMFILE filp, drm_device_t * dev, 1008 - drm_dma_t * d) 1009 { 1010 drm_buf_t *buf; 1011 int i; ··· 1066 return ret; 1067 } 1068 1069 - void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp) 1070 { 1071 drm_device_dma_t *dma = dev->dma; 1072 drm_savage_private_t *dev_priv = dev->dev_private; ··· 1099 } 1100 1101 drm_ioctl_desc_t savage_ioctls[] = { 1102 - [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, 1, 1}, 1103 - [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, 1, 0}, 1104 - [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, 1, 0}, 1105 - [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, 1, 0}, 1106 }; 1107 1108 int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
··· 533 dev_priv->first_dma_page = dev_priv->current_dma_page = 0; 534 } 535 536 + int savage_driver_load(drm_device_t *dev, unsigned long chipset) 537 { 538 drm_savage_private_t *dev_priv; 539 540 dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER); 541 if (dev_priv == NULL) ··· 557 558 memset(dev_priv, 0, sizeof(drm_savage_private_t)); 559 dev->dev_private = (void *)dev_priv; 560 + 561 dev_priv->chipset = (enum savage_family)chipset; 562 + 563 + return 0; 564 + } 565 + 566 + 567 + /* 568 + * Initialize mappings. On Savage4 and SavageIX the alignment 569 + * and size of the aperture is not suitable for automatic MTRR setup 570 + * in drm_addmap. Therefore we add them manually before the maps are 571 + * initialized, and tear them down on last close. 572 + */ 573 + int savage_driver_firstopen(drm_device_t *dev) 574 + { 575 + drm_savage_private_t *dev_priv = dev->dev_private; 576 + unsigned long mmio_base, fb_base, fb_size, aperture_base; 577 + /* fb_rsrc and aper_rsrc aren't really used currently, but still exist 578 + * in case we decide we need information on the BAR for BSD in the 579 + * future. 580 + */ 581 + unsigned int fb_rsrc, aper_rsrc; 582 + int ret = 0; 583 584 dev_priv->mtrr[0].handle = -1; 585 dev_priv->mtrr[1].handle = -1; ··· 576 dev_priv->mtrr[0].base = fb_base; 577 dev_priv->mtrr[0].size = 0x01000000; 578 dev_priv->mtrr[0].handle = 579 + drm_mtrr_add(dev_priv->mtrr[0].base, 580 + dev_priv->mtrr[0].size, DRM_MTRR_WC); 581 dev_priv->mtrr[1].base = fb_base + 0x02000000; 582 dev_priv->mtrr[1].size = 0x02000000; 583 dev_priv->mtrr[1].handle = 584 + drm_mtrr_add(dev_priv->mtrr[1].base, 585 + dev_priv->mtrr[1].size, DRM_MTRR_WC); 586 dev_priv->mtrr[2].base = fb_base + 0x04000000; 587 dev_priv->mtrr[2].size = 0x04000000; 588 dev_priv->mtrr[2].handle = 589 + drm_mtrr_add(dev_priv->mtrr[2].base, 590 + dev_priv->mtrr[2].size, DRM_MTRR_WC); 591 } else { 592 DRM_ERROR("strange pci_resource_len %08lx\n", 593 drm_get_resource_len(dev, 0)); 594 } 595 + } else if (dev_priv->chipset != S3_SUPERSAVAGE && 596 + dev_priv->chipset != S3_SAVAGE2000) { 597 mmio_base = drm_get_resource_start(dev, 0); 598 fb_rsrc = 1; 599 fb_base = drm_get_resource_start(dev, 1); ··· 609 dev_priv->mtrr[0].base = fb_base; 610 dev_priv->mtrr[0].size = 0x08000000; 611 dev_priv->mtrr[0].handle = 612 + drm_mtrr_add(dev_priv->mtrr[0].base, 613 + dev_priv->mtrr[0].size, DRM_MTRR_WC); 614 } else { 615 DRM_ERROR("strange pci_resource_len %08lx\n", 616 drm_get_resource_len(dev, 1)); ··· 648 /* 649 * Delete MTRRs and free device-private data. 
650 */ 651 + void savage_driver_lastclose(drm_device_t *dev) 652 { 653 drm_savage_private_t *dev_priv = dev->dev_private; 654 int i; 655 656 for (i = 0; i < 3; ++i) 657 if (dev_priv->mtrr[i].handle >= 0) 658 + drm_mtrr_del(dev_priv->mtrr[i].handle, 659 dev_priv->mtrr[i].base, 660 + dev_priv->mtrr[i].size, DRM_MTRR_WC); 661 + } 662 + 663 + int savage_driver_unload(drm_device_t *dev) 664 + { 665 + drm_savage_private_t *dev_priv = dev->dev_private; 666 667 drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER); 668 ··· 994 * DMA buffer management 995 */ 996 997 + static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d) 998 { 999 drm_buf_t *buf; 1000 int i; ··· 1057 return ret; 1058 } 1059 1060 + void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp) 1061 { 1062 drm_device_dma_t *dma = dev->dma; 1063 drm_savage_private_t *dev_priv = dev->dev_private; ··· 1090 } 1091 1092 drm_ioctl_desc_t savage_ioctls[] = { 1093 + [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1094 + [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, DRM_AUTH}, 1095 + [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, DRM_AUTH}, 1096 + [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, DRM_AUTH}, 1097 }; 1098 1099 int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
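Two structural changes above: the old savage_preinit()/savage_postcleanup() pair is split into the four load/firstopen/lastclose/unload hooks so that the device-private allocation follows module lifetime while the MTRR setup follows first-open/last-close, and the raw mtrr_add()/mtrr_del() calls move behind the drm_mtrr_add()/drm_mtrr_del() wrappers. A plausible reading of the wrapper on Linux, for orientation only; the actual definitions live in the DRM OS headers and may differ:

/* Sketch of what a Linux-side drm_mtrr_add/_del wrapper can reduce
 * to; DRM_MTRR_WC abstracts MTRR_TYPE_WRCOMB so BSD ports can map
 * the same call onto their own memory-range API. Illustrative, not
 * verbatim. */
#define DRM_MTRR_WC MTRR_TYPE_WRCOMB

static inline int drm_mtrr_add_sketch(unsigned long offset,
				      unsigned long size, unsigned int flags)
{
	return mtrr_add(offset, size, flags, 1);
}

static inline int drm_mtrr_del_sketch(int handle, unsigned long offset,
				      unsigned long size, unsigned int flags)
{
	return mtrr_del(handle, offset, size);
}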
+16 -34
drivers/char/drm/savage_drv.c
··· 30 31 #include "drm_pciids.h" 32 33 - static int postinit(struct drm_device *dev, unsigned long flags) 34 - { 35 - DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n", 36 - DRIVER_NAME, 37 - DRIVER_MAJOR, 38 - DRIVER_MINOR, 39 - DRIVER_PATCHLEVEL, 40 - DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev) 41 - ); 42 - return 0; 43 - } 44 - 45 - static int version(drm_version_t * version) 46 - { 47 - int len; 48 - 49 - version->version_major = DRIVER_MAJOR; 50 - version->version_minor = DRIVER_MINOR; 51 - version->version_patchlevel = DRIVER_PATCHLEVEL; 52 - DRM_COPY(version->name, DRIVER_NAME); 53 - DRM_COPY(version->date, DRIVER_DATE); 54 - DRM_COPY(version->desc, DRIVER_DESC); 55 - return 0; 56 - } 57 - 58 static struct pci_device_id pciidlist[] = { 59 savage_PCI_IDS 60 }; ··· 38 .driver_features = 39 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA, 40 .dev_priv_size = sizeof(drm_savage_buf_priv_t), 41 - .preinit = savage_preinit, 42 - .postinit = postinit, 43 - .postcleanup = savage_postcleanup, 44 .reclaim_buffers = savage_reclaim_buffers, 45 .get_map_ofs = drm_core_get_map_ofs, 46 .get_reg_ofs = drm_core_get_reg_ofs, 47 - .version = version, 48 .ioctls = savage_ioctls, 49 .dma_ioctl = savage_bci_buffers, 50 .fops = { ··· 55 .mmap = drm_mmap, 56 .poll = drm_poll, 57 .fasync = drm_fasync, 58 - } 59 - , 60 .pci_driver = { 61 - .name = DRIVER_NAME, 62 - .id_table = pciidlist, 63 - } 64 }; 65 66 static int __init savage_init(void)
··· 30 31 #include "drm_pciids.h" 32 33 static struct pci_device_id pciidlist[] = { 34 savage_PCI_IDS 35 }; ··· 63 .driver_features = 64 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA, 65 .dev_priv_size = sizeof(drm_savage_buf_priv_t), 66 + .load = savage_driver_load, 67 + .firstopen = savage_driver_firstopen, 68 + .lastclose = savage_driver_lastclose, 69 + .unload = savage_driver_unload, 70 .reclaim_buffers = savage_reclaim_buffers, 71 .get_map_ofs = drm_core_get_map_ofs, 72 .get_reg_ofs = drm_core_get_reg_ofs, 73 .ioctls = savage_ioctls, 74 .dma_ioctl = savage_bci_buffers, 75 .fops = { ··· 80 .mmap = drm_mmap, 81 .poll = drm_poll, 82 .fasync = drm_fasync, 83 + }, 84 + 85 .pci_driver = { 86 + .name = DRIVER_NAME, 87 + .id_table = pciidlist, 88 + }, 89 + 90 + .name = DRIVER_NAME, 91 + .desc = DRIVER_DESC, 92 + .date = DRIVER_DATE, 93 + .major = DRIVER_MAJOR, 94 + .minor = DRIVER_MINOR, 95 + .patchlevel = DRIVER_PATCHLEVEL, 96 }; 97 98 static int __init savage_init(void)
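With .load/.firstopen/.lastclose/.unload wired up, savage_drv.c also drops its hand-rolled postinit() and version() callbacks: the DRM core can print the init banner and answer the version ioctl itself from the static .name/.desc/.date/.major/.minor/.patchlevel fields now set at the bottom of the driver struct. sis_drv.c and tdfx_drv.c below get the identical treatment. Schematically, the core-side consumer looks like this (helper name hypothetical):

/* Hypothetical core-side helper: serve the version ioctl from the
 * static drm_driver fields instead of a per-driver callback. */
static void fill_version_from_driver(const struct drm_driver *driver,
				     drm_version_t *v)
{
	v->version_major = driver->major;
	v->version_minor = driver->minor;
	v->version_patchlevel = driver->patchlevel;
	/* driver->name / ->date / ->desc would then be copied out to
	 * the user's buffers with a bounded string copy. */
}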
+11 -18
drivers/char/drm/savage_drv.h
··· 1 - /* savage_drv.h -- Private header for the savage driver 2 - * 3 * Copyright 2004 Felix Kuehling 4 * All Rights Reserved. 5 * ··· 192 /* Err, there is a macro wait_event in include/linux/wait.h. 193 * Avoid unwanted macro expansion. */ 194 void (*emit_clip_rect) (struct drm_savage_private * dev_priv, 195 - drm_clip_rect_t * pbox); 196 void (*dma_flush) (struct drm_savage_private * dev_priv); 197 } drm_savage_private_t; 198 ··· 208 extern void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page); 209 extern uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, 210 unsigned int n); 211 - extern int savage_preinit(drm_device_t * dev, unsigned long chipset); 212 - extern int savage_postcleanup(drm_device_t * dev); 213 extern int savage_do_cleanup_bci(drm_device_t * dev); 214 extern void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp); 215 216 /* state functions */ 217 extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, 218 - drm_clip_rect_t * pbox); 219 extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, 220 - drm_clip_rect_t * pbox); 221 222 #define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */ 223 #define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */ ··· 502 503 #define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val) 504 505 - #define BCI_COPY_FROM_USER(src,n) do { \ 506 - unsigned int i; \ 507 - for (i = 0; i < n; ++i) { \ 508 - uint32_t val; \ 509 - DRM_GET_USER_UNCHECKED(val, &((uint32_t*)(src))[i]); \ 510 - BCI_WRITE(val); \ 511 - } \ 512 - } while(0) 513 - 514 /* 515 * command DMA support 516 */ ··· 527 528 #define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val) 529 530 - #define DMA_COPY_FROM_USER(src,n) do { \ 531 - DRM_COPY_FROM_USER_UNCHECKED(dma_ptr, (src), (n)*4); \ 532 dma_ptr += n; \ 533 } while(0) 534
··· 1 + /* savage_drv.h -- Private header for the savage driver */ 2 + /* 3 * Copyright 2004 Felix Kuehling 4 * All Rights Reserved. 5 * ··· 192 /* Err, there is a macro wait_event in include/linux/wait.h. 193 * Avoid unwanted macro expansion. */ 194 void (*emit_clip_rect) (struct drm_savage_private * dev_priv, 195 + const drm_clip_rect_t * pbox); 196 void (*dma_flush) (struct drm_savage_private * dev_priv); 197 } drm_savage_private_t; 198 ··· 208 extern void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page); 209 extern uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, 210 unsigned int n); 211 + extern int savage_driver_load(drm_device_t *dev, unsigned long chipset); 212 + extern int savage_driver_firstopen(drm_device_t *dev); 213 + extern void savage_driver_lastclose(drm_device_t *dev); 214 + extern int savage_driver_unload(drm_device_t *dev); 215 extern int savage_do_cleanup_bci(drm_device_t * dev); 216 extern void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp); 217 218 /* state functions */ 219 extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, 220 + const drm_clip_rect_t * pbox); 221 extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, 222 + const drm_clip_rect_t * pbox); 223 224 #define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */ 225 #define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */ ··· 500 501 #define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val) 502 503 /* 504 * command DMA support 505 */ ··· 534 535 #define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val) 536 537 + #define DMA_COPY(src, n) do { \ 538 + memcpy(dma_ptr, (src), (n)*4); \ 539 dma_ptr += n; \ 540 } while(0) 541
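savage_drv.h loses BCI_COPY_FROM_USER entirely and turns DMA_COPY_FROM_USER into a plain memcpy()-backed DMA_COPY: after this series the command and vertex data have already been staged into kernel memory by the cmdbuf ioctl (see savage_state.c below), so the emit path never touches user pointers. Schematic call-site usage, loosely mirroring savage_dispatch_state(); the BEGIN_DMA sizing here is an assumption, and the real code also handles buffer wrap:

/* Schematic only. regs points into the kernel-side copy of the
 * command stream, never at user memory. */
static int emit_state_sketch(drm_savage_private_t *dev_priv,
			     unsigned int start, unsigned int n,
			     const uint32_t *regs)
{
	DMA_LOCALS;			/* declares dma_ptr for the macros */
	BEGIN_DMA(n + 1);		/* header dword plus n payload dwords */
	DMA_SET_REGISTERS(start, n);	/* "write n registers at start" */
	DMA_COPY(regs, n);		/* memcpy n dwords, advance dma_ptr */
	DMA_COMMIT();
	return 0;
}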
+164 -160
drivers/char/drm/savage_state.c
··· 27 #include "savage_drv.h" 28 29 void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, 30 - drm_clip_rect_t * pbox) 31 { 32 uint32_t scstart = dev_priv->state.s3d.new_scstart; 33 uint32_t scend = dev_priv->state.s3d.new_scend; ··· 53 } 54 55 void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, 56 - drm_clip_rect_t * pbox) 57 { 58 uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0; 59 uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1; ··· 115 116 #define SAVE_STATE(reg,where) \ 117 if(start <= reg && start+count > reg) \ 118 - DRM_GET_USER_UNCHECKED(dev_priv->state.where, &regs[reg-start]) 119 #define SAVE_STATE_MASK(reg,where,mask) do { \ 120 if(start <= reg && start+count > reg) { \ 121 uint32_t tmp; \ 122 - DRM_GET_USER_UNCHECKED(tmp, &regs[reg-start]); \ 123 dev_priv->state.where = (tmp & (mask)) | \ 124 (dev_priv->state.where & ~(mask)); \ 125 } \ 126 } while (0) 127 static int savage_verify_state_s3d(drm_savage_private_t * dev_priv, 128 unsigned int start, unsigned int count, 129 - const uint32_t __user * regs) 130 { 131 if (start < SAVAGE_TEXPALADDR_S3D || 132 start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { ··· 149 SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr); 150 if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK) 151 return savage_verify_texaddr(dev_priv, 0, 152 - dev_priv->state.s3d. 153 - texaddr); 154 } 155 156 return 0; ··· 157 158 static int savage_verify_state_s4(drm_savage_private_t * dev_priv, 159 unsigned int start, unsigned int count, 160 - const uint32_t __user * regs) 161 { 162 int ret = 0; 163 ··· 174 ~SAVAGE_SCISSOR_MASK_S4); 175 176 /* if any texture regs were changed ... */ 177 - if (start <= SAVAGE_TEXDESCR_S4 && start + count > SAVAGE_TEXPALADDR_S4) { 178 /* ... check texture state */ 179 SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr); 180 SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0); 181 SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1); 182 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK) 183 - ret |= 184 - savage_verify_texaddr(dev_priv, 0, 185 - dev_priv->state.s4.texaddr0); 186 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK) 187 - ret |= 188 - savage_verify_texaddr(dev_priv, 1, 189 - dev_priv->state.s4.texaddr1); 190 } 191 192 return ret; ··· 196 197 static int savage_dispatch_state(drm_savage_private_t * dev_priv, 198 const drm_savage_cmd_header_t * cmd_header, 199 - const uint32_t __user * regs) 200 { 201 unsigned int count = cmd_header->state.count; 202 unsigned int start = cmd_header->state.start; ··· 207 208 if (!count) 209 return 0; 210 - 211 - if (DRM_VERIFYAREA_READ(regs, count * 4)) 212 - return DRM_ERR(EFAULT); 213 214 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 215 ret = savage_verify_state_s3d(dev_priv, start, count, regs); ··· 232 /* scissor regs are emitted in savage_dispatch_draw */ 233 if (start < SAVAGE_DRAWCTRL0_S4) { 234 if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) 235 - count2 = 236 - count - (SAVAGE_DRAWCTRL1_S4 + 1 - start); 237 if (start + count > SAVAGE_DRAWCTRL0_S4) 238 count = SAVAGE_DRAWCTRL0_S4 - start; 239 } else if (start <= SAVAGE_DRAWCTRL1_S4) { ··· 259 while (count > 0) { 260 unsigned int n = count < 255 ? 
count : 255; 261 DMA_SET_REGISTERS(start, n); 262 - DMA_COPY_FROM_USER(regs, n); 263 count -= n; 264 start += n; 265 regs += n; ··· 417 418 static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, 419 const drm_savage_cmd_header_t * cmd_header, 420 - const uint32_t __user * vtxbuf, 421 - unsigned int vb_size, unsigned int vb_stride) 422 { 423 unsigned char reorder = 0; 424 unsigned int prim = cmd_header->prim.prim; ··· 503 504 for (i = start; i < start + count; ++i) { 505 unsigned int j = i + reorder[i % 3]; 506 - DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j], 507 - vtx_size); 508 } 509 510 DMA_COMMIT(); ··· 512 DMA_DRAW_PRIMITIVE(count, prim, skip); 513 514 if (vb_stride == vtx_size) { 515 - DMA_COPY_FROM_USER(&vtxbuf[vb_stride * start], 516 - vtx_size * count); 517 } else { 518 for (i = start; i < start + count; ++i) { 519 - DMA_COPY_FROM_USER(&vtxbuf 520 - [vb_stride * i], 521 - vtx_size); 522 } 523 } 524 ··· 535 536 static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, 537 const drm_savage_cmd_header_t * cmd_header, 538 - const uint16_t __user * usr_idx, 539 const drm_buf_t * dmabuf) 540 { 541 unsigned char reorder = 0; ··· 622 while (n != 0) { 623 /* Can emit up to 255 indices (85 triangles) at once. */ 624 unsigned int count = n > 255 ? 255 : n; 625 - /* Is it ok to allocate 510 bytes on the stack in an ioctl? */ 626 - uint16_t idx[255]; 627 628 - /* Copy and check indices */ 629 - DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count * 2); 630 for (i = 0; i < count; ++i) { 631 if (idx[i] > dmabuf->total / 32) { 632 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", ··· 643 644 for (i = 1; i + 1 < count; i += 2) 645 BCI_WRITE(idx[i + reorder[i % 3]] | 646 - (idx[i + 1 + reorder[(i + 1) % 3]] << 647 - 16)); 648 if (i < count) 649 BCI_WRITE(idx[i + reorder[i % 3]]); 650 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ··· 665 BCI_WRITE(idx[i]); 666 } 667 668 - usr_idx += count; 669 n -= count; 670 671 prim |= BCI_CMD_DRAW_CONT; ··· 676 677 static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, 678 const drm_savage_cmd_header_t * cmd_header, 679 - const uint16_t __user * usr_idx, 680 - const uint32_t __user * vtxbuf, 681 unsigned int vb_size, unsigned int vb_stride) 682 { 683 unsigned char reorder = 0; ··· 742 while (n != 0) { 743 /* Can emit up to 255 vertices (85 triangles) at once. */ 744 unsigned int count = n > 255 ? 255 : n; 745 - /* Is it ok to allocate 510 bytes on the stack in an ioctl? 
*/ 746 - uint16_t idx[255]; 747 - 748 - /* Copy and check indices */ 749 - DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count * 2); 750 for (i = 0; i < count; ++i) { 751 if (idx[i] > vb_size / (vb_stride * 4)) { 752 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", ··· 763 764 for (i = 0; i < count; ++i) { 765 unsigned int j = idx[i + reorder[i % 3]]; 766 - DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j], 767 - vtx_size); 768 } 769 770 DMA_COMMIT(); ··· 773 774 for (i = 0; i < count; ++i) { 775 unsigned int j = idx[i]; 776 - DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j], 777 - vtx_size); 778 } 779 780 DMA_COMMIT(); 781 } 782 783 - usr_idx += count; 784 n -= count; 785 786 prim |= BCI_CMD_DRAW_CONT; ··· 790 791 static int savage_dispatch_clear(drm_savage_private_t * dev_priv, 792 const drm_savage_cmd_header_t * cmd_header, 793 - const drm_savage_cmd_header_t __user * data, 794 unsigned int nbox, 795 - const drm_clip_rect_t __user * usr_boxes) 796 { 797 - unsigned int flags = cmd_header->clear0.flags, mask, value; 798 unsigned int clear_cmd; 799 unsigned int i, nbufs; 800 DMA_LOCALS; 801 802 if (nbox == 0) 803 return 0; 804 - 805 - DRM_GET_USER_UNCHECKED(mask, &data->clear1.mask); 806 - DRM_GET_USER_UNCHECKED(value, &data->clear1.value); 807 808 clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | 809 BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW; ··· 811 if (nbufs == 0) 812 return 0; 813 814 - if (mask != 0xffffffff) { 815 /* set mask */ 816 BEGIN_DMA(2); 817 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); 818 - DMA_WRITE(mask); 819 DMA_COMMIT(); 820 } 821 for (i = 0; i < nbox; ++i) { 822 - drm_clip_rect_t box; 823 unsigned int x, y, w, h; 824 unsigned int buf; 825 - DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box)); 826 - x = box.x1, y = box.y1; 827 - w = box.x2 - box.x1; 828 - h = box.y2 - box.y1; 829 BEGIN_DMA(nbufs * 6); 830 for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) { 831 if (!(flags & buf)) ··· 843 DMA_WRITE(dev_priv->depth_bd); 844 break; 845 } 846 - DMA_WRITE(value); 847 DMA_WRITE(BCI_X_Y(x, y)); 848 DMA_WRITE(BCI_W_H(w, h)); 849 } 850 DMA_COMMIT(); 851 } 852 - if (mask != 0xffffffff) { 853 /* reset mask */ 854 BEGIN_DMA(2); 855 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); ··· 861 } 862 863 static int savage_dispatch_swap(drm_savage_private_t * dev_priv, 864 - unsigned int nbox, 865 - const drm_clip_rect_t __user * usr_boxes) 866 { 867 unsigned int swap_cmd; 868 unsigned int i; ··· 875 BCI_CMD_SET_ROP(swap_cmd, 0xCC); 876 877 for (i = 0; i < nbox; ++i) { 878 - drm_clip_rect_t box; 879 - DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box)); 880 - 881 BEGIN_DMA(6); 882 DMA_WRITE(swap_cmd); 883 DMA_WRITE(dev_priv->back_offset); 884 DMA_WRITE(dev_priv->back_bd); 885 - DMA_WRITE(BCI_X_Y(box.x1, box.y1)); 886 - DMA_WRITE(BCI_X_Y(box.x1, box.y1)); 887 - DMA_WRITE(BCI_W_H(box.x2 - box.x1, box.y2 - box.y1)); 888 DMA_COMMIT(); 889 } 890 ··· 890 } 891 892 static int savage_dispatch_draw(drm_savage_private_t * dev_priv, 893 - const drm_savage_cmd_header_t __user * start, 894 - const drm_savage_cmd_header_t __user * end, 895 const drm_buf_t * dmabuf, 896 - const unsigned int __user * usr_vtxbuf, 897 unsigned int vb_size, unsigned int vb_stride, 898 unsigned int nbox, 899 - const drm_clip_rect_t __user * usr_boxes) 900 { 901 unsigned int i, j; 902 int ret; 903 904 for (i = 0; i < nbox; ++i) { 905 - drm_clip_rect_t box; 906 - const drm_savage_cmd_header_t __user *usr_cmdbuf; 907 - DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box)); 908 - 
dev_priv->emit_clip_rect(dev_priv, &box); 909 910 - usr_cmdbuf = start; 911 - while (usr_cmdbuf < end) { 912 drm_savage_cmd_header_t cmd_header; 913 - DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf, 914 - sizeof(cmd_header)); 915 - usr_cmdbuf++; 916 switch (cmd_header.cmd.cmd) { 917 case SAVAGE_CMD_DMA_PRIM: 918 - ret = 919 - savage_dispatch_dma_prim(dev_priv, 920 - &cmd_header, 921 - dmabuf); 922 break; 923 case SAVAGE_CMD_VB_PRIM: 924 - ret = 925 - savage_dispatch_vb_prim(dev_priv, 926 - &cmd_header, 927 - (const uint32_t 928 - __user *) 929 - usr_vtxbuf, vb_size, 930 - vb_stride); 931 break; 932 case SAVAGE_CMD_DMA_IDX: 933 j = (cmd_header.idx.count + 3) / 4; 934 /* j was check in savage_bci_cmdbuf */ 935 - ret = 936 - savage_dispatch_dma_idx(dev_priv, 937 - &cmd_header, 938 - (const uint16_t 939 - __user *) 940 - usr_cmdbuf, dmabuf); 941 - usr_cmdbuf += j; 942 break; 943 case SAVAGE_CMD_VB_IDX: 944 j = (cmd_header.idx.count + 3) / 4; 945 /* j was check in savage_bci_cmdbuf */ 946 - ret = 947 - savage_dispatch_vb_idx(dev_priv, 948 - &cmd_header, 949 - (const uint16_t 950 - __user *)usr_cmdbuf, 951 - (const uint32_t 952 - __user *)usr_vtxbuf, 953 - vb_size, vb_stride); 954 - usr_cmdbuf += j; 955 break; 956 default: 957 /* What's the best return code? EFAULT? */ ··· 960 drm_device_dma_t *dma = dev->dma; 961 drm_buf_t *dmabuf; 962 drm_savage_cmdbuf_t cmdbuf; 963 - drm_savage_cmd_header_t __user *usr_cmdbuf; 964 - drm_savage_cmd_header_t __user *first_draw_cmd; 965 - unsigned int __user *usr_vtxbuf; 966 - drm_clip_rect_t __user *usr_boxes; 967 unsigned int i, j; 968 int ret = 0; 969 ··· 986 dmabuf = NULL; 987 } 988 989 - usr_cmdbuf = (drm_savage_cmd_header_t __user *) cmdbuf.cmd_addr; 990 - usr_vtxbuf = (unsigned int __user *)cmdbuf.vb_addr; 991 - usr_boxes = (drm_clip_rect_t __user *) cmdbuf.box_addr; 992 - if ((cmdbuf.size && DRM_VERIFYAREA_READ(usr_cmdbuf, cmdbuf.size * 8)) || 993 - (cmdbuf.vb_size && DRM_VERIFYAREA_READ(usr_vtxbuf, cmdbuf.vb_size)) 994 - || (cmdbuf.nbox 995 - && DRM_VERIFYAREA_READ(usr_boxes, 996 - cmdbuf.nbox * sizeof(drm_clip_rect_t)))) 997 - return DRM_ERR(EFAULT); 998 999 /* Make sure writes to DMA buffers are finished before sending 1000 * DMA commands to the graphics hardware. 
*/ ··· 1046 first_draw_cmd = NULL; 1047 while (i < cmdbuf.size) { 1048 drm_savage_cmd_header_t cmd_header; 1049 - DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf, 1050 - sizeof(cmd_header)); 1051 - usr_cmdbuf++; 1052 i++; 1053 1054 /* Group drawing commands with same state to minimize ··· 1067 case SAVAGE_CMD_DMA_PRIM: 1068 case SAVAGE_CMD_VB_PRIM: 1069 if (!first_draw_cmd) 1070 - first_draw_cmd = usr_cmdbuf - 1; 1071 - usr_cmdbuf += j; 1072 i += j; 1073 break; 1074 default: 1075 if (first_draw_cmd) { 1076 - ret = 1077 - savage_dispatch_draw(dev_priv, 1078 - first_draw_cmd, 1079 - usr_cmdbuf - 1, dmabuf, 1080 - usr_vtxbuf, 1081 - cmdbuf.vb_size, 1082 - cmdbuf.vb_stride, 1083 - cmdbuf.nbox, 1084 - usr_boxes); 1085 if (ret != 0) 1086 return ret; 1087 first_draw_cmd = NULL; ··· 1094 DRM_ERROR("command SAVAGE_CMD_STATE extends " 1095 "beyond end of command buffer\n"); 1096 DMA_FLUSH(); 1097 - return DRM_ERR(EINVAL); 1098 } 1099 ret = savage_dispatch_state(dev_priv, &cmd_header, 1100 - (uint32_t __user *) 1101 - usr_cmdbuf); 1102 - usr_cmdbuf += j; 1103 i += j; 1104 break; 1105 case SAVAGE_CMD_CLEAR: ··· 1107 DRM_ERROR("command SAVAGE_CMD_CLEAR extends " 1108 "beyond end of command buffer\n"); 1109 DMA_FLUSH(); 1110 - return DRM_ERR(EINVAL); 1111 } 1112 ret = savage_dispatch_clear(dev_priv, &cmd_header, 1113 - usr_cmdbuf, 1114 - cmdbuf.nbox, usr_boxes); 1115 - usr_cmdbuf++; 1116 i++; 1117 break; 1118 case SAVAGE_CMD_SWAP: 1119 - ret = savage_dispatch_swap(dev_priv, 1120 - cmdbuf.nbox, usr_boxes); 1121 break; 1122 default: 1123 DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd); 1124 DMA_FLUSH(); 1125 - return DRM_ERR(EINVAL); 1126 } 1127 1128 if (ret != 0) { 1129 DMA_FLUSH(); 1130 - return ret; 1131 } 1132 } 1133 1134 if (first_draw_cmd) { 1135 - ret = 1136 - savage_dispatch_draw(dev_priv, first_draw_cmd, usr_cmdbuf, 1137 - dmabuf, usr_vtxbuf, cmdbuf.vb_size, 1138 - cmdbuf.vb_stride, cmdbuf.nbox, 1139 - usr_boxes); 1140 if (ret != 0) { 1141 DMA_FLUSH(); 1142 - return ret; 1143 } 1144 } 1145 ··· 1154 savage_freelist_put(dev, dmabuf); 1155 } 1156 1157 - return 0; 1158 }
··· 27 #include "savage_drv.h" 28 29 void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, 30 + const drm_clip_rect_t * pbox) 31 { 32 uint32_t scstart = dev_priv->state.s3d.new_scstart; 33 uint32_t scend = dev_priv->state.s3d.new_scend; ··· 53 } 54 55 void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, 56 + const drm_clip_rect_t * pbox) 57 { 58 uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0; 59 uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1; ··· 115 116 #define SAVE_STATE(reg,where) \ 117 if(start <= reg && start+count > reg) \ 118 + dev_priv->state.where = regs[reg - start] 119 #define SAVE_STATE_MASK(reg,where,mask) do { \ 120 if(start <= reg && start+count > reg) { \ 121 uint32_t tmp; \ 122 + tmp = regs[reg - start]; \ 123 dev_priv->state.where = (tmp & (mask)) | \ 124 (dev_priv->state.where & ~(mask)); \ 125 } \ 126 } while (0) 127 + 128 static int savage_verify_state_s3d(drm_savage_private_t * dev_priv, 129 unsigned int start, unsigned int count, 130 + const uint32_t *regs) 131 { 132 if (start < SAVAGE_TEXPALADDR_S3D || 133 start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { ··· 148 SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr); 149 if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK) 150 return savage_verify_texaddr(dev_priv, 0, 151 + dev_priv->state.s3d.texaddr); 152 } 153 154 return 0; ··· 157 158 static int savage_verify_state_s4(drm_savage_private_t * dev_priv, 159 unsigned int start, unsigned int count, 160 + const uint32_t *regs) 161 { 162 int ret = 0; 163 ··· 174 ~SAVAGE_SCISSOR_MASK_S4); 175 176 /* if any texture regs were changed ... */ 177 + if (start <= SAVAGE_TEXDESCR_S4 && 178 + start + count > SAVAGE_TEXPALADDR_S4) { 179 /* ... check texture state */ 180 SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr); 181 SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0); 182 SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1); 183 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK) 184 + ret |= savage_verify_texaddr(dev_priv, 0, 185 + dev_priv->state.s4.texaddr0); 186 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK) 187 + ret |= savage_verify_texaddr(dev_priv, 1, 188 + dev_priv->state.s4.texaddr1); 189 } 190 191 return ret; ··· 197 198 static int savage_dispatch_state(drm_savage_private_t * dev_priv, 199 const drm_savage_cmd_header_t * cmd_header, 200 + const uint32_t *regs) 201 { 202 unsigned int count = cmd_header->state.count; 203 unsigned int start = cmd_header->state.start; ··· 208 209 if (!count) 210 return 0; 211 212 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 213 ret = savage_verify_state_s3d(dev_priv, start, count, regs); ··· 236 /* scissor regs are emitted in savage_dispatch_draw */ 237 if (start < SAVAGE_DRAWCTRL0_S4) { 238 if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) 239 + count2 = count - 240 + (SAVAGE_DRAWCTRL1_S4 + 1 - start); 241 if (start + count > SAVAGE_DRAWCTRL0_S4) 242 count = SAVAGE_DRAWCTRL0_S4 - start; 243 } else if (start <= SAVAGE_DRAWCTRL1_S4) { ··· 263 while (count > 0) { 264 unsigned int n = count < 255 ? 
count : 255; 265 DMA_SET_REGISTERS(start, n); 266 + DMA_COPY(regs, n); 267 count -= n; 268 start += n; 269 regs += n; ··· 421 422 static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, 423 const drm_savage_cmd_header_t * cmd_header, 424 + const uint32_t *vtxbuf, unsigned int vb_size, 425 + unsigned int vb_stride) 426 { 427 unsigned char reorder = 0; 428 unsigned int prim = cmd_header->prim.prim; ··· 507 508 for (i = start; i < start + count; ++i) { 509 unsigned int j = i + reorder[i % 3]; 510 + DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); 511 } 512 513 DMA_COMMIT(); ··· 517 DMA_DRAW_PRIMITIVE(count, prim, skip); 518 519 if (vb_stride == vtx_size) { 520 + DMA_COPY(&vtxbuf[vb_stride * start], 521 + vtx_size * count); 522 } else { 523 for (i = start; i < start + count; ++i) { 524 + DMA_COPY(&vtxbuf [vb_stride * i], 525 + vtx_size); 526 } 527 } 528 ··· 541 542 static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, 543 const drm_savage_cmd_header_t * cmd_header, 544 + const uint16_t *idx, 545 const drm_buf_t * dmabuf) 546 { 547 unsigned char reorder = 0; ··· 628 while (n != 0) { 629 /* Can emit up to 255 indices (85 triangles) at once. */ 630 unsigned int count = n > 255 ? 255 : n; 631 632 + /* check indices */ 633 for (i = 0; i < count; ++i) { 634 if (idx[i] > dmabuf->total / 32) { 635 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", ··· 652 653 for (i = 1; i + 1 < count; i += 2) 654 BCI_WRITE(idx[i + reorder[i % 3]] | 655 + (idx[i + 1 + 656 + reorder[(i + 1) % 3]] << 16)); 657 if (i < count) 658 BCI_WRITE(idx[i + reorder[i % 3]]); 659 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ··· 674 BCI_WRITE(idx[i]); 675 } 676 677 + idx += count; 678 n -= count; 679 680 prim |= BCI_CMD_DRAW_CONT; ··· 685 686 static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, 687 const drm_savage_cmd_header_t * cmd_header, 688 + const uint16_t *idx, 689 + const uint32_t *vtxbuf, 690 unsigned int vb_size, unsigned int vb_stride) 691 { 692 unsigned char reorder = 0; ··· 751 while (n != 0) { 752 /* Can emit up to 255 vertices (85 triangles) at once. */ 753 unsigned int count = n > 255 ? 
255 : n; 754 + 755 + /* Check indices */ 756 for (i = 0; i < count; ++i) { 757 if (idx[i] > vb_size / (vb_stride * 4)) { 758 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", ··· 775 776 for (i = 0; i < count; ++i) { 777 unsigned int j = idx[i + reorder[i % 3]]; 778 + DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); 779 } 780 781 DMA_COMMIT(); ··· 786 787 for (i = 0; i < count; ++i) { 788 unsigned int j = idx[i]; 789 + DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); 790 } 791 792 DMA_COMMIT(); 793 } 794 795 + idx += count; 796 n -= count; 797 798 prim |= BCI_CMD_DRAW_CONT; ··· 804 805 static int savage_dispatch_clear(drm_savage_private_t * dev_priv, 806 const drm_savage_cmd_header_t * cmd_header, 807 + const drm_savage_cmd_header_t *data, 808 unsigned int nbox, 809 + const drm_clip_rect_t *boxes) 810 { 811 + unsigned int flags = cmd_header->clear0.flags; 812 unsigned int clear_cmd; 813 unsigned int i, nbufs; 814 DMA_LOCALS; 815 816 if (nbox == 0) 817 return 0; 818 819 clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | 820 BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW; ··· 828 if (nbufs == 0) 829 return 0; 830 831 + if (data->clear1.mask != 0xffffffff) { 832 /* set mask */ 833 BEGIN_DMA(2); 834 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); 835 + DMA_WRITE(data->clear1.mask); 836 DMA_COMMIT(); 837 } 838 for (i = 0; i < nbox; ++i) { 839 unsigned int x, y, w, h; 840 unsigned int buf; 841 + x = boxes[i].x1, y = boxes[i].y1; 842 + w = boxes[i].x2 - boxes[i].x1; 843 + h = boxes[i].y2 - boxes[i].y1; 844 BEGIN_DMA(nbufs * 6); 845 for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) { 846 if (!(flags & buf)) ··· 862 DMA_WRITE(dev_priv->depth_bd); 863 break; 864 } 865 + DMA_WRITE(data->clear1.value); 866 DMA_WRITE(BCI_X_Y(x, y)); 867 DMA_WRITE(BCI_W_H(w, h)); 868 } 869 DMA_COMMIT(); 870 } 871 + if (data->clear1.mask != 0xffffffff) { 872 /* reset mask */ 873 BEGIN_DMA(2); 874 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); ··· 880 } 881 882 static int savage_dispatch_swap(drm_savage_private_t * dev_priv, 883 + unsigned int nbox, const drm_clip_rect_t *boxes) 884 { 885 unsigned int swap_cmd; 886 unsigned int i; ··· 895 BCI_CMD_SET_ROP(swap_cmd, 0xCC); 896 897 for (i = 0; i < nbox; ++i) { 898 BEGIN_DMA(6); 899 DMA_WRITE(swap_cmd); 900 DMA_WRITE(dev_priv->back_offset); 901 DMA_WRITE(dev_priv->back_bd); 902 + DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1)); 903 + DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1)); 904 + DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1, 905 + boxes[i].y2 - boxes[i].y1)); 906 DMA_COMMIT(); 907 } 908 ··· 912 } 913 914 static int savage_dispatch_draw(drm_savage_private_t * dev_priv, 915 + const drm_savage_cmd_header_t *start, 916 + const drm_savage_cmd_header_t *end, 917 const drm_buf_t * dmabuf, 918 + const unsigned int *vtxbuf, 919 unsigned int vb_size, unsigned int vb_stride, 920 unsigned int nbox, 921 + const drm_clip_rect_t *boxes) 922 { 923 unsigned int i, j; 924 int ret; 925 926 for (i = 0; i < nbox; ++i) { 927 + const drm_savage_cmd_header_t *cmdbuf; 928 + dev_priv->emit_clip_rect(dev_priv, &boxes[i]); 929 930 + cmdbuf = start; 931 + while (cmdbuf < end) { 932 drm_savage_cmd_header_t cmd_header; 933 + cmd_header = *cmdbuf; 934 + cmdbuf++; 935 switch (cmd_header.cmd.cmd) { 936 case SAVAGE_CMD_DMA_PRIM: 937 + ret = savage_dispatch_dma_prim( 938 + dev_priv, &cmd_header, dmabuf); 939 break; 940 case SAVAGE_CMD_VB_PRIM: 941 + ret = savage_dispatch_vb_prim( 942 + dev_priv, &cmd_header, 943 + vtxbuf, vb_size, vb_stride); 944 break; 945 case SAVAGE_CMD_DMA_IDX: 946 j = (cmd_header.idx.count + 
3) / 4; 947 /* j was checked in savage_bci_cmdbuf */ 948 + ret = savage_dispatch_dma_idx(dev_priv, 949 + &cmd_header, (const uint16_t *)cmdbuf, 950 + dmabuf); 951 + cmdbuf += j; 952 break; 953 case SAVAGE_CMD_VB_IDX: 954 j = (cmd_header.idx.count + 3) / 4; 955 /* j was checked in savage_bci_cmdbuf */ 956 + ret = savage_dispatch_vb_idx(dev_priv, 957 + &cmd_header, (const uint16_t *)cmdbuf, 958 + (const uint32_t *)vtxbuf, vb_size, 959 + vb_stride); 960 + cmdbuf += j; 961 break; 962 default: 963 /* What's the best return code? EFAULT? */ ··· 998 drm_device_dma_t *dma = dev->dma; 999 drm_buf_t *dmabuf; 1000 drm_savage_cmdbuf_t cmdbuf; 1001 + drm_savage_cmd_header_t *kcmd_addr = NULL; 1002 + drm_savage_cmd_header_t *first_draw_cmd; 1003 + unsigned int *kvb_addr = NULL; 1004 + drm_clip_rect_t *kbox_addr = NULL; 1005 unsigned int i, j; 1006 int ret = 0; 1007 ··· 1024 dmabuf = NULL; 1025 } 1026 1027 + /* Copy the user buffers into kernel temporary areas. This hasn't been 1028 + * a performance loss compared to VERIFYAREA_READ/ 1029 + * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct 1030 + * for locking on FreeBSD. 1031 + */ 1032 + if (cmdbuf.size) { 1033 + kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER); 1034 + if (kcmd_addr == NULL) 1035 + return DRM_ERR(ENOMEM); 1036 + 1037 + if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr, 1038 + cmdbuf.size * 8)) 1039 + { 1040 + drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); 1041 + return DRM_ERR(EFAULT); 1042 + } 1043 + cmdbuf.cmd_addr = kcmd_addr; 1044 + } 1045 + if (cmdbuf.vb_size) { 1046 + kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER); 1047 + if (kvb_addr == NULL) { 1048 + ret = DRM_ERR(ENOMEM); 1049 + goto done; 1050 + } 1051 + 1052 + if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr, 1053 + cmdbuf.vb_size)) { 1054 + ret = DRM_ERR(EFAULT); 1055 + goto done; 1056 + } 1057 + cmdbuf.vb_addr = kvb_addr; 1058 + } 1059 + if (cmdbuf.nbox) { 1060 + kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(drm_clip_rect_t), 1061 + DRM_MEM_DRIVER); 1062 + if (kbox_addr == NULL) { 1063 + ret = DRM_ERR(ENOMEM); 1064 + goto done; 1065 + } 1066 + 1067 + if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr, 1068 + cmdbuf.nbox * sizeof(drm_clip_rect_t))) { 1069 + ret = DRM_ERR(EFAULT); 1070 + goto done; 1071 + } 1072 + cmdbuf.box_addr = kbox_addr; 1073 + } 1074 1075 /* Make sure writes to DMA buffers are finished before sending 1076 * DMA commands to the graphics hardware. 
*/ ··· 1046 first_draw_cmd = NULL; 1047 while (i < cmdbuf.size) { 1048 drm_savage_cmd_header_t cmd_header; 1049 + cmd_header = *(drm_savage_cmd_header_t *)cmdbuf.cmd_addr; 1050 + cmdbuf.cmd_addr++; 1051 i++; 1052 1053 /* Group drawing commands with same state to minimize ··· 1068 case SAVAGE_CMD_DMA_PRIM: 1069 case SAVAGE_CMD_VB_PRIM: 1070 if (!first_draw_cmd) 1071 + first_draw_cmd = cmdbuf.cmd_addr - 1; 1072 + cmdbuf.cmd_addr += j; 1073 i += j; 1074 break; 1075 default: 1076 if (first_draw_cmd) { 1077 + ret = savage_dispatch_draw( 1078 + dev_priv, first_draw_cmd, 1079 + cmdbuf.cmd_addr - 1, 1080 + dmabuf, cmdbuf.vb_addr, cmdbuf.vb_size, 1081 + cmdbuf.vb_stride, 1082 + cmdbuf.nbox, cmdbuf.box_addr); 1083 if (ret != 0) 1084 return ret; 1085 first_draw_cmd = NULL; ··· 1098 DRM_ERROR("command SAVAGE_CMD_STATE extends " 1099 "beyond end of command buffer\n"); 1100 DMA_FLUSH(); 1101 + ret = DRM_ERR(EINVAL); 1102 + goto done; 1103 } 1104 ret = savage_dispatch_state(dev_priv, &cmd_header, 1105 + (const uint32_t *)cmdbuf.cmd_addr); 1106 + cmdbuf.cmd_addr += j; 1107 i += j; 1108 break; 1109 case SAVAGE_CMD_CLEAR: ··· 1111 DRM_ERROR("command SAVAGE_CMD_CLEAR extends " 1112 "beyond end of command buffer\n"); 1113 DMA_FLUSH(); 1114 + ret = DRM_ERR(EINVAL); 1115 + goto done; 1116 } 1117 ret = savage_dispatch_clear(dev_priv, &cmd_header, 1118 + cmdbuf.cmd_addr, 1119 + cmdbuf.nbox, cmdbuf.box_addr); 1120 + cmdbuf.cmd_addr++; 1121 i++; 1122 break; 1123 case SAVAGE_CMD_SWAP: 1124 + ret = savage_dispatch_swap(dev_priv, cmdbuf.nbox, 1125 + cmdbuf.box_addr); 1126 break; 1127 default: 1128 DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd); 1129 DMA_FLUSH(); 1130 + ret = DRM_ERR(EINVAL); 1131 + goto done; 1132 } 1133 1134 if (ret != 0) { 1135 DMA_FLUSH(); 1136 + goto done; 1137 } 1138 } 1139 1140 if (first_draw_cmd) { 1141 + ret = savage_dispatch_draw ( 1142 + dev_priv, first_draw_cmd, cmdbuf.cmd_addr, dmabuf, 1143 + cmdbuf.vb_addr, cmdbuf.vb_size, cmdbuf.vb_stride, 1144 + cmdbuf.nbox, cmdbuf.box_addr); 1145 if (ret != 0) { 1146 DMA_FLUSH(); 1147 + goto done; 1148 } 1149 } 1150 ··· 1157 savage_freelist_put(dev, dmabuf); 1158 } 1159 1160 + done: 1161 + /* If we didn't need to allocate them, these'll be NULL */ 1162 + drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); 1163 + drm_free(kvb_addr, cmdbuf.vb_size, DRM_MEM_DRIVER); 1164 + drm_free(kbox_addr, cmdbuf.nbox * sizeof(drm_clip_rect_t), 1165 + DRM_MEM_DRIVER); 1166 + 1167 + return ret; 1168 }
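The heart of the savage_state.c rewrite is above: rather than DRM_VERIFYAREA_READ followed by scattered *_UNCHECKED reads, savage_bci_cmdbuf() now copies the command stream, vertex buffer, and clip-rect array into kernel temporaries once, and every dispatch helper takes plain const kernel pointers. Besides the FreeBSD locking correctness noted in the code comment, this closes the window in which user space could rewrite already-validated data between the check and the use. The idiom, reduced to a minimal sketch (helper name hypothetical):

/* Minimal sketch of the staging idiom used above: validate and
 * consume a snapshot that user space can no longer modify. */
static int stage_user_buffer_sketch(const void __user *user_ptr,
				    size_t size, void **out)
{
	void *tmp = drm_alloc(size, DRM_MEM_DRIVER);
	if (tmp == NULL)
		return DRM_ERR(ENOMEM);
	if (DRM_COPY_FROM_USER(tmp, user_ptr, size)) {
		drm_free(tmp, size, DRM_MEM_DRIVER);
		return DRM_ERR(EFAULT);
	}
	*out = tmp;	/* caller validates, dispatches, then drm_free()s */
	return 0;
}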
+25
drivers/char/drm/sis_drm.h
··· 1 2 #ifndef __SIS_DRM_H__ 3 #define __SIS_DRM_H__
··· 1 + /* sis_drv.h -- Private header for sis driver -*- linux-c -*- */ 2 + /* 3 + * Copyright 2005 Eric Anholt 4 + * All Rights Reserved. 5 + * 6 + * Permission is hereby granted, free of charge, to any person obtaining a 7 + * copy of this software and associated documentation files (the "Software"), 8 + * to deal in the Software without restriction, including without limitation 9 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 + * and/or sell copies of the Software, and to permit persons to whom the 11 + * Software is furnished to do so, subject to the following conditions: 12 + * 13 + * The above copyright notice and this permission notice (including the next 14 + * paragraph) shall be included in all copies or substantial portions of the 15 + * Software. 16 + * 17 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 + * SOFTWARE. 24 + * 25 + */ 26 27 #ifndef __SIS_DRM_H__ 28 #define __SIS_DRM_H__
+11 -31
drivers/char/drm/sis_drv.c
··· 32 33 #include "drm_pciids.h" 34 35 - static int postinit(struct drm_device *dev, unsigned long flags) 36 - { 37 - DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n", 38 - DRIVER_NAME, 39 - DRIVER_MAJOR, 40 - DRIVER_MINOR, 41 - DRIVER_PATCHLEVEL, 42 - DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev) 43 - ); 44 - return 0; 45 - } 46 - 47 - static int version(drm_version_t * version) 48 - { 49 - int len; 50 - 51 - version->version_major = DRIVER_MAJOR; 52 - version->version_minor = DRIVER_MINOR; 53 - version->version_patchlevel = DRIVER_PATCHLEVEL; 54 - DRM_COPY(version->name, DRIVER_NAME); 55 - DRM_COPY(version->date, DRIVER_DATE); 56 - DRM_COPY(version->desc, DRIVER_DESC); 57 - return 0; 58 - } 59 - 60 static struct pci_device_id pciidlist[] = { 61 sisdrv_PCI_IDS 62 }; ··· 43 .reclaim_buffers = drm_core_reclaim_buffers, 44 .get_map_ofs = drm_core_get_map_ofs, 45 .get_reg_ofs = drm_core_get_reg_ofs, 46 - .postinit = postinit, 47 - .version = version, 48 .ioctls = sis_ioctls, 49 .fops = { 50 .owner = THIS_MODULE, ··· 52 .mmap = drm_mmap, 53 .poll = drm_poll, 54 .fasync = drm_fasync, 55 - }, 56 .pci_driver = { 57 - .name = DRIVER_NAME, 58 - .id_table = pciidlist, 59 - } 60 }; 61 62 static int __init sis_init(void)
··· 32 33 #include "drm_pciids.h" 34 35 static struct pci_device_id pciidlist[] = { 36 sisdrv_PCI_IDS 37 }; ··· 68 .reclaim_buffers = drm_core_reclaim_buffers, 69 .get_map_ofs = drm_core_get_map_ofs, 70 .get_reg_ofs = drm_core_get_reg_ofs, 71 .ioctls = sis_ioctls, 72 .fops = { 73 .owner = THIS_MODULE, ··· 79 .mmap = drm_mmap, 80 .poll = drm_poll, 81 .fasync = drm_fasync, 82 + }, 83 .pci_driver = { 84 + .name = DRIVER_NAME, 85 + .id_table = pciidlist, 86 + }, 87 + 88 + .name = DRIVER_NAME, 89 + .desc = DRIVER_DESC, 90 + .date = DRIVER_DATE, 91 + .major = DRIVER_MAJOR, 92 + .minor = DRIVER_MINOR, 93 + .patchlevel = DRIVER_PATCHLEVEL, 94 }; 95 96 static int __init sis_init(void)
+2 -2
drivers/char/drm/sis_drv.h
··· 1 - /* sis_drv.h -- Private header for sis driver -*- linux-c -*- 2 - * 3 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 4 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 5 * All rights reserved.
··· 1 + /* sis_drv.h -- Private header for sis driver -*- linux-c -*- */ 2 + /* 3 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 4 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 5 * All rights reserved.
+4 -3
drivers/char/drm/sis_ds.h
··· 1 - /* sis_ds.h -- Private header for Direct Rendering Manager -*- linux-c -*- 2 * Created: Mon Jan 4 10:05:05 1999 by sclin@sis.com.tw 3 - * 4 * Copyright 2000 Silicon Integrated Systems Corp, Inc., HsinChu, Taiwan. 5 * All rights reserved. 6 * ··· 36 37 #define SET_SIZE 5000 38 39 - typedef unsigned int ITEM_TYPE; 40 41 typedef struct { 42 ITEM_TYPE val;
··· 1 + /* sis_ds.h -- Private header for Direct Rendering Manager -*- linux-c -*- 2 * Created: Mon Jan 4 10:05:05 1999 by sclin@sis.com.tw 3 + */ 4 + /* 5 * Copyright 2000 Silicon Integrated Systems Corp, Inc., HsinChu, Taiwan. 6 * All rights reserved. 7 * ··· 35 36 #define SET_SIZE 5000 37 38 + typedef unsigned long ITEM_TYPE; 39 40 typedef struct { 41 ITEM_TYPE val;
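The one functional change in sis_ds.h is widening ITEM_TYPE from unsigned int to unsigned long. The set code stores PMemBlock pointers in ITEM_TYPE slots (see mmFreeMem((PMemBlock) item) in sis_mm.c below), and on 64-bit kernels a 32-bit integer truncates the pointer; unsigned long matches pointer width on both 32- and 64-bit Linux. An illustration of the round-trip requirement (helper names hypothetical):

/* Why unsigned long: an ITEM_TYPE slot must round-trip a pointer.
 * With unsigned int the cast chain loses the top 32 bits on LP64. */
typedef unsigned long ITEM_TYPE;

static inline ITEM_TYPE item_from_block(void *block)
{
	return (ITEM_TYPE)block;	/* lossless: same width as void * */
}

static inline void *block_from_item(ITEM_TYPE item)
{
	return (void *)item;
}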
+15 -15
drivers/char/drm/sis_mm.c
··· 86 { 87 drm_sis_mem_t fb; 88 struct sis_memreq req; 89 - drm_sis_mem_t __user *argp = (void __user *)data; 90 int retval = 0; 91 92 DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb)); ··· 110 111 DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb)); 112 113 - DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, req.offset); 114 115 return retval; 116 } ··· 127 128 if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free)) 129 retval = DRM_ERR(EINVAL); 130 - sis_free((u32) fb.free); 131 132 - DRM_DEBUG("free fb, offset = %lu\n", fb.free); 133 134 return retval; 135 } ··· 176 { 177 DRM_DEVICE; 178 drm_sis_private_t *dev_priv = dev->dev_private; 179 - drm_sis_mem_t __user *argp = (void __user *)data; 180 drm_sis_mem_t fb; 181 PMemBlock block; 182 int retval = 0; ··· 267 { 268 DRM_DEVICE; 269 drm_sis_private_t *dev_priv = dev->dev_private; 270 - drm_sis_mem_t __user *argp = (void __user *)data; 271 drm_sis_mem_t agp; 272 PMemBlock block; 273 int retval = 0; ··· 367 368 if (i < MAX_CONTEXT) { 369 set_t *set; 370 - unsigned int item; 371 int retval; 372 373 DRM_DEBUG("find socket %d, context = %d\n", i, context); ··· 376 set = global_ppriv[i].sets[0]; 377 retval = setFirst(set, &item); 378 while (retval) { 379 - DRM_DEBUG("free video memory 0x%x\n", item); 380 #if defined(__linux__) && defined(CONFIG_FB_SIS) 381 sis_free(item); 382 #else ··· 390 set = global_ppriv[i].sets[1]; 391 retval = setFirst(set, &item); 392 while (retval) { 393 - DRM_DEBUG("free agp memory 0x%x\n", item); 394 mmFreeMem((PMemBlock) item); 395 retval = setNext(set, &item); 396 } ··· 403 } 404 405 drm_ioctl_desc_t sis_ioctls[] = { 406 - [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, 1, 0}, 407 - [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_fb_free, 1, 0}, 408 - [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = {sis_ioctl_agp_init, 1, 1}, 409 - [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, 1, 0}, 410 - [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_ioctl_agp_free, 1, 0}, 411 - [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = {sis_fb_init, 1, 1} 412 }; 413 414 int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
··· 86 { 87 drm_sis_mem_t fb; 88 struct sis_memreq req; 89 + drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data; 90 int retval = 0; 91 92 DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb)); ··· 110 111 DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb)); 112 113 + DRM_DEBUG("alloc fb, size = %d, offset = %ld\n", fb.size, req.offset); 114 115 return retval; 116 } ··· 127 128 if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free)) 129 retval = DRM_ERR(EINVAL); 130 + sis_free(fb.free); 131 132 + DRM_DEBUG("free fb, offset = 0x%lx\n", fb.free); 133 134 return retval; 135 } ··· 176 { 177 DRM_DEVICE; 178 drm_sis_private_t *dev_priv = dev->dev_private; 179 + drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data; 180 drm_sis_mem_t fb; 181 PMemBlock block; 182 int retval = 0; ··· 267 { 268 DRM_DEVICE; 269 drm_sis_private_t *dev_priv = dev->dev_private; 270 + drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data; 271 drm_sis_mem_t agp; 272 PMemBlock block; 273 int retval = 0; ··· 367 368 if (i < MAX_CONTEXT) { 369 set_t *set; 370 + ITEM_TYPE item; 371 int retval; 372 373 DRM_DEBUG("find socket %d, context = %d\n", i, context); ··· 376 set = global_ppriv[i].sets[0]; 377 retval = setFirst(set, &item); 378 while (retval) { 379 + DRM_DEBUG("free video memory 0x%lx\n", item); 380 #if defined(__linux__) && defined(CONFIG_FB_SIS) 381 sis_free(item); 382 #else ··· 390 set = global_ppriv[i].sets[1]; 391 retval = setFirst(set, &item); 392 while (retval) { 393 + DRM_DEBUG("free agp memory 0x%lx\n", item); 394 mmFreeMem((PMemBlock) item); 395 retval = setNext(set, &item); 396 } ··· 403 } 404 405 drm_ioctl_desc_t sis_ioctls[] = { 406 + [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH}, 407 + [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_fb_free, DRM_AUTH}, 408 + [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = {sis_ioctl_agp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 409 + [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, DRM_AUTH}, 410 + [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_ioctl_agp_free, DRM_AUTH}, 411 + [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = {sis_fb_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY} 412 }; 413 414 int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
+11 -31
drivers/char/drm/tdfx_drv.c
··· 36 37 #include "drm_pciids.h" 38 39 - static int postinit(struct drm_device *dev, unsigned long flags) 40 - { 41 - DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n", 42 - DRIVER_NAME, 43 - DRIVER_MAJOR, 44 - DRIVER_MINOR, 45 - DRIVER_PATCHLEVEL, 46 - DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev) 47 - ); 48 - return 0; 49 - } 50 - 51 - static int version(drm_version_t * version) 52 - { 53 - int len; 54 - 55 - version->version_major = DRIVER_MAJOR; 56 - version->version_minor = DRIVER_MINOR; 57 - version->version_patchlevel = DRIVER_PATCHLEVEL; 58 - DRM_COPY(version->name, DRIVER_NAME); 59 - DRM_COPY(version->date, DRIVER_DATE); 60 - DRM_COPY(version->desc, DRIVER_DESC); 61 - return 0; 62 - } 63 - 64 static struct pci_device_id pciidlist[] = { 65 tdfx_PCI_IDS 66 }; ··· 45 .reclaim_buffers = drm_core_reclaim_buffers, 46 .get_map_ofs = drm_core_get_map_ofs, 47 .get_reg_ofs = drm_core_get_reg_ofs, 48 - .postinit = postinit, 49 - .version = version, 50 .fops = { 51 .owner = THIS_MODULE, 52 .open = drm_open, ··· 53 .mmap = drm_mmap, 54 .poll = drm_poll, 55 .fasync = drm_fasync, 56 - }, 57 .pci_driver = { 58 - .name = DRIVER_NAME, 59 - .id_table = pciidlist, 60 - } 61 }; 62 63 static int __init tdfx_init(void)
··· 36 37 #include "drm_pciids.h" 38 39 static struct pci_device_id pciidlist[] = { 40 tdfx_PCI_IDS 41 }; ··· 70 .reclaim_buffers = drm_core_reclaim_buffers, 71 .get_map_ofs = drm_core_get_map_ofs, 72 .get_reg_ofs = drm_core_get_reg_ofs, 73 .fops = { 74 .owner = THIS_MODULE, 75 .open = drm_open, ··· 80 .mmap = drm_mmap, 81 .poll = drm_poll, 82 .fasync = drm_fasync, 83 + }, 84 .pci_driver = { 85 + .name = DRIVER_NAME, 86 + .id_table = pciidlist, 87 + }, 88 + 89 + .name = DRIVER_NAME, 90 + .desc = DRIVER_DESC, 91 + .date = DRIVER_DATE, 92 + .major = DRIVER_MAJOR, 93 + .minor = DRIVER_MINOR, 94 + .patchlevel = DRIVER_PATCHLEVEL, 95 }; 96 97 static int __init tdfx_init(void)
+2 -5
drivers/char/drm/tdfx_drv.h
··· 1 /* tdfx.h -- 3dfx DRM template customization -*- linux-c -*- 2 * Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com 3 - * 4 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 5 * All Rights Reserved. 6 * ··· 30 31 #ifndef __TDFX_H__ 32 #define __TDFX_H__ 33 - 34 - /* This remains constant for all DRM template files. 35 - */ 36 - #define DRM(x) tdfx_##x 37 38 /* General customization: 39 */
··· 1 /* tdfx.h -- 3dfx DRM template customization -*- linux-c -*- 2 * Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com 3 + */ 4 + /* 5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 6 * All Rights Reserved. 7 * ··· 29 30 #ifndef __TDFX_H__ 31 #define __TDFX_H__ 32 33 /* General customization: 34 */
+28 -10
drivers/char/drm/via_dma.c
··· 213 dev_priv->dma_wrap = init->size; 214 dev_priv->dma_offset = init->offset; 215 dev_priv->last_pause_ptr = NULL; 216 - dev_priv->hw_addr_ptr = dev_priv->mmio->handle + init->reg_pause_addr; 217 218 via_cmdbuf_start(dev_priv); 219 ··· 234 235 switch (init.func) { 236 case VIA_INIT_DMA: 237 - if (!capable(CAP_SYS_ADMIN)) 238 retcode = DRM_ERR(EPERM); 239 else 240 retcode = via_initialize(dev, dev_priv, &init); 241 break; 242 case VIA_CLEANUP_DMA: 243 - if (!capable(CAP_SYS_ADMIN)) 244 retcode = DRM_ERR(EPERM); 245 else 246 retcode = via_dma_cleanup(dev); ··· 351 return 0; 352 } 353 354 - extern int 355 - via_parse_command_stream(drm_device_t * dev, const uint32_t * buf, 356 - unsigned int size); 357 static int via_dispatch_pci_cmdbuffer(drm_device_t * dev, 358 drm_via_cmdbuffer_t * cmd) 359 { ··· 449 if ((count <= 8) && (count >= 0)) { 450 uint32_t rgtr, ptr; 451 rgtr = *(dev_priv->hw_addr_ptr); 452 - ptr = ((char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) + 453 - dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4 - 454 - CMDBUF_ALIGNMENT_SIZE; 455 if (rgtr <= ptr) { 456 DRM_ERROR 457 ("Command regulator\npaused at count %d, address %x, " ··· 471 && count--) ; 472 473 rgtr = *(dev_priv->hw_addr_ptr); 474 - ptr = ((char *)paused_at - dev_priv->dma_ptr) + 475 dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4; 476 477 ptr_low = (ptr > 3 * CMDBUF_ALIGNMENT_SIZE) ? ··· 723 sizeof(d_siz)); 724 return ret; 725 }
··· 213 dev_priv->dma_wrap = init->size; 214 dev_priv->dma_offset = init->offset; 215 dev_priv->last_pause_ptr = NULL; 216 + dev_priv->hw_addr_ptr = 217 + (volatile uint32_t *)((char *)dev_priv->mmio->handle + 218 + init->reg_pause_addr); 219 220 via_cmdbuf_start(dev_priv); 221 ··· 232 233 switch (init.func) { 234 case VIA_INIT_DMA: 235 + if (!DRM_SUSER(DRM_CURPROC)) 236 retcode = DRM_ERR(EPERM); 237 else 238 retcode = via_initialize(dev, dev_priv, &init); 239 break; 240 case VIA_CLEANUP_DMA: 241 + if (!DRM_SUSER(DRM_CURPROC)) 242 retcode = DRM_ERR(EPERM); 243 else 244 retcode = via_dma_cleanup(dev); ··· 349 return 0; 350 } 351 352 static int via_dispatch_pci_cmdbuffer(drm_device_t * dev, 353 drm_via_cmdbuffer_t * cmd) 354 { ··· 450 if ((count <= 8) && (count >= 0)) { 451 uint32_t rgtr, ptr; 452 rgtr = *(dev_priv->hw_addr_ptr); 453 + ptr = ((volatile char *)dev_priv->last_pause_ptr - 454 + dev_priv->dma_ptr) + dev_priv->dma_offset + 455 + (uint32_t) dev_priv->agpAddr + 4 - CMDBUF_ALIGNMENT_SIZE; 456 if (rgtr <= ptr) { 457 DRM_ERROR 458 ("Command regulator\npaused at count %d, address %x, " ··· 472 && count--) ; 473 474 rgtr = *(dev_priv->hw_addr_ptr); 475 + ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) + 476 dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4; 477 478 ptr_low = (ptr > 3 * CMDBUF_ALIGNMENT_SIZE) ? ··· 724 sizeof(d_siz)); 725 return ret; 726 } 727 + 728 + drm_ioctl_desc_t via_ioctls[] = { 729 + [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, DRM_AUTH}, 730 + [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, DRM_AUTH}, 731 + [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, DRM_AUTH|DRM_MASTER}, 732 + [DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, DRM_AUTH|DRM_MASTER}, 733 + [DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, DRM_AUTH|DRM_MASTER}, 734 + [DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, DRM_AUTH}, 735 + [DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, DRM_AUTH}, 736 + [DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, DRM_AUTH}, 737 + [DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, DRM_AUTH}, 738 + [DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, DRM_AUTH}, 739 + [DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, DRM_AUTH}, 740 + [DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, DRM_AUTH}, 741 + [DRM_IOCTL_NR(DRM_VIA_DMA_BLIT)] = {via_dma_blit, DRM_AUTH}, 742 + [DRM_IOCTL_NR(DRM_VIA_BLIT_SYNC)] = {via_dma_blit_sync, DRM_AUTH} 743 + }; 744 + 745 + int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
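The via_ioctls[] table added above uses designated initializers so each handler sits at its command offset, and via_max_ioctl exports the table size for the core's bounds check. A simplified, self-contained sketch of how such a table can be dispatched, assuming (as in DRM) that the index is the ioctl number relative to the driver's command base and that the DRM_AUTH/DRM_MASTER flags are checked before the call; the names below are illustrative, not the core's real ones:

#include <stddef.h>
#include <stdio.h>

#define DRM_AUTH   0x1
#define DRM_MASTER 0x2
#define NUM_IOCTLS 16

typedef int (*ioctl_fn)(void *data);

struct ioctl_desc {
	ioctl_fn func;
	unsigned flags;
};

static int via_dma_blit_stub(void *data) { (void)data; return 0; }

/* Index is the command offset, mirroring [DRM_IOCTL_NR(...)] above. */
static const struct ioctl_desc table[NUM_IOCTLS] = {
	[0x0e] = { via_dma_blit_stub, DRM_AUTH },	/* DRM_VIA_DMA_BLIT */
};

static int dispatch(unsigned nr, int authenticated, void *data)
{
	if (nr >= NUM_IOCTLS || table[nr].func == NULL)
		return -22;				/* EINVAL */
	if ((table[nr].flags & DRM_AUTH) && !authenticated)
		return -13;				/* EACCES */
	return table[nr].func(data);
}

int main(void)
{
	printf("%d\n", dispatch(0x0e, 1, NULL));	/* 0: handler runs */
	printf("%d\n", dispatch(0x0e, 0, NULL));	/* -13: DRM_AUTH enforced */
	return 0;
}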
+805
drivers/char/drm/via_dmablit.c
···
··· 1 + /* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro 2 + * 3 + * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the "Software"), 7 + * to deal in the Software without restriction, including without limitation 8 + * the rights to use, copy, modify, merge, publish, distribute, sub license, 9 + * and/or sell copies of the Software, and to permit persons to whom the 10 + * Software is furnished to do so, subject to the following conditions: 11 + * 12 + * The above copyright notice and this permission notice (including the 13 + * next paragraph) shall be included in all copies or substantial portions 14 + * of the Software. 15 + * 16 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 19 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 20 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 21 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 22 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 23 + * 24 + * Authors: 25 + * Thomas Hellstrom. 26 + * Partially based on code obtained from Digeo Inc. 27 + */ 28 + 29 + 30 + /* 31 + * Unmaps the DMA mappings. 32 + * FIXME: Is this a NoOp on x86? Also 33 + * FIXME: What happens if this one is called and a pending blit has previously done 34 + * the same DMA mappings? 35 + */ 36 + 37 + #include "drmP.h" 38 + #include "via_drm.h" 39 + #include "via_drv.h" 40 + #include "via_dmablit.h" 41 + 42 + #include <linux/pagemap.h> 43 + 44 + #define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK) 45 + #define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK) 46 + #define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT) 47 + 48 + typedef struct _drm_via_descriptor { 49 + uint32_t mem_addr; 50 + uint32_t dev_addr; 51 + uint32_t size; 52 + uint32_t next; 53 + } drm_via_descriptor_t; 54 + 55 + 56 + /* 57 + * Unmap a DMA mapping. 58 + */ 59 + 60 + 61 + 62 + static void 63 + via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg) 64 + { 65 + int num_desc = vsg->num_desc; 66 + unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page; 67 + unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page; 68 + drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] + 69 + descriptor_this_page; 70 + dma_addr_t next = vsg->chain_start; 71 + 72 + while(num_desc--) { 73 + if (descriptor_this_page-- == 0) { 74 + cur_descriptor_page--; 75 + descriptor_this_page = vsg->descriptors_per_page - 1; 76 + desc_ptr = vsg->desc_pages[cur_descriptor_page] + 77 + descriptor_this_page; 78 + } 79 + dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE); 80 + dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction); 81 + next = (dma_addr_t) desc_ptr->next; 82 + desc_ptr--; 83 + } 84 + } 85 + 86 + /* 87 + * If mode = 0, count how many descriptors are needed. 88 + * If mode = 1, Map the DMA pages for the device, put together and map also the descriptors. 89 + * Descriptors are run in reverse order by the hardware because we are not allowed to update the 90 + * 'next' field without syncing calls when the descriptor is already mapped. 
91 + 	 */
92 + 
93 + static void 
94 + via_map_blit_for_device(struct pci_dev *pdev,
95 + 		   const drm_via_dmablit_t *xfer,
96 + 		   drm_via_sg_info_t *vsg, 
97 + 		   int mode)
98 + {
99 + 	unsigned cur_descriptor_page = 0;
100 + 	unsigned num_descriptors_this_page = 0;
101 + 	unsigned char *mem_addr = xfer->mem_addr;
102 + 	unsigned char *cur_mem;
103 + 	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
104 + 	uint32_t fb_addr = xfer->fb_addr;
105 + 	uint32_t cur_fb;
106 + 	unsigned long line_len;
107 + 	unsigned remaining_len;
108 + 	int num_desc = 0;
109 + 	int cur_line;
110 + 	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
111 + 	drm_via_descriptor_t *desc_ptr = 0;
112 + 
113 + 	if (mode == 1) 
114 + 		desc_ptr = vsg->desc_pages[cur_descriptor_page];
115 + 
116 + 	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
117 + 
118 + 		line_len = xfer->line_length;
119 + 		cur_fb = fb_addr;
120 + 		cur_mem = mem_addr;
121 + 
122 + 		while (line_len > 0) {
123 + 
124 + 			remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
125 + 			line_len -= remaining_len;
126 + 
127 + 			if (mode == 1) {
128 + 				desc_ptr->mem_addr = 
129 + 					dma_map_page(&pdev->dev, 
130 + 						     vsg->pages[VIA_PFN(cur_mem) - 
131 + 								VIA_PFN(first_addr)],
132 + 						     VIA_PGOFF(cur_mem), remaining_len, 
133 + 						     vsg->direction);
134 + 				desc_ptr->dev_addr = cur_fb;
135 + 
136 + 				desc_ptr->size = remaining_len;
137 + 				desc_ptr->next = (uint32_t) next;
138 + 				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr), 
139 + 						      DMA_TO_DEVICE);
140 + 				desc_ptr++;
141 + 				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
142 + 					num_descriptors_this_page = 0;
143 + 					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
144 + 				}
145 + 			}
146 + 
147 + 			num_desc++;
148 + 			cur_mem += remaining_len;
149 + 			cur_fb += remaining_len;
150 + 		}
151 + 
152 + 		mem_addr += xfer->mem_stride;
153 + 		fb_addr += xfer->fb_stride;
154 + 	}
155 + 
156 + 	if (mode == 1) {
157 + 		vsg->chain_start = next;
158 + 		vsg->state = dr_via_device_mapped;
159 + 	}
160 + 	vsg->num_desc = num_desc;
161 + }
162 + 
163 + /*
164 + * Function that frees up all resources for a blit. It is usable even if the
165 + * blit info has only been partially built as long as the status enum is consistent
166 + * with the actual status of the used resources.
167 + */
168 + 
169 + 
170 + void
171 + via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
172 + {
173 + 	struct page *page;
174 + 	int i;
175 + 
176 + 	switch(vsg->state) {
177 + 	case dr_via_device_mapped:
178 + 		via_unmap_blit_from_device(pdev, vsg);
179 + 	case dr_via_desc_pages_alloc:
180 + 		for (i=0; i<vsg->num_desc_pages; ++i) {
181 + 			if (vsg->desc_pages[i] != NULL)
182 + 				free_page((unsigned long)vsg->desc_pages[i]);
183 + 		}
184 + 		kfree(vsg->desc_pages);
185 + 	case dr_via_pages_locked:
186 + 		for (i=0; i<vsg->num_pages; ++i) {
187 + 			if ( NULL != (page = vsg->pages[i])) {
188 + 				if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
189 + 					SetPageDirty(page);
190 + 				page_cache_release(page);
191 + 			}
192 + 		}
193 + 	case dr_via_pages_alloc:
194 + 		vfree(vsg->pages);
195 + 	default:
196 + 		vsg->state = dr_via_sg_init;
197 + 	}
198 + 	if (vsg->bounce_buffer) {
199 + 		vfree(vsg->bounce_buffer);
200 + 		vsg->bounce_buffer = NULL;
201 + 	}
202 + 	vsg->free_on_sequence = 0;
203 + }
204 + 
205 + /*
206 + * Fire a blit engine. 
207 + 	 */
208 + 
209 + static void
210 + via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine)
211 + {
212 + 	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
213 + 
214 + 	VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
215 + 	VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
216 + 	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
217 + 		  VIA_DMA_CSR_DE);
218 + 	VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
219 + 	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
220 + 	VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
221 + 	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
222 + }
223 + 
224 + /*
225 + * Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
226 + * occur here if the calling user does not have access to the submitted address.
227 + */
228 + 
229 + static int
230 + via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)
231 + {
232 + 	int ret;
233 + 	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
234 + 	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
235 + 		first_pfn + 1;
236 + 
237 + 	if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
238 + 		return DRM_ERR(ENOMEM);
239 + 	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
240 + 	down_read(&current->mm->mmap_sem);
241 + 	ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr,
242 + 			     vsg->num_pages, vsg->direction, 0, vsg->pages, NULL);
243 + 
244 + 	up_read(&current->mm->mmap_sem);
245 + 	if (ret != vsg->num_pages) {
246 + 		if (ret < 0)
247 + 			return ret;
248 + 		vsg->state = dr_via_pages_locked;
249 + 		return DRM_ERR(EINVAL);
250 + 	}
251 + 	vsg->state = dr_via_pages_locked;
252 + 	DRM_DEBUG("DMA pages locked\n");
253 + 	return 0;
254 + }
255 + 
256 + /*
257 + * Allocate DMA-capable memory for the blit descriptor chain, and an array that keeps track of the
258 + * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
259 + * quite large for some blits, and pages don't need to be contiguous. 
260 + */ 261 + 262 + static int 263 + via_alloc_desc_pages(drm_via_sg_info_t *vsg) 264 + { 265 + int i; 266 + 267 + vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t); 268 + vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) / 269 + vsg->descriptors_per_page; 270 + 271 + if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL))) 272 + return DRM_ERR(ENOMEM); 273 + 274 + memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages); 275 + vsg->state = dr_via_desc_pages_alloc; 276 + for (i=0; i<vsg->num_desc_pages; ++i) { 277 + if (NULL == (vsg->desc_pages[i] = 278 + (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL))) 279 + return DRM_ERR(ENOMEM); 280 + } 281 + DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages, 282 + vsg->num_desc); 283 + return 0; 284 + } 285 + 286 + static void 287 + via_abort_dmablit(drm_device_t *dev, int engine) 288 + { 289 + drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; 290 + 291 + VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA); 292 + } 293 + 294 + static void 295 + via_dmablit_engine_off(drm_device_t *dev, int engine) 296 + { 297 + drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; 298 + 299 + VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD); 300 + } 301 + 302 + 303 + 304 + /* 305 + * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here. 306 + * The rest, like unmapping and freeing memory for done blits is done in a separate workqueue 307 + * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while 308 + * the workqueue task takes care of processing associated with the old blit. 309 + */ 310 + 311 + void 312 + via_dmablit_handler(drm_device_t *dev, int engine, int from_irq) 313 + { 314 + drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; 315 + drm_via_blitq_t *blitq = dev_priv->blit_queues + engine; 316 + int cur; 317 + int done_transfer; 318 + unsigned long irqsave=0; 319 + uint32_t status = 0; 320 + 321 + DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n", 322 + engine, from_irq, (unsigned long) blitq); 323 + 324 + if (from_irq) { 325 + spin_lock(&blitq->blit_lock); 326 + } else { 327 + spin_lock_irqsave(&blitq->blit_lock, irqsave); 328 + } 329 + 330 + done_transfer = blitq->is_active && 331 + (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD); 332 + done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE)); 333 + 334 + cur = blitq->cur; 335 + if (done_transfer) { 336 + 337 + blitq->blits[cur]->aborted = blitq->aborting; 338 + blitq->done_blit_handle++; 339 + DRM_WAKEUP(blitq->blit_queue + cur); 340 + 341 + cur++; 342 + if (cur >= VIA_NUM_BLIT_SLOTS) 343 + cur = 0; 344 + blitq->cur = cur; 345 + 346 + /* 347 + * Clear transfer done flag. 348 + */ 349 + 350 + VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD); 351 + 352 + blitq->is_active = 0; 353 + blitq->aborting = 0; 354 + schedule_work(&blitq->wq); 355 + 356 + } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) { 357 + 358 + /* 359 + * Abort transfer after one second. 
360 + */ 361 + 362 + via_abort_dmablit(dev, engine); 363 + blitq->aborting = 1; 364 + blitq->end = jiffies + DRM_HZ; 365 + } 366 + 367 + if (!blitq->is_active) { 368 + if (blitq->num_outstanding) { 369 + via_fire_dmablit(dev, blitq->blits[cur], engine); 370 + blitq->is_active = 1; 371 + blitq->cur = cur; 372 + blitq->num_outstanding--; 373 + blitq->end = jiffies + DRM_HZ; 374 + if (!timer_pending(&blitq->poll_timer)) { 375 + blitq->poll_timer.expires = jiffies+1; 376 + add_timer(&blitq->poll_timer); 377 + } 378 + } else { 379 + if (timer_pending(&blitq->poll_timer)) { 380 + del_timer(&blitq->poll_timer); 381 + } 382 + via_dmablit_engine_off(dev, engine); 383 + } 384 + } 385 + 386 + if (from_irq) { 387 + spin_unlock(&blitq->blit_lock); 388 + } else { 389 + spin_unlock_irqrestore(&blitq->blit_lock, irqsave); 390 + } 391 + } 392 + 393 + 394 + 395 + /* 396 + * Check whether this blit is still active, performing necessary locking. 397 + */ 398 + 399 + static int 400 + via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue) 401 + { 402 + unsigned long irqsave; 403 + uint32_t slot; 404 + int active; 405 + 406 + spin_lock_irqsave(&blitq->blit_lock, irqsave); 407 + 408 + /* 409 + * Allow for handle wraparounds. 410 + */ 411 + 412 + active = ((blitq->done_blit_handle - handle) > (1 << 23)) && 413 + ((blitq->cur_blit_handle - handle) <= (1 << 23)); 414 + 415 + if (queue && active) { 416 + slot = handle - blitq->done_blit_handle + blitq->cur -1; 417 + if (slot >= VIA_NUM_BLIT_SLOTS) { 418 + slot -= VIA_NUM_BLIT_SLOTS; 419 + } 420 + *queue = blitq->blit_queue + slot; 421 + } 422 + 423 + spin_unlock_irqrestore(&blitq->blit_lock, irqsave); 424 + 425 + return active; 426 + } 427 + 428 + /* 429 + * Sync. Wait for at least three seconds for the blit to be performed. 430 + */ 431 + 432 + static int 433 + via_dmablit_sync(drm_device_t *dev, uint32_t handle, int engine) 434 + { 435 + 436 + drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; 437 + drm_via_blitq_t *blitq = dev_priv->blit_queues + engine; 438 + wait_queue_head_t *queue; 439 + int ret = 0; 440 + 441 + if (via_dmablit_active(blitq, engine, handle, &queue)) { 442 + DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ, 443 + !via_dmablit_active(blitq, engine, handle, NULL)); 444 + } 445 + DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n", 446 + handle, engine, ret); 447 + 448 + return ret; 449 + } 450 + 451 + 452 + /* 453 + * A timer that regularly polls the blit engine in cases where we don't have interrupts: 454 + * a) Broken hardware (typically those that don't have any video capture facility). 455 + * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted. 456 + * The timer and hardware IRQ's can and do work in parallel. If the hardware has 457 + * irqs, it will shorten the latency somewhat. 
458 + */ 459 + 460 + 461 + 462 + static void 463 + via_dmablit_timer(unsigned long data) 464 + { 465 + drm_via_blitq_t *blitq = (drm_via_blitq_t *) data; 466 + drm_device_t *dev = blitq->dev; 467 + int engine = (int) 468 + (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues); 469 + 470 + DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine, 471 + (unsigned long) jiffies); 472 + 473 + via_dmablit_handler(dev, engine, 0); 474 + 475 + if (!timer_pending(&blitq->poll_timer)) { 476 + blitq->poll_timer.expires = jiffies+1; 477 + add_timer(&blitq->poll_timer); 478 + } 479 + via_dmablit_handler(dev, engine, 0); 480 + 481 + } 482 + 483 + 484 + 485 + 486 + /* 487 + * Workqueue task that frees data and mappings associated with a blit. 488 + * Also wakes up waiting processes. Each of these tasks handles one 489 + * blit engine only and may not be called on each interrupt. 490 + */ 491 + 492 + 493 + static void 494 + via_dmablit_workqueue(void *data) 495 + { 496 + drm_via_blitq_t *blitq = (drm_via_blitq_t *) data; 497 + drm_device_t *dev = blitq->dev; 498 + unsigned long irqsave; 499 + drm_via_sg_info_t *cur_sg; 500 + int cur_released; 501 + 502 + 503 + DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long) 504 + (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues)); 505 + 506 + spin_lock_irqsave(&blitq->blit_lock, irqsave); 507 + 508 + while(blitq->serviced != blitq->cur) { 509 + 510 + cur_released = blitq->serviced++; 511 + 512 + DRM_DEBUG("Releasing blit slot %d\n", cur_released); 513 + 514 + if (blitq->serviced >= VIA_NUM_BLIT_SLOTS) 515 + blitq->serviced = 0; 516 + 517 + cur_sg = blitq->blits[cur_released]; 518 + blitq->num_free++; 519 + 520 + spin_unlock_irqrestore(&blitq->blit_lock, irqsave); 521 + 522 + DRM_WAKEUP(&blitq->busy_queue); 523 + 524 + via_free_sg_info(dev->pdev, cur_sg); 525 + kfree(cur_sg); 526 + 527 + spin_lock_irqsave(&blitq->blit_lock, irqsave); 528 + } 529 + 530 + spin_unlock_irqrestore(&blitq->blit_lock, irqsave); 531 + } 532 + 533 + 534 + /* 535 + * Init all blit engines. Currently we use two, but some hardware have 4. 536 + */ 537 + 538 + 539 + void 540 + via_init_dmablit(drm_device_t *dev) 541 + { 542 + int i,j; 543 + drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; 544 + drm_via_blitq_t *blitq; 545 + 546 + pci_set_master(dev->pdev); 547 + 548 + for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) { 549 + blitq = dev_priv->blit_queues + i; 550 + blitq->dev = dev; 551 + blitq->cur_blit_handle = 0; 552 + blitq->done_blit_handle = 0; 553 + blitq->head = 0; 554 + blitq->cur = 0; 555 + blitq->serviced = 0; 556 + blitq->num_free = VIA_NUM_BLIT_SLOTS; 557 + blitq->num_outstanding = 0; 558 + blitq->is_active = 0; 559 + blitq->aborting = 0; 560 + blitq->blit_lock = SPIN_LOCK_UNLOCKED; 561 + for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) { 562 + DRM_INIT_WAITQUEUE(blitq->blit_queue + j); 563 + } 564 + DRM_INIT_WAITQUEUE(&blitq->busy_queue); 565 + INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq); 566 + init_timer(&blitq->poll_timer); 567 + blitq->poll_timer.function = &via_dmablit_timer; 568 + blitq->poll_timer.data = (unsigned long) blitq; 569 + } 570 + } 571 + 572 + /* 573 + * Build all info and do all mappings required for a blit. 574 + */ 575 + 576 + 577 + static int 578 + via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) 579 + { 580 + int draw = xfer->to_fb; 581 + int ret = 0; 582 + 583 + vsg->direction = (draw) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE;
584 + 	vsg->bounce_buffer = 0;
585 + 
586 + 	vsg->state = dr_via_sg_init;
587 + 
588 + 	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
589 + 		DRM_ERROR("Zero size bitblt.\n");
590 + 		return DRM_ERR(EINVAL);
591 + 	}
592 + 
593 + 	/*
594 + 	 * The check below is a driver limitation, not a hardware one. We
595 + 	 * don't want to lock unused pages, and don't want to incorporate the
596 + 	 * extra logic of avoiding them. Make sure there are none.
597 + 	 * (Not a big limitation anyway.)
598 + 	 */
599 + 
600 + 	if (((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) ||
601 + 	    (xfer->mem_stride > 2048*4)) {
602 + 		DRM_ERROR("Too large system memory stride. Stride: %d, "
603 + 			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
604 + 		return DRM_ERR(EINVAL);
605 + 	}
606 + 
607 + 	if (xfer->num_lines > 2048) {
608 + 		DRM_ERROR("Too many PCI DMA bitblt lines.\n");
609 + 		return DRM_ERR(EINVAL);
610 + 	}
611 + 
612 + 	/*
613 + 	 * We allow a negative fb stride to permit flipping of images during
614 + 	 * the transfer.
615 + 	 */
616 + 
617 + 	if (xfer->mem_stride < xfer->line_length ||
618 + 	    abs(xfer->fb_stride) < xfer->line_length) {
619 + 		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
620 + 		return DRM_ERR(EINVAL);
621 + 	}
622 + 
623 + 	/*
624 + 	 * A hardware bug seems to be worked around if system memory addresses start on
625 + 	 * 16-byte boundaries. This seems a bit restrictive, however. VIA has been
626 + 	 * contacted about this. Meanwhile, impose the following restrictions:
627 + 	 */
628 + 
629 + #ifdef VIA_BUGFREE
630 + 	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
631 + 	    ((xfer->mem_stride & 3) != (xfer->fb_stride & 3))) {
632 + 		DRM_ERROR("Invalid DRM bitblt alignment.\n");
633 + 		return DRM_ERR(EINVAL);
634 + 	}
635 + #else
636 + 	if ((((unsigned long)xfer->mem_addr & 15) ||
637 + 	     ((unsigned long)xfer->fb_addr & 3)) || (xfer->mem_stride & 15) ||
638 + 	    (xfer->fb_stride & 3)) {
639 + 		DRM_ERROR("Invalid DRM bitblt alignment.\n");
640 + 		return DRM_ERR(EINVAL);
641 + 	}
642 + #endif
643 + 
644 + 	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
645 + 		DRM_ERROR("Could not lock DMA pages.\n");
646 + 		via_free_sg_info(dev->pdev, vsg);
647 + 		return ret;
648 + 	}
649 + 
650 + 	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
651 + 	if (0 != (ret = via_alloc_desc_pages(vsg))) {
652 + 		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
653 + 		via_free_sg_info(dev->pdev, vsg);
654 + 		return ret;
655 + 	}
656 + 	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
657 + 
658 + 	return 0;
659 + }
660 + 
661 + 
662 + /*
663 + * Reserve one free slot in the blit queue. Will wait for one second for one
664 + * to become available. Otherwise -EBUSY is returned.
665 + */
666 + 
667 + static int
668 + via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
669 + {
670 + 	int ret=0;
671 + 	unsigned long irqsave;
672 + 
673 + 	DRM_DEBUG("Num free is %d\n", blitq->num_free);
674 + 	spin_lock_irqsave(&blitq->blit_lock, irqsave);
675 + 	while(blitq->num_free == 0) {
676 + 		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
677 + 
678 + 		DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
679 + 		if (ret) {
680 + 			return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret;
681 + 		}
682 + 
683 + 		spin_lock_irqsave(&blitq->blit_lock, irqsave);
684 + 	}
685 + 
686 + 	blitq->num_free--;
687 + 	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
688 + 
689 + 	return 0;
690 + }
691 + 
692 + /*
693 + * Hand back a free slot if we changed our mind. 
694 + 	 */
695 + 
696 + static void
697 + via_dmablit_release_slot(drm_via_blitq_t *blitq)
698 + {
699 + 	unsigned long irqsave;
700 + 
701 + 	spin_lock_irqsave(&blitq->blit_lock, irqsave);
702 + 	blitq->num_free++;
703 + 	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
704 + 	DRM_WAKEUP( &blitq->busy_queue );
705 + }
706 + 
707 + /*
708 + * Grab a free slot. Build blit info and queue a blit.
709 + */
710 + 
711 + 
712 + static int
713 + via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)
714 + {
715 + 	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
716 + 	drm_via_sg_info_t *vsg;
717 + 	drm_via_blitq_t *blitq;
718 + 	int ret;
719 + 	int engine;
720 + 	unsigned long irqsave;
721 + 
722 + 	if (dev_priv == NULL) {
723 + 		DRM_ERROR("Called without initialization.\n");
724 + 		return DRM_ERR(EINVAL);
725 + 	}
726 + 
727 + 	engine = (xfer->to_fb) ? 0 : 1;
728 + 	blitq = dev_priv->blit_queues + engine;
729 + 	if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
730 + 		return ret;
731 + 	}
732 + 	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
733 + 		via_dmablit_release_slot(blitq);
734 + 		return DRM_ERR(ENOMEM);
735 + 	}
736 + 	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
737 + 		via_dmablit_release_slot(blitq);
738 + 		kfree(vsg);
739 + 		return ret;
740 + 	}
741 + 	spin_lock_irqsave(&blitq->blit_lock, irqsave);
742 + 
743 + 	blitq->blits[blitq->head++] = vsg;
744 + 	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
745 + 		blitq->head = 0;
746 + 	blitq->num_outstanding++;
747 + 	xfer->sync.sync_handle = ++blitq->cur_blit_handle;
748 + 
749 + 	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
750 + 	xfer->sync.engine = engine;
751 + 
752 + 	via_dmablit_handler(dev, engine, 0);
753 + 
754 + 	return 0;
755 + }
756 + 
757 + /*
758 + * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
759 + * that there is a very high probability that this IOCTL will be interrupted by a signal. In that
760 + * case it returns with -EAGAIN so that the signal can be delivered.
761 + * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
762 + */
763 + 
764 + int
765 + via_dma_blit_sync( DRM_IOCTL_ARGS )
766 + {
767 + 	drm_via_blitsync_t sync;
768 + 	int err;
769 + 	DRM_DEVICE;
770 + 
771 + 	DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync));
772 + 
773 + 	if (sync.engine >= VIA_NUM_BLIT_ENGINES)
774 + 		return DRM_ERR(EINVAL);
775 + 
776 + 	err = via_dmablit_sync(dev, sync.sync_handle, sync.engine);
777 + 
778 + 	if (DRM_ERR(EINTR) == err)
779 + 		err = DRM_ERR(EAGAIN);
780 + 
781 + 	return err;
782 + }
783 + 
784 + 
785 + /*
786 + * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
787 + * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
788 + * be reissued. See the above IOCTL code.
789 + */
790 + 
791 + int
792 + via_dma_blit( DRM_IOCTL_ARGS )
793 + {
794 + 	drm_via_dmablit_t xfer;
795 + 	int err;
796 + 	DRM_DEVICE;
797 + 
798 + 	DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer));
799 + 
800 + 	err = via_dmablit(dev, &xfer);
801 + 
802 + 	DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer));
803 + 
804 + 	return err;
805 + }
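One subtle point in via_dmablit_active() above is the handle comparison: blit handles are monotonically increasing 32-bit counters, so the code tests membership in the window (done_blit_handle, cur_blit_handle] using wrap-safe unsigned subtraction against a 2^23 threshold. A standalone demonstration of that arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Same test as via_dmablit_active(): pending iff done < handle <= cur,
 * computed with wrap-safe unsigned differences. */
static int blit_active(uint32_t done, uint32_t cur, uint32_t handle)
{
	return ((done - handle) > (1u << 23)) &&
	       ((cur - handle) <= (1u << 23));
}

int main(void)
{
	/* Plain case: handles 6..7 issued, 5 already done. */
	printf("%d\n", blit_active(5, 7, 6));			/* 1: pending */
	printf("%d\n", blit_active(5, 7, 5));			/* 0: done */

	/* Wrap case: counters straddle 0xffffffff. */
	printf("%d\n", blit_active(0xfffffffeu, 0x2u, 0x1u));	/* 1: pending */
	return 0;
}

Any handle more than 2^23 counts away from the current window is treated as no longer active, which is safe here because only a handful of blits (VIA_NUM_BLIT_SLOTS) are ever outstanding at once.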
+140
drivers/char/drm/via_dmablit.h
···
··· 1 + /* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro 2 + * 3 + * Copyright 2005 Thomas Hellstrom. 4 + * All Rights Reserved. 5 + * 6 + * Permission is hereby granted, free of charge, to any person obtaining a 7 + * copy of this software and associated documentation files (the "Software"), 8 + * to deal in the Software without restriction, including without limitation 9 + * the rights to use, copy, modify, merge, publish, distribute, sub license, 10 + * and/or sell copies of the Software, and to permit persons to whom the 11 + * Software is furnished to do so, subject to the following conditions: 12 + * 13 + * The above copyright notice and this permission notice (including the 14 + * next paragraph) shall be included in all copies or substantial portions 15 + * of the Software. 16 + * 17 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 20 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 21 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 22 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 23 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 24 + * 25 + * Authors: 26 + * Thomas Hellstrom. 27 + * Register info from Digeo Inc. 28 + */ 29 + 30 + #ifndef _VIA_DMABLIT_H 31 + #define _VIA_DMABLIT_H 32 + 33 + #include <linux/dma-mapping.h> 34 + 35 + #define VIA_NUM_BLIT_ENGINES 2 36 + #define VIA_NUM_BLIT_SLOTS 8 37 + 38 + struct _drm_via_descriptor; 39 + 40 + typedef struct _drm_via_sg_info { 41 + struct page **pages; 42 + unsigned long num_pages; 43 + struct _drm_via_descriptor **desc_pages; 44 + int num_desc_pages; 45 + int num_desc; 46 + enum dma_data_direction direction; 47 + unsigned char *bounce_buffer; 48 + dma_addr_t chain_start; 49 + uint32_t free_on_sequence; 50 + unsigned int descriptors_per_page; 51 + int aborted; 52 + enum { 53 + dr_via_device_mapped, 54 + dr_via_desc_pages_alloc, 55 + dr_via_pages_locked, 56 + dr_via_pages_alloc, 57 + dr_via_sg_init 58 + } state; 59 + } drm_via_sg_info_t; 60 + 61 + typedef struct _drm_via_blitq { 62 + drm_device_t *dev; 63 + uint32_t cur_blit_handle; 64 + uint32_t done_blit_handle; 65 + unsigned serviced; 66 + unsigned head; 67 + unsigned cur; 68 + unsigned num_free; 69 + unsigned num_outstanding; 70 + unsigned long end; 71 + int aborting; 72 + int is_active; 73 + drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS]; 74 + spinlock_t blit_lock; 75 + wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS]; 76 + wait_queue_head_t busy_queue; 77 + struct work_struct wq; 78 + struct timer_list poll_timer; 79 + } drm_via_blitq_t; 80 + 81 + 82 + /* 83 + * PCI DMA Registers 84 + * Channels 2 & 3 don't seem to be implemented in hardware. 
85 + */ 86 + 87 + #define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */ 88 + #define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */ 89 + #define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */ 90 + #define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */ 91 + 92 + #define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */ 93 + #define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */ 94 + #define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */ 95 + #define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */ 96 + 97 + #define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */ 98 + #define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */ 99 + #define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */ 100 + #define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */ 101 + 102 + #define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */ 103 + #define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */ 104 + #define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */ 105 + #define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */ 106 + 107 + #define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */ 108 + #define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */ 109 + #define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */ 110 + #define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */ 111 + 112 + #define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */ 113 + #define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */ 114 + #define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */ 115 + #define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */ 116 + 117 + #define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */ 118 + 119 + /* Define for DMA engine */ 120 + /* DPR */ 121 + #define VIA_DMA_DPR_EC (1<<1) /* end of chain */ 122 + #define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */ 123 + #define VIA_DMA_DPR_DT (1<<3) /* direction of transfer (RO) */ 124 + 125 + /* MR */ 126 + #define VIA_DMA_MR_CM (1<<0) /* chaining mode */ 127 + #define VIA_DMA_MR_TDIE (1<<1) /* transfer done interrupt enable */ 128 + #define VIA_DMA_MR_HENDMACMD (1<<7) /* ? */ 129 + 130 + /* CSR */ 131 + #define VIA_DMA_CSR_DE (1<<0) /* DMA enable */ 132 + #define VIA_DMA_CSR_TS (1<<1) /* transfer start */ 133 + #define VIA_DMA_CSR_TA (1<<2) /* transfer abort */ 134 + #define VIA_DMA_CSR_TD (1<<3) /* transfer done */ 135 + #define VIA_DMA_CSR_DD (1<<4) /* descriptor done */ 136 + #define VIA_DMA_DPR_EC (1<<1) /* end of chain */ 137 + 138 + 139 + 140 + #endif
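The channel registers above sit at regular strides, which is what lets via_dmablit.c address both engines from the channel-0 defines: MAR/DAR/BCR/DPR advance 0x10 per channel, while MR and CSR are packed at 0x04 intervals. A quick sanity check of that scheme, using the offsets from the header:

#include <assert.h>
#include <stdio.h>

#define VIA_PCI_DMA_MAR0 0xE40
#define VIA_PCI_DMA_MAR1 0xE50
#define VIA_PCI_DMA_CSR0 0xE90
#define VIA_PCI_DMA_CSR1 0xE94

int main(void)
{
	int engine = 1;

	/* Address/count registers: one 0x10 block per channel. */
	assert(VIA_PCI_DMA_MAR0 + engine * 0x10 == VIA_PCI_DMA_MAR1);

	/* Mode and command/status registers: packed at 0x04 strides. */
	assert(VIA_PCI_DMA_CSR0 + engine * 0x04 == VIA_PCI_DMA_CSR1);

	puts("register strides consistent");
	return 0;
}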
+42 -16
drivers/char/drm/via_drm.h
··· 75 #define DRM_VIA_CMDBUF_SIZE 0x0b 76 #define NOT_USED 77 #define DRM_VIA_WAIT_IRQ 0x0d 78 79 #define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t) 80 #define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t) ··· 91 #define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \ 92 drm_via_cmdbuf_size_t) 93 #define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t) 94 95 /* Indices into buf.Setup where various bits of state are mirrored per 96 * context and per buffer. These can be fired at the card as a unit, ··· 107 #define VIA_BACK 0x2 108 #define VIA_DEPTH 0x4 109 #define VIA_STENCIL 0x8 110 - #define VIDEO 0 111 - #define AGP 1 112 typedef struct { 113 uint32_t offset; 114 uint32_t size; ··· 200 unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS]; 201 unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */ 202 203 } drm_via_sarea_t; 204 205 typedef struct _drm_via_cmdbuf_size { ··· 223 224 #define VIA_IRQ_FLAGS_MASK 0xF0000000 225 226 struct drm_via_wait_irq_request { 227 unsigned irq; 228 via_irq_seq_type_t type; ··· 245 struct drm_wait_vblank_reply reply; 246 } drm_via_irqwait_t; 247 248 - #ifdef __KERNEL__ 249 250 - int via_fb_init(DRM_IOCTL_ARGS); 251 - int via_mem_alloc(DRM_IOCTL_ARGS); 252 - int via_mem_free(DRM_IOCTL_ARGS); 253 - int via_agp_init(DRM_IOCTL_ARGS); 254 - int via_map_init(DRM_IOCTL_ARGS); 255 - int via_decoder_futex(DRM_IOCTL_ARGS); 256 - int via_dma_init(DRM_IOCTL_ARGS); 257 - int via_cmdbuffer(DRM_IOCTL_ARGS); 258 - int via_flush_ioctl(DRM_IOCTL_ARGS); 259 - int via_pci_cmdbuffer(DRM_IOCTL_ARGS); 260 - int via_cmdbuf_size(DRM_IOCTL_ARGS); 261 - int via_wait_irq(DRM_IOCTL_ARGS); 262 263 - #endif 264 #endif /* _VIA_DRM_H_ */
··· 75 #define DRM_VIA_CMDBUF_SIZE 0x0b 76 #define NOT_USED 77 #define DRM_VIA_WAIT_IRQ 0x0d 78 + #define DRM_VIA_DMA_BLIT 0x0e 79 + #define DRM_VIA_BLIT_SYNC 0x0f 80 81 #define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t) 82 #define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t) ··· 89 #define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \ 90 drm_via_cmdbuf_size_t) 91 #define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t) 92 + #define DRM_IOCTL_VIA_DMA_BLIT DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t) 93 + #define DRM_IOCTL_VIA_BLIT_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t) 94 95 /* Indices into buf.Setup where various bits of state are mirrored per 96 * context and per buffer. These can be fired at the card as a unit, ··· 103 #define VIA_BACK 0x2 104 #define VIA_DEPTH 0x4 105 #define VIA_STENCIL 0x8 106 + #define VIA_MEM_VIDEO 0 /* matches drm constant */ 107 + #define VIA_MEM_AGP 1 /* matches drm constant */ 108 + #define VIA_MEM_SYSTEM 2 109 + #define VIA_MEM_MIXED 3 110 + #define VIA_MEM_UNKNOWN 4 111 + 112 typedef struct { 113 uint32_t offset; 114 uint32_t size; ··· 192 unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS]; 193 unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */ 194 195 + /* Used by the 3d driver only at this point, for pageflipping: 196 + */ 197 + unsigned int pfCurrentOffset; 198 } drm_via_sarea_t; 199 200 typedef struct _drm_via_cmdbuf_size { ··· 212 213 #define VIA_IRQ_FLAGS_MASK 0xF0000000 214 215 + enum drm_via_irqs { 216 + drm_via_irq_hqv0 = 0, 217 + drm_via_irq_hqv1, 218 + drm_via_irq_dma0_dd, 219 + drm_via_irq_dma0_td, 220 + drm_via_irq_dma1_dd, 221 + drm_via_irq_dma1_td, 222 + drm_via_irq_num 223 + }; 224 + 225 struct drm_via_wait_irq_request { 226 unsigned irq; 227 via_irq_seq_type_t type; ··· 224 struct drm_wait_vblank_reply reply; 225 } drm_via_irqwait_t; 226 227 + typedef struct drm_via_blitsync { 228 + uint32_t sync_handle; 229 + unsigned engine; 230 + } drm_via_blitsync_t; 231 232 + typedef struct drm_via_dmablit { 233 + uint32_t num_lines; 234 + uint32_t line_length; 235 + 236 + uint32_t fb_addr; 237 + uint32_t fb_stride; 238 239 + unsigned char *mem_addr; 240 + uint32_t mem_stride; 241 + 242 + int bounce_buffer; 243 + int to_fb; 244 + 245 + drm_via_blitsync_t sync; 246 + } drm_via_dmablit_t; 247 + 248 #endif /* _VIA_DRM_H_ */
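Given the two new ioctls above, a user-space client queues a blit with DRM_IOCTL_VIA_DMA_BLIT and then waits on the handle returned in sync.sync_handle, retrying on EAGAIN as the driver expects. A hedged sketch of that sequence: it assumes an open, authenticated DRM file descriptor, that via_drm.h and its dependencies are on the include path, and that the caller already satisfies the driver's alignment rules (src 16-byte aligned, width_bytes a multiple of 16 in the default build):

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "via_drm.h"		/* assumed to be on the include path */

int blit_to_fb_and_wait(int fd, unsigned char *src, uint32_t fb_offset,
			uint32_t width_bytes, uint32_t height)
{
	drm_via_dmablit_t xfer;
	int ret;

	memset(&xfer, 0, sizeof(xfer));
	xfer.num_lines = height;
	xfer.line_length = width_bytes;
	xfer.mem_addr = src;
	xfer.mem_stride = width_bytes;
	xfer.fb_addr = fb_offset;
	xfer.fb_stride = width_bytes;
	xfer.to_fb = 1;			/* system memory -> frame buffer */

	if (ioctl(fd, DRM_IOCTL_VIA_DMA_BLIT, &xfer))
		return -errno;

	/* The driver converts an interrupting signal into EAGAIN. */
	do {
		ret = ioctl(fd, DRM_IOCTL_VIA_BLIT_SYNC, &xfer.sync);
	} while (ret == -1 && errno == EAGAIN);

	return ret ? -errno : 0;
}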
+18 -45
drivers/char/drm/via_drv.c
··· 29 30 #include "drm_pciids.h" 31 32 - static int postinit(struct drm_device *dev, unsigned long flags) 33 { 34 - DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n", 35 - DRIVER_NAME, 36 - DRIVER_MAJOR, 37 - DRIVER_MINOR, 38 - DRIVER_PATCHLEVEL, 39 - DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev) 40 - ); 41 - return 0; 42 - } 43 - 44 - static int version(drm_version_t * version) 45 - { 46 - int len; 47 - 48 - version->version_major = DRIVER_MAJOR; 49 - version->version_minor = DRIVER_MINOR; 50 - version->version_patchlevel = DRIVER_PATCHLEVEL; 51 - DRM_COPY(version->name, DRIVER_NAME); 52 - DRM_COPY(version->date, DRIVER_DATE); 53 - DRM_COPY(version->desc, DRIVER_DESC); 54 - return 0; 55 } 56 57 static struct pci_device_id pciidlist[] = { 58 viadrv_PCI_IDS 59 }; 60 61 - static drm_ioctl_desc_t ioctls[] = { 62 - [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, 1, 0}, 63 - [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, 1, 0}, 64 - [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, 1, 0}, 65 - [DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, 1, 0}, 66 - [DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, 1, 0}, 67 - [DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, 1, 0}, 68 - [DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, 1, 0}, 69 - [DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, 1, 0}, 70 - [DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, 1, 0}, 71 - [DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, 1, 0}, 72 - [DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, 1, 0}, 73 - [DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, 1, 0} 74 - }; 75 - 76 static struct drm_driver driver = { 77 .driver_features = 78 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | 79 DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, 80 .context_ctor = via_init_context, 81 .context_dtor = via_final_context, 82 .vblank_wait = via_driver_vblank_wait, ··· 52 .irq_uninstall = via_driver_irq_uninstall, 53 .irq_handler = via_driver_irq_handler, 54 .dma_quiescent = via_driver_dma_quiescent, 55 .reclaim_buffers = drm_core_reclaim_buffers, 56 .get_map_ofs = drm_core_get_map_ofs, 57 .get_reg_ofs = drm_core_get_reg_ofs, 58 - .postinit = postinit, 59 - .version = version, 60 - .ioctls = ioctls, 61 - .num_ioctls = DRM_ARRAY_SIZE(ioctls), 62 .fops = { 63 .owner = THIS_MODULE, 64 .open = drm_open, ··· 65 .mmap = drm_mmap, 66 .poll = drm_poll, 67 .fasync = drm_fasync, 68 - }, 69 .pci_driver = { 70 - .name = DRIVER_NAME, 71 - .id_table = pciidlist, 72 - } 73 }; 74 75 static int __init via_init(void) 76 { 77 via_init_command_verifier(); 78 return drm_init(&driver); 79 }
··· 29 30 #include "drm_pciids.h" 31 32 + static int dri_library_name(struct drm_device *dev, char *buf) 33 { 34 + return snprintf(buf, PAGE_SIZE, "unichrome"); 35 } 36 37 static struct pci_device_id pciidlist[] = { 38 viadrv_PCI_IDS 39 }; 40 41 static struct drm_driver driver = { 42 .driver_features = 43 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | 44 DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, 45 + .load = via_driver_load, 46 + .unload = via_driver_unload, 47 .context_ctor = via_init_context, 48 .context_dtor = via_final_context, 49 .vblank_wait = via_driver_vblank_wait, ··· 85 .irq_uninstall = via_driver_irq_uninstall, 86 .irq_handler = via_driver_irq_handler, 87 .dma_quiescent = via_driver_dma_quiescent, 88 + .dri_library_name = dri_library_name, 89 .reclaim_buffers = drm_core_reclaim_buffers, 90 .get_map_ofs = drm_core_get_map_ofs, 91 .get_reg_ofs = drm_core_get_reg_ofs, 92 + .ioctls = via_ioctls, 93 .fops = { 94 .owner = THIS_MODULE, 95 .open = drm_open, ··· 100 .mmap = drm_mmap, 101 .poll = drm_poll, 102 .fasync = drm_fasync, 103 + }, 104 .pci_driver = { 105 + .name = DRIVER_NAME, 106 + .id_table = pciidlist, 107 + }, 108 + 109 + .name = DRIVER_NAME, 110 + .desc = DRIVER_DESC, 111 + .date = DRIVER_DATE, 112 + .major = DRIVER_MAJOR, 113 + .minor = DRIVER_MINOR, 114 + .patchlevel = DRIVER_PATCHLEVEL, 115 }; 116 117 static int __init via_init(void) 118 { 119 + driver.num_ioctls = via_max_ioctl; 120 via_init_command_verifier(); 121 return drm_init(&driver); 122 }
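A small but necessary detail in via_init() above: driver.num_ioctls is assigned at module load because via_max_ioctl is defined in via_dma.c, and an extern variable is not a constant expression in C, so it cannot appear in the static initializer of the driver struct. The constraint in miniature (single file for brevity; names hypothetical):

/* A static initializer must be a constant expression, so a count
 * defined in another object file (like via_max_ioctl in via_dma.c)
 * has to be assigned at run time. */
extern int table_size;			/* lives "elsewhere" */

struct driver {
	int num_ioctls;
};

/* static struct driver drv = { table_size };   would not compile */
static struct driver drv;

int table_size = 14;			/* stands in for via_max_ioctl */

static void driver_init(void)		/* mirrors via_init() above */
{
	drv.num_ioctls = table_size;
}

int main(void)
{
	driver_init();
	return drv.num_ioctls == 14 ? 0 : 1;
}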
+43 -13
drivers/char/drm/via_drv.h
··· 24 #ifndef _VIA_DRV_H_ 25 #define _VIA_DRV_H_ 26 27 - #define DRIVER_AUTHOR "VIA" 28 29 #define DRIVER_NAME "via" 30 #define DRIVER_DESC "VIA Unichrome / Pro" 31 - #define DRIVER_DATE "20050523" 32 33 #define DRIVER_MAJOR 2 34 - #define DRIVER_MINOR 6 35 - #define DRIVER_PATCHLEVEL 3 36 37 #include "via_verifier.h" 38 39 #define VIA_PCI_BUF_SIZE 60000 40 #define VIA_FIRE_BUF_SIZE 1024 41 - #define VIA_NUM_IRQS 2 42 43 typedef struct drm_via_ring_buffer { 44 - drm_map_t map; 45 char *virtual_start; 46 } drm_via_ring_buffer_t; 47 ··· 58 59 typedef struct drm_via_private { 60 drm_via_sarea_t *sarea_priv; 61 - drm_map_t *sarea; 62 - drm_map_t *fb; 63 - drm_map_t *mmio; 64 unsigned long agpAddr; 65 wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS]; 66 char *dma_ptr; ··· 84 maskarray_t *irq_masks; 85 uint32_t irq_enable_mask; 86 uint32_t irq_pending_mask; 87 } drm_via_private_t; 88 89 /* VIA MMIO register access */ 90 #define VIA_BASE ((dev_priv->mmio)) ··· 101 #define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg) 102 #define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val) 103 104 extern int via_init_context(drm_device_t * dev, int context); 105 extern int via_final_context(drm_device_t * dev, int context); 106 107 extern int via_do_cleanup_map(drm_device_t * dev); 108 - extern int via_map_init(struct inode *inode, struct file *filp, 109 - unsigned int cmd, unsigned long arg); 110 extern int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence); 111 112 extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS); ··· 139 extern void via_init_futex(drm_via_private_t * dev_priv); 140 extern void via_cleanup_futex(drm_via_private_t * dev_priv); 141 extern void via_release_futex(drm_via_private_t * dev_priv, int context); 142 143 - extern int via_parse_command_stream(drm_device_t * dev, const uint32_t * buf, 144 - unsigned int size); 145 146 #endif
··· 24 #ifndef _VIA_DRV_H_ 25 #define _VIA_DRV_H_ 26 27 + #define DRIVER_AUTHOR "Various" 28 29 #define DRIVER_NAME "via" 30 #define DRIVER_DESC "VIA Unichrome / Pro" 31 + #define DRIVER_DATE "20051116" 32 33 #define DRIVER_MAJOR 2 34 + #define DRIVER_MINOR 7 35 + #define DRIVER_PATCHLEVEL 4 36 37 #include "via_verifier.h" 38 39 + #include "via_dmablit.h" 40 + 41 #define VIA_PCI_BUF_SIZE 60000 42 #define VIA_FIRE_BUF_SIZE 1024 43 + #define VIA_NUM_IRQS 4 44 45 typedef struct drm_via_ring_buffer { 46 + drm_local_map_t map; 47 char *virtual_start; 48 } drm_via_ring_buffer_t; 49 ··· 56 57 typedef struct drm_via_private { 58 drm_via_sarea_t *sarea_priv; 59 + drm_local_map_t *sarea; 60 + drm_local_map_t *fb; 61 + drm_local_map_t *mmio; 62 unsigned long agpAddr; 63 wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS]; 64 char *dma_ptr; ··· 82 maskarray_t *irq_masks; 83 uint32_t irq_enable_mask; 84 uint32_t irq_pending_mask; 85 + int *irq_map; 86 + drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES]; 87 } drm_via_private_t; 88 + 89 + enum via_family { 90 + VIA_OTHER = 0, 91 + VIA_PRO_GROUP_A, 92 + }; 93 94 /* VIA MMIO register access */ 95 #define VIA_BASE ((dev_priv->mmio)) ··· 92 #define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg) 93 #define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val) 94 95 + extern drm_ioctl_desc_t via_ioctls[]; 96 + extern int via_max_ioctl; 97 + 98 + extern int via_fb_init(DRM_IOCTL_ARGS); 99 + extern int via_mem_alloc(DRM_IOCTL_ARGS); 100 + extern int via_mem_free(DRM_IOCTL_ARGS); 101 + extern int via_agp_init(DRM_IOCTL_ARGS); 102 + extern int via_map_init(DRM_IOCTL_ARGS); 103 + extern int via_decoder_futex(DRM_IOCTL_ARGS); 104 + extern int via_dma_init(DRM_IOCTL_ARGS); 105 + extern int via_cmdbuffer(DRM_IOCTL_ARGS); 106 + extern int via_flush_ioctl(DRM_IOCTL_ARGS); 107 + extern int via_pci_cmdbuffer(DRM_IOCTL_ARGS); 108 + extern int via_cmdbuf_size(DRM_IOCTL_ARGS); 109 + extern int via_wait_irq(DRM_IOCTL_ARGS); 110 + extern int via_dma_blit_sync( DRM_IOCTL_ARGS ); 111 + extern int via_dma_blit( DRM_IOCTL_ARGS ); 112 + 113 + extern int via_driver_load(drm_device_t *dev, unsigned long chipset); 114 + extern int via_driver_unload(drm_device_t *dev); 115 + 116 extern int via_init_context(drm_device_t * dev, int context); 117 extern int via_final_context(drm_device_t * dev, int context); 118 119 extern int via_do_cleanup_map(drm_device_t * dev); 120 extern int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence); 121 122 extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS); ··· 111 extern void via_init_futex(drm_via_private_t * dev_priv); 112 extern void via_cleanup_futex(drm_via_private_t * dev_priv); 113 extern void via_release_futex(drm_via_private_t * dev_priv, int context); 114 + extern int via_driver_irq_wait(drm_device_t * dev, unsigned int irq, 115 + int force_sequence, unsigned int *sequence); 116 117 + extern void via_dmablit_handler(drm_device_t *dev, int engine, int from_irq); 118 + extern void via_init_dmablit(drm_device_t *dev); 119 120 #endif
+1 -8
drivers/char/drm/via_ds.c
··· 22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 * DEALINGS IN THE SOFTWARE. 24 */ 25 - #include <linux/module.h> 26 - #include <linux/delay.h> 27 - #include <linux/errno.h> 28 - #include <linux/kernel.h> 29 - #include <linux/slab.h> 30 - #include <linux/poll.h> 31 - #include <linux/pci.h> 32 - #include <asm/io.h> 33 34 #include "via_ds.h" 35 extern unsigned int VIA_DEBUG;
··· 22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 * DEALINGS IN THE SOFTWARE. 24 */ 25 + #include "drmP.h" 26 27 #include "via_ds.h" 28 extern unsigned int VIA_DEBUG;
+44 -9
drivers/char/drm/via_irq.c
··· 50 #define VIA_IRQ_HQV1_ENABLE (1 << 25) 51 #define VIA_IRQ_HQV0_PENDING (1 << 9) 52 #define VIA_IRQ_HQV1_PENDING (1 << 10) 53 54 /* 55 * Device-specific IRQs go here. This type might need to be extended with ··· 70 {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010, 71 0x00000000}, 72 {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010, 73 - 0x00000000} 74 }; 75 static int via_num_pro_group_a = 76 sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t); 77 78 - static maskarray_t via_unichrome_irqs[] = { }; 79 static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t); 80 81 static unsigned time_diff(struct timeval *now, struct timeval *then) 82 { ··· 133 atomic_inc(&cur_irq->irq_received); 134 DRM_WAKEUP(&cur_irq->irq_queue); 135 handled = 1; 136 } 137 cur_irq++; 138 } ··· 190 return ret; 191 } 192 193 - static int 194 via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence, 195 unsigned int *sequence) 196 { ··· 199 drm_via_irq_t *cur_irq = dev_priv->via_irqs; 200 int ret = 0; 201 maskarray_t *masks = dev_priv->irq_masks; 202 203 DRM_DEBUG("%s\n", __FUNCTION__); 204 ··· 208 return DRM_ERR(EINVAL); 209 } 210 211 - if (irq >= dev_priv->num_irqs) { 212 DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, 213 irq); 214 return DRM_ERR(EINVAL); 215 } 216 217 - cur_irq += irq; 218 219 - if (masks[irq][2] && !force_sequence) { 220 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, 221 ((VIA_READ(masks[irq][2]) & masks[irq][3]) == 222 masks[irq][4])); ··· 260 via_pro_group_a_irqs : via_unichrome_irqs; 261 dev_priv->num_irqs = (dev_priv->pro_group_a) ? 262 via_num_pro_group_a : via_num_unichrome; 263 264 for (i = 0; i < dev_priv->num_irqs; ++i) { 265 atomic_set(&cur_irq->irq_received, 0); ··· 277 278 dev_priv->last_vblank_valid = 0; 279 280 - // Clear VSync interrupt regs 281 status = VIA_READ(VIA_REG_INTERRUPT); 282 VIA_WRITE(VIA_REG_INTERRUPT, status & 283 ~(dev_priv->irq_enable_mask)); ··· 327 328 int via_wait_irq(DRM_IOCTL_ARGS) 329 { 330 - drm_file_t *priv = filp->private_data; 331 - drm_device_t *dev = priv->head->dev; 332 drm_via_irqwait_t __user *argp = (void __user *)data; 333 drm_via_irqwait_t irqwait; 334 struct timeval now;
··· 50 #define VIA_IRQ_HQV1_ENABLE (1 << 25) 51 #define VIA_IRQ_HQV0_PENDING (1 << 9) 52 #define VIA_IRQ_HQV1_PENDING (1 << 10) 53 + #define VIA_IRQ_DMA0_DD_ENABLE (1 << 20) 54 + #define VIA_IRQ_DMA0_TD_ENABLE (1 << 21) 55 + #define VIA_IRQ_DMA1_DD_ENABLE (1 << 22) 56 + #define VIA_IRQ_DMA1_TD_ENABLE (1 << 23) 57 + #define VIA_IRQ_DMA0_DD_PENDING (1 << 4) 58 + #define VIA_IRQ_DMA0_TD_PENDING (1 << 5) 59 + #define VIA_IRQ_DMA1_DD_PENDING (1 << 6) 60 + #define VIA_IRQ_DMA1_TD_PENDING (1 << 7) 61 + 62 63 /* 64 * Device-specific IRQs go here. This type might need to be extended with ··· 61 {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010, 62 0x00000000}, 63 {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010, 64 + 0x00000000}, 65 + {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0, 66 + VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, 67 + {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, 68 + VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, 69 }; 70 static int via_num_pro_group_a = 71 sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t); 72 + static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3}; 73 74 + static maskarray_t via_unichrome_irqs[] = { 75 + {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0, 76 + VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, 77 + {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, 78 + VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008} 79 + }; 80 static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t); 81 + static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1}; 82 83 static unsigned time_diff(struct timeval *now, struct timeval *then) 84 { ··· 113 atomic_inc(&cur_irq->irq_received); 114 DRM_WAKEUP(&cur_irq->irq_queue); 115 handled = 1; 116 + if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) { 117 + via_dmablit_handler(dev, 0, 1); 118 + } else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) { 119 + via_dmablit_handler(dev, 1, 1); 120 + } 121 } 122 cur_irq++; 123 } ··· 165 return ret; 166 } 167 168 + int 169 via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence, 170 unsigned int *sequence) 171 { ··· 174 drm_via_irq_t *cur_irq = dev_priv->via_irqs; 175 int ret = 0; 176 maskarray_t *masks = dev_priv->irq_masks; 177 + int real_irq; 178 179 DRM_DEBUG("%s\n", __FUNCTION__); 180 ··· 182 return DRM_ERR(EINVAL); 183 } 184 185 + if (irq >= drm_via_irq_num) { 186 DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, 187 irq); 188 return DRM_ERR(EINVAL); 189 } 190 191 + real_irq = dev_priv->irq_map[irq]; 192 193 + if (real_irq < 0) { 194 + DRM_ERROR("%s Video IRQ %d not available on this hardware.\n", 195 + __FUNCTION__, irq); 196 + return DRM_ERR(EINVAL); 197 + } 198 + 199 + cur_irq += real_irq; 200 + 201 + if (masks[real_irq][2] && !force_sequence) { 202 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, 203 ((VIA_READ(masks[irq][2]) & masks[irq][3]) == 204 masks[irq][4])); ··· 226 via_pro_group_a_irqs : via_unichrome_irqs; 227 dev_priv->num_irqs = (dev_priv->pro_group_a) ? 228 via_num_pro_group_a : via_num_unichrome; 229 + dev_priv->irq_map = (dev_priv->pro_group_a) ? 
230 + via_irqmap_pro_group_a : via_irqmap_unichrome; 231 232 for (i = 0; i < dev_priv->num_irqs; ++i) { 233 atomic_set(&cur_irq->irq_received, 0); ··· 241 242 dev_priv->last_vblank_valid = 0; 243 244 + /* Clear VSync interrupt regs */ 245 status = VIA_READ(VIA_REG_INTERRUPT); 246 VIA_WRITE(VIA_REG_INTERRUPT, status & 247 ~(dev_priv->irq_enable_mask)); ··· 291 292 int via_wait_irq(DRM_IOCTL_ARGS) 293 { 294 + DRM_DEVICE; 295 drm_via_irqwait_t __user *argp = (void __user *)data; 296 drm_via_irqwait_t irqwait; 297 struct timeval now;
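The irq_map tables introduced above decouple the user-visible IRQ numbering (enum drm_via_irqs in via_drm.h) from each chip's position in its mask array; -1 flags sources the hardware does not provide, which via_driver_irq_wait() now rejects with EINVAL. A standalone sketch of the lookup:

#include <stdio.h>

enum drm_via_irqs {
	drm_via_irq_hqv0 = 0, drm_via_irq_hqv1,
	drm_via_irq_dma0_dd,  drm_via_irq_dma0_td,
	drm_via_irq_dma1_dd,  drm_via_irq_dma1_td,
	drm_via_irq_num
};

/* Copied from via_irq.c: logical IRQ -> slot in the chip's mask array. */
static const int map_pro_group_a[] = { 0, 1, -1, 2, -1, 3 };
static const int map_unichrome[]   = { -1, -1, -1, 0, -1, 1 };

static int real_irq(const int *map, unsigned irq)
{
	if (irq >= drm_via_irq_num)
		return -1;		/* unknown IRQ, as in the wait path */
	return map[irq];		/* -1: not available on this chip */
}

int main(void)
{
	printf("%d\n", real_irq(map_pro_group_a, drm_via_irq_dma1_td));	/* 3 */
	printf("%d\n", real_irq(map_unichrome, drm_via_irq_hqv0));	/* -1 */
	return 0;
}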
+30 -17
drivers/char/drm/via_map.c
··· 27 28 static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init) 29 { 30 - drm_via_private_t *dev_priv; 31 32 DRM_DEBUG("%s\n", __FUNCTION__); 33 - 34 - dev_priv = drm_alloc(sizeof(drm_via_private_t), DRM_MEM_DRIVER); 35 - if (dev_priv == NULL) 36 - return -ENOMEM; 37 - 38 - memset(dev_priv, 0, sizeof(drm_via_private_t)); 39 40 DRM_GETSAREA(); 41 if (!dev_priv->sarea) { ··· 61 dev_priv->agpAddr = init->agpAddr; 62 63 via_init_futex(dev_priv); 64 - dev_priv->pro_group_a = (dev->pdev->device == 0x3118); 65 66 dev->dev_private = (void *)dev_priv; 67 return 0; ··· 70 71 int via_do_cleanup_map(drm_device_t * dev) 72 { 73 - if (dev->dev_private) { 74 - 75 - drm_via_private_t *dev_priv = dev->dev_private; 76 - 77 - via_dma_cleanup(dev); 78 - 79 - drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER); 80 - dev->dev_private = NULL; 81 - } 82 83 return 0; 84 } ··· 94 95 return -EINVAL; 96 }
··· 27 28 static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init) 29 { 30 + drm_via_private_t *dev_priv = dev->dev_private; 31 32 DRM_DEBUG("%s\n", __FUNCTION__); 33 34 DRM_GETSAREA(); 35 if (!dev_priv->sarea) { ··· 67 dev_priv->agpAddr = init->agpAddr; 68 69 via_init_futex(dev_priv); 70 + 71 + via_init_dmablit(dev); 72 73 dev->dev_private = (void *)dev_priv; 74 return 0; ··· 75 76 int via_do_cleanup_map(drm_device_t * dev) 77 { 78 + via_dma_cleanup(dev); 79 80 return 0; 81 } ··· 107 108 return -EINVAL; 109 } 110 + 111 + int via_driver_load(drm_device_t *dev, unsigned long chipset) 112 + { 113 + drm_via_private_t *dev_priv; 114 + 115 + dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER); 116 + if (dev_priv == NULL) 117 + return DRM_ERR(ENOMEM); 118 + 119 + dev->dev_private = (void *)dev_priv; 120 + 121 + if (chipset == VIA_PRO_GROUP_A) 122 + dev_priv->pro_group_a = 1; 123 + 124 + return 0; 125 + } 126 + 127 + int via_driver_unload(drm_device_t *dev) 128 + { 129 + drm_via_private_t *dev_priv = dev->dev_private; 130 + 131 + drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER); 132 + 133 + return 0; 134 + } 135 +
+10 -10
drivers/char/drm/via_mm.c
··· 42 static int via_fb_alloc(drm_via_mem_t * mem); 43 static int via_fb_free(drm_via_mem_t * mem); 44 45 - static int add_alloc_set(int context, int type, unsigned int val) 46 { 47 int i, retval = 0; 48 ··· 56 return retval; 57 } 58 59 - static int del_alloc_set(int context, int type, unsigned int val) 60 { 61 int i, retval = 0; 62 ··· 199 sizeof(mem)); 200 201 switch (mem.type) { 202 - case VIDEO: 203 if (via_fb_alloc(&mem) < 0) 204 return -EFAULT; 205 DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, 206 sizeof(mem)); 207 return 0; 208 - case AGP: 209 if (via_agp_alloc(&mem) < 0) 210 return -EFAULT; 211 DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, ··· 232 if (block) { 233 fb.offset = block->ofs; 234 fb.free = (unsigned long)block; 235 - if (!add_alloc_set(fb.context, VIDEO, fb.free)) { 236 DRM_DEBUG("adding to allocation set fails\n"); 237 via_mmFreeMem((PMemBlock) fb.free); 238 retval = -1; ··· 269 if (block) { 270 agp.offset = block->ofs; 271 agp.free = (unsigned long)block; 272 - if (!add_alloc_set(agp.context, AGP, agp.free)) { 273 DRM_DEBUG("adding to allocation set fails\n"); 274 via_mmFreeMem((PMemBlock) agp.free); 275 retval = -1; ··· 297 298 switch (mem.type) { 299 300 - case VIDEO: 301 if (via_fb_free(&mem) == 0) 302 return 0; 303 break; 304 - case AGP: 305 if (via_agp_free(&mem) == 0) 306 return 0; 307 break; ··· 329 330 via_mmFreeMem((PMemBlock) fb.free); 331 332 - if (!del_alloc_set(fb.context, VIDEO, fb.free)) { 333 retval = -1; 334 } 335 ··· 352 353 via_mmFreeMem((PMemBlock) agp.free); 354 355 - if (!del_alloc_set(agp.context, AGP, agp.free)) { 356 retval = -1; 357 } 358
··· 42 static int via_fb_alloc(drm_via_mem_t * mem); 43 static int via_fb_free(drm_via_mem_t * mem); 44 45 + static int add_alloc_set(int context, int type, unsigned long val) 46 { 47 int i, retval = 0; 48 ··· 56 return retval; 57 } 58 59 + static int del_alloc_set(int context, int type, unsigned long val) 60 { 61 int i, retval = 0; 62 ··· 199 sizeof(mem)); 200 201 switch (mem.type) { 202 + case VIA_MEM_VIDEO: 203 if (via_fb_alloc(&mem) < 0) 204 return -EFAULT; 205 DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, 206 sizeof(mem)); 207 return 0; 208 + case VIA_MEM_AGP: 209 if (via_agp_alloc(&mem) < 0) 210 return -EFAULT; 211 DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, ··· 232 if (block) { 233 fb.offset = block->ofs; 234 fb.free = (unsigned long)block; 235 + if (!add_alloc_set(fb.context, VIA_MEM_VIDEO, fb.free)) { 236 DRM_DEBUG("adding to allocation set fails\n"); 237 via_mmFreeMem((PMemBlock) fb.free); 238 retval = -1; ··· 269 if (block) { 270 agp.offset = block->ofs; 271 agp.free = (unsigned long)block; 272 + if (!add_alloc_set(agp.context, VIA_MEM_AGP, agp.free)) { 273 DRM_DEBUG("adding to allocation set fails\n"); 274 via_mmFreeMem((PMemBlock) agp.free); 275 retval = -1; ··· 297 298 switch (mem.type) { 299 300 + case VIA_MEM_VIDEO: 301 if (via_fb_free(&mem) == 0) 302 return 0; 303 break; 304 + case VIA_MEM_AGP: 305 if (via_agp_free(&mem) == 0) 306 return 0; 307 break; ··· 329 330 via_mmFreeMem((PMemBlock) fb.free); 331 332 + if (!del_alloc_set(fb.context, VIA_MEM_VIDEO, fb.free)) { 333 retval = -1; 334 } 335 ··· 352 353 via_mmFreeMem((PMemBlock) agp.free); 354 355 + if (!del_alloc_set(agp.context, VIA_MEM_AGP, agp.free)) { 356 retval = -1; 357 } 358
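Beyond the VIA_MEM_* renames, the substantive change here widens val from unsigned int to unsigned long: fb.free and agp.free hold a kernel pointer (a PMemBlock) cast to unsigned long, and the stored handle is cast back to a pointer when leftover allocations are reclaimed, so truncating it corrupts the pointer on 64-bit. A minimal standalone illustration (plain user-space C, not driver code) of what the old prototype did on LP64:

	#include <stdio.h>

	static void take_uint(unsigned int val)   { printf("uint:  0x%x\n", val); }
	static void take_ulong(unsigned long val) { printf("ulong: 0x%lx\n", val); }

	int main(void)
	{
		int object;	/* stand-in for the PMemBlock the driver tracks */
		unsigned long handle = (unsigned long)&object;

		take_uint(handle);	/* on LP64, the upper 32 bits are dropped */
		take_ulong(handle);	/* the full pointer value survives the call */
		return 0;
	}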
+3 -3
drivers/char/drm/via_verifier.c
··· 237 static __inline__ int 238 eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words) 239 { 240 - if ((*buf - buf_end) >= num_words) { 241 *buf += num_words; 242 return 0; 243 } ··· 249 * Partially stolen from drm_memory.h 250 */ 251 252 - static __inline__ drm_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq, 253 unsigned long offset, 254 unsigned long size, 255 drm_device_t * dev) 256 { 257 struct list_head *list; 258 drm_map_list_t *r_list; 259 - drm_map_t *map = seq->map_cache; 260 261 if (map && map->offset <= offset 262 && (offset + size) <= (map->offset + map->size)) {
··· 237 static __inline__ int 238 eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words) 239 { 240 + if ((buf_end - *buf) >= num_words) { 241 *buf += num_words; 242 return 0; 243 } ··· 249 * Partially stolen from drm_memory.h 250 */ 251 252 + static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq, 253 unsigned long offset, 254 unsigned long size, 255 drm_device_t * dev) 256 { 257 struct list_head *list; 258 drm_map_list_t *r_list; 259 + drm_local_map_t *map = seq->map_cache; 260 261 if (map && map->offset <= offset 262 && (offset + size) <= (map->offset + map->size)) {
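Two fixes in this file. The eat_words() change reverses an inverted pointer subtraction: the room left in the buffer is buf_end - *buf, whereas the old *buf - buf_end is negative for any in-bounds cursor; converted for the unsigned comparison on 32-bit, that negative value becomes huge and the check can pass, letting the cursor walk past buf_end. The drm_map_t to drm_local_map_t switch appears to follow the core DRM split between the user-visible map descriptor and the kernel's internal map type. A standalone re-creation of the fixed bounds check:

	#include <stdint.h>
	#include <stdio.h>

	/* Same shape as the fixed eat_words(): the cursor never passes buf_end,
	 * so the pointer difference is non-negative and the comparison is safe. */
	static int eat_words(const uint32_t **buf, const uint32_t *buf_end,
			     unsigned num_words)
	{
		if ((buf_end - *buf) >= num_words) {
			*buf += num_words;
			return 0;
		}
		return 1;	/* not enough words left in the command stream */
	}

	int main(void)
	{
		uint32_t cmd[4] = { 0 };
		const uint32_t *cur = cmd;

		printf("eat 3 -> %d\n", eat_words(&cur, cmd + 4, 3));	/* 0: ok */
		printf("eat 3 -> %d\n", eat_words(&cur, cmd + 4, 3));	/* 1: one word left */
		return 0;
	}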
+3 -1
drivers/char/drm/via_verifier.h
··· 47 int agp_texture; 48 int multitex; 49 drm_device_t *dev; 50 - drm_map_t *map_cache; 51 uint32_t vertex_count; 52 int agp; 53 const uint32_t *buf_start; ··· 55 56 extern int via_verify_command_stream(const uint32_t * buf, unsigned int size, 57 drm_device_t * dev, int agp); 58 59 #endif
··· 47 int agp_texture; 48 int multitex; 49 drm_device_t *dev; 50 + drm_local_map_t *map_cache; 51 uint32_t vertex_count; 52 int agp; 53 const uint32_t *buf_start; ··· 55 56 extern int via_verify_command_stream(const uint32_t * buf, unsigned int size, 57 drm_device_t * dev, int agp); 58 + extern int via_parse_command_stream(drm_device_t *dev, const uint32_t *buf, 59 + unsigned int size); 60 61 #endif
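via_parse_command_stream() is the second half of the verifier interface: via_verify_command_stream() only checks a stream, while the new export also executes it, which is what the PCI (non-AGP) submission path needs. The via_dma.c hunks are not in this excerpt; the sketch below reconstructs the intended call site from the same patch series, so names such as VIA_PCI_BUF_SIZE and dev_priv->pci_buf should be read as assumptions rather than as lines of this diff.

	/* Hypothetical PCI dispatch path: verify first, then parse-and-execute. */
	static int via_dispatch_pci_cmdbuffer(drm_device_t *dev,
					      drm_via_cmdbuffer_t *cmd)
	{
		drm_via_private_t *dev_priv = dev->dev_private;
		int ret;

		if (cmd->size > VIA_PCI_BUF_SIZE)
			return DRM_ERR(ENOMEM);
		if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
			return DRM_ERR(EFAULT);

		ret = via_verify_command_stream((uint32_t *)dev_priv->pci_buf,
						cmd->size, dev, 0);
		if (ret)
			return ret;

		return via_parse_command_stream(dev, (uint32_t *)dev_priv->pci_buf,
						cmd->size);
	}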
+5 -2
drivers/char/drm/via_video.c
··· 50 unsigned int i; 51 volatile int *lock; 52 53 for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) { 54 - lock = (int *)XVMCLOCKPTR(dev_priv->sarea_priv, i); 55 if ((_DRM_LOCKING_CONTEXT(*lock) == context)) { 56 if (_DRM_LOCK_IS_HELD(*lock) 57 && (*lock & _DRM_LOCK_CONT)) { ··· 82 if (fx.lock > VIA_NR_XVMC_LOCKS) 83 return -EFAULT; 84 85 - lock = (int *)XVMCLOCKPTR(sAPriv, fx.lock); 86 87 switch (fx.func) { 88 case VIA_FUTEX_WAIT:
··· 50 unsigned int i; 51 volatile int *lock; 52 53 + if (!dev_priv->sarea_priv) 54 + return; 55 + 56 for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) { 57 + lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i); 58 if ((_DRM_LOCKING_CONTEXT(*lock) == context)) { 59 if (_DRM_LOCK_IS_HELD(*lock) 60 && (*lock & _DRM_LOCK_CONT)) { ··· 79 if (fx.lock > VIA_NR_XVMC_LOCKS) 80 return -EFAULT; 81 82 + lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx.lock); 83 84 switch (fx.func) { 85 case VIA_FUTEX_WAIT:
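Two changes here. First, via_release_futex() can now be reached before the INIT_MAP ioctl has run, because dev_priv is allocated at driver load while sarea_priv is still only set up in via_do_init_map(); hence the early return. Second, the volatile qualifier is kept on the cast as well as the declaration, since the XvMC lock words live in the SAREA and are updated concurrently by user space. (The unchanged fx.lock > VIA_NR_XVMC_LOCKS test looks like an off-by-one, since valid indices are 0 through VIA_NR_XVMC_LOCKS - 1, but this merge does not touch it.) Below, a standalone decoding of a DRM lock word using the standard drm.h encoding, with the values copied in so the example compiles on its own:

	#include <stdio.h>

	/* Lock-word encoding as in drm.h: the top two bits are flags, the
	 * remainder identifies the context that owns the lock. */
	#define _DRM_LOCK_HELD	0x80000000U
	#define _DRM_LOCK_CONT	0x40000000U
	#define _DRM_LOCK_IS_HELD(lock)    ((lock) & _DRM_LOCK_HELD)
	#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD | _DRM_LOCK_CONT))

	int main(void)
	{
		unsigned int lock = _DRM_LOCK_HELD | _DRM_LOCK_CONT | 42;

		printf("held=%d contended=%d context=%u\n",
		       !!_DRM_LOCK_IS_HELD(lock), !!(lock & _DRM_LOCK_CONT),
		       _DRM_LOCKING_CONTEXT(lock));
		return 0;
	}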