Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm: Replace DRM_IOCTL_ARGS with (dev, data, file_priv) and remove DRM_DEVICE.

The data is now in kernel space, copied in/out as appropriate according to the
ioctl. This results in DRM_COPY_{TO,FROM}_USER going away, and error paths to deal
with those failures. This also means that XFree86 4.2.0 support for i810 DRI
is lost.

Signed-off-by: Dave Airlie <airlied@linux.ie>

authored by

Eric Anholt and committed by
Dave Airlie
c153f45f b589ee59

+1481 -2138
+114 -105
drivers/char/drm/drmP.h
··· 34 34 #ifndef _DRM_P_H_ 35 35 #define _DRM_P_H_ 36 36 37 - struct drm_file; 38 - 39 37 /* If you want the memory alloc debug functionality, change define below */ 40 38 /* #define DEBUG_MEMORY */ 41 39 ··· 79 81 80 82 #define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) 81 83 #define __OS_HAS_MTRR (defined(CONFIG_MTRR)) 84 + 85 + struct drm_file; 86 + struct drm_device; 82 87 83 88 #include "drm_os_linux.h" 84 89 #include "drm_hashtab.h" ··· 234 233 * \param dev DRM device. 235 234 * \param filp file pointer of the caller. 236 235 */ 237 - #define LOCK_TEST_WITH_RETURN( dev, filp ) \ 236 + #define LOCK_TEST_WITH_RETURN( dev, file_priv ) \ 238 237 do { \ 239 238 if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \ 240 - dev->lock.filp != filp ) { \ 241 - DRM_ERROR( "%s called without lock held\n", \ 242 - __FUNCTION__ ); \ 239 + dev->lock.file_priv != file_priv ) { \ 240 + DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ 241 + __FUNCTION__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\ 242 + dev->lock.file_priv, file_priv ); \ 243 243 return -EINVAL; \ 244 244 } \ 245 245 } while (0) ··· 265 263 * \param cmd command. 266 264 * \param arg argument. 267 265 */ 268 - typedef int drm_ioctl_t(struct inode *inode, struct drm_file *file_priv, 269 - unsigned int cmd, unsigned long arg); 266 + typedef int drm_ioctl_t(struct drm_device *dev, void *data, 267 + struct drm_file *file_priv); 270 268 271 269 typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, 272 270 unsigned long arg); ··· 275 273 #define DRM_MASTER 0x2 276 274 #define DRM_ROOT_ONLY 0x4 277 275 278 - typedef struct drm_ioctl_desc { 276 + struct drm_ioctl_desc { 277 + unsigned int cmd; 279 278 drm_ioctl_t *func; 280 279 int flags; 281 - } drm_ioctl_desc_t; 280 + }; 281 + 282 + /** 283 + * Creates a driver or general drm_ioctl_desc array entry for the given 284 + * ioctl, for use by drm_ioctl(). 
285 + */ 286 + #define DRM_IOCTL_DEF(ioctl, func, flags) \ 287 + [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags} 282 288 283 289 struct drm_magic_entry { 284 290 struct list_head head; ··· 569 559 void (*postclose) (struct drm_device *, struct drm_file *); 570 560 void (*lastclose) (struct drm_device *); 571 561 int (*unload) (struct drm_device *); 572 - int (*dma_ioctl) (DRM_IOCTL_ARGS); 562 + int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); 573 563 void (*dma_ready) (struct drm_device *); 574 564 int (*dma_quiescent) (struct drm_device *); 575 565 int (*context_ctor) (struct drm_device *dev, int context); ··· 620 610 621 611 u32 driver_features; 622 612 int dev_priv_size; 623 - drm_ioctl_desc_t *ioctls; 613 + struct drm_ioctl_desc *ioctls; 624 614 int num_ioctls; 625 615 struct file_operations fops; 626 616 struct pci_driver pci_driver; ··· 864 854 extern int drm_unbind_agp(DRM_AGP_MEM * handle); 865 855 866 856 /* Misc. IOCTL support (drm_ioctl.h) */ 867 - extern int drm_irq_by_busid(struct inode *inode, struct drm_file *file_priv, 868 - unsigned int cmd, unsigned long arg); 869 - extern int drm_getunique(struct inode *inode, struct drm_file *file_priv, 870 - unsigned int cmd, unsigned long arg); 871 - extern int drm_setunique(struct inode *inode, struct drm_file *file_priv, 872 - unsigned int cmd, unsigned long arg); 873 - extern int drm_getmap(struct inode *inode, struct drm_file *file_priv, 874 - unsigned int cmd, unsigned long arg); 875 - extern int drm_getclient(struct inode *inode, struct drm_file *file_priv, 876 - unsigned int cmd, unsigned long arg); 877 - extern int drm_getstats(struct inode *inode, struct drm_file *file_priv, 878 - unsigned int cmd, unsigned long arg); 879 - extern int drm_setversion(struct inode *inode, struct drm_file *file_priv, 880 - unsigned int cmd, unsigned long arg); 881 - extern int drm_noop(struct inode *inode, struct drm_file *file_priv, 882 - unsigned int cmd, unsigned long arg); 857 + extern 
int drm_irq_by_busid(struct drm_device *dev, void *data, 858 + struct drm_file *file_priv); 859 + extern int drm_getunique(struct drm_device *dev, void *data, 860 + struct drm_file *file_priv); 861 + extern int drm_setunique(struct drm_device *dev, void *data, 862 + struct drm_file *file_priv); 863 + extern int drm_getmap(struct drm_device *dev, void *data, 864 + struct drm_file *file_priv); 865 + extern int drm_getclient(struct drm_device *dev, void *data, 866 + struct drm_file *file_priv); 867 + extern int drm_getstats(struct drm_device *dev, void *data, 868 + struct drm_file *file_priv); 869 + extern int drm_setversion(struct drm_device *dev, void *data, 870 + struct drm_file *file_priv); 871 + extern int drm_noop(struct drm_device *dev, void *data, 872 + struct drm_file *file_priv); 883 873 884 874 /* Context IOCTL support (drm_context.h) */ 885 - extern int drm_resctx(struct inode *inode, struct drm_file *file_priv, 886 - unsigned int cmd, unsigned long arg); 887 - extern int drm_addctx(struct inode *inode, struct drm_file *file_priv, 888 - unsigned int cmd, unsigned long arg); 889 - extern int drm_modctx(struct inode *inode, struct drm_file *file_priv, 890 - unsigned int cmd, unsigned long arg); 891 - extern int drm_getctx(struct inode *inode, struct drm_file *file_priv, 892 - unsigned int cmd, unsigned long arg); 893 - extern int drm_switchctx(struct inode *inode, struct drm_file *file_priv, 894 - unsigned int cmd, unsigned long arg); 895 - extern int drm_newctx(struct inode *inode, struct drm_file *file_priv, 896 - unsigned int cmd, unsigned long arg); 897 - extern int drm_rmctx(struct inode *inode, struct drm_file *file_priv, 898 - unsigned int cmd, unsigned long arg); 875 + extern int drm_resctx(struct drm_device *dev, void *data, 876 + struct drm_file *file_priv); 877 + extern int drm_addctx(struct drm_device *dev, void *data, 878 + struct drm_file *file_priv); 879 + extern int drm_modctx(struct drm_device *dev, void *data, 880 + struct drm_file 
*file_priv); 881 + extern int drm_getctx(struct drm_device *dev, void *data, 882 + struct drm_file *file_priv); 883 + extern int drm_switchctx(struct drm_device *dev, void *data, 884 + struct drm_file *file_priv); 885 + extern int drm_newctx(struct drm_device *dev, void *data, 886 + struct drm_file *file_priv); 887 + extern int drm_rmctx(struct drm_device *dev, void *data, 888 + struct drm_file *file_priv); 899 889 900 890 extern int drm_ctxbitmap_init(struct drm_device *dev); 901 891 extern void drm_ctxbitmap_cleanup(struct drm_device *dev); 902 892 extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); 903 893 904 - extern int drm_setsareactx(struct inode *inode, struct drm_file *file_priv, 905 - unsigned int cmd, unsigned long arg); 906 - extern int drm_getsareactx(struct inode *inode, struct drm_file *file_priv, 907 - unsigned int cmd, unsigned long arg); 894 + extern int drm_setsareactx(struct drm_device *dev, void *data, 895 + struct drm_file *file_priv); 896 + extern int drm_getsareactx(struct drm_device *dev, void *data, 897 + struct drm_file *file_priv); 908 898 909 899 /* Drawable IOCTL support (drm_drawable.h) */ 910 - extern int drm_adddraw(struct inode *inode, struct drm_file *file_priv, 911 - unsigned int cmd, unsigned long arg); 912 - extern int drm_rmdraw(struct inode *inode, struct drm_file *file_priv, 913 - unsigned int cmd, unsigned long arg); 914 - extern int drm_update_drawable_info(struct inode *inode, struct drm_file *file_priv, 915 - unsigned int cmd, unsigned long arg); 900 + extern int drm_adddraw(struct drm_device *dev, void *data, 901 + struct drm_file *file_priv); 902 + extern int drm_rmdraw(struct drm_device *dev, void *data, 903 + struct drm_file *file_priv); 904 + extern int drm_update_drawable_info(struct drm_device *dev, void *data, 905 + struct drm_file *file_priv); 916 906 extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, 917 907 drm_drawable_t id); 918 908 extern void 
drm_drawable_free_all(struct drm_device *dev); 919 909 920 910 /* Authentication IOCTL support (drm_auth.h) */ 921 - extern int drm_getmagic(struct inode *inode, struct drm_file *file_priv, 922 - unsigned int cmd, unsigned long arg); 923 - extern int drm_authmagic(struct inode *inode, struct drm_file *file_priv, 924 - unsigned int cmd, unsigned long arg); 911 + extern int drm_getmagic(struct drm_device *dev, void *data, 912 + struct drm_file *file_priv); 913 + extern int drm_authmagic(struct drm_device *dev, void *data, 914 + struct drm_file *file_priv); 925 915 926 916 /* Locking IOCTL support (drm_lock.h) */ 927 - extern int drm_lock(struct inode *inode, struct drm_file *file_priv, 928 - unsigned int cmd, unsigned long arg); 929 - extern int drm_unlock(struct inode *inode, struct drm_file *file_priv, 930 - unsigned int cmd, unsigned long arg); 917 + extern int drm_lock(struct drm_device *dev, void *data, 918 + struct drm_file *file_priv); 919 + extern int drm_unlock(struct drm_device *dev, void *data, 920 + struct drm_file *file_priv); 931 921 extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); 932 922 extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); 933 923 extern void drm_idlelock_take(struct drm_lock_data *lock_data); ··· 938 928 * DMA quiscent + idle. DMA quiescent usually requires the hardware lock. 
939 929 */ 940 930 941 - extern int drm_i_have_hw_lock(struct drm_file *file_priv); 931 + extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv); 942 932 943 933 /* Buffer management support (drm_bufs.h) */ 944 934 extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request); ··· 946 936 extern int drm_addmap(struct drm_device *dev, unsigned int offset, 947 937 unsigned int size, enum drm_map_type type, 948 938 enum drm_map_flags flags, drm_local_map_t ** map_ptr); 949 - extern int drm_addmap_ioctl(struct inode *inode, struct drm_file *file_priv, 950 - unsigned int cmd, unsigned long arg); 951 - extern int drm_rmmap(struct drm_device *dev, drm_local_map_t * map); 952 - extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t * map); 953 - extern int drm_rmmap_ioctl(struct inode *inode, struct drm_file *file_priv, 954 - unsigned int cmd, unsigned long arg); 955 - 939 + extern int drm_addmap_ioctl(struct drm_device *dev, void *data, 940 + struct drm_file *file_priv); 941 + extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map); 942 + extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map); 943 + extern int drm_rmmap_ioctl(struct drm_device *dev, void *data, 944 + struct drm_file *file_priv); 945 + extern int drm_addbufs(struct drm_device *dev, void *data, 946 + struct drm_file *file_priv); 947 + extern int drm_infobufs(struct drm_device *dev, void *data, 948 + struct drm_file *file_priv); 949 + extern int drm_markbufs(struct drm_device *dev, void *data, 950 + struct drm_file *file_priv); 951 + extern int drm_freebufs(struct drm_device *dev, void *data, 952 + struct drm_file *file_priv); 953 + extern int drm_mapbufs(struct drm_device *dev, void *data, 954 + struct drm_file *file_priv); 956 955 extern int drm_order(unsigned long size); 957 - extern int drm_addbufs(struct inode *inode, struct drm_file *file_priv, 958 - unsigned int cmd, unsigned long arg); 959 - extern int 
drm_infobufs(struct inode *inode, struct drm_file *file_priv, 960 - unsigned int cmd, unsigned long arg); 961 - extern int drm_markbufs(struct inode *inode, struct drm_file *file_priv, 962 - unsigned int cmd, unsigned long arg); 963 - extern int drm_freebufs(struct inode *inode, struct drm_file *file_priv, 964 - unsigned int cmd, unsigned long arg); 965 - extern int drm_mapbufs(struct inode *inode, struct drm_file *file_priv, 966 - unsigned int cmd, unsigned long arg); 967 956 extern unsigned long drm_get_resource_start(struct drm_device *dev, 968 957 unsigned int resource); 969 958 extern unsigned long drm_get_resource_len(struct drm_device *dev, ··· 976 967 struct drm_file *filp); 977 968 978 969 /* IRQ support (drm_irq.h) */ 979 - extern int drm_control(struct inode *inode, struct drm_file *file_priv, 980 - unsigned int cmd, unsigned long arg); 970 + extern int drm_control(struct drm_device *dev, void *data, 971 + struct drm_file *file_priv); 981 972 extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); 982 973 extern int drm_irq_uninstall(struct drm_device *dev); 983 974 extern void drm_driver_irq_preinstall(struct drm_device *dev); 984 975 extern void drm_driver_irq_postinstall(struct drm_device *dev); 985 976 extern void drm_driver_irq_uninstall(struct drm_device *dev); 986 977 987 - extern int drm_wait_vblank(struct inode *inode, struct drm_file *file_priv, 988 - unsigned int cmd, unsigned long arg); 978 + extern int drm_wait_vblank(struct drm_device *dev, void *data, 979 + struct drm_file *file_priv); 989 980 extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); 990 981 extern void drm_vbl_send_signals(struct drm_device *dev); 991 982 extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*)); ··· 993 984 /* AGP/GART support (drm_agpsupport.h) */ 994 985 extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); 995 986 extern int drm_agp_acquire(struct drm_device *dev); 996 - extern int 
drm_agp_acquire_ioctl(struct inode *inode, struct drm_file *file_priv, 997 - unsigned int cmd, unsigned long arg); 987 + extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, 988 + struct drm_file *file_priv); 998 989 extern int drm_agp_release(struct drm_device *dev); 999 - extern int drm_agp_release_ioctl(struct inode *inode, struct drm_file *file_priv, 1000 - unsigned int cmd, unsigned long arg); 990 + extern int drm_agp_release_ioctl(struct drm_device *dev, void *data, 991 + struct drm_file *file_priv); 1001 992 extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); 1002 - extern int drm_agp_enable_ioctl(struct inode *inode, struct drm_file *file_priv, 1003 - unsigned int cmd, unsigned long arg); 1004 - extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info * info); 1005 - extern int drm_agp_info_ioctl(struct inode *inode, struct drm_file *file_priv, 1006 - unsigned int cmd, unsigned long arg); 993 + extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data, 994 + struct drm_file *file_priv); 995 + extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); 996 + extern int drm_agp_info_ioctl(struct drm_device *dev, void *data, 997 + struct drm_file *file_priv); 1007 998 extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); 1008 - extern int drm_agp_alloc_ioctl(struct inode *inode, struct drm_file *file_priv, 1009 - unsigned int cmd, unsigned long arg); 999 + extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, 1000 + struct drm_file *file_priv); 1010 1001 extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); 1011 - extern int drm_agp_free_ioctl(struct inode *inode, struct drm_file *file_priv, 1012 - unsigned int cmd, unsigned long arg); 1002 + extern int drm_agp_free_ioctl(struct drm_device *dev, void *data, 1003 + struct drm_file *file_priv); 1013 1004 extern int drm_agp_unbind(struct drm_device *dev, struct 
drm_agp_binding *request); 1014 - extern int drm_agp_unbind_ioctl(struct inode *inode, struct drm_file *file_priv, 1015 - unsigned int cmd, unsigned long arg); 1005 + extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, 1006 + struct drm_file *file_priv); 1016 1007 extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); 1017 - extern int drm_agp_bind_ioctl(struct inode *inode, struct drm_file *file_priv, 1018 - unsigned int cmd, unsigned long arg); 1019 - extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, 1020 - size_t pages, u32 type); 1008 + extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data, 1009 + struct drm_file *file_priv); 1010 + extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type); 1021 1011 extern int drm_agp_free_memory(DRM_AGP_MEM * handle); 1022 1012 extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); 1023 1013 extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); ··· 1045 1037 1046 1038 /* Scatter Gather Support (drm_scatter.h) */ 1047 1039 extern void drm_sg_cleanup(struct drm_sg_mem * entry); 1048 - extern int drm_sg_alloc(struct inode *inode, struct drm_file *file_priv, 1049 - unsigned int cmd, unsigned long arg); 1050 - extern int drm_sg_free(struct inode *inode, struct drm_file *file_priv, 1051 - unsigned int cmd, unsigned long arg); 1040 + extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, 1041 + struct drm_file *file_priv); 1042 + extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request); 1043 + extern int drm_sg_free(struct drm_device *dev, void *data, 1044 + struct drm_file *file_priv); 1052 1045 1053 1046 /* ATI PCIGART support (ati_pcigart.h) */ 1054 1047 extern int drm_ati_pcigart_init(struct drm_device *dev,
+33 -74
drivers/char/drm/drm_agpsupport.c
··· 71 71 72 72 EXPORT_SYMBOL(drm_agp_info); 73 73 74 - int drm_agp_info_ioctl(struct inode *inode, struct drm_file *file_priv, 75 - unsigned int cmd, unsigned long arg) 74 + int drm_agp_info_ioctl(struct drm_device *dev, void *data, 75 + struct drm_file *file_priv) 76 76 { 77 - struct drm_device *dev = file_priv->head->dev; 78 - struct drm_agp_info info; 77 + struct drm_agp_info *info = data; 79 78 int err; 80 79 81 - err = drm_agp_info(dev, &info); 80 + err = drm_agp_info(dev, info); 82 81 if (err) 83 82 return err; 84 83 85 - if (copy_to_user((struct drm_agp_info __user *) arg, &info, sizeof(info))) 86 - return -EFAULT; 87 84 return 0; 88 85 } 89 86 ··· 119 122 * Verifies the AGP device hasn't been acquired before and calls 120 123 * \c agp_backend_acquire. 121 124 */ 122 - int drm_agp_acquire_ioctl(struct inode *inode, struct drm_file *file_priv, 123 - unsigned int cmd, unsigned long arg) 125 + int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, 126 + struct drm_file *file_priv) 124 127 { 125 128 return drm_agp_acquire((struct drm_device *) file_priv->head->dev); 126 129 } ··· 143 146 } 144 147 EXPORT_SYMBOL(drm_agp_release); 145 148 146 - int drm_agp_release_ioctl(struct inode *inode, struct drm_file *file_priv, 147 - unsigned int cmd, unsigned long arg) 149 + int drm_agp_release_ioctl(struct drm_device *dev, void *data, 150 + struct drm_file *file_priv) 148 151 { 149 - struct drm_device *dev = file_priv->head->dev; 150 - 151 152 return drm_agp_release(dev); 152 153 } 153 154 ··· 173 178 174 179 EXPORT_SYMBOL(drm_agp_enable); 175 180 176 - int drm_agp_enable_ioctl(struct inode *inode, struct drm_file *file_priv, 177 - unsigned int cmd, unsigned long arg) 181 + int drm_agp_enable_ioctl(struct drm_device *dev, void *data, 182 + struct drm_file *file_priv) 178 183 { 179 - struct drm_device *dev = file_priv->head->dev; 180 - struct drm_agp_mode mode; 184 + struct drm_agp_mode *mode = data; 181 185 182 - if (copy_from_user(&mode, (struct drm_agp_mode 
__user *) arg, sizeof(mode))) 183 - return -EFAULT; 184 - 185 - return drm_agp_enable(dev, mode); 186 + return drm_agp_enable(dev, *mode); 186 187 } 187 188 188 189 /** ··· 227 236 } 228 237 EXPORT_SYMBOL(drm_agp_alloc); 229 238 230 - int drm_agp_alloc_ioctl(struct inode *inode, struct drm_file *file_priv, 231 - unsigned int cmd, unsigned long arg) 239 + 240 + int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, 241 + struct drm_file *file_priv) 232 242 { 233 - struct drm_device *dev = file_priv->head->dev; 234 - struct drm_agp_buffer request; 235 - struct drm_agp_buffer __user *argp = (void __user *)arg; 236 - int err; 243 + struct drm_agp_buffer *request = data; 237 244 238 - if (copy_from_user(&request, argp, sizeof(request))) 239 - return -EFAULT; 240 - 241 - err = drm_agp_alloc(dev, &request); 242 - if (err) 243 - return err; 244 - 245 - if (copy_to_user(argp, &request, sizeof(request))) { 246 - struct drm_agp_mem *entry; 247 - list_for_each_entry(entry, &dev->agp->memory, head) { 248 - if (entry->handle == request.handle) 249 - break; 250 - } 251 - list_del(&entry->head); 252 - drm_free_agp(entry->memory, entry->pages); 253 - drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); 254 - return -EFAULT; 255 - } 256 - 257 - return 0; 245 + return drm_agp_alloc(dev, request); 258 246 } 259 247 260 248 /** ··· 287 317 } 288 318 EXPORT_SYMBOL(drm_agp_unbind); 289 319 290 - int drm_agp_unbind_ioctl(struct inode *inode, struct drm_file *file_priv, 291 - unsigned int cmd, unsigned long arg) 320 + 321 + int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, 322 + struct drm_file *file_priv) 292 323 { 293 - struct drm_device *dev = file_priv->head->dev; 294 - struct drm_agp_binding request; 324 + struct drm_agp_binding *request = data; 295 325 296 - if (copy_from_user 297 - (&request, (struct drm_agp_binding __user *) arg, sizeof(request))) 298 - return -EFAULT; 299 - 300 - return drm_agp_unbind(dev, &request); 326 + return drm_agp_unbind(dev, request); 301 327 
} 302 328 303 329 /** ··· 331 365 } 332 366 EXPORT_SYMBOL(drm_agp_bind); 333 367 334 - int drm_agp_bind_ioctl(struct inode *inode, struct drm_file *file_priv, 335 - unsigned int cmd, unsigned long arg) 368 + 369 + int drm_agp_bind_ioctl(struct drm_device *dev, void *data, 370 + struct drm_file *file_priv) 336 371 { 337 - struct drm_device *dev = file_priv->head->dev; 338 - struct drm_agp_binding request; 372 + struct drm_agp_binding *request = data; 339 373 340 - if (copy_from_user 341 - (&request, (struct drm_agp_binding __user *) arg, sizeof(request))) 342 - return -EFAULT; 343 - 344 - return drm_agp_bind(dev, &request); 374 + return drm_agp_bind(dev, request); 345 375 } 346 376 347 377 /** ··· 373 411 } 374 412 EXPORT_SYMBOL(drm_agp_free); 375 413 376 - int drm_agp_free_ioctl(struct inode *inode, struct drm_file *file_priv, 377 - unsigned int cmd, unsigned long arg) 414 + 415 + 416 + int drm_agp_free_ioctl(struct drm_device *dev, void *data, 417 + struct drm_file *file_priv) 378 418 { 379 - struct drm_device *dev = file_priv->head->dev; 380 - struct drm_agp_buffer request; 419 + struct drm_agp_buffer *request = data; 381 420 382 - if (copy_from_user 383 - (&request, (struct drm_agp_buffer __user *) arg, sizeof(request))) 384 - return -EFAULT; 385 - 386 - return drm_agp_free(dev, &request); 421 + return drm_agp_free(dev, request); 387 422 } 388 423 389 424 /**
+15 -21
drivers/char/drm/drm_auth.c
··· 137 137 * searches an unique non-zero magic number and add it associating it with \p 138 138 * file_priv. 139 139 */ 140 - int drm_getmagic(struct inode *inode, struct drm_file *file_priv, 141 - unsigned int cmd, unsigned long arg) 140 + int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) 142 141 { 143 142 static drm_magic_t sequence = 0; 144 143 static DEFINE_SPINLOCK(lock); 145 - struct drm_device *dev = file_priv->head->dev; 146 - struct drm_auth auth; 144 + struct drm_auth *auth = data; 147 145 148 146 /* Find unique magic */ 149 147 if (file_priv->magic) { 150 - auth.magic = file_priv->magic; 148 + auth->magic = file_priv->magic; 151 149 } else { 152 150 do { 153 151 spin_lock(&lock); 154 152 if (!sequence) 155 153 ++sequence; /* reserve 0 */ 156 - auth.magic = sequence++; 154 + auth->magic = sequence++; 157 155 spin_unlock(&lock); 158 - } while (drm_find_file(dev, auth.magic)); 159 - file_priv->magic = auth.magic; 160 - drm_add_magic(dev, file_priv, auth.magic); 156 + } while (drm_find_file(dev, auth->magic)); 157 + file_priv->magic = auth->magic; 158 + drm_add_magic(dev, file_priv, auth->magic); 161 159 } 162 160 163 - DRM_DEBUG("%u\n", auth.magic); 164 - if (copy_to_user((struct drm_auth __user *) arg, &auth, sizeof(auth))) 165 - return -EFAULT; 161 + DRM_DEBUG("%u\n", auth->magic); 162 + 166 163 return 0; 167 164 } 168 165 ··· 174 177 * 175 178 * Checks if \p file_priv is associated with the magic number passed in \arg. 
176 179 */ 177 - int drm_authmagic(struct inode *inode, struct drm_file *file_priv, 178 - unsigned int cmd, unsigned long arg) 180 + int drm_authmagic(struct drm_device *dev, void *data, 181 + struct drm_file *file_priv) 179 182 { 180 - struct drm_device *dev = file_priv->head->dev; 181 - struct drm_auth auth; 183 + struct drm_auth *auth = data; 182 184 struct drm_file *file; 183 185 184 - if (copy_from_user(&auth, (struct drm_auth __user *) arg, sizeof(auth))) 185 - return -EFAULT; 186 - DRM_DEBUG("%u\n", auth.magic); 187 - if ((file = drm_find_file(dev, auth.magic))) { 186 + DRM_DEBUG("%u\n", auth->magic); 187 + if ((file = drm_find_file(dev, auth->magic))) { 188 188 file->authenticated = 1; 189 - drm_remove_magic(dev, auth.magic); 189 + drm_remove_magic(dev, auth->magic); 190 190 return 0; 191 191 } 192 192 return -EINVAL;
+57 -108
drivers/char/drm/drm_bufs.c
··· 332 332 333 333 EXPORT_SYMBOL(drm_addmap); 334 334 335 - int drm_addmap_ioctl(struct inode *inode, struct drm_file *file_priv, 336 - unsigned int cmd, unsigned long arg) 335 + int drm_addmap_ioctl(struct drm_device *dev, void *data, 336 + struct drm_file *file_priv) 337 337 { 338 - struct drm_device *dev = file_priv->head->dev; 339 - struct drm_map map; 338 + struct drm_map *map = data; 340 339 struct drm_map_list *maplist; 341 - struct drm_map __user *argp = (void __user *)arg; 342 340 int err; 343 341 344 - if (copy_from_user(&map, argp, sizeof(map))) { 345 - return -EFAULT; 346 - } 347 - 348 - if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP)) 342 + if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP)) 349 343 return -EPERM; 350 344 351 - err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags, 352 - &maplist); 345 + err = drm_addmap_core(dev, map->offset, map->size, map->type, 346 + map->flags, &maplist); 353 347 354 348 if (err) 355 349 return err; 356 350 357 - if (copy_to_user(argp, maplist->map, sizeof(struct drm_map))) 358 - return -EFAULT; 359 - 360 351 /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */ 361 - if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle)) 362 - return -EFAULT; 352 + map->handle = (void *)(unsigned long)maplist->user_token; 363 353 return 0; 364 354 } 365 355 ··· 439 449 * gets used by drivers that the server doesn't need to care about. This seems 440 450 * unlikely. 
441 451 */ 442 - int drm_rmmap_ioctl(struct inode *inode, struct drm_file *file_priv, 443 - unsigned int cmd, unsigned long arg) 452 + int drm_rmmap_ioctl(struct drm_device *dev, void *data, 453 + struct drm_file *file_priv) 444 454 { 445 - struct drm_device *dev = file_priv->head->dev; 446 - struct drm_map request; 455 + struct drm_map *request = data; 447 456 drm_local_map_t *map = NULL; 448 457 struct drm_map_list *r_list; 449 458 int ret; 450 459 451 - if (copy_from_user(&request, (struct drm_map __user *) arg, sizeof(request))) { 452 - return -EFAULT; 453 - } 454 - 455 460 mutex_lock(&dev->struct_mutex); 456 461 list_for_each_entry(r_list, &dev->maplist, head) { 457 462 if (r_list->map && 458 - r_list->user_token == (unsigned long)request.handle && 463 + r_list->user_token == (unsigned long)request->handle && 459 464 r_list->map->flags & _DRM_REMOVABLE) { 460 465 map = r_list->map; 461 466 break; ··· 1265 1280 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent 1266 1281 * PCI memory respectively. 
1267 1282 */ 1268 - int drm_addbufs(struct inode *inode, struct drm_file *file_priv, 1269 - unsigned int cmd, unsigned long arg) 1283 + int drm_addbufs(struct drm_device *dev, void *data, 1284 + struct drm_file *file_priv) 1270 1285 { 1271 - struct drm_buf_desc request; 1272 - struct drm_device *dev = file_priv->head->dev; 1286 + struct drm_buf_desc *request = data; 1273 1287 int ret; 1274 1288 1275 1289 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1276 1290 return -EINVAL; 1277 1291 1278 - if (copy_from_user(&request, (struct drm_buf_desc __user *) arg, 1279 - sizeof(request))) 1280 - return -EFAULT; 1281 - 1282 1292 #if __OS_HAS_AGP 1283 - if (request.flags & _DRM_AGP_BUFFER) 1284 - ret = drm_addbufs_agp(dev, &request); 1293 + if (request->flags & _DRM_AGP_BUFFER) 1294 + ret = drm_addbufs_agp(dev, request); 1285 1295 else 1286 1296 #endif 1287 - if (request.flags & _DRM_SG_BUFFER) 1288 - ret = drm_addbufs_sg(dev, &request); 1289 - else if (request.flags & _DRM_FB_BUFFER) 1290 - ret = drm_addbufs_fb(dev, &request); 1297 + if (request->flags & _DRM_SG_BUFFER) 1298 + ret = drm_addbufs_sg(dev, request); 1299 + else if (request->flags & _DRM_FB_BUFFER) 1300 + ret = drm_addbufs_fb(dev, request); 1291 1301 else 1292 - ret = drm_addbufs_pci(dev, &request); 1302 + ret = drm_addbufs_pci(dev, request); 1293 1303 1294 - if (ret == 0) { 1295 - if (copy_to_user((void __user *)arg, &request, sizeof(request))) { 1296 - ret = -EFAULT; 1297 - } 1298 - } 1299 1304 return ret; 1300 1305 } 1301 1306 ··· 1306 1331 * lock, preventing of allocating more buffers after this call. Information 1307 1332 * about each requested buffer is then copied into user space. 
1308 1333 */ 1309 - int drm_infobufs(struct inode *inode, struct drm_file *file_priv, 1310 - unsigned int cmd, unsigned long arg) 1334 + int drm_infobufs(struct drm_device *dev, void *data, 1335 + struct drm_file *file_priv) 1311 1336 { 1312 - struct drm_device *dev = file_priv->head->dev; 1313 1337 struct drm_device_dma *dma = dev->dma; 1314 - struct drm_buf_info request; 1315 - struct drm_buf_info __user *argp = (void __user *)arg; 1338 + struct drm_buf_info *request = data; 1316 1339 int i; 1317 1340 int count; 1318 1341 ··· 1328 1355 ++dev->buf_use; /* Can't allocate more after this call */ 1329 1356 spin_unlock(&dev->count_lock); 1330 1357 1331 - if (copy_from_user(&request, argp, sizeof(request))) 1332 - return -EFAULT; 1333 - 1334 1358 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { 1335 1359 if (dma->bufs[i].buf_count) 1336 1360 ++count; ··· 1335 1365 1336 1366 DRM_DEBUG("count = %d\n", count); 1337 1367 1338 - if (request.count >= count) { 1368 + if (request->count >= count) { 1339 1369 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { 1340 1370 if (dma->bufs[i].buf_count) { 1341 1371 struct drm_buf_desc __user *to = 1342 - &request.list[count]; 1372 + &request->list[count]; 1343 1373 struct drm_buf_entry *from = &dma->bufs[i]; 1344 1374 struct drm_freelist *list = &dma->bufs[i].freelist; 1345 1375 if (copy_to_user(&to->count, ··· 1366 1396 } 1367 1397 } 1368 1398 } 1369 - request.count = count; 1370 - 1371 - if (copy_to_user(argp, &request, sizeof(request))) 1372 - return -EFAULT; 1399 + request->count = count; 1373 1400 1374 1401 return 0; 1375 1402 } ··· 1385 1418 * 1386 1419 * \note This ioctl is deprecated and mostly never used. 
1387 1420 */ 1388 - int drm_markbufs(struct inode *inode, struct drm_file *file_priv, 1389 - unsigned int cmd, unsigned long arg) 1421 + int drm_markbufs(struct drm_device *dev, void *data, 1422 + struct drm_file *file_priv) 1390 1423 { 1391 - struct drm_device *dev = file_priv->head->dev; 1392 1424 struct drm_device_dma *dma = dev->dma; 1393 - struct drm_buf_desc request; 1425 + struct drm_buf_desc *request = data; 1394 1426 int order; 1395 1427 struct drm_buf_entry *entry; 1396 1428 ··· 1399 1433 if (!dma) 1400 1434 return -EINVAL; 1401 1435 1402 - if (copy_from_user(&request, 1403 - (struct drm_buf_desc __user *) arg, sizeof(request))) 1404 - return -EFAULT; 1405 - 1406 1436 DRM_DEBUG("%d, %d, %d\n", 1407 - request.size, request.low_mark, request.high_mark); 1408 - order = drm_order(request.size); 1437 + request->size, request->low_mark, request->high_mark); 1438 + order = drm_order(request->size); 1409 1439 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) 1410 1440 return -EINVAL; 1411 1441 entry = &dma->bufs[order]; 1412 1442 1413 - if (request.low_mark < 0 || request.low_mark > entry->buf_count) 1443 + if (request->low_mark < 0 || request->low_mark > entry->buf_count) 1414 1444 return -EINVAL; 1415 - if (request.high_mark < 0 || request.high_mark > entry->buf_count) 1445 + if (request->high_mark < 0 || request->high_mark > entry->buf_count) 1416 1446 return -EINVAL; 1417 1447 1418 - entry->freelist.low_mark = request.low_mark; 1419 - entry->freelist.high_mark = request.high_mark; 1448 + entry->freelist.low_mark = request->low_mark; 1449 + entry->freelist.high_mark = request->high_mark; 1420 1450 1421 1451 return 0; 1422 1452 } ··· 1429 1467 * Calls free_buffer() for each used buffer. 1430 1468 * This function is primarily used for debugging. 
1431 1469 */ 1432 - int drm_freebufs(struct inode *inode, struct drm_file *file_priv, 1433 - unsigned int cmd, unsigned long arg) 1470 + int drm_freebufs(struct drm_device *dev, void *data, 1471 + struct drm_file *file_priv) 1434 1472 { 1435 - struct drm_device *dev = file_priv->head->dev; 1436 1473 struct drm_device_dma *dma = dev->dma; 1437 - struct drm_buf_free request; 1474 + struct drm_buf_free *request = data; 1438 1475 int i; 1439 1476 int idx; 1440 1477 struct drm_buf *buf; ··· 1444 1483 if (!dma) 1445 1484 return -EINVAL; 1446 1485 1447 - if (copy_from_user(&request, 1448 - (struct drm_buf_free __user *) arg, sizeof(request))) 1449 - return -EFAULT; 1450 - 1451 - DRM_DEBUG("%d\n", request.count); 1452 - for (i = 0; i < request.count; i++) { 1453 - if (copy_from_user(&idx, &request.list[i], sizeof(idx))) 1486 + DRM_DEBUG("%d\n", request->count); 1487 + for (i = 0; i < request->count; i++) { 1488 + if (copy_from_user(&idx, &request->list[i], sizeof(idx))) 1454 1489 return -EFAULT; 1455 1490 if (idx < 0 || idx >= dma->buf_count) { 1456 1491 DRM_ERROR("Index %d (of %d max)\n", ··· 1479 1522 * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls 1480 1523 * drm_mmap_dma(). 
1481 1524 */ 1482 - int drm_mapbufs(struct inode *inode, struct drm_file *file_priv, 1483 - unsigned int cmd, unsigned long arg) 1525 + int drm_mapbufs(struct drm_device *dev, void *data, 1526 + struct drm_file *file_priv) 1484 1527 { 1485 - struct drm_device *dev = file_priv->head->dev; 1486 1528 struct drm_device_dma *dma = dev->dma; 1487 - struct drm_buf_map __user *argp = (void __user *)arg; 1488 1529 int retcode = 0; 1489 1530 const int zero = 0; 1490 1531 unsigned long virtual; 1491 1532 unsigned long address; 1492 - struct drm_buf_map request; 1533 + struct drm_buf_map *request = data; 1493 1534 int i; 1494 1535 1495 1536 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) ··· 1504 1549 dev->buf_use++; /* Can't allocate more after this call */ 1505 1550 spin_unlock(&dev->count_lock); 1506 1551 1507 - if (copy_from_user(&request, argp, sizeof(request))) 1508 - return -EFAULT; 1509 - 1510 - if (request.count >= dma->buf_count) { 1552 + if (request->count >= dma->buf_count) { 1511 1553 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) 1512 1554 || (drm_core_check_feature(dev, DRIVER_SG) 1513 1555 && (dma->flags & _DRM_DMA_USE_SG)) ··· 1517 1565 retcode = -EINVAL; 1518 1566 goto done; 1519 1567 } 1520 - 1521 1568 down_write(&current->mm->mmap_sem); 1522 1569 virtual = do_mmap(file_priv->filp, 0, map->size, 1523 1570 PROT_READ | PROT_WRITE, 1524 - MAP_SHARED, token); 1571 + MAP_SHARED, 1572 + token); 1525 1573 up_write(&current->mm->mmap_sem); 1526 1574 } else { 1527 1575 down_write(&current->mm->mmap_sem); ··· 1535 1583 retcode = (signed long)virtual; 1536 1584 goto done; 1537 1585 } 1538 - request.virtual = (void __user *)virtual; 1586 + request->virtual = (void __user *)virtual; 1539 1587 1540 1588 for (i = 0; i < dma->buf_count; i++) { 1541 - if (copy_to_user(&request.list[i].idx, 1589 + if (copy_to_user(&request->list[i].idx, 1542 1590 &dma->buflist[i]->idx, 1543 - sizeof(request.list[0].idx))) { 1591 + sizeof(request->list[0].idx))) { 1544 1592 
retcode = -EFAULT; 1545 1593 goto done; 1546 1594 } 1547 - if (copy_to_user(&request.list[i].total, 1595 + if (copy_to_user(&request->list[i].total, 1548 1596 &dma->buflist[i]->total, 1549 - sizeof(request.list[0].total))) { 1597 + sizeof(request->list[0].total))) { 1550 1598 retcode = -EFAULT; 1551 1599 goto done; 1552 1600 } 1553 - if (copy_to_user(&request.list[i].used, 1601 + if (copy_to_user(&request->list[i].used, 1554 1602 &zero, sizeof(zero))) { 1555 1603 retcode = -EFAULT; 1556 1604 goto done; 1557 1605 } 1558 1606 address = virtual + dma->buflist[i]->offset; /* *** */ 1559 - if (copy_to_user(&request.list[i].address, 1607 + if (copy_to_user(&request->list[i].address, 1560 1608 &address, sizeof(address))) { 1561 1609 retcode = -EFAULT; 1562 1610 goto done; ··· 1564 1612 } 1565 1613 } 1566 1614 done: 1567 - request.count = dma->buf_count; 1568 - DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode); 1569 - 1570 - if (copy_to_user(argp, &request, sizeof(request))) 1571 - return -EFAULT; 1615 + request->count = dma->buf_count; 1616 + DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode); 1572 1617 1573 1618 return retcode; 1574 1619 }
+53 -98
drivers/char/drm/drm_context.c
··· 139 139 * Gets the map from drm_device::ctx_idr with the handle specified and 140 140 * returns its handle. 141 141 */ 142 - int drm_getsareactx(struct inode *inode, struct drm_file *file_priv, 143 - unsigned int cmd, unsigned long arg) 142 + int drm_getsareactx(struct drm_device *dev, void *data, 143 + struct drm_file *file_priv) 144 144 { 145 - struct drm_device *dev = file_priv->head->dev; 146 - struct drm_ctx_priv_map __user *argp = (void __user *)arg; 147 - struct drm_ctx_priv_map request; 145 + struct drm_ctx_priv_map *request = data; 148 146 struct drm_map *map; 149 147 struct drm_map_list *_entry; 150 148 151 - if (copy_from_user(&request, argp, sizeof(request))) 152 - return -EFAULT; 153 - 154 149 mutex_lock(&dev->struct_mutex); 155 150 156 - map = idr_find(&dev->ctx_idr, request.ctx_id); 151 + map = idr_find(&dev->ctx_idr, request->ctx_id); 157 152 if (!map) { 158 153 mutex_unlock(&dev->struct_mutex); 159 154 return -EINVAL; ··· 156 161 157 162 mutex_unlock(&dev->struct_mutex); 158 163 159 - request.handle = NULL; 164 + request->handle = NULL; 160 165 list_for_each_entry(_entry, &dev->maplist, head) { 161 166 if (_entry->map == map) { 162 - request.handle = 167 + request->handle = 163 168 (void *)(unsigned long)_entry->user_token; 164 169 break; 165 170 } 166 171 } 167 - if (request.handle == NULL) 172 + if (request->handle == NULL) 168 173 return -EINVAL; 169 174 170 - if (copy_to_user(argp, &request, sizeof(request))) 171 - return -EFAULT; 172 175 return 0; 173 176 } 174 177 ··· 182 189 * Searches the mapping specified in \p arg and update the entry in 183 190 * drm_device::ctx_idr with it. 
184 191 */ 185 - int drm_setsareactx(struct inode *inode, struct drm_file *file_priv, 186 - unsigned int cmd, unsigned long arg) 192 + int drm_setsareactx(struct drm_device *dev, void *data, 193 + struct drm_file *file_priv) 187 194 { 188 - struct drm_device *dev = file_priv->head->dev; 189 - struct drm_ctx_priv_map request; 195 + struct drm_ctx_priv_map *request = data; 190 196 struct drm_map *map = NULL; 191 197 struct drm_map_list *r_list = NULL; 192 - 193 - if (copy_from_user(&request, 194 - (struct drm_ctx_priv_map __user *) arg, 195 - sizeof(request))) 196 - return -EFAULT; 197 198 198 199 mutex_lock(&dev->struct_mutex); 199 200 list_for_each_entry(r_list, &dev->maplist, head) { 200 201 if (r_list->map 201 - && r_list->user_token == (unsigned long)request.handle) 202 + && r_list->user_token == (unsigned long) request->handle) 202 203 goto found; 203 204 } 204 205 bad: ··· 204 217 if (!map) 205 218 goto bad; 206 219 207 - if (IS_ERR(idr_replace(&dev->ctx_idr, map, request.ctx_id))) 220 + if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id))) 208 221 goto bad; 209 222 210 223 mutex_unlock(&dev->struct_mutex); 224 + 211 225 return 0; 212 226 } 213 227 ··· 283 295 * \param arg user argument pointing to a drm_ctx_res structure. 284 296 * \return zero on success or a negative number on failure. 
285 297 */ 286 - int drm_resctx(struct inode *inode, struct drm_file *file_priv, 287 - unsigned int cmd, unsigned long arg) 298 + int drm_resctx(struct drm_device *dev, void *data, 299 + struct drm_file *file_priv) 288 300 { 289 - struct drm_ctx_res res; 290 - struct drm_ctx_res __user *argp = (void __user *)arg; 301 + struct drm_ctx_res *res = data; 291 302 struct drm_ctx ctx; 292 303 int i; 293 304 294 - if (copy_from_user(&res, argp, sizeof(res))) 295 - return -EFAULT; 296 - 297 - if (res.count >= DRM_RESERVED_CONTEXTS) { 305 + if (res->count >= DRM_RESERVED_CONTEXTS) { 298 306 memset(&ctx, 0, sizeof(ctx)); 299 307 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { 300 308 ctx.handle = i; 301 - if (copy_to_user(&res.contexts[i], &ctx, sizeof(ctx))) 309 + if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx))) 302 310 return -EFAULT; 303 311 } 304 312 } 305 - res.count = DRM_RESERVED_CONTEXTS; 313 + res->count = DRM_RESERVED_CONTEXTS; 306 314 307 - if (copy_to_user(argp, &res, sizeof(res))) 308 - return -EFAULT; 309 315 return 0; 310 316 } 311 317 ··· 314 332 * 315 333 * Get a new handle for the context and copy to userspace. 316 334 */ 317 - int drm_addctx(struct inode *inode, struct drm_file *file_priv, 318 - unsigned int cmd, unsigned long arg) 335 + int drm_addctx(struct drm_device *dev, void *data, 336 + struct drm_file *file_priv) 319 337 { 320 - struct drm_device *dev = file_priv->head->dev; 321 338 struct drm_ctx_list *ctx_entry; 322 - struct drm_ctx __user *argp = (void __user *)arg; 323 - struct drm_ctx ctx; 339 + struct drm_ctx *ctx = data; 324 340 325 - if (copy_from_user(&ctx, argp, sizeof(ctx))) 326 - return -EFAULT; 327 - 328 - ctx.handle = drm_ctxbitmap_next(dev); 329 - if (ctx.handle == DRM_KERNEL_CONTEXT) { 341 + ctx->handle = drm_ctxbitmap_next(dev); 342 + if (ctx->handle == DRM_KERNEL_CONTEXT) { 330 343 /* Skip kernel's context and get a new one. 
*/ 331 - ctx.handle = drm_ctxbitmap_next(dev); 344 + ctx->handle = drm_ctxbitmap_next(dev); 332 345 } 333 - DRM_DEBUG("%d\n", ctx.handle); 334 - if (ctx.handle == -1) { 346 + DRM_DEBUG("%d\n", ctx->handle); 347 + if (ctx->handle == -1) { 335 348 DRM_DEBUG("Not enough free contexts.\n"); 336 349 /* Should this return -EBUSY instead? */ 337 350 return -ENOMEM; 338 351 } 339 352 340 - if (ctx.handle != DRM_KERNEL_CONTEXT) { 353 + if (ctx->handle != DRM_KERNEL_CONTEXT) { 341 354 if (dev->driver->context_ctor) 342 - if (!dev->driver->context_ctor(dev, ctx.handle)) { 355 + if (!dev->driver->context_ctor(dev, ctx->handle)) { 343 356 DRM_DEBUG("Running out of ctxs or memory.\n"); 344 357 return -ENOMEM; 345 358 } ··· 347 370 } 348 371 349 372 INIT_LIST_HEAD(&ctx_entry->head); 350 - ctx_entry->handle = ctx.handle; 373 + ctx_entry->handle = ctx->handle; 351 374 ctx_entry->tag = file_priv; 352 375 353 376 mutex_lock(&dev->ctxlist_mutex); ··· 355 378 ++dev->ctx_count; 356 379 mutex_unlock(&dev->ctxlist_mutex); 357 380 358 - if (copy_to_user(argp, &ctx, sizeof(ctx))) 359 - return -EFAULT; 360 381 return 0; 361 382 } 362 383 363 - int drm_modctx(struct inode *inode, struct drm_file *file_priv, 364 - unsigned int cmd, unsigned long arg) 384 + int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv) 365 385 { 366 386 /* This does nothing */ 367 387 return 0; ··· 373 399 * \param arg user argument pointing to a drm_ctx structure. 374 400 * \return zero on success or a negative number on failure. 
375 401 */ 376 - int drm_getctx(struct inode *inode, struct drm_file *file_priv, 377 - unsigned int cmd, unsigned long arg) 402 + int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv) 378 403 { 379 - struct drm_ctx __user *argp = (void __user *)arg; 380 - struct drm_ctx ctx; 381 - 382 - if (copy_from_user(&ctx, argp, sizeof(ctx))) 383 - return -EFAULT; 404 + struct drm_ctx *ctx = data; 384 405 385 406 /* This is 0, because we don't handle any context flags */ 386 - ctx.flags = 0; 407 + ctx->flags = 0; 387 408 388 - if (copy_to_user(argp, &ctx, sizeof(ctx))) 389 - return -EFAULT; 390 409 return 0; 391 410 } 392 411 ··· 394 427 * 395 428 * Calls context_switch(). 396 429 */ 397 - int drm_switchctx(struct inode *inode, struct drm_file *file_priv, 398 - unsigned int cmd, unsigned long arg) 430 + int drm_switchctx(struct drm_device *dev, void *data, 431 + struct drm_file *file_priv) 399 432 { 400 - struct drm_device *dev = file_priv->head->dev; 401 - struct drm_ctx ctx; 433 + struct drm_ctx *ctx = data; 402 434 403 - if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) 404 - return -EFAULT; 405 - 406 - DRM_DEBUG("%d\n", ctx.handle); 407 - return drm_context_switch(dev, dev->last_context, ctx.handle); 435 + DRM_DEBUG("%d\n", ctx->handle); 436 + return drm_context_switch(dev, dev->last_context, ctx->handle); 408 437 } 409 438 410 439 /** ··· 414 451 * 415 452 * Calls context_switch_complete(). 
416 453 */ 417 - int drm_newctx(struct inode *inode, struct drm_file *file_priv, 418 - unsigned int cmd, unsigned long arg) 454 + int drm_newctx(struct drm_device *dev, void *data, 455 + struct drm_file *file_priv) 419 456 { 420 - struct drm_device *dev = file_priv->head->dev; 421 - struct drm_ctx ctx; 457 + struct drm_ctx *ctx = data; 422 458 423 - if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) 424 - return -EFAULT; 425 - 426 - DRM_DEBUG("%d\n", ctx.handle); 427 - drm_context_switch_complete(dev, ctx.handle); 459 + DRM_DEBUG("%d\n", ctx->handle); 460 + drm_context_switch_complete(dev, ctx->handle); 428 461 429 462 return 0; 430 463 } ··· 436 477 * 437 478 * If not the special kernel context, calls ctxbitmap_free() to free the specified context. 438 479 */ 439 - int drm_rmctx(struct inode *inode, struct drm_file *file_priv, 440 - unsigned int cmd, unsigned long arg) 480 + int drm_rmctx(struct drm_device *dev, void *data, 481 + struct drm_file *file_priv) 441 482 { 442 - struct drm_device *dev = file_priv->head->dev; 443 - struct drm_ctx ctx; 483 + struct drm_ctx *ctx = data; 444 484 445 - if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) 446 - return -EFAULT; 447 - 448 - DRM_DEBUG("%d\n", ctx.handle); 449 - if (ctx.handle == DRM_KERNEL_CONTEXT + 1) { 485 + DRM_DEBUG("%d\n", ctx->handle); 486 + if (ctx->handle == DRM_KERNEL_CONTEXT + 1) { 450 487 file_priv->remove_auth_on_close = 1; 451 488 } 452 - if (ctx.handle != DRM_KERNEL_CONTEXT) { 489 + if (ctx->handle != DRM_KERNEL_CONTEXT) { 453 490 if (dev->driver->context_dtor) 454 - dev->driver->context_dtor(dev, ctx.handle); 455 - drm_ctxbitmap_free(dev, ctx.handle); 491 + dev->driver->context_dtor(dev, ctx->handle); 492 + drm_ctxbitmap_free(dev, ctx->handle); 456 493 } 457 494 458 495 mutex_lock(&dev->ctxlist_mutex); ··· 456 501 struct drm_ctx_list *pos, *n; 457 502 458 503 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { 459 - if (pos->handle == ctx.handle) { 504 + if 
(pos->handle == ctx->handle) { 460 505 list_del(&pos->head); 461 506 drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); 462 507 --dev->ctx_count;
+25 -36
drivers/char/drm/drm_drawable.c
··· 40 40 /** 41 41 * Allocate drawable ID and memory to store information about it. 42 42 */ 43 - int drm_adddraw(DRM_IOCTL_ARGS) 43 + int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv) 44 44 { 45 - DRM_DEVICE; 46 45 unsigned long irqflags; 47 - struct drm_draw draw; 46 + struct drm_draw *draw = data; 48 47 int new_id = 0; 49 48 int ret; 50 49 ··· 62 63 63 64 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 64 65 65 - draw.handle = new_id; 66 + draw->handle = new_id; 66 67 67 - DRM_DEBUG("%d\n", draw.handle); 68 - 69 - DRM_COPY_TO_USER_IOCTL((struct drm_draw __user *)data, draw, sizeof(draw)); 68 + DRM_DEBUG("%d\n", draw->handle); 70 69 71 70 return 0; 72 71 } ··· 72 75 /** 73 76 * Free drawable ID and memory to store information about it. 74 77 */ 75 - int drm_rmdraw(DRM_IOCTL_ARGS) 78 + int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv) 76 79 { 77 - DRM_DEVICE; 78 - struct drm_draw draw; 80 + struct drm_draw *draw = data; 79 81 unsigned long irqflags; 80 - 81 - DRM_COPY_FROM_USER_IOCTL(draw, (struct drm_draw __user *) data, 82 - sizeof(draw)); 83 82 84 83 spin_lock_irqsave(&dev->drw_lock, irqflags); 85 84 86 - drm_free(drm_get_drawable_info(dev, draw.handle), 85 + drm_free(drm_get_drawable_info(dev, draw->handle), 87 86 sizeof(struct drm_drawable_info), DRM_MEM_BUFS); 88 87 89 - idr_remove(&dev->drw_idr, draw.handle); 88 + idr_remove(&dev->drw_idr, draw->handle); 90 89 91 90 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 92 - DRM_DEBUG("%d\n", draw.handle); 91 + DRM_DEBUG("%d\n", draw->handle); 93 92 return 0; 94 93 } 95 94 96 - int drm_update_drawable_info(DRM_IOCTL_ARGS) 95 + int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv) 97 96 { 98 - DRM_DEVICE; 99 - struct drm_update_draw update; 97 + struct drm_update_draw *update = data; 100 98 unsigned long irqflags; 101 99 struct drm_clip_rect *rects; 102 100 struct drm_drawable_info *info; 103 101 int err; 104 102 
105 - DRM_COPY_FROM_USER_IOCTL(update, (struct drm_update_draw __user *) data, 106 - sizeof(update)); 107 - 108 - info = idr_find(&dev->drw_idr, update.handle); 103 + info = idr_find(&dev->drw_idr, update->handle); 109 104 if (!info) { 110 105 info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS); 111 106 if (!info) 112 107 return -ENOMEM; 113 - if (IS_ERR(idr_replace(&dev->drw_idr, info, update.handle))) { 114 - DRM_ERROR("No such drawable %d\n", update.handle); 108 + if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) { 109 + DRM_ERROR("No such drawable %d\n", update->handle); 115 110 drm_free(info, sizeof(*info), DRM_MEM_BUFS); 116 111 return -EINVAL; 117 112 } 118 113 } 119 114 120 - switch (update.type) { 115 + switch (update->type) { 121 116 case DRM_DRAWABLE_CLIPRECTS: 122 - if (update.num != info->num_rects) { 123 - rects = drm_alloc(update.num * sizeof(struct drm_clip_rect), 117 + if (update->num != info->num_rects) { 118 + rects = drm_alloc(update->num * sizeof(struct drm_clip_rect), 124 119 DRM_MEM_BUFS); 125 120 } else 126 121 rects = info->rects; 127 122 128 - if (update.num && !rects) { 123 + if (update->num && !rects) { 129 124 DRM_ERROR("Failed to allocate cliprect memory\n"); 130 125 err = -ENOMEM; 131 126 goto error; 132 127 } 133 128 134 - if (update.num && DRM_COPY_FROM_USER(rects, 129 + if (update->num && DRM_COPY_FROM_USER(rects, 135 130 (struct drm_clip_rect __user *) 136 - (unsigned long)update.data, 137 - update.num * 131 + (unsigned long)update->data, 132 + update->num * 138 133 sizeof(*rects))) { 139 134 DRM_ERROR("Failed to copy cliprects from userspace\n"); 140 135 err = -EFAULT; ··· 141 152 } 142 153 143 154 info->rects = rects; 144 - info->num_rects = update.num; 155 + info->num_rects = update->num; 145 156 146 157 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 147 158 148 159 DRM_DEBUG("Updated %d cliprects for drawable %d\n", 149 - info->num_rects, update.handle); 160 + info->num_rects, update->handle); 150 161 break; 151 
162 default: 152 - DRM_ERROR("Invalid update type %d\n", update.type); 163 + DRM_ERROR("Invalid update type %d\n", update->type); 153 164 return -EINVAL; 154 165 } 155 166 ··· 157 168 158 169 error: 159 170 if (rects != info->rects) 160 - drm_free(rects, update.num * sizeof(struct drm_clip_rect), 171 + drm_free(rects, update->num * sizeof(struct drm_clip_rect), 161 172 DRM_MEM_BUFS); 162 173 163 174 return err;
+85 -68
drivers/char/drm/drm_drv.c
··· 49 49 #include "drmP.h" 50 50 #include "drm_core.h" 51 51 52 - static int drm_version(struct inode *inode, struct drm_file *file_priv, 53 - unsigned int cmd, unsigned long arg); 52 + static int drm_version(struct drm_device *dev, void *data, 53 + struct drm_file *file_priv); 54 54 55 55 /** Ioctl table */ 56 - static drm_ioctl_desc_t drm_ioctls[] = { 57 - [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0}, 58 - [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0}, 59 - [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0}, 60 - [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY}, 61 - [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0}, 62 - [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0}, 63 - [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0}, 64 - [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, DRM_MASTER|DRM_ROOT_ONLY}, 65 - [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 66 - [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 67 - [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 68 - [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 56 + static struct drm_ioctl_desc drm_ioctls[] = { 57 + DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0), 58 + DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), 59 + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), 60 + DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), 61 + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0), 62 + DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0), 63 + DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0), 64 + DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY), 69 65 70 - [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 71 - [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, 
DRM_AUTH}, 66 + DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 67 + DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 68 + DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 69 + DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 72 70 73 - [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 74 - [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH}, 71 + DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 72 + DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH), 75 73 76 - [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_ROOT_ONLY}, 77 - [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_ROOT_ONLY}, 78 - [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 79 - [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH}, 80 - [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 81 - [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 82 - [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, DRM_AUTH}, 74 + DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 75 + DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH), 83 76 84 - [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 85 - [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 77 + DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), 78 + DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 79 + DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 80 + DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH), 81 + DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, 
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 82 + DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 83 + DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH), 86 84 87 - [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, DRM_AUTH}, 88 - [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, DRM_AUTH}, 85 + DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 86 + DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 89 87 90 - [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, DRM_AUTH}, 88 + DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH), 89 + DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH), 91 90 92 - [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 93 - [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 94 - [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, DRM_AUTH}, 95 - [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, DRM_AUTH}, 96 - [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, DRM_AUTH}, 91 + DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), 92 + 93 + DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 94 + DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 95 + DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH), 96 + DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH), 97 + DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH), 97 98 /* The DRM_IOCTL_DMA ioctl should be defined by the driver. 
*/ 98 - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = {NULL, DRM_AUTH}, 99 + DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH), 99 100 100 - [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 101 + DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 101 102 102 103 #if __OS_HAS_AGP 103 - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 104 - [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 105 - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 106 - [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, DRM_AUTH}, 107 - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 108 - [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 109 - [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 110 - [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 104 + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 105 + DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 106 + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 107 + DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH), 108 + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 109 + DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 110 + DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 111 + DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 111 112 #endif 112 113 113 - [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, 
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 114 - [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 114 + DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 115 + DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 115 116 116 - [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, 117 + DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0), 117 118 118 - [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 119 + DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 119 120 }; 120 121 121 122 #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) ··· 419 418 * 420 419 * Fills in the version information in \p arg. 421 420 */ 422 - static int drm_version(struct inode *inode, struct drm_file *file_priv, 423 - unsigned int cmd, unsigned long arg) 421 + static int drm_version(struct drm_device *dev, void *data, 422 + struct drm_file *file_priv) 424 423 { 425 - struct drm_device *dev = file_priv->head->dev; 426 - struct drm_version __user *argp = (void __user *)arg; 427 - struct drm_version version; 424 + struct drm_version *version = data; 428 425 int len; 429 426 430 - if (copy_from_user(&version, argp, sizeof(version))) 431 - return -EFAULT; 427 + version->version_major = dev->driver->major; 428 + version->version_minor = dev->driver->minor; 429 + version->version_patchlevel = dev->driver->patchlevel; 430 + DRM_COPY(version->name, dev->driver->name); 431 + DRM_COPY(version->date, dev->driver->date); 432 + DRM_COPY(version->desc, dev->driver->desc); 432 433 433 - version.version_major = dev->driver->major; 434 - version.version_minor = dev->driver->minor; 435 - version.version_patchlevel = dev->driver->patchlevel; 436 - DRM_COPY(version.name, dev->driver->name); 437 - DRM_COPY(version.date, dev->driver->date); 438 - DRM_COPY(version.desc, dev->driver->desc); 439 - 440 - 
if (copy_to_user(argp, &version, sizeof(version))) 441 - return -EFAULT; 442 434 return 0; 443 435 } 444 436 ··· 452 458 { 453 459 struct drm_file *file_priv = filp->private_data; 454 460 struct drm_device *dev = file_priv->head->dev; 455 - drm_ioctl_desc_t *ioctl; 461 + struct drm_ioctl_desc *ioctl; 456 462 drm_ioctl_t *func; 457 463 unsigned int nr = DRM_IOCTL_NR(cmd); 458 464 int retcode = -EINVAL; 465 + char *kdata = NULL; 459 466 460 467 atomic_inc(&dev->ioctl_count); 461 468 atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); ··· 483 488 if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) 484 489 func = dev->driver->dma_ioctl; 485 490 491 + 486 492 if (!func) { 487 493 DRM_DEBUG("no function\n"); 488 494 retcode = -EINVAL; ··· 492 496 ((ioctl->flags & DRM_MASTER) && !file_priv->master)) { 493 497 retcode = -EACCES; 494 498 } else { 495 - retcode = func(inode, file_priv, cmd, arg); 499 + if (cmd & (IOC_IN | IOC_OUT)) { 500 + kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL); 501 + if (!kdata) 502 + return -ENOMEM; 503 + } 504 + 505 + if (cmd & IOC_IN) { 506 + if (copy_from_user(kdata, (void __user *)arg, 507 + _IOC_SIZE(cmd)) != 0) { 508 + retcode = -EACCES; 509 + goto err_i1; 510 + } 511 + } 512 + retcode = func(dev, kdata, file_priv); 513 + 514 + if (cmd & IOC_OUT) { 515 + if (copy_to_user((void __user *)arg, kdata, 516 + _IOC_SIZE(cmd)) != 0) 517 + retcode = -EACCES; 518 + } 496 519 } 497 520 498 521 err_i1: 522 + if (kdata) 523 + kfree(kdata); 499 524 atomic_dec(&dev->ioctl_count); 500 525 if (retcode) 501 526 DRM_DEBUG("ret = %x\n", retcode);
+2 -2
drivers/char/drm/drm_fops.c
··· 343 343 dev->open_count); 344 344 345 345 if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { 346 - if (drm_i_have_hw_lock(file_priv)) { 346 + if (drm_i_have_hw_lock(dev, file_priv)) { 347 347 dev->driver->reclaim_buffers_locked(dev, file_priv); 348 348 } else { 349 349 unsigned long _end=jiffies + 3*DRM_HZ; ··· 383 383 384 384 } 385 385 386 - if (drm_i_have_hw_lock(file_priv)) { 386 + if (drm_i_have_hw_lock(dev, file_priv)) { 387 387 DRM_DEBUG("File %p released, freeing lock for context %d\n", 388 388 filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); 389 389
+73 -102
drivers/char/drm/drm_ioctl.c
··· 49 49 * 50 50 * Copies the bus id from drm_device::unique into user space. 51 51 */ 52 - int drm_getunique(struct inode *inode, struct drm_file *file_priv, 53 - unsigned int cmd, unsigned long arg) 52 + int drm_getunique(struct drm_device *dev, void *data, 53 + struct drm_file *file_priv) 54 54 { 55 - struct drm_device *dev = file_priv->head->dev; 56 - struct drm_unique __user *argp = (void __user *)arg; 57 - struct drm_unique u; 55 + struct drm_unique *u = data; 58 56 59 - if (copy_from_user(&u, argp, sizeof(u))) 60 - return -EFAULT; 61 - if (u.unique_len >= dev->unique_len) { 62 - if (copy_to_user(u.unique, dev->unique, dev->unique_len)) 57 + if (u->unique_len >= dev->unique_len) { 58 + if (copy_to_user(u->unique, dev->unique, dev->unique_len)) 63 59 return -EFAULT; 64 60 } 65 - u.unique_len = dev->unique_len; 66 - if (copy_to_user(argp, &u, sizeof(u))) 67 - return -EFAULT; 61 + u->unique_len = dev->unique_len; 62 + 68 63 return 0; 69 64 } 70 65 ··· 77 82 * in interface version 1.1 and will return EBUSY when setversion has requested 78 83 * version 1.1 or greater. 
79 84 */ 80 - int drm_setunique(struct inode *inode, struct drm_file *file_priv, 81 - unsigned int cmd, unsigned long arg) 85 + int drm_setunique(struct drm_device *dev, void *data, 86 + struct drm_file *file_priv) 82 87 { 83 - struct drm_device *dev = file_priv->head->dev; 84 - struct drm_unique u; 88 + struct drm_unique *u = data; 85 89 int domain, bus, slot, func, ret; 86 90 87 91 if (dev->unique_len || dev->unique) 88 92 return -EBUSY; 89 93 90 - if (copy_from_user(&u, (struct drm_unique __user *) arg, sizeof(u))) 91 - return -EFAULT; 92 - 93 - if (!u.unique_len || u.unique_len > 1024) 94 + if (!u->unique_len || u->unique_len > 1024) 94 95 return -EINVAL; 95 96 96 - dev->unique_len = u.unique_len; 97 - dev->unique = drm_alloc(u.unique_len + 1, DRM_MEM_DRIVER); 97 + dev->unique_len = u->unique_len; 98 + dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER); 98 99 if (!dev->unique) 99 100 return -ENOMEM; 100 - if (copy_from_user(dev->unique, u.unique, dev->unique_len)) 101 + if (copy_from_user(dev->unique, u->unique, dev->unique_len)) 101 102 return -EFAULT; 102 103 103 104 dev->unique[dev->unique_len] = '\0'; ··· 170 179 * Searches for the mapping with the specified offset and copies its information 171 180 * into userspace 172 181 */ 173 - int drm_getmap(struct inode *inode, struct drm_file *file_priv, 174 - unsigned int cmd, unsigned long arg) 182 + int drm_getmap(struct drm_device *dev, void *data, 183 + struct drm_file *file_priv) 175 184 { 176 - struct drm_device *dev = file_priv->head->dev; 177 - struct drm_map __user *argp = (void __user *)arg; 178 - struct drm_map map; 185 + struct drm_map *map = data; 179 186 struct drm_map_list *r_list = NULL; 180 187 struct list_head *list; 181 188 int idx; 182 189 int i; 183 190 184 - if (copy_from_user(&map, argp, sizeof(map))) 185 - return -EFAULT; 186 - idx = map.offset; 191 + idx = map->offset; 187 192 188 193 mutex_lock(&dev->struct_mutex); 189 194 if (idx < 0) { ··· 200 213 return -EINVAL; 201 214 } 202 
215 203 - map.offset = r_list->map->offset; 204 - map.size = r_list->map->size; 205 - map.type = r_list->map->type; 206 - map.flags = r_list->map->flags; 207 - map.handle = (void *)(unsigned long)r_list->user_token; 208 - map.mtrr = r_list->map->mtrr; 216 + map->offset = r_list->map->offset; 217 + map->size = r_list->map->size; 218 + map->type = r_list->map->type; 219 + map->flags = r_list->map->flags; 220 + map->handle = (void *)(unsigned long) r_list->user_token; 221 + map->mtrr = r_list->map->mtrr; 209 222 mutex_unlock(&dev->struct_mutex); 210 223 211 - if (copy_to_user(argp, &map, sizeof(map))) 212 - return -EFAULT; 213 224 return 0; 214 225 } 215 226 ··· 224 239 * Searches for the client with the specified index and copies its information 225 240 * into userspace 226 241 */ 227 - int drm_getclient(struct inode *inode, struct drm_file *file_priv, 228 - unsigned int cmd, unsigned long arg) 242 + int drm_getclient(struct drm_device *dev, void *data, 243 + struct drm_file *file_priv) 229 244 { 230 - struct drm_device *dev = file_priv->head->dev; 231 - struct drm_client __user *argp = (struct drm_client __user *)arg; 232 - struct drm_client client; 245 + struct drm_client *client = data; 233 246 struct drm_file *pt; 234 247 int idx; 235 248 int i; 236 249 237 - if (copy_from_user(&client, argp, sizeof(client))) 238 - return -EFAULT; 239 - idx = client.idx; 250 + idx = client->idx; 240 251 mutex_lock(&dev->struct_mutex); 241 252 242 253 if (list_empty(&dev->filelist)) { ··· 246 265 break; 247 266 } 248 267 249 - client.auth = pt->authenticated; 250 - client.pid = pt->pid; 251 - client.uid = pt->uid; 252 - client.magic = pt->magic; 253 - client.iocs = pt->ioctl_count; 268 + client->auth = pt->authenticated; 269 + client->pid = pt->pid; 270 + client->uid = pt->uid; 271 + client->magic = pt->magic; 272 + client->iocs = pt->ioctl_count; 254 273 mutex_unlock(&dev->struct_mutex); 255 274 256 - if (copy_to_user(argp, &client, sizeof(client))) 257 - return -EFAULT; 258 275 
return 0; 259 276 } 260 277 ··· 266 287 * 267 288 * \return zero on success or a negative number on failure. 268 289 */ 269 - int drm_getstats(struct inode *inode, struct drm_file *file_priv, 270 - unsigned int cmd, unsigned long arg) 290 + int drm_getstats(struct drm_device *dev, void *data, 291 + struct drm_file *file_priv) 271 292 { 272 - struct drm_device *dev = file_priv->head->dev; 273 - struct drm_stats stats; 293 + struct drm_stats *stats = data; 274 294 int i; 275 295 276 - memset(&stats, 0, sizeof(stats)); 296 + memset(stats, 0, sizeof(stats)); 277 297 278 298 mutex_lock(&dev->struct_mutex); 279 299 280 300 for (i = 0; i < dev->counters; i++) { 281 301 if (dev->types[i] == _DRM_STAT_LOCK) 282 - stats.data[i].value 283 - = (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0); 302 + stats->data[i].value = 303 + (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0); 284 304 else 285 - stats.data[i].value = atomic_read(&dev->counts[i]); 286 - stats.data[i].type = dev->types[i]; 305 + stats->data[i].value = atomic_read(&dev->counts[i]); 306 + stats->data[i].type = dev->types[i]; 287 307 } 288 308 289 - stats.count = dev->counters; 309 + stats->count = dev->counters; 290 310 291 311 mutex_unlock(&dev->struct_mutex); 292 312 293 - if (copy_to_user((struct drm_stats __user *) arg, &stats, sizeof(stats))) 294 - return -EFAULT; 295 313 return 0; 296 314 } 297 315 ··· 303 327 * 304 328 * Sets the requested interface version 305 329 */ 306 - int drm_setversion(DRM_IOCTL_ARGS) 330 + int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv) 307 331 { 308 - DRM_DEVICE; 309 - struct drm_set_version sv; 310 - struct drm_set_version retv; 311 - int if_version; 312 - struct drm_set_version __user *argp = (void __user *)data; 313 - int ret; 332 + struct drm_set_version *sv = data; 333 + int if_version, retcode = 0; 314 334 315 - if (copy_from_user(&sv, argp, sizeof(sv))) 316 - return -EFAULT; 317 - 318 - retv.drm_di_major = DRM_IF_MAJOR; 319 - 
retv.drm_di_minor = DRM_IF_MINOR; 320 - retv.drm_dd_major = dev->driver->major; 321 - retv.drm_dd_minor = dev->driver->minor; 322 - 323 - if (copy_to_user(argp, &retv, sizeof(retv))) 324 - return -EFAULT; 325 - 326 - if (sv.drm_di_major != -1) { 327 - if (sv.drm_di_major != DRM_IF_MAJOR || 328 - sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR) 329 - return -EINVAL; 330 - if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor); 335 + if (sv->drm_di_major != -1) { 336 + if (sv->drm_di_major != DRM_IF_MAJOR || 337 + sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) { 338 + retcode = -EINVAL; 339 + goto done; 340 + } 341 + if_version = DRM_IF_VERSION(sv->drm_di_major, 342 + sv->drm_di_minor); 331 343 dev->if_version = max(if_version, dev->if_version); 332 - if (sv.drm_di_minor >= 1) { 344 + if (sv->drm_di_minor >= 1) { 333 345 /* 334 346 * Version 1.1 includes tying of DRM to specific device 335 347 */ 336 - ret = drm_set_busid(dev); 337 - if (ret) 338 - return ret; 348 + drm_set_busid(dev); 339 349 } 340 350 } 341 351 342 - if (sv.drm_dd_major != -1) { 343 - if (sv.drm_dd_major != dev->driver->major || 344 - sv.drm_dd_minor < 0 345 - || sv.drm_dd_minor > dev->driver->minor) 346 - return -EINVAL; 352 + if (sv->drm_dd_major != -1) { 353 + if (sv->drm_dd_major != dev->driver->major || 354 + sv->drm_dd_minor < 0 || sv->drm_dd_minor > 355 + dev->driver->minor) { 356 + retcode = -EINVAL; 357 + goto done; 358 + } 347 359 348 360 if (dev->driver->set_version) 349 - dev->driver->set_version(dev, &sv); 361 + dev->driver->set_version(dev, sv); 350 362 } 351 - return 0; 363 + 364 + done: 365 + sv->drm_di_major = DRM_IF_MAJOR; 366 + sv->drm_di_minor = DRM_IF_MINOR; 367 + sv->drm_dd_major = dev->driver->major; 368 + sv->drm_dd_minor = dev->driver->minor; 369 + 370 + return retcode; 352 371 } 353 372 354 373 /** No-op ioctl. 
*/ 355 - int drm_noop(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, 356 - unsigned long arg) 374 + int drm_noop(struct drm_device *dev, void *data, 375 + struct drm_file *file_priv) 357 376 { 358 377 DRM_DEBUG("\n"); 359 378 return 0;
+37 -52
drivers/char/drm/drm_irq.c
··· 50 50 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal 51 51 * to that of the device that this DRM instance attached to. 52 52 */ 53 - int drm_irq_by_busid(struct inode *inode, struct drm_file *file_priv, 54 - unsigned int cmd, unsigned long arg) 53 + int drm_irq_by_busid(struct drm_device *dev, void *data, 54 + struct drm_file *file_priv) 55 55 { 56 - struct drm_device *dev = file_priv->head->dev; 57 - struct drm_irq_busid __user *argp = (void __user *)arg; 58 - struct drm_irq_busid p; 56 + struct drm_irq_busid *p = data; 59 57 60 58 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 61 59 return -EINVAL; 62 60 63 - if (copy_from_user(&p, argp, sizeof(p))) 64 - return -EFAULT; 65 - 66 - if ((p.busnum >> 8) != drm_get_pci_domain(dev) || 67 - (p.busnum & 0xff) != dev->pdev->bus->number || 68 - p.devnum != PCI_SLOT(dev->pdev->devfn) || p.funcnum != PCI_FUNC(dev->pdev->devfn)) 61 + if ((p->busnum >> 8) != drm_get_pci_domain(dev) || 62 + (p->busnum & 0xff) != dev->pdev->bus->number || 63 + p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn)) 69 64 return -EINVAL; 70 65 71 - p.irq = dev->irq; 66 + p->irq = dev->irq; 72 67 73 - DRM_DEBUG("%d:%d:%d => IRQ %d\n", p.busnum, p.devnum, p.funcnum, p.irq); 74 - if (copy_to_user(argp, &p, sizeof(p))) 75 - return -EFAULT; 68 + DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum, 69 + p->irq); 70 + 76 71 return 0; 77 72 } 78 73 ··· 188 193 * 189 194 * Calls irq_install() or irq_uninstall() according to \p arg. 
190 195 */ 191 - int drm_control(struct inode *inode, struct drm_file *file_priv, 192 - unsigned int cmd, unsigned long arg) 196 + int drm_control(struct drm_device *dev, void *data, 197 + struct drm_file *file_priv) 193 198 { 194 - struct drm_device *dev = file_priv->head->dev; 195 - struct drm_control ctl; 199 + struct drm_control *ctl = data; 196 200 197 201 /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ 198 202 199 - if (copy_from_user(&ctl, (struct drm_control __user *) arg, sizeof(ctl))) 200 - return -EFAULT; 201 203 202 - switch (ctl.func) { 204 + switch (ctl->func) { 203 205 case DRM_INST_HANDLER: 204 206 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 205 207 return 0; 206 208 if (dev->if_version < DRM_IF_VERSION(1, 2) && 207 - ctl.irq != dev->irq) 209 + ctl->irq != dev->irq) 208 210 return -EINVAL; 209 211 return drm_irq_install(dev); 210 212 case DRM_UNINST_HANDLER: ··· 232 240 * 233 241 * If a signal is not requested, then calls vblank_wait(). 
234 242 */ 235 - int drm_wait_vblank(DRM_IOCTL_ARGS) 243 + int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv) 236 244 { 237 - struct drm_device *dev = file_priv->head->dev; 238 - union drm_wait_vblank __user *argp = (void __user *)data; 239 - union drm_wait_vblank vblwait; 245 + union drm_wait_vblank *vblwait = data; 240 246 struct timeval now; 241 247 int ret = 0; 242 248 unsigned int flags, seq; 243 249 244 - if (!dev->irq) 250 + if ((!dev->irq) || (!dev->irq_enabled)) 245 251 return -EINVAL; 246 252 247 - if (copy_from_user(&vblwait, argp, sizeof(vblwait))) 248 - return -EFAULT; 249 - 250 - if (vblwait.request.type & 253 + if (vblwait->request.type & 251 254 ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { 252 255 DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", 253 - vblwait.request.type, 256 + vblwait->request.type, 254 257 (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)); 255 258 return -EINVAL; 256 259 } 257 260 258 - flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK; 261 + flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; 259 262 260 263 if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ? 261 264 DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL)) ··· 259 272 seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? 
&dev->vbl_received2 260 273 : &dev->vbl_received); 261 274 262 - switch (vblwait.request.type & _DRM_VBLANK_TYPES_MASK) { 275 + switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { 263 276 case _DRM_VBLANK_RELATIVE: 264 - vblwait.request.sequence += seq; 265 - vblwait.request.type &= ~_DRM_VBLANK_RELATIVE; 277 + vblwait->request.sequence += seq; 278 + vblwait->request.type &= ~_DRM_VBLANK_RELATIVE; 266 279 case _DRM_VBLANK_ABSOLUTE: 267 280 break; 268 281 default: ··· 270 283 } 271 284 272 285 if ((flags & _DRM_VBLANK_NEXTONMISS) && 273 - (seq - vblwait.request.sequence) <= (1<<23)) { 274 - vblwait.request.sequence = seq + 1; 286 + (seq - vblwait->request.sequence) <= (1<<23)) { 287 + vblwait->request.sequence = seq + 1; 275 288 } 276 289 277 290 if (flags & _DRM_VBLANK_SIGNAL) { ··· 287 300 * that case 288 301 */ 289 302 list_for_each_entry(vbl_sig, vbl_sigs, head) { 290 - if (vbl_sig->sequence == vblwait.request.sequence 291 - && vbl_sig->info.si_signo == vblwait.request.signal 303 + if (vbl_sig->sequence == vblwait->request.sequence 304 + && vbl_sig->info.si_signo == 305 + vblwait->request.signal 292 306 && vbl_sig->task == current) { 293 307 spin_unlock_irqrestore(&dev->vbl_lock, 294 308 irqflags); 295 - vblwait.reply.sequence = seq; 309 + vblwait->reply.sequence = seq; 296 310 goto done; 297 311 } 298 312 } ··· 315 327 316 328 memset((void *)vbl_sig, 0, sizeof(*vbl_sig)); 317 329 318 - vbl_sig->sequence = vblwait.request.sequence; 319 - vbl_sig->info.si_signo = vblwait.request.signal; 330 + vbl_sig->sequence = vblwait->request.sequence; 331 + vbl_sig->info.si_signo = vblwait->request.signal; 320 332 vbl_sig->task = current; 321 333 322 334 spin_lock_irqsave(&dev->vbl_lock, irqflags); ··· 325 337 326 338 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 327 339 328 - vblwait.reply.sequence = seq; 340 + vblwait->reply.sequence = seq; 329 341 } else { 330 342 if (flags & _DRM_VBLANK_SECONDARY) { 331 343 if (dev->driver->vblank_wait2) 332 - ret = 
dev->driver->vblank_wait2(dev, &vblwait.request.sequence); 344 + ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence); 333 345 } else if (dev->driver->vblank_wait) 334 346 ret = 335 347 dev->driver->vblank_wait(dev, 336 - &vblwait.request.sequence); 348 + &vblwait->request.sequence); 337 349 338 350 do_gettimeofday(&now); 339 - vblwait.reply.tval_sec = now.tv_sec; 340 - vblwait.reply.tval_usec = now.tv_usec; 351 + vblwait->reply.tval_sec = now.tv_sec; 352 + vblwait->reply.tval_usec = now.tv_usec; 341 353 } 342 354 343 355 done: 344 - if (copy_to_user(argp, &vblwait, sizeof(vblwait))) 345 - return -EFAULT; 346 - 347 356 return ret; 348 357 } 349 358
+24 -33
drivers/char/drm/drm_lock.c
··· 48 48 * 49 49 * Add the current task to the lock wait queue, and attempt to take to lock. 50 50 */ 51 - int drm_lock(struct inode *inode, struct drm_file *file_priv, 52 - unsigned int cmd, unsigned long arg) 51 + int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) 53 52 { 54 - struct drm_device *dev = file_priv->head->dev; 55 53 DECLARE_WAITQUEUE(entry, current); 56 - struct drm_lock lock; 54 + struct drm_lock *lock = data; 57 55 int ret = 0; 58 56 59 57 ++file_priv->lock_count; 60 58 61 - if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock))) 62 - return -EFAULT; 63 - 64 - if (lock.context == DRM_KERNEL_CONTEXT) { 59 + if (lock->context == DRM_KERNEL_CONTEXT) { 65 60 DRM_ERROR("Process %d using kernel context %d\n", 66 - current->pid, lock.context); 61 + current->pid, lock->context); 67 62 return -EINVAL; 68 63 } 69 64 70 65 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", 71 - lock.context, current->pid, 72 - dev->lock.hw_lock->lock, lock.flags); 66 + lock->context, current->pid, 67 + dev->lock.hw_lock->lock, lock->flags); 73 68 74 69 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) 75 - if (lock.context < 0) 70 + if (lock->context < 0) 76 71 return -EINVAL; 77 72 78 73 add_wait_queue(&dev->lock.lock_queue, &entry); ··· 81 86 ret = -EINTR; 82 87 break; 83 88 } 84 - if (drm_lock_take(&dev->lock, lock.context)) { 89 + if (drm_lock_take(&dev->lock, lock->context)) { 85 90 dev->lock.file_priv = file_priv; 86 91 dev->lock.lock_time = jiffies; 87 92 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); ··· 101 106 __set_current_state(TASK_RUNNING); 102 107 remove_wait_queue(&dev->lock.lock_queue, &entry); 103 108 104 - DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" ); 109 + DRM_DEBUG("%d %s\n", lock->context, 110 + ret ? 
"interrupted" : "has lock"); 105 111 if (ret) return ret; 106 112 107 113 sigemptyset(&dev->sigmask); ··· 110 114 sigaddset(&dev->sigmask, SIGTSTP); 111 115 sigaddset(&dev->sigmask, SIGTTIN); 112 116 sigaddset(&dev->sigmask, SIGTTOU); 113 - dev->sigdata.context = lock.context; 117 + dev->sigdata.context = lock->context; 114 118 dev->sigdata.lock = dev->lock.hw_lock; 115 119 block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); 116 120 117 - if (dev->driver->dma_ready && (lock.flags & _DRM_LOCK_READY)) 121 + if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY)) 118 122 dev->driver->dma_ready(dev); 119 123 120 - if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) { 124 + if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) 125 + { 121 126 if (dev->driver->dma_quiescent(dev)) { 122 - DRM_DEBUG("%d waiting for DMA quiescent\n", lock.context); 127 + DRM_DEBUG("%d waiting for DMA quiescent\n", 128 + lock->context); 123 129 return -EBUSY; 124 130 } 125 131 } 126 132 127 133 if (dev->driver->kernel_context_switch && 128 - dev->last_context != lock.context) { 134 + dev->last_context != lock->context) { 129 135 dev->driver->kernel_context_switch(dev, dev->last_context, 130 - lock.context); 136 + lock->context); 131 137 } 132 138 133 139 return 0; ··· 146 148 * 147 149 * Transfer and free the lock. 
148 150 */ 149 - int drm_unlock(struct inode *inode, struct drm_file *file_priv, 150 - unsigned int cmd, unsigned long arg) 151 + int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) 151 152 { 152 - struct drm_device *dev = file_priv->head->dev; 153 - struct drm_lock lock; 153 + struct drm_lock *lock = data; 154 154 unsigned long irqflags; 155 155 156 - if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock))) 157 - return -EFAULT; 158 - 159 - if (lock.context == DRM_KERNEL_CONTEXT) { 156 + if (lock->context == DRM_KERNEL_CONTEXT) { 160 157 DRM_ERROR("Process %d using kernel context %d\n", 161 - current->pid, lock.context); 158 + current->pid, lock->context); 162 159 return -EINVAL; 163 160 } 164 161 ··· 175 182 if (dev->driver->kernel_context_switch_unlock) 176 183 dev->driver->kernel_context_switch_unlock(dev); 177 184 else { 178 - if (drm_lock_free(&dev->lock,lock.context)) { 185 + if (drm_lock_free(&dev->lock,lock->context)) { 179 186 /* FIXME: Should really bail out here. */ 180 187 } 181 188 } ··· 381 388 EXPORT_SYMBOL(drm_idlelock_release); 382 389 383 390 384 - int drm_i_have_hw_lock(struct drm_file *file_priv) 391 + int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) 385 392 { 386 - DRM_DEVICE; 387 - 388 393 return (file_priv->lock_count && dev->lock.hw_lock && 389 394 _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && 390 395 dev->lock.file_priv == file_priv);
-4
drivers/char/drm/drm_os_linux.h
··· 6 6 #include <linux/interrupt.h> /* For task queue support */ 7 7 #include <linux/delay.h> 8 8 9 - /** Ioctl arguments */ 10 - #define DRM_IOCTL_ARGS struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long data 11 9 /** Current process ID */ 12 10 #define DRM_CURRENTPID current->pid 13 11 #define DRM_SUSER(p) capable(CAP_SYS_ADMIN) ··· 28 30 #define DRM_WRITEMEMORYBARRIER() wmb() 29 31 /** Read/write memory barrier */ 30 32 #define DRM_MEMORYBARRIER() mb() 31 - /** DRM device local declaration */ 32 - #define DRM_DEVICE struct drm_device *dev = file_priv->head->dev 33 33 34 34 /** IRQ handler arguments and return type and values */ 35 35 #define DRM_IRQ_ARGS int irq, void *arg
+19 -27
drivers/char/drm/drm_scatter.c
··· 62 62 # define ScatterHandle(x) (unsigned int)(x) 63 63 #endif 64 64 65 - int drm_sg_alloc(struct inode *inode, struct drm_file *file_priv, 66 - unsigned int cmd, unsigned long arg) 65 + int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) 67 66 { 68 - struct drm_device *dev = file_priv->head->dev; 69 - struct drm_scatter_gather __user *argp = (void __user *)arg; 70 - struct drm_scatter_gather request; 71 67 struct drm_sg_mem *entry; 72 68 unsigned long pages, i, j; 73 69 ··· 75 79 if (dev->sg) 76 80 return -EINVAL; 77 81 78 - if (copy_from_user(&request, argp, sizeof(request))) 79 - return -EFAULT; 80 - 81 82 entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS); 82 83 if (!entry) 83 84 return -ENOMEM; 84 85 85 86 memset(entry, 0, sizeof(*entry)); 86 - 87 - pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE; 88 - DRM_DEBUG("sg size=%ld pages=%ld\n", request.size, pages); 87 + pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; 88 + DRM_DEBUG("sg size=%ld pages=%ld\n", request->size, pages); 89 89 90 90 entry->pages = pages; 91 91 entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist), ··· 133 141 SetPageReserved(entry->pagelist[j]); 134 142 } 135 143 136 - request.handle = entry->handle; 137 - 138 - if (copy_to_user(argp, &request, sizeof(request))) { 139 - drm_sg_cleanup(entry); 140 - return -EFAULT; 141 - } 144 + request->handle = entry->handle; 142 145 143 146 dev->sg = entry; 144 147 ··· 183 196 drm_sg_cleanup(entry); 184 197 return -ENOMEM; 185 198 } 199 + EXPORT_SYMBOL(drm_sg_alloc); 186 200 187 - int drm_sg_free(struct inode *inode, struct drm_file *file_priv, 188 - unsigned int cmd, unsigned long arg) 201 + 202 + int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, 203 + struct drm_file *file_priv) 189 204 { 190 - struct drm_device *dev = file_priv->head->dev; 191 - struct drm_scatter_gather request; 205 + struct drm_scatter_gather *request = data; 206 + 207 + return drm_sg_alloc(dev, request); 208 + 209 + } 210 + 211 
+ int drm_sg_free(struct drm_device *dev, void *data, 212 + struct drm_file *file_priv) 213 + { 214 + struct drm_scatter_gather *request = data; 192 215 struct drm_sg_mem *entry; 193 216 194 217 if (!drm_core_check_feature(dev, DRIVER_SG)) 195 218 return -EINVAL; 196 219 197 - if (copy_from_user(&request, 198 - (struct drm_scatter_gather __user *) arg, 199 - sizeof(request))) 200 - return -EFAULT; 201 - 202 220 entry = dev->sg; 203 221 dev->sg = NULL; 204 222 205 - if (!entry || entry->handle != request.handle) 223 + if (!entry || entry->handle != request->handle) 206 224 return -EINVAL; 207 225 208 226 DRM_DEBUG("sg free virtual = %p\n", entry->virtual);
+73 -176
drivers/char/drm/i810_dma.c
··· 429 429 return 0; 430 430 } 431 431 432 - /* i810 DRM version 1.1 used a smaller init structure with different 433 - * ordering of values than is currently used (drm >= 1.2). There is 434 - * no defined way to detect the XFree version to correct this problem, 435 - * however by checking using this procedure we can detect the correct 436 - * thing to do. 437 - * 438 - * #1 Read the Smaller init structure from user-space 439 - * #2 Verify the overlay_physical is a valid physical address, or NULL 440 - * If it isn't then we have a v1.1 client. Fix up params. 441 - * If it is, then we have a 1.2 client... get the rest of the data. 442 - */ 443 - static int i810_dma_init_compat(drm_i810_init_t * init, unsigned long arg) 432 + static int i810_dma_init(struct drm_device *dev, void *data, 433 + struct drm_file *file_priv) 444 434 { 445 - 446 - /* Get v1.1 init data */ 447 - if (copy_from_user(init, (drm_i810_pre12_init_t __user *) arg, 448 - sizeof(drm_i810_pre12_init_t))) { 449 - return -EFAULT; 450 - } 451 - 452 - if ((!init->overlay_physical) || (init->overlay_physical > 4096)) { 453 - 454 - /* This is a v1.2 client, just get the v1.2 init data */ 455 - DRM_INFO("Using POST v1.2 init.\n"); 456 - if (copy_from_user(init, (drm_i810_init_t __user *) arg, 457 - sizeof(drm_i810_init_t))) { 458 - return -EFAULT; 459 - } 460 - } else { 461 - 462 - /* This is a v1.1 client, fix the params */ 463 - DRM_INFO("Using PRE v1.2 init.\n"); 464 - init->pitch_bits = init->h; 465 - init->pitch = init->w; 466 - init->h = init->overlay_physical; 467 - init->w = init->overlay_offset; 468 - init->overlay_physical = 0; 469 - init->overlay_offset = 0; 470 - } 471 - 472 - return 0; 473 - } 474 - 475 - static int i810_dma_init(struct inode *inode, struct drm_file *file_priv, 476 - unsigned int cmd, unsigned long arg) 477 - { 478 - struct drm_device *dev = file_priv->head->dev; 479 435 drm_i810_private_t *dev_priv; 480 - drm_i810_init_t init; 436 + drm_i810_init_t *init = data; 481 437 int 
retcode = 0; 482 438 483 - /* Get only the init func */ 484 - if (copy_from_user 485 - (&init, (void __user *)arg, sizeof(drm_i810_init_func_t))) 486 - return -EFAULT; 487 - 488 - switch (init.func) { 489 - case I810_INIT_DMA: 490 - /* This case is for backward compatibility. It 491 - * handles XFree 4.1.0 and 4.2.0, and has to 492 - * do some parameter checking as described below. 493 - * It will someday go away. 494 - */ 495 - retcode = i810_dma_init_compat(&init, arg); 496 - if (retcode) 497 - return retcode; 498 - 499 - dev_priv = drm_alloc(sizeof(drm_i810_private_t), 500 - DRM_MEM_DRIVER); 501 - if (dev_priv == NULL) 502 - return -ENOMEM; 503 - retcode = i810_dma_initialize(dev, dev_priv, &init); 504 - break; 505 - 506 - default: 439 + switch (init->func) { 507 440 case I810_INIT_DMA_1_4: 508 441 DRM_INFO("Using v1.4 init.\n"); 509 - if (copy_from_user(&init, (drm_i810_init_t __user *) arg, 510 - sizeof(drm_i810_init_t))) { 511 - return -EFAULT; 512 - } 513 442 dev_priv = drm_alloc(sizeof(drm_i810_private_t), 514 443 DRM_MEM_DRIVER); 515 444 if (dev_priv == NULL) 516 445 return -ENOMEM; 517 - retcode = i810_dma_initialize(dev, dev_priv, &init); 446 + retcode = i810_dma_initialize(dev, dev_priv, init); 518 447 break; 519 448 520 449 case I810_CLEANUP_DMA: 521 450 DRM_INFO("DMA Cleanup\n"); 522 451 retcode = i810_dma_cleanup(dev); 523 452 break; 453 + default: 454 + return -EINVAL; 524 455 } 525 456 526 457 return retcode; ··· 928 997 } 929 998 } 930 999 931 - static int i810_flush_ioctl(struct inode *inode, struct drm_file *file_priv, 932 - unsigned int cmd, unsigned long arg) 1000 + static int i810_flush_ioctl(struct drm_device *dev, void *data, 1001 + struct drm_file *file_priv) 933 1002 { 934 - struct drm_device *dev = file_priv->head->dev; 935 - 936 1003 LOCK_TEST_WITH_RETURN(dev, file_priv); 937 1004 938 1005 i810_flush_queue(dev); 939 1006 return 0; 940 1007 } 941 1008 942 - static int i810_dma_vertex(struct inode *inode, struct drm_file *file_priv, 943 - 
unsigned int cmd, unsigned long arg) 1009 + static int i810_dma_vertex(struct drm_device *dev, void *data, 1010 + struct drm_file *file_priv) 944 1011 { 945 - struct drm_device *dev = file_priv->head->dev; 946 1012 struct drm_device_dma *dma = dev->dma; 947 1013 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 948 1014 u32 *hw_status = dev_priv->hw_status_page; 949 1015 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) 950 1016 dev_priv->sarea_priv; 951 - drm_i810_vertex_t vertex; 952 - 953 - if (copy_from_user 954 - (&vertex, (drm_i810_vertex_t __user *) arg, sizeof(vertex))) 955 - return -EFAULT; 1017 + drm_i810_vertex_t *vertex = data; 956 1018 957 1019 LOCK_TEST_WITH_RETURN(dev, file_priv); 958 1020 959 1021 DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n", 960 - vertex.idx, vertex.used, vertex.discard); 1022 + vertex->idx, vertex->used, vertex->discard); 961 1023 962 - if (vertex.idx < 0 || vertex.idx > dma->buf_count) 1024 + if (vertex->idx < 0 || vertex->idx > dma->buf_count) 963 1025 return -EINVAL; 964 1026 965 1027 i810_dma_dispatch_vertex(dev, 966 - dma->buflist[vertex.idx], 967 - vertex.discard, vertex.used); 1028 + dma->buflist[vertex->idx], 1029 + vertex->discard, vertex->used); 968 1030 969 - atomic_add(vertex.used, &dev->counts[_DRM_STAT_SECONDARY]); 1031 + atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); 970 1032 atomic_inc(&dev->counts[_DRM_STAT_DMA]); 971 1033 sarea_priv->last_enqueue = dev_priv->counter - 1; 972 1034 sarea_priv->last_dispatch = (int)hw_status[5]; ··· 967 1043 return 0; 968 1044 } 969 1045 970 - static int i810_clear_bufs(struct inode *inode, struct drm_file *file_priv, 971 - unsigned int cmd, unsigned long arg) 1046 + static int i810_clear_bufs(struct drm_device *dev, void *data, 1047 + struct drm_file *file_priv) 972 1048 { 973 - struct drm_device *dev = file_priv->head->dev; 974 - drm_i810_clear_t clear; 975 - 976 - if (copy_from_user 977 - (&clear, (drm_i810_clear_t __user *) arg, 
sizeof(clear))) 978 - return -EFAULT; 1049 + drm_i810_clear_t *clear = data; 979 1050 980 1051 LOCK_TEST_WITH_RETURN(dev, file_priv); 981 1052 ··· 979 1060 return -EINVAL; 980 1061 } 981 1062 982 - i810_dma_dispatch_clear(dev, clear.flags, 983 - clear.clear_color, clear.clear_depth); 1063 + i810_dma_dispatch_clear(dev, clear->flags, 1064 + clear->clear_color, clear->clear_depth); 984 1065 return 0; 985 1066 } 986 1067 987 - static int i810_swap_bufs(struct inode *inode, struct drm_file *file_priv, 988 - unsigned int cmd, unsigned long arg) 1068 + static int i810_swap_bufs(struct drm_device *dev, void *data, 1069 + struct drm_file *file_priv) 989 1070 { 990 - struct drm_device *dev = file_priv->head->dev; 991 - 992 1071 DRM_DEBUG("i810_swap_bufs\n"); 993 1072 994 1073 LOCK_TEST_WITH_RETURN(dev, file_priv); ··· 995 1078 return 0; 996 1079 } 997 1080 998 - static int i810_getage(struct inode *inode, struct drm_file *file_priv, 999 - unsigned int cmd, 1000 - unsigned long arg) 1081 + static int i810_getage(struct drm_device *dev, void *data, 1082 + struct drm_file *file_priv) 1001 1083 { 1002 - struct drm_device *dev = file_priv->head->dev; 1003 1084 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1004 1085 u32 *hw_status = dev_priv->hw_status_page; 1005 1086 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) ··· 1007 1092 return 0; 1008 1093 } 1009 1094 1010 - static int i810_getbuf(struct inode *inode, struct drm_file *file_priv, 1011 - unsigned int cmd, unsigned long arg) 1095 + static int i810_getbuf(struct drm_device *dev, void *data, 1096 + struct drm_file *file_priv) 1012 1097 { 1013 - struct drm_device *dev = file_priv->head->dev; 1014 1098 int retcode = 0; 1015 - drm_i810_dma_t d; 1099 + drm_i810_dma_t *d = data; 1016 1100 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1017 1101 u32 *hw_status = dev_priv->hw_status_page; 1018 1102 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) 1019 1103 
dev_priv->sarea_priv; 1020 1104 1021 - if (copy_from_user(&d, (drm_i810_dma_t __user *) arg, sizeof(d))) 1022 - return -EFAULT; 1023 - 1024 1105 LOCK_TEST_WITH_RETURN(dev, file_priv); 1025 1106 1026 - d.granted = 0; 1107 + d->granted = 0; 1027 1108 1028 - retcode = i810_dma_get_buffer(dev, &d, file_priv); 1109 + retcode = i810_dma_get_buffer(dev, d, file_priv); 1029 1110 1030 1111 DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", 1031 - current->pid, retcode, d.granted); 1112 + current->pid, retcode, d->granted); 1032 1113 1033 - if (copy_to_user((void __user *) arg, &d, sizeof(d))) 1034 - return -EFAULT; 1035 1114 sarea_priv->last_dispatch = (int)hw_status[5]; 1036 1115 1037 1116 return retcode; 1038 1117 } 1039 1118 1040 - static int i810_copybuf(struct inode *inode, struct drm_file *file_priv, 1041 - unsigned int cmd, unsigned long arg) 1119 + static int i810_copybuf(struct drm_device *dev, void *data, 1120 + struct drm_file *file_priv) 1042 1121 { 1043 1122 /* Never copy - 2.4.x doesn't need it */ 1044 1123 return 0; 1045 1124 } 1046 1125 1047 - static int i810_docopy(struct inode *inode, struct drm_file *file_priv, 1048 - unsigned int cmd, unsigned long arg) 1126 + static int i810_docopy(struct drm_device *dev, void *data, 1127 + struct drm_file *file_priv) 1049 1128 { 1050 1129 /* Never copy - 2.4.x doesn't need it */ 1051 1130 return 0; ··· 1105 1196 ADVANCE_LP_RING(); 1106 1197 } 1107 1198 1108 - static int i810_dma_mc(struct inode *inode, struct drm_file *file_priv, 1109 - unsigned int cmd, unsigned long arg) 1199 + static int i810_dma_mc(struct drm_device *dev, void *data, 1200 + struct drm_file *file_priv) 1110 1201 { 1111 - struct drm_device *dev = file_priv->head->dev; 1112 1202 struct drm_device_dma *dma = dev->dma; 1113 1203 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1114 1204 u32 *hw_status = dev_priv->hw_status_page; 1115 1205 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) 1116 1206 dev_priv->sarea_priv; 1117 
- drm_i810_mc_t mc; 1118 - 1119 - if (copy_from_user(&mc, (drm_i810_mc_t __user *) arg, sizeof(mc))) 1120 - return -EFAULT; 1207 + drm_i810_mc_t *mc = data; 1121 1208 1122 1209 LOCK_TEST_WITH_RETURN(dev, file_priv); 1123 1210 1124 - if (mc.idx >= dma->buf_count || mc.idx < 0) 1211 + if (mc->idx >= dma->buf_count || mc->idx < 0) 1125 1212 return -EINVAL; 1126 1213 1127 - i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used, 1128 - mc.last_render); 1214 + i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used, 1215 + mc->last_render); 1129 1216 1130 - atomic_add(mc.used, &dev->counts[_DRM_STAT_SECONDARY]); 1217 + atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); 1131 1218 atomic_inc(&dev->counts[_DRM_STAT_DMA]); 1132 1219 sarea_priv->last_enqueue = dev_priv->counter - 1; 1133 1220 sarea_priv->last_dispatch = (int)hw_status[5]; ··· 1131 1226 return 0; 1132 1227 } 1133 1228 1134 - static int i810_rstatus(struct inode *inode, struct drm_file *file_priv, 1135 - unsigned int cmd, unsigned long arg) 1229 + static int i810_rstatus(struct drm_device *dev, void *data, 1230 + struct drm_file *file_priv) 1136 1231 { 1137 - struct drm_device *dev = file_priv->head->dev; 1138 1232 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1139 1233 1140 1234 return (int)(((u32 *) (dev_priv->hw_status_page))[4]); 1141 1235 } 1142 1236 1143 - static int i810_ov0_info(struct inode *inode, struct drm_file *file_priv, 1144 - unsigned int cmd, unsigned long arg) 1237 + static int i810_ov0_info(struct drm_device *dev, void *data, 1238 + struct drm_file *file_priv) 1145 1239 { 1146 - struct drm_device *dev = file_priv->head->dev; 1147 1240 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1148 - drm_i810_overlay_t data; 1241 + drm_i810_overlay_t *ov = data; 1149 1242 1150 - data.offset = dev_priv->overlay_offset; 1151 - data.physical = dev_priv->overlay_physical; 1152 - if (copy_to_user 1153 - ((drm_i810_overlay_t __user *) arg, &data, 
sizeof(data))) 1154 - return -EFAULT; 1243 + ov->offset = dev_priv->overlay_offset; 1244 + ov->physical = dev_priv->overlay_physical; 1245 + 1155 1246 return 0; 1156 1247 } 1157 1248 1158 - static int i810_fstatus(struct inode *inode, struct drm_file *file_priv, 1159 - unsigned int cmd, unsigned long arg) 1249 + static int i810_fstatus(struct drm_device *dev, void *data, 1250 + struct drm_file *file_priv) 1160 1251 { 1161 - struct drm_device *dev = file_priv->head->dev; 1162 1252 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1163 1253 1164 1254 LOCK_TEST_WITH_RETURN(dev, file_priv); 1165 - 1166 1255 return I810_READ(0x30008); 1167 1256 } 1168 1257 1169 - static int i810_ov0_flip(struct inode *inode, struct drm_file *file_priv, 1170 - unsigned int cmd, unsigned long arg) 1258 + static int i810_ov0_flip(struct drm_device *dev, void *data, 1259 + struct drm_file *file_priv) 1171 1260 { 1172 - struct drm_device *dev = file_priv->head->dev; 1173 1261 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; 1174 1262 1175 1263 LOCK_TEST_WITH_RETURN(dev, file_priv); ··· 1197 1299 return 0; 1198 1300 } 1199 1301 1200 - static int i810_flip_bufs(struct inode *inode, struct drm_file *file_priv, 1201 - unsigned int cmd, unsigned long arg) 1302 + static int i810_flip_bufs(struct drm_device *dev, void *data, 1303 + struct drm_file *file_priv) 1202 1304 { 1203 - struct drm_device *dev = file_priv->head->dev; 1204 1305 drm_i810_private_t *dev_priv = dev->dev_private; 1205 1306 1206 1307 DRM_DEBUG("%s\n", __FUNCTION__); ··· 1252 1355 return 0; 1253 1356 } 1254 1357 1255 - drm_ioctl_desc_t i810_ioctls[] = { 1256 - [DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1257 - [DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, DRM_AUTH}, 1258 - [DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, DRM_AUTH}, 1259 - [DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, DRM_AUTH}, 1260 - [DRM_IOCTL_NR(DRM_I810_GETAGE)] = 
{i810_getage, DRM_AUTH}, 1261 - [DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, DRM_AUTH}, 1262 - [DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, DRM_AUTH}, 1263 - [DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, DRM_AUTH}, 1264 - [DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, DRM_AUTH}, 1265 - [DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, DRM_AUTH}, 1266 - [DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, DRM_AUTH}, 1267 - [DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, DRM_AUTH}, 1268 - [DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1269 - [DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, DRM_AUTH}, 1270 - [DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, DRM_AUTH} 1358 + struct drm_ioctl_desc i810_ioctls[] = { 1359 + DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1360 + DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH), 1361 + DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH), 1362 + DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH), 1363 + DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH), 1364 + DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH), 1365 + DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH), 1366 + DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH), 1367 + DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH), 1368 + DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH), 1369 + DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH), 1370 + DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH), 1371 + DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1372 + DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH), 1373 + DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH) 1271 1374 }; 1272 1375 1273 1376 int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
+1 -1
drivers/char/drm/i810_drv.h
··· 126 126 struct drm_file *file_priv); 127 127 extern int i810_driver_device_is_agp(struct drm_device * dev); 128 128 129 - extern drm_ioctl_desc_t i810_ioctls[]; 129 + extern struct drm_ioctl_desc i810_ioctls[]; 130 130 extern int i810_max_ioctl; 131 131 132 132 #define I810_BASE(reg) ((unsigned long) \
+61 -96
drivers/char/drm/i830_dma.c
··· 450 450 return 0; 451 451 } 452 452 453 - static int i830_dma_init(struct inode *inode, struct drm_file *file_priv, 454 - unsigned int cmd, unsigned long arg) 453 + static int i830_dma_init(struct drm_device *dev, void *data, 454 + struct drm_file *file_priv) 455 455 { 456 - struct drm_device *dev = file_priv->head->dev; 457 456 drm_i830_private_t *dev_priv; 458 - drm_i830_init_t init; 457 + drm_i830_init_t *init = data; 459 458 int retcode = 0; 460 459 461 - if (copy_from_user(&init, (void *__user)arg, sizeof(init))) 462 - return -EFAULT; 463 - 464 - switch (init.func) { 460 + switch (init->func) { 465 461 case I830_INIT_DMA: 466 462 dev_priv = drm_alloc(sizeof(drm_i830_private_t), 467 463 DRM_MEM_DRIVER); 468 464 if (dev_priv == NULL) 469 465 return -ENOMEM; 470 - retcode = i830_dma_initialize(dev, dev_priv, &init); 466 + retcode = i830_dma_initialize(dev, dev_priv, init); 471 467 break; 472 468 case I830_CLEANUP_DMA: 473 469 retcode = i830_dma_cleanup(dev); ··· 1272 1276 } 1273 1277 } 1274 1278 1275 - static int i830_flush_ioctl(struct inode *inode, struct drm_file *file_priv, 1276 - unsigned int cmd, unsigned long arg) 1279 + static int i830_flush_ioctl(struct drm_device *dev, void *data, 1280 + struct drm_file *file_priv) 1277 1281 { 1278 - struct drm_device *dev = file_priv->head->dev; 1279 - 1280 1282 LOCK_TEST_WITH_RETURN(dev, file_priv); 1281 1283 1282 1284 i830_flush_queue(dev); 1283 1285 return 0; 1284 1286 } 1285 1287 1286 - static int i830_dma_vertex(struct inode *inode, struct drm_file *file_priv, 1287 - unsigned int cmd, unsigned long arg) 1288 + static int i830_dma_vertex(struct drm_device *dev, void *data, 1289 + struct drm_file *file_priv) 1288 1290 { 1289 - struct drm_device *dev = file_priv->head->dev; 1290 1291 struct drm_device_dma *dma = dev->dma; 1291 1292 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; 1292 1293 u32 *hw_status = dev_priv->hw_status_page; 1293 1294 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t 
*) 1294 1295 dev_priv->sarea_priv; 1295 - drm_i830_vertex_t vertex; 1296 - 1297 - if (copy_from_user 1298 - (&vertex, (drm_i830_vertex_t __user *) arg, sizeof(vertex))) 1299 - return -EFAULT; 1296 + drm_i830_vertex_t *vertex = data; 1300 1297 1301 1298 LOCK_TEST_WITH_RETURN(dev, file_priv); 1302 1299 1303 1300 DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n", 1304 - vertex.idx, vertex.used, vertex.discard); 1301 + vertex->idx, vertex->used, vertex->discard); 1305 1302 1306 - if (vertex.idx < 0 || vertex.idx > dma->buf_count) 1303 + if (vertex->idx < 0 || vertex->idx > dma->buf_count) 1307 1304 return -EINVAL; 1308 1305 1309 1306 i830_dma_dispatch_vertex(dev, 1310 - dma->buflist[vertex.idx], 1311 - vertex.discard, vertex.used); 1307 + dma->buflist[vertex->idx], 1308 + vertex->discard, vertex->used); 1312 1309 1313 1310 sarea_priv->last_enqueue = dev_priv->counter - 1; 1314 1311 sarea_priv->last_dispatch = (int)hw_status[5]; ··· 1309 1320 return 0; 1310 1321 } 1311 1322 1312 - static int i830_clear_bufs(struct inode *inode, struct drm_file *file_priv, 1313 - unsigned int cmd, unsigned long arg) 1323 + static int i830_clear_bufs(struct drm_device *dev, void *data, 1324 + struct drm_file *file_priv) 1314 1325 { 1315 - struct drm_device *dev = file_priv->head->dev; 1316 - drm_i830_clear_t clear; 1317 - 1318 - if (copy_from_user 1319 - (&clear, (drm_i830_clear_t __user *) arg, sizeof(clear))) 1320 - return -EFAULT; 1326 + drm_i830_clear_t *clear = data; 1321 1327 1322 1328 LOCK_TEST_WITH_RETURN(dev, file_priv); 1323 1329 ··· 1321 1337 return -EINVAL; 1322 1338 } 1323 1339 1324 - i830_dma_dispatch_clear(dev, clear.flags, 1325 - clear.clear_color, 1326 - clear.clear_depth, clear.clear_depthmask); 1340 + i830_dma_dispatch_clear(dev, clear->flags, 1341 + clear->clear_color, 1342 + clear->clear_depth, clear->clear_depthmask); 1327 1343 return 0; 1328 1344 } 1329 1345 1330 - static int i830_swap_bufs(struct inode *inode, struct drm_file *file_priv, 1331 - unsigned int 
cmd, unsigned long arg) 1346 + static int i830_swap_bufs(struct drm_device *dev, void *data, 1347 + struct drm_file *file_priv) 1332 1348 { 1333 - struct drm_device *dev = file_priv->head->dev; 1334 - 1335 1349 DRM_DEBUG("i830_swap_bufs\n"); 1336 1350 1337 1351 LOCK_TEST_WITH_RETURN(dev, file_priv); ··· 1362 1380 return 0; 1363 1381 } 1364 1382 1365 - static int i830_flip_bufs(struct inode *inode, struct drm_file *file_priv, 1366 - unsigned int cmd, unsigned long arg) 1383 + static int i830_flip_bufs(struct drm_device *dev, void *data, 1384 + struct drm_file *file_priv) 1367 1385 { 1368 - struct drm_device *dev = file_priv->head->dev; 1369 1386 drm_i830_private_t *dev_priv = dev->dev_private; 1370 1387 1371 1388 DRM_DEBUG("%s\n", __FUNCTION__); ··· 1378 1397 return 0; 1379 1398 } 1380 1399 1381 - static int i830_getage(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, 1382 - unsigned long arg) 1400 + static int i830_getage(struct drm_device *dev, void *data, 1401 + struct drm_file *file_priv) 1383 1402 { 1384 - struct drm_device *dev = file_priv->head->dev; 1385 1403 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; 1386 1404 u32 *hw_status = dev_priv->hw_status_page; 1387 1405 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) ··· 1390 1410 return 0; 1391 1411 } 1392 1412 1393 - static int i830_getbuf(struct inode *inode, struct drm_file *file_priv, 1394 - unsigned int cmd, unsigned long arg) 1413 + static int i830_getbuf(struct drm_device *dev, void *data, 1414 + struct drm_file *file_priv) 1395 1415 { 1396 - struct drm_device *dev = file_priv->head->dev; 1397 1416 int retcode = 0; 1398 - drm_i830_dma_t d; 1417 + drm_i830_dma_t *d = data; 1399 1418 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; 1400 1419 u32 *hw_status = dev_priv->hw_status_page; 1401 1420 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) 1402 1421 dev_priv->sarea_priv; 1403 1422 1404 1423 DRM_DEBUG("getbuf\n"); 1405 - if 
(copy_from_user(&d, (drm_i830_dma_t __user *) arg, sizeof(d))) 1406 - return -EFAULT; 1407 1424 1408 1425 LOCK_TEST_WITH_RETURN(dev, file_priv); 1409 1426 1410 - d.granted = 0; 1427 + d->granted = 0; 1411 1428 1412 - retcode = i830_dma_get_buffer(dev, &d, file_priv); 1429 + retcode = i830_dma_get_buffer(dev, d, file_priv); 1413 1430 1414 1431 DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n", 1415 - current->pid, retcode, d.granted); 1432 + current->pid, retcode, d->granted); 1416 1433 1417 - if (copy_to_user((void __user *) arg, &d, sizeof(d))) 1418 - return -EFAULT; 1419 1434 sarea_priv->last_dispatch = (int)hw_status[5]; 1420 1435 1421 1436 return retcode; 1422 1437 } 1423 1438 1424 - static int i830_copybuf(struct inode *inode, 1425 - struct drm_file *file_priv, unsigned int cmd, unsigned long arg) 1439 + static int i830_copybuf(struct drm_device *dev, void *data, 1440 + struct drm_file *file_priv) 1426 1441 { 1427 1442 /* Never copy - 2.4.x doesn't need it */ 1428 1443 return 0; 1429 1444 } 1430 1445 1431 - static int i830_docopy(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, 1432 - unsigned long arg) 1446 + static int i830_docopy(struct drm_device *dev, void *data, 1447 + struct drm_file *file_priv) 1433 1448 { 1434 1449 return 0; 1435 1450 } 1436 1451 1437 - static int i830_getparam(struct inode *inode, struct drm_file *file_priv, 1438 - unsigned int cmd, unsigned long arg) 1452 + static int i830_getparam(struct drm_device *dev, void *data, 1453 + struct drm_file *file_priv) 1439 1454 { 1440 - struct drm_device *dev = file_priv->head->dev; 1441 1455 drm_i830_private_t *dev_priv = dev->dev_private; 1442 - drm_i830_getparam_t param; 1456 + drm_i830_getparam_t *param = data; 1443 1457 int value; 1444 1458 1445 1459 if (!dev_priv) { ··· 1441 1467 return -EINVAL; 1442 1468 } 1443 1469 1444 - if (copy_from_user 1445 - (&param, (drm_i830_getparam_t __user *) arg, sizeof(param))) 1446 - return -EFAULT; 1447 - 1448 - switch (param.param) { 
1470 + switch (param->param) { 1449 1471 case I830_PARAM_IRQ_ACTIVE: 1450 1472 value = dev->irq_enabled; 1451 1473 break; ··· 1449 1479 return -EINVAL; 1450 1480 } 1451 1481 1452 - if (copy_to_user(param.value, &value, sizeof(int))) { 1482 + if (copy_to_user(param->value, &value, sizeof(int))) { 1453 1483 DRM_ERROR("copy_to_user\n"); 1454 1484 return -EFAULT; 1455 1485 } ··· 1457 1487 return 0; 1458 1488 } 1459 1489 1460 - static int i830_setparam(struct inode *inode, struct drm_file *file_priv, 1461 - unsigned int cmd, unsigned long arg) 1490 + static int i830_setparam(struct drm_device *dev, void *data, 1491 + struct drm_file *file_priv) 1462 1492 { 1463 - struct drm_device *dev = file_priv->head->dev; 1464 1493 drm_i830_private_t *dev_priv = dev->dev_private; 1465 - drm_i830_setparam_t param; 1494 + drm_i830_setparam_t *param = data; 1466 1495 1467 1496 if (!dev_priv) { 1468 1497 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1469 1498 return -EINVAL; 1470 1499 } 1471 1500 1472 - if (copy_from_user 1473 - (&param, (drm_i830_setparam_t __user *) arg, sizeof(param))) 1474 - return -EFAULT; 1475 - 1476 - switch (param.param) { 1501 + switch (param->param) { 1477 1502 case I830_SETPARAM_USE_MI_BATCHBUFFER_START: 1478 - dev_priv->use_mi_batchbuffer_start = param.value; 1503 + dev_priv->use_mi_batchbuffer_start = param->value; 1479 1504 break; 1480 1505 default: 1481 1506 return -EINVAL; ··· 1517 1552 return 0; 1518 1553 } 1519 1554 1520 - drm_ioctl_desc_t i830_ioctls[] = { 1521 - [DRM_IOCTL_NR(DRM_I830_INIT)] = {i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1522 - [DRM_IOCTL_NR(DRM_I830_VERTEX)] = {i830_dma_vertex, DRM_AUTH}, 1523 - [DRM_IOCTL_NR(DRM_I830_CLEAR)] = {i830_clear_bufs, DRM_AUTH}, 1524 - [DRM_IOCTL_NR(DRM_I830_FLUSH)] = {i830_flush_ioctl, DRM_AUTH}, 1525 - [DRM_IOCTL_NR(DRM_I830_GETAGE)] = {i830_getage, DRM_AUTH}, 1526 - [DRM_IOCTL_NR(DRM_I830_GETBUF)] = {i830_getbuf, DRM_AUTH}, 1527 - [DRM_IOCTL_NR(DRM_I830_SWAP)] = {i830_swap_bufs, 
DRM_AUTH}, 1528 - [DRM_IOCTL_NR(DRM_I830_COPY)] = {i830_copybuf, DRM_AUTH}, 1529 - [DRM_IOCTL_NR(DRM_I830_DOCOPY)] = {i830_docopy, DRM_AUTH}, 1530 - [DRM_IOCTL_NR(DRM_I830_FLIP)] = {i830_flip_bufs, DRM_AUTH}, 1531 - [DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = {i830_irq_emit, DRM_AUTH}, 1532 - [DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = {i830_irq_wait, DRM_AUTH}, 1533 - [DRM_IOCTL_NR(DRM_I830_GETPARAM)] = {i830_getparam, DRM_AUTH}, 1534 - [DRM_IOCTL_NR(DRM_I830_SETPARAM)] = {i830_setparam, DRM_AUTH} 1555 + struct drm_ioctl_desc i830_ioctls[] = { 1556 + DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1557 + DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH), 1558 + DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH), 1559 + DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH), 1560 + DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH), 1561 + DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH), 1562 + DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH), 1563 + DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH), 1564 + DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH), 1565 + DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH), 1566 + DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH), 1567 + DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH), 1568 + DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH), 1569 + DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH) 1535 1570 }; 1536 1571 1537 1572 int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
+5 -5
drivers/char/drm/i830_drv.h
··· 122 122 123 123 } drm_i830_private_t; 124 124 125 - extern drm_ioctl_desc_t i830_ioctls[]; 125 + extern struct drm_ioctl_desc i830_ioctls[]; 126 126 extern int i830_max_ioctl; 127 127 128 128 /* i830_irq.c */ 129 - extern int i830_irq_emit(struct inode *inode, struct drm_file *file_priv, 130 - unsigned int cmd, unsigned long arg); 131 - extern int i830_irq_wait(struct inode *inode, struct drm_file *file_priv, 132 - unsigned int cmd, unsigned long arg); 129 + extern int i830_irq_emit(struct drm_device *dev, void *data, 130 + struct drm_file *file_priv); 131 + extern int i830_irq_wait(struct drm_device *dev, void *data, 132 + struct drm_file *file_priv); 133 133 134 134 extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS); 135 135 extern void i830_driver_irq_preinstall(struct drm_device * dev);
+8 -18
drivers/char/drm/i830_irq.c
··· 114 114 115 115 /* Needs the lock as it touches the ring. 116 116 */ 117 - int i830_irq_emit(struct inode *inode, struct drm_file *file_priv, 118 - unsigned int cmd, unsigned long arg) 117 + int i830_irq_emit(struct drm_device *dev, void *data, 118 + struct drm_file *file_priv) 119 119 { 120 - struct drm_device *dev = file_priv->head->dev; 121 120 drm_i830_private_t *dev_priv = dev->dev_private; 122 - drm_i830_irq_emit_t emit; 121 + drm_i830_irq_emit_t *emit = data; 123 122 int result; 124 123 125 124 LOCK_TEST_WITH_RETURN(dev, file_priv); ··· 128 129 return -EINVAL; 129 130 } 130 131 131 - if (copy_from_user 132 - (&emit, (drm_i830_irq_emit_t __user *) arg, sizeof(emit))) 133 - return -EFAULT; 134 - 135 132 result = i830_emit_irq(dev); 136 133 137 - if (copy_to_user(emit.irq_seq, &result, sizeof(int))) { 134 + if (copy_to_user(emit->irq_seq, &result, sizeof(int))) { 138 135 DRM_ERROR("copy_to_user\n"); 139 136 return -EFAULT; 140 137 } ··· 140 145 141 146 /* Doesn't need the hardware lock. 142 147 */ 143 - int i830_irq_wait(struct inode *inode, struct drm_file *file_priv, 144 - unsigned int cmd, unsigned long arg) 148 + int i830_irq_wait(struct drm_device *dev, void *data, 149 + struct drm_file *file_priv) 145 150 { 146 - struct drm_device *dev = file_priv->head->dev; 147 151 drm_i830_private_t *dev_priv = dev->dev_private; 148 - drm_i830_irq_wait_t irqwait; 152 + drm_i830_irq_wait_t *irqwait = data; 149 153 150 154 if (!dev_priv) { 151 155 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 152 156 return -EINVAL; 153 157 } 154 158 155 - if (copy_from_user(&irqwait, (drm_i830_irq_wait_t __user *) arg, 156 - sizeof(irqwait))) 157 - return -EFAULT; 158 - 159 - return i830_wait_irq(dev, irqwait.irq_seq); 159 + return i830_wait_irq(dev, irqwait->irq_seq); 160 160 } 161 161 162 162 /* drm_dma.h hooks
+63 -81
drivers/char/drm/i915_dma.c
··· 251 251 return 0; 252 252 } 253 253 254 - static int i915_dma_init(DRM_IOCTL_ARGS) 254 + static int i915_dma_init(struct drm_device *dev, void *data, 255 + struct drm_file *file_priv) 255 256 { 256 - DRM_DEVICE; 257 257 drm_i915_private_t *dev_priv; 258 - drm_i915_init_t init; 258 + drm_i915_init_t *init = data; 259 259 int retcode = 0; 260 260 261 - DRM_COPY_FROM_USER_IOCTL(init, (drm_i915_init_t __user *) data, 262 - sizeof(init)); 263 - 264 - switch (init.func) { 261 + switch (init->func) { 265 262 case I915_INIT_DMA: 266 263 dev_priv = drm_alloc(sizeof(drm_i915_private_t), 267 264 DRM_MEM_DRIVER); 268 265 if (dev_priv == NULL) 269 266 return -ENOMEM; 270 - retcode = i915_initialize(dev, dev_priv, &init); 267 + retcode = i915_initialize(dev, dev_priv, init); 271 268 break; 272 269 case I915_CLEANUP_DMA: 273 270 retcode = i915_dma_cleanup(dev); ··· 595 598 return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__); 596 599 } 597 600 598 - static int i915_flush_ioctl(DRM_IOCTL_ARGS) 601 + static int i915_flush_ioctl(struct drm_device *dev, void *data, 602 + struct drm_file *file_priv) 599 603 { 600 - DRM_DEVICE; 601 - 602 604 LOCK_TEST_WITH_RETURN(dev, file_priv); 603 605 604 606 return i915_quiescent(dev); 605 607 } 606 608 607 - static int i915_batchbuffer(DRM_IOCTL_ARGS) 609 + static int i915_batchbuffer(struct drm_device *dev, void *data, 610 + struct drm_file *file_priv) 608 611 { 609 - DRM_DEVICE; 610 612 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 611 613 u32 *hw_status = dev_priv->hw_status_page; 612 614 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 613 615 dev_priv->sarea_priv; 614 - drm_i915_batchbuffer_t batch; 616 + drm_i915_batchbuffer_t *batch = data; 615 617 int ret; 616 618 617 619 if (!dev_priv->allow_batchbuffer) { ··· 618 622 return -EINVAL; 619 623 } 620 624 621 - DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data, 622 - sizeof(batch)); 623 - 624 625 DRM_DEBUG("i915 batchbuffer, 
start %x used %d cliprects %d\n", 625 - batch.start, batch.used, batch.num_cliprects); 626 + batch->start, batch->used, batch->num_cliprects); 626 627 627 628 LOCK_TEST_WITH_RETURN(dev, file_priv); 628 629 629 - if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects, 630 - batch.num_cliprects * 630 + if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, 631 + batch->num_cliprects * 631 632 sizeof(struct drm_clip_rect))) 632 633 return -EFAULT; 633 634 634 - ret = i915_dispatch_batchbuffer(dev, &batch); 635 + ret = i915_dispatch_batchbuffer(dev, batch); 635 636 636 637 sarea_priv->last_dispatch = (int)hw_status[5]; 637 638 return ret; 638 639 } 639 640 640 - static int i915_cmdbuffer(DRM_IOCTL_ARGS) 641 + static int i915_cmdbuffer(struct drm_device *dev, void *data, 642 + struct drm_file *file_priv) 641 643 { 642 - DRM_DEVICE; 643 644 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 644 645 u32 *hw_status = dev_priv->hw_status_page; 645 646 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 646 647 dev_priv->sarea_priv; 647 - drm_i915_cmdbuffer_t cmdbuf; 648 + drm_i915_cmdbuffer_t *cmdbuf = data; 648 649 int ret; 649 650 650 - DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_i915_cmdbuffer_t __user *) data, 651 - sizeof(cmdbuf)); 652 - 653 651 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 654 - cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects); 652 + cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); 655 653 656 654 LOCK_TEST_WITH_RETURN(dev, file_priv); 657 655 658 - if (cmdbuf.num_cliprects && 659 - DRM_VERIFYAREA_READ(cmdbuf.cliprects, 660 - cmdbuf.num_cliprects * 656 + if (cmdbuf->num_cliprects && 657 + DRM_VERIFYAREA_READ(cmdbuf->cliprects, 658 + cmdbuf->num_cliprects * 661 659 sizeof(struct drm_clip_rect))) { 662 660 DRM_ERROR("Fault accessing cliprects\n"); 663 661 return -EFAULT; 664 662 } 665 663 666 - ret = i915_dispatch_cmdbuffer(dev, &cmdbuf); 664 + ret = i915_dispatch_cmdbuffer(dev, cmdbuf); 667 665 if (ret) { 668 
666 DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); 669 667 return ret; ··· 667 677 return 0; 668 678 } 669 679 670 - static int i915_flip_bufs(DRM_IOCTL_ARGS) 680 + static int i915_flip_bufs(struct drm_device *dev, void *data, 681 + struct drm_file *file_priv) 671 682 { 672 - DRM_DEVICE; 673 - 674 683 DRM_DEBUG("%s\n", __FUNCTION__); 675 684 676 685 LOCK_TEST_WITH_RETURN(dev, file_priv); ··· 677 688 return i915_dispatch_flip(dev); 678 689 } 679 690 680 - static int i915_getparam(DRM_IOCTL_ARGS) 691 + static int i915_getparam(struct drm_device *dev, void *data, 692 + struct drm_file *file_priv) 681 693 { 682 - DRM_DEVICE; 683 694 drm_i915_private_t *dev_priv = dev->dev_private; 684 - drm_i915_getparam_t param; 695 + drm_i915_getparam_t *param = data; 685 696 int value; 686 697 687 698 if (!dev_priv) { ··· 689 700 return -EINVAL; 690 701 } 691 702 692 - DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data, 693 - sizeof(param)); 694 - 695 - switch (param.param) { 703 + switch (param->param) { 696 704 case I915_PARAM_IRQ_ACTIVE: 697 705 value = dev->irq ? 
1 : 0; 698 706 break; ··· 700 714 value = READ_BREADCRUMB(dev_priv); 701 715 break; 702 716 default: 703 - DRM_ERROR("Unknown parameter %d\n", param.param); 717 + DRM_ERROR("Unknown parameter %d\n", param->param); 704 718 return -EINVAL; 705 719 } 706 720 707 - if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { 721 + if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { 708 722 DRM_ERROR("DRM_COPY_TO_USER failed\n"); 709 723 return -EFAULT; 710 724 } ··· 712 726 return 0; 713 727 } 714 728 715 - static int i915_setparam(DRM_IOCTL_ARGS) 729 + static int i915_setparam(struct drm_device *dev, void *data, 730 + struct drm_file *file_priv) 716 731 { 717 - DRM_DEVICE; 718 732 drm_i915_private_t *dev_priv = dev->dev_private; 719 - drm_i915_setparam_t param; 733 + drm_i915_setparam_t *param = data; 720 734 721 735 if (!dev_priv) { 722 736 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 723 737 return -EINVAL; 724 738 } 725 739 726 - DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data, 727 - sizeof(param)); 728 - 729 - switch (param.param) { 740 + switch (param->param) { 730 741 case I915_SETPARAM_USE_MI_BATCHBUFFER_START: 731 742 if (!IS_I965G(dev)) 732 - dev_priv->use_mi_batchbuffer_start = param.value; 743 + dev_priv->use_mi_batchbuffer_start = param->value; 733 744 break; 734 745 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: 735 - dev_priv->tex_lru_log_granularity = param.value; 746 + dev_priv->tex_lru_log_granularity = param->value; 736 747 break; 737 748 case I915_SETPARAM_ALLOW_BATCHBUFFER: 738 - dev_priv->allow_batchbuffer = param.value; 749 + dev_priv->allow_batchbuffer = param->value; 739 750 break; 740 751 default: 741 - DRM_ERROR("unknown parameter %d\n", param.param); 752 + DRM_ERROR("unknown parameter %d\n", param->param); 742 753 return -EINVAL; 743 754 } 744 755 745 756 return 0; 746 757 } 747 758 748 - static int i915_set_status_page(DRM_IOCTL_ARGS) 759 + static int i915_set_status_page(struct drm_device *dev, void 
*data, 760 + struct drm_file *file_priv) 749 761 { 750 - DRM_DEVICE; 751 762 drm_i915_private_t *dev_priv = dev->dev_private; 752 - drm_i915_hws_addr_t hws; 763 + drm_i915_hws_addr_t *hws = data; 753 764 754 765 if (!dev_priv) { 755 766 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 756 767 return -EINVAL; 757 768 } 758 - DRM_COPY_FROM_USER_IOCTL(hws, (drm_i915_hws_addr_t __user *) data, 759 - sizeof(hws)); 760 - printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws.addr); 761 769 762 - dev_priv->status_gfx_addr = hws.addr & (0x1ffff<<12); 770 + printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr); 763 771 764 - dev_priv->hws_map.offset = dev->agp->agp_info.aper_base + hws.addr; 772 + dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); 773 + 774 + dev_priv->hws_map.offset = dev->agp->agp_info.aper_base + hws->addr; 765 775 dev_priv->hws_map.size = 4*1024; 766 776 dev_priv->hws_map.type = 0; 767 777 dev_priv->hws_map.flags = 0; ··· 811 829 } 812 830 } 813 831 814 - drm_ioctl_desc_t i915_ioctls[] = { 815 - [DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 816 - [DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH}, 817 - [DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH}, 818 - [DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, DRM_AUTH}, 819 - [DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, DRM_AUTH}, 820 - [DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, DRM_AUTH}, 821 - [DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, DRM_AUTH}, 822 - [DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 823 - [DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH}, 824 - [DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH}, 825 - [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 826 - [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH}, 827 - 
[DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = { i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, 828 - [DRM_IOCTL_NR(DRM_I915_SET_VBLANK_PIPE)] = { i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, 829 - [DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] = { i915_vblank_pipe_get, DRM_AUTH }, 830 - [DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] = {i915_vblank_swap, DRM_AUTH}, 831 - [DRM_IOCTL_NR(DRM_I915_HWS_ADDR)] = {i915_set_status_page, DRM_AUTH}, 832 + struct drm_ioctl_desc i915_ioctls[] = { 833 + DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 834 + DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH), 835 + DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH), 836 + DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), 837 + DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), 838 + DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), 839 + DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH), 840 + DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 841 + DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH), 842 + DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH), 843 + DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 844 + DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), 845 + DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), 846 + DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), 847 + DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), 848 + DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), 849 + DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH), 832 850 }; 833 851 834 852 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
+19 -10
drivers/char/drm/i915_drv.h
··· 116 116 unsigned int swaps_pending; 117 117 } drm_i915_private_t; 118 118 119 - extern drm_ioctl_desc_t i915_ioctls[]; 119 + extern struct drm_ioctl_desc i915_ioctls[]; 120 120 extern int i915_max_ioctl; 121 121 122 122 /* i915_dma.c */ ··· 130 130 unsigned long arg); 131 131 132 132 /* i915_irq.c */ 133 - extern int i915_irq_emit(DRM_IOCTL_ARGS); 134 - extern int i915_irq_wait(DRM_IOCTL_ARGS); 133 + extern int i915_irq_emit(struct drm_device *dev, void *data, 134 + struct drm_file *file_priv); 135 + extern int i915_irq_wait(struct drm_device *dev, void *data, 136 + struct drm_file *file_priv); 135 137 136 138 extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence); 137 139 extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence); ··· 141 139 extern void i915_driver_irq_preinstall(struct drm_device * dev); 142 140 extern void i915_driver_irq_postinstall(struct drm_device * dev); 143 141 extern void i915_driver_irq_uninstall(struct drm_device * dev); 144 - extern int i915_vblank_pipe_set(DRM_IOCTL_ARGS); 145 - extern int i915_vblank_pipe_get(DRM_IOCTL_ARGS); 146 - extern int i915_vblank_swap(DRM_IOCTL_ARGS); 142 + extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, 143 + struct drm_file *file_priv); 144 + extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, 145 + struct drm_file *file_priv); 146 + extern int i915_vblank_swap(struct drm_device *dev, void *data, 147 + struct drm_file *file_priv); 147 148 148 149 /* i915_mem.c */ 149 - extern int i915_mem_alloc(DRM_IOCTL_ARGS); 150 - extern int i915_mem_free(DRM_IOCTL_ARGS); 151 - extern int i915_mem_init_heap(DRM_IOCTL_ARGS); 152 - extern int i915_mem_destroy_heap(DRM_IOCTL_ARGS); 150 + extern int i915_mem_alloc(struct drm_device *dev, void *data, 151 + struct drm_file *file_priv); 152 + extern int i915_mem_free(struct drm_device *dev, void *data, 153 + struct drm_file *file_priv); 154 + extern int i915_mem_init_heap(struct 
drm_device *dev, void *data, 155 + struct drm_file *file_priv); 156 + extern int i915_mem_destroy_heap(struct drm_device *dev, void *data, 157 + struct drm_file *file_priv); 153 158 extern void i915_mem_takedown(struct mem_block **heap); 154 159 extern void i915_mem_release(struct drm_device * dev, 155 160 struct drm_file *file_priv, struct mem_block *heap);
+38 -54
drivers/char/drm/i915_irq.c
··· 355 355 356 356 /* Needs the lock as it touches the ring. 357 357 */ 358 - int i915_irq_emit(DRM_IOCTL_ARGS) 358 + int i915_irq_emit(struct drm_device *dev, void *data, 359 + struct drm_file *file_priv) 359 360 { 360 - DRM_DEVICE; 361 361 drm_i915_private_t *dev_priv = dev->dev_private; 362 - drm_i915_irq_emit_t emit; 362 + drm_i915_irq_emit_t *emit = data; 363 363 int result; 364 364 365 365 LOCK_TEST_WITH_RETURN(dev, file_priv); ··· 369 369 return -EINVAL; 370 370 } 371 371 372 - DRM_COPY_FROM_USER_IOCTL(emit, (drm_i915_irq_emit_t __user *) data, 373 - sizeof(emit)); 374 - 375 372 result = i915_emit_irq(dev); 376 373 377 - if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) { 374 + if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { 378 375 DRM_ERROR("copy_to_user\n"); 379 376 return -EFAULT; 380 377 } ··· 381 384 382 385 /* Doesn't need the hardware lock. 383 386 */ 384 - int i915_irq_wait(DRM_IOCTL_ARGS) 387 + int i915_irq_wait(struct drm_device *dev, void *data, 388 + struct drm_file *file_priv) 385 389 { 386 - DRM_DEVICE; 387 390 drm_i915_private_t *dev_priv = dev->dev_private; 388 - drm_i915_irq_wait_t irqwait; 391 + drm_i915_irq_wait_t *irqwait = data; 389 392 390 393 if (!dev_priv) { 391 394 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 392 395 return -EINVAL; 393 396 } 394 397 395 - DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_i915_irq_wait_t __user *) data, 396 - sizeof(irqwait)); 397 - 398 - return i915_wait_irq(dev, irqwait.irq_seq); 398 + return i915_wait_irq(dev, irqwait->irq_seq); 399 399 } 400 400 401 401 static void i915_enable_interrupt (struct drm_device *dev) ··· 411 417 412 418 /* Set the vblank monitor pipe 413 419 */ 414 - int i915_vblank_pipe_set(DRM_IOCTL_ARGS) 420 + int i915_vblank_pipe_set(struct drm_device *dev, void *data, 421 + struct drm_file *file_priv) 415 422 { 416 - DRM_DEVICE; 417 423 drm_i915_private_t *dev_priv = dev->dev_private; 418 - drm_i915_vblank_pipe_t pipe; 424 + drm_i915_vblank_pipe_t 
*pipe = data; 419 425 420 426 if (!dev_priv) { 421 427 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 422 428 return -EINVAL; 423 429 } 424 430 425 - DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data, 426 - sizeof(pipe)); 427 - 428 - if (pipe.pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) { 431 + if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) { 429 432 DRM_ERROR("%s called with invalid pipe 0x%x\n", 430 - __FUNCTION__, pipe.pipe); 433 + __FUNCTION__, pipe->pipe); 431 434 return -EINVAL; 432 435 } 433 436 434 - dev_priv->vblank_pipe = pipe.pipe; 437 + dev_priv->vblank_pipe = pipe->pipe; 435 438 436 439 i915_enable_interrupt (dev); 437 440 438 441 return 0; 439 442 } 440 443 441 - int i915_vblank_pipe_get(DRM_IOCTL_ARGS) 444 + int i915_vblank_pipe_get(struct drm_device *dev, void *data, 445 + struct drm_file *file_priv) 442 446 { 443 - DRM_DEVICE; 444 447 drm_i915_private_t *dev_priv = dev->dev_private; 445 - drm_i915_vblank_pipe_t pipe; 448 + drm_i915_vblank_pipe_t *pipe = data; 446 449 u16 flag; 447 450 448 451 if (!dev_priv) { ··· 448 457 } 449 458 450 459 flag = I915_READ(I915REG_INT_ENABLE_R); 451 - pipe.pipe = 0; 460 + pipe->pipe = 0; 452 461 if (flag & VSYNC_PIPEA_FLAG) 453 - pipe.pipe |= DRM_I915_VBLANK_PIPE_A; 462 + pipe->pipe |= DRM_I915_VBLANK_PIPE_A; 454 463 if (flag & VSYNC_PIPEB_FLAG) 455 - pipe.pipe |= DRM_I915_VBLANK_PIPE_B; 456 - DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_pipe_t __user *) data, pipe, 457 - sizeof(pipe)); 464 + pipe->pipe |= DRM_I915_VBLANK_PIPE_B; 465 + 458 466 return 0; 459 467 } 460 468 461 469 /** 462 470 * Schedule buffer swap at given vertical blank. 
463 471 */ 464 - int i915_vblank_swap(DRM_IOCTL_ARGS) 472 + int i915_vblank_swap(struct drm_device *dev, void *data, 473 + struct drm_file *file_priv) 465 474 { 466 - DRM_DEVICE; 467 475 drm_i915_private_t *dev_priv = dev->dev_private; 468 - drm_i915_vblank_swap_t swap; 476 + drm_i915_vblank_swap_t *swap = data; 469 477 drm_i915_vbl_swap_t *vbl_swap; 470 478 unsigned int pipe, seqtype, curseq; 471 479 unsigned long irqflags; ··· 480 490 return -EINVAL; 481 491 } 482 492 483 - DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data, 484 - sizeof(swap)); 485 - 486 - if (swap.seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE | 493 + if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE | 487 494 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) { 488 - DRM_ERROR("Invalid sequence type 0x%x\n", swap.seqtype); 495 + DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype); 489 496 return -EINVAL; 490 497 } 491 498 492 - pipe = (swap.seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; 499 + pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; 493 500 494 - seqtype = swap.seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); 501 + seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); 495 502 496 503 if (!(dev_priv->vblank_pipe & (1 << pipe))) { 497 504 DRM_ERROR("Invalid pipe %d\n", pipe); ··· 497 510 498 511 spin_lock_irqsave(&dev->drw_lock, irqflags); 499 512 500 - if (!drm_get_drawable_info(dev, swap.drawable)) { 513 + if (!drm_get_drawable_info(dev, swap->drawable)) { 501 514 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 502 - DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable); 515 + DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable); 503 516 return -EINVAL; 504 517 } 505 518 ··· 508 521 curseq = atomic_read(pipe ? 
&dev->vbl_received2 : &dev->vbl_received); 509 522 510 523 if (seqtype == _DRM_VBLANK_RELATIVE) 511 - swap.sequence += curseq; 524 + swap->sequence += curseq; 512 525 513 - if ((curseq - swap.sequence) <= (1<<23)) { 514 - if (swap.seqtype & _DRM_VBLANK_NEXTONMISS) { 515 - swap.sequence = curseq + 1; 526 + if ((curseq - swap->sequence) <= (1<<23)) { 527 + if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) { 528 + swap->sequence = curseq + 1; 516 529 } else { 517 530 DRM_DEBUG("Missed target sequence\n"); 518 531 return -EINVAL; ··· 524 537 list_for_each(list, &dev_priv->vbl_swaps.head) { 525 538 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); 526 539 527 - if (vbl_swap->drw_id == swap.drawable && 540 + if (vbl_swap->drw_id == swap->drawable && 528 541 vbl_swap->pipe == pipe && 529 - vbl_swap->sequence == swap.sequence) { 542 + vbl_swap->sequence == swap->sequence) { 530 543 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 531 544 DRM_DEBUG("Already scheduled\n"); 532 545 return 0; ··· 549 562 550 563 DRM_DEBUG("\n"); 551 564 552 - vbl_swap->drw_id = swap.drawable; 565 + vbl_swap->drw_id = swap->drawable; 553 566 vbl_swap->pipe = pipe; 554 - vbl_swap->sequence = swap.sequence; 567 + vbl_swap->sequence = swap->sequence; 555 568 556 569 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); 557 570 ··· 559 572 dev_priv->swaps_pending++; 560 573 561 574 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 562 - 563 - DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_swap_t __user *) data, swap, 564 - sizeof(swap)); 565 575 566 576 return 0; 567 577 }
+23 -35
drivers/char/drm/i915_mem.c
··· 268 268 269 269 /* IOCTL HANDLERS */ 270 270 271 - int i915_mem_alloc(DRM_IOCTL_ARGS) 271 + int i915_mem_alloc(struct drm_device *dev, void *data, 272 + struct drm_file *file_priv) 272 273 { 273 - DRM_DEVICE; 274 274 drm_i915_private_t *dev_priv = dev->dev_private; 275 - drm_i915_mem_alloc_t alloc; 275 + drm_i915_mem_alloc_t *alloc = data; 276 276 struct mem_block *block, **heap; 277 277 278 278 if (!dev_priv) { ··· 280 280 return -EINVAL; 281 281 } 282 282 283 - DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data, 284 - sizeof(alloc)); 285 - 286 - heap = get_heap(dev_priv, alloc.region); 283 + heap = get_heap(dev_priv, alloc->region); 287 284 if (!heap || !*heap) 288 285 return -EFAULT; 289 286 290 287 /* Make things easier on ourselves: all allocations at least 291 288 * 4k aligned. 292 289 */ 293 - if (alloc.alignment < 12) 294 - alloc.alignment = 12; 290 + if (alloc->alignment < 12) 291 + alloc->alignment = 12; 295 292 296 - block = alloc_block(*heap, alloc.size, alloc.alignment, file_priv); 293 + block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv); 297 294 298 295 if (!block) 299 296 return -ENOMEM; 300 297 301 298 mark_block(dev, block, 1); 302 299 303 - if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) { 300 + if (DRM_COPY_TO_USER(alloc->region_offset, &block->start, 301 + sizeof(int))) { 304 302 DRM_ERROR("copy_to_user\n"); 305 303 return -EFAULT; 306 304 } ··· 306 308 return 0; 307 309 } 308 310 309 - int i915_mem_free(DRM_IOCTL_ARGS) 311 + int i915_mem_free(struct drm_device *dev, void *data, 312 + struct drm_file *file_priv) 310 313 { 311 - DRM_DEVICE; 312 314 drm_i915_private_t *dev_priv = dev->dev_private; 313 - drm_i915_mem_free_t memfree; 315 + drm_i915_mem_free_t *memfree = data; 314 316 struct mem_block *block, **heap; 315 317 316 318 if (!dev_priv) { ··· 318 320 return -EINVAL; 319 321 } 320 322 321 - DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data, 322 - 
sizeof(memfree)); 323 - 324 - heap = get_heap(dev_priv, memfree.region); 323 + heap = get_heap(dev_priv, memfree->region); 325 324 if (!heap || !*heap) 326 325 return -EFAULT; 327 326 328 - block = find_block(*heap, memfree.region_offset); 327 + block = find_block(*heap, memfree->region_offset); 329 328 if (!block) 330 329 return -EFAULT; 331 330 ··· 334 339 return 0; 335 340 } 336 341 337 - int i915_mem_init_heap(DRM_IOCTL_ARGS) 342 + int i915_mem_init_heap(struct drm_device *dev, void *data, 343 + struct drm_file *file_priv) 338 344 { 339 - DRM_DEVICE; 340 345 drm_i915_private_t *dev_priv = dev->dev_private; 341 - drm_i915_mem_init_heap_t initheap; 346 + drm_i915_mem_init_heap_t *initheap = data; 342 347 struct mem_block **heap; 343 348 344 349 if (!dev_priv) { ··· 346 351 return -EINVAL; 347 352 } 348 353 349 - DRM_COPY_FROM_USER_IOCTL(initheap, 350 - (drm_i915_mem_init_heap_t __user *) data, 351 - sizeof(initheap)); 352 - 353 - heap = get_heap(dev_priv, initheap.region); 354 + heap = get_heap(dev_priv, initheap->region); 354 355 if (!heap) 355 356 return -EFAULT; 356 357 ··· 355 364 return -EFAULT; 356 365 } 357 366 358 - return init_heap(heap, initheap.start, initheap.size); 367 + return init_heap(heap, initheap->start, initheap->size); 359 368 } 360 369 361 - int i915_mem_destroy_heap( DRM_IOCTL_ARGS ) 370 + int i915_mem_destroy_heap( struct drm_device *dev, void *data, 371 + struct drm_file *file_priv ) 362 372 { 363 - DRM_DEVICE; 364 373 drm_i915_private_t *dev_priv = dev->dev_private; 365 - drm_i915_mem_destroy_heap_t destroyheap; 374 + drm_i915_mem_destroy_heap_t *destroyheap = data; 366 375 struct mem_block **heap; 367 376 368 377 if ( !dev_priv ) { ··· 370 379 return -EINVAL; 371 380 } 372 381 373 - DRM_COPY_FROM_USER_IOCTL( destroyheap, (drm_i915_mem_destroy_heap_t *)data, 374 - sizeof(destroyheap) ); 375 - 376 - heap = get_heap( dev_priv, destroyheap.region ); 382 + heap = get_heap( dev_priv, destroyheap->region ); 377 383 if (!heap) { 378 384 
DRM_ERROR("get_heap failed"); 379 385 return -EFAULT;
+34 -51
drivers/char/drm/mga_dma.c
··· 759 759 return err; 760 760 } 761 761 762 - int mga_dma_bootstrap(DRM_IOCTL_ARGS) 762 + int mga_dma_bootstrap(struct drm_device *dev, void *data, 763 + struct drm_file *file_priv) 763 764 { 764 - DRM_DEVICE; 765 - drm_mga_dma_bootstrap_t bootstrap; 765 + drm_mga_dma_bootstrap_t *bootstrap = data; 766 766 int err; 767 767 static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 }; 768 768 const drm_mga_private_t *const dev_priv = 769 769 (drm_mga_private_t *) dev->dev_private; 770 770 771 - DRM_COPY_FROM_USER_IOCTL(bootstrap, 772 - (drm_mga_dma_bootstrap_t __user *) data, 773 - sizeof(bootstrap)); 774 - 775 - err = mga_do_dma_bootstrap(dev, &bootstrap); 771 + err = mga_do_dma_bootstrap(dev, bootstrap); 776 772 if (err) { 777 773 mga_do_cleanup_dma(dev, FULL_CLEANUP); 778 774 return err; 779 775 } 780 776 781 777 if (dev_priv->agp_textures != NULL) { 782 - bootstrap.texture_handle = dev_priv->agp_textures->offset; 783 - bootstrap.texture_size = dev_priv->agp_textures->size; 778 + bootstrap->texture_handle = dev_priv->agp_textures->offset; 779 + bootstrap->texture_size = dev_priv->agp_textures->size; 784 780 } else { 785 - bootstrap.texture_handle = 0; 786 - bootstrap.texture_size = 0; 781 + bootstrap->texture_handle = 0; 782 + bootstrap->texture_size = 0; 787 783 } 788 784 789 - bootstrap.agp_mode = modes[bootstrap.agp_mode & 0x07]; 790 - DRM_COPY_TO_USER_IOCTL((drm_mga_dma_bootstrap_t __user *)data, 791 - bootstrap, sizeof(bootstrap)); 785 + bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07]; 792 786 793 787 return err; 794 788 } ··· 1001 1007 return 0; 1002 1008 } 1003 1009 1004 - int mga_dma_init(DRM_IOCTL_ARGS) 1010 + int mga_dma_init(struct drm_device *dev, void *data, 1011 + struct drm_file *file_priv) 1005 1012 { 1006 - DRM_DEVICE; 1007 - drm_mga_init_t init; 1013 + drm_mga_init_t *init = data; 1008 1014 int err; 1009 1015 1010 1016 LOCK_TEST_WITH_RETURN(dev, file_priv); 1011 1017 1012 - DRM_COPY_FROM_USER_IOCTL(init, (drm_mga_init_t __user *) data, 1013 - 
sizeof(init)); 1014 - 1015 - switch (init.func) { 1018 + switch (init->func) { 1016 1019 case MGA_INIT_DMA: 1017 - err = mga_do_init_dma(dev, &init); 1020 + err = mga_do_init_dma(dev, init); 1018 1021 if (err) { 1019 1022 (void)mga_do_cleanup_dma(dev, FULL_CLEANUP); 1020 1023 } ··· 1027 1036 * Primary DMA stream management 1028 1037 */ 1029 1038 1030 - int mga_dma_flush(DRM_IOCTL_ARGS) 1039 + int mga_dma_flush(struct drm_device *dev, void *data, 1040 + struct drm_file *file_priv) 1031 1041 { 1032 - DRM_DEVICE; 1033 1042 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 1034 - struct drm_lock lock; 1043 + struct drm_lock *lock = data; 1035 1044 1036 1045 LOCK_TEST_WITH_RETURN(dev, file_priv); 1037 1046 1038 - DRM_COPY_FROM_USER_IOCTL(lock, (struct drm_lock __user *) data, 1039 - sizeof(lock)); 1040 - 1041 1047 DRM_DEBUG("%s%s%s\n", 1042 - (lock.flags & _DRM_LOCK_FLUSH) ? "flush, " : "", 1043 - (lock.flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "", 1044 - (lock.flags & _DRM_LOCK_QUIESCENT) ? "idle, " : ""); 1048 + (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "", 1049 + (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "", 1050 + (lock->flags & _DRM_LOCK_QUIESCENT) ? 
"idle, " : ""); 1045 1051 1046 1052 WRAP_WAIT_WITH_RETURN(dev_priv); 1047 1053 1048 - if (lock.flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) { 1054 + if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) { 1049 1055 mga_do_dma_flush(dev_priv); 1050 1056 } 1051 1057 1052 - if (lock.flags & _DRM_LOCK_QUIESCENT) { 1058 + if (lock->flags & _DRM_LOCK_QUIESCENT) { 1053 1059 #if MGA_DMA_DEBUG 1054 1060 int ret = mga_do_wait_for_idle(dev_priv); 1055 1061 if (ret < 0) ··· 1060 1072 } 1061 1073 } 1062 1074 1063 - int mga_dma_reset(DRM_IOCTL_ARGS) 1075 + int mga_dma_reset(struct drm_device *dev, void *data, 1076 + struct drm_file *file_priv) 1064 1077 { 1065 - DRM_DEVICE; 1066 1078 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 1067 1079 1068 1080 LOCK_TEST_WITH_RETURN(dev, file_priv); ··· 1099 1111 return 0; 1100 1112 } 1101 1113 1102 - int mga_dma_buffers(DRM_IOCTL_ARGS) 1114 + int mga_dma_buffers(struct drm_device *dev, void *data, 1115 + struct drm_file *file_priv) 1103 1116 { 1104 - DRM_DEVICE; 1105 1117 struct drm_device_dma *dma = dev->dma; 1106 1118 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 1107 - struct drm_dma __user *argp = (void __user *)data; 1108 - struct drm_dma d; 1119 + struct drm_dma *d = data; 1109 1120 int ret = 0; 1110 1121 1111 1122 LOCK_TEST_WITH_RETURN(dev, file_priv); 1112 1123 1113 - DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d)); 1114 - 1115 1124 /* Please don't send us buffers. 1116 1125 */ 1117 - if (d.send_count != 0) { 1126 + if (d->send_count != 0) { 1118 1127 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", 1119 - DRM_CURRENTPID, d.send_count); 1128 + DRM_CURRENTPID, d->send_count); 1120 1129 return -EINVAL; 1121 1130 } 1122 1131 1123 1132 /* We'll send you buffers. 
1124 1133 */ 1125 - if (d.request_count < 0 || d.request_count > dma->buf_count) { 1134 + if (d->request_count < 0 || d->request_count > dma->buf_count) { 1126 1135 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", 1127 - DRM_CURRENTPID, d.request_count, dma->buf_count); 1136 + DRM_CURRENTPID, d->request_count, dma->buf_count); 1128 1137 return -EINVAL; 1129 1138 } 1130 1139 1131 1140 WRAP_TEST_WITH_RETURN(dev_priv); 1132 1141 1133 - d.granted_count = 0; 1142 + d->granted_count = 0; 1134 1143 1135 - if (d.request_count) { 1136 - ret = mga_dma_get_buffers(dev, file_priv, &d); 1144 + if (d->request_count) { 1145 + ret = mga_dma_get_buffers(dev, file_priv, d); 1137 1146 } 1138 - 1139 - DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d)); 1140 1147 1141 1148 return ret; 1142 1149 }
+11 -6
drivers/char/drm/mga_drv.h
··· 148 148 unsigned int agp_size; 149 149 } drm_mga_private_t; 150 150 151 - extern drm_ioctl_desc_t mga_ioctls[]; 151 + extern struct drm_ioctl_desc mga_ioctls[]; 152 152 extern int mga_max_ioctl; 153 153 154 154 /* mga_dma.c */ 155 - extern int mga_dma_bootstrap(DRM_IOCTL_ARGS); 156 - extern int mga_dma_init(DRM_IOCTL_ARGS); 157 - extern int mga_dma_flush(DRM_IOCTL_ARGS); 158 - extern int mga_dma_reset(DRM_IOCTL_ARGS); 159 - extern int mga_dma_buffers(DRM_IOCTL_ARGS); 155 + extern int mga_dma_bootstrap(struct drm_device *dev, void *data, 156 + struct drm_file *file_priv); 157 + extern int mga_dma_init(struct drm_device *dev, void *data, 158 + struct drm_file *file_priv); 159 + extern int mga_dma_flush(struct drm_device *dev, void *data, 160 + struct drm_file *file_priv); 161 + extern int mga_dma_reset(struct drm_device *dev, void *data, 162 + struct drm_file *file_priv); 163 + extern int mga_dma_buffers(struct drm_device *dev, void *data, 164 + struct drm_file *file_priv); 160 165 extern int mga_driver_load(struct drm_device *dev, unsigned long flags); 161 166 extern int mga_driver_unload(struct drm_device * dev); 162 167 extern void mga_driver_lastclose(struct drm_device * dev);
+54 -95
drivers/char/drm/mga_state.c
··· 828 828 * 829 829 */ 830 830 831 - static int mga_dma_clear(DRM_IOCTL_ARGS) 831 + static int mga_dma_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) 832 832 { 833 - DRM_DEVICE; 834 833 drm_mga_private_t *dev_priv = dev->dev_private; 835 834 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 836 - drm_mga_clear_t clear; 835 + drm_mga_clear_t *clear = data; 837 836 838 837 LOCK_TEST_WITH_RETURN(dev, file_priv); 839 - 840 - DRM_COPY_FROM_USER_IOCTL(clear, (drm_mga_clear_t __user *) data, 841 - sizeof(clear)); 842 838 843 839 if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) 844 840 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; 845 841 846 842 WRAP_TEST_WITH_RETURN(dev_priv); 847 843 848 - mga_dma_dispatch_clear(dev, &clear); 844 + mga_dma_dispatch_clear(dev, clear); 849 845 850 846 /* Make sure we restore the 3D state next time. 851 847 */ ··· 850 854 return 0; 851 855 } 852 856 853 - static int mga_dma_swap(DRM_IOCTL_ARGS) 857 + static int mga_dma_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) 854 858 { 855 - DRM_DEVICE; 856 859 drm_mga_private_t *dev_priv = dev->dev_private; 857 860 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 858 861 ··· 871 876 return 0; 872 877 } 873 878 874 - static int mga_dma_vertex(DRM_IOCTL_ARGS) 879 + static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) 875 880 { 876 - DRM_DEVICE; 877 881 drm_mga_private_t *dev_priv = dev->dev_private; 878 882 struct drm_device_dma *dma = dev->dma; 879 883 struct drm_buf *buf; 880 884 drm_mga_buf_priv_t *buf_priv; 881 - drm_mga_vertex_t vertex; 885 + drm_mga_vertex_t *vertex = data; 882 886 883 887 LOCK_TEST_WITH_RETURN(dev, file_priv); 884 888 885 - DRM_COPY_FROM_USER_IOCTL(vertex, 886 - (drm_mga_vertex_t __user *) data, 887 - sizeof(vertex)); 888 - 889 - if (vertex.idx < 0 || vertex.idx > dma->buf_count) 889 + if (vertex->idx < 0 || vertex->idx > dma->buf_count) 890 890 return -EINVAL; 891 - buf = 
dma->buflist[vertex.idx]; 891 + buf = dma->buflist[vertex->idx]; 892 892 buf_priv = buf->dev_private; 893 893 894 - buf->used = vertex.used; 895 - buf_priv->discard = vertex.discard; 894 + buf->used = vertex->used; 895 + buf_priv->discard = vertex->discard; 896 896 897 897 if (!mga_verify_state(dev_priv)) { 898 - if (vertex.discard) { 898 + if (vertex->discard) { 899 899 if (buf_priv->dispatched == 1) 900 900 AGE_BUFFER(buf_priv); 901 901 buf_priv->dispatched = 0; ··· 906 916 return 0; 907 917 } 908 918 909 - static int mga_dma_indices(DRM_IOCTL_ARGS) 919 + static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) 910 920 { 911 - DRM_DEVICE; 912 921 drm_mga_private_t *dev_priv = dev->dev_private; 913 922 struct drm_device_dma *dma = dev->dma; 914 923 struct drm_buf *buf; 915 924 drm_mga_buf_priv_t *buf_priv; 916 - drm_mga_indices_t indices; 925 + drm_mga_indices_t *indices = data; 917 926 918 927 LOCK_TEST_WITH_RETURN(dev, file_priv); 919 928 920 - DRM_COPY_FROM_USER_IOCTL(indices, 921 - (drm_mga_indices_t __user *) data, 922 - sizeof(indices)); 923 - 924 - if (indices.idx < 0 || indices.idx > dma->buf_count) 929 + if (indices->idx < 0 || indices->idx > dma->buf_count) 925 930 return -EINVAL; 926 931 927 - buf = dma->buflist[indices.idx]; 932 + buf = dma->buflist[indices->idx]; 928 933 buf_priv = buf->dev_private; 929 934 930 - buf_priv->discard = indices.discard; 935 + buf_priv->discard = indices->discard; 931 936 932 937 if (!mga_verify_state(dev_priv)) { 933 - if (indices.discard) { 938 + if (indices->discard) { 934 939 if (buf_priv->dispatched == 1) 935 940 AGE_BUFFER(buf_priv); 936 941 buf_priv->dispatched = 0; ··· 936 951 937 952 WRAP_TEST_WITH_RETURN(dev_priv); 938 953 939 - mga_dma_dispatch_indices(dev, buf, indices.start, indices.end); 954 + mga_dma_dispatch_indices(dev, buf, indices->start, indices->end); 940 955 941 956 return 0; 942 957 } 943 958 944 - static int mga_dma_iload(DRM_IOCTL_ARGS) 959 + static int 
mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv) 945 960 { 946 - DRM_DEVICE; 947 961 struct drm_device_dma *dma = dev->dma; 948 962 drm_mga_private_t *dev_priv = dev->dev_private; 949 963 struct drm_buf *buf; 950 964 drm_mga_buf_priv_t *buf_priv; 951 - drm_mga_iload_t iload; 965 + drm_mga_iload_t *iload = data; 952 966 DRM_DEBUG("\n"); 953 967 954 968 LOCK_TEST_WITH_RETURN(dev, file_priv); 955 - 956 - DRM_COPY_FROM_USER_IOCTL(iload, (drm_mga_iload_t __user *) data, 957 - sizeof(iload)); 958 969 959 970 #if 0 960 971 if (mga_do_wait_for_idle(dev_priv) < 0) { ··· 959 978 return -EBUSY; 960 979 } 961 980 #endif 962 - if (iload.idx < 0 || iload.idx > dma->buf_count) 981 + if (iload->idx < 0 || iload->idx > dma->buf_count) 963 982 return -EINVAL; 964 983 965 - buf = dma->buflist[iload.idx]; 984 + buf = dma->buflist[iload->idx]; 966 985 buf_priv = buf->dev_private; 967 986 968 - if (mga_verify_iload(dev_priv, iload.dstorg, iload.length)) { 987 + if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) { 969 988 mga_freelist_put(dev, buf); 970 989 return -EINVAL; 971 990 } 972 991 973 992 WRAP_TEST_WITH_RETURN(dev_priv); 974 993 975 - mga_dma_dispatch_iload(dev, buf, iload.dstorg, iload.length); 994 + mga_dma_dispatch_iload(dev, buf, iload->dstorg, iload->length); 976 995 977 996 /* Make sure we restore the 3D state next time. 
978 997 */ ··· 981 1000 return 0; 982 1001 } 983 1002 984 - static int mga_dma_blit(DRM_IOCTL_ARGS) 1003 + static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv) 985 1004 { 986 - DRM_DEVICE; 987 1005 drm_mga_private_t *dev_priv = dev->dev_private; 988 1006 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; 989 - drm_mga_blit_t blit; 1007 + drm_mga_blit_t *blit = data; 990 1008 DRM_DEBUG("\n"); 991 1009 992 1010 LOCK_TEST_WITH_RETURN(dev, file_priv); 993 1011 994 - DRM_COPY_FROM_USER_IOCTL(blit, (drm_mga_blit_t __user *) data, 995 - sizeof(blit)); 996 - 997 1012 if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) 998 1013 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; 999 1014 1000 - if (mga_verify_blit(dev_priv, blit.srcorg, blit.dstorg)) 1015 + if (mga_verify_blit(dev_priv, blit->srcorg, blit->dstorg)) 1001 1016 return -EINVAL; 1002 1017 1003 1018 WRAP_TEST_WITH_RETURN(dev_priv); 1004 1019 1005 - mga_dma_dispatch_blit(dev, &blit); 1020 + mga_dma_dispatch_blit(dev, blit); 1006 1021 1007 1022 /* Make sure we restore the 3D state next time. 
1008 1023 */ ··· 1007 1030 return 0; 1008 1031 } 1009 1032 1010 - static int mga_getparam(DRM_IOCTL_ARGS) 1033 + static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) 1011 1034 { 1012 - DRM_DEVICE; 1013 1035 drm_mga_private_t *dev_priv = dev->dev_private; 1014 - drm_mga_getparam_t param; 1036 + drm_mga_getparam_t *param = data; 1015 1037 int value; 1016 1038 1017 1039 if (!dev_priv) { ··· 1018 1042 return -EINVAL; 1019 1043 } 1020 1044 1021 - DRM_COPY_FROM_USER_IOCTL(param, (drm_mga_getparam_t __user *) data, 1022 - sizeof(param)); 1023 - 1024 1045 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); 1025 1046 1026 - switch (param.param) { 1047 + switch (param->param) { 1027 1048 case MGA_PARAM_IRQ_NR: 1028 1049 value = dev->irq; 1029 1050 break; ··· 1031 1058 return -EINVAL; 1032 1059 } 1033 1060 1034 - if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { 1061 + if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { 1035 1062 DRM_ERROR("copy_to_user\n"); 1036 1063 return -EFAULT; 1037 1064 } ··· 1039 1066 return 0; 1040 1067 } 1041 1068 1042 - static int mga_set_fence(DRM_IOCTL_ARGS) 1069 + static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *file_priv) 1043 1070 { 1044 - DRM_DEVICE; 1045 1071 drm_mga_private_t *dev_priv = dev->dev_private; 1046 - u32 temp; 1072 + u32 *fence = data; 1047 1073 DMA_LOCALS; 1048 1074 1049 1075 if (!dev_priv) { ··· 1052 1080 1053 1081 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); 1054 1082 1055 - /* I would normal do this assignment in the declaration of temp, 1083 + /* I would normal do this assignment in the declaration of fence, 1056 1084 * but dev_priv may be NULL. 
1057 1085 */ 1058 1086 1059 - temp = dev_priv->next_fence_to_post; 1087 + *fence = dev_priv->next_fence_to_post; 1060 1088 dev_priv->next_fence_to_post++; 1061 1089 1062 1090 BEGIN_DMA(1); ··· 1065 1093 MGA_DMAPAD, 0x00000000, MGA_SOFTRAP, 0x00000000); 1066 1094 ADVANCE_DMA(); 1067 1095 1068 - if (DRM_COPY_TO_USER((u32 __user *) data, &temp, sizeof(u32))) { 1069 - DRM_ERROR("copy_to_user\n"); 1070 - return -EFAULT; 1071 - } 1072 - 1073 1096 return 0; 1074 1097 } 1075 1098 1076 - static int mga_wait_fence(DRM_IOCTL_ARGS) 1099 + static int mga_wait_fence(struct drm_device *dev, void *data, struct drm_file * 1100 + file_priv) 1077 1101 { 1078 - DRM_DEVICE; 1079 1102 drm_mga_private_t *dev_priv = dev->dev_private; 1080 - u32 fence; 1103 + u32 *fence = data; 1081 1104 1082 1105 if (!dev_priv) { 1083 1106 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1084 1107 return -EINVAL; 1085 1108 } 1086 1109 1087 - DRM_COPY_FROM_USER_IOCTL(fence, (u32 __user *) data, sizeof(u32)); 1088 - 1089 1110 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); 1090 1111 1091 - mga_driver_fence_wait(dev, &fence); 1092 - 1093 - if (DRM_COPY_TO_USER((u32 __user *) data, &fence, sizeof(u32))) { 1094 - DRM_ERROR("copy_to_user\n"); 1095 - return -EFAULT; 1096 - } 1097 - 1112 + mga_driver_fence_wait(dev, fence); 1098 1113 return 0; 1099 1114 } 1100 1115 1101 - drm_ioctl_desc_t mga_ioctls[] = { 1102 - [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1103 - [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, DRM_AUTH}, 1104 - [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, DRM_AUTH}, 1105 - [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, DRM_AUTH}, 1106 - [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, DRM_AUTH}, 1107 - [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, DRM_AUTH}, 1108 - [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, DRM_AUTH}, 1109 - [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, DRM_AUTH}, 1110 - [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, 
DRM_AUTH}, 1111 - [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, DRM_AUTH}, 1112 - [DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, DRM_AUTH}, 1113 - [DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, DRM_AUTH}, 1114 - [DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1116 + struct drm_ioctl_desc mga_ioctls[] = { 1117 + DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1118 + DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH), 1119 + DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH), 1120 + DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH), 1121 + DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH), 1122 + DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH), 1123 + DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH), 1124 + DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH), 1125 + DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH), 1126 + DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH), 1127 + DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH), 1128 + DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH), 1129 + DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1115 1130 }; 1116 1131 1117 1132 int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
+22 -40
drivers/char/drm/r128_cce.c
··· 625 625 return 0; 626 626 } 627 627 628 - int r128_cce_init(DRM_IOCTL_ARGS) 628 + int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv) 629 629 { 630 - DRM_DEVICE; 631 - drm_r128_init_t init; 630 + drm_r128_init_t *init = data; 632 631 633 632 DRM_DEBUG("\n"); 634 633 635 634 LOCK_TEST_WITH_RETURN(dev, file_priv); 636 635 637 - DRM_COPY_FROM_USER_IOCTL(init, (drm_r128_init_t __user *) data, 638 - sizeof(init)); 639 - 640 - switch (init.func) { 636 + switch (init->func) { 641 637 case R128_INIT_CCE: 642 - return r128_do_init_cce(dev, &init); 638 + return r128_do_init_cce(dev, init); 643 639 case R128_CLEANUP_CCE: 644 640 return r128_do_cleanup_cce(dev); 645 641 } ··· 643 647 return -EINVAL; 644 648 } 645 649 646 - int r128_cce_start(DRM_IOCTL_ARGS) 650 + int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv) 647 651 { 648 - DRM_DEVICE; 649 652 drm_r128_private_t *dev_priv = dev->dev_private; 650 653 DRM_DEBUG("\n"); 651 654 ··· 663 668 /* Stop the CCE. The engine must have been idled before calling this 664 669 * routine. 665 670 */ 666 - int r128_cce_stop(DRM_IOCTL_ARGS) 671 + int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv) 667 672 { 668 - DRM_DEVICE; 669 673 drm_r128_private_t *dev_priv = dev->dev_private; 670 - drm_r128_cce_stop_t stop; 674 + drm_r128_cce_stop_t *stop = data; 671 675 int ret; 672 676 DRM_DEBUG("\n"); 673 677 674 678 LOCK_TEST_WITH_RETURN(dev, file_priv); 675 679 676 - DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t __user *) data, 677 - sizeof(stop)); 678 - 679 680 /* Flush any pending CCE commands. This ensures any outstanding 680 681 * commands are exectuted by the engine before we turn it off. 681 682 */ 682 - if (stop.flush) { 683 + if (stop->flush) { 683 684 r128_do_cce_flush(dev_priv); 684 685 } 685 686 686 687 /* If we fail to make the engine go idle, we return an error 687 688 * code so that the DRM ioctl wrapper can try again. 
688 689 */ 689 - if (stop.idle) { 690 + if (stop->idle) { 690 691 ret = r128_do_cce_idle(dev_priv); 691 692 if (ret) 692 693 return ret; ··· 702 711 703 712 /* Just reset the CCE ring. Called as part of an X Server engine reset. 704 713 */ 705 - int r128_cce_reset(DRM_IOCTL_ARGS) 714 + int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) 706 715 { 707 - DRM_DEVICE; 708 716 drm_r128_private_t *dev_priv = dev->dev_private; 709 717 DRM_DEBUG("\n"); 710 718 ··· 722 732 return 0; 723 733 } 724 734 725 - int r128_cce_idle(DRM_IOCTL_ARGS) 735 + int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv) 726 736 { 727 - DRM_DEVICE; 728 737 drm_r128_private_t *dev_priv = dev->dev_private; 729 738 DRM_DEBUG("\n"); 730 739 ··· 736 747 return r128_do_cce_idle(dev_priv); 737 748 } 738 749 739 - int r128_engine_reset(DRM_IOCTL_ARGS) 750 + int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) 740 751 { 741 - DRM_DEVICE; 742 752 DRM_DEBUG("\n"); 743 753 744 754 LOCK_TEST_WITH_RETURN(dev, file_priv); ··· 745 757 return r128_do_engine_reset(dev); 746 758 } 747 759 748 - int r128_fullscreen(DRM_IOCTL_ARGS) 760 + int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv) 749 761 { 750 762 return -EINVAL; 751 763 } ··· 900 912 return 0; 901 913 } 902 914 903 - int r128_cce_buffers(DRM_IOCTL_ARGS) 915 + int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) 904 916 { 905 - DRM_DEVICE; 906 917 struct drm_device_dma *dma = dev->dma; 907 918 int ret = 0; 908 - struct drm_dma __user *argp = (void __user *)data; 909 - struct drm_dma d; 919 + struct drm_dma *d = data; 910 920 911 921 LOCK_TEST_WITH_RETURN(dev, file_priv); 912 922 913 - DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d)); 914 - 915 923 /* Please don't send us buffers. 
916 924 */ 917 - if (d.send_count != 0) { 925 + if (d->send_count != 0) { 918 926 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", 919 - DRM_CURRENTPID, d.send_count); 927 + DRM_CURRENTPID, d->send_count); 920 928 return -EINVAL; 921 929 } 922 930 923 931 /* We'll send you buffers. 924 932 */ 925 - if (d.request_count < 0 || d.request_count > dma->buf_count) { 933 + if (d->request_count < 0 || d->request_count > dma->buf_count) { 926 934 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", 927 - DRM_CURRENTPID, d.request_count, dma->buf_count); 935 + DRM_CURRENTPID, d->request_count, dma->buf_count); 928 936 return -EINVAL; 929 937 } 930 938 931 - d.granted_count = 0; 939 + d->granted_count = 0; 932 940 933 - if (d.request_count) { 934 - ret = r128_cce_get_buffers(dev, file_priv, &d); 941 + if (d->request_count) { 942 + ret = r128_cce_get_buffers(dev, file_priv, d); 935 943 } 936 - 937 - DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d)); 938 944 939 945 return ret; 940 946 }
+9 -9
drivers/char/drm/r128_drv.h
··· 129 129 drm_r128_freelist_t *list_entry; 130 130 } drm_r128_buf_priv_t; 131 131 132 - extern drm_ioctl_desc_t r128_ioctls[]; 132 + extern struct drm_ioctl_desc r128_ioctls[]; 133 133 extern int r128_max_ioctl; 134 134 135 135 /* r128_cce.c */ 136 - extern int r128_cce_init(DRM_IOCTL_ARGS); 137 - extern int r128_cce_start(DRM_IOCTL_ARGS); 138 - extern int r128_cce_stop(DRM_IOCTL_ARGS); 139 - extern int r128_cce_reset(DRM_IOCTL_ARGS); 140 - extern int r128_cce_idle(DRM_IOCTL_ARGS); 141 - extern int r128_engine_reset(DRM_IOCTL_ARGS); 142 - extern int r128_fullscreen(DRM_IOCTL_ARGS); 143 - extern int r128_cce_buffers(DRM_IOCTL_ARGS); 136 + extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv); 137 + extern int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv); 138 + extern int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv); 139 + extern int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); 140 + extern int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv); 141 + extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); 142 + extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv); 143 + extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); 144 144 145 145 extern void r128_freelist_reset(struct drm_device * dev); 146 146
+89 -121
drivers/char/drm/r128_state.c
··· 1242 1242 * IOCTL functions 1243 1243 */ 1244 1244 1245 - static int r128_cce_clear(DRM_IOCTL_ARGS) 1245 + static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) 1246 1246 { 1247 - DRM_DEVICE; 1248 1247 drm_r128_private_t *dev_priv = dev->dev_private; 1249 1248 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; 1250 - drm_r128_clear_t clear; 1249 + drm_r128_clear_t *clear = data; 1251 1250 DRM_DEBUG("\n"); 1252 1251 1253 1252 LOCK_TEST_WITH_RETURN(dev, file_priv); 1254 - 1255 - DRM_COPY_FROM_USER_IOCTL(clear, (drm_r128_clear_t __user *) data, 1256 - sizeof(clear)); 1257 1253 1258 1254 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1259 1255 1260 1256 if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS) 1261 1257 sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS; 1262 1258 1263 - r128_cce_dispatch_clear(dev, &clear); 1259 + r128_cce_dispatch_clear(dev, clear); 1264 1260 COMMIT_RING(); 1265 1261 1266 1262 /* Make sure we restore the 3D state next time. ··· 1306 1310 * They can & should be intermixed to support multiple 3d windows. 
1307 1311 */ 1308 1312 1309 - static int r128_cce_flip(DRM_IOCTL_ARGS) 1313 + static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv) 1310 1314 { 1311 - DRM_DEVICE; 1312 1315 drm_r128_private_t *dev_priv = dev->dev_private; 1313 1316 DRM_DEBUG("%s\n", __FUNCTION__); 1314 1317 ··· 1324 1329 return 0; 1325 1330 } 1326 1331 1327 - static int r128_cce_swap(DRM_IOCTL_ARGS) 1332 + static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) 1328 1333 { 1329 - DRM_DEVICE; 1330 1334 drm_r128_private_t *dev_priv = dev->dev_private; 1331 1335 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; 1332 1336 DRM_DEBUG("%s\n", __FUNCTION__); ··· 1345 1351 return 0; 1346 1352 } 1347 1353 1348 - static int r128_cce_vertex(DRM_IOCTL_ARGS) 1354 + static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) 1349 1355 { 1350 - DRM_DEVICE; 1351 1356 drm_r128_private_t *dev_priv = dev->dev_private; 1352 1357 struct drm_device_dma *dma = dev->dma; 1353 1358 struct drm_buf *buf; 1354 1359 drm_r128_buf_priv_t *buf_priv; 1355 - drm_r128_vertex_t vertex; 1360 + drm_r128_vertex_t *vertex = data; 1356 1361 1357 1362 LOCK_TEST_WITH_RETURN(dev, file_priv); 1358 1363 ··· 1360 1367 return -EINVAL; 1361 1368 } 1362 1369 1363 - DRM_COPY_FROM_USER_IOCTL(vertex, (drm_r128_vertex_t __user *) data, 1364 - sizeof(vertex)); 1365 - 1366 1370 DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", 1367 - DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard); 1371 + DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard); 1368 1372 1369 - if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { 1373 + if (vertex->idx < 0 || vertex->idx >= dma->buf_count) { 1370 1374 DRM_ERROR("buffer index %d (of %d max)\n", 1371 - vertex.idx, dma->buf_count - 1); 1375 + vertex->idx, dma->buf_count - 1); 1372 1376 return -EINVAL; 1373 1377 } 1374 - if (vertex.prim < 0 || 1375 - vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { 1376 
- DRM_ERROR("buffer prim %d\n", vertex.prim); 1378 + if (vertex->prim < 0 || 1379 + vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { 1380 + DRM_ERROR("buffer prim %d\n", vertex->prim); 1377 1381 return -EINVAL; 1378 1382 } 1379 1383 1380 1384 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1381 1385 VB_AGE_TEST_WITH_RETURN(dev_priv); 1382 1386 1383 - buf = dma->buflist[vertex.idx]; 1387 + buf = dma->buflist[vertex->idx]; 1384 1388 buf_priv = buf->dev_private; 1385 1389 1386 1390 if (buf->file_priv != file_priv) { ··· 1386 1396 return -EINVAL; 1387 1397 } 1388 1398 if (buf->pending) { 1389 - DRM_ERROR("sending pending buffer %d\n", vertex.idx); 1399 + DRM_ERROR("sending pending buffer %d\n", vertex->idx); 1390 1400 return -EINVAL; 1391 1401 } 1392 1402 1393 - buf->used = vertex.count; 1394 - buf_priv->prim = vertex.prim; 1395 - buf_priv->discard = vertex.discard; 1403 + buf->used = vertex->count; 1404 + buf_priv->prim = vertex->prim; 1405 + buf_priv->discard = vertex->discard; 1396 1406 1397 1407 r128_cce_dispatch_vertex(dev, buf); 1398 1408 ··· 1400 1410 return 0; 1401 1411 } 1402 1412 1403 - static int r128_cce_indices(DRM_IOCTL_ARGS) 1413 + static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) 1404 1414 { 1405 - DRM_DEVICE; 1406 1415 drm_r128_private_t *dev_priv = dev->dev_private; 1407 1416 struct drm_device_dma *dma = dev->dma; 1408 1417 struct drm_buf *buf; 1409 1418 drm_r128_buf_priv_t *buf_priv; 1410 - drm_r128_indices_t elts; 1419 + drm_r128_indices_t *elts = data; 1411 1420 int count; 1412 1421 1413 1422 LOCK_TEST_WITH_RETURN(dev, file_priv); ··· 1416 1427 return -EINVAL; 1417 1428 } 1418 1429 1419 - DRM_COPY_FROM_USER_IOCTL(elts, (drm_r128_indices_t __user *) data, 1420 - sizeof(elts)); 1421 - 1422 1430 DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID, 1423 - elts.idx, elts.start, elts.end, elts.discard); 1431 + elts->idx, elts->start, elts->end, elts->discard); 1424 1432 1425 - if (elts.idx < 0 || elts.idx >= 
dma->buf_count) { 1433 + if (elts->idx < 0 || elts->idx >= dma->buf_count) { 1426 1434 DRM_ERROR("buffer index %d (of %d max)\n", 1427 - elts.idx, dma->buf_count - 1); 1435 + elts->idx, dma->buf_count - 1); 1428 1436 return -EINVAL; 1429 1437 } 1430 - if (elts.prim < 0 || elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { 1431 - DRM_ERROR("buffer prim %d\n", elts.prim); 1438 + if (elts->prim < 0 || 1439 + elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { 1440 + DRM_ERROR("buffer prim %d\n", elts->prim); 1432 1441 return -EINVAL; 1433 1442 } 1434 1443 1435 1444 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1436 1445 VB_AGE_TEST_WITH_RETURN(dev_priv); 1437 1446 1438 - buf = dma->buflist[elts.idx]; 1447 + buf = dma->buflist[elts->idx]; 1439 1448 buf_priv = buf->dev_private; 1440 1449 1441 1450 if (buf->file_priv != file_priv) { ··· 1442 1455 return -EINVAL; 1443 1456 } 1444 1457 if (buf->pending) { 1445 - DRM_ERROR("sending pending buffer %d\n", elts.idx); 1458 + DRM_ERROR("sending pending buffer %d\n", elts->idx); 1446 1459 return -EINVAL; 1447 1460 } 1448 1461 1449 - count = (elts.end - elts.start) / sizeof(u16); 1450 - elts.start -= R128_INDEX_PRIM_OFFSET; 1462 + count = (elts->end - elts->start) / sizeof(u16); 1463 + elts->start -= R128_INDEX_PRIM_OFFSET; 1451 1464 1452 - if (elts.start & 0x7) { 1453 - DRM_ERROR("misaligned buffer 0x%x\n", elts.start); 1465 + if (elts->start & 0x7) { 1466 + DRM_ERROR("misaligned buffer 0x%x\n", elts->start); 1454 1467 return -EINVAL; 1455 1468 } 1456 - if (elts.start < buf->used) { 1457 - DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used); 1469 + if (elts->start < buf->used) { 1470 + DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used); 1458 1471 return -EINVAL; 1459 1472 } 1460 1473 1461 - buf->used = elts.end; 1462 - buf_priv->prim = elts.prim; 1463 - buf_priv->discard = elts.discard; 1474 + buf->used = elts->end; 1475 + buf_priv->prim = elts->prim; 1476 + buf_priv->discard = elts->discard; 1464 1477 1465 - 
r128_cce_dispatch_indices(dev, buf, elts.start, elts.end, count); 1478 + r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count); 1466 1479 1467 1480 COMMIT_RING(); 1468 1481 return 0; 1469 1482 } 1470 1483 1471 - static int r128_cce_blit(DRM_IOCTL_ARGS) 1484 + static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv) 1472 1485 { 1473 - DRM_DEVICE; 1474 1486 struct drm_device_dma *dma = dev->dma; 1475 1487 drm_r128_private_t *dev_priv = dev->dev_private; 1476 - drm_r128_blit_t blit; 1488 + drm_r128_blit_t *blit = data; 1477 1489 int ret; 1478 1490 1479 1491 LOCK_TEST_WITH_RETURN(dev, file_priv); 1480 1492 1481 - DRM_COPY_FROM_USER_IOCTL(blit, (drm_r128_blit_t __user *) data, 1482 - sizeof(blit)); 1493 + DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit->idx); 1483 1494 1484 - DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit.idx); 1485 - 1486 - if (blit.idx < 0 || blit.idx >= dma->buf_count) { 1495 + if (blit->idx < 0 || blit->idx >= dma->buf_count) { 1487 1496 DRM_ERROR("buffer index %d (of %d max)\n", 1488 - blit.idx, dma->buf_count - 1); 1497 + blit->idx, dma->buf_count - 1); 1489 1498 return -EINVAL; 1490 1499 } 1491 1500 1492 1501 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1493 1502 VB_AGE_TEST_WITH_RETURN(dev_priv); 1494 1503 1495 - ret = r128_cce_dispatch_blit(dev, file_priv, &blit); 1504 + ret = r128_cce_dispatch_blit(dev, file_priv, blit); 1496 1505 1497 1506 COMMIT_RING(); 1498 1507 return ret; 1499 1508 } 1500 1509 1501 - static int r128_cce_depth(DRM_IOCTL_ARGS) 1510 + static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv) 1502 1511 { 1503 - DRM_DEVICE; 1504 1512 drm_r128_private_t *dev_priv = dev->dev_private; 1505 - drm_r128_depth_t depth; 1513 + drm_r128_depth_t *depth = data; 1506 1514 int ret; 1507 1515 1508 1516 LOCK_TEST_WITH_RETURN(dev, file_priv); 1509 1517 1510 - DRM_COPY_FROM_USER_IOCTL(depth, (drm_r128_depth_t __user *) data, 1511 - sizeof(depth)); 1512 - 1513 1518 
RING_SPACE_TEST_WITH_RETURN(dev_priv); 1514 1519 1515 1520 ret = -EINVAL; 1516 - switch (depth.func) { 1521 + switch (depth->func) { 1517 1522 case R128_WRITE_SPAN: 1518 - ret = r128_cce_dispatch_write_span(dev, &depth); 1523 + ret = r128_cce_dispatch_write_span(dev, depth); 1519 1524 break; 1520 1525 case R128_WRITE_PIXELS: 1521 - ret = r128_cce_dispatch_write_pixels(dev, &depth); 1526 + ret = r128_cce_dispatch_write_pixels(dev, depth); 1522 1527 break; 1523 1528 case R128_READ_SPAN: 1524 - ret = r128_cce_dispatch_read_span(dev, &depth); 1529 + ret = r128_cce_dispatch_read_span(dev, depth); 1525 1530 break; 1526 1531 case R128_READ_PIXELS: 1527 - ret = r128_cce_dispatch_read_pixels(dev, &depth); 1532 + ret = r128_cce_dispatch_read_pixels(dev, depth); 1528 1533 break; 1529 1534 } 1530 1535 ··· 1524 1545 return ret; 1525 1546 } 1526 1547 1527 - static int r128_cce_stipple(DRM_IOCTL_ARGS) 1548 + static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv) 1528 1549 { 1529 - DRM_DEVICE; 1530 1550 drm_r128_private_t *dev_priv = dev->dev_private; 1531 - drm_r128_stipple_t stipple; 1551 + drm_r128_stipple_t *stipple = data; 1532 1552 u32 mask[32]; 1533 1553 1534 1554 LOCK_TEST_WITH_RETURN(dev, file_priv); 1535 1555 1536 - DRM_COPY_FROM_USER_IOCTL(stipple, (drm_r128_stipple_t __user *) data, 1537 - sizeof(stipple)); 1538 - 1539 - if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32))) 1556 + if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32))) 1540 1557 return -EFAULT; 1541 1558 1542 1559 RING_SPACE_TEST_WITH_RETURN(dev_priv); ··· 1543 1568 return 0; 1544 1569 } 1545 1570 1546 - static int r128_cce_indirect(DRM_IOCTL_ARGS) 1571 + static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv) 1547 1572 { 1548 - DRM_DEVICE; 1549 1573 drm_r128_private_t *dev_priv = dev->dev_private; 1550 1574 struct drm_device_dma *dma = dev->dma; 1551 1575 struct drm_buf *buf; 1552 1576 drm_r128_buf_priv_t 
*buf_priv; 1553 - drm_r128_indirect_t indirect; 1577 + drm_r128_indirect_t *indirect = data; 1554 1578 #if 0 1555 1579 RING_LOCALS; 1556 1580 #endif ··· 1561 1587 return -EINVAL; 1562 1588 } 1563 1589 1564 - DRM_COPY_FROM_USER_IOCTL(indirect, (drm_r128_indirect_t __user *) data, 1565 - sizeof(indirect)); 1566 - 1567 1590 DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n", 1568 - indirect.idx, indirect.start, indirect.end, indirect.discard); 1591 + indirect->idx, indirect->start, indirect->end, 1592 + indirect->discard); 1569 1593 1570 - if (indirect.idx < 0 || indirect.idx >= dma->buf_count) { 1594 + if (indirect->idx < 0 || indirect->idx >= dma->buf_count) { 1571 1595 DRM_ERROR("buffer index %d (of %d max)\n", 1572 - indirect.idx, dma->buf_count - 1); 1596 + indirect->idx, dma->buf_count - 1); 1573 1597 return -EINVAL; 1574 1598 } 1575 1599 1576 - buf = dma->buflist[indirect.idx]; 1600 + buf = dma->buflist[indirect->idx]; 1577 1601 buf_priv = buf->dev_private; 1578 1602 1579 1603 if (buf->file_priv != file_priv) { ··· 1580 1608 return -EINVAL; 1581 1609 } 1582 1610 if (buf->pending) { 1583 - DRM_ERROR("sending pending buffer %d\n", indirect.idx); 1611 + DRM_ERROR("sending pending buffer %d\n", indirect->idx); 1584 1612 return -EINVAL; 1585 1613 } 1586 1614 1587 - if (indirect.start < buf->used) { 1615 + if (indirect->start < buf->used) { 1588 1616 DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", 1589 - indirect.start, buf->used); 1617 + indirect->start, buf->used); 1590 1618 return -EINVAL; 1591 1619 } 1592 1620 1593 1621 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1594 1622 VB_AGE_TEST_WITH_RETURN(dev_priv); 1595 1623 1596 - buf->used = indirect.end; 1597 - buf_priv->discard = indirect.discard; 1624 + buf->used = indirect->end; 1625 + buf_priv->discard = indirect->discard; 1598 1626 1599 1627 #if 0 1600 1628 /* Wait for the 3D stream to idle before the indirect buffer ··· 1609 1637 * X server. 
This is insecure and is thus only available to 1610 1638 * privileged clients. 1611 1639 */ 1612 - r128_cce_dispatch_indirect(dev, buf, indirect.start, indirect.end); 1640 + r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end); 1613 1641 1614 1642 COMMIT_RING(); 1615 1643 return 0; 1616 1644 } 1617 1645 1618 - static int r128_getparam(DRM_IOCTL_ARGS) 1646 + static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) 1619 1647 { 1620 - DRM_DEVICE; 1621 1648 drm_r128_private_t *dev_priv = dev->dev_private; 1622 - drm_r128_getparam_t param; 1649 + drm_r128_getparam_t *param = data; 1623 1650 int value; 1624 1651 1625 1652 if (!dev_priv) { ··· 1626 1655 return -EINVAL; 1627 1656 } 1628 1657 1629 - DRM_COPY_FROM_USER_IOCTL(param, (drm_r128_getparam_t __user *) data, 1630 - sizeof(param)); 1631 - 1632 1658 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); 1633 1659 1634 - switch (param.param) { 1660 + switch (param->param) { 1635 1661 case R128_PARAM_IRQ_NR: 1636 1662 value = dev->irq; 1637 1663 break; ··· 1636 1668 return -EINVAL; 1637 1669 } 1638 1670 1639 - if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { 1671 + if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { 1640 1672 DRM_ERROR("copy_to_user\n"); 1641 1673 return -EFAULT; 1642 1674 } ··· 1659 1691 r128_do_cleanup_cce(dev); 1660 1692 } 1661 1693 1662 - drm_ioctl_desc_t r128_ioctls[] = { 1663 - [DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1664 - [DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1665 - [DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1666 - [DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1667 - [DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, DRM_AUTH}, 1668 - [DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, DRM_AUTH}, 1669 - [DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, 
DRM_AUTH}, 1670 - [DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, DRM_AUTH}, 1671 - [DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, DRM_AUTH}, 1672 - [DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, DRM_AUTH}, 1673 - [DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, DRM_AUTH}, 1674 - [DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, DRM_AUTH}, 1675 - [DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, DRM_AUTH}, 1676 - [DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, DRM_AUTH}, 1677 - [DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, DRM_AUTH}, 1678 - [DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1679 - [DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, DRM_AUTH}, 1694 + struct drm_ioctl_desc r128_ioctls[] = { 1695 + DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1696 + DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1697 + DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1698 + DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1699 + DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH), 1700 + DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH), 1701 + DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH), 1702 + DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH), 1703 + DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH), 1704 + DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH), 1705 + DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH), 1706 + DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH), 1707 + DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH), 1708 + DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH), 1709 + DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH), 1710 + DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1711 + DRM_IOCTL_DEF(DRM_R128_GETPARAM, 
r128_getparam, DRM_AUTH), 1680 1712 }; 1681 1713 1682 1714 int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
+24 -43
drivers/char/drm/radeon_cp.c
··· 1823 1823 return 0; 1824 1824 } 1825 1825 1826 - int radeon_cp_init(DRM_IOCTL_ARGS) 1826 + int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv) 1827 1827 { 1828 - DRM_DEVICE; 1829 - drm_radeon_init_t init; 1828 + drm_radeon_init_t *init = data; 1830 1829 1831 1830 LOCK_TEST_WITH_RETURN(dev, file_priv); 1832 1831 1833 - DRM_COPY_FROM_USER_IOCTL(init, (drm_radeon_init_t __user *) data, 1834 - sizeof(init)); 1835 - 1836 - if (init.func == RADEON_INIT_R300_CP) 1832 + if (init->func == RADEON_INIT_R300_CP) 1837 1833 r300_init_reg_flags(); 1838 1834 1839 - switch (init.func) { 1835 + switch (init->func) { 1840 1836 case RADEON_INIT_CP: 1841 1837 case RADEON_INIT_R200_CP: 1842 1838 case RADEON_INIT_R300_CP: 1843 - return radeon_do_init_cp(dev, &init); 1839 + return radeon_do_init_cp(dev, init); 1844 1840 case RADEON_CLEANUP_CP: 1845 1841 return radeon_do_cleanup_cp(dev); 1846 1842 } ··· 1844 1848 return -EINVAL; 1845 1849 } 1846 1850 1847 - int radeon_cp_start(DRM_IOCTL_ARGS) 1851 + int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv) 1848 1852 { 1849 - DRM_DEVICE; 1850 1853 drm_radeon_private_t *dev_priv = dev->dev_private; 1851 1854 DRM_DEBUG("\n"); 1852 1855 ··· 1869 1874 /* Stop the CP. The engine must have been idled before calling this 1870 1875 * routine. 1871 1876 */ 1872 - int radeon_cp_stop(DRM_IOCTL_ARGS) 1877 + int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv) 1873 1878 { 1874 - DRM_DEVICE; 1875 1879 drm_radeon_private_t *dev_priv = dev->dev_private; 1876 - drm_radeon_cp_stop_t stop; 1880 + drm_radeon_cp_stop_t *stop = data; 1877 1881 int ret; 1878 1882 DRM_DEBUG("\n"); 1879 1883 1880 1884 LOCK_TEST_WITH_RETURN(dev, file_priv); 1881 - 1882 - DRM_COPY_FROM_USER_IOCTL(stop, (drm_radeon_cp_stop_t __user *) data, 1883 - sizeof(stop)); 1884 1885 1885 1886 if (!dev_priv->cp_running) 1886 1887 return 0; ··· 1884 1893 /* Flush any pending CP commands. 
This ensures any outstanding 1885 1894 * commands are exectuted by the engine before we turn it off. 1886 1895 */ 1887 - if (stop.flush) { 1896 + if (stop->flush) { 1888 1897 radeon_do_cp_flush(dev_priv); 1889 1898 } 1890 1899 1891 1900 /* If we fail to make the engine go idle, we return an error 1892 1901 * code so that the DRM ioctl wrapper can try again. 1893 1902 */ 1894 - if (stop.idle) { 1903 + if (stop->idle) { 1895 1904 ret = radeon_do_cp_idle(dev_priv); 1896 1905 if (ret) 1897 1906 return ret; ··· 1954 1963 1955 1964 /* Just reset the CP ring. Called as part of an X Server engine reset. 1956 1965 */ 1957 - int radeon_cp_reset(DRM_IOCTL_ARGS) 1966 + int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) 1958 1967 { 1959 - DRM_DEVICE; 1960 1968 drm_radeon_private_t *dev_priv = dev->dev_private; 1961 1969 DRM_DEBUG("\n"); 1962 1970 ··· 1974 1984 return 0; 1975 1985 } 1976 1986 1977 - int radeon_cp_idle(DRM_IOCTL_ARGS) 1987 + int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv) 1978 1988 { 1979 - DRM_DEVICE; 1980 1989 drm_radeon_private_t *dev_priv = dev->dev_private; 1981 1990 DRM_DEBUG("\n"); 1982 1991 ··· 1986 1997 1987 1998 /* Added by Charl P. Botha to call radeon_do_resume_cp(). 
1988 1999 */ 1989 - int radeon_cp_resume(DRM_IOCTL_ARGS) 2000 + int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv) 1990 2001 { 1991 - DRM_DEVICE; 1992 2002 1993 2003 return radeon_do_resume_cp(dev); 1994 2004 } 1995 2005 1996 - int radeon_engine_reset(DRM_IOCTL_ARGS) 2006 + int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) 1997 2007 { 1998 - DRM_DEVICE; 1999 2008 DRM_DEBUG("\n"); 2000 2009 2001 2010 LOCK_TEST_WITH_RETURN(dev, file_priv); ··· 2007 2020 2008 2021 /* KW: Deprecated to say the least: 2009 2022 */ 2010 - int radeon_fullscreen(DRM_IOCTL_ARGS) 2023 + int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv) 2011 2024 { 2012 2025 return 0; 2013 2026 } ··· 2185 2198 return 0; 2186 2199 } 2187 2200 2188 - int radeon_cp_buffers(DRM_IOCTL_ARGS) 2201 + int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) 2189 2202 { 2190 - DRM_DEVICE; 2191 2203 struct drm_device_dma *dma = dev->dma; 2192 2204 int ret = 0; 2193 - struct drm_dma __user *argp = (void __user *)data; 2194 - struct drm_dma d; 2205 + struct drm_dma *d = data; 2195 2206 2196 2207 LOCK_TEST_WITH_RETURN(dev, file_priv); 2197 2208 2198 - DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d)); 2199 - 2200 2209 /* Please don't send us buffers. 2201 2210 */ 2202 - if (d.send_count != 0) { 2211 + if (d->send_count != 0) { 2203 2212 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", 2204 - DRM_CURRENTPID, d.send_count); 2213 + DRM_CURRENTPID, d->send_count); 2205 2214 return -EINVAL; 2206 2215 } 2207 2216 2208 2217 /* We'll send you buffers. 
2209 2218 */ 2210 - if (d.request_count < 0 || d.request_count > dma->buf_count) { 2219 + if (d->request_count < 0 || d->request_count > dma->buf_count) { 2211 2220 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", 2212 - DRM_CURRENTPID, d.request_count, dma->buf_count); 2221 + DRM_CURRENTPID, d->request_count, dma->buf_count); 2213 2222 return -EINVAL; 2214 2223 } 2215 2224 2216 - d.granted_count = 0; 2225 + d->granted_count = 0; 2217 2226 2218 - if (d.request_count) { 2219 - ret = radeon_cp_get_buffers(dev, file_priv, &d); 2227 + if (d->request_count) { 2228 + ret = radeon_cp_get_buffers(dev, file_priv, d); 2220 2229 } 2221 - 2222 - DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d)); 2223 2230 2224 2231 return ret; 2225 2232 }
+15 -15
drivers/char/drm/radeon_drv.h
··· 307 307 } drm_radeon_kcmd_buffer_t; 308 308 309 309 extern int radeon_no_wb; 310 - extern drm_ioctl_desc_t radeon_ioctls[]; 310 + extern struct drm_ioctl_desc radeon_ioctls[]; 311 311 extern int radeon_max_ioctl; 312 312 313 313 /* Check whether the given hardware address is inside the framebuffer or the ··· 326 326 } 327 327 328 328 /* radeon_cp.c */ 329 - extern int radeon_cp_init(DRM_IOCTL_ARGS); 330 - extern int radeon_cp_start(DRM_IOCTL_ARGS); 331 - extern int radeon_cp_stop(DRM_IOCTL_ARGS); 332 - extern int radeon_cp_reset(DRM_IOCTL_ARGS); 333 - extern int radeon_cp_idle(DRM_IOCTL_ARGS); 334 - extern int radeon_cp_resume(DRM_IOCTL_ARGS); 335 - extern int radeon_engine_reset(DRM_IOCTL_ARGS); 336 - extern int radeon_fullscreen(DRM_IOCTL_ARGS); 337 - extern int radeon_cp_buffers(DRM_IOCTL_ARGS); 329 + extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv); 330 + extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv); 331 + extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv); 332 + extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); 333 + extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv); 334 + extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv); 335 + extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); 336 + extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv); 337 + extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); 338 338 339 339 extern void radeon_freelist_reset(struct drm_device * dev); 340 340 extern struct drm_buf *radeon_freelist_get(struct drm_device * dev); ··· 347 347 extern int radeon_presetup(struct drm_device *dev); 348 348 extern int radeon_driver_postcleanup(struct drm_device *dev); 349 349 
350 - extern int radeon_mem_alloc(DRM_IOCTL_ARGS); 351 - extern int radeon_mem_free(DRM_IOCTL_ARGS); 352 - extern int radeon_mem_init_heap(DRM_IOCTL_ARGS); 350 + extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); 351 + extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv); 352 + extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv); 353 353 extern void radeon_mem_takedown(struct mem_block **heap); 354 354 extern void radeon_mem_release(struct drm_file *file_priv, 355 355 struct mem_block *heap); 356 356 357 357 /* radeon_irq.c */ 358 - extern int radeon_irq_emit(DRM_IOCTL_ARGS); 359 - extern int radeon_irq_wait(DRM_IOCTL_ARGS); 358 + extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); 359 + extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); 360 360 361 361 extern void radeon_do_release(struct drm_device * dev); 362 362 extern int radeon_driver_vblank_wait(struct drm_device * dev,
+6 -14
drivers/char/drm/radeon_irq.c
··· 196 196 197 197 /* Needs the lock as it touches the ring. 198 198 */ 199 - int radeon_irq_emit(DRM_IOCTL_ARGS) 199 + int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv) 200 200 { 201 - DRM_DEVICE; 202 201 drm_radeon_private_t *dev_priv = dev->dev_private; 203 - drm_radeon_irq_emit_t emit; 202 + drm_radeon_irq_emit_t *emit = data; 204 203 int result; 205 204 206 205 LOCK_TEST_WITH_RETURN(dev, file_priv); ··· 209 210 return -EINVAL; 210 211 } 211 212 212 - DRM_COPY_FROM_USER_IOCTL(emit, (drm_radeon_irq_emit_t __user *) data, 213 - sizeof(emit)); 214 - 215 213 result = radeon_emit_irq(dev); 216 214 217 - if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) { 215 + if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { 218 216 DRM_ERROR("copy_to_user\n"); 219 217 return -EFAULT; 220 218 } ··· 221 225 222 226 /* Doesn't need the hardware lock. 223 227 */ 224 - int radeon_irq_wait(DRM_IOCTL_ARGS) 228 + int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv) 225 229 { 226 - DRM_DEVICE; 227 230 drm_radeon_private_t *dev_priv = dev->dev_private; 228 - drm_radeon_irq_wait_t irqwait; 231 + drm_radeon_irq_wait_t *irqwait = data; 229 232 230 233 if (!dev_priv) { 231 234 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 232 235 return -EINVAL; 233 236 } 234 237 235 - DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_radeon_irq_wait_t __user *) data, 236 - sizeof(irqwait)); 237 - 238 - return radeon_wait_irq(dev, irqwait.irq_seq); 238 + return radeon_wait_irq(dev, irqwait->irq_seq); 239 239 } 240 240 241 241 static void radeon_enable_interrupt(struct drm_device *dev)
+16 -28
drivers/char/drm/radeon_mem.c
··· 217 217 } 218 218 } 219 219 220 - int radeon_mem_alloc(DRM_IOCTL_ARGS) 220 + int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) 221 221 { 222 - DRM_DEVICE; 223 222 drm_radeon_private_t *dev_priv = dev->dev_private; 224 - drm_radeon_mem_alloc_t alloc; 223 + drm_radeon_mem_alloc_t *alloc = data; 225 224 struct mem_block *block, **heap; 226 225 227 226 if (!dev_priv) { ··· 228 229 return -EINVAL; 229 230 } 230 231 231 - DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_mem_alloc_t __user *) data, 232 - sizeof(alloc)); 233 - 234 - heap = get_heap(dev_priv, alloc.region); 232 + heap = get_heap(dev_priv, alloc->region); 235 233 if (!heap || !*heap) 236 234 return -EFAULT; 237 235 238 236 /* Make things easier on ourselves: all allocations at least 239 237 * 4k aligned. 240 238 */ 241 - if (alloc.alignment < 12) 242 - alloc.alignment = 12; 239 + if (alloc->alignment < 12) 240 + alloc->alignment = 12; 243 241 244 - block = alloc_block(*heap, alloc.size, alloc.alignment, file_priv); 242 + block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv); 245 243 246 244 if (!block) 247 245 return -ENOMEM; 248 246 249 - if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) { 247 + if (DRM_COPY_TO_USER(alloc->region_offset, &block->start, 248 + sizeof(int))) { 250 249 DRM_ERROR("copy_to_user\n"); 251 250 return -EFAULT; 252 251 } ··· 252 255 return 0; 253 256 } 254 257 255 - int radeon_mem_free(DRM_IOCTL_ARGS) 258 + int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv) 256 259 { 257 - DRM_DEVICE; 258 260 drm_radeon_private_t *dev_priv = dev->dev_private; 259 - drm_radeon_mem_free_t memfree; 261 + drm_radeon_mem_free_t *memfree = data; 260 262 struct mem_block *block, **heap; 261 263 262 264 if (!dev_priv) { ··· 263 267 return -EINVAL; 264 268 } 265 269 266 - DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_mem_free_t __user *) data, 267 - sizeof(memfree)); 268 - 269 - heap = get_heap(dev_priv, 
memfree.region); 270 + heap = get_heap(dev_priv, memfree->region); 270 271 if (!heap || !*heap) 271 272 return -EFAULT; 272 273 273 - block = find_block(*heap, memfree.region_offset); 274 + block = find_block(*heap, memfree->region_offset); 274 275 if (!block) 275 276 return -EFAULT; 276 277 ··· 278 285 return 0; 279 286 } 280 287 281 - int radeon_mem_init_heap(DRM_IOCTL_ARGS) 288 + int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv) 282 289 { 283 - DRM_DEVICE; 284 290 drm_radeon_private_t *dev_priv = dev->dev_private; 285 - drm_radeon_mem_init_heap_t initheap; 291 + drm_radeon_mem_init_heap_t *initheap = data; 286 292 struct mem_block **heap; 287 293 288 294 if (!dev_priv) { ··· 289 297 return -EINVAL; 290 298 } 291 299 292 - DRM_COPY_FROM_USER_IOCTL(initheap, 293 - (drm_radeon_mem_init_heap_t __user *) data, 294 - sizeof(initheap)); 295 - 296 - heap = get_heap(dev_priv, initheap.region); 300 + heap = get_heap(dev_priv, initheap->region); 297 301 if (!heap) 298 302 return -EFAULT; 299 303 ··· 298 310 return -EFAULT; 299 311 } 300 312 301 - return init_heap(heap, initheap.start, initheap.size); 313 + return init_heap(heap, initheap->start, initheap->size); 302 314 }
+147 -197
drivers/char/drm/radeon_state.c
··· 2075 2075 /* ================================================================ 2076 2076 * IOCTL functions 2077 2077 */ 2078 - static int radeon_surface_alloc(DRM_IOCTL_ARGS) 2078 + static int radeon_surface_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) 2079 2079 { 2080 - DRM_DEVICE; 2081 2080 drm_radeon_private_t *dev_priv = dev->dev_private; 2082 - drm_radeon_surface_alloc_t alloc; 2081 + drm_radeon_surface_alloc_t *alloc = data; 2083 2082 2084 - DRM_COPY_FROM_USER_IOCTL(alloc, 2085 - (drm_radeon_surface_alloc_t __user *) data, 2086 - sizeof(alloc)); 2087 - 2088 - if (alloc_surface(&alloc, dev_priv, file_priv) == -1) 2083 + if (alloc_surface(alloc, dev_priv, file_priv) == -1) 2089 2084 return -EINVAL; 2090 2085 else 2091 2086 return 0; 2092 2087 } 2093 2088 2094 - static int radeon_surface_free(DRM_IOCTL_ARGS) 2089 + static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_file *file_priv) 2095 2090 { 2096 - DRM_DEVICE; 2097 2091 drm_radeon_private_t *dev_priv = dev->dev_private; 2098 - drm_radeon_surface_free_t memfree; 2092 + drm_radeon_surface_free_t *memfree = data; 2099 2093 2100 - DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_surface_free_t __user *) data, 2101 - sizeof(memfree)); 2102 - 2103 - if (free_surface(file_priv, dev_priv, memfree.address)) 2094 + if (free_surface(file_priv, dev_priv, memfree->address)) 2104 2095 return -EINVAL; 2105 2096 else 2106 2097 return 0; 2107 2098 } 2108 2099 2109 - static int radeon_cp_clear(DRM_IOCTL_ARGS) 2100 + static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) 2110 2101 { 2111 - DRM_DEVICE; 2112 2102 drm_radeon_private_t *dev_priv = dev->dev_private; 2113 2103 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2114 - drm_radeon_clear_t clear; 2104 + drm_radeon_clear_t *clear = data; 2115 2105 drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; 2116 2106 DRM_DEBUG("\n"); 2117 2107 2118 2108 LOCK_TEST_WITH_RETURN(dev, 
file_priv); 2119 - 2120 - DRM_COPY_FROM_USER_IOCTL(clear, (drm_radeon_clear_t __user *) data, 2121 - sizeof(clear)); 2122 2109 2123 2110 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2124 2111 2125 2112 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) 2126 2113 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; 2127 2114 2128 - if (DRM_COPY_FROM_USER(&depth_boxes, clear.depth_boxes, 2115 + if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes, 2129 2116 sarea_priv->nbox * sizeof(depth_boxes[0]))) 2130 2117 return -EFAULT; 2131 2118 2132 - radeon_cp_dispatch_clear(dev, &clear, depth_boxes); 2119 + radeon_cp_dispatch_clear(dev, clear, depth_boxes); 2133 2120 2134 2121 COMMIT_RING(); 2135 2122 return 0; ··· 2152 2165 /* Swapping and flipping are different operations, need different ioctls. 2153 2166 * They can & should be intermixed to support multiple 3d windows. 2154 2167 */ 2155 - static int radeon_cp_flip(DRM_IOCTL_ARGS) 2168 + static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *file_priv) 2156 2169 { 2157 - DRM_DEVICE; 2158 2170 drm_radeon_private_t *dev_priv = dev->dev_private; 2159 2171 DRM_DEBUG("\n"); 2160 2172 ··· 2170 2184 return 0; 2171 2185 } 2172 2186 2173 - static int radeon_cp_swap(DRM_IOCTL_ARGS) 2187 + static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) 2174 2188 { 2175 - DRM_DEVICE; 2176 2189 drm_radeon_private_t *dev_priv = dev->dev_private; 2177 2190 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2178 2191 DRM_DEBUG("\n"); ··· 2190 2205 return 0; 2191 2206 } 2192 2207 2193 - static int radeon_cp_vertex(DRM_IOCTL_ARGS) 2208 + static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) 2194 2209 { 2195 - DRM_DEVICE; 2196 2210 drm_radeon_private_t *dev_priv = dev->dev_private; 2197 2211 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2198 2212 struct drm_device_dma *dma = dev->dma; 2199 2213 struct drm_buf *buf; 2200 - drm_radeon_vertex_t vertex; 
2214 + drm_radeon_vertex_t *vertex = data; 2201 2215 drm_radeon_tcl_prim_t prim; 2202 2216 2203 2217 LOCK_TEST_WITH_RETURN(dev, file_priv); 2204 2218 2205 - DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data, 2206 - sizeof(vertex)); 2207 - 2208 2219 DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", 2209 - DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard); 2220 + DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard); 2210 2221 2211 - if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { 2222 + if (vertex->idx < 0 || vertex->idx >= dma->buf_count) { 2212 2223 DRM_ERROR("buffer index %d (of %d max)\n", 2213 - vertex.idx, dma->buf_count - 1); 2224 + vertex->idx, dma->buf_count - 1); 2214 2225 return -EINVAL; 2215 2226 } 2216 - if (vertex.prim < 0 || vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { 2217 - DRM_ERROR("buffer prim %d\n", vertex.prim); 2227 + if (vertex->prim < 0 || vertex->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { 2228 + DRM_ERROR("buffer prim %d\n", vertex->prim); 2218 2229 return -EINVAL; 2219 2230 } 2220 2231 2221 2232 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2222 2233 VB_AGE_TEST_WITH_RETURN(dev_priv); 2223 2234 2224 - buf = dma->buflist[vertex.idx]; 2235 + buf = dma->buflist[vertex->idx]; 2225 2236 2226 2237 if (buf->file_priv != file_priv) { 2227 2238 DRM_ERROR("process %d using buffer owned by %p\n", ··· 2225 2244 return -EINVAL; 2226 2245 } 2227 2246 if (buf->pending) { 2228 - DRM_ERROR("sending pending buffer %d\n", vertex.idx); 2247 + DRM_ERROR("sending pending buffer %d\n", vertex->idx); 2229 2248 return -EINVAL; 2230 2249 } 2231 2250 2232 2251 /* Build up a prim_t record: 2233 2252 */ 2234 - if (vertex.count) { 2235 - buf->used = vertex.count; /* not used? */ 2253 + if (vertex->count) { 2254 + buf->used = vertex->count; /* not used? 
*/ 2236 2255 2237 2256 if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) { 2238 2257 if (radeon_emit_state(dev_priv, file_priv, ··· 2250 2269 } 2251 2270 2252 2271 prim.start = 0; 2253 - prim.finish = vertex.count; /* unused */ 2254 - prim.prim = vertex.prim; 2255 - prim.numverts = vertex.count; 2272 + prim.finish = vertex->count; /* unused */ 2273 + prim.prim = vertex->prim; 2274 + prim.numverts = vertex->count; 2256 2275 prim.vc_format = dev_priv->sarea_priv->vc_format; 2257 2276 2258 2277 radeon_cp_dispatch_vertex(dev, buf, &prim); 2259 2278 } 2260 2279 2261 - if (vertex.discard) { 2280 + if (vertex->discard) { 2262 2281 radeon_cp_discard_buffer(dev, buf); 2263 2282 } 2264 2283 ··· 2266 2285 return 0; 2267 2286 } 2268 2287 2269 - static int radeon_cp_indices(DRM_IOCTL_ARGS) 2288 + static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) 2270 2289 { 2271 - DRM_DEVICE; 2272 2290 drm_radeon_private_t *dev_priv = dev->dev_private; 2273 2291 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2274 2292 struct drm_device_dma *dma = dev->dma; 2275 2293 struct drm_buf *buf; 2276 - drm_radeon_indices_t elts; 2294 + drm_radeon_indices_t *elts = data; 2277 2295 drm_radeon_tcl_prim_t prim; 2278 2296 int count; 2279 2297 2280 2298 LOCK_TEST_WITH_RETURN(dev, file_priv); 2281 2299 2282 - DRM_COPY_FROM_USER_IOCTL(elts, (drm_radeon_indices_t __user *) data, 2283 - sizeof(elts)); 2284 - 2285 2300 DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n", 2286 - DRM_CURRENTPID, elts.idx, elts.start, elts.end, elts.discard); 2301 + DRM_CURRENTPID, elts->idx, elts->start, elts->end, 2302 + elts->discard); 2287 2303 2288 - if (elts.idx < 0 || elts.idx >= dma->buf_count) { 2304 + if (elts->idx < 0 || elts->idx >= dma->buf_count) { 2289 2305 DRM_ERROR("buffer index %d (of %d max)\n", 2290 - elts.idx, dma->buf_count - 1); 2306 + elts->idx, dma->buf_count - 1); 2291 2307 return -EINVAL; 2292 2308 } 2293 - if (elts.prim < 0 || elts.prim > 
RADEON_PRIM_TYPE_3VRT_LINE_LIST) { 2294 - DRM_ERROR("buffer prim %d\n", elts.prim); 2309 + if (elts->prim < 0 || elts->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { 2310 + DRM_ERROR("buffer prim %d\n", elts->prim); 2295 2311 return -EINVAL; 2296 2312 } 2297 2313 2298 2314 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2299 2315 VB_AGE_TEST_WITH_RETURN(dev_priv); 2300 2316 2301 - buf = dma->buflist[elts.idx]; 2317 + buf = dma->buflist[elts->idx]; 2302 2318 2303 2319 if (buf->file_priv != file_priv) { 2304 2320 DRM_ERROR("process %d using buffer owned by %p\n", ··· 2303 2325 return -EINVAL; 2304 2326 } 2305 2327 if (buf->pending) { 2306 - DRM_ERROR("sending pending buffer %d\n", elts.idx); 2328 + DRM_ERROR("sending pending buffer %d\n", elts->idx); 2307 2329 return -EINVAL; 2308 2330 } 2309 2331 2310 - count = (elts.end - elts.start) / sizeof(u16); 2311 - elts.start -= RADEON_INDEX_PRIM_OFFSET; 2332 + count = (elts->end - elts->start) / sizeof(u16); 2333 + elts->start -= RADEON_INDEX_PRIM_OFFSET; 2312 2334 2313 - if (elts.start & 0x7) { 2314 - DRM_ERROR("misaligned buffer 0x%x\n", elts.start); 2335 + if (elts->start & 0x7) { 2336 + DRM_ERROR("misaligned buffer 0x%x\n", elts->start); 2315 2337 return -EINVAL; 2316 2338 } 2317 - if (elts.start < buf->used) { 2318 - DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used); 2339 + if (elts->start < buf->used) { 2340 + DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used); 2319 2341 return -EINVAL; 2320 2342 } 2321 2343 2322 - buf->used = elts.end; 2344 + buf->used = elts->end; 2323 2345 2324 2346 if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) { 2325 2347 if (radeon_emit_state(dev_priv, file_priv, ··· 2338 2360 2339 2361 /* Build up a prim_t record: 2340 2362 */ 2341 - prim.start = elts.start; 2342 - prim.finish = elts.end; 2343 - prim.prim = elts.prim; 2363 + prim.start = elts->start; 2364 + prim.finish = elts->end; 2365 + prim.prim = elts->prim; 2344 2366 prim.offset = 0; /* offset from start of dma buffers */ 2345 
2367 prim.numverts = RADEON_MAX_VB_VERTS; /* duh */ 2346 2368 prim.vc_format = dev_priv->sarea_priv->vc_format; 2347 2369 2348 2370 radeon_cp_dispatch_indices(dev, buf, &prim); 2349 - if (elts.discard) { 2371 + if (elts->discard) { 2350 2372 radeon_cp_discard_buffer(dev, buf); 2351 2373 } 2352 2374 ··· 2354 2376 return 0; 2355 2377 } 2356 2378 2357 - static int radeon_cp_texture(DRM_IOCTL_ARGS) 2379 + static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file *file_priv) 2358 2380 { 2359 - DRM_DEVICE; 2360 2381 drm_radeon_private_t *dev_priv = dev->dev_private; 2361 - drm_radeon_texture_t tex; 2382 + drm_radeon_texture_t *tex = data; 2362 2383 drm_radeon_tex_image_t image; 2363 2384 int ret; 2364 2385 2365 2386 LOCK_TEST_WITH_RETURN(dev, file_priv); 2366 2387 2367 - DRM_COPY_FROM_USER_IOCTL(tex, (drm_radeon_texture_t __user *) data, 2368 - sizeof(tex)); 2369 - 2370 - if (tex.image == NULL) { 2388 + if (tex->image == NULL) { 2371 2389 DRM_ERROR("null texture image!\n"); 2372 2390 return -EINVAL; 2373 2391 } 2374 2392 2375 2393 if (DRM_COPY_FROM_USER(&image, 2376 - (drm_radeon_tex_image_t __user *) tex.image, 2394 + (drm_radeon_tex_image_t __user *) tex->image, 2377 2395 sizeof(image))) 2378 2396 return -EFAULT; 2379 2397 2380 2398 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2381 2399 VB_AGE_TEST_WITH_RETURN(dev_priv); 2382 2400 2383 - ret = radeon_cp_dispatch_texture(dev, file_priv, &tex, &image); 2401 + ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image); 2384 2402 2385 2403 COMMIT_RING(); 2386 2404 return ret; 2387 2405 } 2388 2406 2389 - static int radeon_cp_stipple(DRM_IOCTL_ARGS) 2407 + static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv) 2390 2408 { 2391 - DRM_DEVICE; 2392 2409 drm_radeon_private_t *dev_priv = dev->dev_private; 2393 - drm_radeon_stipple_t stipple; 2410 + drm_radeon_stipple_t *stipple = data; 2394 2411 u32 mask[32]; 2395 2412 2396 2413 LOCK_TEST_WITH_RETURN(dev, file_priv); 2397 
2414 2398 - DRM_COPY_FROM_USER_IOCTL(stipple, (drm_radeon_stipple_t __user *) data, 2399 - sizeof(stipple)); 2400 - 2401 - if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32))) 2415 + if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32))) 2402 2416 return -EFAULT; 2403 2417 2404 2418 RING_SPACE_TEST_WITH_RETURN(dev_priv); ··· 2401 2431 return 0; 2402 2432 } 2403 2433 2404 - static int radeon_cp_indirect(DRM_IOCTL_ARGS) 2434 + static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv) 2405 2435 { 2406 - DRM_DEVICE; 2407 2436 drm_radeon_private_t *dev_priv = dev->dev_private; 2408 2437 struct drm_device_dma *dma = dev->dma; 2409 2438 struct drm_buf *buf; 2410 - drm_radeon_indirect_t indirect; 2439 + drm_radeon_indirect_t *indirect = data; 2411 2440 RING_LOCALS; 2412 2441 2413 2442 LOCK_TEST_WITH_RETURN(dev, file_priv); 2414 2443 2415 - DRM_COPY_FROM_USER_IOCTL(indirect, 2416 - (drm_radeon_indirect_t __user *) data, 2417 - sizeof(indirect)); 2418 - 2419 2444 DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n", 2420 - indirect.idx, indirect.start, indirect.end, indirect.discard); 2445 + indirect->idx, indirect->start, indirect->end, 2446 + indirect->discard); 2421 2447 2422 - if (indirect.idx < 0 || indirect.idx >= dma->buf_count) { 2448 + if (indirect->idx < 0 || indirect->idx >= dma->buf_count) { 2423 2449 DRM_ERROR("buffer index %d (of %d max)\n", 2424 - indirect.idx, dma->buf_count - 1); 2450 + indirect->idx, dma->buf_count - 1); 2425 2451 return -EINVAL; 2426 2452 } 2427 2453 2428 - buf = dma->buflist[indirect.idx]; 2454 + buf = dma->buflist[indirect->idx]; 2429 2455 2430 2456 if (buf->file_priv != file_priv) { 2431 2457 DRM_ERROR("process %d using buffer owned by %p\n", ··· 2429 2463 return -EINVAL; 2430 2464 } 2431 2465 if (buf->pending) { 2432 - DRM_ERROR("sending pending buffer %d\n", indirect.idx); 2466 + DRM_ERROR("sending pending buffer %d\n", indirect->idx); 2433 2467 return -EINVAL; 2434 2468 } 2435 2469 2436 
- if (indirect.start < buf->used) { 2470 + if (indirect->start < buf->used) { 2437 2471 DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", 2438 - indirect.start, buf->used); 2472 + indirect->start, buf->used); 2439 2473 return -EINVAL; 2440 2474 } 2441 2475 2442 2476 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2443 2477 VB_AGE_TEST_WITH_RETURN(dev_priv); 2444 2478 2445 - buf->used = indirect.end; 2479 + buf->used = indirect->end; 2446 2480 2447 2481 /* Wait for the 3D stream to idle before the indirect buffer 2448 2482 * containing 2D acceleration commands is processed. ··· 2457 2491 * X server. This is insecure and is thus only available to 2458 2492 * privileged clients. 2459 2493 */ 2460 - radeon_cp_dispatch_indirect(dev, buf, indirect.start, indirect.end); 2461 - if (indirect.discard) { 2494 + radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end); 2495 + if (indirect->discard) { 2462 2496 radeon_cp_discard_buffer(dev, buf); 2463 2497 } 2464 2498 ··· 2466 2500 return 0; 2467 2501 } 2468 2502 2469 - static int radeon_cp_vertex2(DRM_IOCTL_ARGS) 2503 + static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv) 2470 2504 { 2471 - DRM_DEVICE; 2472 2505 drm_radeon_private_t *dev_priv = dev->dev_private; 2473 2506 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; 2474 2507 struct drm_device_dma *dma = dev->dma; 2475 2508 struct drm_buf *buf; 2476 - drm_radeon_vertex2_t vertex; 2509 + drm_radeon_vertex2_t *vertex = data; 2477 2510 int i; 2478 2511 unsigned char laststate; 2479 2512 2480 2513 LOCK_TEST_WITH_RETURN(dev, file_priv); 2481 2514 2482 - DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex2_t __user *) data, 2483 - sizeof(vertex)); 2484 - 2485 2515 DRM_DEBUG("pid=%d index=%d discard=%d\n", 2486 - DRM_CURRENTPID, vertex.idx, vertex.discard); 2516 + DRM_CURRENTPID, vertex->idx, vertex->discard); 2487 2517 2488 - if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { 2518 + if (vertex->idx < 0 || vertex->idx >= 
dma->buf_count) { 2489 2519 DRM_ERROR("buffer index %d (of %d max)\n", 2490 - vertex.idx, dma->buf_count - 1); 2520 + vertex->idx, dma->buf_count - 1); 2491 2521 return -EINVAL; 2492 2522 } 2493 2523 2494 2524 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2495 2525 VB_AGE_TEST_WITH_RETURN(dev_priv); 2496 2526 2497 - buf = dma->buflist[vertex.idx]; 2527 + buf = dma->buflist[vertex->idx]; 2498 2528 2499 2529 if (buf->file_priv != file_priv) { 2500 2530 DRM_ERROR("process %d using buffer owned by %p\n", ··· 2499 2537 } 2500 2538 2501 2539 if (buf->pending) { 2502 - DRM_ERROR("sending pending buffer %d\n", vertex.idx); 2540 + DRM_ERROR("sending pending buffer %d\n", vertex->idx); 2503 2541 return -EINVAL; 2504 2542 } 2505 2543 2506 2544 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) 2507 2545 return -EINVAL; 2508 2546 2509 - for (laststate = 0xff, i = 0; i < vertex.nr_prims; i++) { 2547 + for (laststate = 0xff, i = 0; i < vertex->nr_prims; i++) { 2510 2548 drm_radeon_prim_t prim; 2511 2549 drm_radeon_tcl_prim_t tclprim; 2512 2550 2513 - if (DRM_COPY_FROM_USER(&prim, &vertex.prim[i], sizeof(prim))) 2551 + if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim))) 2514 2552 return -EFAULT; 2515 2553 2516 2554 if (prim.stateidx != laststate) { 2517 2555 drm_radeon_state_t state; 2518 2556 2519 2557 if (DRM_COPY_FROM_USER(&state, 2520 - &vertex.state[prim.stateidx], 2558 + &vertex->state[prim.stateidx], 2521 2559 sizeof(state))) 2522 2560 return -EFAULT; 2523 2561 ··· 2550 2588 sarea_priv->nbox = 0; 2551 2589 } 2552 2590 2553 - if (vertex.discard) { 2591 + if (vertex->discard) { 2554 2592 radeon_cp_discard_buffer(dev, buf); 2555 2593 } 2556 2594 ··· 2801 2839 return 0; 2802 2840 } 2803 2841 2804 - static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS) 2842 + static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv) 2805 2843 { 2806 - DRM_DEVICE; 2807 2844 drm_radeon_private_t *dev_priv = dev->dev_private; 2808 2845 struct drm_device_dma *dma = 
dev->dma; 2809 2846 struct drm_buf *buf = NULL; 2810 2847 int idx; 2811 - drm_radeon_kcmd_buffer_t cmdbuf; 2848 + drm_radeon_kcmd_buffer_t *cmdbuf = data; 2812 2849 drm_radeon_cmd_header_t header; 2813 2850 int orig_nbox, orig_bufsz; 2814 2851 char *kbuf = NULL; 2815 2852 2816 2853 LOCK_TEST_WITH_RETURN(dev, file_priv); 2817 2854 2818 - DRM_COPY_FROM_USER_IOCTL(cmdbuf, 2819 - (drm_radeon_cmd_buffer_t __user *) data, 2820 - sizeof(cmdbuf)); 2821 - 2822 2855 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2823 2856 VB_AGE_TEST_WITH_RETURN(dev_priv); 2824 2857 2825 - if (cmdbuf.bufsz > 64 * 1024 || cmdbuf.bufsz < 0) { 2858 + if (cmdbuf->bufsz > 64 * 1024 || cmdbuf->bufsz < 0) { 2826 2859 return -EINVAL; 2827 2860 } 2828 2861 ··· 2825 2868 * races between checking values and using those values in other code, 2826 2869 * and simply to avoid a lot of function calls to copy in data. 2827 2870 */ 2828 - orig_bufsz = cmdbuf.bufsz; 2871 + orig_bufsz = cmdbuf->bufsz; 2829 2872 if (orig_bufsz != 0) { 2830 - kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER); 2873 + kbuf = drm_alloc(cmdbuf->bufsz, DRM_MEM_DRIVER); 2831 2874 if (kbuf == NULL) 2832 2875 return -ENOMEM; 2833 - if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf, 2834 - cmdbuf.bufsz)) { 2876 + if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf, 2877 + cmdbuf->bufsz)) { 2835 2878 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); 2836 2879 return -EFAULT; 2837 2880 } 2838 - cmdbuf.buf = kbuf; 2881 + cmdbuf->buf = kbuf; 2839 2882 } 2840 2883 2841 - orig_nbox = cmdbuf.nbox; 2884 + orig_nbox = cmdbuf->nbox; 2842 2885 2843 2886 if (dev_priv->microcode_version == UCODE_R300) { 2844 2887 int temp; 2845 - temp = r300_do_cp_cmdbuf(dev, file_priv, &cmdbuf); 2888 + temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); 2846 2889 2847 2890 if (orig_bufsz != 0) 2848 2891 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); ··· 2851 2894 } 2852 2895 2853 2896 /* microcode_version != r300 */ 2854 - while (cmdbuf.bufsz >= sizeof(header)) { 2897 + while 
(cmdbuf->bufsz >= sizeof(header)) { 2855 2898 2856 - header.i = *(int *)cmdbuf.buf; 2857 - cmdbuf.buf += sizeof(header); 2858 - cmdbuf.bufsz -= sizeof(header); 2899 + header.i = *(int *)cmdbuf->buf; 2900 + cmdbuf->buf += sizeof(header); 2901 + cmdbuf->bufsz -= sizeof(header); 2859 2902 2860 2903 switch (header.header.cmd_type) { 2861 2904 case RADEON_CMD_PACKET: 2862 2905 DRM_DEBUG("RADEON_CMD_PACKET\n"); 2863 2906 if (radeon_emit_packets 2864 - (dev_priv, file_priv, header, &cmdbuf)) { 2907 + (dev_priv, file_priv, header, cmdbuf)) { 2865 2908 DRM_ERROR("radeon_emit_packets failed\n"); 2866 2909 goto err; 2867 2910 } ··· 2869 2912 2870 2913 case RADEON_CMD_SCALARS: 2871 2914 DRM_DEBUG("RADEON_CMD_SCALARS\n"); 2872 - if (radeon_emit_scalars(dev_priv, header, &cmdbuf)) { 2915 + if (radeon_emit_scalars(dev_priv, header, cmdbuf)) { 2873 2916 DRM_ERROR("radeon_emit_scalars failed\n"); 2874 2917 goto err; 2875 2918 } ··· 2877 2920 2878 2921 case RADEON_CMD_VECTORS: 2879 2922 DRM_DEBUG("RADEON_CMD_VECTORS\n"); 2880 - if (radeon_emit_vectors(dev_priv, header, &cmdbuf)) { 2923 + if (radeon_emit_vectors(dev_priv, header, cmdbuf)) { 2881 2924 DRM_ERROR("radeon_emit_vectors failed\n"); 2882 2925 goto err; 2883 2926 } ··· 2905 2948 2906 2949 case RADEON_CMD_PACKET3: 2907 2950 DRM_DEBUG("RADEON_CMD_PACKET3\n"); 2908 - if (radeon_emit_packet3(dev, file_priv, &cmdbuf)) { 2951 + if (radeon_emit_packet3(dev, file_priv, cmdbuf)) { 2909 2952 DRM_ERROR("radeon_emit_packet3 failed\n"); 2910 2953 goto err; 2911 2954 } ··· 2914 2957 case RADEON_CMD_PACKET3_CLIP: 2915 2958 DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n"); 2916 2959 if (radeon_emit_packet3_cliprect 2917 - (dev, file_priv, &cmdbuf, orig_nbox)) { 2960 + (dev, file_priv, cmdbuf, orig_nbox)) { 2918 2961 DRM_ERROR("radeon_emit_packet3_clip failed\n"); 2919 2962 goto err; 2920 2963 } ··· 2922 2965 2923 2966 case RADEON_CMD_SCALARS2: 2924 2967 DRM_DEBUG("RADEON_CMD_SCALARS2\n"); 2925 - if (radeon_emit_scalars2(dev_priv, header, &cmdbuf)) { 
2968 + if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) { 2926 2969 DRM_ERROR("radeon_emit_scalars2 failed\n"); 2927 2970 goto err; 2928 2971 } ··· 2937 2980 break; 2938 2981 case RADEON_CMD_VECLINEAR: 2939 2982 DRM_DEBUG("RADEON_CMD_VECLINEAR\n"); 2940 - if (radeon_emit_veclinear(dev_priv, header, &cmdbuf)) { 2983 + if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) { 2941 2984 DRM_ERROR("radeon_emit_veclinear failed\n"); 2942 2985 goto err; 2943 2986 } ··· 2946 2989 default: 2947 2990 DRM_ERROR("bad cmd_type %d at %p\n", 2948 2991 header.header.cmd_type, 2949 - cmdbuf.buf - sizeof(header)); 2992 + cmdbuf->buf - sizeof(header)); 2950 2993 goto err; 2951 2994 } 2952 2995 } ··· 2964 3007 return -EINVAL; 2965 3008 } 2966 3009 2967 - static int radeon_cp_getparam(DRM_IOCTL_ARGS) 3010 + static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) 2968 3011 { 2969 - DRM_DEVICE; 2970 3012 drm_radeon_private_t *dev_priv = dev->dev_private; 2971 - drm_radeon_getparam_t param; 3013 + drm_radeon_getparam_t *param = data; 2972 3014 int value; 2973 - 2974 - DRM_COPY_FROM_USER_IOCTL(param, (drm_radeon_getparam_t __user *) data, 2975 - sizeof(param)); 2976 3015 2977 3016 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); 2978 3017 2979 - switch (param.param) { 3018 + switch (param->param) { 2980 3019 case RADEON_PARAM_GART_BUFFER_OFFSET: 2981 3020 value = dev_priv->gart_buffers_offset; 2982 3021 break; ··· 3034 3081 value = radeon_vblank_crtc_get(dev); 3035 3082 break; 3036 3083 default: 3037 - DRM_DEBUG("Invalid parameter %d\n", param.param); 3084 + DRM_DEBUG("Invalid parameter %d\n", param->param); 3038 3085 return -EINVAL; 3039 3086 } 3040 3087 3041 - if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { 3088 + if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { 3042 3089 DRM_ERROR("copy_to_user\n"); 3043 3090 return -EFAULT; 3044 3091 } ··· 3046 3093 return 0; 3047 3094 } 3048 3095 3049 - static int radeon_cp_setparam(DRM_IOCTL_ARGS) 3096 
+ static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv) 3050 3097 { 3051 - DRM_DEVICE; 3052 3098 drm_radeon_private_t *dev_priv = dev->dev_private; 3053 - drm_radeon_setparam_t sp; 3099 + drm_radeon_setparam_t *sp = data; 3054 3100 struct drm_radeon_driver_file_fields *radeon_priv; 3055 3101 3056 - DRM_COPY_FROM_USER_IOCTL(sp, (drm_radeon_setparam_t __user *) data, 3057 - sizeof(sp)); 3058 - 3059 - switch (sp.param) { 3102 + switch (sp->param) { 3060 3103 case RADEON_SETPARAM_FB_LOCATION: 3061 3104 radeon_priv = file_priv->driver_priv; 3062 - radeon_priv->radeon_fb_delta = dev_priv->fb_location - sp.value; 3105 + radeon_priv->radeon_fb_delta = dev_priv->fb_location - 3106 + sp->value; 3063 3107 break; 3064 3108 case RADEON_SETPARAM_SWITCH_TILING: 3065 - if (sp.value == 0) { 3109 + if (sp->value == 0) { 3066 3110 DRM_DEBUG("color tiling disabled\n"); 3067 3111 dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO; 3068 3112 dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO; 3069 3113 dev_priv->sarea_priv->tiling_enabled = 0; 3070 - } else if (sp.value == 1) { 3114 + } else if (sp->value == 1) { 3071 3115 DRM_DEBUG("color tiling enabled\n"); 3072 3116 dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO; 3073 3117 dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO; ··· 3072 3122 } 3073 3123 break; 3074 3124 case RADEON_SETPARAM_PCIGART_LOCATION: 3075 - dev_priv->pcigart_offset = sp.value; 3125 + dev_priv->pcigart_offset = sp->value; 3076 3126 dev_priv->pcigart_offset_set = 1; 3077 3127 break; 3078 3128 case RADEON_SETPARAM_NEW_MEMMAP: 3079 - dev_priv->new_memmap = sp.value; 3129 + dev_priv->new_memmap = sp->value; 3080 3130 break; 3081 3131 case RADEON_SETPARAM_PCIGART_TABLE_SIZE: 3082 - dev_priv->gart_info.table_size = sp.value; 3132 + dev_priv->gart_info.table_size = sp->value; 3083 3133 if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE) 3084 3134 dev_priv->gart_info.table_size = 
RADEON_PCIGART_TABLE_SIZE; 3085 3135 break; 3086 3136 case RADEON_SETPARAM_VBLANK_CRTC: 3087 - return radeon_vblank_crtc_set(dev, sp.value); 3137 + return radeon_vblank_crtc_set(dev, sp->value); 3088 3138 break; 3089 3139 default: 3090 - DRM_DEBUG("Invalid parameter %d\n", sp.param); 3140 + DRM_DEBUG("Invalid parameter %d\n", sp->param); 3091 3141 return -EINVAL; 3092 3142 } 3093 3143 ··· 3155 3205 drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES); 3156 3206 } 3157 3207 3158 - drm_ioctl_desc_t radeon_ioctls[] = { 3159 - [DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3160 - [DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3161 - [DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3162 - [DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = {radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3163 - [DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = {radeon_cp_idle, DRM_AUTH}, 3164 - [DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = {radeon_cp_resume, DRM_AUTH}, 3165 - [DRM_IOCTL_NR(DRM_RADEON_RESET)] = {radeon_engine_reset, DRM_AUTH}, 3166 - [DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = {radeon_fullscreen, DRM_AUTH}, 3167 - [DRM_IOCTL_NR(DRM_RADEON_SWAP)] = {radeon_cp_swap, DRM_AUTH}, 3168 - [DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = {radeon_cp_clear, DRM_AUTH}, 3169 - [DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = {radeon_cp_vertex, DRM_AUTH}, 3170 - [DRM_IOCTL_NR(DRM_RADEON_INDICES)] = {radeon_cp_indices, DRM_AUTH}, 3171 - [DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = {radeon_cp_texture, DRM_AUTH}, 3172 - [DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = {radeon_cp_stipple, DRM_AUTH}, 3173 - [DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = {radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3174 - [DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = {radeon_cp_vertex2, DRM_AUTH}, 3175 - [DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = {radeon_cp_cmdbuf, DRM_AUTH}, 3176 - [DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = {radeon_cp_getparam, DRM_AUTH}, 3177 - 
[DRM_IOCTL_NR(DRM_RADEON_FLIP)] = {radeon_cp_flip, DRM_AUTH}, 3178 - [DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = {radeon_mem_alloc, DRM_AUTH}, 3179 - [DRM_IOCTL_NR(DRM_RADEON_FREE)] = {radeon_mem_free, DRM_AUTH}, 3180 - [DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = {radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 3181 - [DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = {radeon_irq_emit, DRM_AUTH}, 3182 - [DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = {radeon_irq_wait, DRM_AUTH}, 3183 - [DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, DRM_AUTH}, 3184 - [DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] = {radeon_surface_alloc, DRM_AUTH}, 3185 - [DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] = {radeon_surface_free, DRM_AUTH} 3208 + struct drm_ioctl_desc radeon_ioctls[] = { 3209 + DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 3210 + DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 3211 + DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 3212 + DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 3213 + DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH), 3214 + DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH), 3215 + DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH), 3216 + DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH), 3217 + DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH), 3218 + DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH), 3219 + DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH), 3220 + DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH), 3221 + DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH), 3222 + DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH), 3223 + DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 3224 + DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH), 3225 + 
DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH), 3226 + DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH), 3227 + DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH), 3228 + DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH), 3229 + DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH), 3230 + DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 3231 + DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH), 3232 + DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH), 3233 + DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH), 3234 + DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH), 3235 + DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH) 3186 3236 }; 3187 3237 3188 3238 int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
+26 -41
drivers/char/drm/savage_bci.c
··· 928 928 return 0; 929 929 } 930 930 931 - static int savage_bci_init(DRM_IOCTL_ARGS) 931 + static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv) 932 932 { 933 - DRM_DEVICE; 934 - drm_savage_init_t init; 933 + drm_savage_init_t *init = data; 935 934 936 935 LOCK_TEST_WITH_RETURN(dev, file_priv); 937 936 938 - DRM_COPY_FROM_USER_IOCTL(init, (drm_savage_init_t __user *) data, 939 - sizeof(init)); 940 - 941 - switch (init.func) { 937 + switch (init->func) { 942 938 case SAVAGE_INIT_BCI: 943 - return savage_do_init_bci(dev, &init); 939 + return savage_do_init_bci(dev, init); 944 940 case SAVAGE_CLEANUP_BCI: 945 941 return savage_do_cleanup_bci(dev); 946 942 } ··· 944 948 return -EINVAL; 945 949 } 946 950 947 - static int savage_bci_event_emit(DRM_IOCTL_ARGS) 951 + static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv) 948 952 { 949 - DRM_DEVICE; 950 953 drm_savage_private_t *dev_priv = dev->dev_private; 951 - drm_savage_event_emit_t event; 954 + drm_savage_event_emit_t *event = data; 952 955 953 956 DRM_DEBUG("\n"); 954 957 955 958 LOCK_TEST_WITH_RETURN(dev, file_priv); 956 959 957 - DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_emit_t __user *) data, 958 - sizeof(event)); 960 + event->count = savage_bci_emit_event(dev_priv, event->flags); 961 + event->count |= dev_priv->event_wrap << 16; 959 962 960 - event.count = savage_bci_emit_event(dev_priv, event.flags); 961 - event.count |= dev_priv->event_wrap << 16; 962 - DRM_COPY_TO_USER_IOCTL((drm_savage_event_emit_t __user *) data, 963 - event, sizeof(event)); 964 963 return 0; 965 964 } 966 965 967 - static int savage_bci_event_wait(DRM_IOCTL_ARGS) 966 + static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv) 968 967 { 969 - DRM_DEVICE; 970 968 drm_savage_private_t *dev_priv = dev->dev_private; 971 - drm_savage_event_wait_t event; 969 + drm_savage_event_wait_t *event = data; 972 970 unsigned int 
event_e, hw_e; 973 971 unsigned int event_w, hw_w; 974 972 ··· 980 990 if (hw_e > dev_priv->event_counter) 981 991 hw_w--; /* hardware hasn't passed the last wrap yet */ 982 992 983 - event_e = event.count & 0xffff; 984 - event_w = event.count >> 16; 993 + event_e = event->count & 0xffff; 994 + event_w = event->count >> 16; 985 995 986 996 /* Don't need to wait if 987 997 * - event counter wrapped since the event was emitted or ··· 1023 1033 return 0; 1024 1034 } 1025 1035 1026 - int savage_bci_buffers(DRM_IOCTL_ARGS) 1036 + int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) 1027 1037 { 1028 - DRM_DEVICE; 1029 1038 struct drm_device_dma *dma = dev->dma; 1030 - struct drm_dma d; 1039 + struct drm_dma *d = data; 1031 1040 int ret = 0; 1032 1041 1033 1042 LOCK_TEST_WITH_RETURN(dev, file_priv); 1034 1043 1035 - DRM_COPY_FROM_USER_IOCTL(d, (struct drm_dma __user *) data, sizeof(d)); 1036 - 1037 1044 /* Please don't send us buffers. 1038 1045 */ 1039 - if (d.send_count != 0) { 1046 + if (d->send_count != 0) { 1040 1047 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", 1041 - DRM_CURRENTPID, d.send_count); 1048 + DRM_CURRENTPID, d->send_count); 1042 1049 return -EINVAL; 1043 1050 } 1044 1051 1045 1052 /* We'll send you buffers. 
1046 1053 */ 1047 - if (d.request_count < 0 || d.request_count > dma->buf_count) { 1054 + if (d->request_count < 0 || d->request_count > dma->buf_count) { 1048 1055 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", 1049 - DRM_CURRENTPID, d.request_count, dma->buf_count); 1056 + DRM_CURRENTPID, d->request_count, dma->buf_count); 1050 1057 return -EINVAL; 1051 1058 } 1052 1059 1053 - d.granted_count = 0; 1060 + d->granted_count = 0; 1054 1061 1055 - if (d.request_count) { 1056 - ret = savage_bci_get_buffers(dev, file_priv, &d); 1062 + if (d->request_count) { 1063 + ret = savage_bci_get_buffers(dev, file_priv, d); 1057 1064 } 1058 - 1059 - DRM_COPY_TO_USER_IOCTL((struct drm_dma __user *) data, d, sizeof(d)); 1060 1065 1061 1066 return ret; 1062 1067 } ··· 1088 1103 drm_core_reclaim_buffers(dev, file_priv); 1089 1104 } 1090 1105 1091 - drm_ioctl_desc_t savage_ioctls[] = { 1092 - [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 1093 - [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, DRM_AUTH}, 1094 - [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, DRM_AUTH}, 1095 - [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, DRM_AUTH}, 1106 + struct drm_ioctl_desc savage_ioctls[] = { 1107 + DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1108 + DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), 1109 + DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), 1110 + DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH), 1096 1111 }; 1097 1112 1098 1113 int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
+3 -3
drivers/char/drm/savage_drv.h
··· 104 104 S3_LAST 105 105 }; 106 106 107 - extern drm_ioctl_desc_t savage_ioctls[]; 107 + extern struct drm_ioctl_desc savage_ioctls[]; 108 108 extern int savage_max_ioctl; 109 109 110 110 #define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) ··· 197 197 } drm_savage_private_t; 198 198 199 199 /* ioctls */ 200 - extern int savage_bci_cmdbuf(DRM_IOCTL_ARGS); 201 - extern int savage_bci_buffers(DRM_IOCTL_ARGS); 200 + extern int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv); 201 + extern int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); 202 202 203 203 /* BCI functions */ 204 204 extern uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
+50 -52
drivers/char/drm/savage_state.c
··· 953 953 return 0; 954 954 } 955 955 956 - int savage_bci_cmdbuf(DRM_IOCTL_ARGS) 956 + int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv) 957 957 { 958 - DRM_DEVICE; 959 958 drm_savage_private_t *dev_priv = dev->dev_private; 960 959 struct drm_device_dma *dma = dev->dma; 961 960 struct drm_buf *dmabuf; 962 - drm_savage_cmdbuf_t cmdbuf; 961 + drm_savage_cmdbuf_t *cmdbuf = data; 963 962 drm_savage_cmd_header_t *kcmd_addr = NULL; 964 963 drm_savage_cmd_header_t *first_draw_cmd; 965 964 unsigned int *kvb_addr = NULL; ··· 970 971 971 972 LOCK_TEST_WITH_RETURN(dev, file_priv); 972 973 973 - DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_savage_cmdbuf_t __user *) data, 974 - sizeof(cmdbuf)); 975 - 976 974 if (dma && dma->buflist) { 977 - if (cmdbuf.dma_idx > dma->buf_count) { 975 + if (cmdbuf->dma_idx > dma->buf_count) { 978 976 DRM_ERROR 979 977 ("vertex buffer index %u out of range (0-%u)\n", 980 - cmdbuf.dma_idx, dma->buf_count - 1); 978 + cmdbuf->dma_idx, dma->buf_count - 1); 981 979 return -EINVAL; 982 980 } 983 - dmabuf = dma->buflist[cmdbuf.dma_idx]; 981 + dmabuf = dma->buflist[cmdbuf->dma_idx]; 984 982 } else { 985 983 dmabuf = NULL; 986 984 } ··· 987 991 * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct 988 992 * for locking on FreeBSD. 
989 993 */ 990 - if (cmdbuf.size) { 991 - kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER); 994 + if (cmdbuf->size) { 995 + kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER); 992 996 if (kcmd_addr == NULL) 993 997 return -ENOMEM; 994 998 995 - if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr, 996 - cmdbuf.size * 8)) 999 + if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr, 1000 + cmdbuf->size * 8)) 997 1001 { 998 - drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); 1002 + drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER); 999 1003 return -EFAULT; 1000 1004 } 1001 - cmdbuf.cmd_addr = kcmd_addr; 1005 + cmdbuf->cmd_addr = kcmd_addr; 1002 1006 } 1003 - if (cmdbuf.vb_size) { 1004 - kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER); 1007 + if (cmdbuf->vb_size) { 1008 + kvb_addr = drm_alloc(cmdbuf->vb_size, DRM_MEM_DRIVER); 1005 1009 if (kvb_addr == NULL) { 1006 1010 ret = -ENOMEM; 1007 1011 goto done; 1008 1012 } 1009 1013 1010 - if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr, 1011 - cmdbuf.vb_size)) { 1014 + if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr, 1015 + cmdbuf->vb_size)) { 1012 1016 ret = -EFAULT; 1013 1017 goto done; 1014 1018 } 1015 - cmdbuf.vb_addr = kvb_addr; 1019 + cmdbuf->vb_addr = kvb_addr; 1016 1020 } 1017 - if (cmdbuf.nbox) { 1018 - kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(struct drm_clip_rect), 1021 + if (cmdbuf->nbox) { 1022 + kbox_addr = drm_alloc(cmdbuf->nbox * sizeof(struct drm_clip_rect), 1019 1023 DRM_MEM_DRIVER); 1020 1024 if (kbox_addr == NULL) { 1021 1025 ret = -ENOMEM; 1022 1026 goto done; 1023 1027 } 1024 1028 1025 - if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr, 1026 - cmdbuf.nbox * sizeof(struct drm_clip_rect))) { 1029 + if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr, 1030 + cmdbuf->nbox * sizeof(struct drm_clip_rect))) { 1027 1031 ret = -EFAULT; 1028 1032 goto done; 1029 1033 } 1030 - cmdbuf.box_addr = kbox_addr; 1034 + cmdbuf->box_addr = kbox_addr; 1031 1035 } 1032 1036 1033 1037 /* Make sure writes 
to DMA buffers are finished before sending ··· 1040 1044 1041 1045 i = 0; 1042 1046 first_draw_cmd = NULL; 1043 - while (i < cmdbuf.size) { 1047 + while (i < cmdbuf->size) { 1044 1048 drm_savage_cmd_header_t cmd_header; 1045 - cmd_header = *(drm_savage_cmd_header_t *)cmdbuf.cmd_addr; 1046 - cmdbuf.cmd_addr++; 1049 + cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr; 1050 + cmdbuf->cmd_addr++; 1047 1051 i++; 1048 1052 1049 1053 /* Group drawing commands with same state to minimize ··· 1053 1057 case SAVAGE_CMD_DMA_IDX: 1054 1058 case SAVAGE_CMD_VB_IDX: 1055 1059 j = (cmd_header.idx.count + 3) / 4; 1056 - if (i + j > cmdbuf.size) { 1060 + if (i + j > cmdbuf->size) { 1057 1061 DRM_ERROR("indexed drawing command extends " 1058 1062 "beyond end of command buffer\n"); 1059 1063 DMA_FLUSH(); ··· 1063 1067 case SAVAGE_CMD_DMA_PRIM: 1064 1068 case SAVAGE_CMD_VB_PRIM: 1065 1069 if (!first_draw_cmd) 1066 - first_draw_cmd = cmdbuf.cmd_addr - 1; 1067 - cmdbuf.cmd_addr += j; 1070 + first_draw_cmd = cmdbuf->cmd_addr - 1; 1071 + cmdbuf->cmd_addr += j; 1068 1072 i += j; 1069 1073 break; 1070 1074 default: 1071 1075 if (first_draw_cmd) { 1072 1076 ret = savage_dispatch_draw( 1073 1077 dev_priv, first_draw_cmd, 1074 - cmdbuf.cmd_addr - 1, 1075 - dmabuf, cmdbuf.vb_addr, cmdbuf.vb_size, 1076 - cmdbuf.vb_stride, 1077 - cmdbuf.nbox, cmdbuf.box_addr); 1078 + cmdbuf->cmd_addr - 1, 1079 + dmabuf, cmdbuf->vb_addr, cmdbuf->vb_size, 1080 + cmdbuf->vb_stride, 1081 + cmdbuf->nbox, cmdbuf->box_addr); 1078 1082 if (ret != 0) 1079 1083 return ret; 1080 1084 first_draw_cmd = NULL; ··· 1086 1090 switch (cmd_header.cmd.cmd) { 1087 1091 case SAVAGE_CMD_STATE: 1088 1092 j = (cmd_header.state.count + 1) / 2; 1089 - if (i + j > cmdbuf.size) { 1093 + if (i + j > cmdbuf->size) { 1090 1094 DRM_ERROR("command SAVAGE_CMD_STATE extends " 1091 1095 "beyond end of command buffer\n"); 1092 1096 DMA_FLUSH(); ··· 1094 1098 goto done; 1095 1099 } 1096 1100 ret = savage_dispatch_state(dev_priv, &cmd_header, 
1097 - (const uint32_t *)cmdbuf.cmd_addr); 1098 - cmdbuf.cmd_addr += j; 1101 + (const uint32_t *)cmdbuf->cmd_addr); 1102 + cmdbuf->cmd_addr += j; 1099 1103 i += j; 1100 1104 break; 1101 1105 case SAVAGE_CMD_CLEAR: 1102 - if (i + 1 > cmdbuf.size) { 1106 + if (i + 1 > cmdbuf->size) { 1103 1107 DRM_ERROR("command SAVAGE_CMD_CLEAR extends " 1104 1108 "beyond end of command buffer\n"); 1105 1109 DMA_FLUSH(); ··· 1107 1111 goto done; 1108 1112 } 1109 1113 ret = savage_dispatch_clear(dev_priv, &cmd_header, 1110 - cmdbuf.cmd_addr, 1111 - cmdbuf.nbox, cmdbuf.box_addr); 1112 - cmdbuf.cmd_addr++; 1114 + cmdbuf->cmd_addr, 1115 + cmdbuf->nbox, 1116 + cmdbuf->box_addr); 1117 + cmdbuf->cmd_addr++; 1113 1118 i++; 1114 1119 break; 1115 1120 case SAVAGE_CMD_SWAP: 1116 - ret = savage_dispatch_swap(dev_priv, cmdbuf.nbox, 1117 - cmdbuf.box_addr); 1121 + ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox, 1122 + cmdbuf->box_addr); 1118 1123 break; 1119 1124 default: 1120 - DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd); 1125 + DRM_ERROR("invalid command 0x%x\n", 1126 + cmd_header.cmd.cmd); 1121 1127 DMA_FLUSH(); 1122 1128 ret = -EINVAL; 1123 1129 goto done; ··· 1133 1135 1134 1136 if (first_draw_cmd) { 1135 1137 ret = savage_dispatch_draw ( 1136 - dev_priv, first_draw_cmd, cmdbuf.cmd_addr, dmabuf, 1137 - cmdbuf.vb_addr, cmdbuf.vb_size, cmdbuf.vb_stride, 1138 - cmdbuf.nbox, cmdbuf.box_addr); 1138 + dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf, 1139 + cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride, 1140 + cmdbuf->nbox, cmdbuf->box_addr); 1139 1141 if (ret != 0) { 1140 1142 DMA_FLUSH(); 1141 1143 goto done; ··· 1144 1146 1145 1147 DMA_FLUSH(); 1146 1148 1147 - if (dmabuf && cmdbuf.discard) { 1149 + if (dmabuf && cmdbuf->discard) { 1148 1150 drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private; 1149 1151 uint16_t event; 1150 1152 event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D); ··· 1154 1156 1155 1157 done: 1156 1158 /* If we didn't need to allocate them, 
these'll be NULL */ 1157 - drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); 1158 - drm_free(kvb_addr, cmdbuf.vb_size, DRM_MEM_DRIVER); 1159 - drm_free(kbox_addr, cmdbuf.nbox * sizeof(struct drm_clip_rect), 1159 + drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER); 1160 + drm_free(kvb_addr, cmdbuf->vb_size, DRM_MEM_DRIVER); 1161 + drm_free(kbox_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect), 1160 1162 DRM_MEM_DRIVER); 1161 1163 1162 1164 return ret;
+1 -1
drivers/char/drm/sis_drv.h
··· 67 67 struct drm_file *file_priv); 68 68 extern void sis_lastclose(struct drm_device *dev); 69 69 70 - extern drm_ioctl_desc_t sis_ioctls[]; 70 + extern struct drm_ioctl_desc sis_ioctls[]; 71 71 extern int sis_max_ioctl; 72 72 73 73 #endif
+38 -54
drivers/char/drm/sis_mm.c
··· 82 82 83 83 #endif /* CONFIG_FB_SIS */ 84 84 85 - static int sis_fb_init(DRM_IOCTL_ARGS) 85 + static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv) 86 86 { 87 - DRM_DEVICE; 88 87 drm_sis_private_t *dev_priv = dev->dev_private; 89 - drm_sis_fb_t fb; 88 + drm_sis_fb_t *fb = data; 90 89 int ret; 91 - 92 - DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t __user *) data, sizeof(fb)); 93 90 94 91 mutex_lock(&dev->struct_mutex); 95 92 #if defined(CONFIG_FB_SIS) ··· 102 105 } 103 106 #else 104 107 ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0, 105 - fb.size >> SIS_MM_ALIGN_SHIFT); 108 + fb->size >> SIS_MM_ALIGN_SHIFT); 106 109 #endif 107 110 108 111 if (ret) { ··· 112 115 } 113 116 114 117 dev_priv->vram_initialized = 1; 115 - dev_priv->vram_offset = fb.offset; 118 + dev_priv->vram_offset = fb->offset; 116 119 117 120 mutex_unlock(&dev->struct_mutex); 118 - DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size); 121 + DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size); 119 122 120 123 return 0; 121 124 } 122 125 123 126 static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv, 124 - unsigned long data, int pool) 127 + void *data, int pool) 125 128 { 126 129 drm_sis_private_t *dev_priv = dev->dev_private; 127 - drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *) data; 128 - drm_sis_mem_t mem; 130 + drm_sis_mem_t *mem = data; 129 131 int retval = 0; 130 132 struct drm_memblock_item *item; 131 - 132 - DRM_COPY_FROM_USER_IOCTL(mem, argp, sizeof(mem)); 133 133 134 134 mutex_lock(&dev->struct_mutex); 135 135 ··· 137 143 return -EINVAL; 138 144 } 139 145 140 - mem.size = (mem.size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT; 141 - item = drm_sman_alloc(&dev_priv->sman, pool, mem.size, 0, 146 + mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT; 147 + item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0, 142 148 (unsigned long)file_priv); 143 149 144 150 mutex_unlock(&dev->struct_mutex); 145 
151 if (item) { 146 - mem.offset = ((pool == 0) ? 152 + mem->offset = ((pool == 0) ? 147 153 dev_priv->vram_offset : dev_priv->agp_offset) + 148 154 (item->mm-> 149 155 offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT); 150 - mem.free = item->user_hash.key; 151 - mem.size = mem.size << SIS_MM_ALIGN_SHIFT; 156 + mem->free = item->user_hash.key; 157 + mem->size = mem->size << SIS_MM_ALIGN_SHIFT; 152 158 } else { 153 - mem.offset = 0; 154 - mem.size = 0; 155 - mem.free = 0; 159 + mem->offset = 0; 160 + mem->size = 0; 161 + mem->free = 0; 156 162 retval = -ENOMEM; 157 163 } 158 164 159 - DRM_COPY_TO_USER_IOCTL(argp, mem, sizeof(mem)); 160 - 161 - DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem.size, 162 - mem.offset); 165 + DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size, 166 + mem->offset); 163 167 164 168 return retval; 165 169 } 166 170 167 - static int sis_drm_free(DRM_IOCTL_ARGS) 171 + static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *file_priv) 168 172 { 169 - DRM_DEVICE; 170 173 drm_sis_private_t *dev_priv = dev->dev_private; 171 - drm_sis_mem_t mem; 174 + drm_sis_mem_t *mem = data; 172 175 int ret; 173 176 174 - DRM_COPY_FROM_USER_IOCTL(mem, (drm_sis_mem_t __user *) data, 175 - sizeof(mem)); 176 - 177 177 mutex_lock(&dev->struct_mutex); 178 - ret = drm_sman_free_key(&dev_priv->sman, mem.free); 178 + ret = drm_sman_free_key(&dev_priv->sman, mem->free); 179 179 mutex_unlock(&dev->struct_mutex); 180 - DRM_DEBUG("free = 0x%lx\n", mem.free); 180 + DRM_DEBUG("free = 0x%lx\n", mem->free); 181 181 182 182 return ret; 183 183 } 184 184 185 - static int sis_fb_alloc(DRM_IOCTL_ARGS) 185 + static int sis_fb_alloc(struct drm_device *dev, void *data, 186 + struct drm_file *file_priv) 186 187 { 187 - DRM_DEVICE; 188 188 return sis_drm_alloc(dev, file_priv, data, VIDEO_TYPE); 189 189 } 190 190 191 - static int sis_ioctl_agp_init(DRM_IOCTL_ARGS) 191 + static int sis_ioctl_agp_init(struct drm_device *dev, void *data, 192 
+ struct drm_file *file_priv) 192 193 { 193 - DRM_DEVICE; 194 194 drm_sis_private_t *dev_priv = dev->dev_private; 195 - drm_sis_agp_t agp; 195 + drm_sis_agp_t *agp = data; 196 196 int ret; 197 197 dev_priv = dev->dev_private; 198 198 199 - DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *) data, 200 - sizeof(agp)); 201 199 mutex_lock(&dev->struct_mutex); 202 200 ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0, 203 - agp.size >> SIS_MM_ALIGN_SHIFT); 201 + agp->size >> SIS_MM_ALIGN_SHIFT); 204 202 205 203 if (ret) { 206 204 DRM_ERROR("AGP memory manager initialisation error\n"); ··· 201 215 } 202 216 203 217 dev_priv->agp_initialized = 1; 204 - dev_priv->agp_offset = agp.offset; 218 + dev_priv->agp_offset = agp->offset; 205 219 mutex_unlock(&dev->struct_mutex); 206 220 207 - DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size); 221 + DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size); 208 222 return 0; 209 223 } 210 224 211 - static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS) 225 + static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data, 226 + struct drm_file *file_priv) 212 227 { 213 - DRM_DEVICE; 214 228 215 229 return sis_drm_alloc(dev, file_priv, data, AGP_TYPE); 216 230 } ··· 320 334 return; 321 335 } 322 336 323 - drm_ioctl_desc_t sis_ioctls[] = { 324 - [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH}, 325 - [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_drm_free, DRM_AUTH}, 326 - [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = 327 - {sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY}, 328 - [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, DRM_AUTH}, 329 - [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_drm_free, DRM_AUTH}, 330 - [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = 331 - {sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY} 337 + struct drm_ioctl_desc sis_ioctls[] = { 338 + DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH), 339 + DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH), 340 + DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, 
sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), 341 + DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH), 342 + DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH), 343 + DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), 332 344 }; 333 345 334 346 int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
+39 -57
drivers/char/drm/via_dma.c
··· 227 227 return 0; 228 228 } 229 229 230 - static int via_dma_init(DRM_IOCTL_ARGS) 230 + static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv) 231 231 { 232 - DRM_DEVICE; 233 232 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 234 - drm_via_dma_init_t init; 233 + drm_via_dma_init_t *init = data; 235 234 int retcode = 0; 236 235 237 - DRM_COPY_FROM_USER_IOCTL(init, (drm_via_dma_init_t __user *) data, 238 - sizeof(init)); 239 - 240 - switch (init.func) { 236 + switch (init->func) { 241 237 case VIA_INIT_DMA: 242 238 if (!DRM_SUSER(DRM_CURPROC)) 243 239 retcode = -EPERM; 244 240 else 245 - retcode = via_initialize(dev, dev_priv, &init); 241 + retcode = via_initialize(dev, dev_priv, init); 246 242 break; 247 243 case VIA_CLEANUP_DMA: 248 244 if (!DRM_SUSER(DRM_CURPROC)) ··· 322 326 return 0; 323 327 } 324 328 325 - static int via_flush_ioctl(DRM_IOCTL_ARGS) 329 + static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) 326 330 { 327 - DRM_DEVICE; 328 331 329 332 LOCK_TEST_WITH_RETURN(dev, file_priv); 330 333 331 334 return via_driver_dma_quiescent(dev); 332 335 } 333 336 334 - static int via_cmdbuffer(DRM_IOCTL_ARGS) 337 + static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) 335 338 { 336 - DRM_DEVICE; 337 - drm_via_cmdbuffer_t cmdbuf; 339 + drm_via_cmdbuffer_t *cmdbuf = data; 338 340 int ret; 339 341 340 342 LOCK_TEST_WITH_RETURN(dev, file_priv); 341 343 342 - DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data, 343 - sizeof(cmdbuf)); 344 + DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf->buf, 345 + cmdbuf->size); 344 346 345 - DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf.buf, cmdbuf.size); 346 - 347 - ret = via_dispatch_cmdbuffer(dev, &cmdbuf); 347 + ret = via_dispatch_cmdbuffer(dev, cmdbuf); 348 348 if (ret) { 349 349 return ret; 350 350 } ··· 372 380 return ret; 373 381 } 374 382 375 - static int 
via_pci_cmdbuffer(DRM_IOCTL_ARGS) 383 + static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) 376 384 { 377 - DRM_DEVICE; 378 - drm_via_cmdbuffer_t cmdbuf; 385 + drm_via_cmdbuffer_t *cmdbuf = data; 379 386 int ret; 380 387 381 388 LOCK_TEST_WITH_RETURN(dev, file_priv); 382 389 383 - DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data, 384 - sizeof(cmdbuf)); 390 + DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf->buf, 391 + cmdbuf->size); 385 392 386 - DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf.buf, 387 - cmdbuf.size); 388 - 389 - ret = via_dispatch_pci_cmdbuffer(dev, &cmdbuf); 393 + ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf); 390 394 if (ret) { 391 395 return ret; 392 396 } ··· 641 653 * User interface to the space and lag functions. 642 654 */ 643 655 644 - static int via_cmdbuf_size(DRM_IOCTL_ARGS) 656 + static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv) 645 657 { 646 - DRM_DEVICE; 647 - drm_via_cmdbuf_size_t d_siz; 658 + drm_via_cmdbuf_size_t *d_siz = data; 648 659 int ret = 0; 649 660 uint32_t tmp_size, count; 650 661 drm_via_private_t *dev_priv; ··· 659 672 return -EFAULT; 660 673 } 661 674 662 - DRM_COPY_FROM_USER_IOCTL(d_siz, (drm_via_cmdbuf_size_t __user *) data, 663 - sizeof(d_siz)); 664 - 665 675 count = 1000000; 666 - tmp_size = d_siz.size; 667 - switch (d_siz.func) { 676 + tmp_size = d_siz->size; 677 + switch (d_siz->func) { 668 678 case VIA_CMDBUF_SPACE: 669 - while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz.size) 679 + while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size) 670 680 && count--) { 671 - if (!d_siz.wait) { 681 + if (!d_siz->wait) { 672 682 break; 673 683 } 674 684 } ··· 675 691 } 676 692 break; 677 693 case VIA_CMDBUF_LAG: 678 - while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz.size) 694 + while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size) 679 695 && count--) { 680 - if (!d_siz.wait) { 696 + 
if (!d_siz->wait) { 681 697 break; 682 698 } 683 699 } ··· 689 705 default: 690 706 ret = -EFAULT; 691 707 } 692 - d_siz.size = tmp_size; 708 + d_siz->size = tmp_size; 693 709 694 - DRM_COPY_TO_USER_IOCTL((drm_via_cmdbuf_size_t __user *) data, d_siz, 695 - sizeof(d_siz)); 696 710 return ret; 697 711 } 698 712 699 - drm_ioctl_desc_t via_ioctls[] = { 700 - [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, DRM_AUTH}, 701 - [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, DRM_AUTH}, 702 - [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, DRM_AUTH|DRM_MASTER}, 703 - [DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, DRM_AUTH|DRM_MASTER}, 704 - [DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, DRM_AUTH|DRM_MASTER}, 705 - [DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, DRM_AUTH}, 706 - [DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, DRM_AUTH}, 707 - [DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, DRM_AUTH}, 708 - [DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, DRM_AUTH}, 709 - [DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, DRM_AUTH}, 710 - [DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, DRM_AUTH}, 711 - [DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, DRM_AUTH}, 712 - [DRM_IOCTL_NR(DRM_VIA_DMA_BLIT)] = {via_dma_blit, DRM_AUTH}, 713 - [DRM_IOCTL_NR(DRM_VIA_BLIT_SYNC)] = {via_dma_blit_sync, DRM_AUTH} 713 + struct drm_ioctl_desc via_ioctls[] = { 714 + DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH), 715 + DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH), 716 + DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER), 717 + DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER), 718 + DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER), 719 + DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH), 720 + DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH), 721 + DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH), 722 + DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH), 723 + 
DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH), 724 + DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH), 725 + DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH), 726 + DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH), 727 + DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH) 714 728 }; 715 729 716 730 int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
+7 -15
drivers/char/drm/via_dmablit.c
··· 781 781 */ 782 782 783 783 int 784 - via_dma_blit_sync( DRM_IOCTL_ARGS ) 784 + via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv ) 785 785 { 786 - drm_via_blitsync_t sync; 786 + drm_via_blitsync_t *sync = data; 787 787 int err; 788 - DRM_DEVICE; 789 788 790 - DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync)); 791 - 792 - if (sync.engine >= VIA_NUM_BLIT_ENGINES) 789 + if (sync->engine >= VIA_NUM_BLIT_ENGINES) 793 790 return -EINVAL; 794 791 795 - err = via_dmablit_sync(dev, sync.sync_handle, sync.engine); 792 + err = via_dmablit_sync(dev, sync->sync_handle, sync->engine); 796 793 797 794 if (-EINTR == err) 798 795 err = -EAGAIN; ··· 805 808 */ 806 809 807 810 int 808 - via_dma_blit( DRM_IOCTL_ARGS ) 811 + via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv ) 809 812 { 810 - drm_via_dmablit_t xfer; 813 + drm_via_dmablit_t *xfer = data; 811 814 int err; 812 - DRM_DEVICE; 813 815 814 - DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer)); 815 - 816 - err = via_dmablit(dev, &xfer); 817 - 818 - DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer)); 816 + err = via_dmablit(dev, xfer); 819 817 820 818 return err; 821 819 }
+10 -10
drivers/char/drm/via_drv.h
··· 110 110 #define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg) 111 111 #define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val) 112 112 113 - extern drm_ioctl_desc_t via_ioctls[]; 113 + extern struct drm_ioctl_desc via_ioctls[]; 114 114 extern int via_max_ioctl; 115 115 116 - extern int via_fb_init(DRM_IOCTL_ARGS); 117 - extern int via_mem_alloc(DRM_IOCTL_ARGS); 118 - extern int via_mem_free(DRM_IOCTL_ARGS); 119 - extern int via_agp_init(DRM_IOCTL_ARGS); 120 - extern int via_map_init(DRM_IOCTL_ARGS); 121 - extern int via_decoder_futex(DRM_IOCTL_ARGS); 122 - extern int via_wait_irq(DRM_IOCTL_ARGS); 123 - extern int via_dma_blit_sync( DRM_IOCTL_ARGS ); 124 - extern int via_dma_blit( DRM_IOCTL_ARGS ); 116 + extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv); 117 + extern int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); 118 + extern int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv); 119 + extern int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv); 120 + extern int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv); 121 + extern int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv); 122 + extern int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv); 123 + extern int via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv ); 124 + extern int via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv ); 125 125 126 126 extern int via_driver_load(struct drm_device *dev, unsigned long chipset); 127 127 extern int via_driver_unload(struct drm_device *dev);
+14 -19
drivers/char/drm/via_irq.c
··· 331 331 } 332 332 } 333 333 334 - int via_wait_irq(DRM_IOCTL_ARGS) 334 + int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv) 335 335 { 336 - DRM_DEVICE; 337 - drm_via_irqwait_t __user *argp = (void __user *)data; 338 - drm_via_irqwait_t irqwait; 336 + drm_via_irqwait_t *irqwait = data; 339 337 struct timeval now; 340 338 int ret = 0; 341 339 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; ··· 343 345 if (!dev->irq) 344 346 return -EINVAL; 345 347 346 - DRM_COPY_FROM_USER_IOCTL(irqwait, argp, sizeof(irqwait)); 347 - if (irqwait.request.irq >= dev_priv->num_irqs) { 348 + if (irqwait->request.irq >= dev_priv->num_irqs) { 348 349 DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, 349 - irqwait.request.irq); 350 + irqwait->request.irq); 350 351 return -EINVAL; 351 352 } 352 353 353 - cur_irq += irqwait.request.irq; 354 + cur_irq += irqwait->request.irq; 354 355 355 - switch (irqwait.request.type & ~VIA_IRQ_FLAGS_MASK) { 356 + switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) { 356 357 case VIA_IRQ_RELATIVE: 357 - irqwait.request.sequence += atomic_read(&cur_irq->irq_received); 358 - irqwait.request.type &= ~_DRM_VBLANK_RELATIVE; 358 + irqwait->request.sequence += atomic_read(&cur_irq->irq_received); 359 + irqwait->request.type &= ~_DRM_VBLANK_RELATIVE; 359 360 case VIA_IRQ_ABSOLUTE: 360 361 break; 361 362 default: 362 363 return -EINVAL; 363 364 } 364 365 365 - if (irqwait.request.type & VIA_IRQ_SIGNAL) { 366 + if (irqwait->request.type & VIA_IRQ_SIGNAL) { 366 367 DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n", 367 368 __FUNCTION__); 368 369 return -EINVAL; 369 370 } 370 371 371 - force_sequence = (irqwait.request.type & VIA_IRQ_FORCE_SEQUENCE); 372 + force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE); 372 373 373 - ret = via_driver_irq_wait(dev, irqwait.request.irq, force_sequence, 374 - &irqwait.request.sequence); 374 + ret = via_driver_irq_wait(dev, irqwait->request.irq, 
force_sequence, 375 + &irqwait->request.sequence); 375 376 do_gettimeofday(&now); 376 - irqwait.reply.tval_sec = now.tv_sec; 377 - irqwait.reply.tval_usec = now.tv_usec; 378 - 379 - DRM_COPY_TO_USER_IOCTL(argp, irqwait, sizeof(irqwait)); 377 + irqwait->reply.tval_sec = now.tv_sec; 378 + irqwait->reply.tval_usec = now.tv_usec; 380 379 381 380 return ret; 382 381 }
+4 -8
drivers/char/drm/via_map.c
··· 75 75 return 0; 76 76 } 77 77 78 - int via_map_init(DRM_IOCTL_ARGS) 78 + int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv) 79 79 { 80 - DRM_DEVICE; 81 - drm_via_init_t init; 80 + drm_via_init_t *init = data; 82 81 83 82 DRM_DEBUG("%s\n", __FUNCTION__); 84 83 85 - DRM_COPY_FROM_USER_IOCTL(init, (drm_via_init_t __user *) data, 86 - sizeof(init)); 87 - 88 - switch (init.func) { 84 + switch (init->func) { 89 85 case VIA_INIT_MAP: 90 - return via_do_init_map(dev, &init); 86 + return via_do_init_map(dev, init); 91 87 case VIA_CLEANUP_MAP: 92 88 return via_do_cleanup_map(dev); 93 89 }
+26 -41
drivers/char/drm/via_mm.c
··· 33 33 #define VIA_MM_ALIGN_SHIFT 4 34 34 #define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1) 35 35 36 - int via_agp_init(DRM_IOCTL_ARGS) 36 + int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv) 37 37 { 38 - DRM_DEVICE; 39 - drm_via_agp_t agp; 38 + drm_via_agp_t *agp = data; 40 39 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 41 40 int ret; 42 41 43 - DRM_COPY_FROM_USER_IOCTL(agp, (drm_via_agp_t __user *) data, 44 - sizeof(agp)); 45 42 mutex_lock(&dev->struct_mutex); 46 43 ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0, 47 - agp.size >> VIA_MM_ALIGN_SHIFT); 44 + agp->size >> VIA_MM_ALIGN_SHIFT); 48 45 49 46 if (ret) { 50 47 DRM_ERROR("AGP memory manager initialisation error\n"); ··· 50 53 } 51 54 52 55 dev_priv->agp_initialized = 1; 53 - dev_priv->agp_offset = agp.offset; 56 + dev_priv->agp_offset = agp->offset; 54 57 mutex_unlock(&dev->struct_mutex); 55 58 56 - DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size); 59 + DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size); 57 60 return 0; 58 61 } 59 62 60 - int via_fb_init(DRM_IOCTL_ARGS) 63 + int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv) 61 64 { 62 - DRM_DEVICE; 63 - drm_via_fb_t fb; 65 + drm_via_fb_t *fb = data; 64 66 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 65 67 int ret; 66 68 67 - DRM_COPY_FROM_USER_IOCTL(fb, (drm_via_fb_t __user *) data, sizeof(fb)); 68 - 69 69 mutex_lock(&dev->struct_mutex); 70 70 ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0, 71 - fb.size >> VIA_MM_ALIGN_SHIFT); 71 + fb->size >> VIA_MM_ALIGN_SHIFT); 72 72 73 73 if (ret) { 74 74 DRM_ERROR("VRAM memory manager initialisation error\n"); ··· 74 80 } 75 81 76 82 dev_priv->vram_initialized = 1; 77 - dev_priv->vram_offset = fb.offset; 83 + dev_priv->vram_offset = fb->offset; 78 84 79 85 mutex_unlock(&dev->struct_mutex); 80 - DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size); 86 + 
DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size); 81 87 82 88 return 0; 83 89 ··· 115 121 mutex_unlock(&dev->struct_mutex); 116 122 } 117 123 118 - int via_mem_alloc(DRM_IOCTL_ARGS) 124 + int via_mem_alloc(struct drm_device *dev, void *data, 125 + struct drm_file *file_priv) 119 126 { 120 - DRM_DEVICE; 121 - 122 - drm_via_mem_t mem; 127 + drm_via_mem_t *mem = data; 123 128 int retval = 0; 124 129 struct drm_memblock_item *item; 125 130 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 126 131 unsigned long tmpSize; 127 132 128 - DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data, 129 - sizeof(mem)); 130 - 131 - if (mem.type > VIA_MEM_AGP) { 133 + if (mem->type > VIA_MEM_AGP) { 132 134 DRM_ERROR("Unknown memory type allocation\n"); 133 135 return -EINVAL; 134 136 } 135 137 mutex_lock(&dev->struct_mutex); 136 - if (0 == ((mem.type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized : 138 + if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized : 137 139 dev_priv->agp_initialized)) { 138 140 DRM_ERROR 139 141 ("Attempt to allocate from uninitialized memory manager.\n"); ··· 137 147 return -EINVAL; 138 148 } 139 149 140 - tmpSize = (mem.size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; 141 - item = drm_sman_alloc(&dev_priv->sman, mem.type, tmpSize, 0, 150 + tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; 151 + item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0, 142 152 (unsigned long)file_priv); 143 153 mutex_unlock(&dev->struct_mutex); 144 154 if (item) { 145 - mem.offset = ((mem.type == VIA_MEM_VIDEO) ? 155 + mem->offset = ((mem->type == VIA_MEM_VIDEO) ? 
146 156 dev_priv->vram_offset : dev_priv->agp_offset) + 147 157 (item->mm-> 148 158 offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT); 149 - mem.index = item->user_hash.key; 159 + mem->index = item->user_hash.key; 150 160 } else { 151 - mem.offset = 0; 152 - mem.size = 0; 153 - mem.index = 0; 161 + mem->offset = 0; 162 + mem->size = 0; 163 + mem->index = 0; 154 164 DRM_DEBUG("Video memory allocation failed\n"); 155 165 retval = -ENOMEM; 156 166 } 157 - DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, sizeof(mem)); 158 167 159 168 return retval; 160 169 } 161 170 162 - int via_mem_free(DRM_IOCTL_ARGS) 171 + int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv) 163 172 { 164 - DRM_DEVICE; 165 173 drm_via_private_t *dev_priv = dev->dev_private; 166 - drm_via_mem_t mem; 174 + drm_via_mem_t *mem = data; 167 175 int ret; 168 176 169 - DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data, 170 - sizeof(mem)); 171 - 172 177 mutex_lock(&dev->struct_mutex); 173 - ret = drm_sman_free_key(&dev_priv->sman, mem.index); 178 + ret = drm_sman_free_key(&dev_priv->sman, mem->index); 174 179 mutex_unlock(&dev->struct_mutex); 175 - DRM_DEBUG("free = 0x%lx\n", mem.index); 180 + DRM_DEBUG("free = 0x%lx\n", mem->index); 176 181 177 182 return ret; 178 183 }
+8 -12
drivers/char/drm/via_video.c
··· 65 65 } 66 66 } 67 67 68 - int via_decoder_futex(DRM_IOCTL_ARGS) 68 + int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv) 69 69 { 70 - DRM_DEVICE; 71 - drm_via_futex_t fx; 70 + drm_via_futex_t *fx = data; 72 71 volatile int *lock; 73 72 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 74 73 drm_via_sarea_t *sAPriv = dev_priv->sarea_priv; ··· 75 76 76 77 DRM_DEBUG("%s\n", __FUNCTION__); 77 78 78 - DRM_COPY_FROM_USER_IOCTL(fx, (drm_via_futex_t __user *) data, 79 - sizeof(fx)); 80 - 81 - if (fx.lock > VIA_NR_XVMC_LOCKS) 79 + if (fx->lock > VIA_NR_XVMC_LOCKS) 82 80 return -EFAULT; 83 81 84 - lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx.lock); 82 + lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock); 85 83 86 - switch (fx.func) { 84 + switch (fx->func) { 87 85 case VIA_FUTEX_WAIT: 88 - DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx.lock], 89 - (fx.ms / 10) * (DRM_HZ / 100), *lock != fx.val); 86 + DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock], 87 + (fx->ms / 10) * (DRM_HZ / 100), *lock != fx->val); 90 88 return ret; 91 89 case VIA_FUTEX_WAKE: 92 - DRM_WAKEUP(&(dev_priv->decoder_queue[fx.lock])); 90 + DRM_WAKEUP(&(dev_priv->decoder_queue[fx->lock])); 93 91 return 0; 94 92 } 95 93 return 0;