Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm: convert drm_ioctl to unlocked_ioctl

drm_ioctl is called with the Big Kernel Lock held,
which shows up very high in statistics on vfs_ioctl.

Moving the lock into the drm_ioctl function itself
makes sure we blame the right subsystem and it gets
us one step closer to eliminating the locked version
of fops->ioctl.

Since drm_ioctl does not require the lock itself,
we only need to hold it while calling the specific
handler. The 32 bit conversion handlers do not
interact with any other code, so they don't need
the BKL here either and can just call drm_ioctl.

As a bonus, this cleans up all the other users
of drm_ioctl which now no longer have to find
the inode or call lock_kernel.

[airlied: squashed the non-driver bits
of the second patch in here, this provides
the flag for drivers to use to select unlocked
ioctls - but doesn't modify any drivers].

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Cc: David Airlie <airlied@linux.ie>
Cc: dri-devel@lists.sourceforge.net
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Dave Airlie <airlied@redhat.com>

Authored by Arnd Bergmann and committed by Dave Airlie
ed8b6704 dcd6dfcf

+89 -140
+10 -3
drivers/gpu/drm/drm_drv.c
··· 434 434 * Looks up the ioctl function in the ::ioctls table, checking for root 435 435 * previleges if so required, and dispatches to the respective function. 436 436 */ 437 - int drm_ioctl(struct inode *inode, struct file *filp, 437 + long drm_ioctl(struct file *filp, 438 438 unsigned int cmd, unsigned long arg) 439 439 { 440 440 struct drm_file *file_priv = filp->private_data; 441 - struct drm_device *dev = file_priv->minor->dev; 441 + struct drm_device *dev; 442 442 struct drm_ioctl_desc *ioctl; 443 443 drm_ioctl_t *func; 444 444 unsigned int nr = DRM_IOCTL_NR(cmd); ··· 446 446 char stack_kdata[128]; 447 447 char *kdata = NULL; 448 448 449 + dev = file_priv->minor->dev; 449 450 atomic_inc(&dev->ioctl_count); 450 451 atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); 451 452 ++file_priv->ioctl_count; ··· 502 501 goto err_i1; 503 502 } 504 503 } 505 - retcode = func(dev, kdata, file_priv); 504 + if (ioctl->flags & DRM_UNLOCKED) 505 + retcode = func(dev, kdata, file_priv); 506 + else { 507 + lock_kernel(); 508 + retcode = func(dev, kdata, file_priv); 509 + unlock_kernel(); 510 + } 506 511 507 512 if (cmd & IOC_OUT) { 508 513 if (copy_to_user((void __user *)arg, kdata,
+30 -59
drivers/gpu/drm/drm_ioc32.c
··· 104 104 &version->desc)) 105 105 return -EFAULT; 106 106 107 - err = drm_ioctl(file->f_path.dentry->d_inode, file, 107 + err = drm_ioctl(file, 108 108 DRM_IOCTL_VERSION, (unsigned long)version); 109 109 if (err) 110 110 return err; ··· 145 145 &u->unique)) 146 146 return -EFAULT; 147 147 148 - err = drm_ioctl(file->f_path.dentry->d_inode, file, 149 - DRM_IOCTL_GET_UNIQUE, (unsigned long)u); 148 + err = drm_ioctl(file, DRM_IOCTL_GET_UNIQUE, (unsigned long)u); 150 149 if (err) 151 150 return err; 152 151 ··· 173 174 &u->unique)) 174 175 return -EFAULT; 175 176 176 - return drm_ioctl(file->f_path.dentry->d_inode, file, 177 - DRM_IOCTL_SET_UNIQUE, (unsigned long)u); 177 + return drm_ioctl(file, DRM_IOCTL_SET_UNIQUE, (unsigned long)u); 178 178 } 179 179 180 180 typedef struct drm_map32 { ··· 203 205 if (__put_user(idx, &map->offset)) 204 206 return -EFAULT; 205 207 206 - err = drm_ioctl(file->f_path.dentry->d_inode, file, 207 - DRM_IOCTL_GET_MAP, (unsigned long)map); 208 + err = drm_ioctl(file, DRM_IOCTL_GET_MAP, (unsigned long)map); 208 209 if (err) 209 210 return err; 210 211 ··· 243 246 || __put_user(m32.flags, &map->flags)) 244 247 return -EFAULT; 245 248 246 - err = drm_ioctl(file->f_path.dentry->d_inode, file, 247 - DRM_IOCTL_ADD_MAP, (unsigned long)map); 249 + err = drm_ioctl(file, DRM_IOCTL_ADD_MAP, (unsigned long)map); 248 250 if (err) 249 251 return err; 250 252 ··· 280 284 if (__put_user((void *)(unsigned long)handle, &map->handle)) 281 285 return -EFAULT; 282 286 283 - return drm_ioctl(file->f_path.dentry->d_inode, file, 284 - DRM_IOCTL_RM_MAP, (unsigned long)map); 287 + return drm_ioctl(file, DRM_IOCTL_RM_MAP, (unsigned long)map); 285 288 } 286 289 287 290 typedef struct drm_client32 { ··· 309 314 if (__put_user(idx, &client->idx)) 310 315 return -EFAULT; 311 316 312 - err = drm_ioctl(file->f_path.dentry->d_inode, file, 313 - DRM_IOCTL_GET_CLIENT, (unsigned long)client); 317 + err = drm_ioctl(file, DRM_IOCTL_GET_CLIENT, (unsigned long)client); 314 318 
if (err) 315 319 return err; 316 320 ··· 345 351 if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats))) 346 352 return -EFAULT; 347 353 348 - err = drm_ioctl(file->f_path.dentry->d_inode, file, 349 - DRM_IOCTL_GET_STATS, (unsigned long)stats); 354 + err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats); 350 355 if (err) 351 356 return err; 352 357 ··· 388 395 || __put_user(agp_start, &buf->agp_start)) 389 396 return -EFAULT; 390 397 391 - err = drm_ioctl(file->f_path.dentry->d_inode, file, 392 - DRM_IOCTL_ADD_BUFS, (unsigned long)buf); 398 + err = drm_ioctl(file, DRM_IOCTL_ADD_BUFS, (unsigned long)buf); 393 399 if (err) 394 400 return err; 395 401 ··· 419 427 || __put_user(b32.high_mark, &buf->high_mark)) 420 428 return -EFAULT; 421 429 422 - return drm_ioctl(file->f_path.dentry->d_inode, file, 423 - DRM_IOCTL_MARK_BUFS, (unsigned long)buf); 430 + return drm_ioctl(file, DRM_IOCTL_MARK_BUFS, (unsigned long)buf); 424 431 } 425 432 426 433 typedef struct drm_buf_info32 { ··· 460 469 || __put_user(list, &request->list)) 461 470 return -EFAULT; 462 471 463 - err = drm_ioctl(file->f_path.dentry->d_inode, file, 464 - DRM_IOCTL_INFO_BUFS, (unsigned long)request); 472 + err = drm_ioctl(file, DRM_IOCTL_INFO_BUFS, (unsigned long)request); 465 473 if (err) 466 474 return err; 467 475 ··· 521 531 || __put_user(list, &request->list)) 522 532 return -EFAULT; 523 533 524 - err = drm_ioctl(file->f_path.dentry->d_inode, file, 525 - DRM_IOCTL_MAP_BUFS, (unsigned long)request); 534 + err = drm_ioctl(file, DRM_IOCTL_MAP_BUFS, (unsigned long)request); 526 535 if (err) 527 536 return err; 528 537 ··· 567 578 &request->list)) 568 579 return -EFAULT; 569 580 570 - return drm_ioctl(file->f_path.dentry->d_inode, file, 571 - DRM_IOCTL_FREE_BUFS, (unsigned long)request); 581 + return drm_ioctl(file, DRM_IOCTL_FREE_BUFS, (unsigned long)request); 572 582 } 573 583 574 584 typedef struct drm_ctx_priv_map32 { ··· 593 605 &request->handle)) 594 606 return -EFAULT; 595 607 596 - return 
drm_ioctl(file->f_path.dentry->d_inode, file, 597 - DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request); 608 + return drm_ioctl(file, DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request); 598 609 } 599 610 600 611 static int compat_drm_getsareactx(struct file *file, unsigned int cmd, ··· 615 628 if (__put_user(ctx_id, &request->ctx_id)) 616 629 return -EFAULT; 617 630 618 - err = drm_ioctl(file->f_path.dentry->d_inode, file, 619 - DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request); 631 + err = drm_ioctl(file, DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request); 620 632 if (err) 621 633 return err; 622 634 ··· 650 664 &res->contexts)) 651 665 return -EFAULT; 652 666 653 - err = drm_ioctl(file->f_path.dentry->d_inode, file, 654 - DRM_IOCTL_RES_CTX, (unsigned long)res); 667 + err = drm_ioctl(file, DRM_IOCTL_RES_CTX, (unsigned long)res); 655 668 if (err) 656 669 return err; 657 670 ··· 703 718 &d->request_sizes)) 704 719 return -EFAULT; 705 720 706 - err = drm_ioctl(file->f_path.dentry->d_inode, file, 707 - DRM_IOCTL_DMA, (unsigned long)d); 721 + err = drm_ioctl(file, DRM_IOCTL_DMA, (unsigned long)d); 708 722 if (err) 709 723 return err; 710 724 ··· 735 751 if (put_user(m32.mode, &mode->mode)) 736 752 return -EFAULT; 737 753 738 - return drm_ioctl(file->f_path.dentry->d_inode, file, 739 - DRM_IOCTL_AGP_ENABLE, (unsigned long)mode); 754 + return drm_ioctl(file, DRM_IOCTL_AGP_ENABLE, (unsigned long)mode); 740 755 } 741 756 742 757 typedef struct drm_agp_info32 { ··· 764 781 if (!access_ok(VERIFY_WRITE, info, sizeof(*info))) 765 782 return -EFAULT; 766 783 767 - err = drm_ioctl(file->f_path.dentry->d_inode, file, 768 - DRM_IOCTL_AGP_INFO, (unsigned long)info); 784 + err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info); 769 785 if (err) 770 786 return err; 771 787 ··· 809 827 || __put_user(req32.type, &request->type)) 810 828 return -EFAULT; 811 829 812 - err = drm_ioctl(file->f_path.dentry->d_inode, file, 813 - DRM_IOCTL_AGP_ALLOC, (unsigned long)request); 830 + err = 
drm_ioctl(file, DRM_IOCTL_AGP_ALLOC, (unsigned long)request); 814 831 if (err) 815 832 return err; 816 833 817 834 if (__get_user(req32.handle, &request->handle) 818 835 || __get_user(req32.physical, &request->physical) 819 836 || copy_to_user(argp, &req32, sizeof(req32))) { 820 - drm_ioctl(file->f_path.dentry->d_inode, file, 821 - DRM_IOCTL_AGP_FREE, (unsigned long)request); 837 + drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request); 822 838 return -EFAULT; 823 839 } 824 840 ··· 836 856 || __put_user(handle, &request->handle)) 837 857 return -EFAULT; 838 858 839 - return drm_ioctl(file->f_path.dentry->d_inode, file, 840 - DRM_IOCTL_AGP_FREE, (unsigned long)request); 859 + return drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request); 841 860 } 842 861 843 862 typedef struct drm_agp_binding32 { ··· 860 881 || __put_user(req32.offset, &request->offset)) 861 882 return -EFAULT; 862 883 863 - return drm_ioctl(file->f_path.dentry->d_inode, file, 864 - DRM_IOCTL_AGP_BIND, (unsigned long)request); 884 + return drm_ioctl(file, DRM_IOCTL_AGP_BIND, (unsigned long)request); 865 885 } 866 886 867 887 static int compat_drm_agp_unbind(struct file *file, unsigned int cmd, ··· 876 898 || __put_user(handle, &request->handle)) 877 899 return -EFAULT; 878 900 879 - return drm_ioctl(file->f_path.dentry->d_inode, file, 880 - DRM_IOCTL_AGP_UNBIND, (unsigned long)request); 901 + return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request); 881 902 } 882 903 #endif /* __OS_HAS_AGP */ 883 904 ··· 900 923 || __put_user(x, &request->size)) 901 924 return -EFAULT; 902 925 903 - err = drm_ioctl(file->f_path.dentry->d_inode, file, 904 - DRM_IOCTL_SG_ALLOC, (unsigned long)request); 926 + err = drm_ioctl(file, DRM_IOCTL_SG_ALLOC, (unsigned long)request); 905 927 if (err) 906 928 return err; 907 929 ··· 926 950 || __put_user(x << PAGE_SHIFT, &request->handle)) 927 951 return -EFAULT; 928 952 929 - return drm_ioctl(file->f_path.dentry->d_inode, file, 930 - DRM_IOCTL_SG_FREE, 
(unsigned long)request); 953 + return drm_ioctl(file, DRM_IOCTL_SG_FREE, (unsigned long)request); 931 954 } 932 955 933 956 #if defined(CONFIG_X86) || defined(CONFIG_IA64) ··· 956 981 __put_user(update32.data, &request->data)) 957 982 return -EFAULT; 958 983 959 - err = drm_ioctl(file->f_path.dentry->d_inode, file, 960 - DRM_IOCTL_UPDATE_DRAW, (unsigned long)request); 984 + err = drm_ioctl(file, DRM_IOCTL_UPDATE_DRAW, (unsigned long)request); 961 985 return err; 962 986 } 963 987 #endif ··· 997 1023 || __put_user(req32.request.signal, &request->request.signal)) 998 1024 return -EFAULT; 999 1025 1000 - err = drm_ioctl(file->f_path.dentry->d_inode, file, 1001 - DRM_IOCTL_WAIT_VBLANK, (unsigned long)request); 1026 + err = drm_ioctl(file, DRM_IOCTL_WAIT_VBLANK, (unsigned long)request); 1002 1027 if (err) 1003 1028 return err; 1004 1029 ··· 1067 1094 * than always failing. 1068 1095 */ 1069 1096 if (nr >= ARRAY_SIZE(drm_compat_ioctls)) 1070 - return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); 1097 + return drm_ioctl(filp, cmd, arg); 1071 1098 1072 1099 fn = drm_compat_ioctls[nr]; 1073 1100 1074 - lock_kernel(); /* XXX for now */ 1075 1101 if (fn != NULL) 1076 1102 ret = (*fn) (filp, cmd, arg); 1077 1103 else 1078 - ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); 1079 - unlock_kernel(); 1104 + ret = drm_ioctl(filp, cmd, arg); 1080 1105 1081 1106 return ret; 1082 1107 }
+1 -1
drivers/gpu/drm/i810/i810_dma.c
··· 115 115 static const struct file_operations i810_buffer_fops = { 116 116 .open = drm_open, 117 117 .release = drm_release, 118 - .ioctl = drm_ioctl, 118 + .unlocked_ioctl = drm_ioctl, 119 119 .mmap = i810_mmap_buffers, 120 120 .fasync = drm_fasync, 121 121 };
+1 -1
drivers/gpu/drm/i810/i810_drv.c
··· 59 59 .owner = THIS_MODULE, 60 60 .open = drm_open, 61 61 .release = drm_release, 62 - .ioctl = drm_ioctl, 62 + .unlocked_ioctl = drm_ioctl, 63 63 .mmap = drm_mmap, 64 64 .poll = drm_poll, 65 65 .fasync = drm_fasync,
+1 -1
drivers/gpu/drm/i830/i830_dma.c
··· 117 117 static const struct file_operations i830_buffer_fops = { 118 118 .open = drm_open, 119 119 .release = drm_release, 120 - .ioctl = drm_ioctl, 120 + .unlocked_ioctl = drm_ioctl, 121 121 .mmap = i830_mmap_buffers, 122 122 .fasync = drm_fasync, 123 123 };
+1 -1
drivers/gpu/drm/i830/i830_drv.c
··· 70 70 .owner = THIS_MODULE, 71 71 .open = drm_open, 72 72 .release = drm_release, 73 - .ioctl = drm_ioctl, 73 + .unlocked_ioctl = drm_ioctl, 74 74 .mmap = drm_mmap, 75 75 .poll = drm_poll, 76 76 .fasync = drm_fasync,
+1 -1
drivers/gpu/drm/i915/i915_drv.c
··· 329 329 .owner = THIS_MODULE, 330 330 .open = drm_open, 331 331 .release = drm_release, 332 - .ioctl = drm_ioctl, 332 + .unlocked_ioctl = drm_ioctl, 333 333 .mmap = drm_gem_mmap, 334 334 .poll = drm_poll, 335 335 .fasync = drm_fasync,
+10 -13
drivers/gpu/drm/i915/i915_ioc32.c
··· 66 66 &batchbuffer->cliprects)) 67 67 return -EFAULT; 68 68 69 - return drm_ioctl(file->f_path.dentry->d_inode, file, 70 - DRM_IOCTL_I915_BATCHBUFFER, 69 + return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER, 71 70 (unsigned long)batchbuffer); 72 71 } 73 72 ··· 101 102 &cmdbuffer->cliprects)) 102 103 return -EFAULT; 103 104 104 - return drm_ioctl(file->f_path.dentry->d_inode, file, 105 - DRM_IOCTL_I915_CMDBUFFER, (unsigned long)cmdbuffer); 105 + return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER, 106 + (unsigned long)cmdbuffer); 106 107 } 107 108 108 109 typedef struct drm_i915_irq_emit32 { ··· 124 125 &request->irq_seq)) 125 126 return -EFAULT; 126 127 127 - return drm_ioctl(file->f_path.dentry->d_inode, file, 128 - DRM_IOCTL_I915_IRQ_EMIT, (unsigned long)request); 128 + return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT, 129 + (unsigned long)request); 129 130 } 130 131 typedef struct drm_i915_getparam32 { 131 132 int param; ··· 148 149 &request->value)) 149 150 return -EFAULT; 150 151 151 - return drm_ioctl(file->f_path.dentry->d_inode, file, 152 - DRM_IOCTL_I915_GETPARAM, (unsigned long)request); 152 + return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM, 153 + (unsigned long)request); 153 154 } 154 155 155 156 typedef struct drm_i915_mem_alloc32 { ··· 177 178 &request->region_offset)) 178 179 return -EFAULT; 179 180 180 - return drm_ioctl(file->f_path.dentry->d_inode, file, 181 - DRM_IOCTL_I915_ALLOC, (unsigned long)request); 181 + return drm_ioctl(file, DRM_IOCTL_I915_ALLOC, 182 + (unsigned long)request); 182 183 } 183 184 184 185 drm_ioctl_compat_t *i915_compat_ioctls[] = { ··· 210 211 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) 211 212 fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE]; 212 213 213 - lock_kernel(); /* XXX for now */ 214 214 if (fn != NULL) 215 215 ret = (*fn) (filp, cmd, arg); 216 216 else 217 - ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); 218 - unlock_kernel(); 217 + ret = drm_ioctl(filp, cmd, arg); 219 218 220 219 
return ret; 221 220 }
+1 -1
drivers/gpu/drm/mga/mga_drv.c
··· 68 68 .owner = THIS_MODULE, 69 69 .open = drm_open, 70 70 .release = drm_release, 71 - .ioctl = drm_ioctl, 71 + .unlocked_ioctl = drm_ioctl, 72 72 .mmap = drm_mmap, 73 73 .poll = drm_poll, 74 74 .fasync = drm_fasync,
+4 -9
drivers/gpu/drm/mga/mga_ioc32.c
··· 100 100 if (err) 101 101 return -EFAULT; 102 102 103 - return drm_ioctl(file->f_path.dentry->d_inode, file, 104 - DRM_IOCTL_MGA_INIT, (unsigned long)init); 103 + return drm_ioctl(file, DRM_IOCTL_MGA_INIT, (unsigned long)init); 105 104 } 106 105 107 106 typedef struct drm_mga_getparam32 { ··· 124 125 &getparam->value)) 125 126 return -EFAULT; 126 127 127 - return drm_ioctl(file->f_path.dentry->d_inode, file, 128 - DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam); 128 + return drm_ioctl(file, DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam); 129 129 } 130 130 131 131 typedef struct drm_mga_drm_bootstrap32 { ··· 164 166 || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size)) 165 167 return -EFAULT; 166 168 167 - err = drm_ioctl(file->f_path.dentry->d_inode, file, 168 - DRM_IOCTL_MGA_DMA_BOOTSTRAP, 169 + err = drm_ioctl(file, DRM_IOCTL_MGA_DMA_BOOTSTRAP, 169 170 (unsigned long)dma_bootstrap); 170 171 if (err) 171 172 return err; ··· 217 220 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) 218 221 fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE]; 219 222 220 - lock_kernel(); /* XXX for now */ 221 223 if (fn != NULL) 222 224 ret = (*fn) (filp, cmd, arg); 223 225 else 224 - ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); 225 - unlock_kernel(); 226 + ret = drm_ioctl(filp, cmd, arg); 226 227 227 228 return ret; 228 229 }
+1 -1
drivers/gpu/drm/nouveau/nouveau_drv.c
··· 341 341 .owner = THIS_MODULE, 342 342 .open = drm_open, 343 343 .release = drm_release, 344 - .ioctl = drm_ioctl, 344 + .unlocked_ioctl = drm_ioctl, 345 345 .mmap = nouveau_ttm_mmap, 346 346 .poll = drm_poll, 347 347 .fasync = drm_fasync,
+1 -3
drivers/gpu/drm/nouveau/nouveau_ioc32.c
··· 61 61 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) 62 62 fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE]; 63 63 #endif 64 - lock_kernel(); /* XXX for now */ 65 64 if (fn != NULL) 66 65 ret = (*fn)(filp, cmd, arg); 67 66 else 68 - ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); 69 - unlock_kernel(); 67 + ret = drm_ioctl(filp, cmd, arg); 70 68 71 69 return ret; 72 70 }
+1 -1
drivers/gpu/drm/r128/r128_drv.c
··· 64 64 .owner = THIS_MODULE, 65 65 .open = drm_open, 66 66 .release = drm_release, 67 - .ioctl = drm_ioctl, 67 + .unlocked_ioctl = drm_ioctl, 68 68 .mmap = drm_mmap, 69 69 .poll = drm_poll, 70 70 .fasync = drm_fasync,
+5 -11
drivers/gpu/drm/r128/r128_ioc32.c
··· 95 95 &init->agp_textures_offset)) 96 96 return -EFAULT; 97 97 98 - return drm_ioctl(file->f_path.dentry->d_inode, file, 99 - DRM_IOCTL_R128_INIT, (unsigned long)init); 98 + return drm_ioctl(file, DRM_IOCTL_R128_INIT, (unsigned long)init); 100 99 } 101 100 102 101 typedef struct drm_r128_depth32 { ··· 128 129 &depth->mask)) 129 130 return -EFAULT; 130 131 131 - return drm_ioctl(file->f_path.dentry->d_inode, file, 132 - DRM_IOCTL_R128_DEPTH, (unsigned long)depth); 132 + return drm_ioctl(file, DRM_IOCTL_R128_DEPTH, (unsigned long)depth); 133 133 134 134 } 135 135 ··· 151 153 &stipple->mask)) 152 154 return -EFAULT; 153 155 154 - return drm_ioctl(file->f_path.dentry->d_inode, file, 155 - DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple); 156 + return drm_ioctl(file, DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple); 156 157 } 157 158 158 159 typedef struct drm_r128_getparam32 { ··· 175 178 &getparam->value)) 176 179 return -EFAULT; 177 180 178 - return drm_ioctl(file->f_path.dentry->d_inode, file, 179 - DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam); 181 + return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam); 180 182 } 181 183 182 184 drm_ioctl_compat_t *r128_compat_ioctls[] = { ··· 206 210 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) 207 211 fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE]; 208 212 209 - lock_kernel(); /* XXX for now */ 210 213 if (fn != NULL) 211 214 ret = (*fn) (filp, cmd, arg); 212 215 else 213 - ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); 214 - unlock_kernel(); 216 + ret = drm_ioctl(filp, cmd, arg); 215 217 216 218 return ret; 217 219 }
+2 -2
drivers/gpu/drm/radeon/radeon_drv.c
··· 196 196 .owner = THIS_MODULE, 197 197 .open = drm_open, 198 198 .release = drm_release, 199 - .ioctl = drm_ioctl, 199 + .unlocked_ioctl = drm_ioctl, 200 200 .mmap = drm_mmap, 201 201 .poll = drm_poll, 202 202 .fasync = drm_fasync, ··· 284 284 .owner = THIS_MODULE, 285 285 .open = drm_open, 286 286 .release = drm_release, 287 - .ioctl = drm_ioctl, 287 + .unlocked_ioctl = drm_ioctl, 288 288 .mmap = radeon_mmap, 289 289 .poll = drm_poll, 290 290 .fasync = drm_fasync,
+12 -26
drivers/gpu/drm/radeon/radeon_ioc32.c
··· 92 92 &init->gart_textures_offset)) 93 93 return -EFAULT; 94 94 95 - return drm_ioctl(file->f_path.dentry->d_inode, file, 96 - DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init); 95 + return drm_ioctl(file, DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init); 97 96 } 98 97 99 98 typedef struct drm_radeon_clear32 { ··· 124 125 &clr->depth_boxes)) 125 126 return -EFAULT; 126 127 127 - return drm_ioctl(file->f_path.dentry->d_inode, file, 128 - DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr); 128 + return drm_ioctl(file, DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr); 129 129 } 130 130 131 131 typedef struct drm_radeon_stipple32 { ··· 147 149 &request->mask)) 148 150 return -EFAULT; 149 151 150 - return drm_ioctl(file->f_path.dentry->d_inode, file, 151 - DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request); 152 + return drm_ioctl(file, DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request); 152 153 } 153 154 154 155 typedef struct drm_radeon_tex_image32 { ··· 201 204 &image->data)) 202 205 return -EFAULT; 203 206 204 - return drm_ioctl(file->f_path.dentry->d_inode, file, 205 - DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request); 207 + return drm_ioctl(file, DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request); 206 208 } 207 209 208 210 typedef struct drm_radeon_vertex2_32 { ··· 234 238 &request->prim)) 235 239 return -EFAULT; 236 240 237 - return drm_ioctl(file->f_path.dentry->d_inode, file, 238 - DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request); 241 + return drm_ioctl(file, DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request); 239 242 } 240 243 241 244 typedef struct drm_radeon_cmd_buffer32 { ··· 263 268 &request->boxes)) 264 269 return -EFAULT; 265 270 266 - return drm_ioctl(file->f_path.dentry->d_inode, file, 267 - DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request); 271 + return drm_ioctl(file, DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request); 268 272 } 269 273 270 274 typedef struct drm_radeon_getparam32 { ··· 287 293 &request->value)) 288 294 return -EFAULT; 289 295 290 - return 
drm_ioctl(file->f_path.dentry->d_inode, file, 291 - DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request); 296 + return drm_ioctl(file, DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request); 292 297 } 293 298 294 299 typedef struct drm_radeon_mem_alloc32 { ··· 315 322 &request->region_offset)) 316 323 return -EFAULT; 317 324 318 - return drm_ioctl(file->f_path.dentry->d_inode, file, 319 - DRM_IOCTL_RADEON_ALLOC, (unsigned long)request); 325 + return drm_ioctl(file, DRM_IOCTL_RADEON_ALLOC, (unsigned long)request); 320 326 } 321 327 322 328 typedef struct drm_radeon_irq_emit32 { ··· 337 345 &request->irq_seq)) 338 346 return -EFAULT; 339 347 340 - return drm_ioctl(file->f_path.dentry->d_inode, file, 341 - DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request); 348 + return drm_ioctl(file, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request); 342 349 } 343 350 344 351 /* The two 64-bit arches where alignof(u64)==4 in 32-bit code */ ··· 363 372 &request->value)) 364 373 return -EFAULT; 365 374 366 - return drm_ioctl(file->f_dentry->d_inode, file, 367 - DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request); 375 + return drm_ioctl(file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request); 368 376 } 369 377 #else 370 378 #define compat_radeon_cp_setparam NULL ··· 403 413 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) 404 414 fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE]; 405 415 406 - lock_kernel(); /* XXX for now */ 407 416 if (fn != NULL) 408 417 ret = (*fn) (filp, cmd, arg); 409 418 else 410 - ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); 411 - unlock_kernel(); 419 + ret = drm_ioctl(filp, cmd, arg); 412 420 413 421 return ret; 414 422 } ··· 419 431 if (nr < DRM_COMMAND_BASE) 420 432 return drm_compat_ioctl(filp, cmd, arg); 421 433 422 - lock_kernel(); /* XXX for now */ 423 - ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); 424 - unlock_kernel(); 434 + ret = drm_ioctl(filp, cmd, arg); 425 435 426 436 return ret; 427 437 }
+1 -1
drivers/gpu/drm/savage/savage_drv.c
··· 50 50 .owner = THIS_MODULE, 51 51 .open = drm_open, 52 52 .release = drm_release, 53 - .ioctl = drm_ioctl, 53 + .unlocked_ioctl = drm_ioctl, 54 54 .mmap = drm_mmap, 55 55 .poll = drm_poll, 56 56 .fasync = drm_fasync,
+1 -1
drivers/gpu/drm/sis/sis_drv.c
··· 80 80 .owner = THIS_MODULE, 81 81 .open = drm_open, 82 82 .release = drm_release, 83 - .ioctl = drm_ioctl, 83 + .unlocked_ioctl = drm_ioctl, 84 84 .mmap = drm_mmap, 85 85 .poll = drm_poll, 86 86 .fasync = drm_fasync,
+1 -1
drivers/gpu/drm/tdfx/tdfx_drv.c
··· 48 48 .owner = THIS_MODULE, 49 49 .open = drm_open, 50 50 .release = drm_release, 51 - .ioctl = drm_ioctl, 51 + .unlocked_ioctl = drm_ioctl, 52 52 .mmap = drm_mmap, 53 53 .poll = drm_poll, 54 54 .fasync = drm_fasync,
+1 -1
drivers/gpu/drm/via/via_drv.c
··· 58 58 .owner = THIS_MODULE, 59 59 .open = drm_open, 60 60 .release = drm_release, 61 - .ioctl = drm_ioctl, 61 + .unlocked_ioctl = drm_ioctl, 62 62 .mmap = drm_mmap, 63 63 .poll = drm_poll, 64 64 .fasync = drm_fasync,
+3 -2
include/drm/drmP.h
··· 296 296 #define DRM_MASTER 0x2 297 297 #define DRM_ROOT_ONLY 0x4 298 298 #define DRM_CONTROL_ALLOW 0x8 299 + #define DRM_UNLOCKED 0x10 299 300 300 301 struct drm_ioctl_desc { 301 302 unsigned int cmd; ··· 1129 1128 /* Driver support (drm_drv.h) */ 1130 1129 extern int drm_init(struct drm_driver *driver); 1131 1130 extern void drm_exit(struct drm_driver *driver); 1132 - extern int drm_ioctl(struct inode *inode, struct file *filp, 1133 - unsigned int cmd, unsigned long arg); 1131 + extern long drm_ioctl(struct file *filp, 1132 + unsigned int cmd, unsigned long arg); 1134 1133 extern long drm_compat_ioctl(struct file *filp, 1135 1134 unsigned int cmd, unsigned long arg); 1136 1135 extern int drm_lastclose(struct drm_device *dev);