Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'work.misc-set_fs' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull misc user access cleanups from Al Viro:
"The first pile is an assortment of removals: cargo-culted access_ok(),
cargo-culted set_fs(), and field-by-field copyouts.

The same description applies to a lot of stuff in other branches -
this is just the stuff that didn't fit into a more specific topical
branch"

* 'work.misc-set_fs' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
Switch flock copyin/copyout primitives to copy_{from,to}_user()
fs/fcntl: return -ESRCH in f_setown when pid/pgid can't be found
fs/fcntl: f_setown, avoid undefined behaviour
fs/fcntl: f_setown, allow returning error
lpfc debugfs: get rid of pointless access_ok()
adb: get rid of pointless access_ok()
isdn: get rid of pointless access_ok()
compat statfs: switch to copy_to_user()
fs/locks: don't mess with the address limit in compat_fcntl64
nfsd_readlink(): switch to vfs_get_link()
drbd: ->sendpage() never needed set_fs()
fs/locks: pass kernel struct flock to fcntl_getlk/setlk
  fs: locks: Fix some troubles in kernel-doc comments

+224 -275
-3
drivers/block/drbd/drbd_main.c
··· 1551 1551 int offset, size_t size, unsigned msg_flags) 1552 1552 { 1553 1553 struct socket *socket = peer_device->connection->data.socket; 1554 - mm_segment_t oldfs = get_fs(); 1555 1554 int len = size; 1556 1555 int err = -EIO; 1557 1556 ··· 1565 1566 1566 1567 msg_flags |= MSG_NOSIGNAL; 1567 1568 drbd_update_congested(peer_device->connection); 1568 - set_fs(KERNEL_DS); 1569 1569 do { 1570 1570 int sent; 1571 1571 ··· 1584 1586 len -= sent; 1585 1587 offset += sent; 1586 1588 } while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/); 1587 - set_fs(oldfs); 1588 1589 clear_bit(NET_CONGESTED, &peer_device->connection->flags); 1589 1590 1590 1591 if (len == 0) {
-18
drivers/isdn/i4l/isdn_common.c
··· 1304 1304 if (arg) { 1305 1305 ulong __user *p = argp; 1306 1306 int i; 1307 - if (!access_ok(VERIFY_WRITE, p, 1308 - sizeof(ulong) * ISDN_MAX_CHANNELS * 2)) 1309 - return -EFAULT; 1310 1307 for (i = 0; i < ISDN_MAX_CHANNELS; i++) { 1311 1308 put_user(dev->ibytes[i], p++); 1312 1309 put_user(dev->obytes[i], p++); ··· 1537 1540 char __user *p = argp; 1538 1541 int i; 1539 1542 1540 - if (!access_ok(VERIFY_WRITE, argp, 1541 - (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) 1542 - * ISDN_MAX_CHANNELS)) 1543 - return -EFAULT; 1544 - 1545 1543 for (i = 0; i < ISDN_MAX_CHANNELS; i++) { 1546 1544 if (copy_to_user(p, dev->mdm.info[i].emu.profile, 1547 1545 ISDN_MODEM_NUMREG)) ··· 1558 1566 if (arg) { 1559 1567 char __user *p = argp; 1560 1568 int i; 1561 - 1562 - if (!access_ok(VERIFY_READ, argp, 1563 - (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) 1564 - * ISDN_MAX_CHANNELS)) 1565 - return -EFAULT; 1566 1569 1567 1570 for (i = 0; i < ISDN_MAX_CHANNELS; i++) { 1568 1571 if (copy_from_user(dev->mdm.info[i].emu.profile, p, ··· 1604 1617 int j = 0; 1605 1618 1606 1619 while (1) { 1607 - if (!access_ok(VERIFY_READ, p, 1)) 1608 - return -EFAULT; 1609 1620 get_user(bname[j], p++); 1610 1621 switch (bname[j]) { 1611 1622 case '\0': ··· 1670 1685 drvidx = 0; 1671 1686 if (drvidx == -1) 1672 1687 return -ENODEV; 1673 - if (!access_ok(VERIFY_WRITE, argp, 1674 - sizeof(isdn_ioctl_struct))) 1675 - return -EFAULT; 1676 1688 c.driver = drvidx; 1677 1689 c.command = ISDN_CMD_IOCTL; 1678 1690 c.arg = cmd;
-6
drivers/isdn/i4l/isdn_ppp.c
··· 795 795 if (!(is->state & IPPP_OPEN)) 796 796 return 0; 797 797 798 - if (!access_ok(VERIFY_WRITE, buf, count)) 799 - return -EFAULT; 800 - 801 798 spin_lock_irqsave(&is->buflock, flags); 802 799 b = is->first->next; 803 800 save_buf = b->buf; ··· 2010 2013 struct ppp_stats __user *res = ifr->ifr_data; 2011 2014 struct ppp_stats t; 2012 2015 isdn_net_local *lp = netdev_priv(dev); 2013 - 2014 - if (!access_ok(VERIFY_WRITE, res, sizeof(struct ppp_stats))) 2015 - return -EFAULT; 2016 2016 2017 2017 /* build a temporary stat struct and copy it to user space */ 2018 2018
-2
drivers/isdn/isdnloop/isdnloop.c
··· 1142 1142 case ISDNLOOP_IOCTL_DEBUGVAR: 1143 1143 return (ulong) card; 1144 1144 case ISDNLOOP_IOCTL_STARTUP: 1145 - if (!access_ok(VERIFY_READ, (void *) a, sizeof(isdnloop_sdef))) 1146 - return -EFAULT; 1147 1145 return isdnloop_start(card, (isdnloop_sdef *) a); 1148 1146 break; 1149 1147 case ISDNLOOP_IOCTL_ADDCARD:
-4
drivers/macintosh/adb.c
··· 723 723 return -EINVAL; 724 724 if (count > sizeof(req->reply)) 725 725 count = sizeof(req->reply); 726 - if (!access_ok(VERIFY_WRITE, buf, count)) 727 - return -EFAULT; 728 726 729 727 req = NULL; 730 728 spin_lock_irqsave(&state->lock, flags); ··· 779 781 return -EINVAL; 780 782 if (adb_controller == NULL) 781 783 return -ENXIO; 782 - if (!access_ok(VERIFY_READ, buf, count)) 783 - return -EFAULT; 784 784 785 785 req = kmalloc(sizeof(struct adb_request), 786 786 GFP_KERNEL);
-20
drivers/scsi/lpfc/lpfc_debugfs.c
··· 1949 1949 if (nbytes > 64) 1950 1950 nbytes = 64; 1951 1951 1952 - /* Protect copy from user */ 1953 - if (!access_ok(VERIFY_READ, buf, nbytes)) 1954 - return -EFAULT; 1955 - 1956 1952 memset(mybuf, 0, sizeof(mybuf)); 1957 1953 1958 1954 if (copy_from_user(mybuf, buf, nbytes)) ··· 2032 2036 2033 2037 if (nbytes > 64) 2034 2038 nbytes = 64; 2035 - 2036 - /* Protect copy from user */ 2037 - if (!access_ok(VERIFY_READ, buf, nbytes)) 2038 - return -EFAULT; 2039 2039 2040 2040 memset(mybuf, 0, sizeof(mybuf)); 2041 2041 ··· 2161 2169 if (nbytes > 64) 2162 2170 nbytes = 64; 2163 2171 2164 - /* Protect copy from user */ 2165 - if (!access_ok(VERIFY_READ, buf, nbytes)) 2166 - return -EFAULT; 2167 - 2168 2172 memset(mybuf, 0, sizeof(mybuf)); 2169 2173 2170 2174 if (copy_from_user(mybuf, buf, nbytes)) ··· 2268 2280 if (nbytes > 64) 2269 2281 nbytes = 64; 2270 2282 2271 - /* Protect copy from user */ 2272 - if (!access_ok(VERIFY_READ, buf, nbytes)) 2273 - return -EFAULT; 2274 - 2275 2283 memset(mybuf, 0, sizeof(mybuf)); 2276 2284 2277 2285 if (copy_from_user(mybuf, buf, nbytes)) ··· 2337 2353 char *pbuf, *step_str; 2338 2354 int i; 2339 2355 size_t bsize; 2340 - 2341 - /* Protect copy from user */ 2342 - if (!access_ok(VERIFY_READ, buf, nbytes)) 2343 - return -EFAULT; 2344 2356 2345 2357 memset(mybuf, 0, sizeof(mybuf)); 2346 2358 memset(idiag_cmd, 0, sizeof(*idiag_cmd));
+142 -97
fs/fcntl.c
··· 109 109 } 110 110 EXPORT_SYMBOL(__f_setown); 111 111 112 - void f_setown(struct file *filp, unsigned long arg, int force) 112 + int f_setown(struct file *filp, unsigned long arg, int force) 113 113 { 114 114 enum pid_type type; 115 - struct pid *pid; 116 - int who = arg; 115 + struct pid *pid = NULL; 116 + int who = arg, ret = 0; 117 + 117 118 type = PIDTYPE_PID; 118 119 if (who < 0) { 120 + /* avoid overflow below */ 121 + if (who == INT_MIN) 122 + return -EINVAL; 123 + 119 124 type = PIDTYPE_PGID; 120 125 who = -who; 121 126 } 127 + 122 128 rcu_read_lock(); 123 - pid = find_vpid(who); 124 - __f_setown(filp, pid, type, force); 129 + if (who) { 130 + pid = find_vpid(who); 131 + if (!pid) 132 + ret = -ESRCH; 133 + } 134 + 135 + if (!ret) 136 + __f_setown(filp, pid, type, force); 125 137 rcu_read_unlock(); 138 + 139 + return ret; 126 140 } 127 141 EXPORT_SYMBOL(f_setown); 128 142 ··· 321 307 static long do_fcntl(int fd, unsigned int cmd, unsigned long arg, 322 308 struct file *filp) 323 309 { 310 + void __user *argp = (void __user *)arg; 311 + struct flock flock; 324 312 long err = -EINVAL; 325 313 326 314 switch (cmd) { ··· 350 334 case F_OFD_GETLK: 351 335 #endif 352 336 case F_GETLK: 353 - err = fcntl_getlk(filp, cmd, (struct flock __user *) arg); 337 + if (copy_from_user(&flock, argp, sizeof(flock))) 338 + return -EFAULT; 339 + err = fcntl_getlk(filp, cmd, &flock); 340 + if (!err && copy_to_user(argp, &flock, sizeof(flock))) 341 + return -EFAULT; 354 342 break; 355 343 #if BITS_PER_LONG != 32 356 344 /* 32-bit arches must use fcntl64() */ ··· 364 344 /* Fallthrough */ 365 345 case F_SETLK: 366 346 case F_SETLKW: 367 - err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg); 347 + if (copy_from_user(&flock, argp, sizeof(flock))) 348 + return -EFAULT; 349 + err = fcntl_setlk(fd, filp, cmd, &flock); 368 350 break; 369 351 case F_GETOWN: 370 352 /* ··· 380 358 force_successful_syscall_return(); 381 359 break; 382 360 case F_SETOWN: 383 - f_setown(filp, arg, 
1); 384 - err = 0; 361 + err = f_setown(filp, arg, 1); 385 362 break; 386 363 case F_GETOWN_EX: 387 364 err = f_getown_ex(filp, arg); ··· 471 450 SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd, 472 451 unsigned long, arg) 473 452 { 453 + void __user *argp = (void __user *)arg; 474 454 struct fd f = fdget_raw(fd); 455 + struct flock64 flock; 475 456 long err = -EBADF; 476 457 477 458 if (!f.file) ··· 491 468 switch (cmd) { 492 469 case F_GETLK64: 493 470 case F_OFD_GETLK: 494 - err = fcntl_getlk64(f.file, cmd, (struct flock64 __user *) arg); 471 + err = -EFAULT; 472 + if (copy_from_user(&flock, argp, sizeof(flock))) 473 + break; 474 + err = fcntl_getlk64(f.file, cmd, &flock); 475 + if (!err && copy_to_user(argp, &flock, sizeof(flock))) 476 + err = -EFAULT; 495 477 break; 496 478 case F_SETLK64: 497 479 case F_SETLKW64: 498 480 case F_OFD_SETLK: 499 481 case F_OFD_SETLKW: 500 - err = fcntl_setlk64(fd, f.file, cmd, 501 - (struct flock64 __user *) arg); 482 + err = -EFAULT; 483 + if (copy_from_user(&flock, argp, sizeof(flock))) 484 + break; 485 + err = fcntl_setlk64(fd, f.file, cmd, &flock); 502 486 break; 503 487 default: 504 488 err = do_fcntl(fd, cmd, arg, f.file); ··· 519 489 #endif 520 490 521 491 #ifdef CONFIG_COMPAT 492 + /* careful - don't use anywhere else */ 493 + #define copy_flock_fields(from, to) \ 494 + (to).l_type = (from).l_type; \ 495 + (to).l_whence = (from).l_whence; \ 496 + (to).l_start = (from).l_start; \ 497 + (to).l_len = (from).l_len; \ 498 + (to).l_pid = (from).l_pid; 499 + 522 500 static int get_compat_flock(struct flock *kfl, struct compat_flock __user *ufl) 523 501 { 524 - if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) || 525 - __get_user(kfl->l_type, &ufl->l_type) || 526 - __get_user(kfl->l_whence, &ufl->l_whence) || 527 - __get_user(kfl->l_start, &ufl->l_start) || 528 - __get_user(kfl->l_len, &ufl->l_len) || 529 - __get_user(kfl->l_pid, &ufl->l_pid)) 502 + struct compat_flock fl; 503 + 504 + if (copy_from_user(&fl, ufl, 
sizeof(struct compat_flock))) 530 505 return -EFAULT; 506 + copy_flock_fields(*kfl, fl); 507 + return 0; 508 + } 509 + 510 + static int get_compat_flock64(struct flock *kfl, struct compat_flock64 __user *ufl) 511 + { 512 + struct compat_flock64 fl; 513 + 514 + if (copy_from_user(&fl, ufl, sizeof(struct compat_flock64))) 515 + return -EFAULT; 516 + copy_flock_fields(*kfl, fl); 531 517 return 0; 532 518 } 533 519 534 520 static int put_compat_flock(struct flock *kfl, struct compat_flock __user *ufl) 535 521 { 536 - if (!access_ok(VERIFY_WRITE, ufl, sizeof(*ufl)) || 537 - __put_user(kfl->l_type, &ufl->l_type) || 538 - __put_user(kfl->l_whence, &ufl->l_whence) || 539 - __put_user(kfl->l_start, &ufl->l_start) || 540 - __put_user(kfl->l_len, &ufl->l_len) || 541 - __put_user(kfl->l_pid, &ufl->l_pid)) 522 + struct compat_flock fl; 523 + 524 + memset(&fl, 0, sizeof(struct compat_flock)); 525 + copy_flock_fields(fl, *kfl); 526 + if (copy_to_user(ufl, &fl, sizeof(struct compat_flock))) 542 527 return -EFAULT; 543 528 return 0; 544 529 } 545 530 546 - #ifndef HAVE_ARCH_GET_COMPAT_FLOCK64 547 - static int get_compat_flock64(struct flock *kfl, struct compat_flock64 __user *ufl) 548 - { 549 - if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) || 550 - __get_user(kfl->l_type, &ufl->l_type) || 551 - __get_user(kfl->l_whence, &ufl->l_whence) || 552 - __get_user(kfl->l_start, &ufl->l_start) || 553 - __get_user(kfl->l_len, &ufl->l_len) || 554 - __get_user(kfl->l_pid, &ufl->l_pid)) 555 - return -EFAULT; 556 - return 0; 557 - } 558 - #endif 559 - 560 - #ifndef HAVE_ARCH_PUT_COMPAT_FLOCK64 561 531 static int put_compat_flock64(struct flock *kfl, struct compat_flock64 __user *ufl) 562 532 { 563 - if (!access_ok(VERIFY_WRITE, ufl, sizeof(*ufl)) || 564 - __put_user(kfl->l_type, &ufl->l_type) || 565 - __put_user(kfl->l_whence, &ufl->l_whence) || 566 - __put_user(kfl->l_start, &ufl->l_start) || 567 - __put_user(kfl->l_len, &ufl->l_len) || 568 - __put_user(kfl->l_pid, &ufl->l_pid)) 533 + struct 
compat_flock64 fl; 534 + 535 + memset(&fl, 0, sizeof(struct compat_flock64)); 536 + copy_flock_fields(fl, *kfl); 537 + if (copy_to_user(ufl, &fl, sizeof(struct compat_flock64))) 569 538 return -EFAULT; 570 539 return 0; 571 540 } 572 - #endif 541 + #undef copy_flock_fields 573 542 574 543 static unsigned int 575 544 convert_fcntl_cmd(unsigned int cmd) ··· 585 556 return cmd; 586 557 } 587 558 559 + /* 560 + * GETLK was successful and we need to return the data, but it needs to fit in 561 + * the compat structure. 562 + * l_start shouldn't be too big, unless the original start + end is greater than 563 + * COMPAT_OFF_T_MAX, in which case the app was asking for trouble, so we return 564 + * -EOVERFLOW in that case. l_len could be too big, in which case we just 565 + * truncate it, and only allow the app to see that part of the conflicting lock 566 + * that might make sense to it anyway 567 + */ 568 + static int fixup_compat_flock(struct flock *flock) 569 + { 570 + if (flock->l_start > COMPAT_OFF_T_MAX) 571 + return -EOVERFLOW; 572 + if (flock->l_len > COMPAT_OFF_T_MAX) 573 + flock->l_len = COMPAT_OFF_T_MAX; 574 + return 0; 575 + } 576 + 588 577 COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd, 589 578 compat_ulong_t, arg) 590 579 { 591 - mm_segment_t old_fs; 592 - struct flock f; 593 - long ret; 594 - unsigned int conv_cmd; 580 + struct fd f = fdget_raw(fd); 581 + struct flock flock; 582 + long err = -EBADF; 583 + 584 + if (!f.file) 585 + return err; 586 + 587 + if (unlikely(f.file->f_mode & FMODE_PATH)) { 588 + if (!check_fcntl_cmd(cmd)) 589 + goto out_put; 590 + } 591 + 592 + err = security_file_fcntl(f.file, cmd, arg); 593 + if (err) 594 + goto out_put; 595 595 596 596 switch (cmd) { 597 597 case F_GETLK: 598 + err = get_compat_flock(&flock, compat_ptr(arg)); 599 + if (err) 600 + break; 601 + err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock); 602 + if (err) 603 + break; 604 + err = fixup_compat_flock(&flock); 605 + if (err) 606 + return 
err; 607 + err = put_compat_flock(&flock, compat_ptr(arg)); 608 + break; 609 + case F_GETLK64: 610 + case F_OFD_GETLK: 611 + err = get_compat_flock64(&flock, compat_ptr(arg)); 612 + if (err) 613 + break; 614 + err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock); 615 + if (err) 616 + break; 617 + err = fixup_compat_flock(&flock); 618 + if (err) 619 + return err; 620 + err = put_compat_flock64(&flock, compat_ptr(arg)); 621 + break; 598 622 case F_SETLK: 599 623 case F_SETLKW: 600 - ret = get_compat_flock(&f, compat_ptr(arg)); 601 - if (ret != 0) 624 + err = get_compat_flock(&flock, compat_ptr(arg)); 625 + if (err) 602 626 break; 603 - old_fs = get_fs(); 604 - set_fs(KERNEL_DS); 605 - ret = sys_fcntl(fd, cmd, (unsigned long)&f); 606 - set_fs(old_fs); 607 - if (cmd == F_GETLK && ret == 0) { 608 - /* GETLK was successful and we need to return the data... 609 - * but it needs to fit in the compat structure. 610 - * l_start shouldn't be too big, unless the original 611 - * start + end is greater than COMPAT_OFF_T_MAX, in which 612 - * case the app was asking for trouble, so we return 613 - * -EOVERFLOW in that case. 
614 - * l_len could be too big, in which case we just truncate it, 615 - * and only allow the app to see that part of the conflicting 616 - * lock that might make sense to it anyway 617 - */ 618 - 619 - if (f.l_start > COMPAT_OFF_T_MAX) 620 - ret = -EOVERFLOW; 621 - if (f.l_len > COMPAT_OFF_T_MAX) 622 - f.l_len = COMPAT_OFF_T_MAX; 623 - if (ret == 0) 624 - ret = put_compat_flock(&f, compat_ptr(arg)); 625 - } 627 + err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock); 626 628 break; 627 - 628 - case F_GETLK64: 629 629 case F_SETLK64: 630 630 case F_SETLKW64: 631 - case F_OFD_GETLK: 632 631 case F_OFD_SETLK: 633 632 case F_OFD_SETLKW: 634 - ret = get_compat_flock64(&f, compat_ptr(arg)); 635 - if (ret != 0) 633 + err = get_compat_flock64(&flock, compat_ptr(arg)); 634 + if (err) 636 635 break; 637 - old_fs = get_fs(); 638 - set_fs(KERNEL_DS); 639 - conv_cmd = convert_fcntl_cmd(cmd); 640 - ret = sys_fcntl(fd, conv_cmd, (unsigned long)&f); 641 - set_fs(old_fs); 642 - if ((conv_cmd == F_GETLK || conv_cmd == F_OFD_GETLK) && ret == 0) { 643 - /* need to return lock information - see above for commentary */ 644 - if (f.l_start > COMPAT_LOFF_T_MAX) 645 - ret = -EOVERFLOW; 646 - if (f.l_len > COMPAT_LOFF_T_MAX) 647 - f.l_len = COMPAT_LOFF_T_MAX; 648 - if (ret == 0) 649 - ret = put_compat_flock64(&f, compat_ptr(arg)); 650 - } 636 + err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock); 651 637 break; 652 - 653 638 default: 654 - ret = sys_fcntl(fd, cmd, arg); 639 + err = do_fcntl(fd, cmd, arg, f.file); 655 640 break; 656 641 } 657 - return ret; 642 + out_put: 643 + fdput(f); 644 + return err; 658 645 } 659 646 660 647 COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
+30 -67
fs/locks.c
··· 1858 1858 * 1859 1859 * Call this to establish a lease on the file. The "lease" argument is not 1860 1860 * used for F_UNLCK requests and may be NULL. For commands that set or alter 1861 - * an existing lease, the (*lease)->fl_lmops->lm_break operation must be set; 1862 - * if not, this function will return -ENOLCK (and generate a scary-looking 1861 + * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be 1862 + * set; if not, this function will return -ENOLCK (and generate a scary-looking 1863 1863 * stack trace). 1864 1864 * 1865 1865 * The "priv" pointer is passed directly to the lm_setup function as-is. It ··· 1972 1972 * @cmd: the type of lock to apply. 1973 1973 * 1974 1974 * Apply a %FL_FLOCK style lock to an open file descriptor. 1975 - * The @cmd can be one of 1975 + * The @cmd can be one of: 1976 1976 * 1977 - * %LOCK_SH -- a shared lock. 1978 - * 1979 - * %LOCK_EX -- an exclusive lock. 1980 - * 1981 - * %LOCK_UN -- remove an existing lock. 1982 - * 1983 - * %LOCK_MAND -- a `mandatory' flock. This exists to emulate Windows Share Modes. 1977 + * - %LOCK_SH -- a shared lock. 1978 + * - %LOCK_EX -- an exclusive lock. 1979 + * - %LOCK_UN -- remove an existing lock. 1980 + * - %LOCK_MAND -- a 'mandatory' flock. 1981 + * This exists to emulate Windows Share Modes. 1984 1982 * 1985 1983 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other 1986 1984 * processes read and write access respectively. ··· 2084 2086 /* Report the first existing lock that would conflict with l. 2085 2087 * This implements the F_GETLK command of fcntl(). 
2086 2088 */ 2087 - int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock __user *l) 2089 + int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock) 2088 2090 { 2089 2091 struct file_lock file_lock; 2090 - struct flock flock; 2091 2092 int error; 2092 2093 2093 - error = -EFAULT; 2094 - if (copy_from_user(&flock, l, sizeof(flock))) 2095 - goto out; 2096 2094 error = -EINVAL; 2097 - if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK)) 2095 + if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK) 2098 2096 goto out; 2099 2097 2100 - error = flock_to_posix_lock(filp, &file_lock, &flock); 2098 + error = flock_to_posix_lock(filp, &file_lock, flock); 2101 2099 if (error) 2102 2100 goto out; 2103 2101 2104 2102 if (cmd == F_OFD_GETLK) { 2105 2103 error = -EINVAL; 2106 - if (flock.l_pid != 0) 2104 + if (flock->l_pid != 0) 2107 2105 goto out; 2108 2106 2109 2107 cmd = F_GETLK; ··· 2111 2117 if (error) 2112 2118 goto out; 2113 2119 2114 - flock.l_type = file_lock.fl_type; 2120 + flock->l_type = file_lock.fl_type; 2115 2121 if (file_lock.fl_type != F_UNLCK) { 2116 - error = posix_lock_to_flock(&flock, &file_lock); 2122 + error = posix_lock_to_flock(flock, &file_lock); 2117 2123 if (error) 2118 2124 goto rel_priv; 2119 2125 } 2120 - error = -EFAULT; 2121 - if (!copy_to_user(l, &flock, sizeof(flock))) 2122 - error = 0; 2123 2126 rel_priv: 2124 2127 locks_release_private(&file_lock); 2125 2128 out: ··· 2209 2218 * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 
2210 2219 */ 2211 2220 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, 2212 - struct flock __user *l) 2221 + struct flock *flock) 2213 2222 { 2214 2223 struct file_lock *file_lock = locks_alloc_lock(); 2215 - struct flock flock; 2216 - struct inode *inode; 2224 + struct inode *inode = locks_inode(filp); 2217 2225 struct file *f; 2218 2226 int error; 2219 2227 2220 2228 if (file_lock == NULL) 2221 2229 return -ENOLCK; 2222 - 2223 - inode = locks_inode(filp); 2224 - 2225 - /* 2226 - * This might block, so we do it before checking the inode. 2227 - */ 2228 - error = -EFAULT; 2229 - if (copy_from_user(&flock, l, sizeof(flock))) 2230 - goto out; 2231 2230 2232 2231 /* Don't allow mandatory locks on files that may be memory mapped 2233 2232 * and shared. ··· 2227 2246 goto out; 2228 2247 } 2229 2248 2230 - error = flock_to_posix_lock(filp, file_lock, &flock); 2249 + error = flock_to_posix_lock(filp, file_lock, flock); 2231 2250 if (error) 2232 2251 goto out; 2233 2252 ··· 2242 2261 switch (cmd) { 2243 2262 case F_OFD_SETLK: 2244 2263 error = -EINVAL; 2245 - if (flock.l_pid != 0) 2264 + if (flock->l_pid != 0) 2246 2265 goto out; 2247 2266 2248 2267 cmd = F_SETLK; ··· 2251 2270 break; 2252 2271 case F_OFD_SETLKW: 2253 2272 error = -EINVAL; 2254 - if (flock.l_pid != 0) 2273 + if (flock->l_pid != 0) 2255 2274 goto out; 2256 2275 2257 2276 cmd = F_SETLKW; ··· 2296 2315 /* Report the first existing lock that would conflict with l. 2297 2316 * This implements the F_GETLK command of fcntl(). 
2298 2317 */ 2299 - int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l) 2318 + int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock) 2300 2319 { 2301 2320 struct file_lock file_lock; 2302 - struct flock64 flock; 2303 2321 int error; 2304 2322 2305 - error = -EFAULT; 2306 - if (copy_from_user(&flock, l, sizeof(flock))) 2307 - goto out; 2308 2323 error = -EINVAL; 2309 - if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK)) 2324 + if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK) 2310 2325 goto out; 2311 2326 2312 - error = flock64_to_posix_lock(filp, &file_lock, &flock); 2327 + error = flock64_to_posix_lock(filp, &file_lock, flock); 2313 2328 if (error) 2314 2329 goto out; 2315 2330 2316 2331 if (cmd == F_OFD_GETLK) { 2317 2332 error = -EINVAL; 2318 - if (flock.l_pid != 0) 2333 + if (flock->l_pid != 0) 2319 2334 goto out; 2320 2335 2321 2336 cmd = F_GETLK64; ··· 2323 2346 if (error) 2324 2347 goto out; 2325 2348 2326 - flock.l_type = file_lock.fl_type; 2349 + flock->l_type = file_lock.fl_type; 2327 2350 if (file_lock.fl_type != F_UNLCK) 2328 - posix_lock_to_flock64(&flock, &file_lock); 2329 - 2330 - error = -EFAULT; 2331 - if (!copy_to_user(l, &flock, sizeof(flock))) 2332 - error = 0; 2351 + posix_lock_to_flock64(flock, &file_lock); 2333 2352 2334 2353 locks_release_private(&file_lock); 2335 2354 out: ··· 2336 2363 * This implements both the F_SETLK and F_SETLKW commands of fcntl(). 
2337 2364 */ 2338 2365 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, 2339 - struct flock64 __user *l) 2366 + struct flock64 *flock) 2340 2367 { 2341 2368 struct file_lock *file_lock = locks_alloc_lock(); 2342 - struct flock64 flock; 2343 - struct inode *inode; 2369 + struct inode *inode = locks_inode(filp); 2344 2370 struct file *f; 2345 2371 int error; 2346 2372 2347 2373 if (file_lock == NULL) 2348 2374 return -ENOLCK; 2349 - 2350 - /* 2351 - * This might block, so we do it before checking the inode. 2352 - */ 2353 - error = -EFAULT; 2354 - if (copy_from_user(&flock, l, sizeof(flock))) 2355 - goto out; 2356 - 2357 - inode = locks_inode(filp); 2358 2375 2359 2376 /* Don't allow mandatory locks on files that may be memory mapped 2360 2377 * and shared. ··· 2354 2391 goto out; 2355 2392 } 2356 2393 2357 - error = flock64_to_posix_lock(filp, file_lock, &flock); 2394 + error = flock64_to_posix_lock(filp, file_lock, flock); 2358 2395 if (error) 2359 2396 goto out; 2360 2397 ··· 2369 2406 switch (cmd) { 2370 2407 case F_OFD_SETLK: 2371 2408 error = -EINVAL; 2372 - if (flock.l_pid != 0) 2409 + if (flock->l_pid != 0) 2373 2410 goto out; 2374 2411 2375 2412 cmd = F_SETLK64; ··· 2378 2415 break; 2379 2416 case F_OFD_SETLKW: 2380 2417 error = -EINVAL; 2381 - if (flock.l_pid != 0) 2418 + if (flock->l_pid != 0) 2382 2419 goto out; 2383 2420 2384 2421 cmd = F_SETLKW64;
+16 -23
fs/nfsd/vfs.c
··· 1464 1464 __be32 1465 1465 nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp) 1466 1466 { 1467 - mm_segment_t oldfs; 1468 1467 __be32 err; 1469 - int host_err; 1468 + const char *link; 1470 1469 struct path path; 1470 + DEFINE_DELAYED_CALL(done); 1471 + int len; 1471 1472 1472 1473 err = fh_verify(rqstp, fhp, S_IFLNK, NFSD_MAY_NOP); 1473 - if (err) 1474 - goto out; 1474 + if (unlikely(err)) 1475 + return err; 1475 1476 1476 1477 path.mnt = fhp->fh_export->ex_path.mnt; 1477 1478 path.dentry = fhp->fh_dentry; 1478 1479 1479 - err = nfserr_inval; 1480 - if (!d_is_symlink(path.dentry)) 1481 - goto out; 1480 + if (unlikely(!d_is_symlink(path.dentry))) 1481 + return nfserr_inval; 1482 1482 1483 1483 touch_atime(&path); 1484 - /* N.B. Why does this call need a get_fs()?? 1485 - * Remove the set_fs and watch the fireworks:-) --okir 1486 - */ 1487 1484 1488 - oldfs = get_fs(); set_fs(KERNEL_DS); 1489 - host_err = vfs_readlink(path.dentry, (char __user *)buf, *lenp); 1490 - set_fs(oldfs); 1485 + link = vfs_get_link(path.dentry, &done); 1486 + if (IS_ERR(link)) 1487 + return nfserrno(PTR_ERR(link)); 1491 1488 1492 - if (host_err < 0) 1493 - goto out_nfserr; 1494 - *lenp = host_err; 1495 - err = 0; 1496 - out: 1497 - return err; 1498 - 1499 - out_nfserr: 1500 - err = nfserrno(host_err); 1501 - goto out; 1489 + len = strlen(link); 1490 + if (len < *lenp) 1491 + *lenp = len; 1492 + memcpy(buf, link, *lenp); 1493 + do_delayed_call(&done); 1494 + return 0; 1502 1495 } 1503 1496 1504 1497 /*
+30 -28
fs/statfs.c
··· 244 244 #ifdef CONFIG_COMPAT 245 245 static int put_compat_statfs(struct compat_statfs __user *ubuf, struct kstatfs *kbuf) 246 246 { 247 + struct compat_statfs buf; 247 248 if (sizeof ubuf->f_blocks == 4) { 248 249 if ((kbuf->f_blocks | kbuf->f_bfree | kbuf->f_bavail | 249 250 kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL) ··· 258 257 && (kbuf->f_ffree & 0xffffffff00000000ULL)) 259 258 return -EOVERFLOW; 260 259 } 261 - if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)) || 262 - __put_user(kbuf->f_type, &ubuf->f_type) || 263 - __put_user(kbuf->f_bsize, &ubuf->f_bsize) || 264 - __put_user(kbuf->f_blocks, &ubuf->f_blocks) || 265 - __put_user(kbuf->f_bfree, &ubuf->f_bfree) || 266 - __put_user(kbuf->f_bavail, &ubuf->f_bavail) || 267 - __put_user(kbuf->f_files, &ubuf->f_files) || 268 - __put_user(kbuf->f_ffree, &ubuf->f_ffree) || 269 - __put_user(kbuf->f_namelen, &ubuf->f_namelen) || 270 - __put_user(kbuf->f_fsid.val[0], &ubuf->f_fsid.val[0]) || 271 - __put_user(kbuf->f_fsid.val[1], &ubuf->f_fsid.val[1]) || 272 - __put_user(kbuf->f_frsize, &ubuf->f_frsize) || 273 - __put_user(kbuf->f_flags, &ubuf->f_flags) || 274 - __clear_user(ubuf->f_spare, sizeof(ubuf->f_spare))) 260 + memset(&buf, 0, sizeof(struct compat_statfs)); 261 + buf.f_type = kbuf->f_type; 262 + buf.f_bsize = kbuf->f_bsize; 263 + buf.f_blocks = kbuf->f_blocks; 264 + buf.f_bfree = kbuf->f_bfree; 265 + buf.f_bavail = kbuf->f_bavail; 266 + buf.f_files = kbuf->f_files; 267 + buf.f_ffree = kbuf->f_ffree; 268 + buf.f_namelen = kbuf->f_namelen; 269 + buf.f_fsid.val[0] = kbuf->f_fsid.val[0]; 270 + buf.f_fsid.val[1] = kbuf->f_fsid.val[1]; 271 + buf.f_frsize = kbuf->f_frsize; 272 + buf.f_flags = kbuf->f_flags; 273 + if (copy_to_user(ubuf, &buf, sizeof(struct compat_statfs))) 275 274 return -EFAULT; 276 275 return 0; 277 276 } ··· 300 299 301 300 static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf) 302 301 { 302 + struct compat_statfs64 buf; 303 303 if 
(sizeof(ubuf->f_bsize) == 4) { 304 304 if ((kbuf->f_type | kbuf->f_bsize | kbuf->f_namelen | 305 305 kbuf->f_frsize | kbuf->f_flags) & 0xffffffff00000000ULL) ··· 314 312 && (kbuf->f_ffree & 0xffffffff00000000ULL)) 315 313 return -EOVERFLOW; 316 314 } 317 - if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)) || 318 - __put_user(kbuf->f_type, &ubuf->f_type) || 319 - __put_user(kbuf->f_bsize, &ubuf->f_bsize) || 320 - __put_user(kbuf->f_blocks, &ubuf->f_blocks) || 321 - __put_user(kbuf->f_bfree, &ubuf->f_bfree) || 322 - __put_user(kbuf->f_bavail, &ubuf->f_bavail) || 323 - __put_user(kbuf->f_files, &ubuf->f_files) || 324 - __put_user(kbuf->f_ffree, &ubuf->f_ffree) || 325 - __put_user(kbuf->f_namelen, &ubuf->f_namelen) || 326 - __put_user(kbuf->f_fsid.val[0], &ubuf->f_fsid.val[0]) || 327 - __put_user(kbuf->f_fsid.val[1], &ubuf->f_fsid.val[1]) || 328 - __put_user(kbuf->f_frsize, &ubuf->f_frsize) || 329 - __put_user(kbuf->f_flags, &ubuf->f_flags) || 330 - __clear_user(ubuf->f_spare, sizeof(ubuf->f_spare))) 315 + memset(&buf, 0, sizeof(struct compat_statfs64)); 316 + buf.f_type = kbuf->f_type; 317 + buf.f_bsize = kbuf->f_bsize; 318 + buf.f_blocks = kbuf->f_blocks; 319 + buf.f_bfree = kbuf->f_bfree; 320 + buf.f_bavail = kbuf->f_bavail; 321 + buf.f_files = kbuf->f_files; 322 + buf.f_ffree = kbuf->f_ffree; 323 + buf.f_namelen = kbuf->f_namelen; 324 + buf.f_fsid.val[0] = kbuf->f_fsid.val[0]; 325 + buf.f_fsid.val[1] = kbuf->f_fsid.val[1]; 326 + buf.f_frsize = kbuf->f_frsize; 327 + buf.f_flags = kbuf->f_flags; 328 + if (copy_to_user(ubuf, &buf, sizeof(struct compat_statfs64))) 331 329 return -EFAULT; 332 330 return 0; 333 331 }
+5 -5
include/linux/fs.h
··· 1047 1047 } 1048 1048 1049 1049 #ifdef CONFIG_FILE_LOCKING 1050 - extern int fcntl_getlk(struct file *, unsigned int, struct flock __user *); 1050 + extern int fcntl_getlk(struct file *, unsigned int, struct flock *); 1051 1051 extern int fcntl_setlk(unsigned int, struct file *, unsigned int, 1052 - struct flock __user *); 1052 + struct flock *); 1053 1053 1054 1054 #if BITS_PER_LONG == 32 1055 - extern int fcntl_getlk64(struct file *, unsigned int, struct flock64 __user *); 1055 + extern int fcntl_getlk64(struct file *, unsigned int, struct flock64 *); 1056 1056 extern int fcntl_setlk64(unsigned int, struct file *, unsigned int, 1057 - struct flock64 __user *); 1057 + struct flock64 *); 1058 1058 #endif 1059 1059 1060 1060 extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg); ··· 1258 1258 extern void kill_fasync(struct fasync_struct **, int, int); 1259 1259 1260 1260 extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force); 1261 - extern void f_setown(struct file *filp, unsigned long arg, int force); 1261 + extern int f_setown(struct file *filp, unsigned long arg, int force); 1262 1262 extern void f_delown(struct file *filp); 1263 1263 extern pid_t f_getown(struct file *filp); 1264 1264 extern int send_sigurg(struct fown_struct *fown);
+1 -2
net/socket.c
··· 991 991 err = -EFAULT; 992 992 if (get_user(pid, (int __user *)argp)) 993 993 break; 994 - f_setown(sock->file, pid, 1); 995 - err = 0; 994 + err = f_setown(sock->file, pid, 1); 996 995 break; 997 996 case FIOGETOWN: 998 997 case SIOCGPGRP: