return statement cleanup: remove pointless parentheses

This patch removes pointless parentheses from return statements.

Signed-off-by: Jesper Juhl <juhl-lkml@dif.dk>
Signed-off-by: Adrian Bunk <bunk@stusta.de>

Authored by Jesper Juhl and committed by Adrian Bunk (commit 014c2544, parent 46a9f65f).

+275 -275
+2 -2
arch/um/include/sysdep-i386/checksum.h
··· 36 int len, int sum) 37 { 38 memcpy(dst, src, len); 39 - return(csum_partial(dst, len, sum)); 40 } 41 42 /* ··· 104 : "=r" (sum), "=r" (iph), "=r" (ihl) 105 : "1" (iph), "2" (ihl) 106 : "memory"); 107 - return(sum); 108 } 109 110 /*
··· 36 int len, int sum) 37 { 38 memcpy(dst, src, len); 39 + return csum_partial(dst, len, sum); 40 } 41 42 /* ··· 104 : "=r" (sum), "=r" (iph), "=r" (ihl) 105 : "1" (iph), "2" (ihl) 106 : "memory"); 107 + return sum; 108 } 109 110 /*
+64 -64
drivers/char/stallion.c
··· 738 stl_init(); 739 restore_flags(flags); 740 741 - return(0); 742 } 743 744 /*****************************************************************************/ ··· 889 } 890 val = (val * base) + c; 891 } 892 - return(val); 893 } 894 895 /*****************************************************************************/ ··· 908 #endif 909 910 if ((argp[0] == (char *) NULL) || (*argp[0] == 0)) 911 - return(0); 912 913 for (sp = argp[0], i = 0; ((*sp != 0) && (i < 25)); sp++, i++) 914 *sp = TOLOWER(*sp); ··· 935 } 936 if ((argp[i] != (char *) NULL) && (*argp[i] != 0)) 937 confp->irq = stl_atol(argp[i]); 938 - return(1); 939 } 940 941 /*****************************************************************************/ ··· 946 947 static void *stl_memalloc(int len) 948 { 949 - return((void *) kmalloc(len, GFP_KERNEL)); 950 } 951 952 /*****************************************************************************/ ··· 963 if (brdp == (stlbrd_t *) NULL) { 964 printk("STALLION: failed to allocate memory (size=%d)\n", 965 sizeof(stlbrd_t)); 966 - return((stlbrd_t *) NULL); 967 } 968 969 memset(brdp, 0, sizeof(stlbrd_t)); 970 brdp->magic = STL_BOARDMAGIC; 971 - return(brdp); 972 } 973 974 /*****************************************************************************/ ··· 988 minordev = tty->index; 989 brdnr = MINOR2BRD(minordev); 990 if (brdnr >= stl_nrbrds) 991 - return(-ENODEV); 992 brdp = stl_brds[brdnr]; 993 if (brdp == (stlbrd_t *) NULL) 994 - return(-ENODEV); 995 minordev = MINOR2PORT(minordev); 996 for (portnr = -1, panelnr = 0; (panelnr < STL_MAXPANELS); panelnr++) { 997 if (brdp->panels[panelnr] == (stlpanel_t *) NULL) ··· 1003 minordev -= brdp->panels[panelnr]->nrports; 1004 } 1005 if (portnr < 0) 1006 - return(-ENODEV); 1007 1008 portp = brdp->panels[panelnr]->ports[portnr]; 1009 if (portp == (stlport_t *) NULL) 1010 - return(-ENODEV); 1011 1012 /* 1013 * On the first open of the device setup the port hardware, and ··· 1021 if (portp->tx.buf == (char *) NULL) { 1022 
portp->tx.buf = (char *) stl_memalloc(STL_TXBUFSIZE); 1023 if (portp->tx.buf == (char *) NULL) 1024 - return(-ENOMEM); 1025 portp->tx.head = portp->tx.buf; 1026 portp->tx.tail = portp->tx.buf; 1027 } ··· 1043 if (portp->flags & ASYNC_CLOSING) { 1044 interruptible_sleep_on(&portp->close_wait); 1045 if (portp->flags & ASYNC_HUP_NOTIFY) 1046 - return(-EAGAIN); 1047 - return(-ERESTARTSYS); 1048 } 1049 1050 /* ··· 1054 */ 1055 if (!(filp->f_flags & O_NONBLOCK)) { 1056 if ((rc = stl_waitcarrier(portp, filp)) != 0) 1057 - return(rc); 1058 } 1059 portp->flags |= ASYNC_NORMAL_ACTIVE; 1060 1061 - return(0); 1062 } 1063 1064 /*****************************************************************************/ ··· 1115 portp->openwaitcnt--; 1116 restore_flags(flags); 1117 1118 - return(rc); 1119 } 1120 1121 /*****************************************************************************/ ··· 1211 1212 if ((tty == (struct tty_struct *) NULL) || 1213 (stl_tmpwritebuf == (char *) NULL)) 1214 - return(0); 1215 portp = tty->driver_data; 1216 if (portp == (stlport_t *) NULL) 1217 - return(0); 1218 if (portp->tx.buf == (char *) NULL) 1219 - return(0); 1220 1221 /* 1222 * If copying direct from user space we must cater for page faults, ··· 1255 clear_bit(ASYI_TXLOW, &portp->istate); 1256 stl_startrxtx(portp, -1, 1); 1257 1258 - return(count); 1259 } 1260 1261 /*****************************************************************************/ ··· 1336 #endif 1337 1338 if (tty == (struct tty_struct *) NULL) 1339 - return(0); 1340 portp = tty->driver_data; 1341 if (portp == (stlport_t *) NULL) 1342 - return(0); 1343 if (portp->tx.buf == (char *) NULL) 1344 - return(0); 1345 1346 head = portp->tx.head; 1347 tail = portp->tx.tail; 1348 - return((head >= tail) ? 
(STL_TXBUFSIZE - (head - tail) - 1) : (tail - head - 1)); 1349 } 1350 1351 /*****************************************************************************/ ··· 1370 #endif 1371 1372 if (tty == (struct tty_struct *) NULL) 1373 - return(0); 1374 portp = tty->driver_data; 1375 if (portp == (stlport_t *) NULL) 1376 - return(0); 1377 if (portp->tx.buf == (char *) NULL) 1378 - return(0); 1379 1380 head = portp->tx.head; 1381 tail = portp->tx.tail; 1382 size = (head >= tail) ? (head - tail) : (STL_TXBUFSIZE - (tail - head)); 1383 if ((size == 0) && test_bit(ASYI_TXBUSY, &portp->istate)) 1384 size = 1; 1385 - return(size); 1386 } 1387 1388 /*****************************************************************************/ ··· 1447 (sio.close_delay != portp->close_delay) || 1448 ((sio.flags & ~ASYNC_USR_MASK) != 1449 (portp->flags & ~ASYNC_USR_MASK))) 1450 - return(-EPERM); 1451 } 1452 1453 portp->flags = (portp->flags & ~ASYNC_USR_MASK) | ··· 1457 portp->closing_wait = sio.closing_wait; 1458 portp->custom_divisor = sio.custom_divisor; 1459 stl_setport(portp, portp->tty->termios); 1460 - return(0); 1461 } 1462 1463 /*****************************************************************************/ ··· 1467 stlport_t *portp; 1468 1469 if (tty == (struct tty_struct *) NULL) 1470 - return(-ENODEV); 1471 portp = tty->driver_data; 1472 if (portp == (stlport_t *) NULL) 1473 - return(-ENODEV); 1474 if (tty->flags & (1 << TTY_IO_ERROR)) 1475 - return(-EIO); 1476 1477 return stl_getsignals(portp); 1478 } ··· 1484 int rts = -1, dtr = -1; 1485 1486 if (tty == (struct tty_struct *) NULL) 1487 - return(-ENODEV); 1488 portp = tty->driver_data; 1489 if (portp == (stlport_t *) NULL) 1490 - return(-ENODEV); 1491 if (tty->flags & (1 << TTY_IO_ERROR)) 1492 - return(-EIO); 1493 1494 if (set & TIOCM_RTS) 1495 rts = 1; ··· 1517 #endif 1518 1519 if (tty == (struct tty_struct *) NULL) 1520 - return(-ENODEV); 1521 portp = tty->driver_data; 1522 if (portp == (stlport_t *) NULL) 1523 - return(-ENODEV); 1524 
1525 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && 1526 (cmd != COM_GETPORTSTATS) && (cmd != COM_CLRPORTSTATS)) { 1527 if (tty->flags & (1 << TTY_IO_ERROR)) 1528 - return(-EIO); 1529 } 1530 1531 rc = 0; ··· 1566 break; 1567 } 1568 1569 - return(rc); 1570 } 1571 1572 /*****************************************************************************/ ··· 1872 pos[(MAXLINE - 2)] = '+'; 1873 pos[(MAXLINE - 1)] = '\n'; 1874 1875 - return(MAXLINE); 1876 } 1877 1878 /*****************************************************************************/ ··· 1957 1958 stl_readdone: 1959 *start = page; 1960 - return(pos - page); 1961 } 1962 1963 /*****************************************************************************/ ··· 2349 } else { 2350 rc = 0; 2351 } 2352 - return(rc); 2353 } 2354 2355 /*****************************************************************************/ ··· 3116 return -1; 3117 } 3118 3119 - return(0); 3120 } 3121 3122 /*****************************************************************************/ ··· 3132 static int stl_cd1400getreg(stlport_t *portp, int regnr) 3133 { 3134 outb((regnr + portp->uartaddr), portp->ioaddr); 3135 - return(inb(portp->ioaddr + EREG_DATA)); 3136 } 3137 3138 static void stl_cd1400setreg(stlport_t *portp, int regnr, int value) ··· 3146 outb((regnr + portp->uartaddr), portp->ioaddr); 3147 if (inb(portp->ioaddr + EREG_DATA) != value) { 3148 outb(value, portp->ioaddr + EREG_DATA); 3149 - return(1); 3150 } 3151 - return(0); 3152 } 3153 3154 /*****************************************************************************/ ··· 3206 } 3207 3208 BRDDISABLE(panelp->brdnr); 3209 - return(chipmask); 3210 } 3211 3212 /*****************************************************************************/ ··· 3557 #else 3558 sigs |= TIOCM_DSR; 3559 #endif 3560 - return(sigs); 3561 } 3562 3563 /*****************************************************************************/ ··· 3830 #endif 3831 3832 if (portp == (stlport_t *) NULL) 3833 - return(0); 3834 3835 - 
return(test_bit(ASYI_TXBUSY, &portp->istate) ? 1 : 0); 3836 } 3837 3838 /*****************************************************************************/ ··· 3912 outb((SRER + portp->uartaddr), ioaddr); 3913 outb((inb(ioaddr + EREG_DATA) & ~(SRER_TXDATA | SRER_TXEMPTY)), 3914 (ioaddr + EREG_DATA)); 3915 - return(1); 3916 } else if (portp->brklen > 1) { 3917 outb((TDR + portp->uartaddr), ioaddr); 3918 outb(ETC_CMD, (ioaddr + EREG_DATA)); 3919 outb(ETC_STOPBREAK, (ioaddr + EREG_DATA)); 3920 portp->brklen = -1; 3921 - return(1); 3922 } else { 3923 outb((COR2 + portp->uartaddr), ioaddr); 3924 outb((inb(ioaddr + EREG_DATA) & ~COR2_ETC), 3925 (ioaddr + EREG_DATA)); 3926 portp->brklen = 0; 3927 } 3928 - return(0); 3929 } 3930 3931 /*****************************************************************************/ ··· 4166 static int stl_sc26198getreg(stlport_t *portp, int regnr) 4167 { 4168 outb((regnr | portp->uartaddr), (portp->ioaddr + XP_ADDR)); 4169 - return(inb(portp->ioaddr + XP_DATA)); 4170 } 4171 4172 static void stl_sc26198setreg(stlport_t *portp, int regnr, int value) ··· 4180 outb((regnr | portp->uartaddr), (portp->ioaddr + XP_ADDR)); 4181 if (inb(portp->ioaddr + XP_DATA) != value) { 4182 outb(value, (portp->ioaddr + XP_DATA)); 4183 - return(1); 4184 } 4185 - return(0); 4186 } 4187 4188 /*****************************************************************************/ ··· 4194 static int stl_sc26198getglobreg(stlport_t *portp, int regnr) 4195 { 4196 outb(regnr, (portp->ioaddr + XP_ADDR)); 4197 - return(inb(portp->ioaddr + XP_DATA)); 4198 } 4199 4200 #if 0 ··· 4252 } 4253 4254 BRDDISABLE(panelp->brdnr); 4255 - return(chipmask); 4256 } 4257 4258 /*****************************************************************************/ ··· 4546 sigs |= (ipr & IPR_DTR) ? 0: TIOCM_DTR; 4547 sigs |= (ipr & IPR_RTS) ? 
0: TIOCM_RTS; 4548 sigs |= TIOCM_DSR; 4549 - return(sigs); 4550 } 4551 4552 /*****************************************************************************/ ··· 4828 #endif 4829 4830 if (portp == (stlport_t *) NULL) 4831 - return(0); 4832 if (test_bit(ASYI_TXBUSY, &portp->istate)) 4833 - return(1); 4834 4835 save_flags(flags); 4836 cli(); ··· 4839 BRDDISABLE(portp->brdnr); 4840 restore_flags(flags); 4841 4842 - return((sr & SR_TXEMPTY) ? 0 : 1); 4843 } 4844 4845 /*****************************************************************************/
··· 738 stl_init(); 739 restore_flags(flags); 740 741 + return 0; 742 } 743 744 /*****************************************************************************/ ··· 889 } 890 val = (val * base) + c; 891 } 892 + return val; 893 } 894 895 /*****************************************************************************/ ··· 908 #endif 909 910 if ((argp[0] == (char *) NULL) || (*argp[0] == 0)) 911 + return 0; 912 913 for (sp = argp[0], i = 0; ((*sp != 0) && (i < 25)); sp++, i++) 914 *sp = TOLOWER(*sp); ··· 935 } 936 if ((argp[i] != (char *) NULL) && (*argp[i] != 0)) 937 confp->irq = stl_atol(argp[i]); 938 + return 1; 939 } 940 941 /*****************************************************************************/ ··· 946 947 static void *stl_memalloc(int len) 948 { 949 + return (void *) kmalloc(len, GFP_KERNEL); 950 } 951 952 /*****************************************************************************/ ··· 963 if (brdp == (stlbrd_t *) NULL) { 964 printk("STALLION: failed to allocate memory (size=%d)\n", 965 sizeof(stlbrd_t)); 966 + return (stlbrd_t *) NULL; 967 } 968 969 memset(brdp, 0, sizeof(stlbrd_t)); 970 brdp->magic = STL_BOARDMAGIC; 971 + return brdp; 972 } 973 974 /*****************************************************************************/ ··· 988 minordev = tty->index; 989 brdnr = MINOR2BRD(minordev); 990 if (brdnr >= stl_nrbrds) 991 + return -ENODEV; 992 brdp = stl_brds[brdnr]; 993 if (brdp == (stlbrd_t *) NULL) 994 + return -ENODEV; 995 minordev = MINOR2PORT(minordev); 996 for (portnr = -1, panelnr = 0; (panelnr < STL_MAXPANELS); panelnr++) { 997 if (brdp->panels[panelnr] == (stlpanel_t *) NULL) ··· 1003 minordev -= brdp->panels[panelnr]->nrports; 1004 } 1005 if (portnr < 0) 1006 + return -ENODEV; 1007 1008 portp = brdp->panels[panelnr]->ports[portnr]; 1009 if (portp == (stlport_t *) NULL) 1010 + return -ENODEV; 1011 1012 /* 1013 * On the first open of the device setup the port hardware, and ··· 1021 if (portp->tx.buf == (char *) NULL) { 1022 portp->tx.buf = 
(char *) stl_memalloc(STL_TXBUFSIZE); 1023 if (portp->tx.buf == (char *) NULL) 1024 + return -ENOMEM; 1025 portp->tx.head = portp->tx.buf; 1026 portp->tx.tail = portp->tx.buf; 1027 } ··· 1043 if (portp->flags & ASYNC_CLOSING) { 1044 interruptible_sleep_on(&portp->close_wait); 1045 if (portp->flags & ASYNC_HUP_NOTIFY) 1046 + return -EAGAIN; 1047 + return -ERESTARTSYS; 1048 } 1049 1050 /* ··· 1054 */ 1055 if (!(filp->f_flags & O_NONBLOCK)) { 1056 if ((rc = stl_waitcarrier(portp, filp)) != 0) 1057 + return rc; 1058 } 1059 portp->flags |= ASYNC_NORMAL_ACTIVE; 1060 1061 + return 0; 1062 } 1063 1064 /*****************************************************************************/ ··· 1115 portp->openwaitcnt--; 1116 restore_flags(flags); 1117 1118 + return rc; 1119 } 1120 1121 /*****************************************************************************/ ··· 1211 1212 if ((tty == (struct tty_struct *) NULL) || 1213 (stl_tmpwritebuf == (char *) NULL)) 1214 + return 0; 1215 portp = tty->driver_data; 1216 if (portp == (stlport_t *) NULL) 1217 + return 0; 1218 if (portp->tx.buf == (char *) NULL) 1219 + return 0; 1220 1221 /* 1222 * If copying direct from user space we must cater for page faults, ··· 1255 clear_bit(ASYI_TXLOW, &portp->istate); 1256 stl_startrxtx(portp, -1, 1); 1257 1258 + return count; 1259 } 1260 1261 /*****************************************************************************/ ··· 1336 #endif 1337 1338 if (tty == (struct tty_struct *) NULL) 1339 + return 0; 1340 portp = tty->driver_data; 1341 if (portp == (stlport_t *) NULL) 1342 + return 0; 1343 if (portp->tx.buf == (char *) NULL) 1344 + return 0; 1345 1346 head = portp->tx.head; 1347 tail = portp->tx.tail; 1348 + return ((head >= tail) ? 
(STL_TXBUFSIZE - (head - tail) - 1) : (tail - head - 1)); 1349 } 1350 1351 /*****************************************************************************/ ··· 1370 #endif 1371 1372 if (tty == (struct tty_struct *) NULL) 1373 + return 0; 1374 portp = tty->driver_data; 1375 if (portp == (stlport_t *) NULL) 1376 + return 0; 1377 if (portp->tx.buf == (char *) NULL) 1378 + return 0; 1379 1380 head = portp->tx.head; 1381 tail = portp->tx.tail; 1382 size = (head >= tail) ? (head - tail) : (STL_TXBUFSIZE - (tail - head)); 1383 if ((size == 0) && test_bit(ASYI_TXBUSY, &portp->istate)) 1384 size = 1; 1385 + return size; 1386 } 1387 1388 /*****************************************************************************/ ··· 1447 (sio.close_delay != portp->close_delay) || 1448 ((sio.flags & ~ASYNC_USR_MASK) != 1449 (portp->flags & ~ASYNC_USR_MASK))) 1450 + return -EPERM; 1451 } 1452 1453 portp->flags = (portp->flags & ~ASYNC_USR_MASK) | ··· 1457 portp->closing_wait = sio.closing_wait; 1458 portp->custom_divisor = sio.custom_divisor; 1459 stl_setport(portp, portp->tty->termios); 1460 + return 0; 1461 } 1462 1463 /*****************************************************************************/ ··· 1467 stlport_t *portp; 1468 1469 if (tty == (struct tty_struct *) NULL) 1470 + return -ENODEV; 1471 portp = tty->driver_data; 1472 if (portp == (stlport_t *) NULL) 1473 + return -ENODEV; 1474 if (tty->flags & (1 << TTY_IO_ERROR)) 1475 + return -EIO; 1476 1477 return stl_getsignals(portp); 1478 } ··· 1484 int rts = -1, dtr = -1; 1485 1486 if (tty == (struct tty_struct *) NULL) 1487 + return -ENODEV; 1488 portp = tty->driver_data; 1489 if (portp == (stlport_t *) NULL) 1490 + return -ENODEV; 1491 if (tty->flags & (1 << TTY_IO_ERROR)) 1492 + return -EIO; 1493 1494 if (set & TIOCM_RTS) 1495 rts = 1; ··· 1517 #endif 1518 1519 if (tty == (struct tty_struct *) NULL) 1520 + return -ENODEV; 1521 portp = tty->driver_data; 1522 if (portp == (stlport_t *) NULL) 1523 + return -ENODEV; 1524 1525 if ((cmd 
!= TIOCGSERIAL) && (cmd != TIOCSSERIAL) && 1526 (cmd != COM_GETPORTSTATS) && (cmd != COM_CLRPORTSTATS)) { 1527 if (tty->flags & (1 << TTY_IO_ERROR)) 1528 + return -EIO; 1529 } 1530 1531 rc = 0; ··· 1566 break; 1567 } 1568 1569 + return rc; 1570 } 1571 1572 /*****************************************************************************/ ··· 1872 pos[(MAXLINE - 2)] = '+'; 1873 pos[(MAXLINE - 1)] = '\n'; 1874 1875 + return MAXLINE; 1876 } 1877 1878 /*****************************************************************************/ ··· 1957 1958 stl_readdone: 1959 *start = page; 1960 + return (pos - page); 1961 } 1962 1963 /*****************************************************************************/ ··· 2349 } else { 2350 rc = 0; 2351 } 2352 + return rc; 2353 } 2354 2355 /*****************************************************************************/ ··· 3116 return -1; 3117 } 3118 3119 + return 0; 3120 } 3121 3122 /*****************************************************************************/ ··· 3132 static int stl_cd1400getreg(stlport_t *portp, int regnr) 3133 { 3134 outb((regnr + portp->uartaddr), portp->ioaddr); 3135 + return inb(portp->ioaddr + EREG_DATA); 3136 } 3137 3138 static void stl_cd1400setreg(stlport_t *portp, int regnr, int value) ··· 3146 outb((regnr + portp->uartaddr), portp->ioaddr); 3147 if (inb(portp->ioaddr + EREG_DATA) != value) { 3148 outb(value, portp->ioaddr + EREG_DATA); 3149 + return 1; 3150 } 3151 + return 0; 3152 } 3153 3154 /*****************************************************************************/ ··· 3206 } 3207 3208 BRDDISABLE(panelp->brdnr); 3209 + return chipmask; 3210 } 3211 3212 /*****************************************************************************/ ··· 3557 #else 3558 sigs |= TIOCM_DSR; 3559 #endif 3560 + return sigs; 3561 } 3562 3563 /*****************************************************************************/ ··· 3830 #endif 3831 3832 if (portp == (stlport_t *) NULL) 3833 + return 0; 3834 3835 + return 
test_bit(ASYI_TXBUSY, &portp->istate) ? 1 : 0; 3836 } 3837 3838 /*****************************************************************************/ ··· 3912 outb((SRER + portp->uartaddr), ioaddr); 3913 outb((inb(ioaddr + EREG_DATA) & ~(SRER_TXDATA | SRER_TXEMPTY)), 3914 (ioaddr + EREG_DATA)); 3915 + return 1; 3916 } else if (portp->brklen > 1) { 3917 outb((TDR + portp->uartaddr), ioaddr); 3918 outb(ETC_CMD, (ioaddr + EREG_DATA)); 3919 outb(ETC_STOPBREAK, (ioaddr + EREG_DATA)); 3920 portp->brklen = -1; 3921 + return 1; 3922 } else { 3923 outb((COR2 + portp->uartaddr), ioaddr); 3924 outb((inb(ioaddr + EREG_DATA) & ~COR2_ETC), 3925 (ioaddr + EREG_DATA)); 3926 portp->brklen = 0; 3927 } 3928 + return 0; 3929 } 3930 3931 /*****************************************************************************/ ··· 4166 static int stl_sc26198getreg(stlport_t *portp, int regnr) 4167 { 4168 outb((regnr | portp->uartaddr), (portp->ioaddr + XP_ADDR)); 4169 + return inb(portp->ioaddr + XP_DATA); 4170 } 4171 4172 static void stl_sc26198setreg(stlport_t *portp, int regnr, int value) ··· 4180 outb((regnr | portp->uartaddr), (portp->ioaddr + XP_ADDR)); 4181 if (inb(portp->ioaddr + XP_DATA) != value) { 4182 outb(value, (portp->ioaddr + XP_DATA)); 4183 + return 1; 4184 } 4185 + return 0; 4186 } 4187 4188 /*****************************************************************************/ ··· 4194 static int stl_sc26198getglobreg(stlport_t *portp, int regnr) 4195 { 4196 outb(regnr, (portp->ioaddr + XP_ADDR)); 4197 + return inb(portp->ioaddr + XP_DATA); 4198 } 4199 4200 #if 0 ··· 4252 } 4253 4254 BRDDISABLE(panelp->brdnr); 4255 + return chipmask; 4256 } 4257 4258 /*****************************************************************************/ ··· 4546 sigs |= (ipr & IPR_DTR) ? 0: TIOCM_DTR; 4547 sigs |= (ipr & IPR_RTS) ? 
0: TIOCM_RTS; 4548 sigs |= TIOCM_DSR; 4549 + return sigs; 4550 } 4551 4552 /*****************************************************************************/ ··· 4828 #endif 4829 4830 if (portp == (stlport_t *) NULL) 4831 + return 0; 4832 if (test_bit(ASYI_TXBUSY, &portp->istate)) 4833 + return 1; 4834 4835 save_flags(flags); 4836 cli(); ··· 4839 BRDDISABLE(portp->brdnr); 4840 restore_flags(flags); 4841 4842 + return (sr & SR_TXEMPTY) ? 0 : 1; 4843 } 4844 4845 /*****************************************************************************/
+3 -2
fs/efs/super.c
··· 222 sblock); 223 #endif 224 } 225 - return(sblock); 226 } 227 228 static int efs_validate_super(struct efs_sb_info *sb, struct efs_super *super) { 229 230 - if (!IS_EFS_MAGIC(be32_to_cpu(super->fs_magic))) return -1; 231 232 sb->fs_magic = be32_to_cpu(super->fs_magic); 233 sb->total_blocks = be32_to_cpu(super->fs_size);
··· 222 sblock); 223 #endif 224 } 225 + return sblock; 226 } 227 228 static int efs_validate_super(struct efs_sb_info *sb, struct efs_super *super) { 229 230 + if (!IS_EFS_MAGIC(be32_to_cpu(super->fs_magic))) 231 + return -1; 232 233 sb->fs_magic = be32_to_cpu(super->fs_magic); 234 sb->total_blocks = be32_to_cpu(super->fs_size);
+57 -57
fs/xfs/quota/xfs_qm.c
··· 497 int error; 498 499 if (mp->m_quotainfo == NULL) 500 - return (0); 501 niters = 0; 502 again: 503 xfs_qm_mplist_lock(mp); ··· 528 error = xfs_qm_dqflush(dqp, flags); 529 xfs_dqunlock(dqp); 530 if (error) 531 - return (error); 532 533 xfs_qm_mplist_lock(mp); 534 if (recl != XFS_QI_MPLRECLAIMS(mp)) { ··· 540 541 xfs_qm_mplist_unlock(mp); 542 /* return ! busy */ 543 - return (0); 544 } 545 /* 546 * Release the group dquot pointers the user dquots may be ··· 599 int nmisses; 600 601 if (mp->m_quotainfo == NULL) 602 - return (0); 603 604 dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0; 605 dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0; ··· 796 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 797 } 798 #endif 799 - return (error); 800 } 801 802 ··· 897 (! XFS_NOT_DQATTACHED(mp, ip)) || 898 (ip->i_ino == mp->m_sb.sb_uquotino) || 899 (ip->i_ino == mp->m_sb.sb_gquotino)) 900 - return (0); 901 902 ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 || 903 XFS_ISLOCKED_INODE_EXCL(ip)); ··· 984 else 985 ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); 986 #endif 987 - return (error); 988 } 989 990 /* ··· 1049 */ 1050 if (! XFS_IS_QUOTA_ON(mp)) { 1051 xfs_qm_mplist_unlock(mp); 1052 - return (0); 1053 } 1054 FOREACH_DQUOT_IN_MP(dqp, mp) { 1055 /* ··· 1109 error = xfs_qm_dqflush(dqp, flush_flags); 1110 xfs_dqunlock(dqp); 1111 if (error && XFS_FORCED_SHUTDOWN(mp)) 1112 - return(0); /* Need to prevent umount failure */ 1113 else if (error) 1114 - return (error); 1115 1116 xfs_qm_mplist_lock(mp); 1117 if (recl != XFS_QI_MPLRECLAIMS(mp)) { ··· 1124 } 1125 1126 xfs_qm_mplist_unlock(mp); 1127 - return (0); 1128 } 1129 1130 ··· 1146 * Tell XQM that we exist as soon as possible. 
1147 */ 1148 if ((error = xfs_qm_hold_quotafs_ref(mp))) { 1149 - return (error); 1150 } 1151 1152 qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); ··· 1158 if ((error = xfs_qm_init_quotainos(mp))) { 1159 kmem_free(qinf, sizeof(xfs_quotainfo_t)); 1160 mp->m_quotainfo = NULL; 1161 - return (error); 1162 } 1163 1164 spinlock_init(&qinf->qi_pinlock, "xfs_qinf_pin"); ··· 1232 qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT; 1233 } 1234 1235 - return (0); 1236 } 1237 1238 ··· 1332 */ 1333 ASSERT(error != ESRCH); 1334 ASSERT(error != ENOENT); 1335 - return (error); 1336 } 1337 ASSERT(udqp); 1338 } ··· 1355 xfs_qm_dqrele(udqp); 1356 ASSERT(error != ESRCH); 1357 ASSERT(error != ENOENT); 1358 - return (error); 1359 } 1360 ASSERT(gdqp); 1361 ··· 1376 if (udqp) ASSERT(XFS_DQ_IS_LOCKED(udqp)); 1377 if (gdqp) ASSERT(XFS_DQ_IS_LOCKED(gdqp)); 1378 #endif 1379 - return (0); 1380 } 1381 1382 /* ··· 1404 XFS_TRANS_PERM_LOG_RES, 1405 XFS_CREATE_LOG_COUNT))) { 1406 xfs_trans_cancel(tp, 0); 1407 - return (error); 1408 } 1409 memset(&zerocr, 0, sizeof(zerocr)); 1410 memset(&zeroino, 0, sizeof(zeroino)); ··· 1413 &zerocr, 0, 1, ip, &committed))) { 1414 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | 1415 XFS_TRANS_ABORT); 1416 - return (error); 1417 } 1418 1419 /* ··· 1461 if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, 1462 NULL))) { 1463 xfs_fs_cmn_err(CE_ALERT, mp, "XFS qino_alloc failed!"); 1464 - return (error); 1465 } 1466 - return (0); 1467 } 1468 1469 ··· 1508 ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1); 1509 } 1510 1511 - return (0); 1512 } 1513 1514 STATIC int ··· 1557 bno++; 1558 firstid += XFS_QM_DQPERBLK(mp); 1559 } 1560 - return (error); 1561 } 1562 1563 /* ··· 1586 * happens only at mount time which is single threaded. 
1587 */ 1588 if (qip->i_d.di_nblocks == 0) 1589 - return (0); 1590 1591 map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP); 1592 ··· 1655 1656 kmem_free(map, XFS_DQITER_MAP_SIZE * sizeof(*map)); 1657 1658 - return (error); 1659 } 1660 1661 /* ··· 1715 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); 1716 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 1717 if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK))) 1718 - return (error); 1719 } 1720 rtblks = 0; 1721 nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t); ··· 1723 for (ep = base; ep < &base[nextents]; ep++) 1724 rtblks += xfs_bmbt_get_blockcount(ep); 1725 *O_rtblks = (xfs_qcnt_t)rtblks; 1726 - return (0); 1727 } 1728 1729 /* ··· 1767 */ 1768 if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, bno))) { 1769 *res = BULKSTAT_RV_NOTHING; 1770 - return (error); 1771 } 1772 1773 if (ip->i_d.di_mode == 0) { ··· 1785 if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) { 1786 xfs_iput(ip, XFS_ILOCK_EXCL); 1787 *res = BULKSTAT_RV_GIVEUP; 1788 - return (error); 1789 } 1790 1791 rtblks = 0; ··· 1802 if (gdqp) 1803 xfs_qm_dqput(gdqp); 1804 *res = BULKSTAT_RV_GIVEUP; 1805 - return (error); 1806 } 1807 nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks; 1808 } ··· 1847 * Goto next inode. 
1848 */ 1849 *res = BULKSTAT_RV_DIDONE; 1850 - return (0); 1851 } 1852 1853 /* ··· 2041 XFS_QI_UQIP(mp) = uip; 2042 XFS_QI_GQIP(mp) = gip; 2043 2044 - return (0); 2045 } 2046 2047 ··· 2062 int nflushes; 2063 2064 if (howmany <= 0) 2065 - return (0); 2066 2067 nreclaimed = 0; 2068 restarts = 0; ··· 2088 xfs_dqunlock(dqp); 2089 xfs_qm_freelist_unlock(xfs_Gqm); 2090 if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) 2091 - return (nreclaimed); 2092 XQM_STATS_INC(xqmstats.xs_qm_dqwants); 2093 goto tryagain; 2094 } ··· 2163 XFS_DQ_HASH_UNLOCK(hash); 2164 xfs_qm_freelist_unlock(xfs_Gqm); 2165 if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) 2166 - return (nreclaimed); 2167 goto tryagain; 2168 } 2169 xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING"); ··· 2188 dqp = nextdqp; 2189 } 2190 xfs_qm_freelist_unlock(xfs_Gqm); 2191 - return (nreclaimed); 2192 } 2193 2194 ··· 2202 int ndqused, nfree, n; 2203 2204 if (!kmem_shake_allow(gfp_mask)) 2205 - return (0); 2206 if (!xfs_Gqm) 2207 - return (0); 2208 2209 nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */ 2210 /* incore dquots in all f/s's */ ··· 2213 ASSERT(ndqused >= 0); 2214 2215 if (nfree <= ndqused && nfree < ndquot) 2216 - return (0); 2217 2218 ndqused *= xfs_Gqm->qm_dqfree_ratio; /* target # of free dquots */ 2219 n = nfree - ndqused - ndquot; /* # over target */ ··· 2257 xfs_dqunlock(dqp); 2258 xfs_qm_freelist_unlock(xfs_Gqm); 2259 if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) 2260 - return (NULL); 2261 XQM_STATS_INC(xqmstats.xs_qm_dqwants); 2262 goto startagain; 2263 } ··· 2333 } 2334 2335 xfs_qm_freelist_unlock(xfs_Gqm); 2336 - return (dqpout); 2337 } 2338 2339 ··· 2369 */ 2370 memset(&dqp->q_core, 0, sizeof(dqp->q_core)); 2371 *O_dqpp = dqp; 2372 - return (B_FALSE); 2373 } 2374 XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses); 2375 } ··· 2382 *O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP); 2383 atomic_inc(&xfs_Gqm->qm_totaldquots); 2384 2385 - return (B_TRUE); 2386 } 2387 2388 ··· 2407 0, 2408 
XFS_DEFAULT_LOG_COUNT))) { 2409 xfs_trans_cancel(tp, 0); 2410 - return (error); 2411 } 2412 2413 xfs_mod_sb(tp, flags); 2414 (void) xfs_trans_commit(tp, 0, NULL); 2415 2416 - return (0); 2417 } 2418 2419 ··· 2463 if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_DQALLOC | 2464 XFS_QMOPT_ILOCKED))) { 2465 xfs_iunlock(ip, lockflags); 2466 - return (error); 2467 } 2468 } 2469 ··· 2486 XFS_QMOPT_DOWARN, 2487 &uq))) { 2488 ASSERT(error != ENOENT); 2489 - return (error); 2490 } 2491 /* 2492 * Get the ilock in the right order. ··· 2517 if (uq) 2518 xfs_qm_dqrele(uq); 2519 ASSERT(error != ENOENT); 2520 - return (error); 2521 } 2522 xfs_dqunlock(gq); 2523 lockflags = XFS_ILOCK_SHARED; ··· 2565 *O_gdqpp = gq; 2566 else if (gq) 2567 xfs_qm_dqrele(gq); 2568 - return (0); 2569 } 2570 2571 /* ··· 2608 xfs_dqunlock(newdq); 2609 *IO_olddq = newdq; 2610 2611 - return (prevdq); 2612 } 2613 2614 /* ··· 2702 ip = i_tab[0]; 2703 2704 if (! XFS_IS_QUOTA_ON(ip->i_mount)) 2705 - return (0); 2706 2707 if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) { 2708 error = xfs_qm_dqattach(ip, 0); 2709 if (error) 2710 - return (error); 2711 } 2712 for (i = 1; (i < 4 && i_tab[i]); i++) { 2713 /* ··· 2717 if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) { 2718 error = xfs_qm_dqattach(ip, 0); 2719 if (error) 2720 - return (error); 2721 } 2722 } 2723 } 2724 - return (0); 2725 } 2726 2727 void ··· 2834 int locked; 2835 2836 locked = mutex_trylock(&((dqp)->q_hash->qh_lock)); 2837 - return (locked); 2838 } 2839 2840 int ··· 2844 int locked; 2845 2846 locked = mutex_trylock(&(xqm->qm_dqfreelist.qh_lock)); 2847 - return (locked); 2848 } 2849 2850 STATIC int ··· 2855 2856 ASSERT(mp->m_quotainfo); 2857 locked = mutex_trylock(&(XFS_QI_MPLLOCK(mp))); 2858 - return (locked); 2859 }
··· 497 int error; 498 499 if (mp->m_quotainfo == NULL) 500 + return 0; 501 niters = 0; 502 again: 503 xfs_qm_mplist_lock(mp); ··· 528 error = xfs_qm_dqflush(dqp, flags); 529 xfs_dqunlock(dqp); 530 if (error) 531 + return error; 532 533 xfs_qm_mplist_lock(mp); 534 if (recl != XFS_QI_MPLRECLAIMS(mp)) { ··· 540 541 xfs_qm_mplist_unlock(mp); 542 /* return ! busy */ 543 + return 0; 544 } 545 /* 546 * Release the group dquot pointers the user dquots may be ··· 599 int nmisses; 600 601 if (mp->m_quotainfo == NULL) 602 + return 0; 603 604 dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0; 605 dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0; ··· 796 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 797 } 798 #endif 799 + return error; 800 } 801 802 ··· 897 (! XFS_NOT_DQATTACHED(mp, ip)) || 898 (ip->i_ino == mp->m_sb.sb_uquotino) || 899 (ip->i_ino == mp->m_sb.sb_gquotino)) 900 + return 0; 901 902 ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 || 903 XFS_ISLOCKED_INODE_EXCL(ip)); ··· 984 else 985 ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); 986 #endif 987 + return error; 988 } 989 990 /* ··· 1049 */ 1050 if (! XFS_IS_QUOTA_ON(mp)) { 1051 xfs_qm_mplist_unlock(mp); 1052 + return 0; 1053 } 1054 FOREACH_DQUOT_IN_MP(dqp, mp) { 1055 /* ··· 1109 error = xfs_qm_dqflush(dqp, flush_flags); 1110 xfs_dqunlock(dqp); 1111 if (error && XFS_FORCED_SHUTDOWN(mp)) 1112 + return 0; /* Need to prevent umount failure */ 1113 else if (error) 1114 + return error; 1115 1116 xfs_qm_mplist_lock(mp); 1117 if (recl != XFS_QI_MPLRECLAIMS(mp)) { ··· 1124 } 1125 1126 xfs_qm_mplist_unlock(mp); 1127 + return 0; 1128 } 1129 1130 ··· 1146 * Tell XQM that we exist as soon as possible. 
1147 */ 1148 if ((error = xfs_qm_hold_quotafs_ref(mp))) { 1149 + return error; 1150 } 1151 1152 qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); ··· 1158 if ((error = xfs_qm_init_quotainos(mp))) { 1159 kmem_free(qinf, sizeof(xfs_quotainfo_t)); 1160 mp->m_quotainfo = NULL; 1161 + return error; 1162 } 1163 1164 spinlock_init(&qinf->qi_pinlock, "xfs_qinf_pin"); ··· 1232 qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT; 1233 } 1234 1235 + return 0; 1236 } 1237 1238 ··· 1332 */ 1333 ASSERT(error != ESRCH); 1334 ASSERT(error != ENOENT); 1335 + return error; 1336 } 1337 ASSERT(udqp); 1338 } ··· 1355 xfs_qm_dqrele(udqp); 1356 ASSERT(error != ESRCH); 1357 ASSERT(error != ENOENT); 1358 + return error; 1359 } 1360 ASSERT(gdqp); 1361 ··· 1376 if (udqp) ASSERT(XFS_DQ_IS_LOCKED(udqp)); 1377 if (gdqp) ASSERT(XFS_DQ_IS_LOCKED(gdqp)); 1378 #endif 1379 + return 0; 1380 } 1381 1382 /* ··· 1404 XFS_TRANS_PERM_LOG_RES, 1405 XFS_CREATE_LOG_COUNT))) { 1406 xfs_trans_cancel(tp, 0); 1407 + return error; 1408 } 1409 memset(&zerocr, 0, sizeof(zerocr)); 1410 memset(&zeroino, 0, sizeof(zeroino)); ··· 1413 &zerocr, 0, 1, ip, &committed))) { 1414 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | 1415 XFS_TRANS_ABORT); 1416 + return error; 1417 } 1418 1419 /* ··· 1461 if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, 1462 NULL))) { 1463 xfs_fs_cmn_err(CE_ALERT, mp, "XFS qino_alloc failed!"); 1464 + return error; 1465 } 1466 + return 0; 1467 } 1468 1469 ··· 1508 ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1); 1509 } 1510 1511 + return 0; 1512 } 1513 1514 STATIC int ··· 1557 bno++; 1558 firstid += XFS_QM_DQPERBLK(mp); 1559 } 1560 + return error; 1561 } 1562 1563 /* ··· 1586 * happens only at mount time which is single threaded. 
1587 */ 1588 if (qip->i_d.di_nblocks == 0) 1589 + return 0; 1590 1591 map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP); 1592 ··· 1655 1656 kmem_free(map, XFS_DQITER_MAP_SIZE * sizeof(*map)); 1657 1658 + return error; 1659 } 1660 1661 /* ··· 1715 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); 1716 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 1717 if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK))) 1718 + return error; 1719 } 1720 rtblks = 0; 1721 nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t); ··· 1723 for (ep = base; ep < &base[nextents]; ep++) 1724 rtblks += xfs_bmbt_get_blockcount(ep); 1725 *O_rtblks = (xfs_qcnt_t)rtblks; 1726 + return 0; 1727 } 1728 1729 /* ··· 1767 */ 1768 if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, bno))) { 1769 *res = BULKSTAT_RV_NOTHING; 1770 + return error; 1771 } 1772 1773 if (ip->i_d.di_mode == 0) { ··· 1785 if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) { 1786 xfs_iput(ip, XFS_ILOCK_EXCL); 1787 *res = BULKSTAT_RV_GIVEUP; 1788 + return error; 1789 } 1790 1791 rtblks = 0; ··· 1802 if (gdqp) 1803 xfs_qm_dqput(gdqp); 1804 *res = BULKSTAT_RV_GIVEUP; 1805 + return error; 1806 } 1807 nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks; 1808 } ··· 1847 * Goto next inode. 
1848 */ 1849 *res = BULKSTAT_RV_DIDONE; 1850 + return 0; 1851 } 1852 1853 /* ··· 2041 XFS_QI_UQIP(mp) = uip; 2042 XFS_QI_GQIP(mp) = gip; 2043 2044 + return 0; 2045 } 2046 2047 ··· 2062 int nflushes; 2063 2064 if (howmany <= 0) 2065 + return 0; 2066 2067 nreclaimed = 0; 2068 restarts = 0; ··· 2088 xfs_dqunlock(dqp); 2089 xfs_qm_freelist_unlock(xfs_Gqm); 2090 if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) 2091 + return nreclaimed; 2092 XQM_STATS_INC(xqmstats.xs_qm_dqwants); 2093 goto tryagain; 2094 } ··· 2163 XFS_DQ_HASH_UNLOCK(hash); 2164 xfs_qm_freelist_unlock(xfs_Gqm); 2165 if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) 2166 + return nreclaimed; 2167 goto tryagain; 2168 } 2169 xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING"); ··· 2188 dqp = nextdqp; 2189 } 2190 xfs_qm_freelist_unlock(xfs_Gqm); 2191 + return nreclaimed; 2192 } 2193 2194 ··· 2202 int ndqused, nfree, n; 2203 2204 if (!kmem_shake_allow(gfp_mask)) 2205 + return 0; 2206 if (!xfs_Gqm) 2207 + return 0; 2208 2209 nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */ 2210 /* incore dquots in all f/s's */ ··· 2213 ASSERT(ndqused >= 0); 2214 2215 if (nfree <= ndqused && nfree < ndquot) 2216 + return 0; 2217 2218 ndqused *= xfs_Gqm->qm_dqfree_ratio; /* target # of free dquots */ 2219 n = nfree - ndqused - ndquot; /* # over target */ ··· 2257 xfs_dqunlock(dqp); 2258 xfs_qm_freelist_unlock(xfs_Gqm); 2259 if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) 2260 + return NULL; 2261 XQM_STATS_INC(xqmstats.xs_qm_dqwants); 2262 goto startagain; 2263 } ··· 2333 } 2334 2335 xfs_qm_freelist_unlock(xfs_Gqm); 2336 + return dqpout; 2337 } 2338 2339 ··· 2369 */ 2370 memset(&dqp->q_core, 0, sizeof(dqp->q_core)); 2371 *O_dqpp = dqp; 2372 + return B_FALSE; 2373 } 2374 XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses); 2375 } ··· 2382 *O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP); 2383 atomic_inc(&xfs_Gqm->qm_totaldquots); 2384 2385 + return B_TRUE; 2386 } 2387 2388 ··· 2407 0, 2408 XFS_DEFAULT_LOG_COUNT))) { 2409 
xfs_trans_cancel(tp, 0); 2410 + return error; 2411 } 2412 2413 xfs_mod_sb(tp, flags); 2414 (void) xfs_trans_commit(tp, 0, NULL); 2415 2416 + return 0; 2417 } 2418 2419 ··· 2463 if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_DQALLOC | 2464 XFS_QMOPT_ILOCKED))) { 2465 xfs_iunlock(ip, lockflags); 2466 + return error; 2467 } 2468 } 2469 ··· 2486 XFS_QMOPT_DOWARN, 2487 &uq))) { 2488 ASSERT(error != ENOENT); 2489 + return error; 2490 } 2491 /* 2492 * Get the ilock in the right order. ··· 2517 if (uq) 2518 xfs_qm_dqrele(uq); 2519 ASSERT(error != ENOENT); 2520 + return error; 2521 } 2522 xfs_dqunlock(gq); 2523 lockflags = XFS_ILOCK_SHARED; ··· 2565 *O_gdqpp = gq; 2566 else if (gq) 2567 xfs_qm_dqrele(gq); 2568 + return 0; 2569 } 2570 2571 /* ··· 2608 xfs_dqunlock(newdq); 2609 *IO_olddq = newdq; 2610 2611 + return prevdq; 2612 } 2613 2614 /* ··· 2702 ip = i_tab[0]; 2703 2704 if (! XFS_IS_QUOTA_ON(ip->i_mount)) 2705 + return 0; 2706 2707 if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) { 2708 error = xfs_qm_dqattach(ip, 0); 2709 if (error) 2710 + return error; 2711 } 2712 for (i = 1; (i < 4 && i_tab[i]); i++) { 2713 /* ··· 2717 if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) { 2718 error = xfs_qm_dqattach(ip, 0); 2719 if (error) 2720 + return error; 2721 } 2722 } 2723 } 2724 + return 0; 2725 } 2726 2727 void ··· 2834 int locked; 2835 2836 locked = mutex_trylock(&((dqp)->q_hash->qh_lock)); 2837 + return locked; 2838 } 2839 2840 int ··· 2844 int locked; 2845 2846 locked = mutex_trylock(&(xqm->qm_dqfreelist.qh_lock)); 2847 + return locked; 2848 } 2849 2850 STATIC int ··· 2855 2856 ASSERT(mp->m_quotainfo); 2857 locked = mutex_trylock(&(XFS_QI_MPLLOCK(mp))); 2858 + return locked; 2859 }
+48 -48
fs/xfs/xfs_dir_leaf.c
··· 147 hdr->count = 0; 148 dp->i_d.di_size = sizeof(*hdr); 149 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); 150 - return(0); 151 } 152 153 /* ··· 180 if (sfe->namelen == args->namelen && 181 args->name[0] == sfe->name[0] && 182 memcmp(args->name, sfe->name, args->namelen) == 0) 183 - return(XFS_ERROR(EEXIST)); 184 sfe = XFS_DIR_SF_NEXTENTRY(sfe); 185 } 186 ··· 198 dp->i_d.di_size += size; 199 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); 200 201 - return(0); 202 } 203 204 /* ··· 238 } 239 if (i < 0) { 240 ASSERT(args->oknoent); 241 - return(XFS_ERROR(ENOENT)); 242 } 243 244 if ((base + size) != dp->i_d.di_size) { ··· 251 dp->i_d.di_size -= size; 252 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); 253 254 - return(0); 255 } 256 257 /* ··· 390 391 out: 392 kmem_free(tmpbuffer, size); 393 - return(retval); 394 } 395 396 STATIC int ··· 596 /* XXX - replace assert? */ 597 XFS_DIR_SF_PUT_DIRINO(&args->inumber, &sf->hdr.parent); 598 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA); 599 - return(0); 600 } 601 ASSERT(args->namelen != 1 || args->name[0] != '.'); 602 sfe = &sf->list[0]; ··· 608 (char *)&sfe->inumber, sizeof(xfs_ino_t))); 609 XFS_DIR_SF_PUT_DIRINO(&args->inumber, &sfe->inumber); 610 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA); 611 - return(0); 612 } 613 sfe = XFS_DIR_SF_NEXTENTRY(sfe); 614 } 615 ASSERT(args->oknoent); 616 - return(XFS_ERROR(ENOENT)); 617 } 618 619 /* ··· 695 696 out: 697 kmem_free(tmpbuffer, XFS_LBSIZE(dp->i_mount)); 698 - return(retval); 699 } 700 701 /* ··· 715 retval = xfs_da_grow_inode(args, &blkno); 716 ASSERT(blkno == 1); 717 if (retval) 718 - return(retval); 719 retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp1, 720 XFS_DATA_FORK); 721 if (retval) 722 - return(retval); 723 ASSERT(bp1 != NULL); 724 retval = xfs_da_get_buf(args->trans, args->dp, 1, -1, &bp2, 725 XFS_DATA_FORK); 726 if (retval) { 727 xfs_da_buf_done(bp1); 728 - return(retval); 
729 } 730 ASSERT(bp2 != NULL); 731 memcpy(bp2->data, bp1->data, XFS_LBSIZE(dp->i_mount)); ··· 738 retval = xfs_da_node_create(args, 0, 1, &bp1, XFS_DATA_FORK); 739 if (retval) { 740 xfs_da_buf_done(bp2); 741 - return(retval); 742 } 743 node = bp1->data; 744 leaf = bp2->data; ··· 751 XFS_DA_LOGRANGE(node, &node->btree[0], sizeof(node->btree[0]))); 752 xfs_da_buf_done(bp1); 753 754 - return(retval); 755 } 756 757 ··· 776 ASSERT(dp != NULL); 777 retval = xfs_da_get_buf(args->trans, dp, blkno, -1, &bp, XFS_DATA_FORK); 778 if (retval) 779 - return(retval); 780 ASSERT(bp != NULL); 781 leaf = bp->data; 782 memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount)); ··· 791 xfs_da_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1); 792 793 *bpp = bp; 794 - return(0); 795 } 796 797 /* ··· 813 ASSERT(oldblk->magic == XFS_DIR_LEAF_MAGIC); 814 error = xfs_da_grow_inode(args, &blkno); 815 if (error) 816 - return(error); 817 error = xfs_dir_leaf_create(args, blkno, &newblk->bp); 818 if (error) 819 - return(error); 820 newblk->blkno = blkno; 821 newblk->magic = XFS_DIR_LEAF_MAGIC; 822 ··· 826 xfs_dir_leaf_rebalance(state, oldblk, newblk); 827 error = xfs_da_blk_link(state, oldblk, newblk); 828 if (error) 829 - return(error); 830 831 /* 832 * Insert the new entry in the correct block. ··· 842 */ 843 oldblk->hashval = xfs_dir_leaf_lasthash(oldblk->bp, NULL); 844 newblk->hashval = xfs_dir_leaf_lasthash(newblk->bp, NULL); 845 - return(error); 846 } 847 848 /* ··· 885 if (INT_GET(map->size, ARCH_CONVERT) >= tmp) { 886 if (!args->justcheck) 887 xfs_dir_leaf_add_work(bp, args, index, i); 888 - return(0); 889 } 890 sum += INT_GET(map->size, ARCH_CONVERT); 891 } ··· 896 * no good and we should just give up. 897 */ 898 if (!hdr->holes && (sum < entsize)) 899 - return(XFS_ERROR(ENOSPC)); 900 901 /* 902 * Compact the entries to coalesce free space. 
··· 909 (uint)sizeof(xfs_dir_leaf_entry_t) : 0, 910 args->justcheck); 911 if (error) 912 - return(error); 913 /* 914 * After compaction, the block is guaranteed to have only one 915 * free region, in freemap[0]. If it is not big enough, give up. 916 */ 917 if (INT_GET(hdr->freemap[0].size, ARCH_CONVERT) < 918 (entsize + (uint)sizeof(xfs_dir_leaf_entry_t))) 919 - return(XFS_ERROR(ENOSPC)); 920 921 if (!args->justcheck) 922 xfs_dir_leaf_add_work(bp, args, index, 0); 923 - return(0); 924 } 925 926 /* ··· 1072 kmem_free(tmpbuffer, lbsize); 1073 if (musthave || justcheck) 1074 kmem_free(tmpbuffer2, lbsize); 1075 - return(rval); 1076 } 1077 1078 /* ··· 1292 1293 *countarg = count; 1294 *namebytesarg = totallen; 1295 - return(foundit); 1296 } 1297 1298 /*======================================================================== ··· 1334 INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); 1335 if (bytes > (state->blocksize >> 1)) { 1336 *action = 0; /* blk over 50%, don't try to join */ 1337 - return(0); 1338 } 1339 1340 /* ··· 1353 error = xfs_da_path_shift(state, &state->altpath, forward, 1354 0, &retval); 1355 if (error) 1356 - return(error); 1357 if (retval) { 1358 *action = 0; 1359 } else { 1360 *action = 2; 1361 } 1362 - return(0); 1363 } 1364 1365 /* ··· 1381 blkno, -1, &bp, 1382 XFS_DATA_FORK); 1383 if (error) 1384 - return(error); 1385 ASSERT(bp != NULL); 1386 1387 leaf = (xfs_dir_leafblock_t *)info; ··· 1402 } 1403 if (i >= 2) { 1404 *action = 0; 1405 - return(0); 1406 } 1407 xfs_da_buf_done(bp); 1408 ··· 1419 0, &retval); 1420 } 1421 if (error) 1422 - return(error); 1423 if (retval) { 1424 *action = 0; 1425 } else { 1426 *action = 1; 1427 } 1428 - return(0); 1429 } 1430 1431 /* ··· 1575 tmp += INT_GET(leaf->hdr.count, ARCH_CONVERT) * ((uint)sizeof(xfs_dir_leaf_name_t) - 1); 1576 tmp += INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); 1577 if (tmp < mp->m_dir_magicpct) 1578 - return(1); /* leaf is < 37% full */ 1579 - return(0); 1580 } 1581 1582 /* ··· 1732 if ((probe == 
INT_GET(leaf->hdr.count, ARCH_CONVERT)) || (INT_GET(entry->hashval, ARCH_CONVERT) != hashval)) { 1733 *index = probe; 1734 ASSERT(args->oknoent); 1735 - return(XFS_ERROR(ENOENT)); 1736 } 1737 1738 /* ··· 1745 memcmp(args->name, namest->name, args->namelen) == 0) { 1746 XFS_DIR_SF_GET_DIRINO(&namest->inumber, &args->inumber); 1747 *index = probe; 1748 - return(XFS_ERROR(EEXIST)); 1749 } 1750 entry++; 1751 probe++; 1752 } 1753 *index = probe; 1754 ASSERT(probe == INT_GET(leaf->hdr.count, ARCH_CONVERT) || args->oknoent); 1755 - return(XFS_ERROR(ENOENT)); 1756 } 1757 1758 /*======================================================================== ··· 1890 INT_GET(leaf1->entries[ 0 ].hashval, ARCH_CONVERT)) || 1891 (INT_GET(leaf2->entries[ INT_GET(leaf2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < 1892 INT_GET(leaf1->entries[ INT_GET(leaf1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) { 1893 - return(1); 1894 } 1895 - return(0); 1896 } 1897 1898 /* ··· 1942 leaf = bp->data; 1943 if (INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) { 1944 *eobp = 1; 1945 - return(XFS_ERROR(ENOENT)); /* XXX wrong code */ 1946 } 1947 1948 want_entno = XFS_DA_COOKIE_ENTRY(mp, uio->uio_offset); ··· 2000 * the node code will be setting uio_offset anyway. 2001 */ 2002 *eobp = 0; 2003 - return(0); 2004 } 2005 xfs_dir_trace_g_due("leaf: hash found", dp, uio, entry); 2006 ··· 2057 retval = xfs_da_read_buf(dp->i_transp, dp, thishash, 2058 nextda, &bp2, XFS_DATA_FORK); 2059 if (retval) 2060 - return(retval); 2061 2062 ASSERT(bp2 != NULL); 2063 ··· 2073 leaf2); 2074 xfs_da_brelse(dp->i_transp, bp2); 2075 2076 - return(XFS_ERROR(EFSCORRUPTED)); 2077 } 2078 2079 nexthash = INT_GET(leaf2->entries[0].hashval, ··· 2139 2140 xfs_dir_trace_g_du("leaf: E-O-B", dp, uio); 2141 2142 - return(retval); 2143 } 2144 } 2145 ··· 2149 2150 xfs_dir_trace_g_du("leaf: E-O-F", dp, uio); 2151 2152 - return(0); 2153 } 2154 2155 /*
··· 147 hdr->count = 0; 148 dp->i_d.di_size = sizeof(*hdr); 149 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); 150 + return 0; 151 } 152 153 /* ··· 180 if (sfe->namelen == args->namelen && 181 args->name[0] == sfe->name[0] && 182 memcmp(args->name, sfe->name, args->namelen) == 0) 183 + return XFS_ERROR(EEXIST); 184 sfe = XFS_DIR_SF_NEXTENTRY(sfe); 185 } 186 ··· 198 dp->i_d.di_size += size; 199 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); 200 201 + return 0; 202 } 203 204 /* ··· 238 } 239 if (i < 0) { 240 ASSERT(args->oknoent); 241 + return XFS_ERROR(ENOENT); 242 } 243 244 if ((base + size) != dp->i_d.di_size) { ··· 251 dp->i_d.di_size -= size; 252 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); 253 254 + return 0; 255 } 256 257 /* ··· 390 391 out: 392 kmem_free(tmpbuffer, size); 393 + return retval; 394 } 395 396 STATIC int ··· 596 /* XXX - replace assert? */ 597 XFS_DIR_SF_PUT_DIRINO(&args->inumber, &sf->hdr.parent); 598 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA); 599 + return 0; 600 } 601 ASSERT(args->namelen != 1 || args->name[0] != '.'); 602 sfe = &sf->list[0]; ··· 608 (char *)&sfe->inumber, sizeof(xfs_ino_t))); 609 XFS_DIR_SF_PUT_DIRINO(&args->inumber, &sfe->inumber); 610 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA); 611 + return 0; 612 } 613 sfe = XFS_DIR_SF_NEXTENTRY(sfe); 614 } 615 ASSERT(args->oknoent); 616 + return XFS_ERROR(ENOENT); 617 } 618 619 /* ··· 695 696 out: 697 kmem_free(tmpbuffer, XFS_LBSIZE(dp->i_mount)); 698 + return retval; 699 } 700 701 /* ··· 715 retval = xfs_da_grow_inode(args, &blkno); 716 ASSERT(blkno == 1); 717 if (retval) 718 + return retval; 719 retval = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp1, 720 XFS_DATA_FORK); 721 if (retval) 722 + return retval; 723 ASSERT(bp1 != NULL); 724 retval = xfs_da_get_buf(args->trans, args->dp, 1, -1, &bp2, 725 XFS_DATA_FORK); 726 if (retval) { 727 xfs_da_buf_done(bp1); 728 + return retval; 729 } 730 
ASSERT(bp2 != NULL); 731 memcpy(bp2->data, bp1->data, XFS_LBSIZE(dp->i_mount)); ··· 738 retval = xfs_da_node_create(args, 0, 1, &bp1, XFS_DATA_FORK); 739 if (retval) { 740 xfs_da_buf_done(bp2); 741 + return retval; 742 } 743 node = bp1->data; 744 leaf = bp2->data; ··· 751 XFS_DA_LOGRANGE(node, &node->btree[0], sizeof(node->btree[0]))); 752 xfs_da_buf_done(bp1); 753 754 + return retval; 755 } 756 757 ··· 776 ASSERT(dp != NULL); 777 retval = xfs_da_get_buf(args->trans, dp, blkno, -1, &bp, XFS_DATA_FORK); 778 if (retval) 779 + return retval; 780 ASSERT(bp != NULL); 781 leaf = bp->data; 782 memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount)); ··· 791 xfs_da_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1); 792 793 *bpp = bp; 794 + return 0; 795 } 796 797 /* ··· 813 ASSERT(oldblk->magic == XFS_DIR_LEAF_MAGIC); 814 error = xfs_da_grow_inode(args, &blkno); 815 if (error) 816 + return error; 817 error = xfs_dir_leaf_create(args, blkno, &newblk->bp); 818 if (error) 819 + return error; 820 newblk->blkno = blkno; 821 newblk->magic = XFS_DIR_LEAF_MAGIC; 822 ··· 826 xfs_dir_leaf_rebalance(state, oldblk, newblk); 827 error = xfs_da_blk_link(state, oldblk, newblk); 828 if (error) 829 + return error; 830 831 /* 832 * Insert the new entry in the correct block. ··· 842 */ 843 oldblk->hashval = xfs_dir_leaf_lasthash(oldblk->bp, NULL); 844 newblk->hashval = xfs_dir_leaf_lasthash(newblk->bp, NULL); 845 + return error; 846 } 847 848 /* ··· 885 if (INT_GET(map->size, ARCH_CONVERT) >= tmp) { 886 if (!args->justcheck) 887 xfs_dir_leaf_add_work(bp, args, index, i); 888 + return 0; 889 } 890 sum += INT_GET(map->size, ARCH_CONVERT); 891 } ··· 896 * no good and we should just give up. 897 */ 898 if (!hdr->holes && (sum < entsize)) 899 + return XFS_ERROR(ENOSPC); 900 901 /* 902 * Compact the entries to coalesce free space. 
··· 909 (uint)sizeof(xfs_dir_leaf_entry_t) : 0, 910 args->justcheck); 911 if (error) 912 + return error; 913 /* 914 * After compaction, the block is guaranteed to have only one 915 * free region, in freemap[0]. If it is not big enough, give up. 916 */ 917 if (INT_GET(hdr->freemap[0].size, ARCH_CONVERT) < 918 (entsize + (uint)sizeof(xfs_dir_leaf_entry_t))) 919 + return XFS_ERROR(ENOSPC); 920 921 if (!args->justcheck) 922 xfs_dir_leaf_add_work(bp, args, index, 0); 923 + return 0; 924 } 925 926 /* ··· 1072 kmem_free(tmpbuffer, lbsize); 1073 if (musthave || justcheck) 1074 kmem_free(tmpbuffer2, lbsize); 1075 + return rval; 1076 } 1077 1078 /* ··· 1292 1293 *countarg = count; 1294 *namebytesarg = totallen; 1295 + return foundit; 1296 } 1297 1298 /*======================================================================== ··· 1334 INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); 1335 if (bytes > (state->blocksize >> 1)) { 1336 *action = 0; /* blk over 50%, don't try to join */ 1337 + return 0; 1338 } 1339 1340 /* ··· 1353 error = xfs_da_path_shift(state, &state->altpath, forward, 1354 0, &retval); 1355 if (error) 1356 + return error; 1357 if (retval) { 1358 *action = 0; 1359 } else { 1360 *action = 2; 1361 } 1362 + return 0; 1363 } 1364 1365 /* ··· 1381 blkno, -1, &bp, 1382 XFS_DATA_FORK); 1383 if (error) 1384 + return error; 1385 ASSERT(bp != NULL); 1386 1387 leaf = (xfs_dir_leafblock_t *)info; ··· 1402 } 1403 if (i >= 2) { 1404 *action = 0; 1405 + return 0; 1406 } 1407 xfs_da_buf_done(bp); 1408 ··· 1419 0, &retval); 1420 } 1421 if (error) 1422 + return error; 1423 if (retval) { 1424 *action = 0; 1425 } else { 1426 *action = 1; 1427 } 1428 + return 0; 1429 } 1430 1431 /* ··· 1575 tmp += INT_GET(leaf->hdr.count, ARCH_CONVERT) * ((uint)sizeof(xfs_dir_leaf_name_t) - 1); 1576 tmp += INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); 1577 if (tmp < mp->m_dir_magicpct) 1578 + return 1; /* leaf is < 37% full */ 1579 + return 0; 1580 } 1581 1582 /* ··· 1732 if ((probe == 
INT_GET(leaf->hdr.count, ARCH_CONVERT)) || (INT_GET(entry->hashval, ARCH_CONVERT) != hashval)) { 1733 *index = probe; 1734 ASSERT(args->oknoent); 1735 + return XFS_ERROR(ENOENT); 1736 } 1737 1738 /* ··· 1745 memcmp(args->name, namest->name, args->namelen) == 0) { 1746 XFS_DIR_SF_GET_DIRINO(&namest->inumber, &args->inumber); 1747 *index = probe; 1748 + return XFS_ERROR(EEXIST); 1749 } 1750 entry++; 1751 probe++; 1752 } 1753 *index = probe; 1754 ASSERT(probe == INT_GET(leaf->hdr.count, ARCH_CONVERT) || args->oknoent); 1755 + return XFS_ERROR(ENOENT); 1756 } 1757 1758 /*======================================================================== ··· 1890 INT_GET(leaf1->entries[ 0 ].hashval, ARCH_CONVERT)) || 1891 (INT_GET(leaf2->entries[ INT_GET(leaf2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < 1892 INT_GET(leaf1->entries[ INT_GET(leaf1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) { 1893 + return 1; 1894 } 1895 + return 0; 1896 } 1897 1898 /* ··· 1942 leaf = bp->data; 1943 if (INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) { 1944 *eobp = 1; 1945 + return XFS_ERROR(ENOENT); /* XXX wrong code */ 1946 } 1947 1948 want_entno = XFS_DA_COOKIE_ENTRY(mp, uio->uio_offset); ··· 2000 * the node code will be setting uio_offset anyway. 2001 */ 2002 *eobp = 0; 2003 + return 0; 2004 } 2005 xfs_dir_trace_g_due("leaf: hash found", dp, uio, entry); 2006 ··· 2057 retval = xfs_da_read_buf(dp->i_transp, dp, thishash, 2058 nextda, &bp2, XFS_DATA_FORK); 2059 if (retval) 2060 + return retval; 2061 2062 ASSERT(bp2 != NULL); 2063 ··· 2073 leaf2); 2074 xfs_da_brelse(dp->i_transp, bp2); 2075 2076 + return XFS_ERROR(EFSCORRUPTED); 2077 } 2078 2079 nexthash = INT_GET(leaf2->entries[0].hashval, ··· 2139 2140 xfs_dir_trace_g_du("leaf: E-O-B", dp, uio); 2141 2142 + return retval; 2143 } 2144 } 2145 ··· 2149 2150 xfs_dir_trace_g_du("leaf: E-O-F", dp, uio); 2151 2152 + return 0; 2153 } 2154 2155 /*
+2 -2
fs/xfs/xfs_fsops.c
··· 501 if (inval == (__uint64_t *)NULL) { 502 outval->resblks = mp->m_resblks; 503 outval->resblks_avail = mp->m_resblks_avail; 504 - return(0); 505 } 506 507 request = *inval; ··· 537 outval->resblks = mp->m_resblks; 538 outval->resblks_avail = mp->m_resblks_avail; 539 XFS_SB_UNLOCK(mp, s); 540 - return(0); 541 } 542 543 void
··· 501 if (inval == (__uint64_t *)NULL) { 502 outval->resblks = mp->m_resblks; 503 outval->resblks_avail = mp->m_resblks_avail; 504 + return 0; 505 } 506 507 request = *inval; ··· 537 outval->resblks = mp->m_resblks; 538 outval->resblks_avail = mp->m_resblks_avail; 539 XFS_SB_UNLOCK(mp, s); 540 + return 0; 541 } 542 543 void
+26 -26
fs/xfs/xfs_log.c
··· 403 404 if (xlog_state_release_iclog(log, iclog)) { 405 xfs_force_shutdown(mp, XFS_LOG_IO_ERROR); 406 - return(EIO); 407 } 408 409 return 0; ··· 556 557 error = xfs_log_unmount_write(mp); 558 xfs_log_unmount_dealloc(mp); 559 - return (error); 560 } 561 562 /* ··· 728 if ((error = xlog_write(mp, reg, nentries, tic, start_lsn, NULL, 0))) { 729 xfs_force_shutdown(mp, XFS_LOG_IO_ERROR); 730 } 731 - return (error); 732 } /* xfs_log_write */ 733 734 ··· 836 needed = 1; 837 } 838 LOG_UNLOCK(log, s); 839 - return(needed); 840 } 841 842 /****************************************************************************** ··· 1003 XFS_BUF_ERROR(bp, EIO); 1004 XFS_BUF_STALE(bp); 1005 xfs_biodone(bp); 1006 - return (XFS_ERROR(EIO)); 1007 1008 1009 } ··· 1263 iclog, XLOG_COMMIT_TRANS))) { 1264 xfs_force_shutdown(mp, XFS_LOG_IO_ERROR); 1265 } 1266 - return (error); 1267 } /* xlog_commit_record */ 1268 1269 ··· 1460 if ((error = XFS_bwrite(bp))) { 1461 xfs_ioerror_alert("xlog_sync", log->l_mp, bp, 1462 XFS_BUF_ADDR(bp)); 1463 - return (error); 1464 } 1465 if (split) { 1466 bp = iclog->ic_log->l_xbuf; ··· 1498 if ((error = XFS_bwrite(bp))) { 1499 xfs_ioerror_alert("xlog_sync (split)", log->l_mp, 1500 bp, XFS_BUF_ADDR(bp)); 1501 - return (error); 1502 } 1503 } 1504 - return (0); 1505 } /* xlog_sync */ 1506 1507 ··· 1798 for (index = 0; index < nentries; ) { 1799 if ((error = xlog_state_get_iclog_space(log, len, &iclog, ticket, 1800 &contwr, &log_offset))) 1801 - return (error); 1802 1803 ASSERT(log_offset <= iclog->ic_size - 1); 1804 ptr = (__psint_t) ((char *)iclog->ic_datap+log_offset); ··· 1903 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); 1904 record_cnt = data_cnt = 0; 1905 if ((error = xlog_state_release_iclog(log, iclog))) 1906 - return (error); 1907 break; /* don't increment index */ 1908 } else { /* copied entire region */ 1909 index++; ··· 1917 ASSERT(flags & XLOG_COMMIT_TRANS); 1918 *commit_iclog = iclog; 1919 } else if ((error = xlog_state_release_iclog(log, 
iclog))) 1920 - return (error); 1921 if (index == nentries) 1922 return 0; /* we are done */ 1923 else ··· 1934 *commit_iclog = iclog; 1935 return 0; 1936 } 1937 - return (xlog_state_release_iclog(log, iclog)); 1938 } /* xlog_write */ 1939 1940 ··· 2050 } 2051 lsn_log = lsn_log->ic_next; 2052 } while (lsn_log != log->l_iclog); 2053 - return(lowest_lsn); 2054 } 2055 2056 ··· 2402 if (iclog->ic_refcnt == 1) { 2403 LOG_UNLOCK(log, s); 2404 if ((error = xlog_state_release_iclog(log, iclog))) 2405 - return (error); 2406 } else { 2407 iclog->ic_refcnt--; 2408 LOG_UNLOCK(log, s); ··· 2569 XLOG_TIC_RESET_RES(tic); 2570 2571 if (tic->t_cnt > 0) 2572 - return (0); 2573 2574 #ifdef DEBUG 2575 if (log->l_flags & XLOG_ACTIVE_RECOVERY) ··· 2667 xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: exit"); 2668 xlog_verify_grant_head(log, 1); 2669 GRANT_UNLOCK(log, s); 2670 - return (0); 2671 2672 2673 error_return: ··· 2837 if (sync) { 2838 return xlog_sync(log, iclog); 2839 } 2840 - return (0); 2841 2842 } /* xlog_state_release_iclog */ 2843 ··· 3127 } while (iclog != log->l_iclog); 3128 3129 LOG_UNLOCK(log, s); 3130 - return (0); 3131 } /* xlog_state_sync */ 3132 3133 ··· 3545 ic->ic_state = XLOG_STATE_IOERROR; 3546 ic = ic->ic_next; 3547 } while (ic != iclog); 3548 - return (0); 3549 } 3550 /* 3551 * Return non-zero, if state transition has already happened. 3552 */ 3553 - return (1); 3554 } 3555 3556 /* ··· 3587 log->l_flags & XLOG_ACTIVE_RECOVERY) { 3588 mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; 3589 XFS_BUF_DONE(mp->m_sb_bp); 3590 - return (0); 3591 } 3592 3593 /* ··· 3596 */ 3597 if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) { 3598 ASSERT(XLOG_FORCED_SHUTDOWN(log)); 3599 - return (1); 3600 } 3601 retval = 0; 3602 /* ··· 3678 } 3679 #endif 3680 /* return non-zero if log IOERROR transition had already happened */ 3681 - return (retval); 3682 } 3683 3684 STATIC int ··· 3692 * any language. 
3693 */ 3694 if (iclog->ic_header.h_num_logops) 3695 - return(0); 3696 iclog = iclog->ic_next; 3697 } while (iclog != log->l_iclog); 3698 - return(1); 3699 }
··· 403 404 if (xlog_state_release_iclog(log, iclog)) { 405 xfs_force_shutdown(mp, XFS_LOG_IO_ERROR); 406 + return EIO; 407 } 408 409 return 0; ··· 556 557 error = xfs_log_unmount_write(mp); 558 xfs_log_unmount_dealloc(mp); 559 + return error; 560 } 561 562 /* ··· 728 if ((error = xlog_write(mp, reg, nentries, tic, start_lsn, NULL, 0))) { 729 xfs_force_shutdown(mp, XFS_LOG_IO_ERROR); 730 } 731 + return error; 732 } /* xfs_log_write */ 733 734 ··· 836 needed = 1; 837 } 838 LOG_UNLOCK(log, s); 839 + return needed; 840 } 841 842 /****************************************************************************** ··· 1003 XFS_BUF_ERROR(bp, EIO); 1004 XFS_BUF_STALE(bp); 1005 xfs_biodone(bp); 1006 + return XFS_ERROR(EIO); 1007 1008 1009 } ··· 1263 iclog, XLOG_COMMIT_TRANS))) { 1264 xfs_force_shutdown(mp, XFS_LOG_IO_ERROR); 1265 } 1266 + return error; 1267 } /* xlog_commit_record */ 1268 1269 ··· 1460 if ((error = XFS_bwrite(bp))) { 1461 xfs_ioerror_alert("xlog_sync", log->l_mp, bp, 1462 XFS_BUF_ADDR(bp)); 1463 + return error; 1464 } 1465 if (split) { 1466 bp = iclog->ic_log->l_xbuf; ··· 1498 if ((error = XFS_bwrite(bp))) { 1499 xfs_ioerror_alert("xlog_sync (split)", log->l_mp, 1500 bp, XFS_BUF_ADDR(bp)); 1501 + return error; 1502 } 1503 } 1504 + return 0; 1505 } /* xlog_sync */ 1506 1507 ··· 1798 for (index = 0; index < nentries; ) { 1799 if ((error = xlog_state_get_iclog_space(log, len, &iclog, ticket, 1800 &contwr, &log_offset))) 1801 + return error; 1802 1803 ASSERT(log_offset <= iclog->ic_size - 1); 1804 ptr = (__psint_t) ((char *)iclog->ic_datap+log_offset); ··· 1903 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); 1904 record_cnt = data_cnt = 0; 1905 if ((error = xlog_state_release_iclog(log, iclog))) 1906 + return error; 1907 break; /* don't increment index */ 1908 } else { /* copied entire region */ 1909 index++; ··· 1917 ASSERT(flags & XLOG_COMMIT_TRANS); 1918 *commit_iclog = iclog; 1919 } else if ((error = xlog_state_release_iclog(log, iclog))) 1920 + 
return error; 1921 if (index == nentries) 1922 return 0; /* we are done */ 1923 else ··· 1934 *commit_iclog = iclog; 1935 return 0; 1936 } 1937 + return xlog_state_release_iclog(log, iclog); 1938 } /* xlog_write */ 1939 1940 ··· 2050 } 2051 lsn_log = lsn_log->ic_next; 2052 } while (lsn_log != log->l_iclog); 2053 + return lowest_lsn; 2054 } 2055 2056 ··· 2402 if (iclog->ic_refcnt == 1) { 2403 LOG_UNLOCK(log, s); 2404 if ((error = xlog_state_release_iclog(log, iclog))) 2405 + return error; 2406 } else { 2407 iclog->ic_refcnt--; 2408 LOG_UNLOCK(log, s); ··· 2569 XLOG_TIC_RESET_RES(tic); 2570 2571 if (tic->t_cnt > 0) 2572 + return 0; 2573 2574 #ifdef DEBUG 2575 if (log->l_flags & XLOG_ACTIVE_RECOVERY) ··· 2667 xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: exit"); 2668 xlog_verify_grant_head(log, 1); 2669 GRANT_UNLOCK(log, s); 2670 + return 0; 2671 2672 2673 error_return: ··· 2837 if (sync) { 2838 return xlog_sync(log, iclog); 2839 } 2840 + return 0; 2841 2842 } /* xlog_state_release_iclog */ 2843 ··· 3127 } while (iclog != log->l_iclog); 3128 3129 LOG_UNLOCK(log, s); 3130 + return 0; 3131 } /* xlog_state_sync */ 3132 3133 ··· 3545 ic->ic_state = XLOG_STATE_IOERROR; 3546 ic = ic->ic_next; 3547 } while (ic != iclog); 3548 + return 0; 3549 } 3550 /* 3551 * Return non-zero, if state transition has already happened. 3552 */ 3553 + return 1; 3554 } 3555 3556 /* ··· 3587 log->l_flags & XLOG_ACTIVE_RECOVERY) { 3588 mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; 3589 XFS_BUF_DONE(mp->m_sb_bp); 3590 + return 0; 3591 } 3592 3593 /* ··· 3596 */ 3597 if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) { 3598 ASSERT(XLOG_FORCED_SHUTDOWN(log)); 3599 + return 1; 3600 } 3601 retval = 0; 3602 /* ··· 3678 } 3679 #endif 3680 /* return non-zero if log IOERROR transition had already happened */ 3681 + return retval; 3682 } 3683 3684 STATIC int ··· 3692 * any language. 
3693 */ 3694 if (iclog->ic_header.h_num_logops) 3695 + return 0; 3696 iclog = iclog->ic_next; 3697 } while (iclog != log->l_iclog); 3698 + return 1; 3699 }
+33 -33
fs/xfs/xfs_mount.c
··· 646 647 if (mp->m_sb_bp == NULL) { 648 if ((error = xfs_readsb(mp))) { 649 - return (error); 650 } 651 } 652 xfs_mount_common(mp, sbp); ··· 889 * For client case we are done now 890 */ 891 if (mfsi_flags & XFS_MFSI_CLIENT) { 892 - return(0); 893 } 894 895 /* ··· 1182 xfs_fs_cmn_err(CE_ALERT, mp, "Superblock write error detected while unmounting. Filesystem may not be marked shared readonly"); 1183 } 1184 xfs_buf_relse(sbp); 1185 - return (error); 1186 } 1187 1188 /* ··· 1257 lcounter += delta; 1258 if (lcounter < 0) { 1259 ASSERT(0); 1260 - return (XFS_ERROR(EINVAL)); 1261 } 1262 mp->m_sb.sb_icount = lcounter; 1263 - return (0); 1264 case XFS_SBS_IFREE: 1265 lcounter = (long long)mp->m_sb.sb_ifree; 1266 lcounter += delta; 1267 if (lcounter < 0) { 1268 ASSERT(0); 1269 - return (XFS_ERROR(EINVAL)); 1270 } 1271 mp->m_sb.sb_ifree = lcounter; 1272 - return (0); 1273 case XFS_SBS_FDBLOCKS: 1274 1275 lcounter = (long long)mp->m_sb.sb_fdblocks; ··· 1296 if (rsvd) { 1297 lcounter = (long long)mp->m_resblks_avail + delta; 1298 if (lcounter < 0) { 1299 - return (XFS_ERROR(ENOSPC)); 1300 } 1301 mp->m_resblks_avail = lcounter; 1302 - return (0); 1303 } else { /* not reserved */ 1304 - return (XFS_ERROR(ENOSPC)); 1305 } 1306 } 1307 } 1308 1309 mp->m_sb.sb_fdblocks = lcounter; 1310 - return (0); 1311 case XFS_SBS_FREXTENTS: 1312 lcounter = (long long)mp->m_sb.sb_frextents; 1313 lcounter += delta; 1314 if (lcounter < 0) { 1315 - return (XFS_ERROR(ENOSPC)); 1316 } 1317 mp->m_sb.sb_frextents = lcounter; 1318 - return (0); 1319 case XFS_SBS_DBLOCKS: 1320 lcounter = (long long)mp->m_sb.sb_dblocks; 1321 lcounter += delta; 1322 if (lcounter < 0) { 1323 ASSERT(0); 1324 - return (XFS_ERROR(EINVAL)); 1325 } 1326 mp->m_sb.sb_dblocks = lcounter; 1327 - return (0); 1328 case XFS_SBS_AGCOUNT: 1329 scounter = mp->m_sb.sb_agcount; 1330 scounter += delta; 1331 if (scounter < 0) { 1332 ASSERT(0); 1333 - return (XFS_ERROR(EINVAL)); 1334 } 1335 mp->m_sb.sb_agcount = scounter; 1336 - return (0); 
1337 case XFS_SBS_IMAX_PCT: 1338 scounter = mp->m_sb.sb_imax_pct; 1339 scounter += delta; 1340 if (scounter < 0) { 1341 ASSERT(0); 1342 - return (XFS_ERROR(EINVAL)); 1343 } 1344 mp->m_sb.sb_imax_pct = scounter; 1345 - return (0); 1346 case XFS_SBS_REXTSIZE: 1347 scounter = mp->m_sb.sb_rextsize; 1348 scounter += delta; 1349 if (scounter < 0) { 1350 ASSERT(0); 1351 - return (XFS_ERROR(EINVAL)); 1352 } 1353 mp->m_sb.sb_rextsize = scounter; 1354 - return (0); 1355 case XFS_SBS_RBMBLOCKS: 1356 scounter = mp->m_sb.sb_rbmblocks; 1357 scounter += delta; 1358 if (scounter < 0) { 1359 ASSERT(0); 1360 - return (XFS_ERROR(EINVAL)); 1361 } 1362 mp->m_sb.sb_rbmblocks = scounter; 1363 - return (0); 1364 case XFS_SBS_RBLOCKS: 1365 lcounter = (long long)mp->m_sb.sb_rblocks; 1366 lcounter += delta; 1367 if (lcounter < 0) { 1368 ASSERT(0); 1369 - return (XFS_ERROR(EINVAL)); 1370 } 1371 mp->m_sb.sb_rblocks = lcounter; 1372 - return (0); 1373 case XFS_SBS_REXTENTS: 1374 lcounter = (long long)mp->m_sb.sb_rextents; 1375 lcounter += delta; 1376 if (lcounter < 0) { 1377 ASSERT(0); 1378 - return (XFS_ERROR(EINVAL)); 1379 } 1380 mp->m_sb.sb_rextents = lcounter; 1381 - return (0); 1382 case XFS_SBS_REXTSLOG: 1383 scounter = mp->m_sb.sb_rextslog; 1384 scounter += delta; 1385 if (scounter < 0) { 1386 ASSERT(0); 1387 - return (XFS_ERROR(EINVAL)); 1388 } 1389 mp->m_sb.sb_rextslog = scounter; 1390 - return (0); 1391 default: 1392 ASSERT(0); 1393 - return (XFS_ERROR(EINVAL)); 1394 } 1395 } 1396 ··· 1409 s = XFS_SB_LOCK(mp); 1410 status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); 1411 XFS_SB_UNLOCK(mp, s); 1412 - return (status); 1413 } 1414 1415 /* ··· 1470 } 1471 } 1472 XFS_SB_UNLOCK(mp, s); 1473 - return (status); 1474 } 1475 1476 /* ··· 1500 } 1501 XFS_BUF_HOLD(bp); 1502 ASSERT(XFS_BUF_ISDONE(bp)); 1503 - return (bp); 1504 } 1505 1506 /*
··· 646 647 if (mp->m_sb_bp == NULL) { 648 if ((error = xfs_readsb(mp))) { 649 + return error; 650 } 651 } 652 xfs_mount_common(mp, sbp); ··· 889 * For client case we are done now 890 */ 891 if (mfsi_flags & XFS_MFSI_CLIENT) { 892 + return 0; 893 } 894 895 /* ··· 1182 xfs_fs_cmn_err(CE_ALERT, mp, "Superblock write error detected while unmounting. Filesystem may not be marked shared readonly"); 1183 } 1184 xfs_buf_relse(sbp); 1185 + return error; 1186 } 1187 1188 /* ··· 1257 lcounter += delta; 1258 if (lcounter < 0) { 1259 ASSERT(0); 1260 + return XFS_ERROR(EINVAL); 1261 } 1262 mp->m_sb.sb_icount = lcounter; 1263 + return 0; 1264 case XFS_SBS_IFREE: 1265 lcounter = (long long)mp->m_sb.sb_ifree; 1266 lcounter += delta; 1267 if (lcounter < 0) { 1268 ASSERT(0); 1269 + return XFS_ERROR(EINVAL); 1270 } 1271 mp->m_sb.sb_ifree = lcounter; 1272 + return 0; 1273 case XFS_SBS_FDBLOCKS: 1274 1275 lcounter = (long long)mp->m_sb.sb_fdblocks; ··· 1296 if (rsvd) { 1297 lcounter = (long long)mp->m_resblks_avail + delta; 1298 if (lcounter < 0) { 1299 + return XFS_ERROR(ENOSPC); 1300 } 1301 mp->m_resblks_avail = lcounter; 1302 + return 0; 1303 } else { /* not reserved */ 1304 + return XFS_ERROR(ENOSPC); 1305 } 1306 } 1307 } 1308 1309 mp->m_sb.sb_fdblocks = lcounter; 1310 + return 0; 1311 case XFS_SBS_FREXTENTS: 1312 lcounter = (long long)mp->m_sb.sb_frextents; 1313 lcounter += delta; 1314 if (lcounter < 0) { 1315 + return XFS_ERROR(ENOSPC); 1316 } 1317 mp->m_sb.sb_frextents = lcounter; 1318 + return 0; 1319 case XFS_SBS_DBLOCKS: 1320 lcounter = (long long)mp->m_sb.sb_dblocks; 1321 lcounter += delta; 1322 if (lcounter < 0) { 1323 ASSERT(0); 1324 + return XFS_ERROR(EINVAL); 1325 } 1326 mp->m_sb.sb_dblocks = lcounter; 1327 + return 0; 1328 case XFS_SBS_AGCOUNT: 1329 scounter = mp->m_sb.sb_agcount; 1330 scounter += delta; 1331 if (scounter < 0) { 1332 ASSERT(0); 1333 + return XFS_ERROR(EINVAL); 1334 } 1335 mp->m_sb.sb_agcount = scounter; 1336 + return 0; 1337 case XFS_SBS_IMAX_PCT: 1338 
scounter = mp->m_sb.sb_imax_pct; 1339 scounter += delta; 1340 if (scounter < 0) { 1341 ASSERT(0); 1342 + return XFS_ERROR(EINVAL); 1343 } 1344 mp->m_sb.sb_imax_pct = scounter; 1345 + return 0; 1346 case XFS_SBS_REXTSIZE: 1347 scounter = mp->m_sb.sb_rextsize; 1348 scounter += delta; 1349 if (scounter < 0) { 1350 ASSERT(0); 1351 + return XFS_ERROR(EINVAL); 1352 } 1353 mp->m_sb.sb_rextsize = scounter; 1354 + return 0; 1355 case XFS_SBS_RBMBLOCKS: 1356 scounter = mp->m_sb.sb_rbmblocks; 1357 scounter += delta; 1358 if (scounter < 0) { 1359 ASSERT(0); 1360 + return XFS_ERROR(EINVAL); 1361 } 1362 mp->m_sb.sb_rbmblocks = scounter; 1363 + return 0; 1364 case XFS_SBS_RBLOCKS: 1365 lcounter = (long long)mp->m_sb.sb_rblocks; 1366 lcounter += delta; 1367 if (lcounter < 0) { 1368 ASSERT(0); 1369 + return XFS_ERROR(EINVAL); 1370 } 1371 mp->m_sb.sb_rblocks = lcounter; 1372 + return 0; 1373 case XFS_SBS_REXTENTS: 1374 lcounter = (long long)mp->m_sb.sb_rextents; 1375 lcounter += delta; 1376 if (lcounter < 0) { 1377 ASSERT(0); 1378 + return XFS_ERROR(EINVAL); 1379 } 1380 mp->m_sb.sb_rextents = lcounter; 1381 + return 0; 1382 case XFS_SBS_REXTSLOG: 1383 scounter = mp->m_sb.sb_rextslog; 1384 scounter += delta; 1385 if (scounter < 0) { 1386 ASSERT(0); 1387 + return XFS_ERROR(EINVAL); 1388 } 1389 mp->m_sb.sb_rextslog = scounter; 1390 + return 0; 1391 default: 1392 ASSERT(0); 1393 + return XFS_ERROR(EINVAL); 1394 } 1395 } 1396 ··· 1409 s = XFS_SB_LOCK(mp); 1410 status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); 1411 XFS_SB_UNLOCK(mp, s); 1412 + return status; 1413 } 1414 1415 /* ··· 1470 } 1471 } 1472 XFS_SB_UNLOCK(mp, s); 1473 + return status; 1474 } 1475 1476 /* ··· 1500 } 1501 XFS_BUF_HOLD(bp); 1502 ASSERT(XFS_BUF_ISDONE(bp)); 1503 + return bp; 1504 } 1505 1506 /*
+11 -11
fs/xfs/xfs_trans_item.c
··· 78 lidp->lid_size = 0; 79 lip->li_desc = lidp; 80 lip->li_mountp = tp->t_mountp; 81 - return (lidp); 82 } 83 84 /* ··· 119 lidp->lid_size = 0; 120 lip->li_desc = lidp; 121 lip->li_mountp = tp->t_mountp; 122 - return (lidp); 123 } 124 125 /* ··· 180 { 181 ASSERT(lip->li_desc != NULL); 182 183 - return (lip->li_desc); 184 } 185 186 ··· 219 continue; 220 } 221 222 - return (XFS_LIC_SLOT(licp, i)); 223 } 224 cmn_err(CE_WARN, "xfs_trans_first_item() -- no first item"); 225 - return(NULL); 226 } 227 228 ··· 252 continue; 253 } 254 255 - return (XFS_LIC_SLOT(licp, i)); 256 } 257 258 /* ··· 261 * If there is no next chunk, return NULL. 262 */ 263 if (licp->lic_next == NULL) { 264 - return (NULL); 265 } 266 267 licp = licp->lic_next; ··· 271 continue; 272 } 273 274 - return (XFS_LIC_SLOT(licp, i)); 275 } 276 ASSERT(0); 277 /* NOTREACHED */ ··· 425 } 426 } 427 428 - return (freed); 429 } 430 431 ··· 478 */ 479 lbsp->lbc_ag = ag; 480 lbsp->lbc_idx = idx; 481 - return (lbsp); 482 } 483 484 /* ··· 512 tp->t_busy_free--; 513 lbsp->lbc_ag = ag; 514 lbsp->lbc_idx = idx; 515 - return (lbsp); 516 } 517 518
··· 78 lidp->lid_size = 0; 79 lip->li_desc = lidp; 80 lip->li_mountp = tp->t_mountp; 81 + return lidp; 82 } 83 84 /* ··· 119 lidp->lid_size = 0; 120 lip->li_desc = lidp; 121 lip->li_mountp = tp->t_mountp; 122 + return lidp; 123 } 124 125 /* ··· 180 { 181 ASSERT(lip->li_desc != NULL); 182 183 + return lip->li_desc; 184 } 185 186 ··· 219 continue; 220 } 221 222 + return XFS_LIC_SLOT(licp, i); 223 } 224 cmn_err(CE_WARN, "xfs_trans_first_item() -- no first item"); 225 + return NULL; 226 } 227 228 ··· 252 continue; 253 } 254 255 + return XFS_LIC_SLOT(licp, i); 256 } 257 258 /* ··· 261 * If there is no next chunk, return NULL. 262 */ 263 if (licp->lic_next == NULL) { 264 + return NULL; 265 } 266 267 licp = licp->lic_next; ··· 271 continue; 272 } 273 274 + return XFS_LIC_SLOT(licp, i); 275 } 276 ASSERT(0); 277 /* NOTREACHED */ ··· 425 } 426 } 427 428 + return freed; 429 } 430 431 ··· 478 */ 479 lbsp->lbc_ag = ag; 480 lbsp->lbc_idx = idx; 481 + return lbsp; 482 } 483 484 /* ··· 512 tp->t_busy_free--; 513 lbsp->lbc_ag = ag; 514 lbsp->lbc_idx = idx; 515 + return lbsp; 516 } 517 518
+29 -30
fs/xfs/xfs_vnodeops.c
··· 338 code = XFS_QM_DQVOPALLOC(mp, ip, uid, gid, projid, qflags, 339 &udqp, &gdqp); 340 if (code) 341 - return (code); 342 } 343 344 /* ··· 1027 1028 } 1029 1030 - 1031 error_return: 1032 - 1033 xfs_iunlock(ip, XFS_ILOCK_SHARED); 1034 - 1035 return error; 1036 } 1037 ··· 1203 last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); 1204 map_len = last_fsb - end_fsb; 1205 if (map_len <= 0) 1206 - return (0); 1207 1208 nimaps = 1; 1209 xfs_ilock(ip, XFS_ILOCK_SHARED); ··· 1218 * Attach the dquots to the inode up front. 1219 */ 1220 if ((error = XFS_QM_DQATTACH(mp, ip, 0))) 1221 - return (error); 1222 1223 /* 1224 * There are blocks after the end of file. ··· 1246 ASSERT(XFS_FORCED_SHUTDOWN(mp)); 1247 xfs_trans_cancel(tp, 0); 1248 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 1249 - return (error); 1250 } 1251 1252 xfs_ilock(ip, XFS_ILOCK_EXCL); ··· 1274 } 1275 xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); 1276 } 1277 - return (error); 1278 } 1279 1280 /* ··· 1452 if (error) { 1453 xfs_trans_cancel(*tpp, 0); 1454 *tpp = NULL; 1455 - return (error); 1456 } 1457 xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 1458 ··· 1465 XFS_DATA_FORK); 1466 ASSERT(ip->i_df.if_bytes == 0); 1467 } 1468 - return (0); 1469 } 1470 1471 /* ··· 1491 if (error) { 1492 *tpp = NULL; 1493 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 1494 - return (error); /* goto out*/ 1495 } 1496 1497 tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); ··· 1504 xfs_trans_cancel(tp, 0); 1505 *tpp = NULL; 1506 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 1507 - return (error); 1508 } 1509 1510 xfs_ilock(ip, XFS_ILOCK_EXCL); ··· 1515 ASSERT(ip->i_d.di_anextents == 0); 1516 1517 *tpp = tp; 1518 - return (0); 1519 } 1520 1521 STATIC int ··· 1554 (!(ip->i_d.di_flags & 1555 (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) { 1556 if ((error = xfs_inactive_free_eofblocks(mp, ip))) 1557 - return (error); 1558 /* Update linux inode block count after free above */ 1559 LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp, 1560 ip->i_d.di_nblocks + 
ip->i_delayed_blks); ··· 1635 (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) || 1636 (ip->i_delayed_blks != 0)))) { 1637 if ((error = xfs_inactive_free_eofblocks(mp, ip))) 1638 - return (VN_INACTIVE_CACHE); 1639 /* Update linux inode block count after free above */ 1640 LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp, 1641 ip->i_d.di_nblocks + ip->i_delayed_blks); ··· 1646 ASSERT(ip->i_d.di_nlink == 0); 1647 1648 if ((error = XFS_QM_DQATTACH(mp, ip, 0))) 1649 - return (VN_INACTIVE_CACHE); 1650 1651 tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); 1652 if (truncate) { ··· 1669 ASSERT(XFS_FORCED_SHUTDOWN(mp)); 1670 xfs_trans_cancel(tp, 0); 1671 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 1672 - return (VN_INACTIVE_CACHE); 1673 } 1674 1675 xfs_ilock(ip, XFS_ILOCK_EXCL); ··· 1690 xfs_trans_cancel(tp, 1691 XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); 1692 xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); 1693 - return (VN_INACTIVE_CACHE); 1694 } 1695 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFLNK) { 1696 ··· 1704 1705 if (error) { 1706 ASSERT(tp == NULL); 1707 - return (VN_INACTIVE_CACHE); 1708 } 1709 1710 xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); ··· 1717 if (error) { 1718 ASSERT(XFS_FORCED_SHUTDOWN(mp)); 1719 xfs_trans_cancel(tp, 0); 1720 - return (VN_INACTIVE_CACHE); 1721 } 1722 1723 xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); ··· 1739 * cancelled, and the inode is unlocked. Just get out. 
1740 */ 1741 if (error) 1742 - return (VN_INACTIVE_CACHE); 1743 } else if (ip->i_afp) { 1744 xfs_idestroy_fork(ip, XFS_ATTR_FORK); 1745 } ··· 2046 abort_return: 2047 cancel_flags |= XFS_TRANS_ABORT; 2048 /* FALLTHROUGH */ 2049 - error_return: 2050 2051 if (tp != NULL) 2052 xfs_trans_cancel(tp, cancel_flags); 2053 ··· 2721 abort_return: 2722 cancel_flags |= XFS_TRANS_ABORT; 2723 /* FALLTHROUGH */ 2724 error_return: 2725 xfs_trans_cancel(tp, cancel_flags); 2726 - 2727 goto std_return; 2728 } 2729 /* ··· 3196 } 3197 return error; 3198 3199 - error1: 3200 xfs_bmap_cancel(&free_list); 3201 cancel_flags |= XFS_TRANS_ABORT; 3202 - error_return: 3203 xfs_trans_cancel(tp, cancel_flags); 3204 goto std_return; 3205 } ··· 3617 if (locktype == VRWLOCK_WRITE) { 3618 xfs_ilock(ip, XFS_IOLOCK_EXCL); 3619 } else if (locktype == VRWLOCK_TRY_READ) { 3620 - return (xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)); 3621 } else if (locktype == VRWLOCK_TRY_WRITE) { 3622 - return (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)); 3623 } else { 3624 ASSERT((locktype == VRWLOCK_READ) || 3625 (locktype == VRWLOCK_WRITE_DIRECT)); ··· 3867 xfs_ifunlock(ip); 3868 xfs_iunlock(ip, XFS_ILOCK_EXCL); 3869 } 3870 - return(1); 3871 } 3872 ip->i_flags |= XFS_IRECLAIM; 3873 write_unlock(&ih->ih_lock); ··· 4044 offset, end_dmi_offset - offset, 4045 0, NULL); 4046 if (error) 4047 - return(error); 4048 } 4049 4050 /* ··· 4304 offset, end_dmi_offset - offset, 4305 AT_DELAY_FLAG(attr_flags), NULL); 4306 if (error) 4307 - return(error); 4308 } 4309 4310 ASSERT(attr_flags & ATTR_NOLOCK ? attr_flags & ATTR_DMI : 1);
··· 338 code = XFS_QM_DQVOPALLOC(mp, ip, uid, gid, projid, qflags, 339 &udqp, &gdqp); 340 if (code) 341 + return code; 342 } 343 344 /* ··· 1027 1028 } 1029 1030 error_return: 1031 xfs_iunlock(ip, XFS_ILOCK_SHARED); 1032 return error; 1033 } 1034 ··· 1206 last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); 1207 map_len = last_fsb - end_fsb; 1208 if (map_len <= 0) 1209 + return 0; 1210 1211 nimaps = 1; 1212 xfs_ilock(ip, XFS_ILOCK_SHARED); ··· 1221 * Attach the dquots to the inode up front. 1222 */ 1223 if ((error = XFS_QM_DQATTACH(mp, ip, 0))) 1224 + return error; 1225 1226 /* 1227 * There are blocks after the end of file. ··· 1249 ASSERT(XFS_FORCED_SHUTDOWN(mp)); 1250 xfs_trans_cancel(tp, 0); 1251 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 1252 + return error; 1253 } 1254 1255 xfs_ilock(ip, XFS_ILOCK_EXCL); ··· 1277 } 1278 xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); 1279 } 1280 + return error; 1281 } 1282 1283 /* ··· 1455 if (error) { 1456 xfs_trans_cancel(*tpp, 0); 1457 *tpp = NULL; 1458 + return error; 1459 } 1460 xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 1461 ··· 1468 XFS_DATA_FORK); 1469 ASSERT(ip->i_df.if_bytes == 0); 1470 } 1471 + return 0; 1472 } 1473 1474 /* ··· 1494 if (error) { 1495 *tpp = NULL; 1496 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 1497 + return error; /* goto out */ 1498 } 1499 1500 tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); ··· 1507 xfs_trans_cancel(tp, 0); 1508 *tpp = NULL; 1509 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 1510 + return error; 1511 } 1512 1513 xfs_ilock(ip, XFS_ILOCK_EXCL); ··· 1518 ASSERT(ip->i_d.di_anextents == 0); 1519 1520 *tpp = tp; 1521 + return 0; 1522 } 1523 1524 STATIC int ··· 1557 (!(ip->i_d.di_flags & 1558 (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) { 1559 if ((error = xfs_inactive_free_eofblocks(mp, ip))) 1560 + return error; 1561 /* Update linux inode block count after free above */ 1562 LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp, 1563 ip->i_d.di_nblocks + ip->i_delayed_blks); ··· 1638 (XFS_DIFLAG_PREALLOC 
| XFS_DIFLAG_APPEND)) || 1639 (ip->i_delayed_blks != 0)))) { 1640 if ((error = xfs_inactive_free_eofblocks(mp, ip))) 1641 + return VN_INACTIVE_CACHE; 1642 /* Update linux inode block count after free above */ 1643 LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp, 1644 ip->i_d.di_nblocks + ip->i_delayed_blks); ··· 1649 ASSERT(ip->i_d.di_nlink == 0); 1650 1651 if ((error = XFS_QM_DQATTACH(mp, ip, 0))) 1652 + return VN_INACTIVE_CACHE; 1653 1654 tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); 1655 if (truncate) { ··· 1672 ASSERT(XFS_FORCED_SHUTDOWN(mp)); 1673 xfs_trans_cancel(tp, 0); 1674 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 1675 + return VN_INACTIVE_CACHE; 1676 } 1677 1678 xfs_ilock(ip, XFS_ILOCK_EXCL); ··· 1693 xfs_trans_cancel(tp, 1694 XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); 1695 xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); 1696 + return VN_INACTIVE_CACHE; 1697 } 1698 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFLNK) { 1699 ··· 1707 1708 if (error) { 1709 ASSERT(tp == NULL); 1710 + return VN_INACTIVE_CACHE; 1711 } 1712 1713 xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); ··· 1720 if (error) { 1721 ASSERT(XFS_FORCED_SHUTDOWN(mp)); 1722 xfs_trans_cancel(tp, 0); 1723 + return VN_INACTIVE_CACHE; 1724 } 1725 1726 xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); ··· 1742 * cancelled, and the inode is unlocked. Just get out. 
1743 */ 1744 if (error) 1745 + return VN_INACTIVE_CACHE; 1746 } else if (ip->i_afp) { 1747 xfs_idestroy_fork(ip, XFS_ATTR_FORK); 1748 } ··· 2049 abort_return: 2050 cancel_flags |= XFS_TRANS_ABORT; 2051 /* FALLTHROUGH */ 2052 2053 + error_return: 2054 if (tp != NULL) 2055 xfs_trans_cancel(tp, cancel_flags); 2056 ··· 2724 abort_return: 2725 cancel_flags |= XFS_TRANS_ABORT; 2726 /* FALLTHROUGH */ 2727 + 2728 error_return: 2729 xfs_trans_cancel(tp, cancel_flags); 2730 goto std_return; 2731 } 2732 /* ··· 3199 } 3200 return error; 3201 3202 + error1: 3203 xfs_bmap_cancel(&free_list); 3204 cancel_flags |= XFS_TRANS_ABORT; 3205 + /* FALLTHROUGH */ 3206 + 3207 + error_return: 3208 xfs_trans_cancel(tp, cancel_flags); 3209 goto std_return; 3210 } ··· 3618 if (locktype == VRWLOCK_WRITE) { 3619 xfs_ilock(ip, XFS_IOLOCK_EXCL); 3620 } else if (locktype == VRWLOCK_TRY_READ) { 3621 + return xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED); 3622 } else if (locktype == VRWLOCK_TRY_WRITE) { 3623 + return xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL); 3624 } else { 3625 ASSERT((locktype == VRWLOCK_READ) || 3626 (locktype == VRWLOCK_WRITE_DIRECT)); ··· 3868 xfs_ifunlock(ip); 3869 xfs_iunlock(ip, XFS_ILOCK_EXCL); 3870 } 3871 + return 1; 3872 } 3873 ip->i_flags |= XFS_IRECLAIM; 3874 write_unlock(&ih->ih_lock); ··· 4045 offset, end_dmi_offset - offset, 4046 0, NULL); 4047 if (error) 4048 + return error; 4049 } 4050 4051 /* ··· 4305 offset, end_dmi_offset - offset, 4306 AT_DELAY_FLAG(attr_flags), NULL); 4307 if (error) 4308 + return error; 4309 } 4310 4311 ASSERT(attr_flags & ATTR_NOLOCK ? attr_flags & ATTR_DMI : 1);