Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

fs: xfs: Remove KM_NOSLEEP and KM_SLEEP.

Since no caller is using KM_NOSLEEP and no callee branches on KM_SLEEP,
we can remove KM_NOSLEEP and replace KM_SLEEP with 0.

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>

Authored by Tetsuo Handa; committed by Darrick J. Wong.
707e0dda a55aa89a

+102 -109
+3 -3
fs/xfs/kmem.c
··· 17 17 18 18 do { 19 19 ptr = kmalloc(size, lflags); 20 - if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP))) 20 + if (ptr || (flags & KM_MAYFAIL)) 21 21 return ptr; 22 22 if (!(++retries % 100)) 23 23 xfs_err(NULL, ··· 67 67 68 68 do { 69 69 ptr = krealloc(old, newsize, lflags); 70 - if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP))) 70 + if (ptr || (flags & KM_MAYFAIL)) 71 71 return ptr; 72 72 if (!(++retries % 100)) 73 73 xfs_err(NULL, ··· 87 87 88 88 do { 89 89 ptr = kmem_cache_alloc(zone, lflags); 90 - if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP))) 90 + if (ptr || (flags & KM_MAYFAIL)) 91 91 return ptr; 92 92 if (!(++retries % 100)) 93 93 xfs_err(NULL,
+4 -10
fs/xfs/kmem.h
··· 16 16 */ 17 17 18 18 typedef unsigned __bitwise xfs_km_flags_t; 19 - #define KM_SLEEP ((__force xfs_km_flags_t)0x0001u) 20 - #define KM_NOSLEEP ((__force xfs_km_flags_t)0x0002u) 21 19 #define KM_NOFS ((__force xfs_km_flags_t)0x0004u) 22 20 #define KM_MAYFAIL ((__force xfs_km_flags_t)0x0008u) 23 21 #define KM_ZERO ((__force xfs_km_flags_t)0x0010u) ··· 30 32 { 31 33 gfp_t lflags; 32 34 33 - BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_ZERO)); 35 + BUG_ON(flags & ~(KM_NOFS|KM_MAYFAIL|KM_ZERO)); 34 36 35 - if (flags & KM_NOSLEEP) { 36 - lflags = GFP_ATOMIC | __GFP_NOWARN; 37 - } else { 38 - lflags = GFP_KERNEL | __GFP_NOWARN; 39 - if (flags & KM_NOFS) 40 - lflags &= ~__GFP_FS; 41 - } 37 + lflags = GFP_KERNEL | __GFP_NOWARN; 38 + if (flags & KM_NOFS) 39 + lflags &= ~__GFP_FS; 42 40 43 41 /* 44 42 * Default page/slab allocator behavior is to retry for ever
+1 -1
fs/xfs/libxfs/xfs_alloc.c
··· 2205 2205 ASSERT(xfs_bmap_free_item_zone != NULL); 2206 2206 ASSERT(oinfo != NULL); 2207 2207 2208 - new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP); 2208 + new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0); 2209 2209 new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno); 2210 2210 new->xefi_blockcount = 1; 2211 2211 new->xefi_oinfo = *oinfo;
+4 -4
fs/xfs/libxfs/xfs_attr_leaf.c
··· 782 782 ifp = dp->i_afp; 783 783 sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; 784 784 size = be16_to_cpu(sf->hdr.totsize); 785 - tmpbuffer = kmem_alloc(size, KM_SLEEP); 785 + tmpbuffer = kmem_alloc(size, 0); 786 786 ASSERT(tmpbuffer != NULL); 787 787 memcpy(tmpbuffer, ifp->if_u1.if_data, size); 788 788 sf = (xfs_attr_shortform_t *)tmpbuffer; ··· 985 985 986 986 trace_xfs_attr_leaf_to_sf(args); 987 987 988 - tmpbuffer = kmem_alloc(args->geo->blksize, KM_SLEEP); 988 + tmpbuffer = kmem_alloc(args->geo->blksize, 0); 989 989 if (!tmpbuffer) 990 990 return -ENOMEM; 991 991 ··· 1448 1448 1449 1449 trace_xfs_attr_leaf_compact(args); 1450 1450 1451 - tmpbuffer = kmem_alloc(args->geo->blksize, KM_SLEEP); 1451 + tmpbuffer = kmem_alloc(args->geo->blksize, 0); 1452 1452 memcpy(tmpbuffer, bp->b_addr, args->geo->blksize); 1453 1453 memset(bp->b_addr, 0, args->geo->blksize); 1454 1454 leaf_src = (xfs_attr_leafblock_t *)tmpbuffer; ··· 2167 2167 struct xfs_attr_leafblock *tmp_leaf; 2168 2168 struct xfs_attr3_icleaf_hdr tmphdr; 2169 2169 2170 - tmp_leaf = kmem_zalloc(state->args->geo->blksize, KM_SLEEP); 2170 + tmp_leaf = kmem_zalloc(state->args->geo->blksize, 0); 2171 2171 2172 2172 /* 2173 2173 * Copy the header into the temp leaf so that all the stuff
+3 -3
fs/xfs/libxfs/xfs_bmap.c
··· 553 553 #endif 554 554 ASSERT(xfs_bmap_free_item_zone != NULL); 555 555 556 - new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP); 556 + new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0); 557 557 new->xefi_startblock = bno; 558 558 new->xefi_blockcount = (xfs_extlen_t)len; 559 559 if (oinfo) ··· 1099 1099 if (error) 1100 1100 goto trans_cancel; 1101 1101 ASSERT(ip->i_afp == NULL); 1102 - ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP); 1102 + ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, 0); 1103 1103 ip->i_afp->if_flags = XFS_IFEXTENTS; 1104 1104 logflags = 0; 1105 1105 switch (ip->i_d.di_format) { ··· 6094 6094 bmap->br_blockcount, 6095 6095 bmap->br_state); 6096 6096 6097 - bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS); 6097 + bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_NOFS); 6098 6098 INIT_LIST_HEAD(&bi->bi_list); 6099 6099 bi->bi_type = type; 6100 6100 bi->bi_owner = ip;
+3 -3
fs/xfs/libxfs/xfs_da_btree.c
··· 2098 2098 * If we didn't get it and the block might work if fragmented, 2099 2099 * try without the CONTIG flag. Loop until we get it all. 2100 2100 */ 2101 - mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP); 2101 + mapp = kmem_alloc(sizeof(*mapp) * count, 0); 2102 2102 for (b = *bno, mapi = 0; b < *bno + count; ) { 2103 2103 nmap = min(XFS_BMAP_MAX_NMAP, count); 2104 2104 c = (int)(*bno + count - b); ··· 2480 2480 2481 2481 if (nirecs > 1) { 2482 2482 map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), 2483 - KM_SLEEP | KM_NOFS); 2483 + KM_NOFS); 2484 2484 if (!map) 2485 2485 return -ENOMEM; 2486 2486 *mapp = map; ··· 2539 2539 */ 2540 2540 if (nfsb != 1) 2541 2541 irecs = kmem_zalloc(sizeof(irec) * nfsb, 2542 - KM_SLEEP | KM_NOFS); 2542 + KM_NOFS); 2543 2543 2544 2544 nirecs = nfsb; 2545 2545 error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
+1 -1
fs/xfs/libxfs/xfs_defer.c
··· 517 517 } 518 518 if (!dfp) { 519 519 dfp = kmem_alloc(sizeof(struct xfs_defer_pending), 520 - KM_SLEEP | KM_NOFS); 520 + KM_NOFS); 521 521 dfp->dfp_type = type; 522 522 dfp->dfp_intent = NULL; 523 523 dfp->dfp_done = NULL;
+7 -7
fs/xfs/libxfs/xfs_dir2.c
··· 110 110 111 111 nodehdr_size = mp->m_dir_inode_ops->node_hdr_size; 112 112 mp->m_dir_geo = kmem_zalloc(sizeof(struct xfs_da_geometry), 113 - KM_SLEEP | KM_MAYFAIL); 113 + KM_MAYFAIL); 114 114 mp->m_attr_geo = kmem_zalloc(sizeof(struct xfs_da_geometry), 115 - KM_SLEEP | KM_MAYFAIL); 115 + KM_MAYFAIL); 116 116 if (!mp->m_dir_geo || !mp->m_attr_geo) { 117 117 kmem_free(mp->m_dir_geo); 118 118 kmem_free(mp->m_attr_geo); ··· 217 217 if (error) 218 218 return error; 219 219 220 - args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); 220 + args = kmem_zalloc(sizeof(*args), KM_NOFS); 221 221 if (!args) 222 222 return -ENOMEM; 223 223 ··· 254 254 XFS_STATS_INC(dp->i_mount, xs_dir_create); 255 255 } 256 256 257 - args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); 257 + args = kmem_zalloc(sizeof(*args), KM_NOFS); 258 258 if (!args) 259 259 return -ENOMEM; 260 260 ··· 353 353 * lockdep Doing this avoids having to add a bunch of lockdep class 354 354 * annotations into the reclaim path for the ilock. 355 355 */ 356 - args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); 356 + args = kmem_zalloc(sizeof(*args), KM_NOFS); 357 357 args->geo = dp->i_mount->m_dir_geo; 358 358 args->name = name->name; 359 359 args->namelen = name->len; ··· 422 422 ASSERT(S_ISDIR(VFS_I(dp)->i_mode)); 423 423 XFS_STATS_INC(dp->i_mount, xs_dir_remove); 424 424 425 - args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); 425 + args = kmem_zalloc(sizeof(*args), KM_NOFS); 426 426 if (!args) 427 427 return -ENOMEM; 428 428 ··· 483 483 if (rval) 484 484 return rval; 485 485 486 - args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); 486 + args = kmem_zalloc(sizeof(*args), KM_NOFS); 487 487 if (!args) 488 488 return -ENOMEM; 489 489
+1 -1
fs/xfs/libxfs/xfs_dir2_block.c
··· 1092 1092 * Copy the directory into a temporary buffer. 1093 1093 * Then pitch the incore inode data so we can make extents. 1094 1094 */ 1095 - sfp = kmem_alloc(ifp->if_bytes, KM_SLEEP); 1095 + sfp = kmem_alloc(ifp->if_bytes, 0); 1096 1096 memcpy(sfp, oldsfp, ifp->if_bytes); 1097 1097 1098 1098 xfs_idata_realloc(dp, -ifp->if_bytes, XFS_DATA_FORK);
+4 -4
fs/xfs/libxfs/xfs_dir2_sf.c
··· 164 164 * can free the block and copy the formatted data into the inode literal 165 165 * area. 166 166 */ 167 - dst = kmem_alloc(mp->m_sb.sb_inodesize, KM_SLEEP); 167 + dst = kmem_alloc(mp->m_sb.sb_inodesize, 0); 168 168 hdr = bp->b_addr; 169 169 170 170 /* ··· 436 436 437 437 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; 438 438 old_isize = (int)dp->i_d.di_size; 439 - buf = kmem_alloc(old_isize, KM_SLEEP); 439 + buf = kmem_alloc(old_isize, 0); 440 440 oldsfp = (xfs_dir2_sf_hdr_t *)buf; 441 441 memcpy(oldsfp, sfp, old_isize); 442 442 /* ··· 1096 1096 * Don't want xfs_idata_realloc copying the data here. 1097 1097 */ 1098 1098 oldsize = dp->i_df.if_bytes; 1099 - buf = kmem_alloc(oldsize, KM_SLEEP); 1099 + buf = kmem_alloc(oldsize, 0); 1100 1100 oldsfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; 1101 1101 ASSERT(oldsfp->i8count == 1); 1102 1102 memcpy(buf, oldsfp, oldsize); ··· 1169 1169 * Don't want xfs_idata_realloc copying the data here. 1170 1170 */ 1171 1171 oldsize = dp->i_df.if_bytes; 1172 - buf = kmem_alloc(oldsize, KM_SLEEP); 1172 + buf = kmem_alloc(oldsize, 0); 1173 1173 oldsfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; 1174 1174 ASSERT(oldsfp->i8count == 0); 1175 1175 memcpy(buf, oldsfp, oldsize);
+8 -8
fs/xfs/libxfs/xfs_inode_fork.c
··· 94 94 return 0; 95 95 96 96 ASSERT(ip->i_afp == NULL); 97 - ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS); 97 + ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_NOFS); 98 98 99 99 switch (dip->di_aformat) { 100 100 case XFS_DINODE_FMT_LOCAL: ··· 147 147 148 148 if (size) { 149 149 real_size = roundup(mem_size, 4); 150 - ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS); 150 + ifp->if_u1.if_data = kmem_alloc(real_size, KM_NOFS); 151 151 memcpy(ifp->if_u1.if_data, data, size); 152 152 if (zero_terminate) 153 153 ifp->if_u1.if_data[size] = '\0'; ··· 302 302 } 303 303 304 304 ifp->if_broot_bytes = size; 305 - ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS); 305 + ifp->if_broot = kmem_alloc(size, KM_NOFS); 306 306 ASSERT(ifp->if_broot != NULL); 307 307 /* 308 308 * Copy and convert from the on-disk structure ··· 367 367 */ 368 368 if (ifp->if_broot_bytes == 0) { 369 369 new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, rec_diff); 370 - ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS); 370 + ifp->if_broot = kmem_alloc(new_size, KM_NOFS); 371 371 ifp->if_broot_bytes = (int)new_size; 372 372 return; 373 373 } ··· 382 382 new_max = cur_max + rec_diff; 383 383 new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max); 384 384 ifp->if_broot = kmem_realloc(ifp->if_broot, new_size, 385 - KM_SLEEP | KM_NOFS); 385 + KM_NOFS); 386 386 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, 387 387 ifp->if_broot_bytes); 388 388 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, ··· 408 408 else 409 409 new_size = 0; 410 410 if (new_size > 0) { 411 - new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS); 411 + new_broot = kmem_alloc(new_size, KM_NOFS); 412 412 /* 413 413 * First copy over the btree block header. 414 414 */ ··· 492 492 * We enforce that here. 
493 493 */ 494 494 ifp->if_u1.if_data = kmem_realloc(ifp->if_u1.if_data, 495 - roundup(new_size, 4), KM_SLEEP | KM_NOFS); 495 + roundup(new_size, 4), KM_NOFS); 496 496 ifp->if_bytes = new_size; 497 497 } 498 498 ··· 683 683 return; 684 684 685 685 ip->i_cowfp = kmem_zone_zalloc(xfs_ifork_zone, 686 - KM_SLEEP | KM_NOFS); 686 + KM_NOFS); 687 687 ip->i_cowfp->if_flags = XFS_IFEXTENTS; 688 688 ip->i_cformat = XFS_DINODE_FMT_EXTENTS; 689 689 ip->i_cnextents = 0;
+2 -2
fs/xfs/libxfs/xfs_refcount.c
··· 1189 1189 blockcount); 1190 1190 1191 1191 ri = kmem_alloc(sizeof(struct xfs_refcount_intent), 1192 - KM_SLEEP | KM_NOFS); 1192 + KM_NOFS); 1193 1193 INIT_LIST_HEAD(&ri->ri_list); 1194 1194 ri->ri_type = type; 1195 1195 ri->ri_startblock = startblock; ··· 1602 1602 if (be32_to_cpu(rec->refc.rc_refcount) != 1) 1603 1603 return -EFSCORRUPTED; 1604 1604 1605 - rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), KM_SLEEP); 1605 + rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), 0); 1606 1606 xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec); 1607 1607 list_add_tail(&rr->rr_list, debris); 1608 1608
+1 -1
fs/xfs/libxfs/xfs_rmap.c
··· 2287 2287 bmap->br_blockcount, 2288 2288 bmap->br_state); 2289 2289 2290 - ri = kmem_alloc(sizeof(struct xfs_rmap_intent), KM_SLEEP | KM_NOFS); 2290 + ri = kmem_alloc(sizeof(struct xfs_rmap_intent), KM_NOFS); 2291 2291 INIT_LIST_HEAD(&ri->ri_list); 2292 2292 ri->ri_type = type; 2293 2293 ri->ri_owner = owner;
+1 -1
fs/xfs/scrub/attr.c
··· 80 80 * without the inode lock held, which means we can sleep. 81 81 */ 82 82 if (sc->flags & XCHK_TRY_HARDER) { 83 - error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, KM_SLEEP); 83 + error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, 0); 84 84 if (error) 85 85 return error; 86 86 }
+1 -1
fs/xfs/scrub/fscounters.c
··· 125 125 struct xchk_fscounters *fsc; 126 126 int error; 127 127 128 - sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), KM_SLEEP); 128 + sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), 0); 129 129 if (!sc->buf) 130 130 return -ENOMEM; 131 131 fsc = sc->buf;
+1 -1
fs/xfs/scrub/symlink.c
··· 22 22 struct xfs_inode *ip) 23 23 { 24 24 /* Allocate the buffer without the inode lock held. */ 25 - sc->buf = kmem_zalloc_large(XFS_SYMLINK_MAXLEN + 1, KM_SLEEP); 25 + sc->buf = kmem_zalloc_large(XFS_SYMLINK_MAXLEN + 1, 0); 26 26 if (!sc->buf) 27 27 return -ENOMEM; 28 28
+2 -2
fs/xfs/xfs_acl.c
··· 135 135 * go out to the disk. 136 136 */ 137 137 len = XFS_ACL_MAX_SIZE(ip->i_mount); 138 - xfs_acl = kmem_zalloc_large(len, KM_SLEEP); 138 + xfs_acl = kmem_zalloc_large(len, 0); 139 139 if (!xfs_acl) 140 140 return ERR_PTR(-ENOMEM); 141 141 ··· 180 180 struct xfs_acl *xfs_acl; 181 181 int len = XFS_ACL_MAX_SIZE(ip->i_mount); 182 182 183 - xfs_acl = kmem_zalloc_large(len, KM_SLEEP); 183 + xfs_acl = kmem_zalloc_large(len, 0); 184 184 if (!xfs_acl) 185 185 return -ENOMEM; 186 186
+1 -1
fs/xfs/xfs_attr_inactive.c
··· 147 147 * Allocate storage for a list of all the "remote" value extents. 148 148 */ 149 149 size = count * sizeof(xfs_attr_inactive_list_t); 150 - list = kmem_alloc(size, KM_SLEEP); 150 + list = kmem_alloc(size, 0); 151 151 152 152 /* 153 153 * Identify each of the "remote" value extents.
+1 -1
fs/xfs/xfs_attr_list.c
··· 109 109 * It didn't all fit, so we have to sort everything on hashval. 110 110 */ 111 111 sbsize = sf->hdr.count * sizeof(*sbuf); 112 - sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS); 112 + sbp = sbuf = kmem_alloc(sbsize, KM_NOFS); 113 113 114 114 /* 115 115 * Scan the attribute list for the rest of the entries, storing
+2 -2
fs/xfs/xfs_bmap_item.c
··· 141 141 { 142 142 struct xfs_bui_log_item *buip; 143 143 144 - buip = kmem_zone_zalloc(xfs_bui_zone, KM_SLEEP); 144 + buip = kmem_zone_zalloc(xfs_bui_zone, 0); 145 145 146 146 xfs_log_item_init(mp, &buip->bui_item, XFS_LI_BUI, &xfs_bui_item_ops); 147 147 buip->bui_format.bui_nextents = XFS_BUI_MAX_FAST_EXTENTS; ··· 218 218 { 219 219 struct xfs_bud_log_item *budp; 220 220 221 - budp = kmem_zone_zalloc(xfs_bud_zone, KM_SLEEP); 221 + budp = kmem_zone_zalloc(xfs_bud_zone, 0); 222 222 xfs_log_item_init(tp->t_mountp, &budp->bud_item, XFS_LI_BUD, 223 223 &xfs_bud_item_ops); 224 224 budp->bud_buip = buip;
+1 -1
fs/xfs/xfs_buf.c
··· 1741 1741 { 1742 1742 xfs_buftarg_t *btp; 1743 1743 1744 - btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS); 1744 + btp = kmem_zalloc(sizeof(*btp), KM_NOFS); 1745 1745 1746 1746 btp->bt_mount = mp; 1747 1747 btp->bt_dev = bdev->bd_dev;
+2 -2
fs/xfs/xfs_buf_item.c
··· 702 702 } 703 703 704 704 bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format), 705 - KM_SLEEP); 705 + 0); 706 706 if (!bip->bli_formats) 707 707 return -ENOMEM; 708 708 return 0; ··· 747 747 return 0; 748 748 } 749 749 750 - bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP); 750 + bip = kmem_zone_zalloc(xfs_buf_item_zone, 0); 751 751 xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops); 752 752 bip->bli_buf = bp; 753 753
+1 -1
fs/xfs/xfs_dquot.c
··· 440 440 { 441 441 struct xfs_dquot *dqp; 442 442 443 - dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP); 443 + dqp = kmem_zone_zalloc(xfs_qm_dqzone, 0); 444 444 445 445 dqp->dq_flags = type; 446 446 dqp->q_core.d_id = cpu_to_be32(id);
+1 -1
fs/xfs/xfs_dquot_item.c
··· 347 347 { 348 348 struct xfs_qoff_logitem *qf; 349 349 350 - qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), KM_SLEEP); 350 + qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), 0); 351 351 352 352 xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ? 353 353 &xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
+1 -1
fs/xfs/xfs_error.c
··· 213 213 struct xfs_mount *mp) 214 214 { 215 215 mp->m_errortag = kmem_zalloc(sizeof(unsigned int) * XFS_ERRTAG_MAX, 216 - KM_SLEEP | KM_MAYFAIL); 216 + KM_MAYFAIL); 217 217 if (!mp->m_errortag) 218 218 return -ENOMEM; 219 219
+1 -1
fs/xfs/xfs_extent_busy.c
··· 33 33 struct rb_node **rbp; 34 34 struct rb_node *parent = NULL; 35 35 36 - new = kmem_zalloc(sizeof(struct xfs_extent_busy), KM_SLEEP); 36 + new = kmem_zalloc(sizeof(struct xfs_extent_busy), 0); 37 37 new->agno = agno; 38 38 new->bno = bno; 39 39 new->length = len;
+4 -4
fs/xfs/xfs_extfree_item.c
··· 163 163 if (nextents > XFS_EFI_MAX_FAST_EXTENTS) { 164 164 size = (uint)(sizeof(xfs_efi_log_item_t) + 165 165 ((nextents - 1) * sizeof(xfs_extent_t))); 166 - efip = kmem_zalloc(size, KM_SLEEP); 166 + efip = kmem_zalloc(size, 0); 167 167 } else { 168 - efip = kmem_zone_zalloc(xfs_efi_zone, KM_SLEEP); 168 + efip = kmem_zone_zalloc(xfs_efi_zone, 0); 169 169 } 170 170 171 171 xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops); ··· 333 333 if (nextents > XFS_EFD_MAX_FAST_EXTENTS) { 334 334 efdp = kmem_zalloc(sizeof(struct xfs_efd_log_item) + 335 335 (nextents - 1) * sizeof(struct xfs_extent), 336 - KM_SLEEP); 336 + 0); 337 337 } else { 338 - efdp = kmem_zone_zalloc(xfs_efd_zone, KM_SLEEP); 338 + efdp = kmem_zone_zalloc(xfs_efd_zone, 0); 339 339 } 340 340 341 341 xfs_log_item_init(tp->t_mountp, &efdp->efd_item, XFS_LI_EFD,
+1 -1
fs/xfs/xfs_icache.c
··· 40 40 * KM_MAYFAIL and return NULL here on ENOMEM. Set the 41 41 * code up to do this anyway. 42 42 */ 43 - ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP); 43 + ip = kmem_zone_alloc(xfs_inode_zone, 0); 44 44 if (!ip) 45 45 return NULL; 46 46 if (inode_init_always(mp->m_super, VFS_I(ip))) {
+1 -1
fs/xfs/xfs_icreate_item.c
··· 89 89 { 90 90 struct xfs_icreate_item *icp; 91 91 92 - icp = kmem_zone_zalloc(xfs_icreate_zone, KM_SLEEP); 92 + icp = kmem_zone_zalloc(xfs_icreate_zone, 0); 93 93 94 94 xfs_log_item_init(tp->t_mountp, &icp->ic_item, XFS_LI_ICREATE, 95 95 &xfs_icreate_item_ops);
+1 -1
fs/xfs/xfs_inode.c
··· 2018 2018 if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK)) 2019 2019 return 0; 2020 2020 2021 - iu = kmem_zalloc(sizeof(*iu), KM_SLEEP | KM_NOFS); 2021 + iu = kmem_zalloc(sizeof(*iu), KM_NOFS); 2022 2022 iu->iu_agino = prev_agino; 2023 2023 iu->iu_next_unlinked = this_agino; 2024 2024
+1 -1
fs/xfs/xfs_inode_item.c
··· 651 651 struct xfs_inode_log_item *iip; 652 652 653 653 ASSERT(ip->i_itemp == NULL); 654 - iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP); 654 + iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, 0); 655 655 656 656 iip->ili_inode = ip; 657 657 xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE,
+2 -2
fs/xfs/xfs_ioctl.c
··· 396 396 if (IS_ERR(dentry)) 397 397 return PTR_ERR(dentry); 398 398 399 - kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP); 399 + kbuf = kmem_zalloc_large(al_hreq.buflen, 0); 400 400 if (!kbuf) 401 401 goto out_dput; 402 402 ··· 434 434 435 435 if (*len > XFS_XATTR_SIZE_MAX) 436 436 return -EINVAL; 437 - kbuf = kmem_zalloc_large(*len, KM_SLEEP); 437 + kbuf = kmem_zalloc_large(*len, 0); 438 438 if (!kbuf) 439 439 return -ENOMEM; 440 440
+1 -1
fs/xfs/xfs_ioctl32.c
··· 381 381 return PTR_ERR(dentry); 382 382 383 383 error = -ENOMEM; 384 - kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP); 384 + kbuf = kmem_zalloc_large(al_hreq.buflen, 0); 385 385 if (!kbuf) 386 386 goto out_dput; 387 387
+2 -2
fs/xfs/xfs_itable.c
··· 169 169 ASSERT(breq->icount == 1); 170 170 171 171 bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat), 172 - KM_SLEEP | KM_MAYFAIL); 172 + KM_MAYFAIL); 173 173 if (!bc.buf) 174 174 return -ENOMEM; 175 175 ··· 243 243 return 0; 244 244 245 245 bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat), 246 - KM_SLEEP | KM_MAYFAIL); 246 + KM_MAYFAIL); 247 247 if (!bc.buf) 248 248 return -ENOMEM; 249 249
+1 -1
fs/xfs/xfs_iwalk.c
··· 616 616 if (xfs_pwork_ctl_want_abort(&pctl)) 617 617 break; 618 618 619 - iwag = kmem_zalloc(sizeof(struct xfs_iwalk_ag), KM_SLEEP); 619 + iwag = kmem_zalloc(sizeof(struct xfs_iwalk_ag), 0); 620 620 iwag->mp = mp; 621 621 iwag->iwalk_fn = iwalk_fn; 622 622 iwag->data = data;
+1 -2
fs/xfs/xfs_log.c
··· 428 428 XFS_STATS_INC(mp, xs_try_logspace); 429 429 430 430 ASSERT(*ticp == NULL); 431 - tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, 432 - KM_SLEEP); 431 + tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, 0); 433 432 *ticp = tic; 434 433 435 434 xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
+5 -5
fs/xfs/xfs_log_cil.c
··· 38 38 struct xlog_ticket *tic; 39 39 40 40 tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0, 41 - KM_SLEEP|KM_NOFS); 41 + KM_NOFS); 42 42 43 43 /* 44 44 * set the current reservation to zero so we know to steal the basic ··· 186 186 */ 187 187 kmem_free(lip->li_lv_shadow); 188 188 189 - lv = kmem_alloc_large(buf_size, KM_SLEEP | KM_NOFS); 189 + lv = kmem_alloc_large(buf_size, KM_NOFS); 190 190 memset(lv, 0, xlog_cil_iovec_space(niovecs)); 191 191 192 192 lv->lv_item = lip; ··· 660 660 if (!cil) 661 661 return 0; 662 662 663 - new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS); 663 + new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_NOFS); 664 664 new_ctx->ticket = xlog_cil_ticket_alloc(log); 665 665 666 666 down_write(&cil->xc_ctx_lock); ··· 1179 1179 struct xfs_cil *cil; 1180 1180 struct xfs_cil_ctx *ctx; 1181 1181 1182 - cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL); 1182 + cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL); 1183 1183 if (!cil) 1184 1184 return -ENOMEM; 1185 1185 1186 - ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL); 1186 + ctx = kmem_zalloc(sizeof(*ctx), KM_MAYFAIL); 1187 1187 if (!ctx) { 1188 1188 kmem_free(cil); 1189 1189 return -ENOMEM;
+8 -8
fs/xfs/xfs_log_recover.c
··· 1960 1960 } 1961 1961 } 1962 1962 1963 - bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP); 1963 + bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0); 1964 1964 bcp->bc_blkno = buf_f->blf_blkno; 1965 1965 bcp->bc_len = buf_f->blf_len; 1966 1966 bcp->bc_refcount = 1; ··· 2930 2930 if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) { 2931 2931 in_f = item->ri_buf[0].i_addr; 2932 2932 } else { 2933 - in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), KM_SLEEP); 2933 + in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), 0); 2934 2934 need_free = 1; 2935 2935 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f); 2936 2936 if (error) ··· 4161 4161 { 4162 4162 xlog_recover_item_t *item; 4163 4163 4164 - item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP); 4164 + item = kmem_zalloc(sizeof(xlog_recover_item_t), 0); 4165 4165 INIT_LIST_HEAD(&item->ri_list); 4166 4166 list_add_tail(&item->ri_list, head); 4167 4167 } ··· 4201 4201 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr; 4202 4202 old_len = item->ri_buf[item->ri_cnt-1].i_len; 4203 4203 4204 - ptr = kmem_realloc(old_ptr, len + old_len, KM_SLEEP); 4204 + ptr = kmem_realloc(old_ptr, len + old_len, 0); 4205 4205 memcpy(&ptr[old_len], dp, len); 4206 4206 item->ri_buf[item->ri_cnt-1].i_len += len; 4207 4207 item->ri_buf[item->ri_cnt-1].i_addr = ptr; ··· 4261 4261 return 0; 4262 4262 } 4263 4263 4264 - ptr = kmem_alloc(len, KM_SLEEP); 4264 + ptr = kmem_alloc(len, 0); 4265 4265 memcpy(ptr, dp, len); 4266 4266 in_f = (struct xfs_inode_log_format *)ptr; 4267 4267 ··· 4289 4289 item->ri_total = in_f->ilf_size; 4290 4290 item->ri_buf = 4291 4291 kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t), 4292 - KM_SLEEP); 4292 + 0); 4293 4293 } 4294 4294 ASSERT(item->ri_total > item->ri_cnt); 4295 4295 /* Description region is ri_buf[0] */ ··· 4423 4423 * This is a new transaction so allocate a new recovery container to 4424 4424 * hold the recovery ops that will follow. 
4425 4425 */ 4426 - trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP); 4426 + trans = kmem_zalloc(sizeof(struct xlog_recover), 0); 4427 4427 trans->r_log_tid = tid; 4428 4428 trans->r_lsn = be64_to_cpu(rhead->h_lsn); 4429 4429 INIT_LIST_HEAD(&trans->r_itemq); ··· 5527 5527 */ 5528 5528 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE * 5529 5529 sizeof(struct list_head), 5530 - KM_SLEEP); 5530 + 0); 5531 5531 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) 5532 5532 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]); 5533 5533
+1 -1
fs/xfs/xfs_mount.c
··· 82 82 if (hole < 0) { 83 83 xfs_uuid_table = kmem_realloc(xfs_uuid_table, 84 84 (xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table), 85 - KM_SLEEP); 85 + 0); 86 86 hole = xfs_uuid_table_size++; 87 87 } 88 88 xfs_uuid_table[hole] = *uuid;
+2 -2
fs/xfs/xfs_mru_cache.c
··· 333 333 if (!(grp_time = msecs_to_jiffies(lifetime_ms) / grp_count)) 334 334 return -EINVAL; 335 335 336 - if (!(mru = kmem_zalloc(sizeof(*mru), KM_SLEEP))) 336 + if (!(mru = kmem_zalloc(sizeof(*mru), 0))) 337 337 return -ENOMEM; 338 338 339 339 /* An extra list is needed to avoid reaping up to a grp_time early. */ 340 340 mru->grp_count = grp_count + 1; 341 - mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP); 341 + mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), 0); 342 342 343 343 if (!mru->lists) { 344 344 err = -ENOMEM;
+2 -2
fs/xfs/xfs_qm.c
··· 642 642 643 643 ASSERT(XFS_IS_QUOTA_RUNNING(mp)); 644 644 645 - qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); 645 + qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), 0); 646 646 647 647 error = list_lru_init(&qinf->qi_lru); 648 648 if (error) ··· 978 978 if (qip->i_d.di_nblocks == 0) 979 979 return 0; 980 980 981 - map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP); 981 + map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0); 982 982 983 983 lblkno = 0; 984 984 maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
+3 -3
fs/xfs/xfs_refcount_item.c
··· 144 144 ASSERT(nextents > 0); 145 145 if (nextents > XFS_CUI_MAX_FAST_EXTENTS) 146 146 cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents), 147 - KM_SLEEP); 147 + 0); 148 148 else 149 - cuip = kmem_zone_zalloc(xfs_cui_zone, KM_SLEEP); 149 + cuip = kmem_zone_zalloc(xfs_cui_zone, 0); 150 150 151 151 xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops); 152 152 cuip->cui_format.cui_nextents = nextents; ··· 223 223 { 224 224 struct xfs_cud_log_item *cudp; 225 225 226 - cudp = kmem_zone_zalloc(xfs_cud_zone, KM_SLEEP); 226 + cudp = kmem_zone_zalloc(xfs_cud_zone, 0); 227 227 xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD, 228 228 &xfs_cud_item_ops); 229 229 cudp->cud_cuip = cuip;
+3 -3
fs/xfs/xfs_rmap_item.c
··· 142 142 143 143 ASSERT(nextents > 0); 144 144 if (nextents > XFS_RUI_MAX_FAST_EXTENTS) 145 - ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), KM_SLEEP); 145 + ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0); 146 146 else 147 - ruip = kmem_zone_zalloc(xfs_rui_zone, KM_SLEEP); 147 + ruip = kmem_zone_zalloc(xfs_rui_zone, 0); 148 148 149 149 xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops); 150 150 ruip->rui_format.rui_nextents = nextents; ··· 244 244 { 245 245 struct xfs_rud_log_item *rudp; 246 246 247 - rudp = kmem_zone_zalloc(xfs_rud_zone, KM_SLEEP); 247 + rudp = kmem_zone_zalloc(xfs_rud_zone, 0); 248 248 xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD, 249 249 &xfs_rud_item_ops); 250 250 rudp->rud_ruip = ruip;
+2 -2
fs/xfs/xfs_rtalloc.c
··· 865 865 * lower bound on the minimum level with any free extents. We can 866 866 * continue without the cache if it couldn't be allocated. 867 867 */ 868 - mp->m_rsum_cache = kmem_zalloc_large(rbmblocks, KM_SLEEP); 868 + mp->m_rsum_cache = kmem_zalloc_large(rbmblocks, 0); 869 869 if (!mp->m_rsum_cache) 870 870 xfs_warn(mp, "could not allocate realtime summary cache"); 871 871 } ··· 963 963 /* 964 964 * Allocate a new (fake) mount/sb. 965 965 */ 966 - nmp = kmem_alloc(sizeof(*nmp), KM_SLEEP); 966 + nmp = kmem_alloc(sizeof(*nmp), 0); 967 967 /* 968 968 * Loop over the bitmap blocks. 969 969 * We will do everything one bitmap block at a time.
+2 -2
fs/xfs/xfs_trans.c
··· 90 90 91 91 trace_xfs_trans_dup(tp, _RET_IP_); 92 92 93 - ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP); 93 + ntp = kmem_zone_zalloc(xfs_trans_zone, 0); 94 94 95 95 /* 96 96 * Initialize the new transaction structure. ··· 263 263 * GFP_NOFS allocation context so that we avoid lockdep false positives 264 264 * by doing GFP_KERNEL allocations inside sb_start_intwrite(). 265 265 */ 266 - tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP); 266 + tp = kmem_zone_zalloc(xfs_trans_zone, 0); 267 267 if (!(flags & XFS_TRANS_NO_WRITECOUNT)) 268 268 sb_start_intwrite(mp->m_super); 269 269
+1 -1
fs/xfs/xfs_trans_dquot.c
··· 863 863 xfs_trans_alloc_dqinfo( 864 864 xfs_trans_t *tp) 865 865 { 866 - tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, KM_SLEEP); 866 + tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, 0); 867 867 } 868 868 869 869 void