xfs: a couple getbmap cleanups

- reshuffle various conditionals for data vs attr fork to make the code
more readable
- do fine-grained goto-based error handling
- exit early from conditionals instead of keeping a long else branch around
- allow kmem_alloc to fail

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Eric Sandeen <sandeen@sandeen.net>
Reviewed-by: Felix Blyakher <felixb@sgi.com>
Signed-off-by: Felix Blyakher <felixb@sgi.com>

authored by

Christoph Hellwig and committed by
Felix Blyakher
4be4a00f 2ac00af7

+80 -84
+80 -84
fs/xfs/xfs_bmap.c
··· 5880 5880 void *arg) /* formatter arg */ 5881 5881 { 5882 5882 __int64_t bmvend; /* last block requested */ 5883 - int error; /* return value */ 5883 + int error = 0; /* return value */ 5884 5884 __int64_t fixlen; /* length for -1 case */ 5885 5885 int i; /* extent number */ 5886 5886 int lock; /* lock state */ ··· 5899 5899 5900 5900 mp = ip->i_mount; 5901 5901 iflags = bmv->bmv_iflags; 5902 - 5903 5902 whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK; 5904 - 5905 - /* If the BMV_IF_NO_DMAPI_READ interface bit specified, do not 5906 - * generate a DMAPI read event. Otherwise, if the DM_EVENT_READ 5907 - * bit is set for the file, generate a read event in order 5908 - * that the DMAPI application may do its thing before we return 5909 - * the extents. Usually this means restoring user file data to 5910 - * regions of the file that look like holes. 5911 - * 5912 - * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify 5913 - * BMV_IF_NO_DMAPI_READ so that read events are generated. 5914 - * If this were not true, callers of ioctl( XFS_IOC_GETBMAP ) 5915 - * could misinterpret holes in a DMAPI file as true holes, 5916 - * when in fact they may represent offline user data. 
5917 - */ 5918 - if ((iflags & BMV_IF_NO_DMAPI_READ) == 0 && 5919 - DM_EVENT_ENABLED(ip, DM_EVENT_READ) && 5920 - whichfork == XFS_DATA_FORK) { 5921 - error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, 0, 0, 0, NULL); 5922 - if (error) 5923 - return XFS_ERROR(error); 5924 - } 5925 5903 5926 5904 if (whichfork == XFS_ATTR_FORK) { 5927 5905 if (XFS_IFORK_Q(ip)) { ··· 5914 5936 ip->i_mount); 5915 5937 return XFS_ERROR(EFSCORRUPTED); 5916 5938 } 5917 - } else if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS && 5918 - ip->i_d.di_format != XFS_DINODE_FMT_BTREE && 5919 - ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) 5920 - return XFS_ERROR(EINVAL); 5921 - if (whichfork == XFS_DATA_FORK) { 5939 + 5940 + prealloced = 0; 5941 + fixlen = 1LL << 32; 5942 + } else { 5943 + /* 5944 + * If the BMV_IF_NO_DMAPI_READ interface bit specified, do 5945 + * not generate a DMAPI read event. Otherwise, if the 5946 + * DM_EVENT_READ bit is set for the file, generate a read 5947 + * event in order that the DMAPI application may do its thing 5948 + * before we return the extents. Usually this means restoring 5949 + * user file data to regions of the file that look like holes. 5950 + * 5951 + * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify 5952 + * BMV_IF_NO_DMAPI_READ so that read events are generated. 5953 + * If this were not true, callers of ioctl(XFS_IOC_GETBMAP) 5954 + * could misinterpret holes in a DMAPI file as true holes, 5955 + * when in fact they may represent offline user data. 
5956 + */ 5957 + if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && 5958 + !(iflags & BMV_IF_NO_DMAPI_READ)) { 5959 + error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, 5960 + 0, 0, 0, NULL); 5961 + if (error) 5962 + return XFS_ERROR(error); 5963 + } 5964 + 5965 + if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS && 5966 + ip->i_d.di_format != XFS_DINODE_FMT_BTREE && 5967 + ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) 5968 + return XFS_ERROR(EINVAL); 5969 + 5922 5970 if (xfs_get_extsz_hint(ip) || 5923 5971 ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){ 5924 5972 prealloced = 1; ··· 5953 5949 prealloced = 0; 5954 5950 fixlen = ip->i_size; 5955 5951 } 5956 - } else { 5957 - prealloced = 0; 5958 - fixlen = 1LL << 32; 5959 5952 } 5960 5953 5961 5954 if (bmv->bmv_length == -1) { 5962 5955 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen)); 5963 - bmv->bmv_length = MAX( (__int64_t)(fixlen - bmv->bmv_offset), 5964 - (__int64_t)0); 5965 - } else if (bmv->bmv_length < 0) 5966 - return XFS_ERROR(EINVAL); 5967 - if (bmv->bmv_length == 0) { 5956 + bmv->bmv_length = 5957 + max_t(__int64_t, fixlen - bmv->bmv_offset, 0); 5958 + } else if (bmv->bmv_length == 0) { 5968 5959 bmv->bmv_entries = 0; 5969 5960 return 0; 5961 + } else if (bmv->bmv_length < 0) { 5962 + return XFS_ERROR(EINVAL); 5970 5963 } 5964 + 5971 5965 nex = bmv->bmv_count - 1; 5972 5966 if (nex <= 0) 5973 5967 return XFS_ERROR(EINVAL); 5974 5968 bmvend = bmv->bmv_offset + bmv->bmv_length; 5975 5969 5976 5970 xfs_ilock(ip, XFS_IOLOCK_SHARED); 5977 - 5978 - if (((iflags & BMV_IF_DELALLOC) == 0) && 5979 - (whichfork == XFS_DATA_FORK) && 5980 - (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size)) { 5981 - /* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */ 5982 - error = xfs_flush_pages(ip, (xfs_off_t)0, 5983 - -1, 0, FI_REMAPF); 5984 - if (error) { 5985 - xfs_iunlock(ip, XFS_IOLOCK_SHARED); 5986 - return error; 5971 + if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) { 5972 + if (ip->i_delayed_blks || 
ip->i_size > ip->i_d.di_size) { 5973 + error = xfs_flush_pages(ip, 0, -1, 0, FI_REMAPF); 5974 + if (error) 5975 + goto out_unlock_iolock; 5987 5976 } 5988 - } 5989 5977 5990 - ASSERT(whichfork == XFS_ATTR_FORK || (iflags & BMV_IF_DELALLOC) || 5991 - ip->i_delayed_blks == 0); 5978 + ASSERT(ip->i_delayed_blks == 0); 5979 + } 5992 5980 5993 5981 lock = xfs_ilock_map_shared(ip); 5994 5982 ··· 5991 5995 if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1) 5992 5996 nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1; 5993 5997 5994 - bmapi_flags = xfs_bmapi_aflag(whichfork) | 5995 - ((iflags & BMV_IF_PREALLOC) ? 0 : XFS_BMAPI_IGSTATE); 5998 + bmapi_flags = xfs_bmapi_aflag(whichfork); 5999 + if (!(iflags & BMV_IF_PREALLOC)) 6000 + bmapi_flags |= XFS_BMAPI_IGSTATE; 5996 6001 5997 6002 /* 5998 6003 * Allocate enough space to handle "subnex" maps at a time. 5999 6004 */ 6005 + error = ENOMEM; 6000 6006 subnex = 16; 6001 - map = kmem_alloc(subnex * sizeof(*map), KM_SLEEP); 6007 + map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL); 6008 + if (!map) 6009 + goto out_unlock_ilock; 6002 6010 6003 6011 bmv->bmv_entries = 0; 6004 6012 6005 - if ((XFS_IFORK_NEXTENTS(ip, whichfork) == 0)) { 6006 - if (((iflags & BMV_IF_DELALLOC) == 0) || 6007 - whichfork == XFS_ATTR_FORK) { 6008 - error = 0; 6009 - goto unlock_and_return; 6010 - } 6013 + if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 && 6014 + (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) { 6015 + error = 0; 6016 + goto out_free_map; 6011 6017 } 6012 6018 6013 6019 nexleft = nex; ··· 6021 6023 bmapi_flags, NULL, 0, map, &nmap, 6022 6024 NULL, NULL); 6023 6025 if (error) 6024 - goto unlock_and_return; 6026 + goto out_free_map; 6025 6027 ASSERT(nmap <= subnex); 6026 6028 6027 6029 for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) { 6030 + int full = 0; /* user array is full */ 6031 + 6028 6032 out.bmv_oflags = 0; 6029 6033 if (map[i].br_state == XFS_EXT_UNWRITTEN) 6030 6034 out.bmv_oflags |= BMV_OF_PREALLOC; ··· 6041 
6041 whichfork == XFS_ATTR_FORK) { 6042 6042 /* came to the end of attribute fork */ 6043 6043 out.bmv_oflags |= BMV_OF_LAST; 6044 - goto unlock_and_return; 6045 - } else { 6046 - int full = 0; /* user array is full */ 6047 - 6048 - if (!xfs_getbmapx_fix_eof_hole(ip, &out, 6049 - prealloced, bmvend, 6050 - map[i].br_startblock)) { 6051 - goto unlock_and_return; 6052 - } 6053 - 6054 - /* format results & advance arg */ 6055 - error = formatter(&arg, &out, &full); 6056 - if (error || full) 6057 - goto unlock_and_return; 6058 - nexleft--; 6059 - bmv->bmv_offset = 6060 - out.bmv_offset + out.bmv_length; 6061 - bmv->bmv_length = MAX((__int64_t)0, 6062 - (__int64_t)(bmvend - bmv->bmv_offset)); 6063 - bmv->bmv_entries++; 6044 + goto out_free_map; 6064 6045 } 6046 + 6047 + if (!xfs_getbmapx_fix_eof_hole(ip, &out, prealloced, 6048 + bmvend, map[i].br_startblock)) 6049 + goto out_free_map; 6050 + 6051 + /* format results & advance arg */ 6052 + error = formatter(&arg, &out, &full); 6053 + if (error || full) 6054 + goto out_free_map; 6055 + nexleft--; 6056 + bmv->bmv_offset = 6057 + out.bmv_offset + out.bmv_length; 6058 + bmv->bmv_length = 6059 + max_t(__int64_t, 0, bmvend - bmv->bmv_offset); 6060 + bmv->bmv_entries++; 6065 6061 } 6066 6062 } while (nmap && nexleft && bmv->bmv_length); 6067 6063 6068 - unlock_and_return: 6069 - xfs_iunlock_map_shared(ip, lock); 6070 - xfs_iunlock(ip, XFS_IOLOCK_SHARED); 6071 - 6064 + out_free_map: 6072 6065 kmem_free(map); 6073 - 6066 + out_unlock_ilock: 6067 + xfs_iunlock_map_shared(ip, lock); 6068 + out_unlock_iolock: 6069 + xfs_iunlock(ip, XFS_IOLOCK_SHARED); 6074 6070 return error; 6075 6071 } 6076 6072