Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xfs: simplify the xfs_iomap_write_direct calling conventions

Move the EOF alignment and checking for the next allocated extent into
the callers to avoid the need to pass the byte-based offset and count
as well as looking at the incoming imap. The added benefit is that
the caller can unlock the incoming ilock and the function doesn't have
funny unbalanced locking contexts.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>

authored by

Christoph Hellwig and committed by
Darrick J. Wong
e696663a 307cdb54

+46 -67
+29 -53
fs/xfs/xfs_iomap.c
··· 148 148 * Check if last_fsb is outside the last extent, and if so grow it to the next 149 149 * stripe unit boundary. 150 150 */ 151 - static xfs_fileoff_t 151 + xfs_fileoff_t 152 152 xfs_iomap_eof_align_last_fsb( 153 153 struct xfs_inode *ip, 154 154 xfs_fileoff_t end_fsb) ··· 185 185 186 186 int 187 187 xfs_iomap_write_direct( 188 - xfs_inode_t *ip, 189 - xfs_off_t offset, 190 - size_t count, 191 - xfs_bmbt_irec_t *imap, 192 - int nmaps) 188 + struct xfs_inode *ip, 189 + xfs_fileoff_t offset_fsb, 190 + xfs_fileoff_t count_fsb, 191 + struct xfs_bmbt_irec *imap) 193 192 { 194 - xfs_mount_t *mp = ip->i_mount; 195 - xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); 196 - xfs_fileoff_t last_fsb = xfs_iomap_end_fsb(mp, offset, count); 197 - xfs_filblks_t count_fsb, resaligned; 198 - xfs_extlen_t extsz; 199 - int nimaps; 200 - int quota_flag; 201 - int rt; 202 - xfs_trans_t *tp; 203 - uint qblocks, resblks, resrtextents; 204 - int error; 205 - int lockmode; 206 - int bmapi_flags = XFS_BMAPI_PREALLOC; 207 - uint tflags = 0; 193 + struct xfs_mount *mp = ip->i_mount; 194 + struct xfs_trans *tp; 195 + xfs_filblks_t resaligned; 196 + int nimaps; 197 + int quota_flag; 198 + uint qblocks, resblks; 199 + unsigned int resrtextents = 0; 200 + int error; 201 + int bmapi_flags = XFS_BMAPI_PREALLOC; 202 + uint tflags = 0; 208 203 209 - rt = XFS_IS_REALTIME_INODE(ip); 210 - extsz = xfs_get_extsz_hint(ip); 211 - lockmode = XFS_ILOCK_SHARED; /* locked by caller */ 212 - 213 - ASSERT(xfs_isilocked(ip, lockmode)); 214 - 215 - if (offset + count > XFS_ISIZE(ip)) { 216 - last_fsb = xfs_iomap_eof_align_last_fsb(ip, last_fsb); 217 - } else { 218 - if (nmaps && (imap->br_startblock == HOLESTARTBLOCK)) 219 - last_fsb = min(last_fsb, (xfs_fileoff_t) 220 - imap->br_blockcount + 221 - imap->br_startoff); 222 - } 223 - count_fsb = last_fsb - offset_fsb; 224 204 ASSERT(count_fsb > 0); 225 - resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, extsz); 226 205 227 - if (unlikely(rt)) { 
206 + resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, 207 + xfs_get_extsz_hint(ip)); 208 + if (unlikely(XFS_IS_REALTIME_INODE(ip))) { 228 209 resrtextents = qblocks = resaligned; 229 210 resrtextents /= mp->m_sb.sb_rextsize; 230 211 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); 231 212 quota_flag = XFS_QMOPT_RES_RTBLKS; 232 213 } else { 233 - resrtextents = 0; 234 214 resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); 235 215 quota_flag = XFS_QMOPT_RES_REGBLKS; 236 216 } 237 217 238 - /* 239 - * Drop the shared lock acquired by the caller, attach the dquot if 240 - * necessary and move on to transaction setup. 241 - */ 242 - xfs_iunlock(ip, lockmode); 243 218 error = xfs_qm_dqattach(ip); 244 219 if (error) 245 220 return error; ··· 244 269 if (error) 245 270 return error; 246 271 247 - lockmode = XFS_ILOCK_EXCL; 248 - xfs_ilock(ip, lockmode); 272 + xfs_ilock(ip, XFS_ILOCK_EXCL); 249 273 250 274 error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag); 251 275 if (error) ··· 281 307 error = xfs_alert_fsblock_zero(ip, imap); 282 308 283 309 out_unlock: 284 - xfs_iunlock(ip, lockmode); 310 + xfs_iunlock(ip, XFS_ILOCK_EXCL); 285 311 return error; 286 312 287 313 out_res_cancel: ··· 781 807 * lower level functions are updated. 782 808 */ 783 809 length = min_t(loff_t, length, 1024 * PAGE_SIZE); 810 + end_fsb = xfs_iomap_end_fsb(mp, offset, length); 784 811 785 - /* 786 - * xfs_iomap_write_direct() expects the shared lock. It is unlocked on 787 - * return. 
788 - */ 789 - if (lockmode == XFS_ILOCK_EXCL) 790 - xfs_ilock_demote(ip, lockmode); 791 - error = xfs_iomap_write_direct(ip, offset, length, &imap, nimaps); 812 + if (offset + length > XFS_ISIZE(ip)) 813 + end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb); 814 + else if (nimaps && imap.br_startblock == HOLESTARTBLOCK) 815 + end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount); 816 + xfs_iunlock(ip, lockmode); 817 + 818 + error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb, 819 + &imap); 792 820 if (error) 793 821 return error; 794 822
+4 -2
fs/xfs/xfs_iomap.h
··· 11 11 struct xfs_inode; 12 12 struct xfs_bmbt_irec; 13 13 14 - int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t, 15 - struct xfs_bmbt_irec *, int); 14 + int xfs_iomap_write_direct(struct xfs_inode *ip, xfs_fileoff_t offset_fsb, 15 + xfs_fileoff_t count_fsb, struct xfs_bmbt_irec *imap); 16 16 int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool); 17 + xfs_fileoff_t xfs_iomap_eof_align_last_fsb(struct xfs_inode *ip, 18 + xfs_fileoff_t end_fsb); 17 19 18 20 int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *, 19 21 struct xfs_bmbt_irec *, u16);
+13 -12
fs/xfs/xfs_pnfs.c
··· 143 143 lock_flags = xfs_ilock_data_map_shared(ip); 144 144 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, 145 145 &imap, &nimaps, bmapi_flags); 146 - xfs_iunlock(ip, lock_flags); 147 - 148 - if (error) 149 - goto out_unlock; 150 146 151 147 ASSERT(!nimaps || imap.br_startblock != DELAYSTARTBLOCK); 152 148 153 - if (write && (!nimaps || imap.br_startblock == HOLESTARTBLOCK)) { 154 - /* 155 - * xfs_iomap_write_direct() expects to take ownership of the 156 - * shared ilock. 157 - */ 158 - xfs_ilock(ip, XFS_ILOCK_SHARED); 159 - error = xfs_iomap_write_direct(ip, offset, length, &imap, 160 - nimaps); 149 + if (!error && write && 150 + (!nimaps || imap.br_startblock == HOLESTARTBLOCK)) { 151 + if (offset + length > XFS_ISIZE(ip)) 152 + end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb); 153 + else if (nimaps && imap.br_startblock == HOLESTARTBLOCK) 154 + end_fsb = min(end_fsb, imap.br_startoff + 155 + imap.br_blockcount); 156 + xfs_iunlock(ip, lock_flags); 157 + 158 + error = xfs_iomap_write_direct(ip, offset_fsb, 159 + end_fsb - offset_fsb, &imap); 161 160 if (error) 162 161 goto out_unlock; 163 162 ··· 169 170 XFS_PREALLOC_SET | XFS_PREALLOC_SYNC); 170 171 if (error) 171 172 goto out_unlock; 173 + } else { 174 + xfs_iunlock(ip, lock_flags); 172 175 } 173 176 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 174 177