// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_quota.h"
#include "xfs_rtgroup.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_health.h"
#include "xfs_rtbitmap.h"
#include "xfs_icache.h"
#include "xfs_zone_alloc.h"

#define XFS_ALLOC_ALIGN(mp, off) \
	(((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)

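/*
 * Worked example (values are illustrative only, not taken from this file):
 * with a 64k allocation size, mp->m_allocsize_log is 16, so
 * XFS_ALLOC_ALIGN() simply clears the low 16 bits of the byte offset:
 *
 *	XFS_ALLOC_ALIGN(mp, 0x12345)
 *		== (0x12345 >> 16) << 16
 *		== 0x10000
 *
 * i.e. the offset is rounded down to the allocation size granularity.
 */
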
static int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
	return -EFSCORRUPTED;
}

u64
xfs_iomap_inode_sequence(
	struct xfs_inode	*ip,
	u16			iomap_flags)
{
	u64			cookie = 0;

	if (iomap_flags & IOMAP_F_XATTR)
		return READ_ONCE(ip->i_af.if_seq);
	if ((iomap_flags & IOMAP_F_SHARED) && ip->i_cowfp)
		cookie = (u64)READ_ONCE(ip->i_cowfp->if_seq) << 32;
	return cookie | READ_ONCE(ip->i_df.if_seq);
}

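/*
 * Illustrative note (editorial, not upstream documentation): the validity
 * cookie packs two fork sequence numbers into one u64. For example, with a
 * data fork sequence of 7 and a COW fork sequence of 3 on a shared mapping,
 * the cookie is (3ULL << 32) | 7. A later bump of either fork's if_seq
 * changes the cookie and so invalidates any cached iomap built from it.
 */
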
/*
 * Check that the iomap passed to us is still valid for the given offset and
 * length.
 */
static bool
xfs_iomap_valid(
	struct inode		*inode,
	const struct iomap	*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);

	if (iomap->validity_cookie !=
			xfs_iomap_inode_sequence(ip, iomap->flags)) {
		trace_xfs_iomap_invalid(ip, iomap);
		return false;
	}

	XFS_ERRORTAG_DELAY(ip->i_mount, XFS_ERRTAG_WRITE_DELAY_MS);
	return true;
}

static const struct iomap_folio_ops xfs_iomap_folio_ops = {
	.iomap_valid		= xfs_iomap_valid,
};

int
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap,
	unsigned int		mapping_flags,
	u16			iomap_flags,
	u64			sequence_cookie)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) {
		xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
		return xfs_alert_fsblock_zero(ip, imap);
	}

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK ||
		   isnullstartblock(imap->br_startblock)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_DELALLOC;
	} else {
		xfs_daddr_t	daddr = xfs_fsb_to_db(ip, imap->br_startblock);

		iomap->addr = BBTOB(daddr);
		if (mapping_flags & IOMAP_DAX)
			iomap->addr += target->bt_dax_part_off;

		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;

		/*
		 * Mark iomaps starting at the first sector of an RTG as a
		 * merge boundary so that each I/O completion is contained
		 * within a single RTG. Set the flag in iomap_flags so it
		 * survives the unconditional iomap->flags assignment below.
		 */
		if (XFS_IS_REALTIME_INODE(ip) && xfs_has_rtgroups(mp) &&
		    xfs_rtbno_is_group_start(mp, imap->br_startblock))
			iomap_flags |= IOMAP_F_BOUNDARY;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	if (mapping_flags & IOMAP_DAX)
		iomap->dax_dev = target->bt_daxdev;
	else
		iomap->bdev = target->bt_bdev;
	iomap->flags = iomap_flags;

	if (xfs_ipincount(ip) &&
	    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		iomap->flags |= IOMAP_F_DIRTY;

	iomap->validity_cookie = sequence_cookie;
	iomap->folio_ops = &xfs_iomap_folio_ops;
	return 0;
}

static void
xfs_hole_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
	iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
	iomap->bdev = target->bt_bdev;
	iomap->dax_dev = target->bt_daxdev;
}

static inline xfs_fileoff_t
xfs_iomap_end_fsb(
	struct xfs_mount	*mp,
	loff_t			offset,
	loff_t			count)
{
	ASSERT(offset <= mp->m_super->s_maxbytes);
	return min(XFS_B_TO_FSB(mp, offset + count),
		   XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
}

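/*
 * Conversion reminder (illustrative, assuming a 4k filesystem block):
 * XFS_B_TO_FSB() rounds a byte count up to the next block, while
 * XFS_B_TO_FSBT() truncates. So for offset = 6000 and count = 3000:
 *
 *	XFS_B_TO_FSBT(mp, 6000)	== 1	(start block, rounded down)
 *	XFS_B_TO_FSB(mp, 9000)	== 3	(end block, rounded up)
 *
 * which is why callers pair xfs_iomap_end_fsb() with XFS_B_TO_FSBT() for
 * the start offset, so partial blocks at either end stay in the mapping.
 */
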
static xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && xfs_has_swalloc(mp))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	return align;
}

/*
 * Check if last_fsb is outside the last extent, and if so grow it to the next
 * stripe unit boundary.
 */
xfs_fileoff_t
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
	xfs_extlen_t		extsz = xfs_get_extsz_hint(ip);
	xfs_extlen_t		align = xfs_eof_alignment(ip);
	struct xfs_bmbt_irec	irec;
	struct xfs_iext_cursor	icur;

	ASSERT(!xfs_need_iread_extents(ifp));

	/*
	 * Always round up the allocation request to the extent hint boundary.
	 */
	if (extsz) {
		if (align)
			align = roundup_64(align, extsz);
		else
			align = extsz;
	}

	if (align) {
		xfs_fileoff_t	aligned_end_fsb = roundup_64(end_fsb, align);

		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &irec) ||
		    aligned_end_fsb >= irec.br_startoff + irec.br_blockcount)
			return aligned_end_fsb;
	}

	return end_fsb;
}

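/*
 * Worked example for the alignment above (numbers are illustrative only):
 * with a stripe unit of align = 16 blocks and an extent size hint of
 * extsz = 12 blocks, the combined alignment becomes roundup_64(16, 12) = 24
 * blocks, so an end_fsb of 50 is extended to roundup_64(50, 24) = 72,
 * provided the aligned offset does not land inside the last extent.
 */
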
int
xfs_iomap_write_direct(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		count_fsb,
	unsigned int		flags,
	struct xfs_bmbt_irec	*imap,
	u64			*seq)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_filblks_t		resaligned;
	int			nimaps;
	unsigned int		dblocks, rblocks;
	bool			force = false;
	int			error;
	int			bmapi_flags = XFS_BMAPI_PREALLOC;
	int			nr_exts = XFS_IEXT_ADD_NOSPLIT_CNT;

	ASSERT(count_fsb > 0);

	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
					   xfs_get_extsz_hint(ip));
	if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
		dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		rblocks = resaligned;
	} else {
		dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		rblocks = 0;
	}

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction. Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation. Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX. This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (flags & IOMAP_DAX) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (imap->br_state == XFS_EXT_UNWRITTEN) {
			force = true;
			nr_exts = XFS_IEXT_WRITE_UNWRITTEN_CNT;
			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks,
			rblocks, force, &tp);
	if (error)
		return error;

	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK, nr_exts);
	if (error)
		goto out_trans_cancel;

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, 0,
				imap, &nimaps);
	if (error)
		goto out_trans_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) {
		xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
		error = xfs_alert_fsblock_zero(ip, imap);
	}

out_unlock:
	*seq = xfs_iomap_inode_sequence(ip, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		alloc_blocks)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);
	struct xfs_dquot_res	*res;
	struct xfs_dquot_pre	*pre;

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	if (XFS_IS_REALTIME_INODE(ip)) {
		res = &dq->q_rtb;
		pre = &dq->q_rtb_prealloc;
	} else {
		res = &dq->q_blk;
		pre = &dq->q_blk_prealloc;
	}

	/* no hi watermark, no throttle */
	if (!pre->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (res->reserved + alloc_blocks < pre->q_prealloc_lo_wmark)
		return false;

	return true;
}

STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		*qblocks,
	int			*qshift,
	int64_t			*qfreesp)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);
	struct xfs_dquot_res	*res;
	struct xfs_dquot_pre	*pre;
	int64_t			freesp;
	int			shift = 0;

	if (!dq) {
		res = NULL;
		pre = NULL;
	} else if (XFS_IS_REALTIME_INODE(ip)) {
		res = &dq->q_rtb;
		pre = &dq->q_rtb_prealloc;
	} else {
		res = &dq->q_blk;
		pre = &dq->q_blk_prealloc;
	}

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!res || res->reserved >= pre->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = pre->q_prealloc_hi_wmark - res->reserved;
	if (freesp < pre->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < pre->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < pre->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

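/*
 * Worked example for the quota throttle above (illustrative numbers): if
 * the hi watermark leaves freesp = 100 blocks, and that is below the 3%
 * low-space threshold but not the 1% one, shift becomes 2 + 2 = 4, i.e.
 * the preallocation is later divided by 2^4 = 16. A more aggressive
 * (freesp >> shift) than the current (*qblocks >> *qshift) wins, so the
 * tightest quota dictates the throttle.
 */
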
static int64_t
xfs_iomap_freesp(
	struct xfs_mount	*mp,
	unsigned int		idx,
	uint64_t		low_space[XFS_LOWSP_MAX],
	int			*shift)
{
	int64_t			freesp;

	freesp = xfs_estimate_freecounter(mp, idx);
	if (freesp < low_space[XFS_LOWSP_5_PCNT]) {
		*shift = 2;
		if (freesp < low_space[XFS_LOWSP_4_PCNT])
			(*shift)++;
		if (freesp < low_space[XFS_LOWSP_3_PCNT])
			(*shift)++;
		if (freesp < low_space[XFS_LOWSP_2_PCNT])
			(*shift)++;
		if (freesp < low_space[XFS_LOWSP_1_PCNT])
			(*shift)++;
	}
	return freesp;
}

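/*
 * Illustrative example of the ladder above: dropping below the 5%
 * low-space mark sets *shift to 2, and each further mark (4%, 3%, 2%, 1%)
 * adds one. Free space below the 3% mark therefore yields *shift == 4,
 * and the caller ends up dividing its preallocation by 2^4 == 16.
 */
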
/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows. Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to being full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset,
	loff_t			count,
	struct xfs_iext_cursor	*icur)
{
	struct xfs_iext_cursor	ncur = *icur;
	struct xfs_bmbt_irec	prev, got;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	xfs_fsblock_t		alloc_blocks = 0;
	xfs_extlen_t		plen;
	int			shift = 0;
	int			qshift = 0;

	/*
	 * As an exception we don't do any preallocation at all if the file is
	 * smaller than the minimum preallocation and we are using the default
	 * dynamic preallocation scheme, as it is likely this is the only write
	 * to the file that is going to be done.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks))
		return 0;

	/*
	 * Use the minimum preallocation size for small files or if we are
	 * writing right after a hole.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_prev_extent(ifp, &ncur, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_allocsize_blocks;

	/*
	 * Take the size of the preceding data extents as the basis for the
	 * preallocation size. Note that we don't care if the previous extents
	 * are written or not.
	 */
	plen = prev.br_blockcount;
	while (xfs_iext_prev_extent(ifp, &ncur, &got)) {
		if (plen > XFS_MAX_BMBT_EXTLEN / 2 ||
		    isnullstartblock(got.br_startblock) ||
		    got.br_startoff + got.br_blockcount != prev.br_startoff ||
		    got.br_startblock + got.br_blockcount != prev.br_startblock)
			break;
		plen += got.br_blockcount;
		prev = got;
	}

	/*
	 * If the size of the extents is greater than half the maximum extent
	 * length, then use the current offset as the basis. This ensures that
	 * for large files the preallocation size always extends to
	 * XFS_MAX_BMBT_EXTLEN rather than falling short due to things like
	 * stripe unit/width alignment of real extents.
	 */
	alloc_blocks = plen * 2;
	if (alloc_blocks > XFS_MAX_BMBT_EXTLEN)
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	qblocks = alloc_blocks;

	/*
	 * XFS_MAX_BMBT_EXTLEN is not a power of two value but we round the
	 * prealloc down to the nearest power of two value after throttling.
	 * To prevent the round down from unconditionally reducing the maximum
	 * supported prealloc size, we round up first, apply appropriate
	 * throttling, round down and cap the value to XFS_MAX_BMBT_EXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(XFS_MAX_BMBT_EXTLEN),
				       alloc_blocks);

	if (unlikely(XFS_IS_REALTIME_INODE(ip)))
		freesp = xfs_rtbxlen_to_blen(mp,
				xfs_iomap_freesp(mp, XC_FREE_RTEXTENTS,
					mp->m_low_rtexts, &shift));
	else
		freesp = xfs_iomap_freesp(mp, XC_FREE_BLOCKS, mp->m_low_space,
				&shift);

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = min(alloc_blocks, qblocks);
	shift = max(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > XFS_MAX_BMBT_EXTLEN)
		alloc_blocks = XFS_MAX_BMBT_EXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than XFS_MAX_BMBT_EXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
	if (alloc_blocks < mp->m_allocsize_blocks)
		alloc_blocks = mp->m_allocsize_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_allocsize_blocks);
	return alloc_blocks;
}

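/*
 * End-to-end example of the sizing logic above (illustrative numbers): a
 * contiguous run of plen = 1000 blocks doubles to alloc_blocks = 2000; a
 * combined shift of 3 throttles that to 250, which rounddown_pow_of_two()
 * trims to 128. If free space were below 128 blocks, the "squash hard"
 * loop would keep dividing by 16, and the result is never allowed to drop
 * below mp->m_allocsize_blocks.
 */
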
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count,
	bool		update_isize)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	struct inode	*inode = VFS_I(ip);
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits. We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	/* Attach dquots so that bmbt splits are accounted correctly. */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that we
		 * complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks,
				0, true, &tp);
		if (error)
			return error;

		error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
				XFS_IEXT_WRITE_UNWRITTEN_CNT);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, resblks, &imap,
					&nimaps);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go. We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;
		if (update_isize && i_size > i_size_read(inode))
			i_size_write(inode, i_size);
		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_disk_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (unlikely(!xfs_valid_startblock(ip, imap.br_startblock))) {
			xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
			return xfs_alert_fsblock_zero(ip, &imap);
		}

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

static inline bool
imap_needs_alloc(
	struct inode		*inode,
	unsigned		flags,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	/* don't allocate blocks when just zeroing */
	if (flags & IOMAP_ZERO)
		return false;
	if (!nimaps ||
	    imap->br_startblock == HOLESTARTBLOCK ||
	    imap->br_startblock == DELAYSTARTBLOCK)
		return true;
	/* we convert unwritten extents before copying the data for DAX */
	if ((flags & IOMAP_DAX) && imap->br_state == XFS_EXT_UNWRITTEN)
		return true;
	return false;
}

static inline bool
imap_needs_cow(
	struct xfs_inode	*ip,
	unsigned int		flags,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	if (!xfs_is_cow_inode(ip))
		return false;

	/* when zeroing we don't have to COW holes or unwritten extents */
	if (flags & (IOMAP_UNSHARE | IOMAP_ZERO)) {
		if (!nimaps ||
		    imap->br_startblock == HOLESTARTBLOCK ||
		    imap->br_state == XFS_EXT_UNWRITTEN)
			return false;
	}

	return true;
}

/*
 * Extents that are not yet cached require exclusive access; don't block
 * for IOMAP_NOWAIT.
 *
 * This is basically an opencoded xfs_ilock_data_map_shared() call, but with
 * support for IOMAP_NOWAIT.
 */
static int
xfs_ilock_for_iomap(
	struct xfs_inode	*ip,
	unsigned		flags,
	unsigned		*lockmode)
{
	if (flags & IOMAP_NOWAIT) {
		if (xfs_need_iread_extents(&ip->i_df))
			return -EAGAIN;
		if (!xfs_ilock_nowait(ip, *lockmode))
			return -EAGAIN;
	} else {
		if (xfs_need_iread_extents(&ip->i_df))
			*lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, *lockmode);
	}

	return 0;
}

/*
 * Check that the imap we are going to return to the caller spans the entire
 * range that the caller requested for the IO.
 */
static bool
imap_spans_range(
	struct xfs_bmbt_irec	*imap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	if (imap->br_startoff > offset_fsb)
		return false;
	if (imap->br_startoff + imap->br_blockcount < end_fsb)
		return false;
	return true;
}

static bool
xfs_bmap_hw_atomic_write_possible(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsize_t		len = XFS_FSB_TO_B(mp, end_fsb - offset_fsb);

	/*
	 * Atomic writes are required to be naturally aligned on disk, which
	 * ensures that we adhere to the block layer rules that we won't
	 * straddle any boundary or violate write alignment requirements.
	 */
	if (!IS_ALIGNED(imap->br_startblock, imap->br_blockcount))
		return false;

	/*
	 * Spanning multiple extents would mean that multiple BIOs would be
	 * issued, and so we would lose the atomicity required for
	 * REQ_ATOMIC-based atomic writes.
	 */
	if (!imap_spans_range(imap, offset_fsb, end_fsb))
		return false;

	/*
	 * The ->iomap_begin caller should ensure this, but check anyway.
	 */
	return len <= xfs_inode_buftarg(ip)->bt_awu_max;
}

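/*
 * Alignment example (illustrative): an extent with br_startblock == 64 and
 * br_blockcount == 8 is naturally aligned (64 % 8 == 0) and can be issued
 * as a single REQ_ATOMIC bio, while br_startblock == 60 with the same
 * length is not, because the resulting I/O could straddle a hardware
 * atomic write boundary.
 */
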
static int
xfs_direct_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap, cmap;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, length);
	xfs_fileoff_t		orig_end_fsb = end_fsb;
	int			nimaps = 1, error = 0;
	bool			shared = false;
	u16			iomap_flags = 0;
	bool			needs_alloc;
	unsigned int		lockmode;
	u64			seq;

	ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));

	if (xfs_is_shutdown(mp))
		return -EIO;

	/*
	 * Writes that span EOF might trigger an IO size update on completion,
	 * so consider them to be dirty for the purposes of O_DSYNC even if
	 * no other metadata changes are pending or have been made here.
	 */
	if (offset + length > i_size_read(inode))
		iomap_flags |= IOMAP_F_DIRTY;

	/* HW-offload atomics are always used in this path */
	if (flags & IOMAP_ATOMIC)
		iomap_flags |= IOMAP_F_ATOMIC_BIO;

	/*
	 * COW writes may allocate delalloc space or convert unwritten COW
	 * extents, so we need to make sure to take the lock exclusively here.
	 */
	if (xfs_is_cow_inode(ip))
		lockmode = XFS_ILOCK_EXCL;
	else
		lockmode = XFS_ILOCK_SHARED;

relock:
	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;

	/*
	 * The reflink iflag could have changed since the earlier unlocked
	 * check, so check it again and relock if needed.
	 */
	if (xfs_is_cow_inode(ip) && lockmode == XFS_ILOCK_SHARED) {
		xfs_iunlock(ip, lockmode);
		lockmode = XFS_ILOCK_EXCL;
		goto relock;
	}

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (error)
		goto out_unlock;

	if (imap_needs_cow(ip, flags, &imap, nimaps)) {
		error = -EAGAIN;
		if (flags & IOMAP_NOWAIT)
			goto out_unlock;

		/* may drop and re-acquire the ilock */
		error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
				&lockmode,
				(flags & IOMAP_DIRECT) || IS_DAX(inode));
		if (error)
			goto out_unlock;
		if (shared) {
			if ((flags & IOMAP_ATOMIC) &&
			    !xfs_bmap_hw_atomic_write_possible(ip, &cmap,
					offset_fsb, end_fsb)) {
				error = -ENOPROTOOPT;
				goto out_unlock;
			}
			goto out_found_cow;
		}
		end_fsb = imap.br_startoff + imap.br_blockcount;
		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
	}

	needs_alloc = imap_needs_alloc(inode, flags, &imap, nimaps);

	if (flags & IOMAP_ATOMIC) {
		error = -ENOPROTOOPT;
		/*
		 * If we allocate less than what is required for the write
		 * then we may end up with multiple extents, which means that
		 * REQ_ATOMIC-based I/O cannot be used, so avoid this
		 * possibility.
		 */
		if (needs_alloc && orig_end_fsb - offset_fsb > 1)
			goto out_unlock;

		if (!xfs_bmap_hw_atomic_write_possible(ip, &imap, offset_fsb,
				orig_end_fsb))
			goto out_unlock;
	}

	if (needs_alloc)
		goto allocate_blocks;

	/*
	 * NOWAIT and OVERWRITE I/O need to span the entire requested I/O with
	 * a single map so that we avoid partial IO failures due to the rest of
	 * the I/O range not covered by this map triggering an EAGAIN condition
	 * when it is subsequently mapped and aborting the I/O.
	 */
	if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY)) {
		error = -EAGAIN;
		if (!imap_spans_range(&imap, offset_fsb, end_fsb))
			goto out_unlock;
	}

	/*
	 * For overwrite only I/O, we cannot convert unwritten extents without
	 * requiring sub-block zeroing. This can only be done under an
	 * exclusive IOLOCK, hence return -EAGAIN if this is not a written
	 * extent to tell the caller to try again.
	 */
	if (flags & IOMAP_OVERWRITE_ONLY) {
		error = -EAGAIN;
		if (imap.br_state != XFS_EXT_NORM &&
		    ((offset | length) & mp->m_blockmask))
			goto out_unlock;
	}

	seq = xfs_iomap_inode_sequence(ip, iomap_flags);
	xfs_iunlock(ip, lockmode);
	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags, seq);

allocate_blocks:
	error = -EAGAIN;
	if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY))
		goto out_unlock;

	/*
	 * We cap the maximum length we map to a sane size to keep the chunks
	 * of work done here somewhat symmetric with the work writeback does.
	 * This is a completely arbitrary number pulled out of thin air as a
	 * best guess for initial testing.
	 *
	 * Note that the value needs to be less than 32-bits wide until the
	 * lower level functions are updated.
	 */
	length = min_t(loff_t, length, 1024 * PAGE_SIZE);
	end_fsb = xfs_iomap_end_fsb(mp, offset, length);

	if (offset + length > XFS_ISIZE(ip))
		end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
	else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
		end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
	xfs_iunlock(ip, lockmode);

	error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
			flags, &imap, &seq);
	if (error)
		return error;

	trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
				 iomap_flags | IOMAP_F_NEW, seq);

out_found_cow:
	length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
	trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
	if (imap.br_startblock != HOLESTARTBLOCK) {
		seq = xfs_iomap_inode_sequence(ip, 0);
		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0, seq);
		if (error)
			goto out_unlock;
	}
	seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
	xfs_iunlock(ip, lockmode);
	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED, seq);

out_unlock:
	if (lockmode)
		xfs_iunlock(ip, lockmode);
	return error;
}

const struct iomap_ops xfs_direct_write_iomap_ops = {
	.iomap_begin		= xfs_direct_write_iomap_begin,
};

#ifdef CONFIG_XFS_RT
/*
 * This is really simple. The space has already been reserved before taking the
 * IOLOCK, the actual block allocation is done just before submitting the bio
 * and only recorded in the extent map on I/O completion.
 */
static int
xfs_zoned_direct_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	int			error;

	ASSERT(!(flags & IOMAP_OVERWRITE_ONLY));

	/*
	 * Needs to be pushed down into the allocator so that only writes into
	 * a single zone can be supported.
	 */
	if (flags & IOMAP_NOWAIT)
		return -EAGAIN;

	/*
	 * Ensure the extent list is in memory so that we don't have to read it
	 * in from the I/O completion handler.
	 */
	if (xfs_need_iread_extents(&ip->i_df)) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;
	}

	iomap->type = IOMAP_MAPPED;
	iomap->bdev = ip->i_mount->m_rtdev_targp->bt_bdev;
	iomap->offset = offset;
	iomap->length = length;
	/*
	 * Set both flags in one assignment; assigning them separately would
	 * let the second store silently discard the first.
	 */
	iomap->flags = IOMAP_F_DIRTY | IOMAP_F_ANON_WRITE;
	return 0;
}

const struct iomap_ops xfs_zoned_direct_write_iomap_ops = {
	.iomap_begin		= xfs_zoned_direct_write_iomap_begin,
};
#endif /* CONFIG_XFS_RT */

static int
xfs_atomic_write_cow_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	const xfs_fileoff_t	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, length);
	xfs_filblks_t		count_fsb = end_fsb - offset_fsb;
	int			nmaps = 1;
	xfs_filblks_t		resaligned;
	struct xfs_bmbt_irec	cmap;
	struct xfs_iext_cursor	icur;
	struct xfs_trans	*tp;
	unsigned int		dblocks = 0, rblocks = 0;
	int			error;
	u64			seq;

	ASSERT(flags & IOMAP_WRITE);
	ASSERT(flags & IOMAP_DIRECT);

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (!xfs_can_sw_atomic_write(mp)) {
		ASSERT(xfs_can_sw_atomic_write(mp));
		return -EINVAL;
	}

	/* blocks are always allocated in this path */
	if (flags & IOMAP_NOWAIT)
		return -EAGAIN;

	trace_xfs_iomap_atomic_write_cow(ip, offset, length);

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (!ip->i_cowfp) {
		ASSERT(!xfs_is_reflink_inode(ip));
		xfs_ifork_init_cow(ip);
	}

	if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
		cmap.br_startoff = end_fsb;
	if (cmap.br_startoff <= offset_fsb) {
		xfs_trim_extent(&cmap, offset_fsb, count_fsb);
		goto found;
	}

	end_fsb = cmap.br_startoff;
	count_fsb = end_fsb - offset_fsb;

	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
			xfs_get_cowextsz_hint(ip));
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	if (XFS_IS_REALTIME_INODE(ip)) {
		dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		rblocks = resaligned;
	} else {
		dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		rblocks = 0;
	}

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks,
			rblocks, false, &tp);
	if (error)
		return error;

	/* extent layout could have changed since the unlock, so check again */
	if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
		cmap.br_startoff = end_fsb;
	if (cmap.br_startoff <= offset_fsb) {
		xfs_trim_extent(&cmap, offset_fsb, count_fsb);
		xfs_trans_cancel(tp);
		goto found;
	}

	/*
	 * Allocate the entire reservation as unwritten blocks.
	 *
	 * Use XFS_BMAPI_EXTSZALIGN to hint at aligning new extents according to
	 * extszhint, such that there will be a greater chance that future
	 * atomic writes to that same range will be aligned (and don't require
	 * this COW-based method).
	 */
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
			XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC |
			XFS_BMAPI_EXTSZALIGN, 0, &cmap, &nmaps);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_unlock;
	}

	xfs_inode_set_cowblocks_tag(ip);
	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

found:
	if (cmap.br_state != XFS_EXT_NORM) {
		error = xfs_reflink_convert_cow_locked(ip, offset_fsb,
				count_fsb);
		if (error)
			goto out_unlock;
		cmap.br_state = XFS_EXT_NORM;
	}

	length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
	trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
	seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED, seq);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

const struct iomap_ops xfs_atomic_write_cow_iomap_ops = {
	.iomap_begin		= xfs_atomic_write_cow_iomap_begin,
};

static int
xfs_dax_write_iomap_end(
	struct inode		*inode,
	loff_t			pos,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);

	if (!xfs_is_cow_inode(ip))
		return 0;

	if (!written)
		return xfs_reflink_cancel_cow_range(ip, pos, length, true);

	return xfs_reflink_end_cow(ip, pos, written);
}

const struct iomap_ops xfs_dax_write_iomap_ops = {
	.iomap_begin	= xfs_direct_write_iomap_begin,
	.iomap_end	= xfs_dax_write_iomap_end,
};

/*
 * Convert a hole to a delayed allocation.
 */
static void
xfs_bmap_add_extent_hole_delay(
	struct xfs_inode	*ip,	/* incore inode pointer */
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*new)	/* new data to add to file extents */
{
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_filblks_t		newlen = 0;	/* new indirect size */
	xfs_filblks_t		oldlen = 0;	/* old indirect size */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		temp;	/* temp for indirect calculations */

	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(isnullstartblock(new->br_startblock));

	/*
	 * Check and set flags if this segment has a left neighbor
	 */
	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if the current (right) segment exists.
	 * If it doesn't exist, we're converting the hole at end-of-file.
	 */
	if (xfs_iext_get_extent(ifp, icur, &right)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * Set contiguity flags on the left and right neighbors.
	 * Don't let extents get too large, even if the pieces are contiguous.
	 */
	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     (left.br_blockcount + new->br_blockcount +
	      right.br_blockcount <= XFS_MAX_BMBT_EXTLEN)))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the contiguity flags.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with delayed allocations
		 * on the left and on the right.
		 * Merge all three into a single extent record.
		 */
		temp = left.br_blockcount + new->br_blockcount +
			right.br_blockcount;

		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		left.br_startblock = nullstartblock(newlen);
		left.br_blockcount = temp;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &left);
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		temp = left.br_blockcount + new->br_blockcount;

		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		left.br_blockcount = temp;
		left.br_startblock = nullstartblock(newlen);

		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &left);
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		temp = new->br_blockcount + right.br_blockcount;
		oldlen = startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		right.br_startoff = new->br_startoff;
		right.br_startblock = nullstartblock(newlen);
		right.br_blockcount = temp;
		xfs_iext_update_extent(ip, state, icur, &right);
		break;

	case 0:
		/*
		 * New allocation is not contiguous with another
		 * delayed allocation.
		 * Insert a new entry.
		 */
		oldlen = newlen = 0;
		xfs_iext_insert(ip, icur, new, state);
		break;
	}
	if (oldlen != newlen) {
		ASSERT(oldlen > newlen);
		xfs_add_fdblocks(ip->i_mount, oldlen - newlen);

		/*
		 * Nothing to do for disk quota accounting here.
		 */
		xfs_mod_delalloc(ip, 0, (int64_t)newlen - oldlen);
	}
}

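/*
 * Indirect-reservation example for the merge cases above (illustrative
 * numbers): if the left and new delalloc extents reserved 8 and 6 indirect
 * blocks respectively, oldlen == 14; if the merged extent only needs
 * xfs_bmap_worst_indlen() == 9, then newlen == 9 and the 5 surplus blocks
 * are returned to the free-space counter via xfs_add_fdblocks().
 */
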
/*
 * Add a delayed allocation extent to an inode. Blocks are reserved from the
 * global pool and the extent inserted into the inode in-core extent tree.
 *
 * On entry, got refers to the first extent beyond the offset of the extent to
 * allocate or eof is specified if no such extent exists. On return, got refers
 * to the extent record that was inserted to the inode fork.
 *
 * Note that the allocated extent may have been merged with contiguous extents
 * during insertion into the inode fork. Thus, got does not reflect the current
 * state of the inode fork on return. If necessary, the caller can use icur to
 * look up the updated record in the inode fork.
 */
static int
xfs_bmapi_reserve_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fileoff_t		off,
	xfs_filblks_t		len,
	xfs_filblks_t		prealloc,
	struct xfs_bmbt_irec	*got,
	struct xfs_iext_cursor	*icur,
	int			eof)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	xfs_extlen_t		alen;
	xfs_extlen_t		indlen;
	uint64_t		fdblocks;
	int			error;
	xfs_fileoff_t		aoff;
	bool			use_cowextszhint =
					whichfork == XFS_COW_FORK && !prealloc;

retry:
	/*
	 * Cap the alloc length. Keep track of prealloc so we know whether to
	 * tag the inode before we return.
	 */
	aoff = off;
	alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN);
	if (!eof)
		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
	if (prealloc && alen >= len)
		prealloc = alen - len;

	/*
	 * If we're targeting the COW fork but aren't creating a speculative
	 * posteof preallocation, try to expand the reservation to align with
	 * the COW extent size hint if there's sufficient free space.
	 *
	 * Unlike the data fork, the CoW cancellation functions will free all
	 * the reservations at inactivation, so we don't require that every
	 * delalloc reservation have a dirty pagecache.
	 */
	if (use_cowextszhint) {
		struct xfs_bmbt_irec	prev;
		xfs_extlen_t		extsz = xfs_get_cowextsz_hint(ip);

		if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
			prev.br_startoff = NULLFILEOFF;

		error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
					       1, 0, &aoff, &alen);
		ASSERT(!error);
	}

	/*
	 * Make a transaction-less quota reservation for delayed allocation
	 * blocks. This number gets adjusted later. We return if we haven't
	 * allocated blocks already inside this loop.
	 */
	error = xfs_quota_reserve_blkres(ip, alen);
	if (error)
		goto out;

	/*
	 * Split changing sb for alen and indlen since they could be coming
	 * from different places.
	 */
	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
	ASSERT(indlen > 0);

	fdblocks = indlen;
	if (XFS_IS_REALTIME_INODE(ip)) {
		ASSERT(!xfs_is_zoned_inode(ip));
		error = xfs_dec_frextents(mp, xfs_blen_to_rtbxlen(mp, alen));
		if (error)
			goto out_unreserve_quota;
	} else {
		fdblocks += alen;
	}

	error = xfs_dec_fdblocks(mp, fdblocks, false);
	if (error)
		goto out_unreserve_frextents;

	ip->i_delayed_blks += alen;
	xfs_mod_delalloc(ip, alen, indlen);

	got->br_startoff = aoff;
	got->br_startblock = nullstartblock(indlen);
	got->br_blockcount = alen;
	got->br_state = XFS_EXT_NORM;

	xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);

	/*
	 * Tag the inode if blocks were preallocated. Note that COW fork
	 * preallocation can occur at the start or end of the extent, even when
	 * prealloc == 0, so we must also check the aligned offset and length.
	 */
	if (whichfork == XFS_DATA_FORK && prealloc)
		xfs_inode_set_eofblocks_tag(ip);
	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
		xfs_inode_set_cowblocks_tag(ip);

	return 0;

out_unreserve_frextents:
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_add_frextents(mp, xfs_blen_to_rtbxlen(mp, alen));
out_unreserve_quota:
	if (XFS_IS_QUOTA_ON(mp))
		xfs_quota_unreserve_blkres(ip, alen);
out:
	if (error == -ENOSPC || error == -EDQUOT) {
		trace_xfs_delalloc_enospc(ip, off, len);

		if (prealloc || use_cowextszhint) {
			/* retry without any preallocation */
			use_cowextszhint = false;
			prealloc = 0;
			goto retry;
		}
	}
	return error;
}

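/*
 * Accounting example for the reservation above (illustrative numbers): for
 * a data-device inode with alen == 32 blocks and a worst-case indirect
 * (bmbt) reservation of indlen == 4, fdblocks == 36 blocks are taken from
 * the free-space counter. A realtime inode instead takes the 32 data
 * blocks from the rt extent counter and only the 4 indirect blocks from
 * fdblocks.
 */
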
static int
xfs_zoned_buffered_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct iomap_iter	*iter =
		container_of(iomap, struct iomap_iter, iomap);
	struct xfs_zone_alloc_ctx *ac = iter->private;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, count);
	u16			iomap_flags = IOMAP_F_SHARED;
	unsigned int		lockmode = XFS_ILOCK_EXCL;
	xfs_filblks_t		count_fsb;
	xfs_extlen_t		indlen;
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	ASSERT(!xfs_get_extsz_hint(ip));
	ASSERT(!(flags & IOMAP_UNSHARE));
	ASSERT(ac);

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
	if (error)
		goto out_unlock;

	/*
	 * For zeroing operations check if there is any data to zero first.
	 *
	 * For regular writes we always need to allocate new blocks, but need to
	 * provide the source mapping when the range is unaligned to support
	 * read-modify-write of the whole block in the page cache.
	 *
	 * In either case we need to limit the reported range to the boundaries
	 * of the source map in the data fork.
	 */
	if (!IS_ALIGNED(offset, mp->m_sb.sb_blocksize) ||
	    !IS_ALIGNED(offset + count, mp->m_sb.sb_blocksize) ||
	    (flags & IOMAP_ZERO)) {
		struct xfs_bmbt_irec	smap;
		struct xfs_iext_cursor	scur;

		if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &scur,
				&smap))
			smap.br_startoff = end_fsb; /* fake hole until EOF */
		if (smap.br_startoff > offset_fsb) {
			/*
			 * We never need to allocate blocks for zeroing a hole.
			 */
			if (flags & IOMAP_ZERO) {
				xfs_hole_to_iomap(ip, iomap, offset_fsb,
						smap.br_startoff);
				goto out_unlock;
			}
			end_fsb = min(end_fsb, smap.br_startoff);
		} else {
			end_fsb = min(end_fsb,
				smap.br_startoff + smap.br_blockcount);
			xfs_trim_extent(&smap, offset_fsb,
					end_fsb - offset_fsb);
			error = xfs_bmbt_to_iomap(ip, srcmap, &smap, flags, 0,
					xfs_iomap_inode_sequence(ip, 0));
			if (error)
				goto out_unlock;
		}
	}

	if (!ip->i_cowfp)
		xfs_ifork_init_cow(ip);

	if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got))
		got.br_startoff = end_fsb;
	if (got.br_startoff <= offset_fsb) {
		trace_xfs_reflink_cow_found(ip, &got);
		goto done;
	}

	/*
	 * Cap the maximum length to keep the chunks of work done here somewhat
	 * symmetric with the work writeback does.
	 */
	end_fsb = min(end_fsb, got.br_startoff);
	count_fsb = min3(end_fsb - offset_fsb, XFS_MAX_BMBT_EXTLEN,
			XFS_B_TO_FSB(mp, 1024 * PAGE_SIZE));

	/*
	 * The block reservation is supposed to cover all blocks that the
	 * operation could possibly write, but there is a nasty corner case
	 * where blocks could be stolen from underneath us:
	 *
	 * 1) while this thread iterates over a larger buffered write,
	 * 2) another thread is causing a write fault that calls into
	 *    ->page_mkwrite in the range this thread writes to, using up the
	 *    delalloc reservation created by a previous call to this function.
	 * 3) another thread does direct I/O on the range that the write fault
	 *    happened on, which causes writeback of the dirty data.
	 * 4) this then sets the stale flag, which cuts the current iomap
	 *    iteration short, causing the new call to ->iomap_begin that gets
	 *    us here again, but now without a sufficient reservation.
	 *
	 * This is a very unusual I/O pattern, and nothing but generic/095 is
	 * known to hit it. There's not really much we can do here, so turn this
	 * into a short write.
	 */
	if (count_fsb > ac->reserved_blocks) {
		xfs_warn_ratelimited(mp,
"Short write on ino 0x%llx comm %.20s due to three-way race with write fault and direct I/O",
			ip->i_ino, current->comm);
		count_fsb = ac->reserved_blocks;
		if (!count_fsb) {
			error = -EIO;
			goto out_unlock;
		}
	}

	error = xfs_quota_reserve_blkres(ip, count_fsb);
	if (error)
		goto out_unlock;

	indlen = xfs_bmap_worst_indlen(ip, count_fsb);
	error = xfs_dec_fdblocks(mp, indlen, false);
	if (error)
		goto out_unlock;
	ip->i_delayed_blks += count_fsb;
	xfs_mod_delalloc(ip, count_fsb, indlen);

	got.br_startoff = offset_fsb;
	got.br_startblock = nullstartblock(indlen);
	got.br_blockcount = count_fsb;
	got.br_state = XFS_EXT_NORM;
	xfs_bmap_add_extent_hole_delay(ip, XFS_COW_FORK, &icur, &got);
	ac->reserved_blocks -= count_fsb;
	iomap_flags |= IOMAP_F_NEW;

	trace_xfs_iomap_alloc(ip, offset, XFS_FSB_TO_B(mp, count_fsb),
			XFS_COW_FORK, &got);
done:
	error = xfs_bmbt_to_iomap(ip, iomap, &got, flags, iomap_flags,
			xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED));
out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

static int
xfs_buffered_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, count);
	struct xfs_bmbt_irec	imap, cmap;
	struct xfs_iext_cursor	icur, ccur;
	xfs_fsblock_t		prealloc_blocks = 0;
	bool			eof = false, cow_eof = false, shared = false;
	int			allocfork = XFS_DATA_FORK;
	int			error = 0;
	unsigned int		lockmode = XFS_ILOCK_EXCL;
	unsigned int		iomap_flags = 0;
	u64			seq;

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (xfs_is_zoned_inode(ip))
		return xfs_zoned_buffered_write_iomap_begin(inode, offset,
				count, flags, iomap, srcmap);

	/* we can't use delayed allocations when using extent size hints */
	if (xfs_get_extsz_hint(ip))
		return xfs_direct_write_iomap_begin(inode, offset, count,
				flags, iomap, srcmap);

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
	if (error)
		goto out_unlock;

	/*
	 * Search the data fork first to look up our source mapping. We
	 * always need the data fork map, as we have to return it to the
	 * iomap code so that the higher level write code can read data in to
	 * perform read-modify-write cycles for unaligned writes.
	 */
	eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
	if (eof)
		imap.br_startoff = end_fsb; /* fake hole until the end */

	/* We never need to allocate blocks for zeroing or unsharing a hole. */
	if ((flags & (IOMAP_UNSHARE | IOMAP_ZERO)) &&
	    imap.br_startoff > offset_fsb) {
		xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
		goto out_unlock;
	}

	/*
	 * For zeroing, trim a delalloc extent that extends beyond the EOF
	 * block. If it starts beyond the EOF block, convert it to an
	 * unwritten extent.
	 */
	if ((flags & IOMAP_ZERO) && imap.br_startoff <= offset_fsb &&
	    isnullstartblock(imap.br_startblock)) {
		xfs_fileoff_t eof_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

		if (offset_fsb >= eof_fsb)
			goto convert_delay;
		if (end_fsb > eof_fsb) {
			end_fsb = eof_fsb;
			xfs_trim_extent(&imap, offset_fsb,
					end_fsb - offset_fsb);
		}
	}

	/*
	 * Search the COW fork extent list even if we did not find a data fork
	 * extent. This serves two purposes: first this implements the
	 * speculative preallocation using cowextsize, so that we also unshare
	 * blocks adjacent to shared blocks instead of just the shared blocks
	 * themselves. Second the lookup in the extent list is generally faster
	 * than going out to the shared extent tree.
	 */
	if (xfs_is_cow_inode(ip)) {
		if (!ip->i_cowfp) {
			ASSERT(!xfs_is_reflink_inode(ip));
			xfs_ifork_init_cow(ip);
		}
		cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
				&ccur, &cmap);
		if (!cow_eof && cmap.br_startoff <= offset_fsb) {
			trace_xfs_reflink_cow_found(ip, &cmap);
			goto found_cow;
		}
	}

	if (imap.br_startoff <= offset_fsb) {
		/*
		 * For reflink files we may need a delalloc reservation when
		 * overwriting shared extents. This includes zeroing of
		 * existing extents that contain data.
		 */
		if (!xfs_is_cow_inode(ip) ||
		    ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto found_imap;
		}

		xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);

		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_bmap_trim_cow(ip, &imap, &shared);
		if (error)
			goto out_unlock;

		/* Not shared? Just report the (potentially capped) extent. */
		if (!shared) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto found_imap;
		}

		/*
		 * Fork all the shared blocks from our write offset until the
		 * end of the extent.
		 */
		allocfork = XFS_COW_FORK;
		end_fsb = imap.br_startoff + imap.br_blockcount;
	} else {
		/*
		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
		 * pages to keep the chunks of work done here somewhat
		 * symmetric with the work writeback does. This is a completely
		 * arbitrary number pulled out of thin air.
		 *
		 * Note that the value needs to be less than 32-bits wide until
		 * the lower level functions are updated.
		 */
1840 count = min_t(loff_t, count, 1024 * PAGE_SIZE);
1841 end_fsb = xfs_iomap_end_fsb(mp, offset, count);
1842
1843 if (xfs_is_always_cow_inode(ip))
1844 allocfork = XFS_COW_FORK;
1845 }
1846
1847 if (eof && offset + count > XFS_ISIZE(ip)) {
1848 /*
1849 * Determine the initial size of the preallocation.
1850 * We clean up any extra preallocation when the file is closed.
1851 */
1852 if (xfs_has_allocsize(mp))
1853 prealloc_blocks = mp->m_allocsize_blocks;
1854 else if (allocfork == XFS_DATA_FORK)
1855 prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
1856 offset, count, &icur);
1857 else
1858 prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
1859 offset, count, &ccur);
1860 if (prealloc_blocks) {
1861 xfs_extlen_t align;
1862 xfs_off_t end_offset;
1863 xfs_fileoff_t p_end_fsb;
1864
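			/*
			 * Extend the preallocation from the aligned end of
			 * the write: round offset + count down to the
			 * allocation unit, add the preallocation, honour the
			 * EOF extent size alignment, and clamp the result to
			 * s_maxbytes before converting it back into a block
			 * count relative to the current mapping end.
			 */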
1865 end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1);
1866 p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
1867 prealloc_blocks;
1868
1869 align = xfs_eof_alignment(ip);
1870 if (align)
1871 p_end_fsb = roundup_64(p_end_fsb, align);
1872
1873 p_end_fsb = min(p_end_fsb,
1874 XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
1875 ASSERT(p_end_fsb > offset_fsb);
1876 prealloc_blocks = p_end_fsb - end_fsb;
1877 }
1878 }
1879
1880 /*
1881 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
1882 * them out if the write happens to fail.
1883 */
1884 iomap_flags |= IOMAP_F_NEW;
1885 if (allocfork == XFS_COW_FORK) {
1886 error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
1887 end_fsb - offset_fsb, prealloc_blocks, &cmap,
1888 &ccur, cow_eof);
1889 if (error)
1890 goto out_unlock;
1891
1892 trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
1893 goto found_cow;
1894 }
1895
1896 error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
1897 end_fsb - offset_fsb, prealloc_blocks, &imap, &icur,
1898 eof);
1899 if (error)
1900 goto out_unlock;
1901
1902 trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
1903found_imap:
1904 seq = xfs_iomap_inode_sequence(ip, iomap_flags);
1905 xfs_iunlock(ip, lockmode);
1906 return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags, seq);
1907
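	/*
	 * Zeroing found a delalloc extent that starts at or beyond the EOF
	 * block (see the IOMAP_ZERO handling above). Rather than writing
	 * zeroes, toss the page cache from offset onwards and convert the
	 * reservation directly to an unwritten extent.
	 */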
1908convert_delay:
1909 xfs_iunlock(ip, lockmode);
1910 truncate_pagecache(inode, offset);
1911 error = xfs_bmapi_convert_delalloc(ip, XFS_DATA_FORK, offset,
1912 iomap, NULL);
1913 if (error)
1914 return error;
1915
1916 trace_xfs_iomap_alloc(ip, offset, count, XFS_DATA_FORK, &imap);
1917 return 0;
1918
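	/*
	 * A COW fork mapping covers the start of the range. If the data fork
	 * also has an extent at this offset, return it as the source map so
	 * that existing data can be read; otherwise cap the COW mapping at
	 * the start of the next data fork extent.
	 */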
1919found_cow:
1920 if (imap.br_startoff <= offset_fsb) {
1921 error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0,
1922 xfs_iomap_inode_sequence(ip, 0));
1923 if (error)
1924 goto out_unlock;
1925 } else {
1926 xfs_trim_extent(&cmap, offset_fsb,
1927 imap.br_startoff - offset_fsb);
1928 }
1929
1930 iomap_flags |= IOMAP_F_SHARED;
1931 seq = xfs_iomap_inode_sequence(ip, iomap_flags);
1932 xfs_iunlock(ip, lockmode);
1933 return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, iomap_flags, seq);
1934
1935out_unlock:
1936 xfs_iunlock(ip, lockmode);
1937 return error;
1938}
1939
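/*
 * Punch out delalloc blocks over the given byte range. The fork to operate
 * on follows from the mapping: IOMAP_F_SHARED marks a COW fork mapping,
 * everything else lives in the data fork.
 */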
1940static void
1941xfs_buffered_write_delalloc_punch(
1942 struct inode *inode,
1943 loff_t offset,
1944 loff_t length,
1945 struct iomap *iomap)
1946{
1947 struct iomap_iter *iter =
1948 container_of(iomap, struct iomap_iter, iomap);
1949
1950 xfs_bmap_punch_delalloc_range(XFS_I(inode),
1951 (iomap->flags & IOMAP_F_SHARED) ?
1952 XFS_COW_FORK : XFS_DATA_FORK,
1953 offset, offset + length, iter->private);
1954}
1955
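/*
 * Called at the end of each buffered write iteration to release any
 * delalloc blocks that were reserved for the write but not covered by the
 * bytes actually written.
 */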
1956static int
1957xfs_buffered_write_iomap_end(
1958 struct inode *inode,
1959 loff_t offset,
1960 loff_t length,
1961 ssize_t written,
1962 unsigned flags,
1963 struct iomap *iomap)
1964{
1965 loff_t start_byte, end_byte;
1966
1967 /* If we didn't reserve the blocks, we're not allowed to punch them. */
1968 if (iomap->type != IOMAP_DELALLOC || !(iomap->flags & IOMAP_F_NEW))
1969 return 0;
1970
1971 /*
1972 * iomap_page_mkwrite() will never fail in a way that requires delalloc
1973 * extents that it allocated to be revoked. Hence never try to release
1974 * them here.
1975 */
1976 if (flags & IOMAP_FAULT)
1977 return 0;
1978
1979 /* Nothing to do if we've written the entire delalloc extent */
1980 start_byte = iomap_last_written_block(inode, offset, written);
1981 end_byte = round_up(offset + length, i_blocksize(inode));
1982 if (start_byte >= end_byte)
1983 return 0;
1984
1985 /* For zeroing operations the callers already hold invalidate_lock. */
1986 if (flags & (IOMAP_UNSHARE | IOMAP_ZERO)) {
1987 rwsem_assert_held_write(&inode->i_mapping->invalidate_lock);
1988 iomap_write_delalloc_release(inode, start_byte, end_byte, flags,
1989 iomap, xfs_buffered_write_delalloc_punch);
1990 } else {
1991 filemap_invalidate_lock(inode->i_mapping);
1992 iomap_write_delalloc_release(inode, start_byte, end_byte, flags,
1993 iomap, xfs_buffered_write_delalloc_punch);
1994 filemap_invalidate_unlock(inode->i_mapping);
1995 }
1996
1997 return 0;
1998}
1999
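/*
 * The buffered write path hands these ops to the iomap code, which calls
 * ->iomap_begin and ->iomap_end around each chunk of a write.
 */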
2000const struct iomap_ops xfs_buffered_write_iomap_ops = {
2001 .iomap_begin = xfs_buffered_write_iomap_begin,
2002 .iomap_end = xfs_buffered_write_iomap_end,
2003};
2004
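/*
 * Read-only mapping for the various read paths: no allocation is ever
 * needed, so a single shared-locked extent lookup is enough.
 */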
2005static int
2006xfs_read_iomap_begin(
2007 struct inode *inode,
2008 loff_t offset,
2009 loff_t length,
2010 unsigned flags,
2011 struct iomap *iomap,
2012 struct iomap *srcmap)
2013{
2014 struct xfs_inode *ip = XFS_I(inode);
2015 struct xfs_mount *mp = ip->i_mount;
2016 struct xfs_bmbt_irec imap;
2017 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
2018 xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, length);
2019 int nimaps = 1, error = 0;
2020 bool shared = false;
2021 unsigned int lockmode = XFS_ILOCK_SHARED;
2022 u64 seq;
2023
2024 ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));
2025
2026 if (xfs_is_shutdown(mp))
2027 return -EIO;
2028
2029 error = xfs_ilock_for_iomap(ip, flags, &lockmode);
2030 if (error)
2031 return error;
2032 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
2033 &nimaps, 0);
2034 if (!error && ((flags & IOMAP_REPORT) || IS_DAX(inode)))
2035 error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
2036 seq = xfs_iomap_inode_sequence(ip, shared ? IOMAP_F_SHARED : 0);
2037 xfs_iunlock(ip, lockmode);
2038
2039 if (error)
2040 return error;
2041 trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
2042 return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
2043 shared ? IOMAP_F_SHARED : 0, seq);
2044}
2045
2046const struct iomap_ops xfs_read_iomap_ops = {
2047 .iomap_begin = xfs_read_iomap_begin,
2048};
2049
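/*
 * Map holes and data for SEEK_HOLE/SEEK_DATA. Only the data fork is read
 * in full; COW fork extents over a data fork hole are reported as
 * unwritten so that the generic code probes the page cache for dirty data.
 */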
2050static int
2051xfs_seek_iomap_begin(
2052 struct inode *inode,
2053 loff_t offset,
2054 loff_t length,
2055 unsigned flags,
2056 struct iomap *iomap,
2057 struct iomap *srcmap)
2058{
2059 struct xfs_inode *ip = XFS_I(inode);
2060 struct xfs_mount *mp = ip->i_mount;
2061 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
2062 xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length);
2063 xfs_fileoff_t cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF;
2064 struct xfs_iext_cursor icur;
2065 struct xfs_bmbt_irec imap, cmap;
2066 int error = 0;
2067 unsigned lockmode;
2068 u64 seq;
2069
2070 if (xfs_is_shutdown(mp))
2071 return -EIO;
2072
2073 lockmode = xfs_ilock_data_map_shared(ip);
2074 error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
2075 if (error)
2076 goto out_unlock;
2077
2078 if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
2079 /*
2080		 * If we found a data extent covering the start offset, we are done.
2081 */
2082 if (imap.br_startoff <= offset_fsb)
2083 goto done;
2084 data_fsb = imap.br_startoff;
2085 } else {
2086 /*
2087 * Fake a hole until the end of the file.
2088 */
2089 data_fsb = xfs_iomap_end_fsb(mp, offset, length);
2090 }
2091
2092 /*
2093	 * If a COW fork extent covers the hole, report it, capped to the next
2094	 * data fork extent.
2095 */
2096 if (xfs_inode_has_cow_data(ip) &&
2097 xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
2098 cow_fsb = cmap.br_startoff;
2099 if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
2100 if (data_fsb < cow_fsb + cmap.br_blockcount)
2101 end_fsb = min(end_fsb, data_fsb);
2102 xfs_trim_extent(&cmap, offset_fsb, end_fsb - offset_fsb);
2103 seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
2104 error = xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
2105 IOMAP_F_SHARED, seq);
2106 /*
2107 * This is a COW extent, so we must probe the page cache
2108		 * because there could be dirty page cache backed
2109 * by this extent.
2110 */
2111 iomap->type = IOMAP_UNWRITTEN;
2112 goto out_unlock;
2113 }
2114
2115 /*
2116 * Else report a hole, capped to the next found data or COW extent.
2117 */
2118 if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb)
2119 imap.br_blockcount = cow_fsb - offset_fsb;
2120 else
2121 imap.br_blockcount = data_fsb - offset_fsb;
2122 imap.br_startoff = offset_fsb;
2123 imap.br_startblock = HOLESTARTBLOCK;
2124 imap.br_state = XFS_EXT_NORM;
2125done:
2126 seq = xfs_iomap_inode_sequence(ip, 0);
2127 xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
2128 error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq);
2129out_unlock:
2130 xfs_iunlock(ip, lockmode);
2131 return error;
2132}
2133
2134const struct iomap_ops xfs_seek_iomap_ops = {
2135 .iomap_begin = xfs_seek_iomap_begin,
2136};
2137
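/*
 * Map extents of the attribute fork; used for FIEMAP reporting of
 * extended attribute extents.
 */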
2138static int
2139xfs_xattr_iomap_begin(
2140 struct inode *inode,
2141 loff_t offset,
2142 loff_t length,
2143 unsigned flags,
2144 struct iomap *iomap,
2145 struct iomap *srcmap)
2146{
2147 struct xfs_inode *ip = XFS_I(inode);
2148 struct xfs_mount *mp = ip->i_mount;
2149 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
2150 xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length);
2151 struct xfs_bmbt_irec imap;
2152 int nimaps = 1, error = 0;
2153 unsigned lockmode;
2154	u64			seq;
2155
2156 if (xfs_is_shutdown(mp))
2157 return -EIO;
2158
2159 lockmode = xfs_ilock_attr_map_shared(ip);
2160
2161	/* if there is no attribute fork or it has no extents, return ENOENT */
2162 if (!xfs_inode_has_attr_fork(ip) || !ip->i_af.if_nextents) {
2163 error = -ENOENT;
2164 goto out_unlock;
2165 }
2166
2167 ASSERT(ip->i_af.if_format != XFS_DINODE_FMT_LOCAL);
2168 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
2169 &nimaps, XFS_BMAPI_ATTRFORK);
2170out_unlock:
2171
2172 seq = xfs_iomap_inode_sequence(ip, IOMAP_F_XATTR);
2173 xfs_iunlock(ip, lockmode);
2174
2175 if (error)
2176 return error;
2177 ASSERT(nimaps);
2178 return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_XATTR, seq);
2179}
2180
2181const struct iomap_ops xfs_xattr_iomap_ops = {
2182 .iomap_begin = xfs_xattr_iomap_begin,
2183};
2184
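/*
 * Zero the byte range [pos, pos + len) of the file through the DAX or
 * buffered iomap path as appropriate. Callers must already hold the
 * IOLOCK and MMAPLOCK exclusively, as asserted below.
 */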
2185int
2186xfs_zero_range(
2187 struct xfs_inode *ip,
2188 loff_t pos,
2189 loff_t len,
2190 struct xfs_zone_alloc_ctx *ac,
2191 bool *did_zero)
2192{
2193 struct inode *inode = VFS_I(ip);
2194
2195 xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
2196
2197 if (IS_DAX(inode))
2198 return dax_zero_range(inode, pos, len, did_zero,
2199 &xfs_dax_write_iomap_ops);
2200 return iomap_zero_range(inode, pos, len, did_zero,
2201 &xfs_buffered_write_iomap_ops, ac);
2202}
2203
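/*
 * Zero everything from pos to the end of the containing filesystem block,
 * used when truncating so that stale data beyond the new EOF does not
 * reappear.
 */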
2204int
2205xfs_truncate_page(
2206 struct xfs_inode *ip,
2207 loff_t pos,
2208 struct xfs_zone_alloc_ctx *ac,
2209 bool *did_zero)
2210{
2211 struct inode *inode = VFS_I(ip);
2212
2213 if (IS_DAX(inode))
2214 return dax_truncate_page(inode, pos, did_zero,
2215 &xfs_dax_write_iomap_ops);
2216 return iomap_truncate_page(inode, pos, did_zero,
2217 &xfs_buffered_write_iomap_ops, ac);
2218}