jfs: Remove use of folio error flag

Store the blk_status per folio (if we can have multiple metapages per
folio) instead of setting the folio error flag. This will allow us to
reclaim a precious folio flag shortly.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Dave Kleikamp <dave.kleikamp@oracle.com>

Authored by Matthew Wilcox (Oracle) and committed by Dave Kleikamp (commits ee6817e7, 3fefd9b5).

+25 -22
fs/jfs/jfs_metapage.c
··· 76 struct meta_anchor { 77 int mp_count; 78 atomic_t io_count; 79 struct metapage *mp[MPS_PER_PAGE]; 80 }; 81 ··· 139 atomic_inc(&anchor->io_count); 140 } 141 142 - static inline void dec_io(struct folio *folio, void (*handler) (struct folio *)) 143 { 144 struct meta_anchor *anchor = folio->private; 145 146 if (atomic_dec_and_test(&anchor->io_count)) 147 - handler(folio); 148 } 149 150 #else ··· 173 } 174 175 #define inc_io(folio) do {} while(0) 176 - #define dec_io(folio, handler) handler(folio) 177 178 #endif 179 ··· 263 return lblock; 264 } 265 266 - static void last_read_complete(struct folio *folio) 267 { 268 - if (!folio_test_error(folio)) 269 - folio_mark_uptodate(folio); 270 - folio_unlock(folio); 271 } 272 273 static void metapage_read_end_io(struct bio *bio) 274 { 275 struct folio *folio = bio->bi_private; 276 277 - if (bio->bi_status) { 278 - printk(KERN_ERR "metapage_read_end_io: I/O error\n"); 279 - folio_set_error(folio); 280 - } 281 - 282 - dec_io(folio, last_read_complete); 283 bio_put(bio); 284 } 285 ··· 302 LOGSYNC_UNLOCK(log, flags); 303 } 304 305 - static void last_write_complete(struct folio *folio) 306 { 307 struct metapage *mp; 308 unsigned int offset; 309 310 for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) { 311 mp = folio_to_mp(folio, offset); ··· 334 335 BUG_ON(!folio->private); 336 337 - if (bio->bi_status) { 338 - int err = blk_status_to_errno(bio->bi_status); 339 - printk(KERN_ERR "metapage_write_end_io: I/O error\n"); 340 - mapping_set_error(folio->mapping, err); 341 - } 342 - dec_io(folio, last_write_complete); 343 bio_put(bio); 344 } 345 ··· 457 4, bio, sizeof(*bio), 0); 458 bio_put(bio); 459 folio_unlock(folio); 460 - dec_io(folio, last_write_complete); 461 err_out: 462 while (bad_blocks--) 463 - dec_io(folio, last_write_complete); 464 return -EIO; 465 } 466
··· 76 struct meta_anchor { 77 int mp_count; 78 atomic_t io_count; 79 + blk_status_t status; 80 struct metapage *mp[MPS_PER_PAGE]; 81 }; 82 ··· 138 atomic_inc(&anchor->io_count); 139 } 140 141 + static inline void dec_io(struct folio *folio, blk_status_t status, 142 + void (*handler)(struct folio *, blk_status_t)) 143 { 144 struct meta_anchor *anchor = folio->private; 145 146 + if (anchor->status == BLK_STS_OK) 147 + anchor->status = status; 148 + 149 if (atomic_dec_and_test(&anchor->io_count)) 150 + handler(folio, anchor->status); 151 } 152 153 #else ··· 168 } 169 170 #define inc_io(folio) do {} while(0) 171 + #define dec_io(folio, status, handler) handler(folio, status) 172 173 #endif 174 ··· 258 return lblock; 259 } 260 261 + static void last_read_complete(struct folio *folio, blk_status_t status) 262 { 263 + if (status) 264 + printk(KERN_ERR "Read error %d at %#llx\n", status, 265 + folio_pos(folio)); 266 + 267 + folio_end_read(folio, status == 0); 268 } 269 270 static void metapage_read_end_io(struct bio *bio) 271 { 272 struct folio *folio = bio->bi_private; 273 274 + dec_io(folio, bio->bi_status, last_read_complete); 275 bio_put(bio); 276 } 277 ··· 300 LOGSYNC_UNLOCK(log, flags); 301 } 302 303 + static void last_write_complete(struct folio *folio, blk_status_t status) 304 { 305 struct metapage *mp; 306 unsigned int offset; 307 + 308 + if (status) { 309 + int err = blk_status_to_errno(status); 310 + printk(KERN_ERR "metapage_write_end_io: I/O error\n"); 311 + mapping_set_error(folio->mapping, err); 312 + } 313 314 for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) { 315 mp = folio_to_mp(folio, offset); ··· 326 327 BUG_ON(!folio->private); 328 329 + dec_io(folio, bio->bi_status, last_write_complete); 330 bio_put(bio); 331 } 332 ··· 454 4, bio, sizeof(*bio), 0); 455 bio_put(bio); 456 folio_unlock(folio); 457 + dec_io(folio, BLK_STS_OK, last_write_complete); 458 err_out: 459 while (bad_blocks--) 460 + dec_io(folio, BLK_STS_OK, last_write_complete); 461 return 
-EIO; 462 } 463