jfs: Remove use of folio error flag

Store the blk_status per folio (if we can have multiple metapages per
folio) instead of setting the folio error flag. This will allow us to
reclaim a precious folio flag shortly.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Dave Kleikamp <dave.kleikamp@oracle.com>

Authored by Matthew Wilcox (Oracle) and committed by Dave Kleikamp (commit ee6817e7, parent 3fefd9b5)

+25 -22
fs/jfs/jfs_metapage.c
··· 76 76 struct meta_anchor { 77 77 int mp_count; 78 78 atomic_t io_count; 79 + blk_status_t status; 79 80 struct metapage *mp[MPS_PER_PAGE]; 80 81 }; 81 82 ··· 139 138 atomic_inc(&anchor->io_count); 140 139 } 141 140 142 - static inline void dec_io(struct folio *folio, void (*handler) (struct folio *)) 141 + static inline void dec_io(struct folio *folio, blk_status_t status, 142 + void (*handler)(struct folio *, blk_status_t)) 143 143 { 144 144 struct meta_anchor *anchor = folio->private; 145 145 146 + if (anchor->status == BLK_STS_OK) 147 + anchor->status = status; 148 + 146 149 if (atomic_dec_and_test(&anchor->io_count)) 147 - handler(folio); 150 + handler(folio, anchor->status); 148 151 } 149 152 150 153 #else ··· 173 168 } 174 169 175 170 #define inc_io(folio) do {} while(0) 176 - #define dec_io(folio, handler) handler(folio) 171 + #define dec_io(folio, status, handler) handler(folio, status) 177 172 178 173 #endif 179 174 ··· 263 258 return lblock; 264 259 } 265 260 266 - static void last_read_complete(struct folio *folio) 261 + static void last_read_complete(struct folio *folio, blk_status_t status) 267 262 { 268 - if (!folio_test_error(folio)) 269 - folio_mark_uptodate(folio); 270 - folio_unlock(folio); 263 + if (status) 264 + printk(KERN_ERR "Read error %d at %#llx\n", status, 265 + folio_pos(folio)); 266 + 267 + folio_end_read(folio, status == 0); 271 268 } 272 269 273 270 static void metapage_read_end_io(struct bio *bio) 274 271 { 275 272 struct folio *folio = bio->bi_private; 276 273 277 - if (bio->bi_status) { 278 - printk(KERN_ERR "metapage_read_end_io: I/O error\n"); 279 - folio_set_error(folio); 280 - } 281 - 282 - dec_io(folio, last_read_complete); 274 + dec_io(folio, bio->bi_status, last_read_complete); 283 275 bio_put(bio); 284 276 } 285 277 ··· 302 300 LOGSYNC_UNLOCK(log, flags); 303 301 } 304 302 305 - static void last_write_complete(struct folio *folio) 303 + static void last_write_complete(struct folio *folio, blk_status_t status) 306 304 { 
307 305 struct metapage *mp; 308 306 unsigned int offset; 307 + 308 + if (status) { 309 + int err = blk_status_to_errno(status); 310 + printk(KERN_ERR "metapage_write_end_io: I/O error\n"); 311 + mapping_set_error(folio->mapping, err); 312 + } 309 313 310 314 for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) { 311 315 mp = folio_to_mp(folio, offset); ··· 334 326 335 327 BUG_ON(!folio->private); 336 328 337 - if (bio->bi_status) { 338 - int err = blk_status_to_errno(bio->bi_status); 339 - printk(KERN_ERR "metapage_write_end_io: I/O error\n"); 340 - mapping_set_error(folio->mapping, err); 341 - } 342 - dec_io(folio, last_write_complete); 329 + dec_io(folio, bio->bi_status, last_write_complete); 343 330 bio_put(bio); 344 331 } 345 332 ··· 457 454 4, bio, sizeof(*bio), 0); 458 455 bio_put(bio); 459 456 folio_unlock(folio); 460 - dec_io(folio, last_write_complete); 457 + dec_io(folio, BLK_STS_OK, last_write_complete); 461 458 err_out: 462 459 while (bad_blocks--) 463 - dec_io(folio, last_write_complete); 460 + dec_io(folio, BLK_STS_OK, last_write_complete); 464 461 return -EIO; 465 462 } 466 463