Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging: erofs: simplify `z_erofs_vle_submit_all'

Previously, there were too many hacked-up constructs such as `__FSIO_1',
`lstgrp_noio' and `lstgrp_io' scattered throughout `z_erofs_vle_submit_all'.

Revisit the whole process by properly introducing a jobqueue to
represent each type of queued workgroup; furthermore, hide all of the
craziness behind independent, separate functions.

After this patch, 2 independent jobqueues exist if managed cache
is enabled, or 1 jobqueue if disabled.

Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Gao Xiang and committed by
Greg Kroah-Hartman
7146a4f0 6afd227c

+113 -82
+113 -82
drivers/staging/erofs/unzip_vle.c
··· 1229 1229 return page; 1230 1230 } 1231 1231 1232 - static inline struct z_erofs_vle_unzip_io * 1233 - prepare_io_handler(struct super_block *sb, 1234 - struct z_erofs_vle_unzip_io *io, 1235 - bool background) 1232 + static struct z_erofs_vle_unzip_io * 1233 + jobqueue_init(struct super_block *sb, 1234 + struct z_erofs_vle_unzip_io *io, 1235 + bool foreground) 1236 1236 { 1237 1237 struct z_erofs_vle_unzip_io_sb *iosb; 1238 1238 1239 - if (!background) { 1239 + if (foreground) { 1240 1240 /* waitqueue available for foreground io */ 1241 - BUG_ON(!io); 1241 + DBG_BUGON(!io); 1242 1242 1243 1243 init_waitqueue_head(&io->u.wait); 1244 1244 atomic_set(&io->pending_bios, 0); 1245 1245 goto out; 1246 1246 } 1247 1247 1248 - if (io) 1249 - BUG(); 1250 - else { 1251 - /* allocate extra io descriptor for background io */ 1252 - iosb = kvzalloc(sizeof(struct z_erofs_vle_unzip_io_sb), 1248 + iosb = kvzalloc(sizeof(struct z_erofs_vle_unzip_io_sb), 1253 1249 GFP_KERNEL | __GFP_NOFAIL); 1254 - BUG_ON(!iosb); 1250 + DBG_BUGON(!iosb); 1255 1251 1256 - io = &iosb->io; 1257 - } 1258 - 1252 + /* initialize fields in the allocated descriptor */ 1253 + io = &iosb->io; 1259 1254 iosb->sb = sb; 1260 1255 INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq); 1261 1256 out: ··· 1258 1263 return io; 1259 1264 } 1260 1265 1266 + /* define workgroup jobqueue types */ 1267 + enum { 1261 1268 #ifdef EROFS_FS_HAS_MANAGED_CACHE 1262 - #define __FSIO_1 1 1269 + JQ_BYPASS, 1270 + #endif 1271 + JQ_SUBMIT, 1272 + NR_JOBQUEUES, 1273 + }; 1274 + 1275 + static void *jobqueueset_init(struct super_block *sb, 1276 + z_erofs_vle_owned_workgrp_t qtail[], 1277 + struct z_erofs_vle_unzip_io *q[], 1278 + struct z_erofs_vle_unzip_io *fgq, 1279 + bool forcefg) 1280 + { 1281 + #ifdef EROFS_FS_HAS_MANAGED_CACHE 1282 + /* 1283 + * if managed cache is enabled, bypass jobqueue is needed, 1284 + * no need to read from device for all workgroups in this queue. 
1285 + */ 1286 + q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true); 1287 + qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head; 1288 + #endif 1289 + 1290 + q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg); 1291 + qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head; 1292 + 1293 + return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg)); 1294 + } 1295 + 1296 + #ifdef EROFS_FS_HAS_MANAGED_CACHE 1297 + static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp, 1298 + z_erofs_vle_owned_workgrp_t qtail[], 1299 + z_erofs_vle_owned_workgrp_t owned_head) 1300 + { 1301 + z_erofs_vle_owned_workgrp_t *const submit_qtail = qtail[JQ_SUBMIT]; 1302 + z_erofs_vle_owned_workgrp_t *const bypass_qtail = qtail[JQ_BYPASS]; 1303 + 1304 + DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED); 1305 + if (owned_head == Z_EROFS_VLE_WORKGRP_TAIL) 1306 + owned_head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED; 1307 + 1308 + WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED); 1309 + 1310 + WRITE_ONCE(*submit_qtail, owned_head); 1311 + WRITE_ONCE(*bypass_qtail, &grp->next); 1312 + 1313 + qtail[JQ_BYPASS] = &grp->next; 1314 + } 1315 + 1316 + static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[], 1317 + unsigned int nr_bios, 1318 + bool force_fg) 1319 + { 1320 + /* 1321 + * although background is preferred, no one is pending for submission. 1322 + * don't issue workqueue for decompression but drop it directly instead. 
1323 + */ 1324 + if (force_fg || nr_bios) 1325 + return false; 1326 + 1327 + kvfree(container_of(q[JQ_SUBMIT], 1328 + struct z_erofs_vle_unzip_io_sb, 1329 + io)); 1330 + return true; 1331 + } 1263 1332 #else 1264 - #define __FSIO_1 0 1333 + static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp, 1334 + z_erofs_vle_owned_workgrp_t qtail[], 1335 + z_erofs_vle_owned_workgrp_t owned_head) 1336 + { 1337 + /* impossible to bypass submission for managed cache disabled */ 1338 + DBG_BUGON(1); 1339 + } 1340 + 1341 + static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[], 1342 + unsigned int nr_bios, 1343 + bool force_fg) 1344 + { 1345 + /* bios should be >0 if managed cache is disabled */ 1346 + DBG_BUGON(!nr_bios); 1347 + return false; 1348 + } 1265 1349 #endif 1266 1350 1267 1351 static bool z_erofs_vle_submit_all(struct super_block *sb, 1268 1352 z_erofs_vle_owned_workgrp_t owned_head, 1269 1353 struct list_head *pagepool, 1270 - struct z_erofs_vle_unzip_io *fg_io, 1354 + struct z_erofs_vle_unzip_io *fgq, 1271 1355 bool force_fg) 1272 1356 { 1273 1357 struct erofs_sb_info *const sbi = EROFS_SB(sb); 1274 1358 const unsigned int clusterpages = erofs_clusterpages(sbi); 1275 1359 const gfp_t gfp = GFP_NOFS; 1276 - #ifdef EROFS_FS_HAS_MANAGED_CACHE 1277 - struct z_erofs_vle_workgroup *lstgrp_noio = NULL, *lstgrp_io = NULL; 1278 - #endif 1279 - struct z_erofs_vle_unzip_io *ios[1 + __FSIO_1]; 1360 + 1361 + z_erofs_vle_owned_workgrp_t qtail[NR_JOBQUEUES]; 1362 + struct z_erofs_vle_unzip_io *q[NR_JOBQUEUES]; 1280 1363 struct bio *bio; 1281 - tagptr1_t bi_private; 1364 + void *bi_private; 1282 1365 /* since bio will be NULL, no need to initialize last_index */ 1283 1366 pgoff_t uninitialized_var(last_index); 1284 1367 bool force_submit = false; ··· 1365 1292 if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL)) 1366 1293 return false; 1367 1294 1368 - /* 1369 - * force_fg == 1, (io, fg_io[0]) no io, (io, fg_io[1]) need submit io 1370 - * force_fg == 
0, (io, fg_io[0]) no io; (io[1], bg_io) need submit io 1371 - */ 1372 - #ifdef EROFS_FS_HAS_MANAGED_CACHE 1373 - ios[0] = prepare_io_handler(sb, fg_io + 0, false); 1374 - #endif 1375 - 1376 - if (force_fg) { 1377 - ios[__FSIO_1] = prepare_io_handler(sb, fg_io + __FSIO_1, false); 1378 - bi_private = tagptr_fold(tagptr1_t, ios[__FSIO_1], 0); 1379 - } else { 1380 - ios[__FSIO_1] = prepare_io_handler(sb, NULL, true); 1381 - bi_private = tagptr_fold(tagptr1_t, ios[__FSIO_1], 1); 1382 - } 1383 - 1384 - nr_bios = 0; 1385 1295 force_submit = false; 1386 1296 bio = NULL; 1297 + nr_bios = 0; 1298 + bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg); 1387 1299 1388 1300 /* by default, all need io submission */ 1389 - ios[__FSIO_1]->head = owned_head; 1301 + q[JQ_SUBMIT]->head = owned_head; 1390 1302 1391 1303 do { 1392 1304 struct z_erofs_vle_workgroup *grp; ··· 1411 1353 1412 1354 if (!bio) { 1413 1355 bio = erofs_grab_bio(sb, first_index + i, 1414 - BIO_MAX_PAGES, z_erofs_vle_read_endio, true); 1415 - bio->bi_private = tagptr_cast_ptr(bi_private); 1356 + BIO_MAX_PAGES, 1357 + z_erofs_vle_read_endio, true); 1358 + bio->bi_private = bi_private; 1416 1359 1417 1360 ++nr_bios; 1418 1361 } ··· 1428 1369 if (++i < clusterpages) 1429 1370 goto repeat; 1430 1371 1431 - #ifdef EROFS_FS_HAS_MANAGED_CACHE 1432 - if (bypass < clusterpages) { 1433 - lstgrp_io = grp; 1434 - } else { 1435 - z_erofs_vle_owned_workgrp_t iogrp_next = 1436 - owned_head == Z_EROFS_VLE_WORKGRP_TAIL ? 
1437 - Z_EROFS_VLE_WORKGRP_TAIL_CLOSED : 1438 - owned_head; 1439 - 1440 - if (!lstgrp_io) 1441 - ios[1]->head = iogrp_next; 1442 - else 1443 - WRITE_ONCE(lstgrp_io->next, iogrp_next); 1444 - 1445 - if (!lstgrp_noio) 1446 - ios[0]->head = &grp->next; 1447 - else 1448 - WRITE_ONCE(lstgrp_noio->next, grp); 1449 - 1450 - lstgrp_noio = grp; 1451 - } 1452 - #endif 1372 + if (bypass < clusterpages) 1373 + qtail[JQ_SUBMIT] = &grp->next; 1374 + else 1375 + move_to_bypass_jobqueue(grp, qtail, owned_head); 1453 1376 } while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL); 1454 1377 1455 1378 if (bio) 1456 1379 __submit_bio(bio, REQ_OP_READ, 0); 1457 1380 1458 - #ifndef EROFS_FS_HAS_MANAGED_CACHE 1459 - BUG_ON(!nr_bios); 1460 - #else 1461 - if (lstgrp_noio) 1462 - WRITE_ONCE(lstgrp_noio->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED); 1463 - 1464 - if (!force_fg && !nr_bios) { 1465 - kvfree(container_of(ios[1], 1466 - struct z_erofs_vle_unzip_io_sb, io)); 1381 + if (postsubmit_is_all_bypassed(q, nr_bios, force_fg)) 1467 1382 return true; 1468 - } 1469 - #endif 1470 1383 1471 - z_erofs_vle_unzip_kickoff(tagptr_cast_ptr(bi_private), nr_bios); 1384 + z_erofs_vle_unzip_kickoff(bi_private, nr_bios); 1472 1385 return true; 1473 1386 } 1474 1387 ··· 1449 1418 bool force_fg) 1450 1419 { 1451 1420 struct super_block *sb = f->inode->i_sb; 1452 - struct z_erofs_vle_unzip_io io[1 + __FSIO_1]; 1421 + struct z_erofs_vle_unzip_io io[NR_JOBQUEUES]; 1453 1422 1454 1423 if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg)) 1455 1424 return; 1456 1425 1457 1426 #ifdef EROFS_FS_HAS_MANAGED_CACHE 1458 - z_erofs_vle_unzip_all(sb, &io[0], pagepool); 1427 + z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool); 1459 1428 #endif 1460 1429 if (!force_fg) 1461 1430 return; 1462 1431 1463 1432 /* wait until all bios are completed */ 1464 - wait_event(io[__FSIO_1].u.wait, 1465 - !atomic_read(&io[__FSIO_1].pending_bios)); 1433 + wait_event(io[JQ_SUBMIT].u.wait, 1434 + 
!atomic_read(&io[JQ_SUBMIT].pending_bios)); 1466 1435 1467 1436 /* let's synchronous decompression */ 1468 - z_erofs_vle_unzip_all(sb, &io[__FSIO_1], pagepool); 1437 + z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool); 1469 1438 } 1470 1439 1471 1440 static int z_erofs_vle_normalaccess_readpage(struct file *file,