Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cifs: avoid starvation when refreshing dfs cache

When refreshing the DFS cache, keep SMB2 IOCTL calls as much outside
critical sections as possible and avoid read/write starvation when
getting new DFS referrals by using broken or slow connections.

Signed-off-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
Reviewed-by: Aurelien Aptel <aaptel@suse.com>
Signed-off-by: Steve French <stfrench@microsoft.com>

Authored by Paulo Alcantara; committed by Steve French.
1023e90b 0d52df81

+109 -44
fs/cifs/dfs_cache.c
··· 554 554 struct cache_entry *ce; 555 555 struct cache_entry *to_del = NULL; 556 556 557 + WARN_ON(!rwsem_is_locked(&htable_rw_lock)); 558 + 557 559 for (i = 0; i < CACHE_HTABLE_SIZE; i++) { 558 560 struct hlist_head *l = &cache_htable[i]; 559 561 ··· 585 583 struct cache_entry *ce; 586 584 unsigned int hash; 587 585 588 - convert_delimiter(refs[0].path_name, '\\'); 586 + WARN_ON(!rwsem_is_locked(&htable_rw_lock)); 587 + 588 + if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) { 589 + cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES); 590 + remove_oldest_entry_locked(); 591 + } 592 + 589 593 rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash); 590 594 if (rc) 591 595 return rc; ··· 612 604 613 605 hlist_add_head(&ce->hlist, &cache_htable[hash]); 614 606 dump_ce(ce); 607 + 608 + atomic_inc(&cache_count); 615 609 616 610 return 0; 617 611 } ··· 729 719 } 730 720 731 721 /* Update a cache entry with the new referral in @refs */ 732 - static int update_cache_entry_locked(const char *path, const struct dfs_info3_param *refs, 722 + static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs, 733 723 int numrefs) 734 724 { 735 725 int rc; 736 - struct cache_entry *ce; 737 726 char *s, *th = NULL; 738 727 739 - ce = lookup_cache_entry(path); 740 - if (IS_ERR(ce)) 741 - return PTR_ERR(ce); 728 + WARN_ON(!rwsem_is_locked(&htable_rw_lock)); 742 729 743 730 if (ce->tgthint) { 744 731 s = ce->tgthint->name; ··· 757 750 static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path, 758 751 struct dfs_info3_param **refs, int *numrefs) 759 752 { 753 + int rc; 754 + int i; 755 + 760 756 cifs_dbg(FYI, "%s: get an DFS referral for %s\n", __func__, path); 757 + 758 + *refs = NULL; 759 + *numrefs = 0; 761 760 762 761 if (!ses || !ses->server || !ses->server->ops->get_dfs_refer) 763 762 return -EOPNOTSUPP; 764 763 if (unlikely(!cache_cp)) 765 764 return -EINVAL; 766 
765 767 - *refs = NULL; 768 - *numrefs = 0; 766 + rc = ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp, 767 + NO_MAP_UNI_RSVD); 768 + if (!rc) { 769 + struct dfs_info3_param *ref = *refs; 769 770 770 - return ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp, 771 - NO_MAP_UNI_RSVD); 771 + for (i = 0; i < *numrefs; i++) 772 + convert_delimiter(ref[i].path_name, '\\'); 773 + } 774 + return rc; 772 775 } 773 776 774 777 /* ··· 824 807 dump_refs(refs, numrefs); 825 808 826 809 if (!newent) { 827 - rc = update_cache_entry_locked(path, refs, numrefs); 810 + rc = update_cache_entry_locked(ce, refs, numrefs); 828 811 goto out_unlock; 829 812 } 830 813 831 - if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) { 832 - cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES); 833 - remove_oldest_entry_locked(); 834 - } 835 - 836 814 rc = add_cache_entry_locked(refs, numrefs); 837 - if (!rc) 838 - atomic_inc(&cache_count); 839 815 840 816 out_unlock: 841 817 up_write(&htable_rw_lock); ··· 1323 1313 1324 1314 list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) { 1325 1315 const char *path = tcon->dfs_path + 1; 1316 + struct cache_entry *ce; 1317 + struct dfs_info3_param *refs = NULL; 1318 + int numrefs = 0; 1319 + bool needs_refresh = false; 1326 1320 int rc = 0; 1327 1321 1328 1322 list_del_init(&tcon->ulist); 1323 + 1329 1324 ses = find_ipc_from_server_path(sessions, path); 1330 - if (!IS_ERR(ses)) { 1331 - xid = get_xid(); 1332 - cache_refresh_path(xid, ses, path); 1333 - free_xid(xid); 1325 + if (IS_ERR(ses)) 1326 + goto next_tcon; 1327 + 1328 + down_read(&htable_rw_lock); 1329 + ce = lookup_cache_entry(path); 1330 + needs_refresh = IS_ERR(ce) || cache_entry_expired(ce); 1331 + up_read(&htable_rw_lock); 1332 + 1333 + if (!needs_refresh) 1334 + goto next_tcon; 1335 + 1336 + xid = get_xid(); 1337 + rc = get_dfs_referral(xid, ses, path, &refs, &numrefs); 1338 + free_xid(xid); 1339 + 1340 + /* Create or 
update a cache entry with the new referral */ 1341 + if (!rc) { 1342 + down_write(&htable_rw_lock); 1343 + ce = lookup_cache_entry(path); 1344 + if (IS_ERR(ce)) 1345 + add_cache_entry_locked(refs, numrefs); 1346 + else if (cache_entry_expired(ce)) 1347 + update_cache_entry_locked(ce, refs, numrefs); 1348 + up_write(&htable_rw_lock); 1334 1349 } 1350 + 1351 + next_tcon: 1352 + free_dfs_info_array(refs, numrefs); 1335 1353 cifs_put_tcon(tcon); 1336 1354 } 1337 1355 } ··· 1369 1331 int i; 1370 1332 struct cifs_ses *ses; 1371 1333 unsigned int xid; 1372 - int rc; 1334 + char *ref_paths[CACHE_MAX_ENTRIES]; 1335 + int count = 0; 1336 + struct cache_entry *ce; 1373 1337 1374 1338 /* 1375 - * Refresh all cached entries. 1339 + * Refresh all cached entries. Get all new referrals outside critical section to avoid 1340 + * starvation while performing SMB2 IOCTL on broken or slow connections. 1341 + 1376 1342 * The cache entries may cover more paths than the active mounts 1377 1343 * (e.g. domain-based DFS referrals or multi tier DFS setups). 
1378 1344 */ 1379 - down_write(&htable_rw_lock); 1345 + down_read(&htable_rw_lock); 1380 1346 for (i = 0; i < CACHE_HTABLE_SIZE; i++) { 1381 - struct cache_entry *ce; 1382 1347 struct hlist_head *l = &cache_htable[i]; 1383 1348 1384 1349 hlist_for_each_entry(ce, l, hlist) { 1385 - struct dfs_info3_param *refs = NULL; 1386 - int numrefs = 0; 1387 - 1388 - if (hlist_unhashed(&ce->hlist) || !cache_entry_expired(ce)) 1350 + if (count == ARRAY_SIZE(ref_paths)) 1351 + goto out_unlock; 1352 + if (hlist_unhashed(&ce->hlist) || !cache_entry_expired(ce) || 1353 + IS_ERR(find_ipc_from_server_path(sessions, ce->path))) 1389 1354 continue; 1390 - 1391 - ses = find_ipc_from_server_path(sessions, ce->path); 1392 - if (IS_ERR(ses)) 1393 - continue; 1394 - 1395 - xid = get_xid(); 1396 - rc = get_dfs_referral(xid, ses, ce->path, &refs, &numrefs); 1397 - free_xid(xid); 1398 - 1399 - if (!rc) 1400 - update_cache_entry_locked(ce->path, refs, numrefs); 1401 - 1402 - free_dfs_info_array(refs, numrefs); 1355 + ref_paths[count++] = kstrdup(ce->path, GFP_ATOMIC); 1403 1356 } 1404 1357 } 1405 - up_write(&htable_rw_lock); 1358 + 1359 + out_unlock: 1360 + up_read(&htable_rw_lock); 1361 + 1362 + for (i = 0; i < count; i++) { 1363 + char *path = ref_paths[i]; 1364 + struct dfs_info3_param *refs = NULL; 1365 + int numrefs = 0; 1366 + int rc = 0; 1367 + 1368 + if (!path) 1369 + continue; 1370 + 1371 + ses = find_ipc_from_server_path(sessions, path); 1372 + if (IS_ERR(ses)) 1373 + goto next_referral; 1374 + 1375 + xid = get_xid(); 1376 + rc = get_dfs_referral(xid, ses, path, &refs, &numrefs); 1377 + free_xid(xid); 1378 + 1379 + if (!rc) { 1380 + down_write(&htable_rw_lock); 1381 + ce = lookup_cache_entry(path); 1382 + /* 1383 + * We need to re-check it because other tasks might have it deleted or 1384 + * updated. 
1385 + */ 1386 + if (!IS_ERR(ce) && cache_entry_expired(ce)) 1387 + update_cache_entry_locked(ce, refs, numrefs); 1388 + up_write(&htable_rw_lock); 1389 + } 1390 + 1391 + next_referral: 1392 + kfree(path); 1393 + free_dfs_info_array(refs, numrefs); 1394 + } 1406 1395 } 1407 1396 1408 1397 /*