Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

maple_tree: update check_forking() and bench_forking()

Updated check_forking() and bench_forking() to use __mt_dup() to duplicate
the maple tree.

Link: https://lkml.kernel.org/r/20231027033845.90608-9-zhangpeng.00@bytedance.com
Signed-off-by: Peng Zhang <zhangpeng.00@bytedance.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Mike Christie <michael.christie@oracle.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Peng Zhang and committed by Andrew Morton.
446e1867 f670fa1c

+62 -59
+58 -59
lib/test_maple_tree.c
··· 1834 1834 } 1835 1835 #endif 1836 1836 /* check_forking - simulate the kernel forking sequence with the tree. */ 1837 - static noinline void __init check_forking(struct maple_tree *mt) 1837 + static noinline void __init check_forking(void) 1838 1838 { 1839 - 1840 - struct maple_tree newmt; 1841 - int i, nr_entries = 134; 1839 + struct maple_tree mt, newmt; 1840 + int i, nr_entries = 134, ret; 1842 1841 void *val; 1843 - MA_STATE(mas, mt, 0, 0); 1844 - MA_STATE(newmas, mt, 0, 0); 1845 - struct rw_semaphore newmt_lock; 1842 + MA_STATE(mas, &mt, 0, 0); 1843 + MA_STATE(newmas, &newmt, 0, 0); 1844 + struct rw_semaphore mt_lock, newmt_lock; 1846 1845 1846 + init_rwsem(&mt_lock); 1847 1847 init_rwsem(&newmt_lock); 1848 1848 1849 - for (i = 0; i <= nr_entries; i++) 1850 - mtree_store_range(mt, i*10, i*10 + 5, 1851 - xa_mk_value(i), GFP_KERNEL); 1849 + mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN); 1850 + mt_set_external_lock(&mt, &mt_lock); 1852 1851 1853 - mt_set_non_kernel(99999); 1854 1852 mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN); 1855 1853 mt_set_external_lock(&newmt, &newmt_lock); 1856 - newmas.tree = &newmt; 1857 - mas_reset(&newmas); 1858 - mas_reset(&mas); 1859 - down_write(&newmt_lock); 1860 - mas.index = 0; 1861 - mas.last = 0; 1862 - if (mas_expected_entries(&newmas, nr_entries)) { 1854 + 1855 + down_write(&mt_lock); 1856 + for (i = 0; i <= nr_entries; i++) { 1857 + mas_set_range(&mas, i*10, i*10 + 5); 1858 + mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL); 1859 + } 1860 + 1861 + down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING); 1862 + ret = __mt_dup(&mt, &newmt, GFP_KERNEL); 1863 + if (ret) { 1863 1864 pr_err("OOM!"); 1864 1865 BUG_ON(1); 1865 1866 } 1866 - rcu_read_lock(); 1867 - mas_for_each(&mas, val, ULONG_MAX) { 1868 - newmas.index = mas.index; 1869 - newmas.last = mas.last; 1867 + 1868 + mas_set(&newmas, 0); 1869 + mas_for_each(&newmas, val, ULONG_MAX) 1870 1870 mas_store(&newmas, val); 1871 - } 1872 - 
rcu_read_unlock(); 1871 + 1873 1872 mas_destroy(&newmas); 1873 + mas_destroy(&mas); 1874 1874 mt_validate(&newmt); 1875 - mt_set_non_kernel(0); 1876 1875 __mt_destroy(&newmt); 1876 + __mt_destroy(&mt); 1877 1877 up_write(&newmt_lock); 1878 + up_write(&mt_lock); 1878 1879 } 1879 1880 1880 1881 static noinline void __init check_iteration(struct maple_tree *mt) ··· 1978 1977 } 1979 1978 1980 1979 #if defined(BENCH_FORK) 1981 - static noinline void __init bench_forking(struct maple_tree *mt) 1980 + static noinline void __init bench_forking(void) 1982 1981 { 1983 - 1984 - struct maple_tree newmt; 1985 - int i, nr_entries = 134, nr_fork = 80000; 1982 + struct maple_tree mt, newmt; 1983 + int i, nr_entries = 134, nr_fork = 80000, ret; 1986 1984 void *val; 1987 - MA_STATE(mas, mt, 0, 0); 1988 - MA_STATE(newmas, mt, 0, 0); 1989 - struct rw_semaphore newmt_lock; 1985 + MA_STATE(mas, &mt, 0, 0); 1986 + MA_STATE(newmas, &newmt, 0, 0); 1987 + struct rw_semaphore mt_lock, newmt_lock; 1990 1988 1989 + init_rwsem(&mt_lock); 1991 1990 init_rwsem(&newmt_lock); 1992 - mt_set_external_lock(&newmt, &newmt_lock); 1993 1991 1994 - for (i = 0; i <= nr_entries; i++) 1995 - mtree_store_range(mt, i*10, i*10 + 5, 1996 - xa_mk_value(i), GFP_KERNEL); 1992 + mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN); 1993 + mt_set_external_lock(&mt, &mt_lock); 1994 + 1995 + down_write(&mt_lock); 1996 + for (i = 0; i <= nr_entries; i++) { 1997 + mas_set_range(&mas, i*10, i*10 + 5); 1998 + mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL); 1999 + } 1997 2000 1998 2001 for (i = 0; i < nr_fork; i++) { 1999 - mt_set_non_kernel(99999); 2000 - mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE); 2001 - newmas.tree = &newmt; 2002 - mas_reset(&newmas); 2003 - mas_reset(&mas); 2004 - mas.index = 0; 2005 - mas.last = 0; 2006 - rcu_read_lock(); 2007 - down_write(&newmt_lock); 2008 - if (mas_expected_entries(&newmas, nr_entries)) { 2009 - printk("OOM!"); 2002 + mt_init_flags(&newmt, 2003 + MT_FLAGS_ALLOC_RANGE | 
MT_FLAGS_LOCK_EXTERN); 2004 + mt_set_external_lock(&newmt, &newmt_lock); 2005 + 2006 + down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING); 2007 + ret = __mt_dup(&mt, &newmt, GFP_KERNEL); 2008 + if (ret) { 2009 + pr_err("OOM!"); 2010 2010 BUG_ON(1); 2011 2011 } 2012 - mas_for_each(&mas, val, ULONG_MAX) { 2013 - newmas.index = mas.index; 2014 - newmas.last = mas.last; 2012 + 2013 + mas_set(&newmas, 0); 2014 + mas_for_each(&newmas, val, ULONG_MAX) 2015 2015 mas_store(&newmas, val); 2016 - } 2016 + 2017 2017 mas_destroy(&newmas); 2018 - rcu_read_unlock(); 2019 2018 mt_validate(&newmt); 2020 - mt_set_non_kernel(0); 2021 2019 __mt_destroy(&newmt); 2022 2020 up_write(&newmt_lock); 2023 2021 } 2022 + mas_destroy(&mas); 2023 + __mt_destroy(&mt); 2024 + up_write(&mt_lock); 2024 2025 } 2025 2026 #endif 2026 2027 ··· 3618 3615 #endif 3619 3616 #if defined(BENCH_FORK) 3620 3617 #define BENCH 3621 - mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); 3622 - bench_forking(&tree); 3623 - mtree_destroy(&tree); 3618 + bench_forking(); 3624 3619 goto skip; 3625 3620 #endif 3626 3621 #if defined(BENCH_MT_FOR_EACH) ··· 3651 3650 check_iteration(&tree); 3652 3651 mtree_destroy(&tree); 3653 3652 3654 - mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); 3655 - check_forking(&tree); 3656 - mtree_destroy(&tree); 3653 + check_forking(); 3657 3654 3658 3655 mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); 3659 3656 check_mas_store_gfp(&tree);
+4
tools/include/linux/rwsem.h
··· 37 37 { 38 38 return pthread_rwlock_unlock(&sem->lock); 39 39 } 40 + 41 + #define down_read_nested(sem, subclass) down_read(sem) 42 + #define down_write_nested(sem, subclass) down_write(sem) 43 + 40 44 #endif /* _TOOLS_RWSEM_H */