Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
"21 patches.

Subsystems affected by this patch series: MAINTAINERS, mailmap, and mm
(mlock, pagecache, damon, slub, memcg, and hugetlb)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (21 commits)
mm: bdi: initialize bdi_min_ratio when bdi is unregistered
hugetlbfs: fix issue of preallocation of gigantic pages can't work
mm/memcg: relocate mod_objcg_mlstate(), get_obj_stock() and put_obj_stock()
mm/slub: fix endianness bug for alloc/free_traces attributes
selftests/damon: split test cases
selftests/damon: test debugfs file reads/writes with huge count
selftests/damon: test wrong DAMOS condition ranges input
selftests/damon: test DAMON enabling with empty target_ids case
selftests/damon: skip test if DAMON is running
mm/damon/vaddr-test: remove unnecessary variables
mm/damon/vaddr-test: split a test function having >1024 bytes frame size
mm/damon/vaddr: remove an unnecessary warning message
mm/damon/core: remove unnecessary error messages
mm/damon/dbgfs: remove an unnecessary error message
mm/damon/core: use better timer mechanisms selection threshold
mm/damon/core: fix fake load reports due to uninterruptible sleeps
timers: implement usleep_idle_range()
filemap: remove PageHWPoison check from next_uptodate_page()
mailmap: update email address for Guo Ren
MAINTAINERS: update kdump maintainers
...

+330 -215
+2
.mailmap
··· 126 126 Greg Kroah-Hartman <greg@kroah.com> 127 127 Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com> 128 128 Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com> 129 + Guo Ren <guoren@kernel.org> <guoren@linux.alibaba.com> 130 + Guo Ren <guoren@kernel.org> <ren_guo@c-sky.com> 129 131 Gustavo Padovan <gustavo@las.ic.unicamp.br> 130 132 Gustavo Padovan <padovan@profusion.mobi> 131 133 Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
+1 -1
MAINTAINERS
··· 10279 10279 F: scripts/Makefile.kcsan 10280 10280 10281 10281 KDUMP 10282 - M: Dave Young <dyoung@redhat.com> 10283 10282 M: Baoquan He <bhe@redhat.com> 10284 10283 R: Vivek Goyal <vgoyal@redhat.com> 10284 + R: Dave Young <dyoung@redhat.com> 10285 10285 L: kexec@lists.infradead.org 10286 10286 S: Maintained 10287 10287 W: http://lse.sourceforge.net/kdump/
+13 -1
include/linux/delay.h
··· 20 20 */ 21 21 22 22 #include <linux/math.h> 23 + #include <linux/sched.h> 23 24 24 25 extern unsigned long loops_per_jiffy; 25 26 ··· 59 58 void __attribute__((weak)) calibration_delay_done(void); 60 59 void msleep(unsigned int msecs); 61 60 unsigned long msleep_interruptible(unsigned int msecs); 62 - void usleep_range(unsigned long min, unsigned long max); 61 + void usleep_range_state(unsigned long min, unsigned long max, 62 + unsigned int state); 63 + 64 + static inline void usleep_range(unsigned long min, unsigned long max) 65 + { 66 + usleep_range_state(min, max, TASK_UNINTERRUPTIBLE); 67 + } 68 + 69 + static inline void usleep_idle_range(unsigned long min, unsigned long max) 70 + { 71 + usleep_range_state(min, max, TASK_IDLE); 72 + } 63 73 64 74 static inline void ssleep(unsigned int seconds) 65 75 {
+10 -3
include/uapi/linux/resource.h
··· 66 66 #define _STK_LIM (8*1024*1024) 67 67 68 68 /* 69 - * GPG2 wants 64kB of mlocked memory, to make sure pass phrases 70 - * and other sensitive information are never written to disk. 69 + * Limit the amount of locked memory by some sane default: 70 + * root can always increase this limit if needed. 71 + * 72 + * The main use-cases are (1) preventing sensitive memory 73 + * from being swapped; (2) real-time operations; (3) via 74 + * IOURING_REGISTER_BUFFERS. 75 + * 76 + * The first two don't need much. The latter will take as 77 + * much as it can get. 8MB is a reasonably sane default. 71 78 */ 72 - #define MLOCK_LIMIT ((PAGE_SIZE > 64*1024) ? PAGE_SIZE : 64*1024) 79 + #define MLOCK_LIMIT (8*1024*1024) 73 80 74 81 /* 75 82 * Due to binary compatibility, the actual resource numbers
+9 -7
kernel/time/timer.c
··· 2054 2054 EXPORT_SYMBOL(msleep_interruptible); 2055 2055 2056 2056 /** 2057 - * usleep_range - Sleep for an approximate time 2058 - * @min: Minimum time in usecs to sleep 2059 - * @max: Maximum time in usecs to sleep 2057 + * usleep_range_state - Sleep for an approximate time in a given state 2058 + * @min: Minimum time in usecs to sleep 2059 + * @max: Maximum time in usecs to sleep 2060 + * @state: State of the current task that will be while sleeping 2060 2061 * 2061 2062 * In non-atomic context where the exact wakeup time is flexible, use 2062 - * usleep_range() instead of udelay(). The sleep improves responsiveness 2063 + * usleep_range_state() instead of udelay(). The sleep improves responsiveness 2063 2064 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces 2064 2065 * power usage by allowing hrtimers to take advantage of an already- 2065 2066 * scheduled interrupt instead of scheduling a new one just for this sleep. 2066 2067 */ 2067 - void __sched usleep_range(unsigned long min, unsigned long max) 2068 + void __sched usleep_range_state(unsigned long min, unsigned long max, 2069 + unsigned int state) 2068 2070 { 2069 2071 ktime_t exp = ktime_add_us(ktime_get(), min); 2070 2072 u64 delta = (u64)(max - min) * NSEC_PER_USEC; 2071 2073 2072 2074 for (;;) { 2073 - __set_current_state(TASK_UNINTERRUPTIBLE); 2075 + __set_current_state(state); 2074 2076 /* Do not return before the requested sleep time has elapsed */ 2075 2077 if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS)) 2076 2078 break; 2077 2079 } 2078 2080 } 2079 - EXPORT_SYMBOL(usleep_range); 2081 + EXPORT_SYMBOL(usleep_range_state);
+7
mm/backing-dev.c
··· 945 945 wb_shutdown(&bdi->wb); 946 946 cgwb_bdi_unregister(bdi); 947 947 948 + /* 949 + * If this BDI's min ratio has been set, use bdi_set_min_ratio() to 950 + * update the global bdi_min_ratio. 951 + */ 952 + if (bdi->min_ratio) 953 + bdi_set_min_ratio(bdi, 0); 954 + 948 955 if (bdi->dev) { 949 956 bdi_debug_unregister(bdi); 950 957 device_unregister(bdi->dev);
+7 -13
mm/damon/core.c
··· 282 282 for (i = 0; i < nr_ids; i++) { 283 283 t = damon_new_target(ids[i]); 284 284 if (!t) { 285 - pr_err("Failed to alloc damon_target\n"); 286 285 /* The caller should do cleanup of the ids itself */ 287 286 damon_for_each_target_safe(t, next, ctx) 288 287 damon_destroy_target(t); ··· 311 312 unsigned long aggr_int, unsigned long primitive_upd_int, 312 313 unsigned long min_nr_reg, unsigned long max_nr_reg) 313 314 { 314 - if (min_nr_reg < 3) { 315 - pr_err("min_nr_regions (%lu) must be at least 3\n", 316 - min_nr_reg); 315 + if (min_nr_reg < 3) 317 316 return -EINVAL; 318 - } 319 - if (min_nr_reg > max_nr_reg) { 320 - pr_err("invalid nr_regions. min (%lu) > max (%lu)\n", 321 - min_nr_reg, max_nr_reg); 317 + if (min_nr_reg > max_nr_reg) 322 318 return -EINVAL; 323 - } 324 319 325 320 ctx->sample_interval = sample_int; 326 321 ctx->aggr_interval = aggr_int; ··· 973 980 974 981 static void kdamond_usleep(unsigned long usecs) 975 982 { 976 - if (usecs > 100 * 1000) 977 - schedule_timeout_interruptible(usecs_to_jiffies(usecs)); 983 + /* See Documentation/timers/timers-howto.rst for the thresholds */ 984 + if (usecs > 20 * USEC_PER_MSEC) 985 + schedule_timeout_idle(usecs_to_jiffies(usecs)); 978 986 else 979 - usleep_range(usecs, usecs + 1); 987 + usleep_idle_range(usecs, usecs + 1); 980 988 } 981 989 982 990 /* Returns negative error code if it's not activated but should return */ ··· 1032 1038 ctx->callback.after_sampling(ctx)) 1033 1039 done = true; 1034 1040 1035 - usleep_range(ctx->sample_interval, ctx->sample_interval + 1); 1041 + kdamond_usleep(ctx->sample_interval); 1036 1042 1037 1043 if (ctx->primitive.check_accesses) 1038 1044 max_nr_accesses = ctx->primitive.check_accesses(ctx);
+1 -3
mm/damon/dbgfs.c
··· 210 210 &wmarks.low, &parsed); 211 211 if (ret != 18) 212 212 break; 213 - if (!damos_action_valid(action)) { 214 - pr_err("wrong action %d\n", action); 213 + if (!damos_action_valid(action)) 215 214 goto fail; 216 - } 217 215 218 216 pos += parsed; 219 217 scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a,
+45 -50
mm/damon/vaddr-test.h
··· 135 135 struct damon_addr_range *three_regions, 136 136 unsigned long *expected, int nr_expected) 137 137 { 138 - struct damon_ctx *ctx = damon_new_ctx(); 139 138 struct damon_target *t; 140 139 struct damon_region *r; 141 140 int i; ··· 144 145 r = damon_new_region(regions[i * 2], regions[i * 2 + 1]); 145 146 damon_add_region(r, t); 146 147 } 147 - damon_add_target(ctx, t); 148 148 149 149 damon_va_apply_three_regions(t, three_regions); 150 150 ··· 152 154 KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]); 153 155 KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]); 154 156 } 155 - 156 - damon_destroy_ctx(ctx); 157 157 } 158 158 159 159 /* ··· 248 252 new_three_regions, expected, ARRAY_SIZE(expected)); 249 253 } 250 254 251 - static void damon_test_split_evenly(struct kunit *test) 255 + static void damon_test_split_evenly_fail(struct kunit *test, 256 + unsigned long start, unsigned long end, unsigned int nr_pieces) 252 257 { 253 - struct damon_ctx *c = damon_new_ctx(); 254 - struct damon_target *t; 255 - struct damon_region *r; 256 - unsigned long i; 257 - 258 - KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5), 259 - -EINVAL); 260 - 261 - t = damon_new_target(42); 262 - r = damon_new_region(0, 100); 263 - KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 0), -EINVAL); 258 + struct damon_target *t = damon_new_target(42); 259 + struct damon_region *r = damon_new_region(start, end); 264 260 265 261 damon_add_region(r, t); 266 - KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 10), 0); 267 - KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 10u); 268 - 269 - i = 0; 270 - damon_for_each_region(r, t) { 271 - KUNIT_EXPECT_EQ(test, r->ar.start, i++ * 10); 272 - KUNIT_EXPECT_EQ(test, r->ar.end, i * 10); 273 - } 274 - damon_free_target(t); 275 - 276 - t = damon_new_target(42); 277 - r = damon_new_region(5, 59); 278 - damon_add_region(r, t); 279 - KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 5), 0); 280 - KUNIT_EXPECT_EQ(test, 
damon_nr_regions(t), 5u); 281 - 282 - i = 0; 283 - damon_for_each_region(r, t) { 284 - if (i == 4) 285 - break; 286 - KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i++); 287 - KUNIT_EXPECT_EQ(test, r->ar.end, 5 + 10 * i); 288 - } 289 - KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i); 290 - KUNIT_EXPECT_EQ(test, r->ar.end, 59ul); 291 - damon_free_target(t); 292 - 293 - t = damon_new_target(42); 294 - r = damon_new_region(5, 6); 295 - damon_add_region(r, t); 296 - KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 2), -EINVAL); 262 + KUNIT_EXPECT_EQ(test, 263 + damon_va_evenly_split_region(t, r, nr_pieces), -EINVAL); 297 264 KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u); 298 265 299 266 damon_for_each_region(r, t) { 300 - KUNIT_EXPECT_EQ(test, r->ar.start, 5ul); 301 - KUNIT_EXPECT_EQ(test, r->ar.end, 6ul); 267 + KUNIT_EXPECT_EQ(test, r->ar.start, start); 268 + KUNIT_EXPECT_EQ(test, r->ar.end, end); 302 269 } 270 + 303 271 damon_free_target(t); 304 - damon_destroy_ctx(c); 272 + } 273 + 274 + static void damon_test_split_evenly_succ(struct kunit *test, 275 + unsigned long start, unsigned long end, unsigned int nr_pieces) 276 + { 277 + struct damon_target *t = damon_new_target(42); 278 + struct damon_region *r = damon_new_region(start, end); 279 + unsigned long expected_width = (end - start) / nr_pieces; 280 + unsigned long i = 0; 281 + 282 + damon_add_region(r, t); 283 + KUNIT_EXPECT_EQ(test, 284 + damon_va_evenly_split_region(t, r, nr_pieces), 0); 285 + KUNIT_EXPECT_EQ(test, damon_nr_regions(t), nr_pieces); 286 + 287 + damon_for_each_region(r, t) { 288 + if (i == nr_pieces - 1) 289 + break; 290 + KUNIT_EXPECT_EQ(test, 291 + r->ar.start, start + i++ * expected_width); 292 + KUNIT_EXPECT_EQ(test, r->ar.end, start + i * expected_width); 293 + } 294 + KUNIT_EXPECT_EQ(test, r->ar.start, start + i * expected_width); 295 + KUNIT_EXPECT_EQ(test, r->ar.end, end); 296 + damon_free_target(t); 297 + } 298 + 299 + static void damon_test_split_evenly(struct kunit *test) 300 + { 
301 + KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5), 302 + -EINVAL); 303 + 304 + damon_test_split_evenly_fail(test, 0, 100, 0); 305 + damon_test_split_evenly_succ(test, 0, 100, 10); 306 + damon_test_split_evenly_succ(test, 5, 59, 5); 307 + damon_test_split_evenly_fail(test, 5, 6, 2); 305 308 } 306 309 307 310 static struct kunit_case damon_test_cases[] = {
-1
mm/damon/vaddr.c
··· 627 627 case DAMOS_STAT: 628 628 return 0; 629 629 default: 630 - pr_warn("Wrong action %d\n", scheme->action); 631 630 return -EINVAL; 632 631 } 633 632
-2
mm/filemap.c
··· 3253 3253 goto skip; 3254 3254 if (!PageUptodate(page) || PageReadahead(page)) 3255 3255 goto skip; 3256 - if (PageHWPoison(page)) 3257 - goto skip; 3258 3256 if (!trylock_page(page)) 3259 3257 goto skip; 3260 3258 if (page->mapping != mapping)
+1 -1
mm/hugetlb.c
··· 2973 2973 struct huge_bootmem_page *m = NULL; /* initialize for clang */ 2974 2974 int nr_nodes, node; 2975 2975 2976 - if (nid >= nr_online_nodes) 2976 + if (nid != NUMA_NO_NODE && nid >= nr_online_nodes) 2977 2977 return 0; 2978 2978 /* do node specific alloc */ 2979 2979 if (nid != NUMA_NO_NODE) {
+53 -53
mm/memcontrol.c
··· 776 776 rcu_read_unlock(); 777 777 } 778 778 779 - /* 780 - * mod_objcg_mlstate() may be called with irq enabled, so 781 - * mod_memcg_lruvec_state() should be used. 782 - */ 783 - static inline void mod_objcg_mlstate(struct obj_cgroup *objcg, 784 - struct pglist_data *pgdat, 785 - enum node_stat_item idx, int nr) 786 - { 787 - struct mem_cgroup *memcg; 788 - struct lruvec *lruvec; 789 - 790 - rcu_read_lock(); 791 - memcg = obj_cgroup_memcg(objcg); 792 - lruvec = mem_cgroup_lruvec(memcg, pgdat); 793 - mod_memcg_lruvec_state(lruvec, idx, nr); 794 - rcu_read_unlock(); 795 - } 796 - 797 779 /** 798 780 * __count_memcg_events - account VM events in a cgroup 799 781 * @memcg: the memory cgroup ··· 2119 2137 } 2120 2138 #endif 2121 2139 2122 - /* 2123 - * Most kmem_cache_alloc() calls are from user context. The irq disable/enable 2124 - * sequence used in this case to access content from object stock is slow. 2125 - * To optimize for user context access, there are now two object stocks for 2126 - * task context and interrupt context access respectively. 2127 - * 2128 - * The task context object stock can be accessed by disabling preemption only 2129 - * which is cheap in non-preempt kernel. The interrupt context object stock 2130 - * can only be accessed after disabling interrupt. User context code can 2131 - * access interrupt object stock, but not vice versa. 
2132 - */ 2133 - static inline struct obj_stock *get_obj_stock(unsigned long *pflags) 2134 - { 2135 - struct memcg_stock_pcp *stock; 2136 - 2137 - if (likely(in_task())) { 2138 - *pflags = 0UL; 2139 - preempt_disable(); 2140 - stock = this_cpu_ptr(&memcg_stock); 2141 - return &stock->task_obj; 2142 - } 2143 - 2144 - local_irq_save(*pflags); 2145 - stock = this_cpu_ptr(&memcg_stock); 2146 - return &stock->irq_obj; 2147 - } 2148 - 2149 - static inline void put_obj_stock(unsigned long flags) 2150 - { 2151 - if (likely(in_task())) 2152 - preempt_enable(); 2153 - else 2154 - local_irq_restore(flags); 2155 - } 2156 - 2157 2140 /** 2158 2141 * consume_stock: Try to consume stocked charge on this cpu. 2159 2142 * @memcg: memcg to consume from. ··· 2762 2815 * reclaimable. So those GFP bits should be masked off. 2763 2816 */ 2764 2817 #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT) 2818 + 2819 + /* 2820 + * Most kmem_cache_alloc() calls are from user context. The irq disable/enable 2821 + * sequence used in this case to access content from object stock is slow. 2822 + * To optimize for user context access, there are now two object stocks for 2823 + * task context and interrupt context access respectively. 2824 + * 2825 + * The task context object stock can be accessed by disabling preemption only 2826 + * which is cheap in non-preempt kernel. The interrupt context object stock 2827 + * can only be accessed after disabling interrupt. User context code can 2828 + * access interrupt object stock, but not vice versa. 
2829 + */ 2830 + static inline struct obj_stock *get_obj_stock(unsigned long *pflags) 2831 + { 2832 + struct memcg_stock_pcp *stock; 2833 + 2834 + if (likely(in_task())) { 2835 + *pflags = 0UL; 2836 + preempt_disable(); 2837 + stock = this_cpu_ptr(&memcg_stock); 2838 + return &stock->task_obj; 2839 + } 2840 + 2841 + local_irq_save(*pflags); 2842 + stock = this_cpu_ptr(&memcg_stock); 2843 + return &stock->irq_obj; 2844 + } 2845 + 2846 + static inline void put_obj_stock(unsigned long flags) 2847 + { 2848 + if (likely(in_task())) 2849 + preempt_enable(); 2850 + else 2851 + local_irq_restore(flags); 2852 + } 2853 + 2854 + /* 2855 + * mod_objcg_mlstate() may be called with irq enabled, so 2856 + * mod_memcg_lruvec_state() should be used. 2857 + */ 2858 + static inline void mod_objcg_mlstate(struct obj_cgroup *objcg, 2859 + struct pglist_data *pgdat, 2860 + enum node_stat_item idx, int nr) 2861 + { 2862 + struct mem_cgroup *memcg; 2863 + struct lruvec *lruvec; 2864 + 2865 + rcu_read_lock(); 2866 + memcg = obj_cgroup_memcg(objcg); 2867 + lruvec = mem_cgroup_lruvec(memcg, pgdat); 2868 + mod_memcg_lruvec_state(lruvec, idx, nr); 2869 + rcu_read_unlock(); 2870 + } 2765 2871 2766 2872 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s, 2767 2873 gfp_t gfp, bool new_page)
+9 -6
mm/slub.c
··· 5081 5081 unsigned long max; 5082 5082 unsigned long count; 5083 5083 struct location *loc; 5084 + loff_t idx; 5084 5085 }; 5085 5086 5086 5087 static struct dentry *slab_debugfs_root; ··· 6053 6052 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS) 6054 6053 static int slab_debugfs_show(struct seq_file *seq, void *v) 6055 6054 { 6056 - 6057 - struct location *l; 6058 - unsigned int idx = *(unsigned int *)v; 6059 6055 struct loc_track *t = seq->private; 6056 + struct location *l; 6057 + unsigned long idx; 6060 6058 6059 + idx = (unsigned long) t->idx; 6061 6060 if (idx < t->count) { 6062 6061 l = &t->loc[idx]; 6063 6062 ··· 6106 6105 { 6107 6106 struct loc_track *t = seq->private; 6108 6107 6109 - v = ppos; 6110 - ++*ppos; 6108 + t->idx = ++(*ppos); 6111 6109 if (*ppos <= t->count) 6112 - return v; 6110 + return ppos; 6113 6111 6114 6112 return NULL; 6115 6113 } 6116 6114 6117 6115 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos) 6118 6116 { 6117 + struct loc_track *t = seq->private; 6118 + 6119 + t->idx = *ppos; 6119 6120 return ppos; 6120 6121 } 6121 6122
+2
tools/testing/selftests/damon/.gitignore
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + huge_count_read_write
+5 -2
tools/testing/selftests/damon/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 # Makefile for damon selftests 3 3 4 - TEST_FILES = _chk_dependency.sh 5 - TEST_PROGS = debugfs_attrs.sh 4 + TEST_GEN_FILES += huge_count_read_write 5 + 6 + TEST_FILES = _chk_dependency.sh _debugfs_common.sh 7 + TEST_PROGS = debugfs_attrs.sh debugfs_schemes.sh debugfs_target_ids.sh 8 + TEST_PROGS += debugfs_empty_targets.sh debugfs_huge_count_read_write.sh 6 9 7 10 include ../lib.mk
+52
tools/testing/selftests/damon/_debugfs_common.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + test_write_result() { 5 + file=$1 6 + content=$2 7 + orig_content=$3 8 + expect_reason=$4 9 + expected=$5 10 + 11 + echo "$content" > "$file" 12 + if [ $? -ne "$expected" ] 13 + then 14 + echo "writing $content to $file doesn't return $expected" 15 + echo "expected because: $expect_reason" 16 + echo "$orig_content" > "$file" 17 + exit 1 18 + fi 19 + } 20 + 21 + test_write_succ() { 22 + test_write_result "$1" "$2" "$3" "$4" 0 23 + } 24 + 25 + test_write_fail() { 26 + test_write_result "$1" "$2" "$3" "$4" 1 27 + } 28 + 29 + test_content() { 30 + file=$1 31 + orig_content=$2 32 + expected=$3 33 + expect_reason=$4 34 + 35 + content=$(cat "$file") 36 + if [ "$content" != "$expected" ] 37 + then 38 + echo "reading $file expected $expected but $content" 39 + echo "expected because: $expect_reason" 40 + echo "$orig_content" > "$file" 41 + exit 1 42 + fi 43 + } 44 + 45 + source ./_chk_dependency.sh 46 + 47 + damon_onoff="$DBGFS/monitor_on" 48 + if [ $(cat "$damon_onoff") = "on" ] 49 + then 50 + echo "monitoring is on" 51 + exit $ksft_skip 52 + fi
+1 -72
tools/testing/selftests/damon/debugfs_attrs.sh
··· 1 1 #!/bin/bash 2 2 # SPDX-License-Identifier: GPL-2.0 3 3 4 - test_write_result() { 5 - file=$1 6 - content=$2 7 - orig_content=$3 8 - expect_reason=$4 9 - expected=$5 10 - 11 - echo "$content" > "$file" 12 - if [ $? -ne "$expected" ] 13 - then 14 - echo "writing $content to $file doesn't return $expected" 15 - echo "expected because: $expect_reason" 16 - echo "$orig_content" > "$file" 17 - exit 1 18 - fi 19 - } 20 - 21 - test_write_succ() { 22 - test_write_result "$1" "$2" "$3" "$4" 0 23 - } 24 - 25 - test_write_fail() { 26 - test_write_result "$1" "$2" "$3" "$4" 1 27 - } 28 - 29 - test_content() { 30 - file=$1 31 - orig_content=$2 32 - expected=$3 33 - expect_reason=$4 34 - 35 - content=$(cat "$file") 36 - if [ "$content" != "$expected" ] 37 - then 38 - echo "reading $file expected $expected but $content" 39 - echo "expected because: $expect_reason" 40 - echo "$orig_content" > "$file" 41 - exit 1 42 - fi 43 - } 44 - 45 - source ./_chk_dependency.sh 4 + source _debugfs_common.sh 46 5 47 6 # Test attrs file 48 7 # =============== ··· 15 56 "min_nr_regions > max_nr_regions" 16 57 test_content "$file" "$orig_content" "1 2 3 4 5" "successfully written" 17 58 echo "$orig_content" > "$file" 18 - 19 - # Test schemes file 20 - # ================= 21 - 22 - file="$DBGFS/schemes" 23 - orig_content=$(cat "$file") 24 - 25 - test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \ 26 - "$orig_content" "valid input" 27 - test_write_fail "$file" "1 2 28 - 3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines" 29 - test_write_succ "$file" "" "$orig_content" "disabling" 30 - echo "$orig_content" > "$file" 31 - 32 - # Test target_ids file 33 - # ==================== 34 - 35 - file="$DBGFS/target_ids" 36 - orig_content=$(cat "$file") 37 - 38 - test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input" 39 - test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input" 40 - test_content "$file" "$orig_content" "1 2" "non-integer was there" 41 - 
test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input" 42 - test_content "$file" "$orig_content" "" "wrong input written" 43 - test_write_succ "$file" "" "$orig_content" "empty input" 44 - test_content "$file" "$orig_content" "" "empty input written" 45 - echo "$orig_content" > "$file" 46 - 47 - echo "PASS"
+13
tools/testing/selftests/damon/debugfs_empty_targets.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + source _debugfs_common.sh 5 + 6 + # Test empty targets case 7 + # ======================= 8 + 9 + orig_target_ids=$(cat "$DBGFS/target_ids") 10 + echo "" > "$DBGFS/target_ids" 11 + orig_monitor_on=$(cat "$DBGFS/monitor_on") 12 + test_write_fail "$DBGFS/monitor_on" "on" "orig_monitor_on" "empty target ids" 13 + echo "$orig_target_ids" > "$DBGFS/target_ids"
+22
tools/testing/selftests/damon/debugfs_huge_count_read_write.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + source _debugfs_common.sh 5 + 6 + # Test huge count read write 7 + # ========================== 8 + 9 + dmesg -C 10 + 11 + for file in "$DBGFS/"* 12 + do 13 + ./huge_count_read_write "$file" 14 + done 15 + 16 + if dmesg | grep -q WARNING 17 + then 18 + dmesg 19 + exit 1 20 + else 21 + exit 0 22 + fi
+19
tools/testing/selftests/damon/debugfs_schemes.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + source _debugfs_common.sh 5 + 6 + # Test schemes file 7 + # ================= 8 + 9 + file="$DBGFS/schemes" 10 + orig_content=$(cat "$file") 11 + 12 + test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \ 13 + "$orig_content" "valid input" 14 + test_write_fail "$file" "1 2 15 + 3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines" 16 + test_write_succ "$file" "" "$orig_content" "disabling" 17 + test_write_fail "$file" "2 1 2 1 10 1 3 10 1 1 1 1 1 1 1 1 2 3" \ 18 + "$orig_content" "wrong condition ranges" 19 + echo "$orig_content" > "$file"
+19
tools/testing/selftests/damon/debugfs_target_ids.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + source _debugfs_common.sh 5 + 6 + # Test target_ids file 7 + # ==================== 8 + 9 + file="$DBGFS/target_ids" 10 + orig_content=$(cat "$file") 11 + 12 + test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input" 13 + test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input" 14 + test_content "$file" "$orig_content" "1 2" "non-integer was there" 15 + test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input" 16 + test_content "$file" "$orig_content" "" "wrong input written" 17 + test_write_succ "$file" "" "$orig_content" "empty input" 18 + test_content "$file" "$orig_content" "" "empty input written" 19 + echo "$orig_content" > "$file"
+39
tools/testing/selftests/damon/huge_count_read_write.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Author: SeongJae Park <sj@kernel.org> 4 + */ 5 + 6 + #include <fcntl.h> 7 + #include <stdlib.h> 8 + #include <unistd.h> 9 + #include <stdio.h> 10 + 11 + void write_read_with_huge_count(char *file) 12 + { 13 + int filedesc = open(file, O_RDWR); 14 + char buf[25]; 15 + int ret; 16 + 17 + printf("%s %s\n", __func__, file); 18 + if (filedesc < 0) { 19 + fprintf(stderr, "failed opening %s\n", file); 20 + exit(1); 21 + } 22 + 23 + write(filedesc, "", 0xfffffffful); 24 + perror("after write: "); 25 + ret = read(filedesc, buf, 0xfffffffful); 26 + perror("after read: "); 27 + close(filedesc); 28 + } 29 + 30 + int main(int argc, char *argv[]) 31 + { 32 + if (argc != 2) { 33 + fprintf(stderr, "Usage: %s <file>\n", argv[0]); 34 + exit(1); 35 + } 36 + write_read_with_huge_count(argv[1]); 37 + 38 + return 0; 39 + }