Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: replace (20 - PAGE_SHIFT) with common macros for pages<->MB conversion

Replace repeated (20 - PAGE_SHIFT) calculations with standard macros:
- MB_TO_PAGES(mb) converts MB to page count
- PAGES_TO_MB(pages) converts pages to MB

No functional change.

[akpm@linux-foundation.org: remove arc's private PAGES_TO_MB, remove its unused PAGES_TO_KB]
[akpm@linux-foundation.org: don't include mm.h due to include file ordering mess]
Link: https://lkml.kernel.org/r/20250718024134.1304745-1-ye.liu@linux.dev
Signed-off-by: Ye Liu <liuye@kylinos.cn>
Acked-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Dev Jain <dev.jain@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Chris Li <chrisl@kernel.org>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Frederic Weisbecker <frederic@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Lai jiangshan <jiangshanlai@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Neeraj Upadhyay <neeraj.upadhyay@kernel.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Ye Liu and committed by
Andrew Morton
79e1c242 849d5cff

+15 -10
-3
arch/arc/include/asm/arcregs.h
··· 151 151 /* Helpers */ 152 152 #define TO_KB(bytes) ((bytes) >> 10) 153 153 #define TO_MB(bytes) (TO_KB(bytes) >> 10) 154 - #define PAGES_TO_KB(n_pages) ((n_pages) << (PAGE_SHIFT - 10)) 155 - #define PAGES_TO_MB(n_pages) (PAGES_TO_KB(n_pages) >> 10) 156 - 157 154 158 155 /* 159 156 ***************************************************************
+9
include/linux/mm.h
··· 69 69 70 70 extern void * high_memory; 71 71 72 + /* 73 + * Convert between pages and MB 74 + * 20 is the shift for 1MB (2^20 = 1MB) 75 + * PAGE_SHIFT is the shift for page size (e.g., 12 for 4KB pages) 76 + * So (20 - PAGE_SHIFT) converts between pages and MB 77 + */ 78 + #define PAGES_TO_MB(pages) ((pages) >> (20 - PAGE_SHIFT)) 79 + #define MB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) 80 + 72 81 #ifdef CONFIG_SYSCTL 73 82 extern int sysctl_legacy_va_layout; 74 83 #else
+1 -1
kernel/rcu/rcuscale.c
··· 796 796 pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n", 797 797 (unsigned long long)(end_time - start_time), kfree_loops, 798 798 rcuscale_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started), 799 - (mem_begin - mem_during) >> (20 - PAGE_SHIFT)); 799 + PAGES_TO_MB(mem_begin - mem_during)); 800 800 801 801 if (shutdown) { 802 802 smp_mb(); /* Assign before wake. */
+2 -3
kernel/sched/fair.c
··· 1495 1495 * by the PTE scanner and NUMA hinting faults should be trapped based 1496 1496 * on resident pages 1497 1497 */ 1498 - nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT); 1498 + nr_scan_pages = MB_TO_PAGES(sysctl_numa_balancing_scan_size); 1499 1499 rss = get_mm_rss(p->mm); 1500 1500 if (!rss) 1501 1501 rss = nr_scan_pages; ··· 1934 1934 } 1935 1935 1936 1936 def_th = sysctl_numa_balancing_hot_threshold; 1937 - rate_limit = sysctl_numa_balancing_promote_rate_limit << \ 1938 - (20 - PAGE_SHIFT); 1937 + rate_limit = MB_TO_PAGES(sysctl_numa_balancing_promote_rate_limit); 1939 1938 numa_promotion_adjust_threshold(pgdat, rate_limit, def_th); 1940 1939 1941 1940 th = pgdat->nbp_threshold ? : def_th;
+1 -1
mm/backing-dev.c
··· 510 510 /* 511 511 * Initial write bandwidth: 100 MB/s 512 512 */ 513 - #define INIT_BW (100 << (20 - PAGE_SHIFT)) 513 + #define INIT_BW MB_TO_PAGES(100) 514 514 515 515 static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi, 516 516 gfp_t gfp)
+1 -1
mm/huge_memory.c
··· 911 911 * where the extra memory used could hurt more than TLB overhead 912 912 * is likely to save. The admin can still enable it through /sys. 913 913 */ 914 - if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) { 914 + if (totalram_pages() < MB_TO_PAGES(512)) { 915 915 transparent_hugepage_flags = 0; 916 916 return 0; 917 917 }
+1 -1
mm/swap.c
··· 1096 1096 */ 1097 1097 void __init swap_setup(void) 1098 1098 { 1099 - unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT); 1099 + unsigned long megs = PAGES_TO_MB(totalram_pages()); 1100 1100 1101 1101 /* Use a smaller cluster for small-memory machines */ 1102 1102 if (megs < 16)