Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cpumask: Switch from inline to __always_inline

On recent (v6.6+) builds with Clang (based on Clang 18.0.0) and certain
configurations [0], I'm finding that (lack of) inlining decisions may
lead to section mismatch warnings like the following:

WARNING: modpost: vmlinux.o: section mismatch in reference:
cpumask_andnot (section: .text) ->
cpuhp_bringup_cpus_parallel.tmp_mask (section: .init.data) ERROR:
modpost: Section mismatches detected.

or more confusingly:

WARNING: modpost: vmlinux: section mismatch in reference:
cpumask_andnot+0x5f (section: .text) -> efi_systab_phys (section:
.init.data)

The first warning makes a little sense, because
cpuhp_bringup_cpus_parallel() (an __init function) calls
cpumask_andnot() on tmp_mask (an __initdata symbol). If the compiler
doesn't inline cpumask_andnot(), this may appear to be a mismatch.

The second warning makes less sense, but might be because efi_systab_phys
and cpuhp_bringup_cpus_parallel.tmp_mask are laid out near each other,
and the latter isn't a proper C symbol definition.

In any case, it seems a reasonable solution to suggest more strongly to
the compiler that these cpumask macros *must* be inlined, as 'inline' is
just a recommendation.

This change was previously proposed as:

Subject: [PATCH 1/3] bitmap: switch from inline to __always_inline
https://lore.kernel.org/all/20221027043810.350460-2-yury.norov@gmail.com/

But the change has been split up, to separately justify the cpumask
changes (which drive my work) and the bitmap/const optimizations (that
Yury separately proposed for other reasons). This ends up as somewhere
between a "rebase" and "rewrite" -- I had to rewrite most of the patch.

According to bloat-o-meter, vmlinux decreases minimally in size (-0.00%
to -0.01%, depending on the version of GCC or Clang and .config in
question) with this series of changes:

gcc 13.2.0, x86_64_defconfig
-3005 bytes, Before=21944501, After=21941496, chg -0.01%

clang 16.0.6, x86_64_defconfig
-105 bytes, Before=22571692, After=22571587, chg -0.00%

gcc 9.5.0, x86_64_defconfig
-1771 bytes, Before=21557598, After=21555827, chg -0.01%

clang 18.0_pre516547 (ChromiumOS toolchain), x86_64_defconfig
-191 bytes, Before=22615339, After=22615148, chg -0.00%

clang 18.0_pre516547 (ChromiumOS toolchain), based on ChromiumOS config + gcov
-979 bytes, Before=76294783, After=76293804, chg -0.00%

[0] CONFIG_HOTPLUG_PARALLEL=y ('select'ed for x86 as of [1]) and
CONFIG_GCOV_PROFILE_ALL.

[1] commit 0c7ffa32dbd6 ("x86/smpboot/64: Implement
arch_cpuhp_init_parallel_bringup() and enable it")

Co-developed-by: Brian Norris <briannorris@chromium.org>
Signed-off-by: Brian Norris <briannorris@chromium.org>
Reviewed-by: Kees Cook <kees@kernel.org>
Reviewed-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Yury Norov <yury.norov@gmail.com>

authored by

Brian Norris and committed by
Yury Norov
ab6b1010 ed8cd2b3

+112 -100
+112 -100
include/linux/cpumask.h
··· 30 30 extern unsigned int nr_cpu_ids; 31 31 #endif 32 32 33 - static inline void set_nr_cpu_ids(unsigned int nr) 33 + static __always_inline void set_nr_cpu_ids(unsigned int nr) 34 34 { 35 35 #if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS) 36 36 WARN_ON(nr != nr_cpu_ids); ··· 149 149 * 150 150 * Return: >= nr_cpu_ids if no cpus set. 151 151 */ 152 - static inline unsigned int cpumask_first(const struct cpumask *srcp) 152 + static __always_inline unsigned int cpumask_first(const struct cpumask *srcp) 153 153 { 154 154 return find_first_bit(cpumask_bits(srcp), small_cpumask_bits); 155 155 } ··· 160 160 * 161 161 * Return: >= nr_cpu_ids if all cpus are set. 162 162 */ 163 - static inline unsigned int cpumask_first_zero(const struct cpumask *srcp) 163 + static __always_inline unsigned int cpumask_first_zero(const struct cpumask *srcp) 164 164 { 165 165 return find_first_zero_bit(cpumask_bits(srcp), small_cpumask_bits); 166 166 } ··· 172 172 * 173 173 * Return: >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and(). 174 174 */ 175 - static inline 175 + static __always_inline 176 176 unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2) 177 177 { 178 178 return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits); ··· 186 186 * 187 187 * Return: >= nr_cpu_ids if no cpus set in all. 188 188 */ 189 - static inline 189 + static __always_inline 190 190 unsigned int cpumask_first_and_and(const struct cpumask *srcp1, 191 191 const struct cpumask *srcp2, 192 192 const struct cpumask *srcp3) ··· 201 201 * 202 202 * Return: >= nr_cpumask_bits if no CPUs set. 203 203 */ 204 - static inline unsigned int cpumask_last(const struct cpumask *srcp) 204 + static __always_inline unsigned int cpumask_last(const struct cpumask *srcp) 205 205 { 206 206 return find_last_bit(cpumask_bits(srcp), small_cpumask_bits); 207 207 } ··· 213 213 * 214 214 * Return: >= nr_cpu_ids if no further cpus set. 
215 215 */ 216 - static inline 216 + static __always_inline 217 217 unsigned int cpumask_next(int n, const struct cpumask *srcp) 218 218 { 219 219 /* -1 is a legal arg here. */ ··· 229 229 * 230 230 * Return: >= nr_cpu_ids if no further cpus unset. 231 231 */ 232 - static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) 232 + static __always_inline 233 + unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) 233 234 { 234 235 /* -1 is a legal arg here. */ 235 236 if (n != -1) ··· 240 239 241 240 #if NR_CPUS == 1 242 241 /* Uniprocessor: there is only one valid CPU */ 243 - static inline unsigned int cpumask_local_spread(unsigned int i, int node) 242 + static __always_inline 243 + unsigned int cpumask_local_spread(unsigned int i, int node) 244 244 { 245 245 return 0; 246 246 } 247 247 248 - static inline unsigned int cpumask_any_and_distribute(const struct cpumask *src1p, 249 - const struct cpumask *src2p) 248 + static __always_inline 249 + unsigned int cpumask_any_and_distribute(const struct cpumask *src1p, 250 + const struct cpumask *src2p) 250 251 { 251 252 return cpumask_first_and(src1p, src2p); 252 253 } 253 254 254 - static inline unsigned int cpumask_any_distribute(const struct cpumask *srcp) 255 + static __always_inline 256 + unsigned int cpumask_any_distribute(const struct cpumask *srcp) 255 257 { 256 258 return cpumask_first(srcp); 257 259 } ··· 273 269 * 274 270 * Return: >= nr_cpu_ids if no further cpus set in both. 275 271 */ 276 - static inline 272 + static __always_inline 277 273 unsigned int cpumask_next_and(int n, const struct cpumask *src1p, 278 - const struct cpumask *src2p) 274 + const struct cpumask *src2p) 279 275 { 280 276 /* -1 is a legal arg here. 
*/ 281 277 if (n != -1) ··· 295 291 for_each_set_bit(cpu, cpumask_bits(mask), small_cpumask_bits) 296 292 297 293 #if NR_CPUS == 1 298 - static inline 294 + static __always_inline 299 295 unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap) 300 296 { 301 297 cpumask_check(start); ··· 398 394 * Often used to find any cpu but smp_processor_id() in a mask. 399 395 * Return: >= nr_cpu_ids if no cpus set. 400 396 */ 401 - static inline 397 + static __always_inline 402 398 unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) 403 399 { 404 400 unsigned int i; ··· 418 414 * 419 415 * Returns >= nr_cpu_ids if no cpus set. 420 416 */ 421 - static inline 417 + static __always_inline 422 418 unsigned int cpumask_any_and_but(const struct cpumask *mask1, 423 419 const struct cpumask *mask2, 424 420 unsigned int cpu) ··· 440 436 * 441 437 * Return: >= nr_cpu_ids if such cpu doesn't exist. 442 438 */ 443 - static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp) 439 + static __always_inline 440 + unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp) 444 441 { 445 442 return find_nth_bit(cpumask_bits(srcp), small_cpumask_bits, cpumask_check(cpu)); 446 443 } ··· 454 449 * 455 450 * Return: >= nr_cpu_ids if such cpu doesn't exist. 456 451 */ 457 - static inline 452 + static __always_inline 458 453 unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1, 459 454 const struct cpumask *srcp2) 460 455 { ··· 470 465 * 471 466 * Return: >= nr_cpu_ids if such cpu doesn't exist. 
472 467 */ 473 - static inline 468 + static __always_inline 474 469 unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1, 475 470 const struct cpumask *srcp2) 476 471 { ··· 513 508 * @cpu: cpu number (< nr_cpu_ids) 514 509 * @dstp: the cpumask pointer 515 510 */ 516 - static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) 511 + static __always_inline 512 + void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) 517 513 { 518 514 set_bit(cpumask_check(cpu), cpumask_bits(dstp)); 519 515 } 520 516 521 - static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) 517 + static __always_inline 518 + void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) 522 519 { 523 520 __set_bit(cpumask_check(cpu), cpumask_bits(dstp)); 524 521 } ··· 564 557 * 565 558 * Return: true if @cpu is set in @cpumask, else returns false 566 559 */ 567 - static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask) 560 + static __always_inline 561 + bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask) 568 562 { 569 563 return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); 570 564 } ··· 579 571 * 580 572 * Return: true if @cpu is set in old bitmap of @cpumask, else returns false 581 573 */ 582 - static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) 574 + static __always_inline 575 + bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) 583 576 { 584 577 return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask)); 585 578 } ··· 594 585 * 595 586 * Return: true if @cpu is set in old bitmap of @cpumask, else returns false 596 587 */ 597 - static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) 588 + static __always_inline 589 + bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) 598 590 { 599 591 return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask)); 600 592 
} ··· 604 594 * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask 605 595 * @dstp: the cpumask pointer 606 596 */ 607 - static inline void cpumask_setall(struct cpumask *dstp) 597 + static __always_inline void cpumask_setall(struct cpumask *dstp) 608 598 { 609 599 if (small_const_nbits(small_cpumask_bits)) { 610 600 cpumask_bits(dstp)[0] = BITMAP_LAST_WORD_MASK(nr_cpumask_bits); ··· 617 607 * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask 618 608 * @dstp: the cpumask pointer 619 609 */ 620 - static inline void cpumask_clear(struct cpumask *dstp) 610 + static __always_inline void cpumask_clear(struct cpumask *dstp) 621 611 { 622 612 bitmap_zero(cpumask_bits(dstp), large_cpumask_bits); 623 613 } ··· 630 620 * 631 621 * Return: false if *@dstp is empty, else returns true 632 622 */ 633 - static inline bool cpumask_and(struct cpumask *dstp, 634 - const struct cpumask *src1p, 635 - const struct cpumask *src2p) 623 + static __always_inline 624 + bool cpumask_and(struct cpumask *dstp, const struct cpumask *src1p, 625 + const struct cpumask *src2p) 636 626 { 637 627 return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p), 638 628 cpumask_bits(src2p), small_cpumask_bits); ··· 644 634 * @src1p: the first input 645 635 * @src2p: the second input 646 636 */ 647 - static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p, 648 - const struct cpumask *src2p) 637 + static __always_inline 638 + void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p, 639 + const struct cpumask *src2p) 649 640 { 650 641 bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p), 651 642 cpumask_bits(src2p), small_cpumask_bits); ··· 658 647 * @src1p: the first input 659 648 * @src2p: the second input 660 649 */ 661 - static inline void cpumask_xor(struct cpumask *dstp, 662 - const struct cpumask *src1p, 663 - const struct cpumask *src2p) 650 + static __always_inline 651 + void cpumask_xor(struct cpumask *dstp, const struct cpumask *src1p, 652 + const 
struct cpumask *src2p) 664 653 { 665 654 bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p), 666 655 cpumask_bits(src2p), small_cpumask_bits); ··· 674 663 * 675 664 * Return: false if *@dstp is empty, else returns true 676 665 */ 677 - static inline bool cpumask_andnot(struct cpumask *dstp, 678 - const struct cpumask *src1p, 679 - const struct cpumask *src2p) 666 + static __always_inline 667 + bool cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p, 668 + const struct cpumask *src2p) 680 669 { 681 670 return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p), 682 671 cpumask_bits(src2p), small_cpumask_bits); ··· 689 678 * 690 679 * Return: true if the cpumasks are equal, false if not 691 680 */ 692 - static inline bool cpumask_equal(const struct cpumask *src1p, 693 - const struct cpumask *src2p) 681 + static __always_inline 682 + bool cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p) 694 683 { 695 684 return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p), 696 685 small_cpumask_bits); ··· 705 694 * Return: true if first cpumask ORed with second cpumask == third cpumask, 706 695 * otherwise false 707 696 */ 708 - static inline bool cpumask_or_equal(const struct cpumask *src1p, 709 - const struct cpumask *src2p, 710 - const struct cpumask *src3p) 697 + static __always_inline 698 + bool cpumask_or_equal(const struct cpumask *src1p, const struct cpumask *src2p, 699 + const struct cpumask *src3p) 711 700 { 712 701 return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p), 713 702 cpumask_bits(src3p), small_cpumask_bits); ··· 721 710 * Return: true if first cpumask ANDed with second cpumask is non-empty, 722 711 * otherwise false 723 712 */ 724 - static inline bool cpumask_intersects(const struct cpumask *src1p, 725 - const struct cpumask *src2p) 713 + static __always_inline 714 + bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p) 726 715 { 727 716 return 
bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p), 728 717 small_cpumask_bits); ··· 735 724 * 736 725 * Return: true if *@src1p is a subset of *@src2p, else returns false 737 726 */ 738 - static inline bool cpumask_subset(const struct cpumask *src1p, 739 - const struct cpumask *src2p) 727 + static __always_inline 728 + bool cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p) 740 729 { 741 730 return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p), 742 731 small_cpumask_bits); ··· 748 737 * 749 738 * Return: true if srcp is empty (has no bits set), else false 750 739 */ 751 - static inline bool cpumask_empty(const struct cpumask *srcp) 740 + static __always_inline bool cpumask_empty(const struct cpumask *srcp) 752 741 { 753 742 return bitmap_empty(cpumask_bits(srcp), small_cpumask_bits); 754 743 } ··· 759 748 * 760 749 * Return: true if srcp is full (has all bits set), else false 761 750 */ 762 - static inline bool cpumask_full(const struct cpumask *srcp) 751 + static __always_inline bool cpumask_full(const struct cpumask *srcp) 763 752 { 764 753 return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits); 765 754 } ··· 770 759 * 771 760 * Return: count of bits set in *srcp 772 761 */ 773 - static inline unsigned int cpumask_weight(const struct cpumask *srcp) 762 + static __always_inline unsigned int cpumask_weight(const struct cpumask *srcp) 774 763 { 775 764 return bitmap_weight(cpumask_bits(srcp), small_cpumask_bits); 776 765 } ··· 782 771 * 783 772 * Return: count of bits set in both *srcp1 and *srcp2 784 773 */ 785 - static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1, 786 - const struct cpumask *srcp2) 774 + static __always_inline 775 + unsigned int cpumask_weight_and(const struct cpumask *srcp1, const struct cpumask *srcp2) 787 776 { 788 777 return bitmap_weight_and(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits); 789 778 } ··· 795 784 * 796 785 * Return: count of bits set in both *srcp1 and 
*srcp2 797 786 */ 798 - static inline unsigned int cpumask_weight_andnot(const struct cpumask *srcp1, 799 - const struct cpumask *srcp2) 787 + static __always_inline 788 + unsigned int cpumask_weight_andnot(const struct cpumask *srcp1, 789 + const struct cpumask *srcp2) 800 790 { 801 791 return bitmap_weight_andnot(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits); 802 792 } ··· 808 796 * @srcp: the input to shift 809 797 * @n: the number of bits to shift by 810 798 */ 811 - static inline void cpumask_shift_right(struct cpumask *dstp, 812 - const struct cpumask *srcp, int n) 799 + static __always_inline 800 + void cpumask_shift_right(struct cpumask *dstp, const struct cpumask *srcp, int n) 813 801 { 814 802 bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n, 815 803 small_cpumask_bits); ··· 821 809 * @srcp: the input to shift 822 810 * @n: the number of bits to shift by 823 811 */ 824 - static inline void cpumask_shift_left(struct cpumask *dstp, 825 - const struct cpumask *srcp, int n) 812 + static __always_inline 813 + void cpumask_shift_left(struct cpumask *dstp, const struct cpumask *srcp, int n) 826 814 { 827 815 bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n, 828 816 nr_cpumask_bits); ··· 833 821 * @dstp: the result 834 822 * @srcp: the input cpumask 835 823 */ 836 - static inline void cpumask_copy(struct cpumask *dstp, 837 - const struct cpumask *srcp) 824 + static __always_inline 825 + void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp) 838 826 { 839 827 bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), large_cpumask_bits); 840 828 } ··· 870 858 * 871 859 * Return: -errno, or 0 for success. 
872 860 */ 873 - static inline int cpumask_parse_user(const char __user *buf, int len, 874 - struct cpumask *dstp) 861 + static __always_inline 862 + int cpumask_parse_user(const char __user *buf, int len, struct cpumask *dstp) 875 863 { 876 864 return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); 877 865 } ··· 884 872 * 885 873 * Return: -errno, or 0 for success. 886 874 */ 887 - static inline int cpumask_parselist_user(const char __user *buf, int len, 888 - struct cpumask *dstp) 875 + static __always_inline 876 + int cpumask_parselist_user(const char __user *buf, int len, struct cpumask *dstp) 889 877 { 890 878 return bitmap_parselist_user(buf, len, cpumask_bits(dstp), 891 879 nr_cpumask_bits); ··· 898 886 * 899 887 * Return: -errno, or 0 for success. 900 888 */ 901 - static inline int cpumask_parse(const char *buf, struct cpumask *dstp) 889 + static __always_inline int cpumask_parse(const char *buf, struct cpumask *dstp) 902 890 { 903 891 return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits); 904 892 } ··· 910 898 * 911 899 * Return: -errno, or 0 for success. 
912 900 */ 913 - static inline int cpulist_parse(const char *buf, struct cpumask *dstp) 901 + static __always_inline int cpulist_parse(const char *buf, struct cpumask *dstp) 914 902 { 915 903 return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits); 916 904 } ··· 920 908 * 921 909 * Return: size to allocate for a &struct cpumask in bytes 922 910 */ 923 - static inline unsigned int cpumask_size(void) 911 + static __always_inline unsigned int cpumask_size(void) 924 912 { 925 913 return bitmap_size(large_cpumask_bits); 926 914 } ··· 932 920 933 921 bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); 934 922 935 - static inline 923 + static __always_inline 936 924 bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) 937 925 { 938 926 return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node); ··· 950 938 * 951 939 * Return: %true if allocation succeeded, %false if not 952 940 */ 953 - static inline 941 + static __always_inline 954 942 bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 955 943 { 956 944 return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE); 957 945 } 958 946 959 - static inline 947 + static __always_inline 960 948 bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 961 949 { 962 950 return alloc_cpumask_var(mask, flags | __GFP_ZERO); ··· 966 954 void free_cpumask_var(cpumask_var_t mask); 967 955 void free_bootmem_cpumask_var(cpumask_var_t mask); 968 956 969 - static inline bool cpumask_available(cpumask_var_t mask) 957 + static __always_inline bool cpumask_available(cpumask_var_t mask) 970 958 { 971 959 return mask != NULL; 972 960 } ··· 976 964 #define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x) 977 965 #define __cpumask_var_read_mostly 978 966 979 - static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 967 + static __always_inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 980 968 { 981 969 return true; 982 970 } 983 971 984 - static inline bool 
alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, 972 + static __always_inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, 985 973 int node) 986 974 { 987 975 return true; 988 976 } 989 977 990 - static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 978 + static __always_inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 991 979 { 992 980 cpumask_clear(*mask); 993 981 return true; 994 982 } 995 983 996 - static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, 984 + static __always_inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, 997 985 int node) 998 986 { 999 987 cpumask_clear(*mask); 1000 988 return true; 1001 989 } 1002 990 1003 - static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) 991 + static __always_inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) 1004 992 { 1005 993 } 1006 994 1007 - static inline void free_cpumask_var(cpumask_var_t mask) 995 + static __always_inline void free_cpumask_var(cpumask_var_t mask) 1008 996 { 1009 997 } 1010 998 1011 - static inline void free_bootmem_cpumask_var(cpumask_var_t mask) 999 + static __always_inline void free_bootmem_cpumask_var(cpumask_var_t mask) 1012 1000 { 1013 1001 } 1014 1002 1015 - static inline bool cpumask_available(cpumask_var_t mask) 1003 + static __always_inline bool cpumask_available(cpumask_var_t mask) 1016 1004 { 1017 1005 return true; 1018 1006 } ··· 1070 1058 ((struct cpumask *)(1 ? 
(bitmap) \ 1071 1059 : (void *)sizeof(__check_is_bitmap(bitmap)))) 1072 1060 1073 - static inline int __check_is_bitmap(const unsigned long *bitmap) 1061 + static __always_inline int __check_is_bitmap(const unsigned long *bitmap) 1074 1062 { 1075 1063 return 1; 1076 1064 } ··· 1085 1073 extern const unsigned long 1086 1074 cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; 1087 1075 1088 - static inline const struct cpumask *get_cpu_mask(unsigned int cpu) 1076 + static __always_inline const struct cpumask *get_cpu_mask(unsigned int cpu) 1089 1077 { 1090 1078 const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; 1091 1079 p -= cpu / BITS_PER_LONG; ··· 1112 1100 #define num_present_cpus() cpumask_weight(cpu_present_mask) 1113 1101 #define num_active_cpus() cpumask_weight(cpu_active_mask) 1114 1102 1115 - static inline bool cpu_online(unsigned int cpu) 1103 + static __always_inline bool cpu_online(unsigned int cpu) 1116 1104 { 1117 1105 return cpumask_test_cpu(cpu, cpu_online_mask); 1118 1106 } 1119 1107 1120 - static inline bool cpu_enabled(unsigned int cpu) 1108 + static __always_inline bool cpu_enabled(unsigned int cpu) 1121 1109 { 1122 1110 return cpumask_test_cpu(cpu, cpu_enabled_mask); 1123 1111 } 1124 1112 1125 - static inline bool cpu_possible(unsigned int cpu) 1113 + static __always_inline bool cpu_possible(unsigned int cpu) 1126 1114 { 1127 1115 return cpumask_test_cpu(cpu, cpu_possible_mask); 1128 1116 } 1129 1117 1130 - static inline bool cpu_present(unsigned int cpu) 1118 + static __always_inline bool cpu_present(unsigned int cpu) 1131 1119 { 1132 1120 return cpumask_test_cpu(cpu, cpu_present_mask); 1133 1121 } 1134 1122 1135 - static inline bool cpu_active(unsigned int cpu) 1123 + static __always_inline bool cpu_active(unsigned int cpu) 1136 1124 { 1137 1125 return cpumask_test_cpu(cpu, cpu_active_mask); 1138 1126 } 1139 1127 1140 - static inline bool cpu_dying(unsigned int cpu) 1128 + static __always_inline bool cpu_dying(unsigned int 
cpu) 1141 1129 { 1142 1130 return cpumask_test_cpu(cpu, cpu_dying_mask); 1143 1131 } ··· 1150 1138 #define num_present_cpus() 1U 1151 1139 #define num_active_cpus() 1U 1152 1140 1153 - static inline bool cpu_online(unsigned int cpu) 1141 + static __always_inline bool cpu_online(unsigned int cpu) 1154 1142 { 1155 1143 return cpu == 0; 1156 1144 } 1157 1145 1158 - static inline bool cpu_possible(unsigned int cpu) 1146 + static __always_inline bool cpu_possible(unsigned int cpu) 1159 1147 { 1160 1148 return cpu == 0; 1161 1149 } 1162 1150 1163 - static inline bool cpu_enabled(unsigned int cpu) 1151 + static __always_inline bool cpu_enabled(unsigned int cpu) 1164 1152 { 1165 1153 return cpu == 0; 1166 1154 } 1167 1155 1168 - static inline bool cpu_present(unsigned int cpu) 1156 + static __always_inline bool cpu_present(unsigned int cpu) 1169 1157 { 1170 1158 return cpu == 0; 1171 1159 } 1172 1160 1173 - static inline bool cpu_active(unsigned int cpu) 1161 + static __always_inline bool cpu_active(unsigned int cpu) 1174 1162 { 1175 1163 return cpu == 0; 1176 1164 } 1177 1165 1178 - static inline bool cpu_dying(unsigned int cpu) 1166 + static __always_inline bool cpu_dying(unsigned int cpu) 1179 1167 { 1180 1168 return false; 1181 1169 } ··· 1209 1197 * Return: the length of the (null-terminated) @buf string, zero if 1210 1198 * nothing is copied. 1211 1199 */ 1212 - static inline ssize_t 1200 + static __always_inline ssize_t 1213 1201 cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) 1214 1202 { 1215 1203 return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask), ··· 1232 1220 * Return: the length of how many bytes have been copied, excluding 1233 1221 * terminating '\0'. 
1234 1222 */ 1235 - static inline ssize_t 1236 - cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask, 1237 - loff_t off, size_t count) 1223 + static __always_inline 1224 + ssize_t cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask, 1225 + loff_t off, size_t count) 1238 1226 { 1239 1227 return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask), 1240 1228 nr_cpu_ids, off, count) - 1; ··· 1254 1242 * Return: the length of how many bytes have been copied, excluding 1255 1243 * terminating '\0'. 1256 1244 */ 1257 - static inline ssize_t 1258 - cpumap_print_list_to_buf(char *buf, const struct cpumask *mask, 1259 - loff_t off, size_t count) 1245 + static __always_inline 1246 + ssize_t cpumap_print_list_to_buf(char *buf, const struct cpumask *mask, 1247 + loff_t off, size_t count) 1260 1248 { 1261 1249 return bitmap_print_list_to_buf(buf, cpumask_bits(mask), 1262 1250 nr_cpu_ids, off, count) - 1;