Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

XArray: Add calls to might_alloc()

Catch bogus GFP flags deterministically, instead of occasionally
when we actually have to allocate memory.

Reported-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

+17
+15
include/linux/xarray.h
··· 16 16 #include <linux/kconfig.h> 17 17 #include <linux/kernel.h> 18 18 #include <linux/rcupdate.h> 19 + #include <linux/sched/mm.h> 19 20 #include <linux/spinlock.h> 20 21 #include <linux/types.h> 21 22 ··· 587 586 { 588 587 void *curr; 589 588 589 + might_alloc(gfp); 590 590 xa_lock_bh(xa); 591 591 curr = __xa_store(xa, index, entry, gfp); 592 592 xa_unlock_bh(xa); ··· 614 612 { 615 613 void *curr; 616 614 615 + might_alloc(gfp); 617 616 xa_lock_irq(xa); 618 617 curr = __xa_store(xa, index, entry, gfp); 619 618 xa_unlock_irq(xa); ··· 690 687 { 691 688 void *curr; 692 689 690 + might_alloc(gfp); 693 691 xa_lock(xa); 694 692 curr = __xa_cmpxchg(xa, index, old, entry, gfp); 695 693 xa_unlock(xa); ··· 718 714 { 719 715 void *curr; 720 716 717 + might_alloc(gfp); 721 718 xa_lock_bh(xa); 722 719 curr = __xa_cmpxchg(xa, index, old, entry, gfp); 723 720 xa_unlock_bh(xa); ··· 746 741 { 747 742 void *curr; 748 743 744 + might_alloc(gfp); 749 745 xa_lock_irq(xa); 750 746 curr = __xa_cmpxchg(xa, index, old, entry, gfp); 751 747 xa_unlock_irq(xa); ··· 776 770 { 777 771 int err; 778 772 773 + might_alloc(gfp); 779 774 xa_lock(xa); 780 775 err = __xa_insert(xa, index, entry, gfp); 781 776 xa_unlock(xa); ··· 806 799 { 807 800 int err; 808 801 802 + might_alloc(gfp); 809 803 xa_lock_bh(xa); 810 804 err = __xa_insert(xa, index, entry, gfp); 811 805 xa_unlock_bh(xa); ··· 836 828 { 837 829 int err; 838 830 831 + might_alloc(gfp); 839 832 xa_lock_irq(xa); 840 833 err = __xa_insert(xa, index, entry, gfp); 841 834 xa_unlock_irq(xa); ··· 866 857 { 867 858 int err; 868 859 860 + might_alloc(gfp); 869 861 xa_lock(xa); 870 862 err = __xa_alloc(xa, id, entry, limit, gfp); 871 863 xa_unlock(xa); ··· 896 886 { 897 887 int err; 898 888 889 + might_alloc(gfp); 899 890 xa_lock_bh(xa); 900 891 err = __xa_alloc(xa, id, entry, limit, gfp); 901 892 xa_unlock_bh(xa); ··· 926 915 { 927 916 int err; 928 917 918 + might_alloc(gfp); 929 919 xa_lock_irq(xa); 930 920 err = __xa_alloc(xa, id, entry, limit, gfp); 931 921 xa_unlock_irq(xa); ··· 960 948 { 961 949 int err; 962 950 951 + might_alloc(gfp); 963 952 xa_lock(xa); 964 953 err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); 965 954 xa_unlock(xa); ··· 994 981 { 995 982 int err; 996 983 984 + might_alloc(gfp); 997 985 xa_lock_bh(xa); 998 986 err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); 999 987 xa_unlock_bh(xa); ··· 1028 1014 { 1029 1015 int err; 1030 1016 1017 + might_alloc(gfp); 1031 1018 xa_lock_irq(xa); 1032 1019 err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); 1033 1020 xa_unlock_irq(xa);
+2
tools/include/linux/sched/mm.h
··· 1 1 #ifndef _TOOLS_PERF_LINUX_SCHED_MM_H 2 2 #define _TOOLS_PERF_LINUX_SCHED_MM_H 3 3 4 + #define might_alloc(gfp) do { } while (0) 5 + 4 6 #endif /* _TOOLS_PERF_LINUX_SCHED_MM_H */