Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-3.3' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu

* 'for-3.3' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
percpu: Remove irqsafe_cpu_xxx variants

Fix up conflict in arch/x86/include/asm/percpu.h due to clash with
cebef5beed3d ("x86: Fix and improve percpu_cmpxchg{8,16}b_double()")
which edited the (now removed) irqsafe_cpu_cmpxchg*_double code.

+62 -236
+22 -22
arch/s390/include/asm/percpu.h
··· 19 19 #define ARCH_NEEDS_WEAK_PER_CPU 20 20 #endif 21 21 22 - #define arch_irqsafe_cpu_to_op(pcp, val, op) \ 22 + #define arch_this_cpu_to_op(pcp, val, op) \ 23 23 do { \ 24 24 typedef typeof(pcp) pcp_op_T__; \ 25 25 pcp_op_T__ old__, new__, prev__; \ ··· 41 41 preempt_enable(); \ 42 42 } while (0) 43 43 44 - #define irqsafe_cpu_add_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +) 45 - #define irqsafe_cpu_add_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +) 46 - #define irqsafe_cpu_add_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +) 47 - #define irqsafe_cpu_add_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +) 44 + #define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +) 45 + #define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +) 46 + #define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +) 47 + #define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +) 48 48 49 - #define irqsafe_cpu_and_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &) 50 - #define irqsafe_cpu_and_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &) 51 - #define irqsafe_cpu_and_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &) 52 - #define irqsafe_cpu_and_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &) 49 + #define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &) 50 + #define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &) 51 + #define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &) 52 + #define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, &) 53 53 54 - #define irqsafe_cpu_or_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |) 55 - #define irqsafe_cpu_or_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |) 56 - #define irqsafe_cpu_or_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |) 57 - #define irqsafe_cpu_or_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |) 54 + #define this_cpu_or_1(pcp, val) arch_this_cpu_to_op(pcp, val, |) 55 + #define this_cpu_or_2(pcp, val) arch_this_cpu_to_op(pcp, val, |) 56 + #define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, |) 57 + #define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, |) 58 58 59 - #define irqsafe_cpu_xor_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^) 60 - #define irqsafe_cpu_xor_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^) 61 - #define irqsafe_cpu_xor_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^) 62 - #define irqsafe_cpu_xor_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^) 59 + #define this_cpu_xor_1(pcp, val) arch_this_cpu_to_op(pcp, val, ^) 60 + #define this_cpu_xor_2(pcp, val) arch_this_cpu_to_op(pcp, val, ^) 61 + #define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^) 62 + #define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^) 63 63 64 - #define arch_irqsafe_cpu_cmpxchg(pcp, oval, nval) \ 64 + #define arch_this_cpu_cmpxchg(pcp, oval, nval) \ 65 65 ({ \ 66 66 typedef typeof(pcp) pcp_op_T__; \ 67 67 pcp_op_T__ ret__; \ ··· 79 79 ret__; \ 80 80 }) 81 81 82 - #define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval) 83 - #define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval) 84 - #define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval) 85 - #define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval) 82 + #define this_cpu_cmpxchg_1(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval) 83 + #define this_cpu_cmpxchg_2(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval) 84 + #define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval) 85 + #define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval) 86 86 87 87 #include <asm-generic/percpu.h> 88 88
-28
arch/x86/include/asm/percpu.h
··· 414 414 #define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval) 415 415 #define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval) 416 416 417 - #define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val) 418 - #define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val) 419 - #define irqsafe_cpu_add_4(pcp, val) percpu_add_op((pcp), val) 420 - #define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) 421 - #define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) 422 - #define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) 423 - #define irqsafe_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val) 424 - #define irqsafe_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val) 425 - #define irqsafe_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val) 426 - #define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) 427 - #define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) 428 - #define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) 429 - #define irqsafe_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval) 430 - #define irqsafe_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval) 431 - #define irqsafe_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval) 432 - 433 417 #ifndef CONFIG_M386 434 418 #define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val) 435 419 #define __this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val) ··· 429 445 #define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) 430 446 #define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) 431 447 432 - #define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) 433 - #define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) 434 - #define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) 435 448 #endif /* !CONFIG_M386 */ 436 449 437 450 #ifdef CONFIG_X86_CMPXCHG64 ··· 445 464 446 465 #define __this_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double 447 466 #define this_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double 448 - #define irqsafe_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double 449 467 #endif /* CONFIG_X86_CMPXCHG64 */ 450 468 451 469 /* ··· 472 492 #define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) 473 493 #define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) 474 494 475 - #define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val) 476 - #define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) 477 - #define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) 478 - #define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) 479 - #define irqsafe_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) 480 - #define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) 481 - 482 495 /* 483 496 * Pretty complex macro to generate cmpxchg16 instruction. The instruction 484 497 * is not supported on early AMD64 processors so we must be able to emulate ··· 494 521 495 522 #define __this_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double 496 523 #define this_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double 497 - #define irqsafe_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double 498 524 499 525 #endif 500 526
+2 -2
include/linux/netdevice.h
··· 2155 2155 */ 2156 2156 static inline void dev_put(struct net_device *dev) 2157 2157 { 2158 - irqsafe_cpu_dec(*dev->pcpu_refcnt); 2158 + this_cpu_dec(*dev->pcpu_refcnt); 2159 2159 } 2160 2160 2161 2161 /** ··· 2166 2166 */ 2167 2167 static inline void dev_hold(struct net_device *dev) 2168 2168 { 2169 - irqsafe_cpu_inc(*dev->pcpu_refcnt); 2169 + this_cpu_inc(*dev->pcpu_refcnt); 2170 2170 } 2171 2171 2172 2172 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
+2 -2
include/linux/netfilter/x_tables.h
··· 471 471 * 472 472 * Begin packet processing : all readers must wait the end 473 473 * 1) Must be called with preemption disabled 474 - * 2) softirqs must be disabled too (or we should use irqsafe_cpu_add()) 474 + * 2) softirqs must be disabled too (or we should use this_cpu_add()) 475 475 * Returns : 476 476 * 1 if no recursion on this cpu 477 477 * 0 if recursion detected ··· 503 503 * 504 504 * End packet processing : all readers can proceed 505 505 * 1) Must be called with preemption disabled 506 - * 2) softirqs must be disabled too (or we should use irqsafe_cpu_add()) 506 + * 2) softirqs must be disabled too (or we should use this_cpu_add()) 507 507 */ 508 508 static inline void xt_write_recseq_end(unsigned int addend) 509 509 {
+22 -168
include/linux/percpu.h
··· 172 172 * equal char, int or long. percpu_read() evaluates to a lvalue and 173 173 * all others to void. 174 174 * 175 - * These operations are guaranteed to be atomic w.r.t. preemption. 176 - * The generic versions use plain get/put_cpu_var(). Archs are 175 + * These operations are guaranteed to be atomic. 176 + * The generic versions disable interrupts. Archs are 177 177 * encouraged to implement single-instruction alternatives which don't 178 - * require preemption protection. 178 + * require protection. 179 179 */ 180 180 #ifndef percpu_read 181 181 # define percpu_read(var) \ ··· 347 347 348 348 #define _this_cpu_generic_to_op(pcp, val, op) \ 349 349 do { \ 350 - preempt_disable(); \ 350 + unsigned long flags; \ 351 + local_irq_save(flags); \ 351 352 *__this_cpu_ptr(&(pcp)) op val; \ 352 - preempt_enable(); \ 353 + local_irq_restore(flags); \ 353 354 } while (0) 354 355 355 356 #ifndef this_cpu_write ··· 448 447 #define _this_cpu_generic_add_return(pcp, val) \ 449 448 ({ \ 450 449 typeof(pcp) ret__; \ 451 - preempt_disable(); \ 450 + unsigned long flags; \ 451 + local_irq_save(flags); \ 452 452 __this_cpu_add(pcp, val); \ 453 453 ret__ = __this_cpu_read(pcp); \ 454 - preempt_enable(); \ 454 + local_irq_restore(flags); \ 455 455 ret__; \ 456 456 }) 457 457 ··· 478 476 479 477 #define _this_cpu_generic_xchg(pcp, nval) \ 480 478 ({ typeof(pcp) ret__; \ 481 - preempt_disable(); \ 479 + unsigned long flags; \ 480 + local_irq_save(flags); \ 482 481 ret__ = __this_cpu_read(pcp); \ 483 482 __this_cpu_write(pcp, nval); \ 484 - preempt_enable(); \ 483 + local_irq_restore(flags); \ 485 484 ret__; \ 486 485 }) 487 486 ··· 504 501 #endif 505 502 506 503 #define _this_cpu_generic_cmpxchg(pcp, oval, nval) \ 507 - ({ typeof(pcp) ret__; \ 508 - preempt_disable(); \ 504 + ({ \ 505 + typeof(pcp) ret__; \ 506 + unsigned long flags; \ 507 + local_irq_save(flags); \ 509 508 ret__ = __this_cpu_read(pcp); \ 510 509 if (ret__ == (oval)) \ 511 510 __this_cpu_write(pcp, nval); \ 512 - preempt_enable(); \ 511 + local_irq_restore(flags); \ 513 512 ret__; \ 514 513 }) 515 514 ··· 543 538 #define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ 544 539 ({ \ 545 540 int ret__; \ 546 - preempt_disable(); \ 541 + unsigned long flags; \ 542 + local_irq_save(flags); \ 547 543 ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \ 548 544 oval1, oval2, nval1, nval2); \ 549 - preempt_enable(); \ 545 + local_irq_restore(flags); \ 550 546 ret__; \ 551 547 }) 552 548 ··· 573 567 #endif 574 568 575 569 /* 576 - * Generic percpu operations that do not require preemption handling. 570 + * Generic percpu operations for context that are safe from preemption/interrupts. 577 571 * Either we do not care about races or the caller has the 578 - * responsibility of handling preemptions issues. Arch code can still 572 + * responsibility of handling preemption/interrupt issues. Arch code can still 579 573 * override these instructions since the arch per cpu code may be more 580 574 * efficient and may actually get race freeness for free (that is the 581 575 * case for x86 for example). ··· 806 800 # endif 807 801 # define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ 808 802 __pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) 809 - #endif 810 - 811 - /* 812 - * IRQ safe versions of the per cpu RMW operations. Note that these operations 813 - * are *not* safe against modification of the same variable from another 814 - * processors (which one gets when using regular atomic operations) 815 - * They are guaranteed to be atomic vs. local interrupts and 816 - * preemption only. 817 - */ 818 - #define irqsafe_cpu_generic_to_op(pcp, val, op) \ 819 - do { \ 820 - unsigned long flags; \ 821 - local_irq_save(flags); \ 822 - *__this_cpu_ptr(&(pcp)) op val; \ 823 - local_irq_restore(flags); \ 824 - } while (0) 825 - 826 - #ifndef irqsafe_cpu_add 827 - # ifndef irqsafe_cpu_add_1 828 - # define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=) 829 - # endif 830 - # ifndef irqsafe_cpu_add_2 831 - # define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=) 832 - # endif 833 - # ifndef irqsafe_cpu_add_4 834 - # define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=) 835 - # endif 836 - # ifndef irqsafe_cpu_add_8 837 - # define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=) 838 - # endif 839 - # define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val)) 840 - #endif 841 - 842 - #ifndef irqsafe_cpu_sub 843 - # define irqsafe_cpu_sub(pcp, val) irqsafe_cpu_add((pcp), -(val)) 844 - #endif 845 - 846 - #ifndef irqsafe_cpu_inc 847 - # define irqsafe_cpu_inc(pcp) irqsafe_cpu_add((pcp), 1) 848 - #endif 849 - 850 - #ifndef irqsafe_cpu_dec 851 - # define irqsafe_cpu_dec(pcp) irqsafe_cpu_sub((pcp), 1) 852 - #endif 853 - 854 - #ifndef irqsafe_cpu_and 855 - # ifndef irqsafe_cpu_and_1 856 - # define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=) 857 - # endif 858 - # ifndef irqsafe_cpu_and_2 859 - # define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=) 860 - # endif 861 - # ifndef irqsafe_cpu_and_4 862 - # define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=) 863 - # endif 864 - # ifndef irqsafe_cpu_and_8 865 - # define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=) 866 - # endif 867 - # define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (val)) 868 - #endif 869 - 870 - #ifndef irqsafe_cpu_or 871 - # ifndef irqsafe_cpu_or_1 872 - # define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=) 873 - # endif 874 - # ifndef irqsafe_cpu_or_2 875 - # define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=) 876 - # endif 877 - # ifndef irqsafe_cpu_or_4 878 - # define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=) 879 - # endif 880 - # ifndef irqsafe_cpu_or_8 881 - # define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=) 882 - # endif 883 - # define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (val)) 884 - #endif 885 - 886 - #ifndef irqsafe_cpu_xor 887 - # ifndef irqsafe_cpu_xor_1 888 - # define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=) 889 - # endif 890 - # ifndef irqsafe_cpu_xor_2 891 - # define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=) 892 - # endif 893 - # ifndef irqsafe_cpu_xor_4 894 - # define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=) 895 - # endif 896 - # ifndef irqsafe_cpu_xor_8 897 - # define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=) 898 - # endif 899 - # define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (val)) 900 - #endif 901 - 902 - #define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval) \ 903 - ({ \ 904 - typeof(pcp) ret__; \ 905 - unsigned long flags; \ 906 - local_irq_save(flags); \ 907 - ret__ = __this_cpu_read(pcp); \ 908 - if (ret__ == (oval)) \ 909 - __this_cpu_write(pcp, nval); \ 910 - local_irq_restore(flags); \ 911 - ret__; \ 912 - }) 913 - 914 - #ifndef irqsafe_cpu_cmpxchg 915 - # ifndef irqsafe_cpu_cmpxchg_1 916 - # define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval) 917 - # endif 918 - # ifndef irqsafe_cpu_cmpxchg_2 919 - # define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval) 920 - # endif 921 - # ifndef irqsafe_cpu_cmpxchg_4 922 - # define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval) 923 - # endif 924 - # ifndef irqsafe_cpu_cmpxchg_8 925 - # define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval) 926 - # endif 927 - # define irqsafe_cpu_cmpxchg(pcp, oval, nval) \ 928 - __pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval) 929 - #endif 930 - 931 - #define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ 932 - ({ \ 933 - int ret__; \ 934 - unsigned long flags; \ 935 - local_irq_save(flags); \ 936 - ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \ 937 - oval1, oval2, nval1, nval2); \ 938 - local_irq_restore(flags); \ 939 - ret__; \ 940 - }) 941 - 942 - #ifndef irqsafe_cpu_cmpxchg_double 943 - # ifndef irqsafe_cpu_cmpxchg_double_1 944 - # define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ 945 - irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) 946 - # endif 947 - # ifndef irqsafe_cpu_cmpxchg_double_2 948 - # define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ 949 - irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) 950 - # endif 951 - # ifndef irqsafe_cpu_cmpxchg_double_4 952 - # define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ 953 - irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) 954 - # endif 955 - # ifndef irqsafe_cpu_cmpxchg_double_8 956 - # define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ 957 - irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) 958 - # endif 959 - # define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ 960 - __pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) 961 803 #endif 962 804 963 805 #endif /* __LINUX_PERCPU_H */
+7 -7
include/net/snmp.h
··· 129 129 __this_cpu_inc(mib[0]->mibs[field]) 130 130 131 131 #define SNMP_INC_STATS_USER(mib, field) \ 132 - irqsafe_cpu_inc(mib[0]->mibs[field]) 132 + this_cpu_inc(mib[0]->mibs[field]) 133 133 134 134 #define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \ 135 135 atomic_long_inc(&mib->mibs[field]) 136 136 137 137 #define SNMP_INC_STATS(mib, field) \ 138 - irqsafe_cpu_inc(mib[0]->mibs[field]) 138 + this_cpu_inc(mib[0]->mibs[field]) 139 139 140 140 #define SNMP_DEC_STATS(mib, field) \ 141 - irqsafe_cpu_dec(mib[0]->mibs[field]) 141 + this_cpu_dec(mib[0]->mibs[field]) 142 142 143 143 #define SNMP_ADD_STATS_BH(mib, field, addend) \ 144 144 __this_cpu_add(mib[0]->mibs[field], addend) 145 145 146 146 #define SNMP_ADD_STATS_USER(mib, field, addend) \ 147 - irqsafe_cpu_add(mib[0]->mibs[field], addend) 147 + this_cpu_add(mib[0]->mibs[field], addend) 148 148 149 149 #define SNMP_ADD_STATS(mib, field, addend) \ 150 - irqsafe_cpu_add(mib[0]->mibs[field], addend) 150 + this_cpu_add(mib[0]->mibs[field], addend) 151 151 /* 152 152 * Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr" 153 153 * to make @ptr a non-percpu pointer. 154 154 */ 155 155 #define SNMP_UPD_PO_STATS(mib, basefield, addend) \ 156 156 do { \ 157 - irqsafe_cpu_inc(mib[0]->mibs[basefield##PKTS]); \ 158 - irqsafe_cpu_add(mib[0]->mibs[basefield##OCTETS], addend); \ 157 + this_cpu_inc(mib[0]->mibs[basefield##PKTS]); \ 158 + this_cpu_add(mib[0]->mibs[basefield##OCTETS], addend); \ 159 159 } while (0) 160 160 #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \ 161 161 do { \
+3 -3
mm/slub.c
··· 1978 1978 page->pobjects = pobjects; 1979 1979 page->next = oldpage; 1980 1980 1981 - } while (irqsafe_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage); 1981 + } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage); 1982 1982 stat(s, CPU_PARTIAL_FREE); 1983 1983 return pobjects; 1984 1984 } ··· 2304 2304 * Since this is without lock semantics the protection is only against 2305 2305 * code executing on this cpu *not* from access by other cpus. 2306 2306 */ 2307 - if (unlikely(!irqsafe_cpu_cmpxchg_double( 2307 + if (unlikely(!this_cpu_cmpxchg_double( 2308 2308 s->cpu_slab->freelist, s->cpu_slab->tid, 2309 2309 object, tid, 2310 2310 get_freepointer_safe(s, object), next_tid(tid)))) { ··· 2534 2534 if (likely(page == c->page)) { 2535 2535 set_freepointer(s, object, c->freelist); 2536 2536 2537 - if (unlikely(!irqsafe_cpu_cmpxchg_double( 2537 + if (unlikely(!this_cpu_cmpxchg_double( 2538 2538 s->cpu_slab->freelist, s->cpu_slab->tid, 2539 2539 c->freelist, tid, 2540 2540 object, next_tid(tid)))) {
+2 -2
net/caif/caif_dev.c
··· 76 76 77 77 static void caifd_put(struct caif_device_entry *e) 78 78 { 79 - irqsafe_cpu_dec(*e->pcpu_refcnt); 79 + this_cpu_dec(*e->pcpu_refcnt); 80 80 } 81 81 82 82 static void caifd_hold(struct caif_device_entry *e) 83 83 { 84 - irqsafe_cpu_inc(*e->pcpu_refcnt); 84 + this_cpu_inc(*e->pcpu_refcnt); 85 85 } 86 86 87 87 static int caifd_refcnt_read(struct caif_device_entry *e)
+2 -2
net/caif/cffrml.c
··· 177 177 { 178 178 struct cffrml *this = container_obj(layr); 179 179 if (layr != NULL && this->pcpu_refcnt != NULL) 180 - irqsafe_cpu_dec(*this->pcpu_refcnt); 180 + this_cpu_dec(*this->pcpu_refcnt); 181 181 } 182 182 183 183 void cffrml_hold(struct cflayer *layr) 184 184 { 185 185 struct cffrml *this = container_obj(layr); 186 186 if (layr != NULL && this->pcpu_refcnt != NULL) 187 - irqsafe_cpu_inc(*this->pcpu_refcnt); 187 + this_cpu_inc(*this->pcpu_refcnt); 188 188 } 189 189 190 190 int cffrml_refcnt_read(struct cflayer *layr)