Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf tools: Move sparc barrier.h stuff to tools/arch/sparc/include/asm/barrier.h

We will need it for atomic.h, so move it from the ad-hoc tools/perf/
place to a tools/ subset of the kernel arch/ hierarchy.

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: David Ahern <dsahern@gmail.com>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-f0d04b9x63grt30nahpw9ei0@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

+61 -9
+8
tools/arch/sparc/include/asm/barrier.h
··· 1 + #ifndef ___TOOLS_LINUX_ASM_SPARC_BARRIER_H 2 + #define ___TOOLS_LINUX_ASM_SPARC_BARRIER_H 3 + #if defined(__sparc__) && defined(__arch64__) 4 + #include "barrier_64.h" 5 + #else 6 + #include "barrier_32.h" 7 + #endif 8 + #endif
+6
tools/arch/sparc/include/asm/barrier_32.h
··· 1 + #ifndef __TOOLS_PERF_SPARC_BARRIER_H 2 + #define __TOOLS_PERF_SPARC_BARRIER_H 3 + 4 + #include <asm-generic/barrier.h> 5 + 6 + #endif /* !(__TOOLS_PERF_SPARC_BARRIER_H) */
+42
tools/arch/sparc/include/asm/barrier_64.h
#ifndef __TOOLS_LINUX_SPARC64_BARRIER_H
#define __TOOLS_LINUX_SPARC64_BARRIER_H

/* Copied from the kernel sources to tools/:
 *
 * These are here in an effort to more fully work around Spitfire Errata
 * #51. Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur. Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
/* Full memory barrier via a membar placed in the delay slot of a
 * "branch always, predicted taken" — see errata note above. */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)

/* The kernel always executes in TSO memory model these days,
 * and furthermore most sparc64 chips implement more stringent
 * memory ordering than required by the specifications.
 */
/* Under TSO only StoreLoad can reorder, so mb() needs a real membar
 * while rmb()/wmb() reduce to compiler barriers. */
#define mb()	membar_safe("#StoreLoad")
#define rmb()	__asm__ __volatile__("":::"memory")
#define wmb()	__asm__ __volatile__("":::"memory")

#endif /* !(__TOOLS_LINUX_SPARC64_BARRIER_H) */
+2
tools/include/asm/barrier.h
 6  6   #include "../../arch/s390/include/asm/barrier.h"
 7  7   #elif defined(__sh__)
 8  8   #include "../../arch/sh/include/asm/barrier.h"
    9 + #elif defined(__sparc__)
   10 + #include "../../arch/sparc/include/asm/barrier.h"
 9 11   #endif
+3
tools/perf/MANIFEST
 2  2   tools/arch/powerpc/include/asm/barrier.h
 3  3   tools/arch/s390/include/asm/barrier.h
 4  4   tools/arch/sh/include/asm/barrier.h
    5 + tools/arch/sparc/include/asm/barrier.h
    6 + tools/arch/sparc/include/asm/barrier_32.h
    7 + tools/arch/sparc/include/asm/barrier_64.h
 5  8   tools/arch/x86/include/asm/barrier.h
 6  9   tools/scripts
 7 10   tools/build
-9
tools/perf/perf-sys.h
57 57   #endif
58 58
59 59   #ifdef __sparc__
60    - #ifdef __LP64__
61    - #define mb() asm volatile("ba,pt %%xcc, 1f\n" \
62    -              "membar #StoreLoad\n" \
63    -              "1:\n":::"memory")
64    - #else
65    - #define mb() asm volatile("":::"memory")
66    - #endif
67    - #define wmb() asm volatile("":::"memory")
68    - #define rmb() asm volatile("":::"memory")
69 60   #define CPUINFO_PROC {"cpu"}
70 61   #endif
71 62