perf tools: Move powerpc barrier.h stuff to tools/arch/powerpc/include/asm/barrier.h
Author:     Arnaldo Carvalho de Melo <acme@redhat.com>
AuthorDate: Wed, 6 May 2015 21:35:20 +0000 (18:35 -0300)
Committer:  Arnaldo Carvalho de Melo <acme@redhat.com>
CommitDate: Fri, 8 May 2015 19:05:03 +0000 (16:05 -0300)
We will need it for atomic.h, so move it from its ad-hoc location under
tools/perf/ to a tools/ mirror of the kernel's arch/ hierarchy.

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: David Ahern <dsahern@gmail.com>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-pk6f5x9vh8k2ebzhh9uj5wo2@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/arch/powerpc/include/asm/barrier.h [new file with mode: 0644]
tools/include/asm/barrier.h
tools/perf/MANIFEST
tools/perf/perf-sys.h

diff --git a/tools/arch/powerpc/include/asm/barrier.h b/tools/arch/powerpc/include/asm/barrier.h
new file mode 100644
index 0000000..b23aee8
--- /dev/null
+++ b/tools/arch/powerpc/include/asm/barrier.h
@@ -0,0 +1,29 @@
+/*
+ * Copied from the kernel sources:
+ *
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _TOOLS_LINUX_ASM_POWERPC_BARRIER_H
+#define _TOOLS_LINUX_ASM_POWERPC_BARRIER_H
+
+/*
+ * Memory barrier.
+ * The sync instruction guarantees that all memory accesses initiated
+ * by this processor have been performed (with respect to all other
+ * mechanisms that access memory).  The eieio instruction is a barrier
+ * providing an ordering (separately) for (a) cacheable stores and (b)
+ * loads and stores to non-cacheable memory (e.g. I/O devices).
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ *
+ * *mb() variants without smp_ prefix must order all types of memory
+ * operations with one another. sync is the only instruction sufficient
+ * to do this.
+ */
+#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
+#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
+#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
+
+#endif /* _TOOLS_LINUX_ASM_POWERPC_BARRIER_H */
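For context, perf cares about these barriers mostly when draining the mmap'ed
event ring buffer it shares with the kernel: the reader must order its load of
data_head before reading the records, and order those reads before publishing
data_tail back to the kernel. Below is a minimal reader-side sketch of that
pattern using the mb()/rmb() macros defined above; the struct and field names
come from the UAPI perf_event_mmap_page, while the helper names and the
assumption that tools/include is on the include path are illustrative only.

    #include <linux/perf_event.h>   /* struct perf_event_mmap_page */
    #include <asm/barrier.h>        /* mb()/rmb()/wmb(), assumed to resolve via the tools/include dispatch header */

    /* Hypothetical reader-side helpers showing the documented ordering. */
    static inline __u64 ring_read_head(struct perf_event_mmap_page *pc)
    {
        __u64 head = pc->data_head;

        rmb();                  /* order the head load before reading any records */
        return head;
    }

    static inline void ring_write_tail(struct perf_event_mmap_page *pc, __u64 tail)
    {
        mb();                   /* finish consuming the records before... */
        pc->data_tail = tail;   /* ...telling the kernel the space is free */
    }
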
diff --git a/tools/include/asm/barrier.h b/tools/include/asm/barrier.h
index 9a55c12d6f41d7c6bc58cb936beba5a23538189e..249f67238668690abccf0cb175d462835d05980b 100644
--- a/tools/include/asm/barrier.h
+++ b/tools/include/asm/barrier.h
@@ -1,3 +1,5 @@
 #if defined(__i386__) || defined(__x86_64__)
 #include "../../arch/x86/include/asm/barrier.h"
+#elif defined(__powerpc__)
+#include "../../arch/powerpc/include/asm/barrier.h"
 #endif
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 594737a17b1b47c33e0a16331191ddd29d98affc..4b3346e81d8549b5b0dd64f4da3f13766fa2049b 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -1,4 +1,5 @@
 tools/perf
+tools/arch/powerpc/include/asm/barrier.h
 tools/arch/x86/include/asm/barrier.h
 tools/scripts
 tools/build
diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h
index 781d441cffd78855b91eaefcc13594d05a256f0f..01c06954bf16e88406157a1c1a44e4aff251aa70 100644
--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -38,9 +38,6 @@
 
 #ifdef __powerpc__
 #include "../../arch/powerpc/include/uapi/asm/unistd.h"
-#define mb()           asm volatile ("sync" ::: "memory")
-#define wmb()          asm volatile ("sync" ::: "memory")
-#define rmb()          asm volatile ("sync" ::: "memory")
 #define CPUINFO_PROC   {"cpu"}
 #endif
 