#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H

/* Caches aren't brain-dead on the intel. */
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>
#include <asm/uaccess.h>

/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 * Cacheability  : UnCached, WriteCombining, WriteThrough, WriteBack
 * Executability : eXecutable, NoteXecutable
 * Read/Write    : ReadOnly, ReadWrite
 * Presence      : NotPresent
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee that mappings other than the requested one are in any
 *   particular state, other than that they do not violate the rules of
 *   the CPU you have. Do not depend on any effects on other mappings;
 *   CPUs other than the one you have may have more relaxed rules.
 * The caller is required to take care of these.
 */

int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wt(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wt(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);

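/*
 * Illustrative usage (a sketch; 'buf' and 'npages' are hypothetical):
 * make a buffer's mapping read-only and executable, then restore the
 * default protections.
 *
 *	unsigned long addr = (unsigned long)buf;
 *
 *	set_memory_ro(addr, npages);
 *	set_memory_x(addr, npages);
 *	...
 *	set_memory_nx(addr, npages);
 *	set_memory_rw(addr, npages);
 */
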
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wt(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);

int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wt(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);

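/*
 * Batched usage sketch (assumes the caller filled 'addrs' with 'count'
 * discontiguous kernel virtual addresses): the *_array variants change
 * all pages with a single round of TLB and cache maintenance rather
 * than one round per address.
 *
 *	set_memory_array_uc(addrs, count);
 *	...
 *	set_memory_array_wb(addrs, count);
 */
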
/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the set_memory_* functions. See the description of the
 * set_memory_* API above for more details on its conventions.
 *
 * These APIs should be considered *deprecated* and are likely going to
 * be removed in the future.
 * The reason for this is their implicit operation on the 1:1 mapping
 * only, which makes them not generally useful.
 *
 * Specifically, many users of the old APIs had a virtual address and
 * called virt_to_page() or vmalloc_to_page() on that address to get
 * the struct page * that the old API required.
 * To convert these cases, use set_memory_*() on the original virtual
 * address; do not use these functions (see the conversion sketch
 * below).
 */

int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);

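/*
 * Conversion sketch for the deprecated page-based calls ('vaddr' is a
 * hypothetical kernel virtual address):
 *
 *	set_pages_uc(virt_to_page(vaddr), 1);	(deprecated)
 *
 * becomes a call on the original virtual address:
 *
 *	set_memory_uc((unsigned long)vaddr, 1);
 */
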
void clflush_cache_range(void *addr, unsigned int size);

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif

#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
static inline int rodata_test(void)
{
	return 0;
}
#endif

#ifdef ARCH_HAS_NOCACHE_UACCESS

/**
 * arch_memcpy_to_pmem - copy data to persistent memory
 * @dst: destination buffer for the copy
 * @src: source buffer for the copy
 * @n: length of the copy in bytes
 *
 * Copy data to persistent memory media via non-temporal stores so that
 * a subsequent arch_wmb_pmem() can flush cpu and memory controller
 * write buffers to guarantee durability.
 */
static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
		size_t n)
{
	int unwritten;

	/*
	 * We are copying between two kernel buffers; if
	 * __copy_from_user_inatomic_nocache() returns an error (page
	 * fault), we would have already reported a general protection
	 * fault before the WARN+BUG.
	 */
	unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
			(void __user *) src, n);
	if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
				__func__, dst, src, unwritten))
		BUG();
}

/**
 * arch_wmb_pmem - synchronize writes to persistent memory
 *
 * After a series of arch_memcpy_to_pmem() operations this drains data
 * from cpu write buffers and any platform (memory controller) buffers
 * to ensure that written data is durable on persistent memory media.
 */
static inline void arch_wmb_pmem(void)
{
	/*
	 * wmb() to 'sfence' all previous writes such that they are
	 * architecturally visible to 'pcommit'. Note that we've
	 * already arranged for pmem writes to avoid the cache via
	 * arch_memcpy_to_pmem().
	 */
	wmb();
	pcommit_sfence();
}

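/*
 * Durability sketch (assumes a persistent memory mapping 'dst' and a
 * source buffer 'src' of 'len' bytes): copy via non-temporal stores,
 * then drain CPU and platform write buffers so the data is durable
 * on media.
 *
 *	if (__arch_has_wmb_pmem()) {
 *		arch_memcpy_to_pmem(dst, src, len);
 *		arch_wmb_pmem();
 *	}
 */
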
static inline bool __arch_has_wmb_pmem(void)
{
#ifdef CONFIG_X86_64
	/*
	 * We require that wmb() be an 'sfence', which is only
	 * guaranteed on 64-bit builds.
	 */
	return static_cpu_has(X86_FEATURE_PCOMMIT);
#else
	return false;
#endif
}
#else /* ARCH_HAS_NOCACHE_UACCESS i.e. ARCH=um */
extern void arch_memcpy_to_pmem(void __pmem *dst, const void *src, size_t n);
extern void arch_wmb_pmem(void);

static inline bool __arch_has_wmb_pmem(void)
{
	return false;
}
#endif

#endif /* _ASM_X86_CACHEFLUSH_H */