arm64: hugetlb: partial revert of 66b3923a1a0f
[deliverable/linux.git] / arch / arm64 / mm / cache.S
1 /*
2 * Cache maintenance
3 *
4 * Copyright (C) 2001 Deep Blue Solutions Ltd.
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include <linux/errno.h>
21 #include <linux/linkage.h>
22 #include <linux/init.h>
23 #include <asm/assembler.h>
24 #include <asm/cpufeature.h>
25 #include <asm/alternative.h>
26
27 #include "proc-macros.S"
28
29 /*
30 * flush_icache_range(start,end)
31 *
32 * Ensure that the I and D caches are coherent within specified region.
33 * This is typically used when code has been written to a memory region,
34 * and will be executed.
35 *
36 * - start - virtual start address of region
37 * - end - virtual end address of region
38 */
39 ENTRY(flush_icache_range)
40 /* FALLTHROUGH */
41 
42 /*
43 * __flush_cache_user_range(start,end)
44 *
45 * Ensure that the I and D caches are coherent within specified region.
46 * This is typically used when code has been written to a memory region,
47 * and will be executed.
48 *
49 * - start - virtual start address of region
50 * - end - virtual end address of region
 *
 * Returns 0 in x0 on success, or -EFAULT if an access to the (possibly
 * user-mapped) region faults; the USER() macro registers an exception
 * fixup that redirects a faulting access to label 9 below.
51 */
52 ENTRY(__flush_cache_user_range)
53 dcache_line_size x2, x3			// x2 = D-cache line size in bytes (x3 scratch)
54 sub x3, x2, #1				// x3 = line-size mask
55 bic x4, x0, x3				// x4 = start, rounded down to a D-cache line
 // Pass 1: clean the D-cache to the Point of Unification, one line at a time.
56 1:
57 USER(9f, dc cvau, x4 ) // clean D line to PoU
58 add x4, x4, x2				// advance to next line
59 cmp x4, x1
60 b.lo 1b					// loop while x4 < end
61 dsb ish					// complete the cleans before touching the I-side
62 
 // Pass 2: invalidate the I-cache over the same range. The I-cache line
 // size may differ from the D-cache line size, so recompute the mask and
 // restart from the (re-aligned) start address in x0.
63 icache_line_size x2, x3
64 sub x3, x2, #1
65 bic x4, x0, x3
66 1:
67 USER(9f, ic ivau, x4 ) // invalidate I line PoU
68 add x4, x4, x2
69 cmp x4, x1
70 b.lo 1b
71 dsb ish					// complete the invalidates
72 isb					// resync the fetched instruction stream
73 mov x0, #0				// success
74 ret
 // Exception fixup target: a faulting dc/ic above lands here.
75 9:
76 mov x0, #-EFAULT
77 ret
 // Both entry points share this body (fallthrough above), so both are
 // closed here.
78 ENDPROC(flush_icache_range)
79 ENDPROC(__flush_cache_user_range)
80
81 /*
82 * __flush_dcache_area(kaddr, size)
83 *
84 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
85 * are cleaned and invalidated to the PoC.
86 *
87 * - kaddr - kernel address
88 * - size - size in question
89 */
 // Clean + invalidate ("civac") each D-cache line covering [kaddr,
 // kaddr+size) to the Point of Coherency, with a full-system ("sy")
 // barrier; the loop itself lives in the dcache_by_line_op macro.
90 ENTRY(__flush_dcache_area)
91 dcache_by_line_op civac, sy, x0, x1, x2, x3
92 ret
93 ENDPIPROC(__flush_dcache_area)
94
95 /*
96 * __clean_dcache_area_pou(kaddr, size)
97 *
98 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
99 * are cleaned to the PoU.
100 *
101 * - kaddr - kernel address
102 * - size - size in question
103 */
 // Clean-only ("cvau") each D-cache line covering [kaddr, kaddr+size)
 // to the Point of Unification, with an inner-shareable ("ish") barrier.
 // Unlike __flush_dcache_area, the lines stay valid in the cache.
104 ENTRY(__clean_dcache_area_pou)
105 dcache_by_line_op cvau, ish, x0, x1, x2, x3
106 ret
107 ENDPROC(__clean_dcache_area_pou)
108
109 /*
110 * __inval_cache_range(start, end)
111 * - start - start address of region
112 * - end - end address of region
113 */
114 ENTRY(__inval_cache_range)
115 /* FALLTHROUGH */
116 
117 /*
118 * __dma_inv_range(start, end)
119 * - start - virtual start address of region
120 * - end - virtual end address of region
 *
 * Invalidate the D-cache over [start, end). Lines that only partially
 * overlap the region (an unaligned start or end) are cleaned AND
 * invalidated instead of just invalidated, so dirty data belonging to
 * neighbouring buffers that share those lines is written back rather
 * than destroyed.
121 */
122 __dma_inv_range:
123 dcache_line_size x2, x3			// x2 = D-cache line size (x3 scratch)
124 sub x3, x2, #1				// x3 = line-size mask
125 tst x1, x3 // end cache line aligned?
126 bic x1, x1, x3				// align end down to a line boundary
127 b.eq 1f
128 dc civac, x1 // clean & invalidate D / U line
 // (the partial line at the end, if any, handled above)
129 1: tst x0, x3 // start cache line aligned?
130 bic x0, x0, x3				// align start down to a line boundary
131 b.eq 2f
132 dc civac, x0 // clean & invalidate D / U line
 // (the partial line at the start — skip the ivac for it)
133 b 3f
 // Main loop: fully-covered interior lines are invalidate-only.
134 2: dc ivac, x0 // invalidate D / U line
135 3: add x0, x0, x2
136 cmp x0, x1
137 b.lo 2b
138 dsb sy					// complete all maintenance before returning
139 ret
140 ENDPIPROC(__inval_cache_range)
141 ENDPROC(__dma_inv_range)
142
143 /*
144 * __dma_clean_range(start, end)
145 * - start - virtual start address of region
146 * - end - virtual end address of region
147 */
 // Clean the D-cache over [start, end) to the Point of Coherency.
 // On CPUs with the ARM64_WORKAROUND_CLEAN_CACHE erratum (presumably a
 // Cortex-A53 "dc cvac" erratum — confirm against the capability's
 // definition), the alternatives framework patches the clean into a
 // clean+invalidate, which is architecturally a superset and safe here.
148 __dma_clean_range:
149 dcache_line_size x2, x3			// x2 = D-cache line size (x3 scratch)
150 sub x3, x2, #1				// x3 = line-size mask
151 bic x0, x0, x3				// align start down to a line boundary
152 1:
153 alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
154 dc cvac, x0				// default: clean to PoC
155 alternative_else
156 dc civac, x0				// workaround: clean & invalidate instead
157 alternative_endif
158 add x0, x0, x2
159 cmp x0, x1
160 b.lo 1b
161 dsb sy					// complete all cleans before returning
162 ret
163 ENDPROC(__dma_clean_range)
164
165 /*
166 * __dma_flush_range(start, end)
167 * - start - virtual start address of region
168 * - end - virtual end address of region
169 */
 // Clean + invalidate the D-cache over [start, end) to the Point of
 // Coherency. Unlike __dma_inv_range there is no partial-line special
 // casing: civac is already the safe operation for shared edge lines.
170 ENTRY(__dma_flush_range)
171 dcache_line_size x2, x3			// x2 = D-cache line size (x3 scratch)
172 sub x3, x2, #1				// x3 = line-size mask
173 bic x0, x0, x3				// align start down to a line boundary
174 1: dc civac, x0 // clean & invalidate D / U line
175 add x0, x0, x2
176 cmp x0, x1
177 b.lo 1b
178 dsb sy					// complete all maintenance before returning
179 ret
180 ENDPIPROC(__dma_flush_range)
181
182 /*
183 * __dma_map_area(start, size, dir)
184 * - start - kernel virtual start address
185 * - size - size of region
186 * - dir - DMA direction
187 */
 // Cache maintenance before a DMA transfer. Converts (start, size) to
 // (start, end) and tail-calls the right primitive:
 //   DMA_FROM_DEVICE: invalidate (device will write, stale lines must go)
 //   otherwise:       clean (CPU-written data must reach memory)
188 ENTRY(__dma_map_area)
189 add x1, x1, x0				// x1 = end = start + size
190 cmp w2, #DMA_FROM_DEVICE
191 b.eq __dma_inv_range
192 b __dma_clean_range
193 ENDPIPROC(__dma_map_area)
194
195 /*
196 * __dma_unmap_area(start, size, dir)
197 * - start - kernel virtual start address
198 * - size - size of region
199 * - dir - DMA direction
200 */
 // Cache maintenance after a DMA transfer. For DMA_TO_DEVICE nothing is
 // needed (the device only read memory); for any other direction the
 // region is invalidated so the CPU sees the device-written data.
201 ENTRY(__dma_unmap_area)
202 add x1, x1, x0				// x1 = end = start + size
203 cmp w2, #DMA_TO_DEVICE
204 b.ne __dma_inv_range			// tail-call; returns directly to caller
205 ret
206 ENDPIPROC(__dma_unmap_area)
This page took 0.044276 seconds and 5 git commands to generate.