/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>

/*
 * flush_icache_range(start,end)
 *
 * Ensure that the I and D caches are coherent within specified region.
 * This is typically used when code has been written to a memory region,
 * and will be executed.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
ENTRY(flush_icache_range)
	/* FALLTHROUGH */

/*
 * __flush_cache_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within specified region.
 * This is typically used when code has been written to a memory region,
 * and will be executed.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
ENTRY(__flush_cache_user_range)
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3
1:
user_alt 9f, "dc cvau, x4",  "dc civac, x4",  ARM64_WORKAROUND_CLEAN_CACHE
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	ish

	icache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3
1:
USER(9f, ic	ivau, x4 )			// invalidate I line PoU
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	ish
	isb
	mov	x0, #0
	ret
9:
	mov	x0, #-EFAULT
	ret
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)
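
/*
 * Implementation note: both entry points walk the range twice: the D-cache
 * lines are first cleaned to the PoU (dc cvau, upgraded to dc civac on CPUs
 * with the ARM64_WORKAROUND_CLEAN_CACHE erratum), then the corresponding
 * I-cache lines are invalidated (ic ivau).  The USER()/user_alt annotations
 * add exception-table fixups, so a fault on a user address should branch to
 * label 9 and return -EFAULT instead of 0.
 */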

/*
 * __flush_dcache_area(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned and invalidated to the PoC.
 *
 * - kaddr - kernel address
 * - size  - size in question
 */
ENTRY(__flush_dcache_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__flush_dcache_area)
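
/*
 * For reference, dcache_by_line_op (asm/assembler.h) expands to roughly the
 * following loop; the exact macro body may differ between kernel versions:
 *
 *	dcache_line_size tmp1, tmp2	// minimum D line size from CTR_EL0
 *	add	size, kaddr, size	// turn size into an end address
 *	sub	tmp2, tmp1, #1
 *	bic	kaddr, kaddr, tmp2	// align base to a cache line
 * 1:	dc	<op>, kaddr		// e.g. civac / cvau / cvac
 *	add	kaddr, kaddr, tmp1
 *	cmp	kaddr, size
 *	b.lo	1b
 *	dsb	<domain>		// e.g. sy / ish
 */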

/*
 * __clean_dcache_area_pou(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned to the PoU.
 *
 * - kaddr - kernel address
 * - size  - size in question
 */
ENTRY(__clean_dcache_area_pou)
	dcache_by_line_op cvau, ish, x0, x1, x2, x3
	ret
ENDPROC(__clean_dcache_area_pou)
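
/*
 * PoU (Point of Unification) is the point at which this CPU's instruction
 * and data caches are guaranteed to see the same copy of a location; PoC
 * (Point of Coherency) is the point visible to all observers, including
 * non-caching DMA masters.  Hence cvau/ish above versus civac/sy in
 * __flush_dcache_area.
 */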
106
c218bca7 107/*
d34fdb70
KL
108 * __dma_inv_area(start, size)
109 * - start - virtual start address of region
110 * - size - size in question
c218bca7 111 */
d34fdb70
KL
112__dma_inv_area:
113 add x1, x1, x0
c218bca7
CM
114 /* FALLTHROUGH */
115
7363590d 116/*
d34fdb70
KL
117 * __inval_cache_range(start, end)
118 * - start - start address of region
119 * - end - end address of region
7363590d 120 */
d34fdb70 121ENTRY(__inval_cache_range)
7363590d
CM
122 dcache_line_size x2, x3
123 sub x3, x2, #1
ebf81a93 124 tst x1, x3 // end cache line aligned?
7363590d 125 bic x1, x1, x3
ebf81a93
CM
126 b.eq 1f
127 dc civac, x1 // clean & invalidate D / U line
1281: tst x0, x3 // start cache line aligned?
129 bic x0, x0, x3
130 b.eq 2f
131 dc civac, x0 // clean & invalidate D / U line
132 b 3f
1332: dc ivac, x0 // invalidate D / U line
1343: add x0, x0, x2
7363590d 135 cmp x0, x1
ebf81a93 136 b.lo 2b
7363590d
CM
137 dsb sy
138 ret
20791846 139ENDPIPROC(__inval_cache_range)
d34fdb70
KL
140ENDPROC(__dma_inv_area)
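
/*
 * Rationale for the alignment handling above: a cache line that is only
 * partially covered by [start, end) may also hold live data owned by the
 * CPU, so it is cleaned and invalidated (dc civac) rather than simply
 * invalidated (dc ivac), which would discard that data.  Fully covered
 * lines are just invalidated.
 */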

/*
 * __clean_dcache_area_poc(kaddr, size)
 *
 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 * are cleaned to the PoC.
 *
 * - kaddr - kernel address
 * - size  - size in question
 */
ENTRY(__clean_dcache_area_poc)
	/* FALLTHROUGH */

/*
 * __dma_clean_area(start, size)
 * - start - virtual start address of region
 * - size  - size in question
 */
__dma_clean_area:
	dcache_by_line_op cvac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__clean_dcache_area_poc)
ENDPROC(__dma_clean_area)

/*
 * __dma_flush_area(start, size)
 *
 * clean & invalidate D / U line
 *
 * - start - virtual start address of region
 * - size  - size in question
 */
ENTRY(__dma_flush_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__dma_flush_area)

/*
 * __dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(__dma_map_area)
	cmp	w2, #DMA_FROM_DEVICE
	b.eq	__dma_inv_area
	b	__dma_clean_area
ENDPIPROC(__dma_map_area)

/*
 * __dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(__dma_unmap_area)
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_area
	ret
ENDPIPROC(__dma_unmap_area)
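
/*
 * Summary of the DMA maintenance choices above:
 *
 *	map,   DMA_FROM_DEVICE   -> __dma_inv_area   (invalidate to PoC)
 *	map,   other directions  -> __dma_clean_area (clean to PoC)
 *	unmap, DMA_TO_DEVICE     -> nothing
 *	unmap, other directions  -> __dma_inv_area   (invalidate to PoC)
 */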