/*
 * arch/score/mm/cache.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 * Lennox Wu <lennox.wu@sunplusct.com>
 * Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/cache.h>	/* L1_CACHE_BYTES */
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>

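/*
 * The generic cache-maintenance entry points below are function
 * pointers rather than direct calls: cpu_cache_init() binds them to
 * the Score7 ("s7_") implementations at boot, presumably leaving room
 * for other Score cores to install their own routines later.
 */
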
/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma,
		unsigned long page, unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);
void (*flush_cache_sigtramp)(unsigned long addr);
void (*flush_data_cache_page)(unsigned long addr);
EXPORT_SYMBOL(flush_data_cache_page);
void (*flush_icache_all)(void);

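/*
 * Score7 "cache" instruction opcodes used in this file, as inferred
 * from the operations and comments below (not from an architecture
 * manual, so treat these as informed guesses):
 *   0x02 - invalidate one I-cache line
 *   0x0d - write back/invalidate one cache line (sigtramp path)
 *   0x0e - write back and invalidate one D-cache line
 *   0x10 - invalidate the entire I-cache
 *   0x1a - drain the write buffer
 *   0x1f - write back and invalidate the entire D-cache
 */
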
/* Score7 cache operations */
void s7_flush_cache_all(void);	/* forward declaration, needed by cpu_cache_init() */
static void s7___flush_cache_all(void);
static void s7_flush_cache_mm(struct mm_struct *mm);
static void s7_flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
static void s7_flush_cache_page(struct vm_area_struct *vma,
		unsigned long page, unsigned long pfn);
static void s7_flush_icache_range(unsigned long start, unsigned long end);
static void s7_flush_cache_sigtramp(unsigned long addr);
static void s7_flush_data_cache_page(unsigned long addr);
static void s7_flush_dcache_range(unsigned long start, unsigned long end);

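/*
 * Called when a PTE is installed.  PG_arch_1 serves as a "D-cache
 * dirty" marker (presumably set by flush_dcache_page()): if the page
 * belongs to a mapping and is marked dirty, its cache lines are
 * written back here for executable mappings before the bit is
 * cleared, so instruction fetches never see stale data.
 */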
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC);

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) {
		addr = (unsigned long) page_address(page);
		if (exec)
			s7_flush_data_cache_page(addr);
		clear_bit(PG_arch_1, &page->flags);
	}
}

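/*
 * protection_map[] is indexed by the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED
 * bits of vma->vm_flags.  Entries 0-7 are private mappings, where
 * writable combinations get PAGE_COPY for copy-on-write; entries 8-15
 * are the shared equivalents, where write access maps to PAGE_SHARED.
 */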
static inline void setup_protection_map(void)
{
	protection_map[0] = PAGE_NONE;
	protection_map[1] = PAGE_READONLY;
	protection_map[2] = PAGE_COPY;
	protection_map[3] = PAGE_COPY;
	protection_map[4] = PAGE_READONLY;
	protection_map[5] = PAGE_READONLY;
	protection_map[6] = PAGE_COPY;
	protection_map[7] = PAGE_COPY;
	protection_map[8] = PAGE_NONE;
	protection_map[9] = PAGE_READONLY;
	protection_map[10] = PAGE_SHARED;
	protection_map[11] = PAGE_SHARED;
	protection_map[12] = PAGE_READONLY;
	protection_map[13] = PAGE_READONLY;
	protection_map[14] = PAGE_SHARED;
	protection_map[15] = PAGE_SHARED;
}

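/*
 * Bind the generic cache-flush hooks to the Score7 routines.  Note
 * that flush_icache_all, __flush_cache_vmap and __flush_cache_vunmap
 * are declared above but not bound here, so they must not be called
 * until an implementation is installed.
 */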
void __devinit cpu_cache_init(void)
{
	flush_cache_all = s7_flush_cache_all;
	__flush_cache_all = s7___flush_cache_all;
	flush_cache_mm = s7_flush_cache_mm;
	flush_cache_range = s7_flush_cache_range;
	flush_cache_page = s7_flush_cache_page;
	flush_icache_range = s7_flush_icache_range;
	flush_cache_sigtramp = s7_flush_cache_sigtramp;
	flush_data_cache_page = s7_flush_data_cache_page;

	setup_protection_map();
}

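/*
 * Whole-cache operations.  The "la r8, <function>" only gives the
 * cache instruction a valid address to decode; for whole-cache ops
 * the address itself is presumably ignored.  The trailing nops appear
 * to pad the pipeline while the operation completes.
 */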
void s7_flush_icache_all(void)
{
	__asm__ __volatile__(
		"la r8, s7_flush_icache_all\n"
		"cache 0x10, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		: : : "r8");
}

void s7_flush_dcache_all(void)
{
	__asm__ __volatile__(
		"la r8, s7_flush_dcache_all\n"
		"cache 0x1f, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1a, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		: : : "r8");
}

void s7_flush_cache_all(void)
{
	__asm__ __volatile__(
		"la r8, s7_flush_cache_all\n"
		"cache 0x10, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1f, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1a, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		: : : "r8");
}

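/*
 * Identical to s7_flush_cache_all(); kept as a separate function so
 * the __flush_cache_all hook has its own target.
 */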
static void s7___flush_cache_all(void)
{
	__asm__ __volatile__(
		"la r8, s7_flush_cache_all\n"
		"cache 0x10, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1f, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1a, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		: : : "r8");
}

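/*
 * An mm whose context is 0 has presumably never been assigned an
 * address-space ID and so has never run, meaning none of its lines
 * can be in the cache; otherwise fall back to a full flush, since
 * there is no per-address-space flush here.
 */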
static void s7_flush_cache_mm(struct mm_struct *mm)
{
	if (!(mm->context))
		return;
	s7_flush_cache_all();
}

/*
 * Flushing a range precisely can take a long time, and the range may
 * span pages that are not present.  So walk the range page by page,
 * check whether each page is present, and flush only the resident
 * parts.  Be careful: the range may straddle two pages, one present
 * and the other not.
 *
 * This interface is provided in the hope that the port can find a
 * suitably efficient method for removing multiple page-sized regions
 * from the cache.
 */
static void
s7_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int exec = vma->vm_flags & VM_EXEC;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (!(mm->context))
		return;

	while (start <= end) {
		unsigned long tmpend;

		/* re-walk the page tables for each page in the range */
		pgdp = pgd_offset(mm, start);
		pudp = pud_offset(pgdp, start);
		pmdp = pmd_offset(pudp, start);
		ptep = pte_offset(pmdp, start);

		if (!(pte_val(*ptep) & _PAGE_PRESENT)) {
			start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
			continue;
		}
		tmpend = (start | (PAGE_SIZE-1)) > end ?
			 end : (start | (PAGE_SIZE-1));

		s7_flush_dcache_range(start, tmpend);
		if (exec)
			s7_flush_icache_range(start, tmpend);
		start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
	}
}

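/*
 * Flush one user page via a kernel address.  0xa0000000 looks like the
 * base of Score's direct-mapped kernel segment (an assumption, not
 * confirmed by this file), so the pfn can be flushed without the user
 * mapping having to be resident.
 */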
static void
s7_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	int exec = vma->vm_flags & VM_EXEC;
	unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT);

	s7_flush_dcache_range(kaddr, kaddr + PAGE_SIZE);

	if (exec)
		s7_flush_icache_range(kaddr, kaddr + PAGE_SIZE);
}

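/*
 * Make a freshly written signal trampoline (two instruction words, at
 * addr and addr + 4) visible to instruction fetch: invalidate the
 * I-cache lines, write back the D-cache lines (0x0d is presumably the
 * per-line write-back op), then drain the write buffer.
 */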
static void s7_flush_cache_sigtramp(unsigned long addr)
{
	__asm__ __volatile__(
		"cache 0x02, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		"cache 0x02, [%0, 0x4]\n"
		"nop\nnop\nnop\nnop\nnop\n"

		"cache 0x0d, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		"cache 0x0d, [%0, 0x4]\n"
		"nop\nnop\nnop\nnop\nnop\n"

		"cache 0x1a, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		: : "r" (addr));
}

/*
 * Write back and invalidate one page of the D-cache.
 * The caller must ensure the page contains no instructions, because
 * this function does not touch the I-cache.
 * addr must be cache-line aligned.
 */
static void s7_flush_data_cache_page(unsigned long addr)
{
	unsigned int i;

	/*
	 * Step through the page one cache line at a time.  (The original
	 * loop bound of PAGE_SIZE / L1_CACHE_BYTES with an L1_CACHE_BYTES
	 * step only covered a fraction of the page.)
	 */
	for (i = 0; i < PAGE_SIZE; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
			"cache 0x0e, [%0, 0]\n"	/* write back + invalidate D-line */
			"cache 0x1a, [%0, 0]\n"	/* drain write buffer */
			"nop\n"
			: : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

/*
 * For each cache line in the range:
 *   1. write back and invalidate the D-cache line
 *   2. drain the write buffer
 * The range must be smaller than PAGE_SIZE.
 */
static void s7_flush_dcache_range(unsigned long start, unsigned long end)
{
	int size, i;

	start = start & ~(L1_CACHE_BYTES - 1);
	/* round end up so a partial final line is flushed as well */
	end = (end + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	size = end - start;
	/* flush dcache to ram, and invalidate dcache lines. */
	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
			"cache 0x0e, [%0, 0]\n"
			"nop\nnop\nnop\nnop\nnop\n"
			"cache 0x1a, [%0, 0]\n"
			"nop\nnop\nnop\nnop\nnop\n"
			: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}

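/*
 * Invalidate every I-cache line that overlaps the given range.
 */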
static void s7_flush_icache_range(unsigned long start, unsigned long end)
{
	int size, i;

	start = start & ~(L1_CACHE_BYTES - 1);
	/* round end up so a partial final line is invalidated as well */
	end = (end + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);

	size = end - start;
	/* invalidate icache lines. */
	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
			"cache 0x02, [%0, 0]\n"
			"nop\nnop\nnop\nnop\nnop\n"
			: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}