/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);
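/* The purge_tlb_start()/purge_tlb_end() helpers used throughout this file
 * are assumed to take pa_tlb_lock with interrupts disabled while TLB purge
 * instructions are issued, which is what enforces the rule above. */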

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

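/* On SMP a cache or TLB flush must reach every CPU: on_each_cpu() runs the
 * local flush routine on each online CPU and, with the last argument set
 * to 1, waits until all of them have finished. */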
#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn. */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

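/* Called by the generic MM code once a PTE has been installed or updated.
 * This completes the deferred flush begun in flush_dcache_page() below: a
 * page flagged PG_dcache_dirty there is flushed through its kernel mapping
 * here, now that a user translation actually exists. */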
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	/* We don't have pte special. As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000. */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_pad1);

	printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_pad1);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
				"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
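	/* The two forms agree: (1 << (cc_block - 1)) * (cc_line << (4 + cc_shift))
	 * == cc_line << (4 + cc_shift + cc_block - 1)
	 * == cc_line << (3 + cc_block + cc_shift), i.e. CAFL_STRIDE above. */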

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far. setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}
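
/* Flush one user page through a kernel alias of its physical address.  The
 * asm helpers are believed to map the page at a fixed "tmpalias" virtual
 * address congruent with vmaddr; preemption is disabled so the thread
 * cannot migrate off the CPU whose alias mapping is in use. */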
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: The
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process and nor may it be
		 * speculatively read in (until the user or kernel
		 * specifically accesses it, of course) */

		flush_tlb_page(mpnt, addr);
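		/* Mappings congruent modulo SHMLBA share a cache color,
		 * so one flush through the alias covers them all; a
		 * second, differently colored address should never occur
		 * and is reported below as an inequivalent alias. */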
		if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

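/* Calibrate parisc_cache_flush_threshold at boot: time a whole-cache flush
 * against a range flush of the kernel image (both in CR16 cycles, read via
 * mfctl(16)) and solve for the range size at which the two cost the same. */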
void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	/* Racy, but if we see an intermediate value, it's ok too... */
	parisc_cache_flush_threshold = size * alltime / rangetime;

	parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1);
	if (!parisc_cache_flush_threshold)
		parisc_cache_flush_threshold = FLUSH_THRESHOLD;

	if (parisc_cache_flush_threshold > cache_info.dc_size)
		parisc_cache_flush_threshold = cache_info.dc_size;

	printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

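/* Flush a page through its kernel mapping, then purge the kernel TLB entry:
 * with no translation left, the CPU cannot speculatively pull stale lines
 * back into the cache (the TLB is the engine of coherence; see
 * flush_dcache_page() above). */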
void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
{
	clear_page_asm(vto);
	if (!parisc_requires_coherency())
		flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
	/* Copy using kernel mapping. No coherency is needed
	   (all in kmap/kunmap) on machines that don't support
	   non-equivalent aliasing. However, the `from' page
	   needs to be flushed before it can be accessed through
	   the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	preempt_enable();
	copy_page_asm(vto, vfrom);
	if (!parisc_requires_coherency())
		flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);

#ifdef CONFIG_PA8X00

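/* On machines that require strict coherency (assumed here to be the PA8x00
 * parts for which parisc_requires_coherency() is true), the kernel mapping
 * must be flushed at kunmap time so no dirty lines survive under the
 * kernel alias. */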
void kunmap_parisc(void *addr)
{
	if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_parisc);
#endif

void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;

	/* Note: purge_tlb_entries can be called at startup with
	   no context. */

	purge_tlb_start(flags);
	mtsp(mm->context, 1);
	pdtlb(addr);
	pitlb(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(purge_tlb_entries);

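/* Purge a user range from the TLB one page at a time, with the space id
 * loaded into space register %sr1 via mtsp().  Beyond 512 pages the whole
 * TLB is flushed instead, on the assumption that this is cheaper than the
 * per-page loop (an arbitrary cutoff, per the comment below). */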
void __flush_tlb_range(unsigned long sid, unsigned long start,
		       unsigned long end)
{
	unsigned long npages;

	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
		flush_tlb_all();
	else {
		unsigned long flags;

		purge_tlb_start(flags);
		mtsp(sid, 1);
		if (split_tlb) {
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
		} else {
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
		}
		purge_tlb_end(flags);
	}
}

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

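/* Walk the page table by hand: pgd -> pud -> pmd -> pte, returning NULL if
 * any level is empty.  (The pud level is believed to be folded on parisc,
 * so the generic offsets below collapse accordingly.) */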
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				ptep = pte_offset_map(pmd, addr);
		}
	}
	return ptep;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc. So, avoid it if the mm isn't too big. */
	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

	if (mm->context == mfsp(3)) {
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
			if ((vma->vm_flags & VM_EXEC) == 0)
				continue;
			flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
		}
		return;
	}

	pgd = mm->pgd;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			unsigned long pfn;
			pte_t *ptep = get_ptep(pgd, addr);
			if (!ptep)
				continue;
			pfn = pte_pfn(*ptep);
			if (!pfn_valid(pfn))
				continue;
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
		}
	}
}

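/* User range flushes fall back to a full cache flush once the range passes
 * the break-even threshold computed in parisc_setup_cache_timing(). */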
void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start, end);
	else
		flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start, end);
	else
		flush_instruction_cache();
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	unsigned long addr;
	pgd_t *pgd;

	BUG_ON(!vma->vm_mm->context);

	if ((end - start) >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

	if (vma->vm_mm->context == mfsp(3)) {
		flush_user_dcache_range_asm(start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range_asm(start, end);
		return;
	}

	pgd = vma->vm_mm->pgd;
	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
		unsigned long pfn;
		pte_t *ptep = get_ptep(pgd, addr);
		if (!ptep)
			continue;
		pfn = pte_pfn(*ptep);
		if (pfn_valid(pfn))
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	if (pfn_valid(pfn)) {
		flush_tlb_page(vma, vmaddr);
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
	}
}

#ifdef CONFIG_PARISC_TMPALIAS

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *vto;
	unsigned long flags;

	/* Clear using TMPALIAS region. The page doesn't need to
	   be flushed but the kernel mapping needs to be purged. */

	vto = kmap_atomic(page);

	/* The PA-RISC 2.0 Architecture book states on page F-6:
	   "Before a write-capable translation is enabled, *all*
	   non-equivalently-aliased translations must be removed
	   from the page table and purged from the TLB. (Note
	   that the caches are not required to be flushed at this
	   time.) Before any non-equivalent aliased translation
	   is re-enabled, the virtual address range for the writeable
	   page (the entire page) must be flushed from the cache,
	   and the write-capable translation removed from the page
	   table and purged from the TLB." */

	purge_kernel_dcache_page_asm((unsigned long)vto);
	purge_tlb_start(flags);
	pdtlb_kernel(vto);
	purge_tlb_end(flags);
	preempt_disable();
	clear_user_page_asm(vto, vaddr);
	preempt_enable();

	pagefault_enable();		/* kunmap_atomic(addr, KM_USER0); */
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;
	unsigned long flags;

	/* Copy using TMPALIAS region. This has the advantage
	   that the `from' page doesn't need to be flushed. However,
	   the `to' page must be flushed in copy_user_page_asm since
	   it can be used to bring in executable code. */

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);

	purge_kernel_dcache_page_asm((unsigned long)vto);
	purge_tlb_start(flags);
	pdtlb_kernel(vto);
	pdtlb_kernel(vfrom);
	purge_tlb_end(flags);
	preempt_disable();
	copy_user_page_asm(vto, vfrom, vaddr);
	flush_dcache_page_asm(__pa(vto), vaddr);
	preempt_enable();

	pagefault_enable();		/* kunmap_atomic(addr, KM_USER1); */
	pagefault_enable();		/* kunmap_atomic(addr, KM_USER0); */
}

#endif /* CONFIG_PARISC_TMPALIAS */