Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * arch/sh/mm/pg-sh4.c | |
3 | * | |
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | |
8cf1a743 | 5 | * Copyright (C) 2002 - 2007 Paul Mundt |
1da177e4 LT |
6 | * |
7 | * Released under the terms of the GNU GPL v2.0. | |
8 | */ | |
1da177e4 | 9 | #include <linux/mm.h> |
52e27782 | 10 | #include <linux/mutex.h> |
e06c4e57 | 11 | #include <linux/fs.h> |
7747b9a4 PM |
12 | #include <linux/highmem.h> |
13 | #include <linux/module.h> | |
1da177e4 LT |
14 | #include <asm/mmu_context.h> |
15 | #include <asm/cacheflush.h> | |
16 | ||
/* Mask of virtual-address bits that select the D-cache alias "colour" */
#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
8b395265 | 18 | |
8cf1a743 PM |
/*
 * Map @page at a kernel fixmap address whose cache colour matches the
 * user address @addr (same CACHE_ALIAS bits), so that accesses through
 * the returned pointer hit the same D-cache lines as a user mapping at
 * @addr would.  Must be paired with kunmap_coherent().
 *
 * NOTE(review): preemption is disabled for the lifetime of the mapping,
 * presumably because the fixmap slot is selected per colour and shared —
 * confirm against the fixmap layout.
 */
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags;
	pte_t pte;

	inc_preempt_count();

	/* Pick the fixmap slot matching the alias colour of @addr */
	idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);

	/* Evict any stale translation for this slot before reusing it */
	local_irq_save(flags);
	flush_tlb_one(get_asid(), vaddr);
	local_irq_restore(flags);

	/* Preload the new translation; there is no vma for a fixmap (NULL) */
	update_mmu_cache(NULL, vaddr, pte);

	return (void *)vaddr;
}
39 | ||
/*
 * Tear down a kmap_coherent() mapping: re-enable preemption and
 * reschedule if one became pending while the mapping was held.
 * @page is unused; kept for symmetry with kmap_coherent() (callers
 * actually pass the mapped virtual address here, which is ignored).
 */
static inline void kunmap_coherent(struct page *page)
{
	dec_preempt_count();
	preempt_check_resched();
}
45 | ||
1da177e4 LT |
/*
 * clear_user_page - zero a page that will be mapped into userspace
 * @to: P1 address (kernel direct mapping of the page)
 * @address: U0 address the page will be mapped at
 * @page: page (virt_to_page(to))
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	/* Flag the page as kernel-touched; cleared again in
	 * ptep_get_and_clear() when the mapping goes away. */
	__set_bit(PG_mapped, &page->flags);

	clear_page(to);
	/* If the kernel and user addresses fall in different cache
	 * colours, write the zeroed lines back so the user alias sees
	 * the cleared data rather than stale cache contents. */
	if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS))
		__flush_wback_region(to, PAGE_SIZE);
}
60 | ||
ba1789ef PM |
/*
 * Copy @len bytes from kernel buffer @src into @page at the offset
 * implied by user address @vaddr, through an alias-coherent mapping so
 * the user view of the page stays consistent.  Note @dst is unused:
 * the destination is recomputed via kmap_coherent() instead.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	void *vto;

	__set_bit(PG_mapped, &page->flags);

	vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
	memcpy(vto, src, len);
	kunmap_coherent(vto);

	/* Executable mapping: flush so the new instructions are visible
	 * to the I-cache. */
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}
76 | ||
/*
 * Copy @len bytes out of @page (at the offset implied by user address
 * @vaddr) into kernel buffer @dst, reading through an alias-coherent
 * mapping.  Note @src is unused: the source address is recomputed via
 * kmap_coherent(), mirroring copy_to_user_page().
 */
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	void *vfrom;

	__set_bit(PG_mapped, &page->flags);

	vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
	memcpy(dst, vfrom, len);
	kunmap_coherent(vfrom);
}
39e688a9 | 89 | |
7747b9a4 PM |
/*
 * Copy highmem page @from into @to for a user mapping at @vaddr,
 * reading the source through an alias-coherent mapping and writing the
 * destination back if its kernel mapping's cache colour differs from
 * the user address.
 */
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	__set_bit(PG_mapped, &to->flags);

	vto = kmap_atomic(to, KM_USER1);
	/* Source mapped at the same colour as @vaddr, so reads are coherent */
	vfrom = kmap_coherent(from, vaddr);
	copy_page(vto, vfrom);
	kunmap_coherent(vfrom);

	/* Destination colour mismatch: write the copied lines back so the
	 * user alias sees them. */
	if (((vaddr ^ (unsigned long)vto) & CACHE_ALIAS))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is copied on other CPUs too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
110 | ||
39e688a9 PM |
/*
 * For SH-4, we have our own implementation for ptep_get_and_clear
 *
 * Atomically (w.r.t. this CPU) reads and clears *@ptep, returning the
 * old PTE value.  If the old PTE mapped a valid page that is not
 * writably shared through any address_space, the PG_mapped hint set in
 * clear_user_page()/copy_*_user_page() is dropped — presumably so a
 * later fault redoes the cache-coherency work; confirm against the
 * arch fault path.
 */
pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte_clear(mm, addr, ptep);
	if (!pte_not_present(pte)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			struct address_space *mapping = page_mapping(page);
			/* Keep the hint if any mapping may still write the
			 * page; otherwise clear it. */
			if (!mapping || !mapping_writably_mapped(mapping))
				__clear_bit(PG_mapped, &page->flags);
		}
	}
	return pte;
}