/* $Id: generic.c,v 1.14 2001/12/21 04:56:15 davem Exp $
 * generic.c: Generic Sparc mm routines that are not dependent upon
 *            MMU type but are Sparc specific.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * These routines use a pgprot that sets PAGE_IO and do not check
 * the mem_map table, as IO space is independent of normal memory.
 */
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	/* Reduce address to its offset within this pmd and clamp
	 * the walk at the pmd boundary.
	 */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		/* Drop any old translation before installing the IO pte. */
		pte_clear(mm, address, pte);
		set_pte(pte, mk_pte_io(offset, prot, space));
		address += PAGE_SIZE;
		offset += PAGE_SIZE;
		pte++;
	} while (address < end);
}

static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot, int space)
{
	unsigned long end;

	/* Reduce address to its offset within the pgd entry and clamp
	 * the walk at the pgd boundary.
	 */
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	/* Pre-bias offset so that "address + offset" below yields the
	 * IO offset for the start of each pte range.
	 */
	offset -= address;
	do {
		pte_t * pte = pte_alloc_map(mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}

int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
	unsigned long pfn, unsigned long size, pgprot_t prot)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;
	struct mm_struct *mm = vma->vm_mm;
	/* The pfn argument encodes both the IO space and the page
	 * frame number; pull the two halves apart.
	 */
	int space = GET_IOSPACE(pfn);
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;

	/* The caller's pgprot is ignored; IO mappings always use pg_iobits. */
	prot = __pgprot(pg_iobits);
	/* Pre-bias offset so that "offset + from" below yields the IO
	 * offset for the start of each pmd range.
	 */
	offset -= from;
	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);

	spin_lock(&mm->page_table_lock);
	while (from < end) {
		pmd_t *pmd = pmd_alloc(mm, dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	spin_unlock(&mm->page_table_lock);

	flush_tlb_range(vma, beg, end);
	return error;
}
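
/*
 * Usage sketch (not part of the original file): a minimal example of how
 * a driver's mmap() handler might call io_remap_pfn_range() above.  The
 * device name, register base, and iospace value are hypothetical; real
 * callers derive them from their bus resources.  Note that the pfn
 * argument carries both the IO space and the page frame number, built
 * here with MK_IOSPACE_PFN() and decoded internally via GET_IOSPACE()
 * and GET_PFN().
 */
#if 0	/* example only, compiled out */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long phys = MYDEV_PHYS_BASE;	/* hypothetical register base */
	unsigned long pfn = MK_IOSPACE_PFN(MYDEV_IOSPACE, phys >> PAGE_SHIFT);
	unsigned long size = vma->vm_end - vma->vm_start;

	/* The prot argument is ignored on sparc; pg_iobits is used instead. */
	return io_remap_pfn_range(vma, vma->vm_start, pfn, size,
				  vma->vm_page_prot);
}
#endif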