powerpc: rework 4xx PTE access and TLB miss
include/asm-powerpc/pgtable-ppc32.h
1 #ifndef _ASM_POWERPC_PGTABLE_PPC32_H
2 #define _ASM_POWERPC_PGTABLE_PPC32_H
3
4 #include <asm-generic/pgtable-nopmd.h>
5
6 #ifndef __ASSEMBLY__
7 #include <linux/sched.h>
8 #include <linux/threads.h>
9 #include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */
10
11 extern unsigned long va_to_phys(unsigned long address);
12 extern pte_t *va_to_pte(unsigned long address);
13 extern unsigned long ioremap_bot, ioremap_base;
14
15 #ifdef CONFIG_44x
16 extern int icache_44x_need_flush;
17 #endif
18
19 #endif /* __ASSEMBLY__ */
20
21 /*
22 * The PowerPC MMU uses a hash table containing PTEs, together with
23 * a set of 16 segment registers (on 32-bit implementations), to define
24 * the virtual to physical address mapping.
25 *
26 * We use the hash table as an extended TLB, i.e. a cache of currently
27 * active mappings. We maintain a two-level page table tree, much
28 * like that used by the i386, for the sake of the Linux memory
29 * management code. Low-level assembler code in hashtable.S
30 * (procedure hash_page) is responsible for extracting ptes from the
31 * tree and putting them into the hash table when necessary, and
32 * updating the accessed and modified bits in the page table tree.
33 */
34
35 /*
36 * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
37 * We also use the two level tables, but we can put the real bits in them
38 * needed for the TLB and tablewalk. These definitions require Mx_CTR.PPM = 0,
39 * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has
40 * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
41 * based upon user/super access. The TLB does not have accessed or write
42 * protect bits. We assume that if the TLB gets loaded with an entry it is
43 * accessed, and overload the changed bit for write protect. We use
44 * two bits in the software pte that are supposed to be set to zero in
45 * the TLB entry (24 and 25) for these indicators. Although the level 1
46 * descriptor contains the guarded and writethrough/copyback bits, we can
47 * set these at the page level since they get copied from the Mx_TWC
48 * register when the TLB entry is loaded. We will use bit 27 for guard, since
49 * that is where it exists in the MD_TWC, and bit 26 for writethrough.
50 * These will get masked from the level 2 descriptor at TLB load time, and
51 * copied to the MD_TWC before it gets loaded.
52 * Large page sizes added. We currently support two sizes, 4K and 8M.
53 * This also allows a TLB handler optimization because we can directly
54 * load the PMD into MD_TWC. The 8M pages are only used for kernel
55 * mapping of well known areas. The PMD (PGD) entries contain control
56 * flags in addition to the address, so care must be taken that the
57 * software no longer assumes these are only pointers.
58 */
59
60 /*
61 * At present, all PowerPC 400-class processors share a similar TLB
62 * architecture. The instruction and data sides share a unified,
63 * 64-entry, fully-associative TLB which is maintained totally under
64 * software control. In addition, the instruction side has a
65 * hardware-managed, 4-entry, fully-associative TLB which serves as a
66 * first level to the shared TLB. These two TLBs are known as the UTLB
67 * and ITLB, respectively (see "mmu.h" for definitions).
68 */
69
70 /*
71 * The normal case is that PTEs are 32 bits and we have a 1-page
72 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
73 *
74 * For any >32-bit physical address platform, we can use the following
75 * two level page table layout where the pgdir is 8KB and the MS 11 bits
76 * are an index to the second level table. The combined pgdir/pmd first
77 * level has 2048 entries and the second level has 512 64-bit PTE entries.
78 * -Matt
79 */
80 /* PGDIR_SHIFT determines what a top-level page table entry can map */
81 #define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT)
82 #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
83 #define PGDIR_MASK (~(PGDIR_SIZE-1))
84
85 /*
86 * entries per page directory level: our page-table tree is two-level, so
87 * we don't really have any PMD directory.
88 */
89 #ifndef __ASSEMBLY__
90 #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_SHIFT)
91 #define PGD_TABLE_SIZE (sizeof(pgd_t) << (32 - PGDIR_SHIFT))
92 #endif /* __ASSEMBLY__ */
93
94 #define PTRS_PER_PTE (1 << PTE_SHIFT)
95 #define PTRS_PER_PMD 1
96 #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
97
98 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
99 #define FIRST_USER_ADDRESS 0
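/*
 * Illustrative worked example (not from the original header), assuming 4KB
 * pages (PAGE_SHIFT == 12) and the usual PTE_SHIFT values of PAGE_SHIFT - 2
 * for 32-bit PTEs and PAGE_SHIFT - 3 for 64-bit PTEs:
 *
 *   32-bit PTEs: PTE_SHIFT == 10, PGDIR_SHIFT == 22, PGDIR_SIZE == 4MB,
 *                PTRS_PER_PTE == 1024, PTRS_PER_PGD == 1024
 *                (the "normal case" described above).
 *   64-bit PTEs: PTE_SHIFT == 9, PGDIR_SHIFT == 21, PGDIR_SIZE == 2MB,
 *                PTRS_PER_PTE == 512, PTRS_PER_PGD == 2048
 *                (the 8KB pgdir case described above).
 *
 * A 32-bit virtual address then splits into
 *   [pgd index: 32 - PGDIR_SHIFT bits][pte index: PTE_SHIFT bits][offset: PAGE_SHIFT bits]
 */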
100
101 #define pte_ERROR(e) \
102 printk("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
103 (unsigned long long)pte_val(e))
104 #define pgd_ERROR(e) \
105 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
106
107 /*
108 * Just any arbitrary offset to the start of the vmalloc VM area: the
109 * current 16MB value just means that there will be a 16MB "hole" after the
110 * physical memory until the kernel virtual memory starts. That means that
111 * any out-of-bounds memory accesses will hopefully be caught.
112 * The vmalloc() routines leave a hole of 4kB between each vmalloced
113 * area for the same reason. ;)
114 *
115 * We no longer map larger than phys RAM with the BATs so we don't have
116 * to worry about the VMALLOC_OFFSET causing problems. We do have to worry
117 * about clashes between our early calls to ioremap() that start growing down
118 * from ioremap_base being run into the VM area allocations (growing upwards
119 * from VMALLOC_START). For this reason we have ioremap_bot to check when
120 * we actually run into our mappings setup in the early boot with the VM
121 * system. This really does become a problem for machines with good amounts
122 * of RAM. -- Cort
123 */
124 #define VMALLOC_OFFSET (0x1000000) /* 16M */
125 #ifdef PPC_PIN_SIZE
126 #define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
127 #else
128 #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
129 #endif
130 #define VMALLOC_END ioremap_bot
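/*
 * Illustrative example (not from the original header): with high_memory at
 * 0xd0000000 and no PPC_PIN_SIZE, VMALLOC_START works out to
 * (0xd0000000 + 0x1000000) & ~0x00ffffff == 0xd1000000, i.e. high_memory
 * rounded up to the next 16MB boundary, giving the "hole" described above.
 * VMALLOC_END tracks ioremap_bot, which the early ioremap() calls grow
 * downwards from ioremap_base.
 */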
131
132 /*
133 * Bits in a linux-style PTE. These match the bits in the
134 * (hardware-defined) PowerPC PTE as closely as possible.
135 */
136
137 #if defined(CONFIG_40x)
138
139 /* There are several potential gotchas here. The 40x hardware TLBLO
140 field looks like this:
141
142 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
143 RPN..................... 0 0 EX WR ZSEL....... W I M G
144
145 Where possible we make the Linux PTE bits match up with this
146
147 - bits 20 and 21 must be cleared, because we use 4k pages (40x can
148 support down to 1k pages); this is done in the TLBMiss exception
149 handler.
150 - We use only zones 0 (for kernel pages) and 1 (for user pages)
151 of the 16 available. Bits 24-26 of the TLB are cleared in the TLB
152 miss handler. Bit 27 is PAGE_USER, thus selecting the correct
153 zone.
154 - PRESENT *must* be in the bottom two bits because swap cache
155 entries use the top 30 bits. Because 40x doesn't support SMP
156 anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
157 is cleared in the TLB miss handler before the TLB entry is loaded.
158 - All other bits of the PTE are loaded into TLBLO without
159 modification, leaving us only bits 20, 21, 24, 25, 26, 30 for
160 software PTE bits. We actually use bits 21, 24, 25, and
161 30 respectively for the software bits: ACCESSED, DIRTY, RW, and
162 PRESENT.
163 */
164
165 /* Definitions for 40x embedded chips. */
166 #define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
167 #define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */
168 #define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
169 #define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
170 #define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
171 #define _PAGE_USER 0x010 /* matches one of the zone permission bits */
172 #define _PAGE_RW 0x040 /* software: Writes permitted */
173 #define _PAGE_DIRTY 0x080 /* software: dirty page */
174 #define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */
175 #define _PAGE_HWEXEC 0x200 /* hardware: EX permission */
176 #define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
177
178 #define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */
179 #define _PMD_BAD 0x802
180 #define _PMD_SIZE 0x0e0 /* size field, != 0 for large-page PMD entry */
181 #define _PMD_SIZE_4M 0x0c0
182 #define _PMD_SIZE_16M 0x0e0
183 #define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4))
184
185 /* Until my rework is finished, 40x still needs atomic PTE updates */
186 #define PTE_ATOMIC_UPDATES 1
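/*
 * Illustrative example (not from the original header): a resident,
 * referenced, writable 40x user page carries
 *	_PAGE_PRESENT | _PAGE_USER | _PAGE_RW | _PAGE_ACCESSED
 *	== 0x002 | 0x010 | 0x040 | 0x400 == 0x452,
 * and once it has been written the fault path also sets
 * _PAGE_DIRTY | _PAGE_HWWRITE (0x080 | 0x100), giving 0x5d2.
 */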
187
188 #elif defined(CONFIG_44x)
189 /*
190 * Definitions for PPC440
191 *
192 * Because of the 3-word TLB entries to support 36-bit addressing,
193 * the attributes are difficult to map in such a fashion that they
194 * are easily loaded during exception processing. I decided to
195 * organize the entry so the ERPN is the only portion in the
196 * upper word of the PTE and the attribute bits below are packed
197 * in as sensibly as they can be in the area below a 4KB page size
198 * oriented RPN. This at least makes it easy to load the RPN and
199 * ERPN fields in the TLB. -Matt
200 *
201 * Note that these bits preclude future use of a page size
202 * less than 4KB.
203 *
204 *
205 * The PPC 440 core has the following TLB attribute fields:
206 *
207 * TLB1:
208 * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
209 * RPN................................. - - - - - - ERPN.......
210 *
211 * TLB2:
212 * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
213 * - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR
214 *
215 * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional
216 * TLB2 storage attribute fields. Those are:
217 *
218 * TLB2:
219 * 0...10 11 12 13 14 15 16...31
220 * no change WL1 IL1I IL1D IL2I IL2D no change
221 *
222 * There are some constraints and options when deciding how to map
223 * software bits into a TLB entry.
224 *
225 * - PRESENT *must* be in the bottom three bits because swap cache
226 * entries use the top 29 bits for TLB2.
227 *
228 * - FILE *must* be in the bottom three bits because swap cache
229 * entries use the top 29 bits for TLB2.
230 *
231 * - CACHE COHERENT bit (M) has no effect on PPC440 core, because it
232 * doesn't support SMP. So we can use this as software bit, like
233 * DIRTY.
234 *
235 * With the PPC 44x Linux implementation, the low 12 bits (0-11) of the PTE are used
236 * for memory protection related functions (see PTE structure in
237 * include/asm-ppc/mmu.h). The _PAGE_XXX definitions in this file map to the
238 * above bits. Note that the bit values are CPU specific, not architecture
239 * specific.
240 *
241 * The kernel PTE entry holds an arch-dependent swp_entry structure in
242 * certain situations. In other words, in such situations some portion of
243 * the PTE bits is used as a swp_entry. In the PPC implementation, bits
244 * 3-24 (counting from the LSB) are shared with the swp_entry, while bits
245 * 0-2 still hold protection values. That means the three protection bits
246 * are reserved for both the PTE and the SWAP entry in the three least
247 * significant bits.
248 *
249 * There are three protection bits available for SWAP entry:
250 * _PAGE_PRESENT
251 * _PAGE_FILE
252 * _PAGE_HASHPTE (if HW has)
253 *
254 * So those three bits have to live in bits 0-2 of the PTE.
255 *
256 */
257
258 #define _PAGE_PRESENT 0x00000001 /* S: PTE valid */
259 #define _PAGE_RW 0x00000002 /* S: Write permission */
260 #define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */
261 #define _PAGE_HWEXEC 0x00000004 /* H: Execute permission */
262 #define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */
263 #define _PAGE_DIRTY 0x00000010 /* S: Page dirty */
264 #define _PAGE_USER 0x00000040 /* S: User page */
265 #define _PAGE_ENDIAN 0x00000080 /* H: E bit */
266 #define _PAGE_GUARDED 0x00000100 /* H: G bit */
267 #define _PAGE_COHERENT 0x00000200 /* H: M bit */
268 #define _PAGE_NO_CACHE 0x00000400 /* H: I bit */
269 #define _PAGE_WRITETHRU 0x00000800 /* H: W bit */
270
271 /* TODO: Add large page lowmem mapping support */
272 #define _PMD_PRESENT 0
273 #define _PMD_PRESENT_MASK (PAGE_MASK)
274 #define _PMD_BAD (~PAGE_MASK)
275
276 /* ERPN in a PTE never gets cleared, ignore it */
277 #define _PTE_NONE_MASK 0xffffffff00000000ULL
278
279
280 #elif defined(CONFIG_FSL_BOOKE)
281 /*
282 MMU Assist Register 3:
283
284 32 33 34 35 36 ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63
285 RPN...................... 0 0 U0 U1 U2 U3 UX SX UW SW UR SR
286
287 - PRESENT *must* be in the bottom three bits because swap cache
288 entries use the top 29 bits.
289
290 - FILE *must* be in the bottom three bits because swap cache
291 entries use the top 29 bits.
292 */
293
294 /* Definitions for FSL Book-E Cores */
295 #define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */
296 #define _PAGE_USER 0x00002 /* S: User page (maps to UR) */
297 #define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */
298 #define _PAGE_ACCESSED 0x00004 /* S: Page referenced */
299 #define _PAGE_HWWRITE 0x00008 /* H: Dirty & RW, set in exception */
300 #define _PAGE_RW 0x00010 /* S: Write permission */
301 #define _PAGE_HWEXEC 0x00020 /* H: UX permission */
302
303 #define _PAGE_ENDIAN 0x00040 /* H: E bit */
304 #define _PAGE_GUARDED 0x00080 /* H: G bit */
305 #define _PAGE_COHERENT 0x00100 /* H: M bit */
306 #define _PAGE_NO_CACHE 0x00200 /* H: I bit */
307 #define _PAGE_WRITETHRU 0x00400 /* H: W bit */
308
309 #ifdef CONFIG_PTE_64BIT
310 #define _PAGE_DIRTY 0x08000 /* S: Page dirty */
311
312 /* ERPN in a PTE never gets cleared, ignore it */
313 #define _PTE_NONE_MASK 0xffffffffffff0000ULL
314 #else
315 #define _PAGE_DIRTY 0x00800 /* S: Page dirty */
316 #endif
317
318 #define _PMD_PRESENT 0
319 #define _PMD_PRESENT_MASK (PAGE_MASK)
320 #define _PMD_BAD (~PAGE_MASK)
321
322 /* Until my rework is finished, FSL BookE still needs atomic PTE updates */
323 #define PTE_ATOMIC_UPDATES 1
324
325 #elif defined(CONFIG_8xx)
326 /* Definitions for 8xx embedded chips. */
327 #define _PAGE_PRESENT 0x0001 /* Page is valid */
328 #define _PAGE_FILE 0x0002 /* when !present: nonlinear file mapping */
329 #define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */
330 #define _PAGE_SHARED 0x0004 /* No ASID (context) compare */
331
332 /* These five software bits must be masked out when the entry is loaded
333 * into the TLB.
334 */
335 #define _PAGE_EXEC 0x0008 /* software: i-cache coherency required */
336 #define _PAGE_GUARDED 0x0010 /* software: guarded access */
337 #define _PAGE_DIRTY 0x0020 /* software: page changed */
338 #define _PAGE_RW 0x0040 /* software: user write access allowed */
339 #define _PAGE_ACCESSED 0x0080 /* software: page referenced */
340
341 /* Setting any bits in the nibble with the following two controls will
342 * require a TLB exception handler change. It is assumed unused bits
343 * are always zero.
344 */
345 #define _PAGE_HWWRITE 0x0100 /* h/w write enable: never set in Linux PTE */
346 #define _PAGE_USER 0x0800 /* One of the PP bits, the other is USER&~RW */
347
348 #define _PMD_PRESENT 0x0001
349 #define _PMD_BAD 0x0ff0
350 #define _PMD_PAGE_MASK 0x000c
351 #define _PMD_PAGE_8M 0x000c
352
353 #define _PTE_NONE_MASK _PAGE_ACCESSED
354
355 /* Until my rework is finished, 8xx still needs atomic PTE updates */
356 #define PTE_ATOMIC_UPDATES 1
357
358 #else /* CONFIG_6xx */
359 /* Definitions for 60x, 740/750, etc. */
360 #define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
361 #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
362 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
363 #define _PAGE_USER 0x004 /* usermode access allowed */
364 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
365 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
366 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
367 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
368 #define _PAGE_DIRTY 0x080 /* C: page changed */
369 #define _PAGE_ACCESSED 0x100 /* R: page referenced */
370 #define _PAGE_EXEC 0x200 /* software: i-cache coherency required */
371 #define _PAGE_RW 0x400 /* software: user write access allowed */
372
373 #define _PTE_NONE_MASK _PAGE_HASHPTE
374
375 #define _PMD_PRESENT 0
376 #define _PMD_PRESENT_MASK (PAGE_MASK)
377 #define _PMD_BAD (~PAGE_MASK)
378
379 /* Hash table based platforms need atomic updates of the linux PTE */
380 #define PTE_ATOMIC_UPDATES 1
381
382 #endif
383
384 /*
385 * Some bits are only used on some cpu families...
386 */
387 #ifndef _PAGE_HASHPTE
388 #define _PAGE_HASHPTE 0
389 #endif
390 #ifndef _PTE_NONE_MASK
391 #define _PTE_NONE_MASK 0
392 #endif
393 #ifndef _PAGE_SHARED
394 #define _PAGE_SHARED 0
395 #endif
396 #ifndef _PAGE_HWWRITE
397 #define _PAGE_HWWRITE 0
398 #endif
399 #ifndef _PAGE_HWEXEC
400 #define _PAGE_HWEXEC 0
401 #endif
402 #ifndef _PAGE_EXEC
403 #define _PAGE_EXEC 0
404 #endif
405 #ifndef _PMD_PRESENT_MASK
406 #define _PMD_PRESENT_MASK _PMD_PRESENT
407 #endif
408 #ifndef _PMD_SIZE
409 #define _PMD_SIZE 0
410 #define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE()
411 #endif
412
413 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
414
415 /*
416 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
417 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
418 * to have it in the Linux PTE, and in fact the bit could be reused for
419 * another purpose. -- paulus.
420 */
421
422 #ifdef CONFIG_44x
423 #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_GUARDED)
424 #else
425 #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
426 #endif
427 #define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
428 #define _PAGE_KERNEL (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)
429
430 #ifdef CONFIG_PPC_STD_MMU
431 /* On standard PPC MMU, no user access implies kernel read/write access,
432 * so to write-protect kernel memory we must turn on user access */
433 #define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED | _PAGE_USER)
434 #else
435 #define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED)
436 #endif
437
438 #define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
439 #define _PAGE_RAM (_PAGE_KERNEL | _PAGE_HWEXEC)
440
441 #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
442 defined(CONFIG_KPROBES)
443 /* We want the debuggers to be able to set breakpoints anywhere, so
444 * don't write protect the kernel text */
445 #define _PAGE_RAM_TEXT _PAGE_RAM
446 #else
447 #define _PAGE_RAM_TEXT (_PAGE_KERNEL_RO | _PAGE_HWEXEC)
448 #endif
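/*
 * Illustrative example (not from the original header): on classic 6xx hash
 * MMU parts, where _PAGE_SHARED, _PAGE_HWWRITE and _PAGE_HWEXEC are all 0,
 *	_PAGE_BASE     == _PAGE_PRESENT | _PAGE_ACCESSED == 0x101
 *	_PAGE_WRENABLE == _PAGE_RW | _PAGE_DIRTY         == 0x480
 *	_PAGE_KERNEL   == _PAGE_RAM                      == 0x581
 */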
449
450 #define PAGE_NONE __pgprot(_PAGE_BASE)
451 #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
452 #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
453 #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
454 #define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
455 #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
456 #define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
457
458 #define PAGE_KERNEL __pgprot(_PAGE_RAM)
459 #define PAGE_KERNEL_NOCACHE __pgprot(_PAGE_IO)
460
461 /*
462 * The PowerPC can only do execute protection on a segment (256MB) basis,
463 * not on a page basis. So we consider execute permission the same as read.
464 * Also, write permissions imply read permissions.
465 * This is the closest we can get.
466 */
467 #define __P000 PAGE_NONE
468 #define __P001 PAGE_READONLY_X
469 #define __P010 PAGE_COPY
470 #define __P011 PAGE_COPY_X
471 #define __P100 PAGE_READONLY
472 #define __P101 PAGE_READONLY_X
473 #define __P110 PAGE_COPY
474 #define __P111 PAGE_COPY_X
475
476 #define __S000 PAGE_NONE
477 #define __S001 PAGE_READONLY_X
478 #define __S010 PAGE_SHARED
479 #define __S011 PAGE_SHARED_X
480 #define __S100 PAGE_READONLY
481 #define __S101 PAGE_READONLY_X
482 #define __S110 PAGE_SHARED
483 #define __S111 PAGE_SHARED_X
484
485 #ifndef __ASSEMBLY__
486 /* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
487 * kernel without large page PMD support */
488 extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
489
490 /*
491 * Conversions between PTE values and page frame numbers.
492 */
493
494 /* In some cases we want to additionally adjust where the pfn is in the pte,
495 * to allow room for more flags. */
496 #if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
497 #define PFN_SHIFT_OFFSET (PAGE_SHIFT + 8)
498 #else
499 #define PFN_SHIFT_OFFSET (PAGE_SHIFT)
500 #endif
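/*
 * Worked example (not from the original header): with 4KB pages
 * (PAGE_SHIFT == 12) on FSL Book-E with CONFIG_PTE_64BIT,
 * PFN_SHIFT_OFFSET == 20, so pfn_pte(0x12345, prot) places the pfn in bits
 * 20 and up of the 64-bit PTE, leaving bits 12-19 free for software flags
 * such as _PAGE_DIRTY (0x08000). Everywhere else PFN_SHIFT_OFFSET ==
 * PAGE_SHIFT and the pfn sits directly above the flag bits.
 */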
501
502 #define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET)
503 #define pte_page(x) pfn_to_page(pte_pfn(x))
504
505 #define pfn_pte(pfn, prot) __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
506 pgprot_val(prot))
507 #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
508 #endif /* __ASSEMBLY__ */
509
510 #define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
511 #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
512 #define pte_clear(mm,addr,ptep) do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)
513
514 #define pmd_none(pmd) (!pmd_val(pmd))
515 #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
516 #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
517 #define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0)
518
519 #ifndef __ASSEMBLY__
520 /*
521 * The following only work if pte_present() is true.
522 * Undefined behaviour if not..
523 */
524 static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
525 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
526 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
527 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
528 static inline int pte_special(pte_t pte) { return 0; }
529
530 static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
531 static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
532
533 static inline pte_t pte_wrprotect(pte_t pte) {
534 pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
535 static inline pte_t pte_mkclean(pte_t pte) {
536 pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
537 static inline pte_t pte_mkold(pte_t pte) {
538 pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
539
540 static inline pte_t pte_mkwrite(pte_t pte) {
541 pte_val(pte) |= _PAGE_RW; return pte; }
542 static inline pte_t pte_mkdirty(pte_t pte) {
543 pte_val(pte) |= _PAGE_DIRTY; return pte; }
544 static inline pte_t pte_mkyoung(pte_t pte) {
545 pte_val(pte) |= _PAGE_ACCESSED; return pte; }
546 static inline pte_t pte_mkspecial(pte_t pte) {
547 return pte; }
548
549 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
550 {
551 pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
552 return pte;
553 }
554
555 /*
556 * When flushing the tlb entry for a page, we also need to flush the hash
557 * table entry. flush_hash_pages is assembler (for speed) in hashtable.S.
558 */
559 extern int flush_hash_pages(unsigned context, unsigned long va,
560 unsigned long pmdval, int count);
561
562 /* Add an HPTE to the hash table */
563 extern void add_hash_page(unsigned context, unsigned long va,
564 unsigned long pmdval);
565
566 /*
567 * Atomic PTE updates.
568 *
569 * pte_update clears and sets bits atomically, and returns
570 * the old pte value. In the 64-bit PTE case we lock around the
571 * low PTE word since we expect ALL flag bits to be there
572 */
573 #ifndef CONFIG_PTE_64BIT
574 static inline unsigned long pte_update(pte_t *p,
575 unsigned long clr,
576 unsigned long set)
577 {
578 #ifdef PTE_ATOMIC_UPDATES
579 unsigned long old, tmp;
580
581 __asm__ __volatile__("\
582 1: lwarx %0,0,%3\n\
583 andc %1,%0,%4\n\
584 or %1,%1,%5\n"
585 PPC405_ERR77(0,%3)
586 " stwcx. %1,0,%3\n\
587 bne- 1b"
588 : "=&r" (old), "=&r" (tmp), "=m" (*p)
589 : "r" (p), "r" (clr), "r" (set), "m" (*p)
590 : "cc" );
591 #else /* PTE_ATOMIC_UPDATES */
592 unsigned long old = pte_val(*p);
593 *p = __pte((old & ~clr) | set);
594 #endif /* !PTE_ATOMIC_UPDATES */
595
596 #ifdef CONFIG_44x
597 if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
598 icache_44x_need_flush = 1;
599 #endif
600 return old;
601 }
602 #else /* CONFIG_PTE_64BIT */
603 /* TODO: Change that to only modify the low word and move set_pte_at()
604 * out of line
605 */
606 static inline unsigned long long pte_update(pte_t *p,
607 unsigned long clr,
608 unsigned long set)
609 {
610 #ifdef PTE_ATOMIC_UPDATES
611 unsigned long long old;
612 unsigned long tmp;
613
614 __asm__ __volatile__("\
615 1: lwarx %L0,0,%4\n\
616 lwzx %0,0,%3\n\
617 andc %1,%L0,%5\n\
618 or %1,%1,%6\n"
619 PPC405_ERR77(0,%3)
620 " stwcx. %1,0,%4\n\
621 bne- 1b"
622 : "=&r" (old), "=&r" (tmp), "=m" (*p)
623 : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
624 : "cc" );
625 #else /* PTE_ATOMIC_UPDATES */
626 unsigned long long old = pte_val(*p);
627 *p = __pte((old & ~clr) | set);
628 #endif /* !PTE_ATOMIC_UPDATES */
629
630 #ifdef CONFIG_44x
631 if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
632 icache_44x_need_flush = 1;
633 #endif
634 return old;
635 }
636 #endif /* CONFIG_PTE_64BIT */
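/*
 * Usage sketch (not from the original header; the helper name is made up
 * for illustration): clear the referenced and changed bits in one atomic
 * pte_update() call and report whether either was set, in the same way the
 * accessors below use pte_update() for single bits.
 */
static inline int __example_pte_clear_ref_chg(pte_t *ptep)
{
	unsigned long old = pte_update(ptep, _PAGE_ACCESSED | _PAGE_DIRTY, 0);

	return (old & (_PAGE_ACCESSED | _PAGE_DIRTY)) != 0;
}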
637
638 /*
639 * set_pte_at stores a linux PTE into the linux page table.
640 * On machines which use an MMU hash table we avoid changing the
641 * _PAGE_HASHPTE bit.
642 */
643 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
644 pte_t *ptep, pte_t pte)
645 {
646 #if _PAGE_HASHPTE != 0
647 pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
648 #else
649 *ptep = pte;
650 #endif
651 }
652
653 /*
654 * 2.6 calls this without flushing the TLB entry; this is wrong
655 * for our hash-based implementation, so we fix that up here.
656 */
657 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
658 static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
659 {
660 unsigned long old;
661 old = pte_update(ptep, _PAGE_ACCESSED, 0);
662 #if _PAGE_HASHPTE != 0
663 if (old & _PAGE_HASHPTE) {
664 unsigned long ptephys = __pa(ptep) & PAGE_MASK;
665 flush_hash_pages(context, addr, ptephys, 1);
666 }
667 #endif
668 return (old & _PAGE_ACCESSED) != 0;
669 }
670 #define ptep_test_and_clear_young(__vma, __addr, __ptep) \
671 __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
672
673 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
674 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
675 pte_t *ptep)
676 {
677 return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
678 }
679
680 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
681 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
682 pte_t *ptep)
683 {
684 pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
685 }
686 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
687 unsigned long addr, pte_t *ptep)
688 {
689 ptep_set_wrprotect(mm, addr, ptep);
690 }
691
692
693 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
694 static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
695 {
696 unsigned long bits = pte_val(entry) &
697 (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
698 pte_update(ptep, 0, bits);
699 }
700
701 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
702 ({ \
703 int __changed = !pte_same(*(__ptep), __entry); \
704 if (__changed) { \
705 __ptep_set_access_flags(__ptep, __entry, __dirty); \
706 flush_tlb_page_nohash(__vma, __address); \
707 } \
708 __changed; \
709 })
710
711 /*
712 * Macro to mark a page protection value as "uncacheable".
713 */
714 #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
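/*
 * Typical (illustrative) use, e.g. in a driver's mmap() method before
 * calling remap_pfn_range():
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *
 * which adds _PAGE_NO_CACHE | _PAGE_GUARDED so the mapping is neither
 * cached nor speculatively accessed.
 */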
715
716 struct file;
717 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
718 unsigned long size, pgprot_t vma_prot);
719 #define __HAVE_PHYS_MEM_ACCESS_PROT
720
721 #define __HAVE_ARCH_PTE_SAME
722 #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
723
724 /*
725 * Note that on Book E processors, the pmd contains the kernel virtual
726 * (lowmem) address of the pte page. The physical address is less useful
727 * because everything runs with translation enabled (even the TLB miss
728 * handler). On everything else the pmd contains the physical address
729 * of the pte page. -- paulus
730 */
731 #ifndef CONFIG_BOOKE
732 #define pmd_page_vaddr(pmd) \
733 ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
734 #define pmd_page(pmd) \
735 (mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
736 #else
737 #define pmd_page_vaddr(pmd) \
738 ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
739 #define pmd_page(pmd) \
740 pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
741 #endif
742
743 /* to find an entry in a kernel page-table-directory */
744 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
745
746 /* to find an entry in a page-table-directory */
747 #define pgd_index(address) ((address) >> PGDIR_SHIFT)
748 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
749
750 /* Find an entry in the third-level page table.. */
751 #define pte_index(address) \
752 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
753 #define pte_offset_kernel(dir, addr) \
754 ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
755 #define pte_offset_map(dir, addr) \
756 ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
757 #define pte_offset_map_nested(dir, addr) \
758 ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
759
760 #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
761 #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
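/*
 * Illustrative sketch (not from the original header; the function name is
 * made up): how the macros above compose into a software walk of the
 * two-level tree for a kernel virtual address. Assumes the address is
 * covered by a normal PTE page rather than a large-page PMD.
 */
static inline pte_t *__example_walk_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pmd_t *pmd = pmd_offset(pud_offset(pgd, addr), addr);

	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}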
762
763 /*
764 * Encode and decode a swap entry.
765 * Note that the bits we use in a PTE for representing a swap entry
766 * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the
767 * _PAGE_HASHPTE bit (if used). -- paulus
768 */
769 #define __swp_type(entry) ((entry).val & 0x1f)
770 #define __swp_offset(entry) ((entry).val >> 5)
771 #define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) })
772 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
773 #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
774
775 /* Encode and decode a nonlinear file mapping entry */
776 #define PTE_FILE_MAX_BITS 29
777 #define pte_to_pgoff(pte) (pte_val(pte) >> 3)
778 #define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE })
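/*
 * Worked example (not from the original header): __swp_entry(1, 0x1234)
 * yields a swp_entry_t value of 1 | (0x1234 << 5) == 0x24681, and
 * __swp_entry_to_pte() stores it as 0x24681 << 3 == 0x123408. The low three
 * PTE bits stay clear, so the result can never look _PAGE_PRESENT,
 * _PAGE_FILE or _PAGE_HASHPTE; __pte_to_swp_entry() shifts back down and
 * __swp_type()/__swp_offset() recover 1 and 0x1234.
 */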
779
780 /*
781 * No page table caches to initialise
782 */
783 #define pgtable_cache_init() do { } while (0)
784
785 extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
786 pmd_t **pmdp);
787
788 #endif /* !__ASSEMBLY__ */
789
790 #endif /* _ASM_POWERPC_PGTABLE_PPC32_H */