[PATCH] ppc64: Store virtual address in TLB flush batches
include/asm-ppc64/tlbflush.h
#ifndef _PPC64_TLBFLUSH_H
#define _PPC64_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

struct mm_struct;
struct ppc64_tlb_batch {
	unsigned long index;
	struct mm_struct *mm;
	pte_t pte[PPC64_TLB_BATCH_NR];
	unsigned long vaddr[PPC64_TLB_BATCH_NR];
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

static inline void flush_tlb_pending(void)
{
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}

#define flush_tlb_mm(mm)			flush_tlb_pending()
#define flush_tlb_page(vma, addr)		flush_tlb_pending()
#define flush_tlb_page_nohash(vma, addr)	do { } while (0)
#define flush_tlb_range(vma, start, end) \
	do { (void)(start); flush_tlb_pending(); } while (0)
#define flush_tlb_kernel_range(start, end)	flush_tlb_pending()
#define flush_tlb_pgtables(mm, start, end)	do { } while (0)

extern void flush_hash_page(unsigned long va, pte_t pte, int local);
void flush_hash_range(unsigned long number, int local);

#endif /* _PPC64_TLBFLUSH_H */
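
The struct ppc64_tlb_batch above is the point of this patch: each deferred invalidation now records the virtual address alongside the PTE, so the flush path can hand that address straight to flush_hash_page() instead of recomputing it. What follows is a minimal, hypothetical sketch of how a caller might fill and drain the per-cpu batch; example_batch_invalidate() and example_flush_batch() are illustrative names only, not the kernel's actual hpte_update() or __flush_tlb_pending() implementations.

/*
 * Illustration only.  Shows one way the per-cpu ppc64_tlb_batch declared
 * in this header could be used: each deferred invalidation stores both
 * the PTE and its virtual address, and the flush side replays the saved
 * vaddr through flush_hash_page().
 */
#include <asm/tlbflush.h>

static void example_batch_invalidate(struct mm_struct *mm,
				     unsigned long vaddr, pte_t pte)
{
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long i = batch->index;

	/* The batch holds a single mm; flush early if it changes. */
	if (i && batch->mm != mm) {
		__flush_tlb_pending(batch);
		i = 0;
	}

	batch->mm = mm;
	batch->pte[i] = pte;
	batch->vaddr[i] = vaddr;	/* the address stored by this patch */
	batch->index = ++i;

	/*
	 * Drain when full.  __flush_tlb_pending() is assumed to reset
	 * batch->index, as implied by flush_tlb_pending() in the header.
	 */
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);

	put_cpu_var(ppc64_tlb_batch);
}

/* Flush side: one way the stored addresses could be consumed. */
static void example_flush_batch(struct ppc64_tlb_batch *batch, int local)
{
	unsigned long i;

	for (i = 0; i < batch->index; i++)
		flush_hash_page(batch->vaddr[i], batch->pte[i], local);
	batch->index = 0;
}

In this sketch the batch is flushed early when the pending entries belong to a different mm, mirroring the fact that the structure keeps a single mm pointer per batch of up to PPC64_TLB_BATCH_NR entries.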