```c
#ifndef __X86_64_MMU_CONTEXT_H
#define __X86_64_MMU_CONTEXT_H

#include <asm/pda.h>

/*
 * Called when the CPU is handed to a task (typically a kernel thread)
 * that keeps running on the previous task's address space: mark this
 * CPU's TLB state lazy, so remote CPUs may stop sending us TLB flush
 * IPIs for that mm.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (read_pda(mmu_state) == TLBSTATE_OK)
		write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}
```
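For orientation, these hooks are driven from the scheduler. The following is a minimal sketch of how `context_switch()` in `kernel/sched.c` of the same era chooses between them (run-queue bookkeeping, locking, and the `prev->mm` handoff are omitted, and the function name here is illustrative):

```c
/* Simplified sketch of the caller, adapted from context_switch()
 * in kernel/sched.c of the same era. */
static inline void context_switch_mm(struct task_struct *prev,
				     struct task_struct *next)
{
	struct mm_struct *mm = next->mm;
	struct mm_struct *oldmm = prev->active_mm;

	if (unlikely(!mm)) {
		/* Kernel thread: keep running on the old address space,
		 * pin it, and drop this CPU into lazy TLB mode. */
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
		enter_lazy_tlb(oldmm, next);
	} else
		switch_mm(oldmm, mm, next);
}
```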
```c
/*
 * Switch the active address space. On the common path (a real mm
 * change) this updates the cpu_vm_mask bookkeeping, the per-CPU TLB
 * state, and CR3; the LDT is reloaded only if it actually differs
 * between the two mms.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();
	if (likely(prev != next)) {
		/* Stop TLB flush IPIs for the previous mm. */
		cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
		write_pda(mmu_state, TLBSTATE_OK);
		write_pda(active_mm, next);
#endif
		cpu_set(cpu, next->cpu_vm_mask);
		load_cr3(next->pgd);

		if (unlikely(next->context.ldt != prev->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		write_pda(mmu_state, TLBSTATE_OK);
		if (read_pda(active_mm) != next)
			BUG();
		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy TLB mode and leave_mm disabled
			 * TLB flush IPI delivery. We must reload CR3 to
			 * make sure we are not using freed page tables.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}
```
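The `cpu_test_and_set()` branch only makes sense next to `leave_mm()`, which the comment references. A simplified sketch of that other half of the protocol, modeled on `arch/x86_64/kernel/smp.c` of the same era: when a lazy CPU receives a TLB flush IPI, it leaves the mm entirely instead of flushing, clearing its `cpu_vm_mask` bit so no further IPIs arrive. That cleared bit is exactly what `switch_mm()` detects above, forcing the CR3 reload.

```c
/* Sketch modeled on leave_mm() in arch/x86_64/kernel/smp.c of this era. */
static inline void leave_mm(int cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();			/* only a lazy CPU may leave an mm */
	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);	/* park CR3 on the init page tables */
}
```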
```c
/*
 * Called when the old mm is dropped, e.g. on exec(): wipe the user
 * segment registers so the new program image does not inherit stale
 * %gs/%fs state from the previous one.
 */
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	asm volatile("movl %0,%%fs"::"r"(0));	\
} while (0)

#endif	/* __X86_64_MMU_CONTEXT_H */
```
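A note on the asymmetry in `deactivate_mm()`: `%fs` can be cleared with an ordinary `mov`, but `%gs` cannot. On x86-64 the kernel reaches its per-CPU PDA through the active GS base (swapped in by `swapgs` on kernel entry), and a plain `mov` to `%gs` would reload that base from the descriptor and clobber it. `load_gs_index()` exists for exactly this case: in essence it writes the selector with the user and kernel GS bases swapped appropriately, and with interrupts excluded around the `swapgs` pair, so the kernel's GS base survives the write.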