#ifndef __I386_SCHED_H
#define __I386_SCHED_H
/*
 * Drop this CPU into lazy-TLB mode for @mm: the CPU keeps the old page
 * tables loaded, but flags (via cpu_tlbstate) that it no longer needs
 * them kept coherent, so TLB-flush IPIs can skip it.  On UP builds this
 * is a no-op; @mm and @tsk are unused here.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	unsigned this_cpu = smp_processor_id();

	if (per_cpu(cpu_tlbstate, this_cpu).state == TLBSTATE_OK)
		per_cpu(cpu_tlbstate, this_cpu).state = TLBSTATE_LAZY;
#endif
}
12 | ||
/*
 * Switch the CPU's address space from @prev to @next.
 *
 * Ordering here is delicate and must not change:
 *  - @prev's cpu_vm_mask bit is cleared first, so flush IPIs for @prev
 *    stop targeting this CPU before we abandon its page tables;
 *  - @next's bit is set before load_cr3(), so flushes for @next start
 *    reaching us no later than when its tables become active.
 *
 * @tsk is unused in this implementation.
 */
static inline void switch_mm(struct mm_struct *prev,
			     struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	if (likely(prev != next)) {
		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
		/* Record that this CPU now actively runs @next. */
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
		per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
		cpu_set(cpu, next->cpu_vm_mask);

		/* Re-load page tables */
		load_cr3(next->pgd);

		/*
		 * load the LDT, if the LDT is different:
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		/*
		 * Same mm: we may have been in lazy-TLB mode.  Leave it,
		 * and sanity-check that this CPU's recorded active_mm
		 * really is @next.
		 */
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);

		/*
		 * Atomically re-join @next's flush set; if our bit was
		 * clear, leave_mm() removed us while lazy and we may have
		 * missed flush IPIs, so the page tables must be reloaded.
		 */
		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload %cr3.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}
52 | ||
/*
 * Clear the user %gs selector before the mm is dropped, so no stale
 * user descriptor remains loaded.  @tsk and @mm are unused here.
 *
 * Fix: the original macro body ended in ';', so the macro expanded to
 * two statements' worth of punctuation at call sites — a plain
 * "deactivate_mm(tsk, mm);" produced a stray empty statement, and
 * "if (x) deactivate_mm(tsk, mm); else ..." would not compile.  The
 * trailing semicolon is dropped so the macro behaves as one statement.
 * (The asm has no outputs, so it is implicitly volatile.)
 */
#define deactivate_mm(tsk, mm)			\
	asm("movl %0,%%gs": :"r" (0))

#endif /* __I386_SCHED_H */