Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/include/asm-arm/mmu_context.h | |
3 | * | |
4 | * Copyright (C) 1996 Russell King. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | * | |
10 | * Changelog: | |
11 | * 27-06-1996 RMK Created | |
12 | */ | |
13 | #ifndef __ASM_ARM_MMU_CONTEXT_H | |
14 | #define __ASM_ARM_MMU_CONTEXT_H | |
15 | ||
8dc39b88 | 16 | #include <linux/compiler.h> |
4fe15ba0 | 17 | #include <asm/cacheflush.h> |
1da177e4 LT |
18 | #include <asm/proc-fns.h> |
19 | ||
20 | #if __LINUX_ARM_ARCH__ >= 6 | |
21 | ||
22 | /* | |
23 | * On ARMv6, we have the following structure in the Context ID: | |
24 | * | |
25 | * 31 7 0 | |
26 | * +-------------------------+-----------+ | |
27 | * | process ID | ASID | | |
28 | * +-------------------------+-----------+ | |
29 | * | context ID | | |
30 | * +-------------------------------------+ | |
31 | * | |
32 | * The ASID is used to tag entries in the CPU caches and TLBs. | |
33 | * The context ID is used by debuggers and trace logic, and | |
34 | * should be unique within all running processes. | |
35 | */ | |
#define ASID_BITS	8
/* ~0u, not ~0: left-shifting a negative signed int is undefined behaviour. */
#define ASID_MASK	(~0u << ASID_BITS)
38 | ||
39 | extern unsigned int cpu_last_asid; | |
40 | ||
41 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); | |
42 | void __new_context(struct mm_struct *mm); | |
43 | ||
44 | static inline void check_context(struct mm_struct *mm) | |
45 | { | |
46 | if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) | |
47 | __new_context(mm); | |
48 | } | |
49 | ||
50 | #define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0) | |
51 | ||
52 | #else | |
53 | ||
54 | #define check_context(mm) do { } while (0) | |
55 | #define init_new_context(tsk,mm) 0 | |
56 | ||
57 | #endif | |
58 | ||
59 | #define destroy_context(mm) do { } while(0) | |
60 | ||
61 | /* | |
62 | * This is called when "tsk" is about to enter lazy TLB mode. | |
63 | * | |
64 | * mm: describes the currently active mm context | |
65 | * tsk: task which is entering lazy tlb | |
66 | * cpu: cpu number which is entering lazy tlb | |
67 | * | |
68 | * tsk->mm will be NULL | |
69 | */ | |
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do on ARM when a task enters lazy TLB mode. */
}
74 | ||
75 | /* | |
76 | * This is the actual mm switch as far as the scheduler | |
77 | * is concerned. No registers are touched. We avoid | |
78 | * calling the CPU specific function when the mm hasn't | |
79 | * actually changed. | |
80 | */ | |
81 | static inline void | |
82 | switch_mm(struct mm_struct *prev, struct mm_struct *next, | |
83 | struct task_struct *tsk) | |
84 | { | |
85 | unsigned int cpu = smp_processor_id(); | |
86 | ||
87 | if (prev != next) { | |
88 | cpu_set(cpu, next->cpu_vm_mask); | |
89 | check_context(next); | |
90 | cpu_switch_mm(next->pgd, next); | |
7e5e6e9a RK |
91 | if (cache_is_vivt()) |
92 | cpu_clear(cpu, prev->cpu_vm_mask); | |
1da177e4 LT |
93 | } |
94 | } | |
95 | ||
96 | #define deactivate_mm(tsk,mm) do { } while (0) | |
97 | #define activate_mm(prev,next) switch_mm(prev, next, NULL) | |
98 | ||
99 | #endif |