ARC: [SMP] TLB flush
author    Vineet Gupta <vgupta@synopsys.com>
          Sun, 27 Oct 2013 09:19:02 +0000 (14:49 +0530)
committer Vineet Gupta <vgupta@synopsys.com>
          Wed, 6 Nov 2013 05:11:45 +0000 (10:41 +0530)
- Add mm_cpumask setting (aggregating only, unlike some other arches),
  used to restrict TLB flush cross-calls; a sketch of the idea follows
  the sign-offs below

- cross-calling versions of TLB flush routines (thanks to Noam)

Signed-off-by: Noam Camus <noamc@ezchip.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
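
A minimal user-space sketch of the "aggregating" mm_cpumask policy named
above (the bitmask, helper names and printouts are illustrative stand-ins,
not kernel code): bits are only ever set when a task is switched in on a
CPU, never cleared on switch-out, and the resulting mask bounds which CPUs
receive TLB flush IPIs.

#include <stdio.h>

#define NR_CPUS 4

static unsigned long mm_cpumask;      /* stands in for mm_cpumask(mm) */

static void switch_mm_on(int cpu)
{
        mm_cpumask |= 1UL << cpu;     /* aggregating: set only, never cleared */
}

static void flush_tlb_mm_sim(void)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (mm_cpumask & (1UL << cpu))
                        printf("IPI -> CPU%d: local_flush_tlb_mm()\n", cpu);
}

int main(void)
{
        switch_mm_on(1);              /* T1 runs on C1 ...       */
        switch_mm_on(3);              /* ... then migrates to C3 */
        flush_tlb_mm_sim();           /* C1 is still in the mask, so a later
                                         re-migration to C1 cannot hit stale
                                         TLB entries */
        return 0;
}
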
arch/arc/include/asm/mmu_context.h
arch/arc/include/asm/tlbflush.h
arch/arc/kernel/smp.c
arch/arc/mm/tlb.c

index 45f06f566b027bd29adcb920573a23dea42cf56f..1fd467ef658fe861b34f36247674d8f3e167acad 100644 (file)
@@ -80,7 +80,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
        /* move to new ASID and handle rollover */
        if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {
 
-               flush_tlb_all();
+               local_flush_tlb_all();
 
                /*
                 * Above check is for rollover of 8 bit ASID in 32 bit container.
@@ -131,6 +131,21 @@ static inline void destroy_context(struct mm_struct *mm)
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
 {
+       const int cpu = smp_processor_id();
+
+       /*
+        * Note that the mm_cpumask is "aggregating" only: we set a CPU's
+        * bit but never clear it on switch-out, unlike some other arches.
+        * It is used to enlist CPUs to send TLB flush IPIs to; skipping a
+        * CPU that a task once ran on could cause stale TLB entries to be
+        * re-used, especially for a multi-threaded task.
+        * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps.
+        *      With a non-aggregating mm_cpumask, no IPI is sent to C1, and
+        *      if T1 were to re-migrate to C1, it could access the unmapped
+        *      region via any existing stale TLB entries.
+        */
+       cpumask_set_cpu(cpu, mm_cpumask(next));
+
 #ifndef CONFIG_SMP
        /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
        write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
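
The flush_tlb_all() -> local_flush_tlb_all() change in this hunk reflects
that ASIDs are allocated per CPU: when one CPU's 8-bit ASID space wraps,
only that CPU's TLB can hold entries tagged with recycled ASIDs, so a local
flush suffices. A self-contained sketch of that reasoning (the counter
layout is simplified from the real allocator):

#include <stdio.h>

#define NR_CPUS   2
#define ASID_MASK 0xff

static unsigned long asid_cpu[NR_CPUS];   /* per-CPU ASID counters */

static unsigned long get_new_asid(int cpu)
{
        if (!(++asid_cpu[cpu] & ASID_MASK)) {
                /* this CPU's 8-bit ASID space wrapped: recycled ASIDs may
                 * still tag entries in this CPU's TLB, so flush locally;
                 * other CPUs' allocators are untouched */
                printf("cpu%d: local_flush_tlb_all()\n", cpu);
                ++asid_cpu[cpu];           /* skip the reserved 0 value */
        }
        return asid_cpu[cpu] & ASID_MASK;
}

int main(void)
{
        for (int i = 0; i < 300; i++)      /* force one rollover on cpu0 */
                get_new_asid(0);
        printf("cpu0 asid=%lu, cpu1 asid=%lu\n",
               asid_cpu[0] & ASID_MASK, asid_cpu[1] & ASID_MASK);
        return 0;
}
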
index b2f9bc7f68c8cb13ba73a368a321ffc2ece31c05..71c7b2e4b8745002083e71fd19ae28305d62972e 100644 (file)
@@ -18,11 +18,18 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 void local_flush_tlb_range(struct vm_area_struct *vma,
                           unsigned long start, unsigned long end);
 
-/* XXX: Revisit for SMP */
+#ifndef CONFIG_SMP
 #define flush_tlb_range(vma, s, e)     local_flush_tlb_range(vma, s, e)
 #define flush_tlb_page(vma, page)      local_flush_tlb_page(vma, page)
 #define flush_tlb_kernel_range(s, e)   local_flush_tlb_kernel_range(s, e)
 #define flush_tlb_all()                        local_flush_tlb_all()
 #define flush_tlb_mm(mm)               local_flush_tlb_mm(mm)
-
+#else
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                                                        unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+#endif /* CONFIG_SMP */
 #endif
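
The header now dispatches at compile time: under !CONFIG_SMP the flush_*
names alias the local_* routines via macros, while under CONFIG_SMP they
are real cross-calling functions (defined in tlb.c below). A self-contained
sketch of the pattern, compilable with and without -DCONFIG_SMP (names and
bodies simplified):

#include <stdio.h>

static void local_flush_tlb_all(void)
{
        puts("flush this CPU's TLB only");
}

#ifndef CONFIG_SMP
#define flush_tlb_all() local_flush_tlb_all()
#else
static void flush_tlb_all(void)            /* kernel: on_each_cpu(...) */
{
        puts("cross-call: run the local flush on every online CPU");
        local_flush_tlb_all();             /* each CPU runs its handler */
}
#endif

int main(void)
{
        flush_tlb_all();   /* same call site resolves either way */
        return 0;
}
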
index 7f0ab1ecd6408a23c637fd0f62f90e1e5899f7e7..41bc4c703f424b4442896a634e1cb7692f2b5bdc 100644 (file)
@@ -128,6 +128,7 @@ void start_kernel_secondary(void)
        atomic_inc(&mm->mm_users);
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
+       cpumask_set_cpu(cpu, mm_cpumask(mm));
 
        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);
index db0f0f82398081c7b9052da1c8229db63ad9f935..e1acf0ce56479d63be92629ad7b7c19c2ddd8950 100644 (file)
@@ -363,6 +363,79 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
        local_irq_restore(flags);
 }
 
+#ifdef CONFIG_SMP
+
+struct tlb_args {
+       struct vm_area_struct *ta_vma;
+       unsigned long ta_start;
+       unsigned long ta_end;
+};
+
+static inline void ipi_flush_tlb_page(void *arg)
+{
+       struct tlb_args *ta = arg;
+
+       local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_range(void *arg)
+{
+       struct tlb_args *ta = arg;
+
+       local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+
+static inline void ipi_flush_tlb_kernel_range(void *arg)
+{
+       struct tlb_args *ta = arg;
+
+       local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
+}
+
+void flush_tlb_all(void)
+{
+       on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+       on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
+                        mm, 1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+       struct tlb_args ta = {
+               .ta_vma = vma,
+               .ta_start = uaddr
+       };
+
+       on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                    unsigned long end)
+{
+       struct tlb_args ta = {
+               .ta_vma = vma,
+               .ta_start = start,
+               .ta_end = end
+       };
+
+       on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       struct tlb_args ta = {
+               .ta_start = start,
+               .ta_end = end
+       };
+
+       on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+}
+#endif
+
 /*
  * Routine to create a TLB entry
  */
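
One idiom worth noting in the tlb.c hunk above: the cross-call API passes a
single void *, so multi-argument flushes bundle their parameters in an
on-stack struct tlb_args, and taking its address is safe only because the
calls are synchronous (wait == 1). A user-space sketch of that marshalling
(on_each_cpu_sim and ipi_handler are stand-ins for the kernel's on_each_cpu
and the ipi_flush_tlb_* handlers):

#include <stdio.h>

struct tlb_args {
        unsigned long ta_start;
        unsigned long ta_end;
};

/* stands in for the ipi_flush_tlb_* handlers */
static void ipi_handler(void *arg)
{
        struct tlb_args *ta = arg;

        printf("flush [%#lx, %#lx)\n", ta->ta_start, ta->ta_end);
}

/* stands in for on_each_cpu(func, info, wait) with wait == 1 */
static void on_each_cpu_sim(void (*func)(void *), void *info)
{
        func(info);        /* kernel: run on every CPU, then wait */
}

int main(void)
{
        struct tlb_args ta = { .ta_start = 0x1000, .ta_end = 0x2000 };

        on_each_cpu_sim(ipi_handler, &ta);  /* &ta stays valid: call blocks */
        return 0;
}
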