/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>

/*
 * bitmasks of possible and online CPUs.
 * The possible bitmask indicates which CPUs may ever be brought up.
 * The online bitmask indicates which CPUs are up and running.
 */
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we
 * need some other way of telling a new secondary core where to place
 * its SVC stack.
 */
struct secondary_data secondary_data;

/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
struct ipi_data {
	spinlock_t lock;
	unsigned long ipi_count;
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
	/* lockdep-aware form of the deprecated SPIN_LOCK_UNLOCKED */
	.lock = __SPIN_LOCK_UNLOCKED(ipi_data.lock),
};

enum ipi_msg_type {
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	pgd_t *pgd;
	pmd_t *pmd;
	int ret;

	/*
	 * Spawn a new process manually, if not already done.
	 * Grab a pointer to its task struct so we can mess with it
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	}

	/*
	 * Allocate initial page tables to allow the new CPU to
	 * enable the MMU safely.  This essentially means a set
	 * of our "standard" page tables, with the addition of
	 * a 1:1 mapping for the physical address of the kernel.
	 */
	pgd = pgd_alloc(&init_mm);
	if (!pgd)
		return -ENOMEM;
	pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET);
	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);
	wmb();

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		unsigned long timeout;

		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		timeout = jiffies + HZ;
		while (time_before(jiffies, timeout)) {
			if (cpu_online(cpu))
				break;

			udelay(10);
			barrier();
		}

		if (!cpu_online(cpu))
			ret = -EIO;
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	*pmd = __pmd(0);
	pgd_free(&init_mm, pgd);

	if (ret) {
		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

		/*
		 * FIXME: We need to clean up the new idle thread. --rmk
		 */
	}

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpuexit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mach_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	cpu_clear(cpu, cpu_online_map);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop(cpu);

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (p->mm)
			cpu_clear(cpu, p->mm->cpu_vm_mask);
	}
	read_unlock(&tasklist_lock);

	return 0;
}

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or times out.
 */
void __cpuexit __cpu_die(unsigned int cpu)
{
	if (!platform_cpu_kill(cpu))
		printk(KERN_ERR "CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __cpuexit cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk(KERN_INFO "CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpu_set(cpu, mm->cpu_vm_mask);
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	cpu_init();
	preempt_disable();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	notify_cpu_starting(cpu);

	/*
	 * Enable local interrupts.
	 */
	local_irq_enable();
	local_fiq_enable();

	/*
	 * Setup local timer for this CPU.
	 */
	local_timer_setup(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	cpu_set(cpu, cpu_online_map);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(cpu_data, cpu).idle = current;
}

static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);

	for_each_cpu_mask(cpu, callmap) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		spin_lock(&ipi->lock);
		ipi->bits |= 1 << msg;
		spin_unlock(&ipi->lock);
	}

	/*
	 * Call the platform specific cross-CPU call function.
	 */
	smp_cross_call(callmap);

	local_irq_restore(flags);
}

void arch_send_call_function_ipi(cpumask_t mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
}

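/*
 * Report the per-CPU IPI counts, for /proc/interrupts.
 */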
void show_ipi_list(struct seq_file *p)
{
	unsigned int cpu;

	seq_puts(p, "IPI:");

	for_each_present_cpu(cpu)
		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

	seq_putc(p, '\n');
}

void show_local_irqs(struct seq_file *p)
{
	unsigned int cpu;

	seq_puts(p, "LOC: ");

	for_each_present_cpu(cpu)
		seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);

	seq_putc(p, '\n');
}

static void ipi_timer(void)
{
	irq_enter();
	local_timer_interrupt();
	irq_exit();
}

#ifdef CONFIG_LOCAL_TIMERS
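/*
 * Entry point for a private timer interrupt on this CPU.  The tick is
 * only accounted and handled if local_timer_ack() confirms the local
 * timer really raised it.
 */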
asmlinkage void __exception do_local_timer(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	int cpu = smp_processor_id();

	if (local_timer_ack()) {
		irq_stat[cpu].local_timer_irqs++;
		ipi_timer();
	}

	set_irq_regs(old_regs);
}
#endif

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	cpu_clear(cpu, cpu_online_map);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
asmlinkage void __exception do_IPI(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	struct pt_regs *old_regs = set_irq_regs(regs);

	ipi->ipi_count++;

	for (;;) {
		unsigned long msgs;

		spin_lock(&ipi->lock);
		msgs = ipi->bits;
		ipi->bits = 0;
		spin_unlock(&ipi->lock);

		if (!msgs)
			break;

		do {
			unsigned int nextmsg;

			nextmsg = msgs & -msgs;
			msgs &= ~nextmsg;
			nextmsg = ffz(~nextmsg);

			switch (nextmsg) {
			case IPI_TIMER:
				ipi_timer();
				break;

			case IPI_RESCHEDULE:
				/*
				 * nothing more to do - everything is
				 * done on the interrupt return path
				 */
				break;

			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_STOP:
				ipi_cpu_stop(cpu);
				break;

			default:
				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
				       cpu, nextmsg);
				break;
			}
		} while (msgs);
	}

	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void smp_send_timer(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	send_ipi_message(mask, IPI_TIMER);
}

void smp_timer_broadcast(cpumask_t mask)
{
	send_ipi_message(mask, IPI_TIMER);
}

void smp_send_stop(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	send_ipi_message(mask, IPI_CPU_STOP);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static int
on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask)
{
	int ret = 0;

	preempt_disable();

	ret = smp_call_function_mask(mask, func, info, wait);
	if (cpu_isset(smp_processor_id(), mask))
		func(info);

	preempt_enable();

	return ret;
}

/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = (struct mm_struct *)arg;

	local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t mask = mm->cpu_vm_mask;

	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
	struct tlb_args ta;

	ta.ta_vma = vma;
	ta.ta_start = uaddr;

	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
	struct tlb_args ta;

	ta.ta_start = kaddr;

	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
	struct tlb_args ta;

	ta.ta_vma = vma;
	ta.ta_start = start;
	ta.ta_end = end;

	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta;

	ta.ta_start = start;
	ta.ta_end = end;

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}