/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/elfcore.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/hardirq.h>
#include <linux/syscalls.h>
#include <linux/kernel.h>
#include <linux/tracehook.h>
#include <linux/signal.h>
#include <linux/delay.h>
#include <linux/context_tracking.h>
#include <asm/stack.h>
#include <asm/switch_to.h>
#include <asm/homecache.h>
#include <asm/syscalls.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#ifdef CONFIG_HARDWALL
#include <asm/hardwall.h>
#endif
#include <arch/chip.h>
#include <arch/sim_def.h>

/*
 * Use the (x86) "idle=poll" option to prefer low latency when leaving the
 * idle loop over low power while in the idle loop, e.g. if we have
 * one thread per core and we want to get threads out of futex waits fast.
 */
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		cpu_idle_poll_ctrl(true);
		return 0;
	} else if (!strcmp(str, "halt")) {
		return 0;
	}
	return -1;
}
early_param("idle", idle_setup);
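
/*
 * Illustrative usage: booting with "idle=poll" on the kernel command
 * line selects the polling idle loop above, while "idle=halt" (or no
 * "idle=" option at all) keeps the default low-power idle behavior.
 */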

void arch_cpu_idle(void)
{
	__this_cpu_write(irq_stat.idle_timestamp, jiffies);
	_cpu_idle();
}

/*
 * Release a thread_info structure
 */
void arch_release_thread_stack(unsigned long *stack)
{
	struct thread_info *info = (void *)stack;
	struct single_step_state *step_state = info->step_state;

	if (step_state) {
		/*
		 * FIXME: we don't munmap step_state->buffer
		 * because the mm_struct for this process (info->task->mm)
		 * has already been zeroed in exit_mm().  Keeping a
		 * reference to it here seems like a bad move, so this
		 * means we can't munmap() the buffer, and therefore if we
		 * ptrace multiple threads in a process, we will slowly
		 * leak user memory.  (Note that as soon as the last
		 * thread in a process dies, we will reclaim all user
		 * memory including single-step buffers in the usual way.)
		 * We should either assign a kernel VA to this buffer
		 * somehow, or we should associate the buffer(s) with the
		 * mm itself so we can clean them up that way.
		 */
		kfree(step_state);
	}
}

static void save_arch_state(struct thread_struct *t);

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);
	unsigned long ksp;
	unsigned long *callee_regs;

	/*
	 * Set up the stack and stack pointer appropriately for the
	 * new child to find itself woken up in __switch_to().
	 * The callee-saved registers must be on the stack to be read;
	 * the new task will then jump to assembly support to handle
	 * calling schedule_tail(), etc., and (for userspace tasks)
	 * returning to the context set up in the pt_regs.
	 */
	ksp = (unsigned long) childregs;
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* interrupt-entry save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
	callee_regs = (unsigned long *)ksp;
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* __switch_to() save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	p->thread.ksp = ksp;
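
	/*
	 * At this point the child's kernel stack, from higher to lower
	 * addresses, looks like (sketch based on the code above):
	 *
	 *   [ struct pt_regs (childregs) ]
	 *   [ C_ABI_SAVE_AREA_SIZE bytes: interrupt-entry save area ]
	 *   [ CALLEE_SAVED_REGS_COUNT words, base = callee_regs ]
	 *   [ C_ABI_SAVE_AREA_SIZE bytes: __switch_to() save area ]  <- ksp
	 */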

	/* Record the pid of the task that created this one. */
	p->thread.creator_pid = current->pid;

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		memset(&callee_regs[2], 0,
		       (CALLEE_SAVED_REGS_COUNT - 2) * sizeof(unsigned long));
		callee_regs[0] = sp;   /* r30 = function */
		callee_regs[1] = arg;  /* r31 = arg */
		p->thread.pc = (unsigned long) ret_from_kernel_thread;
		return 0;
	}

	/*
	 * Start new thread in ret_from_fork so it schedules properly
	 * and then return from interrupt like the parent.
	 */
	p->thread.pc = (unsigned long) ret_from_fork;

	/*
	 * Do not clone step state from the parent; each thread
	 * must make its own lazily.
	 */
	task_thread_info(p)->step_state = NULL;

#ifdef __tilegx__
	/*
	 * Do not clone unalign jit fixup from the parent; each thread
	 * must allocate its own on demand.
	 */
	task_thread_info(p)->unalign_jit_base = NULL;
#endif

	/*
	 * Copy the registers onto the kernel stack so the
	 * return-from-interrupt code will reload it into registers.
	 */
	*childregs = *current_pt_regs();
	childregs->regs[0] = 0;         /* return value is zero */
	childregs->sp = sp;  /* override with new user stack pointer */
	memcpy(callee_regs, &childregs->regs[CALLEE_SAVED_FIRST_REG],
	       CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long));

	/* Save user stack top pointer so we can ID the stack vm area later. */
	p->thread.usp0 = childregs->sp;

	/*
	 * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
	 * which is passed in as arg #5 to sys_clone().
	 */
	if (clone_flags & CLONE_SETTLS)
		childregs->tp = childregs->regs[4];

#if CHIP_HAS_TILE_DMA()
	/*
	 * No DMA in the new thread.  We model this on the fact that
	 * fork() clears the pending signals, alarms, and aio for the child.
	 */
	memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state));
	memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
#endif

	/* New thread has its miscellaneous processor state bits clear. */
	p->thread.proc_status = 0;

#ifdef CONFIG_HARDWALL
	/* New thread does not own any networks. */
	memset(&p->thread.hardwall[0], 0,
	       sizeof(struct hardwall_task) * HARDWALL_TYPES);
#endif

	/*
	 * Start the new thread with the current architecture state
	 * (user interrupt masks, etc.).
	 */
	save_arch_state(&p->thread);

	return 0;
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	task_thread_info(tsk)->align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(task_thread_info(tsk)->align_ctl,
			(unsigned int __user *)adr);
}
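
/*
 * Note: set_unalign_ctl()/get_unalign_ctl() are the per-arch hooks behind
 * the PR_SET_UNALIGN and PR_GET_UNALIGN prctl() operations, e.g.
 * prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS) from user space to request a
 * SIGBUS instead of in-kernel fixup for unaligned accesses.
 */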

static struct task_struct corrupt_current = { .comm = "<corrupt>" };

/*
 * Return "current" if it looks plausible, or else a pointer to a dummy.
 * This can be helpful if we are just trying to emit a clean panic.
 */
struct task_struct *validate_current(void)
{
	struct task_struct *tsk = current;
	if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
		     (high_memory && (void *)tsk > high_memory) ||
		     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
		pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
		tsk = &corrupt_current;
	}
	return tsk;
}

/* Take and return the pointer to the previous task, for schedule_tail(). */
struct task_struct *sim_notify_fork(struct task_struct *prev)
{
	struct task_struct *tsk = current;
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT |
		     (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS));
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK |
		     (tsk->pid << _SIM_CONTROL_OPERATOR_BITS));
	return prev;
}

int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *ptregs = task_pt_regs(tsk);
	elf_core_copy_regs(regs, ptregs);
	return 1;
}
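
/*
 * Note: dump_task_regs() is the hook used when writing ELF core dumps;
 * the return value of 1 above indicates the register set was provided.
 */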

#if CHIP_HAS_TILE_DMA()

/* Allow user processes to access the DMA SPRs */
void grant_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#else
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
#endif
}

/* Forbid user processes from accessing the DMA SPRs */
void restrict_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
#else
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#endif
}

/* Pause the DMA engine, then save off its state registers. */
static void save_tile_dma_state(struct tile_dma_state *dma)
{
	unsigned long state = __insn_mfspr(SPR_DMA_USER_STATUS);
	unsigned long post_suspend_state;

	/* If we're running, suspend the engine. */
	if ((state & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK)
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);

	/*
	 * Wait for the engine to idle, then save regs.  Note that we
	 * want to record the "running" bit from before suspension,
	 * and the "done" bit from after, so that we can properly
	 * distinguish a case where the user suspended the engine from
	 * the case where the kernel suspended as part of the context
	 * switch.
	 */
	do {
		post_suspend_state = __insn_mfspr(SPR_DMA_USER_STATUS);
	} while (post_suspend_state & SPR_DMA_STATUS__BUSY_MASK);

	dma->src = __insn_mfspr(SPR_DMA_SRC_ADDR);
	dma->src_chunk = __insn_mfspr(SPR_DMA_SRC_CHUNK_ADDR);
	dma->dest = __insn_mfspr(SPR_DMA_DST_ADDR);
	dma->dest_chunk = __insn_mfspr(SPR_DMA_DST_CHUNK_ADDR);
	dma->strides = __insn_mfspr(SPR_DMA_STRIDE);
	dma->chunk_size = __insn_mfspr(SPR_DMA_CHUNK_SIZE);
	dma->byte = __insn_mfspr(SPR_DMA_BYTE);
	dma->status = (state & SPR_DMA_STATUS__RUNNING_MASK) |
		(post_suspend_state & SPR_DMA_STATUS__DONE_MASK);
}

/* Restart a DMA that was running before we were context-switched out. */
static void restore_tile_dma_state(struct thread_struct *t)
{
	const struct tile_dma_state *dma = &t->tile_dma_state;

	/*
	 * The only way to restore the done bit is to run a zero
	 * length transaction.
	 */
	if ((dma->status & SPR_DMA_STATUS__DONE_MASK) &&
	    !(__insn_mfspr(SPR_DMA_USER_STATUS) & SPR_DMA_STATUS__DONE_MASK)) {
		__insn_mtspr(SPR_DMA_BYTE, 0);
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}

	__insn_mtspr(SPR_DMA_SRC_ADDR, dma->src);
	__insn_mtspr(SPR_DMA_SRC_CHUNK_ADDR, dma->src_chunk);
	__insn_mtspr(SPR_DMA_DST_ADDR, dma->dest);
	__insn_mtspr(SPR_DMA_DST_CHUNK_ADDR, dma->dest_chunk);
	__insn_mtspr(SPR_DMA_STRIDE, dma->strides);
	__insn_mtspr(SPR_DMA_CHUNK_SIZE, dma->chunk_size);
	__insn_mtspr(SPR_DMA_BYTE, dma->byte);

	/*
	 * Restart the engine if we were running and not done.
	 * Clear a pending async DMA fault that we were waiting on return
	 * to user space to execute, since we expect the DMA engine
	 * to regenerate those faults for us now.  Note that we don't
	 * try to clear the TIF_ASYNC_TLB flag, since it's relatively
	 * harmless if set, and it covers both DMA and the SN processor.
	 */
	if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) {
		t->dma_async_tlb.fault_num = 0;
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
	}
}

#endif

static void save_arch_state(struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
	t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0_0) |
		((u64)__insn_mfspr(SPR_INTERRUPT_MASK_0_1) << 32);
#else
	t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0);
#endif
	t->ex_context[0] = __insn_mfspr(SPR_EX_CONTEXT_0_0);
	t->ex_context[1] = __insn_mfspr(SPR_EX_CONTEXT_0_1);
	t->system_save[0] = __insn_mfspr(SPR_SYSTEM_SAVE_0_0);
	t->system_save[1] = __insn_mfspr(SPR_SYSTEM_SAVE_0_1);
	t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
	t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
	t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
	t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
#if !CHIP_HAS_FIXED_INTVEC_BASE()
	t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
#endif
	t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
#if CHIP_HAS_DSTREAM_PF()
	t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
#endif
}

static void restore_arch_state(const struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
	__insn_mtspr(SPR_INTERRUPT_MASK_0_0, (u32) t->interrupt_mask);
	__insn_mtspr(SPR_INTERRUPT_MASK_0_1, t->interrupt_mask >> 32);
#else
	__insn_mtspr(SPR_INTERRUPT_MASK_0, t->interrupt_mask);
#endif
	__insn_mtspr(SPR_EX_CONTEXT_0_0, t->ex_context[0]);
	__insn_mtspr(SPR_EX_CONTEXT_0_1, t->ex_context[1]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_0, t->system_save[0]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_1, t->system_save[1]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
	__insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
	__insn_mtspr(SPR_PROC_STATUS, t->proc_status);
#if !CHIP_HAS_FIXED_INTVEC_BASE()
	__insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
#endif
	__insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
#if CHIP_HAS_DSTREAM_PF()
	__insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
#endif
}

void _prepare_arch_switch(struct task_struct *next)
{
#if CHIP_HAS_TILE_DMA()
	struct tile_dma_state *dma = &current->thread.tile_dma_state;
	if (dma->enabled)
		save_tile_dma_state(dma);
#endif
}

struct task_struct *__sched _switch_to(struct task_struct *prev,
				       struct task_struct *next)
{
	/* DMA state is already saved; save off other arch state. */
	save_arch_state(&prev->thread);

#if CHIP_HAS_TILE_DMA()
	/*
	 * Restore DMA in new task if desired.
	 * Note that it is only safe to restart here since interrupts
	 * are disabled, so we can't take any DMATLB miss or access
	 * interrupts before we have finished switching stacks.
	 */
	if (next->thread.tile_dma_state.enabled) {
		restore_tile_dma_state(&next->thread);
		grant_dma_mpls();
	} else {
		restrict_dma_mpls();
	}
#endif

	/* Restore other arch state. */
	restore_arch_state(&next->thread);

#ifdef CONFIG_HARDWALL
	/* Enable or disable access to the network registers appropriately. */
	hardwall_switch_tasks(prev, next);
#endif

	/* Notify the simulator of task exit. */
	if (unlikely(prev->state == TASK_DEAD))
		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |
			     (prev->pid << _SIM_CONTROL_OPERATOR_BITS));

	/*
	 * Switch kernel SP, PC, and callee-saved registers.
	 * In the context of the new task, return the old task pointer
	 * (i.e. the task that actually called __switch_to).
	 * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
	 */
	return __switch_to(prev, next, next_current_ksp0(next));
}

/*
 * This routine is called on return from interrupt if any of the
 * TIF_ALLWORK_MASK flags are set in thread_info->flags.  It is
 * entered with interrupts disabled so we don't miss an event that
 * modified the thread_info flags.  We loop until all the tested flags
 * are clear.  Note that the function is called on certain conditions
 * that are not listed in the loop condition here (e.g. SINGLESTEP)
 * which guarantees we will do those things once, and redo them if any
 * of the other work items is re-done, but won't continue looping if
 * all the other work is done.
 */
void prepare_exit_to_usermode(struct pt_regs *regs, u32 thread_info_flags)
{
	if (WARN_ON(!user_mode(regs)))
		return;

	do {
		local_irq_enable();

		if (thread_info_flags & _TIF_NEED_RESCHED)
			schedule();

#if CHIP_HAS_TILE_DMA()
		if (thread_info_flags & _TIF_ASYNC_TLB)
			do_async_page_fault(regs);
#endif

		if (thread_info_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (thread_info_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}

		local_irq_disable();
		thread_info_flags = READ_ONCE(current_thread_info()->flags);

	} while (thread_info_flags & _TIF_WORK_MASK);
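
	/*
	 * The flags are re-read above with interrupts disabled, so any flag
	 * set by an interrupt after the individual checks is noticed on the
	 * next loop iteration instead of being lost on the way back to
	 * user space.
	 */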

	if (thread_info_flags & _TIF_SINGLESTEP) {
		single_step_once(regs);
		/*
		 * FIXME: on tilepro, since we enable interrupts in
		 * this routine, it's possible that we miss a signal
		 * or other asynchronous event.
		 */
	}

	user_enter();
}

unsigned long get_wchan(struct task_struct *p)
{
	struct KBacktraceIterator kbt;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	for (KBacktraceIterator_init(&kbt, p, NULL);
	     !KBacktraceIterator_end(&kbt);
	     KBacktraceIterator_next(&kbt)) {
		if (!in_sched_functions(kbt.it.pc))
			return kbt.it.pc;
	}

	return 0;
}

/* Flush thread state. */
void flush_thread(void)
{
	/* Nothing */
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(struct task_struct *tsk)
{
#ifdef CONFIG_HARDWALL
	/*
	 * Remove the task from the list of tasks that are associated
	 * with any live hardwalls.  (If the task that is exiting held
	 * the last reference to a hardwall fd, it would already have
	 * been released and deactivated at this point.)
	 */
	hardwall_deactivate_all(tsk);
#endif
}

void tile_show_regs(struct pt_regs *regs)
{
	int i;
#ifdef __tilegx__
	for (i = 0; i < 17; i++)
		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
		       i, regs->regs[i], i+18, regs->regs[i+18],
		       i+36, regs->regs[i+36]);
	pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n",
	       regs->regs[17], regs->regs[35], regs->tp);
	pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
#else
	for (i = 0; i < 13; i++)
		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
		       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
		       i, regs->regs[i], i+14, regs->regs[i+14],
		       i+27, regs->regs[i+27], i+40, regs->regs[i+40]);
	pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
	       regs->regs[13], regs->tp, regs->sp, regs->lr);
#endif
	pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld flags:%s%s%s%s\n",
	       regs->pc, regs->ex1, regs->faultnum,
	       is_compat_task() ? " compat" : "",
	       (regs->flags & PT_FLAGS_DISABLE_IRQ) ? " noirq" : "",
	       !(regs->flags & PT_FLAGS_CALLER_SAVES) ? " nocallersave" : "",
	       (regs->flags & PT_FLAGS_RESTORE_REGS) ? " restoreregs" : "");
}

void show_regs(struct pt_regs *regs)
{
	struct KBacktraceIterator kbt;

	show_regs_print_info(KERN_DEFAULT);
	tile_show_regs(regs);

	KBacktraceIterator_init(&kbt, NULL, regs);
	tile_show_stack(&kbt);
}

#ifdef __tilegx__
/* To ensure stack dump on tiles occurs one by one. */
static DEFINE_SPINLOCK(backtrace_lock);
/* To ensure no backtrace occurs before all of the stack dump are done. */
static atomic_t backtrace_cpus;
/* The cpu mask to avoid reentrance. */
static struct cpumask backtrace_mask;

void do_nmi_dump_stack(struct pt_regs *regs)
{
	int is_idle = is_idle_task(current) && !in_interrupt();
	int cpu;

	nmi_enter();
	cpu = smp_processor_id();
	if (WARN_ON_ONCE(!cpumask_test_and_clear_cpu(cpu, &backtrace_mask)))
		goto done;

	spin_lock(&backtrace_lock);
	if (is_idle)
		pr_info("CPU: %d idle\n", cpu);
	else
		show_regs(regs);
	spin_unlock(&backtrace_lock);
	atomic_dec(&backtrace_cpus);

done:
	nmi_exit();
}
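
/*
 * do_nmi_dump_stack() above is the target of the NMIs sent by
 * arch_trigger_all_cpu_backtrace() below via hv_send_nmi(tile,
 * TILE_NMI_DUMP_STACK, 0): each remote cpu clears itself from
 * backtrace_mask, dumps its state under backtrace_lock, and decrements
 * backtrace_cpus, which gates any subsequent all-cpu backtrace until
 * every dump has completed.
 */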
626 void arch_trigger_all_cpu_backtrace(bool self
)
630 unsigned int timeout
;
633 HV_NMI_Info info
[NR_CPUS
];
635 ongoing
= atomic_cmpxchg(&backtrace_cpus
, 0, num_online_cpus() - 1);
637 pr_err("Trying to do all-cpu backtrace.\n");
638 pr_err("But another all-cpu backtrace is ongoing (%d cpus left)\n",
641 pr_err("Reporting the stack on this cpu only.\n");
647 cpumask_copy(&mask
, cpu_online_mask
);
648 cpumask_clear_cpu(smp_processor_id(), &mask
);
649 cpumask_copy(&backtrace_mask
, &mask
);
651 /* Backtrace for myself first. */
655 /* Tentatively dump stack on remote tiles via NMI. */
657 while (!cpumask_empty(&mask
) && timeout
) {
658 for_each_cpu(cpu
, &mask
) {
661 info
[cpu
] = hv_send_nmi(tile
, TILE_NMI_DUMP_STACK
, 0);
662 if (info
[cpu
].result
== HV_NMI_RESULT_OK
)
663 cpumask_clear_cpu(cpu
, &mask
);
670 /* Warn about cpus stuck in ICS and decrement their counts here. */
671 if (!cpumask_empty(&mask
)) {
672 for_each_cpu(cpu
, &mask
) {
673 switch (info
[cpu
].result
) {
674 case HV_NMI_RESULT_FAIL_ICS
:
675 pr_warn("Skipping stack dump of cpu %d in ICS at pc %#llx\n",
678 case HV_NMI_RESULT_FAIL_HV
:
679 pr_warn("Skipping stack dump of cpu %d in hypervisor\n",
683 pr_warn("Hypervisor too old to allow remote stack dumps.\n");
685 default: /* should not happen */
686 pr_warn("Skipping stack dump of cpu %d [%d,%#llx]\n",
687 cpu
, info
[cpu
].result
, info
[cpu
].pc
);
692 atomic_sub(cpumask_weight(&mask
), &backtrace_cpus
);
695 #endif /* __tilegx_ */