arch/tile/kernel/smp.c, from Linux commit 867e359b ("arch/tile: core support for Tilera 32-bit chips")
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * TILE SMP support routines.
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <asm/cacheflush.h>

HV_Topology smp_topology __write_once;


/*
 * Top-level send_IPI*() functions to send messages to other cpus.
 */

/* Set by smp_send_stop() to avoid recursive panics. */
static int stopping_cpus;

void send_IPI_single(int cpu, int tag)
{
        HV_Recipient recip = {
                .y = cpu / smp_width,
                .x = cpu % smp_width,
                .state = HV_TO_BE_SENT
        };
        int rc = hv_send_message(&recip, 1, (HV_VirtAddr)&tag, sizeof(tag));
        BUG_ON(rc <= 0);
}
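
/*
 * Added commentary (not in the original file): linear cpu numbers map
 * onto the chip's mesh row-major via smp_width, which comes from the
 * hypervisor topology above.  A worked example with illustrative
 * numbers: if smp_width == 8, cpu 19 sits at x = 19 % 8 = 3,
 * y = 19 / 8 = 2.
 */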

void send_IPI_many(const struct cpumask *mask, int tag)
{
        HV_Recipient recip[NR_CPUS];
        int cpu, sent;
        int nrecip = 0;
        int my_cpu = smp_processor_id();
        for_each_cpu(cpu, mask) {
                HV_Recipient *r;
                BUG_ON(cpu == my_cpu);
                r = &recip[nrecip++];
                r->y = cpu / smp_width;
                r->x = cpu % smp_width;
                r->state = HV_TO_BE_SENT;
        }

        /*
         * Hand the whole recipient array to the hypervisor until every
         * recipient has been sent to: hv_send_message() returns how many
         * messages it delivered and updates recip[].state, so entries
         * already sent are skipped on the next pass.
         */
        sent = 0;
        while (sent < nrecip) {
                int rc = hv_send_message(recip, nrecip,
                                         (HV_VirtAddr)&tag, sizeof(tag));
                if (rc <= 0) {
                        if (!stopping_cpus)     /* avoid recursive panic */
                                panic("hv_send_message returned %d", rc);
                        break;
                }
                sent += rc;
        }
}

void send_IPI_allbutself(int tag)
{
        struct cpumask mask;
        cpumask_copy(&mask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &mask);
        send_IPI_many(&mask, tag);
}


/*
 * Provide smp_call_function_mask(), but also run the function locally
 * if the current cpu is included in the mask.
 */
void on_each_cpu_mask(const struct cpumask *mask, void (*func)(void *),
                      void *info, bool wait)
{
        int cpu = get_cpu();
        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(cpu, mask)) {
                local_irq_disable();
                func(info);
                local_irq_enable();
        }
        put_cpu();
}
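
/*
 * Usage sketch (not part of the original file): a hypothetical caller
 * that bumps a counter on every cpu in a mask, including the local one,
 * and waits for completion.  The example_* names are invented for
 * illustration; the block is guarded by #if 0 so it is never built.
 */
#if 0
static void example_hit(void *info)
{
        atomic_inc((atomic_t *)info);   /* runs on each cpu in the mask */
}

static int example_count_cpus(const struct cpumask *mask)
{
        atomic_t hits = ATOMIC_INIT(0);

        /* wait == true: do not return until func has run everywhere */
        on_each_cpu_mask(mask, example_hit, &hits, true);
        return atomic_read(&hits);
}
#endif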


/*
 * Functions related to starting/stopping cpus.
 */

/* Handler to start the current cpu. */
static void smp_start_cpu_interrupt(void)
{
        extern unsigned long start_cpu_function_addr;

        /*
         * Point the interrupted pc at the secondary-start code; the
         * return from this interrupt then begins executing there.
         */
        get_irq_regs()->pc = start_cpu_function_addr;
}

/* Handler to stop the current cpu. */
static void smp_stop_cpu_interrupt(void)
{
        set_cpu_online(smp_processor_id(), 0);
        raw_local_irq_disable_all();
        for (;;)
                asm("nap");     /* low-power idle until hard reset */
}

/* This function calls the 'stop' function on all other CPUs in the system. */
void smp_send_stop(void)
{
        stopping_cpus = 1;
        send_IPI_allbutself(MSG_TAG_STOP_CPU);
}
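
/*
 * Added commentary (not in the original file): smp_send_stop() is
 * typically reached from panic-style shutdown paths.  Setting
 * stopping_cpus before sending the IPI means that if hv_send_message()
 * fails while the machine is already going down, send_IPI_many() just
 * breaks out of its retry loop instead of panicking again.
 */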


/*
 * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages.
 */
void evaluate_message(int tag)
{
        switch (tag) {
        case MSG_TAG_START_CPU:         /* Start up a cpu */
                smp_start_cpu_interrupt();
                break;

        case MSG_TAG_STOP_CPU:          /* Sent to shut down slave CPUs */
                smp_stop_cpu_interrupt();
                break;

        case MSG_TAG_CALL_FUNCTION_MANY: /* Call function on cpumask */
                generic_smp_call_function_interrupt();
                break;

        case MSG_TAG_CALL_FUNCTION_SINGLE: /* Call function on one other CPU */
                generic_smp_call_function_single_interrupt();
                break;

        default:
                panic("Unknown IPI message tag %d", tag);
                break;
        }
}
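
/*
 * Added sketch (assumption, not part of this file): the hv_message_intr()
 * dispatcher referenced above lives elsewhere in arch/tile.  For
 * HV_MSG_TILE messages it would recover the int-sized tag that
 * send_IPI_single()/send_IPI_many() passed as the payload and hand it to
 * evaluate_message(), roughly as below.  The receive call, types, and the
 * example_* name are assumptions about the hypervisor API.
 */
#if 0
static void example_drain_one_message(HV_MsgState msg_state)
{
        int message[HV_MAX_MESSAGE_SIZE / sizeof(int)];
        HV_RcvMsgInfo rmi = hv_receive_message(msg_state,
                                               (HV_VirtAddr)message,
                                               sizeof(message));
        if (rmi.msglen == sizeof(int) && rmi.source == HV_MSG_TILE)
                evaluate_message(message[0]);
}
#endif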


/*
 * The flush_icache_range() implementation broadcasts the flush to
 * every cpu (via on_each_cpu(), built on smp_call_function()), so each
 * cpu flushes the range from its own icache.
 */

struct ipi_flush {
        unsigned long start;
        unsigned long end;
};

static void ipi_flush_icache_range(void *info)
{
        struct ipi_flush *flush = (struct ipi_flush *) info;
        __flush_icache_range(flush->start, flush->end);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
        struct ipi_flush flush = { start, end };
        preempt_disable();
        on_each_cpu(ipi_flush_icache_range, &flush, 1);
        preempt_enable();
}
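
/*
 * Usage sketch (hypothetical caller, not in this file): after storing new
 * instructions, e.g. when patching kernel text, the range must be flushed
 * so every cpu's icache picks up the new bytes.  The example_* name and
 * its parameters are invented for illustration.
 */
#if 0
static void example_patch_insns(void *addr, const void *new_insns, size_t len)
{
        memcpy(addr, new_insns, len);
        flush_icache_range((unsigned long)addr, (unsigned long)addr + len);
}
#endif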


/*
 * The smp_send_reschedule() path does not use the hv_message_intr()
 * path but instead the faster tile_dev_intr() path for interrupts.
 */

irqreturn_t handle_reschedule_ipi(int irq, void *token)
{
        /*
         * Nothing to do here; when we return from the interrupt, the
         * rescheduling will occur there.  But do bump the interrupt
         * profiler count in the meantime.
         */
        __get_cpu_var(irq_stat).irq_resched_count++;

        return IRQ_HANDLED;
}

void smp_send_reschedule(int cpu)
{
        HV_Coord coord;

        WARN_ON(cpu_is_offline(cpu));
        coord.y = cpu / smp_width;
        coord.x = cpu % smp_width;
        hv_trigger_ipi(coord, IRQ_RESCHEDULE);
}
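
/*
 * Added commentary (not in the original file): handle_reschedule_ipi() is
 * expected to be bound to IRQ_RESCHEDULE by the arch's normal IRQ setup
 * elsewhere, so hv_trigger_ipi() on the sender lands directly in this
 * handler on the target cpu, bypassing the slower hypervisor-message path
 * used by the other IPIs above.
 */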