arch/x86/kernel/apic/bigsmp_32.c
/*
 * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
 *
 * Drives the local APIC in "clustered mode".
 */
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/smp.h>

#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/ipi.h>

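/*
 * In xAPIC mode the 8-bit physical APIC ID sits in bits 24-31 of the
 * APIC ID register, so the helper below simply shifts it down.  This
 * pairs with .apic_id_mask = 0xFF << 24 in apic_bigsmp at the bottom
 * of the file.
 */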
static unsigned bigsmp_get_apic_id(unsigned long x)
{
	return (x >> 24) & 0xFF;
}

static int bigsmp_apic_id_registered(void)
{
	return 1;
}

static const struct cpumask *bigsmp_target_cpus(void)
{
#ifdef CONFIG_SMP
	return cpu_online_mask;
#else
	return cpumask_of(0);
#endif
}

static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
{
	return 0;
}

static unsigned long bigsmp_check_apicid_present(int bit)
{
	return 1;
}

static int bigsmp_early_logical_apicid(int cpu)
{
	/* on bigsmp, logical apicid is the same as physical */
	return early_per_cpu(x86_cpu_to_apicid, cpu);
}

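/*
 * The logical destination register keeps its logical APIC ID in bits
 * 24-31; SET_APIC_LOGICAL_ID() shifts the ID into that field.  Since
 * bigsmp reuses the BIOS-reported physical ID as the logical ID, a CPU
 * with APIC ID 0x12 ends up with an LDR value of roughly 0x12 << 24,
 * with the reserved low bits of the old LDR value preserved.
 */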
static inline unsigned long calculate_ldr(int cpu)
{
	unsigned long val, id;

	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	id = per_cpu(x86_bios_cpu_apicid, cpu);
	val |= SET_APIC_LOGICAL_ID(id);

	return val;
}

/*
 * Set up the logical destination ID.
 *
 * Intel recommends setting DFR, LDR and TPR before enabling
 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116). So here it goes...
 */
static void bigsmp_init_apic_ldr(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	apic_write(APIC_DFR, APIC_DFR_FLAT);
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}

static void bigsmp_setup_apic_routing(void)
{
	printk(KERN_INFO
		"Enabling APIC mode: Physflat. Using %d I/O APICs\n",
		nr_ioapics);
}

static int bigsmp_cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < nr_cpu_ids)
		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);

	return BAD_APICID;
}

static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
	/* For clustered we don't have a good way to do this yet - hack */
	physids_promote(0xFFL, retmap);
}

static int bigsmp_check_phys_apicid_present(int phys_apicid)
{
	return 1;
}

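/*
 * bigsmp programs interrupts in physical destination mode with fixed
 * delivery, so the destination field of an IRQ can name exactly one
 * APIC ID; both helpers below therefore collapse a cpumask to a single
 * online CPU.
 */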
/* As we are using a single CPU as the destination, pick only one CPU here */
static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	int cpu = cpumask_first(cpumask);

	if (cpu < nr_cpu_ids)
		return cpu_physical_id(cpu);
	return BAD_APICID;
}

static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
						  const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			return cpu_physical_id(cpu);
	}
	return BAD_APICID;
}

static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}

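/*
 * IPIs likewise go out with physical destination IDs:
 * default_send_IPI_mask_sequence_phys() walks the mask and sends one
 * IPI per CPU rather than using a logical-mode broadcast, which in
 * flat mode would only reach eight CPUs.
 */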
static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
{
	default_send_IPI_mask_sequence_phys(mask, vector);
}

static void bigsmp_send_IPI_allbutself(int vector)
{
	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}

static void bigsmp_send_IPI_all(int vector)
{
	bigsmp_send_IPI_mask(cpu_online_mask, vector);
}

static int dmi_bigsmp; /* can be set by dmi scanners */

static int hp_ht_bigsmp(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
	dmi_bigsmp = 1;

	return 0;
}

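/*
 * Known machines that need bigsmp even though nothing else selects it:
 * dmi_check_system() walks this table and runs hp_ht_bigsmp() for each
 * entry whose DMI_MATCH fields all match the running system, which
 * sets dmi_bigsmp above.
 */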
static const struct dmi_system_id bigsmp_dmi_table[] = {
	{ hp_ht_bigsmp, "HP ProLiant DL760 G2",
		{ DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
		  DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
		}
	},

	{ hp_ht_bigsmp, "HP ProLiant DL740",
		{ DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
		  DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
		}
	},
	{ } /* NULL entry stops DMI scanning */
};

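/*
 * Each CPU gets its own vector allocation domain: since every interrupt
 * is delivered to exactly one physical APIC, there is nothing to gain
 * from sharing a vector across a group of CPUs.
 */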
static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}

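/*
 * def_to_bigsmp is set elsewhere by the 32-bit setup code, typically
 * when more logical CPUs show up than the default APIC driver can
 * address; failing that, the DMI quirk table above can still force
 * bigsmp on known machines.
 */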
static int probe_bigsmp(void)
{
	if (def_to_bigsmp)
		dmi_bigsmp = 1;
	else
		dmi_check_system(bigsmp_dmi_table);

	return dmi_bigsmp;
}

static struct apic apic_bigsmp = {

	.name = "bigsmp",
	.probe = probe_bigsmp,
	.acpi_madt_oem_check = NULL,
	.apic_id_valid = default_apic_id_valid,
	.apic_id_registered = bigsmp_apic_id_registered,

	.irq_delivery_mode = dest_Fixed,
	/* phys delivery to target CPU: */
	.irq_dest_mode = 0,

	.target_cpus = bigsmp_target_cpus,
	.disable_esr = 1,
	.dest_logical = 0,
	.check_apicid_used = bigsmp_check_apicid_used,
	.check_apicid_present = bigsmp_check_apicid_present,

	.vector_allocation_domain = bigsmp_vector_allocation_domain,
	.init_apic_ldr = bigsmp_init_apic_ldr,

	.ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
	.setup_apic_routing = bigsmp_setup_apic_routing,
	.multi_timer_check = NULL,
	.cpu_present_to_apicid = bigsmp_cpu_present_to_apicid,
	.apicid_to_cpu_present = physid_set_mask_of_physid,
	.setup_portio_remap = NULL,
	.check_phys_apicid_present = bigsmp_check_phys_apicid_present,
	.enable_apic_mode = NULL,
	.phys_pkg_id = bigsmp_phys_pkg_id,
	.mps_oem_check = NULL,

	.get_apic_id = bigsmp_get_apic_id,
	.set_apic_id = NULL,
	.apic_id_mask = 0xFF << 24,

	.cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and,

	.send_IPI_mask = bigsmp_send_IPI_mask,
	.send_IPI_mask_allbutself = NULL,
	.send_IPI_allbutself = bigsmp_send_IPI_allbutself,
	.send_IPI_all = bigsmp_send_IPI_all,
	.send_IPI_self = default_send_IPI_self,

	.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,

	.wait_for_init_deassert = default_wait_for_init_deassert,

	.smp_callin_clear_local_apic = NULL,
	.inquire_remote_apic = default_inquire_remote_apic,

	.read = native_apic_mem_read,
	.write = native_apic_mem_write,
	.eoi_write = native_apic_mem_write,
	.icr_read = native_apic_icr_read,
	.icr_write = native_apic_icr_write,
	.wait_icr_idle = native_apic_wait_icr_idle,
	.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,

	.x86_32_early_logical_apicid = bigsmp_early_logical_apicid,
};

void __init generic_bigsmp_probe(void)
{
	unsigned int cpu;

	if (!probe_bigsmp())
		return;

	apic = &apic_bigsmp;

	for_each_possible_cpu(cpu) {
		if (early_per_cpu(x86_cpu_to_logical_apicid,
				  cpu) == BAD_APICID)
			continue;
		early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
			bigsmp_early_logical_apicid(cpu);
	}

	pr_info("Overriding APIC driver with %s\n", apic_bigsmp.name);
}

apic_driver(apic_bigsmp);
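
/*
 * apic_driver() (see <asm/apic.h>) places a pointer to apic_bigsmp in
 * the .apicdrivers section, so the generic 32-bit probe code can find
 * and select this driver at boot without an explicit registration call.
 */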