arch/x86/kernel/genx2apic_uv_x.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV APIC functions (note: not an Intel compatible APIC)
 *
 * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>

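/*
 * Per-cpu hub state plus the blade lookup tables built by uv_system_init():
 * node -> blade, cpu -> blade, and per-blade bookkeeping. Exported for use
 * by UV-aware drivers.
 */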
DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);

struct uv_blade_info *uv_blade_info;
EXPORT_SYMBOL_GPL(uv_blade_info);

short *uv_node_to_blade;
EXPORT_SYMBOL_GPL(uv_node_to_blade);

short *uv_cpu_to_blade;
EXPORT_SYMBOL_GPL(uv_cpu_to_blade);

short uv_possible_blades;
EXPORT_SYMBOL_GPL(uv_possible_blades);

/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */

static cpumask_t uv_target_cpus(void)
{
	return cpumask_of_cpu(0);
}

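/*
 * Fixed physical delivery is used (see apic_x2apic_uv_x below), so each
 * vector is allocated for a single cpu only.
 */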
static cpumask_t uv_vector_allocation_domain(int cpu)
{
	cpumask_t domain = CPU_MASK_NONE;
	cpu_set(cpu, domain);
	return domain;
}

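/*
 * Bring a secondary cpu out of reset: write an INIT and then a STARTUP IPI
 * directly to the target hub's UVH_IPI_INT MMR, with start_rip encoded in
 * the vector field.
 */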
int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
{
	unsigned long val;
	int nasid;

	nasid = uv_apicid_to_nasid(phys_apicid);
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	      (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	      APIC_DM_INIT;
	uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
	mdelay(10);

	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	      (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	      APIC_DM_STARTUP;
	uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
	return 0;
}

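/*
 * Send a single IPI by writing the destination's local APIC id and the
 * vector into the UVH_IPI_INT MMR on the hub that owns that cpu.
 */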
static void uv_send_IPI_one(int cpu, int vector)
{
	unsigned long val, apicid, lapicid;
	int nasid;

	apicid = per_cpu(x86_cpu_to_apicid, cpu); /* ZZZ - cache node-local ? */
	lapicid = apicid & 0x3f;		/* ZZZ macro needed */
	nasid = uv_apicid_to_nasid(apicid);
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	      (lapicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      (vector << UVH_IPI_INT_VECTOR_SHFT);
	uv_write_global_mmr64(nasid, UVH_IPI_INT, val);
}

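/* Walk the mask and send one IPI per destination cpu. */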
static void uv_send_IPI_mask(cpumask_t mask, int vector)
{
	unsigned int cpu;

	for (cpu = 0; cpu < NR_CPUS; ++cpu)
		if (cpu_isset(cpu, mask))
			uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_allbutself(int vector)
{
	cpumask_t mask = cpu_online_map;

	cpu_clear(smp_processor_id(), mask);

	if (!cpus_empty(mask))
		uv_send_IPI_mask(mask, vector);
}

static void uv_send_IPI_all(int vector)
{
	uv_send_IPI_mask(cpu_online_map, vector);
}

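/* Every APIC id is reported as registered; there is nothing to verify. */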
static int uv_apic_id_registered(void)
{
	return 1;
}

static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	cpu = first_cpu(cpumask);
	if ((unsigned)cpu < NR_CPUS)
		return per_cpu(x86_cpu_to_apicid, cpu);
	else
		return BAD_APICID;
}

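/* Derive the physical package id by shifting out the low APIC id bits. */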
static unsigned int phys_pkg_id(int index_msb)
{
	return GET_APIC_ID(read_apic_id()) >> index_msb;
}

#ifdef ZZZ		/* Needs x2apic patch */
static void uv_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}
#endif

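/* genapic ops for UV: fixed delivery mode, physical destination mode. */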
struct genapic apic_x2apic_uv_x = {
	.name = "UV large system",
	.int_delivery_mode = dest_Fixed,
	.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
	.target_cpus = uv_target_cpus,
	.vector_allocation_domain = uv_vector_allocation_domain,/* Fixme ZZZ */
	.apic_id_registered = uv_apic_id_registered,
	.send_IPI_all = uv_send_IPI_all,
	.send_IPI_allbutself = uv_send_IPI_allbutself,
	.send_IPI_mask = uv_send_IPI_mask,
	/* ZZZ.send_IPI_self = uv_send_IPI_self, */
	.cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
	.phys_pkg_id = phys_pkg_id,	/* Fixme ZZZ */
};

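/*
 * Record the nasid-derived upper APIC id bits for this cpu; used on systems
 * where local APIC ids are not system-unique (UV_NON_UNIQUE_APIC).
 */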
static __cpuinit void set_x2apic_extra_bits(int nasid)
{
	__get_cpu_var(x2apic_extra_bits) = ((nasid >> 1) << 6);
}

/*
 * Called on boot cpu.
 */
static __init void uv_system_init(void)
{
	union uvh_si_addr_map_config_u m_n_config;
	int bytes, nid, cpu, lcpu, nasid, last_nasid, blade;
	unsigned long mmr_base;

	m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
	mmr_base =
	    uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
	    ~UV_MMR_ENABLE;
	printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);

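	/* First pass: count blades by watching for nasid changes as the cpus are scanned. */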
	last_nasid = -1;
	for_each_possible_cpu(cpu) {
		nid = cpu_to_node(cpu);
		nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu));
		if (nasid != last_nasid)
			uv_possible_blades++;
		last_nasid = nasid;
	}
	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());

	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
	uv_blade_info = alloc_bootmem_pages(bytes);

	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
	uv_node_to_blade = alloc_bootmem_pages(bytes);
	memset(uv_node_to_blade, 255, bytes);

	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
	uv_cpu_to_blade = alloc_bootmem_pages(bytes);
	memset(uv_cpu_to_blade, 255, bytes);

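	/* Second pass: fill in the per-cpu hub info and the blade lookup tables. */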
	last_nasid = -1;
	blade = -1;
	lcpu = -1;
	for_each_possible_cpu(cpu) {
		nid = cpu_to_node(cpu);
		nasid = uv_apicid_to_nasid(per_cpu(x86_cpu_to_apicid, cpu));
		if (nasid != last_nasid) {
			blade++;
			lcpu = -1;
			uv_blade_info[blade].nr_posible_cpus = 0;
			uv_blade_info[blade].nr_online_cpus = 0;
		}
		last_nasid = nasid;
		lcpu++;

		uv_cpu_hub_info(cpu)->m_val = m_n_config.s.m_skt;
		uv_cpu_hub_info(cpu)->n_val = m_n_config.s.n_skt;
		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
		uv_cpu_hub_info(cpu)->local_nasid = nasid;
		uv_cpu_hub_info(cpu)->gnode_upper =
		    nasid & ~((1 << uv_hub_info->n_val) - 1);
		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
		uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */
		uv_blade_info[blade].nasid = nasid;
		uv_blade_info[blade].nr_posible_cpus++;
		uv_node_to_blade[nid] = blade;
		uv_cpu_to_blade[cpu] = blade;

		printk(KERN_DEBUG "UV cpu %d, apicid 0x%x, nasid %d, nid %d\n",
		       cpu, per_cpu(x86_cpu_to_apicid, cpu), nasid, nid);
		printk(KERN_DEBUG "UV lcpu %d, blade %d\n", lcpu, blade);
	}
}

/*
 * Called on each cpu to initialize the per_cpu UV data area.
 */
void __cpuinit uv_cpu_init(void)
{
	if (!uv_node_to_blade)
		uv_system_init();

	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;

	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
		set_x2apic_extra_bits(uv_hub_info->local_nasid);
}