ACPI / hotplug: Use device offline/online for graceful hot-removal
[deliverable/linux.git] / drivers/base/cpu.c
/*
 * CPU subsystem support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include "base.h"

static DEFINE_PER_CPU(struct device *, cpu_sys_devices);

#ifdef CONFIG_HOTPLUG_CPU
static void change_cpu_under_node(struct cpu *cpu,
                        unsigned int from_nid, unsigned int to_nid)
{
        int cpuid = cpu->dev.id;

        unregister_cpu_under_node(cpuid, from_nid);
        register_cpu_under_node(cpuid, to_nid);
        cpu->node_id = to_nid;
}

static int __ref cpu_subsys_online(struct device *dev)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        int cpuid = dev->id;
        int from_nid, to_nid;
        int ret;

        cpu_hotplug_driver_lock();

        from_nid = cpu_to_node(cpuid);
        ret = cpu_up(cpuid);
        /*
         * When hot-adding memory to a memoryless node and then onlining a
         * cpu on that node, the cpu's node number may change internally.
         */
        to_nid = cpu_to_node(cpuid);
        if (from_nid != to_nid)
                change_cpu_under_node(cpu, from_nid, to_nid);

        cpu_hotplug_driver_unlock();
        return ret;
}

static int cpu_subsys_offline(struct device *dev)
{
        int ret;

        cpu_hotplug_driver_lock();
        ret = cpu_down(dev->id);
        cpu_hotplug_driver_unlock();
        return ret;
}

void unregister_cpu(struct cpu *cpu)
{
        int logical_cpu = cpu->dev.id;

        unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

        device_unregister(&cpu->dev);
        per_cpu(cpu_sys_devices, logical_cpu) = NULL;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
static ssize_t cpu_probe_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf,
                               size_t count)
{
        return arch_cpu_probe(buf, count);
}

static ssize_t cpu_release_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t count)
{
        return arch_cpu_release(buf, count);
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
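
/*
 * On architectures that implement arch_cpu_probe()/arch_cpu_release()
 * (CONFIG_ARCH_CPU_PROBE_RELEASE, e.g. powerpc), the two attributes above
 * appear as /sys/devices/system/cpu/probe and .../release; writing an
 * architecture-defined string to them asks the platform to add or remove
 * a CPU.  (Illustrative summary; the exact string format is arch-specific.)
 */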

#endif /* CONFIG_HOTPLUG_CPU */

struct bus_type cpu_subsys = {
        .name = "cpu",
        .dev_name = "cpu",
#ifdef CONFIG_HOTPLUG_CPU
        .online = cpu_subsys_online,
        .offline = cpu_subsys_offline,
#endif
};
EXPORT_SYMBOL_GPL(cpu_subsys);
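
/*
 * Devices registered on this bus get an "online" control file from the
 * driver core unless offline_disabled is set (see register_cpu() below).
 * A sketch of the expected flow from user space, assuming CPU 1 is
 * hotpluggable:
 *
 *      echo 0 > /sys/devices/system/cpu/cpu1/online   -> cpu_subsys_offline()
 *      echo 1 > /sys/devices/system/cpu/cpu1/online   -> cpu_subsys_online()
 *
 * ACPI-initiated graceful hot-removal uses the same offline path.
 */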

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>

static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
                                char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        ssize_t rc;
        unsigned long long addr;
        int cpunum;

        cpunum = cpu->dev.id;

        /*
         * Might be reading another cpu's data, depending on which cpu the
         * reading thread is scheduled on.  But cpu data (memory) is
         * allocated once during boot and does not change thereafter, so
         * this operation is safe.  No locking required.
         */
        addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
        rc = sprintf(buf, "%Lx\n", addr);
        return rc;
}
static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);

static ssize_t show_crash_notes_size(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        ssize_t rc;

        rc = sprintf(buf, "%zu\n", sizeof(note_buf_t));
        return rc;
}
static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);
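
/*
 * crash_notes and crash_notes_size are read by kdump user space (for
 * example kexec-tools), which needs the physical address and size of each
 * CPU's ELF note buffer in order to build the crash dump ELF headers.
 */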
#endif

/*
 * Print cpu online, possible, present, and system maps
 */

struct cpu_attr {
        struct device_attribute attr;
        const struct cpumask *const * const map;
};

static ssize_t show_cpus_attr(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
        int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));

        buf[n++] = '\n';
        buf[n] = '\0';
        return n;
}

#define _CPU_ATTR(name, map) \
        { __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
        _CPU_ATTR(online, &cpu_online_mask),
        _CPU_ATTR(possible, &cpu_possible_mask),
        _CPU_ATTR(present, &cpu_present_mask),
};
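
/*
 * These appear as /sys/devices/system/cpu/{online,possible,present} and
 * are printed in cpulist format, e.g. "0-3" on a hypothetical four-CPU
 * system with all CPUs online.
 */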

/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
        return n;
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

static ssize_t print_cpus_offline(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        int n = 0, len = PAGE_SIZE-2;
        cpumask_var_t offline;

        /* display offline cpus < nr_cpu_ids */
        if (!alloc_cpumask_var(&offline, GFP_KERNEL))
                return -ENOMEM;
        cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
        n = cpulist_scnprintf(buf, len, offline);
        free_cpumask_var(offline);

        /* display offline cpus >= nr_cpu_ids */
        if (total_cpus && nr_cpu_ids < total_cpus) {
                if (n && n < len)
                        buf[n++] = ',';

                if (nr_cpu_ids == total_cpus-1)
                        n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
                else
                        n += snprintf(&buf[n], len - n, "%d-%d",
                                      nr_cpu_ids, total_cpus-1);
        }

        n += snprintf(&buf[n], len - n, "\n");
        return n;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
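
/*
 * The resulting /sys/devices/system/cpu/offline file lists cpus that are
 * possible but not online, plus any cpus the platform reports beyond
 * nr_cpu_ids (e.g. when booted with a restrictive "nr_cpus=" limit).
 */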

static void cpu_device_release(struct device *dev)
{
        /*
         * This is an empty function to prevent the driver core from spitting
         * a warning at us.  Yes, I know this is directly opposite of what the
         * documentation for the driver core and kobjects say, and the author
         * of this code has already been publicly ridiculed for doing
         * something as foolish as this.  However, at this point in time, it
         * is the only way to handle the issue of statically allocated cpu
         * devices.  The different architectures will have their cpu device
         * code reworked to properly handle this in the near future, so this
         * function will then be changed to correctly free up the memory held
         * by the cpu device.
         *
         * Never copy this way of doing things, or you too will be made fun of
         * on the linux-kernel list, you have been warned.
         */
}

/*
 * register_cpu - Setup a sysfs device for a CPU.
 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
 *        sysfs for this CPU.
 * @num - CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int __cpuinit register_cpu(struct cpu *cpu, int num)
{
        int error;

        cpu->node_id = cpu_to_node(num);
        memset(&cpu->dev, 0x00, sizeof(struct device));
        cpu->dev.id = num;
        cpu->dev.bus = &cpu_subsys;
        cpu->dev.release = cpu_device_release;
        cpu->dev.offline_disabled = !cpu->hotpluggable;
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
        cpu->dev.bus->uevent = arch_cpu_uevent;
#endif
        error = device_register(&cpu->dev);
        if (!error)
                per_cpu(cpu_sys_devices, num) = &cpu->dev;
        if (!error)
                register_cpu_under_node(num, cpu_to_node(num));

#ifdef CONFIG_KEXEC
        if (!error)
                error = device_create_file(&cpu->dev, &dev_attr_crash_notes);
        if (!error)
                error = device_create_file(&cpu->dev,
                                           &dev_attr_crash_notes_size);
#endif
        return error;
}
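
/*
 * register_cpu() is typically called once per present CPU, either by
 * architecture topology code or by cpu_dev_register_generic() below when
 * CONFIG_GENERIC_CPU_DEVICES is set.  Leaving ->hotpluggable clear sets
 * offline_disabled, which tells the driver core not to offer offline/online
 * control for that CPU (a sketch of the intended usage, not a hard rule).
 */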

struct device *get_cpu_device(unsigned cpu)
{
        if (cpu < nr_cpu_ids && cpu_possible(cpu))
                return per_cpu(cpu_sys_devices, cpu);
        else
                return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_device);
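
/*
 * get_cpu_device() is how other subsystems (cpufreq, arch topology code,
 * and the like) map a logical cpu number to its struct device, e.g. as a
 * parent for further sysfs attributes.
 */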

#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
#endif

static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
        &dev_attr_probe.attr,
        &dev_attr_release.attr,
#endif
        &cpu_attrs[0].attr.attr,
        &cpu_attrs[1].attr.attr,
        &cpu_attrs[2].attr.attr,
        &dev_attr_kernel_max.attr,
        &dev_attr_offline.attr,
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
        &dev_attr_modalias.attr,
#endif
        NULL
};

static struct attribute_group cpu_root_attr_group = {
        .attrs = cpu_root_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
        &cpu_root_attr_group,
        NULL,
};

bool cpu_is_hotpluggable(unsigned cpu)
{
        struct device *dev = get_cpu_device(cpu);
        return dev && container_of(dev, struct cpu, dev)->hotpluggable;
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);

#ifdef CONFIG_GENERIC_CPU_DEVICES
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif

static void __init cpu_dev_register_generic(void)
{
#ifdef CONFIG_GENERIC_CPU_DEVICES
        int i;

        for_each_possible_cpu(i) {
                if (register_cpu(&per_cpu(cpu_devices, i), i))
                        panic("Failed to register CPU device");
        }
#endif
}

void __init cpu_dev_init(void)
{
        if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
                panic("Failed to register CPU subsystem");

        cpu_dev_register_generic();
}
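
/*
 * cpu_dev_init() runs early in driver core initialization (driver_init()
 * in drivers/base/init.c), before architecture code registers the
 * individual CPU devices via register_cpu().
 */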