/*
 * linux/kernel/irq/proc.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/*
 * Access rules:
 *
 * procfs protects read/write of /proc/irq/N/ files against a
 * concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new reads/writes from starting and waits for
 * already running read/write functions to complete.
 *
 * We remove the proc entries first and then delete the interrupt
 * descriptor from the radix tree and free it. So it is guaranteed
 * that irq_to_desc(N) is valid as long as the read/writes are
 * permitted by procfs.
 *
 * The read from /proc/interrupts is a different problem because there
 * is no protection. So the lookup and the access to irqdesc
 * information must be protected by sparse_irq_lock.
 */
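
/*
 * Illustrative sketch (not code from this file) of the teardown order the
 * comment above relies on; the real sequence lives in kernel/irq/irqdesc.c:
 *
 *	unregister_irq_proc(irq, desc);	-- remove /proc/irq/N/* first
 *	...
 *	delete_irq_desc(irq);		-- then drop the descriptor from the
 *					   radix tree and finally free it
 */
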
static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP

static int show_irq_affinity(int type, struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask = desc->irq_common_data.affinity;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (irqd_is_setaffinity_pending(&desc->irq_data))
		mask = desc->pending_mask;
#endif
	if (type)
		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
	else
		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
	return 0;
}
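
/*
 * Example output for the two formats (the IRQ number and CPU set are
 * illustrative, assuming an 8-CPU machine with the IRQ allowed on CPUs 0-3):
 *
 *	$ cat /proc/irq/30/smp_affinity		(type == 0, "%*pb" hex mask)
 *	0f
 *	$ cat /proc/irq/30/smp_affinity_list	(type == 1, "%*pbl" CPU list)
 *	0-3
 */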

static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	unsigned long flags;
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->affinity_hint)
		cpumask_copy(mask, desc->affinity_hint);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);

	return 0;
}

#ifndef is_affinity_mask_valid
#define is_affinity_mask_valid(val) 1
#endif

int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(0, m, v);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(1, m, v);
}

static ssize_t write_irq_affinity(int type, struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
	cpumask_var_t new_value;
	int err;

	if (!irq_can_set_affinity(irq) || no_irq_affinity)
		return -EIO;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	if (type)
		err = cpumask_parselist_user(buffer, count, new_value);
	else
		err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto free_cpumask;
	}

	/*
	 * Do not allow disabling IRQs completely - it is too easy a way
	 * to make the system unusable accidentally :-) At least one
	 * online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/*
		 * Special case for empty set - allow the architecture
		 * code to set the default SMP affinity.
		 */
		err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
	} else {
		irq_set_affinity(irq, new_value);
		err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}
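
/*
 * Example usage from a root shell (IRQ number and CPU masks illustrative):
 *
 *	echo 0c  > /proc/irq/30/smp_affinity		hex mask: CPUs 2-3
 *	echo 2-3 > /proc/irq/30/smp_affinity_list	list form, same CPUs
 *	echo 00  > /proc/irq/30/smp_affinity		empty set: ask the arch
 *							code for a default
 *
 * A write fails with -EIO when the IRQ's affinity cannot be changed (or
 * no_irq_affinity is set) and with -EINVAL when the mask contains no
 * online CPU and no architecture default can be applied.
 */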

static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}

static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}

static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
}

static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_hint_proc_show, PDE_DATA(inode));
}

static const struct file_operations irq_affinity_proc_fops = {
	.open		= irq_affinity_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_proc_write,
};

static const struct file_operations irq_affinity_hint_proc_fops = {
	.open		= irq_affinity_hint_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct file_operations irq_affinity_list_proc_fops = {
	.open		= irq_affinity_list_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_list_proc_write,
};

static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
	return 0;
}

static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Do not allow disabling IRQs completely - it is too easy a way
	 * to make the system unusable accidentally :-) At least one
	 * online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}
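
/*
 * Example (illustrative): make CPUs 0-1 the default mask used when new
 * interrupts are set up:
 *
 *	echo 3 > /proc/irq/default_smp_affinity
 *
 * Unlike the per-IRQ smp_affinity write above, an empty or fully offline
 * mask is simply rejected with -EINVAL here; there is no arch fallback.
 */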

static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, PDE_DATA(inode));
}

static const struct file_operations default_affinity_proc_fops = {
	.open		= default_affinity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= default_affinity_write,
};

static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", irq_desc_get_node(desc));
	return 0;
}

static int irq_node_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_node_proc_show, PDE_DATA(inode));
}

static const struct file_operations irq_node_proc_fops = {
	.open		= irq_node_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif

static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}
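
/*
 * Example output (values illustrative):
 *
 *	$ cat /proc/irq/30/spurious
 *	count 104912
 *	unhandled 0
 *	last_unhandled 0 ms
 */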

static int irq_spurious_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_spurious_proc_show, PDE_DATA(inode));
}

static const struct file_operations irq_spurious_proc_fops = {
	.open		= irq_spurious_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#define MAX_NAMELEN 128

static int name_unique(unsigned int irq, struct irqaction *new_action)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;
	int ret = 1;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for (action = desc->action; action; action = action->next) {
		if ((action != new_action) && action->name &&
				!strcmp(new_action->name, action->name)) {
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name[MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name ||
					!name_unique(irq, action))
		return;

	memset(name, 0, MAX_NAMELEN);
	snprintf(name, MAX_NAMELEN, "%s", action->name);

	/* create /proc/irq/1234/<action name>/ */
	action->dir = proc_mkdir(name, desc->dir);
}

#undef MAX_NAMELEN

#define MAX_NAMELEN 10

void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name[MAX_NAMELEN];

	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	desc->dir = proc_mkdir(name, root_irq_dir);
	if (!desc->dir)
		return;

#ifdef CONFIG_SMP
	/* create /proc/irq/<irq>/smp_affinity */
	proc_create_data("smp_affinity", 0644, desc->dir,
			 &irq_affinity_proc_fops, (void *)(long)irq);

	/* create /proc/irq/<irq>/affinity_hint */
	proc_create_data("affinity_hint", 0444, desc->dir,
			 &irq_affinity_hint_proc_fops, (void *)(long)irq);

	/* create /proc/irq/<irq>/smp_affinity_list */
	proc_create_data("smp_affinity_list", 0644, desc->dir,
			 &irq_affinity_list_proc_fops, (void *)(long)irq);

	proc_create_data("node", 0444, desc->dir,
			 &irq_node_proc_fops, (void *)(long)irq);
#endif

	proc_create_data("spurious", 0444, desc->dir,
			 &irq_spurious_proc_fops, (void *)(long)irq);
}
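
/*
 * Resulting layout for a registered IRQ (the IRQ number and handler name
 * are illustrative):
 *
 *	/proc/irq/30/smp_affinity	0644	CONFIG_SMP only
 *	/proc/irq/30/smp_affinity_list	0644	CONFIG_SMP only
 *	/proc/irq/30/affinity_hint	0444	CONFIG_SMP only
 *	/proc/irq/30/node		0444	CONFIG_SMP only
 *	/proc/irq/30/spurious		0444
 *	/proc/irq/30/eth0/		per-handler directory created by
 *					register_handler_proc() above
 */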

void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name[MAX_NAMELEN];

	if (!root_irq_dir || !desc->dir)
		return;
#ifdef CONFIG_SMP
	remove_proc_entry("smp_affinity", desc->dir);
	remove_proc_entry("affinity_hint", desc->dir);
	remove_proc_entry("smp_affinity_list", desc->dir);
	remove_proc_entry("node", desc->dir);
#endif
	remove_proc_entry("spurious", desc->dir);

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%u", irq);
	remove_proc_entry(name, root_irq_dir);
}

#undef MAX_NAMELEN

void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}

static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_fops);
#endif
}

void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc) {
		if (!desc)
			continue;

		register_irq_proc(irq, desc);
	}
}

#ifdef CONFIG_GENERIC_IRQ_SHOW

int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}

#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif

int show_interrupts(struct seq_file *p, void *v)
{
	static int prec;

	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	irq_lock_sparse();
	desc = irq_to_desc(i);
	if (!desc)
		goto outsparse;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	if (!action && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));

	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, " %8s", desc->irq_data.chip->name);
		else
			seq_printf(p, " %8s", "-");
	} else {
		seq_printf(p, " %8s", "None");
	}
	if (desc->irq_data.domain)
		seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
	irq_unlock_sparse();
	return 0;
}
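
/*
 * Example of the resulting /proc/interrupts output on a two-CPU machine
 * (all numbers, chip names and handler names are illustrative):
 *
 *	            CPU0       CPU1
 *	   0:         33          0   IO-APIC    2-edge      timer
 *	  30:     104912      98133   IO-APIC   19-fasteoi   eth0
 *	 NMI:          3          1   Non-maskable interrupts
 *
 * The per-IRQ lines come from this function; lines such as NMI are added
 * by arch_show_interrupts(), for which the __weak stub above prints
 * nothing on architectures that do not override it.
 */
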
#endif