/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
        alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
        if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
                return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
                free_cpumask_var(desc->irq_data.affinity);
                return -ENOMEM;
        }
#endif
        return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
        desc->irq_data.node = node;
        cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
        return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
        int cpu;

        desc->irq_data.irq = irq;
        desc->irq_data.chip = &no_irq_chip;
        desc->irq_data.chip_data = NULL;
        desc->irq_data.handler_data = NULL;
        desc->irq_data.msi_desc = NULL;
        desc->status = IRQ_DEFAULT_INIT_FLAGS;
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
        desc->name = NULL;
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
        desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, NR_IRQS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}

static void delete_irq_desc(unsigned int irq)
{
        radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
        free_cpumask_var(desc->pending_mask);
#endif
        free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node)
{
        struct irq_desc *desc;
        gfp_t gfp = GFP_KERNEL;

        desc = kzalloc_node(sizeof(*desc), gfp, node);
        if (!desc)
                return NULL;
        /* allocate based on nr_cpu_ids */
        desc->kstat_irqs = alloc_percpu(unsigned int);
        if (!desc->kstat_irqs)
                goto err_desc;

        if (alloc_masks(desc, gfp, node))
                goto err_kstat;

        raw_spin_lock_init(&desc->lock);
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);

        desc_set_defaults(irq, desc, node);

        return desc;

err_kstat:
        free_percpu(desc->kstat_irqs);
err_desc:
        kfree(desc);
        return NULL;
}

static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        unregister_irq_proc(irq, desc);

        mutex_lock(&sparse_irq_lock);
        delete_irq_desc(irq);
        mutex_unlock(&sparse_irq_lock);

        free_masks(desc);
        free_percpu(desc->kstat_irqs);
        kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
        struct irq_desc *desc;
        int i;

        for (i = 0; i < cnt; i++) {
                desc = alloc_desc(start + i, node);
                if (!desc)
                        goto err;
                mutex_lock(&sparse_irq_lock);
                irq_insert_desc(start + i, desc);
                mutex_unlock(&sparse_irq_lock);
        }
        return start;

err:
        for (i--; i >= 0; i--)
                free_desc(start + i);

        mutex_lock(&sparse_irq_lock);
        bitmap_clear(allocated_irqs, start, cnt);
        mutex_unlock(&sparse_irq_lock);
        return -ENOMEM;
}

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
        int res = irq_alloc_descs(irq, irq, 1, node);

        if (res == -EEXIST || res == irq)
                return irq_to_desc(irq);
        return NULL;
}

int __init early_irq_init(void)
{
        int i, initcnt, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        /* Let arch update nr_irqs and return the nr of preallocated irqs */
        initcnt = arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

        for (i = 0; i < initcnt; i++) {
                desc = alloc_desc(i, node);
                set_bit(i, allocated_irqs);
                irq_insert_desc(i, desc);
        }
        return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .status         = IRQ_DEFAULT_INIT_FLAGS,
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};

int __init early_irq_init(void)
{
        int count, i, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].irq_data.irq = i;
                desc[i].irq_data.chip = &no_irq_chip;
                /* TODO : do this allocation on-demand ... */
                desc[i].kstat_irqs = alloc_percpu(unsigned int);
                alloc_masks(desc + i, GFP_KERNEL, node);
                desc_smp_init(desc + i, node);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
        }
        return arch_early_irq_init();
}
263 | struct irq_desc *irq_to_desc(unsigned int irq) | |
264 | { | |
265 | return (irq < NR_IRQS) ? irq_desc + irq : NULL; | |
266 | } | |
267 | ||
268 | struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node) | |
269 | { | |
270 | return irq_to_desc(irq); | |
271 | } | |
1f5a5b87 | 272 | |
1f5a5b87 TG |
273 | static void free_desc(unsigned int irq) |
274 | { | |
b7b29338 | 275 | dynamic_irq_cleanup(irq); |
1f5a5b87 TG |
276 | } |
277 | ||
278 | static inline int alloc_descs(unsigned int start, unsigned int cnt, int node) | |
279 | { | |
6c9ae009 ED |
280 | #if defined(CONFIG_KSTAT_IRQS_ONDEMAND) |
281 | struct irq_desc *desc; | |
282 | unsigned int i; | |
283 | ||
284 | for (i = 0; i < cnt; i++) { | |
285 | desc = irq_to_desc(start + i); | |
286 | if (desc && !desc->kstat_irqs) { | |
287 | unsigned int __percpu *stats = alloc_percpu(unsigned int); | |
288 | ||
289 | if (!stats) | |
290 | return -1; | |
291 | if (cmpxchg(&desc->kstat_irqs, NULL, stats) != NULL) | |
292 | free_percpu(stats); | |
293 | } | |
294 | } | |
295 | #endif | |
1f5a5b87 TG |
296 | return start; |
297 | } | |
3795de23 TG |
298 | #endif /* !CONFIG_SPARSE_IRQ */ |
299 | ||
1f5a5b87 TG |
300 | /* Dynamic interrupt handling */ |
301 | ||
302 | /** | |
303 | * irq_free_descs - free irq descriptors | |
304 | * @from: Start of descriptor range | |
305 | * @cnt: Number of consecutive irqs to free | |
306 | */ | |
307 | void irq_free_descs(unsigned int from, unsigned int cnt) | |
308 | { | |
1f5a5b87 TG |
309 | int i; |
310 | ||
311 | if (from >= nr_irqs || (from + cnt) > nr_irqs) | |
312 | return; | |
313 | ||
314 | for (i = 0; i < cnt; i++) | |
315 | free_desc(from + i); | |
316 | ||
a05a900a | 317 | mutex_lock(&sparse_irq_lock); |
1f5a5b87 | 318 | bitmap_clear(allocated_irqs, from, cnt); |
a05a900a | 319 | mutex_unlock(&sparse_irq_lock); |
1f5a5b87 TG |
320 | } |
321 | ||
322 | /** | |
323 | * irq_alloc_descs - allocate and initialize a range of irq descriptors | |
324 | * @irq: Allocate for specific irq number if irq >= 0 | |
325 | * @from: Start the search from this irq number | |
326 | * @cnt: Number of consecutive irqs to allocate. | |
327 | * @node: Preferred node on which the irq descriptor should be allocated | |
328 | * | |
329 | * Returns the first irq number or error code | |
330 | */ | |
331 | int __ref | |
332 | irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node) | |
333 | { | |
1f5a5b87 TG |
334 | int start, ret; |
335 | ||
336 | if (!cnt) | |
337 | return -EINVAL; | |
338 | ||
a05a900a | 339 | mutex_lock(&sparse_irq_lock); |
1f5a5b87 TG |
340 | |
341 | start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); | |
342 | ret = -EEXIST; | |
343 | if (irq >=0 && start != irq) | |
344 | goto err; | |
345 | ||
346 | ret = -ENOMEM; | |
347 | if (start >= nr_irqs) | |
348 | goto err; | |
349 | ||
350 | bitmap_set(allocated_irqs, start, cnt); | |
a05a900a | 351 | mutex_unlock(&sparse_irq_lock); |
1f5a5b87 TG |
352 | return alloc_descs(start, cnt, node); |
353 | ||
354 | err: | |
a05a900a | 355 | mutex_unlock(&sparse_irq_lock); |
1f5a5b87 TG |
356 | return ret; |
357 | } | |
358 | ||
06f6c339 TG |
359 | /** |
360 | * irq_reserve_irqs - mark irqs allocated | |
361 | * @from: mark from irq number | |
362 | * @cnt: number of irqs to mark | |
363 | * | |
364 | * Returns 0 on success or an appropriate error code | |
365 | */ | |
366 | int irq_reserve_irqs(unsigned int from, unsigned int cnt) | |
367 | { | |
06f6c339 TG |
368 | unsigned int start; |
369 | int ret = 0; | |
370 | ||
371 | if (!cnt || (from + cnt) > nr_irqs) | |
372 | return -EINVAL; | |
373 | ||
a05a900a | 374 | mutex_lock(&sparse_irq_lock); |
06f6c339 TG |
375 | start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); |
376 | if (start == from) | |
377 | bitmap_set(allocated_irqs, start, cnt); | |
378 | else | |
379 | ret = -EEXIST; | |
a05a900a | 380 | mutex_unlock(&sparse_irq_lock); |
06f6c339 TG |
381 | return ret; |
382 | } | |
383 | ||
a98d24b7 TG |
384 | /** |
385 | * irq_get_next_irq - get next allocated irq number | |
386 | * @offset: where to start the search | |
387 | * | |
388 | * Returns next irq number after offset or nr_irqs if none is found. | |
389 | */ | |
390 | unsigned int irq_get_next_irq(unsigned int offset) | |
391 | { | |
392 | return find_next_bit(allocated_irqs, nr_irqs, offset); | |
393 | } | |
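
/*
 * Illustrative sketch, not part of the original file: iterating over all
 * currently allocated irq numbers with irq_get_next_irq(). The function
 * name and the printk format are assumptions for the example.
 */
#if 0   /* example only */
static void example_dump_allocated_irqs(void)
{
        unsigned int irq;

        for (irq = irq_get_next_irq(0); irq < nr_irqs;
             irq = irq_get_next_irq(irq + 1)) {
                struct irq_desc *desc = irq_to_desc(irq);

                if (desc)
                        printk(KERN_DEBUG "irq %u: depth %u\n",
                               irq, desc->depth);
        }
}
#endif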
394 | ||
b7b29338 TG |
395 | /** |
396 | * dynamic_irq_cleanup - cleanup a dynamically allocated irq | |
397 | * @irq: irq number to initialize | |
398 | */ | |
399 | void dynamic_irq_cleanup(unsigned int irq) | |
3795de23 | 400 | { |
b7b29338 TG |
401 | struct irq_desc *desc = irq_to_desc(irq); |
402 | unsigned long flags; | |
403 | ||
404 | raw_spin_lock_irqsave(&desc->lock, flags); | |
405 | desc_set_defaults(irq, desc, desc_node(desc)); | |
406 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
3795de23 TG |
407 | } |
408 | ||
3795de23 TG |
409 | unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) |
410 | { | |
411 | struct irq_desc *desc = irq_to_desc(irq); | |
6c9ae009 ED |
412 | |
413 | return desc && desc->kstat_irqs ? | |
414 | *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; | |
3795de23 | 415 | } |
478735e3 KH |
416 | |
417 | #ifdef CONFIG_GENERIC_HARDIRQS | |
418 | unsigned int kstat_irqs(unsigned int irq) | |
419 | { | |
420 | struct irq_desc *desc = irq_to_desc(irq); | |
421 | int cpu; | |
422 | int sum = 0; | |
423 | ||
6c9ae009 | 424 | if (!desc || !desc->kstat_irqs) |
478735e3 KH |
425 | return 0; |
426 | for_each_possible_cpu(cpu) | |
6c9ae009 | 427 | sum += *per_cpu_ptr(desc->kstat_irqs, cpu); |
478735e3 KH |
428 | return sum; |
429 | } | |
430 | #endif /* CONFIG_GENERIC_HARDIRQS */ |
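
/*
 * Illustrative sketch, not part of the original file: the same per-CPU sum
 * that kstat_irqs() computes above, built by hand from kstat_irqs_cpu().
 * The function name is an assumption for the example.
 */
#if 0   /* example only */
static unsigned int example_total_irq_count(unsigned int irq)
{
        unsigned int sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                sum += kstat_irqs_cpu(irq, cpu);
        return sum;
}
#endif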