x86/MSI: Use hierarchical irqdomains to manage MSI interrupts
[deliverable/linux.git] / arch / x86 / kernel / apic / msi.c
1 /*
2 * Support of MSI, HPET and DMAR interrupts.
3 *
4 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
5 * Moved from arch/x86/kernel/apic/io_apic.c.
6 * Jiang Liu <jiang.liu@linux.intel.com>
7 * Convert to hierarchical irqdomain
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13 #include <linux/mm.h>
14 #include <linux/interrupt.h>
15 #include <linux/pci.h>
16 #include <linux/dmar.h>
17 #include <linux/hpet.h>
18 #include <linux/msi.h>
19 #include <linux/irqdomain.h>
20 #include <asm/msidef.h>
21 #include <asm/hpet.h>
22 #include <asm/hw_irq.h>
23 #include <asm/apic.h>
24 #include <asm/irq_remapping.h>
25
/*
 * Fallback PCI/MSI irqdomain, used by native_setup_msi_irqs() when
 * interrupt remapping does not provide one.  Set up (or left NULL on
 * failure) by arch_init_msi_domain().
 */
static struct irq_domain *msi_default_domain;
27
28 void native_compose_msi_msg(struct pci_dev *pdev,
29 unsigned int irq, unsigned int dest,
30 struct msi_msg *msg, u8 hpet_id)
31 {
32 struct irq_cfg *cfg = irq_cfg(irq);
33
34 msg->address_hi = MSI_ADDR_BASE_HI;
35
36 if (x2apic_enabled())
37 msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest);
38
39 msg->address_lo =
40 MSI_ADDR_BASE_LO |
41 ((apic->irq_dest_mode == 0) ?
42 MSI_ADDR_DEST_MODE_PHYSICAL :
43 MSI_ADDR_DEST_MODE_LOGICAL) |
44 ((apic->irq_delivery_mode != dest_LowestPrio) ?
45 MSI_ADDR_REDIRECTION_CPU :
46 MSI_ADDR_REDIRECTION_LOWPRI) |
47 MSI_ADDR_DEST_ID(dest);
48
49 msg->data =
50 MSI_DATA_TRIGGER_EDGE |
51 MSI_DATA_LEVEL_ASSERT |
52 ((apic->irq_delivery_mode != dest_LowestPrio) ?
53 MSI_DATA_DELIVERY_FIXED :
54 MSI_DATA_DELIVERY_LOWPRI) |
55 MSI_DATA_VECTOR(cfg->vector);
56 }
57
58 static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
59 {
60 struct irq_cfg *cfg = irqd_cfg(data);
61
62 msg->address_hi = MSI_ADDR_BASE_HI;
63
64 if (x2apic_enabled())
65 msg->address_hi |= MSI_ADDR_EXT_DEST_ID(cfg->dest_apicid);
66
67 msg->address_lo =
68 MSI_ADDR_BASE_LO |
69 ((apic->irq_dest_mode == 0) ?
70 MSI_ADDR_DEST_MODE_PHYSICAL :
71 MSI_ADDR_DEST_MODE_LOGICAL) |
72 ((apic->irq_delivery_mode != dest_LowestPrio) ?
73 MSI_ADDR_REDIRECTION_CPU :
74 MSI_ADDR_REDIRECTION_LOWPRI) |
75 MSI_ADDR_DEST_ID(cfg->dest_apicid);
76
77 msg->data =
78 MSI_DATA_TRIGGER_EDGE |
79 MSI_DATA_LEVEL_ASSERT |
80 ((apic->irq_delivery_mode != dest_LowestPrio) ?
81 MSI_DATA_DELIVERY_FIXED :
82 MSI_DATA_DELIVERY_LOWPRI) |
83 MSI_DATA_VECTOR(cfg->vector);
84 }
85
86 static void msi_update_msg(struct msi_msg *msg, struct irq_data *irq_data)
87 {
88 struct irq_cfg *cfg = irqd_cfg(irq_data);
89
90 msg->data &= ~MSI_DATA_VECTOR_MASK;
91 msg->data |= MSI_DATA_VECTOR(cfg->vector);
92 msg->address_lo &= ~MSI_ADDR_DEST_ID_MASK;
93 msg->address_lo |= MSI_ADDR_DEST_ID(cfg->dest_apicid);
94 }
95
96 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
97 struct msi_msg *msg, u8 hpet_id)
98 {
99 struct irq_cfg *cfg;
100 int err;
101 unsigned dest;
102
103 if (disable_apic)
104 return -ENXIO;
105
106 cfg = irq_cfg(irq);
107 err = assign_irq_vector(irq, cfg, apic->target_cpus());
108 if (err)
109 return err;
110
111 err = apic->cpu_mask_to_apicid_and(cfg->domain,
112 apic->target_cpus(), &dest);
113 if (err)
114 return err;
115
116 x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id);
117
118 return 0;
119 }
120
/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 *
 * Ack and retrigger are delegated to the parent (vector) domain via the
 * irq_chip_*_parent/hierarchy helpers; mask/unmask go straight to the
 * device's MSI capability.
 */
static struct irq_chip pci_msi_controller = {
	.name = "PCI-MSI",
	.irq_unmask = pci_msi_unmask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_ack = irq_chip_ack_parent,
	.irq_set_affinity = msi_domain_set_affinity,
	.irq_retrigger = irq_chip_retrigger_hierarchy,
	.irq_print_chip = irq_remapping_print_chip,
	.irq_compose_msi_msg = irq_msi_compose_msg,
	.irq_write_msi_msg = pci_msi_domain_write_msg,
	.flags = IRQCHIP_SKIP_SET_WAKE,
};
137
138 int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
139 {
140 struct irq_domain *domain;
141 struct irq_alloc_info info;
142
143 init_irq_alloc_info(&info, NULL);
144 info.type = X86_IRQ_ALLOC_TYPE_MSI;
145 info.msi_dev = dev;
146
147 domain = irq_remapping_get_irq_domain(&info);
148 if (domain == NULL)
149 domain = msi_default_domain;
150 if (domain == NULL)
151 return -ENOSYS;
152
153 return pci_msi_domain_alloc_irqs(domain, dev, nvec, type);
154 }
155
/* Free one MSI irq previously allocated through the irqdomain hierarchy. */
void native_teardown_msi_irq(unsigned int irq)
{
	irq_domain_free_irqs(irq, 1);
}
160
/* Return the hwirq that pci_msi_set_desc() cached in the alloc info. */
static irq_hw_number_t pci_msi_get_hwirq(struct msi_domain_info *info,
					 msi_alloc_info_t *arg)
{
	return arg->msi_hwirq;
}
166
167 static int pci_msi_prepare(struct irq_domain *domain, struct device *dev,
168 int nvec, msi_alloc_info_t *arg)
169 {
170 struct pci_dev *pdev = to_pci_dev(dev);
171 struct msi_desc *desc = first_pci_msi_entry(pdev);
172
173 init_irq_alloc_info(arg, NULL);
174 arg->msi_dev = pdev;
175 if (desc->msi_attrib.is_msix) {
176 arg->type = X86_IRQ_ALLOC_TYPE_MSIX;
177 } else {
178 arg->type = X86_IRQ_ALLOC_TYPE_MSI;
179 arg->flags |= X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
180 }
181
182 return 0;
183 }
184
/* Cache the per-descriptor hwirq for pci_msi_get_hwirq() to hand out. */
static void pci_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->msi_hwirq = pci_msi_domain_calc_hwirq(arg->msi_dev, desc);
}
189
/* x86-specific callbacks plugged into the generic PCI/MSI domain ops. */
static struct msi_domain_ops pci_msi_domain_ops = {
	.get_hwirq = pci_msi_get_hwirq,
	.msi_prepare = pci_msi_prepare,
	.set_desc = pci_msi_set_desc,
};
195
/*
 * Domain info for x86 PCI/MSI: use the generic default domain and chip
 * ops, and advertise support for both multi-vector MSI and MSI-X.
 * All MSI interrupts are edge triggered.
 */
static struct msi_domain_info pci_msi_domain_info = {
	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		 MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
	.ops = &pci_msi_domain_ops,
	.chip = &pci_msi_controller,
	.handler = handle_edge_irq,
	.handler_name = "edge",
};
204
205 void arch_init_msi_domain(struct irq_domain *parent)
206 {
207 if (disable_apic)
208 return;
209
210 msi_default_domain = pci_msi_create_irq_domain(NULL,
211 &pci_msi_domain_info, parent);
212 if (!msi_default_domain)
213 pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n");
214 }
215
#ifdef CONFIG_IRQ_REMAP
/* Create a PCI/MSI irqdomain on top of a remapping unit's domain. */
struct irq_domain *arch_create_msi_irq_domain(struct irq_domain *parent)
{
	return msi_create_irq_domain(NULL, &pci_msi_domain_info, parent);
}
#endif
222
223 #ifdef CONFIG_DMAR_TABLE
/*
 * Retarget the DMAR fault-reporting MSI: pick a new vector/destination
 * via apic_set_affinity() and rewrite the live message registers.
 */
static int
dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      bool force)
{
	struct irq_cfg *cfg = irqd_cfg(data);
	unsigned int dest, irq = data->irq;
	struct msi_msg msg;
	int ret;

	ret = apic_set_affinity(data, mask, &dest);
	if (ret)
		return ret;

	/* Read-modify-write: only vector and destination id change */
	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
	/* address_hi is rebuilt wholesale to carry the extended dest id */
	msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);

	dmar_msi_write(irq, &msg);

	/* Hardware already updated; no need for the core to copy the mask */
	return IRQ_SET_MASK_OK_NOCOPY;
}
249
/* IRQ chip for the DMAR unit's fault-reporting MSI (legacy, non-domain). */
static struct irq_chip dmar_msi_type = {
	.name = "DMAR_MSI",
	.irq_unmask = dmar_msi_unmask,
	.irq_mask = dmar_msi_mask,
	.irq_ack = apic_ack_edge,
	.irq_set_affinity = dmar_msi_set_affinity,
	.irq_retrigger = apic_retrigger_irq,
	.flags = IRQCHIP_SKIP_SET_WAKE,
};
259
260 int arch_setup_dmar_msi(unsigned int irq)
261 {
262 int ret;
263 struct msi_msg msg;
264
265 ret = msi_compose_msg(NULL, irq, &msg, -1);
266 if (ret < 0)
267 return ret;
268 dmar_msi_write(irq, &msg);
269 irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
270 "edge");
271 return 0;
272 }
273
/*
 * Allocate one irq for DMAR fault reporting.  A NULL domain selects the
 * default irqdomain; no alloc info is needed because the message is
 * composed later by arch_setup_dmar_msi().
 */
int dmar_alloc_hwirq(void)
{
	return irq_domain_alloc_irqs(NULL, 1, NUMA_NO_NODE, NULL);
}
278
/* Release the irq obtained from dmar_alloc_hwirq(). */
void dmar_free_hwirq(int irq)
{
	irq_domain_free_irqs(irq, 1);
}
283 #endif
284
/*
 * HPET MSI support
 */
288 #ifdef CONFIG_HPET_TIMER
/*
 * Recover the HPET block id that hpet_create_irq_domain() stashed in
 * the domain's host_data.
 */
static inline int hpet_dev_id(struct irq_domain *domain)
{
	return (int)(long)domain->host_data;
}
293
294 static int hpet_msi_set_affinity(struct irq_data *data,
295 const struct cpumask *mask, bool force)
296 {
297 struct irq_data *parent = data->parent_data;
298 struct msi_msg msg;
299 int ret;
300
301 ret = parent->chip->irq_set_affinity(parent, mask, force);
302 if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
303 hpet_msi_read(data->handler_data, &msg);
304 msi_update_msg(&msg, data);
305 hpet_msi_write(data->handler_data, &msg);
306 }
307
308 return ret;
309 }
310
/*
 * IRQ chip for HPET MSI channels.  Ack/retrigger are delegated to the
 * parent (vector) domain; mask/unmask touch the HPET itself.
 */
static struct irq_chip hpet_msi_controller = {
	.name = "HPET_MSI",
	.irq_unmask = hpet_msi_unmask,
	.irq_mask = hpet_msi_mask,
	.irq_ack = irq_chip_ack_parent,
	.irq_set_affinity = hpet_msi_set_affinity,
	.irq_retrigger = irq_chip_retrigger_hierarchy,
	.irq_print_chip = irq_remapping_print_chip,
	.irq_compose_msi_msg = irq_msi_compose_msg,
	.flags = IRQCHIP_SKIP_SET_WAKE,
};
322
/*
 * Legacy (non-irqdomain) setup of an HPET MSI channel: compose the
 * message for HPET block @id, program it and install the edge handler.
 */
int default_setup_hpet_msi(unsigned int irq, unsigned int id)
{
	struct irq_chip *chip = &hpet_msi_controller;
	struct msi_msg msg;
	int ret;

	ret = msi_compose_msg(NULL, irq, &msg, id);
	if (ret < 0)
		return ret;

	hpet_msi_write(irq_get_handler_data(irq), &msg);
	/* HPET MSIs may be migrated from process context */
	irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
	/* Let interrupt remapping, when active, override the chip hooks */
	setup_remapped_irq(irq, irq_cfg(irq), chip);

	irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
	return 0;
}
340
/*
 * irq_domain_ops::alloc for the HPET domain.  Accepts only single
 * allocations carrying X86_IRQ_ALLOC_TYPE_HPET info; the hwirq space is
 * indexed by the HPET comparator number (info->hpet_index).
 */
static int hpet_domain_alloc(struct irq_domain *domain, unsigned int virq,
			     unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	int ret;

	if (nr_irqs > 1 || !info || info->type != X86_IRQ_ALLOC_TYPE_HPET)
		return -EINVAL;
	if (irq_find_mapping(domain, info->hpet_index)) {
		pr_warn("IRQ for HPET%d already exists.\n", info->hpet_index);
		return -EEXIST;
	}

	/* Let the parent (vector) domain allocate a vector first */
	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret >= 0) {
		irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
		irq_domain_set_hwirq_and_chip(domain, virq, info->hpet_index,
					      &hpet_msi_controller, NULL);
		irq_set_handler_data(virq, info->hpet_data);
		__irq_set_handler(virq, handle_edge_irq, 0, "edge");
	}

	return ret;
}
365
/*
 * irq_domain_ops::free counterpart of hpet_domain_alloc(); allocations
 * are always of size 1, so anything larger is a bug.
 */
static void hpet_domain_free(struct irq_domain *domain, unsigned int virq,
			     unsigned int nr_irqs)
{
	BUG_ON(nr_irqs > 1);
	irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT);
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
373
/*
 * On activation, compose the MSI message through the irq_chip hierarchy
 * and program it into the HPET channel.
 */
static void hpet_domain_activate(struct irq_domain *domain,
				 struct irq_data *irq_data)
{
	struct msi_msg msg;

	/* Composition can only fail on a hierarchy misconfiguration */
	BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg));
	hpet_msi_write(irq_get_handler_data(irq_data->irq), &msg);
}
382
/* Quiesce the HPET channel by programming an all-zero MSI message. */
static void hpet_domain_deactivate(struct irq_domain *domain,
				   struct irq_data *irq_data)
{
	struct msi_msg msg;

	memset(&msg, 0, sizeof(msg));
	hpet_msi_write(irq_get_handler_data(irq_data->irq), &msg);
}
391
/* irqdomain callbacks for the per-HPET-block MSI domain. */
static struct irq_domain_ops hpet_domain_ops = {
	.alloc = hpet_domain_alloc,
	.free = hpet_domain_free,
	.activate = hpet_domain_activate,
	.deactivate = hpet_domain_deactivate,
};
398
399 struct irq_domain *hpet_create_irq_domain(int hpet_id)
400 {
401 struct irq_domain *parent;
402 struct irq_alloc_info info;
403
404 if (x86_vector_domain == NULL)
405 return NULL;
406
407 init_irq_alloc_info(&info, NULL);
408 info.type = X86_IRQ_ALLOC_TYPE_HPET;
409 info.hpet_id = hpet_id;
410 parent = irq_remapping_get_ir_irq_domain(&info);
411 if (parent == NULL)
412 parent = x86_vector_domain;
413
414 return irq_domain_add_hierarchy(parent, 0, 0, NULL, &hpet_domain_ops,
415 (void *)(long)hpet_id);
416 }
417
418 int hpet_assign_irq(struct irq_domain *domain, struct hpet_dev *dev,
419 int dev_num)
420 {
421 struct irq_alloc_info info;
422
423 init_irq_alloc_info(&info, NULL);
424 info.type = X86_IRQ_ALLOC_TYPE_HPET;
425 info.hpet_data = dev;
426 info.hpet_id = hpet_dev_id(domain);
427 info.hpet_index = dev_num;
428
429 return irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, NULL);
430 }
431 #endif
This page took 0.041372 seconds and 6 git commands to generate.