[PATCH] genirq: msi: refactor the msi_ops
[deliverable/linux.git] / drivers / pci / msi.c
1 /*
2 * File: msi.c
3 * Purpose: PCI Message Signaled Interrupt (MSI)
4 *
5 * Copyright (C) 2003-2004 Intel
6 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
7 */
8
9 #include <linux/mm.h>
10 #include <linux/irq.h>
11 #include <linux/interrupt.h>
12 #include <linux/init.h>
13 #include <linux/ioport.h>
14 #include <linux/smp_lock.h>
15 #include <linux/pci.h>
16 #include <linux/proc_fs.h>
17
18 #include <asm/errno.h>
19 #include <asm/io.h>
20 #include <asm/smp.h>
21
22 #include "pci.h"
23 #include "msi.h"
24
static DEFINE_SPINLOCK(msi_lock);	/* protects msi_desc[] and the vector bookkeeping below */
/* per-vector MSI descriptors; NULL means no MSI attached to that vector */
static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
static kmem_cache_t* msi_cachep;	/* slab cache for struct msi_desc */

static int pci_msi_enable = 1;		/* global MSI on/off switch (cleared by quirks/failures) */
static int last_alloc_vector;		/* most recently allocated vector */
static int nr_released_vectors;		/* vectors freed and available for reuse */
static int nr_reserved_vectors = NR_HP_RESERVED_VECTORS;	/* vectors held back for hotplug */
static int nr_msix_devices;		/* MSI-X capable devices counted at scan time */

#ifndef CONFIG_X86_IO_APIC
/* vector-to-IRQ map; see the value legend in assign_msi_vector() */
int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
#endif

static struct msi_ops *msi_ops;		/* arch backend (setup/teardown/target), set via msi_register() */
40
41 int
42 msi_register(struct msi_ops *ops)
43 {
44 msi_ops = ops;
45 return 0;
46 }
47
48 static int msi_cache_init(void)
49 {
50 msi_cachep = kmem_cache_create("msi_cache", sizeof(struct msi_desc),
51 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
52 if (!msi_cachep)
53 return -ENOMEM;
54
55 return 0;
56 }
57
/*
 * msi_set_mask_bit - set or clear the mask bit for one MSI/MSI-X vector
 * @vector: vector whose msi_desc carries the mask location
 * @flag:   1 to mask the interrupt, 0 to unmask it
 *
 * For MSI the mask bit lives in config space (mask_base encodes the
 * register offset); for MSI-X it is the memory-mapped per-entry vector
 * control word.  Silently returns if the descriptor is unusable.
 */
static void msi_set_mask_bit(unsigned int vector, int flag)
{
	struct msi_desc *entry;

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev || !entry->mask_base)
		return;
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int pos;
		u32 mask_bits;

		/* for MSI, mask_base holds a config-space offset, not an
		 * iomem pointer (see msi_capability_init()) */
		pos = (long)entry->mask_base;
		pci_read_config_dword(entry->dev, pos, &mask_bits);
		mask_bits &= ~(1);	/* read-modify-write: touch bit 0 only */
		mask_bits |= flag;
		pci_write_config_dword(entry->dev, pos, mask_bits);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		break;
	}
	default:
		break;
	}
}
89
90 static void read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
91 {
92 switch(entry->msi_attrib.type) {
93 case PCI_CAP_ID_MSI:
94 {
95 struct pci_dev *dev = entry->dev;
96 int pos = entry->msi_attrib.pos;
97 u16 data;
98
99 pci_read_config_dword(dev, msi_lower_address_reg(pos),
100 &msg->address_lo);
101 if (entry->msi_attrib.is_64) {
102 pci_read_config_dword(dev, msi_upper_address_reg(pos),
103 &msg->address_hi);
104 pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
105 } else {
106 msg->address_hi = 0;
107 pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
108 }
109 msg->data = data;
110 break;
111 }
112 case PCI_CAP_ID_MSIX:
113 {
114 void __iomem *base;
115 base = entry->mask_base +
116 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
117
118 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
119 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
120 msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
121 break;
122 }
123 default:
124 BUG();
125 }
126 }
127
/*
 * write_msi_msg - program @msg into the device for @entry
 *
 * Mirror of read_msi_msg(): MSI messages go to config space (data
 * register offset depends on is_64), MSI-X messages to the mapped
 * table entry.  BUGs on an unknown capability type.
 */
static void write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}
166
#ifdef CONFIG_SMP
/*
 * set_msi_affinity - retarget an MSI/MSI-X vector at the CPUs in @cpu_mask
 *
 * Reads the current message, lets the arch backend rewrite the target
 * (msi_ops->target), writes the message back, and records the new
 * affinity.  Bails out silently if the vector has no live descriptor.
 */
static void set_msi_affinity(unsigned int irq, cpumask_t cpu_mask)
{
	struct msi_desc *entry;
	struct msi_msg msg;

	entry = msi_desc[irq];
	if (!entry || !entry->dev)
		return;

	read_msi_msg(entry, &msg);
	msi_ops->target(irq, cpu_mask, &msg);
	write_msi_msg(entry, &msg);
	set_native_irq_info(irq, cpu_mask);
}
#else
#define set_msi_affinity NULL
#endif /* CONFIG_SMP */
185
/* Mask one MSI/MSI-X vector (hw_interrupt_type .disable/.ack hook). */
static void mask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 1);
}

/* Unmask one MSI/MSI-X vector (hw_interrupt_type .enable hook). */
static void unmask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 0);
}
195
/*
 * startup_msi_irq_wo_maskbit - .startup hook for maskless MSI
 *
 * Marks the descriptor active under msi_lock; the return value is the
 * "pending" indication, which is always 0 for MSI.
 */
static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (!entry || !entry->dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return 0;
	}
	entry->msi_attrib.state = 1;	/* Mark it active */
	spin_unlock_irqrestore(&msi_lock, flags);

	return 0;	/* never anything pending */
}

/*
 * startup_msi_irq_w_maskbit - .startup hook for maskable MSI/MSI-X:
 * mark active, then unmask so the first interrupt can be delivered.
 */
static unsigned int startup_msi_irq_w_maskbit(unsigned int vector)
{
	startup_msi_irq_wo_maskbit(vector);
	unmask_MSI_irq(vector);
	return 0;	/* never anything pending */
}
219
/*
 * shutdown_msi_irq - .shutdown hook: mark the descriptor inactive.
 * The state flag is what pci_disable_msi()/pci_disable_msix() check to
 * detect a missing free_irq().
 */
static void shutdown_msi_irq(unsigned int vector)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (entry && entry->dev)
		entry->msi_attrib.state = 0;	/* Mark it not active */
	spin_unlock_irqrestore(&msi_lock, flags);
}
231
/* .end hook for maskless MSI: handle pending migration, then APIC ack. */
static void end_msi_irq_wo_maskbit(unsigned int vector)
{
	move_native_irq(vector);
	ack_APIC_irq();
}

/* .end hook for maskable MSI/MSI-X: also unmask (the .ack hook masked). */
static void end_msi_irq_w_maskbit(unsigned int vector)
{
	move_native_irq(vector);
	unmask_MSI_irq(vector);
	ack_APIC_irq();
}

/* No-op hook for the maskless MSI enable/disable/ack slots. */
static void do_nothing(unsigned int vector)
{
}
248
/*
 * Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI-X Capability Structure.
 * .ack masks the vector; .end unmasks it again after handling.
 */
static struct hw_interrupt_type msix_irq_type = {
	.typename = "PCI-MSI-X",
	.startup = startup_msi_irq_w_maskbit,
	.shutdown = shutdown_msi_irq,
	.enable = unmask_MSI_irq,
	.disable = mask_MSI_irq,
	.ack = mask_MSI_irq,
	.end = end_msi_irq_w_maskbit,
	.set_affinity = set_msi_affinity
};

/*
 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI Capability Structure with
 * Mask-and-Pending Bits.  Same mask-on-ack discipline as MSI-X.
 */
static struct hw_interrupt_type msi_irq_w_maskbit_type = {
	.typename = "PCI-MSI",
	.startup = startup_msi_irq_w_maskbit,
	.shutdown = shutdown_msi_irq,
	.enable = unmask_MSI_irq,
	.disable = mask_MSI_irq,
	.ack = mask_MSI_irq,
	.end = end_msi_irq_w_maskbit,
	.set_affinity = set_msi_affinity
};

/*
 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI Capability Structure without
 * Mask-and-Pending Bits.  Nothing to mask, so enable/disable/ack
 * are no-ops.
 */
static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
	.typename = "PCI-MSI",
	.startup = startup_msi_irq_wo_maskbit,
	.shutdown = shutdown_msi_irq,
	.enable = do_nothing,
	.disable = do_nothing,
	.ack = do_nothing,
	.end = end_msi_irq_wo_maskbit,
	.set_affinity = set_msi_affinity
};
295
static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);

/*
 * assign_msi_vector - pick a vector for a new MSI/MSI-X entry
 *
 * While fresh vectors remain, allocates one via assign_irq_vector().
 * Once the space is exhausted (new_vector_avail cleared below), scans
 * vector_irq[] for a slot released by hotplug removal and recycles it,
 * first tearing down any stale msi_desc chain still linked to it.
 *
 * Returns the vector number, or -EBUSY when nothing can be recycled.
 */
static int assign_msi_vector(void)
{
	static int new_vector_avail = 1;	/* cleared once LAST_DEVICE_VECTOR is handed out */
	int vector;
	unsigned long flags;

	/*
	 * msi_lock is provided to ensure that successful allocation of MSI
	 * vector is assigned unique among drivers.
	 */
	spin_lock_irqsave(&msi_lock, flags);

	if (!new_vector_avail) {
		int free_vector = 0;

		/*
		 * vector_irq[] = -1 indicates that this specific vector is:
		 * - assigned for MSI (since MSI have no associated IRQ) or
		 * - assigned for legacy if less than 16, or
		 * - having no corresponding 1:1 vector-to-IOxAPIC IRQ mapping
		 * vector_irq[] = 0 indicates that this vector, previously
		 * assigned for MSI, is freed by hotplug removed operations.
		 * This vector will be reused for any subsequent hotplug added
		 * operations.
		 * vector_irq[] > 0 indicates that this vector is assigned for
		 * IOxAPIC IRQs. This vector and its value provides a 1-to-1
		 * vector-to-IOxAPIC IRQ mapping.
		 */
		for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
			if (vector_irq[vector] != 0)
				continue;
			free_vector = vector;
			/* prefer a released vector with no stale descriptor */
			if (!msi_desc[vector])
				break;
			else
				continue;
		}
		if (!free_vector) {
			spin_unlock_irqrestore(&msi_lock, flags);
			return -EBUSY;
		}
		vector_irq[free_vector] = -1;	/* claim it for MSI */
		nr_released_vectors--;
		spin_unlock_irqrestore(&msi_lock, flags);
		/* NOTE(review): msi_lock is dropped before the stale chain
		 * below is torn down; a concurrent allocator could race for
		 * free_vector here -- confirm callers serialize this path. */
		if (msi_desc[free_vector] != NULL) {
			struct pci_dev *dev;
			int tail;

			/* free all linked vectors before re-assign */
			do {
				spin_lock_irqsave(&msi_lock, flags);
				dev = msi_desc[free_vector]->dev;
				tail = msi_desc[free_vector]->link.tail;
				spin_unlock_irqrestore(&msi_lock, flags);
				msi_free_vector(dev, tail, 1);
			} while (free_vector != tail);
		}

		return free_vector;
	}
	vector = assign_irq_vector(AUTO_ASSIGN);
	last_alloc_vector = vector;
	if (vector == LAST_DEVICE_VECTOR)
		new_vector_avail = 0;

	spin_unlock_irqrestore(&msi_lock, flags);
	return vector;
}
365
/*
 * get_new_vector - allocate a vector and install its interrupt gate.
 * Returns the vector, or a negative errno from assign_msi_vector().
 */
static int get_new_vector(void)
{
	int vector = assign_msi_vector();

	if (vector > 0)
		set_intr_gate(vector, interrupt[vector]);

	return vector;
}
375
/*
 * msi_init - lazy one-time initialization of the MSI core
 *
 * The result is cached in the static 'status': a prior success (0)
 * returns immediately, while a prior failure is re-attempted on the
 * next call.  Checks the global quirk, runs arch init, requires a
 * registered msi_ops, creates the descriptor cache, and marks one
 * pre-allocated vector as released for later reuse.
 */
static int msi_init(void)
{
	static int status = -ENOMEM;	/* initial value forces first-time init */

	if (!status)
		return status;

	if (pci_msi_quirk) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
		status = -EINVAL;
		return status;
	}

	status = msi_arch_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING
		       "PCI: MSI arch init failed. MSI disabled.\n");
		return status;
	}

	if (! msi_ops) {
		printk(KERN_WARNING
		       "PCI: MSI ops not registered. MSI disabled.\n");
		status = -EINVAL;
		return status;
	}

	last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
	status = msi_cache_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI cache init failed\n");
		return status;
	}

	if (last_alloc_vector < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n");
		status = -EBUSY;
		return status;
	}
	/* mark the probe vector as released so it can be reused */
	vector_irq[last_alloc_vector] = 0;
	nr_released_vectors++;

	return status;
}
424
/* Thin wrapper: allocate a vector for @dev (dev currently unused). */
static int get_msi_vector(struct pci_dev *dev)
{
	return get_new_vector();
}

/*
 * alloc_msi_entry - allocate a zeroed msi_desc from the slab cache.
 * Returns NULL on allocation failure.
 */
static struct msi_desc* alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kmem_cache_zalloc(msi_cachep, GFP_KERNEL);
	if (!entry)
		return NULL;

	/* redundant after zalloc, kept for explicitness */
	entry->link.tail = entry->link.head = 0;	/* single message */
	entry->dev = NULL;

	return entry;
}
443
/* Publish @entry as the descriptor for @vector, under msi_lock. */
static void attach_msi_entry(struct msi_desc *entry, int vector)
{
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	msi_desc[vector] = entry;
	spin_unlock_irqrestore(&msi_lock, flags);
}
452
/*
 * irq_handler_init - install the matching hw_interrupt_type for a vector
 * @cap_id: PCI_CAP_ID_MSI or PCI_CAP_ID_MSIX
 * @pos:    the vector number, used to index irq_desc[] (despite the name)
 * @mask:   non-zero if the MSI capability has a mask bit
 */
static void irq_handler_init(int cap_id, int pos, int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_desc[pos].lock, flags);
	if (cap_id == PCI_CAP_ID_MSIX)
		irq_desc[pos].chip = &msix_irq_type;
	else {
		if (!mask)
			irq_desc[pos].chip = &msi_irq_wo_maskbit_type;
		else
			irq_desc[pos].chip = &msi_irq_w_maskbit_type;
	}
	spin_unlock_irqrestore(&irq_desc[pos].lock, flags);
}
468
/*
 * enable_msi_mode - set the enable bit of the MSI or MSI-X capability
 * @pos:  config-space offset of the capability
 * @type: PCI_CAP_ID_MSI or PCI_CAP_ID_MSIX
 *
 * On PCI Express endpoints, INTx is additionally disabled.
 */
static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Set enabled bits to single MSI & enable MSI_enable bit */
		msi_enable(control, 1);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msi_enabled = 1;
	} else {
		msix_enable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msix_enabled = 1;
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 0);	/* disable intx */
	}
}

/*
 * disable_msi_mode - clear the enable bit of the MSI or MSI-X capability;
 * re-enables INTx on PCI Express endpoints.  Exact mirror of
 * enable_msi_mode().
 */
void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Set enabled bits to single MSI & enable MSI_enable bit */
		msi_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msi_enabled = 0;
	} else {
		msix_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msix_enabled = 0;
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 1);	/* enable intx */
	}
}
510
/*
 * msi_lookup_vector - find a previously assigned vector for @dev/@type
 *
 * Scans msi_desc[] for an entry matching the device, capability type,
 * and the device's original (default) vector.  On a hit, overrides
 * dev->irq with the found vector and returns 0; otherwise -EACCES.
 */
static int msi_lookup_vector(struct pci_dev *dev, int type)
{
	int vector;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
		if (!msi_desc[vector] || msi_desc[vector]->dev != dev ||
			msi_desc[vector]->msi_attrib.type != type ||
			msi_desc[vector]->msi_attrib.default_vector != dev->irq)
			continue;
		spin_unlock_irqrestore(&msi_lock, flags);
		/* This pre-assigned MSI vector for this device
		   already exists. Override dev->irq with this vector */
		dev->irq = vector;
		return 0;
	}
	spin_unlock_irqrestore(&msi_lock, flags);

	return -EACCES;
}
532
/*
 * pci_scan_msi_device - account for MSI/MSI-X capable devices at scan time.
 * MSI-X devices increase nr_msix_devices; MSI-only devices increase
 * nr_reserved_vectors.  Both feed pci_enable_msix()'s fairness math.
 */
void pci_scan_msi_device(struct pci_dev *dev)
{
	if (!dev)
		return;

	if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0)
		nr_msix_devices++;
	else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
		nr_reserved_vectors++;
}
543
#ifdef CONFIG_PM
/*
 * pci_save_msi_state - snapshot the MSI capability before suspend
 *
 * Saves up to 5 dwords: cap header, address lo, optionally address hi,
 * data, and optionally the mask register, depending on the control
 * word's 64BIT/MASKBIT flags.  No-op (returns 0) when the device has
 * no MSI capability, has no_msi set, or MSI is not currently enabled.
 * Returns -ENOMEM on allocation failure.
 */
int pci_save_msi_state(struct pci_dev *dev)
{
	int pos, i = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos <= 0 || dev->no_msi)
		return 0;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return 0;

	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
		return -ENOMEM;
	}
	cap = &save_state->data[0];

	pci_read_config_dword(dev, pos, &cap[i++]);
	/* control word lives in the upper half of the cap header */
	control = cap[0] >> 16;
	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
	} else
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
	save_state->cap_nr = PCI_CAP_ID_MSI;
	pci_add_saved_cap(dev, save_state);
	return 0;
}

/*
 * pci_restore_msi_state - write the snapshot from pci_save_msi_state()
 * back to the device on resume, re-enable MSI mode, and free the saved
 * state.  No-op if there is no saved state or no MSI capability.
 */
void pci_restore_msi_state(struct pci_dev *dev)
{
	int i = 0, pos;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!save_state || pos <= 0)
		return;
	cap = &save_state->data[0];

	control = cap[i++] >> 16;
	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
	} else
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}
610
/*
 * pci_save_msix_state - snapshot the MSI-X capability before suspend
 *
 * Saves the control word, then walks the device's vector chain
 * (via the link.tail ring) saving each entry's message into its
 * msi_desc (msg_save).  Returns 0 when MSI-X is absent or disabled,
 * -ENOMEM on allocation failure, -EINVAL if no vector chain is found.
 */
int pci_save_msix_state(struct pci_dev *dev)
{
	int pos;
	int temp;
	int vector, head, tail = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0 || dev->no_msi)
		return 0;

	/* save the capability */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return 0;
	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
		return -ENOMEM;
	}
	*((u16 *)&save_state->data[0]) = control;

	/* save the table; msi_lookup_vector() clobbers dev->irq, so keep
	 * the original and restore it afterwards */
	temp = dev->irq;
	if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		kfree(save_state);
		return -EINVAL;
	}

	vector = head = dev->irq;
	while (head != tail) {	/* tail starts 0, so the loop always enters */
		struct msi_desc *entry;

		entry = msi_desc[vector];
		read_msi_msg(entry, &entry->msg_save);

		tail = msi_desc[vector]->link.tail;
		vector = tail;	/* ring: stops once we are back at head */
	}
	dev->irq = temp;

	save_state->cap_nr = PCI_CAP_ID_MSIX;
	pci_add_saved_cap(dev, save_state);
	return 0;
}

/*
 * pci_restore_msix_state - on resume, rewrite each chained vector's
 * saved message (msg_save) into the MSI-X table, restore the control
 * word, and re-enable MSI-X mode.  Frees the saved state first.
 */
void pci_restore_msix_state(struct pci_dev *dev)
{
	u16 save;
	int pos;
	int vector, head, tail = 0;
	struct msi_desc *entry;
	int temp;
	struct pci_cap_saved_state *save_state;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
	if (!save_state)
		return;
	save = *((u16 *)&save_state->data[0]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0)
		return;

	/* route the table */
	temp = dev->irq;
	if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX))
		return;
	vector = head = dev->irq;
	while (head != tail) {
		entry = msi_desc[vector];
		write_msi_msg(entry, &entry->msg_save);

		tail = msi_desc[vector]->link.tail;
		vector = tail;
	}
	dev->irq = temp;

	pci_write_config_word(dev, msi_control_reg(pos), save);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
}
#endif
697
/*
 * msi_register_init - program the MSI message and initial mask state
 * @dev:   the MSI-capable device
 * @entry: its freshly built descriptor
 *
 * Asks the arch backend (msi_ops->setup) to compose the message for
 * dev->irq, writes it to the device, and, if the capability has a mask
 * bit, masks all message vectors the device is capable of.
 *
 * Returns 0 on success or the negative status from msi_ops->setup.
 */
static int msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
{
	int status;
	struct msi_msg msg;
	int pos;
	u16 control;

	pos = entry->msi_attrib.pos;
	pci_read_config_word(dev, msi_control_reg(pos), &control);

	/* Configure MSI capability structure */
	status = msi_ops->setup(dev, dev->irq, &msg);
	if (status < 0)
		return status;

	write_msi_msg(entry, &msg);
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default, Mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		/* build a mask covering 2^multi_msi_capable - 1 vectors */
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}

	return 0;
}
730
/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of device function with a single
 * MSI vector, regardless of device function is capable of handling
 * multiple messages. A return of zero indicates the successful setup
 * of an entry zero with the new MSI vector or non-zero for otherwise.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	int status;
	struct msi_desc *entry;
	int pos, vector;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	vector = get_msi_vector(dev);
	if (vector < 0) {
		kmem_cache_free(msi_cachep, entry);
		return -EBUSY;
	}
	/* single-entry chain: head and tail both point at this vector */
	entry->link.head = vector;
	entry->link.tail = vector;
	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.state = 0;	/* Mark it not active */
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.default_vector = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;
	dev->irq = vector;
	entry->dev = dev;
	if (is_mask_bit_support(control)) {
		/* for MSI, mask_base stores the config-space offset of the
		 * mask register, cast to look like an iomem pointer */
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	/* Replace with MSI handler */
	irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
	/* Configure MSI capability structure */
	status = msi_register_init(dev, entry);
	if (status != 0) {
		dev->irq = entry->msi_attrib.default_vector;
		kmem_cache_free(msi_cachep, entry);
		return status;
	}

	attach_msi_entry(entry, vector);
	/* Set MSI enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

	return 0;
}
790
791 /**
792 * msix_capability_init - configure device's MSI-X capability
793 * @dev: pointer to the pci_dev data structure of MSI-X device function
794 * @entries: pointer to an array of struct msix_entry entries
795 * @nvec: number of @entries
796 *
797 * Setup the MSI-X capability structure of device function with a
798 * single MSI-X vector. A return of zero indicates the successful setup of
799 * requested MSI-X entries with allocated vectors or non-zero for otherwise.
800 **/
801 static int msix_capability_init(struct pci_dev *dev,
802 struct msix_entry *entries, int nvec)
803 {
804 struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
805 struct msi_msg msg;
806 int status;
807 int vector, pos, i, j, nr_entries, temp = 0;
808 unsigned long phys_addr;
809 u32 table_offset;
810 u16 control;
811 u8 bir;
812 void __iomem *base;
813
814 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
815 /* Request & Map MSI-X table region */
816 pci_read_config_word(dev, msi_control_reg(pos), &control);
817 nr_entries = multi_msix_capable(control);
818
819 pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
820 bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
821 table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
822 phys_addr = pci_resource_start (dev, bir) + table_offset;
823 base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
824 if (base == NULL)
825 return -ENOMEM;
826
827 /* MSI-X Table Initialization */
828 for (i = 0; i < nvec; i++) {
829 entry = alloc_msi_entry();
830 if (!entry)
831 break;
832 vector = get_msi_vector(dev);
833 if (vector < 0) {
834 kmem_cache_free(msi_cachep, entry);
835 break;
836 }
837
838 j = entries[i].entry;
839 entries[i].vector = vector;
840 entry->msi_attrib.type = PCI_CAP_ID_MSIX;
841 entry->msi_attrib.state = 0; /* Mark it not active */
842 entry->msi_attrib.is_64 = 1;
843 entry->msi_attrib.entry_nr = j;
844 entry->msi_attrib.maskbit = 1;
845 entry->msi_attrib.default_vector = dev->irq;
846 entry->msi_attrib.pos = pos;
847 entry->dev = dev;
848 entry->mask_base = base;
849 if (!head) {
850 entry->link.head = vector;
851 entry->link.tail = vector;
852 head = entry;
853 } else {
854 entry->link.head = temp;
855 entry->link.tail = tail->link.tail;
856 tail->link.tail = vector;
857 head->link.head = vector;
858 }
859 temp = vector;
860 tail = entry;
861 /* Replace with MSI-X handler */
862 irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
863 /* Configure MSI-X capability structure */
864 status = msi_ops->setup(dev, vector, &msg);
865 if (status < 0)
866 break;
867
868 write_msi_msg(entry, &msg);
869 attach_msi_entry(entry, vector);
870 }
871 if (i != nvec) {
872 i--;
873 for (; i >= 0; i--) {
874 vector = (entries + i)->vector;
875 msi_free_vector(dev, vector, 0);
876 (entries + i)->vector = 0;
877 }
878 return -EBUSY;
879 }
880 /* Set MSI-X enabled bits */
881 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
882
883 return 0;
884 }
885
886 /**
887 * pci_msi_supported - check whether MSI may be enabled on device
888 * @dev: pointer to the pci_dev data structure of MSI device function
889 *
890 * MSI must be globally enabled and supported by the device and its root
891 * bus. But, the root bus is not easy to find since some architectures
892 * have virtual busses on top of the PCI hierarchy (for instance the
893 * hypertransport bus), while the actual bus where MSI must be supported
894 * is below. So we test the MSI flag on all parent busses and assume
895 * that no quirk will ever set the NO_MSI flag on a non-root bus.
896 **/
897 static
898 int pci_msi_supported(struct pci_dev * dev)
899 {
900 struct pci_bus *bus;
901
902 if (!pci_msi_enable || !dev || dev->no_msi)
903 return -EINVAL;
904
905 /* check MSI flags of all parent busses */
906 for (bus = dev->bus; bus; bus = bus->parent)
907 if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
908 return -EINVAL;
909
910 return 0;
911 }
912
/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of device function with
 * a single MSI vector upon its software driver call to request for
 * MSI mode enabled on its hardware device function. A return of zero
 * indicates the successful setup of an entry zero with the new MSI
 * vector or non-zero for otherwise.
 **/
int pci_enable_msi(struct pci_dev* dev)
{
	int pos, temp, status;
	u16 control;

	if (pci_msi_supported(dev) < 0)
		return -EINVAL;

	/* msi_lookup_vector() may overwrite dev->irq; keep the original */
	temp = dev->irq;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* some arch backends can only target 64-bit message addresses */
	if (!is_64bit_address(control) && msi_ops->needs_64bit_address)
		return -EINVAL;

	WARN_ON(!msi_lookup_vector(dev, PCI_CAP_ID_MSI));

	/* Check whether driver already requested for MSI-X vectors */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
			printk(KERN_INFO "PCI: %s: Can't enable MSI.  "
			       "Device already has MSI-X vectors assigned\n",
			       pci_name(dev));
			dev->irq = temp;
			return -EINVAL;
	}
	status = msi_capability_init(dev);
	if (!status) {
		/* note: pos is the MSI-X capability offset at this point */
		if (!pos)
			nr_reserved_vectors--;	/* Only MSI capable */
		else if (nr_msix_devices > 0)
			nr_msix_devices--;	/* Both MSI and MSI-X capable,
						   but choose enabling MSI */
	}

	return status;
}
967
/*
 * pci_disable_msi - revert @dev from MSI back to pin-assertion IRQ
 *
 * No-op when MSI is globally off, the device is NULL, has no MSI
 * capability, or MSI is not currently enabled.  BUGs (after a warning)
 * if the driver never called free_irq() on the MSI vector, i.e. the
 * descriptor is still marked active.
 */
void pci_disable_msi(struct pci_dev* dev)
{
	struct msi_desc *entry;
	int pos, default_vector;
	u16 control;
	unsigned long flags;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return;

	disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[dev->irq];
	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return;
	}
	if (entry->msi_attrib.state) {
		/* vector still requested: complain loudly, then BUG */
		spin_unlock_irqrestore(&msi_lock, flags);
		printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
		       "free_irq() on MSI vector %d\n",
		       pci_name(dev), dev->irq);
		BUG_ON(entry->msi_attrib.state > 0);
	} else {
		default_vector = entry->msi_attrib.default_vector;
		spin_unlock_irqrestore(&msi_lock, flags);
		msi_free_vector(dev, dev->irq, 0);

		/* Restore dev->irq to its default pin-assertion vector */
		dev->irq = default_vector;
	}
}
1011
/*
 * msi_free_vector - tear down one MSI/MSI-X vector
 * @dev:      owning device (validated against the descriptor)
 * @vector:   vector to release
 * @reassign: non-zero when the vector will be immediately reused, in
 *            which case it is not returned to the released pool and the
 *            MSI-X entry is not re-masked
 *
 * Unlinks the descriptor from its circular vector chain, frees it, and
 * for MSI-X unmaps the table once the freed vector is its own list head
 * (i.e. the last member of its chain).
 *
 * Returns 0, or -EINVAL if @vector does not belong to @dev.
 */
static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
{
	struct msi_desc *entry;
	int head, entry_nr, type;
	void __iomem *base;
	unsigned long flags;

	/* let the arch backend release its resources first */
	msi_ops->teardown(vector);

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (!entry || entry->dev != dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return -EINVAL;
	}
	type = entry->msi_attrib.type;
	entry_nr = entry->msi_attrib.entry_nr;
	head = entry->link.head;
	base = entry->mask_base;
	/* splice this entry out of the circular chain */
	msi_desc[entry->link.head]->link.tail = entry->link.tail;
	msi_desc[entry->link.tail]->link.head = entry->link.head;
	entry->dev = NULL;
	if (!reassign) {
		vector_irq[vector] = 0;	/* mark released for recycling */
		nr_released_vectors++;
	}
	msi_desc[vector] = NULL;
	spin_unlock_irqrestore(&msi_lock, flags);

	kmem_cache_free(msi_cachep, entry);

	if (type == PCI_CAP_ID_MSIX) {
		if (!reassign)
			writel(1, base +
				entry_nr * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

		/* last member of the chain: drop the table mapping */
		if (head == vector)
			iounmap(base);
	}

	return 0;
}
1055
/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X vectors requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of device function with the number
 * of requested vectors upon its software driver call to request for
 * MSI-X mode enabled on its hardware device function. A return of zero
 * indicates the successful configuration of MSI-X capability structure
 * with new allocated MSI-X vectors. A return of < 0 indicates a failure.
 * Or a return of > 0 indicates that driver request is exceeding the number
 * of vectors available. Driver should use the returned value to re-send
 * its request.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries, free_vectors;
	int i, j, temp;
	u16 control;
	unsigned long flags;

	if (!entries || pci_msi_supported(dev) < 0)
		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries: out of range or duplicated */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;	/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	/* msi_lookup_vector() may overwrite dev->irq; keep the original */
	temp = dev->irq;
	WARN_ON(!msi_lookup_vector(dev, PCI_CAP_ID_MSIX));

	/* Check whether driver already requested for MSI vector */
	if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
		!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X.  "
		       "Device already has an MSI vector assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}

	spin_lock_irqsave(&msi_lock, flags);
	/*
	 * msi_lock is provided to ensure that enough vectors resources are
	 * available before granting.
	 */
	free_vectors = pci_vector_resources(last_alloc_vector,
				nr_released_vectors);
	/* Ensure that each MSI/MSI-X device has one vector reserved by
	   default to avoid any MSI-X driver to take all available
	   resources */
	free_vectors -= nr_reserved_vectors;
	/* Find the average of free vectors among MSI-X devices */
	if (nr_msix_devices > 0)
		free_vectors /= nr_msix_devices;
	spin_unlock_irqrestore(&msi_lock, flags);

	if (nvec > free_vectors) {
		/* a positive return tells the driver how many to retry with */
		if (free_vectors > 0)
			return free_vectors;
		else
			return -EBUSY;
	}

	status = msix_capability_init(dev, entries, nvec);
	if (!status && nr_msix_devices > 0)
		nr_msix_devices--;

	return status;
}
1145
/**
 * pci_disable_msix - shut down MSI-X mode on a device function
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 *
 * Disables the MSI-X capability (if currently enabled) and releases all
 * MSI-X vectors assigned to @dev, restoring its original pin-based IRQ.
 * Vectors still requested by the driver are reported and trigger BUG().
 */
void pci_disable_msix(struct pci_dev* dev)
{
	int pos, temp;
	u16 control;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return;		/* device has no MSI-X capability */

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return;		/* MSI-X is not currently enabled */

	disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

	/* msi_lookup_vector() overwrites dev->irq when a vector is found. */
	temp = dev->irq;
	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int state, vector, head, tail = 0, warning = 0;
		unsigned long flags;

		vector = head = dev->irq;
		dev->irq = temp;	/* Restore pin IRQ */
		/* Walk the circular list of this device's vectors under
		 * msi_lock; free each unused one, flag any still in use. */
		while (head != tail) {
			spin_lock_irqsave(&msi_lock, flags);
			state = msi_desc[vector]->msi_attrib.state;
			tail = msi_desc[vector]->link.tail;
			spin_unlock_irqrestore(&msi_lock, flags);
			if (state)
				warning = 1;
			else if (vector != head)	/* Release MSI-X vector */
				msi_free_vector(dev, vector, 0);
			vector = tail;
		}
		/* Release the head vector after the walk completes. */
		msi_free_vector(dev, vector, 0);
		if (warning) {
			printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
			       "free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			BUG_ON(warning > 0);	/* driver bug: vectors still requested */
		}
	}
}
1193
1194 /**
1195 * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
1196 * @dev: pointer to the pci_dev data structure of MSI(X) device function
1197 *
 * Called during hotplug removal of the device function. All MSI/MSI-X
 * vectors previously assigned to this device function are reclaimed to
 * the unused state, from which they may be reallocated later.
1202 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	int state, pos, temp;
	unsigned long flags;

	if (!pci_msi_enable || !dev)
		return;

	temp = dev->irq;		/* Save IOAPIC IRQ */
	/* First reclaim a plain MSI vector, if one was assigned.
	 * msi_lookup_vector() returns 0 on a hit and sets dev->irq. */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		spin_lock_irqsave(&msi_lock, flags);
		state = msi_desc[dev->irq]->msi_attrib.state;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (state) {
			/* Driver never called free_irq() on this vector. */
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on MSI vector %d\n",
			       pci_name(dev), dev->irq);
			BUG_ON(state > 0);
		} else	/* Release MSI vector assigned to this device */
			msi_free_vector(dev, dev->irq, 0);
		dev->irq = temp;		/* Restore IOAPIC IRQ */
	}
	/* Then reclaim any MSI-X vectors by walking their circular list. */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int vector, head, tail = 0, warning = 0;
		void __iomem *base = NULL;

		vector = head = dev->irq;
		while (head != tail) {
			spin_lock_irqsave(&msi_lock, flags);
			state = msi_desc[vector]->msi_attrib.state;
			tail = msi_desc[vector]->link.tail;
			base = msi_desc[vector]->mask_base;
			spin_unlock_irqrestore(&msi_lock, flags);
			if (state)
				warning = 1;
			else if (vector != head)	/* Release MSI-X vector */
				msi_free_vector(dev, vector, 0);
			vector = tail;
		}
		/* Release the head vector once the walk has finished. */
		msi_free_vector(dev, vector, 0);
		if (warning) {
			/* Some vectors were still requested: unmap the MSI-X
			 * table mapping here, then report the driver bug. */
			iounmap(base);
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
		dev->irq = temp;		/* Restore IOAPIC IRQ */
	}
}
1255
/*
 * Globally disable MSI/MSI-X support: clears pci_msi_enable, which the
 * MSI entry points in this file test before touching any MSI state.
 */
void pci_no_msi(void)
{
	pci_msi_enable = 0;
}
1260
1261 EXPORT_SYMBOL(pci_enable_msi);
1262 EXPORT_SYMBOL(pci_disable_msi);
1263 EXPORT_SYMBOL(pci_enable_msix);
1264 EXPORT_SYMBOL(pci_disable_msix);