Merge remote-tracking branch 'origin/x86/boot' into x86/mm2
[deliverable/linux.git] / arch / s390 / pci / pci_msi.c
CommitLineData
9a4da8a5
JG
1/*
2 * Copyright IBM Corp. 2012
3 *
4 * Author(s):
5 * Jan Glauber <jang@linux.vnet.ibm.com>
6 */
7
8#define COMPONENT "zPCI"
9#define pr_fmt(fmt) COMPONENT ": " fmt
10
11#include <linux/kernel.h>
12#include <linux/err.h>
13#include <linux/rculist.h>
14#include <linux/hash.h>
15#include <linux/pci.h>
16#include <linux/msi.h>
17#include <asm/hw_irq.h>
18
/* mapping of irq numbers to msi_desc */
static struct hlist_head *msi_hash;
/* log2 of the number of hash buckets: 1 << 6 = 64 buckets */
static unsigned int msihash_shift = 6;
#define msi_hashfn(nr)	hash_long(nr, msihash_shift)

/* serializes writers of msi_hash and zdev->msi_map; lookups use RCU */
static DEFINE_SPINLOCK(msi_map_lock);
25
26struct msi_desc *__irq_get_msi_desc(unsigned int irq)
27{
28 struct hlist_node *entry;
29 struct msi_map *map;
30
31 hlist_for_each_entry_rcu(map, entry,
32 &msi_hash[msi_hashfn(irq)], msi_chain)
33 if (map->irq == irq)
34 return map->msi;
35 return NULL;
36}
37
38int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
39{
40 if (msi->msi_attrib.is_msix) {
41 int offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
42 PCI_MSIX_ENTRY_VECTOR_CTRL;
43 msi->masked = readl(msi->mask_base + offset);
44 writel(flag, msi->mask_base + offset);
45 } else {
46 if (msi->msi_attrib.maskbit) {
47 int pos;
48 u32 mask_bits;
49
50 pos = (long) msi->mask_base;
51 pci_read_config_dword(msi->dev, pos, &mask_bits);
52 mask_bits &= ~(mask);
53 mask_bits |= flag & mask;
54 pci_write_config_dword(msi->dev, pos, mask_bits);
55 } else {
56 return 0;
57 }
58 }
59
60 msi->msi_attrib.maskbit = !!flag;
61 return 1;
62}
63
64int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
65 unsigned int nr, int offset)
66{
67 struct msi_map *map;
68 struct msi_msg msg;
69 int rc;
70
71 map = kmalloc(sizeof(*map), GFP_KERNEL);
72 if (map == NULL)
73 return -ENOMEM;
74
75 map->irq = nr;
76 map->msi = msi;
77 zdev->msi_map[nr & ZPCI_MSI_MASK] = map;
78
79 pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
80 __func__, nr, msi_hashfn(nr));
81 hlist_add_head_rcu(&map->msi_chain, &msi_hash[msi_hashfn(nr)]);
82
83 spin_lock(&msi_map_lock);
84 rc = irq_set_msi_desc(nr, msi);
85 if (rc) {
86 spin_unlock(&msi_map_lock);
87 hlist_del_rcu(&map->msi_chain);
88 kfree(map);
89 zdev->msi_map[nr & ZPCI_MSI_MASK] = NULL;
90 return rc;
91 }
92 spin_unlock(&msi_map_lock);
93
94 msg.data = nr - offset;
95 msg.address_lo = zdev->msi_addr & 0xffffffff;
96 msg.address_hi = zdev->msi_addr >> 32;
97 write_msi_msg(nr, &msg);
98 return 0;
99}
100
101void zpci_teardown_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi)
102{
103 int irq = msi->irq & ZPCI_MSI_MASK;
104 struct msi_map *map;
105
106 msi->msg.address_lo = 0;
107 msi->msg.address_hi = 0;
108 msi->msg.data = 0;
109 msi->irq = 0;
110 zpci_msi_set_mask_bits(msi, 1, 1);
111
112 spin_lock(&msi_map_lock);
113 map = zdev->msi_map[irq];
114 hlist_del_rcu(&map->msi_chain);
115 kfree(map);
116 zdev->msi_map[irq] = NULL;
117 spin_unlock(&msi_map_lock);
118}
119
120/*
121 * The msi hash table has 256 entries which is good for 4..20
122 * devices (a typical device allocates 10 + CPUs MSI's). Maybe make
123 * the hash table size adjustable later.
124 */
125int __init zpci_msihash_init(void)
126{
127 unsigned int i;
128
129 msi_hash = kmalloc(256 * sizeof(*msi_hash), GFP_KERNEL);
130 if (!msi_hash)
131 return -ENOMEM;
132
133 for (i = 0; i < (1U << msihash_shift); i++)
134 INIT_HLIST_HEAD(&msi_hash[i]);
135 return 0;
136}
137
/*
 * Free the irq->msi_desc lookup hash.
 *
 * NOTE(review): this is marked __init, so its text is discarded after
 * boot - presumably it is only called from an init-time error path.
 * Confirm no late caller exists before relying on it for teardown.
 */
void __init zpci_msihash_exit(void)
{
	kfree(msi_hash);
}
This page took 0.03762 seconds and 5 git commands to generate.