Commit | Line | Data |
---|---|---|
8369ae33 RM |
1 | /* |
2 | * Broadcom specific AMBA | |
3 | * PCI Host | |
4 | * | |
5 | * Licensed under the GNU/GPL. See COPYING for details. | |
6 | */ | |
7 | ||
8 | #include "bcma_private.h" | |
ba7328b2 | 9 | #include <linux/slab.h> |
8369ae33 RM |
10 | #include <linux/bcma/bcma.h> |
11 | #include <linux/pci.h> | |
200351c7 | 12 | #include <linux/module.h> |
8369ae33 RM |
13 | |
14 | static void bcma_host_pci_switch_core(struct bcma_device *core) | |
15 | { | |
8be08a39 RM |
16 | int win2 = core->bus->host_is_pcie2 ? |
17 | BCMA_PCIE2_BAR0_WIN2 : BCMA_PCI_BAR0_WIN2; | |
18 | ||
8369ae33 RM |
19 | pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN, |
20 | core->addr); | |
8be08a39 | 21 | pci_write_config_dword(core->bus->host_pci, win2, core->wrap); |
8369ae33 | 22 | core->bus->mapped_core = core; |
3d9d8af3 | 23 | bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id); |
8369ae33 RM |
24 | } |
25 | ||
439678f8 RM |
26 | /* Provides access to the requested core. Returns base offset that has to be |
27 | * used. It makes use of fixed windows when possible. */ | |
28 | static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core) | |
8369ae33 | 29 | { |
439678f8 RM |
30 | switch (core->id.id) { |
31 | case BCMA_CORE_CHIPCOMMON: | |
32 | return 3 * BCMA_CORE_SIZE; | |
33 | case BCMA_CORE_PCIE: | |
34 | return 2 * BCMA_CORE_SIZE; | |
35 | } | |
36 | ||
8369ae33 RM |
37 | if (core->bus->mapped_core != core) |
38 | bcma_host_pci_switch_core(core); | |
439678f8 RM |
39 | return 0; |
40 | } | |
41 | ||
42 | static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset) | |
43 | { | |
44 | offset += bcma_host_pci_provide_access_to_core(core); | |
8369ae33 RM |
45 | return ioread8(core->bus->mmio + offset); |
46 | } | |
47 | ||
48 | static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset) | |
49 | { | |
439678f8 | 50 | offset += bcma_host_pci_provide_access_to_core(core); |
8369ae33 RM |
51 | return ioread16(core->bus->mmio + offset); |
52 | } | |
53 | ||
54 | static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset) | |
55 | { | |
439678f8 | 56 | offset += bcma_host_pci_provide_access_to_core(core); |
8369ae33 RM |
57 | return ioread32(core->bus->mmio + offset); |
58 | } | |
59 | ||
60 | static void bcma_host_pci_write8(struct bcma_device *core, u16 offset, | |
61 | u8 value) | |
62 | { | |
439678f8 | 63 | offset += bcma_host_pci_provide_access_to_core(core); |
8369ae33 RM |
64 | iowrite8(value, core->bus->mmio + offset); |
65 | } | |
66 | ||
67 | static void bcma_host_pci_write16(struct bcma_device *core, u16 offset, | |
68 | u16 value) | |
69 | { | |
439678f8 | 70 | offset += bcma_host_pci_provide_access_to_core(core); |
8369ae33 RM |
71 | iowrite16(value, core->bus->mmio + offset); |
72 | } | |
73 | ||
74 | static void bcma_host_pci_write32(struct bcma_device *core, u16 offset, | |
75 | u32 value) | |
76 | { | |
439678f8 | 77 | offset += bcma_host_pci_provide_access_to_core(core); |
8369ae33 RM |
78 | iowrite32(value, core->bus->mmio + offset); |
79 | } | |
80 | ||
9d75ef0f | 81 | #ifdef CONFIG_BCMA_BLOCKIO |
94f3457f HM |
/* Block read from @core: ensure the BAR0 window points at the core, then
 * stream @count bytes from register @offset into @buffer using repeated
 * I/O in @reg_width-sized units (1, 2 or 4 bytes).
 */
static void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
				     size_t count, u16 offset, u8 reg_width)
{
	void __iomem *addr = core->bus->mmio + offset;
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	switch (reg_width) {
	case sizeof(u8):
		ioread8_rep(addr, buffer, count);
		break;
	case sizeof(u16):
		WARN_ON(count & 1);	/* count is in bytes; must be even */
		ioread16_rep(addr, buffer, count >> 1);
		break;
	case sizeof(u32):
		WARN_ON(count & 3);	/* count must be a multiple of 4 */
		ioread32_rep(addr, buffer, count >> 2);
		break;
	default:
		WARN_ON(1);		/* unsupported access width */
	}
}
104 | ||
94f3457f HM |
/* Block write to @core: ensure the BAR0 window points at the core, then
 * stream @count bytes from @buffer to register @offset using repeated
 * I/O in @reg_width-sized units (1, 2 or 4 bytes).
 */
static void bcma_host_pci_block_write(struct bcma_device *core,
				      const void *buffer, size_t count,
				      u16 offset, u8 reg_width)
{
	void __iomem *addr = core->bus->mmio + offset;
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	switch (reg_width) {
	case sizeof(u8):
		iowrite8_rep(addr, buffer, count);
		break;
	case sizeof(u16):
		WARN_ON(count & 1);	/* count is in bytes; must be even */
		iowrite16_rep(addr, buffer, count >> 1);
		break;
	case sizeof(u32):
		WARN_ON(count & 3);	/* count must be a multiple of 4 */
		iowrite32_rep(addr, buffer, count >> 2);
		break;
	default:
		WARN_ON(1);		/* unsupported access width */
	}
}
128 | #endif | |
129 | ||
8369ae33 RM |
130 | static u32 bcma_host_pci_aread32(struct bcma_device *core, u16 offset) |
131 | { | |
132 | if (core->bus->mapped_core != core) | |
133 | bcma_host_pci_switch_core(core); | |
134 | return ioread32(core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset); | |
135 | } | |
136 | ||
137 | static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset, | |
138 | u32 value) | |
139 | { | |
140 | if (core->bus->mapped_core != core) | |
141 | bcma_host_pci_switch_core(core); | |
142 | iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset); | |
143 | } | |
144 | ||
94f3457f | 145 | static const struct bcma_host_ops bcma_host_pci_ops = { |
8369ae33 RM |
146 | .read8 = bcma_host_pci_read8, |
147 | .read16 = bcma_host_pci_read16, | |
148 | .read32 = bcma_host_pci_read32, | |
149 | .write8 = bcma_host_pci_write8, | |
150 | .write16 = bcma_host_pci_write16, | |
151 | .write32 = bcma_host_pci_write32, | |
9d75ef0f RM |
152 | #ifdef CONFIG_BCMA_BLOCKIO |
153 | .block_read = bcma_host_pci_block_read, | |
154 | .block_write = bcma_host_pci_block_write, | |
155 | #endif | |
8369ae33 RM |
156 | .aread32 = bcma_host_pci_aread32, |
157 | .awrite32 = bcma_host_pci_awrite32, | |
158 | }; | |
159 | ||
0f58a01d GKH |
/* PCI probe: enable the device, map BAR0, attach a freshly allocated
 * struct bcma_bus to it, scan the bus and register all discovered cores.
 * Returns 0 on success or a negative errno; all acquired resources are
 * released in reverse order on failure via the goto ladder.
 */
static int bcma_host_pci_probe(struct pci_dev *dev,
			       const struct pci_device_id *id)
{
	struct bcma_bus *bus;
	int err = -ENOMEM;
	const char *name;
	u32 val;

	/* Alloc */
	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		goto out;

	/* Basic PCI configuration */
	err = pci_enable_device(dev);
	if (err)
		goto err_kfree_bus;

	/* Prefer the driver name for /proc/iomem; fall back to dev name. */
	name = dev_name(&dev->dev);
	if (dev->driver && dev->driver->name)
		name = dev->driver->name;
	err = pci_request_regions(dev, name);
	if (err)
		goto err_pci_disable;
	pci_set_master(dev);

	/* Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(dev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(dev, 0x40, val & 0xffff00ff);

	/* SSB needed additional powering up, do we have any AMBA PCI cards? */
	if (!pci_is_pcie(dev)) {
		bcma_err(bus, "PCI card detected, they are not supported.\n");
		err = -ENXIO;
		goto err_pci_release_regions;
	}

	/* Map MMIO */
	err = -ENOMEM;
	bus->mmio = pci_iomap(dev, 0, ~0UL);
	if (!bus->mmio)
		goto err_pci_release_regions;

	/* Host specific */
	bus->host_pci = dev;
	bus->hosttype = BCMA_HOSTTYPE_PCI;
	bus->ops = &bcma_host_pci_ops;

	/* Board identity comes from the PCI subsystem IDs. */
	bus->boardinfo.vendor = bus->host_pci->subsystem_vendor;
	bus->boardinfo.type = bus->host_pci->subsystem_device;

	/* Initialize struct, detect chip */
	bcma_init_bus(bus);

	/* Scan bus to find out generation of PCIe core */
	err = bcma_bus_scan(bus);
	if (err)
		goto err_pci_unmap_mmio;

	/* A PCIe gen2 core changes which BAR0 window register is used
	 * (see bcma_host_pci_switch_core). */
	if (bcma_find_core(bus, BCMA_CORE_PCIE2))
		bus->host_is_pcie2 = true;

	/* Register */
	err = bcma_bus_register(bus);
	if (err)
		goto err_unregister_cores;

	pci_set_drvdata(dev, bus);

out:
	return err;

err_unregister_cores:
	bcma_unregister_cores(bus);
err_pci_unmap_mmio:
	pci_iounmap(dev, bus->mmio);
err_pci_release_regions:
	pci_release_regions(dev);
err_pci_disable:
	pci_disable_device(dev);
err_kfree_bus:
	kfree(bus);
	return err;
}
246 | ||
0f58a01d | 247 | static void bcma_host_pci_remove(struct pci_dev *dev) |
8369ae33 RM |
248 | { |
249 | struct bcma_bus *bus = pci_get_drvdata(dev); | |
250 | ||
251 | bcma_bus_unregister(bus); | |
252 | pci_iounmap(dev, bus->mmio); | |
253 | pci_release_regions(dev); | |
254 | pci_disable_device(dev); | |
255 | kfree(bus); | |
8369ae33 RM |
256 | } |
257 | ||
ccd60953 | 258 | #ifdef CONFIG_PM_SLEEP |
5d2031f2 | 259 | static int bcma_host_pci_suspend(struct device *dev) |
775ab521 | 260 | { |
5d2031f2 LT |
261 | struct pci_dev *pdev = to_pci_dev(dev); |
262 | struct bcma_bus *bus = pci_get_drvdata(pdev); | |
775ab521 | 263 | |
28e7d218 | 264 | bus->mapped_core = NULL; |
5d2031f2 | 265 | |
685a4ef0 | 266 | return bcma_bus_suspend(bus); |
775ab521 RM |
267 | } |
268 | ||
5d2031f2 | 269 | static int bcma_host_pci_resume(struct device *dev) |
775ab521 | 270 | { |
5d2031f2 LT |
271 | struct pci_dev *pdev = to_pci_dev(dev); |
272 | struct bcma_bus *bus = pci_get_drvdata(pdev); | |
775ab521 | 273 | |
5d2031f2 LT |
274 | return bcma_bus_resume(bus); |
275 | } | |
775ab521 | 276 | |
5d2031f2 LT |
277 | static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend, |
278 | bcma_host_pci_resume); | |
279 | #define BCMA_PM_OPS (&bcma_pm_ops) | |
775ab521 | 280 | |
ccd60953 | 281 | #else /* CONFIG_PM_SLEEP */ |
5d2031f2 LT |
282 | |
283 | #define BCMA_PM_OPS NULL | |
284 | ||
ccd60953 | 285 | #endif /* CONFIG_PM_SLEEP */ |
775ab521 | 286 | |
342a11e1 | 287 | static const struct pci_device_id bcma_pci_bridge_tbl[] = { |
9594b56d | 288 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, |
b9f54bd0 | 289 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) }, |
34b6d429 | 290 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) }, /* 0xa8d8 */ |
8369ae33 RM |
291 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, |
292 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, | |
91fa4b0a | 293 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, |
646e0827 | 294 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) }, |
c263c2c1 | 295 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) }, |
9b6cc9a8 | 296 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) }, |
515b399c | 297 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) }, |
9b6cc9a8 | 298 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) }, |
d1d3799f | 299 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) }, |
27cfdb05 | 300 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) }, |
9b6cc9a8 | 301 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43b1) }, |
8369ae33 | 302 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, |
34b6d429 RM |
303 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) }, /* 0xa8db, BCM43217 (sic!) */ |
304 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43228) }, /* 0xa8dc */ | |
8369ae33 RM |
305 | { 0, }, |
306 | }; | |
307 | MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl); | |
308 | ||
/* PCI driver glue: binds the ID table, probe/remove and PM callbacks. */
static struct pci_driver bcma_pci_bridge_driver = {
	.name = "bcma-pci-bridge",
	.id_table = bcma_pci_bridge_tbl,
	.probe = bcma_host_pci_probe,
	.remove = bcma_host_pci_remove,
	.driver.pm = BCMA_PM_OPS,
};
316 | ||
/* Register the PCI bridge driver; called from the bcma module init. */
int __init bcma_host_pci_init(void)
{
	return pci_register_driver(&bcma_pci_bridge_driver);
}
321 | ||
/* Unregister the PCI bridge driver; called from the bcma module exit. */
void __exit bcma_host_pci_exit(void)
{
	pci_unregister_driver(&bcma_pci_bridge_driver);
}
4186721d RM |
326 | |
327 | /************************************************** | |
328 | * Runtime ops for drivers. | |
329 | **************************************************/ | |
330 | ||
331 | /* See also pcicore_up */ | |
332 | void bcma_host_pci_up(struct bcma_bus *bus) | |
333 | { | |
334 | if (bus->hosttype != BCMA_HOSTTYPE_PCI) | |
335 | return; | |
336 | ||
337 | if (bus->host_is_pcie2) | |
804e27de | 338 | bcma_core_pcie2_up(&bus->drv_pcie2); |
4186721d RM |
339 | else |
340 | bcma_core_pci_up(&bus->drv_pci[0]); | |
341 | } | |
342 | EXPORT_SYMBOL_GPL(bcma_host_pci_up); | |
343 | ||
344 | /* See also pcicore_down */ | |
345 | void bcma_host_pci_down(struct bcma_bus *bus) | |
346 | { | |
347 | if (bus->hosttype != BCMA_HOSTTYPE_PCI) | |
348 | return; | |
349 | ||
350 | if (!bus->host_is_pcie2) | |
351 | bcma_core_pci_down(&bus->drv_pci[0]); | |
352 | } | |
353 | EXPORT_SYMBOL_GPL(bcma_host_pci_down); | |
702131e2 RM |
354 | |
355 | /* See also si_pci_setup */ | |
356 | int bcma_host_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core, | |
357 | bool enable) | |
358 | { | |
359 | struct pci_dev *pdev; | |
360 | u32 coremask, tmp; | |
361 | int err = 0; | |
362 | ||
363 | if (bus->hosttype != BCMA_HOSTTYPE_PCI) { | |
364 | /* This bcma device is not on a PCI host-bus. So the IRQs are | |
365 | * not routed through the PCI core. | |
366 | * So we must not enable routing through the PCI core. */ | |
367 | goto out; | |
368 | } | |
369 | ||
370 | pdev = bus->host_pci; | |
371 | ||
372 | err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp); | |
373 | if (err) | |
374 | goto out; | |
375 | ||
376 | coremask = BIT(core->core_index) << 8; | |
377 | if (enable) | |
378 | tmp |= coremask; | |
379 | else | |
380 | tmp &= ~coremask; | |
381 | ||
382 | err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp); | |
383 | ||
384 | out: | |
385 | return err; | |
386 | } | |
387 | EXPORT_SYMBOL_GPL(bcma_host_pci_irq_ctl); |