drivers/ssb/main.c
1 /*
2 * Sonics Silicon Backplane
3 * Subsystem core
4 *
5 * Copyright 2005, Broadcom Corporation
6 * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
7 *
8 * Licensed under the GNU/GPL. See COPYING for details.
9 */
10
11 #include "ssb_private.h"
12
13 #include <linux/delay.h>
14 #include <linux/io.h>
15 #include <linux/module.h>
16 #include <linux/platform_device.h>
17 #include <linux/ssb/ssb.h>
18 #include <linux/ssb/ssb_regs.h>
19 #include <linux/ssb/ssb_driver_gige.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/pci.h>
22 #include <linux/mmc/sdio_func.h>
23 #include <linux/slab.h>
24
25 #include <pcmcia/cistpl.h>
26 #include <pcmcia/ds.h>
27
28
29 MODULE_DESCRIPTION("Sonics Silicon Backplane driver");
30 MODULE_LICENSE("GPL");
31
32
33 /* Temporary list of yet-to-be-attached buses */
34 static LIST_HEAD(attach_queue);
35 /* List of running buses */
36 static LIST_HEAD(buses);
37 /* Software ID counter */
38 static unsigned int next_busnumber;
39 /* buses_mutex locks the two bus lists and next_busnumber.
40 * Don't lock it directly; use ssb_buses_[un]lock() below. */
41 static DEFINE_MUTEX(buses_mutex);
42
43 /* The code flow differs if the bus is initialized during early
44 * boot, because various required services are not available
45 * that early. This mechanism delays those initializations until
46 * early boot has finished. It is also used to avoid mutex
47 * locking, which is neither available nor needed during early
48 * boot. */
49 static bool ssb_is_early_boot = true;
50
51 static void ssb_buses_lock(void);
52 static void ssb_buses_unlock(void);
53
54
55 #ifdef CONFIG_SSB_PCIHOST
56 struct ssb_bus *ssb_pci_dev_to_bus(struct pci_dev *pdev)
57 {
58 struct ssb_bus *bus;
59
60 ssb_buses_lock();
61 list_for_each_entry(bus, &buses, list) {
62 if (bus->bustype == SSB_BUSTYPE_PCI &&
63 bus->host_pci == pdev)
64 goto found;
65 }
66 bus = NULL;
67 found:
68 ssb_buses_unlock();
69
70 return bus;
71 }
72 #endif /* CONFIG_SSB_PCIHOST */
73
74 #ifdef CONFIG_SSB_PCMCIAHOST
75 struct ssb_bus *ssb_pcmcia_dev_to_bus(struct pcmcia_device *pdev)
76 {
77 struct ssb_bus *bus;
78
79 ssb_buses_lock();
80 list_for_each_entry(bus, &buses, list) {
81 if (bus->bustype == SSB_BUSTYPE_PCMCIA &&
82 bus->host_pcmcia == pdev)
83 goto found;
84 }
85 bus = NULL;
86 found:
87 ssb_buses_unlock();
88
89 return bus;
90 }
91 #endif /* CONFIG_SSB_PCMCIAHOST */
92
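/**
 * ssb_for_each_bus_call - Call a function for each registered SSB bus
 * @data: Opaque argument passed through to @func.
 * @func: Callback. Iteration stops as soon as it returns a value >= 0,
 *	which is then returned to the caller.
 *
 * Returns -ENODEV if no callback invocation returned a value >= 0.
 */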
93 int ssb_for_each_bus_call(unsigned long data,
94 int (*func)(struct ssb_bus *bus, unsigned long data))
95 {
96 struct ssb_bus *bus;
97 int res;
98
99 ssb_buses_lock();
100 list_for_each_entry(bus, &buses, list) {
101 res = func(bus, data);
102 if (res >= 0) {
103 ssb_buses_unlock();
104 return res;
105 }
106 }
107 ssb_buses_unlock();
108
109 return -ENODEV;
110 }
111
112 static struct ssb_device *ssb_device_get(struct ssb_device *dev)
113 {
114 if (dev)
115 get_device(dev->dev);
116 return dev;
117 }
118
119 static void ssb_device_put(struct ssb_device *dev)
120 {
121 if (dev)
122 put_device(dev->dev);
123 }
124
125 static int ssb_device_resume(struct device *dev)
126 {
127 struct ssb_device *ssb_dev = dev_to_ssb_dev(dev);
128 struct ssb_driver *ssb_drv;
129 int err = 0;
130
131 if (dev->driver) {
132 ssb_drv = drv_to_ssb_drv(dev->driver);
133 if (ssb_drv && ssb_drv->resume)
134 err = ssb_drv->resume(ssb_dev);
135 if (err)
136 goto out;
137 }
138 out:
139 return err;
140 }
141
142 static int ssb_device_suspend(struct device *dev, pm_message_t state)
143 {
144 struct ssb_device *ssb_dev = dev_to_ssb_dev(dev);
145 struct ssb_driver *ssb_drv;
146 int err = 0;
147
148 if (dev->driver) {
149 ssb_drv = drv_to_ssb_drv(dev->driver);
150 if (ssb_drv && ssb_drv->suspend)
151 err = ssb_drv->suspend(ssb_dev, state);
152 if (err)
153 goto out;
154 }
155 out:
156 return err;
157 }
158
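/**
 * ssb_bus_resume - Resume a suspended SSB bus
 * @bus: The bus to resume.
 *
 * Resets the cached hardware state so the hardware gets completely
 * re-initialized, powers the bus back up and restores the ChipCommon
 * and PCMCIA host state. Returns 0 on success or a negative error code.
 */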
159 int ssb_bus_resume(struct ssb_bus *bus)
160 {
161 int err;
162
163 /* Reset HW state information in memory, so that HW is
164 * completely reinitialized. */
165 bus->mapped_device = NULL;
166 #ifdef CONFIG_SSB_DRIVER_PCICORE
167 bus->pcicore.setup_done = 0;
168 #endif
169
170 err = ssb_bus_powerup(bus, 0);
171 if (err)
172 return err;
173 err = ssb_pcmcia_hardware_setup(bus);
174 if (err) {
175 ssb_bus_may_powerdown(bus);
176 return err;
177 }
178 ssb_chipco_resume(&bus->chipco);
179 ssb_bus_may_powerdown(bus);
180
181 return 0;
182 }
183 EXPORT_SYMBOL(ssb_bus_resume);
184
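/**
 * ssb_bus_suspend - Suspend an SSB bus
 * @bus: The bus to suspend.
 *
 * Saves the ChipCommon state and turns off the crystal oscillator and
 * PLL. Use ssb_bus_resume() to wake the bus up again.
 */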
185 int ssb_bus_suspend(struct ssb_bus *bus)
186 {
187 ssb_chipco_suspend(&bus->chipco);
188 ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 0);
189
190 return 0;
191 }
192 EXPORT_SYMBOL(ssb_bus_suspend);
193
194 #ifdef CONFIG_SSB_SPROM
195 /**
196 * ssb_devices_freeze - Freeze all devices on the bus
197 * @bus: The bus.
198 * @ctx: Context structure. Pass this to ssb_devices_thaw().
199 *
200 * After freezing, no device driver will be handling a device
201 * on this bus anymore. ssb_devices_thaw() must be called after
202 * a successful freeze to reactivate the devices.
203 */
204 int ssb_devices_freeze(struct ssb_bus *bus, struct ssb_freeze_context *ctx)
205 {
206 struct ssb_device *sdev;
207 struct ssb_driver *sdrv;
208 unsigned int i;
209
210 memset(ctx, 0, sizeof(*ctx));
211 ctx->bus = bus;
212 SSB_WARN_ON(bus->nr_devices > ARRAY_SIZE(ctx->device_frozen));
213
214 for (i = 0; i < bus->nr_devices; i++) {
215 sdev = ssb_device_get(&bus->devices[i]);
216
217 if (!sdev->dev || !sdev->dev->driver ||
218 !device_is_registered(sdev->dev)) {
219 ssb_device_put(sdev);
220 continue;
221 }
222 sdrv = drv_to_ssb_drv(sdev->dev->driver);
223 if (SSB_WARN_ON(!sdrv->remove))
224 continue;
225 sdrv->remove(sdev);
226 ctx->device_frozen[i] = 1;
227 }
228
229 return 0;
230 }
231
232 /**
233 * ssb_devices_thaw - Unfreeze all devices on the bus
234 * @ctx: The context structure from ssb_devices_freeze()
235 *
236 * This re-attaches the device drivers and re-initializes the devices.
237 */
238 int ssb_devices_thaw(struct ssb_freeze_context *ctx)
239 {
240 struct ssb_bus *bus = ctx->bus;
241 struct ssb_device *sdev;
242 struct ssb_driver *sdrv;
243 unsigned int i;
244 int err, result = 0;
245
246 for (i = 0; i < bus->nr_devices; i++) {
247 if (!ctx->device_frozen[i])
248 continue;
249 sdev = &bus->devices[i];
250
251 if (SSB_WARN_ON(!sdev->dev || !sdev->dev->driver))
252 continue;
253 sdrv = drv_to_ssb_drv(sdev->dev->driver);
254 if (SSB_WARN_ON(!sdrv || !sdrv->probe))
255 continue;
256
257 err = sdrv->probe(sdev, &sdev->id);
258 if (err) {
259 ssb_err("Failed to thaw device %s\n",
260 dev_name(sdev->dev));
261 result = err;
262 }
263 ssb_device_put(sdev);
264 }
265
266 return result;
267 }
268 #endif /* CONFIG_SSB_SPROM */
269
270 static void ssb_device_shutdown(struct device *dev)
271 {
272 struct ssb_device *ssb_dev = dev_to_ssb_dev(dev);
273 struct ssb_driver *ssb_drv;
274
275 if (!dev->driver)
276 return;
277 ssb_drv = drv_to_ssb_drv(dev->driver);
278 if (ssb_drv && ssb_drv->shutdown)
279 ssb_drv->shutdown(ssb_dev);
280 }
281
282 static int ssb_device_remove(struct device *dev)
283 {
284 struct ssb_device *ssb_dev = dev_to_ssb_dev(dev);
285 struct ssb_driver *ssb_drv = drv_to_ssb_drv(dev->driver);
286
287 if (ssb_drv && ssb_drv->remove)
288 ssb_drv->remove(ssb_dev);
289 ssb_device_put(ssb_dev);
290
291 return 0;
292 }
293
294 static int ssb_device_probe(struct device *dev)
295 {
296 struct ssb_device *ssb_dev = dev_to_ssb_dev(dev);
297 struct ssb_driver *ssb_drv = drv_to_ssb_drv(dev->driver);
298 int err = 0;
299
300 ssb_device_get(ssb_dev);
301 if (ssb_drv && ssb_drv->probe)
302 err = ssb_drv->probe(ssb_dev, &ssb_dev->id);
303 if (err)
304 ssb_device_put(ssb_dev);
305
306 return err;
307 }
308
309 static int ssb_match_devid(const struct ssb_device_id *tabid,
310 const struct ssb_device_id *devid)
311 {
312 if ((tabid->vendor != devid->vendor) &&
313 tabid->vendor != SSB_ANY_VENDOR)
314 return 0;
315 if ((tabid->coreid != devid->coreid) &&
316 tabid->coreid != SSB_ANY_ID)
317 return 0;
318 if ((tabid->revision != devid->revision) &&
319 tabid->revision != SSB_ANY_REV)
320 return 0;
321 return 1;
322 }
323
324 static int ssb_bus_match(struct device *dev, struct device_driver *drv)
325 {
326 struct ssb_device *ssb_dev = dev_to_ssb_dev(dev);
327 struct ssb_driver *ssb_drv = drv_to_ssb_drv(drv);
328 const struct ssb_device_id *id;
329
330 for (id = ssb_drv->id_table;
331 id->vendor || id->coreid || id->revision;
332 id++) {
333 if (ssb_match_devid(id, &ssb_dev->id))
334 return 1; /* found */
335 }
336
337 return 0;
338 }
339
340 static int ssb_device_uevent(struct device *dev, struct kobj_uevent_env *env)
341 {
342 struct ssb_device *ssb_dev;
343 
344 if (!dev)
345 return -ENODEV;
346 ssb_dev = dev_to_ssb_dev(dev);
347 return add_uevent_var(env,
348 "MODALIAS=ssb:v%04Xid%04Xrev%02X",
349 ssb_dev->id.vendor, ssb_dev->id.coreid,
350 ssb_dev->id.revision);
351 }
352
353 #define ssb_config_attr(attrib, field, format_string) \
354 static ssize_t \
355 attrib##_show(struct device *dev, struct device_attribute *attr, char *buf) \
356 { \
357 return sprintf(buf, format_string, dev_to_ssb_dev(dev)->field); \
358 } \
359 static DEVICE_ATTR_RO(attrib);
360
361 ssb_config_attr(core_num, core_index, "%u\n")
362 ssb_config_attr(coreid, id.coreid, "0x%04x\n")
363 ssb_config_attr(vendor, id.vendor, "0x%04x\n")
364 ssb_config_attr(revision, id.revision, "%u\n")
365 ssb_config_attr(irq, irq, "%u\n")
366 static ssize_t
367 name_show(struct device *dev, struct device_attribute *attr, char *buf)
368 {
369 return sprintf(buf, "%s\n",
370 ssb_core_name(dev_to_ssb_dev(dev)->id.coreid));
371 }
372 static DEVICE_ATTR_RO(name);
373
374 static struct attribute *ssb_device_attrs[] = {
375 &dev_attr_name.attr,
376 &dev_attr_core_num.attr,
377 &dev_attr_coreid.attr,
378 &dev_attr_vendor.attr,
379 &dev_attr_revision.attr,
380 &dev_attr_irq.attr,
381 NULL,
382 };
383 ATTRIBUTE_GROUPS(ssb_device);
384
385 static struct bus_type ssb_bustype = {
386 .name = "ssb",
387 .match = ssb_bus_match,
388 .probe = ssb_device_probe,
389 .remove = ssb_device_remove,
390 .shutdown = ssb_device_shutdown,
391 .suspend = ssb_device_suspend,
392 .resume = ssb_device_resume,
393 .uevent = ssb_device_uevent,
394 .dev_groups = ssb_device_groups,
395 };
396
397 static void ssb_buses_lock(void)
398 {
399 /* See the comment at the ssb_is_early_boot definition */
400 if (!ssb_is_early_boot)
401 mutex_lock(&buses_mutex);
402 }
403
404 static void ssb_buses_unlock(void)
405 {
406 /* See the comment at the ssb_is_early_boot definition */
407 if (!ssb_is_early_boot)
408 mutex_unlock(&buses_mutex);
409 }
410
411 static void ssb_devices_unregister(struct ssb_bus *bus)
412 {
413 struct ssb_device *sdev;
414 int i;
415
416 for (i = bus->nr_devices - 1; i >= 0; i--) {
417 sdev = &(bus->devices[i]);
418 if (sdev->dev)
419 device_unregister(sdev->dev);
420 }
421
422 #ifdef CONFIG_SSB_EMBEDDED
423 if (bus->bustype == SSB_BUSTYPE_SSB)
424 platform_device_unregister(bus->watchdog);
425 #endif
426 }
427
428 void ssb_bus_unregister(struct ssb_bus *bus)
429 {
430 int err;
431
432 err = ssb_gpio_unregister(bus);
433 if (err == -EBUSY)
434 ssb_dbg("Some GPIOs are still in use\n");
435 else if (err)
436 ssb_dbg("Can not unregister GPIO driver: %i\n", err);
437
438 ssb_buses_lock();
439 ssb_devices_unregister(bus);
440 list_del(&bus->list);
441 ssb_buses_unlock();
442
443 ssb_pcmcia_exit(bus);
444 ssb_pci_exit(bus);
445 ssb_iounmap(bus);
446 }
447 EXPORT_SYMBOL(ssb_bus_unregister);
448
449 static void ssb_release_dev(struct device *dev)
450 {
451 struct __ssb_dev_wrapper *devwrap;
452
453 devwrap = container_of(dev, struct __ssb_dev_wrapper, dev);
454 kfree(devwrap);
455 }
456
457 static int ssb_devices_register(struct ssb_bus *bus)
458 {
459 struct ssb_device *sdev;
460 struct device *dev;
461 struct __ssb_dev_wrapper *devwrap;
462 int i, err = 0;
463 int dev_idx = 0;
464
465 for (i = 0; i < bus->nr_devices; i++) {
466 sdev = &(bus->devices[i]);
467
468 /* We don't register SSB-system devices to the kernel,
469 * as the drivers for them are built into SSB. */
470 switch (sdev->id.coreid) {
471 case SSB_DEV_CHIPCOMMON:
472 case SSB_DEV_PCI:
473 case SSB_DEV_PCIE:
474 case SSB_DEV_PCMCIA:
475 case SSB_DEV_MIPS:
476 case SSB_DEV_MIPS_3302:
477 case SSB_DEV_EXTIF:
478 continue;
479 }
480
481 devwrap = kzalloc(sizeof(*devwrap), GFP_KERNEL);
482 if (!devwrap) {
483 ssb_err("Could not allocate device\n");
484 err = -ENOMEM;
485 goto error;
486 }
487 dev = &devwrap->dev;
488 devwrap->sdev = sdev;
489
490 dev->release = ssb_release_dev;
491 dev->bus = &ssb_bustype;
492 dev_set_name(dev, "ssb%u:%d", bus->busnumber, dev_idx);
493
494 switch (bus->bustype) {
495 case SSB_BUSTYPE_PCI:
496 #ifdef CONFIG_SSB_PCIHOST
497 sdev->irq = bus->host_pci->irq;
498 dev->parent = &bus->host_pci->dev;
499 sdev->dma_dev = dev->parent;
500 #endif
501 break;
502 case SSB_BUSTYPE_PCMCIA:
503 #ifdef CONFIG_SSB_PCMCIAHOST
504 sdev->irq = bus->host_pcmcia->irq;
505 dev->parent = &bus->host_pcmcia->dev;
506 #endif
507 break;
508 case SSB_BUSTYPE_SDIO:
509 #ifdef CONFIG_SSB_SDIOHOST
510 dev->parent = &bus->host_sdio->dev;
511 #endif
512 break;
513 case SSB_BUSTYPE_SSB:
514 dev->dma_mask = &dev->coherent_dma_mask;
515 sdev->dma_dev = dev;
516 break;
517 }
518
519 sdev->dev = dev;
520 err = device_register(dev);
521 if (err) {
522 ssb_err("Could not register %s\n", dev_name(dev));
523 /* Set dev to NULL to not unregister
524 * dev on error unwinding. */
525 sdev->dev = NULL;
526 kfree(devwrap);
527 goto error;
528 }
529 dev_idx++;
530 }
531
532 #ifdef CONFIG_SSB_DRIVER_MIPS
533 if (bus->mipscore.pflash.present) {
534 err = platform_device_register(&ssb_pflash_dev);
535 if (err)
536 pr_err("Error registering parallel flash\n");
537 }
538 #endif
539
540 #ifdef CONFIG_SSB_SFLASH
541 if (bus->mipscore.sflash.present) {
542 err = platform_device_register(&ssb_sflash_dev);
543 if (err)
544 pr_err("Error registering serial flash\n");
545 }
546 #endif
547
548 return 0;
549 error:
550 /* Unwind the already registered devices. */
551 ssb_devices_unregister(bus);
552 return err;
553 }
554
555 /* Needs ssb_buses_lock() */
556 static int ssb_attach_queued_buses(void)
557 {
558 struct ssb_bus *bus, *n;
559 int err = 0;
560 int drop_them_all = 0;
561
562 list_for_each_entry_safe(bus, n, &attach_queue, list) {
563 if (drop_them_all) {
564 list_del(&bus->list);
565 continue;
566 }
567 /* Can't init the PCIcore in ssb_bus_register(), as that
568 * is too early in boot for embedded systems
569 * (no udelay() available). So do it here in attach stage.
570 */
571 err = ssb_bus_powerup(bus, 0);
572 if (err)
573 goto error;
574 ssb_pcicore_init(&bus->pcicore);
575 if (bus->bustype == SSB_BUSTYPE_SSB)
576 ssb_watchdog_register(bus);
577
578 err = ssb_gpio_init(bus);
579 if (err == -ENOTSUPP)
580 ssb_dbg("GPIO driver not activated\n");
581 else if (err)
582 ssb_dbg("Error registering GPIO driver: %i\n", err);
583
584 ssb_bus_may_powerdown(bus);
585
586 err = ssb_devices_register(bus);
587 error:
588 if (err) {
589 drop_them_all = 1;
590 list_del(&bus->list);
591 continue;
592 }
593 list_move_tail(&bus->list, &buses);
594 }
595
596 return err;
597 }
598
599 static u8 ssb_ssb_read8(struct ssb_device *dev, u16 offset)
600 {
601 struct ssb_bus *bus = dev->bus;
602
603 offset += dev->core_index * SSB_CORE_SIZE;
604 return readb(bus->mmio + offset);
605 }
606
607 static u16 ssb_ssb_read16(struct ssb_device *dev, u16 offset)
608 {
609 struct ssb_bus *bus = dev->bus;
610
611 offset += dev->core_index * SSB_CORE_SIZE;
612 return readw(bus->mmio + offset);
613 }
614
615 static u32 ssb_ssb_read32(struct ssb_device *dev, u16 offset)
616 {
617 struct ssb_bus *bus = dev->bus;
618
619 offset += dev->core_index * SSB_CORE_SIZE;
620 return readl(bus->mmio + offset);
621 }
622
623 #ifdef CONFIG_SSB_BLOCKIO
624 static void ssb_ssb_block_read(struct ssb_device *dev, void *buffer,
625 size_t count, u16 offset, u8 reg_width)
626 {
627 struct ssb_bus *bus = dev->bus;
628 void __iomem *addr;
629
630 offset += dev->core_index * SSB_CORE_SIZE;
631 addr = bus->mmio + offset;
632
633 switch (reg_width) {
634 case sizeof(u8): {
635 u8 *buf = buffer;
636
637 while (count) {
638 *buf = __raw_readb(addr);
639 buf++;
640 count--;
641 }
642 break;
643 }
644 case sizeof(u16): {
645 __le16 *buf = buffer;
646
647 SSB_WARN_ON(count & 1);
648 while (count) {
649 *buf = (__force __le16)__raw_readw(addr);
650 buf++;
651 count -= 2;
652 }
653 break;
654 }
655 case sizeof(u32): {
656 __le32 *buf = buffer;
657
658 SSB_WARN_ON(count & 3);
659 while (count) {
660 *buf = (__force __le32)__raw_readl(addr);
661 buf++;
662 count -= 4;
663 }
664 break;
665 }
666 default:
667 SSB_WARN_ON(1);
668 }
669 }
670 #endif /* CONFIG_SSB_BLOCKIO */
671
672 static void ssb_ssb_write8(struct ssb_device *dev, u16 offset, u8 value)
673 {
674 struct ssb_bus *bus = dev->bus;
675
676 offset += dev->core_index * SSB_CORE_SIZE;
677 writeb(value, bus->mmio + offset);
678 }
679
680 static void ssb_ssb_write16(struct ssb_device *dev, u16 offset, u16 value)
681 {
682 struct ssb_bus *bus = dev->bus;
683
684 offset += dev->core_index * SSB_CORE_SIZE;
685 writew(value, bus->mmio + offset);
686 }
687
688 static void ssb_ssb_write32(struct ssb_device *dev, u16 offset, u32 value)
689 {
690 struct ssb_bus *bus = dev->bus;
691
692 offset += dev->core_index * SSB_CORE_SIZE;
693 writel(value, bus->mmio + offset);
694 }
695
696 #ifdef CONFIG_SSB_BLOCKIO
697 static void ssb_ssb_block_write(struct ssb_device *dev, const void *buffer,
698 size_t count, u16 offset, u8 reg_width)
699 {
700 struct ssb_bus *bus = dev->bus;
701 void __iomem *addr;
702
703 offset += dev->core_index * SSB_CORE_SIZE;
704 addr = bus->mmio + offset;
705
706 switch (reg_width) {
707 case sizeof(u8): {
708 const u8 *buf = buffer;
709
710 while (count) {
711 __raw_writeb(*buf, addr);
712 buf++;
713 count--;
714 }
715 break;
716 }
717 case sizeof(u16): {
718 const __le16 *buf = buffer;
719
720 SSB_WARN_ON(count & 1);
721 while (count) {
722 __raw_writew((__force u16)(*buf), addr);
723 buf++;
724 count -= 2;
725 }
726 break;
727 }
728 case sizeof(u32): {
729 const __le32 *buf = buffer;
730
731 SSB_WARN_ON(count & 3);
732 while (count) {
733 __raw_writel((__force u32)(*buf), addr);
734 buf++;
735 count -= 4;
736 }
737 break;
738 }
739 default:
740 SSB_WARN_ON(1);
741 }
742 }
743 #endif /* CONFIG_SSB_BLOCKIO */
744
745 /* Ops for the plain SSB bus without a host-device (no PCI or PCMCIA). */
746 static const struct ssb_bus_ops ssb_ssb_ops = {
747 .read8 = ssb_ssb_read8,
748 .read16 = ssb_ssb_read16,
749 .read32 = ssb_ssb_read32,
750 .write8 = ssb_ssb_write8,
751 .write16 = ssb_ssb_write16,
752 .write32 = ssb_ssb_write32,
753 #ifdef CONFIG_SSB_BLOCKIO
754 .block_read = ssb_ssb_block_read,
755 .block_write = ssb_ssb_block_write,
756 #endif
757 };
758
759 static int ssb_fetch_invariants(struct ssb_bus *bus,
760 ssb_invariants_func_t get_invariants)
761 {
762 struct ssb_init_invariants iv;
763 int err;
764
765 memset(&iv, 0, sizeof(iv));
766 err = get_invariants(bus, &iv);
767 if (err)
768 goto out;
769 memcpy(&bus->boardinfo, &iv.boardinfo, sizeof(iv.boardinfo));
770 memcpy(&bus->sprom, &iv.sprom, sizeof(iv.sprom));
771 bus->has_cardbus_slot = iv.has_cardbus_slot;
772 out:
773 return err;
774 }
775
776 static int ssb_bus_register(struct ssb_bus *bus,
777 ssb_invariants_func_t get_invariants,
778 unsigned long baseaddr)
779 {
780 int err;
781
782 spin_lock_init(&bus->bar_lock);
783 INIT_LIST_HEAD(&bus->list);
784 #ifdef CONFIG_SSB_EMBEDDED
785 spin_lock_init(&bus->gpio_lock);
786 #endif
787
788 /* Powerup the bus */
789 err = ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 1);
790 if (err)
791 goto out;
792
793 /* Init SDIO-host device (if any), before the scan */
794 err = ssb_sdio_init(bus);
795 if (err)
796 goto err_disable_xtal;
797
798 ssb_buses_lock();
799 bus->busnumber = next_busnumber;
800 /* Scan for devices (cores) */
801 err = ssb_bus_scan(bus, baseaddr);
802 if (err)
803 goto err_sdio_exit;
804
805 /* Init PCI-host device (if any) */
806 err = ssb_pci_init(bus);
807 if (err)
808 goto err_unmap;
809 /* Init PCMCIA-host device (if any) */
810 err = ssb_pcmcia_init(bus);
811 if (err)
812 goto err_pci_exit;
813
814 /* Initialize basic system devices (if available) */
815 err = ssb_bus_powerup(bus, 0);
816 if (err)
817 goto err_pcmcia_exit;
818 ssb_chipcommon_init(&bus->chipco);
819 ssb_extif_init(&bus->extif);
820 ssb_mipscore_init(&bus->mipscore);
821 err = ssb_fetch_invariants(bus, get_invariants);
822 if (err) {
823 ssb_bus_may_powerdown(bus);
824 goto err_pcmcia_exit;
825 }
826 ssb_bus_may_powerdown(bus);
827
828 /* Queue it for attach.
829 * See the comment at the ssb_is_early_boot definition. */
830 list_add_tail(&bus->list, &attach_queue);
831 if (!ssb_is_early_boot) {
832 /* This is not early boot, so we must attach the bus now */
833 err = ssb_attach_queued_buses();
834 if (err)
835 goto err_dequeue;
836 }
837 next_busnumber++;
838 ssb_buses_unlock();
839
840 out:
841 return err;
842
843 err_dequeue:
844 list_del(&bus->list);
845 err_pcmcia_exit:
846 ssb_pcmcia_exit(bus);
847 err_pci_exit:
848 ssb_pci_exit(bus);
849 err_unmap:
850 ssb_iounmap(bus);
851 err_sdio_exit:
852 ssb_sdio_exit(bus);
853 err_disable_xtal:
854 ssb_buses_unlock();
855 ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 0);
856 return err;
857 }
858
859 #ifdef CONFIG_SSB_PCIHOST
860 int ssb_bus_pcibus_register(struct ssb_bus *bus, struct pci_dev *host_pci)
861 {
862 int err;
863
864 bus->bustype = SSB_BUSTYPE_PCI;
865 bus->host_pci = host_pci;
866 bus->ops = &ssb_pci_ops;
867
868 err = ssb_bus_register(bus, ssb_pci_get_invariants, 0);
869 if (!err) {
870 ssb_info("Sonics Silicon Backplane found on PCI device %s\n",
871 dev_name(&host_pci->dev));
872 } else {
873 ssb_err("Failed to register PCI version of SSB with error %d\n",
874 err);
875 }
876
877 return err;
878 }
879 EXPORT_SYMBOL(ssb_bus_pcibus_register);
880 #endif /* CONFIG_SSB_PCIHOST */
881
882 #ifdef CONFIG_SSB_PCMCIAHOST
883 int ssb_bus_pcmciabus_register(struct ssb_bus *bus,
884 struct pcmcia_device *pcmcia_dev,
885 unsigned long baseaddr)
886 {
887 int err;
888
889 bus->bustype = SSB_BUSTYPE_PCMCIA;
890 bus->host_pcmcia = pcmcia_dev;
891 bus->ops = &ssb_pcmcia_ops;
892
893 err = ssb_bus_register(bus, ssb_pcmcia_get_invariants, baseaddr);
894 if (!err) {
895 ssb_info("Sonics Silicon Backplane found on PCMCIA device %s\n",
896 pcmcia_dev->devname);
897 }
898
899 return err;
900 }
901 EXPORT_SYMBOL(ssb_bus_pcmciabus_register);
902 #endif /* CONFIG_SSB_PCMCIAHOST */
903
904 #ifdef CONFIG_SSB_SDIOHOST
905 int ssb_bus_sdiobus_register(struct ssb_bus *bus, struct sdio_func *func,
906 unsigned int quirks)
907 {
908 int err;
909
910 bus->bustype = SSB_BUSTYPE_SDIO;
911 bus->host_sdio = func;
912 bus->ops = &ssb_sdio_ops;
913 bus->quirks = quirks;
914
915 err = ssb_bus_register(bus, ssb_sdio_get_invariants, ~0);
916 if (!err) {
917 ssb_info("Sonics Silicon Backplane found on SDIO device %s\n",
918 sdio_func_id(func));
919 }
920
921 return err;
922 }
923 EXPORT_SYMBOL(ssb_bus_sdiobus_register);
924 #endif /* CONFIG_SSB_SDIOHOST */
925
926 int ssb_bus_ssbbus_register(struct ssb_bus *bus, unsigned long baseaddr,
927 ssb_invariants_func_t get_invariants)
928 {
929 int err;
930
931 bus->bustype = SSB_BUSTYPE_SSB;
932 bus->ops = &ssb_ssb_ops;
933
934 err = ssb_bus_register(bus, get_invariants, baseaddr);
935 if (!err) {
936 ssb_info("Sonics Silicon Backplane found at address 0x%08lX\n",
937 baseaddr);
938 }
939
940 return err;
941 }
942
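/**
 * __ssb_driver_register - Register an SSB device driver
 * @drv: Driver to register. drv->name and drv->id_table must be set.
 * @owner: Owning module. Drivers normally use the ssb_driver_register()
 *	wrapper, which passes THIS_MODULE.
 *
 * A rough usage sketch; the foo_* names are placeholders:
 *
 *	static const struct ssb_device_id foo_ssb_tbl[] = {
 *		SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, SSB_ANY_REV),
 *		{},
 *	};
 *
 *	static struct ssb_driver foo_ssb_driver = {
 *		.name		= KBUILD_MODNAME,
 *		.id_table	= foo_ssb_tbl,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *
 *	err = ssb_driver_register(&foo_ssb_driver);
 */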
943 int __ssb_driver_register(struct ssb_driver *drv, struct module *owner)
944 {
945 drv->drv.name = drv->name;
946 drv->drv.bus = &ssb_bustype;
947 drv->drv.owner = owner;
948
949 return driver_register(&drv->drv);
950 }
951 EXPORT_SYMBOL(__ssb_driver_register);
952
953 void ssb_driver_unregister(struct ssb_driver *drv)
954 {
955 driver_unregister(&drv->drv);
956 }
957 EXPORT_SYMBOL(ssb_driver_unregister);
958
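/**
 * ssb_set_devtypedata - Set device-type data on all matching cores
 * @dev: Any core with the vendor and core ID of interest.
 * @data: Pointer to store in devtypedata.
 *
 * Stores @data in the devtypedata field of every core on the bus that
 * has the same vendor and core ID as @dev.
 */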
959 void ssb_set_devtypedata(struct ssb_device *dev, void *data)
960 {
961 struct ssb_bus *bus = dev->bus;
962 struct ssb_device *ent;
963 int i;
964
965 for (i = 0; i < bus->nr_devices; i++) {
966 ent = &(bus->devices[i]);
967 if (ent->id.vendor != dev->id.vendor)
968 continue;
969 if (ent->id.coreid != dev->id.coreid)
970 continue;
971
972 ent->devtypedata = data;
973 }
974 }
975 EXPORT_SYMBOL(ssb_set_devtypedata);
976
977 static u32 clkfactor_f6_resolve(u32 v)
978 {
979 /* map the magic values */
980 switch (v) {
981 case SSB_CHIPCO_CLK_F6_2:
982 return 2;
983 case SSB_CHIPCO_CLK_F6_3:
984 return 3;
985 case SSB_CHIPCO_CLK_F6_4:
986 return 4;
987 case SSB_CHIPCO_CLK_F6_5:
988 return 5;
989 case SSB_CHIPCO_CLK_F6_6:
990 return 6;
991 case SSB_CHIPCO_CLK_F6_7:
992 return 7;
993 }
994 return 0;
995 }
996
997 /* Calculate the speed the backplane would run at for a given set of clockcontrol values */
998 u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m)
999 {
1000 u32 n1, n2, clock, m1, m2, m3, mc;
1001
1002 n1 = (n & SSB_CHIPCO_CLK_N1);
1003 n2 = ((n & SSB_CHIPCO_CLK_N2) >> SSB_CHIPCO_CLK_N2_SHIFT);
1004
1005 switch (plltype) {
1006 case SSB_PLLTYPE_6: /* 100/200 or 120/240 only */
1007 if (m & SSB_CHIPCO_CLK_T6_MMASK)
1008 return SSB_CHIPCO_CLK_T6_M1;
1009 return SSB_CHIPCO_CLK_T6_M0;
1010 case SSB_PLLTYPE_1: /* 48Mhz base, 3 dividers */
1011 case SSB_PLLTYPE_3: /* 25Mhz, 2 dividers */
1012 case SSB_PLLTYPE_4: /* 48Mhz, 4 dividers */
1013 case SSB_PLLTYPE_7: /* 25Mhz, 4 dividers */
1014 n1 = clkfactor_f6_resolve(n1);
1015 n2 += SSB_CHIPCO_CLK_F5_BIAS;
1016 break;
1017 case SSB_PLLTYPE_2: /* 48Mhz, 4 dividers */
1018 n1 += SSB_CHIPCO_CLK_T2_BIAS;
1019 n2 += SSB_CHIPCO_CLK_T2_BIAS;
1020 SSB_WARN_ON(!((n1 >= 2) && (n1 <= 7)));
1021 SSB_WARN_ON(!((n2 >= 5) && (n2 <= 23)));
1022 break;
1023 case SSB_PLLTYPE_5: /* 25Mhz, 4 dividers */
1024 return 100000000;
1025 default:
1026 SSB_WARN_ON(1);
1027 }
1028
1029 switch (plltype) {
1030 case SSB_PLLTYPE_3: /* 25Mhz, 2 dividers */
1031 case SSB_PLLTYPE_7: /* 25Mhz, 4 dividers */
1032 clock = SSB_CHIPCO_CLK_BASE2 * n1 * n2;
1033 break;
1034 default:
1035 clock = SSB_CHIPCO_CLK_BASE1 * n1 * n2;
1036 }
1037 if (!clock)
1038 return 0;
1039
1040 m1 = (m & SSB_CHIPCO_CLK_M1);
1041 m2 = ((m & SSB_CHIPCO_CLK_M2) >> SSB_CHIPCO_CLK_M2_SHIFT);
1042 m3 = ((m & SSB_CHIPCO_CLK_M3) >> SSB_CHIPCO_CLK_M3_SHIFT);
1043 mc = ((m & SSB_CHIPCO_CLK_MC) >> SSB_CHIPCO_CLK_MC_SHIFT);
1044
1045 switch (plltype) {
1046 case SSB_PLLTYPE_1: /* 48Mhz base, 3 dividers */
1047 case SSB_PLLTYPE_3: /* 25Mhz, 2 dividers */
1048 case SSB_PLLTYPE_4: /* 48Mhz, 4 dividers */
1049 case SSB_PLLTYPE_7: /* 25Mhz, 4 dividers */
1050 m1 = clkfactor_f6_resolve(m1);
1051 if ((plltype == SSB_PLLTYPE_1) ||
1052 (plltype == SSB_PLLTYPE_3))
1053 m2 += SSB_CHIPCO_CLK_F5_BIAS;
1054 else
1055 m2 = clkfactor_f6_resolve(m2);
1056 m3 = clkfactor_f6_resolve(m3);
1057
1058 switch (mc) {
1059 case SSB_CHIPCO_CLK_MC_BYPASS:
1060 return clock;
1061 case SSB_CHIPCO_CLK_MC_M1:
1062 return (clock / m1);
1063 case SSB_CHIPCO_CLK_MC_M1M2:
1064 return (clock / (m1 * m2));
1065 case SSB_CHIPCO_CLK_MC_M1M2M3:
1066 return (clock / (m1 * m2 * m3));
1067 case SSB_CHIPCO_CLK_MC_M1M3:
1068 return (clock / (m1 * m3));
1069 }
1070 return 0;
1071 case SSB_PLLTYPE_2:
1072 m1 += SSB_CHIPCO_CLK_T2_BIAS;
1073 m2 += SSB_CHIPCO_CLK_T2M2_BIAS;
1074 m3 += SSB_CHIPCO_CLK_T2_BIAS;
1075 SSB_WARN_ON(!((m1 >= 2) && (m1 <= 7)));
1076 SSB_WARN_ON(!((m2 >= 3) && (m2 <= 10)));
1077 SSB_WARN_ON(!((m3 >= 2) && (m3 <= 7)));
1078
1079 if (!(mc & SSB_CHIPCO_CLK_T2MC_M1BYP))
1080 clock /= m1;
1081 if (!(mc & SSB_CHIPCO_CLK_T2MC_M2BYP))
1082 clock /= m2;
1083 if (!(mc & SSB_CHIPCO_CLK_T2MC_M3BYP))
1084 clock /= m3;
1085 return clock;
1086 default:
1087 SSB_WARN_ON(1);
1088 }
1089 return 0;
1090 }
1091
1092 /* Get the current speed the backplane is running at */
1093 u32 ssb_clockspeed(struct ssb_bus *bus)
1094 {
1095 u32 rate;
1096 u32 plltype;
1097 u32 clkctl_n, clkctl_m;
1098
1099 if (bus->chipco.capabilities & SSB_CHIPCO_CAP_PMU)
1100 return ssb_pmu_get_controlclock(&bus->chipco);
1101
1102 if (ssb_extif_available(&bus->extif))
1103 ssb_extif_get_clockcontrol(&bus->extif, &plltype,
1104 &clkctl_n, &clkctl_m);
1105 else if (bus->chipco.dev)
1106 ssb_chipco_get_clockcontrol(&bus->chipco, &plltype,
1107 &clkctl_n, &clkctl_m);
1108 else
1109 return 0;
1110
1111 if (bus->chip_id == 0x5365) {
1112 rate = 100000000;
1113 } else {
1114 rate = ssb_calc_clock_rate(plltype, clkctl_n, clkctl_m);
1115 if (plltype == SSB_PLLTYPE_3) /* 25Mhz, 2 dividers */
1116 rate /= 2;
1117 }
1118
1119 return rate;
1120 }
1121 EXPORT_SYMBOL(ssb_clockspeed);
1122
1123 static u32 ssb_tmslow_reject_bitmask(struct ssb_device *dev)
1124 {
1125 u32 rev = ssb_read32(dev, SSB_IDLOW) & SSB_IDLOW_SSBREV;
1126
1127 /* The REJECT bit seems to be different for Backplane rev 2.3 */
1128 switch (rev) {
1129 case SSB_IDLOW_SSBREV_22:
1130 case SSB_IDLOW_SSBREV_24:
1131 case SSB_IDLOW_SSBREV_26:
1132 return SSB_TMSLOW_REJECT;
1133 case SSB_IDLOW_SSBREV_23:
1134 return SSB_TMSLOW_REJECT_23;
1135 case SSB_IDLOW_SSBREV_25: /* TODO - find the proper REJECT bit */
1136 case SSB_IDLOW_SSBREV_27: /* same here */
1137 return SSB_TMSLOW_REJECT; /* this is a guess */
1138 case SSB_IDLOW_SSBREV:
1139 break;
1140 default:
1141 WARN(1, "ssb: Backplane Revision 0x%.8X\n", rev);
1142 }
1143 return (SSB_TMSLOW_REJECT | SSB_TMSLOW_REJECT_23);
1144 }
1145
1146 int ssb_device_is_enabled(struct ssb_device *dev)
1147 {
1148 u32 val;
1149 u32 reject;
1150
1151 reject = ssb_tmslow_reject_bitmask(dev);
1152 val = ssb_read32(dev, SSB_TMSLOW);
1153 val &= SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET | reject;
1154
1155 return (val == SSB_TMSLOW_CLOCK);
1156 }
1157 EXPORT_SYMBOL(ssb_device_is_enabled);
1158
1159 static void ssb_flush_tmslow(struct ssb_device *dev)
1160 {
1161 /* Make _really_ sure the device has finished the TMSLOW
1162 * register write transaction, as we risk running into
1163 * a machine check exception otherwise.
1164 * Do this by reading the register back to commit the
1165 * PCI write and delay an additional usec for the device
1166 * to react to the change. */
1167 ssb_read32(dev, SSB_TMSLOW);
1168 udelay(1);
1169 }
1170
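/**
 * ssb_device_enable - Reset and enable a core
 * @dev: The core to enable.
 * @core_specific_flags: Additional TMSLOW flags for this core type.
 *
 * Puts the core into reset with its clock forced on, clears any
 * serror/timeout state, then takes it out of reset and releases the
 * forced gated clock.
 */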
1171 void ssb_device_enable(struct ssb_device *dev, u32 core_specific_flags)
1172 {
1173 u32 val;
1174
1175 ssb_device_disable(dev, core_specific_flags);
1176 ssb_write32(dev, SSB_TMSLOW,
1177 SSB_TMSLOW_RESET | SSB_TMSLOW_CLOCK |
1178 SSB_TMSLOW_FGC | core_specific_flags);
1179 ssb_flush_tmslow(dev);
1180
1181 /* Clear SERR if set. This is a hw bug workaround. */
1182 if (ssb_read32(dev, SSB_TMSHIGH) & SSB_TMSHIGH_SERR)
1183 ssb_write32(dev, SSB_TMSHIGH, 0);
1184
1185 val = ssb_read32(dev, SSB_IMSTATE);
1186 if (val & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO)) {
1187 val &= ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO);
1188 ssb_write32(dev, SSB_IMSTATE, val);
1189 }
1190
1191 ssb_write32(dev, SSB_TMSLOW,
1192 SSB_TMSLOW_CLOCK | SSB_TMSLOW_FGC |
1193 core_specific_flags);
1194 ssb_flush_tmslow(dev);
1195
1196 ssb_write32(dev, SSB_TMSLOW, SSB_TMSLOW_CLOCK |
1197 core_specific_flags);
1198 ssb_flush_tmslow(dev);
1199 }
1200 EXPORT_SYMBOL(ssb_device_enable);
1201
1202 /* Wait for a bitmask in a register to get set or cleared.
1203 * The timeout is in units of ten microseconds. */
1204 static int ssb_wait_bits(struct ssb_device *dev, u16 reg, u32 bitmask,
1205 int timeout, int set)
1206 {
1207 int i;
1208 u32 val;
1209
1210 for (i = 0; i < timeout; i++) {
1211 val = ssb_read32(dev, reg);
1212 if (set) {
1213 if ((val & bitmask) == bitmask)
1214 return 0;
1215 } else {
1216 if (!(val & bitmask))
1217 return 0;
1218 }
1219 udelay(10);
1220 }
1221 ssb_err("Timeout waiting for bitmask %08X on "
1222 "register %04X to %s\n",
1223 bitmask, reg, (set ? "set" : "clear"));
1224
1225 return -ETIMEDOUT;
1226 }
1227
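/**
 * ssb_device_disable - Disable a core and put it into reset
 * @dev: The core to disable.
 * @core_specific_flags: Additional TMSLOW flags for this core type.
 *
 * Rejects further backplane transactions, waits for the core to go
 * idle and then places it into reset with its clock disabled.
 */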
1228 void ssb_device_disable(struct ssb_device *dev, u32 core_specific_flags)
1229 {
1230 u32 reject, val;
1231
1232 if (ssb_read32(dev, SSB_TMSLOW) & SSB_TMSLOW_RESET)
1233 return;
1234
1235 reject = ssb_tmslow_reject_bitmask(dev);
1236
1237 if (ssb_read32(dev, SSB_TMSLOW) & SSB_TMSLOW_CLOCK) {
1238 ssb_write32(dev, SSB_TMSLOW, reject | SSB_TMSLOW_CLOCK);
1239 ssb_wait_bits(dev, SSB_TMSLOW, reject, 1000, 1);
1240 ssb_wait_bits(dev, SSB_TMSHIGH, SSB_TMSHIGH_BUSY, 1000, 0);
1241
1242 if (ssb_read32(dev, SSB_IDLOW) & SSB_IDLOW_INITIATOR) {
1243 val = ssb_read32(dev, SSB_IMSTATE);
1244 val |= SSB_IMSTATE_REJECT;
1245 ssb_write32(dev, SSB_IMSTATE, val);
1246 ssb_wait_bits(dev, SSB_IMSTATE, SSB_IMSTATE_BUSY, 1000,
1247 0);
1248 }
1249
1250 ssb_write32(dev, SSB_TMSLOW,
1251 SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
1252 reject | SSB_TMSLOW_RESET |
1253 core_specific_flags);
1254 ssb_flush_tmslow(dev);
1255
1256 if (ssb_read32(dev, SSB_IDLOW) & SSB_IDLOW_INITIATOR) {
1257 val = ssb_read32(dev, SSB_IMSTATE);
1258 val &= ~SSB_IMSTATE_REJECT;
1259 ssb_write32(dev, SSB_IMSTATE, val);
1260 }
1261 }
1262
1263 ssb_write32(dev, SSB_TMSLOW,
1264 reject | SSB_TMSLOW_RESET |
1265 core_specific_flags);
1266 ssb_flush_tmslow(dev);
1267 }
1268 EXPORT_SYMBOL(ssb_device_disable);
1269
1270 /* Some chipsets need routing known for PCIe and 64-bit DMA */
1271 static bool ssb_dma_translation_special_bit(struct ssb_device *dev)
1272 {
1273 u16 chip_id = dev->bus->chip_id;
1274
1275 if (dev->id.coreid == SSB_DEV_80211) {
1276 return (chip_id == 0x4322 || chip_id == 43221 ||
1277 chip_id == 43231 || chip_id == 43222);
1278 }
1279
1280 return false;
1281 }
1282
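/**
 * ssb_dma_translation - Get the DMA address translation for a device
 * @dev: Device that will perform DMA.
 *
 * Returns the address-translation value for the host bus type, i.e.
 * the bits a DMA engine driver is expected to OR into device-visible
 * DMA addresses (0 on a native SSB bus).
 */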
1283 u32 ssb_dma_translation(struct ssb_device *dev)
1284 {
1285 switch (dev->bus->bustype) {
1286 case SSB_BUSTYPE_SSB:
1287 return 0;
1288 case SSB_BUSTYPE_PCI:
1289 if (pci_is_pcie(dev->bus->host_pci) &&
1290 ssb_read32(dev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64) {
1291 return SSB_PCIE_DMA_H32;
1292 } else {
1293 if (ssb_dma_translation_special_bit(dev))
1294 return SSB_PCIE_DMA_H32;
1295 else
1296 return SSB_PCI_DMA;
1297 }
1298 default:
1299 __ssb_dma_not_implemented(dev);
1300 }
1301 return 0;
1302 }
1303 EXPORT_SYMBOL(ssb_dma_translation);
1304
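/**
 * ssb_bus_may_powerdown - Power down the bus, if possible
 * @bus: The bus.
 *
 * Switches ChipCommon to slow clock mode and turns off the crystal
 * and PLL, unless other cores on the bus may still need to run
 * (as on SoC buses). Returns 0 on success or a negative error code.
 */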
1305 int ssb_bus_may_powerdown(struct ssb_bus *bus)
1306 {
1307 struct ssb_chipcommon *cc;
1308 int err = 0;
1309
1310 /* On buses where more than one core may be working
1311 * at a time, we must not powerdown stuff if there are
1312 * still cores that may want to run. */
1313 if (bus->bustype == SSB_BUSTYPE_SSB)
1314 goto out;
1315
1316 cc = &bus->chipco;
1317
1318 if (!cc->dev)
1319 goto out;
1320 if (cc->dev->id.revision < 5)
1321 goto out;
1322
1323 ssb_chipco_set_clockmode(cc, SSB_CLKMODE_SLOW);
1324 err = ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 0);
1325 if (err)
1326 goto error;
1327 out:
1328 #ifdef CONFIG_SSB_DEBUG
1329 bus->powered_up = 0;
1330 #endif
1331 return err;
1332 error:
1333 ssb_err("Bus powerdown failed\n");
1334 goto out;
1335 }
1336 EXPORT_SYMBOL(ssb_bus_may_powerdown);
1337
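/**
 * ssb_bus_powerup - Power up the bus
 * @bus: The bus.
 * @dynamic_pctl: Use dynamic power control instead of forcing the
 *	fast clock.
 *
 * Turns on the crystal oscillator and PLL and selects the clock mode.
 * Returns 0 on success or a negative error code.
 */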
1338 int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl)
1339 {
1340 int err;
1341 enum ssb_clkmode mode;
1342
1343 err = ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 1);
1344 if (err)
1345 goto error;
1346
1347 #ifdef CONFIG_SSB_DEBUG
1348 bus->powered_up = 1;
1349 #endif
1350
1351 mode = dynamic_pctl ? SSB_CLKMODE_DYNAMIC : SSB_CLKMODE_FAST;
1352 ssb_chipco_set_clockmode(&bus->chipco, mode);
1353
1354 return 0;
1355 error:
1356 ssb_err("Bus powerup failed\n");
1357 return err;
1358 }
1359 EXPORT_SYMBOL(ssb_bus_powerup);
1360
1361 static void ssb_broadcast_value(struct ssb_device *dev,
1362 u32 address, u32 data)
1363 {
1364 #ifdef CONFIG_SSB_DRIVER_PCICORE
1365 /* This is used for both, PCI and ChipCommon core, so be careful. */
1366 BUILD_BUG_ON(SSB_PCICORE_BCAST_ADDR != SSB_CHIPCO_BCAST_ADDR);
1367 BUILD_BUG_ON(SSB_PCICORE_BCAST_DATA != SSB_CHIPCO_BCAST_DATA);
1368 #endif
1369
1370 ssb_write32(dev, SSB_CHIPCO_BCAST_ADDR, address);
1371 ssb_read32(dev, SSB_CHIPCO_BCAST_ADDR); /* flush */
1372 ssb_write32(dev, SSB_CHIPCO_BCAST_DATA, data);
1373 ssb_read32(dev, SSB_CHIPCO_BCAST_DATA); /* flush */
1374 }
1375
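/**
 * ssb_commit_settings - Commit any cached register settings
 * @bus: The bus.
 *
 * Issues a broadcast write through the ChipCommon (or PCI core) to
 * force an update of the cached registers on all cores.
 */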
1376 void ssb_commit_settings(struct ssb_bus *bus)
1377 {
1378 struct ssb_device *dev;
1379
1380 #ifdef CONFIG_SSB_DRIVER_PCICORE
1381 dev = bus->chipco.dev ? bus->chipco.dev : bus->pcicore.dev;
1382 #else
1383 dev = bus->chipco.dev;
1384 #endif
1385 if (WARN_ON(!dev))
1386 return;
1387 /* This forces an update of the cached registers. */
1388 ssb_broadcast_value(dev, 0xFD8, 0);
1389 }
1390 EXPORT_SYMBOL(ssb_commit_settings);
1391
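/**
 * ssb_admatch_base - Extract the base address from an ADMATCH register value
 * @adm: Raw SSB_ADMATCH register value.
 */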
1392 u32 ssb_admatch_base(u32 adm)
1393 {
1394 u32 base = 0;
1395
1396 switch (adm & SSB_ADM_TYPE) {
1397 case SSB_ADM_TYPE0:
1398 base = (adm & SSB_ADM_BASE0);
1399 break;
1400 case SSB_ADM_TYPE1:
1401 SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
1402 base = (adm & SSB_ADM_BASE1);
1403 break;
1404 case SSB_ADM_TYPE2:
1405 SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
1406 base = (adm & SSB_ADM_BASE2);
1407 break;
1408 default:
1409 SSB_WARN_ON(1);
1410 }
1411
1412 return base;
1413 }
1414 EXPORT_SYMBOL(ssb_admatch_base);
1415
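/**
 * ssb_admatch_size - Extract the window size, in bytes, from an ADMATCH
 *	register value
 * @adm: Raw SSB_ADMATCH register value.
 */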
1416 u32 ssb_admatch_size(u32 adm)
1417 {
1418 u32 size = 0;
1419
1420 switch (adm & SSB_ADM_TYPE) {
1421 case SSB_ADM_TYPE0:
1422 size = ((adm & SSB_ADM_SZ0) >> SSB_ADM_SZ0_SHIFT);
1423 break;
1424 case SSB_ADM_TYPE1:
1425 SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
1426 size = ((adm & SSB_ADM_SZ1) >> SSB_ADM_SZ1_SHIFT);
1427 break;
1428 case SSB_ADM_TYPE2:
1429 SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */
1430 size = ((adm & SSB_ADM_SZ2) >> SSB_ADM_SZ2_SHIFT);
1431 break;
1432 default:
1433 SSB_WARN_ON(1);
1434 }
1435 size = (1 << (size + 1));
1436
1437 return size;
1438 }
1439 EXPORT_SYMBOL(ssb_admatch_size);
1440
1441 static int __init ssb_modinit(void)
1442 {
1443 int err;
1444
1445 /* See the comment at the ssb_is_early_boot definition */
1446 ssb_is_early_boot = 0;
1447 err = bus_register(&ssb_bustype);
1448 if (err)
1449 return err;
1450
1451 /* Maybe we already registered some buses at early boot.
1452 * Check for this and attach them
1453 */
1454 ssb_buses_lock();
1455 err = ssb_attach_queued_buses();
1456 ssb_buses_unlock();
1457 if (err) {
1458 bus_unregister(&ssb_bustype);
1459 goto out;
1460 }
1461
1462 err = b43_pci_ssb_bridge_init();
1463 if (err) {
1464 ssb_err("Broadcom 43xx PCI-SSB-bridge initialization failed\n");
1465 /* don't fail SSB init because of this */
1466 err = 0;
1467 }
1468 err = ssb_gige_init();
1469 if (err) {
1470 ssb_err("SSB Broadcom Gigabit Ethernet driver initialization failed\n");
1471 /* don't fail SSB init because of this */
1472 err = 0;
1473 }
1474 out:
1475 return err;
1476 }
1477 /* ssb must be initialized after PCI but before the ssb drivers.
1478 * That means we must use some initcall between subsys_initcall
1479 * and device_initcall. */
1480 fs_initcall(ssb_modinit);
1481
1482 static void __exit ssb_modexit(void)
1483 {
1484 ssb_gige_exit();
1485 b43_pci_ssb_bridge_exit();
1486 bus_unregister(&ssb_bustype);
1487 }
1488 module_exit(ssb_modexit)