/*
 * cbe_regs.c
 *
 * Accessor routines for the various MMIO register blocks of the CBE
 *
 * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
 */


#include <linux/config.h>
#include <linux/percpu.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>

#include "cbe_regs.h"

#define MAX_CBE 2

/*
 * The current implementation uses "cpu" nodes. We build our own mapping
 * array of cpu numbers to cpu nodes locally for now to allow interrupt
 * time code to have a fast path rather than call of_get_cpu_node(). If
 * we implement cpu hotplug, we'll have to install an appropriate notifier
 * in order to release references to the cpu going away.
 */
static struct cbe_regs_map
{
	struct device_node *cpu_node;
	struct cbe_pmd_regs __iomem *pmd_regs;
	struct cbe_iic_regs __iomem *iic_regs;
} cbe_regs_maps[MAX_CBE];
static int cbe_regs_map_count;

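/* Per-thread map from logical cpu number to its BE chip's register map */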
static struct cbe_thread_map
{
	struct device_node *cpu_node;
	struct cbe_regs_map *regs;
} cbe_thread_map[NR_CPUS];

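/* Look up the register map whose device tree "cpu" node matches np */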
static struct cbe_regs_map *cbe_find_map(struct device_node *np)
{
	int i;

	for (i = 0; i < cbe_regs_map_count; i++)
		if (cbe_regs_maps[i].cpu_node == np)
			return &cbe_regs_maps[i];
	return NULL;
}

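/* Return the pervasive (PMD) register block for a device tree cpu node */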
struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}

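/* Fast path: pervasive (PMD) register block for a logical cpu number */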
struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}

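/* Return the internal interrupt controller (IIC) register block for a cpu node */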
struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}

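/* Fast path: IIC register block for a logical cpu number */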
struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}

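/*
 * Walk the device tree "cpu" nodes, build the cpu number -> register map
 * tables and ioremap the pervasive and IIC register blocks of each BE chip.
 */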
void __init cbe_regs_init(void)
{
	int i;
	struct device_node *cpu;

	/* Build local fast map of CPUs */
	for_each_possible_cpu(i)
		cbe_thread_map[i].cpu_node = of_get_cpu_node(i, NULL);

	/* Find maps for each device tree CPU */
	for_each_node_by_type(cpu, "cpu") {
		struct cbe_regs_map *map = &cbe_regs_maps[cbe_regs_map_count++];

		/* That hack must die die die ! */
		struct address_prop {
			unsigned long address;
			unsigned int len;
		} __attribute__((packed)) *prop;

		if (cbe_regs_map_count > MAX_CBE) {
			printk(KERN_ERR "cbe_regs: More BE chips than supported!\n");
			cbe_regs_map_count--;
			return;
		}
		map->cpu_node = cpu;
		for_each_possible_cpu(i)
			if (cbe_thread_map[i].cpu_node == cpu)
				cbe_thread_map[i].regs = map;

		prop = (struct address_prop *)get_property(cpu, "pervasive", NULL);
		if (prop != NULL)
			map->pmd_regs = ioremap(prop->address, prop->len);

		prop = (struct address_prop *)get_property(cpu, "iic", NULL);
		if (prop != NULL)
			map->iic_regs = ioremap(prop->address, prop->len);
	}
}
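/*
 * Usage sketch (illustrative only): interrupt-time code can resolve its
 * register block with the fast-path accessors above instead of walking
 * the device tree. smp_processor_id() and in_be64()/out_be64() come from
 * the usual powerpc headers; no specific cbe_pmd_regs field is assumed.
 *
 *	struct cbe_pmd_regs __iomem *pmd;
 *
 *	pmd = cbe_get_cpu_pmd_regs(smp_processor_id());
 *	if (pmd == NULL)
 *		return;
 *	... MMIO accesses via in_be64()/out_be64() on pmd ...
 */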