Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * cbe_regs.c
3 *
4 * Accessor routines for the various MMIO register blocks of the CBE
5 *
6 * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
7 */
8
9#include <linux/percpu.h>
10#include <linux/types.h>
11#include <linux/module.h>
12
13#include <asm/io.h>
14#include <asm/pgtable.h>
15#include <asm/prom.h>
16#include <asm/ptrace.h>
17
18#include "cbe_regs.h"
19
20/*
21 * Current implementation uses "cpu" nodes. We build our own mapping
22 * array of cpu numbers to cpu nodes locally for now to allow interrupt
23 * time code to have a fast path rather than call of_get_cpu_node(). If
 * we implement cpu hotplug, we'll have to install an appropriate notifier
25 * in order to release references to the cpu going away
26 */
/*
 * Per-chip register map: one entry per physical Cell BE, holding the
 * ioremapped MMIO register blocks discovered from the device tree by
 * cbe_regs_init().
 */
static struct cbe_regs_map
{
	struct device_node *cpu_node;		/* "cpu" node this map was built from */
	struct cbe_pmd_regs __iomem *pmd_regs;	/* mapped from the "pervasive" property */
	struct cbe_iic_regs __iomem *iic_regs;	/* mapped from the "iic" property */
	struct cbe_mic_tm_regs __iomem *mic_tm_regs;	/* mapped from the "mic-tm" property */
	struct cbe_pmd_shadow_regs pmd_shadow_regs;	/* in-memory shadow of PMD state */
} cbe_regs_maps[MAX_CBE];
/* Number of valid entries in cbe_regs_maps, set by cbe_regs_init() */
static int cbe_regs_map_count;
36
/*
 * Per-logical-cpu fast lookup table (indexed by cpu number), so
 * interrupt-time code can reach its chip's registers without calling
 * of_get_cpu_node(). Filled in by cbe_regs_init().
 */
static struct cbe_thread_map
{
	struct device_node *cpu_node;	/* of_get_cpu_node() result for this cpu */
	struct cbe_regs_map *regs;	/* owning chip's map; NULL if cpu unknown */
} cbe_thread_map[NR_CPUS];
42
43static struct cbe_regs_map *cbe_find_map(struct device_node *np)
44{
45 int i;
46 struct device_node *tmp_np;
47
48 if (strcasecmp(np->type, "spe") == 0) {
49 if (np->data == NULL) {
50 /* walk up path until cpu node was found */
51 tmp_np = np->parent;
52 while (tmp_np != NULL && strcasecmp(tmp_np->type, "cpu") != 0)
53 tmp_np = tmp_np->parent;
54
55 np->data = cbe_find_map(tmp_np);
56 }
57 return np->data;
58 }
59
60 for (i = 0; i < cbe_regs_map_count; i++)
61 if (cbe_regs_maps[i].cpu_node == np)
62 return &cbe_regs_maps[i];
63 return NULL;
64}
65
66struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
67{
68 struct cbe_regs_map *map = cbe_find_map(np);
69 if (map == NULL)
70 return NULL;
71 return map->pmd_regs;
72}
73EXPORT_SYMBOL_GPL(cbe_get_pmd_regs);
74
75struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
76{
77 struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
78 if (map == NULL)
79 return NULL;
80 return map->pmd_regs;
81}
82EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs);
83
84struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np)
85{
86 struct cbe_regs_map *map = cbe_find_map(np);
87 if (map == NULL)
88 return NULL;
89 return &map->pmd_shadow_regs;
90}
91
92struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu)
93{
94 struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
95 if (map == NULL)
96 return NULL;
97 return &map->pmd_shadow_regs;
98}
99
100struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
101{
102 struct cbe_regs_map *map = cbe_find_map(np);
103 if (map == NULL)
104 return NULL;
105 return map->iic_regs;
106}
107
108struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
109{
110 struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
111 if (map == NULL)
112 return NULL;
113 return map->iic_regs;
114}
115
116struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np)
117{
118 struct cbe_regs_map *map = cbe_find_map(np);
119 if (map == NULL)
120 return NULL;
121 return map->mic_tm_regs;
122}
123
124struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
125{
126 struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
127 if (map == NULL)
128 return NULL;
129 return map->mic_tm_regs;
130}
131EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);
132
133/* FIXME
134 * This is little more than a stub at the moment. It should be
135 * fleshed out so that it works for both SMT and non-SMT, no
136 * matter if the passed cpu is odd or even.
137 * For SMT enabled, returns 0 for even-numbered cpu; otherwise 1.
138 * For SMT disabled, returns 0 for all cpus.
139 */
140u32 cbe_get_hw_thread_id(int cpu)
141{
142 return (cpu & 1);
143}
144EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);
145
146void __init cbe_regs_init(void)
147{
148 int i;
149 struct device_node *cpu;
150
151 /* Build local fast map of CPUs */
152 for_each_possible_cpu(i)
153 cbe_thread_map[i].cpu_node = of_get_cpu_node(i, NULL);
154
155 /* Find maps for each device tree CPU */
156 for_each_node_by_type(cpu, "cpu") {
157 struct cbe_regs_map *map = &cbe_regs_maps[cbe_regs_map_count++];
158
159 /* That hack must die die die ! */
160 const struct address_prop {
161 unsigned long address;
162 unsigned int len;
163 } __attribute__((packed)) *prop;
164
165
166 if (cbe_regs_map_count > MAX_CBE) {
167 printk(KERN_ERR "cbe_regs: More BE chips than supported"
168 "!\n");
169 cbe_regs_map_count--;
170 return;
171 }
172 map->cpu_node = cpu;
173 for_each_possible_cpu(i)
174 if (cbe_thread_map[i].cpu_node == cpu)
175 cbe_thread_map[i].regs = map;
176
177 prop = get_property(cpu, "pervasive", NULL);
178 if (prop != NULL)
179 map->pmd_regs = ioremap(prop->address, prop->len);
180
181 prop = get_property(cpu, "iic", NULL);
182 if (prop != NULL)
183 map->iic_regs = ioremap(prop->address, prop->len);
184
185 prop = (struct address_prop *)get_property(cpu, "mic-tm",
186 NULL);
187 if (prop != NULL)
188 map->mic_tm_regs = ioremap(prop->address, prop->len);
189 }
190}
191