x86/debug: Add KERN_<LEVEL> to bare printks, convert printks to pr_<level>
arch/x86/kernel/amd_nb.c
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

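/*
 * Iterate the global PCI device list starting at @dev and return the next
 * device matching an entry in @ids, or NULL when the list is exhausted.
 * The caller passes the previous result back in to continue the scan.
 */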
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* some CPU families (e.g. family 0x11) do not support GART */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of errata E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

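/*
 * Illustrative usage sketch, not code from this file: a typical caller
 * enumerates the cached northbridges roughly as follows (the function
 * setup_node() and the error handling are hypothetical):
 *
 *	if (amd_cache_northbridges() < 0)
 *		return -ENODEV;
 *	for (i = 0; i < amd_nb_num(); i++)
 *		setup_node(node_to_amd_nb(i)->misc);
 */
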
/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

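/*
 * Worked example (values assumed for illustration): the argument packs the
 * PCI device ID into the upper 16 bits and the vendor ID into the lower 16
 * bits, so a K8 misc function at vendor 0x1022, device 0x1103 would arrive
 * as early_is_amd_nb(0x11031022).
 */
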
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

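/*
 * Worked example with assumed numbers: if segn_busn_bits reads back as 8,
 * the window spans 1ULL << (8 + 20) = 256 MB, i.e. 256 buses with 1 MB of
 * config space each, and res->end becomes base + 0x0fffffff.
 */
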
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	cuid = cpu_data(cpu).compute_unit_id;
	return (mask >> (4 * cuid)) & 0xf;
}

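/*
 * Illustrative decode (register layout as used above): 0x1d4 holds one
 * 4-bit subcache mask per compute unit, so a raw value of 0x0f30 yields
 * 0x3 for compute unit 1 (bits 4-7) and 0xf for compute unit 2 (bits 8-11).
 */
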
int amd_set_subcaches(int cpu, int mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).compute_unit_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

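/*
 * Hypothetical usage sketch: to leave only subcaches 0 and 1 enabled for
 * the compute unit owning CPU 4, a caller would do something like
 *
 *	err = amd_set_subcaches(4, 0x3);
 *
 * where the low four mask bits select the subcaches that stay enabled.
 */
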
static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

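/*
 * flush_words[] caches each northbridge's GART flush word (PCI config
 * offset 0x9c; the register name is an assumption, the offset is what the
 * code uses). amd_flush_garts() below writes the cached word back with
 * bit 0 set to request a flush, then polls until the hardware clears it.
 */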
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		pr_notice("Cannot enumerate AMD northbridges\n");

	if (amd_cache_gart() < 0)
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);