/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

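/* Family 17h root complex and Data Fabric function 4 device IDs, per model. */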
#define PCI_DEVICE_ID_AMD_17H_ROOT      0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480
#define PCI_DEVICE_ID_AMD_17H_DF_F4     0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

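/* One GART flush word per northbridge, cached by amd_cache_gart(). */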
static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
        {}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704

const struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
        {}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
        {}
};

static const struct pci_device_id hygon_root_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
        {}
};

const struct pci_device_id hygon_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
        {}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
        {}
};

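/* { bus, dev_base, dev_limit } ranges to scan for K8 northbridge devices. */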
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
        { 0x00, 0x18, 0x20 },
        { 0xff, 0x00, 0x20 },
        { 0xfe, 0x00, 0x20 },
        { }
};

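/* All northbridges found at boot; filled in by amd_cache_northbridges(). */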
static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
        return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
        return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
        return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

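/*
 * Walk the global PCI device list starting after @dev and return the next
 * device matching @ids, or NULL. Callers pass the previous hit (or NULL) to
 * iterate; pci_get_device() drops the reference on @dev and takes one on
 * the device returned.
 */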
static struct pci_dev *next_northbridge(struct pci_dev *dev,
                                        const struct pci_device_id *ids)
{
        do {
                dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
                if (!dev)
                        break;
        } while (!pci_match_id(ids, dev));
        return dev;
}

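/*
 * SMN accesses go through an index/data register pair in the root device's
 * config space: the target SMN address is written to offset 0x60 and the
 * data is then read from or written to offset 0x64, all under smn_mutex.
 */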
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
        struct pci_dev *root;
        int err = -ENODEV;

        if (node >= amd_northbridges.num)
                goto out;

        root = node_to_amd_nb(node)->root;
        if (!root)
                goto out;

        mutex_lock(&smn_mutex);

        err = pci_write_config_dword(root, 0x60, address);
        if (err) {
                pr_warn("Error programming SMN address 0x%x.\n", address);
                goto out_unlock;
        }

        err = (write ? pci_write_config_dword(root, 0x64, *value)
                     : pci_read_config_dword(root, 0x64, value));
        if (err)
                pr_warn("Error %s SMN address 0x%x.\n",
                        (write ? "writing to" : "reading from"), address);

out_unlock:
        mutex_unlock(&smn_mutex);

out:
        return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
        return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
        return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
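
/*
 * Example of reading an SMN register on node 0 (a sketch; SMN_SOME_REG is
 * a placeholder address, not a register defined anywhere):
 *
 *      u32 val;
 *
 *      if (!amd_smn_read(0, SMN_SOME_REG, &val))
 *              pr_debug("SMN 0x%x: 0x%x\n", SMN_SOME_REG, val);
 */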

/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance ID and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers but so far we only need the LO register.
 */
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
        struct pci_dev *F4;
        u32 ficaa;
        int err = -ENODEV;

        if (node >= amd_northbridges.num)
                goto out;

        F4 = node_to_amd_nb(node)->link;
        if (!F4)
                goto out;

        ficaa  = 1;
        ficaa |= reg & 0x3FC;
        ficaa |= (func & 0x7) << 11;
        ficaa |= instance_id << 16;

        mutex_lock(&smn_mutex);

        err = pci_write_config_dword(F4, 0x5C, ficaa);
        if (err) {
                pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
                goto out_unlock;
        }

        err = pci_read_config_dword(F4, 0x98, lo);
        if (err)
                pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
        mutex_unlock(&smn_mutex);

out:
        return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);
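
/*
 * Example of reading the low dword of a Data Fabric register on node 0,
 * function 0, instance 0 (a sketch; DF_SOME_REG is a placeholder offset):
 *
 *      u32 lo;
 *
 *      if (!amd_df_indirect_read(0, 0, DF_SOME_REG, 0, &lo))
 *              pr_debug("DF: 0x%x\n", lo);
 */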
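/*
 * Find the root/misc/link PCI device of every northbridge, cache them in
 * amd_northbridges and set the feature flags. Idempotent: returns 0
 * immediately once the northbridges have been counted.
 */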
int amd_cache_northbridges(void)
{
        const struct pci_device_id *misc_ids = amd_nb_misc_ids;
        const struct pci_device_id *link_ids = amd_nb_link_ids;
        const struct pci_device_id *root_ids = amd_root_ids;
        struct pci_dev *root, *misc, *link;
        struct amd_northbridge *nb;
        u16 roots_per_misc = 0;
        u16 misc_count = 0;
        u16 root_count = 0;
        u16 i, j;

        if (amd_northbridges.num)
                return 0;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
                root_ids = hygon_root_ids;
                misc_ids = hygon_nb_misc_ids;
                link_ids = hygon_nb_link_ids;
        }

        misc = NULL;
        while ((misc = next_northbridge(misc, misc_ids)) != NULL)
                misc_count++;

        if (!misc_count)
                return -ENODEV;

        root = NULL;
        while ((root = next_northbridge(root, root_ids)) != NULL)
                root_count++;

        if (root_count) {
                roots_per_misc = root_count / misc_count;

                /*
                 * There should be _exactly_ N roots for each DF/SMN
                 * interface.
                 */
                if (!roots_per_misc || (root_count % roots_per_misc)) {
                        pr_info("Unsupported AMD DF/PCI configuration found\n");
                        return -ENODEV;
                }
        }

        nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        amd_northbridges.nb = nb;
        amd_northbridges.num = misc_count;

        link = misc = root = NULL;
        for (i = 0; i < amd_northbridges.num; i++) {
                node_to_amd_nb(i)->root = root =
                        next_northbridge(root, root_ids);
                node_to_amd_nb(i)->misc = misc =
                        next_northbridge(misc, misc_ids);
                node_to_amd_nb(i)->link = link =
                        next_northbridge(link, link_ids);

                /*
                 * If there are more PCI root devices than data fabric/
                 * system management network interfaces, then the (N)
                 * PCI roots per DF/SMN interface are functionally the
                 * same (for DF/SMN access) and N-1 are redundant.  N-1
                 * PCI roots should be skipped per DF/SMN interface so
                 * the following DF/SMN interfaces get mapped to the
                 * correct PCI roots.
                 */
                for (j = 1; j < roots_per_misc; j++)
                        root = next_northbridge(root, root_ids);
        }

        if (amd_gart_present())
                amd_northbridges.flags |= AMD_NB_GART;

        /* Check for L3 cache presence. */
        if (!cpuid_edx(0x80000006))
                return 0;

        /*
         * Some CPU families support L3 Cache Index Disable. There are some
         * limitations because of E382 and E388 on family 0x10.
         */
        if (boot_cpu_data.x86 == 0x10 &&
            boot_cpu_data.x86_model >= 0x8 &&
            (boot_cpu_data.x86_model > 0x9 ||
             boot_cpu_data.x86_stepping >= 0x1))
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        /* L3 cache partitioning is supported on family 0x15. */
        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

        return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * The device argument packs the PCI vendor ID in the low 16 bits and the
 * device ID in the high 16 bits, i.e. the dword at config space offset 0.
 * Subdevice/subvendor are ignored; as far as we can tell they're useless
 * for this match anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
        const struct pci_device_id *misc_ids = amd_nb_misc_ids;
        const struct pci_device_id *id;
        u32 vendor = device & 0xffff;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
                return false;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
                misc_ids = hygon_nb_misc_ids;

        device >>= 16;
        for (id = misc_ids; id->vendor; id++)
                if (vendor == id->vendor && device == id->device)
                        return true;
        return false;
}

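/*
 * Fill @res with the MMCONFIG aperture described by
 * MSR_FAM10H_MMIO_CONF_BASE; return NULL if MMCONFIG is absent or disabled.
 */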
struct resource *amd_get_mmconfig_range(struct resource *res)
{
        u32 address;
        u64 base, msr;
        unsigned int segn_busn_bits;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
                return NULL;

        /* Assume all CPUs from fam10h onward have MMCONFIG. */
        if (boot_cpu_data.x86 < 0x10)
                return NULL;

        address = MSR_FAM10H_MMIO_CONF_BASE;
        rdmsrl(address, msr);

        /* MMCONFIG is not enabled. */
        if (!(msr & FAM10H_MMIO_CONF_ENABLE))
                return NULL;

        base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

        segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
                         FAM10H_MMIO_CONF_BUSRANGE_MASK;

        res->flags = IORESOURCE_MEM;
        res->start = base;
        res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
        return res;
}

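/*
 * L3 partitioning accessors: register 0x1d4 of the link (F4) device holds
 * a 4-bit subcache enable mask per compute unit.
 */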
int amd_get_subcaches(int cpu)
{
        struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
        unsigned int mask;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return 0;

        pci_read_config_dword(link, 0x1d4, &mask);

        return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
        static unsigned int reset, ban;
        struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
        unsigned int reg;
        int cuid;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
                return -EINVAL;

        /* If necessary, collect the reset state of L3 partitioning and BAN mode. */
        if (reset == 0) {
                pci_read_config_dword(nb->link, 0x1d4, &reset);
                pci_read_config_dword(nb->misc, 0x1b8, &ban);
                ban &= 0x180000;
        }

        /* Deactivate BAN mode if any subcaches are to be disabled. */
        if (mask != 0xf) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
        }

        cuid = cpu_data(cpu).cpu_core_id;
        mask <<= 4 * cuid;
        mask |= (0xf ^ (1 << cuid)) << 26;

        pci_write_config_dword(nb->link, 0x1d4, mask);

        /* Reset BAN mode if L3 partitioning returned to its reset state. */
        pci_read_config_dword(nb->link, 0x1d4, &reg);
        if (reg == reset) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                reg &= ~0x180000;
                pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
        }

        return 0;
}

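/*
 * Cache each northbridge's GART flush word (misc device register 0x9c);
 * amd_flush_garts() rewrites these with the flush bit set.
 */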
static void amd_cache_gart(void)
{
        u16 i;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
        if (!flush_words) {
                amd_northbridges.flags &= ~AMD_NB_GART;
                pr_notice("Cannot initialize GART flush words, GART support disabled\n");
                return;
        }

        for (i = 0; i != amd_northbridges.num; i++)
                pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
        int flushed, i;
        unsigned long flags;
        static DEFINE_SPINLOCK(gart_lock);

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        /*
         * Avoid races between AGP and IOMMU. In theory it's not needed
         * but I'm not sure if the hardware won't lose flush requests
         * when another is pending. This whole thing is so expensive anyway
         * that it doesn't matter to serialize more. -AK
         */
        spin_lock_irqsave(&gart_lock, flags);
        flushed = 0;
        for (i = 0; i < amd_northbridges.num; i++) {
                pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                                       flush_words[i] | 1);
                flushed++;
        }
        for (i = 0; i < amd_northbridges.num; i++) {
                u32 w;
                /* Make sure the hardware actually executed the flush. */
                for (;;) {
                        pci_read_config_dword(node_to_amd_nb(i)->misc,
                                              0x9c, &w);
                        if (!(w & 1))
                                break;
                        cpu_relax();
                }
        }
        spin_unlock_irqrestore(&gart_lock, flags);
        if (!flushed)
                pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

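/* Erratum 688 workaround: set bits 3 and 14 of MSR_AMD64_IC_CFG. */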
static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

        msr_set_bit(MSR_AMD64_IC_CFG, 3);
        msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply the erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
        struct pci_dev *F4;
        u32 val;

        if (boot_cpu_data.x86 != 0x14)
                return;

        if (!amd_northbridges.num)
                return;

        F4 = node_to_amd_nb(0)->link;
        if (!F4)
                return;

        if (pci_read_config_dword(F4, 0x164, &val))
                return;

        if (val & BIT(2))
                return;

        on_each_cpu(__fix_erratum_688, NULL, 0);

        pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
        amd_cache_northbridges();
        amd_cache_gart();

        fix_erratum_688();

        return 0;
}

/* This has to run after the PCI subsystem is up. */
fs_initcall(init_amd_nbs);