Linux 6.9-rc5
[sfrench/cifs-2.6.git] / arch / x86 / kernel / quirks.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * This file contains work-arounds for x86 and x86_64 platform bugs.
4  */
5 #include <linux/dmi.h>
6 #include <linux/pci.h>
7 #include <linux/irq.h>
8
9 #include <asm/hpet.h>
10 #include <asm/setup.h>
11 #include <asm/mce.h>
12
13 #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
14
15 static void quirk_intel_irqbalance(struct pci_dev *dev)
16 {
17         u8 config;
18         u16 word;
19
20         /* BIOS may enable hardware IRQ balancing for
21          * E7520/E7320/E7525(revision ID 0x9 and below)
22          * based platforms.
23          * Disable SW irqbalance/affinity on those platforms.
24          */
25         if (dev->revision > 0x9)
26                 return;
27
28         /* enable access to config space*/
29         pci_read_config_byte(dev, 0xf4, &config);
30         pci_write_config_byte(dev, 0xf4, config|0x2);
31
32         /*
33          * read xTPR register.  We may not have a pci_dev for device 8
34          * because it might be hidden until the above write.
35          */
36         pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word);
37
38         if (!(word & (1 << 13))) {
39                 dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
40                         "disabling irq balancing and affinity\n");
41                 noirqdebug_setup("");
42 #ifdef CONFIG_PROC_FS
43                 no_irq_affinity = 1;
44 #endif
45         }
46
47         /* put back the original value for config space*/
48         if (!(config & 0x2))
49                 pci_write_config_byte(dev, 0xf4, config);
50 }
/* All three E75xx MCH variants need the irqbalance quirk above. */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
			quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
			quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
			quirk_intel_irqbalance);
57 #endif
58
59 #if defined(CONFIG_HPET_TIMER)
/* Non-zero once a quirk below has force-enabled an HPET at this address. */
unsigned long force_hpet_address;

/* Selects which chipset-specific handler force_hpet_resume() must run. */
static enum {
	NONE_FORCE_HPET_RESUME,
	OLD_ICH_FORCE_HPET_RESUME,
	ICH_FORCE_HPET_RESUME,
	VT8237_FORCE_HPET_RESUME,
	NVIDIA_FORCE_HPET_RESUME,
	ATI_FORCE_HPET_RESUME,
} force_hpet_resume_type;

/* Mapping of the ICH Root Complex Base Address (RCBA) register space. */
static void __iomem *rcba_base;
72
73 static void ich_force_hpet_resume(void)
74 {
75         u32 val;
76
77         if (!force_hpet_address)
78                 return;
79
80         BUG_ON(rcba_base == NULL);
81
82         /* read the Function Disable register, dword mode only */
83         val = readl(rcba_base + 0x3404);
84         if (!(val & 0x80)) {
85                 /* HPET disabled in HPTC. Trying to enable */
86                 writel(val | 0x80, rcba_base + 0x3404);
87         }
88
89         val = readl(rcba_base + 0x3404);
90         if (!(val & 0x80))
91                 BUG();
92         else
93                 printk(KERN_DEBUG "Force enabled HPET at resume\n");
94 }
95
/*
 * Force-enable the HPET on ICH chipsets where the BIOS left it disabled.
 *
 * The HPET configuration lives in the HPTC register at offset 0x3404 of
 * the RCBA space: bit 7 enables the timer, and bits 1:0 select which of
 * the four fixed base addresses (0xFED00000 + n * 0x1000) it decodes at.
 */
static void ich_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;
	u32 rcba;
	int err = 0;

	/* Nothing to do if an HPET is already known or already forced. */
	if (hpet_address || force_hpet_address)
		return;

	/* RCBA is at config offset 0xF0; bits 31:14 hold the base. */
	pci_read_config_dword(dev, 0xF0, &rcba);
	rcba &= 0xFFFFC000;
	if (rcba == 0) {
		dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; "
			"cannot force enable HPET\n");
		return;
	}

	/* use bits 31:14, 16 kB aligned */
	rcba_base = ioremap(rcba, 0x4000);
	if (rcba_base == NULL) {
		dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
			"cannot force enable HPET\n");
		return;
	}

	/* read the Function Disable register, dword mode only */
	val = readl(rcba_base + 0x3404);

	if (val & 0x80) {
		/* HPET is enabled in HPTC. Just not reported by BIOS */
		val = val & 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		iounmap(rcba_base);
		return;
	}

	/* HPET disabled in HPTC. Trying to enable */
	writel(val | 0x80, rcba_base + 0x3404);

	/* Read back to check whether the enable bit stuck. */
	val = readl(rcba_base + 0x3404);
	if (!(val & 0x80)) {
		err = 1;
	} else {
		val = val & 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
	}

	if (err) {
		force_hpet_address = 0;
		iounmap(rcba_base);
		dev_printk(KERN_DEBUG, &dev->dev,
			"Failed to force enable HPET\n");
	} else {
		/* Keep rcba_base mapped: ich_force_hpet_resume() needs it. */
		force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
	}
}
156
/* ESB2 and ICH6..ICH10 LPC bridges where the HPET may be hidden by BIOS. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16,	/* ICH10 */
			 ich_force_enable_hpet);

/* Device whose config space the matching *_force_hpet_resume() re-pokes. */
static struct pci_dev *cached_dev;
179
180 static void hpet_print_force_info(void)
181 {
182         printk(KERN_INFO "HPET not enabled in BIOS. "
183                "You might try hpet=force boot option\n");
184 }
185
186 static void old_ich_force_hpet_resume(void)
187 {
188         u32 val;
189         u32 gen_cntl;
190
191         if (!force_hpet_address || !cached_dev)
192                 return;
193
194         pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
195         gen_cntl &= (~(0x7 << 15));
196         gen_cntl |= (0x4 << 15);
197
198         pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
199         pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
200         val = gen_cntl >> 15;
201         val &= 0x7;
202         if (val == 0x4)
203                 printk(KERN_DEBUG "Force enabled HPET at resume\n");
204         else
205                 BUG();
206 }
207
/*
 * Force-enable the HPET on older ICH parts via the GEN_CNTL register
 * at config offset 0xD0: bit 17 enables the timer and bits 16:15
 * select the base, so bits 17:15 == 100b means enabled at 0xFED00000.
 */
static void old_ich_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;
	u32 gen_cntl;

	/* Respect an HPET that is already known or already forced. */
	if (hpet_address || force_hpet_address)
		return;

	pci_read_config_dword(dev, 0xD0, &gen_cntl);
	/*
	 * Bit 17 is HPET enable bit.
	 * Bit 16:15 control the HPET base address.
	 */
	val = gen_cntl >> 15;
	val &= 0x7;
	if (val & 0x4) {
		/* Already enabled by the BIOS; just record the address. */
		val &= 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
			force_hpet_address);
		return;
	}

	/*
	 * HPET is disabled. Trying enabling at FED00000 and check
	 * whether it sticks
	 */
	gen_cntl &= (~(0x7 << 15));
	gen_cntl |= (0x4 << 15);
	pci_write_config_dword(dev, 0xD0, gen_cntl);

	/* Read back to see whether the write stuck. */
	pci_read_config_dword(dev, 0xD0, &gen_cntl);

	val = gen_cntl >> 15;
	val &= 0x7;
	if (val & 0x4) {
		/* HPET is enabled in HPTC. Just not reported by BIOS */
		val &= 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		/* Cache the device for old_ich_force_hpet_resume(). */
		cached_dev = dev;
		force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
		return;
	}

	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}
256
257 /*
258  * Undocumented chipset features. Make sure that the user enforced
259  * this.
260  */
261 static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
262 {
263         if (hpet_force_user)
264                 old_ich_force_enable_hpet(dev);
265 }
266
/*
 * Older ICH parts. Most entries go through the _user variant and thus
 * require hpet=force; the 82801EB ones are applied unconditionally.
 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
			 old_ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
			 old_ich_force_enable_hpet);
281
282
283 static void vt8237_force_hpet_resume(void)
284 {
285         u32 val;
286
287         if (!force_hpet_address || !cached_dev)
288                 return;
289
290         val = 0xfed00000 | 0x80;
291         pci_write_config_dword(cached_dev, 0x68, val);
292
293         pci_read_config_dword(cached_dev, 0x68, &val);
294         if (val & 0x80)
295                 printk(KERN_DEBUG "Force enabled HPET at resume\n");
296         else
297                 BUG();
298 }
299
/*
 * Force-enable the HPET on VIA VT8235/VT8237/CX700 south bridges.
 * Only done when the user passed hpet=force.
 */
static void vt8237_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;

	/* Respect an HPET that is already known or already forced. */
	if (hpet_address || force_hpet_address)
		return;

	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	pci_read_config_dword(dev, 0x68, &val);
	/*
	 * Bit 7 is HPET enable bit.
	 * Bit 31:10 is HPET base address (contrary to what datasheet claims)
	 */
	if (val & 0x80) {
		/* Already enabled by the BIOS; just record the address. */
		force_hpet_address = (val & ~0x3ff);
		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
			force_hpet_address);
		return;
	}

	/*
	 * HPET is disabled. Trying enabling at FED00000 and check
	 * whether it sticks
	 */
	val = 0xfed00000 | 0x80;
	pci_write_config_dword(dev, 0x68, val);

	/* Read back to verify the enable bit stuck. */
	pci_read_config_dword(dev, 0x68, &val);
	if (val & 0x80) {
		force_hpet_address = (val & ~0x3ff);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		/* Cache the device for vt8237_force_hpet_resume(). */
		cached_dev = dev;
		force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
		return;
	}

	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}
343
/* VIA south bridges handled by the VT8237 HPET quirk above. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
			 vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
			 vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700,
			 vt8237_force_enable_hpet);
350
351 static void ati_force_hpet_resume(void)
352 {
353         pci_write_config_dword(cached_dev, 0x14, 0xfed00000);
354         printk(KERN_DEBUG "Force enabled HPET at resume\n");
355 }
356
357 static u32 ati_ixp4x0_rev(struct pci_dev *dev)
358 {
359         int err = 0;
360         u32 d = 0;
361         u8  b = 0;
362
363         err = pci_read_config_byte(dev, 0xac, &b);
364         b &= ~(1<<5);
365         err |= pci_write_config_byte(dev, 0xac, b);
366         err |= pci_read_config_dword(dev, 0x70, &d);
367         d |= 1<<8;
368         err |= pci_write_config_dword(dev, 0x70, d);
369         err |= pci_read_config_dword(dev, 0x8, &d);
370         d &= 0xff;
371         dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);
372
373         WARN_ON_ONCE(err);
374
375         return d;
376 }
377
/*
 * Force-enable the HPET on ATI IXP400 (SB4x0) south bridges.
 * Requires hpet=force and an SB4x0 revision of at least 0x82;
 * older revisions are left alone.
 */
static void ati_force_enable_hpet(struct pci_dev *dev)
{
	u32 d, val;
	u8  b;

	/* Respect an HPET that is already known or already forced. */
	if (hpet_address || force_hpet_address)
		return;

	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	d = ati_ixp4x0_rev(dev);
	if (d  < 0x82)
		return;

	/* base address */
	pci_write_config_dword(dev, 0x14, 0xfed00000);
	pci_read_config_dword(dev, 0x14, &val);

	/*
	 * enable interrupt: set bit 0 of indirect register 0x72,
	 * reached through the 0xcd6/0xcd7 index/data port pair
	 * (presumably SB4x0 PM I/O space -- confirm against datasheet).
	 */
	outb(0x72, 0xcd6); b = inb(0xcd7);
	b |= 0x1;
	outb(0x72, 0xcd6); outb(b, 0xcd7);
	outb(0x72, 0xcd6); b = inb(0xcd7);
	if (!(b & 0x1))
		return;		/* the bit did not stick; give up */
	/* Also set bit 10 of config register 0x64 and verify it sticks. */
	pci_read_config_dword(dev, 0x64, &d);
	d |= (1<<10);
	pci_write_config_dword(dev, 0x64, d);
	pci_read_config_dword(dev, 0x64, &d);
	if (!(d & (1<<10)))
		return;

	force_hpet_address = val;
	force_hpet_resume_type = ATI_FORCE_HPET_RESUME;
	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
		   force_hpet_address);
	/* Cache the device for ati_force_hpet_resume(). */
	cached_dev = dev;
}
/* The IXP400 SMBus controller is the device carrying the HPET registers. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
			 ati_force_enable_hpet);
421
422 /*
423  * Undocumented chipset feature taken from LinuxBIOS.
424  */
425 static void nvidia_force_hpet_resume(void)
426 {
427         pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
428         printk(KERN_DEBUG "Force enabled HPET at resume\n");
429 }
430
431 static void nvidia_force_enable_hpet(struct pci_dev *dev)
432 {
433         u32 val;
434
435         if (hpet_address || force_hpet_address)
436                 return;
437
438         if (!hpet_force_user) {
439                 hpet_print_force_info();
440                 return;
441         }
442
443         pci_write_config_dword(dev, 0x44, 0xfed00001);
444         pci_read_config_dword(dev, 0x44, &val);
445         force_hpet_address = val & 0xfffffffe;
446         force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
447         dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
448                 force_hpet_address);
449         cached_dev = dev;
450 }
451
/* ISA bridges handled by the NVIDIA HPET quirk. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
			nvidia_force_enable_hpet);

/* LPC bridges handled by the NVIDIA HPET quirk. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
			nvidia_force_enable_hpet);
477
478 void force_hpet_resume(void)
479 {
480         switch (force_hpet_resume_type) {
481         case ICH_FORCE_HPET_RESUME:
482                 ich_force_hpet_resume();
483                 return;
484         case OLD_ICH_FORCE_HPET_RESUME:
485                 old_ich_force_hpet_resume();
486                 return;
487         case VT8237_FORCE_HPET_RESUME:
488                 vt8237_force_hpet_resume();
489                 return;
490         case NVIDIA_FORCE_HPET_RESUME:
491                 nvidia_force_hpet_resume();
492                 return;
493         case ATI_FORCE_HPET_RESUME:
494                 ati_force_hpet_resume();
495                 return;
496         default:
497                 break;
498         }
499 }
500
501 /*
502  * According to the datasheet e6xx systems have the HPET hardwired to
503  * 0xfed00000
504  */
505 static void e6xx_force_enable_hpet(struct pci_dev *dev)
506 {
507         if (hpet_address || force_hpet_address)
508                 return;
509
510         force_hpet_address = 0xFED00000;
511         force_hpet_resume_type = NONE_FORCE_HPET_RESUME;
512         dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
513                 "0x%lx\n", force_hpet_address);
514 }
/* E6xx configuration unit carries the hardwired HPET. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU,
			 e6xx_force_enable_hpet);
517
518 /*
519  * HPET MSI on some boards (ATI SB700/SB800) has side effect on
520  * floppy DMA. Disable HPET MSI on such platforms.
521  * See erratum #27 (Misinterpreted MSI Requests May Result in
522  * Corrupted LPC DMA Data) in AMD Publication #46837,
523  * "SB700 Family Product Errata", Rev. 1.0, March 2010.
524  */
/* Disable HPET MSI globally; the matched device itself is not used. */
static void force_disable_hpet_msi(struct pci_dev *unused)
{
	hpet_msi_disable = true;
}
529
/* SB700/SB800 SMBus device identifies the affected platforms. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
			 force_disable_hpet_msi);
532
533 #endif
534
535 #if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
536 /* Set correct numa_node information for AMD NB functions */
537 static void quirk_amd_nb_node(struct pci_dev *dev)
538 {
539         struct pci_dev *nb_ht;
540         unsigned int devfn;
541         u32 node;
542         u32 val;
543
544         devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
545         nb_ht = pci_get_slot(dev->bus, devfn);
546         if (!nb_ht)
547                 return;
548
549         pci_read_config_dword(nb_ht, 0x60, &val);
550         node = pcibus_to_node(dev->bus) | (val & 7);
551         /*
552          * Some hardware may return an invalid node ID,
553          * so check it first:
554          */
555         if (node_online(node))
556                 set_dev_node(&dev->dev, node);
557         pci_dev_put(nb_ht);
558 }
559
/* Apply the NUMA-node quirk to every AMD K8/10h/15h northbridge function. */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
			quirk_amd_nb_node);
590
591 #endif
592
593 #ifdef CONFIG_PCI
594 /*
595  * Processor does not ensure DRAM scrub read/write sequence
596  * is atomic wrt accesses to CC6 save state area. Therefore
597  * if a concurrent scrub read/write access is to same address
598  * the entry may appear as if it is not written. This quirk
599  * applies to Fam16h models 00h-0Fh
600  *
601  * See "Revision Guide" for AMD F16h models 00h-0fh,
602  * document 51810 rev. 3.04, Nov 2013
603  */
604 static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
605 {
606         u32 val;
607
608         /*
609          * Suggested workaround:
610          * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
611          */
612         pci_read_config_dword(dev, 0x58, &val);
613         if (val & 0x1F) {
614                 val &= ~(0x1F);
615                 pci_write_config_dword(dev, 0x58, val);
616         }
617
618         pci_read_config_dword(dev, 0x5C, &val);
619         if (val & BIT(0)) {
620                 val &= ~BIT(0);
621                 pci_write_config_dword(dev, 0x5c, val);
622         }
623 }
624
/* D18F3 of the affected Fam16h parts. */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
			amd_disable_seq_and_redirect_scrub);
627
628 /* Ivy Bridge, Haswell, Broadwell */
629 static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
630 {
631         u32 capid0;
632
633         pci_read_config_dword(pdev, 0x84, &capid0);
634
635         if (capid0 & 0x10)
636                 enable_copy_mc_fragile();
637 }
638
639 /* Skylake */
640 static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
641 {
642         u32 capid0, capid5;
643
644         pci_read_config_dword(pdev, 0x84, &capid0);
645         pci_read_config_dword(pdev, 0x98, &capid5);
646
647         /*
648          * CAPID0{7:6} indicate whether this is an advanced RAS SKU
649          * CAPID5{8:5} indicate that various NVDIMM usage modes are
650          * enabled, so memory machine check recovery is also enabled.
651          */
652         if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
653                 enable_copy_mc_fragile();
654
655 }
/* Devices carrying the CAPID0/CAPID5 capability registers read above. */
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
660 #endif
661
/* True when DMI identifies Apple hardware; set by early_platform_quirks(). */
bool x86_apple_machine;
EXPORT_SYMBOL(x86_apple_machine);
664
665 void __init early_platform_quirks(void)
666 {
667         x86_apple_machine = dmi_match(DMI_SYS_VENDOR, "Apple Inc.") ||
668                             dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc.");
669 }