/*
 *  (c) 2005-2016 Advanced Micro Devices, Inc.
 *  Your use of this code is subject to the terms and conditions of the
 *  GNU General Public License version 2. See "COPYING" or
 *  http://www.gnu.org/licenses/gpl.html
 *
 *  Written by Jacob Shin - AMD, Inc.
 *  Maintained by: Borislav Petkov <bp@alien8.de>
 *
 *  All MC4_MISCi registers are shared between cores on a node.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/string.h>

#include <asm/amd_nb.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>

#define NR_BLOCKS         5
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400

/* Deferred error settings */
#define MSR_CU_DEF_ERR          0xC0000410
#define MASK_DEF_LVTOFF         0x000000F0
#define MASK_DEF_INT_TYPE       0x00000006
#define DEF_LVT_OFF             0x2
#define DEF_INT_TYPE_APIC       0x2

/* Scalable MCA: */

/* Threshold LVT offset is at MSR 0xC0000410[15:12] */
#define SMCA_THR_LVT_OFF        0xF000

static bool thresholding_en;

static const char * const th_names[] = {
        "load_store",
        "insn_fetch",
        "combined_unit",
        "",
        "northbridge",
        "execution_unit",
};
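
/*
 * Editorial note: th_names is indexed by the legacy bank number (0-5) in
 * get_name() below; the empty string at index 3 simply means that bank
 * gets no name here.
 */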

static const char * const smca_umc_block_names[] = {
        "dram_ecc",
        "misc_umc"
};

struct smca_bank_name {
        const char *name;       /* Short name for sysfs */
        const char *long_name;  /* Long name for pretty-printing */
};

static struct smca_bank_name smca_names[] = {
        [SMCA_LS]       = { "load_store",       "Load Store Unit" },
        [SMCA_IF]       = { "insn_fetch",       "Instruction Fetch Unit" },
        [SMCA_L2_CACHE] = { "l2_cache",         "L2 Cache" },
        [SMCA_DE]       = { "decode_unit",      "Decode Unit" },
        [SMCA_EX]       = { "execution_unit",   "Execution Unit" },
        [SMCA_FP]       = { "floating_point",   "Floating Point Unit" },
        [SMCA_L3_CACHE] = { "l3_cache",         "L3 Cache" },
        [SMCA_CS]       = { "coherent_slave",   "Coherent Slave" },
        [SMCA_PIE]      = { "pie",              "Power, Interrupts, etc." },
        [SMCA_UMC]      = { "umc",              "Unified Memory Controller" },
        [SMCA_PB]       = { "param_block",      "Parameter Block" },
        [SMCA_PSP]      = { "psp",              "Platform Security Processor" },
        [SMCA_SMU]      = { "smu",              "System Management Unit" },
};

const char *smca_get_name(enum smca_bank_types t)
{
        if (t >= N_SMCA_BANK_TYPES)
                return NULL;

        return smca_names[t].name;
}

const char *smca_get_long_name(enum smca_bank_types t)
{
        if (t >= N_SMCA_BANK_TYPES)
                return NULL;

        return smca_names[t].long_name;
}
EXPORT_SYMBOL_GPL(smca_get_long_name);

static struct smca_hwid smca_hwid_mcatypes[] = {
        /* { bank_type, hwid_mcatype, xec_bitmap } */

        /* ZN Core (HWID=0xB0) MCA types */
        { SMCA_LS,       HWID_MCATYPE(0xB0, 0x0), 0x1FFFEF },
        { SMCA_IF,       HWID_MCATYPE(0xB0, 0x1), 0x3FFF },
        { SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF },
        { SMCA_DE,       HWID_MCATYPE(0xB0, 0x3), 0x1FF },
        /* HWID 0xB0 MCATYPE 0x4 is Reserved */
        { SMCA_EX,       HWID_MCATYPE(0xB0, 0x5), 0x7FF },
        { SMCA_FP,       HWID_MCATYPE(0xB0, 0x6), 0x7F },
        { SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7), 0xFF },

        /* Data Fabric MCA types */
        { SMCA_CS,       HWID_MCATYPE(0x2E, 0x0), 0x1FF },
        { SMCA_PIE,      HWID_MCATYPE(0x2E, 0x1), 0xF },

        /* Unified Memory Controller MCA type */
        { SMCA_UMC,      HWID_MCATYPE(0x96, 0x0), 0x3F },

        /* Parameter Block MCA type */
        { SMCA_PB,       HWID_MCATYPE(0x05, 0x0), 0x1 },

        /* Platform Security Processor MCA type */
        { SMCA_PSP,      HWID_MCATYPE(0xFF, 0x0), 0x1 },

        /* System Management Unit MCA type */
        { SMCA_SMU,      HWID_MCATYPE(0x01, 0x0), 0x1 },
};
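
/*
 * Illustrative example (editorial; assumes HWID_MCATYPE(hwid, mcatype)
 * packs the hardware ID into the upper 16 bits, as its use in
 * get_smca_bank_info() below suggests):
 *
 *   HWID_MCATYPE(0xB0, 0x1) == 0x00B00001, which matches an MCA_IPID
 *   reporting HardwareID 0xB0 and McaType 0x1, i.e. an instruction
 *   fetch (SMCA_IF) bank.
 */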

struct smca_bank smca_banks[MAX_NR_BANKS];
EXPORT_SYMBOL_GPL(smca_banks);

/*
 * On SMCA-enabled processors, we can have multiple banks for a given IP type.
 * To define a unique name for each bank, we use a temporary C-string to append
 * MCA_IPID[InstanceId] to the type's name in get_name().
 *
 * InstanceId is 32 bits, i.e. 8 hex characters. Make sure MAX_MCATYPE_NAME_LEN
 * is greater than 8 plus 1 (for the underscore) plus the length of the longest
 * type name.
 */
#define MAX_MCATYPE_NAME_LEN    30
static char buf_mcatype[MAX_MCATYPE_NAME_LEN];
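
/*
 * For example (illustrative): on a system with two UMC banks, the matching
 * hwid's count ends up as 2, so get_name() below produces "umc_0" and
 * "umc_1" from the short name "umc" and each bank's sysfs_id.
 */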

static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
static DEFINE_PER_CPU(unsigned int, bank_map);  /* see which banks are on */

static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);

static void default_deferred_error_interrupt(void)
{
        pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR);
}
void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;

static void get_smca_bank_info(unsigned int bank)
{
        unsigned int i, hwid_mcatype, cpu = smp_processor_id();
        struct smca_hwid *s_hwid;
        u32 high, instance_id;

        /* Collect bank_info using CPU 0 for now. */
        if (cpu)
                return;

        if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &instance_id, &high)) {
                pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
                return;
        }

        hwid_mcatype = HWID_MCATYPE(high & MCI_IPID_HWID,
                                    (high & MCI_IPID_MCATYPE) >> 16);

        for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) {
                s_hwid = &smca_hwid_mcatypes[i];
                if (hwid_mcatype == s_hwid->hwid_mcatype) {

                        WARN(smca_banks[bank].hwid,
                             "Bank %s already initialized!\n",
                             smca_get_name(s_hwid->bank_type));

                        smca_banks[bank].hwid = s_hwid;
                        smca_banks[bank].id = instance_id;
                        smca_banks[bank].sysfs_id = s_hwid->count++;
                        break;
                }
        }
}

struct thresh_restart {
        struct threshold_block  *b;
        int                     reset;
        int                     set_lvt_off;
        int                     lvt_off;
        u16                     old_limit;
};

static inline bool is_shared_bank(int bank)
{
        /*
         * Scalable MCA provides for only one core to have access to the MSRs of
         * a shared bank.
         */
        if (mce_flags.smca)
                return false;

        /* Bank 4 is for northbridge reporting and is thus shared */
        return (bank == 4);
}

static const char *bank4_names(const struct threshold_block *b)
{
        switch (b->address) {
        /* MSR4_MISC0 */
        case 0x00000413:
                return "dram";

        case 0xc0000408:
                return "ht_links";

        case 0xc0000409:
                return "l3_cache";

        default:
                WARN(1, "Funny MSR: 0x%08x\n", b->address);
                return "";
        }
}

static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
        /*
         * bank 4 supports APIC LVT interrupts implicitly since forever.
         */
        if (bank == 4)
                return true;

        /*
         * IntP: interrupt present; if this bit is set, the thresholding
         * bank can generate APIC LVT interrupts
         */
        return msr_high_bits & BIT(28);
}

static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
        int msr = (hi & MASK_LVTOFF_HI) >> 20;

        if (apic < 0) {
                pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
                       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
                       b->bank, b->block, b->address, hi, lo);
                return 0;
        }

        if (apic != msr) {
                /*
                 * On SMCA CPUs, LVT offset is programmed at a different MSR, and
                 * the BIOS provides the value. The original field where LVT offset
                 * was set is reserved. Return early here:
                 */
                if (mce_flags.smca)
                        return 0;

                pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
                       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
                       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
                return 0;
        }

        return 1;
}

/* Reprogram MCx_MISC MSR behind this threshold bank. */
static void threshold_restart_bank(void *_tr)
{
        struct thresh_restart *tr = _tr;
        u32 hi, lo;

        rdmsr(tr->b->address, lo, hi);

        if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
                tr->reset = 1;  /* limit cannot be lower than err count */

        if (tr->reset) {                /* reset err count and overflow bit */
                hi = (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
                     (THRESHOLD_MAX - tr->b->threshold_limit);
        } else if (tr->old_limit) {     /* change limit w/o reset */
                int new_count = (hi & THRESHOLD_MAX) +
                                (tr->old_limit - tr->b->threshold_limit);

                hi = (hi & ~MASK_ERR_COUNT_HI) |
                     (new_count & THRESHOLD_MAX);
        }

        /* clear IntType */
        hi &= ~MASK_INT_TYPE_HI;

        if (!tr->b->interrupt_capable)
                goto done;

        if (tr->set_lvt_off) {
                if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
                        /* set new lvt offset */
                        hi &= ~MASK_LVTOFF_HI;
                        hi |= tr->lvt_off << 20;
                }
        }

        if (tr->b->interrupt_enable)
                hi |= INT_TYPE_APIC;

done:
        hi |= MASK_COUNT_EN_HI;
        wrmsr(tr->b->address, lo, hi);
}
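
/*
 * Worked example (editorial): with THRESHOLD_MAX == 0xFFF and
 * threshold_limit == 10, the reset path above writes 0xFFF - 10 = 0xFF5
 * into the error count field, so the counter overflows (and the optional
 * APIC LVT interrupt fires) after exactly 10 further errors.
 */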

static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
        struct thresh_restart tr = {
                .b                      = b,
                .set_lvt_off            = 1,
                .lvt_off                = offset,
        };

        b->threshold_limit              = THRESHOLD_MAX;
        threshold_restart_bank(&tr);
}

static int setup_APIC_mce_threshold(int reserved, int new)
{
        if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
                                              APIC_EILVT_MSG_FIX, 0))
                return new;

        return reserved;
}

static int setup_APIC_deferred_error(int reserved, int new)
{
        if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR,
                                              APIC_EILVT_MSG_FIX, 0))
                return new;

        return reserved;
}

static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
{
        u32 low = 0, high = 0;
        int def_offset = -1, def_new;

        if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
                return;

        def_new = (low & MASK_DEF_LVTOFF) >> 4;
        if (!(low & MASK_DEF_LVTOFF)) {
                pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n");
                def_new = DEF_LVT_OFF;
                low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
        }

        def_offset = setup_APIC_deferred_error(def_offset, def_new);
        if ((def_offset == def_new) &&
            (deferred_error_int_vector != amd_deferred_error_interrupt))
                deferred_error_int_vector = amd_deferred_error_interrupt;

        low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;
        wrmsr(MSR_CU_DEF_ERR, low, high);
}

static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
                             unsigned int bank, unsigned int block)
{
        u32 addr = 0, offset = 0;

        if (mce_flags.smca) {
                if (!block) {
                        addr = MSR_AMD64_SMCA_MCx_MISC(bank);
                } else {
                        /*
                         * On SMCA-enabled processors, the BLKPTR field of the
                         * first MISC register (MCx_MISC0) indicates the
                         * presence of the additional MISC register set
                         * (MISC1-4).
                         */
                        u32 low, high;

                        if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
                                return addr;

                        if (!(low & MCI_CONFIG_MCAX))
                                return addr;

                        if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
                            (low & MASK_BLKPTR_LO))
                                addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
                }
                return addr;
        }

        /* Fall back to the method we used for older processors: */
        switch (block) {
        case 0:
                addr = msr_ops.misc(bank);
                break;
        case 1:
                offset = ((low & MASK_BLKPTR_LO) >> 21);
                if (offset)
                        addr = MCG_XBLK_ADDR + offset;
                break;
        default:
                addr = ++current_addr;
        }
        return addr;
}
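
/*
 * Editorial note on the legacy path above: BLKPTR lives in MCx_MISC0[31:24],
 * so ((low & MASK_BLKPTR_LO) >> 21) is the field value multiplied by 8.
 * E.g. a BLKPTR of 1 yields offset 8 and addr 0xC0000408, consistent with
 * the extended bank 4 MSRs handled in bank4_names().
 */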

static int
prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
                        int offset, u32 misc_high)
{
        unsigned int cpu = smp_processor_id();
        u32 smca_low, smca_high, smca_addr;
        struct threshold_block b;
        int new;

        if (!block)
                per_cpu(bank_map, cpu) |= (1 << bank);

        memset(&b, 0, sizeof(b));
        b.cpu                   = cpu;
        b.bank                  = bank;
        b.block                 = block;
        b.address               = addr;
        b.interrupt_capable     = lvt_interrupt_supported(bank, misc_high);

        if (!b.interrupt_capable)
                goto done;

        b.interrupt_enable = 1;

        if (!mce_flags.smca) {
                new = (misc_high & MASK_LVTOFF_HI) >> 20;
                goto set_offset;
        }

        smca_addr = MSR_AMD64_SMCA_MCx_CONFIG(bank);

        if (!rdmsr_safe(smca_addr, &smca_low, &smca_high)) {
                /*
                 * OS is required to set the MCAX bit to acknowledge that it is
                 * now using the new MSR ranges and new registers under each
                 * bank. It also means that the OS will configure deferred
                 * errors in the new MCx_CONFIG register. If the bit is not set,
                 * uncorrectable errors will cause a system panic.
                 *
                 * MCA_CONFIG[MCAX] is bit 32 (bit 0 in the high portion of the
                 * MSR).
                 */
                smca_high |= BIT(0);

                /*
                 * SMCA logs Deferred Error information in MCA_DE{STAT,ADDR}
                 * registers with the option of additionally logging to
                 * MCA_{STATUS,ADDR} if MCA_CONFIG[LogDeferredInMcaStat] is set.
                 *
                 * This bit is usually set by BIOS to retain the old behavior
                 * for OSes that don't use the new registers. Linux supports the
                 * new registers so let's disable that additional logging here.
                 *
                 * MCA_CONFIG[LogDeferredInMcaStat] is bit 34 (bit 2 in the high
                 * portion of the MSR).
                 */
                smca_high &= ~BIT(2);

                /*
                 * SMCA sets the Deferred Error Interrupt type per bank.
                 *
                 * MCA_CONFIG[DeferredIntTypeSupported] is bit 5, and tells us
                 * if the DeferredIntType bit field is available.
                 *
                 * MCA_CONFIG[DeferredIntType] is bits [38:37] ([6:5] in the
                 * high portion of the MSR). OS should set this to 0x1 to enable
                 * APIC based interrupt. First, check that no interrupt has been
                 * set.
                 */
                if ((smca_low & BIT(5)) && !((smca_high >> 5) & 0x3))
                        smca_high |= BIT(5);

                wrmsr(smca_addr, smca_low, smca_high);
        }

        /* Gather LVT offset for thresholding: */
        if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
                goto out;

        new = (smca_low & SMCA_THR_LVT_OFF) >> 12;

set_offset:
        offset = setup_APIC_mce_threshold(offset, new);

        if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt))
                mce_threshold_vector = amd_threshold_interrupt;

done:
        mce_threshold_block_init(&b, offset);

out:
        return offset;
}

/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
        u32 low = 0, high = 0, address = 0;
        unsigned int bank, block, cpu = smp_processor_id();
        int offset = -1;

        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                if (mce_flags.smca)
                        get_smca_bank_info(bank);

                for (block = 0; block < NR_BLOCKS; ++block) {
                        address = get_block_address(cpu, address, low, high, bank, block);
                        if (!address)
                                break;

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI))
                                continue;

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        offset = prepare_threshold_block(bank, block, address, offset, high);
                }
        }

        if (mce_flags.succor)
                deferred_error_interrupt_enable(c);
}

int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
{
        u64 dram_base_addr, dram_limit_addr, dram_hole_base;
        /* We start from the normalized address */
        u64 ret_addr = norm_addr;

        u32 tmp;

        u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
        u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
        u8 intlv_addr_sel, intlv_addr_bit;
        u8 num_intlv_bits, hashed_bit;
        u8 lgcy_mmio_hole_en, base = 0;
        u8 cs_mask, cs_id = 0;
        bool hash_enabled = false;

        /* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
        if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp))
                goto out_err;

        /* Remove HiAddrOffset from normalized address, if enabled: */
        if (tmp & BIT(0)) {
                u64 hi_addr_offset = (tmp & GENMASK_ULL(31, 20)) << 8;

                if (norm_addr >= hi_addr_offset) {
                        ret_addr -= hi_addr_offset;
                        base = 1;
                }
        }

        /* Read D18F0x110 (DramBaseAddress). */
        if (amd_df_indirect_read(nid, 0, 0x110 + (8 * base), umc, &tmp))
                goto out_err;

        /* Check if address range is valid. */
        if (!(tmp & BIT(0))) {
                pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
                        __func__, tmp);
                goto out_err;
        }

        lgcy_mmio_hole_en = tmp & BIT(1);
        intlv_num_chan    = (tmp >> 4) & 0xF;
        intlv_addr_sel    = (tmp >> 8) & 0x7;
        dram_base_addr    = (tmp & GENMASK_ULL(31, 12)) << 16;

        /* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
        if (intlv_addr_sel > 3) {
                pr_err("%s: Invalid interleave address select %d.\n",
                        __func__, intlv_addr_sel);
                goto out_err;
        }

        /* Read D18F0x114 (DramLimitAddress). */
        if (amd_df_indirect_read(nid, 0, 0x114 + (8 * base), umc, &tmp))
                goto out_err;

        intlv_num_sockets = (tmp >> 8) & 0x1;
        intlv_num_dies    = (tmp >> 10) & 0x3;
        dram_limit_addr   = ((tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);

        intlv_addr_bit = intlv_addr_sel + 8;

        /* Re-use intlv_num_chan by setting it equal to log2(#channels) */
        switch (intlv_num_chan) {
        case 0: intlv_num_chan = 0; break;
        case 1: intlv_num_chan = 1; break;
        case 3: intlv_num_chan = 2; break;
        case 5: intlv_num_chan = 3; break;
        case 7: intlv_num_chan = 4; break;

        case 8: intlv_num_chan = 1;
                hash_enabled = true;
                break;
        default:
                pr_err("%s: Invalid number of interleaved channels %d.\n",
                        __func__, intlv_num_chan);
                goto out_err;
        }

        num_intlv_bits = intlv_num_chan;

        if (intlv_num_dies > 2) {
                pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
                        __func__, intlv_num_dies);
                goto out_err;
        }

        num_intlv_bits += intlv_num_dies;

        /* Add a bit if sockets are interleaved. */
        num_intlv_bits += intlv_num_sockets;

        /* Assert num_intlv_bits <= 4 */
        if (num_intlv_bits > 4) {
                pr_err("%s: Invalid interleave bits %d.\n",
                        __func__, num_intlv_bits);
                goto out_err;
        }

        if (num_intlv_bits > 0) {
                u64 temp_addr_x, temp_addr_i, temp_addr_y;
                u8 die_id_bit, sock_id_bit, cs_fabric_id;

                /*
                 * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
                 * This is the fabric id for this coherent slave. Use
                 * umc/channel# as instance id of the coherent slave
                 * for FICAA.
                 */
                if (amd_df_indirect_read(nid, 0, 0x50, umc, &tmp))
                        goto out_err;

                cs_fabric_id = (tmp >> 8) & 0xFF;
                die_id_bit   = 0;

                /* If interleaved over more than 1 channel: */
                if (intlv_num_chan) {
                        die_id_bit = intlv_num_chan;
                        cs_mask    = (1 << die_id_bit) - 1;
                        cs_id      = cs_fabric_id & cs_mask;
                }

                sock_id_bit = die_id_bit;

                /* Read D18F1x208 (SystemFabricIdMask). */
                if (intlv_num_dies || intlv_num_sockets)
                        if (amd_df_indirect_read(nid, 1, 0x208, umc, &tmp))
                                goto out_err;

                /* If interleaved over more than 1 die. */
                if (intlv_num_dies) {
                        sock_id_bit  = die_id_bit + intlv_num_dies;
                        die_id_shift = (tmp >> 24) & 0xF;
                        die_id_mask  = (tmp >> 8) & 0xFF;

                        cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
                }

                /* If interleaved over more than 1 socket. */
                if (intlv_num_sockets) {
                        socket_id_shift = (tmp >> 28) & 0xF;
                        socket_id_mask  = (tmp >> 16) & 0xFF;

                        cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
                }

                /*
                 * The pre-interleaved address consists of XXXXXXIIIYYYYY
                 * where III is the ID for this CS, and XXXXXXYYYYY are the
                 * address bits from the post-interleaved address.
                 * "num_intlv_bits" has been calculated to tell us how many "I"
                 * bits there are. "intlv_addr_bit" tells us how many "Y" bits
                 * there are (where "I" starts).
                 */
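                /*
                 * Worked example (editorial): with intlv_addr_bit == 8,
                 * num_intlv_bits == 1, cs_id == 1 and ret_addr == 0x1234:
                 *   Y = 0x1234 & 0xFF            = 0x34
                 *   I = 1 << 8                   = 0x100
                 *   X = (0x1234 & ~0xFFULL) << 1 = 0x2400
                 * giving a pre-interleaved address of 0x2534.
                 */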
                temp_addr_y = ret_addr & GENMASK_ULL(intlv_addr_bit-1, 0);
                temp_addr_i = (cs_id << intlv_addr_bit);
                temp_addr_x = (ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
                ret_addr    = temp_addr_x | temp_addr_i | temp_addr_y;
        }

        /* Add dram base address */
        ret_addr += dram_base_addr;

        /* If legacy MMIO hole enabled */
        if (lgcy_mmio_hole_en) {
                if (amd_df_indirect_read(nid, 0, 0x104, umc, &tmp))
                        goto out_err;

                dram_hole_base = tmp & GENMASK(31, 24);
                if (ret_addr >= dram_hole_base)
                        ret_addr += (BIT_ULL(32) - dram_hole_base);
        }

        if (hash_enabled) {
                /* Save some parentheses and grab ls-bit at the end. */
                hashed_bit =    (ret_addr >> 12) ^
                                (ret_addr >> 18) ^
                                (ret_addr >> 21) ^
                                (ret_addr >> 30) ^
                                cs_id;

                hashed_bit &= BIT(0);

                if (hashed_bit != ((ret_addr >> intlv_addr_bit) & BIT(0)))
                        ret_addr ^= BIT(intlv_addr_bit);
        }
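
        /*
         * Editorial note: hashed_bit above is the XOR of address bits 12, 18,
         * 21 and 30 with cs_id bit 0. If it disagrees with the interleave
         * address bit already present in ret_addr, that bit is flipped so the
         * hash of the final system address is self-consistent.
         */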

        /* Is the calculated system address above the DRAM limit address? */
        if (ret_addr > dram_limit_addr)
                goto out_err;

        *sys_addr = ret_addr;
        return 0;

out_err:
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(umc_normaddr_to_sysaddr);

static void
__log_error(unsigned int bank, bool deferred_err, bool threshold_err, u64 misc)
{
        u32 msr_status = msr_ops.status(bank);
        u32 msr_addr = msr_ops.addr(bank);
        struct mce m;
        u64 status;

        WARN_ON_ONCE(deferred_err && threshold_err);

        if (deferred_err && mce_flags.smca) {
                msr_status = MSR_AMD64_SMCA_MCx_DESTAT(bank);
                msr_addr = MSR_AMD64_SMCA_MCx_DEADDR(bank);
        }

        rdmsrl(msr_status, status);

        if (!(status & MCI_STATUS_VAL))
                return;

        mce_setup(&m);

        m.status = status;
        m.bank   = bank;
        m.tsc    = rdtsc();

        if (threshold_err)
                m.misc = misc;

        if (m.status & MCI_STATUS_ADDRV) {
                rdmsrl(msr_addr, m.addr);

                /*
                 * Extract [55:<lsb>] where lsb is the least significant
                 * *valid* bit of the address bits.
                 */
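                /*
                 * For example (editorial): lsb == 12 (4KB granularity)
                 * keeps m.addr[55:12] and clears the in-page offset bits,
                 * which are not valid for that error.
                 */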
                if (mce_flags.smca) {
                        u8 lsb = (m.addr >> 56) & 0x3f;

                        m.addr &= GENMASK_ULL(55, lsb);
                }
        }

        if (mce_flags.smca) {
                rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m.ipid);

                if (m.status & MCI_STATUS_SYNDV)
                        rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m.synd);
        }

        mce_log(&m);

        wrmsrl(msr_status, 0);
}

static inline void __smp_deferred_error_interrupt(void)
{
        inc_irq_stat(irq_deferred_error_count);
        deferred_error_int_vector();
}

asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
{
        entering_irq();
        __smp_deferred_error_interrupt();
        exiting_ack_irq();
}

asmlinkage __visible void __irq_entry smp_trace_deferred_error_interrupt(void)
{
        entering_irq();
        trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
        __smp_deferred_error_interrupt();
        trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
        exiting_ack_irq();
}

/* APIC interrupt handler for deferred errors */
static void amd_deferred_error_interrupt(void)
{
        unsigned int bank;
        u32 msr_status;
        u64 status;

        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                msr_status = (mce_flags.smca) ? MSR_AMD64_SMCA_MCx_DESTAT(bank)
                                              : msr_ops.status(bank);

                rdmsrl(msr_status, status);

                if (!(status & MCI_STATUS_VAL) ||
                    !(status & MCI_STATUS_DEFERRED))
                        continue;

                __log_error(bank, true, false, 0);
                break;
        }
}

/*
 * APIC Interrupt Handler
 */

/*
 * The threshold interrupt handler services THRESHOLD_APIC_VECTOR.
 * The interrupt fires when error_count reaches threshold_limit.
 * The handler simply logs the event via mcelog with a software-defined
 * bank number.
 */

static void amd_threshold_interrupt(void)
{
        u32 low = 0, high = 0, address = 0;
        unsigned int bank, block, cpu = smp_processor_id();
        struct thresh_restart tr;

        /* assume first bank caused it */
        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                for (block = 0; block < NR_BLOCKS; ++block) {
                        address = get_block_address(cpu, address, low, high, bank, block);
                        if (!address)
                                break;

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI)) {
                                if (block)
                                        continue;
                                else
                                        break;
                        }

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        /*
                         * Log the machine check that caused the threshold
                         * event.
                         */
                        if (high & MASK_OVERFLOW_HI)
                                goto log;
                }
        }
        return;

log:
        __log_error(bank, false, true, ((u64)high << 32) | low);

        /* Reset threshold block after logging error. */
        memset(&tr, 0, sizeof(tr));
        tr.b = &per_cpu(threshold_banks, cpu)[bank]->blocks[block];
        threshold_restart_bank(&tr);
}

/*
 * Sysfs Interface
 */

struct threshold_attr {
        struct attribute attr;
        ssize_t (*show) (struct threshold_block *, char *);
        ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)                                               \
static ssize_t show_ ## name(struct threshold_block *b, char *buf)     \
{                                                                       \
        return sprintf(buf, "%lu\n", (unsigned long) b->name);         \
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
        struct thresh_restart tr;
        unsigned long new;

        if (!b->interrupt_capable)
                return -EINVAL;

        if (kstrtoul(buf, 0, &new) < 0)
                return -EINVAL;

        b->interrupt_enable = !!new;

        memset(&tr, 0, sizeof(tr));
        tr.b            = b;
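
        /*
         * Editorial note: threshold_restart_bank() programs the MSR with
         * plain rdmsr()/wrmsr(), which act on the local CPU, so it must be
         * run on the CPU that owns this block.
         */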
        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

        return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
        struct thresh_restart tr;
        unsigned long new;

        if (kstrtoul(buf, 0, &new) < 0)
                return -EINVAL;

        if (new > THRESHOLD_MAX)
                new = THRESHOLD_MAX;
        if (new < 1)
                new = 1;

        memset(&tr, 0, sizeof(tr));
        tr.old_limit = b->threshold_limit;
        b->threshold_limit = new;
        tr.b = b;

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

        return size;
}

static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
        u32 lo, hi;

        rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);

        return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
                                     (THRESHOLD_MAX - b->threshold_limit)));
}
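
/*
 * Worked example (editorial): threshold_restart_bank() seeds the hardware
 * counter with THRESHOLD_MAX - threshold_limit, so after N errors the
 * counter field reads (THRESHOLD_MAX - threshold_limit) + N and the
 * expression above yields exactly N.
 */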

static struct threshold_attr error_count = {
        .attr = {.name = __stringify(error_count), .mode = 0444 },
        .show = show_error_count,
};

#define RW_ATTR(val)                                                    \
static struct threshold_attr val = {                                    \
        .attr   = {.name = __stringify(val), .mode = 0644 },            \
        .show   = show_## val,                                          \
        .store  = store_## val,                                         \
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);

static struct attribute *default_attrs[] = {
        &threshold_limit.attr,
        &error_count.attr,
        NULL,   /* possibly interrupt_enable if supported, see below */
        NULL,
};

#define to_block(k)     container_of(k, struct threshold_block, kobj)
#define to_attr(a)      container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->show ? a->show(b, buf) : -EIO;

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->store ? a->store(b, buf, count) : -EIO;

        return ret;
}

static const struct sysfs_ops threshold_ops = {
        .show                   = show,
        .store                  = store,
};

static struct kobj_type threshold_ktype = {
        .sysfs_ops              = &threshold_ops,
        .default_attrs          = default_attrs,
};

static const char *get_name(unsigned int bank, struct threshold_block *b)
{
        unsigned int bank_type;

        if (!mce_flags.smca) {
                if (b && bank == 4)
                        return bank4_names(b);

                return th_names[bank];
        }

        if (!smca_banks[bank].hwid)
                return NULL;

        bank_type = smca_banks[bank].hwid->bank_type;

        if (b && bank_type == SMCA_UMC) {
                if (b->block < ARRAY_SIZE(smca_umc_block_names))
                        return smca_umc_block_names[b->block];
                return NULL;
        }

        if (smca_banks[bank].hwid->count == 1)
                return smca_get_name(bank_type);

        snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN,
                 "%s_%x", smca_get_name(bank_type),
                          smca_banks[bank].sysfs_id);
        return buf_mcatype;
}

static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
                                     unsigned int block, u32 address)
{
        struct threshold_block *b = NULL;
        u32 low, high;
        int err;

        if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
                return 0;

        if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
                return 0;

        if (!(high & MASK_VALID_HI)) {
                if (block)
                        goto recurse;
                else
                        return 0;
        }

        if (!(high & MASK_CNTP_HI)  ||
             (high & MASK_LOCKED_HI))
                goto recurse;

        b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
        if (!b)
                return -ENOMEM;

        b->block                = block;
        b->bank                 = bank;
        b->cpu                  = cpu;
        b->address              = address;
        b->interrupt_enable     = 0;
        b->interrupt_capable    = lvt_interrupt_supported(bank, high);
        b->threshold_limit      = THRESHOLD_MAX;

        if (b->interrupt_capable) {
                threshold_ktype.default_attrs[2] = &interrupt_enable.attr;
                b->interrupt_enable = 1;
        } else {
                threshold_ktype.default_attrs[2] = NULL;
        }

        INIT_LIST_HEAD(&b->miscj);

        if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
                list_add(&b->miscj,
                         &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
        } else {
                per_cpu(threshold_banks, cpu)[bank]->blocks = b;
        }

        err = kobject_init_and_add(&b->kobj, &threshold_ktype,
                                   per_cpu(threshold_banks, cpu)[bank]->kobj,
                                   get_name(bank, b));
        if (err)
                goto out_free;
recurse:
        address = get_block_address(cpu, address, low, high, bank, ++block);
        if (!address)
                return 0;

        err = allocate_threshold_blocks(cpu, bank, block, address);
        if (err)
                goto out_free;

        if (b)
                kobject_uevent(&b->kobj, KOBJ_ADD);

        return err;

out_free:
        if (b) {
                kobject_put(&b->kobj);
                list_del(&b->miscj);
                kfree(b);
        }
        return err;
}

static int __threshold_add_blocks(struct threshold_bank *b)
{
        struct list_head *head = &b->blocks->miscj;
        struct threshold_block *pos = NULL;
        struct threshold_block *tmp = NULL;
        int err = 0;

        err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
        if (err)
                return err;

        list_for_each_entry_safe(pos, tmp, head, miscj) {

                err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
                if (err) {
                        list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
                                kobject_del(&pos->kobj);

                        return err;
                }
        }
        return err;
}

static int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
        struct device *dev = per_cpu(mce_device, cpu);
        struct amd_northbridge *nb = NULL;
        struct threshold_bank *b = NULL;
        const char *name = get_name(bank, NULL);
        int err = 0;

        if (!dev)
                return -ENODEV;

        if (is_shared_bank(bank)) {
                nb = node_to_amd_nb(amd_get_nb_id(cpu));

                /* threshold descriptor already initialized on this node? */
                if (nb && nb->bank4) {
                        /* yes, use it */
                        b = nb->bank4;
                        err = kobject_add(b->kobj, &dev->kobj, name);
                        if (err)
                                goto out;

                        per_cpu(threshold_banks, cpu)[bank] = b;
                        atomic_inc(&b->cpus);

                        err = __threshold_add_blocks(b);

                        goto out;
                }
        }

        b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
        if (!b) {
                err = -ENOMEM;
                goto out;
        }

        b->kobj = kobject_create_and_add(name, &dev->kobj);
        if (!b->kobj) {
                err = -EINVAL;
                goto out_free;
        }

        per_cpu(threshold_banks, cpu)[bank] = b;

        if (is_shared_bank(bank)) {
                atomic_set(&b->cpus, 1);

                /* nb is already initialized, see above */
                if (nb) {
                        WARN_ON(nb->bank4);
                        nb->bank4 = b;
                }
        }

        err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
        if (!err)
                goto out;

out_free:
        kfree(b);

out:
        return err;
}

static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
{
        struct threshold_block *pos = NULL;
        struct threshold_block *tmp = NULL;
        struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

        if (!head)
                return;

        list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
                kobject_put(&pos->kobj);
                list_del(&pos->miscj);
                kfree(pos);
        }

        kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
        per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

static void __threshold_remove_blocks(struct threshold_bank *b)
{
        struct threshold_block *pos = NULL;
        struct threshold_block *tmp = NULL;

        kobject_del(b->kobj);

        list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
                kobject_del(&pos->kobj);
}

static void threshold_remove_bank(unsigned int cpu, int bank)
{
        struct amd_northbridge *nb;
        struct threshold_bank *b;

        b = per_cpu(threshold_banks, cpu)[bank];
        if (!b)
                return;

        if (!b->blocks)
                goto free_out;

        if (is_shared_bank(bank)) {
                if (!atomic_dec_and_test(&b->cpus)) {
                        __threshold_remove_blocks(b);
                        per_cpu(threshold_banks, cpu)[bank] = NULL;
                        return;
                } else {
                        /*
                         * the last CPU on this node using the shared bank is
                         * going away, remove that bank now.
                         */
                        nb = node_to_amd_nb(amd_get_nb_id(cpu));
                        nb->bank4 = NULL;
                }
        }

        deallocate_threshold_block(cpu, bank);

free_out:
        kobject_del(b->kobj);
        kobject_put(b->kobj);
        kfree(b);
        per_cpu(threshold_banks, cpu)[bank] = NULL;
}

int mce_threshold_remove_device(unsigned int cpu)
{
        unsigned int bank;

        if (!thresholding_en)
                return 0;

        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                threshold_remove_bank(cpu, bank);
        }
        kfree(per_cpu(threshold_banks, cpu));
        per_cpu(threshold_banks, cpu) = NULL;
        return 0;
}

/* create dir/files for all valid threshold banks */
int mce_threshold_create_device(unsigned int cpu)
{
        unsigned int bank;
        struct threshold_bank **bp;
        int err = 0;

        if (!thresholding_en)
                return 0;

        bp = per_cpu(threshold_banks, cpu);
        if (bp)
                return 0;

        bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
                     GFP_KERNEL);
        if (!bp)
                return -ENOMEM;

        per_cpu(threshold_banks, cpu) = bp;

        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                err = threshold_create_bank(cpu, bank);
                if (err)
                        goto err;
        }
        return err;
err:
        mce_threshold_remove_device(cpu);
        return err;
}

static __init int threshold_init_device(void)
{
        unsigned lcpu = 0;

        if (mce_threshold_vector == amd_threshold_interrupt)
                thresholding_en = true;

        /* to hit CPUs online before the notifier is up */
        for_each_online_cpu(lcpu) {
                int err = mce_threshold_create_device(lcpu);

                if (err)
                        return err;
        }

        return 0;
}
/*
 * There are three functions which need to be _initcall'd in a specific order:
 * 1. xen_late_init_mcelog
 * 2. mcheck_init_device
 * 3. threshold_init_device
 *
 * xen_late_init_mcelog must register xen_mce_chrdev_device before
 * native mce_chrdev_device registration if running under the xen platform.
 *
 * mcheck_init_device must run before threshold_init_device so that
 * mce_device is initialized; otherwise a NULL pointer dereference will
 * cause a panic.
 *
 * So we use the following _initcalls:
 * 1. device_initcall(xen_late_init_mcelog);
 * 2. device_initcall_sync(mcheck_init_device);
 * 3. late_initcall(threshold_init_device);
 *
 * When running under xen, the initcall order is 1, 2, 3;
 * on baremetal, we skip 1 and do only 2 and 3.
 */
late_initcall(threshold_init_device);