/* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
 *
 * This driver supports the memory controllers found on the Intel
 * processor family Sandy Bridge.
 *
 * This file may be distributed under the terms of the
 * GNU General Public License version 2 only.
 *
 * Copyright (c) 2011 by:
 *       Mauro Carvalho Chehab
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>

#include "edac_module.h"

/* Static vars */
static LIST_HEAD(sbridge_edac_list);

/*
 * Alter this version for the module when modifications are made
 */
#define SBRIDGE_REVISION    " Ver: 1.1.2 "
#define EDAC_MOD_STR      "sbridge_edac"

/*
 * Debug macros
 */
#define sbridge_printk(level, fmt, arg...)                      \
        edac_printk(level, "sbridge", fmt, ##arg)

#define sbridge_mc_printk(mci, level, fmt, arg...)              \
        edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)

/*
 * Get a bit field at register value <v>, from bit <lo> to bit <hi>
 */
#define GET_BITFIELD(v, lo, hi) \
        (((v) & GENMASK_ULL(hi, lo)) >> (lo))
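
/*
 * Worked example (editor's illustration, not from the original source):
 * the bit range is inclusive on both ends, so GET_BITFIELD(0xabcd, 4, 7)
 * masks with GENMASK_ULL(7, 4) == 0xf0 and shifts right by 4:
 *
 *      0xabcd & 0xf0 == 0xc0,  0xc0 >> 4 == 0xc
 */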

/* Devices 12 Function 6, Offsets 0x80 to 0xcc */
static const u32 sbridge_dram_rule[] = {
        0x80, 0x88, 0x90, 0x98, 0xa0,
        0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
};

static const u32 ibridge_dram_rule[] = {
        0x60, 0x68, 0x70, 0x78, 0x80,
        0x88, 0x90, 0x98, 0xa0, 0xa8,
        0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
        0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
};

static const u32 knl_dram_rule[] = {
        0x60, 0x68, 0x70, 0x78, 0x80, /* 0-4 */
        0x88, 0x90, 0x98, 0xa0, 0xa8, /* 5-9 */
        0xb0, 0xb8, 0xc0, 0xc8, 0xd0, /* 10-14 */
        0xd8, 0xe0, 0xe8, 0xf0, 0xf8, /* 15-19 */
        0x100, 0x108, 0x110, 0x118,   /* 20-23 */
};

#define DRAM_RULE_ENABLE(reg)   GET_BITFIELD(reg, 0,  0)
#define A7MODE(reg)             GET_BITFIELD(reg, 26, 26)

static char *show_dram_attr(u32 attr)
{
        switch (attr) {
                case 0:
                        return "DRAM";
                case 1:
                        return "MMCFG";
                case 2:
                        return "NXM";
                default:
                        return "unknown";
        }
}

static const u32 sbridge_interleave_list[] = {
        0x84, 0x8c, 0x94, 0x9c, 0xa4,
        0xac, 0xb4, 0xbc, 0xc4, 0xcc,
};

static const u32 ibridge_interleave_list[] = {
        0x64, 0x6c, 0x74, 0x7c, 0x84,
        0x8c, 0x94, 0x9c, 0xa4, 0xac,
        0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
        0xdc, 0xe4, 0xec, 0xf4, 0xfc,
};

static const u32 knl_interleave_list[] = {
        0x64, 0x6c, 0x74, 0x7c, 0x84, /* 0-4 */
        0x8c, 0x94, 0x9c, 0xa4, 0xac, /* 5-9 */
        0xb4, 0xbc, 0xc4, 0xcc, 0xd4, /* 10-14 */
        0xdc, 0xe4, 0xec, 0xf4, 0xfc, /* 15-19 */
        0x104, 0x10c, 0x114, 0x11c,   /* 20-23 */
};

struct interleave_pkg {
        unsigned char start;
        unsigned char end;
};

static const struct interleave_pkg sbridge_interleave_pkg[] = {
        { 0, 2 },
        { 3, 5 },
        { 8, 10 },
        { 11, 13 },
        { 16, 18 },
        { 19, 21 },
        { 24, 26 },
        { 27, 29 },
};

static const struct interleave_pkg ibridge_interleave_pkg[] = {
        { 0, 3 },
        { 4, 7 },
        { 8, 11 },
        { 12, 15 },
        { 16, 19 },
        { 20, 23 },
        { 24, 27 },
        { 28, 31 },
};

static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
                          int interleave)
{
        return GET_BITFIELD(reg, table[interleave].start,
                            table[interleave].end);
}
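
/*
 * Worked example (editor's illustration): sbridge_interleave_pkg[2] is
 * { 8, 10 }, so sad_pkg(sbridge_interleave_pkg, reg, 2) returns bits 10:8
 * of the interleave register; for reg == 0x00000700 that is 0x7.
 */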

/* Devices 12 Function 7 */

#define TOLM            0x80
#define TOHM            0x84
#define HASWELL_TOLM    0xd0
#define HASWELL_TOHM_0  0xd4
#define HASWELL_TOHM_1  0xd8
#define KNL_TOLM        0xd0
#define KNL_TOHM_0      0xd4
#define KNL_TOHM_1      0xd8

#define GET_TOLM(reg)           ((GET_BITFIELD(reg, 0,  3) << 28) | 0x3ffffff)
#define GET_TOHM(reg)           ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
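
/*
 * Worked example (editor's illustration): GET_TOLM() places the register's
 * bits 3:0 at bits 31:28 of the result and fills the low 26 bits with 1s,
 * so a TOLM register whose low nibble is 0x2 decodes as:
 *
 *      GET_TOLM(reg) == (0x2 << 28) | 0x3ffffff == 0x23ffffff
 */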

/* Device 13 Function 6 */

#define SAD_TARGET      0xf0

#define SOURCE_ID(reg)          GET_BITFIELD(reg, 9, 11)

#define SOURCE_ID_KNL(reg)      GET_BITFIELD(reg, 12, 14)

#define SAD_CONTROL     0xf4

/* Device 14 function 0 */

static const u32 tad_dram_rule[] = {
        0x40, 0x44, 0x48, 0x4c,
        0x50, 0x54, 0x58, 0x5c,
        0x60, 0x64, 0x68, 0x6c,
};
#define MAX_TAD ARRAY_SIZE(tad_dram_rule)

#define TAD_LIMIT(reg)          ((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
#define TAD_SOCK(reg)           GET_BITFIELD(reg, 10, 11)
#define TAD_CH(reg)             GET_BITFIELD(reg,  8,  9)
#define TAD_TGT3(reg)           GET_BITFIELD(reg,  6,  7)
#define TAD_TGT2(reg)           GET_BITFIELD(reg,  4,  5)
#define TAD_TGT1(reg)           GET_BITFIELD(reg,  2,  3)
#define TAD_TGT0(reg)           GET_BITFIELD(reg,  0,  1)

/* Device 15, function 0 */

#define MCMTR                   0x7c
#define KNL_MCMTR               0x624

#define IS_ECC_ENABLED(mcmtr)           GET_BITFIELD(mcmtr, 2, 2)
#define IS_LOCKSTEP_ENABLED(mcmtr)      GET_BITFIELD(mcmtr, 1, 1)
#define IS_CLOSE_PG(mcmtr)              GET_BITFIELD(mcmtr, 0, 0)

/* Device 15, function 1 */

#define RASENABLES              0xac
#define IS_MIRROR_ENABLED(reg)          GET_BITFIELD(reg, 0, 0)

/* Device 15, functions 2-5 */

static const int mtr_regs[] = {
        0x80, 0x84, 0x88,
};

static const int knl_mtr_reg = 0xb60;

#define RANK_DISABLE(mtr)               GET_BITFIELD(mtr, 16, 19)
#define IS_DIMM_PRESENT(mtr)            GET_BITFIELD(mtr, 14, 14)
#define RANK_CNT_BITS(mtr)              GET_BITFIELD(mtr, 12, 13)
#define RANK_WIDTH_BITS(mtr)            GET_BITFIELD(mtr, 2, 4)
#define COL_WIDTH_BITS(mtr)             GET_BITFIELD(mtr, 0, 1)

static const u32 tad_ch_nilv_offset[] = {
        0x90, 0x94, 0x98, 0x9c,
        0xa0, 0xa4, 0xa8, 0xac,
        0xb0, 0xb4, 0xb8, 0xbc,
};
#define CHN_IDX_OFFSET(reg)             GET_BITFIELD(reg, 28, 29)
#define TAD_OFFSET(reg)                 (GET_BITFIELD(reg,  6, 25) << 26)

static const u32 rir_way_limit[] = {
        0x108, 0x10c, 0x110, 0x114, 0x118,
};
#define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)

#define IS_RIR_VALID(reg)       GET_BITFIELD(reg, 31, 31)
#define RIR_WAY(reg)            GET_BITFIELD(reg, 28, 29)

#define MAX_RIR_WAY     8

static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
        { 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
        { 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
        { 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
        { 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
        { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
};

#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
        GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))

#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
        GET_BITFIELD(reg,  2, 15) : GET_BITFIELD(reg,  2, 14))
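
/*
 * Worked example (editor's illustration): the rank target field moved on
 * Broadwell.  For reg == 0x00050000, RIR_RNK_TGT(IVY_BRIDGE, reg) reads
 * bits 19:16 and returns 0x5, while RIR_RNK_TGT(BROADWELL, reg) reads
 * bits 23:20 and returns 0x0.
 */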

/* Device 16, functions 2-7 */

/*
 * FIXME: Implement the error count reads directly
 */

static const u32 correrrcnt[] = {
        0x104, 0x108, 0x10c, 0x110,
};

#define RANK_ODD_OV(reg)                GET_BITFIELD(reg, 31, 31)
#define RANK_ODD_ERR_CNT(reg)           GET_BITFIELD(reg, 16, 30)
#define RANK_EVEN_OV(reg)               GET_BITFIELD(reg, 15, 15)
#define RANK_EVEN_ERR_CNT(reg)          GET_BITFIELD(reg,  0, 14)

static const u32 correrrthrsld[] = {
        0x11c, 0x120, 0x124, 0x128,
};

#define RANK_ODD_ERR_THRSLD(reg)        GET_BITFIELD(reg, 16, 30)
#define RANK_EVEN_ERR_THRSLD(reg)       GET_BITFIELD(reg,  0, 14)


/* Device 17, function 0 */

#define SB_RANK_CFG_A           0x0328

#define IB_RANK_CFG_A           0x0320

/*
 * sbridge structs
 */

#define NUM_CHANNELS            4       /* Max channels per MC */
#define MAX_DIMMS               3       /* Max DIMMS per channel */
#define KNL_MAX_CHAS            38      /* KNL max num. of Cache Home Agents */
#define KNL_MAX_CHANNELS        6       /* KNL max num. of PCI channels */
#define KNL_MAX_EDCS            8       /* Embedded DRAM controllers */
#define CHANNEL_UNSPECIFIED     0xf     /* Intel IA32 SDM 15-14 */

enum type {
        SANDY_BRIDGE,
        IVY_BRIDGE,
        HASWELL,
        BROADWELL,
        KNIGHTS_LANDING,
};

enum domain {
        IMC0 = 0,
        IMC1,
        SOCK,
};

enum mirroring_mode {
        NON_MIRRORING,
        ADDR_RANGE_MIRRORING,
        FULL_MIRRORING,
};

struct sbridge_pvt;
struct sbridge_info {
        enum type       type;
        u32             mcmtr;
        u32             rankcfgr;
        u64             (*get_tolm)(struct sbridge_pvt *pvt);
        u64             (*get_tohm)(struct sbridge_pvt *pvt);
        u64             (*rir_limit)(u32 reg);
        u64             (*sad_limit)(u32 reg);
        u32             (*interleave_mode)(u32 reg);
        u32             (*dram_attr)(u32 reg);
        const u32       *dram_rule;
        const u32       *interleave_list;
        const struct interleave_pkg *interleave_pkg;
        u8              max_sad;
        u8              max_interleave;
        u8              (*get_node_id)(struct sbridge_pvt *pvt);
        enum mem_type   (*get_memory_type)(struct sbridge_pvt *pvt);
        enum dev_type   (*get_width)(struct sbridge_pvt *pvt, u32 mtr);
        struct pci_dev  *pci_vtd;
};

struct sbridge_channel {
        u32             ranks;
        u32             dimms;
};

struct pci_id_descr {
        int                     dev_id;
        int                     optional;
        enum domain             dom;
};

struct pci_id_table {
        const struct pci_id_descr       *descr;
        int                             n_devs_per_imc;
        int                             n_devs_per_sock;
        int                             n_imcs_per_sock;
        enum type                       type;
};

struct sbridge_dev {
        struct list_head        list;
        u8                      bus, mc;
        u8                      node_id, source_id;
        struct pci_dev          **pdev;
        enum domain             dom;
        int                     n_devs;
        int                     i_devs;
        struct mem_ctl_info     *mci;
};

struct knl_pvt {
        struct pci_dev          *pci_cha[KNL_MAX_CHAS];
        struct pci_dev          *pci_channel[KNL_MAX_CHANNELS];
        struct pci_dev          *pci_mc0;
        struct pci_dev          *pci_mc1;
        struct pci_dev          *pci_mc0_misc;
        struct pci_dev          *pci_mc1_misc;
        struct pci_dev          *pci_mc_info; /* tolm, tohm */
};

struct sbridge_pvt {
        /* Devices per socket */
        struct pci_dev          *pci_ddrio;
        struct pci_dev          *pci_sad0, *pci_sad1;
        struct pci_dev          *pci_br0, *pci_br1;
        /* Devices per memory controller */
        struct pci_dev          *pci_ha, *pci_ta, *pci_ras;
        struct pci_dev          *pci_tad[NUM_CHANNELS];

        struct sbridge_dev      *sbridge_dev;

        struct sbridge_info     info;
        struct sbridge_channel  channel[NUM_CHANNELS];

        /* Memory type detection */
        bool                    is_cur_addr_mirrored, is_lockstep, is_close_pg;
        bool                    is_chan_hash;
        enum mirroring_mode     mirror_mode;

        /* Memory description */
        u64                     tolm, tohm;
        struct knl_pvt knl;
};

#define PCI_DESCR(device_id, opt, domain)       \
        .dev_id = (device_id),          \
        .optional = opt,        \
        .dom = domain

static const struct pci_id_descr pci_dev_descr_sbridge[] = {
                /* Processor Home Agent */
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0,   0, IMC0) },

                /* Memory controller */
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA,    0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS,   0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0,  0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1,  0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2,  0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3,  0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1, SOCK) },

                /* System Address Decoder */
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0,      0, SOCK) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1,      0, SOCK) },

                /* Broadcast Registers */
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR,        0, SOCK) },
};

#define PCI_ID_TABLE_ENTRY(A, N, M, T) {        \
        .descr = A,                     \
        .n_devs_per_imc = N,    \
        .n_devs_per_sock = ARRAY_SIZE(A),       \
        .n_imcs_per_sock = M,   \
        .type = T                       \
}

static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
        PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE),
        {0,}                    /* 0 terminated list. */
};

/* This changes depending on whether the part has 1HA or 2HA:
 * 1HA:
 *      0x0eb8 (17.0) is DDRIO0
 * 2HA:
 *      0x0ebc (17.4) is DDRIO0
 */
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0      0x0eb8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0      0x0ebc

/* pci ids */
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0             0x0ea0
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA          0x0ea8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS         0x0e71
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0        0x0eaa
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1        0x0eab
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2        0x0eac
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3        0x0ead
#define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD                 0x0ec8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0                 0x0ec9
#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1                 0x0eca
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1             0x0e60
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA          0x0e68
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS         0x0e79
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0        0x0e6a
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1        0x0e6b
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2        0x0e6c
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3        0x0e6d

static const struct pci_id_descr pci_dev_descr_ibridge[] = {
                /* Processor Home Agent */
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0,        0, IMC0) },

                /* Memory controller */
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA,     0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS,    0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0,   0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1,   0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2,   0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3,   0, IMC0) },

                /* Optional, mode 2HA */
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1,        1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA,     1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS,    1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0,   1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1,   1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2,   1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3,   1, IMC1) },

        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1, SOCK) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1, SOCK) },

                /* System Address Decoder */
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD,            0, SOCK) },

                /* Broadcast Registers */
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0,            1, SOCK) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1,            0, SOCK) },
};

static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
        PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE),
        {0,}                    /* 0 terminated list. */
};

/* Haswell support */
/* EN processor:
 *      - 1 IMC
 *      - 3 DDR3 channels, 2 DPC per channel
 * EP processor:
 *      - 1 or 2 IMC
 *      - 4 DDR4 channels, 3 DPC per channel
 * EP 4S processor:
 *      - 2 IMC
 *      - 4 DDR4 channels, 3 DPC per channel
 * EX processor:
 *      - 2 IMC
 *      - each IMC interfaces with an SMI 2 channel
 *      - each SMI channel interfaces with a scalable memory buffer
 *      - each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 */
#define HASWELL_DDRCRCLKCONTROLS 0xa10 /* Ditto on Broadwell */
#define HASWELL_HASYSDEFEATURE2 0x84
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0     0x2fa0
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1     0x2f60
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA  0x2fa8
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM  0x2f71
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA  0x2f68
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM  0x2f79
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1 0x2fbf
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2 0x2fb9
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb
static const struct pci_id_descr pci_dev_descr_haswell[] = {
        /* first item must be the HA */
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0,      0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1,      1, IMC1) },

        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA,   0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM,   0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1, IMC0) },

        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA,   1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM,   1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1, IMC1) },

        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0, SOCK) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0, SOCK) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0,   1, SOCK) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1,   1, SOCK) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2,   1, SOCK) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3,   1, SOCK) },
};

static const struct pci_id_table pci_dev_descr_haswell_table[] = {
        PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL),
        {0,}                    /* 0 terminated list. */
};

/* Knights Landing Support */
/*
 * KNL's memory channels are swizzled between memory controllers.
 * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2.
 */
#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
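
/*
 * Worked example (editor's illustration): local channel 1 of MC0 is
 * global CH4, i.e. knl_channel_remap(0, 1) == 4, while local channel 1
 * of MC1 stays CH1, i.e. knl_channel_remap(1, 1) == 1.
 */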

/* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_MC       0x7840
/* DRAM channel stuff; bank addrs, dimmmtr, etc.. 2-8-2 - 2-9-4 (6 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN     0x7843
/* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_TA       0x7844
/* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0     0x782a
/* SAD target - 1-29-1 (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1     0x782b
/* Caching / Home Agent */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHA      0x782c
/* Device with TOLM and TOHM, 0-5-0 (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM    0x7810
/*
 * KNL differs from SB, IB, and Haswell in that it has multiple
 * instances of the same device with the same device ID, so we handle that
 * by creating as many copies in the table as we expect to find.
 * (Entries with the same device ID must be grouped together.)
 */

static const struct pci_id_descr pci_dev_descr_knl[] = {
        [0 ... 1]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC,    0, IMC0) },
        [2 ... 7]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN,  0, IMC0) },
        [8]         = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA,    0, IMC0) },
        [9]         = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0, IMC0) },
        [10]        = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0,  0, SOCK) },
        [11]        = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1,  0, SOCK) },
        [12 ... 49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA,   0, SOCK) },
};

static const struct pci_id_table pci_dev_descr_knl_table[] = {
        PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING),
        {0,}
};

/*
 * Broadwell support
 *
 * DE processor:
 *      - 1 IMC
 *      - 2 DDR3 channels, 2 DPC per channel
 * EP processor:
 *      - 1 or 2 IMC
 *      - 4 DDR4 channels, 3 DPC per channel
 * EP 4S processor:
 *      - 2 IMC
 *      - 4 DDR4 channels, 3 DPC per channel
 * EX processor:
 *      - 2 IMC
 *      - each IMC interfaces with an SMI 2 channel
 *      - each SMI channel interfaces with a scalable memory buffer
 *      - each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 */
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC 0x6f28
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0   0x6fa0
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1   0x6f60
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA        0x6fa8
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM        0x6f71
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA        0x6f68
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM        0x6f79
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1 0x6fab
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2 0x6fac
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3 0x6fad
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 0x6f6a
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1 0x6f6b
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2 0x6f6c
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3 0x6f6d
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0 0x6faf

static const struct pci_id_descr pci_dev_descr_broadwell[] = {
        /* first item must be the HA */
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0,      0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1,      1, IMC1) },

        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA,   0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM,   0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1, IMC0) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1, IMC0) },

        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA,   1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM,   1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1, IMC1) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1, IMC1) },

        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0, SOCK) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0, SOCK) },
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0,   1, SOCK) },
};

static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
        PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL),
        {0,}                    /* 0 terminated list. */
};


/****************************************************************************
                        Ancillary status routines
 ****************************************************************************/

static inline int numrank(enum type type, u32 mtr)
{
        int ranks = (1 << RANK_CNT_BITS(mtr));
        int max = 4;

        if (type == HASWELL || type == BROADWELL || type == KNIGHTS_LANDING)
                max = 8;

        if (ranks > max) {
                edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
                         ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
                return -EINVAL;
        }

        return ranks;
}
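
/*
 * Worked example (editor's illustration): a raw RANK_CNT_BITS() value of 2
 * decodes to 1 << 2 == 4 ranks, which is valid everywhere; a raw value of
 * 3 decodes to 8 ranks, which is only accepted on Haswell, Broadwell and
 * KNL and yields -EINVAL on Sandy/Ivy Bridge.
 */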

static inline int numrow(u32 mtr)
{
        int rows = (RANK_WIDTH_BITS(mtr) + 12);

        if (rows < 13 || rows > 18) {
                edac_dbg(0, "Invalid number of rows: %d (should be between 13 and 18) raw value = %x (%04x)\n",
                         rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
                return -EINVAL;
        }

        return 1 << rows;
}

static inline int numcol(u32 mtr)
{
        int cols = (COL_WIDTH_BITS(mtr) + 10);

        if (cols > 12) {
                edac_dbg(0, "Invalid number of cols: %d (max = 12) raw value = %x (%04x)\n",
                         cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
                return -EINVAL;
        }

        return 1 << cols;
}

static struct sbridge_dev *get_sbridge_dev(u8 bus, enum domain dom, int multi_bus,
                                           struct sbridge_dev *prev)
{
        struct sbridge_dev *sbridge_dev;

        /*
         * If we have devices scattered across several busses that pertain
         * to the same memory controller, we'll lump them all together.
         */
        if (multi_bus) {
                return list_first_entry_or_null(&sbridge_edac_list,
                                struct sbridge_dev, list);
        }

        sbridge_dev = list_entry(prev ? prev->list.next
                                      : sbridge_edac_list.next, struct sbridge_dev, list);

        list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
                if (sbridge_dev->bus == bus && (dom == SOCK || dom == sbridge_dev->dom))
                        return sbridge_dev;
        }

        return NULL;
}

static struct sbridge_dev *alloc_sbridge_dev(u8 bus, enum domain dom,
                                             const struct pci_id_table *table)
{
        struct sbridge_dev *sbridge_dev;

        sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
        if (!sbridge_dev)
                return NULL;

        sbridge_dev->pdev = kcalloc(table->n_devs_per_imc,
                                    sizeof(*sbridge_dev->pdev),
                                    GFP_KERNEL);
        if (!sbridge_dev->pdev) {
                kfree(sbridge_dev);
                return NULL;
        }

        sbridge_dev->bus = bus;
        sbridge_dev->dom = dom;
        sbridge_dev->n_devs = table->n_devs_per_imc;
        list_add_tail(&sbridge_dev->list, &sbridge_edac_list);

        return sbridge_dev;
}

static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
{
        list_del(&sbridge_dev->list);
        kfree(sbridge_dev->pdev);
        kfree(sbridge_dev);
}

static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
{
        u32 reg;

        /* Address range is 32:28 */
        pci_read_config_dword(pvt->pci_sad1, TOLM, &reg);
        return GET_TOLM(reg);
}

static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
{
        u32 reg;

        pci_read_config_dword(pvt->pci_sad1, TOHM, &reg);
        return GET_TOHM(reg);
}

static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
{
        u32 reg;

        pci_read_config_dword(pvt->pci_br1, TOLM, &reg);

        return GET_TOLM(reg);
}

static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
{
        u32 reg;

        pci_read_config_dword(pvt->pci_br1, TOHM, &reg);

        return GET_TOHM(reg);
}

static u64 rir_limit(u32 reg)
{
        return ((u64)GET_BITFIELD(reg,  1, 10) << 29) | 0x1fffffff;
}

static u64 sad_limit(u32 reg)
{
        return (GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff;
}

static u32 interleave_mode(u32 reg)
{
        return GET_BITFIELD(reg, 1, 1);
}

static u32 dram_attr(u32 reg)
{
        return GET_BITFIELD(reg, 2, 3);
}

static u64 knl_sad_limit(u32 reg)
{
        return (GET_BITFIELD(reg, 7, 26) << 26) | 0x3ffffff;
}

static u32 knl_interleave_mode(u32 reg)
{
        return GET_BITFIELD(reg, 1, 2);
}

static const char * const knl_intlv_mode[] = {
        "[8:6]", "[10:8]", "[14:12]", "[32:30]"
};

static const char *get_intlv_mode_str(u32 reg, enum type t)
{
        if (t == KNIGHTS_LANDING)
                return knl_intlv_mode[knl_interleave_mode(reg)];
        else
                return interleave_mode(reg) ? "[8:6]" : "[8:6]XOR[18:16]";
}

static u32 dram_attr_knl(u32 reg)
{
        return GET_BITFIELD(reg, 3, 4);
}


static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
{
        u32 reg;
        enum mem_type mtype;

        if (pvt->pci_ddrio) {
                pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
                                      &reg);
                if (GET_BITFIELD(reg, 11, 11))
                        /* FIXME: Can also be LRDIMM */
                        mtype = MEM_RDDR3;
                else
                        mtype = MEM_DDR3;
        } else
                mtype = MEM_UNKNOWN;

        return mtype;
}

static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
{
        u32 reg;
        bool registered = false;
        enum mem_type mtype = MEM_UNKNOWN;

        if (!pvt->pci_ddrio)
                goto out;

        pci_read_config_dword(pvt->pci_ddrio,
                              HASWELL_DDRCRCLKCONTROLS, &reg);
        /* Is_Rdimm */
        if (GET_BITFIELD(reg, 16, 16))
                registered = true;

        pci_read_config_dword(pvt->pci_ta, MCMTR, &reg);
        if (GET_BITFIELD(reg, 14, 14)) {
                if (registered)
                        mtype = MEM_RDDR4;
                else
                        mtype = MEM_DDR4;
        } else {
                if (registered)
                        mtype = MEM_RDDR3;
                else
                        mtype = MEM_DDR3;
        }

out:
        return mtype;
}

static enum dev_type knl_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
        /* for KNL the value is fixed */
        return DEV_X16;
}

static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
        /* there's no way to figure this out */
        return DEV_UNKNOWN;
}

static enum dev_type __ibridge_get_width(u32 mtr)
{
        enum dev_type type;

        switch (mtr) {
        case 3:
                type = DEV_UNKNOWN;
                break;
        case 2:
                type = DEV_X16;
                break;
        case 1:
                type = DEV_X8;
                break;
        case 0:
                type = DEV_X4;
                break;
        }

        return type;
}

static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
        /*
         * Called ddr3_width in the documentation, but also valid for
         * DDR4 on Haswell
         */
        return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8));
}

static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
        /* Called ddr3_width in the documentation, but also valid for DDR4 */
        return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9));
}

static enum mem_type knl_get_memory_type(struct sbridge_pvt *pvt)
{
        /* DDR4 RDIMMS and LRDIMMS are supported */
        return MEM_RDDR4;
}

static u8 get_node_id(struct sbridge_pvt *pvt)
{
        u32 reg;

        pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
        return GET_BITFIELD(reg, 0, 2);
}

static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
{
        u32 reg;

        pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
        return GET_BITFIELD(reg, 0, 3);
}

static u8 knl_get_node_id(struct sbridge_pvt *pvt)
{
        u32 reg;

        pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
        return GET_BITFIELD(reg, 0, 2);
}

static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
{
        u32 reg;

        pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOLM, &reg);
        return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
}

static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
{
        u64 rc;
        u32 reg;

        pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, &reg);
        rc = GET_BITFIELD(reg, 26, 31);
        pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
        /* widen to 64 bits before shifting so TOHM_1's top bits aren't lost */
        rc = (((u64)reg << 6) | rc) << 26;

        return rc | 0x1ffffff;
}

static u64 knl_get_tolm(struct sbridge_pvt *pvt)
{
        u32 reg;

        pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOLM, &reg);
        return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
}

static u64 knl_get_tohm(struct sbridge_pvt *pvt)
{
        u64 rc;
        u32 reg_lo, reg_hi;

        pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_0, &reg_lo);
        pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_1, &reg_hi);
        rc = ((u64)reg_hi << 32) | reg_lo;
        return rc | 0x3ffffff;
}

static u64 haswell_rir_limit(u32 reg)
{
        return (((u64)GET_BITFIELD(reg,  1, 11) + 1) << 29) - 1;
}
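
/*
 * Worked example (editor's illustration): a raw field value of 3 in bits
 * 11:1 gives (3 + 1) << 29 == 2 GiB, so haswell_rir_limit() returns
 * 0x7fffffff, the last byte of the 2 GiB range.
 */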

static inline u8 sad_pkg_socket(u8 pkg)
{
        /* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */
        return ((pkg >> 3) << 2) | (pkg & 0x3);
}

static inline u8 sad_pkg_ha(u8 pkg)
{
        return (pkg >> 2) & 0x1;
}
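
/*
 * Worked example (editor's illustration): for pkg == 0b1010 (SASS with the
 * HA bit A == 0), sad_pkg_socket() returns ((1 << 2) | 2) == 6 and
 * sad_pkg_ha() returns 0.
 */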

static int haswell_chan_hash(int idx, u64 addr)
{
        int i;

        /*
         * XOR even bits from 12:26 to bit0 of idx,
         *     odd bits from 13:27 to bit1
         */
        for (i = 12; i < 28; i += 2)
                idx ^= (addr >> i) & 3;

        return idx;
}
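
/*
 * Worked example (editor's illustration): for addr == 0x3000 only bits 12
 * and 13 are set, so the first loop iteration XORs idx with
 * (0x3000 >> 12) & 3 == 3 and the remaining iterations contribute 0;
 * haswell_chan_hash(0, 0x3000) == 3.
 */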

/* Low bits of TAD limit, and some metadata. */
static const u32 knl_tad_dram_limit_lo[] = {
        0x400, 0x500, 0x600, 0x700,
        0x800, 0x900, 0xa00, 0xb00,
};

/* Low bits of TAD offset. */
static const u32 knl_tad_dram_offset_lo[] = {
        0x404, 0x504, 0x604, 0x704,
        0x804, 0x904, 0xa04, 0xb04,
};

/* High 16 bits of TAD limit and offset. */
static const u32 knl_tad_dram_hi[] = {
        0x408, 0x508, 0x608, 0x708,
        0x808, 0x908, 0xa08, 0xb08,
};

/* Number of ways a tad entry is interleaved. */
static const u32 knl_tad_ways[] = {
        8, 6, 4, 3, 2, 1,
};

/*
 * Retrieve the n'th Target Address Decode table entry
 * from the memory controller's TAD table.
 *
 * @pvt:        driver private data
 * @entry:      which entry you want to retrieve
 * @mc:         which memory controller (0 or 1)
 * @offset:     output tad range offset
 * @limit:      output address of first byte above tad range
 * @ways:       output number of interleave ways
 *
 * The offset value has curious semantics.  It's a sort of running total
 * of the sizes of all the memory regions that aren't mapped in this
 * tad table.
 */
static int knl_get_tad(const struct sbridge_pvt *pvt,
                const int entry,
                const int mc,
                u64 *offset,
                u64 *limit,
                int *ways)
{
        u32 reg_limit_lo, reg_offset_lo, reg_hi;
        struct pci_dev *pci_mc;
        int way_id;

        switch (mc) {
        case 0:
                pci_mc = pvt->knl.pci_mc0;
                break;
        case 1:
                pci_mc = pvt->knl.pci_mc1;
                break;
        default:
                WARN_ON(1);
                return -EINVAL;
        }

        pci_read_config_dword(pci_mc,
                        knl_tad_dram_limit_lo[entry], &reg_limit_lo);
        pci_read_config_dword(pci_mc,
                        knl_tad_dram_offset_lo[entry], &reg_offset_lo);
        pci_read_config_dword(pci_mc,
                        knl_tad_dram_hi[entry], &reg_hi);

        /* Is this TAD entry enabled? */
        if (!GET_BITFIELD(reg_limit_lo, 0, 0))
                return -ENODEV;

        way_id = GET_BITFIELD(reg_limit_lo, 3, 5);

        if (way_id < ARRAY_SIZE(knl_tad_ways)) {
                *ways = knl_tad_ways[way_id];
        } else {
                *ways = 0;
                sbridge_printk(KERN_ERR,
                                "Unexpected value %d in mc_tad_limit_lo wayness field\n",
                                way_id);
                return -ENODEV;
        }

        /*
         * The least significant 6 bits of base and limit are truncated.
         * For limit, we fill the missing bits with 1s.
         */
        *offset = ((u64) GET_BITFIELD(reg_offset_lo, 6, 31) << 6) |
                                ((u64) GET_BITFIELD(reg_hi, 0,  15) << 32);
        *limit = ((u64) GET_BITFIELD(reg_limit_lo,  6, 31) << 6) | 63 |
                                ((u64) GET_BITFIELD(reg_hi, 16, 31) << 32);

        return 0;
}
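
/*
 * Usage sketch (editor's illustration, assuming a populated pvt): walk the
 * TAD entries of memory controller 0 until the first disabled one:
 *
 *      u64 offset, limit;
 *      int i, ways;
 *
 *      for (i = 0; i < ARRAY_SIZE(knl_tad_dram_limit_lo); i++)
 *              if (knl_get_tad(pvt, i, 0, &offset, &limit, &ways))
 *                      break;
 */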

/* Determine which memory controller is responsible for a given channel. */
static int knl_channel_mc(int channel)
{
        WARN_ON(channel < 0 || channel >= 6);

        return channel < 3 ? 1 : 0;
}

/*
 * Get the Nth entry from EDC_ROUTE_TABLE register.
 * (This is the per-tile mapping of logical interleave targets to
 *  physical EDC modules.)
 *
 * entry 0: 0:2
 *       1: 3:5
 *       2: 6:8
 *       3: 9:11
 *       4: 12:14
 *       5: 15:17
 *       6: 18:20
 *       7: 21:23
 * reserved: 24:31
 */
static u32 knl_get_edc_route(int entry, u32 reg)
{
        WARN_ON(entry >= KNL_MAX_EDCS);
        return GET_BITFIELD(reg, entry*3, (entry*3)+2);
}
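
/*
 * Worked example (editor's illustration): entry 2 lives in bits 8:6, so
 * for reg == 0x1c0 (bits 6, 7 and 8 set) knl_get_edc_route(2, reg) == 7.
 */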

/*
 * Get the Nth entry from MC_ROUTE_TABLE register.
 * (This is the per-tile mapping of logical interleave targets to
 *  physical DRAM channels.)
 *
 * entry 0: mc 0:2   channel 18:19
 *       1: mc 3:5   channel 20:21
 *       2: mc 6:8   channel 22:23
 *       3: mc 9:11  channel 24:25
 *       4: mc 12:14 channel 26:27
 *       5: mc 15:17 channel 28:29
 * reserved: 30:31
 *
 * Though we have 3 bits to identify the MC, we should only see
 * the values 0 or 1.
 */
static u32 knl_get_mc_route(int entry, u32 reg)
{
        int mc, chan;

        WARN_ON(entry >= KNL_MAX_CHANNELS);

        mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
        chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);

        return knl_channel_remap(mc, chan);
}
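
/*
 * Worked example (editor's illustration): for entry 1 the MC is in bits
 * 5:3 and the channel in bits 21:20.  With bit 3 set (mc == 1) and bit 21
 * set (chan == 2), knl_get_mc_route(1, reg) returns
 * knl_channel_remap(1, 2) == 2.
 */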

/*
 * Render the EDC_ROUTE register in human-readable form.
 * Output string s should be at least KNL_MAX_EDCS*2 bytes.
 */
static void knl_show_edc_route(u32 reg, char *s)
{
        int i;

        for (i = 0; i < KNL_MAX_EDCS; i++) {
                s[i*2] = knl_get_edc_route(i, reg) + '0';
                s[i*2+1] = '-';
        }

        s[KNL_MAX_EDCS*2 - 1] = '\0';
}
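
/*
 * Example output (editor's illustration): for the identity route, where
 * entry i targets EDC i, the rendered string is "0-1-2-3-4-5-6-7".
 */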

/*
 * Render the MC_ROUTE register in human-readable form.
 * Output string s should be at least KNL_MAX_CHANNELS*2 bytes.
 */
static void knl_show_mc_route(u32 reg, char *s)
{
        int i;

        for (i = 0; i < KNL_MAX_CHANNELS; i++) {
                s[i*2] = knl_get_mc_route(i, reg) + '0';
                s[i*2+1] = '-';
        }

        s[KNL_MAX_CHANNELS*2 - 1] = '\0';
}

#define KNL_EDC_ROUTE 0xb8
#define KNL_MC_ROUTE 0xb4

/* Is this dram rule backed by regular DRAM in flat mode? */
#define KNL_EDRAM(reg) GET_BITFIELD(reg, 29, 29)

/* Is this rule backed by EDC only? */
#define KNL_EDRAM_ONLY(reg) GET_BITFIELD(reg, 29, 29)

/* Is this rule backed by DRAM, cacheable in EDRAM? */
#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)

/* Is this rule mod3? */
#define KNL_MOD3(reg) GET_BITFIELD(reg, 27, 27)

/*
 * Figure out how big our RAM modules are.
 *
 * The DIMMMTR register in KNL doesn't tell us the size of the DIMMs, so we
 * have to figure this out from the SAD rules, interleave lists, route tables,
 * and TAD rules.
 *
 * SAD rules can have holes in them (e.g. the 3G-4G hole), so we have to
 * inspect the TAD rules to figure out how large the SAD regions really are.
 *
 * When we know the real size of a SAD region and how many ways it's
 * interleaved, we know the individual contribution of each channel to
 * TAD is size/ways.
 *
 * Finally, we have to check whether each channel participates in each SAD
 * region.
 *
 * Fortunately, KNL only supports one DIMM per channel, so once we know how
 * much memory the channel uses, we know the DIMM is at least that large.
 * (The BIOS might possibly choose not to map all available memory, in which
 * case we will underreport the size of the DIMM.)
 *
 * In theory, we could try to determine the EDC sizes as well, but that would
 * only work in flat mode, not in cache mode.
 *
 * @mc_sizes: Output sizes of channels (must have space for KNL_MAX_CHANNELS
 *            elements)
 */
1302 static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
1303 {
1304         u64 sad_base, sad_size, sad_limit = 0;
1305         u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
1306         int sad_rule = 0;
1307         int tad_rule = 0;
1308         int intrlv_ways, tad_ways;
1309         u32 first_pkg, pkg;
1310         int i;
1311         u64 sad_actual_size[2]; /* sad size accounting for holes, per mc */
1312         u32 dram_rule, interleave_reg;
1313         u32 mc_route_reg[KNL_MAX_CHAS];
1314         u32 edc_route_reg[KNL_MAX_CHAS];
1315         int edram_only;
1316         char edc_route_string[KNL_MAX_EDCS*2];
1317         char mc_route_string[KNL_MAX_CHANNELS*2];
1318         int cur_reg_start;
1319         int mc;
1320         int channel;
1321         int way;
1322         int participants[KNL_MAX_CHANNELS];
1323         int participant_count = 0;
1324
1325         for (i = 0; i < KNL_MAX_CHANNELS; i++)
1326                 mc_sizes[i] = 0;
1327
1328         /* Read the EDC route table in each CHA. */
1329         cur_reg_start = 0;
1330         for (i = 0; i < KNL_MAX_CHAS; i++) {
1331                 pci_read_config_dword(pvt->knl.pci_cha[i],
1332                                 KNL_EDC_ROUTE, &edc_route_reg[i]);
1333
1334                 if (i > 0 && edc_route_reg[i] != edc_route_reg[i-1]) {
1335                         knl_show_edc_route(edc_route_reg[i-1],
1336                                         edc_route_string);
1337                         if (cur_reg_start == i-1)
1338                                 edac_dbg(0, "edc route table for CHA %d: %s\n",
1339                                         cur_reg_start, edc_route_string);
1340                         else
1341                                 edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
1342                                         cur_reg_start, i-1, edc_route_string);
1343                         cur_reg_start = i;
1344                 }
1345         }
1346         knl_show_edc_route(edc_route_reg[i-1], edc_route_string);
1347         if (cur_reg_start == i-1)
1348                 edac_dbg(0, "edc route table for CHA %d: %s\n",
1349                         cur_reg_start, edc_route_string);
1350         else
1351                 edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
1352                         cur_reg_start, i-1, edc_route_string);
1353
1354         /* Read the MC route table in each CHA. */
1355         cur_reg_start = 0;
1356         for (i = 0; i < KNL_MAX_CHAS; i++) {
1357                 pci_read_config_dword(pvt->knl.pci_cha[i],
1358                         KNL_MC_ROUTE, &mc_route_reg[i]);
1359
1360                 if (i > 0 && mc_route_reg[i] != mc_route_reg[i-1]) {
1361                         knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
1362                         if (cur_reg_start == i-1)
1363                                 edac_dbg(0, "mc route table for CHA %d: %s\n",
1364                                         cur_reg_start, mc_route_string);
1365                         else
1366                                 edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
1367                                         cur_reg_start, i-1, mc_route_string);
1368                         cur_reg_start = i;
1369                 }
1370         }
1371         knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
1372         if (cur_reg_start == i-1)
1373                 edac_dbg(0, "mc route table for CHA %d: %s\n",
1374                         cur_reg_start, mc_route_string);
1375         else
1376                 edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
1377                         cur_reg_start, i-1, mc_route_string);
1378
1379         /* Process DRAM rules */
1380         for (sad_rule = 0; sad_rule < pvt->info.max_sad; sad_rule++) {
1381                 /* previous limit becomes the new base */
1382                 sad_base = sad_limit;
1383
1384                 pci_read_config_dword(pvt->pci_sad0,
1385                         pvt->info.dram_rule[sad_rule], &dram_rule);
1386
1387                 if (!DRAM_RULE_ENABLE(dram_rule))
1388                         break;
1389
1390                 edram_only = KNL_EDRAM_ONLY(dram_rule);
1391
1392                 sad_limit = pvt->info.sad_limit(dram_rule)+1;
1393                 sad_size = sad_limit - sad_base;
1394
1395                 pci_read_config_dword(pvt->pci_sad0,
1396                         pvt->info.interleave_list[sad_rule], &interleave_reg);
1397
1398                 /*
1399                  * Find out how many ways this dram rule is interleaved.
1400                  * We stop when we see the first channel again.
1401                  */
1402                 first_pkg = sad_pkg(pvt->info.interleave_pkg,
1403                                                 interleave_reg, 0);
1404                 for (intrlv_ways = 1; intrlv_ways < 8; intrlv_ways++) {
1405                         pkg = sad_pkg(pvt->info.interleave_pkg,
1406                                                 interleave_reg, intrlv_ways);
1407
1408                         if ((pkg & 0x8) == 0) {
1409                                 /*
1410                                  * 0 bit means memory is non-local,
1411                                  * which KNL doesn't support
1412                                  */
1413                                 edac_dbg(0, "Unexpected interleave target %d\n",
1414                                         pkg);
1415                                 return -1;
1416                         }
1417
1418                         if (pkg == first_pkg)
1419                                 break;
1420                 }
1421                 if (KNL_MOD3(dram_rule))
1422                         intrlv_ways *= 3;
1423
1424                 edac_dbg(3, "dram rule %d (base 0x%llx, limit 0x%llx), %d way interleave%s\n",
1425                         sad_rule,
1426                         sad_base,
1427                         sad_limit,
1428                         intrlv_ways,
1429                         edram_only ? ", EDRAM" : "");
1430
1431                 /*
1432                  * Find out how big the SAD region really is by iterating
1433                  * over TAD tables (SAD regions may contain holes).
1434                  * Each memory controller might have a different TAD table, so
1435                  * we have to look at both.
1436                  *
1437                  * Livespace is the memory that's mapped in this TAD table,
1438                  * deadspace is the holes (this could be the MMIO hole, or it
1439                  * could be memory that's mapped by the other TAD table but
1440                  * not this one).
1441                  */
1442                 for (mc = 0; mc < 2; mc++) {
1443                         sad_actual_size[mc] = 0;
1444                         tad_livespace = 0;
1445                         for (tad_rule = 0;
1446                                         tad_rule < ARRAY_SIZE(
1447                                                 knl_tad_dram_limit_lo);
1448                                         tad_rule++) {
1449                                 if (knl_get_tad(pvt,
1450                                                 tad_rule,
1451                                                 mc,
1452                                                 &tad_deadspace,
1453                                                 &tad_limit,
1454                                                 &tad_ways))
1455                                         break;
1456
1457                                 tad_size = (tad_limit+1) -
1458                                         (tad_livespace + tad_deadspace);
1459                                 tad_livespace += tad_size;
1460                                 tad_base = (tad_limit+1) - tad_size;
1461
1462                                 if (tad_base < sad_base) {
1463                                         if (tad_limit > sad_base)
1464                                                 edac_dbg(0, "TAD region overlaps lower SAD boundary -- TAD tables may be configured incorrectly.\n");
1465                                 } else if (tad_base < sad_limit) {
1466                                         if (tad_limit+1 > sad_limit) {
1467                                                 edac_dbg(0, "TAD region overlaps upper SAD boundary -- TAD tables may be configured incorrectly.\n");
1468                                         } else {
1469                                                 /* TAD region is completely inside SAD region */
1470                                                 edac_dbg(3, "TAD region %d 0x%llx - 0x%llx (%lld bytes) table%d\n",
1471                                                         tad_rule, tad_base,
1472                                                         tad_limit, tad_size,
1473                                                         mc);
1474                                                 sad_actual_size[mc] += tad_size;
1475                                         }
1476                                 }
1477                                 tad_base = tad_limit+1;
1478                         }
1479                 }
1480
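                /*
                 * Worked example (illustrative numbers, not from a
                 * datasheet): if TAD rule 0 reports tad_limit 0x7fffffff
                 * with no deadspace, tad_size = 2 GiB and tad_base = 0.
                 * If rule 1 then reports tad_limit 0xffffffff with 1 GiB
                 * of deadspace (an MMIO hole), tad_size = 4 GiB -
                 * (2 GiB livespace + 1 GiB deadspace) = 1 GiB, so only
                 * [3 GiB, 4 GiB) of that rule counts toward
                 * sad_actual_size[mc].
                 */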
1481                 for (mc = 0; mc < 2; mc++) {
1482                         edac_dbg(3, " total TAD DRAM footprint in table%d: 0x%llx (%lld bytes)\n",
1483                                 mc, sad_actual_size[mc], sad_actual_size[mc]);
1484                 }
1485
1486                 /* Ignore EDRAM rule */
1487                 if (edram_only)
1488                         continue;
1489
1490                 /* Figure out which channels participate in interleave. */
1491                 for (channel = 0; channel < KNL_MAX_CHANNELS; channel++)
1492                         participants[channel] = 0;
1493
1494                 /* For each channel, does at least one CHA have
1495                  * this channel mapped to the given target?
1496                  */
1497                 for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
1498                         for (way = 0; way < intrlv_ways; way++) {
1499                                 int target;
1500                                 int cha;
1501
1502                                 if (KNL_MOD3(dram_rule))
1503                                         target = way;
1504                                 else
1505                                         target = 0x7 & sad_pkg(pvt->info.interleave_pkg,
1506                                                                interleave_reg, way);
1507
1508                                 for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
1509                                         if (knl_get_mc_route(target,
1510                                                 mc_route_reg[cha]) == channel
1511                                                 && !participants[channel]) {
1512                                                 participant_count++;
1513                                                 participants[channel] = 1;
1514                                                 break;
1515                                         }
1516                                 }
1517                         }
1518                 }
1519
1520                 if (participant_count != intrlv_ways)
1521                         edac_dbg(0, "participant_count (%d) != interleave_ways (%d): DIMM size may be incorrect\n",
1522                                 participant_count, intrlv_ways);
1523
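                /*
                 * Credit each participating channel with its share of the
                 * region.  Illustrative numbers: if this SAD rule spans
                 * 24 GiB (each MC's TAD then maps the whole range) and
                 * interleaves 6 ways across six channels, every
                 * participating channel is credited 24 GiB / 6 = 4 GiB
                 * below.
                 */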
1524                 for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
1525                         mc = knl_channel_mc(channel);
1526                         if (participants[channel]) {
1527                                 edac_dbg(4, "mc channel %d contributes %lld bytes via sad entry %d\n",
1528                                         channel,
1529                                         sad_actual_size[mc]/intrlv_ways,
1530                                         sad_rule);
1531                                 mc_sizes[channel] +=
1532                                         sad_actual_size[mc]/intrlv_ways;
1533                         }
1534                 }
1535         }
1536
1537         return 0;
1538 }
1539
1540 static void get_source_id(struct mem_ctl_info *mci)
1541 {
1542         struct sbridge_pvt *pvt = mci->pvt_info;
1543         u32 reg;
1544
1545         if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
1546             pvt->info.type == KNIGHTS_LANDING)
1547                 pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
1548         else
1549                 pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
1550
1551         if (pvt->info.type == KNIGHTS_LANDING)
1552                 pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg);
1553         else
1554                 pvt->sbridge_dev->source_id = SOURCE_ID(reg);
1555 }
1556
1557 static int __populate_dimms(struct mem_ctl_info *mci,
1558                             u64 knl_mc_sizes[KNL_MAX_CHANNELS],
1559                             enum edac_type mode)
1560 {
1561         struct sbridge_pvt *pvt = mci->pvt_info;
1562         int channels = pvt->info.type == KNIGHTS_LANDING ? KNL_MAX_CHANNELS
1563                                                          : NUM_CHANNELS;
1564         unsigned int i, j, banks, ranks, rows, cols, npages;
1565         struct dimm_info *dimm;
1566         enum mem_type mtype;
1567         u64 size;
1568
1569         mtype = pvt->info.get_memory_type(pvt);
1570         if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
1571                 edac_dbg(0, "Memory is registered\n");
1572         else if (mtype == MEM_UNKNOWN)
1573                 edac_dbg(0, "Cannot determine memory type\n");
1574         else
1575                 edac_dbg(0, "Memory is unregistered\n");
1576
1577         if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
1578                 banks = 16;
1579         else
1580                 banks = 8;
1581
1582         for (i = 0; i < channels; i++) {
1583                 u32 mtr;
1584
1585                 int max_dimms_per_channel;
1586
1587                 if (pvt->info.type == KNIGHTS_LANDING) {
1588                         max_dimms_per_channel = 1;
1589                         if (!pvt->knl.pci_channel[i])
1590                                 continue;
1591                 } else {
1592                         max_dimms_per_channel = ARRAY_SIZE(mtr_regs);
1593                         if (!pvt->pci_tad[i])
1594                                 continue;
1595                 }
1596
1597                 for (j = 0; j < max_dimms_per_channel; j++) {
1598                         dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
1599                         if (pvt->info.type == KNIGHTS_LANDING) {
1600                                 pci_read_config_dword(pvt->knl.pci_channel[i],
1601                                         knl_mtr_reg, &mtr);
1602                         } else {
1603                                 pci_read_config_dword(pvt->pci_tad[i],
1604                                         mtr_regs[j], &mtr);
1605                         }
1606                         edac_dbg(4, "Channel #%d  MTR%d = %x\n", i, j, mtr);
1607                         if (IS_DIMM_PRESENT(mtr)) {
1608                                 if (!IS_ECC_ENABLED(pvt->info.mcmtr)) {
1609                                         sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
1610                                                        pvt->sbridge_dev->source_id,
1611                                                        pvt->sbridge_dev->dom, i);
1612                                         return -ENODEV;
1613                                 }
1614                                 pvt->channel[i].dimms++;
1615
1616                                 ranks = numrank(pvt->info.type, mtr);
1617
1618                                 if (pvt->info.type == KNIGHTS_LANDING) {
1619                                         /* For DDR4, this is fixed. */
1620                                         cols = 1 << 10;
1621                                         rows = knl_mc_sizes[i] /
1622                                                 ((u64) cols * ranks * banks * 8);
1623                                 } else {
1624                                         rows = numrow(mtr);
1625                                         cols = numcol(mtr);
1626                                 }
1627
1628                                 size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
1629                                 npages = MiB_TO_PAGES(size);
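                                /*
                                 * Worked example (illustrative numbers): a
                                 * dual-rank DIMM with rows = 1 << 16,
                                 * cols = 1 << 10, banks = 16 and ranks = 2
                                 * gives (2^16 * 2^10 * 16 * 2) >> 17 =
                                 * 16384 MiB; the >> (20 - 3) converts
                                 * 8-byte locations into MiB.
                                 */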
1630
1631                                 edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
1632                                          pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j,
1633                                          size, npages,
1634                                          banks, ranks, rows, cols);
1635
1636                                 dimm->nr_pages = npages;
1637                                 dimm->grain = 32;
1638                                 dimm->dtype = pvt->info.get_width(pvt, mtr);
1639                                 dimm->mtype = mtype;
1640                                 dimm->edac_mode = mode;
1641                                 snprintf(dimm->label, sizeof(dimm->label),
1642                                                  "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
1643                                                  pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j);
1644                         }
1645                 }
1646         }
1647
1648         return 0;
1649 }
1650
1651 static int get_dimm_config(struct mem_ctl_info *mci)
1652 {
1653         struct sbridge_pvt *pvt = mci->pvt_info;
1654         u64 knl_mc_sizes[KNL_MAX_CHANNELS];
1655         enum edac_type mode;
1656         u32 reg;
1657
1658         pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
1659         edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
1660                  pvt->sbridge_dev->mc,
1661                  pvt->sbridge_dev->node_id,
1662                  pvt->sbridge_dev->source_id);
1663
1664         /* KNL doesn't support mirroring or lockstep,
1665          * and is always closed page
1666          */
1667         if (pvt->info.type == KNIGHTS_LANDING) {
1668                 mode = EDAC_S4ECD4ED;
1669                 pvt->mirror_mode = NON_MIRRORING;
1670                 pvt->is_cur_addr_mirrored = false;
1671
1672                 if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
1673                         return -1;
1674                 if (pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr)) {
1675                         edac_dbg(0, "Failed to read KNL_MCMTR register\n");
1676                         return -ENODEV;
1677                 }
1678         } else {
1679                 if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
1680                         if (pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg)) {
1681                                 edac_dbg(0, "Failed to read HASWELL_HASYSDEFEATURE2 register\n");
1682                                 return -ENODEV;
1683                         }
1684                         pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
1685                         if (GET_BITFIELD(reg, 28, 28)) {
1686                                 pvt->mirror_mode = ADDR_RANGE_MIRRORING;
1687                                 edac_dbg(0, "Address range partial memory mirroring is enabled\n");
1688                                 goto next;
1689                         }
1690                 }
1691                 if (pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg)) {
1692                         edac_dbg(0, "Failed to read RASENABLES register\n");
1693                         return -ENODEV;
1694                 }
1695                 if (IS_MIRROR_ENABLED(reg)) {
1696                         pvt->mirror_mode = FULL_MIRRORING;
1697                         edac_dbg(0, "Full memory mirroring is enabled\n");
1698                 } else {
1699                         pvt->mirror_mode = NON_MIRRORING;
1700                         edac_dbg(0, "Memory mirroring is disabled\n");
1701                 }
1702
1703 next:
1704                 if (pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr)) {
1705                         edac_dbg(0, "Failed to read MCMTR register\n");
1706                         return -ENODEV;
1707                 }
1708                 if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
1709                         edac_dbg(0, "Lockstep is enabled\n");
1710                         mode = EDAC_S8ECD8ED;
1711                         pvt->is_lockstep = true;
1712                 } else {
1713                         edac_dbg(0, "Lockstep is disabled\n");
1714                         mode = EDAC_S4ECD4ED;
1715                         pvt->is_lockstep = false;
1716                 }
1717                 if (IS_CLOSE_PG(pvt->info.mcmtr)) {
1718                         edac_dbg(0, "address map is in closed page mode\n");
1719                         pvt->is_close_pg = true;
1720                 } else {
1721                         edac_dbg(0, "address map is in open page mode\n");
1722                         pvt->is_close_pg = false;
1723                 }
1724         }
1725
1726         return __populate_dimms(mci, knl_mc_sizes, mode);
1727 }
1728
1729 static void get_memory_layout(const struct mem_ctl_info *mci)
1730 {
1731         struct sbridge_pvt *pvt = mci->pvt_info;
1732         int i, j, k, n_sads, n_tads, sad_interl;
1733         u32 reg;
1734         u64 limit, prv = 0;
1735         u64 tmp_mb;
1736         u32 gb, mb;
1737         u32 rir_way;
1738
1739         /*
1740          * Step 1) Get TOLM/TOHM ranges
1741          */
1742
1743         pvt->tolm = pvt->info.get_tolm(pvt);
1744         tmp_mb = (1 + pvt->tolm) >> 20;
1745
1746         gb = div_u64_rem(tmp_mb, 1024, &mb);
1747         edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
1748                 gb, (mb*1000)/1024, (u64)pvt->tolm);
1749
1750         /* Address range is already 45:25 */
1751         pvt->tohm = pvt->info.get_tohm(pvt);
1752         tmp_mb = (1 + pvt->tohm) >> 20;
1753
1754         gb = div_u64_rem(tmp_mb, 1024, &mb);
1755         edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
1756                 gb, (mb*1000)/1024, (u64)pvt->tohm);
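        /*
         * Note on the formatting used by these debug prints:
         * div_u64_rem() splits tmp_mb into whole GiB (gb) and a MiB
         * remainder (mb), and (mb * 1000) / 1024 converts the remainder
         * into decimal thousandths, e.g. tmp_mb = 2560 prints as
         * "2.500 GB".
         */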
1757
1758         /*
1759          * Step 2) Get SAD range and SAD Interleave list
1760          * TAD registers contain the interleave wayness. However, it
1761          * seems simpler to just discover it indirectly, with the
1762          * algorithm below.
1763          */
1764         prv = 0;
1765         for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
1766                 /* SAD_LIMIT Address range is 45:26 */
1767                 pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
1768                                       &reg);
1769                 limit = pvt->info.sad_limit(reg);
1770
1771                 if (!DRAM_RULE_ENABLE(reg))
1772                         continue;
1773
1774                 if (limit <= prv)
1775                         break;
1776
1777                 tmp_mb = (limit + 1) >> 20;
1778                 gb = div_u64_rem(tmp_mb, 1024, &mb);
1779                 edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
1780                          n_sads,
1781                          show_dram_attr(pvt->info.dram_attr(reg)),
1782                          gb, (mb*1000)/1024,
1783                          ((u64)tmp_mb) << 20L,
1784                          get_intlv_mode_str(reg, pvt->info.type),
1785                          reg);
1786                 prv = limit;
1787
1788                 pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
1789                                       &reg);
1790                 sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
1791                 for (j = 0; j < 8; j++) {
1792                         u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
1793                         if (j > 0 && sad_interl == pkg)
1794                                 break;
1795
1796                         edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
1797                                  n_sads, j, pkg);
1798                 }
1799         }
1800
1801         if (pvt->info.type == KNIGHTS_LANDING)
1802                 return;
1803
1804         /*
1805          * Step 3) Get TAD range
1806          */
1807         prv = 0;
1808         for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
1809                 pci_read_config_dword(pvt->pci_ha, tad_dram_rule[n_tads], &reg);
1810                 limit = TAD_LIMIT(reg);
1811                 if (limit <= prv)
1812                         break;
1813                 tmp_mb = (limit + 1) >> 20;
1814
1815                 gb = div_u64_rem(tmp_mb, 1024, &mb);
1816                 edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
1817                          n_tads, gb, (mb*1000)/1024,
1818                          ((u64)tmp_mb) << 20L,
1819                          (u32)(1 << TAD_SOCK(reg)),
1820                          (u32)TAD_CH(reg) + 1,
1821                          (u32)TAD_TGT0(reg),
1822                          (u32)TAD_TGT1(reg),
1823                          (u32)TAD_TGT2(reg),
1824                          (u32)TAD_TGT3(reg),
1825                          reg);
1826                 prv = limit;
1827         }
1828
1829         /*
1830          * Step 4) Get TAD offsets, for each channel
1831          */
1832         for (i = 0; i < NUM_CHANNELS; i++) {
1833                 if (!pvt->channel[i].dimms)
1834                         continue;
1835                 for (j = 0; j < n_tads; j++) {
1836                         pci_read_config_dword(pvt->pci_tad[i],
1837                                               tad_ch_nilv_offset[j],
1838                                               &reg);
1839                         tmp_mb = TAD_OFFSET(reg) >> 20;
1840                         gb = div_u64_rem(tmp_mb, 1024, &mb);
1841                         edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
1842                                  i, j,
1843                                  gb, (mb*1000)/1024,
1844                                  ((u64)tmp_mb) << 20L,
1845                                  reg);
1846                 }
1847         }
1848
1849         /*
1850          * Step 5) Get RIR Wayness/Limit, for each channel
1851          */
1852         for (i = 0; i < NUM_CHANNELS; i++) {
1853                 if (!pvt->channel[i].dimms)
1854                         continue;
1855                 for (j = 0; j < MAX_RIR_RANGES; j++) {
1856                         pci_read_config_dword(pvt->pci_tad[i],
1857                                               rir_way_limit[j],
1858                                               &reg);
1859
1860                         if (!IS_RIR_VALID(reg))
1861                                 continue;
1862
1863                         tmp_mb = pvt->info.rir_limit(reg) >> 20;
1864                         rir_way = 1 << RIR_WAY(reg);
1865                         gb = div_u64_rem(tmp_mb, 1024, &mb);
1866                         edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
1867                                  i, j,
1868                                  gb, (mb*1000)/1024,
1869                                  ((u64)tmp_mb) << 20L,
1870                                  rir_way,
1871                                  reg);
1872
1873                         for (k = 0; k < rir_way; k++) {
1874                                 pci_read_config_dword(pvt->pci_tad[i],
1875                                                       rir_offset[j][k],
1876                                                       &reg);
1877                                 tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
1878
1879                                 gb = div_u64_rem(tmp_mb, 1024, &mb);
1880                                 edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
1881                                          i, j, k,
1882                                          gb, (mb*1000)/1024,
1883                                          ((u64)tmp_mb) << 20L,
1884                                          (u32)RIR_RNK_TGT(pvt->info.type, reg),
1885                                          reg);
1886                         }
1887                 }
1888         }
1889 }
1890
1891 static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha)
1892 {
1893         struct sbridge_dev *sbridge_dev;
1894
1895         list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
1896                 if (sbridge_dev->node_id == node_id && sbridge_dev->dom == ha)
1897                         return sbridge_dev->mci;
1898         }
1899         return NULL;
1900 }
1901
1902 static int get_memory_error_data(struct mem_ctl_info *mci,
1903                                  u64 addr,
1904                                  u8 *socket, u8 *ha,
1905                                  long *channel_mask,
1906                                  u8 *rank,
1907                                  char **area_type, char *msg)
1908 {
1909         struct mem_ctl_info     *new_mci;
1910         struct sbridge_pvt *pvt = mci->pvt_info;
1911         struct pci_dev          *pci_ha;
1912         int                     n_rir, n_sads, n_tads, sad_way, sck_xch;
1913         int                     sad_interl, idx, base_ch;
1914         int                     interleave_mode, shiftup = 0;
1915         unsigned                sad_interleave[pvt->info.max_interleave];
1916         u32                     reg, dram_rule;
1917         u8                      ch_way, sck_way, pkg, sad_ha = 0;
1918         u32                     tad_offset;
1919         u32                     rir_way;
1920         u32                     mb, gb;
1921         u64                     ch_addr, offset, limit = 0, prv = 0;
1922
1923
1924         /*
1925          * Step 0) Check if the address is at special memory ranges
1926          * The check below is probably enough to cover all cases where
1927          * the error is not inside memory, except for the legacy
1928          * range (e.g. VGA addresses). It is unlikely, however, that the
1929          * memory controller would generate an error on that range.
1930          */
1931         if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
1932                 sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
1933                 return -EINVAL;
1934         }
1935         if (addr >= (u64)pvt->tohm) {
1936                 sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
1937                 return -EINVAL;
1938         }
1939
1940         /*
1941          * Step 1) Get socket
1942          */
1943         for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
1944                 pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
1945                                       &reg);
1946
1947                 if (!DRAM_RULE_ENABLE(reg))
1948                         continue;
1949
1950                 limit = pvt->info.sad_limit(reg);
1951                 if (limit <= prv) {
1952                         sprintf(msg, "Can't discover the memory socket");
1953                         return -EINVAL;
1954                 }
1955                 if  (addr <= limit)
1956                         break;
1957                 prv = limit;
1958         }
1959         if (n_sads == pvt->info.max_sad) {
1960                 sprintf(msg, "Can't discover the memory socket");
1961                 return -EINVAL;
1962         }
1963         dram_rule = reg;
1964         *area_type = show_dram_attr(pvt->info.dram_attr(dram_rule));
1965         interleave_mode = pvt->info.interleave_mode(dram_rule);
1966
1967         pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
1968                               &reg);
1969
1970         if (pvt->info.type == SANDY_BRIDGE) {
1971                 sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
1972                 for (sad_way = 0; sad_way < 8; sad_way++) {
1973                         u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way);
1974                         if (sad_way > 0 && sad_interl == pkg)
1975                                 break;
1976                         sad_interleave[sad_way] = pkg;
1977                         edac_dbg(0, "SAD interleave #%d: %d\n",
1978                                  sad_way, sad_interleave[sad_way]);
1979                 }
1980                 edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
1981                          pvt->sbridge_dev->mc,
1982                          n_sads,
1983                          addr,
1984                          limit,
1985                          sad_way + 7,
1986                          !interleave_mode ? "" : "XOR[18:16]");
1987                 if (interleave_mode)
1988                         idx = ((addr >> 6) ^ (addr >> 16)) & 7;
1989                 else
1990                         idx = (addr >> 6) & 7;
1991                 switch (sad_way) {
1992                 case 1:
1993                         idx = 0;
1994                         break;
1995                 case 2:
1996                         idx = idx & 1;
1997                         break;
1998                 case 4:
1999                         idx = idx & 3;
2000                         break;
2001                 case 8:
2002                         break;
2003                 default:
2004                         sprintf(msg, "Can't discover socket interleave");
2005                         return -EINVAL;
2006                 }
2007                 *socket = sad_interleave[idx];
2008                 edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
2009                          idx, sad_way, *socket);
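                /*
                 * Worked example (illustrative address): for
                 * addr = 0x12345678 in XOR mode, (addr >> 6) & 7 = 1 and
                 * (addr >> 16) & 7 = 4, so idx = 1 ^ 4 = 5; a 2-way
                 * interleave (sad_way == 2) then reduces it to
                 * idx & 1 = 1.
                 */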
2010         } else if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
2011                 int bits, a7mode = A7MODE(dram_rule);
2012
2013                 if (a7mode) {
2014                         /* A7 mode swaps P9 with P6 */
2015                         bits = GET_BITFIELD(addr, 7, 8) << 1;
2016                         bits |= GET_BITFIELD(addr, 9, 9);
2017                 } else
2018                         bits = GET_BITFIELD(addr, 6, 8);
2019
2020                 if (interleave_mode == 0) {
2021                         /* interleave mode will XOR {8,7,6} with {18,17,16} */
2022                         idx = GET_BITFIELD(addr, 16, 18);
2023                         idx ^= bits;
2024                 } else
2025                         idx = bits;
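                /*
                 * Illustration of the A7-mode swap above: with bit 7 = 1,
                 * bit 8 = 0 and bit 9 = 1, bits = (0b01 << 1) | 1 = 3,
                 * i.e. the index is built from bits {8, 7, 9} instead of
                 * the usual {8, 7, 6}.
                 */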
2026
2027                 pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
2028                 *socket = sad_pkg_socket(pkg);
2029                 sad_ha = sad_pkg_ha(pkg);
2030
2031                 if (a7mode) {
2032                         /* MCChanShiftUpEnable */
2033                         pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg);
2034                         shiftup = GET_BITFIELD(reg, 22, 22);
2035                 }
2036
2037                 edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n",
2038                          idx, *socket, sad_ha, shiftup);
2039         } else {
2040                 /* Ivy Bridge's SAD mode doesn't support XOR interleave mode */
2041                 idx = (addr >> 6) & 7;
2042                 pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
2043                 *socket = sad_pkg_socket(pkg);
2044                 sad_ha = sad_pkg_ha(pkg);
2045                 edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
2046                          idx, *socket, sad_ha);
2047         }
2048
2049         *ha = sad_ha;
2050
2051         /*
2052          * Move to the proper node structure, in order to access the
2053          * right PCI registers
2054          */
2055         new_mci = get_mci_for_node_id(*socket, sad_ha);
2056         if (!new_mci) {
2057                 sprintf(msg, "Struct for socket #%u wasn't initialized",
2058                         *socket);
2059                 return -EINVAL;
2060         }
2061         mci = new_mci;
2062         pvt = mci->pvt_info;
2063
2064         /*
2065          * Step 2) Get memory channel
2066          */
2067         prv = 0;
2068         pci_ha = pvt->pci_ha;
2069         for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
2070                 pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
2071                 limit = TAD_LIMIT(reg);
2072                 if (limit <= prv) {
2073                         sprintf(msg, "Can't discover the memory channel");
2074                         return -EINVAL;
2075                 }
2076                 if  (addr <= limit)
2077                         break;
2078                 prv = limit;
2079         }
2080         if (n_tads == MAX_TAD) {
2081                 sprintf(msg, "Can't discover the memory channel");
2082                 return -EINVAL;
2083         }
2084
2085         ch_way = TAD_CH(reg) + 1;
2086         sck_way = TAD_SOCK(reg);
2087
2088         if (ch_way == 3)
2089                 idx = addr >> 6;
2090         else {
2091                 idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
2092                 if (pvt->is_chan_hash)
2093                         idx = haswell_chan_hash(idx, addr);
2094         }
2095         idx = idx % ch_way;
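        /*
         * Illustration (ignoring channel hashing): with sck_way = 1,
         * shiftup = 0 and ch_way = 2, idx = (addr >> 7) & 0x3 taken
         * modulo 2 selects TAD_TGT0 or TAD_TGT1 in the switch below.
         */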
2096
2097         /*
2098          * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ???
2099          */
2100         switch (idx) {
2101         case 0:
2102                 base_ch = TAD_TGT0(reg);
2103                 break;
2104         case 1:
2105                 base_ch = TAD_TGT1(reg);
2106                 break;
2107         case 2:
2108                 base_ch = TAD_TGT2(reg);
2109                 break;
2110         case 3:
2111                 base_ch = TAD_TGT3(reg);
2112                 break;
2113         default:
2114                 sprintf(msg, "Can't discover the TAD target");
2115                 return -EINVAL;
2116         }
2117         *channel_mask = 1 << base_ch;
2118
2119         pci_read_config_dword(pvt->pci_tad[base_ch], tad_ch_nilv_offset[n_tads], &tad_offset);
2120
2121         if (pvt->mirror_mode == FULL_MIRRORING ||
2122             (pvt->mirror_mode == ADDR_RANGE_MIRRORING && n_tads == 0)) {
2123                 *channel_mask |= 1 << ((base_ch + 2) % 4);
2124                 switch(ch_way) {
2125                 case 2:
2126                 case 4:
2127                         sck_xch = (1 << sck_way) * (ch_way >> 1);
2128                         break;
2129                 default:
2130                         sprintf(msg, "Invalid mirror set. Can't decode addr");
2131                         return -EINVAL;
2132                 }
2133
2134                 pvt->is_cur_addr_mirrored = true;
2135         } else {
2136                 sck_xch = (1 << sck_way) * ch_way;
2137                 pvt->is_cur_addr_mirrored = false;
2138         }
2139
2140         if (pvt->is_lockstep)
2141                 *channel_mask |= 1 << ((base_ch + 1) % 4);
2142
2143         offset = TAD_OFFSET(tad_offset);
2144
2145         edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
2146                  n_tads,
2147                  addr,
2148                  limit,
2149                  sck_way,
2150                  ch_way,
2151                  offset,
2152                  idx,
2153                  base_ch,
2154                  *channel_mask);
2155
2156         /* Calculate channel address */
2157         /* Remove the TAD offset */
2158
2159         if (offset > addr) {
2160                 sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
2161                         offset, addr);
2162                 return -EINVAL;
2163         }
2164
2165         ch_addr = addr - offset;
2166         ch_addr >>= (6 + shiftup);
2167         ch_addr /= sck_xch;
2168         ch_addr <<= (6 + shiftup);
2169         ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
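        /*
         * Worked example (illustrative numbers): addr = 0x10080,
         * offset = 0, sck_xch = 2, shiftup = 0.  Dropping the low 6 bits
         * leaves cache line 0x402; dividing out the 2-way interleave
         * gives 0x201; restoring the low bits yields ch_addr = 0x8040.
         */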
2170
2171         /*
2172          * Step 3) Decode rank
2173          */
2174         for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
2175                 pci_read_config_dword(pvt->pci_tad[base_ch], rir_way_limit[n_rir], &reg);
2176
2177                 if (!IS_RIR_VALID(reg))
2178                         continue;
2179
2180                 limit = pvt->info.rir_limit(reg);
2181                 gb = div_u64_rem(limit >> 20, 1024, &mb);
2182                 edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
2183                          n_rir,
2184                          gb, (mb*1000)/1024,
2185                          limit,
2186                          1 << RIR_WAY(reg));
2187                 if  (ch_addr <= limit)
2188                         break;
2189         }
2190         if (n_rir == MAX_RIR_RANGES) {
2191                 sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
2192                         ch_addr);
2193                 return -EINVAL;
2194         }
2195         rir_way = RIR_WAY(reg);
2196
2197         if (pvt->is_close_pg)
2198                 idx = (ch_addr >> 6);
2199         else
2200                 idx = (ch_addr >> 13);  /* FIXME: Datasheet says to shift by 15 */
2201         idx %= 1 << rir_way;
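        /*
         * Illustration: closed-page mode indexes ranks by cache line
         * (ch_addr >> 6), open-page mode by ch_addr >> 13.  With
         * rir_way = 2 (a 4-way rank interleave), idx %= 4 picks one of
         * four RIR offset/target registers below.
         */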
2202
2203         pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], &reg);
2204         *rank = RIR_RNK_TGT(pvt->info.type, reg);
2205
2206         edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
2207                  n_rir,
2208                  ch_addr,
2209                  limit,
2210                  rir_way,
2211                  idx);
2212
2213         return 0;
2214 }
2215
2216 /****************************************************************************
2217         Device initialization routines: put/get, init/exit
2218  ****************************************************************************/
2219
2220 /*
2221  *      sbridge_put_devices - 'put' all the devices that we have
2222  *                            reserved via 'get' for this socket
2223  */
2224 static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
2225 {
2226         int i;
2227
2228         edac_dbg(0, "\n");
2229         for (i = 0; i < sbridge_dev->n_devs; i++) {
2230                 struct pci_dev *pdev = sbridge_dev->pdev[i];
2231                 if (!pdev)
2232                         continue;
2233                 edac_dbg(0, "Removing dev %02x:%02x.%d\n",
2234                          pdev->bus->number,
2235                          PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
2236                 pci_dev_put(pdev);
2237         }
2238 }
2239
2240 static void sbridge_put_all_devices(void)
2241 {
2242         struct sbridge_dev *sbridge_dev, *tmp;
2243
2244         list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
2245                 sbridge_put_devices(sbridge_dev);
2246                 free_sbridge_dev(sbridge_dev);
2247         }
2248 }
2249
2250 static int sbridge_get_onedevice(struct pci_dev **prev,
2251                                  u8 *num_mc,
2252                                  const struct pci_id_table *table,
2253                                  const unsigned devno,
2254                                  const int multi_bus)
2255 {
2256         struct sbridge_dev *sbridge_dev = NULL;
2257         const struct pci_id_descr *dev_descr = &table->descr[devno];
2258         struct pci_dev *pdev = NULL;
2259         u8 bus = 0;
2260         int i = 0;
2261
2262         sbridge_printk(KERN_DEBUG,
2263                 "Searching for PCI ID %04x:%04x\n",
2264                 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2265
2266         pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
2267                               dev_descr->dev_id, *prev);
2268
2269         if (!pdev) {
2270                 if (*prev) {
2271                         *prev = pdev;
2272                         return 0;
2273                 }
2274
2275                 if (dev_descr->optional)
2276                         return 0;
2277
2278                 /* if the HA wasn't found */
2279                 if (devno == 0)
2280                         return -ENODEV;
2281
2282                 sbridge_printk(KERN_INFO,
2283                         "Device not found: %04x:%04x\n",
2284                         PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2285
2286                 /* End of list, leave */
2287                 return -ENODEV;
2288         }
2289         bus = pdev->bus->number;
2290
2291 next_imc:
2292         sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev);
2293         if (!sbridge_dev) {
2294
2295                 if (dev_descr->dom == SOCK)
2296                         goto out_imc;
2297
2298                 sbridge_dev = alloc_sbridge_dev(bus, dev_descr->dom, table);
2299                 if (!sbridge_dev) {
2300                         pci_dev_put(pdev);
2301                         return -ENOMEM;
2302                 }
2303                 (*num_mc)++;
2304         }
2305
2306         if (sbridge_dev->pdev[sbridge_dev->i_devs]) {
2307                 sbridge_printk(KERN_ERR,
2308                         "Duplicated device for %04x:%04x\n",
2309                         PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2310                 pci_dev_put(pdev);
2311                 return -ENODEV;
2312         }
2313
2314         sbridge_dev->pdev[sbridge_dev->i_devs++] = pdev;
2315
2316         /* pdev belongs to more than one IMC, do extra gets */
2317         if (++i > 1)
2318                 pci_dev_get(pdev);
2319
2320         if (dev_descr->dom == SOCK && i < table->n_imcs_per_sock)
2321                 goto next_imc;
2322
2323 out_imc:
2324         /* Be sure that the device is enabled */
2325         if (unlikely(pci_enable_device(pdev) < 0)) {
2326                 sbridge_printk(KERN_ERR,
2327                         "Couldn't enable %04x:%04x\n",
2328                         PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2329                 return -ENODEV;
2330         }
2331
2332         edac_dbg(0, "Detected %04x:%04x\n",
2333                  PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2334
2335         /*
2336          * As stated in drivers/pci/search.c, the reference count for
2337          * @from is always decremented if it is not %NULL. Since we need
2338          * to walk the device list all the way to NULL, take an extra get here.
2339          */
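        /*
         * For reference, the usual enumeration pattern with
         * pci_get_device() (which drops the previous reference on each
         * call) looks like:
         *
         *      struct pci_dev *pdev = NULL;
         *      while ((pdev = pci_get_device(vendor, device, pdev)))
         *              ...;
         *
         * This function instead carries the cursor in *prev across calls.
         */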
2340         pci_dev_get(pdev);
2341
2342         *prev = pdev;
2343
2344         return 0;
2345 }
2346
2347 /*
2348  * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's
2349  *                           devices we want to reference for this driver.
2350  * @num_mc: pointer to the memory controllers count, to be incremented in case
2351  *          of success.
2352  * @table: model specific table
2353  *
2354  * returns 0 in case of success or error code
2355  */
2356 static int sbridge_get_all_devices(u8 *num_mc,
2357                                         const struct pci_id_table *table)
2358 {
2359         int i, rc;
2360         struct pci_dev *pdev = NULL;
2361         int allow_dups = 0;
2362         int multi_bus = 0;
2363
2364         if (table->type == KNIGHTS_LANDING)
2365                 allow_dups = multi_bus = 1;
2366         while (table && table->descr) {
2367                 for (i = 0; i < table->n_devs_per_sock; i++) {
2368                         if (!allow_dups || i == 0 ||
2369                                         table->descr[i].dev_id !=
2370                                                 table->descr[i-1].dev_id) {
2371                                 pdev = NULL;
2372                         }
2373                         do {
2374                                 rc = sbridge_get_onedevice(&pdev, num_mc,
2375                                                            table, i, multi_bus);
2376                                 if (rc < 0) {
2377                                         if (i == 0) {
2378                                                 i = table->n_devs_per_sock;
2379                                                 break;
2380                                         }
2381                                         sbridge_put_all_devices();
2382                                         return -ENODEV;
2383                                 }
2384                         } while (pdev && !allow_dups);
2385                 }
2386                 table++;
2387         }
2388
2389         return 0;
2390 }
2391
2392 /*
2393  * Device IDs for {SBRIDGE,IBRIDGE,HASWELL,BROADWELL}_IMC_HA0_TAD0 are in
2394  * the format: XXXa. So we can convert from a device to the corresponding
2395  * channel like this
2396  */
2397 #define TAD_DEV_TO_CHAN(dev) (((dev) & 0xf) - 0xa)
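/*
 * For example, a TAD0 device ID ending in 0xa (such as Sandy Bridge's
 * 0x3caa) maps to channel (0xa - 0xa) = 0, an ID ending in 0xb to
 * channel 1, and so on.
 */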
2398
2399 static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
2400                                  struct sbridge_dev *sbridge_dev)
2401 {
2402         struct sbridge_pvt *pvt = mci->pvt_info;
2403         struct pci_dev *pdev;
2404         u8 saw_chan_mask = 0;
2405         int i;
2406
2407         for (i = 0; i < sbridge_dev->n_devs; i++) {
2408                 pdev = sbridge_dev->pdev[i];
2409                 if (!pdev)
2410                         continue;
2411
2412                 switch (pdev->device) {
2413                 case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0:
2414                         pvt->pci_sad0 = pdev;
2415                         break;
2416                 case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1:
2417                         pvt->pci_sad1 = pdev;
2418                         break;
2419                 case PCI_DEVICE_ID_INTEL_SBRIDGE_BR:
2420                         pvt->pci_br0 = pdev;
2421                         break;
2422                 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
2423                         pvt->pci_ha = pdev;
2424                         break;
2425                 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
2426                         pvt->pci_ta = pdev;
2427                         break;
2428                 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS:
2429                         pvt->pci_ras = pdev;
2430                         break;
2431                 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0:
2432                 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1:
2433                 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
2434                 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
2435                 {
2436                         int id = TAD_DEV_TO_CHAN(pdev->device);
2437                         pvt->pci_tad[id] = pdev;
2438                         saw_chan_mask |= 1 << id;
2439                 }
2440                         break;
2441                 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
2442                         pvt->pci_ddrio = pdev;
2443                         break;
2444                 default:
2445                         goto error;
2446                 }
2447
2448                 edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n",
2449                          pdev->vendor, pdev->device,
2450                          sbridge_dev->bus,
2451                          pdev);
2452         }
2453
2454         /* Check if everything was registered */
2455         if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha ||
2456             !pvt->pci_ras || !pvt->pci_ta)
2457                 goto enodev;
2458
2459         if (saw_chan_mask != 0x0f)
2460                 goto enodev;
2461         return 0;
2462
2463 enodev:
2464         sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2465         return -ENODEV;
2466
2467 error:
2468         sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n",
2469                        PCI_VENDOR_ID_INTEL, pdev->device);
2470         return -EINVAL;
2471 }
2472
2473 static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
2474                                  struct sbridge_dev *sbridge_dev)
2475 {
2476         struct sbridge_pvt *pvt = mci->pvt_info;
2477         struct pci_dev *pdev;
2478         u8 saw_chan_mask = 0;
2479         int i;
2480
2481         for (i = 0; i < sbridge_dev->n_devs; i++) {
2482                 pdev = sbridge_dev->pdev[i];
2483                 if (!pdev)
2484                         continue;
2485
2486                 switch (pdev->device) {
2487                 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
2488                 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
2489                         pvt->pci_ha = pdev;
2490                         break;
2491                 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
2492                 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
2493                         pvt->pci_ta = pdev;
                             break;
2494                 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
2495                 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
2496                         pvt->pci_ras = pdev;
2497                         break;
2498                 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
2499                 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
2500                 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
2501                 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
2502                 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
2503                 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
2504                 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
2505                 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
2506                 {
2507                         int id = TAD_DEV_TO_CHAN(pdev->device);
2508                         pvt->pci_tad[id] = pdev;
2509                         saw_chan_mask |= 1 << id;
2510                 }
2511                         break;
2512                 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0:
2513                         pvt->pci_ddrio = pdev;
2514                         break;
2515                 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0:
2516                         pvt->pci_ddrio = pdev;
2517                         break;
2518                 case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD:
2519                         pvt->pci_sad0 = pdev;
2520                         break;
2521                 case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0:
2522                         pvt->pci_br0 = pdev;
2523                         break;
2524                 case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
2525                         pvt->pci_br1 = pdev;
2526                         break;
2527                 default:
2528                         goto error;
2529                 }
2530
2531                 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2532                          sbridge_dev->bus,
2533                          PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2534                          pdev);
2535         }
2536
2537         /* Check if everything was registered */
2538         if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_br0 ||
2539             !pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta)
2540                 goto enodev;
2541
2542         if (saw_chan_mask != 0x0f && /* -EN/-EX */
2543             saw_chan_mask != 0x03)   /* -EP */
2544                 goto enodev;
2545         return 0;
2546
2547 enodev:
2548         sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2549         return -ENODEV;
2550
2551 error:
2552         sbridge_printk(KERN_ERR,
2553                        "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL,
2554                         pdev->device);
2555         return -EINVAL;
2556 }
2557
2558 static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
2559                                  struct sbridge_dev *sbridge_dev)
2560 {
2561         struct sbridge_pvt *pvt = mci->pvt_info;
2562         struct pci_dev *pdev;
2563         u8 saw_chan_mask = 0;
2564         int i;
2565
2566         /* there's only one device per system; not tied to any bus */
2567         if (pvt->info.pci_vtd == NULL)
2568                 /* result will be checked later */
2569                 pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
2570                                                    PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC,
2571                                                    NULL);
2572
2573         for (i = 0; i < sbridge_dev->n_devs; i++) {
2574                 pdev = sbridge_dev->pdev[i];
2575                 if (!pdev)
2576                         continue;
2577
2578                 switch (pdev->device) {
2579                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0:
2580                         pvt->pci_sad0 = pdev;
2581                         break;
2582                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1:
2583                         pvt->pci_sad1 = pdev;
2584                         break;
2585                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
2586                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
2587                         pvt->pci_ha = pdev;
2588                         break;
2589                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
2590                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
2591                         pvt->pci_ta = pdev;
2592                         break;
2593                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM:
2594                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM:
2595                         pvt->pci_ras = pdev;
2596                         break;
2597                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
2598                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
2599                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
2600                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
2601                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
2602                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
2603                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2:
2604                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3:
2605                 {
2606                         int id = TAD_DEV_TO_CHAN(pdev->device);
2607                         pvt->pci_tad[id] = pdev;
2608                         saw_chan_mask |= 1 << id;
2609                 }
2610                         break;
2611                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
2612                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1:
2613                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2:
2614                 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3:
2615                         if (!pvt->pci_ddrio)
2616                                 pvt->pci_ddrio = pdev;
2617                         break;
2618                 default:
2619                         break;
2620                 }
2621
2622                 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2623                          sbridge_dev->bus,
2624                          PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2625                          pdev);
2626         }
2627
2628         /* Check if everything was registered */
2629         if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
2630             !pvt->pci_ras  || !pvt->pci_ta || !pvt->info.pci_vtd)
2631                 goto enodev;
2632
2633         if (saw_chan_mask != 0x0f && /* -EN/-EX */
2634             saw_chan_mask != 0x03)   /* -EP */
2635                 goto enodev;
2636         return 0;
2637
2638 enodev:
2639         sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2640         return -ENODEV;
2641 }
2642
2643 static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
2644                                  struct sbridge_dev *sbridge_dev)
2645 {
2646         struct sbridge_pvt *pvt = mci->pvt_info;
2647         struct pci_dev *pdev;
2648         u8 saw_chan_mask = 0;
2649         int i;
2650
2651         /* there's only one device per system; not tied to any bus */
2652         if (pvt->info.pci_vtd == NULL)
2653                 /* result will be checked later */
2654                 pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
2655                                                    PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC,
2656                                                    NULL);
2657
2658         for (i = 0; i < sbridge_dev->n_devs; i++) {
2659                 pdev = sbridge_dev->pdev[i];
2660                 if (!pdev)
2661                         continue;
2662
2663                 switch (pdev->device) {
2664                 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0:
2665                         pvt->pci_sad0 = pdev;
2666                         break;
2667                 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1:
2668                         pvt->pci_sad1 = pdev;
2669                         break;
2670                 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
2671                 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
2672                         pvt->pci_ha = pdev;
2673                         break;
2674                 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA:
2675                 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
2676                         pvt->pci_ta = pdev;
2677                         break;
2678                 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM:
2679                 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM:
2680                         pvt->pci_ras = pdev;
2681                         break;
2682                 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0:
2683                 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1:
2684                 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2:
2685                 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3:
2686                 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0:
2687                 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1:
2688                 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2:
2689                 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3:
2690                 {
2691                         int id = TAD_DEV_TO_CHAN(pdev->device);
2692                         pvt->pci_tad[id] = pdev;
2693                         saw_chan_mask |= 1 << id;
2694                 }
2695                         break;
2696                 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0:
2697                         pvt->pci_ddrio = pdev;
2698                         break;
2699                 default:
2700                         break;
2701                 }
2702
2703                 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2704                          sbridge_dev->bus,
2705                          PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2706                          pdev);
2707         }
2708
2709         /* Check if everything was registered */
2710         if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
2711             !pvt->pci_ras  || !pvt->pci_ta || !pvt->info.pci_vtd)
2712                 goto enodev;
2713
2714         if (saw_chan_mask != 0x0f && /* -EN/-EX */
2715             saw_chan_mask != 0x03)   /* -EP */
2716                 goto enodev;
2717         return 0;
2718
2719 enodev:
2720         sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2721         return -ENODEV;
2722 }
2723
2724 static int knl_mci_bind_devs(struct mem_ctl_info *mci,
2725                         struct sbridge_dev *sbridge_dev)
2726 {
2727         struct sbridge_pvt *pvt = mci->pvt_info;
2728         struct pci_dev *pdev;
2729         int dev, func;
2730
2731         int i;
2732         int devidx;
2733
2734         for (i = 0; i < sbridge_dev->n_devs; i++) {
2735                 pdev = sbridge_dev->pdev[i];
2736                 if (!pdev)
2737                         continue;
2738
2739                 /* Extract PCI device and function. */
2740                 dev = (pdev->devfn >> 3) & 0x1f;
2741                 func = pdev->devfn & 0x7;
2742
2743                 switch (pdev->device) {
2744                 case PCI_DEVICE_ID_INTEL_KNL_IMC_MC:
2745                         if (dev == 8)
2746                                 pvt->knl.pci_mc0 = pdev;
2747                         else if (dev == 9)
2748                                 pvt->knl.pci_mc1 = pdev;
2749                         else {
2750                                 sbridge_printk(KERN_ERR,
2751                                         "Memory controller in unexpected place! (dev %d, fn %d)\n",
2752                                         dev, func);
2753                                 continue;
2754                         }
2755                         break;
2756
2757                 case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0:
2758                         pvt->pci_sad0 = pdev;
2759                         break;
2760
2761                 case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1:
2762                         pvt->pci_sad1 = pdev;
2763                         break;
2764
2765                 case PCI_DEVICE_ID_INTEL_KNL_IMC_CHA:
2766                         /* There is one of these per tile, and they range
2767                          * from 1.14.0 to 1.18.5.
2768                          */
2769                         devidx = ((dev-14)*8)+func;
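                        /* e.g. dev 15, func 3 gives devidx = (15-14)*8 + 3 = 11. */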
2770
2771                         if (devidx < 0 || devidx >= KNL_MAX_CHAS) {
2772                                 sbridge_printk(KERN_ERR,
2773                                         "Caching and Home Agent in unexpected place! (dev %d, fn %d)\n",
2774                                         dev, func);
2775                                 continue;
2776                         }
2777
2778                         WARN_ON(pvt->knl.pci_cha[devidx] != NULL);
2779
2780                         pvt->knl.pci_cha[devidx] = pdev;
2781                         break;
2782
2783                 case PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN:
2784                         devidx = -1;
2785
2786                         /*
2787                          *  MC0 channels 0-2 are device 9, functions 2-4;
2788                          *  MC1 channels 3-5 are device 8, functions 2-4.
2789                          */
2790
2791                         if (dev == 9)
2792                                 devidx = func-2;
2793                         else if (dev == 8)
2794                                 devidx = 3 + (func-2);
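                        /* e.g. dev 8, func 4 gives devidx = 3 + (4-2) = 5,
                         * i.e. MC1's last channel. */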
2795
2796                         if (devidx < 0 || devidx >= KNL_MAX_CHANNELS) {
2797                                 sbridge_printk(KERN_ERR,
2798                                         "DRAM Channel Registers in unexpected place! (dev %d, fn %d)\n",
2799                                         dev, func);
2800                                 continue;
2801                         }
2802
2803                         WARN_ON(pvt->knl.pci_channel[devidx] != NULL);
2804                         pvt->knl.pci_channel[devidx] = pdev;
2805                         break;
2806
2807                 case PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM:
2808                         pvt->knl.pci_mc_info = pdev;
2809                         break;
2810
2811                 case PCI_DEVICE_ID_INTEL_KNL_IMC_TA:
2812                         pvt->pci_ta = pdev;
2813                         break;
2814
2815                 default:
2816                         sbridge_printk(KERN_ERR, "Unexpected device %d\n",
2817                                 pdev->device);
2818                         break;
2819                 }
2820         }
2821
2822         if (!pvt->knl.pci_mc0  || !pvt->knl.pci_mc1 ||
2823             !pvt->pci_sad0     || !pvt->pci_sad1    ||
2824             !pvt->pci_ta) {
2825                 goto enodev;
2826         }
2827
2828         for (i = 0; i < KNL_MAX_CHANNELS; i++) {
2829                 if (!pvt->knl.pci_channel[i]) {
2830                         sbridge_printk(KERN_ERR, "Missing channel %d\n", i);
2831                         goto enodev;
2832                 }
2833         }
2834
2835         for (i = 0; i < KNL_MAX_CHAS; i++) {
2836                 if (!pvt->knl.pci_cha[i]) {
2837                         sbridge_printk(KERN_ERR, "Missing CHA %d\n", i);
2838                         goto enodev;
2839                 }
2840         }
2841
2842         return 0;
2843
2844 enodev:
2845         sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2846         return -ENODEV;
2847 }
2848
2849 /****************************************************************************
2850                         Error check routines
2851  ****************************************************************************/
2852
2853 /*
2854  * Although Sandy Bridge has error count registers, the SMI BIOS reads and
2855  * resets those counters, so the OS cannot rely on their values. We
2856  * therefore have no option but to trust whatever MCE is telling us
2857  * about the errors.
2858  */
2859 static void sbridge_mce_output_error(struct mem_ctl_info *mci,
2860                                     const struct mce *m)
2861 {
2862         struct mem_ctl_info *new_mci;
2863         struct sbridge_pvt *pvt = mci->pvt_info;
2864         enum hw_event_mc_err_type tp_event;
2865         char *type, *optype, msg[256];
2866         bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
2867         bool overflow = GET_BITFIELD(m->status, 62, 62);
2868         bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
2869         bool recoverable;
2870         u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
2871         u32 mscod = GET_BITFIELD(m->status, 16, 31);
2872         u32 errcode = GET_BITFIELD(m->status, 0, 15);
2873         u32 channel = GET_BITFIELD(m->status, 0, 3);
2874         u32 optypenum = GET_BITFIELD(m->status, 4, 6);
2875         long channel_mask, first_channel;
2876         u8  rank, socket, ha;
2877         int rc, dimm;
2878         char *area_type = NULL;
2879
2880         if (pvt->info.type != SANDY_BRIDGE)
2881                 recoverable = true;
2882         else
2883                 recoverable = GET_BITFIELD(m->status, 56, 56);
2884
2885         if (uncorrected_error) {
2886                 if (ripv) {
2887                         type = "FATAL";
2888                         tp_event = HW_EVENT_ERR_FATAL;
2889                 } else {
2890                         type = "NON_FATAL";
2891                         tp_event = HW_EVENT_ERR_UNCORRECTED;
2892                 }
2893         } else {
2894                 type = "CORRECTED";
2895                 tp_event = HW_EVENT_ERR_CORRECTED;
2896         }
2897
2898         /*
2899          * According to Table 15-9 of the Intel Architecture spec vol 3A,
2900          * memory errors should fit in this mask:
2901          *      000f 0000 1mmm cccc (binary)
2902          * where:
2903          *      f = Correction Report Filtering Bit. If 1, subsequent errors
2904          *          won't be shown
2905          *      mmm = error type
2906          *      cccc = channel
2907          * If the mask doesn't match, report an error to the parsing logic
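          * e.g. errcode 0x0092 fits the mask: bit 7 set, mmm = 001
          *      (a memory read error) on channel cccc = 2.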
2908          */
2909         if ((errcode & 0xef80) != 0x80) {
2910                 optype = "Can't parse: it is not a memory error";
2911         } else {
2912                 switch (optypenum) {
2913                 case 0:
2914                         optype = "generic undef request error";
2915                         break;
2916                 case 1:
2917                         optype = "memory read error";
2918                         break;
2919                 case 2:
2920                         optype = "memory write error";
2921                         break;
2922                 case 3:
2923                         optype = "addr/cmd error";
2924                         break;
2925                 case 4:
2926                         optype = "memory scrubbing error";
2927                         break;
2928                 default:
2929                         optype = "reserved";
2930                         break;
2931                 }
2932         }
2933
2934         /* Only decode errors with a valid address (ADDRV) */
2935         if (!GET_BITFIELD(m->status, 58, 58))
2936                 return;
2937
2938         if (pvt->info.type == KNIGHTS_LANDING) {
2939                 if (channel == 14) {
2940                         edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n",
2941                                 overflow ? " OVERFLOW" : "",
2942                                 (uncorrected_error && recoverable)
2943                                 ? " recoverable" : "",
2944                                 mscod, errcode,
2945                                 m->bank);
2946                 } else {
2947                         char A = 'A';
2948
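                        /* 'A' + channel yields the DIMM label below, e.g.
                         * channel 2 is reported as DIMM_C. */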
2949                         /*
2950                          * The reported channel is in range 0-2, so we can't
2951                          * map it back to the mc directly. To figure out the
2952                          * mc, check the machine check bank register that
2953                          * reported this error: bank 15 means mc0, bank 16 mc1.
2954                          */
2955                         channel = knl_channel_remap(m->bank == 16, channel);
2956                         channel_mask = 1 << channel;
2957
2958                         snprintf(msg, sizeof(msg),
2959                                 "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
2960                                 overflow ? " OVERFLOW" : "",
2961                                 (uncorrected_error && recoverable)
2962                                 ? " recoverable" : "",
2963                                 mscod, errcode, channel, A + channel);
2964                         edac_mc_handle_error(tp_event, mci, core_err_cnt,
2965                                 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
2966                                 channel, 0, -1,
2967                                 optype, msg);
2968                 }
2969                 return;
2970         } else {
2971                 rc = get_memory_error_data(mci, m->addr, &socket, &ha,
2972                                 &channel_mask, &rank, &area_type, msg);
2973         }
2974
2975         if (rc < 0)
2976                 goto err_parsing;
2977         new_mci = get_mci_for_node_id(socket, ha);
2978         if (!new_mci) {
2979                 strcpy(msg, "Error: socket got corrupted!");
2980                 goto err_parsing;
2981         }
2982         mci = new_mci;
2983         pvt = mci->pvt_info;
2984
2985         first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
2986
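        /* Map the failing rank to a DIMM slot: ranks 0-3 sit on DIMM 0,
         * ranks 4-7 on DIMM 1, and higher ranks on DIMM 2. */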
2987         if (rank < 4)
2988                 dimm = 0;
2989         else if (rank < 8)
2990                 dimm = 1;
2991         else
2992                 dimm = 2;
2993
2994
2995         /*
2996          * FIXME: On some memory configurations (mirror, lockstep), the
2997          * Memory Controller can't point the error to a single DIMM. The
2998          * EDAC core should be handling the channel mask, in order to point
2999          * to the group of DIMMs where the error may be happening.
3000          */
3001         if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg)
3002                 channel = first_channel;
3003
3004         snprintf(msg, sizeof(msg),
3005                  "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d",
3006                  overflow ? " OVERFLOW" : "",
3007                  (uncorrected_error && recoverable) ? " recoverable" : "",
3008                  area_type,
3009                  mscod, errcode,
3010                  socket, ha,
3011                  channel_mask,
3012                  rank);
3013
3014         edac_dbg(0, "%s\n", msg);
3015
3016         /* FIXME: need support for channel mask */
3017
3018         if (channel == CHANNEL_UNSPECIFIED)
3019                 channel = -1;
3020
3021         /* Call the helper to output message */
3022         edac_mc_handle_error(tp_event, mci, core_err_cnt,
3023                              m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
3024                              channel, dimm, -1,
3025                              optype, msg);
3026         return;
3027 err_parsing:
3028         edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
3029                              -1, -1, -1,
3030                              msg, "");
3031
3032 }
3033
3034 /*
3035  * Check that logging is enabled and that this is the right type
3036  * of error for us to handle.
3037  */
3038 static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
3039                                    void *data)
3040 {
3041         struct mce *mce = (struct mce *)data;
3042         struct mem_ctl_info *mci;
3043         struct sbridge_pvt *pvt;
3044         char *type;
3045
3046         if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
3047                 return NOTIFY_DONE;
3048
3049         mci = get_mci_for_node_id(mce->socketid, IMC0);
3050         if (!mci)
3051                 return NOTIFY_DONE;
3052         pvt = mci->pvt_info;
3053
3054         /*
3055          * Just let mcelog handle it if the error is
3056          * outside the memory controller. A memory error
3057          * is indicated by bit 7 = 1 and bits 8-11 and 13-15 = 0;
3058          * bit 12 has a special meaning.
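          * The check below masks bit 12 out (0xefff keeps bits 0-11 and
          * 13-15) and then requires bits 7-15 of the result to be exactly 1,
          * i.e. bit 7 set and bits 8-11 and 13-15 clear.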
3059          */
3060         if ((mce->status & 0xefff) >> 7 != 1)
3061                 return NOTIFY_DONE;
3062
3063         if (mce->mcgstatus & MCG_STATUS_MCIP)
3064                 type = "Exception";
3065         else
3066                 type = "Event";
3067
3068         sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
3069
3070         sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
3071                           "Bank %d: %016Lx\n", mce->extcpu, type,
3072                           mce->mcgstatus, mce->bank, mce->status);
3073         sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
3074         sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
3075         sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
3076
3077         sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
3078                           "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
3079                           mce->time, mce->socketid, mce->apicid);
3080
3081         sbridge_mce_output_error(mci, mce);
3082
3083         /* Advise mcelog that the error was handled */
3084         return NOTIFY_STOP;
3085 }
3086
3087 static struct notifier_block sbridge_mce_dec = {
3088         .notifier_call  = sbridge_mce_check_error,
3089         .priority       = MCE_PRIO_EDAC,
3090 };
3091
3092 /****************************************************************************
3093                         EDAC register/unregister logic
3094  ****************************************************************************/
3095
3096 static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
3097 {
3098         struct mem_ctl_info *mci = sbridge_dev->mci;
3099         struct sbridge_pvt *pvt;
3100
3101         if (unlikely(!mci || !mci->pvt_info)) {
3102                 edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
3103
3104                 sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
3105                 return;
3106         }
3107
3108         pvt = mci->pvt_info;
3109
3110         edac_dbg(0, "MC: mci = %p, dev = %p\n",
3111                  mci, &sbridge_dev->pdev[0]->dev);
3112
3113         /* Remove MC sysfs nodes */
3114         edac_mc_del_mc(mci->pdev);
3115
3116         edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
3117         kfree(mci->ctl_name);
3118         edac_mc_free(mci);
3119         sbridge_dev->mci = NULL;
3120 }
3121
3122 static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
3123 {
3124         struct mem_ctl_info *mci;
3125         struct edac_mc_layer layers[2];
3126         struct sbridge_pvt *pvt;
3127         struct pci_dev *pdev = sbridge_dev->pdev[0];
3128         int rc;
3129
3130         /* allocate a new MC control structure */
3131         layers[0].type = EDAC_MC_LAYER_CHANNEL;
3132         layers[0].size = type == KNIGHTS_LANDING ?
3133                 KNL_MAX_CHANNELS : NUM_CHANNELS;
3134         layers[0].is_virt_csrow = false;
3135         layers[1].type = EDAC_MC_LAYER_SLOT;
3136         layers[1].size = type == KNIGHTS_LANDING ? 1 : MAX_DIMMS;
3137         layers[1].is_virt_csrow = true;
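        /* i.e. Knights Landing is laid out as KNL_MAX_CHANNELS channels of
         * one slot each; every other type gets NUM_CHANNELS channels with
         * MAX_DIMMS slots per channel. */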
3138         mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
3139                             sizeof(*pvt));
3140
3141         if (unlikely(!mci))
3142                 return -ENOMEM;
3143
3144         edac_dbg(0, "MC: mci = %p, dev = %p\n",
3145                  mci, &pdev->dev);
3146
3147         pvt = mci->pvt_info;
3148         memset(pvt, 0, sizeof(*pvt));
3149
3150         /* Associate sbridge_dev and mci for future usage */
3151         pvt->sbridge_dev = sbridge_dev;
3152         sbridge_dev->mci = mci;
3153
3154         mci->mtype_cap = type == KNIGHTS_LANDING ?
3155                 MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
3156         mci->edac_ctl_cap = EDAC_FLAG_NONE;
3157         mci->edac_cap = EDAC_FLAG_NONE;
3158         mci->mod_name = "sb_edac.c";
3159         mci->dev_name = pci_name(pdev);
3160         mci->ctl_page_to_phys = NULL;
3161
3162         pvt->info.type = type;
3163         switch (type) {
3164         case IVY_BRIDGE:
3165                 pvt->info.rankcfgr = IB_RANK_CFG_A;
3166                 pvt->info.get_tolm = ibridge_get_tolm;
3167                 pvt->info.get_tohm = ibridge_get_tohm;
3168                 pvt->info.dram_rule = ibridge_dram_rule;
3169                 pvt->info.get_memory_type = get_memory_type;
3170                 pvt->info.get_node_id = get_node_id;
3171                 pvt->info.rir_limit = rir_limit;
3172                 pvt->info.sad_limit = sad_limit;
3173                 pvt->info.interleave_mode = interleave_mode;
3174                 pvt->info.dram_attr = dram_attr;
3175                 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3176                 pvt->info.interleave_list = ibridge_interleave_list;
3177                 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
3178                 pvt->info.interleave_pkg = ibridge_interleave_pkg;
3179                 pvt->info.get_width = ibridge_get_width;
3180
3181                 /* Store pci devices at mci for faster access */
3182                 rc = ibridge_mci_bind_devs(mci, sbridge_dev);
3183                 if (unlikely(rc < 0))
3184                         goto fail0;
3185                 get_source_id(mci);
3186                 mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge SrcID#%d_Ha#%d",
3187                         pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3188                 break;
3189         case SANDY_BRIDGE:
3190                 pvt->info.rankcfgr = SB_RANK_CFG_A;
3191                 pvt->info.get_tolm = sbridge_get_tolm;
3192                 pvt->info.get_tohm = sbridge_get_tohm;
3193                 pvt->info.dram_rule = sbridge_dram_rule;
3194                 pvt->info.get_memory_type = get_memory_type;
3195                 pvt->info.get_node_id = get_node_id;
3196                 pvt->info.rir_limit = rir_limit;
3197                 pvt->info.sad_limit = sad_limit;
3198                 pvt->info.interleave_mode = interleave_mode;
3199                 pvt->info.dram_attr = dram_attr;
3200                 pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
3201                 pvt->info.interleave_list = sbridge_interleave_list;
3202                 pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list);
3203                 pvt->info.interleave_pkg = sbridge_interleave_pkg;
3204                 pvt->info.get_width = sbridge_get_width;
3205
3206                 /* Store pci devices at mci for faster access */
3207                 rc = sbridge_mci_bind_devs(mci, sbridge_dev);
3208                 if (unlikely(rc < 0))
3209                         goto fail0;
3210                 get_source_id(mci);
3211                 mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge SrcID#%d_Ha#%d",
3212                         pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3213                 break;
3214         case HASWELL:
3215                 /* rankcfgr isn't used */
3216                 pvt->info.get_tolm = haswell_get_tolm;
3217                 pvt->info.get_tohm = haswell_get_tohm;
3218                 pvt->info.dram_rule = ibridge_dram_rule;
3219                 pvt->info.get_memory_type = haswell_get_memory_type;
3220                 pvt->info.get_node_id = haswell_get_node_id;
3221                 pvt->info.rir_limit = haswell_rir_limit;
3222                 pvt->info.sad_limit = sad_limit;
3223                 pvt->info.interleave_mode = interleave_mode;
3224                 pvt->info.dram_attr = dram_attr;
3225                 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3226                 pvt->info.interleave_list = ibridge_interleave_list;
3227                 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
3228                 pvt->info.interleave_pkg = ibridge_interleave_pkg;
3229                 pvt->info.get_width = ibridge_get_width;
3230
3231                 /* Store pci devices at mci for faster access */
3232                 rc = haswell_mci_bind_devs(mci, sbridge_dev);
3233                 if (unlikely(rc < 0))
3234                         goto fail0;
3235                 get_source_id(mci);
3236                 mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell SrcID#%d_Ha#%d",
3237                         pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3238                 break;
3239         case BROADWELL:
3240                 /* rankcfgr isn't used */
3241                 pvt->info.get_tolm = haswell_get_tolm;
3242                 pvt->info.get_tohm = haswell_get_tohm;
3243                 pvt->info.dram_rule = ibridge_dram_rule;
3244                 pvt->info.get_memory_type = haswell_get_memory_type;
3245                 pvt->info.get_node_id = haswell_get_node_id;
3246                 pvt->info.rir_limit = haswell_rir_limit;
3247                 pvt->info.sad_limit = sad_limit;
3248                 pvt->info.interleave_mode = interleave_mode;
3249                 pvt->info.dram_attr = dram_attr;
3250                 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3251                 pvt->info.interleave_list = ibridge_interleave_list;
3252                 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
3253                 pvt->info.interleave_pkg = ibridge_interleave_pkg;
3254                 pvt->info.get_width = broadwell_get_width;
3255
3256                 /* Store pci devices at mci for faster access */
3257                 rc = broadwell_mci_bind_devs(mci, sbridge_dev);
3258                 if (unlikely(rc < 0))
3259                         goto fail0;
3260                 get_source_id(mci);
3261                 mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell SrcID#%d_Ha#%d",
3262                         pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3263                 break;
3264         case KNIGHTS_LANDING:
3265                 /* pvt->info.rankcfgr == ??? */
3266                 pvt->info.get_tolm = knl_get_tolm;
3267                 pvt->info.get_tohm = knl_get_tohm;
3268                 pvt->info.dram_rule = knl_dram_rule;
3269                 pvt->info.get_memory_type = knl_get_memory_type;
3270                 pvt->info.get_node_id = knl_get_node_id;
3271                 pvt->info.rir_limit = NULL;
3272                 pvt->info.sad_limit = knl_sad_limit;
3273                 pvt->info.interleave_mode = knl_interleave_mode;
3274                 pvt->info.dram_attr = dram_attr_knl;
3275                 pvt->info.max_sad = ARRAY_SIZE(knl_dram_rule);
3276                 pvt->info.interleave_list = knl_interleave_list;
3277                 pvt->info.max_interleave = ARRAY_SIZE(knl_interleave_list);
3278                 pvt->info.interleave_pkg = ibridge_interleave_pkg;
3279                 pvt->info.get_width = knl_get_width;
3280
3281                 rc = knl_mci_bind_devs(mci, sbridge_dev);
3282                 if (unlikely(rc < 0))
3283                         goto fail0;
3284                 get_source_id(mci);
3285                 mci->ctl_name = kasprintf(GFP_KERNEL, "Knights Landing SrcID#%d_Ha#%d",
3286                         pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3287                 break;
3288         }
3289
3290         /* Get dimm basic config and the memory layout */
3291         rc = get_dimm_config(mci);
3292         if (rc < 0) {
3293                 edac_dbg(0, "MC: failed to get_dimm_config()\n");
3294                 goto fail;
3295         }
3296         get_memory_layout(mci);
3297
3298         /* record ptr to the generic device */
3299         mci->pdev = &pdev->dev;
3300
3301         /* add this new MC control structure to EDAC's list of MCs */
3302         if (unlikely(edac_mc_add_mc(mci))) {
3303                 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
3304                 rc = -EINVAL;
3305                 goto fail;
3306         }
3307
3308         return 0;
3309
3310 fail:
3311         kfree(mci->ctl_name);
3312 fail0:
3313         edac_mc_free(mci);
3314         sbridge_dev->mci = NULL;
3315         return rc;
3316 }
3317
3318 #define ICPU(model, table) \
3319         { X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
3320
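/* Each entry stashes the matching PCI device table in driver_data;
 * sbridge_probe() retrieves it from the matched x86_cpu_id. */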
3321 static const struct x86_cpu_id sbridge_cpuids[] = {
3322         ICPU(INTEL_FAM6_SANDYBRIDGE_X,    pci_dev_descr_sbridge_table),
3323         ICPU(INTEL_FAM6_IVYBRIDGE_X,      pci_dev_descr_ibridge_table),
3324         ICPU(INTEL_FAM6_HASWELL_X,        pci_dev_descr_haswell_table),
3325         ICPU(INTEL_FAM6_BROADWELL_X,      pci_dev_descr_broadwell_table),
3326         ICPU(INTEL_FAM6_BROADWELL_XEON_D, pci_dev_descr_broadwell_table),
3327         ICPU(INTEL_FAM6_XEON_PHI_KNL,     pci_dev_descr_knl_table),
3328         ICPU(INTEL_FAM6_XEON_PHI_KNM,     pci_dev_descr_knl_table),
3329         { }
3330 };
3331 MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
3332
3333 /*
3334  *      sbridge_probe   Get all devices and register the memory
3335  *                      controllers present.
3336  *      return:
3337  *              0 when a device was found
3338  *              < 0 for an error code
3339  */
3340
3341 static int sbridge_probe(const struct x86_cpu_id *id)
3342 {
3343         int rc = -ENODEV;
3344         u8 mc, num_mc = 0;
3345         struct sbridge_dev *sbridge_dev;
3346         struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;
3347
3348         /* get the pci devices we want to reserve for our use */
3349         rc = sbridge_get_all_devices(&num_mc, ptable);
3350
3351         if (unlikely(rc < 0)) {
3352                 edac_dbg(0, "couldn't get all devices\n");
3353                 goto fail0;
3354         }
3355
3356         mc = 0;
3357
3358         list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
3359                 edac_dbg(0, "Registering MC#%d (%d of %d)\n",
3360                          mc, mc + 1, num_mc);
3361
3362                 sbridge_dev->mc = mc++;
3363                 rc = sbridge_register_mci(sbridge_dev, ptable->type);
3364                 if (unlikely(rc < 0))
3365                         goto fail1;
3366         }
3367
3368         sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
3369
3370         return 0;
3371
3372 fail1:
3373         list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3374                 sbridge_unregister_mci(sbridge_dev);
3375
3376         sbridge_put_all_devices();
3377 fail0:
3378         return rc;
3379 }
3380
3381 /*
3382  *      sbridge_remove  Unregister all MCs and release the PCI devices
3383  *
3384  */
3385 static void sbridge_remove(void)
3386 {
3387         struct sbridge_dev *sbridge_dev;
3388
3389         edac_dbg(0, "\n");
3390
3391         list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3392                 sbridge_unregister_mci(sbridge_dev);
3393
3394         /* Release PCI resources */
3395         sbridge_put_all_devices();
3396 }
3397
3398 /*
3399  *      sbridge_init            Module entry function
3400  *                      Try to initialize this module for its devices
3401  */
3402 static int __init sbridge_init(void)
3403 {
3404         const struct x86_cpu_id *id;
3405         int rc;
3406
3407         edac_dbg(2, "\n");
3408
3409         id = x86_match_cpu(sbridge_cpuids);
3410         if (!id)
3411                 return -ENODEV;
3412
3413         /* Ensure that the OPSTATE is set correctly for POLL or NMI */
3414         opstate_init();
3415
3416         rc = sbridge_probe(id);
3417
3418         if (rc >= 0) {
3419                 mce_register_decode_chain(&sbridge_mce_dec);
3420                 if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
3421                         sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n");
3422                 return 0;
3423         }
3424
3425         sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
3426                       rc);
3427
3428         return rc;
3429 }
3430
3431 /*
3432  *      sbridge_exit()  Module exit function
3433  *                      Unregister the driver
3434  */
3435 static void __exit sbridge_exit(void)
3436 {
3437         edac_dbg(2, "\n");
3438         sbridge_remove();
3439         mce_unregister_decode_chain(&sbridge_mce_dec);
3440 }
3441
3442 module_init(sbridge_init);
3443 module_exit(sbridge_exit);
3444
3445 module_param(edac_op_state, int, 0444);
3446 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
3447
3448 MODULE_LICENSE("GPL");
3449 MODULE_AUTHOR("Mauro Carvalho Chehab");
3450 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
3451 MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "
3452                    SBRIDGE_REVISION);