* 'linux_next' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/i7core: (83 commits)
i7core_edac: Better describe the supported devices
Add support for Westmere to i7core_edac driver
i7core_edac: don't free on success
i7core_edac: Add support for X5670
Always call i7core_[ur]dimm_check_mc_ecc_err
i7core_edac: fix memory leak of i7core_dev
EDAC: add __init to i7core_xeon_pci_fixup
i7core_edac: Fix wrong device id for channel 1 devices
i7core: add support for Lynnfield alternate address
i7core_edac: Add initial support for Lynnfield
i7core_edac: do not export static functions
edac: fix i7core build
edac: i7core_edac produces undefined behaviour on 32bit
i7core_edac: Use a more generic approach for probing PCI devices
i7core_edac: PCI device is called NONCORE, instead of NOCORE
i7core_edac: Fix ringbuffer maxsize
i7core_edac: First store, then increment
i7core_edac: Better parse "any" addrmask
i7core_edac: Use a lockless ringbuffer
edac: Create an unique instance for each kobj
...
extern struct pci_bus *pci_root_bus;
extern struct pci_ops pci_root_ops;
+void pcibios_scan_specific_bus(int busn);
+
/* pci-irq.c */
struct irq_info {
extern unsigned int pcibios_irq_mask;
-extern spinlock_t pci_config_lock;
+extern raw_spinlock_t pci_config_lock;
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
extern void (*pcibios_disable_irq)(struct pci_dev *dev);
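The spinlock_t to raw_spinlock_t conversion of pci_config_lock matters for PREEMPT_RT: the low-level config-space accessors run with interrupts disabled and must never sleep, and raw_spinlock_t is guaranteed to remain a true spinning lock there. A minimal sketch of a conf1-style accessor taking the lock (the _sketch name is hypothetical; the real pci_conf1_read also dispatches on the access length):

static int pci_conf1_read_sketch(unsigned int bus, unsigned int devfn,
				 int reg, u32 *value)
{
	unsigned long flags;

	/* raw_ variant: never turns into a sleeping lock, even on RT */
	raw_spin_lock_irqsave(&pci_config_lock, flags);
	outl(0x80000000 | (bus << 16) | (devfn << 8) | (reg & ~3), 0xCF8);
	*value = inl(0xCFC);
	raw_spin_unlock_irqrestore(&pci_config_lock, flags);
	return 0;
}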
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
+#include <linux/edac_mce.h>
#include <asm/processor.h>
#include <asm/hw_irq.h>
for (;;) {
entry = rcu_dereference_check_mce(mcelog.next);
for (;;) {
+ /*
+ * If edac_mce is enabled, let it check the error type and
+ * process the event if it is a known error. Otherwise, the
+ * error is sent through the mcelog interface.
+ */
+ if (edac_mce_parse(mce))
+ return;
+
/*
* When the buffer fills up discard new entries.
* Assume that the earlier errors are the more
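mce_log() now offers every incoming record to the EDAC core before queueing it. The hook lives in the <linux/edac_mce.h> header added by this series, essentially a list of callbacks; its shape, reconstructed from the call site as a hedged sketch:

struct edac_mce {
	void *priv;					/* driver context */
	int (*check_error)(void *priv, struct mce *mce);
	struct list_head list;
};

int edac_mce_register(struct edac_mce *edac_mce);
void edac_mce_unregister(struct edac_mce *edac_mce);

/*
 * Walks the registered handlers; returns nonzero if one of them
 * claimed the event, in which case mce_log() drops it instead of
 * pushing it into the mcelog ring buffer.
 */
int edac_mce_parse(struct mce *mce);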
static void mce_panic(char *msg, struct mce *final, char *exp)
{
- int i;
+ int i, apei_err = 0;
if (!fake_panic) {
/*
struct mce *m = &mcelog.entry[i];
if (!(m->status & MCI_STATUS_VAL))
continue;
- if (!(m->status & MCI_STATUS_UC))
+ if (!(m->status & MCI_STATUS_UC)) {
print_mce(m);
+ if (!apei_err)
+ apei_err = apei_write_mce(m);
+ }
}
/* Now print uncorrected but with the final one last */
for (i = 0; i < MCE_LOG_LEN; i++) {
continue;
if (!(m->status & MCI_STATUS_UC))
continue;
- if (!final || memcmp(m, final, sizeof(struct mce)))
+ if (!final || memcmp(m, final, sizeof(struct mce))) {
print_mce(m);
+ if (!apei_err)
+ apei_err = apei_write_mce(m);
+ }
}
- if (final)
+ if (final) {
print_mce(final);
+ if (!apei_err)
+ apei_err = apei_write_mce(final);
+ }
if (cpu_missing)
printk(KERN_EMERG "Some CPUs didn't answer in synchronization\n");
print_mce_tail();
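mce_panic() now also pushes each printed record into the ACPI ERST (Error Record Serialization Table) so it survives the reboot. The `if (!apei_err)` guard latches the first failure: once one write fails, further attempts are skipped on the panic path. Assumed prototype, matching the call sites above:

int apei_write_mce(struct mce *m);	/* 0 on success, negative errno on failure */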
struct mce m;
int i;
- __get_cpu_var(mce_poll_count)++;
+ percpu_inc(mce_poll_count);
mce_setup(&m);
atomic_inc(&mce_entry);
- __get_cpu_var(mce_exception_count)++;
+ percpu_inc(mce_exception_count);
if (notify_die(DIE_NMI, "machine check", regs, error_code,
18, SIGKILL) == NOTIFY_STOP)
rdtscll(cpu_tsc[smp_processor_id()]);
}
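percpu_inc() replaces the open-coded __get_cpu_var(x)++, which expands to an address computation plus a load/add/store sequence and can therefore be torn by an interrupt on the same CPU. On x86, percpu_inc() compiles to a single %gs-relative incl, making the counter update safe against local interrupts:

__get_cpu_var(mce_poll_count)++;	/* old: read-modify-write, interruptible */
percpu_inc(mce_poll_count);		/* new: one "incl %gs:..." on x86 */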
+static int mce_apei_read_done;
+
+/* Collect MCE records of the previous boot from persistent storage via APEI ERST. */
+static int __mce_read_apei(char __user **ubuf, size_t usize)
+{
+ int rc;
+ u64 record_id;
+ struct mce m;
+
+ if (usize < sizeof(struct mce))
+ return -EINVAL;
+
+ rc = apei_read_mce(&m, &record_id);
+ /* Error or no more MCE record */
+ if (rc <= 0) {
+ mce_apei_read_done = 1;
+ return rc;
+ }
+ rc = -EFAULT;
+ if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
+ return rc;
+ /*
+ * Ideally the record would be cleared only after /sbin/mcelog
+ * has flushed it to disk or sent it over the network, but no
+ * interface exists for that yet, so clear it here to avoid
+ * duplication.
+ */
+ rc = apei_clear_mce(record_id);
+ if (rc) {
+ mce_apei_read_done = 1;
+ return rc;
+ }
+ *ubuf += sizeof(struct mce);
+
+ return 0;
+}
+
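__mce_read_apei() drains, one record per call, whatever the previous kernel pushed into ERST from mce_panic(). The helper set it relies on, with semantics assumed from the call sites in this file (implemented by the new mce-apei code):

int apei_check_mce(void);			/* > 0 if ERST holds pending MCE records */
ssize_t apei_read_mce(struct mce *m, u64 *record_id);	/* > 0: got one, 0: empty, < 0: error */
int apei_clear_mce(u64 record_id);		/* drop a record once userspace has read it */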
static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
loff_t *off)
{
return -ENOMEM;
mutex_lock(&mce_read_mutex);
+
+ if (!mce_apei_read_done) {
+ err = __mce_read_apei(&buf, usize);
+ if (err || buf != ubuf)
+ goto out;
+ }
+
next = rcu_dereference_check_mce(mcelog.next);
/* Only supports full reads right now */
- if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
- mutex_unlock(&mce_read_mutex);
- kfree(cpu_tsc);
-
- return -EINVAL;
- }
+ err = -EINVAL;
+ if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
+ goto out;
err = 0;
prev = 0;
memset(&mcelog.entry[i], 0, sizeof(struct mce));
}
}
+
+ if (err)
+ err = -EFAULT;
+
+out:
mutex_unlock(&mce_read_mutex);
kfree(cpu_tsc);
- return err ? -EFAULT : buf - ubuf;
+ return err ? err : buf - ubuf;
}
static unsigned int mce_poll(struct file *file, poll_table *wait)
poll_wait(file, &mce_wait, wait);
if (rcu_dereference_check_mce(mcelog.next))
return POLLIN | POLLRDNORM;
+ if (!mce_apei_read_done && apei_check_mce())
+ return POLLIN | POLLRDNORM;
return 0;
}
unsigned int is_virtfn:1;
unsigned int reset_fn:1;
unsigned int is_hotplug_bridge:1;
- unsigned int aer_firmware_first:1;
+ unsigned int __aer_firmware_first_valid:1;
+ unsigned int __aer_firmware_first:1;
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
#endif
};
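Splitting the old aer_firmware_first bit in two lets the answer be computed lazily and cached: __aer_firmware_first_valid records whether the ACPI HEST lookup has already run, and __aer_firmware_first caches its result. A sketch of the accessor this enables (function names assumed from the pattern, not quoted from the tree):

int pcie_aer_get_firmware_first(struct pci_dev *dev)
{
	if (!dev->__aer_firmware_first_valid)
		aer_set_firmware_first(dev);	/* one-time HEST walk, sets both bits */
	return dev->__aer_firmware_first;
}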
+static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
+{
+#ifdef CONFIG_PCI_IOV
+ if (dev->is_virtfn)
+ dev = dev->physfn;
+#endif
+
+ return dev;
+}
+
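pci_physfn() maps an SR-IOV virtual function to its owning physical function and is the identity for everything else, so callers needing PF-only state (the AER firmware-first bits above, for instance) can apply it unconditionally:

struct pci_dev *pf = pci_physfn(dev);	/* == dev unless dev->is_virtfn */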
extern struct pci_dev *alloc_pci_dev(void);
#define pci_dev_b(n) list_entry(n, struct pci_dev, bus_list)
/* Generic PCI functions used internally */
+void pcibios_scan_specific_bus(int busn);
extern struct pci_bus *pci_find_bus(int domain, int busnr);
void pci_bus_add_devices(const struct pci_bus *bus);
struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
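pcibios_scan_specific_bus() is what lets i7core_xeon_pci_fixup() force enumeration of the high bus numbers (0xfe/0xff) where Nehalem parks its uncore devices when the BIOS hides them. The fixup's use, sketched:

struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_I7_MCR, NULL);
if (!pdev) {
	/* uncore devices not enumerated by the BIOS: scan them in */
	pcibios_scan_specific_bus(254);
	pcibios_scan_specific_bus(255);
}
pci_dev_put(pdev);	/* pci_get_device() took a reference; NULL is safe */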
#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
#define PCI_DEVICE_ID_INTEL_CPT_SMBUS 0x1c22
-#define PCI_DEVICE_ID_INTEL_CPT_LPC1 0x1c42
-#define PCI_DEVICE_ID_INTEL_CPT_LPC2 0x1c43
+#define PCI_DEVICE_ID_INTEL_CPT_LPC_MIN 0x1c41
+#define PCI_DEVICE_ID_INTEL_CPT_LPC_MAX 0x1c5f
#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
#define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411
#define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413
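The two explicit Cougar Point LPC IDs become a MIN/MAX pair because the LPC bridge ID varies by SKU across 0x1c41..0x1c5f; consumers match the range instead of enumerating every ID. A hedged sketch of the check:

if (dev->vendor == PCI_VENDOR_ID_INTEL &&
    dev->device >= PCI_DEVICE_ID_INTEL_CPT_LPC_MIN &&
    dev->device <= PCI_DEVICE_ID_INTEL_CPT_LPC_MAX) {
	/* any Cougar Point LPC variant */
}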
#define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930
#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916
#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918
+#define PCI_DEVICE_ID_INTEL_I7_MCR 0x2c18
+#define PCI_DEVICE_ID_INTEL_I7_MC_TAD 0x2c19
+#define PCI_DEVICE_ID_INTEL_I7_MC_RAS 0x2c1a
+#define PCI_DEVICE_ID_INTEL_I7_MC_TEST 0x2c1c
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL 0x2c20
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR 0x2c21
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK 0x2c22
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC 0x2c23
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL 0x2c28
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR 0x2c29
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK 0x2c2a
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC 0x2c2b
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL 0x2c30
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR 0x2c31
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK 0x2c32
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC 0x2c33
+#define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41
+#define PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT 0x2c40
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE 0x2c50
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT 0x2c51
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2 0x2c70
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_SAD 0x2c81
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0 0x2c90
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_PHY0 0x2c91
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR 0x2c98
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD 0x2c99
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST 0x2c9C
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL 0x2ca0
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR 0x2ca1
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK 0x2ca2
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC 0x2ca3
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL 0x2ca8
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR 0x2ca9
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK 0x2caa
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC 0x2cab
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2 0x2d98
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2 0x2d99
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2 0x2d9a
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2 0x2d9c
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2 0x2da0
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2 0x2da1
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2 0x2da2
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2 0x2da3
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2 0x2da8
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2 0x2da9
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2 0x2daa
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2 0x2dab
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2 0x2db0
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2 0x2db1
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2 0x2db2
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2 0x2db3
#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
#define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429
#define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a
#define PCI_DEVICE_ID_INTEL_IOAT_TBG6 0x342b
#define PCI_DEVICE_ID_INTEL_IOAT_TBG7 0x342c
+#define PCI_DEVICE_ID_INTEL_X58_HUB_MGMT 0x342e
#define PCI_DEVICE_ID_INTEL_IOAT_TBG0 0x3430
#define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431
#define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432
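The I7/LYNNFIELD block above names the per-socket uncore functions (memory controller, per-channel CTRL/ADDR/RANK/TC registers, SAD, QPI link/phy) that i7core_edac reads ECC state from. i7core_edac itself walks these devices per socket; the generic lookup pattern is simply:

struct pci_dev *mcr = pci_get_device(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_I7_MCR, NULL);
if (mcr) {
	/* ... pci_read_config_dword(mcr, reg, &val) on the MC registers ... */
	pci_dev_put(mcr);
}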