-------------------------------------------
Required properties:
-- compatible : Either "aspeed,ast2400-gpio" or "aspeed,ast2500-gpio"
+- compatible : Either "aspeed,ast2400-gpio", "aspeed,ast2500-gpio",
+ or "aspeed,ast2600-gpio".
- #gpio-cells : Should be two
- First cell is the GPIO line number
Optional properties:
-- clocks : A phandle to the clock to use for debounce timings
+- clocks : A phandle to the clock to use for debounce timings
+- ngpios : Number of GPIOs controlled by this controller. Should be set
+ when there are multiple GPIO controllers on a SoC (ast2600).
The gpio and interrupt properties are further described in their respective
bindings documentation:
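
For illustration only, a minimal AST2600 GPIO controller node using ngpios
might look like the sketch below; the node name, register range and line
count are placeholders and are not taken from SoC documentation:

	gpio0: gpio@1e780000 {
		compatible = "aspeed,ast2600-gpio";
		reg = <0x1e780000 0x400>;
		#gpio-cells = <2>;
		gpio-controller;
		ngpios = <208>;
	};
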
66AK2E SoCs
"ti,k2g-gpio", "ti,keystone-gpio": for 66AK2G
"ti,am654-gpio", "ti,keystone-gpio": for TI K3 AM654
+ "ti,j721e-gpio", "ti,keystone-gpio": for J721E SoCs
- reg: Physical base address of the controller and the size of memory mapped
registers.
- compatible : Should be "fsl,<soc>-gpio"
The following <soc>s are known to be supported:
mpc5121, mpc5125, mpc8349, mpc8572, mpc8610, pq3, qoriq,
- ls1021a, ls1043a, ls2080a.
+ ls1021a, ls1043a, ls2080a, ls1028a, ls1088a.
- reg : Address and length of the register set for the device
- interrupts : Should be the port interrupt shared by all 32 pins.
- #gpio-cells : Should be two. The first cell is the pin number and
interrupt-controller;
#interrupt-cells = <2>;
};
+
+
+Example of gpio-controller node for an ls1028a/ls1088a SoC:
+
+gpio1: gpio@2300000 {
+ compatible = "fsl,ls1028a-gpio", "fsl,ls1088a-gpio", "fsl,qoriq-gpio";
+ reg = <0x0 0x2300000 0x0 0x10000>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ little-endian;
+};
--- /dev/null
+Aspeed SGPIO controller Device Tree Bindings
+--------------------------------------------
+
+This SGPIO controller is for the ASPEED AST2500 SoC. It supports up to 80
+fully featured Serial GPIOs. Each of the Serial GPIO pins can be programmed
+to support the following options:
+- Interrupt support for each input port, with selectable interrupt
+ sensitivity (level-high, level-low, edge-high, edge-low)
+- Reset tolerance support for each output port
+- Directly connected to APB bus and its shift clock is from APB bus clock
+ divided by a programmable value.
+- Co-work with external signal-chained TTL components (74LV165/74LV595)
+
+Required properties:
+
+- compatible : Should be one of
+ "aspeed,ast2400-sgpio", "aspeed,ast2500-sgpio"
+- #gpio-cells : Should be 2, see gpio.txt
+- reg : Address and length of the register set for the device
+- gpio-controller : Marks the device node as a GPIO controller
+- interrupts : Interrupt specifier, see interrupt-controller/interrupts.txt
+- interrupt-controller : Mark the GPIO controller as an interrupt-controller
+- ngpios : number of GPIO lines, see gpio.txt
+ (should be multiple of 8, up to 80 pins)
+- clocks : A phandle to the APB clock for SGPM clock division
+- bus-frequency : SGPM CLK frequency
+
+The sgpio and interrupt properties are further described in their respective
+bindings documentation:
+
+- Documentation/devicetree/bindings/gpio/gpio.txt
+- Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+
+ Example:
+ sgpio: sgpio@1e780200 {
+ #gpio-cells = <2>;
+ compatible = "aspeed,ast2500-sgpio";
+ gpio-controller;
+ interrupts = <40>;
+ reg = <0x1e780200 0x0100>;
+ clocks = <&syscon ASPEED_CLK_APB>;
+ interrupt-controller;
+ ngpios = <8>;
+ bus-frequency = <12000000>;
+ };
--- /dev/null
+Bindings for Synaptics AS370 PVT sensors
+
+Required properties:
+- compatible : "syna,as370-hwmon"
+- reg : address and length of the register set.
+
+Example:
+ hwmon@ea0810 {
+ compatible = "syna,as370-hwmon";
+ reg = <0xea0810 0xc>;
+ };
-Device-tree bindings for IBM Common Form Factor Power Supply Version 1
-----------------------------------------------------------------------
+Device-tree bindings for IBM Common Form Factor Power Supply Versions 1 and 2
+-----------------------------------------------------------------------------
Required properties:
- - compatible = "ibm,cffps1";
+ - compatible : Must be one of the following:
+ "ibm,cffps1"
+ "ibm,cffps2"
- reg = < I2C bus address >; : Address of the power supply on the
I2C bus.
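
For reference, a minimal node using the version 2 compatible string might
look like this (the I2C address 0x68 is illustrative only):

	power-supply@68 {
		compatible = "ibm,cffps2";
		reg = <0x68>;
	};
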
"maxim,max31725",
"maxim,max31726",
"maxim,mcp980x",
+ "nxp,pct2075",
"st,stds75",
"st,stlm75",
"microchip,tcn75",
- inl67-supply: The input supply for LDO_REG3 and LDO_REG4
Any standard regulator properties can be used to configure the single regulator.
+regulator-initial-mode, regulator-allowed-modes and regulator-mode can be
+specified for the act8865 using mode values from the
+dt-bindings/regulator/active-semi,8865-regulator.h file.
The valid names for regulators are:
- for act8846:
Example:
--------
+#include <dt-bindings/regulator/active-semi,8865-regulator.h>
+
i2c1: i2c@f0018000 {
pmic: act8865@5b {
compatible = "active-semi,act8865";
regulator-name = "VCC_1V2";
regulator-min-microvolt = <1100000>;
regulator-max-microvolt = <1300000>;
- regulator-suspend-mem-microvolt = <1150000>;
- regulator-suspend-standby-microvolt = <1150000>;
regulator-always-on;
+
+ regulator-allowed-modes = <ACT8865_REGULATOR_MODE_FIXED>,
+ <ACT8865_REGULATOR_MODE_LOWPOWER>;
+ regulator-initial-mode = <ACT8865_REGULATOR_MODE_FIXED>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-min-microvolt = <1150000>;
+ regulator-suspend-max-microvolt = <1150000>;
+ regulator-changeable-in-suspend;
+ regulator-mode = <ACT8865_REGULATOR_MODE_LOWPOWER>;
+ };
};
vcc_3v3_reg: DCDC_REG3 {
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
+
+ regulator-allowed-modes = <ACT8865_REGULATOR_MODE_NORMAL>,
+ <ACT8865_REGULATOR_MODE_LOWPOWER>;
+ regulator-initial-mode = <ACT8865_REGULATOR_MODE_NORMAL>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
};
vddfuse_reg: LDO_REG2 {
allOf:
- $ref: "regulator.yaml#"
+if:
+ properties:
+ compatible:
+ contains:
+ const: regulator-fixed-clock
+ required:
+ - clocks
+
properties:
compatible:
- const: regulator-fixed
+ enum:
+ - regulator-fixed
+ - regulator-fixed-clock
regulator-name: true
description: gpio to use for enable control
maxItems: 1
+ clocks:
+ description:
+ Clock to use for enable control. This property is only valid when the
+ compatible is regulator-fixed-clock, in which case it is mandatory.
+ maxItems: 1
+
startup-delay-us:
description: startup time in microseconds
$ref: /schemas/types.yaml#/definitions/uint32
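
As a rough illustration (not part of the schema fragment above), a fixed
regulator whose enable is controlled by a clock could be described as in the
sketch below; the regulator name, voltage and clock phandle are made-up
placeholders:

	reg_vmmc: regulator-vmmc {
		compatible = "regulator-fixed-clock";
		regulator-name = "vmmc_3v3";
		regulator-min-microvolt = <3300000>;
		regulator-max-microvolt = <3300000>;
		clocks = <&clks 22>;
		startup-delay-us = <70000>;
	};
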
--- /dev/null
+MediaTek MT6358 Regulator
+
+All voltage regulators provided by the MT6358 PMIC are described as the
+subnodes of the MT6358 regulators node. Each regulator is named according
+to its regulator type, buck_<name> and ldo_<name>. Each of these nodes is
+defined using the standard binding for regulators at
+Documentation/devicetree/bindings/regulator/regulator.txt.
+
+The valid names for regulators are:
+BUCK:
+ buck_vdram1, buck_vcore, buck_vpa, buck_vproc11, buck_vproc12, buck_vgpu,
+ buck_vs2, buck_vmodem, buck_vs1
+LDO:
+ ldo_vdram2, ldo_vsim1, ldo_vibr, ldo_vrf12, ldo_vio18, ldo_vusb, ldo_vcamio,
+ ldo_vcamd, ldo_vcn18, ldo_vfe28, ldo_vsram_proc11, ldo_vcn28, ldo_vsram_others,
+ ldo_vsram_gpu, ldo_vxo22, ldo_vefuse, ldo_vaux18, ldo_vmch, ldo_vbif28,
+ ldo_vsram_proc12, ldo_vcama1, ldo_vemc, ldo_vio28, ldo_va12, ldo_vrf18,
+ ldo_vcn33_bt, ldo_vcn33_wifi, ldo_vcama2, ldo_vmc, ldo_vldo28, ldo_vaud28,
+ ldo_vsim2
+
+Example:
+
+ pmic {
+ compatible = "mediatek,mt6358";
+
+ mt6358regulator: mt6358regulator {
+ compatible = "mediatek,mt6358-regulator";
+
+ mt6358_vdram1_reg: buck_vdram1 {
+ regulator-compatible = "buck_vdram1";
+ regulator-name = "vdram1";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <2087500>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <0>;
+ regulator-always-on;
+ };
+
+ mt6358_vcore_reg: buck_vcore {
+ regulator-name = "vcore";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <200>;
+ regulator-always-on;
+ };
+
+ mt6358_vpa_reg: buck_vpa {
+ regulator-name = "vpa";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <3650000>;
+ regulator-ramp-delay = <50000>;
+ regulator-enable-ramp-delay = <250>;
+ };
+
+ mt6358_vproc11_reg: buck_vproc11 {
+ regulator-name = "vproc11";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <200>;
+ regulator-always-on;
+ };
+
+ mt6358_vproc12_reg: buck_vproc12 {
+ regulator-name = "vproc12";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <200>;
+ regulator-always-on;
+ };
+
+ mt6358_vgpu_reg: buck_vgpu {
+ regulator-name = "vgpu";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <200>;
+ };
+
+ mt6358_vs2_reg: buck_vs2 {
+ regulator-name = "vs2";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <2087500>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <0>;
+ regulator-always-on;
+ };
+
+ mt6358_vmodem_reg: buck_vmodem {
+ regulator-name = "vmodem";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <900>;
+ regulator-always-on;
+ };
+
+ mt6358_vs1_reg: buck_vs1 {
+ regulator-name = "vs1";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <2587500>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <0>;
+ regulator-always-on;
+ };
+
+ mt6358_vdram2_reg: ldo_vdram2 {
+ regulator-name = "vdram2";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <3300>;
+ };
+
+ mt6358_vsim1_reg: ldo_vsim1 {
+ regulator-name = "vsim1";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <3100000>;
+ regulator-enable-ramp-delay = <540>;
+ };
+
+ mt6358_vibr_reg: ldo_vibr {
+ regulator-name = "vibr";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <60>;
+ };
+
+ mt6358_vrf12_reg: ldo_vrf12 {
+ compatible = "regulator-fixed";
+ regulator-name = "vrf12";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-enable-ramp-delay = <120>;
+ };
+
+ mt6358_vio18_reg: ldo_vio18 {
+ compatible = "regulator-fixed";
+ regulator-name = "vio18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <2700>;
+ regulator-always-on;
+ };
+
+ mt6358_vusb_reg: ldo_vusb {
+ regulator-name = "vusb";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3100000>;
+ regulator-enable-ramp-delay = <270>;
+ regulator-always-on;
+ };
+
+ mt6358_vcamio_reg: ldo_vcamio {
+ compatible = "regulator-fixed";
+ regulator-name = "vcamio";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vcamd_reg: ldo_vcamd {
+ regulator-name = "vcamd";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vcn18_reg: ldo_vcn18 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcn18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vfe28_reg: ldo_vfe28 {
+ compatible = "regulator-fixed";
+ regulator-name = "vfe28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vsram_proc11_reg: ldo_vsram_proc11 {
+ regulator-name = "vsram_proc11";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <240>;
+ regulator-always-on;
+ };
+
+ mt6358_vcn28_reg: ldo_vcn28 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcn28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vsram_others_reg: ldo_vsram_others {
+ regulator-name = "vsram_others";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <240>;
+ regulator-always-on;
+ };
+
+ mt6358_vsram_gpu_reg: ldo_vsram_gpu {
+ regulator-name = "vsram_gpu";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <240>;
+ };
+
+ mt6358_vxo22_reg: ldo_vxo22 {
+ compatible = "regulator-fixed";
+ regulator-name = "vxo22";
+ regulator-min-microvolt = <2200000>;
+ regulator-max-microvolt = <2200000>;
+ regulator-enable-ramp-delay = <120>;
+ regulator-always-on;
+ };
+
+ mt6358_vefuse_reg: ldo_vefuse {
+ regulator-name = "vefuse";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vaux18_reg: ldo_vaux18 {
+ compatible = "regulator-fixed";
+ regulator-name = "vaux18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vmch_reg: ldo_vmch {
+ regulator-name = "vmch";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <60>;
+ };
+
+ mt6358_vbif28_reg: ldo_vbif28 {
+ compatible = "regulator-fixed";
+ regulator-name = "vbif28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vsram_proc12_reg: ldo_vsram_proc12 {
+ regulator-name = "vsram_proc12";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <240>;
+ regulator-always-on;
+ };
+
+ mt6358_vcama1_reg: ldo_vcama1 {
+ regulator-name = "vcama1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vemc_reg: ldo_vemc {
+ regulator-name = "vemc";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <60>;
+ regulator-always-on;
+ };
+
+ mt6358_vio28_reg: ldo_vio28 {
+ compatible = "regulator-fixed";
+ regulator-name = "vio28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_va12_reg: ldo_va12 {
+ compatible = "regulator-fixed";
+ regulator-name = "va12";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-enable-ramp-delay = <270>;
+ regulator-always-on;
+ };
+
+ mt6358_vrf18_reg: ldo_vrf18 {
+ compatible = "regulator-fixed";
+ regulator-name = "vrf18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <120>;
+ };
+
+ mt6358_vcn33_bt_reg: ldo_vcn33_bt {
+ regulator-name = "vcn33_bt";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3500000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vcn33_wifi_reg: ldo_vcn33_wifi {
+ regulator-name = "vcn33_wifi";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3500000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vcama2_reg: ldo_vcama2 {
+ regulator-name = "vcama2";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vmc_reg: ldo_vmc {
+ regulator-name = "vmc";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <60>;
+ };
+
+ mt6358_vldo28_reg: ldo_vldo28 {
+ regulator-name = "vldo28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vaud28_reg: ldo_vaud28 {
+ compatible = "regulator-fixed";
+ regulator-name = "vaud28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vsim2_reg: ldo_vsim2 {
+ regulator-name = "vsim2";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <3100000>;
+ regulator-enable-ramp-delay = <540>;
+ };
+ };
+ };
The names used for regulator nodes must match those supported by a given PMIC.
Supported regulator node names:
+ PM8005: smps1 - smps4
+ PM8009: smps1 - smps2, ldo1 - ldo7
+ PM8150: smps1 - smps10, ldo1 - ldo18
+ PM8150L: smps1 - smps8, ldo1 - ldo11, bob, flash, rgb
PM8998: smps1 - smps13, ldo1 - ldo28, lvs1 - lvs2
PMI8998: bob
- PM8005: smps1 - smps4
========================
First Level Nodes - PMIC
- compatible
Usage: required
Value type: <string>
- Definition: Must be one of: "qcom,pm8998-rpmh-regulators",
- "qcom,pmi8998-rpmh-regulators" or
- "qcom,pm8005-rpmh-regulators".
+ Definition: Must be one of the following:
+ "qcom,pm8005-rpmh-regulators"
+ "qcom,pm8009-rpmh-regulators"
+ "qcom,pm8150-rpmh-regulators"
+ "qcom,pm8150l-rpmh-regulators"
+ "qcom,pm8998-rpmh-regulators"
+ "qcom,pmi8998-rpmh-regulators"
- qcom,pmic-id
Usage: required
--- /dev/null
+SY8824C/SY8824E/SY20276/SY20278 Voltage regulator
+
+Required properties:
+- compatible: Must be one of the following.
+ "silergy,sy8824c"
+ "silergy,sy8824e"
+ "silergy,sy20276"
+ "silergy,sy20278"
+- reg: I2C slave address
+
+Any property defined as part of the core regulator binding, defined in
+./regulator.txt, can also be used.
+
+Example:
+
+ vcore: regulator@66 {
+ compatible = "silergy,sy8824c";
+ reg = <0x66>;
+ regulator-name = "vcore";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <3000000>;
};
+
+For twl6030 regulators/LDOs:
+
+ - ti,retain-on-reset: Does not turn off the supplies during warm
+ reset. Could be needed for VMMC, as TWL6030
+ reset sequence for this signal does not comply
+ with the SD specification.
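
A hedged sketch of how this property might appear on a VMMC regulator node
follows; the node layout and voltage range are assumptions for illustration,
not taken from a board file:

	vmmc: regulator-vmmc {
		compatible = "ti,twl6030-vmmc";
		regulator-min-microvolt = <1200000>;
		regulator-max-microvolt = <3000000>;
		ti,retain-on-reset;
	};
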
Required properties:
- compatible: Should be
"socionext,uniphier-pro4-usb3-regulator" - for Pro4 SoC
+ "socionext,uniphier-pro5-usb3-regulator" - for Pro5 SoC
"socionext,uniphier-pxs2-usb3-regulator" - for PXs2 SoC
"socionext,uniphier-ld20-usb3-regulator" - for LD20 SoC
"socionext,uniphier-pxs3-usb3-regulator" - for PXs3 SoC
- clocks: A list of phandles to the clock gate for USB3 glue layer.
According to the clock-names, appropriate clocks are required.
- clock-names: Should contain
- "gio", "link" - for Pro4 SoC
+ "gio", "link" - for Pro4 and Pro5 SoCs
"link" - for others
- resets: A list of phandles to the reset control for USB3 glue layer.
According to the reset-names, appropriate resets are required.
- reset-names: Should contain
- "gio", "link" - for Pro4 SoC
+ "gio", "link" - for Pro4 and Pro5 SoCs
"link" - for others
See Documentation/devicetree/bindings/regulator/regulator.txt
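
As a rough sketch, a Pro5 regulator node using both the "gio" and "link"
clocks and resets might look like the following; the register offset and the
clock/reset specifiers are illustrative assumptions, not taken from an SoC
dtsi:

	usb_vbus0: regulators@100 {
		compatible = "socionext,uniphier-pro5-usb3-regulator";
		reg = <0x100 0x10>;
		clock-names = "gio", "link";
		clocks = <&sys_clk 12>, <&sys_clk 14>;
		reset-names = "gio", "link";
		resets = <&sys_rst 12>, <&sys_rst 14>;
	};
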
--- /dev/null
+* Nuvoton FLASH Interface Unit (FIU) SPI Controller
+
+NPCM FIU supports single, dual and quad communication interfaces.
+
+The NPCM7XX has three FIU modules:
+FIU0 and FIUx support two chip selects each,
+FIU3 supports four chip selects.
+
+Required properties:
+ - compatible : "nuvoton,npcm750-fiu" for the NPCM7XX BMC
+ - #address-cells : should be 1.
+ - #size-cells : should be 0.
+ - reg : the first contains the register location and length,
+ the second contains the memory mapping address and length
+ - reg-names: Should contain the reg names "control" and "memory"
+ - clocks : phandle of FIU reference clock.
+
+Required properties in case the pins can be muxed:
+ - pinctrl-names : a pinctrl state named "default" must be defined.
+ - pinctrl-0 : phandle referencing pin configuration of the device.
+
+Optional property:
+ - nuvoton,spix-mode: enable spix-mode for an expansion bus to an ASIC or CPLD.
+
+Aliases:
+- All the FIU controller nodes should be represented in the aliases node using
+ the following format 'fiu{n}' where n is a unique number for the alias.
+ In the NPCM7XX BMC:
+ fiu0 represents the fiu 0 controller
+ fiu1 represents the fiu 3 controller
+ fiu2 represents the fiu x controller
+
+Example:
+fiu3: spi@fb000000 {
+ compatible = "nuvoton,npcm750-fiu";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xfb000000 0x1000>, <0x80000000 0x10000000>;
+ reg-names = "control", "memory";
+ clocks = <&clk NPCM7XX_CLK_AHB>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi3_pins>;
+ spi-nor@0 {
+ ...
+ };
+};
+
If that property is used, the number of chip selects will be
increased automatically with max(cs-gpios, hardware chip selects).
- So if, for example, the controller has 2 CS lines, and the
+ So if, for example, the controller has 4 CS lines, and the
cs-gpios looks like this
cs-gpios = <&gpio1 0 0>, <0>, <&gpio1 1 0>, <&gpio1 2 0>;
Required properties:
- compatible : Should be "fsl,vf610-qspi", "fsl,imx6sx-qspi",
"fsl,imx7d-qspi", "fsl,imx6ul-qspi",
- "fsl,ls1021a-qspi"
+ "fsl,ls1021a-qspi", "fsl,ls2080a-qspi"
or
- "fsl,ls2080a-qspi" followed by "fsl,ls1021a-qspi",
"fsl,ls1043a-qspi" followed by "fsl,ls1021a-qspi"
- reg : the first contains the register location and length,
the second contains the memory mapping address and length
clock-names = "qspi_en", "qspi";
flash0: s25fl128s@0 {
- ....
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,s25fl128s", "jedec,spi-nor";
+ spi-max-frequency = <50000000>;
+ reg = <0>;
};
};
- mediatek,mt2701-spi: for mt2701 platforms
- mediatek,mt2712-spi: for mt2712 platforms
- mediatek,mt6589-spi: for mt6589 platforms
+ - mediatek,mt6765-spi: for mt6765 platforms
- mediatek,mt7622-spi: for mt7622 platforms
- "mediatek,mt7629-spi", "mediatek,mt7622-spi": for mt7629 platforms
- mediatek,mt8135-spi: for mt8135 platforms
ADI registers will make ADI controller registers chaos to lead incorrect results.
Then we need one hardware spinlock to synchronize between the multiple subsystems.
+The new version of the ADI controller provides multiple master channels for
+different subsystems to access, which means a hardware spinlock is no longer
+needed for synchronization between them; hardware spinlock support is
+therefore made optional to keep backward compatibility.
+
Required properties:
- compatible: Should be "sprd,sc9860-adi".
- reg: Offset and length of ADI-SPI controller register space.
-- hwlocks: Reference to a phandle of a hwlock provider node.
-- hwlock-names: Reference to hwlock name strings defined in the same order
- as the hwlocks, should be "adi".
- #address-cells: Number of cells required to define a chip select address
on the ADI-SPI bus. Should be set to 1.
- #size-cells: Size of cells required to define a chip select address size
on the ADI-SPI bus. Should be set to 0.
Optional properties:
+- hwlocks: Reference to a phandle of a hwlock provider node.
+- hwlock-names: Reference to hwlock name strings defined in the same order
+ as the hwlocks, should be "adi".
- sprd,hw-channels: This is an array of channel values up to 49 channels.
The first value specifies the hardware channel id which is used to
transfer data triggered by hardware automatically, and the second
- infineon,slb9645tt
# Infineon TLV493D-A1B6 I2C 3D Magnetic Sensor
- infineon,tlv493d-a1b6
+ # Inspur Power System power supply unit version 1
+ - inspur,ipsps1
# Intersil ISL29028 Ambient Light and Proximity Sensor
- isil,isl29028
# Intersil ISL29030 Ambient Light and Proximity Sensor
description: Micro Crystal AG
"^micron,.*":
description: Micron Technology Inc.
+ "^microsoft,.*":
+ description: Microsoft Corporation
"^mikroe,.*":
description: MikroElektronika d.o.o.
"^miniand,.*":
The code implementing a gpio_chip should support multiple instances of the
controller, preferably using the driver model. That code will configure each
-gpio_chip and issue ``gpiochip_add[_data]()`` or ``devm_gpiochip_add_data()``.
-Removing a GPIO controller should be rare; use ``[devm_]gpiochip_remove()``
-when it is unavoidable.
+gpio_chip and issue gpiochip_add(), gpiochip_add_data(), or
+devm_gpiochip_add_data(). Removing a GPIO controller should be rare; use
+gpiochip_remove() when it is unavoidable.
Often a gpio_chip is part of an instance-specific structure with states not
exposed by the GPIO interfaces, such as addressing, power management, and more.
cases the GPIO logic is melded with a SoC's primary interrupt controller.
The IRQ portions of the GPIO block are implemented using an irq_chip, using
-the header <linux/irq.h>. So basically such a driver is utilizing two sub-
+the header <linux/irq.h>. So this combined driver is utilizing two sub-
systems simultaneously: gpio and irq.
It is legal for any IRQ consumer to request an IRQ from any irqchip even if it
----------------------------------------
To help out in handling the set-up and management of GPIO irqchips and the
-associated irqdomain and resource allocation callbacks, the gpiolib has
-some helpers that can be enabled by selecting the GPIOLIB_IRQCHIP Kconfig
-symbol:
-
-- gpiochip_irqchip_add(): adds a chained cascaded irqchip to a gpiochip. It
- will pass the struct gpio_chip* for the chip to all IRQ callbacks, so the
- callbacks need to embed the gpio_chip in its state container and obtain a
- pointer to the container using container_of().
- (See Documentation/driver-api/driver-model/design-patterns.rst)
+associated irqdomain and resource allocation callbacks. These are activated
+by selecting the Kconfig symbol GPIOLIB_IRQCHIP. If the symbol
+IRQ_DOMAIN_HIERARCHY is also selected, hierarchical helpers are provided
+as well. A big portion of the overhead code will be managed by gpiolib,
+under the assumption that your interrupts are 1-to-1 mapped to the
+GPIO line index:
+
+ GPIO line offset Hardware IRQ
+ 0 0
+ 1 1
+ 2 2
+ ... ...
+ ngpio-1 ngpio-1
+
+If some GPIO lines do not have corresponding IRQs, the bitmask valid_mask
+and the flag need_valid_mask in gpio_irq_chip can be used to mask off some
+lines as invalid for associating with IRQs.
+
+The preferred way to set up the helpers is to fill in the
+struct gpio_irq_chip inside struct gpio_chip before adding the gpio_chip.
+If you do this, the additional irq_chip will be set up by gpiolib at the
+same time as setting up the rest of the GPIO functionality. The following
+is a typical example of a cascaded interrupt handler using gpio_irq_chip:
+
+ /* Typical state container with dynamic irqchip */
+ struct my_gpio {
+ struct gpio_chip gc;
+ struct irq_chip irq;
+ };
+
+ int irq; /* from platform etc */
+ struct my_gpio *g;
+ struct gpio_irq_chip *girq;
+
+ /* Set up the irqchip dynamically */
+ g->irq.name = "my_gpio_irq";
+ g->irq.irq_ack = my_gpio_ack_irq;
+ g->irq.irq_mask = my_gpio_mask_irq;
+ g->irq.irq_unmask = my_gpio_unmask_irq;
+ g->irq.irq_set_type = my_gpio_set_irq_type;
+
+ /* Get a pointer to the gpio_irq_chip */
+ girq = &g->gc.irq;
+ girq->chip = &g->irq;
+ girq->parent_handler = my_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ girq->parents[0] = irq;
+
+ return devm_gpiochip_add_data(dev, &g->gc, g);
+
+The helpers support using hierarchical interrupt controllers as well.
+In this case the typical set-up will look like this:
+
+ /* Typical state container with dynamic irqchip */
+ struct my_gpio {
+ struct gpio_chip gc;
+ struct irq_chip irq;
+ struct fwnode_handle *fwnode;
+ };
+
+ int irq; /* from platform etc */
+ struct my_gpio *g;
+ struct gpio_irq_chip *girq;
+
+ /* Set up the irqchip dynamically */
+ g->irq.name = "my_gpio_irq";
+ g->irq.irq_ack = my_gpio_ack_irq;
+ g->irq.irq_mask = my_gpio_mask_irq;
+ g->irq.irq_unmask = my_gpio_unmask_irq;
+ g->irq.irq_set_type = my_gpio_set_irq_type;
+
+ /* Get a pointer to the gpio_irq_chip */
+ girq = &g->gc.irq;
+ girq->chip = &g->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ girq->fwnode = g->fwnode;
+ girq->parent_domain = parent;
+ girq->child_to_parent_hwirq = my_gpio_child_to_parent_hwirq;
+
+ return devm_gpiochip_add_data(dev, &g->gc, g);
+
+As you can see, this is pretty similar, but you do not supply a parent
+handler for the IRQ; instead you supply a parent irqdomain, an fwnode for the
+hardware and a function .child_to_parent_hwirq() whose purpose is to look up
+the parent hardware irq from a child (i.e. this gpio chip) hardware irq.
+As always it is good to look at examples in the kernel tree for advice
+on how to find the required pieces.
+
+The old way of adding irqchips to gpiochips after registration is also still
+available but we try to move away from this:
+
+- DEPRECATED: gpiochip_irqchip_add(): adds a chained cascaded irqchip to a
+ gpiochip. It will pass the struct gpio_chip* for the chip to all IRQ
+ callbacks, so the callbacks need to embed the gpio_chip in its state
+ container and obtain a pointer to the container using container_of().
+ (See Documentation/driver-api/driver-model/design-patterns.rst)
- gpiochip_irqchip_add_nested(): adds a nested cascaded irqchip to a gpiochip,
as discussed above regarding different types of cascaded irqchips. The
cascaded irq has to be handled by a threaded interrupt handler.
Apart from that it works exactly like the chained irqchip.
-- gpiochip_set_chained_irqchip(): sets up a chained cascaded irq handler for a
- gpio_chip from a parent IRQ and passes the struct gpio_chip* as handler
- data. Notice that we pass is as the handler data, since the irqchip data is
- likely used by the parent irqchip.
+- DEPRECATED: gpiochip_set_chained_irqchip(): sets up a chained cascaded irq
+ handler for a gpio_chip from a parent IRQ and passes the struct gpio_chip*
+ as handler data. Notice that we pass it as the handler data, since the
+ irqchip data is likely used by the parent irqchip.
- gpiochip_set_nested_irqchip(): sets up a nested cascaded irq handler for a
gpio_chip from a parent IRQ. As the parent IRQ has usually been
If there is a need to exclude certain GPIO lines from the IRQ domain handled by
these helpers, we can set .irq.need_valid_mask of the gpiochip before
-``[devm_]gpiochip_add_data()`` is called. This allocates an .irq.valid_mask with as
-many bits set as there are GPIO lines in the chip, each bit representing line
-0..n-1. Drivers can exclude GPIO lines by clearing bits from this mask. The mask
-must be filled in before gpiochip_irqchip_add() or gpiochip_irqchip_add_nested()
-is called.
+devm_gpiochip_add_data() or gpiochip_add_data() is called. This allocates an
+.irq.valid_mask with as many bits set as there are GPIO lines in the chip, each
+bit representing line 0..n-1. Drivers can exclude GPIO lines by clearing bits
+from this mask. The mask must be filled in before gpiochip_irqchip_add() or
+gpiochip_irqchip_add_nested() is called.
To use the helpers please keep the following in mind:
+++ /dev/null
-Kernel driver ads1015
-=====================
-
-Supported chips:
-
- * Texas Instruments ADS1015
-
- Prefix: 'ads1015'
-
- Datasheet: Publicly available at the Texas Instruments website:
-
- http://focus.ti.com/lit/ds/symlink/ads1015.pdf
-
- * Texas Instruments ADS1115
-
- Prefix: 'ads1115'
-
- Datasheet: Publicly available at the Texas Instruments website:
-
- http://focus.ti.com/lit/ds/symlink/ads1115.pdf
-
-Authors:
- Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
-
-Description
------------
-
-This driver implements support for the Texas Instruments ADS1015/ADS1115.
-
-This device is a 12/16-bit A-D converter with 4 inputs.
-
-The inputs can be used single ended or in certain differential combinations.
-
-The inputs can be made available by 8 sysfs input files in0_input - in7_input:
-
- - in0: Voltage over AIN0 and AIN1.
- - in1: Voltage over AIN0 and AIN3.
- - in2: Voltage over AIN1 and AIN3.
- - in3: Voltage over AIN2 and AIN3.
- - in4: Voltage over AIN0 and GND.
- - in5: Voltage over AIN1 and GND.
- - in6: Voltage over AIN2 and GND.
- - in7: Voltage over AIN3 and GND.
-
-Which inputs are available can be configured using platform data or devicetree.
-
-By default all inputs are exported.
-
-Platform Data
--------------
-
-In linux/platform_data/ads1015.h platform data is defined, channel_data contains
-configuration data for the used input combinations:
-
-- pga is the programmable gain amplifier (values are full scale)
-
- - 0: +/- 6.144 V
- - 1: +/- 4.096 V
- - 2: +/- 2.048 V
- - 3: +/- 1.024 V
- - 4: +/- 0.512 V
- - 5: +/- 0.256 V
-
-- data_rate in samples per second
-
- - 0: 128
- - 1: 250
- - 2: 490
- - 3: 920
- - 4: 1600
- - 5: 2400
- - 6: 3300
-
-Example::
-
- struct ads1015_platform_data data = {
- .channel_data = {
- [2] = { .enabled = true, .pga = 1, .data_rate = 0 },
- [4] = { .enabled = true, .pga = 4, .data_rate = 5 },
- }
- };
-
-In this case only in2_input (FS +/- 4.096 V, 128 SPS) and in4_input
-(FS +/- 0.512 V, 2400 SPS) would be created.
-
-Devicetree
-----------
-
-Configuration is also possible via devicetree:
-Documentation/devicetree/bindings/hwmon/ads1015.txt
adm1031
adm1275
adm9240
- ads1015
ads7828
adt7410
adt7411
pcf8591
pmbus
powr1220
+ pxe1610
pwm-fan
raspberrypi-hwmon
sch5627
--- /dev/null
+Kernel driver inspur-ipsps1
+===========================
+
+Supported chips:
+
+ * Inspur Power System power supply unit
+
+Author: John Wang <wangzqbj@inspur.com>
+
+Description
+-----------
+
+This driver supports Inspur Power System power supplies. This driver
+is a client to the core PMBus driver.
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
+
+Sysfs entries
+-------------
+
+The following attributes are supported:
+
+======================= ======================================================
+curr1_input Measured input current
+curr1_label "iin"
+curr1_max Maximum current
+curr1_max_alarm Current high alarm
+curr2_input Measured output current in mA.
+curr2_label "iout1"
+curr2_crit Critical maximum current
+curr2_crit_alarm Current critical high alarm
+curr2_max Maximum current
+curr2_max_alarm Current high alarm
+
+fan1_alarm Fan 1 warning.
+fan1_fault Fan 1 fault.
+fan1_input Fan 1 speed in RPM.
+
+in1_alarm Input voltage under-voltage alarm.
+in1_input Measured input voltage in mV.
+in1_label "vin"
+in2_input Measured output voltage in mV.
+in2_label "vout1"
+in2_lcrit Critical minimum output voltage
+in2_lcrit_alarm Output voltage critical low alarm
+in2_max Maximum output voltage
+in2_max_alarm Output voltage high alarm
+in2_min Minimum output voltage
+in2_min_alarm Output voltage low alarm
+
+power1_alarm Input fault or alarm.
+power1_input Measured input power in uW.
+power1_label "pin"
+power1_max Input power limit
+power2_max_alarm Output power high alarm
+power2_max Output power limit
+power2_input Measured output power in uW.
+power2_label "pout"
+
+temp[1-3]_input Measured temperature
+temp[1-2]_max Maximum temperature
+temp[1-3]_max_alarm Temperature high alarm
+
+vendor Manufacturer name
+model Product model
+part_number Product part number
+serial_number Product serial number
+fw_version Firmware version
+hw_version Hardware version
+mode Work mode. Can be set to active or
+ standby; when set to standby, the PSU
+ will automatically switch between
+ standby and redundancy mode.
+======================= ======================================================
http://www.ti.com/product/tmp275
- * NXP LM75B
+ * NXP LM75B, PCT2075
- Prefix: 'lm75b'
+ Prefix: 'lm75b', 'pct2075'
Addresses scanned: none
http://www.nxp.com/documents/data_sheet/LM75B.pdf
+ http://www.nxp.com/docs/en/data-sheet/PCT2075.pdf
+
Author: Frodo Looijaard <frodol@dds.nl>
Description
=====================
Supported chips:
+
* Infineon PXE1610
+
Prefix: 'pxe1610'
+
Addresses scanned: -
+
Datasheet: Datasheet is not publicly available.
* Infineon PXE1110
+
Prefix: 'pxe1110'
+
Addresses scanned: -
+
Datasheet: Datasheet is not publicly available.
* Infineon PXM1310
+
Prefix: 'pxm1310'
+
Addresses scanned: -
+
Datasheet: Datasheet is not publicly available.
Author: Vijay Khemka <vijaykhemka@fb.com>
PXE1610/PXE1110 are Multi-rail/Multiphase Digital Controllers
and compliant to
- -- Intel VR13 DC-DC converter specifications.
- -- Intel SVID protocol.
+
+ - Intel VR13 DC-DC converter specifications.
+ - Intel SVID protocol.
+
Used for Vcore power regulation for Intel VR13 based microprocessors
- -- Servers, Workstations, and High-end desktops
+
+ - Servers, Workstations, and High-end desktops
PXM1310 is a Multi-rail Controller and it is compliant to
- -- Intel VR13 DC-DC converter specifications.
- -- Intel SVID protocol.
+
+ - Intel VR13 DC-DC converter specifications.
+ - Intel SVID protocol.
+
Used for DDR3/DDR4 Memory power regulation for Intel VR13 and
IMVP8 based systems
to instantiate devices explicitly.
Example: the following commands will load the driver for a PXE1610
-at address 0x70 on I2C bus #4:
+at address 0x70 on I2C bus #4::
-# modprobe pxe1610
-# echo pxe1610 0x70 > /sys/bus/i2c/devices/i2c-4/new_device
+ # modprobe pxe1610
+ # echo pxe1610 0x70 > /sys/bus/i2c/devices/i2c-4/new_device
It can also be instantiated by declaring in device tree
Sysfs attributes
----------------
+====================== ====================================
curr1_label "iin"
curr1_input Measured input current
curr1_alarm Current high alarm
temp[1-3]_crit_alarm Chip temperature critical high alarm
temp[1-3]_max Maximum temperature
temp[1-3]_max_alarm Chip temperature high alarm
+====================== ====================================
Addresses scanned: none
- Datasheet: Not publicly available
+ Datasheet: http://www.sensirion.com/file/datasheet_shtw1
+
+
+
+ * Sensirion SHTC3
+
+ Prefix: 'shtc3'
+
+ Addresses scanned: none
+
+ Datasheet: http://www.sensirion.com/file/datasheet_shtc3
Description
-----------
-This driver implements support for the Sensirion SHTC1 chip, a humidity and
-temperature sensor. Temperature is measured in degrees celsius, relative
-humidity is expressed as a percentage. Driver can be used as well for SHTW1
-chip, which has the same electrical interface.
+This driver implements support for the Sensirion SHTC1, SHTW1, and SHTC3
+chips, which are humidity and temperature sensors. Temperature is measured in
+degrees Celsius; relative humidity is expressed as a percentage.
The device communicates with the I2C protocol. All sensors are set to I2C
address 0x70. See Documentation/i2c/instantiating-devices for methods to
errors, no warnings, and few if any check messages. If there are any
messages, please be prepared to explain.
+* Please use the standard multi-line comment style. Do not mix C and C++
+ style comments in a single driver (with the exception of the SPDX license
+ identifier).
+
* If your patch generates checkpatch errors, warnings, or check messages,
please refrain from explanations such as "I prefer that coding style".
Keep in mind that each unnecessary message helps hiding a real problem,
completely initialize your chip and your driver first, then register with
the hwmon subsystem.
-* Use devm_hwmon_device_register_with_groups() or, if your driver needs a remove
- function, hwmon_device_register_with_groups() to register your driver with the
+* Use devm_hwmon_device_register_with_info() or, if your driver needs a remove
+ function, hwmon_device_register_with_info() to register your driver with the
hwmon subsystem. Try using devm_add_action() instead of a remove function if
possible. Do not use hwmon_device_register().
u32 res1 = 0; /* Reserved */
u64 res2 = 0; /* Reserved */
u64 magic = 0x5643534952; /* Magic number, little endian, "RISCV" */
- u32 res3; /* Reserved for additional RISC-V specific header */
+ u32 magic2 = 0x05435352; /* Magic number 2, little endian, "RSC\x05" */
u32 res4; /* Reserved for PE COFF offset */
This header format is compliant with PE/COFF header and largely inspired from
Bits 16:31 - Major version
This preserves compatibility across newer and older version of the header.
- The current version is defined as 0.1.
+ The current version is defined as 0.2.
-- res3 is reserved for offset to any other additional fields. This makes the
- header extendible in future. One example would be to accommodate ISA
- extension for RISC-V in future. For current version, it is set to be zero.
+- The "magic" field is deprecated as of version 0.2. In a future
+ release, it may be removed. This originally should have matched up
+ with the ARM64 header "magic" field, but unfortunately does not.
+ The "magic2" field replaces it, matching up with the ARM64 header.
-- In current header, the flag field has only one field.
+- In the current header, the flags field has only one flag defined:
Bit 0: Kernel endianness. 1 if BE, 0 if LE.
- Image size is mandatory for boot loader to load kernel image. Booting will
tpm_vtpm_proxy
xen-tpmfront
+ tpm_ftpm_tee
--- /dev/null
+=============================================
+Firmware TPM Driver
+=============================================
+
+This document describes the firmware Trusted Platform Module (fTPM)
+device driver.
+
+Introduction
+============
+
+This driver is a shim for firmware implemented in ARM's TrustZone
+environment. The driver allows programs to interact with the TPM in the same
+way they would interact with a hardware TPM.
+
+Design
+======
+
+The driver acts as a thin layer that passes commands to and from a TPM
+implemented in firmware. The driver itself doesn't contain much logic and is
+used more like a dumb pipe between firmware and kernel/userspace.
+
+The firmware itself is based on the following paper:
+https://www.microsoft.com/en-us/research/wp-content/uploads/2017/06/ftpm1.pdf
+
+When the driver is loaded it will expose ``/dev/tpmX`` character devices to
+userspace which will enable userspace to communicate with the firmware TPM
+through this device.
S: Supported
F: drivers/video/backlight/adp8860_bl.c
-ADS1015 HARDWARE MONITOR DRIVER
-M: Dirk Eibach <eibach@gdsys.de>
-L: linux-hwmon@vger.kernel.org
-S: Maintained
-F: Documentation/hwmon/ads1015.rst
-F: drivers/hwmon/ads1015.c
-F: include/linux/platform_data/ads1015.h
-
ADT746X FAN DRIVER
M: Colin Leroy <colin@colino.net>
S: Maintained
F: drivers/edac/aspeed_edac.c
F: Documentation/devicetree/bindings/edac/aspeed-sdram-edac.txt
+EDAC-BLUEFIELD
+M: Shravan Kumar Ramani <sramani@mellanox.com>
+S: Supported
+F: drivers/edac/bluefield_edac.c
+
EDAC-CALXEDA
M: Robert Richter <rric@kernel.org>
L: linux-edac@vger.kernel.org
EDAC-CORE
M: Borislav Petkov <bp@alien8.de>
M: Mauro Carvalho Chehab <mchehab@kernel.org>
+M: Tony Luck <tony.luck@intel.com>
R: James Morse <james.morse@arm.com>
+R: Robert Richter <rrichter@marvell.com>
L: linux-edac@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras.git edac-for-next
S: Supported
F: Documentation/admin-guide/ras.rst
F: Documentation/driver-api/edac.rst
L: linux-block@vger.kernel.org
F: drivers/block/floppy.c
-FMC SUBSYSTEM
-M: Alessandro Rubini <rubini@gnudd.com>
-W: http://www.ohwr.org/projects/fmc-bus
-S: Supported
-F: drivers/fmc/
-F: include/linux/fmc*.h
-F: include/linux/ipmi-fru.h
-K: fmc_d.*register
-
FPGA MANAGER FRAMEWORK
M: Moritz Fischer <mdf@kernel.org>
L: linux-fpga@vger.kernel.org
F: include/linux/tboot.h
F: arch/x86/kernel/tboot.c
-INTEL-MID GPIO DRIVER
-M: David Cohen <david.a.cohen@linux.intel.com>
-L: linux-gpio@vger.kernel.org
-S: Maintained
-F: drivers/gpio/gpio-intel-mid.c
-
INTERCONNECT API
M: Georgi Djakov <georgi.djakov@linaro.org>
L: linux-pm@vger.kernel.org
KEYS-TRUSTED
M: James Bottomley <jejb@linux.ibm.com>
-M: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+M: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
M: Mimi Zohar <zohar@linux.ibm.com>
L: linux-integrity@vger.kernel.org
L: keyrings@vger.kernel.org
F: drivers/regulator/
F: include/dt-bindings/regulator/
F: include/linux/regulator/
+K: regulator_get_optional
VRF
M: David Ahern <dsa@cumulusnetworks.com>
F: include/uapi/linux/fsmap.h
XILINX AXI ETHERNET DRIVER
-M: Anirudha Sarangi <anirudh@xilinx.com>
-M: John Linn <John.Linn@xilinx.com>
+M: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
S: Maintained
F: drivers/net/ethernet/xilinx/xilinx_axienet*
VERSION = 5
PATCHLEVEL = 3
SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION =
NAME = Bobtail Squid
# *DOCUMENTATION*
# CONFIG_HW_RANDOM is not set
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_PNX=y
+CONFIG_GPIO_LPC32XX=y
CONFIG_SPI=y
CONFIG_SPI_PL022=y
CONFIG_GPIO_SYSFS=y
};
static struct gpiod_lookup_table edb93xx_spi_cs_gpio_table = {
- .dev_id = "ep93xx-spi.0",
+ .dev_id = "spi0",
.table = {
GPIO_LOOKUP("A", 6, "cs", GPIO_ACTIVE_LOW),
{ },
* v1.3 parts will still work, since the signal on SFRMOUT is automatic.
*/
static struct gpiod_lookup_table simone_spi_cs_gpio_table = {
- .dev_id = "ep93xx-spi.0",
+ .dev_id = "spi0",
.table = {
GPIO_LOOKUP("A", 1, "cs", GPIO_ACTIVE_LOW),
{ },
* goes through CPLD
*/
static struct gpiod_lookup_table bk3_spi_cs_gpio_table = {
- .dev_id = "ep93xx-spi.0",
+ .dev_id = "spi0",
.table = {
GPIO_LOOKUP("F", 3, "cs", GPIO_ACTIVE_LOW),
{ },
};
static struct gpiod_lookup_table ts72xx_spi_cs_gpio_table = {
- .dev_id = "ep93xx-spi.0",
+ .dev_id = "spi0",
.table = {
/* DIO_17 */
GPIO_LOOKUP("F", 2, "cs", GPIO_ACTIVE_LOW),
};
static struct gpiod_lookup_table vision_spi_cs_gpio_table = {
- .dev_id = "ep93xx-spi.0",
+ .dev_id = "spi0",
.table = {
GPIO_LOOKUP_IDX("A", 6, "cs", 0, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("A", 7, "cs", 1, GPIO_ACTIVE_LOW),
#ifndef __ASM_IMAGE_H
#define __ASM_IMAGE_H
-#define RISCV_IMAGE_MAGIC "RISCV"
+#define RISCV_IMAGE_MAGIC "RISCV\0\0\0"
+#define RISCV_IMAGE_MAGIC2 "RSC\x05"
#define RISCV_IMAGE_FLAG_BE_SHIFT 0
#define RISCV_IMAGE_FLAG_BE_MASK 0x1
#define __HEAD_FLAGS (__HEAD_FLAG(BE))
#define RISCV_HEADER_VERSION_MAJOR 0
-#define RISCV_HEADER_VERSION_MINOR 1
+#define RISCV_HEADER_VERSION_MINOR 2
#define RISCV_HEADER_VERSION (RISCV_HEADER_VERSION_MAJOR << 16 | \
RISCV_HEADER_VERSION_MINOR)
* @version: version
* @res1: reserved
* @res2: reserved
- * @magic: Magic number
- * @res3: reserved (will be used for additional RISC-V specific
- * header)
+ * @magic: Magic number (RISC-V specific; deprecated)
+ * @magic2: Magic number 2 (to match the ARM64 'magic' field pos)
* @res4: reserved (will be used for PE COFF offset)
*
* The intention is for this header format to be shared between multiple
u32 res1;
u64 res2;
u64 magic;
- u32 res3;
+ u32 magic2;
u32 res4;
};
#endif /* __ASSEMBLY__ */
.word RISCV_HEADER_VERSION
.word 0
.dword 0
- .asciz RISCV_IMAGE_MAGIC
- .word 0
+ .ascii RISCV_IMAGE_MAGIC
.balign 4
+ .ascii RISCV_IMAGE_MAGIC2
.word 0
.global _start_kernel
case KVM_S390_MCHK:
irq->u.mchk.mcic = s390int->parm64;
break;
+ case KVM_S390_INT_PFAULT_INIT:
+ irq->u.ext.ext_params = s390int->parm;
+ irq->u.ext.ext_params2 = s390int->parm64;
+ break;
+ case KVM_S390_RESTART:
+ case KVM_S390_INT_CLOCK_COMP:
+ case KVM_S390_INT_CPU_TIMER:
+ break;
+ default:
+ return -EINVAL;
}
return 0;
}
/* mark all the pages in active slots as dirty */
for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
ms = slots->memslots + slotnr;
+ if (!ms->dirty_bitmap)
+ return -EINVAL;
/*
* The second half of the bitmap is only used on x86,
* and would be wasted otherwise, so we put it to good
}
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
- struct kvm_s390_irq s390irq;
+ struct kvm_s390_irq s390irq = {};
if (copy_from_user(&s390int, argp, sizeof(s390int)))
return -EFAULT;
{
long err;
+ if (!IS_ENABLED(CONFIG_SYSVIPC))
+ return -ENOSYS;
+
/* No need for backward compatibility. We can start fresh... */
if (call <= SEMTIMEDOP) {
switch (call) {
case SEMOP:
- err = sys_semtimedop(first, ptr,
- (unsigned int)second, NULL);
+ err = ksys_semtimedop(first, ptr,
+ (unsigned int)second, NULL);
goto out;
case SEMTIMEDOP:
- err = sys_semtimedop(first, ptr, (unsigned int)second,
+ err = ksys_semtimedop(first, ptr, (unsigned int)second,
(const struct __kernel_timespec __user *)
- (unsigned long) fifth);
+ (unsigned long) fifth);
goto out;
case SEMGET:
- err = sys_semget(first, (int)second, (int)third);
+ err = ksys_semget(first, (int)second, (int)third);
goto out;
case SEMCTL: {
- err = sys_semctl(first, second,
- (int)third | IPC_64,
- (unsigned long) ptr);
+ err = ksys_old_semctl(first, second,
+ (int)third | IPC_64,
+ (unsigned long) ptr);
goto out;
}
default:
if (call <= MSGCTL) {
switch (call) {
case MSGSND:
- err = sys_msgsnd(first, ptr, (size_t)second,
+ err = ksys_msgsnd(first, ptr, (size_t)second,
(int)third);
goto out;
case MSGRCV:
- err = sys_msgrcv(first, ptr, (size_t)second, fifth,
+ err = ksys_msgrcv(first, ptr, (size_t)second, fifth,
(int)third);
goto out;
case MSGGET:
- err = sys_msgget((key_t)first, (int)second);
+ err = ksys_msgget((key_t)first, (int)second);
goto out;
case MSGCTL:
- err = sys_msgctl(first, (int)second | IPC_64, ptr);
+ err = ksys_old_msgctl(first, (int)second | IPC_64, ptr);
goto out;
default:
err = -ENOSYS;
goto out;
}
case SHMDT:
- err = sys_shmdt(ptr);
+ err = ksys_shmdt(ptr);
goto out;
case SHMGET:
- err = sys_shmget(first, (size_t)second, (int)third);
+ err = ksys_shmget(first, (size_t)second, (int)third);
goto out;
case SHMCTL:
- err = sys_shmctl(first, (int)second | IPC_64, ptr);
+ err = ksys_old_shmctl(first, (int)second | IPC_64, ptr);
goto out;
default:
err = -ENOSYS;
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
#include <linux/syscore_ops.h>
-#include <linux/gpio.h>
#include <mach/hardware.h>
int root_count; /* Currently serving as active root */
unsigned int unsync_children;
struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
+ unsigned long mmu_valid_gen;
DECLARE_BITMAP(unsync_child_bitmap, 512);
#ifdef CONFIG_X86_32
unsigned long n_requested_mmu_pages;
unsigned long n_max_mmu_pages;
unsigned int indirect_shadow_pages;
+ unsigned long mmu_valid_gen;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
/*
* Hash table of struct kvm_mmu_page.
#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
+#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
{}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
{}
};
if (!boot_cpu_has(X86_FEATURE_APIC))
return true;
+ /* Virt guests may lack ARAT, but still have DEADLINE */
+ if (!boot_cpu_has(X86_FEATURE_ARAT))
+ return true;
+
/* Deadline timer is based on TSC so no further PIT action required */
if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
return false;
*/
MCESEV(
AO, "Action optional: memory scrubbing error",
- SER, MASK(MCI_STATUS_OVER|MCI_UC_AR|MCACOD_SCRUBMSK, MCI_STATUS_UC|MCACOD_SCRUB)
+ SER, MASK(MCI_UC_AR|MCACOD_SCRUBMSK, MCI_STATUS_UC|MCACOD_SCRUB)
),
MCESEV(
AO, "Action optional: last level cache writeback error",
- SER, MASK(MCI_STATUS_OVER|MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
+ SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
),
/* ignore OVER for UCNA */
if (!direct)
sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
+
+ /*
+ * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
+ * depends on valid pages being added to the head of the list. See
+ * comments in kvm_zap_obsolete_pages().
+ */
list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
kvm_mod_used_mmu_pages(vcpu->kvm, +1);
return sp;
#define for_each_valid_sp(_kvm, _sp, _gfn) \
hlist_for_each_entry(_sp, \
&(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
- if ((_sp)->role.invalid) { \
+ if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) { \
} else
#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \
static void mmu_audit_disable(void) { }
#endif
+static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
+}
+
static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{
if (level > PT_PAGE_TABLE_LEVEL && need_sync)
flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
}
+ sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
clear_page(sp->spt);
trace_kvm_mmu_get_page(sp, true);
return false;
if (cached_root_available(vcpu, new_cr3, new_role)) {
+ /*
+ * It is possible that the cached previous root page is
+ * obsolete because of a change in the MMU generation
+ * number. However, changing the generation number is
+ * accompanied by KVM_REQ_MMU_RELOAD, which will free
+ * the root set here and allocate a new one.
+ */
kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
if (!skip_tlb_flush) {
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
return alloc_mmu_pages(vcpu);
}
+
+static void kvm_zap_obsolete_pages(struct kvm *kvm)
+{
+ struct kvm_mmu_page *sp, *node;
+ LIST_HEAD(invalid_list);
+ int ign;
+
+restart:
+ list_for_each_entry_safe_reverse(sp, node,
+ &kvm->arch.active_mmu_pages, link) {
+ /*
+ * No obsolete valid page exists before a newly created page
+ * since active_mmu_pages is a FIFO list.
+ */
+ if (!is_obsolete_sp(kvm, sp))
+ break;
+
+ /*
+ * Do not repeatedly zap a root page to avoid unnecessary
+ * KVM_REQ_MMU_RELOAD, otherwise we may not be able to
+ * progress:
+ * vcpu 0 vcpu 1
+ * call vcpu_enter_guest():
+ * 1): handle KVM_REQ_MMU_RELOAD
+ * and require mmu-lock to
+ * load mmu
+ * repeat:
+ * 1): zap root page and
+ * send KVM_REQ_MMU_RELOAD
+ *
+ * 2): if (cond_resched_lock(mmu-lock))
+ *
+ * 2): hold mmu-lock and load mmu
+ *
+ * 3): see KVM_REQ_MMU_RELOAD bit
+ * on vcpu->requests is set
+ * then return 1 to call
+ * vcpu_enter_guest() again.
+ * goto repeat;
+ *
+ * Since we are walking the list in reverse and invalid pages
+ * will be moved to the head, skipping the invalid pages helps
+ * us avoid walking the list forever.
+ */
+ if (sp->role.invalid)
+ continue;
+
+ if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+ kvm_mmu_commit_zap_page(kvm, &invalid_list);
+ cond_resched_lock(&kvm->mmu_lock);
+ goto restart;
+ }
+
+ if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
+ goto restart;
+ }
+
+ kvm_mmu_commit_zap_page(kvm, &invalid_list);
+}
+
+/*
+ * Fast invalidate all shadow pages and use lock-break technique
+ * to zap obsolete pages.
+ *
+ * It's required when memslot is being deleted or VM is being
+ * destroyed, in these cases, we should ensure that KVM MMU does
+ * not use any resource of the being-deleted slot or all slots
+ * after calling the function.
+ */
+static void kvm_mmu_zap_all_fast(struct kvm *kvm)
+{
+ spin_lock(&kvm->mmu_lock);
+ kvm->arch.mmu_valid_gen++;
+
+ kvm_zap_obsolete_pages(kvm);
+ spin_unlock(&kvm->mmu_lock);
+}
+
static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot,
struct kvm_page_track_notifier_node *node)
{
- kvm_mmu_zap_all(kvm);
+ kvm_mmu_zap_all_fast(kvm);
}
void kvm_mmu_init_vm(struct kvm *kvm)
int len;
gva_t gva = 0;
struct vmcs12 *vmcs12;
+ struct x86_exception e;
short offset;
if (!nested_vmx_check_permission(vcpu))
vmx_instruction_info, true, len, &gva))
return 1;
/* _system ok, nested_vmx_check_permission has verified cpl=0 */
- kvm_write_guest_virt_system(vcpu, gva, &field_value, len, NULL);
+ if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))
+ kvm_inject_page_fault(vcpu, &e);
}
return nested_vmx_succeed(vcpu);
/* kvm_write_guest_virt_system can pull in tons of pages. */
vcpu->arch.l1tf_flush_l1d = true;
+ /*
+ * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
+ * is returned, but our callers are not ready for that and they blindly
+ * call kvm_inject_page_fault. Ensure that they at least do not leak
+ * uninitialized kernel stack memory into cr2 and error code.
+ */
+ memset(exception, 0, sizeof(*exception));
return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
PFERR_WRITE_MASK, exception);
}
KASAN_SANITIZE := n
KCOV_INSTRUMENT := n
+# These are adjustments to the compiler flags used for objects that
+# make up the standalone purgatory.ro
+
+PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
+PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
+
# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
# in turn leaves some undefined symbols like __fentry__ in purgatory and not
# sure how to relocate those.
ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_sha256.o += $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_purgatory.o += $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_string.o += $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_kexec-purgatory.o += $(CC_FLAGS_FTRACE)
+PURGATORY_CFLAGS_REMOVE += $(CC_FLAGS_FTRACE)
endif
ifdef CONFIG_STACKPROTECTOR
-CFLAGS_REMOVE_sha256.o += -fstack-protector
-CFLAGS_REMOVE_purgatory.o += -fstack-protector
-CFLAGS_REMOVE_string.o += -fstack-protector
-CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector
+PURGATORY_CFLAGS_REMOVE += -fstack-protector
endif
ifdef CONFIG_STACKPROTECTOR_STRONG
-CFLAGS_REMOVE_sha256.o += -fstack-protector-strong
-CFLAGS_REMOVE_purgatory.o += -fstack-protector-strong
-CFLAGS_REMOVE_string.o += -fstack-protector-strong
-CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector-strong
+PURGATORY_CFLAGS_REMOVE += -fstack-protector-strong
endif
ifdef CONFIG_RETPOLINE
-CFLAGS_REMOVE_sha256.o += $(RETPOLINE_CFLAGS)
-CFLAGS_REMOVE_purgatory.o += $(RETPOLINE_CFLAGS)
-CFLAGS_REMOVE_string.o += $(RETPOLINE_CFLAGS)
-CFLAGS_REMOVE_kexec-purgatory.o += $(RETPOLINE_CFLAGS)
+PURGATORY_CFLAGS_REMOVE += $(RETPOLINE_CFLAGS)
endif
+CFLAGS_REMOVE_purgatory.o += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_purgatory.o += $(PURGATORY_CFLAGS)
+
+CFLAGS_REMOVE_sha256.o += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_sha256.o += $(PURGATORY_CFLAGS)
+
+CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_string.o += $(PURGATORY_CFLAGS)
+
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
}
map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
- if (!map->debugfs) {
- dev_warn(map->dev,
- "Failed to create %s debugfs directory\n", name);
-
- kfree(map->debugfs_name);
- map->debugfs_name = NULL;
- return;
- }
debugfs_create_file("name", 0400, map->debugfs,
map, ®map_name_fops);
struct regmap_debugfs_node *node, *tmp;
regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
- if (!regmap_debugfs_root) {
- pr_warn("regmap: Failed to create debugfs root\n");
- return;
- }
mutex_lock(®map_debugfs_early_lock);
list_for_each_entry_safe(node, tmp, ®map_debugfs_early_list, link) {
if (ret < 0) {
dev_err(map->dev, "IRQ thread failed to resume: %d\n",
ret);
- pm_runtime_put(map->dev);
goto exit;
}
}
dev_err(map->dev,
"Failed to read IRQ status %d\n",
ret);
- if (chip->runtime_pm)
- pm_runtime_put(map->dev);
goto exit;
}
}
dev_err(map->dev,
"Failed to read IRQ status: %d\n",
ret);
- if (chip->runtime_pm)
- pm_runtime_put(map->dev);
goto exit;
}
}
}
}
+exit:
if (chip->runtime_pm)
pm_runtime_put(map->dev);
-exit:
if (chip->handle_post_irq)
chip->handle_post_irq(chip->irq_drv_data);
usb_free_urb(urb);
- return 0;
+ return err;
}
static int bpa10x_set_diag(struct hci_dev *hdev, bool enable)
{ USB_DEVICE(0x13d3, 0x3526), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
+ /* Additional Realtek 8822CE Bluetooth devices */
+ { USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK },
+
/* Silicon Wave based devices */
{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
}
data->intf->needs_remote_wakeup = 1;
- /* device specific wakeup source enabled and required for USB
- * remote wakeup while host is suspended
- */
- device_wakeup_enable(&data->udev->dev);
if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
goto done;
goto failed;
data->intf->needs_remote_wakeup = 0;
- device_wakeup_disable(&data->udev->dev);
usb_autopm_put_interface(data->intf);
failed:
ws_awake_device);
struct hci_uart *hu = qca->hu;
unsigned long retrans_delay;
+ unsigned long flags;
BT_DBG("hu %p wq awake device", hu);
/* Vote for serial clock */
serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
- spin_lock(&qca->hci_ibs_lock);
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
/* Send wake indication to device */
if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
retrans_delay = msecs_to_jiffies(qca->wake_retrans);
mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
- spin_unlock(&qca->hci_ibs_lock);
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
/* Actually send the packets */
hci_uart_tx_wakeup(hu);
struct qca_data *qca = container_of(work, struct qca_data,
ws_awake_rx);
struct hci_uart *hu = qca->hu;
+ unsigned long flags;
BT_DBG("hu %p wq awake rx", hu);
serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
- spin_lock(&qca->hci_ibs_lock);
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
/* Always acknowledge device wake up,
qca->ibs_sent_wacks++;
- spin_unlock(&qca->hci_ibs_lock);
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
/* Actually send the packets */
hci_uart_tx_wakeup(hu);
/dev/vtpmX and a server-side file descriptor on which the vTPM
can receive commands.
+config TCG_FTPM_TEE
+ tristate "TEE based fTPM Interface"
+ depends on TEE && OPTEE
+ help
+	  This driver proxies for the firmware TPM running in a TEE.
source "drivers/char/tpm/st33zp24/Kconfig"
endif # TCG_TPM
obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o
obj-$(CONFIG_TCG_CRB) += tpm_crb.o
obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
+obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o
* @dev: device to which the chip is associated.
*
* Issues a TPM2_Shutdown command prior to loss of power, as required by the
- * TPM 2.0 spec.
- * Then, calls bus- and device- specific shutdown code.
+ * TPM 2.0 spec. Then, calls bus- and device- specific shutdown code.
*
- * XXX: This codepath relies on the fact that sysfs is not enabled for
- * TPM2: sysfs uses an implicit lock on chip->ops, so this could race if TPM2
- * has sysfs support enabled before TPM sysfs's implicit locking is fixed.
+ * Return: always 0 (i.e. success)
*/
static int tpm_class_shutdown(struct device *dev)
{
void tpm_sysfs_add_device(struct tpm_chip *chip)
{
- /* XXX: If you wish to remove this restriction, you must first update
- * tpm_sysfs to explicitly lock chip->ops.
- */
if (chip->flags & TPM_CHIP_FLAG_TPM2)
return;
- /* The sysfs routines rely on an implicit tpm_try_get_ops, device_del
- * is called before ops is null'd and the sysfs core synchronizes this
- * removal so that no callbacks are running or can run again
- */
WARN_ON(chip->groups_cnt != 0);
chip->groups[chip->groups_cnt++] = &tpm_dev_group;
}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Microsoft Corporation
+ *
+ * Implements a firmware TPM as described here:
+ * https://www.microsoft.com/en-us/research/publication/ftpm-software-implementation-tpm-chip/
+ *
+ * A reference implementation is available here:
+ * https://github.com/microsoft/ms-tpm-20-ref/tree/master/Samples/ARM32-FirmwareTPM/optee_ta/fTPM
+ */
+
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/tee_drv.h>
+#include <linux/tpm.h>
+#include <linux/uuid.h>
+
+#include "tpm.h"
+#include "tpm_ftpm_tee.h"
+
+/*
+ * TA_FTPM_UUID: BC50D971-D4C9-42C4-82CB-343FB7F37896
+ *
+ * Randomly generated, and must correspond to the GUID on the TA side.
+ * Defined here in the reference implementation:
+ * https://github.com/microsoft/ms-tpm-20-ref/blob/master/Samples/ARM32-FirmwareTPM/optee_ta/fTPM/include/fTPM.h#L42
+ */
+static const uuid_t ftpm_ta_uuid =
+ UUID_INIT(0xBC50D971, 0xD4C9, 0x42C4,
+ 0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96);
+
+/**
+ * ftpm_tee_tpm_op_recv - retrieve fTPM response.
+ * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h.
+ * @buf: the buffer to store data.
+ * @count: the number of bytes to read.
+ *
+ * Return:
+ *	In case of success, the number of bytes received.
+ * On failure, -errno.
+ */
+static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
+ size_t len;
+
+ len = pvt_data->resp_len;
+ if (count < len) {
+ dev_err(&chip->dev,
+ "%s: Invalid size in recv: count=%zd, resp_len=%zd\n",
+ __func__, count, len);
+ return -EIO;
+ }
+
+ memcpy(buf, pvt_data->resp_buf, len);
+ pvt_data->resp_len = 0;
+
+ return len;
+}
+
+/**
+ * ftpm_tee_tpm_op_send - send TPM commands through the TEE shared memory.
+ * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h
+ * @buf: the buffer to send.
+ * @len: the number of bytes to send.
+ *
+ * Return:
+ * In case of success, returns 0.
+ *	On failure, -errno.
+ */
+static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
+ size_t resp_len;
+ int rc;
+ u8 *temp_buf;
+ struct tpm_header *resp_header;
+ struct tee_ioctl_invoke_arg transceive_args;
+ struct tee_param command_params[4];
+ struct tee_shm *shm = pvt_data->shm;
+
+ if (len > MAX_COMMAND_SIZE) {
+ dev_err(&chip->dev,
+ "%s: len=%zd exceeds MAX_COMMAND_SIZE supported by fTPM TA\n",
+ __func__, len);
+ return -EIO;
+ }
+
+ memset(&transceive_args, 0, sizeof(transceive_args));
+ memset(command_params, 0, sizeof(command_params));
+ pvt_data->resp_len = 0;
+
+ /* Invoke FTPM_OPTEE_TA_SUBMIT_COMMAND function of fTPM TA */
+ transceive_args = (struct tee_ioctl_invoke_arg) {
+ .func = FTPM_OPTEE_TA_SUBMIT_COMMAND,
+ .session = pvt_data->session,
+ .num_params = 4,
+ };
+
+ /* Fill FTPM_OPTEE_TA_SUBMIT_COMMAND parameters */
+ command_params[0] = (struct tee_param) {
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT,
+ .u.memref = {
+ .shm = shm,
+ .size = len,
+ .shm_offs = 0,
+ },
+ };
+
+ temp_buf = tee_shm_get_va(shm, 0);
+ if (IS_ERR(temp_buf)) {
+ dev_err(&chip->dev, "%s: tee_shm_get_va failed for transmit\n",
+ __func__);
+ return PTR_ERR(temp_buf);
+ }
+ memset(temp_buf, 0, (MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE));
+ memcpy(temp_buf, buf, len);
+
+ command_params[1] = (struct tee_param) {
+ .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT,
+ .u.memref = {
+ .shm = shm,
+ .size = MAX_RESPONSE_SIZE,
+ .shm_offs = MAX_COMMAND_SIZE,
+ },
+ };
+
+ rc = tee_client_invoke_func(pvt_data->ctx, &transceive_args,
+ command_params);
+ if ((rc < 0) || (transceive_args.ret != 0)) {
+ dev_err(&chip->dev, "%s: SUBMIT_COMMAND invoke error: 0x%x\n",
+ __func__, transceive_args.ret);
+ return (rc < 0) ? rc : transceive_args.ret;
+ }
+
+ temp_buf = tee_shm_get_va(shm, command_params[1].u.memref.shm_offs);
+ if (IS_ERR(temp_buf)) {
+ dev_err(&chip->dev, "%s: tee_shm_get_va failed for receive\n",
+ __func__);
+ return PTR_ERR(temp_buf);
+ }
+
+ resp_header = (struct tpm_header *)temp_buf;
+ resp_len = be32_to_cpu(resp_header->length);
+
+ /* sanity check resp_len */
+ if (resp_len < TPM_HEADER_SIZE) {
+ dev_err(&chip->dev, "%s: tpm response header too small\n",
+ __func__);
+ return -EIO;
+ }
+ if (resp_len > MAX_RESPONSE_SIZE) {
+ dev_err(&chip->dev,
+ "%s: resp_len=%zd exceeds MAX_RESPONSE_SIZE\n",
+ __func__, resp_len);
+ return -EIO;
+ }
+
+ /* sanity checks look good, cache the response */
+ memcpy(pvt_data->resp_buf, temp_buf, resp_len);
+ pvt_data->resp_len = resp_len;
+
+ return 0;
+}
+
+static void ftpm_tee_tpm_op_cancel(struct tpm_chip *chip)
+{
+ /* not supported */
+}
+
+static u8 ftpm_tee_tpm_op_status(struct tpm_chip *chip)
+{
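+	/*
+	 * Commands complete synchronously in ftpm_tee_tpm_op_send(), so
+	 * there is no device status to poll; with req_complete_mask/val of
+	 * zero the core treats every command as complete.
+	 */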
+ return 0;
+}
+
+static bool ftpm_tee_tpm_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return 0;
+}
+
+static const struct tpm_class_ops ftpm_tee_tpm_ops = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .recv = ftpm_tee_tpm_op_recv,
+ .send = ftpm_tee_tpm_op_send,
+ .cancel = ftpm_tee_tpm_op_cancel,
+ .status = ftpm_tee_tpm_op_status,
+ .req_complete_mask = 0,
+ .req_complete_val = 0,
+ .req_canceled = ftpm_tee_tpm_req_canceled,
+};
+
+/*
+ * Check whether this driver supports the fTPM TA in the TEE instance
+ * represented by the params (ver/data) to this function.
+ */
+static int ftpm_tee_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ /*
+	 * Currently this driver only supports a GP compliant, OP-TEE based fTPM TA
+ */
+ if ((ver->impl_id == TEE_IMPL_ID_OPTEE) &&
+ (ver->gen_caps & TEE_GEN_CAP_GP))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * ftpm_tee_probe - initialize the fTPM
+ * @pdev: the platform_device description.
+ *
+ * Return:
+ * On success, 0. On failure, -errno.
+ */
+static int ftpm_tee_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct tpm_chip *chip;
+ struct device *dev = &pdev->dev;
+ struct ftpm_tee_private *pvt_data = NULL;
+ struct tee_ioctl_open_session_arg sess_arg;
+
+ pvt_data = devm_kzalloc(dev, sizeof(struct ftpm_tee_private),
+ GFP_KERNEL);
+ if (!pvt_data)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, pvt_data);
+
+ /* Open context with TEE driver */
+ pvt_data->ctx = tee_client_open_context(NULL, ftpm_tee_match, NULL,
+ NULL);
+ if (IS_ERR(pvt_data->ctx)) {
+ if (PTR_ERR(pvt_data->ctx) == -ENOENT)
+ return -EPROBE_DEFER;
+ dev_err(dev, "%s: tee_client_open_context failed\n", __func__);
+ return PTR_ERR(pvt_data->ctx);
+ }
+
+ /* Open a session with fTPM TA */
+ memset(&sess_arg, 0, sizeof(sess_arg));
+ memcpy(sess_arg.uuid, ftpm_ta_uuid.b, TEE_IOCTL_UUID_LEN);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ rc = tee_client_open_session(pvt_data->ctx, &sess_arg, NULL);
+ if ((rc < 0) || (sess_arg.ret != 0)) {
+ dev_err(dev, "%s: tee_client_open_session failed, err=%x\n",
+ __func__, sess_arg.ret);
+ rc = -EINVAL;
+ goto out_tee_session;
+ }
+ pvt_data->session = sess_arg.session;
+
+ /* Allocate dynamic shared memory with fTPM TA */
+ pvt_data->shm = tee_shm_alloc(pvt_data->ctx,
+ MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE,
+ TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ if (IS_ERR(pvt_data->shm)) {
+ dev_err(dev, "%s: tee_shm_alloc failed\n", __func__);
+ rc = -ENOMEM;
+ goto out_shm_alloc;
+ }
+
+ /* Allocate new struct tpm_chip instance */
+ chip = tpm_chip_alloc(dev, &ftpm_tee_tpm_ops);
+ if (IS_ERR(chip)) {
+ dev_err(dev, "%s: tpm_chip_alloc failed\n", __func__);
+ rc = PTR_ERR(chip);
+ goto out_chip_alloc;
+ }
+
+ pvt_data->chip = chip;
+ pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+ /* Create a character device for the fTPM */
+ rc = tpm_chip_register(pvt_data->chip);
+ if (rc) {
+ dev_err(dev, "%s: tpm_chip_register failed with rc=%d\n",
+ __func__, rc);
+ goto out_chip;
+ }
+
+ return 0;
+
+out_chip:
+ put_device(&pvt_data->chip->dev);
+out_chip_alloc:
+ tee_shm_free(pvt_data->shm);
+out_shm_alloc:
+ tee_client_close_session(pvt_data->ctx, pvt_data->session);
+out_tee_session:
+ tee_client_close_context(pvt_data->ctx);
+
+ return rc;
+}
+
+/**
+ * ftpm_tee_remove - remove the TPM device
+ * @pdev: the platform_device description.
+ *
+ * Return:
+ * 0 always.
+ */
+static int ftpm_tee_remove(struct platform_device *pdev)
+{
+ struct ftpm_tee_private *pvt_data = dev_get_drvdata(&pdev->dev);
+
+ /* Release the chip */
+ tpm_chip_unregister(pvt_data->chip);
+
+ /* frees chip */
+ put_device(&pvt_data->chip->dev);
+
+ /* Free the shared memory pool */
+ tee_shm_free(pvt_data->shm);
+
+	/* close the existing session with the fTPM TA */
+ tee_client_close_session(pvt_data->ctx, pvt_data->session);
+
+ /* close the context with TEE driver */
+ tee_client_close_context(pvt_data->ctx);
+
+ /* memory allocated with devm_kzalloc() is freed automatically */
+
+ return 0;
+}
+
+static const struct of_device_id of_ftpm_tee_ids[] = {
+ { .compatible = "microsoft,ftpm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_ftpm_tee_ids);
+
+static struct platform_driver ftpm_tee_driver = {
+ .driver = {
+ .name = "ftpm-tee",
+ .of_match_table = of_match_ptr(of_ftpm_tee_ids),
+ },
+ .probe = ftpm_tee_probe,
+ .remove = ftpm_tee_remove,
+};
+
+module_platform_driver(ftpm_tee_driver);
+
+MODULE_AUTHOR("Thirupathaiah Annapureddy <thiruan@microsoft.com>");
+MODULE_DESCRIPTION("TPM Driver for fTPM TA in TEE");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Microsoft Corporation
+ */
+
+#ifndef __TPM_FTPM_TEE_H__
+#define __TPM_FTPM_TEE_H__
+
+#include <linux/tee_drv.h>
+#include <linux/tpm.h>
+#include <linux/uuid.h>
+
+/* The TA function IDs (TAFs) implemented by the fTPM TA */
+#define FTPM_OPTEE_TA_SUBMIT_COMMAND (0)
+#define FTPM_OPTEE_TA_EMULATE_PPI (1)
+
+/* max. buffer size supported by fTPM */
+#define MAX_COMMAND_SIZE 4096
+#define MAX_RESPONSE_SIZE 4096
+
+/**
+ * struct ftpm_tee_private - fTPM's private data
+ * @chip: struct tpm_chip instance registered with tpm framework.
+ * @session: fTPM TA session identifier.
+ * @resp_len: cached response buffer length.
+ * @resp_buf: cached response buffer.
+ * @ctx: TEE context handler.
+ * @shm: Memory pool shared with fTPM TA in TEE.
+ */
+struct ftpm_tee_private {
+ struct tpm_chip *chip;
+ u32 session;
+ size_t resp_len;
+ u8 resp_buf[MAX_RESPONSE_SIZE];
+ struct tee_context *ctx;
+ struct tee_shm *shm;
+};
+
+#endif /* __TPM_FTPM_TEE_H__ */
goto out_err;
}
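+	/*
+	 * Power the TPM on and tentatively set the IRQ flag so that the
+	 * interrupt probe paths below can issue test commands.
+	 */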
+ tpm_chip_start(chip);
+ chip->flags |= TPM_CHIP_FLAG_IRQ;
if (irq) {
tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
irq);
} else {
tpm_tis_probe_irq(chip, intmask);
}
+ tpm_chip_stop(chip);
}
rc = tpm_chip_register(chip);
#define BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED 14
#define BCM2835_DMA_CHAN_NAME_SIZE 8
+/**
+ * struct bcm2835_dmadev - BCM2835 DMA controller
+ * @ddev: DMA device
+ * @base: base address of register map
+ * @dma_parms: DMA parameters (to convey 1 GByte max segment size to clients)
+ * @zero_page: bus address of zero page (to detect transactions copying from
+ * zero page and avoid accessing memory if so)
+ */
struct bcm2835_dmadev {
struct dma_device ddev;
void __iomem *base;
struct device_dma_parameters dma_parms;
+ dma_addr_t zero_page;
};
struct bcm2835_dma_cb {
size_t period_len, enum dma_transfer_direction direction,
unsigned long flags)
{
+ struct bcm2835_dmadev *od = to_bcm2835_dma_dev(chan->device);
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
struct bcm2835_desc *d;
dma_addr_t src, dst;
u32 info = BCM2835_DMA_WAIT_RESP;
- u32 extra = BCM2835_DMA_INT_EN;
+ u32 extra = 0;
size_t max_len = bcm2835_dma_max_frame_length(c);
size_t frames;
return NULL;
}
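+	/*
+	 * Without DMA_PREP_INTERRUPT there is no per-period completion
+	 * callback, so program the whole buffer as a single period.
+	 */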
+ if (flags & DMA_PREP_INTERRUPT)
+ extra |= BCM2835_DMA_INT_EN;
+ else
+ period_len = buf_len;
+
/*
	 * warn if buf_len is not a multiple of period_len - this may lead
	 * to unexpected latencies for interrupts and thus audible clicks
dst = c->cfg.dst_addr;
src = buf_addr;
info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
+
+ /* non-lite channels can write zeroes w/o accessing memory */
+ if (buf_addr == od->zero_page && !c->is_lite_channel)
+ info |= BCM2835_DMA_S_IGNORE;
}
/* calculate number of frames */
/* stop DMA activity */
if (c->desc) {
- vchan_terminate_vdesc(&c->desc->vd);
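+		/*
+		 * Descriptors prepared without DMA_PREP_INTERRUPT never get
+		 * a completion, so dispose of them immediately instead of
+		 * deferring to vchan_synchronize().
+		 */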
+ if (c->desc->vd.tx.flags & DMA_PREP_INTERRUPT)
+ vchan_terminate_vdesc(&c->desc->vd);
+ else
+ vchan_vdesc_fini(&c->desc->vd);
c->desc = NULL;
bcm2835_dma_abort(c);
}
list_del(&c->vc.chan.device_node);
tasklet_kill(&c->vc.task);
}
+
+ dma_unmap_page_attrs(od->ddev.dev, od->zero_page, PAGE_SIZE,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}
static const struct of_device_id bcm2835_dma_of_match[] = {
od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
BIT(DMA_MEM_TO_MEM);
od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ od->ddev.descriptor_reuse = true;
od->ddev.dev = &pdev->dev;
INIT_LIST_HEAD(&od->ddev.channels);
platform_set_drvdata(pdev, od);
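+	/*
+	 * Map the kernel zero page once so cyclic transfers sourced from it
+	 * can be detected and the memory reads skipped (BCM2835_DMA_S_IGNORE).
+	 */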
+ od->zero_page = dma_map_page_attrs(od->ddev.dev, ZERO_PAGE(0), 0,
+ PAGE_SIZE, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(od->ddev.dev, od->zero_page)) {
+ dev_err(&pdev->dev, "Failed to map zero page\n");
+ return -ENOMEM;
+ }
+
/* Request DMA channel mask from device tree */
if (of_property_read_u32(pdev->dev.of_node,
"brcm,dma-channel-mask",
First, ECC must be configured in the bootloader. Then, this driver
will expose error counters via the EDAC kernel framework.
+config EDAC_BLUEFIELD
+ tristate "Mellanox BlueField Memory ECC"
+ depends on ARM64 && ((MELLANOX_PLATFORM && ACPI) || COMPILE_TEST)
+ help
+ Support for error detection and correction on the
+ Mellanox BlueField SoCs.
+
endif # EDAC
obj-$(CONFIG_EDAC_TI) += ti_edac.o
obj-$(CONFIG_EDAC_QCOM) += qcom_edac.o
obj-$(CONFIG_EDAC_ASPEED) += aspeed_edac.o
+obj-$(CONFIG_EDAC_BLUEFIELD) += bluefield_edac.o
static const struct of_device_id altr_sdram_ctrl_of_match[] = {
{ .compatible = "altr,sdram-edac", .data = &c5_data},
{ .compatible = "altr,sdram-edac-a10", .data = &a10_data},
- { .compatible = "altr,sdram-edac-s10", .data = &a10_data},
{},
};
MODULE_DEVICE_TABLE(of, altr_sdram_ctrl_of_match);
return 0;
}
+/*********************** SDRAM EDAC Device Functions *********************/
+
+#ifdef CONFIG_EDAC_ALTERA_SDRAM
+
+static const struct edac_device_prv_data s10_sdramecc_data = {
+ .setup = altr_check_ecc_deps,
+ .ce_clear_mask = ALTR_S10_ECC_SERRPENA,
+ .ue_clear_mask = ALTR_S10_ECC_DERRPENA,
+ .ecc_enable_mask = ALTR_S10_ECC_EN,
+ .ecc_en_ofst = ALTR_S10_ECC_CTRL_SDRAM_OFST,
+ .ce_set_mask = ALTR_S10_ECC_TSERRA,
+ .ue_set_mask = ALTR_S10_ECC_TDERRA,
+ .set_err_ofst = ALTR_S10_ECC_INTTEST_OFST,
+ .ecc_irq_handler = altr_edac_a10_ecc_irq,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
+};
+#endif /* CONFIG_EDAC_ALTERA_SDRAM */
+
/*********************** OCRAM EDAC Device Functions *********************/
#ifdef CONFIG_EDAC_ALTERA_OCRAM
#endif
#ifdef CONFIG_EDAC_ALTERA_SDMMC
{ .compatible = "altr,socfpga-sdmmc-ecc", .data = &a10_sdmmcecca_data },
+#endif
+#ifdef CONFIG_EDAC_ALTERA_SDRAM
+ { .compatible = "altr,sdram-edac-s10", .data = &s10_sdramecc_data },
#endif
{},
};
struct altr_arria10_edac *edac = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
int irq = irq_desc_get_irq(desc);
+ unsigned long bits;
dberr = (irq == edac->db_irq) ? 1 : 0;
sm_offset = dberr ? A10_SYSMGR_ECC_INTSTAT_DERR_OFST :
regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
- for_each_set_bit(bit, (unsigned long *)&irq_status, 32) {
+ bits = irq_status;
+ for_each_set_bit(bit, &bits, 32) {
irq = irq_linear_revmap(edac->domain, dberr * 32 + bit);
if (irq)
generic_handle_irq(irq);
struct device_node *parent;
int ret = 0;
+ /* SDRAM must be present for Linux (implied parent) */
+ if (of_device_is_compatible(np, "altr,sdram-edac-s10"))
+ return 0;
+
/* Ensure parent device is enabled if parent node exists */
parent = of_parse_phandle(np, "altr,ecc-parent", 0);
if (parent && !of_device_is_available(parent))
return ret;
}
+static int get_s10_sdram_edac_resource(struct device_node *np,
+ struct resource *res)
+{
+ struct device_node *parent;
+ int ret;
+
+ parent = of_parse_phandle(np, "altr,sdr-syscon", 0);
+ if (!parent)
+ return -ENODEV;
+
+ ret = of_address_to_resource(parent, 0, res);
+ of_node_put(parent);
+
+ return ret;
+}
+
static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
struct device_node *np)
{
if (!devres_open_group(edac->dev, altr_edac_a10_device_add, GFP_KERNEL))
return -ENOMEM;
- rc = of_address_to_resource(np, 0, &res);
+ if (of_device_is_compatible(np, "altr,sdram-edac-s10"))
+ rc = get_s10_sdram_edac_resource(np, &res);
+ else
+ rc = of_address_to_resource(np, 0, &res);
+
if (rc < 0) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"%s: no resource address\n", ecc_name);
of_device_is_compatible(child, "altr,socfpga-dma-ecc") ||
of_device_is_compatible(child, "altr,socfpga-usb-ecc") ||
of_device_is_compatible(child, "altr,socfpga-qspi-ecc") ||
+#ifdef CONFIG_EDAC_ALTERA_SDRAM
+ of_device_is_compatible(child, "altr,sdram-edac-s10") ||
+#endif
of_device_is_compatible(child, "altr,socfpga-sdmmc-ecc"))
altr_edac_a10_device_add(edac, child);
#ifdef CONFIG_EDAC_ALTERA_SDRAM
- else if ((of_device_is_compatible(child, "altr,sdram-edac-a10")) ||
- (of_device_is_compatible(child, "altr,sdram-edac-s10")))
+ else if (of_device_is_compatible(child, "altr,sdram-edac-a10"))
of_platform_populate(pdev->dev.of_node,
altr_sdram_ctrl_of_match,
NULL, &pdev->dev);
#define ALTR_A10_ECC_INIT_WATCHDOG_10US 10000
/************* Stratix10 Defines **************/
+#define ALTR_S10_ECC_CTRL_SDRAM_OFST 0x00
+#define ALTR_S10_ECC_EN BIT(0)
+
+#define ALTR_S10_ECC_ERRINTEN_OFST 0x10
+#define ALTR_S10_ECC_ERRINTENS_OFST 0x14
+#define ALTR_S10_ECC_ERRINTENR_OFST 0x18
+#define ALTR_S10_ECC_SERRINTEN BIT(0)
+
+#define ALTR_S10_ECC_INTMODE_OFST 0x1C
+#define ALTR_S10_ECC_INTMODE BIT(0)
+
+#define ALTR_S10_ECC_INTSTAT_OFST 0x20
+#define ALTR_S10_ECC_SERRPENA BIT(0)
+#define ALTR_S10_ECC_DERRPENA BIT(8)
+#define ALTR_S10_ECC_ERRPENA_MASK (ALTR_S10_ECC_SERRPENA | \
+ ALTR_S10_ECC_DERRPENA)
+
+#define ALTR_S10_ECC_INTTEST_OFST 0x24
+#define ALTR_S10_ECC_TSERRA BIT(0)
+#define ALTR_S10_ECC_TDERRA BIT(8)
+#define ALTR_S10_ECC_TSERRB BIT(16)
+#define ALTR_S10_ECC_TDERRB BIT(24)
+
#define ALTR_S10_DERR_ADDRA_OFST 0x2C
/* Stratix10 ECC Manager Defines */
#define S10_SYSMGR_UE_ADDR_OFST 0x224
#define S10_DDR0_IRQ_MASK BIT(16)
-#define S10_DBE_IRQ_MASK 0x3FE
+#define S10_DBE_IRQ_MASK 0x3FFFE
/* Define ECC Block Offsets for peripherals */
#define ECC_BLK_ADDRESS_OFST 0x40
(dclr & BIT(15)) ? "yes" : "no");
}
-/*
- * The Address Mask should be a contiguous set of bits in the non-interleaved
- * case. So to check for CS interleaving, find the most- and least-significant
- * bits of the mask, generate a contiguous bitmask, and compare the two.
- */
-static bool f17_cs_interleaved(struct amd64_pvt *pvt, u8 ctrl, int cs)
+#define CS_EVEN_PRIMARY BIT(0)
+#define CS_ODD_PRIMARY BIT(1)
+#define CS_EVEN_SECONDARY BIT(2)
+#define CS_ODD_SECONDARY BIT(3)
+
+#define CS_EVEN (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
+#define CS_ODD (CS_ODD_PRIMARY | CS_ODD_SECONDARY)
+
+static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
{
- u32 mask = pvt->csels[ctrl].csmasks[cs >> 1];
- u32 msb = fls(mask) - 1, lsb = ffs(mask) - 1;
- u32 test_mask = GENMASK(msb, lsb);
+ int cs_mode = 0;
- edac_dbg(1, "mask=0x%08x test_mask=0x%08x\n", mask, test_mask);
+ if (csrow_enabled(2 * dimm, ctrl, pvt))
+ cs_mode |= CS_EVEN_PRIMARY;
- return mask ^ test_mask;
+ if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
+ cs_mode |= CS_ODD_PRIMARY;
+
+ /* Asymmetric dual-rank DIMM support. */
+ if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
+ cs_mode |= CS_ODD_SECONDARY;
+
+ return cs_mode;
}
static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
{
- int dimm, size0, size1, cs0, cs1;
+ int dimm, size0, size1, cs0, cs1, cs_mode;
edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
- for (dimm = 0; dimm < 4; dimm++) {
- size0 = 0;
+ for (dimm = 0; dimm < 2; dimm++) {
cs0 = dimm * 2;
-
- if (csrow_enabled(cs0, ctrl, pvt))
- size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0);
-
- size1 = 0;
cs1 = dimm * 2 + 1;
- if (csrow_enabled(cs1, ctrl, pvt)) {
- /*
- * CS interleaving is only supported if both CSes have
- * the same amount of memory. Because they are
- * interleaved, it will look like both CSes have the
- * full amount of memory. Save the size for both as
- * half the amount we found on CS0, if interleaved.
- */
- if (f17_cs_interleaved(pvt, ctrl, cs1))
- size1 = size0 = (size0 >> 1);
- else
- size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);
- }
+ cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);
+
+ size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
+ size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
cs0, size0,
} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
+ } else if (pvt->fam >= 0x17) {
+ int umc;
+
+ for_each_umc(umc) {
+ pvt->csels[umc].b_cnt = 4;
+ pvt->csels[umc].m_cnt = 2;
+ }
+
} else {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
}
}
+static void read_umc_base_mask(struct amd64_pvt *pvt)
+{
+ u32 umc_base_reg, umc_base_reg_sec;
+ u32 umc_mask_reg, umc_mask_reg_sec;
+ u32 base_reg, base_reg_sec;
+ u32 mask_reg, mask_reg_sec;
+ u32 *base, *base_sec;
+ u32 *mask, *mask_sec;
+ int cs, umc;
+
+ for_each_umc(umc) {
+ umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
+ umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
+
+ for_each_chip_select(cs, umc, pvt) {
+ base = &pvt->csels[umc].csbases[cs];
+ base_sec = &pvt->csels[umc].csbases_sec[cs];
+
+ base_reg = umc_base_reg + (cs * 4);
+ base_reg_sec = umc_base_reg_sec + (cs * 4);
+
+ if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
+ edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *base, base_reg);
+
+ if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
+ edac_dbg(0, " DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *base_sec, base_reg_sec);
+ }
+
+ umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
+ umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;
+
+ for_each_chip_select_mask(cs, umc, pvt) {
+ mask = &pvt->csels[umc].csmasks[cs];
+ mask_sec = &pvt->csels[umc].csmasks_sec[cs];
+
+ mask_reg = umc_mask_reg + (cs * 4);
+ mask_reg_sec = umc_mask_reg_sec + (cs * 4);
+
+ if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
+ edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *mask, mask_reg);
+
+ if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
+ edac_dbg(0, " DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *mask_sec, mask_reg_sec);
+ }
+ }
+}
+
/*
* Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
*/
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
- int base_reg0, base_reg1, mask_reg0, mask_reg1, cs;
+ int cs;
prep_chip_selects(pvt);
- if (pvt->umc) {
- base_reg0 = get_umc_base(0) + UMCCH_BASE_ADDR;
- base_reg1 = get_umc_base(1) + UMCCH_BASE_ADDR;
- mask_reg0 = get_umc_base(0) + UMCCH_ADDR_MASK;
- mask_reg1 = get_umc_base(1) + UMCCH_ADDR_MASK;
- } else {
- base_reg0 = DCSB0;
- base_reg1 = DCSB1;
- mask_reg0 = DCSM0;
- mask_reg1 = DCSM1;
- }
+ if (pvt->umc)
+ return read_umc_base_mask(pvt);
for_each_chip_select(cs, 0, pvt) {
- int reg0 = base_reg0 + (cs * 4);
- int reg1 = base_reg1 + (cs * 4);
+ int reg0 = DCSB0 + (cs * 4);
+ int reg1 = DCSB1 + (cs * 4);
u32 *base0 = &pvt->csels[0].csbases[cs];
u32 *base1 = &pvt->csels[1].csbases[cs];
- if (pvt->umc) {
- if (!amd_smn_read(pvt->mc_node_id, reg0, base0))
- edac_dbg(0, " DCSB0[%d]=0x%08x reg: 0x%x\n",
- cs, *base0, reg0);
+ if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
+ edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
+ cs, *base0, reg0);
- if (!amd_smn_read(pvt->mc_node_id, reg1, base1))
- edac_dbg(0, " DCSB1[%d]=0x%08x reg: 0x%x\n",
- cs, *base1, reg1);
- } else {
- if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
- edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
- cs, *base0, reg0);
-
- if (pvt->fam == 0xf)
- continue;
+ if (pvt->fam == 0xf)
+ continue;
- if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
- edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
- cs, *base1, (pvt->fam == 0x10) ? reg1
- : reg0);
- }
+ if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
+ edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
+ cs, *base1, (pvt->fam == 0x10) ? reg1
+ : reg0);
}
for_each_chip_select_mask(cs, 0, pvt) {
- int reg0 = mask_reg0 + (cs * 4);
- int reg1 = mask_reg1 + (cs * 4);
+ int reg0 = DCSM0 + (cs * 4);
+ int reg1 = DCSM1 + (cs * 4);
u32 *mask0 = &pvt->csels[0].csmasks[cs];
u32 *mask1 = &pvt->csels[1].csmasks[cs];
- if (pvt->umc) {
- if (!amd_smn_read(pvt->mc_node_id, reg0, mask0))
- edac_dbg(0, " DCSM0[%d]=0x%08x reg: 0x%x\n",
- cs, *mask0, reg0);
-
- if (!amd_smn_read(pvt->mc_node_id, reg1, mask1))
- edac_dbg(0, " DCSM1[%d]=0x%08x reg: 0x%x\n",
- cs, *mask1, reg1);
- } else {
- if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
- edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
- cs, *mask0, reg0);
+ if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
+ edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask0, reg0);
- if (pvt->fam == 0xf)
- continue;
+ if (pvt->fam == 0xf)
+ continue;
- if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
- edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
- cs, *mask1, (pvt->fam == 0x10) ? reg1
- : reg0);
- }
+ if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
+ edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask1, (pvt->fam == 0x10) ? reg1
+ : reg0);
}
}
return ddr3_cs_size(cs_mode, false);
}
-static int f17_base_addr_to_cs_size(struct amd64_pvt *pvt, u8 umc,
+static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
unsigned int cs_mode, int csrow_nr)
{
- u32 base_addr = pvt->csels[umc].csbases[csrow_nr];
+ u32 addr_mask_orig, addr_mask_deinterleaved;
+ u32 msb, weight, num_zero_bits;
+ int dimm, size = 0;
- /* Each mask is used for every two base addresses. */
- u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr >> 1];
+ /* No Chip Selects are enabled. */
+ if (!cs_mode)
+ return size;
- /* Register [31:1] = Address [39:9]. Size is in kBs here. */
- u32 size = ((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1;
+ /* Requested size of an even CS but none are enabled. */
+ if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
+ return size;
- edac_dbg(1, "BaseAddr: 0x%x, AddrMask: 0x%x\n", base_addr, addr_mask);
+ /* Requested size of an odd CS but none are enabled. */
+ if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
+ return size;
+
+ /*
+ * There is one mask per DIMM, and two Chip Selects per DIMM.
+ * CS0 and CS1 -> DIMM0
+ * CS2 and CS3 -> DIMM1
+ */
+ dimm = csrow_nr >> 1;
+
+ /* Asymmetric dual-rank DIMM support. */
+ if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
+ addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
+ else
+ addr_mask_orig = pvt->csels[umc].csmasks[dimm];
+
+ /*
+ * The number of zero bits in the mask is equal to the number of bits
+ * in a full mask minus the number of bits in the current mask.
+ *
+ * The MSB is the number of bits in the full mask because BIT[0] is
+ * always 0.
+ */
+ msb = fls(addr_mask_orig) - 1;
+ weight = hweight_long(addr_mask_orig);
+ num_zero_bits = msb - weight;
+
+ /* Take the number of zero bits off from the top of the mask. */
+ addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
+
+ edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
+ edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
+ edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
+
+ /* Register [31:1] = Address [39:9]. Size is in kBs here. */
+ size = (addr_mask_deinterleaved >> 2) + 1;
/* Return size in MBs. */
return size >> 10;
.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
.ops = {
.early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_base_addr_to_cs_size,
+ .dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M10H_CPUS] = {
.f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
.ops = {
.early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_base_addr_to_cs_size,
+ .dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M30H_CPUS] = {
.f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
.ops = {
.early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_base_addr_to_cs_size,
+ .dbam_to_cs = f17_addr_mask_to_cs_size,
+ }
+ },
+ [F17_M70H_CPUS] = {
+ .ctl_name = "F17h_M70h",
+ .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
};
err.channel = find_umc_channel(m);
- if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
- err.err_code = ERR_NORM_ADDR;
- goto log_error;
- }
-
- error_address_to_page_and_offset(sys_addr, &err);
-
if (!(m->status & MCI_STATUS_SYNDV)) {
err.err_code = ERR_SYND;
goto log_error;
err.csrow = m->synd & 0x7;
+ if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
+ err.err_code = ERR_NORM_ADDR;
+ goto log_error;
+ }
+
+ error_address_to_page_and_offset(sys_addr, &err);
+
log_error:
__log_ecc_error(mci, &err, ecc_type);
}
int csrow_nr = csrow_nr_orig;
u32 cs_mode, nr_pages;
- if (!pvt->umc)
+ if (!pvt->umc) {
csrow_nr >>= 1;
-
- cs_mode = DBAM_DIMM(csrow_nr, dbam);
+ cs_mode = DBAM_DIMM(csrow_nr, dbam);
+ } else {
+ cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
+ }
nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
nr_pages <<= 20 - PAGE_SHIFT;
return nr_pages;
}
+static int init_csrows_df(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ enum edac_type edac_mode = EDAC_NONE;
+ enum dev_type dev_type = DEV_UNKNOWN;
+ struct dimm_info *dimm;
+ int empty = 1;
+ u8 umc, cs;
+
+ if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
+ edac_mode = EDAC_S16ECD16ED;
+ dev_type = DEV_X16;
+ } else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
+ edac_mode = EDAC_S8ECD8ED;
+ dev_type = DEV_X8;
+ } else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
+ edac_mode = EDAC_S4ECD4ED;
+ dev_type = DEV_X4;
+ } else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
+ edac_mode = EDAC_SECDED;
+ }
+
+ for_each_umc(umc) {
+ for_each_chip_select(cs, umc, pvt) {
+ if (!csrow_enabled(cs, umc, pvt))
+ continue;
+
+ empty = 0;
+ dimm = mci->csrows[cs]->channels[umc]->dimm;
+
+ edac_dbg(1, "MC node: %d, csrow: %d\n",
+ pvt->mc_node_id, cs);
+
+ dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
+ dimm->mtype = pvt->dram_type;
+ dimm->edac_mode = edac_mode;
+ dimm->dtype = dev_type;
+ }
+ }
+
+ return empty;
+}
+
/*
* Initialize the array of csrow attribute instances, based on the values
* from pci config hardware registers.
int nr_pages = 0;
u32 val;
- if (!pvt->umc) {
- amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
+ if (pvt->umc)
+ return init_csrows_df(mci);
- pvt->nbcfg = val;
+ amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
- edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- pvt->mc_node_id, val,
- !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
- }
+ pvt->nbcfg = val;
+
+ edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
+ pvt->mc_node_id, val,
+ !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
/*
* We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
/* Determine DIMM ECC mode: */
- if (pvt->umc) {
- if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED)
- edac_mode = EDAC_S4ECD4ED;
- else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED)
- edac_mode = EDAC_SECDED;
-
- } else if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
+ if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
? EDAC_S4ECD4ED
: EDAC_SECDED;
static inline void
f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
- u8 i, ecc_en = 1, cpk_en = 1;
+ u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
for_each_umc(i) {
if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
+
+ dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
+ dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
}
}
if (ecc_en) {
mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
- if (cpk_en)
+ if (!cpk_en)
+ return;
+
+ if (dev_x4)
mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+ else if (dev_x16)
+ mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
+ else
+ mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
}
}
fam_type = &family_types[F17_M30H_CPUS];
pvt->ops = &family_types[F17_M30H_CPUS].ops;
break;
+ } else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
+ fam_type = &family_types[F17_M70H_CPUS];
+ pvt->ops = &family_types[F17_M70H_CPUS].ops;
+ break;
}
/* fall through */
case 0x18:
/* Hardware limit on ChipSelect rows per MC and processors per system */
#define NUM_CHIPSELECTS 8
#define DRAM_RANGES 8
+#define NUM_CONTROLLERS 8
#define ON true
#define OFF false
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
+#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440
+#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
/*
* Function 1 - Address Map
#define DCSM0 0x60
#define DCSM1 0x160
-#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)
+#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)
+#define csrow_sec_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases_sec[(i)] & DCSB_CS_ENABLE)
#define DRAM_CONTROL 0x78
/* UMC CH register offsets */
#define UMCCH_BASE_ADDR 0x0
+#define UMCCH_BASE_ADDR_SEC 0x10
#define UMCCH_ADDR_MASK 0x20
+#define UMCCH_ADDR_MASK_SEC 0x28
#define UMCCH_ADDR_CFG 0x30
#define UMCCH_DIMM_CFG 0x80
#define UMCCH_UMC_CFG 0x100
F17_CPUS,
F17_M10H_CPUS,
F17_M30H_CPUS,
+ F17_M70H_CPUS,
NUM_FAMILIES,
};
/* A DCT chip selects collection */
struct chip_select {
u32 csbases[NUM_CHIPSELECTS];
+ u32 csbases_sec[NUM_CHIPSELECTS];
u8 b_cnt;
u32 csmasks[NUM_CHIPSELECTS];
+ u32 csmasks_sec[NUM_CHIPSELECTS];
u8 m_cnt;
};
u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
- /* one for each DCT */
- struct chip_select csels[2];
+ /* one for each DCT/UMC */
+ struct chip_select csels[NUM_CONTROLLERS];
/* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
struct dram_range ranges[DRAM_RANGES];
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Bluefield-specific EDAC driver.
+ *
+ * Copyright (c) 2019 Mellanox Technologies.
+ */
+
+#include <linux/acpi.h>
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/edac.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "edac_module.h"
+
+#define DRIVER_NAME "bluefield-edac"
+
+/*
+ * Mellanox BlueField EMI (External Memory Interface) register definitions.
+ */
+
+#define MLXBF_ECC_CNT 0x340
+#define MLXBF_ECC_CNT__SERR_CNT GENMASK(15, 0)
+#define MLXBF_ECC_CNT__DERR_CNT GENMASK(31, 16)
+
+#define MLXBF_ECC_ERR 0x348
+#define MLXBF_ECC_ERR__SECC BIT(0)
+#define MLXBF_ECC_ERR__DECC BIT(16)
+
+#define MLXBF_ECC_LATCH_SEL 0x354
+#define MLXBF_ECC_LATCH_SEL__START BIT(24)
+
+#define MLXBF_ERR_ADDR_0 0x358
+
+#define MLXBF_ERR_ADDR_1 0x37c
+
+#define MLXBF_SYNDROM 0x35c
+#define MLXBF_SYNDROM__DERR BIT(0)
+#define MLXBF_SYNDROM__SERR BIT(1)
+#define MLXBF_SYNDROM__SYN GENMASK(25, 16)
+
+#define MLXBF_ADD_INFO 0x364
+#define MLXBF_ADD_INFO__ERR_PRANK GENMASK(9, 8)
+
+#define MLXBF_EDAC_MAX_DIMM_PER_MC 2
+#define MLXBF_EDAC_ERROR_GRAIN 8
+
+/*
+ * Request MLNX_SIP_GET_DIMM_INFO
+ *
+ * Retrieve information about DIMM on a certain slot.
+ *
+ * Call register usage:
+ * a0: MLNX_SIP_GET_DIMM_INFO
+ * a1: (Memory controller index) << 16 | (Dimm index in memory controller)
+ * a2-7: not used.
+ *
+ * Return status:
+ * a0: MLXBF_DIMM_INFO defined below describing the DIMM.
+ * a1-3: not used.
+ */
+#define MLNX_SIP_GET_DIMM_INFO 0x82000008
+
+/* Format for the SMC response about the memory information */
+#define MLXBF_DIMM_INFO__SIZE_GB GENMASK_ULL(15, 0)
+#define MLXBF_DIMM_INFO__IS_RDIMM BIT(16)
+#define MLXBF_DIMM_INFO__IS_LRDIMM BIT(17)
+#define MLXBF_DIMM_INFO__IS_NVDIMM BIT(18)
+#define MLXBF_DIMM_INFO__RANKS GENMASK_ULL(23, 21)
+#define MLXBF_DIMM_INFO__PACKAGE_X GENMASK_ULL(31, 24)
+
+struct bluefield_edac_priv {
+ int dimm_ranks[MLXBF_EDAC_MAX_DIMM_PER_MC];
+ void __iomem *emi_base;
+ int dimm_per_mc;
+};
+
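+/* Issue a SiP SMC with a single argument; a0 of the response holds the result. */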
+static u64 smc_call1(u64 smc_op, u64 smc_arg)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(smc_op, smc_arg, 0, 0, 0, 0, 0, 0, &res);
+
+ return res.a0;
+}
+
+/*
+ * Gather the ECC information from the External Memory Interface registers
+ * and report it to the edac handler.
+ */
+static void bluefield_gather_report_ecc(struct mem_ctl_info *mci,
+ int error_cnt,
+ int is_single_ecc)
+{
+ struct bluefield_edac_priv *priv = mci->pvt_info;
+ u32 dram_additional_info, err_prank, edea0, edea1;
+ u32 ecc_latch_select, dram_syndrom, serr, derr, syndrom;
+ enum hw_event_mc_err_type ecc_type;
+ u64 ecc_dimm_addr;
+ int ecc_dimm;
+
+ ecc_type = is_single_ecc ? HW_EVENT_ERR_CORRECTED :
+ HW_EVENT_ERR_UNCORRECTED;
+
+ /*
+ * Tell the External Memory Interface to populate the relevant
+ * registers with information about the last ECC error occurrence.
+ */
+ ecc_latch_select = MLXBF_ECC_LATCH_SEL__START;
+ writel(ecc_latch_select, priv->emi_base + MLXBF_ECC_LATCH_SEL);
+
+	 * Verify that the ECC error information reported in the registers
+	 * is of the same type as the one we were asked to report. If not,
+	 * just report the error without the detailed information.
+ * error without the detailed information.
+ */
+ dram_syndrom = readl(priv->emi_base + MLXBF_SYNDROM);
+ serr = FIELD_GET(MLXBF_SYNDROM__SERR, dram_syndrom);
+ derr = FIELD_GET(MLXBF_SYNDROM__DERR, dram_syndrom);
+ syndrom = FIELD_GET(MLXBF_SYNDROM__SYN, dram_syndrom);
+
+ if ((is_single_ecc && !serr) || (!is_single_ecc && !derr)) {
+ edac_mc_handle_error(ecc_type, mci, error_cnt, 0, 0, 0,
+ 0, 0, -1, mci->ctl_name, "");
+ return;
+ }
+
+ dram_additional_info = readl(priv->emi_base + MLXBF_ADD_INFO);
+ err_prank = FIELD_GET(MLXBF_ADD_INFO__ERR_PRANK, dram_additional_info);
+
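+	/*
+	 * Physical ranks 0/1 belong to DIMM 0 and ranks 2/3 to DIMM 1,
+	 * unless DIMM 0 itself has more than two ranks.
+	 */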
+ ecc_dimm = (err_prank >= 2 && priv->dimm_ranks[0] <= 2) ? 1 : 0;
+
+ edea0 = readl(priv->emi_base + MLXBF_ERR_ADDR_0);
+ edea1 = readl(priv->emi_base + MLXBF_ERR_ADDR_1);
+
+ ecc_dimm_addr = ((u64)edea1 << 32) | edea0;
+
+ edac_mc_handle_error(ecc_type, mci, error_cnt,
+ PFN_DOWN(ecc_dimm_addr),
+ offset_in_page(ecc_dimm_addr),
+ syndrom, ecc_dimm, 0, 0, mci->ctl_name, "");
+}
+
+static void bluefield_edac_check(struct mem_ctl_info *mci)
+{
+ struct bluefield_edac_priv *priv = mci->pvt_info;
+ u32 ecc_count, single_error_count, double_error_count, ecc_error = 0;
+
+ /*
+ * The memory controller might not be initialized by the firmware
+	 * when no memory is present, which may lead to bad register readings.
+ */
+ if (mci->edac_cap == EDAC_FLAG_NONE)
+ return;
+
+ ecc_count = readl(priv->emi_base + MLXBF_ECC_CNT);
+ single_error_count = FIELD_GET(MLXBF_ECC_CNT__SERR_CNT, ecc_count);
+ double_error_count = FIELD_GET(MLXBF_ECC_CNT__DERR_CNT, ecc_count);
+
+ if (single_error_count) {
+ ecc_error |= MLXBF_ECC_ERR__SECC;
+
+ bluefield_gather_report_ecc(mci, single_error_count, 1);
+ }
+
+ if (double_error_count) {
+ ecc_error |= MLXBF_ECC_ERR__DECC;
+
+ bluefield_gather_report_ecc(mci, double_error_count, 0);
+ }
+
+ /* Write to clear reported errors. */
+ if (ecc_count)
+ writel(ecc_error, priv->emi_base + MLXBF_ECC_ERR);
+}
+
+/* Initialize the DIMMs information for the given memory controller. */
+static void bluefield_edac_init_dimms(struct mem_ctl_info *mci)
+{
+ struct bluefield_edac_priv *priv = mci->pvt_info;
+ int mem_ctrl_idx = mci->mc_idx;
+ struct dimm_info *dimm;
+ u64 smc_info, smc_arg;
+ int is_empty = 1, i;
+
+ for (i = 0; i < priv->dimm_per_mc; i++) {
+ dimm = mci->dimms[i];
+
+ smc_arg = mem_ctrl_idx << 16 | i;
+ smc_info = smc_call1(MLNX_SIP_GET_DIMM_INFO, smc_arg);
+
+ if (!FIELD_GET(MLXBF_DIMM_INFO__SIZE_GB, smc_info)) {
+ dimm->mtype = MEM_EMPTY;
+ continue;
+ }
+
+ is_empty = 0;
+
+ dimm->edac_mode = EDAC_SECDED;
+
+ if (FIELD_GET(MLXBF_DIMM_INFO__IS_NVDIMM, smc_info))
+ dimm->mtype = MEM_NVDIMM;
+ else if (FIELD_GET(MLXBF_DIMM_INFO__IS_LRDIMM, smc_info))
+ dimm->mtype = MEM_LRDDR4;
+ else if (FIELD_GET(MLXBF_DIMM_INFO__IS_RDIMM, smc_info))
+ dimm->mtype = MEM_RDDR4;
+ else
+ dimm->mtype = MEM_DDR4;
+
+ dimm->nr_pages =
+ FIELD_GET(MLXBF_DIMM_INFO__SIZE_GB, smc_info) *
+ (SZ_1G / PAGE_SIZE);
+ dimm->grain = MLXBF_EDAC_ERROR_GRAIN;
+
+ /* Mem controller for BlueField only supports x4, x8 and x16 */
+ switch (FIELD_GET(MLXBF_DIMM_INFO__PACKAGE_X, smc_info)) {
+ case 4:
+ dimm->dtype = DEV_X4;
+ break;
+ case 8:
+ dimm->dtype = DEV_X8;
+ break;
+ case 16:
+ dimm->dtype = DEV_X16;
+ break;
+ default:
+ dimm->dtype = DEV_UNKNOWN;
+ }
+
+ priv->dimm_ranks[i] =
+ FIELD_GET(MLXBF_DIMM_INFO__RANKS, smc_info);
+ }
+
+ if (is_empty)
+ mci->edac_cap = EDAC_FLAG_NONE;
+ else
+ mci->edac_cap = EDAC_FLAG_SECDED;
+}
+
+static int bluefield_edac_mc_probe(struct platform_device *pdev)
+{
+ struct bluefield_edac_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct edac_mc_layer layers[1];
+ struct mem_ctl_info *mci;
+ struct resource *emi_res;
+ unsigned int mc_idx, dimm_count;
+ int rc, ret;
+
+ /* Read the MSS (Memory SubSystem) index from ACPI table. */
+ if (device_property_read_u32(dev, "mss_number", &mc_idx)) {
+ dev_warn(dev, "bf_edac: MSS number unknown\n");
+ return -EINVAL;
+ }
+
+ /* Read the DIMMs per MC from ACPI table. */
+ if (device_property_read_u32(dev, "dimm_per_mc", &dimm_count)) {
+ dev_warn(dev, "bf_edac: DIMMs per MC unknown\n");
+ return -EINVAL;
+ }
+
+ if (dimm_count > MLXBF_EDAC_MAX_DIMM_PER_MC) {
+ dev_warn(dev, "bf_edac: DIMMs per MC not valid\n");
+ return -EINVAL;
+ }
+
+ emi_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!emi_res)
+ return -EINVAL;
+
+ layers[0].type = EDAC_MC_LAYER_SLOT;
+ layers[0].size = dimm_count;
+ layers[0].is_virt_csrow = true;
+
+ mci = edac_mc_alloc(mc_idx, ARRAY_SIZE(layers), layers, sizeof(*priv));
+ if (!mci)
+ return -ENOMEM;
+
+ priv = mci->pvt_info;
+
+ priv->dimm_per_mc = dimm_count;
+ priv->emi_base = devm_ioremap_resource(dev, emi_res);
+ if (IS_ERR(priv->emi_base)) {
+ dev_err(dev, "failed to map EMI IO resource\n");
+ ret = PTR_ERR(priv->emi_base);
+ goto err;
+ }
+
+ mci->pdev = dev;
+ mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_RDDR4 |
+ MEM_FLAG_LRDDR4 | MEM_FLAG_NVDIMM;
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+
+ mci->mod_name = DRIVER_NAME;
+ mci->ctl_name = "BlueField_Memory_Controller";
+ mci->dev_name = dev_name(dev);
+ mci->edac_check = bluefield_edac_check;
+
+ /* Initialize mci with the actual populated DIMM information. */
+ bluefield_edac_init_dimms(mci);
+
+ platform_set_drvdata(pdev, mci);
+
+ /* Register with EDAC core */
+ rc = edac_mc_add_mc(mci);
+ if (rc) {
+ dev_err(dev, "failed to register with EDAC core\n");
+ ret = rc;
+ goto err;
+ }
+
+ /* Only POLL mode supported so far. */
+ edac_op_state = EDAC_OPSTATE_POLL;
+
+ return 0;
+
+err:
+ edac_mc_free(mci);
+
+ return ret;
+}
+
+static int bluefield_edac_mc_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+
+ return 0;
+}
+
+static const struct acpi_device_id bluefield_mc_acpi_ids[] = {
+ {"MLNXBF08", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, bluefield_mc_acpi_ids);
+
+static struct platform_driver bluefield_edac_mc_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .acpi_match_table = bluefield_mc_acpi_ids,
+ },
+ .probe = bluefield_edac_mc_probe,
+ .remove = bluefield_edac_mc_remove,
+};
+
+module_platform_driver(bluefield_edac_mc_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField memory edac driver");
+MODULE_AUTHOR("Mellanox Technologies");
+MODULE_LICENSE("GPL v2");
module_param_cb(edac_report, &edac_report_ops, &edac_report, 0644);
-unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
- unsigned len)
+unsigned int edac_dimm_info_location(struct dimm_info *dimm, char *buf,
+ unsigned int len)
{
struct mem_ctl_info *mci = dimm->mci;
int i, n, count = 0;
* At return, the pointer 'p' will be incremented to be used on a next call
* to this function.
*/
-void *edac_align_ptr(void **p, unsigned size, int n_elems)
+void *edac_align_ptr(void **p, unsigned int size, int n_elems)
{
- unsigned align, r;
+ unsigned int align, r;
void *ptr = *p;
*p += size * n_elems;
static void _edac_mc_free(struct mem_ctl_info *mci)
{
- int i, chn, row;
struct csrow_info *csr;
- const unsigned int tot_dimms = mci->tot_dimms;
- const unsigned int tot_channels = mci->num_cschannel;
- const unsigned int tot_csrows = mci->nr_csrows;
+ int i, chn, row;
if (mci->dimms) {
- for (i = 0; i < tot_dimms; i++)
+ for (i = 0; i < mci->tot_dimms; i++)
kfree(mci->dimms[i]);
kfree(mci->dimms);
}
+
if (mci->csrows) {
- for (row = 0; row < tot_csrows; row++) {
+ for (row = 0; row < mci->nr_csrows; row++) {
csr = mci->csrows[row];
- if (csr) {
- if (csr->channels) {
- for (chn = 0; chn < tot_channels; chn++)
- kfree(csr->channels[chn]);
- kfree(csr->channels);
- }
- kfree(csr);
+ if (!csr)
+ continue;
+
+ if (csr->channels) {
+ for (chn = 0; chn < mci->num_cschannel; chn++)
+ kfree(csr->channels[chn]);
+ kfree(csr->channels);
}
+ kfree(csr);
}
kfree(mci->csrows);
}
kfree(mci);
}
-struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
- unsigned n_layers,
+struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
+ unsigned int n_layers,
struct edac_mc_layer *layers,
- unsigned sz_pvt)
+ unsigned int sz_pvt)
{
struct mem_ctl_info *mci;
struct edac_mc_layer *layer;
struct rank_info *chan;
struct dimm_info *dimm;
u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
- unsigned pos[EDAC_MAX_LAYERS];
- unsigned size, tot_dimms = 1, count = 1;
- unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
+ unsigned int pos[EDAC_MAX_LAYERS];
+ unsigned int size, tot_dimms = 1, count = 1;
+ unsigned int tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
void *pvt, *p, *ptr = NULL;
int i, j, row, chn, n, len, off;
bool per_rank = false;
if (p > e->location)
*(p - 1) = '\0';
- /* Report the error via the trace interface */
- grain_bits = fls_long(e->grain) + 1;
+ /* Sanity-check driver-supplied grain value. */
+ if (WARN_ON_ONCE(!e->grain))
+ e->grain = 1;
+ grain_bits = fls_long(e->grain - 1);
+
+ /* Report the error via the trace interface */
if (IS_ENABLED(CONFIG_RAS))
trace_mc_event(type, e->msg, e->label, e->error_count,
mci->mc_idx, e->top_layer, e->mid_layer,
* On success, return a pointer to struct mem_ctl_info pointer;
* %NULL otherwise
*/
-struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
- unsigned n_layers,
+struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
+ unsigned int n_layers,
struct edac_mc_layer *layers,
- unsigned sz_pvt);
+ unsigned int sz_pvt);
/**
* edac_get_owner - Return the owner's mod_name of EDAC MC
struct dev_ch_attribute {
struct device_attribute attr;
- int channel;
+ unsigned int channel;
};
#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
char *data)
{
struct csrow_info *csrow = to_csrow(dev);
- unsigned chan = to_channel(mattr);
+ unsigned int chan = to_channel(mattr);
struct rank_info *rank = csrow->channels[chan];
/* if field has not been initialized, there is nothing to send */
const char *data, size_t count)
{
struct csrow_info *csrow = to_csrow(dev);
- unsigned chan = to_channel(mattr);
+ unsigned int chan = to_channel(mattr);
struct rank_info *rank = csrow->channels[chan];
size_t copy_count = count;
struct device_attribute *mattr, char *data)
{
struct csrow_info *csrow = to_csrow(dev);
- unsigned chan = to_channel(mattr);
+ unsigned int chan = to_channel(mattr);
struct rank_info *rank = csrow->channels[chan];
return sprintf(data, "%u\n", rank->ce_count);
{
struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
- edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
+ edac_dbg(1, "device %s released\n", dev_name(dev));
kfree(csrow);
}
dev_set_name(&csrow->dev, "csrow%d", index);
dev_set_drvdata(&csrow->dev, csrow);
- edac_dbg(0, "creating (virtual) csrow node %s\n",
- dev_name(&csrow->dev));
-
err = device_add(&csrow->dev);
- if (err)
+ if (err) {
+ edac_dbg(1, "failure: create device %s\n", dev_name(&csrow->dev));
put_device(&csrow->dev);
+ return err;
+ }
- return err;
+ edac_dbg(0, "device %s created\n", dev_name(&csrow->dev));
+
+ return 0;
}
/* Create a CSROW object under specifed edac_mc_device */
if (!nr_pages_per_csrow(csrow))
continue;
err = edac_create_csrow_object(mci, mci->csrows[i], i);
- if (err < 0) {
- edac_dbg(1,
- "failure: create csrow objects for csrow %d\n",
- i);
+ if (err < 0)
goto error;
- }
}
return 0;
{
struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);
- edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev));
+ edac_dbg(1, "device %s released\n", dev_name(dev));
kfree(dimm);
}
pm_runtime_forbid(&mci->dev);
err = device_add(&dimm->dev);
- if (err)
+ if (err) {
+ edac_dbg(1, "failure: create device %s\n", dev_name(&dimm->dev));
put_device(&dimm->dev);
+ return err;
+ }
- edac_dbg(0, "created rank/dimm device %s\n", dev_name(&dimm->dev));
+ if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
+ char location[80];
- return err;
+ edac_dimm_info_location(dimm, location, sizeof(location));
+ edac_dbg(0, "device %s created at location %s\n",
+ dev_name(&dimm->dev), location);
+ }
+
+ return 0;
}
/*
{
struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
- edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
+ edac_dbg(1, "device %s released\n", dev_name(dev));
kfree(mci);
}
dev_set_drvdata(&mci->dev, mci);
pm_runtime_forbid(&mci->dev);
- edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
err = device_add(&mci->dev);
if (err < 0) {
edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
put_device(&mci->dev);
- goto out;
+ return err;
}
+ edac_dbg(0, "device %s created\n", dev_name(&mci->dev));
+
/*
* Create the dimm/rank devices
*/
if (!dimm->nr_pages)
continue;
-#ifdef CONFIG_EDAC_DEBUG
- edac_dbg(1, "creating dimm%d, located at ", i);
- if (edac_debug_level >= 1) {
- int lay;
- for (lay = 0; lay < mci->n_layers; lay++)
- printk(KERN_CONT "%s %d ",
- edac_layer_name[mci->layers[lay].type],
- dimm->location[lay]);
- printk(KERN_CONT "\n");
- }
-#endif
err = edac_create_dimm_object(mci, dimm, i);
- if (err) {
- edac_dbg(1, "failure: create dimm %d obj\n", i);
+ if (err)
goto fail_unregister_dimm;
- }
}
#ifdef CONFIG_EDAC_LEGACY_SYSFS
}
device_unregister(&mci->dev);
-out:
return err;
}
struct dimm_info *dimm = mci->dimms[i];
if (dimm->nr_pages == 0)
continue;
- edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev));
+ edac_dbg(1, "unregistering device %s\n", dev_name(&dimm->dev));
device_unregister(&dimm->dev);
}
}
void edac_unregister_sysfs(struct mem_ctl_info *mci)
{
- edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
+ edac_dbg(1, "unregistering device %s\n", dev_name(&mci->dev));
device_unregister(&mci->dev);
}
* parent device, used to create the /sys/devices/mc sysfs node.
* So, there are no attributes on it.
*/
- edac_dbg(1, "Releasing device %s\n", dev_name(dev));
+ edac_dbg(1, "device %s released\n", dev_name(dev));
kfree(dev);
}
int err;
mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL);
- if (!mci_pdev) {
- err = -ENOMEM;
- goto out;
- }
+ if (!mci_pdev)
+ return -ENOMEM;
mci_pdev->bus = edac_get_sysfs_subsys();
mci_pdev->type = &mc_attr_type;
dev_set_name(mci_pdev, "mc");
err = device_add(mci_pdev);
- if (err < 0)
- goto out_put_device;
+ if (err < 0) {
+ edac_dbg(1, "failure: create device %s\n", dev_name(mci_pdev));
+ put_device(mci_pdev);
+ return err;
+ }
edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
return 0;
-
- out_put_device:
- put_device(mci_pdev);
- out:
- return err;
}
void edac_mc_sysfs_exit(void)
struct ghes_edac_dimm_fill {
struct mem_ctl_info *mci;
- unsigned count;
+ unsigned int count;
};
static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
}
/* convert csrow index into a rank (per channel -- 0..5) */
-static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
+static unsigned int i5100_csrow_to_rank(const struct mem_ctl_info *mci,
+ unsigned int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
}
/* convert csrow index into a channel (0..1) */
-static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
+static unsigned int i5100_csrow_to_chan(const struct mem_ctl_info *mci,
+ unsigned int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
return ret;
}
-static unsigned long i5100_npages(struct mem_ctl_info *mci, int csrow)
+static unsigned long i5100_npages(struct mem_ctl_info *mci, unsigned int csrow)
{
struct i5100_priv *priv = mci->pvt_info;
- const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow);
- const unsigned chan = i5100_csrow_to_chan(mci, csrow);
+ const unsigned int chan_rank = i5100_csrow_to_rank(mci, csrow);
+ const unsigned int chan = i5100_csrow_to_chan(mci, csrow);
unsigned addr_lines;
/* dimm present? */
for (i = 0; i < mci->tot_dimms; i++) {
struct dimm_info *dimm;
const unsigned long npages = i5100_npages(mci, i);
- const unsigned chan = i5100_csrow_to_chan(mci, i);
- const unsigned rank = i5100_csrow_to_rank(mci, i);
+ const unsigned int chan = i5100_csrow_to_chan(mci, i);
+ const unsigned int rank = i5100_csrow_to_rank(mci, i);
if (!npages)
continue;
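
For reference, the conversion helpers above split a flat csrow index into a
(channel, rank) pair. Assuming the driver's layout of priv->ranksperchan ranks per
channel (a sketch of the arithmetic, not the patch itself):

	const struct i5100_priv *priv = mci->pvt_info;
	unsigned int chan = csrow / priv->ranksperchan;	/* 0..1 */
	unsigned int rank = csrow % priv->ranksperchan;	/* 0..5 */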
}
}
+#define DNV_MCHBAR_SIZE 0x8000
+#define DNV_SB_PORT_SIZE 0x10000
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
struct pci_dev *pdev;
char *base;
u64 addr;
+ unsigned long size;
if (op == 4) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
addr = get_mem_ctrl_hub_base_addr();
if (!addr)
return -ENODEV;
+ size = DNV_MCHBAR_SIZE;
} else {
/* MMIO via sideband register base address */
addr = get_sideband_reg_base_addr();
if (!addr)
return -ENODEV;
addr += (port << 16);
+ size = DNV_SB_PORT_SIZE;
}
- base = ioremap((resource_size_t)addr, 0x10000);
+ base = ioremap((resource_size_t)addr, size);
if (!base)
return -ENODEV;