Merge branch 'i2c/for-current' of git://git.kernel.org/pub/scm/linux/kernel/git/wsa...
author	Linus Torvalds <torvalds@linux-foundation.org>
Sat, 17 Aug 2019 17:44:50 +0000 (10:44 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Sat, 17 Aug 2019 17:44:50 +0000 (10:44 -0700)
Pull i2c fixes from Wolfram Sang:
 "I2C has one revert because of a regression, two fixes for tiny race
  windows (which we were not able to trigger), a MAINTAINERS addition,
  and a SPDX fix"

* 'i2c/for-current' of git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux:
  i2c: stm32: Use the correct style for SPDX License Identifier
  i2c: emev2: avoid race when unregistering slave client
  i2c: rcar: avoid race when unregistering slave client
  MAINTAINERS: i2c-imx: take over maintainership
  Revert "i2c: imx: improve the error handling in i2c_imx_dma_request()"

103 files changed:
Documentation/devicetree/bindings/Makefile
Documentation/devicetree/bindings/net/fsl-fec.txt
Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
MAINTAINERS
arch/arm/mm/dma-mapping.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/ftrace.c
arch/arm64/mm/dma-mapping.c
arch/powerpc/Kconfig
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/dma-common.c [deleted file]
arch/riscv/configs/defconfig
arch/riscv/configs/rv32_defconfig
arch/riscv/include/asm/switch_to.h
arch/riscv/include/asm/tlbflush.h
arch/riscv/kernel/process.c
arch/sh/kernel/disassemble.c
arch/sh/kernel/hw_breakpoint.c
arch/xtensa/kernel/setup.c
drivers/auxdisplay/Kconfig
drivers/auxdisplay/charlcd.c
drivers/auxdisplay/charlcd.h [moved from include/misc/charlcd.h with 94% similarity]
drivers/auxdisplay/hd44780.c
drivers/auxdisplay/panel.c
drivers/cpufreq/cpufreq.c
drivers/dma/dw-edma/dw-edma-core.h
drivers/dma/dw-edma/dw-edma-pcie.c
drivers/dma/dw-edma/dw-edma-v0-core.c
drivers/dma/dw-edma/dw-edma-v0-debugfs.c
drivers/dma/ste_dma40.c
drivers/dma/stm32-mdma.c
drivers/dma/tegra210-adma.c
drivers/dma/ti/omap-dma.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/ast/ast_post.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/scheduler/sched_entity.c
drivers/infiniband/core/counters.c
drivers/infiniband/core/nldev.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/sw/siw/Kconfig
drivers/infiniband/sw/siw/siw.h
drivers/infiniband/sw/siw/siw_main.c
drivers/infiniband/sw/siw/siw_qp.c
drivers/infiniband/sw/siw/siw_verbs.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/dma-iommu.c
drivers/iommu/intel-iommu-debugfs.c
drivers/iommu/intel-iommu.c
drivers/media/platform/omap/omap_vout_vrfb.c
drivers/nvme/host/pci.c
drivers/of/irq.c
drivers/of/resolver.c
drivers/pci/pcie/aspm.c
drivers/scsi/lpfc/lpfc_init.c
fs/afs/cmservice.c
fs/afs/dir.c
fs/afs/file.c
fs/afs/vlclient.c
fs/seq_file.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_da_btree.c
fs/xfs/libxfs/xfs_dir2_node.c
fs/xfs/xfs_log.c
include/asm-generic/5level-fixup.h
include/linux/dma-noncoherent.h
include/linux/gfp.h
include/linux/memcontrol.h
include/linux/mempolicy.h
include/linux/mm_types.h
include/linux/pci.h
include/uapi/rdma/siw-abi.h
kernel/dma/direct.c
kernel/dma/mapping.c
kernel/dma/remap.c
kernel/sched/cpufreq_schedutil.c
mm/huge_memory.c
mm/hugetlb.c
mm/kmemleak.c
mm/memcontrol.c
mm/mempolicy.c
mm/memremap.c
mm/rmap.c
mm/shmem.c
mm/usercopy.c
mm/vmalloc.c
mm/vmscan.c
mm/workingset.c
mm/z3fold.c
samples/auxdisplay/cfag12864b-example.c
security/keys/trusted.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_generic.h
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/usb/mixer.c

index 6b0dfd5c17baccb3232f032738a7a5c53efd28c9..5138a2f6232aa23cdc98211d2e3fcaaa2890030e 100644 (file)
@@ -19,7 +19,9 @@ quiet_cmd_mk_schema = SCHEMA  $@
 
 DT_DOCS = $(shell \
        cd $(srctree)/$(src) && \
-       find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \
+       find * \( -name '*.yaml' ! \
+               -name $(DT_TMP_SCHEMA) ! \
+               -name '*.example.dt.yaml' \) \
        )
 
 DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS))
index 2d41fb96ce0a2b66acf309071f1534c6437f52cf..5b88fae0307d153e50a6faac33d6152bdf2711d4 100644 (file)
@@ -7,18 +7,6 @@ Required properties:
 - phy-mode : See ethernet.txt file in the same directory
 
 Optional properties:
-- phy-reset-gpios : Should specify the gpio for phy reset
-- phy-reset-duration : Reset duration in milliseconds.  Should present
-  only if property "phy-reset-gpios" is available.  Missing the property
-  will have the duration be 1 millisecond.  Numbers greater than 1000 are
-  invalid and 1 millisecond will be used instead.
-- phy-reset-active-high : If present then the reset sequence using the GPIO
-  specified in the "phy-reset-gpios" property is reversed (H=reset state,
-  L=operation state).
-- phy-reset-post-delay : Post reset delay in milliseconds. If present then
-  a delay of phy-reset-post-delay milliseconds will be observed after the
-  phy-reset-gpios has been toggled. Can be omitted thus no delay is
-  observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
 - phy-supply : regulator that powers the Ethernet PHY.
 - phy-handle : phandle to the PHY device connected to this device.
 - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory.
@@ -47,11 +35,27 @@ Optional properties:
   For imx6sx, "int0" handles all 3 queues and ENET_MII. "pps" is for the pulse
   per second interrupt associated with 1588 precision time protocol(PTP).
 
-
 Optional subnodes:
 - mdio : specifies the mdio bus in the FEC, used as a container for phy nodes
   according to phy.txt in the same directory
 
+Deprecated optional properties:
+       To avoid these, create a phy node according to phy.txt in the same
+       directory, and point the fec's "phy-handle" property to it. Then use
+       the phy's reset binding, again described by phy.txt.
+- phy-reset-gpios : Should specify the gpio for phy reset
+- phy-reset-duration : Reset duration in milliseconds.  Should present
+  only if property "phy-reset-gpios" is available.  Missing the property
+  will have the duration be 1 millisecond.  Numbers greater than 1000 are
+  invalid and 1 millisecond will be used instead.
+- phy-reset-active-high : If present then the reset sequence using the GPIO
+  specified in the "phy-reset-gpios" property is reversed (H=reset state,
+  L=operation state).
+- phy-reset-post-delay : Post reset delay in milliseconds. If present then
+  a delay of phy-reset-post-delay milliseconds will be observed after the
+  phy-reset-gpios has been toggled. Can be omitted thus no delay is
+  observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
+
 Example:
 
 ethernet@83fec000 {
index 91d3e78b3395cd3b23b07826f6fcee9cda71166e..400df2da018a32f0ee2b0e036ac3432912dac0a4 100644 (file)
@@ -37,7 +37,8 @@ properties:
   hwlocks: true
 
   st,syscfg:
-    $ref: "/schemas/types.yaml#/definitions/phandle-array"
+    allOf:
+      - $ref: "/schemas/types.yaml#/definitions/phandle-array"
     description: Should be phandle/offset/mask
     items:
       - description: Phandle to the syscon node which includes IRQ mux selection.
index ddc1cf5cb0b9c67e9a9d3d512131ddf4736ae95e..14dc54200751ebbb4d9fdeba9987faefcf141c6b 100644 (file)
@@ -8423,7 +8423,6 @@ L:        linux-xfs@vger.kernel.org
 L:     linux-fsdevel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
 S:     Supported
-F:     fs/iomap.c
 F:     fs/iomap/
 F:     include/linux/iomap.h
 
index 6774b03aa405ca4e2dd057fcfe65aa9b9a43a878..d42557ee69c28fbcfd2f7f6c909f5b9b534779a8 100644 (file)
@@ -2405,9 +2405,7 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
                unsigned long attrs)
 {
-       if (!dev_is_dma_coherent(dev))
-               return __get_dma_pgprot(attrs, prot);
-       return prot;
+       return __get_dma_pgprot(attrs, prot);
 }
 
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
index d19d14ba9ae401558e6e387bed2ef0b45aa211ce..b1fdc486aed8239c681acdcab1d98550bbdf60d9 100644 (file)
@@ -184,9 +184,17 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
-       S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
-       S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+       /*
+        * We already refuse to boot CPUs that don't support our configured
+        * page size, so we can only detect mismatches for a page size other
+        * than the one we're currently using. Unfortunately, SoCs like this
+        * exist in the wild so, even though we don't like it, we'll have to go
+        * along with it and treat them as non-strict.
+        */
+       S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
+       S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
        /* Linux shouldn't care about secure memory */
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
index 1285c7b2947fa74cf50e66b83ec163b2d7a21027..17177325797420cea45062f349f5dde71b72119b 100644 (file)
@@ -73,7 +73,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
        if (offset < -SZ_128M || offset >= SZ_128M) {
 #ifdef CONFIG_ARM64_MODULE_PLTS
-               struct plt_entry trampoline;
+               struct plt_entry trampoline, *dst;
                struct module *mod;
 
                /*
@@ -106,23 +106,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
                 * to check if the actual opcodes are in fact identical,
                 * regardless of the offset in memory so use memcmp() instead.
                 */
-               trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
-               if (memcmp(mod->arch.ftrace_trampoline, &trampoline,
-                          sizeof(trampoline))) {
-                       if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
+               dst = mod->arch.ftrace_trampoline;
+               trampoline = get_plt_entry(addr, dst);
+               if (memcmp(dst, &trampoline, sizeof(trampoline))) {
+                       if (plt_entry_is_initialized(dst)) {
                                pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
                                return -EINVAL;
                        }
 
                        /* point the trampoline to our ftrace entry point */
                        module_disable_ro(mod);
-                       *mod->arch.ftrace_trampoline = trampoline;
+                       *dst = trampoline;
                        module_enable_ro(mod, true);
 
-                       /* update trampoline before patching in the branch */
-                       smp_wmb();
+                       /*
+                        * Ensure updated trampoline is visible to instruction
+                        * fetch before we patch in the branch.
+                        */
+                       __flush_icache_range((unsigned long)&dst[0],
+                                            (unsigned long)&dst[1]);
                }
-               addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
+               addr = (unsigned long)dst;
 #else /* CONFIG_ARM64_MODULE_PLTS */
                return -EINVAL;
 #endif /* CONFIG_ARM64_MODULE_PLTS */
index 1d3f0b5a99400757bf361c4127e43f38c3916dee..bd2b039f43a622d6ce190cef0643ac8fb12dbe2d 100644 (file)
@@ -14,9 +14,7 @@
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
                unsigned long attrs)
 {
-       if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
-               return pgprot_writecombine(prot);
-       return prot;
+       return pgprot_writecombine(prot);
 }
 
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
index 77f6ebf97113cea548e360c942b81041a7b10027..d8dcd8820369eb4be85e4493bf325779f3645e5f 100644 (file)
@@ -121,7 +121,6 @@ config PPC
        select ARCH_32BIT_OFF_T if PPC32
        select ARCH_HAS_DEBUG_VIRTUAL
        select ARCH_HAS_DEVMEM_IS_ALLOWED
-       select ARCH_HAS_DMA_MMAP_PGPROT
        select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_FORTIFY_SOURCE
        select ARCH_HAS_GCOV_PROFILE_ALL
index ea0c692367892381f4e2984efc26e474066627c2..56dfa7a2a6f2a09f7d3d3d385147f4b77714932c 100644 (file)
@@ -49,8 +49,7 @@ obj-y                         := cputable.o ptrace.o syscalls.o \
                                   signal.o sysfs.o cacheinfo.o time.o \
                                   prom.o traps.o setup-common.o \
                                   udbg.o misc.o io.o misc_$(BITS).o \
-                                  of_platform.o prom_parse.o \
-                                  dma-common.o
+                                  of_platform.o prom_parse.o
 obj-$(CONFIG_PPC64)            += setup_64.o sys_ppc32.o \
                                   signal_64.o ptrace32.o \
                                   paca.o nvram_64.o firmware.o
diff --git a/arch/powerpc/kernel/dma-common.c b/arch/powerpc/kernel/dma-common.c
deleted file mode 100644 (file)
index dc7ef6b..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Contains common dma routines for all powerpc platforms.
- *
- * Copyright (C) 2019 Shawn Anastasio.
- */
-
-#include <linux/mm.h>
-#include <linux/dma-noncoherent.h>
-
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
-               unsigned long attrs)
-{
-       if (!dev_is_dma_coherent(dev))
-               return pgprot_noncached(prot);
-       return prot;
-}
index 93205c0bf71df1b066f25abbaf8945ae0fbbe880..3efff552a261e2745bd0fd05dd873125d6877e19 100644 (file)
@@ -54,6 +54,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
 CONFIG_HVC_RISCV_SBI=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=y
 CONFIG_SPI=y
 CONFIG_SPI_SIFIVE=y
 # CONFIG_PTP_1588_CLOCK is not set
index d5449ef805a340eba735b3ac7a1688cbdcd2b30e..7da93e494445e10bfbc4e1c715d30a5a387011e2 100644 (file)
@@ -34,6 +34,7 @@ CONFIG_PCIEPORTBUS=y
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCIE_XILINX=y
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_SD=y
@@ -53,6 +54,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
 CONFIG_HVC_RISCV_SBI=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=y
 # CONFIG_PTP_1588_CLOCK is not set
 CONFIG_DRM=y
 CONFIG_DRM_RADEON=y
index 853b65ef656da42d52b5c3c46f9ac8f75ce8774d..f0227bdce0f0614925ae772dc80588b8828f2ec0 100644 (file)
@@ -16,7 +16,13 @@ extern void __fstate_restore(struct task_struct *restore_from);
 
 static inline void __fstate_clean(struct pt_regs *regs)
 {
-       regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
+       regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
+}
+
+static inline void fstate_off(struct task_struct *task,
+                             struct pt_regs *regs)
+{
+       regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_OFF;
 }
 
 static inline void fstate_save(struct task_struct *task,
index 687dd19735a7e6483afb98a319376706ed426cef..4d9bbe8438bf6a7cff301cfd2c6fec0d268a607c 100644 (file)
@@ -53,10 +53,17 @@ static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
 }
 
 #define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
-#define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0)
+
 #define flush_tlb_range(vma, start, end) \
        remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
-#define flush_tlb_mm(mm) \
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+                                 unsigned long addr)
+{
+       flush_tlb_range(vma, addr, addr + PAGE_SIZE);
+}
+
+#define flush_tlb_mm(mm)                               \
        remote_sfence_vma(mm_cpumask(mm), 0, -1)
 
 #endif /* CONFIG_SMP */
index f23794bd1e90c2f1dad70d1d9ed18507c2b58cd1..fb3a082362eb87554ff297f3e026d9663c5a8044 100644 (file)
@@ -64,8 +64,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
        unsigned long sp)
 {
        regs->sstatus = SR_SPIE;
-       if (has_fpu)
+       if (has_fpu) {
                regs->sstatus |= SR_FS_INITIAL;
+               /*
+                * Restore the initial value to the FP register
+                * before starting the user program.
+                */
+               fstate_restore(current, regs);
+       }
        regs->sepc = pc;
        regs->sp = sp;
        set_fs(USER_DS);
@@ -75,10 +81,11 @@ void flush_thread(void)
 {
 #ifdef CONFIG_FPU
        /*
-        * Reset FPU context
+        * Reset FPU state and context
         *      frm: round to nearest, ties to even (IEEE default)
         *      fflags: accrued exceptions cleared
         */
+       fstate_off(current, task_pt_regs(current));
        memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
 #endif
 }
index defebf1a9c8af223ad6ea4ed9af2f315acbc2202..845543780cc5501406f0817bf3312ef5aedd4bdb 100644 (file)
@@ -475,8 +475,6 @@ static void print_sh_insn(u32 memaddr, u16 insn)
                                printk("dbr");
                                break;
                        case FD_REG_N:
-                               if (0)
-                                       goto d_reg_n;
                        case F_REG_N:
                                printk("fr%d", rn);
                                break;
@@ -488,7 +486,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
                                        printk("xd%d", rn & ~1);
                                        break;
                                }
-                       d_reg_n:
+                               /* else, fall through */
                        case D_REG_N:
                                printk("dr%d", rn);
                                break;
@@ -497,6 +495,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
                                        printk("xd%d", rm & ~1);
                                        break;
                                }
+                               /* else, fall through */
                        case D_REG_M:
                                printk("dr%d", rm);
                                break;
index 3bd010b4c55f58e49f0e761693bab35dd070c36d..f10d64311127baa9aca0a80003fda2ed9aeecf3f 100644 (file)
@@ -157,6 +157,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
        switch (sh_type) {
        case SH_BREAKPOINT_READ:
                *gen_type = HW_BREAKPOINT_R;
+               break;
        case SH_BREAKPOINT_WRITE:
                *gen_type = HW_BREAKPOINT_W;
                break;
index 5cb8a62e091c77619081973fe488b04a79437dd7..7c3106093c7584e44daaa2e6af660afa05ff4132 100644 (file)
@@ -511,6 +511,7 @@ void cpu_reset(void)
                                      "add      %2, %2, %7\n\t"
                                      "addi     %0, %0, -1\n\t"
                                      "bnez     %0, 1b\n\t"
+                                     "isync\n\t"
                                      /* Jump to identity mapping */
                                      "jx       %3\n"
                                      "2:\n\t"
index dd61fdd400f05a0a9d5438241a52a7e6436e9e8f..68489d1f00bb33a275100e280bd68341aa31c8d1 100644 (file)
@@ -448,6 +448,11 @@ config PANEL_BOOT_MESSAGE
 choice
        prompt "Backlight initial state"
        default CHARLCD_BL_FLASH
+       ---help---
+         Select the initial backlight state on boot or module load.
+
+         Previously, there was no option for this: the backlight flashed
+         briefly on init. Now you can also turn it off/on.
 
        config CHARLCD_BL_OFF
                bool "Off"
index 92745efefb540e5d723f5bbaa3aefd11735b0f29..bef6b85778b6e2e04056fac1de5f1e9e56f834c4 100644 (file)
@@ -20,7 +20,7 @@
 
 #include <generated/utsrelease.h>
 
-#include <misc/charlcd.h>
+#include "charlcd.h"
 
 #define LCD_MINOR              156
 
similarity index 94%
rename from include/misc/charlcd.h
rename to drivers/auxdisplay/charlcd.h
index 8cf6c18b0adbd7abed3a0fd122f413a23ebc1665..00911ad0f3debeb15e735768b1683869b662b986 100644 (file)
@@ -6,6 +6,9 @@
  * Copyright (C) 2016-2017 Glider bvba
  */
 
+#ifndef _CHARLCD_H
+#define _CHARLCD_H
+
 struct charlcd {
        const struct charlcd_ops *ops;
        const unsigned char *char_conv; /* Optional */
@@ -37,3 +40,5 @@ int charlcd_register(struct charlcd *lcd);
 int charlcd_unregister(struct charlcd *lcd);
 
 void charlcd_poke(struct charlcd *lcd);
+
+#endif /* _CHARLCD_H */
index ab15b64707ad22ef429a7ee6911d72ec9a284bd5..bcbe1309232760235239d94479f23e076a76cd9f 100644 (file)
@@ -14,8 +14,7 @@
 #include <linux/property.h>
 #include <linux/slab.h>
 
-#include <misc/charlcd.h>
-
+#include "charlcd.h"
 
 enum hd44780_pin {
        /* Order does matter due to writing to GPIO array subsets! */
index e06de63497cf8f00edde8d2d7bcffa5b25cc8e81..85965953683e4cf78e58081c1f8eb17107a74df1 100644 (file)
@@ -55,7 +55,7 @@
 #include <linux/io.h>
 #include <linux/uaccess.h>
 
-#include <misc/charlcd.h>
+#include "charlcd.h"
 
 #define KEYPAD_MINOR           185
 
@@ -1617,6 +1617,8 @@ static void panel_attach(struct parport *port)
        return;
 
 err_lcd_unreg:
+       if (scan_timer.function)
+               del_timer_sync(&scan_timer);
        if (lcd.enabled)
                charlcd_unregister(lcd.charlcd);
 err_unreg_device:
index 8dda62367816f331420eedd58a7c686f0d68a04f..c28ebf2810f11508a1c36cb1660bdd949ef3aeff 100644 (file)
@@ -2528,7 +2528,7 @@ static int cpufreq_boost_set_sw(int state)
                }
 
                ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
-               if (ret)
+               if (ret < 0)
                        break;
        }
 
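The check above now treats only negative returns as errors. As a hedged aside (not part of the patch): dev_pm_qos_update_request() returns a negative errno on failure, 0 when the aggregate constraint is unchanged, and 1 when it changed, so "if (ret)" would wrongly abort on the harmless "value changed" case. A minimal sketch of that convention:

#include <linux/pm_qos.h>
#include <linux/types.h>

/*
 * Hedged sketch, not from the patch: only negative returns from
 * dev_pm_qos_update_request() indicate failure; 0 ("no change") and
 * 1 ("aggregate constraint changed") are both success.
 */
static int example_update_max(struct dev_pm_qos_request *req, s32 new_value)
{
        int ret = dev_pm_qos_update_request(req, new_value);

        return ret < 0 ? ret : 0;
}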
index b6cc90cbc9dc244734fd763a9ed34829419c994a..4e5f9f6e901baec4f3c87f9fd3ed0f7b5bb5874b 100644 (file)
@@ -50,7 +50,7 @@ struct dw_edma_burst {
 
 struct dw_edma_region {
        phys_addr_t                     paddr;
-       dma_addr_t                      vaddr;
+       void                            __iomem *vaddr;
        size_t                          sz;
 };
 
index 4c96e1c948f2be68489b264b69f488523f520995..dc85f55e1bb8ce3c3d9410d181c7362f9801ed7e 100644 (file)
@@ -130,19 +130,19 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
        chip->id = pdev->devfn;
        chip->irq = pdev->irq;
 
-       dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar];
+       dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar];
        dw->rg_region.vaddr += pdata->rg_off;
        dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start;
        dw->rg_region.paddr += pdata->rg_off;
        dw->rg_region.sz = pdata->rg_sz;
 
-       dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar];
+       dw->ll_region.vaddr = pcim_iomap_table(pdev)[pdata->ll_bar];
        dw->ll_region.vaddr += pdata->ll_off;
        dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start;
        dw->ll_region.paddr += pdata->ll_off;
        dw->ll_region.sz = pdata->ll_sz;
 
-       dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar];
+       dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar];
        dw->dt_region.vaddr += pdata->dt_off;
        dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start;
        dw->dt_region.paddr += pdata->dt_off;
@@ -158,17 +158,17 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
        pci_dbg(pdev, "Mode:\t%s\n",
                dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll");
 
-       pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+       pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
                pdata->rg_bar, pdata->rg_off, pdata->rg_sz,
-               &dw->rg_region.vaddr, &dw->rg_region.paddr);
+               dw->rg_region.vaddr, &dw->rg_region.paddr);
 
-       pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+       pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
                pdata->ll_bar, pdata->ll_off, pdata->ll_sz,
-               &dw->ll_region.vaddr, &dw->ll_region.paddr);
+               dw->ll_region.vaddr, &dw->ll_region.paddr);
 
-       pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+       pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
                pdata->dt_bar, pdata->dt_off, pdata->dt_sz,
-               &dw->dt_region.vaddr, &dw->dt_region.paddr);
+               dw->dt_region.vaddr, &dw->dt_region.paddr);
 
        pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
 
index 8a3180ed49a60940492d5c32f31518618e8ff97a..692de47b16701dc7b9f75aefa657618a352149d5 100644 (file)
@@ -25,7 +25,7 @@ enum dw_edma_control {
 
 static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
 {
-       return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr;
+       return dw->rg_region.vaddr;
 }
 
 #define SET(dw, name, value)                           \
@@ -192,13 +192,12 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
 static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 {
        struct dw_edma_burst *child;
-       struct dw_edma_v0_lli *lli;
-       struct dw_edma_v0_llp *llp;
+       struct dw_edma_v0_lli __iomem *lli;
+       struct dw_edma_v0_llp __iomem *llp;
        u32 control = 0, i = 0;
-       u64 sar, dar, addr;
        int j;
 
-       lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr;
+       lli = chunk->ll_region.vaddr;
 
        if (chunk->cb)
                control = DW_EDMA_V0_CB;
@@ -214,17 +213,15 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
                /* Transfer size */
                SET_LL(&lli[i].transfer_size, child->sz);
                /* SAR - low, high */
-               sar = cpu_to_le64(child->sar);
-               SET_LL(&lli[i].sar_low, lower_32_bits(sar));
-               SET_LL(&lli[i].sar_high, upper_32_bits(sar));
+               SET_LL(&lli[i].sar_low, lower_32_bits(child->sar));
+               SET_LL(&lli[i].sar_high, upper_32_bits(child->sar));
                /* DAR - low, high */
-               dar = cpu_to_le64(child->dar);
-               SET_LL(&lli[i].dar_low, lower_32_bits(dar));
-               SET_LL(&lli[i].dar_high, upper_32_bits(dar));
+               SET_LL(&lli[i].dar_low, lower_32_bits(child->dar));
+               SET_LL(&lli[i].dar_high, upper_32_bits(child->dar));
                i++;
        }
 
-       llp = (struct dw_edma_v0_llp *)&lli[i];
+       llp = (void __iomem *)&lli[i];
        control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
        if (!chunk->cb)
                control |= DW_EDMA_V0_CB;
@@ -232,9 +229,8 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
        /* Channel control */
        SET_LL(&llp->control, control);
        /* Linked list  - low, high */
-       addr = cpu_to_le64(chunk->ll_region.paddr);
-       SET_LL(&llp->llp_low, lower_32_bits(addr));
-       SET_LL(&llp->llp_high, upper_32_bits(addr));
+       SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr));
+       SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr));
 }
 
 void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
@@ -242,7 +238,6 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
        struct dw_edma_chan *chan = chunk->chan;
        struct dw_edma *dw = chan->chip->dw;
        u32 tmp;
-       u64 llp;
 
        dw_edma_v0_core_write_chunk(chunk);
 
@@ -262,9 +257,10 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
                SET_CH(dw, chan->dir, chan->id, ch_control1,
                       (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
                /* Linked list - low, high */
-               llp = cpu_to_le64(chunk->ll_region.paddr);
-               SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp));
-               SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp));
+               SET_CH(dw, chan->dir, chan->id, llp_low,
+                      lower_32_bits(chunk->ll_region.paddr));
+               SET_CH(dw, chan->dir, chan->id, llp_high,
+                      upper_32_bits(chunk->ll_region.paddr));
        }
        /* Doorbell */
        SET_RW(dw, chan->dir, doorbell,
index 3226f528cc11395409ae503f45ce9816ead6d8bc..42739508c0d854c669ea3f40baf2c0e8e61a59c9 100644 (file)
@@ -14,7 +14,7 @@
 #include "dw-edma-core.h"
 
 #define REGS_ADDR(name) \
-       ((dma_addr_t *)&regs->name)
+       ((void __force *)&regs->name)
 #define REGISTER(name) \
        { #name, REGS_ADDR(name) }
 
 
 static struct dentry                           *base_dir;
 static struct dw_edma                          *dw;
-static struct dw_edma_v0_regs                  *regs;
+static struct dw_edma_v0_regs                  __iomem *regs;
 
 static struct {
-       void                                    *start;
-       void                                    *end;
+       void                                    __iomem *start;
+       void                                    __iomem *end;
 } lim[2][EDMA_V0_MAX_NR_CH];
 
 struct debugfs_entries {
-       char                                    name[24];
+       const char                              *name;
        dma_addr_t                              *reg;
 };
 
 static int dw_edma_debugfs_u32_get(void *data, u64 *val)
 {
+       void __iomem *reg = (void __force __iomem *)data;
        if (dw->mode == EDMA_MODE_LEGACY &&
-           data >= (void *)&regs->type.legacy.ch) {
-               void *ptr = (void *)&regs->type.legacy.ch;
+           reg >= (void __iomem *)&regs->type.legacy.ch) {
+               void __iomem *ptr = &regs->type.legacy.ch;
                u32 viewport_sel = 0;
                unsigned long flags;
                u16 ch;
 
                for (ch = 0; ch < dw->wr_ch_cnt; ch++)
-                       if (lim[0][ch].start >= data && data < lim[0][ch].end) {
-                               ptr += (data - lim[0][ch].start);
+                       if (lim[0][ch].start >= reg && reg < lim[0][ch].end) {
+                               ptr += (reg - lim[0][ch].start);
                                goto legacy_sel_wr;
                        }
 
                for (ch = 0; ch < dw->rd_ch_cnt; ch++)
-                       if (lim[1][ch].start >= data && data < lim[1][ch].end) {
-                               ptr += (data - lim[1][ch].start);
+                       if (lim[1][ch].start >= reg && reg < lim[1][ch].end) {
+                               ptr += (reg - lim[1][ch].start);
                                goto legacy_sel_rd;
                        }
 
@@ -86,7 +87,7 @@ legacy_sel_wr:
 
                raw_spin_unlock_irqrestore(&dw->lock, flags);
        } else {
-               *val = readl(data);
+               *val = readl(reg);
        }
 
        return 0;
@@ -105,7 +106,7 @@ static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
        }
 }
 
-static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs,
+static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
                                    struct dentry *dir)
 {
        int nr_entries;
@@ -288,7 +289,7 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
        if (!dw)
                return;
 
-       regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr;
+       regs = dw->rg_region.vaddr;
        if (!regs)
                return;
 
index 89d710899010d3d804a2acc3d92d07f287366d4c..de8bfd9a76e9ebc87d678002de85096b85d80810 100644 (file)
@@ -142,7 +142,7 @@ enum d40_events {
  * when the DMA hw is powered off.
  * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
  */
-static u32 d40_backup_regs[] = {
+static __maybe_unused u32 d40_backup_regs[] = {
        D40_DREG_LCPA,
        D40_DREG_LCLA,
        D40_DREG_PRMSE,
@@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {
 
 #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
 
-static u32 d40_backup_regs_chan[] = {
+static __maybe_unused u32 d40_backup_regs_chan[] = {
        D40_CHAN_REG_SSCFG,
        D40_CHAN_REG_SSELT,
        D40_CHAN_REG_SSPTR,
index d6e919d3936a295e75271329035f02ad7239e076..1311de74bfdde5f817937fd2a929a1e02ce00c0f 100644 (file)
@@ -1366,7 +1366,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
 
        chan = &dmadev->chan[id];
        if (!chan) {
-               dev_err(chan2dev(chan), "MDMA channel not initialized\n");
+               dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
                goto exit;
        }
 
index 2805853e963fcf6f9076e8360ebd519585efa94e..b33cf6e8ab8ef85b67a885fafa8161ee1c989d68 100644 (file)
@@ -712,7 +712,7 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
        return chan;
 }
 
-static int tegra_adma_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
 {
        struct tegra_adma *tdma = dev_get_drvdata(dev);
        struct tegra_adma_chan_regs *ch_reg;
@@ -744,7 +744,7 @@ clk_disable:
        return 0;
 }
 
-static int tegra_adma_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
 {
        struct tegra_adma *tdma = dev_get_drvdata(dev);
        struct tegra_adma_chan_regs *ch_reg;
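Both DMA hunks above (ste_dma40 and tegra210-adma) mark objects and callbacks __maybe_unused because they are only referenced under certain configurations. A hedged sketch of the idiom, with hypothetical names:

#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/pm.h>

/*
 * Hedged sketch with hypothetical names: __maybe_unused suppresses the
 * "defined but not used" warning when, e.g., CONFIG_PM is disabled and
 * SET_RUNTIME_PM_OPS() expands to nothing, leaving the callbacks
 * unreferenced.
 */
static int __maybe_unused example_runtime_suspend(struct device *dev)
{
        /* save controller state here */
        return 0;
}

static int __maybe_unused example_runtime_resume(struct device *dev)
{
        /* restore controller state here */
        return 0;
}

static const struct dev_pm_ops example_pm_ops = {
        SET_RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
};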
index ba2489d4ea246604ade12f48bf1e077e1ccd30e6..ba27802efcd0a900609893931e2f2fc4555dac0c 100644 (file)
@@ -1234,7 +1234,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
        if (src_icg) {
                d->ccr |= CCR_SRC_AMODE_DBLIDX;
                d->ei = 1;
-               d->fi = src_icg;
+               d->fi = src_icg + 1;
        } else if (xt->src_inc) {
                d->ccr |= CCR_SRC_AMODE_POSTINC;
                d->fi = 0;
@@ -1249,7 +1249,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
        if (dst_icg) {
                d->ccr |= CCR_DST_AMODE_DBLIDX;
                sg->ei = 1;
-               sg->fi = dst_icg;
+               sg->fi = dst_icg + 1;
        } else if (xt->dst_inc) {
                d->ccr |= CCR_DST_AMODE_POSTINC;
                sg->fi = 0;
index 1cf639a511783c9cb1280f86a6ddbc3a5577c349..04b8ac4432c700ca92d877879c664862ab20ffaf 100644 (file)
@@ -4869,7 +4869,7 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
        value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
        value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
        value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
-       WREG32(mmSQ_CMD, value);
+       WREG32_SOC15(GC, 0, mmSQ_CMD, value);
 }
 
 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
index fa20201eef3a8b03765a03158865bca006edcba9..cbc480a333764130878e64b880ef2073a1b440ff 100644 (file)
@@ -23,6 +23,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/mm.h>
 
 #include "dm_services.h"
 
@@ -1171,8 +1172,8 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
 
 struct dc_state *dc_create_state(struct dc *dc)
 {
-       struct dc_state *context = kzalloc(sizeof(struct dc_state),
-                                          GFP_KERNEL);
+       struct dc_state *context = kvzalloc(sizeof(struct dc_state),
+                                           GFP_KERNEL);
 
        if (!context)
                return NULL;
@@ -1192,11 +1193,11 @@ struct dc_state *dc_create_state(struct dc *dc)
 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
 {
        int i, j;
-       struct dc_state *new_ctx = kmemdup(src_ctx,
-                       sizeof(struct dc_state), GFP_KERNEL);
+       struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
 
        if (!new_ctx)
                return NULL;
+       memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
 
        for (i = 0; i < MAX_PIPES; i++) {
                        struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
@@ -1230,7 +1231,7 @@ static void dc_state_free(struct kref *kref)
 {
        struct dc_state *context = container_of(kref, struct dc_state, refcount);
        dc_resource_state_destruct(context);
-       kfree(context);
+       kvfree(context);
 }
 
 void dc_release_state(struct dc_state *context)
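The dc_state hunks above replace kzalloc()/kmemdup()/kfree() with the kv* allocation helpers. A hedged sketch of why, using a hypothetical structure: kvzalloc() tries kmalloc() first and falls back to vzalloc() for sizes that no longer fit a physically contiguous allocation, and anything obtained this way must be freed with kvfree().

#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical large state object, only for illustration. */
struct example_state {
        u8 blob[256 * 1024];
};

/*
 * Hedged sketch, not from the patch: kvzalloc() avoids spurious
 * allocation failures for large objects on fragmented systems; the
 * matching release helper is kvfree().
 */
static struct example_state *example_state_create(void)
{
        return kvzalloc(sizeof(struct example_state), GFP_KERNEL);
}

static void example_state_destroy(struct example_state *s)
{
        kvfree(s);
}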
index 4c7e31cb45ffaf14073ab3ccf286dee27823f9d6..a5d1494a3dc44b853d6b19472305341c18c504fd 100644 (file)
@@ -131,8 +131,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 
 
        /* Enable extended register access */
-       ast_enable_mmio(dev);
        ast_open_key(ast);
+       ast_enable_mmio(dev);
 
        /* Find out whether P2A works or whether to use device-tree */
        ast_detect_config_mode(dev, &scu_rev);
@@ -576,6 +576,9 @@ void ast_driver_unload(struct drm_device *dev)
 {
        struct ast_private *ast = dev->dev_private;
 
+       /* enable standard VGA decode */
+       ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
+
        ast_release_firmware(dev);
        kfree(ast->dp501_fw_addr);
        ast_mode_fini(dev);
index ffccbef962a41d44d331e1b8da970a0246483b2b..a1cb020e07e502197428bc0fca447c650c381110 100644 (file)
@@ -604,7 +604,7 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc,
                return -EINVAL;
        ast_open_key(ast);
 
-       ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+       ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
 
        ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
        ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
index f7d421359d564756ff86d78c64293e37eea74c7e..c1d1ac51d1c207c0cb0b2f08825aa19ca7761bde 100644 (file)
@@ -46,7 +46,7 @@ void ast_enable_mmio(struct drm_device *dev)
 {
        struct ast_private *ast = dev->dev_private;
 
-       ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+       ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
 }
 
 
index 9f3fd7d96a694a6fe890978dd19e3c38160965e4..75baff657e4331f28fc58d938c7a9de7087e0fcc 100644 (file)
@@ -1528,9 +1528,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
                        if (!intel_gvt_ggtt_validate_range(vgpu,
                                workload->wa_ctx.indirect_ctx.guest_gma,
                                workload->wa_ctx.indirect_ctx.size)) {
-                               kmem_cache_free(s->workloads, workload);
                                gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
                                    workload->wa_ctx.indirect_ctx.guest_gma);
+                               kmem_cache_free(s->workloads, workload);
                                return ERR_PTR(-EINVAL);
                        }
                }
@@ -1542,9 +1542,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
                        if (!intel_gvt_ggtt_validate_range(vgpu,
                                workload->wa_ctx.per_ctx.guest_gma,
                                CACHELINE_BYTES)) {
-                               kmem_cache_free(s->workloads, workload);
                                gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
                                        workload->wa_ctx.per_ctx.guest_gma);
+                               kmem_cache_free(s->workloads, workload);
                                return ERR_PTR(-EINVAL);
                        }
                }
index 126703816794e77a521437271e09a34a09015f4b..5c36c75232e6d0364f4d90419b7a7010da8985e9 100644 (file)
@@ -771,16 +771,20 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
        struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
        int slots;
 
-       /* When restoring duplicated states, we need to make sure that the
-        * bw remains the same and avoid recalculating it, as the connector's
-        * bpc may have changed after the state was duplicated
-        */
-       if (!state->duplicated)
-               asyh->dp.pbn =
-                       drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
-                                            connector->display_info.bpc * 3);
+       if (crtc_state->mode_changed || crtc_state->connectors_changed) {
+               /*
+                * When restoring duplicated states, we need to make sure that
+                * the bw remains the same and avoid recalculating it, as the
+                * connector's bpc may have changed after the state was
+                * duplicated
+                */
+               if (!state->duplicated) {
+                       const int bpp = connector->display_info.bpc * 3;
+                       const int clock = crtc_state->adjusted_mode.clock;
+
+                       asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp);
+               }
 
-       if (crtc_state->mode_changed) {
                slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
                                                      mstc->port,
                                                      asyh->dp.pbn);
index 35ddbec1375ae880bba180a4d7f46173b38e2ec2..671c90f34ede6ba6ed6eb40e3159243cf142e7be 100644 (file)
@@ -95,7 +95,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
        rmb(); /* for list_empty to work without lock */
 
        if (list_empty(&entity->list) ||
-           spsc_queue_peek(&entity->job_queue) == NULL)
+           spsc_queue_count(&entity->job_queue) == 0)
                return true;
 
        return false;
@@ -281,7 +281,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
        /* Consumption of existing IBs wasn't completed. Forcefully
         * remove them here.
         */
-       if (spsc_queue_peek(&entity->job_queue)) {
+       if (spsc_queue_count(&entity->job_queue)) {
                if (sched) {
                        /* Park the kernel for a moment to make sure it isn't processing
                         * our enity.
index 45d5164e9574af247ae3bb53dd63d9791957ae18..b79890739a2c6b14f0d653c1e6e5a61ec3747680 100644 (file)
@@ -38,6 +38,9 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
        int ret;
 
        port_counter = &dev->port_data[port].port_counter;
+       if (!port_counter->hstats)
+               return -EOPNOTSUPP;
+
        mutex_lock(&port_counter->lock);
        if (on) {
                ret = __counter_set_mode(&port_counter->mode,
@@ -509,6 +512,9 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
        if (!rdma_is_port_valid(dev, port))
                return -EINVAL;
 
+       if (!dev->port_data[port].port_counter.hstats)
+               return -EOPNOTSUPP;
+
        qp = rdma_counter_get_qp(dev, qp_num);
        if (!qp)
                return -ENOENT;
index 783e465e7c412988903088911fa655c4aa169346..87d40d1ecdde8064ae36b03dce5963c3e9a390bc 100644 (file)
@@ -1952,12 +1952,16 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        if (fill_nldev_handle(msg, device) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
-           nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode))
+           nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
+               ret = -EMSGSIZE;
                goto err_msg;
+       }
 
        if ((mode == RDMA_COUNTER_MODE_AUTO) &&
-           nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask))
+           nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
+               ret = -EMSGSIZE;
                goto err_msg;
+       }
 
        nlmsg_end(msg, nlh);
        ib_device_put(device);
index 2a75c6f8d8270e5ca07610684fbe9e5156f72708..c0e15db346808db3efc8222eddb14abc1212be05 100644 (file)
@@ -112,10 +112,6 @@ static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
         * prevent any further fault handling on this MR.
         */
        ib_umem_notifier_start_account(umem_odp);
-       umem_odp->dying = 1;
-       /* Make sure that the fact the umem is dying is out before we release
-        * all pending page faults. */
-       smp_wmb();
        complete_all(&umem_odp->notifier_completion);
        umem_odp->umem.context->invalidate_range(
                umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp));
index ec4370f9938127f94d70533f2ebf8cc9f44b48a2..af5bbb35c0589364794ca131a1e741392cfe5931 100644 (file)
@@ -2026,7 +2026,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
                        event_sub->eventfd =
                                eventfd_ctx_fdget(redirect_fd);
 
-                       if (IS_ERR(event_sub)) {
+                       if (IS_ERR(event_sub->eventfd)) {
                                err = PTR_ERR(event_sub->eventfd);
                                event_sub->eventfd = NULL;
                                goto err;
@@ -2644,12 +2644,13 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
        struct devx_async_event_file *ev_file = filp->private_data;
        struct devx_event_subscription *event_sub, *event_sub_tmp;
        struct devx_async_event_data *entry, *tmp;
+       struct mlx5_ib_dev *dev = ev_file->dev;
 
-       mutex_lock(&ev_file->dev->devx_event_table.event_xa_lock);
+       mutex_lock(&dev->devx_event_table.event_xa_lock);
        /* delete the subscriptions which are related to this FD */
        list_for_each_entry_safe(event_sub, event_sub_tmp,
                                 &ev_file->subscribed_events_list, file_list) {
-               devx_cleanup_subscription(ev_file->dev, event_sub);
+               devx_cleanup_subscription(dev, event_sub);
                if (event_sub->eventfd)
                        eventfd_ctx_put(event_sub->eventfd);
 
@@ -2658,7 +2659,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
                kfree_rcu(event_sub, rcu);
        }
 
-       mutex_unlock(&ev_file->dev->devx_event_table.event_xa_lock);
+       mutex_unlock(&dev->devx_event_table.event_xa_lock);
 
        /* free the pending events allocation */
        if (!ev_file->omit_data) {
@@ -2670,7 +2671,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
        }
 
        uverbs_close_fd(filp);
-       put_device(&ev_file->dev->ib_dev.dev);
+       put_device(&dev->ib_dev.dev);
        return 0;
 }
 
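The first devx hunk fixes IS_ERR() being applied to the containing structure instead of the pointer that eventfd_ctx_fdget() actually returned. A hedged sketch of the ERR_PTR convention involved, with a hypothetical wrapper:

#include <linux/err.h>
#include <linux/eventfd.h>

/*
 * Hedged sketch, hypothetical helper: eventfd_ctx_fdget() returns
 * either a valid context or an ERR_PTR()-encoded errno, so
 * IS_ERR()/PTR_ERR() must be applied to that returned pointer itself.
 */
static int example_attach_eventfd(struct eventfd_ctx **ctxp, int fd)
{
        struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);

        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        *ctxp = ctx;
        return 0;
}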
index 81da82050d05ec2579bcd4145c2902a55b488cd7..1d257d1b3b0da0b1a1035a469100ab6d613b54f6 100644 (file)
@@ -579,7 +579,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
                        u32 flags)
 {
        int npages = 0, current_seq, page_shift, ret, np;
-       bool implicit = false;
        struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
        bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
        bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
@@ -594,7 +593,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
                if (IS_ERR(odp))
                        return PTR_ERR(odp);
                mr = odp->private;
-               implicit = true;
        } else {
                odp = odp_mr;
        }
@@ -682,19 +680,15 @@ next_mr:
 
 out:
        if (ret == -EAGAIN) {
-               if (implicit || !odp->dying) {
-                       unsigned long timeout =
-                               msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
-
-                       if (!wait_for_completion_timeout(
-                                       &odp->notifier_completion,
-                                       timeout)) {
-                               mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
-                                            current_seq, odp->notifiers_seq, odp->notifiers_count);
-                       }
-               } else {
-                       /* The MR is being killed, kill the QP as well. */
-                       ret = -EFAULT;
+               unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
+
+               if (!wait_for_completion_timeout(&odp->notifier_completion,
+                                                timeout)) {
+                       mlx5_ib_warn(
+                               dev,
+                               "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
+                               current_seq, odp->notifiers_seq,
+                               odp->notifiers_count);
                }
        }
 
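The simplified retry path above always waits on the notifier completion with a timeout. As a hedged reminder of the return convention (not from the patch): wait_for_completion_timeout() returns 0 on timeout and otherwise the number of jiffies remaining (at least 1).

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/*
 * Hedged sketch, hypothetical helper: a zero return is the only
 * timeout case; any non-zero value means the completion fired.
 */
static int example_wait_for_event(struct completion *done, unsigned int ms)
{
        unsigned long left;

        left = wait_for_completion_timeout(done, msecs_to_jiffies(ms));
        return left ? 0 : -ETIMEDOUT;
}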
index dace276aea1413baa46f292a7e522381c6f00250..b622fc62f2cd6d4699cb9a1ed6ead9e44cd6bee2 100644 (file)
@@ -1,6 +1,6 @@
 config RDMA_SIW
        tristate "Software RDMA over TCP/IP (iWARP) driver"
-       depends on INET && INFINIBAND && LIBCRC32C && 64BIT
+       depends on INET && INFINIBAND && LIBCRC32C
        select DMA_VIRT_OPS
        help
        This driver implements the iWARP RDMA transport over
index 03fd7b2f595f1f35c5a535e068a60913d5b879ed..77b1aabf6ff357245ea7b3c3b04ee570320eb9b1 100644 (file)
@@ -214,7 +214,7 @@ struct siw_wqe {
 struct siw_cq {
        struct ib_cq base_cq;
        spinlock_t lock;
-       u64 *notify;
+       struct siw_cq_ctrl *notify;
        struct siw_cqe *queue;
        u32 cq_put;
        u32 cq_get;
index d0f140daf65924287833ad03716c4a26e8fb8058..05a92f997f603bfbc64138ae2bdda31486baa36c 100644 (file)
@@ -160,10 +160,8 @@ static int siw_init_cpulist(void)
 
 out_err:
        siw_cpu_info.num_nodes = 0;
-       while (i) {
+       while (--i >= 0)
                kfree(siw_cpu_info.tx_valid_cpus[i]);
-               siw_cpu_info.tx_valid_cpus[i--] = NULL;
-       }
        kfree(siw_cpu_info.tx_valid_cpus);
        siw_cpu_info.tx_valid_cpus = NULL;
 
index e27bd5b35b966280e5ecfc2c2cc2affbc75ec597..0990307c5d2cde951d646b2d06e990670f44a709 100644 (file)
@@ -1013,18 +1013,24 @@ out:
  */
 static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags)
 {
-       u64 cq_notify;
+       u32 cq_notify;
 
        if (!cq->base_cq.comp_handler)
                return false;
 
-       cq_notify = READ_ONCE(*cq->notify);
+       /* Read application shared notification state */
+       cq_notify = READ_ONCE(cq->notify->flags);
 
        if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) ||
            ((cq_notify & SIW_NOTIFY_SOLICITED) &&
             (flags & SIW_WQE_SOLICITED))) {
-               /* dis-arm CQ */
-               smp_store_mb(*cq->notify, SIW_NOTIFY_NOT);
+               /*
+                * CQ notification is one-shot: Since the
+                * current CQE causes user notification,
+                * the CQ gets dis-armed and must be re-armed
+                * by the user for a new notification.
+                */
+               WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);
 
                return true;
        }
index 32dc79d0e8981770ddf2069f69b286adc5116b3d..e7f3a2379d9d87858ddb83bd7abce843ed330a7a 100644 (file)
@@ -1049,7 +1049,7 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
 
        spin_lock_init(&cq->lock);
 
-       cq->notify = &((struct siw_cq_ctrl *)&cq->queue[size])->notify;
+       cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];
 
        if (udata) {
                struct siw_uresp_create_cq uresp = {};
@@ -1141,11 +1141,17 @@ int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags)
        siw_dbg_cq(cq, "flags: 0x%02x\n", flags);
 
        if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
-               /* CQ event for next solicited completion */
-               smp_store_mb(*cq->notify, SIW_NOTIFY_SOLICITED);
+               /*
+                * Enable CQ event for the next solicited completion
+                * and make it visible to all associated producers.
+                */
+               smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
        else
-               /* CQ event for any signalled completion */
-               smp_store_mb(*cq->notify, SIW_NOTIFY_ALL);
+               /*
+                * Enable CQ event for any signalled completion
+                * and make it visible to all associated producers.
+                */
+               smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);
 
        if (flags & IB_CQ_REPORT_MISSED_EVENTS)
                return cq->cq_put - cq->cq_get;
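The two siw hunks above give the shared notification word a proper type and spell out the one-shot arm/disarm protocol: the CQ owner arms the flag with smp_store_mb() so the store is visible before further completions are inspected, and the completion path samples it with READ_ONCE() and clears it with WRITE_ONCE() once it decides to raise an event. A hedged sketch of that pattern, with hypothetical names and ignoring the serialization the real driver relies on:

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>

/* Hypothetical shared control word, analogous to a CQ "armed" flag. */
struct example_ctrl {
        u32 flags;
};

#define EXAMPLE_ARMED   0x1

/* Consumer side: arm once, with a full barrier after the store. */
static void example_arm(struct example_ctrl *ctrl)
{
        smp_store_mb(ctrl->flags, EXAMPLE_ARMED);
}

/* Producer side: notify at most once per arming. */
static bool example_should_notify(struct example_ctrl *ctrl)
{
        if (READ_ONCE(ctrl->flags) & EXAMPLE_ARMED) {
                WRITE_ONCE(ctrl->flags, 0);     /* disarmed until re-armed */
                return true;
        }
        return false;
}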
index a9a9fabd396804a26b77039d0fe804cea3dc56dc..c5c93e48b4dbdf7409ca3b8008a24a4ab3214a49 100644 (file)
@@ -1186,8 +1186,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
                        ste_live = true;
                        break;
                case STRTAB_STE_0_CFG_ABORT:
-                       if (disable_bypass)
-                               break;
+                       BUG_ON(!disable_bypass);
+                       break;
                default:
                        BUG(); /* STE corruption */
                }
index a7f9c3edbcb299f83f8d4c6093c274a0bc5b3387..d991d40f797fb62e9f6659060c1e92d4abb3bddc 100644 (file)
@@ -459,13 +459,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 {
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
-       size_t iova_off = 0;
+       struct iova_domain *iovad = &cookie->iovad;
+       size_t iova_off = iova_offset(iovad, phys);
        dma_addr_t iova;
 
-       if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
-               iova_off = iova_offset(&cookie->iovad, phys);
-               size = iova_align(&cookie->iovad, size + iova_off);
-       }
+       size = iova_align(iovad, size + iova_off);
 
        iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
        if (!iova)
@@ -574,7 +572,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
        struct iova_domain *iovad = &cookie->iovad;
        bool coherent = dev_is_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
-       pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+       pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
        unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
        struct page **pages;
        struct sg_table sgt;
@@ -764,7 +762,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
                 * - and wouldn't make the resulting output segment too long
                 */
                if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
-                   (cur_len + s_length <= max_len)) {
+                   (max_len - cur_len >= s_length)) {
                        /* ...then concatenate it with the previous one */
                        cur_len += s_length;
                } else {
@@ -975,7 +973,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
                return NULL;
 
        if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
-               pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+               pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 
                cpu_addr = dma_common_contiguous_remap(page, alloc_size,
                                VM_USERMAP, prot, __builtin_return_address(0));
@@ -1035,7 +1033,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
        unsigned long pfn, off = vma->vm_pgoff;
        int ret;
 
-       vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+       vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
 
        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
@@ -1147,16 +1145,21 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
        if (!msi_page)
                return NULL;
 
-       iova = __iommu_dma_map(dev, msi_addr, size, prot);
-       if (iova == DMA_MAPPING_ERROR)
+       iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+       if (!iova)
                goto out_free_page;
 
+       if (iommu_map(domain, iova, msi_addr, size, prot))
+               goto out_free_iova;
+
        INIT_LIST_HEAD(&msi_page->list);
        msi_page->phys = msi_addr;
        msi_page->iova = iova;
        list_add(&msi_page->list, &cookie->msi_page_list);
        return msi_page;
 
+out_free_iova:
+       iommu_dma_free_iova(cookie, iova, size);
 out_free_page:
        kfree(msi_page);
        return NULL;
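
The rewritten segment-length check in __finalise_sg() above avoids unsigned wrap-around: cur_len + s_length can overflow and then compare as a small value, while max_len - cur_len cannot wrap as long as cur_len never exceeds max_len, which the surrounding code maintains. A small stand-alone sketch of the difference (not part of the patch, values arbitrary):

/* Stand-alone illustration of why the comparison was rewritten. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	size_t max_len  = SIZE_MAX;      /* e.g. no real segment-size limit */
	size_t cur_len  = SIZE_MAX - 16; /* output segment built up so far  */
	size_t s_length = 64;            /* next chunk to concatenate       */

	/* old check: the addition wraps to 47, so it claims the chunk fits */
	printf("old check says fits: %d\n", cur_len + s_length <= max_len);

	/* new check: the subtraction cannot wrap here, so it correctly says no */
	printf("new check says fits: %d\n", max_len - cur_len >= s_length);
	return 0;
}
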
index 2b25d9c5933604eb70d6a83a7e1a91daa10e29d7..471f05d452e0198f3d2837f2a83e13d0984e1a12 100644 (file)
@@ -235,7 +235,7 @@ static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
                tbl_wlk.ctx_entry = context;
                m->private = &tbl_wlk;
 
-               if (pasid_supported(iommu) && is_pasid_enabled(context)) {
+               if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) {
                        pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
                        pasid_dir_size = get_pasid_dir_size(context);
                        pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
index bdaed2da8a55e5a96904d0a182b9d44db8843777..12d094d08c0a2e982fb712b9310ceadf7213013d 100644 (file)
@@ -3449,6 +3449,7 @@ static bool iommu_need_mapping(struct device *dev)
                                dmar_domain = to_dmar_domain(domain);
                                dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
                        }
+                       dmar_remove_one_dev_info(dev);
                        get_private_domain_for_dev(dev);
                }
 
@@ -4790,7 +4791,8 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 
        /* free the private domain */
        if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
-           !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY))
+           !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
+           list_empty(&domain->devices))
                domain_exit(info->domain);
 
        free_devinfo_mem(info);
@@ -4803,7 +4805,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
 
        spin_lock_irqsave(&device_domain_lock, flags);
        info = dev->archdata.iommu;
-       __dmar_remove_one_dev_info(info);
+       if (info)
+               __dmar_remove_one_dev_info(info);
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
@@ -5281,6 +5284,7 @@ static int intel_iommu_add_device(struct device *dev)
                if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
                        ret = iommu_request_dm_for_dev(dev);
                        if (ret) {
+                               dmar_remove_one_dev_info(dev);
                                dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
                                domain_add_dev_info(si_domain, dev);
                                dev_info(dev,
@@ -5291,6 +5295,7 @@ static int intel_iommu_add_device(struct device *dev)
                if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
                        ret = iommu_request_dma_domain_for_dev(dev);
                        if (ret) {
+                               dmar_remove_one_dev_info(dev);
                                dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
                                if (!get_private_domain_for_dev(dev)) {
                                        dev_warn(dev,
@@ -5316,6 +5321,8 @@ static void intel_iommu_remove_device(struct device *dev)
        if (!iommu)
                return;
 
+       dmar_remove_one_dev_info(dev);
+
        iommu_group_remove_device(dev);
 
        iommu_device_unlink(&iommu->iommu, dev);
index 29e3f5da59c1ff61137f66d93b75b865521ed5e1..11ec048929e80109e6702249aea0b345e79378d3 100644 (file)
@@ -253,8 +253,7 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
         */
 
        pixsize = vout->bpp * vout->vrfb_bpp;
-       dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) -
-                 (vout->pix.width * vout->bpp)) + 1;
+       dst_icg = MAX_PIXELS_PER_LINE * pixsize - vout->pix.width * vout->bpp;
 
        xt->src_start = vout->buf_phy_addr[vb->i];
        xt->dst_start = vout->vrfb_context[vb->i].paddr[0];
index db160cee42ad178e02b11cc15bb0229a9417f118..108e109e99f1aceddce3c535c3e25079752204bf 100644 (file)
@@ -2846,7 +2846,7 @@ static int nvme_resume(struct device *dev)
        struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
        struct nvme_ctrl *ctrl = &ndev->ctrl;
 
-       if (pm_resume_via_firmware() || !ctrl->npss ||
+       if (ndev->last_ps == U32_MAX ||
            nvme_set_power_state(ctrl, ndev->last_ps) != 0)
                nvme_reset_ctrl(ctrl);
        return 0;
@@ -2859,6 +2859,8 @@ static int nvme_suspend(struct device *dev)
        struct nvme_ctrl *ctrl = &ndev->ctrl;
        int ret = -EBUSY;
 
+       ndev->last_ps = U32_MAX;
+
        /*
         * The platform does not remove power for a kernel managed suspend so
         * use host managed nvme power settings for lowest idle power if
@@ -2866,8 +2868,14 @@ static int nvme_suspend(struct device *dev)
         * shutdown.  But if the firmware is involved after the suspend or the
         * device does not support any non-default power states, shut down the
         * device fully.
+        *
+        * If ASPM is not enabled for the device, shut down the device and allow
+        * the PCI bus layer to put it into D3 in order to take the PCIe link
+        * down, so as to allow the platform to achieve its minimum low-power
+        * state (which may not be possible if the link is up).
         */
-       if (pm_suspend_via_firmware() || !ctrl->npss) {
+       if (pm_suspend_via_firmware() || !ctrl->npss ||
+           !pcie_aspm_enabled(pdev)) {
                nvme_dev_disable(ndev, true);
                return 0;
        }
@@ -2880,7 +2888,6 @@ static int nvme_suspend(struct device *dev)
            ctrl->state != NVME_CTRL_ADMIN_ONLY)
                goto unfreeze;
 
-       ndev->last_ps = 0;
        ret = nvme_get_power_state(ctrl, &ndev->last_ps);
        if (ret < 0)
                goto unfreeze;
index 7f84bb4903caaf4d63847689972ff2c005cb9279..a296eaf52a5b23c377349b59688ffa62ec66db4d 100644 (file)
@@ -277,7 +277,7 @@ EXPORT_SYMBOL_GPL(of_irq_parse_raw);
  * of_irq_parse_one - Resolve an interrupt for a device
  * @device: the device whose interrupt is to be resolved
  * @index: index of the interrupt to resolve
- * @out_irq: structure of_irq filled by this function
+ * @out_irq: structure of_phandle_args filled by this function
  *
  * This function resolves an interrupt for a node by walking the interrupt tree,
  * finding which interrupt controller node it is attached to, and returning the
index c1b67dd7cd6eed61c75e9fe4234565dd9c329c5b..83c766233181096887f7d4a204214012b8223166 100644 (file)
@@ -206,16 +206,22 @@ static int adjust_local_phandle_references(struct device_node *local_fixups,
        for_each_child_of_node(local_fixups, child) {
 
                for_each_child_of_node(overlay, overlay_child)
-                       if (!node_name_cmp(child, overlay_child))
+                       if (!node_name_cmp(child, overlay_child)) {
+                               of_node_put(overlay_child);
                                break;
+                       }
 
-               if (!overlay_child)
+               if (!overlay_child) {
+                       of_node_put(child);
                        return -EINVAL;
+               }
 
                err = adjust_local_phandle_references(child, overlay_child,
                                phandle_delta);
-               if (err)
+               if (err) {
+                       of_node_put(child);
                        return err;
+               }
        }
 
        return 0;
index e44af7f4d37ff48c050daeaeab33326ec1a52067..464f8f92653f461e907e3a77062658e1a33a57ec 100644 (file)
@@ -1170,6 +1170,26 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
 module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
        NULL, 0644);
 
+/**
+ * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
+ * @pdev: Target device.
+ */
+bool pcie_aspm_enabled(struct pci_dev *pdev)
+{
+       struct pci_dev *bridge = pci_upstream_bridge(pdev);
+       bool ret;
+
+       if (!bridge)
+               return false;
+
+       mutex_lock(&aspm_lock);
+       ret = bridge->link_state ? !!bridge->link_state->aspm_enabled : false;
+       mutex_unlock(&aspm_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
+
 #ifdef CONFIG_PCIEASPM_DEBUG
 static ssize_t link_state_show(struct device *dev,
                struct device_attribute *attr,
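
The new pcie_aspm_enabled() helper lets drivers ask, under aspm_lock, whether ASPM is active on the link above a device; the state lives on the upstream bridge, hence the pci_upstream_bridge() walk. A hedged caller sketch in the spirit of the nvme_suspend() change earlier in this merge; the foo_* helpers are hypothetical:

/* Illustrative caller only; foo_shutdown() and foo_enter_low_power() are
 * hypothetical driver helpers, not existing APIs. */
static int foo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/*
	 * Without ASPM the link stays up across a shallow suspend, so fall
	 * back to a full shutdown and let the PCI core put the device in D3.
	 */
	if (!pcie_aspm_enabled(pdev))
		return foo_shutdown(pdev);

	return foo_enter_low_power(pdev);
}
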
index faf43b1d3dbef626610744daf4d3ecf6220489db..a7549ae32542738cce6a8c4011911f1462ff09d1 100644 (file)
@@ -10776,12 +10776,31 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
        /* This loop sets up all CPUs that are affinitized with a
         * irq vector assigned to the driver. All affinitized CPUs
         * will get a link to that vectors IRQ and EQ.
+        *
+        * NULL affinity mask handling:
+        * If irq count is greater than one, log an error message.
+        * If the null mask is received for the first irq, find the
+        * first present cpu, and assign the eq index to ensure at
+        * least one EQ is assigned.
         */
        for (idx = 0; idx <  phba->cfg_irq_chann; idx++) {
                /* Get a CPU mask for all CPUs affinitized to this vector */
                maskp = pci_irq_get_affinity(phba->pcidev, idx);
-               if (!maskp)
-                       continue;
+               if (!maskp) {
+                       if (phba->cfg_irq_chann > 1)
+                               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                               "3329 No affinity mask found "
+                                               "for vector %d (%d)\n",
+                                               idx, phba->cfg_irq_chann);
+                       if (!idx) {
+                               cpu = cpumask_first(cpu_present_mask);
+                               cpup = &phba->sli4_hba.cpu_map[cpu];
+                               cpup->eq = idx;
+                               cpup->irq = pci_irq_vector(phba->pcidev, idx);
+                               cpup->flag |= LPFC_CPU_FIRST_IRQ;
+                       }
+                       break;
+               }
 
                i = 0;
                /* Loop through all CPUs associated with vector idx */
index 4f1b6f466ff510d2899ae6f225ff5e0234875f15..b86195e4dc6cd657485eb01f62d103b4e06f7008 100644 (file)
@@ -505,18 +505,14 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
        struct afs_call *call = container_of(work, struct afs_call, work);
        struct afs_uuid *r = call->request;
 
-       struct {
-               __be32  match;
-       } reply;
-
        _enter("");
 
        if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
-               reply.match = htonl(0);
+               afs_send_empty_reply(call);
        else
-               reply.match = htonl(1);
+               rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+                                       1, 1, "K-1");
 
-       afs_send_simple_reply(call, &reply, sizeof(reply));
        afs_put_call(call);
        _leave("");
 }
index e640d67274be2b06e64a83dd967670d72a1cbed4..81207dc3c9977892ece421851666eadf8e818321 100644 (file)
@@ -440,7 +440,7 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
  * iterate through the data blob that lists the contents of an AFS directory
  */
 static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
-                          struct key *key)
+                          struct key *key, afs_dataversion_t *_dir_version)
 {
        struct afs_vnode *dvnode = AFS_FS_I(dir);
        struct afs_xdr_dir_page *dbuf;
@@ -460,6 +460,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
        req = afs_read_dir(dvnode, key);
        if (IS_ERR(req))
                return PTR_ERR(req);
+       *_dir_version = req->data_version;
 
        /* round the file position up to the next entry boundary */
        ctx->pos += sizeof(union afs_xdr_dirent) - 1;
@@ -514,7 +515,10 @@ out:
  */
 static int afs_readdir(struct file *file, struct dir_context *ctx)
 {
-       return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file));
+       afs_dataversion_t dir_version;
+
+       return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file),
+                              &dir_version);
 }
 
 /*
@@ -555,7 +559,8 @@ static int afs_lookup_one_filldir(struct dir_context *ctx, const char *name,
  * - just returns the FID the dentry name maps to if found
  */
 static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
-                            struct afs_fid *fid, struct key *key)
+                            struct afs_fid *fid, struct key *key,
+                            afs_dataversion_t *_dir_version)
 {
        struct afs_super_info *as = dir->i_sb->s_fs_info;
        struct afs_lookup_one_cookie cookie = {
@@ -568,7 +573,7 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
        _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
 
        /* search the directory */
-       ret = afs_dir_iterate(dir, &cookie.ctx, key);
+       ret = afs_dir_iterate(dir, &cookie.ctx, key, _dir_version);
        if (ret < 0) {
                _leave(" = %d [iter]", ret);
                return ret;
@@ -642,6 +647,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
        struct afs_server *server;
        struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
        struct inode *inode = NULL, *ti;
+       afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version);
        int ret, i;
 
        _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
@@ -669,12 +675,14 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
                cookie->fids[i].vid = as->volume->vid;
 
        /* search the directory */
-       ret = afs_dir_iterate(dir, &cookie->ctx, key);
+       ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version);
        if (ret < 0) {
                inode = ERR_PTR(ret);
                goto out;
        }
 
+       dentry->d_fsdata = (void *)(unsigned long)data_version;
+
        inode = ERR_PTR(-ENOENT);
        if (!cookie->found)
                goto out;
@@ -968,7 +976,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
        struct dentry *parent;
        struct inode *inode;
        struct key *key;
-       long dir_version, de_version;
+       afs_dataversion_t dir_version;
+       long de_version;
        int ret;
 
        if (flags & LOOKUP_RCU)
@@ -1014,20 +1023,20 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
         * on a 32-bit system, we only have 32 bits in the dentry to store the
         * version.
         */
-       dir_version = (long)dir->status.data_version;
+       dir_version = dir->status.data_version;
        de_version = (long)dentry->d_fsdata;
-       if (de_version == dir_version)
-               goto out_valid;
+       if (de_version == (long)dir_version)
+               goto out_valid_noupdate;
 
-       dir_version = (long)dir->invalid_before;
-       if (de_version - dir_version >= 0)
+       dir_version = dir->invalid_before;
+       if (de_version - (long)dir_version >= 0)
                goto out_valid;
 
        _debug("dir modified");
        afs_stat_v(dir, n_reval);
 
        /* search the directory for this vnode */
-       ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key);
+       ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key, &dir_version);
        switch (ret) {
        case 0:
                /* the filename maps to something */
@@ -1080,7 +1089,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
        }
 
 out_valid:
-       dentry->d_fsdata = (void *)dir_version;
+       dentry->d_fsdata = (void *)(unsigned long)dir_version;
+out_valid_noupdate:
        dput(parent);
        key_put(key);
        _leave(" = 1 [valid]");
@@ -1185,6 +1195,20 @@ static void afs_prep_for_new_inode(struct afs_fs_cursor *fc,
        iget_data->cb_s_break = fc->cbi->server->cb_s_break;
 }
 
+/*
+ * Note that a dentry got changed.  We need to set d_fsdata to the data version
+ * number derived from the result of the operation.  It doesn't matter if
+ * d_fsdata goes backwards as we'll just revalidate.
+ */
+static void afs_update_dentry_version(struct afs_fs_cursor *fc,
+                                     struct dentry *dentry,
+                                     struct afs_status_cb *scb)
+{
+       if (fc->ac.error == 0)
+               dentry->d_fsdata =
+                       (void *)(unsigned long)scb->status.data_version;
+}
+
 /*
  * create a directory on an AFS filesystem
  */
@@ -1227,6 +1251,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
                afs_check_for_remote_deletion(&fc, dvnode);
                afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
                                        &data_version, &scb[0]);
+               afs_update_dentry_version(&fc, dentry, &scb[0]);
                afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
                ret = afs_end_vnode_operation(&fc);
                if (ret < 0)
@@ -1319,6 +1344,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
 
                afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
                                        &data_version, scb);
+               afs_update_dentry_version(&fc, dentry, scb);
                ret = afs_end_vnode_operation(&fc);
                if (ret == 0) {
                        afs_dir_remove_subdir(dentry);
@@ -1458,6 +1484,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
                                        &data_version, &scb[0]);
                afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
                                        &data_version_2, &scb[1]);
+               afs_update_dentry_version(&fc, dentry, &scb[0]);
                ret = afs_end_vnode_operation(&fc);
                if (ret == 0 && !(scb[1].have_status || scb[1].have_error))
                        ret = afs_dir_remove_link(dvnode, dentry, key);
@@ -1526,6 +1553,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
                afs_check_for_remote_deletion(&fc, dvnode);
                afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
                                        &data_version, &scb[0]);
+               afs_update_dentry_version(&fc, dentry, &scb[0]);
                afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
                ret = afs_end_vnode_operation(&fc);
                if (ret < 0)
@@ -1607,6 +1635,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
                afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
                                        NULL, &scb[1]);
                ihold(&vnode->vfs_inode);
+               afs_update_dentry_version(&fc, dentry, &scb[0]);
                d_instantiate(dentry, &vnode->vfs_inode);
 
                mutex_unlock(&vnode->io_lock);
@@ -1686,6 +1715,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
                afs_check_for_remote_deletion(&fc, dvnode);
                afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
                                        &data_version, &scb[0]);
+               afs_update_dentry_version(&fc, dentry, &scb[0]);
                afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
                ret = afs_end_vnode_operation(&fc);
                if (ret < 0)
@@ -1791,6 +1821,17 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
                }
        }
 
+       /* This bit is potentially nasty as there's a potential race with
+        * afs_d_revalidate{,_rcu}().  We have to change d_fsdata on the dentry
+        * to reflect its new parent's new data_version after the op, but
+        * d_revalidate may see old_dentry between the op having taken place
+        * and the version being updated.
+        *
+        * So drop the old_dentry for now to make other threads go through
+        * lookup instead - which we hold a lock against.
+        */
+       d_drop(old_dentry);
+
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) {
                afs_dataversion_t orig_data_version;
@@ -1802,9 +1843,9 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
                if (orig_dvnode != new_dvnode) {
                        if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) {
                                afs_end_vnode_operation(&fc);
-                               goto error_rehash;
+                               goto error_rehash_old;
                        }
-                       new_data_version = new_dvnode->status.data_version;
+                       new_data_version = new_dvnode->status.data_version + 1;
                } else {
                        new_data_version = orig_data_version;
                        new_scb = &scb[0];
@@ -1827,7 +1868,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
                }
                ret = afs_end_vnode_operation(&fc);
                if (ret < 0)
-                       goto error_rehash;
+                       goto error_rehash_old;
        }
 
        if (ret == 0) {
@@ -1853,10 +1894,26 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
                                drop_nlink(new_inode);
                        spin_unlock(&new_inode->i_lock);
                }
+
+               /* Now we can update d_fsdata on the dentries to reflect their
+                * new parent's data_version.
+                *
+                * Note that if we ever implement RENAME_EXCHANGE, we'll have
+                * to update both dentries with opposing dir versions.
+                */
+               if (new_dvnode != orig_dvnode) {
+                       afs_update_dentry_version(&fc, old_dentry, &scb[1]);
+                       afs_update_dentry_version(&fc, new_dentry, &scb[1]);
+               } else {
+                       afs_update_dentry_version(&fc, old_dentry, &scb[0]);
+                       afs_update_dentry_version(&fc, new_dentry, &scb[0]);
+               }
                d_move(old_dentry, new_dentry);
                goto error_tmp;
        }
 
+error_rehash_old:
+       d_rehash(new_dentry);
 error_rehash:
        if (rehash)
                d_rehash(rehash);
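
The hooks added above feed a simple revalidation scheme: each directory-modifying operation caches the directory's post-op data_version, truncated to a long, in dentry->d_fsdata via afs_update_dentry_version(), and afs_d_revalidate() later compares that cached value against the directory's current version and its invalid_before cursor. A condensed sketch of that comparison; the function name is hypothetical:

/* Illustrative condensation of the afs_d_revalidate() logic above. */
static bool afs_dentry_version_ok(struct dentry *dentry,
				  afs_dataversion_t dir_version,
				  afs_dataversion_t invalid_before)
{
	long de_version = (long)dentry->d_fsdata;

	/* Unchanged since the dentry was last validated or updated. */
	if (de_version == (long)dir_version)
		return true;

	/* Signed difference tolerates the truncation of the 64-bit version
	 * to a long on 32-bit systems. */
	return de_version - (long)invalid_before >= 0;
}
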
index 56b69576274d5b64cae3be2192f01838f201507d..dd3c55c9101c4e8475eca6a66e789a5f991d845d 100644 (file)
@@ -191,11 +191,13 @@ void afs_put_read(struct afs_read *req)
        int i;
 
        if (refcount_dec_and_test(&req->usage)) {
-               for (i = 0; i < req->nr_pages; i++)
-                       if (req->pages[i])
-                               put_page(req->pages[i]);
-               if (req->pages != req->array)
-                       kfree(req->pages);
+               if (req->pages) {
+                       for (i = 0; i < req->nr_pages; i++)
+                               if (req->pages[i])
+                                       put_page(req->pages[i]);
+                       if (req->pages != req->array)
+                               kfree(req->pages);
+               }
                kfree(req);
        }
 }
index d7e0fd3c00df9e8ee52b97d272ffc6ae71effa8c..cfb0ac4bd039e672c14110ce8ad4eccb16706103 100644 (file)
@@ -56,23 +56,24 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
                struct afs_uuid__xdr *xdr;
                struct afs_uuid *uuid;
                int j;
+               int n = entry->nr_servers;
 
                tmp = ntohl(uvldb->serverFlags[i]);
                if (tmp & AFS_VLSF_DONTUSE ||
                    (new_only && !(tmp & AFS_VLSF_NEWREPSITE)))
                        continue;
                if (tmp & AFS_VLSF_RWVOL) {
-                       entry->fs_mask[i] |= AFS_VOL_VTM_RW;
+                       entry->fs_mask[n] |= AFS_VOL_VTM_RW;
                        if (vlflags & AFS_VLF_BACKEXISTS)
-                               entry->fs_mask[i] |= AFS_VOL_VTM_BAK;
+                               entry->fs_mask[n] |= AFS_VOL_VTM_BAK;
                }
                if (tmp & AFS_VLSF_ROVOL)
-                       entry->fs_mask[i] |= AFS_VOL_VTM_RO;
-               if (!entry->fs_mask[i])
+                       entry->fs_mask[n] |= AFS_VOL_VTM_RO;
+               if (!entry->fs_mask[n])
                        continue;
 
                xdr = &uvldb->serverNumber[i];
-               uuid = (struct afs_uuid *)&entry->fs_server[i];
+               uuid = (struct afs_uuid *)&entry->fs_server[n];
                uuid->time_low                  = xdr->time_low;
                uuid->time_mid                  = htons(ntohl(xdr->time_mid));
                uuid->time_hi_and_version       = htons(ntohl(xdr->time_hi_and_version));
index 04f09689cd6d5bfb23b9757b7cb01fd1af180924..1600034a929bb1c89df6ce7134ecaad994bd1491 100644 (file)
@@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset)
                }
                if (seq_has_overflowed(m))
                        goto Eoverflow;
+               p = m->op->next(m, p, &m->index);
                if (pos + m->count > offset) {
                        m->from = offset - pos;
                        m->count -= m->from;
@@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset)
                }
                pos += m->count;
                m->count = 0;
-               p = m->op->next(m, p, &m->index);
                if (pos == offset)
                        break;
        }
index baf0b72c0a37deddd4936556a985cb020bf5ff4d..07aad70f39319f46b670dfef948f3b49d16f786a 100644 (file)
@@ -3835,15 +3835,28 @@ xfs_bmapi_read(
        XFS_STATS_INC(mp, xs_blk_mapr);
 
        ifp = XFS_IFORK_PTR(ip, whichfork);
+       if (!ifp) {
+               /* No CoW fork?  Return a hole. */
+               if (whichfork == XFS_COW_FORK) {
+                       mval->br_startoff = bno;
+                       mval->br_startblock = HOLESTARTBLOCK;
+                       mval->br_blockcount = len;
+                       mval->br_state = XFS_EXT_NORM;
+                       *nmap = 1;
+                       return 0;
+               }
 
-       /* No CoW fork?  Return a hole. */
-       if (whichfork == XFS_COW_FORK && !ifp) {
-               mval->br_startoff = bno;
-               mval->br_startblock = HOLESTARTBLOCK;
-               mval->br_blockcount = len;
-               mval->br_state = XFS_EXT_NORM;
-               *nmap = 1;
-               return 0;
+               /*
+                * A missing attr ifork implies that the inode says we're in
+                * extents or btree format but failed to pass the inode fork
+                * verifier while trying to load it.  Treat that as a file
+                * corruption too.
+                */
+#ifdef DEBUG
+               xfs_alert(mp, "%s: inode %llu missing fork %d",
+                               __func__, ip->i_ino, whichfork);
+#endif /* DEBUG */
+               return -EFSCORRUPTED;
        }
 
        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
index d1c77fd0815da89cca4882c7bd4c19efac357745..0bf56e94bfe93cb00a87a92e3112346ca21ec7ca 100644 (file)
@@ -487,10 +487,8 @@ xfs_da3_split(
        ASSERT(state->path.active == 0);
        oldblk = &state->path.blk[0];
        error = xfs_da3_root_split(state, oldblk, addblk);
-       if (error) {
-               addblk->bp = NULL;
-               return error;   /* GROT: dir is inconsistent */
-       }
+       if (error)
+               goto out;
 
        /*
         * Update pointers to the node which used to be block 0 and just got
@@ -505,7 +503,10 @@ xfs_da3_split(
         */
        node = oldblk->bp->b_addr;
        if (node->hdr.info.forw) {
-               ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno);
+               if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
+                       error = -EFSCORRUPTED;
+                       goto out;
+               }
                node = addblk->bp->b_addr;
                node->hdr.info.back = cpu_to_be32(oldblk->blkno);
                xfs_trans_log_buf(state->args->trans, addblk->bp,
@@ -514,15 +515,19 @@ xfs_da3_split(
        }
        node = oldblk->bp->b_addr;
        if (node->hdr.info.back) {
-               ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno);
+               if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
+                       error = -EFSCORRUPTED;
+                       goto out;
+               }
                node = addblk->bp->b_addr;
                node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
                xfs_trans_log_buf(state->args->trans, addblk->bp,
                                  XFS_DA_LOGRANGE(node, &node->hdr.info,
                                  sizeof(node->hdr.info)));
        }
+out:
        addblk->bp = NULL;
-       return 0;
+       return error;
 }
 
 /*
index afcc6642690a8d85aebab8849321b01c50016b98..1fc44efc344df9ebfd22597b57447e3b77cfb836 100644 (file)
@@ -741,7 +741,8 @@ xfs_dir2_leafn_lookup_for_entry(
        ents = dp->d_ops->leaf_ents_p(leaf);
 
        xfs_dir3_leaf_check(dp, bp);
-       ASSERT(leafhdr.count > 0);
+       if (leafhdr.count <= 0)
+               return -EFSCORRUPTED;
 
        /*
         * Look up the hash value in the leaf entries.
index 00e9f5c388d366031fd8c5b713655a718c0a9287..7fc3c1ad36bcd0e490058c78f39e5883505da1c6 100644 (file)
@@ -429,10 +429,7 @@ xfs_log_reserve(
 
        ASSERT(*ticp == NULL);
        tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
-                               KM_SLEEP | KM_MAYFAIL);
-       if (!tic)
-               return -ENOMEM;
-
+                               KM_SLEEP);
        *ticp = tic;
 
        xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
index bb6cb347018c07cda6a0cd90570938dd4ac5d2d1..f6947da70d715fc7fdd6d67fd7845ac0d1d4b23f 100644 (file)
 
 #define p4d_alloc(mm, pgd, address)    (pgd)
 #define p4d_offset(pgd, start)         (pgd)
-#define p4d_none(p4d)                  0
-#define p4d_bad(p4d)                   0
-#define p4d_present(p4d)               1
+
+#ifndef __ASSEMBLY__
+static inline int p4d_none(p4d_t p4d)
+{
+       return 0;
+}
+
+static inline int p4d_bad(p4d_t p4d)
+{
+       return 0;
+}
+
+static inline int p4d_present(p4d_t p4d)
+{
+       return 1;
+}
+#endif
+
 #define p4d_ERROR(p4d)                 do { } while (0)
 #define p4d_clear(p4d)                 pgd_clear(p4d)
 #define p4d_val(p4d)                   pgd_val(p4d)
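
Converting the constant-result macros to static inlines means the argument is actually evaluated and type-checked; the old #define form dropped it at preprocessing time, leaving callers' p4d variables written but never read, the pattern behind "variable set but not used" warnings. A hypothetical caller to illustrate:

/* Hypothetical caller, for illustration only: with the old macros "p4d"
 * below was assigned but never read; the static inlines above consume and
 * type-check their argument instead. */
static inline int example_alloc_p4d(struct mm_struct *mm, pgd_t *pgd,
				    unsigned long addr)
{
	p4d_t *p4d = p4d_alloc(mm, pgd, addr);

	if (!p4d || p4d_none(*p4d))
		return -ENOMEM;
	return 0;
}
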
index 3813211a9aadef0e2dc2f96267faf914cad2987d..0bff3d7fac92c95e324928a0aadc48e6d4ffd35f 100644 (file)
@@ -42,13 +42,18 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs);
 long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
                dma_addr_t dma_addr);
-
-#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
                unsigned long attrs);
+
+#ifdef CONFIG_MMU
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
 #else
-# define arch_dma_mmap_pgprot(dev, prot, attrs)        pgprot_noncached(prot)
-#endif
+static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
+               unsigned long attrs)
+{
+       return prot;    /* no protection bits supported without page tables */
+}
+#endif /* CONFIG_MMU */
 
 #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
index fb07b503dc453ddfe16c5f0f959d46be01ad55ba..f33881688f42b2d6605d4f868546fbc06b1cd778 100644 (file)
@@ -510,22 +510,18 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
 }
 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
                        struct vm_area_struct *vma, unsigned long addr,
-                       int node, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
-       alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
+                       int node);
 #else
 #define alloc_pages(gfp_mask, order) \
                alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
-       alloc_pages(gfp_mask, order)
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\
        alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
 #define alloc_page_vma(gfp_mask, vma, addr)                    \
-       alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
+       alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
 #define alloc_page_vma_node(gfp_mask, vma, addr, node)         \
-       alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
+       alloc_pages_vma(gfp_mask, 0, vma, addr, node)
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
index 44c41462be334dac8edbc365c293d7014bed29e9..2cd4359cb38cb4fa44d8099b89cee0811eee22ff 100644 (file)
@@ -668,6 +668,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 
 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                        int val);
+void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
 
 static inline void mod_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx, int val)
@@ -1072,6 +1073,14 @@ static inline void mod_lruvec_page_state(struct page *page,
        mod_node_page_state(page_pgdat(page), idx, val);
 }
 
+static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+                                          int val)
+{
+       struct page *page = virt_to_head_page(p);
+
+       __mod_node_page_state(page_pgdat(page), idx, val);
+}
+
 static inline
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
                                            gfp_t gfp_mask,
@@ -1159,6 +1168,16 @@ static inline void __dec_lruvec_page_state(struct page *page,
        __mod_lruvec_page_state(page, idx, -1);
 }
 
+static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
+{
+       __mod_lruvec_slab_state(p, idx, 1);
+}
+
+static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
+{
+       __mod_lruvec_slab_state(p, idx, -1);
+}
+
 /* idx can be of type enum memcg_stat_item or node_stat_item */
 static inline void inc_memcg_state(struct mem_cgroup *memcg,
                                   int idx)
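
The new __mod_lruvec_slab_state() family accounts an object against the memcg that owns its slab page without the caller needing a lruvec in hand; the memcg, if any, is resolved from the page itself. A minimal usage sketch, assuming a shadow-node style caller and WORKINGSET_NODES as the counter of interest:

/* Illustrative only: charge/uncharge a slab-allocated object against the
 * node and, when kmem accounting is active, its owning memcg.  "node"
 * stands for any object allocated from an accounted slab cache. */
static void example_account_node(void *node, bool alloc)
{
	if (alloc)
		__inc_lruvec_slab_state(node, WORKINGSET_NODES);
	else
		__dec_lruvec_slab_state(node, WORKINGSET_NODES);
}
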
index 5228c62af41659bb7d5ae0e7db00969b9f16ef73..bac395f1d00a0f9691b12ec6841f2401a10ca4fc 100644 (file)
@@ -139,6 +139,8 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
 struct mempolicy *get_task_policy(struct task_struct *p);
 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
                unsigned long addr);
+struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+                                               unsigned long addr);
 bool vma_policy_mof(struct vm_area_struct *vma);
 
 extern void numa_default_policy(void);
index 3a37a89eb7a7c363c4ac795b8ed782e657e3c6fe..6a7a1083b6fb796d89d47beba775b3dd57255c3f 100644 (file)
@@ -159,7 +159,16 @@ struct page {
                        /** @pgmap: Points to the hosting device page map. */
                        struct dev_pagemap *pgmap;
                        void *zone_device_data;
-                       unsigned long _zd_pad_1;        /* uses mapping */
+                       /*
+                        * ZONE_DEVICE private pages are counted as being
+                        * mapped so the next 3 words hold the mapping, index,
+                        * and private fields from the source anonymous or
+                        * page cache page while the page is migrated to device
+                        * private memory.
+                        * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
+                        * use the mapping, index, and private fields when
+                        * pmem backed DAX files are mapped.
+                        */
                };
 
                /** @rcu_head: You can use this to free a page by RCU. */
index 9e700d9f9f287d0db0bf770cd5c4ee2a564db94c..82e4cd1b7ac3cb50346492a27163da53bb206e2c 100644 (file)
@@ -1567,8 +1567,10 @@ extern bool pcie_ports_native;
 
 #ifdef CONFIG_PCIEASPM
 bool pcie_aspm_support_enabled(void);
+bool pcie_aspm_enabled(struct pci_dev *pdev);
 #else
 static inline bool pcie_aspm_support_enabled(void) { return false; }
+static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
 #endif
 
 #ifdef CONFIG_PCIEAER
index 7de68f1dc707f0d55805e3fafa3c97a363d06e0b..af735f55b2911208077ad70d2e83e750125001e0 100644 (file)
@@ -180,6 +180,7 @@ struct siw_cqe {
  * to control CQ arming.
  */
 struct siw_cq_ctrl {
-       __aligned_u64 notify;
+       __u32 flags;
+       __u32 pad;
 };
 #endif
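
Swapping the single __aligned_u64 for a __u32 flags word plus explicit padding keeps struct siw_cq_ctrl at 8 bytes, so the size of the user-visible ABI structure is unchanged while the notification flags become a natural 32-bit quantity. A compile-time sanity check along these lines would confirm it; the include path is an assumption:

/* Illustrative check only (not part of the patch). */
#include <linux/build_bug.h>
#include <rdma/siw-abi.h>	/* assumed include path for the uapi header */

static_assert(sizeof(struct siw_cq_ctrl) == 8,
	      "siw_cq_ctrl ABI size must stay 8 bytes");
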
index 59bdceea3737a4a095555723f4a7b79fda15c048..795c9b095d7573a79df1d4eeef00a10dca6a2ee4 100644 (file)
@@ -47,9 +47,6 @@ u64 dma_direct_get_required_mask(struct device *dev)
 {
        u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
 
-       if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
-               max_dma = dev->bus_dma_mask;
-
        return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
 
@@ -130,10 +127,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
        if (!page)
                return NULL;
 
-       if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+       if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+           !force_dma_unencrypted(dev)) {
                /* remove any dirty cache lines on the kernel alias */
                if (!PageHighMem(page))
                        arch_dma_prep_coherent(page, size);
+               *dma_handle = phys_to_dma(dev, page_to_phys(page));
                /* return the page pointer as the opaque cookie */
                return page;
        }
@@ -178,7 +177,8 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 {
        unsigned int page_order = get_order(size);
 
-       if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+       if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+           !force_dma_unencrypted(dev)) {
                /* cpu_addr is a struct page cookie, not a kernel address */
                __dma_direct_free_pages(dev, size, cpu_addr);
                return;
index b945239621d86255d6a259a9a2911c9a393e0b11..b0038ca3aa92b2e9d4a7976a08237deb2c2c7531 100644 (file)
@@ -150,6 +150,23 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 }
 EXPORT_SYMBOL(dma_get_sgtable_attrs);
 
+#ifdef CONFIG_MMU
+/*
+ * Return the page attributes used for mapping dma_alloc_* memory, either in
+ * kernel space if remapping is needed, or to userspace through dma_mmap_*.
+ */
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
+{
+       if (dev_is_dma_coherent(dev) ||
+           (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
+             (attrs & DMA_ATTR_NON_CONSISTENT)))
+               return prot;
+       if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT))
+               return arch_dma_mmap_pgprot(dev, prot, attrs);
+       return pgprot_noncached(prot);
+}
+#endif /* CONFIG_MMU */
+
 /*
  * Create userspace mapping for the DMA-coherent memory.
  */
@@ -164,7 +181,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
        unsigned long pfn;
        int ret = -ENXIO;
 
-       vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+       vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
 
        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
index a594aec07882d9a0ad4d482d00afb81e4daa3189..ffe78f0b2fe47c442330ee8de67cb11e230b0ae0 100644 (file)
@@ -218,7 +218,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 
        /* create a coherent mapping */
        ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
-                       arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs),
+                       dma_pgprot(dev, PAGE_KERNEL, attrs),
                        __builtin_return_address(0));
        if (!ret) {
                __dma_direct_free_pages(dev, size, page);
index 636ca6f88c8ee275efd664c553609c5121a5cb3f..867b4bb6d4beb541d1d9eb087711d1e52a446416 100644 (file)
@@ -40,6 +40,7 @@ struct sugov_policy {
        struct task_struct      *thread;
        bool                    work_in_progress;
 
+       bool                    limits_changed;
        bool                    need_freq_update;
 };
 
@@ -89,8 +90,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
            !cpufreq_this_cpu_can_update(sg_policy->policy))
                return false;
 
-       if (unlikely(sg_policy->need_freq_update))
+       if (unlikely(sg_policy->limits_changed)) {
+               sg_policy->limits_changed = false;
+               sg_policy->need_freq_update = true;
                return true;
+       }
 
        delta_ns = time - sg_policy->last_freq_update_time;
 
@@ -437,7 +441,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
 static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
 {
        if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
-               sg_policy->need_freq_update = true;
+               sg_policy->limits_changed = true;
 }
 
 static void sugov_update_single(struct update_util_data *hook, u64 time,
@@ -457,7 +461,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
        if (!sugov_should_update_freq(sg_policy, time))
                return;
 
-       busy = sugov_cpu_is_busy(sg_cpu);
+       /* Limits may have changed, don't skip frequency update */
+       busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
 
        util = sugov_get_util(sg_cpu);
        max = sg_cpu->max;
@@ -831,6 +836,7 @@ static int sugov_start(struct cpufreq_policy *policy)
        sg_policy->last_freq_update_time        = 0;
        sg_policy->next_freq                    = 0;
        sg_policy->work_in_progress             = false;
+       sg_policy->limits_changed               = false;
        sg_policy->need_freq_update             = false;
        sg_policy->cached_raw_freq              = 0;
 
@@ -879,7 +885,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
                mutex_unlock(&sg_policy->work_lock);
        }
 
-       sg_policy->need_freq_update = true;
+       sg_policy->limits_changed = true;
 }
 
 struct cpufreq_governor schedutil_gov = {
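
The two flags are easiest to read as a hand-off: sugov_limits() only records that the policy limits moved, the next sugov_should_update_freq() call turns that into need_freq_update and forces an evaluation, and sugov_update_single() then skips the busy-CPU shortcut so a lowered limit takes effect immediately. Summarised as a comment-style sketch:

/*
 * Illustrative summary of the flag hand-off introduced above:
 *
 *   sugov_limits()              ->  sg_policy->limits_changed = true
 *   sugov_should_update_freq()  ->  sees limits_changed, clears it,
 *                                   sets need_freq_update, returns true
 *   sugov_update_single()       ->  busy = !need_freq_update && cpu_is_busy,
 *                                   so the "keep current frequency" shortcut
 *                                   is skipped and the new limits apply
 */
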
index 1334ede667a82aa1d5701526836c7f54487e9996..738065f765abd4dda88a9f762736f9a1108cb6fd 100644 (file)
@@ -644,30 +644,40 @@ release:
  *         available
  * never: never stall for any thp allocation
  */
-static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
+static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr)
 {
        const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
+       gfp_t this_node = 0;
+
+#ifdef CONFIG_NUMA
+       struct mempolicy *pol;
+       /*
+        * __GFP_THISNODE is used only when __GFP_DIRECT_RECLAIM is not
+        * specified, to express a general desire to stay on the current
+        * node for optimistic allocation attempts. If the defrag mode
+        * and/or madvise hint requires direct reclaim then we prefer to
+        * fall back to another node rather than do node reclaim, because that
+        * can lead to excessive reclaim even though there is free memory
+        * on other nodes. We expect that NUMA preferences are specified
+        * by memory policies.
+        */
+       pol = get_vma_policy(vma, addr);
+       if (pol->mode != MPOL_BIND)
+               this_node = __GFP_THISNODE;
+       mpol_cond_put(pol);
+#endif
 
-       /* Always do synchronous compaction */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
                return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
-
-       /* Kick kcompactd and fail quickly */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
-               return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
-
-       /* Synchronous compaction if madvised, otherwise kick kcompactd */
+               return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node;
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
-               return GFP_TRANSHUGE_LIGHT |
-                       (vma_madvised ? __GFP_DIRECT_RECLAIM :
-                                       __GFP_KSWAPD_RECLAIM);
-
-       /* Only do synchronous compaction if madvised */
+               return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
+                                                            __GFP_KSWAPD_RECLAIM | this_node);
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
-               return GFP_TRANSHUGE_LIGHT |
-                      (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
-
-       return GFP_TRANSHUGE_LIGHT;
+               return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
+                                                            this_node);
+       return GFP_TRANSHUGE_LIGHT | this_node;
 }
 
 /* Caller must hold page table lock. */
@@ -739,8 +749,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
                        pte_free(vma->vm_mm, pgtable);
                return ret;
        }
-       gfp = alloc_hugepage_direct_gfpmask(vma);
-       page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+       gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
+       page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr, numa_node_id());
        if (unlikely(!page)) {
                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
@@ -1347,8 +1357,9 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
 alloc:
        if (__transparent_hugepage_enabled(vma) &&
            !transparent_hugepage_debug_cow()) {
-               huge_gfp = alloc_hugepage_direct_gfpmask(vma);
-               new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
+               huge_gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
+               new_page = alloc_pages_vma(huge_gfp, HPAGE_PMD_ORDER, vma,
+                               haddr, numa_node_id());
        } else
                new_page = NULL;
 
index ede7e7f5d1ab2fec113507b5dabe78a7cdad8256..6d7296dd11b83503a511986814ba5d1f2a84b26b 100644 (file)
@@ -3856,6 +3856,25 @@ retry:
 
                page = alloc_huge_page(vma, haddr, 0);
                if (IS_ERR(page)) {
+                       /*
+                        * Returning error will result in faulting task being
+                        * sent SIGBUS.  The hugetlb fault mutex prevents two
+                        * tasks from racing to fault in the same page which
+                        * could result in false unable to allocate errors.
+                        * Page migration does not take the fault mutex, but
+                        * does a clear then write of pte's under page table
+                        * lock.  Page fault code could race with migration,
+                        * notice the clear pte and try to allocate a page
+                        * here.  Before returning error, get ptl and make
+                        * sure there really is no pte entry.
+                        */
+                       ptl = huge_pte_lock(h, mm, ptep);
+                       if (!huge_pte_none(huge_ptep_get(ptep))) {
+                               ret = 0;
+                               spin_unlock(ptl);
+                               goto out;
+                       }
+                       spin_unlock(ptl);
                        ret = vmf_error(PTR_ERR(page));
                        goto out;
                }
index 6e9e8cca663e477d328584ac3142d8234d8e3e05..f6e602918dac84a96fe16a8e2c202505256bcb8d 100644 (file)
@@ -1966,6 +1966,7 @@ static void kmemleak_disable(void)
 
        /* stop any memory operation tracing */
        kmemleak_enabled = 0;
+       kmemleak_early_log = 0;
 
        /* check whether it is too early for a kernel thread */
        if (kmemleak_initialized)
@@ -2009,7 +2010,6 @@ void __init kmemleak_init(void)
 
 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
        if (!kmemleak_skip_disable) {
-               kmemleak_early_log = 0;
                kmemleak_disable();
                return;
        }
index cdbb7a84cb6e186242afeebb32f4c5c7fb2d0639..6f5c0c517c497dbddad9016ef2d1993e4e1bcf05 100644 (file)
@@ -768,6 +768,26 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
        __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
 }
 
+void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
+{
+       struct page *page = virt_to_head_page(p);
+       pg_data_t *pgdat = page_pgdat(page);
+       struct mem_cgroup *memcg;
+       struct lruvec *lruvec;
+
+       rcu_read_lock();
+       memcg = memcg_from_slab_page(page);
+
+       /* Untracked pages have no memcg, no lruvec. Update only the node */
+       if (!memcg || memcg == root_mem_cgroup) {
+               __mod_node_page_state(pgdat, idx, val);
+       } else {
+               lruvec = mem_cgroup_lruvec(pgdat, memcg);
+               __mod_lruvec_state(lruvec, idx, val);
+       }
+       rcu_read_unlock();
+}
+
 /**
  * __count_memcg_events - account VM events in a cgroup
  * @memcg: the memory cgroup
@@ -1130,26 +1150,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
                css_put(&prev->css);
 }
 
-static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
+                                       struct mem_cgroup *dead_memcg)
 {
-       struct mem_cgroup *memcg = dead_memcg;
        struct mem_cgroup_reclaim_iter *iter;
        struct mem_cgroup_per_node *mz;
        int nid;
        int i;
 
-       for (; memcg; memcg = parent_mem_cgroup(memcg)) {
-               for_each_node(nid) {
-                       mz = mem_cgroup_nodeinfo(memcg, nid);
-                       for (i = 0; i <= DEF_PRIORITY; i++) {
-                               iter = &mz->iter[i];
-                               cmpxchg(&iter->position,
-                                       dead_memcg, NULL);
-                       }
+       for_each_node(nid) {
+               mz = mem_cgroup_nodeinfo(from, nid);
+               for (i = 0; i <= DEF_PRIORITY; i++) {
+                       iter = &mz->iter[i];
+                       cmpxchg(&iter->position,
+                               dead_memcg, NULL);
                }
        }
 }
 
+static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+{
+       struct mem_cgroup *memcg = dead_memcg;
+       struct mem_cgroup *last;
+
+       do {
+               __invalidate_reclaim_iterators(memcg, dead_memcg);
+               last = memcg;
+       } while ((memcg = parent_mem_cgroup(memcg)));
+
+       /*
+        * When cgroup1 non-hierarchy mode is used,
+        * parent_mem_cgroup() does not walk all the way up to the
+        * cgroup root (root_mem_cgroup). So we have to handle
+        * dead_memcg from cgroup root separately.
+        */
+       if (last != root_mem_cgroup)
+               __invalidate_reclaim_iterators(root_mem_cgroup,
+                                               dead_memcg);
+}
+
 /**
  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
  * @memcg: hierarchy root
index f48693f75b37a0ece6ba8bf9c7ebb9a25ce3a7fd..65e0874fce1736a65f8ced74f2a94d3e23d718c9 100644 (file)
@@ -403,7 +403,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
        },
 };
 
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags);
 
 struct queue_pages {
@@ -429,11 +429,14 @@ static inline bool queue_pages_required(struct page *page,
 }
 
 /*
- * queue_pages_pmd() has three possible return values:
- * 1 - pages are placed on the right node or queued successfully.
- * 0 - THP was split.
- * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing
- *        page was already on a node that does not follow the policy.
+ * queue_pages_pmd() has four possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * 2 - THP was split.
+ * -EIO - the entry is a migration entry, or only MPOL_MF_STRICT was specified
+ *        and an existing page was already on a node that does not follow the
+ *        policy.
  */
 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
@@ -451,23 +454,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
        if (is_huge_zero_page(page)) {
                spin_unlock(ptl);
                __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
+               ret = 2;
                goto out;
        }
-       if (!queue_pages_required(page, qp)) {
-               ret = 1;
+       if (!queue_pages_required(page, qp))
                goto unlock;
-       }
 
-       ret = 1;
        flags = qp->flags;
        /* go to thp migration */
        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-               if (!vma_migratable(walk->vma)) {
-                       ret = -EIO;
+               if (!vma_migratable(walk->vma) ||
+                   migrate_page_add(page, qp->pagelist, flags)) {
+                       ret = 1;
                        goto unlock;
                }
-
-               migrate_page_add(page, qp->pagelist, flags);
        } else
                ret = -EIO;
 unlock:
@@ -479,6 +479,13 @@ out:
 /*
  * Scan through pages checking if pages follow certain conditions,
  * and move them to the pagelist if they do.
+ *
+ * queue_pages_pte_range() has three possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
+ *        on a node that does not follow the policy.
  */
 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
                        unsigned long end, struct mm_walk *walk)
@@ -488,17 +495,17 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
        struct queue_pages *qp = walk->private;
        unsigned long flags = qp->flags;
        int ret;
+       bool has_unmovable = false;
        pte_t *pte;
        spinlock_t *ptl;
 
        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
-               if (ret > 0)
-                       return 0;
-               else if (ret < 0)
+               if (ret != 2)
                        return ret;
        }
+       /* THP was split, fall through to pte walk */
 
        if (pmd_trans_unstable(pmd))
                return 0;
@@ -519,14 +526,28 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
                if (!queue_pages_required(page, qp))
                        continue;
                if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-                       if (!vma_migratable(vma))
+                       /* MPOL_MF_STRICT must be specified if we get here */
+                       if (!vma_migratable(vma)) {
+                               has_unmovable = true;
                                break;
-                       migrate_page_add(page, qp->pagelist, flags);
+                       }
+
+                       /*
+                        * Do not abort immediately since there may be
+                        * temporarily off-LRU pages in the range.  We still
+                        * need to migrate the other LRU pages.
+                        */
+                       if (migrate_page_add(page, qp->pagelist, flags))
+                               has_unmovable = true;
                } else
                        break;
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
+
+       if (has_unmovable)
+               return 1;
+
        return addr != end ? -EIO : 0;
 }
 
@@ -639,7 +660,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
  *
  * If pages found in a given range are on a set of nodes (determined by
  * @nodes and @flags,) it's isolated and queued to the pagelist which is
- * passed via @private.)
+ * passed via @private.
+ *
+ * queue_pages_range() has three possible return values:
+ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * 0 - pages queued successfully or no misplaced page found.
+ * -EIO - there is a misplaced page and only MPOL_MF_STRICT was specified.
  */
 static int
 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
@@ -940,7 +967,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 /*
  * page migration, thp tail pages can be passed.
  */
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags)
 {
        struct page *head = compound_head(page);
@@ -953,8 +980,19 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
                        mod_node_page_state(page_pgdat(head),
                                NR_ISOLATED_ANON + page_is_file_cache(head),
                                hpage_nr_pages(head));
+               } else if (flags & MPOL_MF_STRICT) {
+                       /*
+                        * A non-movable page may reach here.  And there may be
+                        * temporarily off-LRU pages or non-LRU movable pages.
+                        * Treat them as unmovable pages since they can't be
+                        * isolated, so they can't be moved at the moment.  It
+                        * should return -EIO for this case too.
+                        */
+                       return -EIO;
                }
        }
+
+       return 0;
 }
 
 /* page allocation callback for NUMA node migration */
@@ -1142,8 +1180,8 @@ static struct page *new_page(struct page *page, unsigned long start)
        } else if (PageTransHuge(page)) {
                struct page *thp;
 
-               thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
-                                        HPAGE_PMD_ORDER);
+               thp = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma,
+                               address, numa_node_id());
                if (!thp)
                        return NULL;
                prep_transhuge_page(thp);
@@ -1157,9 +1195,10 @@ static struct page *new_page(struct page *page, unsigned long start)
 }
 #else
 
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags)
 {
+       return -EIO;
 }
 
 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
@@ -1182,6 +1221,7 @@ static long do_mbind(unsigned long start, unsigned long len,
        struct mempolicy *new;
        unsigned long end;
        int err;
+       int ret;
        LIST_HEAD(pagelist);
 
        if (flags & ~(unsigned long)MPOL_MF_VALID)
@@ -1243,10 +1283,15 @@ static long do_mbind(unsigned long start, unsigned long len,
        if (err)
                goto mpol_out;
 
-       err = queue_pages_range(mm, start, end, nmask,
+       ret = queue_pages_range(mm, start, end, nmask,
                          flags | MPOL_MF_INVERT, &pagelist);
-       if (!err)
-               err = mbind_range(mm, start, end, new);
+
+       if (ret < 0) {
+               err = -EIO;
+               goto up_out;
+       }
+
+       err = mbind_range(mm, start, end, new);
 
        if (!err) {
                int nr_failed = 0;
@@ -1259,13 +1304,14 @@ static long do_mbind(unsigned long start, unsigned long len,
                                putback_movable_pages(&pagelist);
                }
 
-               if (nr_failed && (flags & MPOL_MF_STRICT))
+               if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
                        err = -EIO;
        } else
                putback_movable_pages(&pagelist);
 
+up_out:
        up_write(&mm->mmap_sem);
- mpol_out:
+mpol_out:
        mpol_put(new);
        return err;
 }
@@ -1688,7 +1734,7 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
  * freeing by another task.  It is the caller's responsibility to free the
  * extra reference for shared policies.
  */
-static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
                                                unsigned long addr)
 {
        struct mempolicy *pol = __get_vma_policy(vma, addr);
@@ -2037,7 +2083,6 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
  *     @vma:  Pointer to VMA or NULL if not available.
  *     @addr: Virtual Address of the allocation. Must be inside the VMA.
  *     @node: Which node to prefer for allocation (modulo policy).
- *     @hugepage: for hugepages try only the preferred node if possible
  *
  *     This function allocates a page from the kernel page pool and applies
  *     a NUMA policy associated with the VMA or the current process.
@@ -2048,7 +2093,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
  */
 struct page *
 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
-               unsigned long addr, int node, bool hugepage)
+               unsigned long addr, int node)
 {
        struct mempolicy *pol;
        struct page *page;
@@ -2066,31 +2111,6 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
                goto out;
        }
 
-       if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
-               int hpage_node = node;
-
-               /*
-                * For hugepage allocation and non-interleave policy which
-                * allows the current node (or other explicitly preferred
-                * node) we only try to allocate from the current/preferred
-                * node and don't fall back to other nodes, as the cost of
-                * remote accesses would likely offset THP benefits.
-                *
-                * If the policy is interleave, or does not allow the current
-                * node in its nodemask, we allocate the standard way.
-                */
-               if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
-                       hpage_node = pol->v.preferred_node;
-
-               nmask = policy_nodemask(gfp, pol);
-               if (!nmask || node_isset(hpage_node, *nmask)) {
-                       mpol_cond_put(pol);
-                       page = __alloc_pages_node(hpage_node,
-                                               gfp | __GFP_THISNODE, order);
-                       goto out;
-               }
-       }
-
        nmask = policy_nodemask(gfp, pol);
        preferred_nid = policy_node(gfp, pol, node);
        page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
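
A stand-alone user-space sketch (not part of the patch) of where the new return-value plumbing becomes visible: with both MPOL_MF_MOVE and MPOL_MF_STRICT set, mbind() now reports EIO when the range contains pages that cannot be isolated, where it could previously succeed. The snippet assumes libnuma's <numaif.h> (link with -lnuma) and a system with NUMA node 0.

#define _GNU_SOURCE
#include <errno.h>
#include <numaif.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 2UL << 20;                 /* 2 MiB */
        unsigned long nodemask = 1UL << 0;      /* bind to node 0 */
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
                return 1;
        memset(buf, 0, len);                    /* fault the pages in */

        /* After this series, unmovable pages in the range surface as EIO
         * when MPOL_MF_MOVE and MPOL_MF_STRICT are both requested. */
        if (mbind(buf, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
                  MPOL_MF_MOVE | MPOL_MF_STRICT))
                printf("mbind: %s\n", strerror(errno));

        return 0;
}
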
index 86432650f829877cf7aa031e540909071ba56ec9..ed70c4e8e52a505f431fdeefaf209694a4b03093 100644 (file)
@@ -403,6 +403,30 @@ void __put_devmap_managed_page(struct page *page)
 
                mem_cgroup_uncharge(page);
 
+               /*
+                * When a device_private page is freed, the page->mapping field
+                * may still contain a (stale) mapping value. For example, the
+                * lower bits of page->mapping may still identify the page as
+                * an anonymous page. Ultimately, this entire field is just
+                * stale and wrong, and it will cause errors if not cleared.
+                * One example is:
+                *
+                *  migrate_vma_pages()
+                *    migrate_vma_insert_page()
+                *      page_add_new_anon_rmap()
+                *        __page_set_anon_rmap()
+                *          ...checks page->mapping, via PageAnon(page) call,
+                *            and incorrectly concludes that the page is an
+                *            anonymous page. Therefore, it incorrectly,
+                *            silently fails to set up the new anon rmap.
+                *
+                * For other types of ZONE_DEVICE pages, migration is either
+                * handled differently or not done at all, so there is no need
+                * to clear page->mapping.
+                */
+               if (is_device_private_page(page))
+                       page->mapping = NULL;
+
                page->pgmap->ops->page_free(page);
        } else if (!count)
                __put_page(page);
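
To make the failure mode concrete, here is a stand-alone sketch (not from the patch; demo_page is a hypothetical stand-in for struct page) of the test that gets fooled: PageAnon() only looks at the low bit of page->mapping, so a stale value left over from the page's previous life as an anonymous page is enough to misclassify it.

#include <stdio.h>

/* Illustrative only; PAGE_MAPPING_ANON mirrors the kernel's 0x1 encoding. */
#define PAGE_MAPPING_ANON       0x1UL

struct demo_page {
        void *mapping;
};

static int looks_anon(const struct demo_page *page)
{
        return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

int main(void)
{
        /* A "freed" device-private page still carrying an old anon mapping
         * value: the low bit is set, so it is misread as anonymous, which
         * is why the patch clears ->mapping on free. */
        struct demo_page p = { .mapping = (void *)(0x1234560UL | PAGE_MAPPING_ANON) };

        printf("looks_anon: %d\n", looks_anon(&p));
        return 0;
}
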
index e5dfe2ae6b0d5dfc4a0b38c58c8e2c762b0b7696..003377e2423232614525ae2e6cbdda9695aca75b 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1475,7 +1475,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        /*
                         * No need to invalidate here it will synchronize on
                         * against the special swap migration pte.
+                        *
+                        * The assignment to subpage above was computed from a
+                        * swap PTE which results in an invalid pointer.
+                        * Since only PAGE_SIZE pages can currently be
+                        * migrated, just set it to page. This will need to be
+                        * changed when hugepage migrations to device private
+                        * memory are supported.
                         */
+                       subpage = page;
                        goto discard;
                }
 
index 626d8c74b973f173d3062ee118580b649d35073a..2bed4761f2795695b2e970c304f8985aeacef9f6 100644 (file)
@@ -1466,7 +1466,7 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
 
        shmem_pseudo_vma_init(&pvma, info, hindex);
        page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
-                       HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
+                       HPAGE_PMD_ORDER, &pvma, 0, numa_node_id());
        shmem_pseudo_vma_destroy(&pvma);
        if (page)
                prep_transhuge_page(page);
index 2a09796edef8d53d257dedada5b1a40bc21b4c16..98e924864554c5dbbd4804f51af20639eb413694 100644 (file)
@@ -147,7 +147,7 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
                                       bool to_user)
 {
        /* Reject if object wraps past end of memory. */
-       if (ptr + n < ptr)
+       if (ptr + (n - 1) < ptr)
                usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
 
        /* Reject if NULL or ZERO-allocation. */
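
A quick stand-alone illustration (not part of the patch) of why the check moves to n - 1: an object whose last byte sits at the very top of the address space has ptr + n == 0, so the end-exclusive form flags a valid copy as a wrap, while the end-inclusive form only trips on a genuine wrap.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

static bool wraps_old(unsigned long ptr, unsigned long n)
{
        return ptr + n < ptr;           /* end-exclusive: wraps for the last object */
}

static bool wraps_new(unsigned long ptr, unsigned long n)
{
        return ptr + (n - 1) < ptr;     /* end-inclusive: only a real wrap trips it */
}

int main(void)
{
        unsigned long ptr = ULONG_MAX - 7;      /* 8-byte object ending at the top */

        printf("old: %d, new: %d\n", wraps_old(ptr, 8), wraps_new(ptr, 8));
        /* prints "old: 1, new: 0" -- the old check is a false positive here */
        return 0;
}
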
index e0fc963acc41eb33c843886cab669844abc6b16b..7ba11e12a11f33b38a007a038bbfc7dee2fe48b3 100644 (file)
@@ -3278,10 +3278,20 @@ retry:
                if (va == NULL)
                        goto overflow;
 
+               /*
+                * If the required width exceeds the current VA block, move
+                * base downwards and then recheck.
+                */
+               if (base + end > va->va_end) {
+                       base = pvm_determine_end_from_reverse(&va, align) - end;
+                       term_area = area;
+                       continue;
+               }
+
                /*
                 * If this VA does not fit, move base downwards and recheck.
                 */
-               if (base + start < va->va_start || base + end > va->va_end) {
+               if (base + start < va->va_start) {
                        va = node_to_va(rb_prev(&va->rb_node));
                        base = pvm_determine_end_from_reverse(&va, align) - end;
                        term_area = area;
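
A stand-alone sketch (hypothetical helper names, not from the patch) of the two fit conditions the hunk separates: the candidate area occupies [base + start, base + end); when the upper edge spills past va_end the base is pulled down and the same block is rechecked, and only a too-low lower edge causes a step to the previous block.

/* Illustrative helpers; struct vmap_area fields as used in the hunk above. */
static bool upper_edge_fits(unsigned long base, unsigned long end,
                            const struct vmap_area *va)
{
        return base + end <= va->va_end;        /* if false: lower base, recheck same block */
}

static bool lower_edge_fits(unsigned long base, unsigned long start,
                            const struct vmap_area *va)
{
        return base + start >= va->va_start;    /* if false: step to the previous block */
}
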
index dbdc46a84f63089de614a2807ef62a70f7c14968..c77d1e3761a7f191f5e274b281a9ca132b64fc6f 100644 (file)
@@ -88,9 +88,6 @@ struct scan_control {
        /* Can pages be swapped as part of reclaim? */
        unsigned int may_swap:1;
 
-       /* e.g. boosted watermark reclaim leaves slabs alone */
-       unsigned int may_shrinkslab:1;
-
        /*
         * Cgroups are not reclaimed below their configured memory.low,
         * unless we threaten to OOM. If any cgroups are skipped due to
@@ -2714,10 +2711,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
                        shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
                        node_lru_pages += lru_pages;
 
-                       if (sc->may_shrinkslab) {
-                               shrink_slab(sc->gfp_mask, pgdat->node_id,
-                                   memcg, sc->priority);
-                       }
+                       shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
+                                       sc->priority);
 
                        /* Record the group's reclaim efficiency */
                        vmpressure(sc->gfp_mask, memcg, false,
@@ -3194,7 +3189,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
                .may_swap = 1,
-               .may_shrinkslab = 1,
        };
 
        /*
@@ -3238,7 +3232,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
                .may_unmap = 1,
                .reclaim_idx = MAX_NR_ZONES - 1,
                .may_swap = !noswap,
-               .may_shrinkslab = 1,
        };
        unsigned long lru_pages;
 
@@ -3286,7 +3279,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
                .may_swap = may_swap,
-               .may_shrinkslab = 1,
        };
 
        set_task_reclaim_state(current, &sc.reclaim_state);
@@ -3598,7 +3590,6 @@ restart:
                 */
                sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
                sc.may_swap = !nr_boost_reclaim;
-               sc.may_shrinkslab = !nr_boost_reclaim;
 
                /*
                 * Do some background aging of the anon list, to give
index e0b4edcb88c8c63dd307b0648ad34e3a27ce0ee8..c963831d354f8058447888c049c41178fe9a4d55 100644 (file)
@@ -380,14 +380,12 @@ void workingset_update_node(struct xa_node *node)
        if (node->count && node->count == node->nr_values) {
                if (list_empty(&node->private_list)) {
                        list_lru_add(&shadow_nodes, &node->private_list);
-                       __inc_lruvec_page_state(virt_to_page(node),
-                                               WORKINGSET_NODES);
+                       __inc_lruvec_slab_state(node, WORKINGSET_NODES);
                }
        } else {
                if (!list_empty(&node->private_list)) {
                        list_lru_del(&shadow_nodes, &node->private_list);
-                       __dec_lruvec_page_state(virt_to_page(node),
-                                               WORKINGSET_NODES);
+                       __dec_lruvec_slab_state(node, WORKINGSET_NODES);
                }
        }
 }
@@ -480,7 +478,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
        }
 
        list_lru_isolate(lru, item);
-       __dec_lruvec_page_state(virt_to_page(node), WORKINGSET_NODES);
+       __dec_lruvec_slab_state(node, WORKINGSET_NODES);
 
        spin_unlock(lru_lock);
 
@@ -503,7 +501,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
         * shadow entries we were tracking ...
         */
        xas_store(&xas, NULL);
-       __inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
+       __inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);
 
 out_invalid:
        xa_unlock_irq(&mapping->i_pages);
index 1a029a7432ee56635f9dcde8f4c5ad2b1e2984ed..ed19d98c9dcd1641c5e66d27d586e93f43fadce1 100644 (file)
@@ -817,9 +817,19 @@ out:
 static void z3fold_destroy_pool(struct z3fold_pool *pool)
 {
        kmem_cache_destroy(pool->c_handle);
-       z3fold_unregister_migration(pool);
-       destroy_workqueue(pool->release_wq);
+
+       /*
+        * We need to destroy pool->compact_wq before pool->release_wq,
+        * as any pending work on pool->compact_wq will call
+        * queue_work(pool->release_wq, &pool->work).
+        *
+        * There are still outstanding pages until both workqueues are drained,
+        * so we cannot unregister migration until then.
+        */
+
        destroy_workqueue(pool->compact_wq);
+       destroy_workqueue(pool->release_wq);
+       z3fold_unregister_migration(pool);
        kfree(pool);
 }
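
A stand-alone sketch of the ordering constraint described in the comment above (hypothetical demo_pool type, not z3fold's actual fields): work running on the producer workqueue re-queues onto the consumer workqueue, so the producer must be drained and destroyed while the consumer still exists.

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_pool {
        struct workqueue_struct *compact_wq;
        struct workqueue_struct *release_wq;
        struct work_struct compact_work;
        struct work_struct release_work;
};

static void compact_fn(struct work_struct *work)
{
        struct demo_pool *pool = container_of(work, struct demo_pool, compact_work);

        queue_work(pool->release_wq, &pool->release_work);      /* consumer must still be alive */
}

static void demo_pool_destroy(struct demo_pool *pool)
{
        destroy_workqueue(pool->compact_wq);    /* drains compact_fn() first */
        destroy_workqueue(pool->release_wq);    /* now safe: nothing re-queues onto it */
}
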
 
index 85571e90191fc35fad9aca49cda400ae75b9751d..bfeab44f81d077b5ac76bd1bf117ec148d010c79 100644 (file)
@@ -245,7 +245,7 @@ int main(int argc, char *argv[])
 
        if (argc != 2) {
                printf(
-                       "Sintax:  %s fbdev\n"
+                       "Syntax:  %s fbdev\n"
                        "Usually: /dev/fb0, /dev/fb1...\n", argv[0]);
                return -1;
        }
index 9a94672e7adccff05901782c47a2473185e05831..ade69913106540e4e1918b88654a8914f3bbe29d 100644 (file)
@@ -1228,24 +1228,11 @@ hashalg_fail:
 
 static int __init init_digests(void)
 {
-       u8 digest[TPM_MAX_DIGEST_SIZE];
-       int ret;
-       int i;
-
-       ret = tpm_get_random(chip, digest, TPM_MAX_DIGEST_SIZE);
-       if (ret < 0)
-               return ret;
-       if (ret < TPM_MAX_DIGEST_SIZE)
-               return -EFAULT;
-
        digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
                          GFP_KERNEL);
        if (!digests)
                return -ENOMEM;
 
-       for (i = 0; i < chip->nr_allocated_banks; i++)
-               memcpy(digests[i].digest, digest, TPM_MAX_DIGEST_SIZE);
-
        return 0;
 }
 
index 485edaba0037e5d6a477d2eb0e8b62f75fe06858..5bf24fb819d28f12cb00012dd73aa65db65c60cc 100644 (file)
@@ -6051,6 +6051,24 @@ void snd_hda_gen_free(struct hda_codec *codec)
 }
 EXPORT_SYMBOL_GPL(snd_hda_gen_free);
 
+/**
+ * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting
+ * @codec: the HDA codec
+ *
+ * This can be put as patch_ops reboot_notify function.
+ */
+void snd_hda_gen_reboot_notify(struct hda_codec *codec)
+{
+       /* Make the codec enter D3 to avoid spurious noises from the internal
+        * speaker during (and after) reboot
+        */
+       snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
+       snd_hda_codec_write(codec, codec->core.afg, 0,
+                           AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+       msleep(10);
+}
+EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify);
+
 #ifdef CONFIG_PM
 /**
  * snd_hda_gen_check_power_status - check the loopback power save state
@@ -6078,6 +6096,7 @@ static const struct hda_codec_ops generic_patch_ops = {
        .init = snd_hda_gen_init,
        .free = snd_hda_gen_free,
        .unsol_event = snd_hda_jack_unsol_event,
+       .reboot_notify = snd_hda_gen_reboot_notify,
 #ifdef CONFIG_PM
        .check_power_status = snd_hda_gen_check_power_status,
 #endif
@@ -6100,7 +6119,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec)
 
        err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
        if (err < 0)
-               return err;
+               goto error;
 
        err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
        if (err < 0)
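
With the helper exported, a codec driver that already relies on the generic parser can wire it up directly instead of open-coding the D3 sequence. A minimal sketch (hypothetical driver; the pattern mirrors the generic_patch_ops hunk above):

static const struct hda_codec_ops my_codec_patch_ops = {
        .build_controls = snd_hda_gen_build_controls,
        .build_pcms     = snd_hda_gen_build_pcms,
        .init           = snd_hda_gen_init,
        .free           = snd_hda_gen_free,
        .unsol_event    = snd_hda_jack_unsol_event,
        .reboot_notify  = snd_hda_gen_reboot_notify,
};
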
index 35a670a71c423097e8d65e19b283fe4118aea35f..5f199dcb0d188eb62ff84fac843e6a94bb027e57 100644 (file)
@@ -332,6 +332,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
                                  struct auto_pin_cfg *cfg);
 int snd_hda_gen_build_controls(struct hda_codec *codec);
 int snd_hda_gen_build_pcms(struct hda_codec *codec);
+void snd_hda_gen_reboot_notify(struct hda_codec *codec);
 
 /* standard jack event callbacks */
 void snd_hda_gen_hp_automute(struct hda_codec *codec,
index a6d8c0d77b8466c06006c33860384e07c911cb74..99fc0917339bbef2e0911b7119ddf89072923999 100644 (file)
@@ -2508,6 +2508,9 @@ static const struct pci_device_id azx_ids[] = {
        /* AMD, X370 & co */
        { PCI_DEVICE(0x1022, 0x1457),
          .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
+       /* AMD, X570 & co */
+       { PCI_DEVICE(0x1022, 0x1487),
+         .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
        /* AMD Stoney */
        { PCI_DEVICE(0x1022, 0x157a),
          .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
index f299f137eaea2b5b1c4871008928e31d966573c9..14298ef45b21bdf71db7dc864542ea8a7213331f 100644 (file)
@@ -163,23 +163,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
 {
        struct conexant_spec *spec = codec->spec;
 
-       switch (codec->core.vendor_id) {
-       case 0x14f12008: /* CX8200 */
-       case 0x14f150f2: /* CX20722 */
-       case 0x14f150f4: /* CX20724 */
-               break;
-       default:
-               return;
-       }
-
        /* Turn the problematic codec into D3 to avoid spurious noises
           from the internal speaker during (and after) reboot */
        cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
-
-       snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
-       snd_hda_codec_write(codec, codec->core.afg, 0,
-                           AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-       msleep(10);
+       snd_hda_gen_reboot_notify(codec);
 }
 
 static void cx_auto_free(struct hda_codec *codec)
index de224cbea7a077b000ba95b9eed6eb1571a1510f..e333b3e30e316034420b1dd4e24e031d18812b3d 100644 (file)
@@ -869,15 +869,6 @@ static void alc_reboot_notify(struct hda_codec *codec)
                alc_shutup(codec);
 }
 
-/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
-static void alc_d3_at_reboot(struct hda_codec *codec)
-{
-       snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
-       snd_hda_codec_write(codec, codec->core.afg, 0,
-                           AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-       msleep(10);
-}
-
 #define alc_free       snd_hda_gen_free
 
 #ifdef CONFIG_PM
@@ -5152,7 +5143,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
        struct alc_spec *spec = codec->spec;
 
        if (action == HDA_FIXUP_ACT_PRE_PROBE) {
-               spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
+               spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */
                spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
                codec->power_save_node = 0; /* avoid click noises */
                snd_hda_apply_pincfgs(codec, pincfgs);
@@ -6987,6 +6978,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
        SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
index 7498b5191b68e4cf0368599998160ec0eb17f29d..b5927c3d5bc0b0657360ef8aa923c0aa344422bb 100644 (file)
@@ -68,6 +68,7 @@ struct mixer_build {
        unsigned char *buffer;
        unsigned int buflen;
        DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
+       DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
        struct usb_audio_term oterm;
        const struct usbmix_name_map *map;
        const struct usbmix_selector_map *selector_map;
@@ -744,6 +745,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
                return -EINVAL;
        if (!desc->bNrInPins)
                return -EINVAL;
+       if (desc->bLength < sizeof(*desc) + desc->bNrInPins)
+               return -EINVAL;
 
        switch (state->mixer->protocol) {
        case UAC_VERSION_1:
@@ -773,16 +776,25 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
  * parse the source unit recursively until it reaches to a terminal
  * or a branched unit.
  */
-static int check_input_term(struct mixer_build *state, int id,
+static int __check_input_term(struct mixer_build *state, int id,
                            struct usb_audio_term *term)
 {
        int protocol = state->mixer->protocol;
        int err;
        void *p1;
+       unsigned char *hdr;
 
        memset(term, 0, sizeof(*term));
-       while ((p1 = find_audio_control_unit(state, id)) != NULL) {
-               unsigned char *hdr = p1;
+       for (;;) {
+               /* a loop in the terminal chain? */
+               if (test_and_set_bit(id, state->termbitmap))
+                       return -EINVAL;
+
+               p1 = find_audio_control_unit(state, id);
+               if (!p1)
+                       break;
+
+               hdr = p1;
                term->id = id;
 
                if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) {
@@ -800,7 +812,7 @@ static int check_input_term(struct mixer_build *state, int id,
 
                                        /* call recursively to verify that the
                                         * referenced clock entity is valid */
-                                       err = check_input_term(state, d->bCSourceID, term);
+                                       err = __check_input_term(state, d->bCSourceID, term);
                                        if (err < 0)
                                                return err;
 
@@ -834,7 +846,7 @@ static int check_input_term(struct mixer_build *state, int id,
                        case UAC2_CLOCK_SELECTOR: {
                                struct uac_selector_unit_descriptor *d = p1;
                                /* call recursively to retrieve the channel info */
-                               err = check_input_term(state, d->baSourceID[0], term);
+                               err = __check_input_term(state, d->baSourceID[0], term);
                                if (err < 0)
                                        return err;
                                term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -897,7 +909,7 @@ static int check_input_term(struct mixer_build *state, int id,
 
                                /* call recursively to verify that the
                                 * referenced clock entity is valid */
-                               err = check_input_term(state, d->bCSourceID, term);
+                               err = __check_input_term(state, d->bCSourceID, term);
                                if (err < 0)
                                        return err;
 
@@ -948,7 +960,7 @@ static int check_input_term(struct mixer_build *state, int id,
                        case UAC3_CLOCK_SELECTOR: {
                                struct uac_selector_unit_descriptor *d = p1;
                                /* call recursively to retrieve the channel info */
-                               err = check_input_term(state, d->baSourceID[0], term);
+                               err = __check_input_term(state, d->baSourceID[0], term);
                                if (err < 0)
                                        return err;
                                term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -964,7 +976,7 @@ static int check_input_term(struct mixer_build *state, int id,
                                        return -EINVAL;
 
                                /* call recursively to retrieve the channel info */
-                               err = check_input_term(state, d->baSourceID[0], term);
+                               err = __check_input_term(state, d->baSourceID[0], term);
                                if (err < 0)
                                        return err;
 
@@ -982,6 +994,15 @@ static int check_input_term(struct mixer_build *state, int id,
        return -ENODEV;
 }
 
+
+static int check_input_term(struct mixer_build *state, int id,
+                           struct usb_audio_term *term)
+{
+       memset(term, 0, sizeof(*term));
+       memset(state->termbitmap, 0, sizeof(state->termbitmap));
+       return __check_input_term(state, id, term);
+}
+
 /*
  * Feature Unit
  */
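
The loop guard added above follows the usual visited-bitmap pattern: mark each unit id before following its source, and bail out the moment an id repeats. A stand-alone sketch (next_source_id() is a hypothetical helper, not a real function in this driver):

static int walk_terminal_chain(struct mixer_build *state, int id)
{
        bitmap_zero(state->termbitmap, MAX_ID_ELEMS);

        while (id) {
                if (test_and_set_bit(id, state->termbitmap))
                        return -EINVAL;          /* malformed descriptors: chain loops back */
                id = next_source_id(state, id);  /* hypothetical: follow baSourceID/bCSourceID */
        }
        return 0;
}
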