index.txt - File index, Mailing list and Links (this document)
-intel-pstate.txt - Intel pstate cpufreq driver specific file.
-
pcc-cpufreq.txt - PCC cpufreq driver specific file.
compatible : Must be "ams,as3645a".
reg : The I2C address of the device. Typically 0x30.
+#address-cells : 1
+#size-cells : 0
-Required properties of the "flash" child node
-=============================================
+Required properties of the flash child node (0)
+===============================================
+reg: 0
flash-timeout-us: Flash timeout in microseconds. The value must be in
the range [100000, 850000] and divisible by 50000.
flash-max-microamp: Maximum flash current in microamperes. Has to be
and divisible by 50000.
-Optional properties of the "flash" child node
-=============================================
+Optional properties of the flash child node
+===========================================
label : The label of the flash LED.
-Required properties of the "indicator" child node
-=================================================
+Required properties of the indicator child node (1)
+===================================================
+reg: 1
led-max-microamp: Maximum indicator current. The allowed values are
2500, 5000, 7500 and 10000.
-Optional properties of the "indicator" child node
-=================================================
+Optional properties of the indicator child node
+===============================================
label : The label of the indicator LED.
=======
as3645a@30 {
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <0x30>;
compatible = "ams,as3645a";
- flash {
+ flash@0 {
+ reg = <0x0>;
flash-timeout-us = <150000>;
flash-max-microamp = <320000>;
led-max-microamp = <60000>;
ams,input-max-microamp = <1750000>;
label = "as3645a:flash";
};
- indicator {
+ indicator@1 {
+ reg = <0x1>;
led-max-microamp = <10000>;
label = "as3645a:indicator";
};
- "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART.
- "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART.
- "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART.
+ - "renesas,scif-r8a77970" for R8A77970 (R-Car V3M) SCIF compatible UART.
+ - "renesas,hscif-r8a77970" for R8A77970 (R-Car V3M) HSCIF compatible UART.
- "renesas,scif-r8a77995" for R8A77995 (R-Car D3) SCIF compatible UART.
- "renesas,hscif-r8a77995" for R8A77995 (R-Car D3) HSCIF compatible UART.
- "renesas,scifa-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFA compatible UART.
};
Device drivers can export attributes via their sysfs directories.
-Drivers can declare attributes using a DRIVER_ATTR macro that works
-identically to the DEVICE_ATTR macro.
+Drivers can declare attributes using the DRIVER_ATTR_RW and DRIVER_ATTR_RO
+macros, which work identically to the DEVICE_ATTR_RW and DEVICE_ATTR_RO
+macros.
Example:
-DRIVER_ATTR(debug,0644,show_debug,store_debug);
+DRIVER_ATTR_RW(debug);
This is equivalent to declaring:
Declaring:
-DRIVER_ATTR(_name, _mode, _show, _store)
+DRIVER_ATTR_RO(_name)
+DRIVER_ATTR_RW(_name)
Creation/Removal:
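For illustration, a minimal sketch of how the pieces fit together (the
my_driver and my_debug names are hypothetical and not part of this patch).
DRIVER_ATTR_RW(debug) looks up helpers named debug_show() and debug_store(),
and the resulting driver_attr_debug is then registered and removed with
driver_create_file()/driver_remove_file():

#include <linux/device.h>
#include <linux/kernel.h>

static bool my_debug;

/* DRIVER_ATTR_RW(debug) references these helpers by name: */
static ssize_t debug_show(struct device_driver *drv, char *buf)
{
	return sprintf(buf, "%d\n", my_debug);
}

static ssize_t debug_store(struct device_driver *drv, const char *buf,
			   size_t count)
{
	if (kstrtobool(buf, &my_debug))
		return -EINVAL;
	return count;
}
/* Defines struct driver_attribute driver_attr_debug: */
static DRIVER_ATTR_RW(debug);

/* Hypothetical driver object the attribute hangs off: */
extern struct device_driver my_driver;

static int my_driver_init(void)
{
	/* Creates the "debug" file in the driver's sysfs directory. */
	return driver_create_file(&my_driver, &driver_attr_debug);
}

static void my_driver_exit(void)
{
	driver_remove_file(&my_driver, &driver_attr_debug);
}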
S: Maintained
F: drivers/media/rc/mtk-cir.c
+MEDIATEK PMIC LED DRIVER
+M: Sean Wang <sean.wang@mediatek.com>
+S: Maintained
+F: drivers/leds/leds-mt6323.c
+F: Documentation/devicetree/bindings/leds/leds-mt6323.txt
+
MEDIATEK ETHERNET DRIVER
M: Felix Fietkau <nbd@openwrt.org>
M: John Crispin <john@phrozen.org>
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
NAME = Fearless Coyote
# *DOCUMENTATION*
PHONY += kselftest
kselftest:
- $(Q)$(MAKE) -C tools/testing/selftests run_tests
+ $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests run_tests
PHONY += kselftest-clean
kselftest-clean:
- $(Q)$(MAKE) -C tools/testing/selftests clean
+ $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests clean
PHONY += kselftest-merge
kselftest-merge:
clock-frequency = <400000>;
as3645a@30 {
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <0x30>;
compatible = "ams,as3645a";
- flash {
+ flash@0 {
+ reg = <0x0>;
flash-timeout-us = <150000>;
flash-max-microamp = <320000>;
led-max-microamp = <60000>;
- peak-current-limit = <1750000>;
+ ams,input-max-microamp = <1750000>;
};
- indicator {
+ indicator@1 {
+ reg = <0x1>;
led-max-microamp = <10000>;
};
};
/* Find an entry in the third-level page table. */
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_phys(dir,addr) (pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
+#define pte_offset_phys(dir,addr) (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr))))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
* booted in EL1 or EL2 respectively.
*/
ENTRY(el2_setup)
+ msr SPsel, #1 // We want to use SP_EL{1,2}
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
b.eq 1f
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
- { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
{ do_bad, SIGBUS, 0, "unknown 8" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
BEGIN_FTR_SECTION
mtspr SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+/* Move canary into DSISR to check for later */
+BEGIN_FTR_SECTION
+ li r0, 0x7fff
+ mtspr SPRN_HDSISR, r0
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
ld r0, VCPU_GPR(R0)(r4)
ld r4, VCPU_GPR(R4)(r4)
kvmppc_hdsi:
ld r3, VCPU_KVM(r9)
lbz r0, KVM_RADIX(r3)
- cmpwi r0, 0
mfspr r4, SPRN_HDAR
mfspr r6, SPRN_HDSISR
+BEGIN_FTR_SECTION
+ /* Look for DSISR canary. If we find it, retry instruction */
+ cmpdi r6, 0x7fff
+ beq 6f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+ cmpwi r0, 0
bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
/* HPTE not found fault or protection fault? */
andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static void __init timer_setup(void)
+static void __init um_timer_setup(void)
{
int err;
void __init time_init(void)
{
timer_set_signal_handler();
- late_time_init = timer_setup;
+ late_time_init = um_timer_setup;
}
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, snb_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_DENVERTON, glm_cstates),
+
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GEMINI_LAKE, glm_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, hsw_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_DENVERTON, hsw_rapl_init),
+
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GEMINI_LAKE, hsw_rapl_init),
{},
};
static struct intel_uncore_type skx_uncore_iio = {
.name = "iio",
.num_counters = 4,
- .num_boxes = 5,
+ .num_boxes = 6,
.perf_ctr_bits = 48,
.event_ctl = SKX_IIO0_MSR_PMON_CTL0,
.perf_ctr = SKX_IIO0_MSR_PMON_CTR0,
static struct intel_uncore_type skx_uncore_irp = {
.name = "irp",
.num_counters = 2,
- .num_boxes = 5,
+ .num_boxes = 6,
.perf_ctr_bits = 48,
.event_ctl = SKX_IRP0_MSR_PMON_CTL0,
.perf_ctr = SKX_IRP0_MSR_PMON_CTR0,
case INTEL_FAM6_ATOM_SILVERMONT1:
case INTEL_FAM6_ATOM_SILVERMONT2:
case INTEL_FAM6_ATOM_AIRMONT:
+
+ case INTEL_FAM6_ATOM_GOLDMONT:
+ case INTEL_FAM6_ATOM_DENVERTON:
+
+ case INTEL_FAM6_ATOM_GEMINI_LAKE:
+
+ case INTEL_FAM6_XEON_PHI_KNL:
+ case INTEL_FAM6_XEON_PHI_KNM:
if (idx == PERF_MSR_SMI)
return true;
break;
ksig->ka.sa.sa_restorer)
sp = (unsigned long) ksig->ka.sa.sa_restorer;
- if (fpu->fpstate_active) {
+ if (fpu->initialized) {
unsigned long fx_aligned, math_size;
sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
# define __ASM_FORM_COMMA(x) " " #x ","
#endif
-#ifdef CONFIG_X86_32
+#ifndef __x86_64__
+/* 32 bit */
# define __ASM_SEL(a,b) __ASM_FORM(a)
# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
#else
+/* 64 bit */
# define __ASM_SEL(a,b) __ASM_FORM(b)
# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
#endif
* gets set up by the containing function. If you forget to do this, objtool
* may print a "call without frame pointer save/setup" warning.
*/
-register unsigned int __asm_call_sp asm("esp");
-#define ASM_CALL_CONSTRAINT "+r" (__asm_call_sp)
+register unsigned long current_stack_pointer asm(_ASM_SP);
+#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
#endif
#endif /* _ASM_X86_ASM_H */
/*
* High level FPU state handling functions:
*/
-extern void fpu__activate_curr(struct fpu *fpu);
-extern void fpu__activate_fpstate_read(struct fpu *fpu);
-extern void fpu__activate_fpstate_write(struct fpu *fpu);
-extern void fpu__current_fpstate_write_begin(void);
-extern void fpu__current_fpstate_write_end(void);
+extern void fpu__initialize(struct fpu *fpu);
+extern void fpu__prepare_read(struct fpu *fpu);
+extern void fpu__prepare_write(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(struct fpu *fpu);
extern int fpu__restore_sig(void __user *buf, int ia32_frame);
err; \
})
-#define check_insn(insn, output, input...) \
-({ \
- int err; \
+#define kernel_insn(insn, output, input...) \
asm volatile("1:" #insn "\n\t" \
"2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: movl $-1,%[err]\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE(1b, 3b) \
- : [err] "=r" (err), output \
- : "0"(0), input); \
- err; \
-})
+ _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore) \
+ : output : input)
static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
- int err;
-
if (IS_ENABLED(CONFIG_X86_32)) {
- err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+ kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
} else {
if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
- err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+ kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
} else {
/* See comment in copy_fxregs_to_kernel() below. */
- err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
+ kernel_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
}
}
- /* Copying from a kernel buffer to FPU registers should never fail: */
- WARN_ON_FPU(err);
}
static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
- int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-
- WARN_ON_FPU(err);
+ kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
static inline int copy_user_to_fregs(struct fregs_state __user *fx)
* Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
* XSAVE area format.
*/
-#define XSTATE_XRESTORE(st, lmask, hmask, err) \
+#define XSTATE_XRESTORE(st, lmask, hmask) \
asm volatile(ALTERNATIVE(XRSTOR, \
XRSTORS, X86_FEATURE_XSAVES) \
"\n" \
- "xor %[err], %[err]\n" \
"3:\n" \
- ".pushsection .fixup,\"ax\"\n" \
- "4: movl $-2, %[err]\n" \
- "jmp 3b\n" \
- ".popsection\n" \
- _ASM_EXTABLE(661b, 4b) \
- : [err] "=r" (err) \
+ _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
+ : \
: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
: "memory")
else
XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
- /* We should never fault when copying from a kernel buffer: */
+ /*
+ * We should never fault when copying from a kernel buffer, and the FPU
+ * state we set at boot time should be valid.
+ */
WARN_ON_FPU(err);
}
u32 hmask = mask >> 32;
int err;
- WARN_ON(!alternatives_patched);
+ WARN_ON_FPU(!alternatives_patched);
XSTATE_XSAVE(xstate, lmask, hmask, err);
{
u32 lmask = mask;
u32 hmask = mask >> 32;
- int err;
-
- XSTATE_XRESTORE(xstate, lmask, hmask, err);
- /* We should never fault when copying from a kernel buffer: */
- WARN_ON_FPU(err);
+ XSTATE_XRESTORE(xstate, lmask, hmask);
}
/*
*/
static inline void fpregs_deactivate(struct fpu *fpu)
{
- WARN_ON_FPU(!fpu->fpregs_active);
-
- fpu->fpregs_active = 0;
this_cpu_write(fpu_fpregs_owner_ctx, NULL);
trace_x86_fpu_regs_deactivated(fpu);
}
static inline void fpregs_activate(struct fpu *fpu)
{
- WARN_ON_FPU(fpu->fpregs_active);
-
- fpu->fpregs_active = 1;
this_cpu_write(fpu_fpregs_owner_ctx, fpu);
trace_x86_fpu_regs_activated(fpu);
}
-/*
- * The question "does this thread have fpu access?"
- * is slightly racy, since preemption could come in
- * and revoke it immediately after the test.
- *
- * However, even in that very unlikely scenario,
- * we can just assume we have FPU access - typically
- * to save the FP state - we'll just take a #NM
- * fault and get the FPU access back.
- */
-static inline int fpregs_active(void)
-{
- return current->thread.fpu.fpregs_active;
-}
-
/*
* FPU state switching for scheduling.
*
static inline void
switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
- if (old_fpu->fpregs_active) {
+ if (old_fpu->initialized) {
if (!copy_fpregs_to_fpstate(old_fpu))
old_fpu->last_cpu = -1;
else
old_fpu->last_cpu = cpu;
/* But leave fpu_fpregs_owner_ctx! */
- old_fpu->fpregs_active = 0;
trace_x86_fpu_regs_deactivated(old_fpu);
} else
old_fpu->last_cpu = -1;
static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
{
bool preload = static_cpu_has(X86_FEATURE_FPU) &&
- new_fpu->fpstate_active;
+ new_fpu->initialized;
if (preload) {
if (!fpregs_state_valid(new_fpu, cpu))
struct fpu *fpu = &current->thread.fpu;
preempt_disable();
- if (!fpregs_active())
- fpregs_activate(fpu);
+ fpregs_activate(fpu);
preempt_enable();
}
/* Default value for fxregs_state.mxcsr: */
#define MXCSR_DEFAULT 0x1f80
+/* Copy both mxcsr & mxcsr_flags with a single u64 memcpy: */
+#define MXCSR_AND_FLAGS_SIZE sizeof(u64)
+
/*
* Software based FPU emulation state. This is arbitrary really,
* it matches the x87 format to make it easier to understand:
unsigned int last_cpu;
/*
- * @fpstate_active:
+ * @initialized:
*
- * This flag indicates whether this context is active: if the task
+ * This flag indicates whether this context is initialized: if the task
* is not running then we can restore from this context, if the task
* is running then we should save into this context.
*/
- unsigned char fpstate_active;
-
- /*
- * @fpregs_active:
- *
- * This flag determines whether a given context is actively
- * loaded into the FPU's registers and that those registers
- * represent the task's current FPU state.
- *
- * Note the interaction with fpstate_active:
- *
- * # task does not use the FPU:
- * fpstate_active == 0
- *
- * # task uses the FPU and regs are active:
- * fpstate_active == 1 && fpregs_active == 1
- *
- * # the regs are inactive but still match fpstate:
- * fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu
- *
- * The third state is what we use for the lazy restore optimization
- * on lazy-switching CPUs.
- */
- unsigned char fpregs_active;
+ unsigned char initialized;
/*
* @state:
void *get_xsave_addr(struct xregs_state *xsave, int xstate);
const void *get_xsave_field_ptr(int xstate_field);
int using_compacted_format(void);
-int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
- void __user *ubuf, struct xregs_state *xsave);
-int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
- struct xregs_state *xsave);
+int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
+int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
+int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf);
+int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf);
+
+/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
+extern int validate_xstate_header(const struct xstate_header *hdr);
+
#endif
*/
#ifndef __ASSEMBLY__
-static inline unsigned long current_stack_pointer(void)
-{
- unsigned long sp;
-#ifdef CONFIG_X86_64
- asm("mov %%rsp,%0" : "=g" (sp));
-#else
- asm("mov %%esp,%0" : "=g" (sp));
-#endif
- return sp;
-}
-
/*
* Walks up the stack frames to make sure that the specified object is
* entirely contained by a single stack frame.
TP_STRUCT__entry(
__field(struct fpu *, fpu)
- __field(bool, fpregs_active)
- __field(bool, fpstate_active)
+ __field(bool, initialized)
__field(u64, xfeatures)
__field(u64, xcomp_bv)
),
TP_fast_assign(
__entry->fpu = fpu;
- __entry->fpregs_active = fpu->fpregs_active;
- __entry->fpstate_active = fpu->fpstate_active;
+ __entry->initialized = fpu->initialized;
if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
__entry->xfeatures = fpu->state.xsave.header.xfeatures;
__entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv;
}
),
- TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
+ TP_printk("x86/fpu: %p initialized: %d xfeatures: %llx xcomp_bv: %llx",
__entry->fpu,
- __entry->fpregs_active,
- __entry->fpstate_active,
+ __entry->initialized,
__entry->xfeatures,
__entry->xcomp_bv
)
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
: "=r" (retval), "=&A"(x) \
- : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \
+ : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
"i" (errret), "0" (retval)); \
})
MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
struct desc_struct desc)
{
- u32 *p = (u32 *) &desc;
-
mcl->op = __HYPERVISOR_update_descriptor;
if (sizeof(maddr) == sizeof(long)) {
mcl->args[0] = maddr;
mcl->args[1] = *(unsigned long *)&desc;
} else {
+ u32 *p = (u32 *)&desc;
+
mcl->args[0] = maddr;
mcl->args[1] = maddr >> 32;
mcl->args[2] = *p++;
kernel_fpu_disable();
- if (fpu->fpregs_active) {
+ if (fpu->initialized) {
/*
* Ignore return value -- we don't care if reg state
* is clobbered.
{
struct fpu *fpu = &current->thread.fpu;
- if (fpu->fpregs_active)
+ if (fpu->initialized)
copy_kernel_to_fpregs(&fpu->state);
kernel_fpu_enable();
preempt_disable();
trace_x86_fpu_before_save(fpu);
- if (fpu->fpregs_active) {
+ if (fpu->initialized) {
if (!copy_fpregs_to_fpstate(fpu)) {
copy_kernel_to_fpregs(&fpu->state);
}
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
- dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
- if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
+ if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU))
return 0;
WARN_ON_FPU(src_fpu != &current->thread.fpu);
/*
* Save current FPU registers directly into the child
* FPU context, without any memory-to-memory copying.
- * In lazy mode, if the FPU context isn't loaded into
- * fpregs, CR0.TS will be set and do_device_not_available
- * will load the FPU context.
*
- * We have to do all this with preemption disabled,
- * mostly because of the FNSAVE case, because in that
- * case we must not allow preemption in the window
- * between the FNSAVE and us marking the context lazy.
- *
- * It shouldn't be an issue as even FNSAVE is plenty
- * fast in terms of critical section length.
+ * ( The function 'fails' in the FNSAVE case, which destroys
+ * register contents so we have to copy them back. )
*/
- preempt_disable();
if (!copy_fpregs_to_fpstate(dst_fpu)) {
- memcpy(&src_fpu->state, &dst_fpu->state,
- fpu_kernel_xstate_size);
-
+ memcpy(&src_fpu->state, &dst_fpu->state, fpu_kernel_xstate_size);
copy_kernel_to_fpregs(&src_fpu->state);
}
- preempt_enable();
trace_x86_fpu_copy_src(src_fpu);
trace_x86_fpu_copy_dst(dst_fpu);
* Activate the current task's in-memory FPU context,
* if it has not been used before:
*/
-void fpu__activate_curr(struct fpu *fpu)
+void fpu__initialize(struct fpu *fpu)
{
WARN_ON_FPU(fpu != &current->thread.fpu);
- if (!fpu->fpstate_active) {
+ if (!fpu->initialized) {
fpstate_init(&fpu->state);
trace_x86_fpu_init_state(fpu);
trace_x86_fpu_activate_state(fpu);
/* Safe to do for the current task: */
- fpu->fpstate_active = 1;
+ fpu->initialized = 1;
}
}
-EXPORT_SYMBOL_GPL(fpu__activate_curr);
+EXPORT_SYMBOL_GPL(fpu__initialize);
/*
* This function must be called before we read a task's fpstate.
*
- * If the task has not used the FPU before then initialize its
- * fpstate.
+ * There are two cases where this gets called:
+ *
+ * - for the current task (when coredumping), in which case we have
+ * to save the latest FPU registers into the fpstate,
+ *
+ * - or it's called for stopped tasks (ptrace), in which case the
+ * registers were already saved by the context-switch code when
+ * the task scheduled out - we only have to initialize the registers
+ * if they've never been initialized.
*
* If the task has used the FPU before then save it.
*/
-void fpu__activate_fpstate_read(struct fpu *fpu)
+void fpu__prepare_read(struct fpu *fpu)
{
- /*
- * If fpregs are active (in the current CPU), then
- * copy them to the fpstate:
- */
- if (fpu->fpregs_active) {
+ if (fpu == &current->thread.fpu) {
fpu__save(fpu);
} else {
- if (!fpu->fpstate_active) {
+ if (!fpu->initialized) {
fpstate_init(&fpu->state);
trace_x86_fpu_init_state(fpu);
trace_x86_fpu_activate_state(fpu);
/* Safe to do for current and for stopped child tasks: */
- fpu->fpstate_active = 1;
+ fpu->initialized = 1;
}
}
}
/*
* This function must be called before we write a task's fpstate.
*
- * If the task has used the FPU before then unlazy it.
+ * If the task has used the FPU before then invalidate any cached FPU registers.
* If the task has not used the FPU before then initialize its fpstate.
*
* After this function call, after registers in the fpstate are
* modified and the child task has woken up, the child task will
* restore the modified FPU state from the modified context. If we
- * didn't clear its lazy status here then the lazy in-registers
+ * didn't clear its cached status here then the cached in-registers
* state pending on its former CPU could be restored, corrupting
* the modifications.
*/
-void fpu__activate_fpstate_write(struct fpu *fpu)
+void fpu__prepare_write(struct fpu *fpu)
{
/*
* Only stopped child tasks can be used to modify the FPU
*/
WARN_ON_FPU(fpu == &current->thread.fpu);
- if (fpu->fpstate_active) {
- /* Invalidate any lazy state: */
+ if (fpu->initialized) {
+ /* Invalidate any cached state: */
__fpu_invalidate_fpregs_state(fpu);
} else {
fpstate_init(&fpu->state);
trace_x86_fpu_activate_state(fpu);
/* Safe to do for stopped child tasks: */
- fpu->fpstate_active = 1;
+ fpu->initialized = 1;
}
}
-/*
- * This function must be called before we write the current
- * task's fpstate.
- *
- * This call gets the current FPU register state and moves
- * it in to the 'fpstate'. Preemption is disabled so that
- * no writes to the 'fpstate' can occur from context
- * swiches.
- *
- * Must be followed by a fpu__current_fpstate_write_end().
- */
-void fpu__current_fpstate_write_begin(void)
-{
- struct fpu *fpu = &current->thread.fpu;
-
- /*
- * Ensure that the context-switching code does not write
- * over the fpstate while we are doing our update.
- */
- preempt_disable();
-
- /*
- * Move the fpregs in to the fpu's 'fpstate'.
- */
- fpu__activate_fpstate_read(fpu);
-
- /*
- * The caller is about to write to 'fpu'. Ensure that no
- * CPU thinks that its fpregs match the fpstate. This
- * ensures we will not be lazy and skip a XRSTOR in the
- * future.
- */
- __fpu_invalidate_fpregs_state(fpu);
-}
-
-/*
- * This function must be paired with fpu__current_fpstate_write_begin()
- *
- * This will ensure that the modified fpstate gets placed back in
- * the fpregs if necessary.
- *
- * Note: This function may be called whether or not an _actual_
- * write to the fpstate occurred.
- */
-void fpu__current_fpstate_write_end(void)
-{
- struct fpu *fpu = &current->thread.fpu;
-
- /*
- * 'fpu' now has an updated copy of the state, but the
- * registers may still be out of date. Update them with
- * an XRSTOR if they are active.
- */
- if (fpregs_active())
- copy_kernel_to_fpregs(&fpu->state);
-
- /*
- * Our update is done and the fpregs/fpstate are in sync
- * if necessary. Context switches can happen again.
- */
- preempt_enable();
-}
-
/*
* 'fpu__restore()' is called to copy FPU registers from
* the FPU fpstate to the live hw registers and to activate
*/
void fpu__restore(struct fpu *fpu)
{
- fpu__activate_curr(fpu);
+ fpu__initialize(fpu);
/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
kernel_fpu_disable();
{
preempt_disable();
- if (fpu->fpregs_active) {
- /* Ignore delayed exceptions from user space */
- asm volatile("1: fwait\n"
- "2:\n"
- _ASM_EXTABLE(1b, 2b));
- fpregs_deactivate(fpu);
+ if (fpu == &current->thread.fpu) {
+ if (fpu->initialized) {
+ /* Ignore delayed exceptions from user space */
+ asm volatile("1: fwait\n"
+ "2:\n"
+ _ASM_EXTABLE(1b, 2b));
+ fpregs_deactivate(fpu);
+ }
}
- fpu->fpstate_active = 0;
+ fpu->initialized = 0;
trace_x86_fpu_dropped(fpu);
* Make sure fpstate is cleared and initialized.
*/
if (static_cpu_has(X86_FEATURE_FPU)) {
- fpu__activate_curr(fpu);
+ preempt_disable();
+ fpu__initialize(fpu);
user_fpu_begin();
copy_init_fpstate_to_fpregs();
+ preempt_enable();
}
}
WARN_ON_FPU(!on_boot_cpu);
on_boot_cpu = 0;
- WARN_ON_FPU(current->thread.fpu.fpstate_active);
+ WARN_ON_FPU(current->thread.fpu.initialized);
}
/*
{
struct fpu *target_fpu = &target->thread.fpu;
- return target_fpu->fpstate_active ? regset->n : 0;
+ return target_fpu->initialized ? regset->n : 0;
}
int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
struct fpu *target_fpu = &target->thread.fpu;
- if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->fpstate_active)
+ if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->initialized)
return regset->n;
else
return 0;
if (!boot_cpu_has(X86_FEATURE_FXSR))
return -ENODEV;
- fpu__activate_fpstate_read(fpu);
+ fpu__prepare_read(fpu);
fpstate_sanitize_xstate(fpu);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
if (!boot_cpu_has(X86_FEATURE_FXSR))
return -ENODEV;
- fpu__activate_fpstate_write(fpu);
+ fpu__prepare_write(fpu);
fpstate_sanitize_xstate(fpu);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
xsave = &fpu->state.xsave;
- fpu__activate_fpstate_read(fpu);
+ fpu__prepare_read(fpu);
if (using_compacted_format()) {
- ret = copyout_from_xsaves(pos, count, kbuf, ubuf, xsave);
+ if (kbuf)
+ ret = copy_xstate_to_kernel(kbuf, xsave, pos, count);
+ else
+ ret = copy_xstate_to_user(ubuf, xsave, pos, count);
} else {
fpstate_sanitize_xstate(fpu);
/*
xsave = &fpu->state.xsave;
- fpu__activate_fpstate_write(fpu);
+ fpu__prepare_write(fpu);
- if (boot_cpu_has(X86_FEATURE_XSAVES))
- ret = copyin_to_xsaves(kbuf, ubuf, xsave);
- else
+ if (using_compacted_format()) {
+ if (kbuf)
+ ret = copy_kernel_to_xstate(xsave, kbuf);
+ else
+ ret = copy_user_to_xstate(xsave, ubuf);
+ } else {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
-
- /*
- * In case of failure, mark all states as init:
- */
- if (ret)
- fpstate_init(&fpu->state);
+ if (!ret)
+ ret = validate_xstate_header(&xsave->header);
+ }
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
xsave->i387.mxcsr &= mxcsr_feature_mask;
- xsave->header.xfeatures &= xfeatures_mask;
+
/*
- * These bits must be zero.
+ * In case of failure, mark all states as init:
*/
- memset(&xsave->header.reserved, 0, 48);
+ if (ret)
+ fpstate_init(&fpu->state);
return ret;
}
struct fpu *fpu = &target->thread.fpu;
struct user_i387_ia32_struct env;
- fpu__activate_fpstate_read(fpu);
+ fpu__prepare_read(fpu);
if (!boot_cpu_has(X86_FEATURE_FPU))
return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
struct user_i387_ia32_struct env;
int ret;
- fpu__activate_fpstate_write(fpu);
+ fpu__prepare_write(fpu);
fpstate_sanitize_xstate(fpu);
if (!boot_cpu_has(X86_FEATURE_FPU))
struct fpu *fpu = &tsk->thread.fpu;
int fpvalid;
- fpvalid = fpu->fpstate_active;
+ fpvalid = fpu->initialized;
if (fpvalid)
fpvalid = !fpregs_get(tsk, NULL,
0, sizeof(struct user_i387_ia32_struct),
*/
int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
{
- struct xregs_state *xsave = &current->thread.fpu.state.xsave;
+ struct fpu *fpu = &current->thread.fpu;
+ struct xregs_state *xsave = &fpu->state.xsave;
struct task_struct *tsk = current;
int ia32_fxstate = (buf != buf_fx);
sizeof(struct user_i387_ia32_struct), NULL,
(struct _fpstate_32 __user *) buf) ? -1 : 1;
- if (fpregs_active() || using_compacted_format()) {
+ if (fpu->initialized || using_compacted_format()) {
/* Save the live register state to the user directly. */
if (copy_fpregs_to_sigframe(buf_fx))
return -1;
/* Update the thread's fxstate to save the fsave header. */
if (ia32_fxstate)
- copy_fxregs_to_kernel(&tsk->thread.fpu);
+ copy_fxregs_to_kernel(fpu);
} else {
/*
* It is a *bug* if kernel uses compacted-format for xsave
return -1;
}
- fpstate_sanitize_xstate(&tsk->thread.fpu);
+ fpstate_sanitize_xstate(fpu);
if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
return -1;
}
struct xstate_header *header = &xsave->header;
if (use_xsave()) {
- /* These bits must be zero. */
- memset(header->reserved, 0, 48);
+ /*
+ * Note: we don't need to zero the reserved bits in the
+ * xstate_header here because we either didn't copy them at all,
+ * or we checked earlier that they aren't set.
+ */
/*
* Init the state that is not present in the memory
if (fx_only)
header->xfeatures = XFEATURE_MASK_FPSSE;
else
- header->xfeatures &= (xfeatures_mask & xfeatures);
+ header->xfeatures &= xfeatures;
}
if (use_fxsr()) {
if (!access_ok(VERIFY_READ, buf, size))
return -EACCES;
- fpu__activate_curr(fpu);
+ fpu__initialize(fpu);
if (!static_cpu_has(X86_FEATURE_FPU))
return fpregs_soft_set(current, NULL,
/*
* For 32-bit frames with fxstate, copy the user state to the
* thread's fpu state, reconstruct fxstate from the fsave
- * header. Sanitize the copied state etc.
+ * header. Validate and sanitize the copied state.
*/
struct fpu *fpu = &tsk->thread.fpu;
struct user_i387_ia32_struct env;
int err = 0;
/*
- * Drop the current fpu which clears fpu->fpstate_active. This ensures
+ * Drop the current fpu which clears fpu->initialized. This ensures
* that any context-switch during the copy of the new state,
* avoids the intermediate state from getting restored/saved.
* Thus avoiding the new restored state from getting corrupted.
* We will be ready to restore/save the state only after
- * fpu->fpstate_active is again set.
+ * fpu->initialized is again set.
*/
fpu__drop(fpu);
if (using_compacted_format()) {
- err = copyin_to_xsaves(NULL, buf_fx,
- &fpu->state.xsave);
+ err = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
} else {
- err = __copy_from_user(&fpu->state.xsave,
- buf_fx, state_size);
+ err = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
+
+ if (!err && state_size > offsetof(struct xregs_state, header))
+ err = validate_xstate_header(&fpu->state.xsave.header);
}
if (err || __copy_from_user(&env, buf, sizeof(env))) {
sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
}
- fpu->fpstate_active = 1;
+ fpu->initialized = 1;
preempt_disable();
fpu__restore(fpu);
preempt_enable();
return boot_cpu_has(X86_FEATURE_XSAVES);
}
+/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
+int validate_xstate_header(const struct xstate_header *hdr)
+{
+ /* No unknown or supervisor features may be set */
+ if (hdr->xfeatures & (~xfeatures_mask | XFEATURE_MASK_SUPERVISOR))
+ return -EINVAL;
+
+ /* Userspace must use the uncompacted format */
+ if (hdr->xcomp_bv)
+ return -EINVAL;
+
+ /*
+ * If 'reserved' is shrunken to add a new field, make sure to validate
+ * that new field here!
+ */
+ BUILD_BUG_ON(sizeof(hdr->reserved) != 48);
+
+ /* No reserved bits may be set */
+ if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
+ return -EINVAL;
+
+ return 0;
+}
+
static void __xstate_dump_leaves(void)
{
int i;
{
struct fpu *fpu = &current->thread.fpu;
- if (!fpu->fpstate_active)
+ if (!fpu->initialized)
return NULL;
/*
* fpu__save() takes the CPU's xstate registers
}
#endif /* ! CONFIG_ARCH_HAS_PKEYS */
+/*
+ * Weird legacy quirk: SSE and YMM states store information in the
+ * MXCSR and MXCSR_FLAGS fields of the FP area. That means if the FP
+ * area is marked as unused in the xfeatures header, we need to copy
+ * MXCSR and MXCSR_FLAGS if either SSE or YMM are in use.
+ */
+static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
+{
+ if (!(xfeatures & (XFEATURE_MASK_SSE|XFEATURE_MASK_YMM)))
+ return false;
+
+ if (xfeatures & XFEATURE_MASK_FP)
+ return false;
+
+ return true;
+}
+
/*
* This is similar to user_regset_copyout(), but will not add offset to
* the source data pointer or increment pos, count, kbuf, and ubuf.
*/
-static inline int xstate_copyout(unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf,
- const void *data, const int start_pos,
- const int end_pos)
+static inline void
+__copy_xstate_to_kernel(void *kbuf, const void *data,
+ unsigned int offset, unsigned int size, unsigned int size_total)
{
- if ((count == 0) || (pos < start_pos))
- return 0;
+ if (offset < size_total) {
+ unsigned int copy = min(size, size_total - offset);
- if (end_pos < 0 || pos < end_pos) {
- unsigned int copy = (end_pos < 0 ? count : min(count, end_pos - pos));
+ memcpy(kbuf + offset, data, copy);
+ }
+}
- if (kbuf) {
- memcpy(kbuf + pos, data, copy);
- } else {
- if (__copy_to_user(ubuf + pos, data, copy))
- return -EFAULT;
+/*
+ * Convert from kernel XSAVES compacted format to standard format and copy
+ * to a kernel-space ptrace buffer.
+ *
+ * It supports partial copy but pos always starts from zero. This is called
+ * from xstateregs_get() and there we check the CPU has XSAVES.
+ */
+int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
+{
+ unsigned int offset, size;
+ struct xstate_header header;
+ int i;
+
+ /*
+ * Currently copy_regset_to_user() starts from pos 0:
+ */
+ if (unlikely(offset_start != 0))
+ return -EFAULT;
+
+ /*
+ * The destination is a ptrace buffer; we put in only user xstates:
+ */
+ memset(&header, 0, sizeof(header));
+ header.xfeatures = xsave->header.xfeatures;
+ header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR;
+
+ /*
+ * Copy xregs_state->header:
+ */
+ offset = offsetof(struct xregs_state, header);
+ size = sizeof(header);
+
+ __copy_xstate_to_kernel(kbuf, &header, offset, size, size_total);
+
+ for (i = 0; i < XFEATURE_MAX; i++) {
+ /*
+ * Copy only in-use xstates:
+ */
+ if ((header.xfeatures >> i) & 1) {
+ void *src = __raw_xsave_addr(xsave, 1 << i);
+
+ offset = xstate_offsets[i];
+ size = xstate_sizes[i];
+
+ /* The next component has to fit fully into the output buffer: */
+ if (offset + size > size_total)
+ break;
+
+ __copy_xstate_to_kernel(kbuf, src, offset, size, size_total);
}
+
+ }
+
+ if (xfeatures_mxcsr_quirk(header.xfeatures)) {
+ offset = offsetof(struct fxregs_state, mxcsr);
+ size = MXCSR_AND_FLAGS_SIZE;
+ __copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total);
+ }
+
+ /*
+ * Fill xsave->i387.sw_reserved value for ptrace frame:
+ */
+ offset = offsetof(struct fxregs_state, sw_reserved);
+ size = sizeof(xstate_fx_sw_bytes);
+
+ __copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total);
+
+ return 0;
+}
+
+static inline int
+__copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int offset, unsigned int size, unsigned int size_total)
+{
+ if (!size)
+ return 0;
+
+ if (offset < size_total) {
+ unsigned int copy = min(size, size_total - offset);
+
+ if (__copy_to_user(ubuf + offset, data, copy))
+ return -EFAULT;
}
return 0;
}
/*
* Convert from kernel XSAVES compacted format to standard format and copy
- * to a ptrace buffer. It supports partial copy but pos always starts from
+ * to a user-space buffer. It supports partial copy but pos always starts from
* zero. This is called from xstateregs_get() and there we check the CPU
* has XSAVES.
*/
-int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
- void __user *ubuf, struct xregs_state *xsave)
+int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
{
unsigned int offset, size;
int ret, i;
/*
* Currently copy_regset_to_user() starts from pos 0:
*/
- if (unlikely(pos != 0))
+ if (unlikely(offset_start != 0))
return -EFAULT;
/*
offset = offsetof(struct xregs_state, header);
size = sizeof(header);
- ret = xstate_copyout(offset, size, kbuf, ubuf, &header, 0, count);
-
+ ret = __copy_xstate_to_user(ubuf, &header, offset, size, size_total);
if (ret)
return ret;
offset = xstate_offsets[i];
size = xstate_sizes[i];
- ret = xstate_copyout(offset, size, kbuf, ubuf, src, 0, count);
+ /* The next component has to fit fully into the output buffer: */
+ if (offset + size > size_total)
+ break;
+ ret = __copy_xstate_to_user(ubuf, src, offset, size, size_total);
if (ret)
return ret;
-
- if (offset + size >= count)
- break;
}
}
+ if (xfeatures_mxcsr_quirk(header.xfeatures)) {
+ offset = offsetof(struct fxregs_state, mxcsr);
+ size = MXCSR_AND_FLAGS_SIZE;
+ __copy_xstate_to_user(ubuf, &xsave->i387.mxcsr, offset, size, size_total);
+ }
+
/*
* Fill xsave->i387.sw_reserved value for ptrace frame:
*/
offset = offsetof(struct fxregs_state, sw_reserved);
size = sizeof(xstate_fx_sw_bytes);
- ret = xstate_copyout(offset, size, kbuf, ubuf, xstate_fx_sw_bytes, 0, count);
-
+ ret = __copy_xstate_to_user(ubuf, xstate_fx_sw_bytes, offset, size, size_total);
if (ret)
return ret;
}
/*
- * Convert from a ptrace standard-format buffer to kernel XSAVES format
- * and copy to the target thread. This is called from xstateregs_set() and
- * there we check the CPU has XSAVES and a whole standard-sized buffer
- * exists.
+ * Convert from a ptrace standard-format kernel buffer to kernel XSAVES format
+ * and copy to the target thread. This is called from xstateregs_set().
*/
-int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
- struct xregs_state *xsave)
+int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
{
unsigned int offset, size;
int i;
- u64 xfeatures;
- u64 allowed_features;
+ struct xstate_header hdr;
offset = offsetof(struct xregs_state, header);
- size = sizeof(xfeatures);
+ size = sizeof(hdr);
- if (kbuf) {
- memcpy(&xfeatures, kbuf + offset, size);
- } else {
- if (__copy_from_user(&xfeatures, ubuf + offset, size))
- return -EFAULT;
+ memcpy(&hdr, kbuf + offset, size);
+
+ if (validate_xstate_header(&hdr))
+ return -EINVAL;
+
+ for (i = 0; i < XFEATURE_MAX; i++) {
+ u64 mask = ((u64)1 << i);
+
+ if (hdr.xfeatures & mask) {
+ void *dst = __raw_xsave_addr(xsave, 1 << i);
+
+ offset = xstate_offsets[i];
+ size = xstate_sizes[i];
+
+ memcpy(dst, kbuf + offset, size);
+ }
+ }
+
+ if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
+ offset = offsetof(struct fxregs_state, mxcsr);
+ size = MXCSR_AND_FLAGS_SIZE;
+ memcpy(&xsave->i387.mxcsr, kbuf + offset, size);
}
/*
- * Reject if the user sets any disabled or supervisor features:
+ * The state that came in from userspace was user-state only.
+ * Mask all the user states out of 'xfeatures':
+ */
+ xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR;
+
+ /*
+ * Add back in the features that came in from userspace:
*/
- allowed_features = xfeatures_mask & ~XFEATURE_MASK_SUPERVISOR;
+ xsave->header.xfeatures |= hdr.xfeatures;
- if (xfeatures & ~allowed_features)
+ return 0;
+}
+
+/*
+ * Convert from a ptrace or sigreturn standard-format user-space buffer to
+ * kernel XSAVES format and copy to the target thread. This is called from
+ * xstateregs_set(), as well as potentially from the sigreturn() and
+ * rt_sigreturn() system calls.
+ */
+int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
+{
+ unsigned int offset, size;
+ int i;
+ struct xstate_header hdr;
+
+ offset = offsetof(struct xregs_state, header);
+ size = sizeof(hdr);
+
+ if (__copy_from_user(&hdr, ubuf + offset, size))
+ return -EFAULT;
+
+ if (validate_xstate_header(&hdr))
return -EINVAL;
for (i = 0; i < XFEATURE_MAX; i++) {
u64 mask = ((u64)1 << i);
- if (xfeatures & mask) {
+ if (hdr.xfeatures & mask) {
void *dst = __raw_xsave_addr(xsave, 1 << i);
offset = xstate_offsets[i];
size = xstate_sizes[i];
- if (kbuf) {
- memcpy(dst, kbuf + offset, size);
- } else {
- if (__copy_from_user(dst, ubuf + offset, size))
- return -EFAULT;
- }
+ if (__copy_from_user(dst, ubuf + offset, size))
+ return -EFAULT;
}
}
+ if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
+ offset = offsetof(struct fxregs_state, mxcsr);
+ size = MXCSR_AND_FLAGS_SIZE;
+ if (__copy_from_user(&xsave->i387.mxcsr, ubuf + offset, size))
+ return -EFAULT;
+ }
+
/*
* The state that came in from userspace was user-state only.
* Mask all the user states out of 'xfeatures':
/*
* Add back in the features that came in from userspace:
*/
- xsave->header.xfeatures |= xfeatures;
+ xsave->header.xfeatures |= hdr.xfeatures;
return 0;
}
static inline void *current_stack(void)
{
- return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
+ return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
}
static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
/* Save the next esp at the bottom of the stack */
prev_esp = (u32 *)irqstk;
- *prev_esp = current_stack_pointer();
+ *prev_esp = current_stack_pointer;
if (unlikely(overflow))
call_on_stack(print_stack_overflow, isp);
/* Push the previous esp onto the stack */
prev_esp = (u32 *)irqstk;
- *prev_esp = current_stack_pointer();
+ *prev_esp = current_stack_pointer;
call_on_stack(__do_softirq, isp);
}
return 0;
out_clean_nodes:
- for (j = i - 1; j > 0; j--)
+ for (j = i - 1; j >= 0; j--)
cleanup_setup_data_node(*(kobjp + j));
kfree(kobjp);
out_setup_data_kobj:
n.token = token;
n.cpu = smp_processor_id();
- n.halted = is_idle_task(current) || preempt_count() > 1;
+ n.halted = is_idle_task(current) || preempt_count() > 1 ||
+ rcu_preempt_depth();
init_swait_queue_head(&n.wq);
hlist_add_head(&n.link, &b->list);
raw_spin_unlock(&b->lock);
sp = (unsigned long) ka->sa.sa_restorer;
}
- if (fpu->fpstate_active) {
+ if (fpu->initialized) {
sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
&buf_fx, &math_size);
*fpstate = (void __user *)sp;
return (void __user *)-1L;
/* save i387 and extended state */
- if (fpu->fpstate_active &&
+ if (fpu->initialized &&
copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0)
return (void __user *)-1L;
/*
* Ensure the signal handler starts with the new fpu state.
*/
- if (fpu->fpstate_active)
+ if (fpu->initialized)
fpu__clear(fpu);
}
signal_setup_done(failed, ksig, stepping);
* from double_fault.
*/
BUG_ON((unsigned long)(current_top_of_stack() -
- current_stack_pointer()) >= THREAD_SIZE);
+ current_stack_pointer) >= THREAD_SIZE);
preempt_enable_no_resched();
}
int cpu;
bool launched;
bool nmi_known_unmasked;
+ unsigned long vmcs_host_cr3; /* May not match real cr3 */
+ unsigned long vmcs_host_cr4; /* May not match real cr4 */
struct list_head loaded_vmcss_on_cpu_link;
};
int gs_ldt_reload_needed;
int fs_reload_needed;
u64 msr_host_bndcfgs;
- unsigned long vmcs_host_cr3; /* May not match real cr3 */
- unsigned long vmcs_host_cr4; /* May not match real cr4 */
} host_state;
struct {
int vm86_active;
struct pi_desc old, new;
unsigned int dest;
- if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP) ||
- !kvm_vcpu_apicv_active(vcpu))
+ /*
+ * In case of hot-plug or hot-unplug, we may have to undo
+ * vmx_vcpu_pi_put even if there is no assigned device. And we
+ * always keep PI.NDST up to date for simplicity: it makes the
+ * code easier, and CPU migration is not a fast path.
+ */
+ if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
return;
+ /*
+ * First handle the simple case where no cmpxchg is necessary; just
+ * allow posting non-urgent interrupts.
+ *
+ * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
+ * PI.NDST: pi_post_block will do it for us and the wakeup_handler
+ * expects the VCPU to be on the blocked_vcpu_list that matches
+ * PI.NDST.
+ */
+ if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
+ vcpu->cpu == cpu) {
+ pi_clear_sn(pi_desc);
+ return;
+ }
+
+ /* The full case. */
do {
old.control = new.control = pi_desc->control;
- /*
- * If 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there
- * are two possible cases:
- * 1. After running 'pre_block', context switch
- * happened. For this case, 'sn' was set in
- * vmx_vcpu_put(), so we need to clear it here.
- * 2. After running 'pre_block', we were blocked,
- * and woken up by some other guy. For this case,
- * we don't need to do anything, 'pi_post_block'
- * will do everything for us. However, we cannot
- * check whether it is case #1 or case #2 here
- * (maybe, not needed), so we also clear sn here,
- * I think it is not a big deal.
- */
- if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) {
- if (vcpu->cpu != cpu) {
- dest = cpu_physical_id(cpu);
-
- if (x2apic_enabled())
- new.ndst = dest;
- else
- new.ndst = (dest << 8) & 0xFF00;
- }
+ dest = cpu_physical_id(cpu);
- /* set 'NV' to 'notification vector' */
- new.nv = POSTED_INTR_VECTOR;
- }
+ if (x2apic_enabled())
+ new.ndst = dest;
+ else
+ new.ndst = (dest << 8) & 0xFF00;
- /* Allow posting non-urgent interrupts */
new.sn = 0;
- } while (cmpxchg(&pi_desc->control, old.control,
- new.control) != old.control);
+ } while (cmpxchg64(&pi_desc->control, old.control,
+ new.control) != old.control);
}
static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
*/
cr3 = __read_cr3();
vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */
- vmx->host_state.vmcs_host_cr3 = cr3;
+ vmx->loaded_vmcs->vmcs_host_cr3 = cr3;
/* Save the most likely value for this task's CR4 in the VMCS. */
cr4 = cr4_read_shadow();
vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
- vmx->host_state.vmcs_host_cr4 = cr4;
+ vmx->loaded_vmcs->vmcs_host_cr4 = cr4;
vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
#ifdef CONFIG_X86_64
vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
cr3 = __get_current_cr3_fast();
- if (unlikely(cr3 != vmx->host_state.vmcs_host_cr3)) {
+ if (unlikely(cr3 != vmx->loaded_vmcs->vmcs_host_cr3)) {
vmcs_writel(HOST_CR3, cr3);
- vmx->host_state.vmcs_host_cr3 = cr3;
+ vmx->loaded_vmcs->vmcs_host_cr3 = cr3;
}
cr4 = cr4_read_shadow();
- if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
+ if (unlikely(cr4 != vmx->loaded_vmcs->vmcs_host_cr4)) {
vmcs_writel(HOST_CR4, cr4);
- vmx->host_state.vmcs_host_cr4 = cr4;
+ vmx->loaded_vmcs->vmcs_host_cr4 = cr4;
}
/* When single-stepping over STI and MOV SS, we must clear the
vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
+ /*
+ * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
+ * or POSTED_INTR_WAKEUP_VECTOR.
+ */
+ vmx->pi_desc.nv = POSTED_INTR_VECTOR;
+ vmx->pi_desc.sn = 1;
+
return &vmx->vcpu;
free_vmcs:
WARN_ON(!is_guest_mode(vcpu));
- if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code)) {
+ if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
+ !to_vmx(vcpu)->nested.nested_run_pending) {
vmcs12->vm_exit_intr_error_code = fault->error_code;
nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
}
+static void __pi_post_block(struct kvm_vcpu *vcpu)
+{
+ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+ struct pi_desc old, new;
+ unsigned int dest;
+
+ do {
+ old.control = new.control = pi_desc->control;
+ WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
+ "Wakeup handler not enabled while the VCPU is blocked\n");
+
+ dest = cpu_physical_id(vcpu->cpu);
+
+ if (x2apic_enabled())
+ new.ndst = dest;
+ else
+ new.ndst = (dest << 8) & 0xFF00;
+
+ /* set 'NV' to 'notification vector' */
+ new.nv = POSTED_INTR_VECTOR;
+ } while (cmpxchg64(&pi_desc->control, old.control,
+ new.control) != old.control);
+
+ if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
+ spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+ list_del(&vcpu->blocked_vcpu_list);
+ spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+ vcpu->pre_pcpu = -1;
+ }
+}
+
/*
* This routine does the following things for vCPU which is going
* to be blocked if VT-d PI is enabled.
*/
static int pi_pre_block(struct kvm_vcpu *vcpu)
{
- unsigned long flags;
unsigned int dest;
struct pi_desc old, new;
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
!kvm_vcpu_apicv_active(vcpu))
return 0;
- vcpu->pre_pcpu = vcpu->cpu;
- spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
- vcpu->pre_pcpu), flags);
- list_add_tail(&vcpu->blocked_vcpu_list,
- &per_cpu(blocked_vcpu_on_cpu,
- vcpu->pre_pcpu));
- spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock,
- vcpu->pre_pcpu), flags);
+ WARN_ON(irqs_disabled());
+ local_irq_disable();
+ if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
+ vcpu->pre_pcpu = vcpu->cpu;
+ spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+ list_add_tail(&vcpu->blocked_vcpu_list,
+ &per_cpu(blocked_vcpu_on_cpu,
+ vcpu->pre_pcpu));
+ spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+ }
do {
old.control = new.control = pi_desc->control;
- /*
- * We should not block the vCPU if
- * an interrupt is posted for it.
- */
- if (pi_test_on(pi_desc) == 1) {
- spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
- vcpu->pre_pcpu), flags);
- list_del(&vcpu->blocked_vcpu_list);
- spin_unlock_irqrestore(
- &per_cpu(blocked_vcpu_on_cpu_lock,
- vcpu->pre_pcpu), flags);
- vcpu->pre_pcpu = -1;
-
- return 1;
- }
-
WARN((pi_desc->sn == 1),
"Warning: SN field of posted-interrupts "
"is set before blocking\n");
/* set 'NV' to 'wakeup vector' */
new.nv = POSTED_INTR_WAKEUP_VECTOR;
- } while (cmpxchg(&pi_desc->control, old.control,
- new.control) != old.control);
+ } while (cmpxchg64(&pi_desc->control, old.control,
+ new.control) != old.control);
- return 0;
+ /* We should not block the vCPU if an interrupt is posted for it. */
+ if (pi_test_on(pi_desc) == 1)
+ __pi_post_block(vcpu);
+
+ local_irq_enable();
+ return (vcpu->pre_pcpu == -1);
}
static int vmx_pre_block(struct kvm_vcpu *vcpu)
static void pi_post_block(struct kvm_vcpu *vcpu)
{
- struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
- struct pi_desc old, new;
- unsigned int dest;
- unsigned long flags;
-
- if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP) ||
- !kvm_vcpu_apicv_active(vcpu))
+ if (vcpu->pre_pcpu == -1)
return;
- do {
- old.control = new.control = pi_desc->control;
-
- dest = cpu_physical_id(vcpu->cpu);
-
- if (x2apic_enabled())
- new.ndst = dest;
- else
- new.ndst = (dest << 8) & 0xFF00;
-
- /* Allow posting non-urgent interrupts */
- new.sn = 0;
-
- /* set 'NV' to 'notification vector' */
- new.nv = POSTED_INTR_VECTOR;
- } while (cmpxchg(&pi_desc->control, old.control,
- new.control) != old.control);
-
- if(vcpu->pre_pcpu != -1) {
- spin_lock_irqsave(
- &per_cpu(blocked_vcpu_on_cpu_lock,
- vcpu->pre_pcpu), flags);
- list_del(&vcpu->blocked_vcpu_list);
- spin_unlock_irqrestore(
- &per_cpu(blocked_vcpu_on_cpu_lock,
- vcpu->pre_pcpu), flags);
- vcpu->pre_pcpu = -1;
- }
+ WARN_ON(irqs_disabled());
+ local_irq_disable();
+ __pi_post_block(vcpu);
+ local_irq_enable();
}
static void vmx_post_block(struct kvm_vcpu *vcpu)
int r;
sigset_t sigsaved;
- fpu__activate_curr(fpu);
+ fpu__initialize(fpu);
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
struct desc_struct code_descriptor;
struct fpu *fpu = &current->thread.fpu;
- fpu__activate_curr(fpu);
+ fpu__initialize(fpu);
#ifdef RE_ENTRANT_CHECKING
if (emulating) {
#include <linux/uaccess.h>
#include <linux/sched/debug.h>
+#include <asm/fpu/internal.h>
#include <asm/traps.h>
#include <asm/kdebug.h>
}
EXPORT_SYMBOL_GPL(ex_handler_refcount);
+/*
+ * Handler for when we fail to restore a task's FPU state. We should never get
+ * here because the FPU state of a task using the FPU (task->thread.fpu.state)
+ * should always be valid. However, past bugs have allowed userspace to set
+ * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn().
+ * These caused XRSTOR to fail when switching to the task, leaking the FPU
+ * registers of the task previously executing on the CPU. Mitigate this class
+ * of vulnerability by restoring from the initial state (essentially, zeroing
+ * out all the FPU registers) if we can't restore from the task's FPU state.
+ */
+bool ex_handler_fprestore(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
+{
+ regs->ip = ex_fixup_addr(fixup);
+
+ WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
+ (void *)instruction_pointer(regs));
+
+ __copy_kernel_to_fpregs(&init_fpstate, -1);
+ return true;
+}
+EXPORT_SYMBOL_GPL(ex_handler_fprestore);
+
bool ex_handler_ext(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr)
{
* 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
* faulted on a pte with its pkey=4.
*/
-static void fill_sig_info_pkey(int si_code, siginfo_t *info,
- struct vm_area_struct *vma)
+static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
{
/* This is effectively an #ifdef */
if (!boot_cpu_has(X86_FEATURE_OSPKE))
* valid VMA, so we should never reach this without a
* valid VMA.
*/
- if (!vma) {
+ if (!pkey) {
WARN_ONCE(1, "PKU fault with no VMA passed in");
info->si_pkey = 0;
return;
* absolutely guaranteed to be 100% accurate because of
* the race explained above.
*/
- info->si_pkey = vma_pkey(vma);
+ info->si_pkey = *pkey;
}
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
- struct task_struct *tsk, struct vm_area_struct *vma,
- int fault)
+ struct task_struct *tsk, u32 *pkey, int fault)
{
unsigned lsb = 0;
siginfo_t info;
lsb = PAGE_SHIFT;
info.si_addr_lsb = lsb;
- fill_sig_info_pkey(si_code, &info, vma);
+ fill_sig_info_pkey(si_code, &info, pkey);
force_sig_info(si_signo, &info, tsk);
}
struct task_struct *tsk = current;
unsigned long flags;
int sig;
- /* No context means no VMA to pass down */
- struct vm_area_struct *vma = NULL;
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs, X86_TRAP_PF)) {
/* XXX: hwpoison faults will set the wrong code. */
force_sig_info_fault(signal, si_code, address,
- tsk, vma, 0);
+ tsk, NULL, 0);
}
/*
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, struct vm_area_struct *vma,
- int si_code)
+ unsigned long address, u32 *pkey, int si_code)
{
struct task_struct *tsk = current;
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_PF;
- force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0);
+ force_sig_info_fault(SIGSEGV, si_code, address, tsk, pkey, 0);
return;
}
static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, struct vm_area_struct *vma)
+ unsigned long address, u32 *pkey)
{
- __bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR);
+ __bad_area_nosemaphore(regs, error_code, address, pkey, SEGV_MAPERR);
}
static void
unsigned long address, struct vm_area_struct *vma, int si_code)
{
struct mm_struct *mm = current->mm;
+ u32 pkey;
+
+ if (vma)
+ pkey = vma_pkey(vma);
/*
* Something tried to access memory that isn't in our memory map..
*/
up_read(&mm->mmap_sem);
- __bad_area_nosemaphore(regs, error_code, address, vma, si_code);
+ __bad_area_nosemaphore(regs, error_code, address,
+ (vma) ? &pkey : NULL, si_code);
}
static noinline void
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
- struct vm_area_struct *vma, unsigned int fault)
+ u32 *pkey, unsigned int fault)
{
struct task_struct *tsk = current;
int code = BUS_ADRERR;
code = BUS_MCEERR_AR;
}
#endif
- force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault);
+ force_sig_info_fault(SIGBUS, code, address, tsk, pkey, fault);
}
static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, struct vm_area_struct *vma,
- unsigned int fault)
+ unsigned long address, u32 *pkey, unsigned int fault)
{
if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
no_context(regs, error_code, address, 0, 0);
} else {
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
VM_FAULT_HWPOISON_LARGE))
- do_sigbus(regs, error_code, address, vma, fault);
+ do_sigbus(regs, error_code, address, pkey, fault);
else if (fault & VM_FAULT_SIGSEGV)
- bad_area_nosemaphore(regs, error_code, address, vma);
+ bad_area_nosemaphore(regs, error_code, address, pkey);
else
BUG();
}
struct mm_struct *mm;
int fault, major = 0;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ u32 pkey;
tsk = current;
mm = tsk->mm;
return;
}
+ pkey = vma_pkey(vma);
up_read(&mm->mmap_sem);
if (unlikely(fault & VM_FAULT_ERROR)) {
- mm_fault_error(regs, error_code, address, vma, fault);
+ mm_fault_error(regs, error_code, address, &pkey, fault);
return;
}
* published by the Free Software Foundation.
*/
+#define DISABLE_BRANCH_PROFILING
+
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/cpufeature.h> /* boot_cpu_has, ... */
#include <asm/mmu_context.h> /* vma_pkey() */
-#include <asm/fpu/internal.h> /* fpregs_active() */
int __execute_only_pkey(struct mm_struct *mm)
{
*/
preempt_disable();
if (!need_to_set_mm_pkey &&
- fpregs_active() &&
+ current->thread.fpu.initialized &&
!__pkru_allows_read(read_pkru(), execute_only_pkey)) {
preempt_enable();
return execute_only_pkey;
* mapped in the new pgd, we'll double-fault. Forcibly
* map it.
*/
- unsigned int index = pgd_index(current_stack_pointer());
+ unsigned int index = pgd_index(current_stack_pointer);
pgd_t *pgd = next->pgd + index;
if (unlikely(pgd_none(*pgd)))
* from _brk_limit way up to the max_pfn_mapped (which is the end of
* the ramdisk). We continue on, erasing PMD entries that point to page
* tables - do note that they are accessible at this stage via __va.
- * For good measure we also round up to the PMD - which means that if
+ * As Xen is aligning the memory end to a 4MB boundary, for good
+ * measure we also round up to PMD_SIZE * 2 - which means that if
 * anybody is using a __ka address to the initial boot-stack - and tries
 * to use it - they are going to crash. The xen_start_info has been
* taken care of already in xen_setup_kernel_pagetable. */
addr = xen_start_info->pt_base;
- size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
+ size = xen_start_info->nr_pt_frames * PAGE_SIZE;
- xen_cleanhighmap(addr, addr + size);
+ xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
-#ifdef DEBUG
- /* This is superfluous and is not necessary, but you know what
- * lets do it. The MODULES_VADDR -> MODULES_END should be clear of
- * anything at this stage. */
- xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
-#endif
}
#endif
}
ghes_do_proc(ghes, ghes->estatus);
+out:
+ ghes_clear_estatus(ghes);
+
+ if (rc == -ENOENT)
+ return rc;
+
/*
* GHESv2 type HEST entries introduce support for error acknowledgment,
* so only acknowledge the error if this support is present.
*/
- if (is_hest_type_generic_v2(ghes)) {
- rc = ghes_ack_error(ghes->generic_v2);
- if (rc)
- return rc;
- }
-out:
- ghes_clear_estatus(ghes);
+ if (is_hest_type_generic_v2(ghes))
+ return ghes_ack_error(ghes->generic_v2);
+
return rc;
}
debug_id, (u64)fda->num_fds);
continue;
}
- fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+ fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
task_close_fd(proc, fd_array[fd_index]);
} break;
(u64)node->ptr);
binder_node_unlock(node);
} else {
- int ret;
struct binder_ref_data dest_rdata;
binder_node_unlock(node);
*/
parent_buffer = parent->buffer -
binder_alloc_get_user_buffer_offset(&target_proc->alloc);
- fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+ fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
proc->pid, thread->pid);
proc->pid, thread->pid);
return -EINVAL;
}
- parent_buffer = (u8 *)(parent->buffer -
+ parent_buffer = (u8 *)((uintptr_t)parent->buffer -
binder_alloc_get_user_buffer_offset(
&target_proc->alloc));
*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
err_dead_proc_or_thread:
return_error = BR_DEAD_REPLY;
return_error_line = __LINE__;
+ binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
static int ahci_pci_reset_controller(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
+ int rc;
- ahci_reset_controller(host);
+ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
struct ahci_host_priv *hpriv = host->private_data;
{ 0x27DF, 0x152D, 0x0778 }, /* ICH7 on unknown Intel */
{ 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */
{ 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */
+ { 0x24CA, 0x10CF, 0x11AB }, /* ICH4M on Fujitsu-Siemens Lifebook S6120 */
{ 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */
{ 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */
{ 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */
};
#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
-#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
+#define EZ(v, unit) ((v)?ENOUGH(((v) * 1000), unit):0)
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
- q->setup = EZ(t->setup * 1000, T);
- q->act8b = EZ(t->act8b * 1000, T);
- q->rec8b = EZ(t->rec8b * 1000, T);
- q->cyc8b = EZ(t->cyc8b * 1000, T);
- q->active = EZ(t->active * 1000, T);
- q->recover = EZ(t->recover * 1000, T);
- q->dmack_hold = EZ(t->dmack_hold * 1000, T);
- q->cycle = EZ(t->cycle * 1000, T);
- q->udma = EZ(t->udma * 1000, UT);
+ q->setup = EZ(t->setup, T);
+ q->act8b = EZ(t->act8b, T);
+ q->rec8b = EZ(t->rec8b, T);
+ q->cyc8b = EZ(t->cyc8b, T);
+ q->active = EZ(t->active, T);
+ q->recover = EZ(t->recover, T);
+ q->dmack_hold = EZ(t->dmack_hold, T);
+ q->cycle = EZ(t->cycle, T);
+ q->udma = EZ(t->udma, UT);
}
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
static int charlcd_open(struct inode *inode, struct file *file)
{
struct charlcd_priv *priv = to_priv(the_charlcd);
+ int ret;
+ ret = -EBUSY;
if (!atomic_dec_and_test(&charlcd_available))
- return -EBUSY; /* open only once at a time */
+ goto fail; /* open only once at a time */
+ ret = -EPERM;
if (file->f_mode & FMODE_READ) /* device is write-only */
- return -EPERM;
+ goto fail;
if (priv->must_clear) {
charlcd_clear_display(&priv->lcd);
priv->must_clear = false;
}
return nonseekable_open(inode, file);
+
+ fail:
+ atomic_inc(&charlcd_available);
+ return ret;
}
static int charlcd_release(struct inode *inode, struct file *file)
static int keypad_open(struct inode *inode, struct file *file)
{
+ int ret;
+
+ ret = -EBUSY;
if (!atomic_dec_and_test(&keypad_available))
- return -EBUSY; /* open only once at a time */
+ goto fail; /* open only once at a time */
+ ret = -EPERM;
if (file->f_mode & FMODE_WRITE) /* device is read-only */
- return -EPERM;
+ goto fail;
keypad_buflen = 0; /* flush the buffer on opening */
return 0;
+ fail:
+ atomic_inc(&keypad_available);
+ return ret;
}
static int keypad_release(struct inode *inode, struct file *file)
}
#ifdef CONFIG_CPU_FREQ
-static cpumask_var_t cpus_to_visit;
-static void parsing_done_workfn(struct work_struct *work);
-static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
+static cpumask_var_t cpus_to_visit __initdata;
+static void __init parsing_done_workfn(struct work_struct *work);
+static __initdata DECLARE_WORK(parsing_done_work, parsing_done_workfn);
-static int
+static int __init
init_cpu_capacity_callback(struct notifier_block *nb,
unsigned long val,
void *data)
return 0;
}
-static struct notifier_block init_cpu_capacity_notifier = {
+static struct notifier_block init_cpu_capacity_notifier __initdata = {
.notifier_call = init_cpu_capacity_callback,
};
}
core_initcall(register_cpufreq_notifier);
-static void parsing_done_workfn(struct work_struct *work)
+static void __init parsing_done_workfn(struct work_struct *work)
{
cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
CPUFREQ_POLICY_NOTIFIER);
struct platform_device *pdev = to_platform_device(dev);
char *driver_override, *old, *cp;
- if (count > PATH_MAX)
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
return -EINVAL;
driver_override = kstrndup(buf, count, GFP_KERNEL);
opp->available = availability_req;
+ dev_pm_opp_get(opp);
+ mutex_unlock(&opp_table->lock);
+
/* Notify the change of the OPP availability */
if (availability_req)
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
blocking_notifier_call_chain(&opp_table->head,
OPP_EVENT_DISABLE, opp);
+ dev_pm_opp_put(opp);
+ goto put_table;
+
unlock:
mutex_unlock(&opp_table->lock);
+put_table:
dev_pm_opp_put_opp_table(opp_table);
return r;
}
return 0;
}
-static struct clock_event_device numachip2_clockevent = {
+static const struct clock_event_device numachip2_clockevent __initconst = {
.name = "numachip2",
.rating = 400,
.set_next_event = numachip2_set_next_event,
{ .compatible = "sigma,tango4", },
+ { .compatible = "ti,am33xx", },
+ { .compatible = "ti,am43", },
+ { .compatible = "ti,dra7", },
+
{ }
};
.write_complete = altera_cvp_write_complete,
};
-static ssize_t show_chkcfg(struct device_driver *dev, char *buf)
+static ssize_t chkcfg_show(struct device_driver *dev, char *buf)
{
return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg);
}
-static ssize_t store_chkcfg(struct device_driver *drv, const char *buf,
+static ssize_t chkcfg_store(struct device_driver *drv, const char *buf,
size_t count)
{
int ret;
return count;
}
-static DRIVER_ATTR(chkcfg, 0600, show_chkcfg, store_chkcfg);
+static DRIVER_ATTR_RW(chkcfg);
static int altera_cvp_probe(struct pci_dev *pdev,
const struct pci_device_id *dev_id);
NUM_BANKS(ADDR_SURF_2_BANK);
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
- } else if (adev->asic_type == CHIP_OLAND || adev->asic_type == CHIP_HAINAN) {
+ } else if (adev->asic_type == CHIP_OLAND) {
+ tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+ tilemode[1] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+ tilemode[2] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+ tilemode[3] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+ tilemode[4] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[5] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[6] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[7] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+ tilemode[8] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[9] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[10] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+ tilemode[11] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[12] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[13] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[14] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[15] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[16] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[17] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[22] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
+ tilemode[23] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[24] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[25] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ NUM_BANKS(ADDR_SURF_8_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1);
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
+ } else if (adev->asic_type == CHIP_HAINAN) {
tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
int err = 0;
dev = kfd_device_by_id(args->gpu_id);
+ if (!dev)
+ return -EINVAL;
dev->kfd2kgd->get_tile_config(dev->kgd, &config);
struct kfd_event *ev)
{
if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
- pr_warn("Signal event wasn't created because limit was reached\n");
+ if (!p->signal_event_limit_reached) {
+ pr_warn("Signal event wasn't created because limit was reached\n");
+ p->signal_event_limit_reached = true;
+ }
return -ENOMEM;
}
if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
kq->mqd->destroy_mqd(kq->mqd,
kq->queue->mqd,
- false,
+ KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
kq->queue->pipe,
kq->queue->queue);
uint32_t wptr, rptr;
unsigned int *queue_address;
+ /* When rptr == wptr, the buffer is empty.
+ * When rptr == wptr + 1, the buffer is full.
+ * It is always rptr that advances to the position of wptr, rather than
+ * the opposite. So we can only use up to queue_size_dwords - 1 dwords.
+ */
rptr = *kq->rptr_kernel;
wptr = *kq->wptr_kernel;
queue_address = (unsigned int *)kq->pq_kernel_addr;
pr_debug("wptr: %d\n", wptr);
pr_debug("queue_address 0x%p\n", queue_address);
- available_size = (rptr - 1 - wptr + queue_size_dwords) %
+ available_size = (rptr + queue_size_dwords - 1 - wptr) %
queue_size_dwords;
- if (packet_size_in_dwords >= queue_size_dwords ||
- packet_size_in_dwords >= available_size) {
+ if (packet_size_in_dwords > available_size) {
/*
* make sure calling functions know
* acquire_packet_buffer() failed
}
if (wptr + packet_size_in_dwords >= queue_size_dwords) {
+ /* make sure after rolling back to position 0, there is
+ * still enough space.
+ */
+ if (packet_size_in_dwords >= rptr) {
+ *buffer_ptr = NULL;
+ return -ENOMEM;
+ }
+ /* fill nops, roll back and start at position 0 */
while (wptr > 0) {
queue_address[wptr] = kq->nop_packet;
wptr = (wptr + 1) % queue_size_dwords;
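A standalone sketch of the ring-buffer accounting the comment above describes - it is not part of the patch and all names are illustrative: with rptr == wptr meaning "empty" and the writer never allowed to catch up to the reader, only queue_size - 1 dwords are usable, and the free space follows the same modulo expression used in acquire_packet_buffer().

/*
 * Illustrative only: ring-buffer free-space accounting with one slot
 * reserved, so that rptr == wptr unambiguously means "empty".
 */
#include <stdbool.h>

static inline unsigned int ring_free_dwords(unsigned int rptr,
					    unsigned int wptr,
					    unsigned int queue_size)
{
	/* Empty (rptr == wptr) yields queue_size - 1; full yields 0. */
	return (rptr + queue_size - 1 - wptr) % queue_size;
}

static inline bool ring_packet_fits(unsigned int rptr, unsigned int wptr,
				    unsigned int queue_size,
				    unsigned int packet_size)
{
	/* A packet may only be queued if it fits in the free space. */
	return packet_size <= ring_free_dwords(rptr, wptr, queue_size);
}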
struct list_head signal_event_pages;
u32 next_nonsignal_event_id;
size_t signal_event_count;
+ bool signal_event_limit_reached;
};
/**
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ struct etnaviv_drm_private *priv = obj->dev->dev_private;
struct etnaviv_vram_mapping *mapping, *tmp;
/* object should not be active */
WARN_ON(is_active(etnaviv_obj));
+ mutex_lock(&priv->gem_lock);
list_del(&etnaviv_obj->gem_node);
+ mutex_unlock(&priv->gem_lock);
list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
obj_node) {
cmdbuf->user_size = ALIGN(args->stream_size, 8);
ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
- if (ret == 0)
- cmdbuf = NULL;
+ if (ret)
+ goto out;
+
+ cmdbuf = NULL;
if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
/*
.y2 = qfb->base.height
};
- if (!old_state->fb) {
- qxl_io_log(qdev,
- "create primary fb: %dx%d,%d,%d\n",
- bo->surf.width, bo->surf.height,
- bo->surf.stride, bo->surf.format);
+ if (old_state->fb) {
+ qfb_old = to_qxl_framebuffer(old_state->fb);
+ bo_old = gem_to_qxl_bo(qfb_old->obj);
+ } else {
+ bo_old = NULL;
+ }
- qxl_io_create_primary(qdev, 0, bo);
- bo->is_primary = true;
+ if (bo == bo_old)
return;
- } else {
- qfb_old = to_qxl_framebuffer(old_state->fb);
- bo_old = gem_to_qxl_bo(qfb_old->obj);
+ if (bo_old && bo_old->is_primary) {
+ qxl_io_destroy_primary(qdev);
bo_old->is_primary = false;
}
- bo->is_primary = true;
+ if (!bo->is_primary) {
+ qxl_io_create_primary(qdev, 0, bo);
+ bo->is_primary = true;
+ }
qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1);
}
{
struct qxl_device *qdev = plane->dev->dev_private;
- if (old_state->fb)
- { struct qxl_framebuffer *qfb =
+ if (old_state->fb) {
+ struct qxl_framebuffer *qfb =
to_qxl_framebuffer(old_state->fb);
struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj);
- qxl_io_destroy_primary(qdev);
- bo->is_primary = false;
+ if (bo->is_primary) {
+ qxl_io_destroy_primary(qdev);
+ bo->is_primary = false;
+ }
}
}
struct drm_gem_object *obj;
struct qxl_bo *user_bo;
- if (!plane->state->fb) {
- /* we never executed prepare_fb, so there's nothing to
+ if (!old_state->fb) {
+ /*
+ * we never executed prepare_fb, so there's nothing to
* unpin.
*/
return;
}
- obj = to_qxl_framebuffer(plane->state->fb)->obj;
+ obj = to_qxl_framebuffer(old_state->fb)->obj;
user_bo = gem_to_qxl_bo(obj);
qxl_bo_unpin(user_bo);
}
radeon_agp_suspend(rdev);
pci_save_state(dev->pdev);
- if (freeze && rdev->family >= CHIP_CEDAR) {
+ if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
rdev->asic->asic_reset(rdev, true);
pci_restore_state(dev->pdev);
} else if (suspend) {
bool "Allwinner A10 HDMI CEC Support"
depends on DRM_SUN4I_HDMI
select CEC_CORE
- depends on CEC_PIN
+ select CEC_PIN
help
Choose this option if you have an Allwinner SoC with an HDMI
controller and want to use CEC.
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
-#include <media/cec.h>
+#include <media/cec-pin.h>
#define SUN4I_HDMI_CTRL_REG 0x004
#define SUN4I_HDMI_CTRL_ENABLE BIT(31)
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/tegra
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
{
- mutex_lock(&vmbus_connection.channel_mutex);
-
BUG_ON(!is_hvsock_channel(channel));
channel->rescind = true;
vmbus_device_unregister(channel->device_obj);
-
- mutex_unlock(&vmbus_connection.channel_mutex);
}
EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
out_src = smsg_out;
break;
+ case WRITE_TO_FILE:
+ out_src = fcopy_transaction.fcopy_msg;
+ out_len = sizeof(struct hv_do_fcopy);
+ break;
default:
out_src = fcopy_transaction.fcopy_msg;
out_len = fcopy_transaction.recv_len;
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x19e1),
.driver_data = (kernel_ulong_t)0,
},
+ {
+ /* Lewisburg PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa1a6),
+ .driver_data = (kernel_ulong_t)0,
+ },
{
/* Gemini Lake */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Cedar Fork PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
stm_source_link_drop(src);
- device_destroy(&stm_source_class, src->dev.devt);
+ device_unregister(&src->dev);
}
EXPORT_SYMBOL_GPL(stm_source_unregister_device);
unsigned int vref_mv)
{
struct ad7793_state *st = iio_priv(indio_dev);
- int i, ret = -1;
+ int i, ret;
unsigned long long scale_uv;
u32 id;
return ret;
/* reset the serial interface */
- ret = spi_write(st->sd.spi, (u8 *)&ret, sizeof(ret));
+ ret = ad_sd_reset(&st->sd, 32);
if (ret < 0)
goto out;
usleep_range(500, 2000); /* Wait for at least 500us */
}
EXPORT_SYMBOL_GPL(ad_sd_read_reg);
+/**
+ * ad_sd_reset() - Reset the serial interface
+ *
+ * @sigma_delta: The sigma delta device
+ * @reset_length: Number of SCLKs with DIN = 1
+ *
+ * Returns 0 on success, an error code otherwise.
+ **/
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
+ unsigned int reset_length)
+{
+ uint8_t *buf;
+ unsigned int size;
+ int ret;
+
+ size = DIV_ROUND_UP(reset_length, 8);
+ buf = kcalloc(size, sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memset(buf, 0xff, size);
+ ret = spi_write(sigma_delta->spi, buf, size);
+ kfree(buf);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ad_sd_reset);
+
static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
unsigned int mode, unsigned int channel)
{
* MCP3204
* MCP3208
* ------------
+ * 13 bit converter
+ * MCP3301
*
* Datasheet can be found here:
* http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf mcp3001
}
static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
- bool differential, int device_index)
+ bool differential, int device_index, int *val)
{
int ret;
switch (device_index) {
case mcp3001:
- return (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
+ *val = (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
+ return 0;
case mcp3002:
case mcp3004:
case mcp3008:
- return (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
+ *val = (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
+ return 0;
case mcp3201:
- return (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
+ *val = (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
+ return 0;
case mcp3202:
case mcp3204:
case mcp3208:
- return (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
+ *val = (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
+ return 0;
case mcp3301:
- return sign_extend32((adc->rx_buf[0] & 0x1f) << 8 | adc->rx_buf[1], 12);
+ *val = sign_extend32((adc->rx_buf[0] & 0x1f) << 8
+ | adc->rx_buf[1], 12);
+ return 0;
default:
return -EINVAL;
}
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = mcp320x_adc_conversion(adc, channel->address,
- channel->differential, device_index);
-
+ channel->differential, device_index, val);
if (ret < 0)
goto out;
- *val = ret;
ret = IIO_VAL_INT;
break;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &mcp320x_info;
+ spi_set_drvdata(spi, indio_dev);
chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data];
indio_dev->channels = chip_info->channels;
num_channels = of_property_count_u32_elems(node, "st,adc-channels");
if (num_channels < 0 ||
- num_channels >= adc_info->max_channels) {
+ num_channels > adc_info->max_channels) {
dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
return num_channels < 0 ? num_channels : -EINVAL;
}
#define ADS1015_CFG_COMP_QUE_MASK GENMASK(1, 0)
#define ADS1015_CFG_COMP_LAT_MASK BIT(2)
-#define ADS1015_CFG_COMP_POL_MASK BIT(2)
+#define ADS1015_CFG_COMP_POL_MASK BIT(3)
#define ADS1015_CFG_COMP_MODE_MASK BIT(4)
#define ADS1015_CFG_DR_MASK GENMASK(7, 5)
#define ADS1015_CFG_MOD_MASK BIT(8)
switch (irq_trig) {
case IRQF_TRIGGER_LOW:
- cfg_comp |= ADS1015_CFG_COMP_POL_LOW;
+ cfg_comp |= ADS1015_CFG_COMP_POL_LOW <<
+ ADS1015_CFG_COMP_POL_SHIFT;
break;
case IRQF_TRIGGER_HIGH:
- cfg_comp |= ADS1015_CFG_COMP_POL_HIGH;
+ cfg_comp |= ADS1015_CFG_COMP_POL_HIGH <<
+ ADS1015_CFG_COMP_POL_SHIFT;
break;
default:
return -EINVAL;
/* Enable 3v1 bias regulator for MADC[3:6] */
madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1");
- if (IS_ERR(madc->usb3v1))
- return -ENODEV;
+ if (IS_ERR(madc->usb3v1)) {
+ ret = -ENODEV;
+ goto err_i2c;
+ }
ret = regulator_enable(madc->usb3v1);
- if (ret)
+ if (ret) {
dev_err(madc->dev, "could not enable 3v1 bias regulator\n");
+ goto err_i2c;
+ }
ret = iio_device_register(iio_dev);
if (ret) {
dev_err(&pdev->dev, "could not register iio device\n");
- goto err_i2c;
+ goto err_usb3v1;
}
return 0;
+err_usb3v1:
+ regulator_disable(madc->usb3v1);
err_i2c:
twl4030_madc_set_current_generator(madc, 0, 0);
err_current_generator:
u8 drdy_mask;
struct st_sensor_data *sdata = iio_priv(indio_dev);
- if (!sdata->sensor_settings->drdy_irq.addr)
+ if (!sdata->sensor_settings->drdy_irq.addr) {
+ /*
+ * there are some devices (e.g. LIS3MDL) where the drdy line is
+ * routed to a given pin and it is not possible to select a
+ * different one. Take the irq status register into account
+ * to understand whether the irq trigger can be properly supported
+ */
+ if (sdata->sensor_settings->drdy_irq.addr_stat_drdy)
+ sdata->hw_irq_trigger = enable;
return 0;
+ }
/* Enable/Disable the interrupt generator 1. */
if (sdata->sensor_settings->drdy_irq.ig1.en_addr > 0) {
ret = indio_dev->info->debugfs_reg_access(indio_dev,
indio_dev->cached_reg_addr,
0, &val);
- if (ret)
+ if (ret) {
dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
+ return ret;
+ }
len = snprintf(buf, sizeof(buf), "0x%X\n", val);
},
},
},
+ .drdy_irq = {
+ /* drdy line is routed to the drdy pin */
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ },
.multi_read_bit = true,
.bootime = 2,
},
u8 osrs = BMP280_OSRS_TEMP_X(data->oversampling_temp + 1) |
BMP280_OSRS_PRESS_X(data->oversampling_press + 1);
- ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_MEAS,
+ ret = regmap_write_bits(data->regmap, BMP280_REG_CTRL_MEAS,
BMP280_OSRS_TEMP_MASK |
BMP280_OSRS_PRESS_MASK |
BMP280_MODE_MASK,
clk_disable(priv->clk);
/* Stop timer */
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
regmap_write(priv->regmap, TIM_PSC, 0);
regmap_write(priv->regmap, TIM_ARR, 0);
if (ret)
return ret;
+ /* TIMx_ARR register shouldn't be buffered (ARPE=0) */
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
regmap_write(priv->regmap, TIM_ARR, preset);
- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, TIM_CR1_ARPE);
return len;
}
atomic_set(&qp->qp_sec->error_list_count, 0);
init_completion(&qp->qp_sec->error_complete);
ret = security_ib_alloc_security(&qp->qp_sec->security);
- if (ret)
+ if (ret) {
kfree(qp->qp_sec);
+ qp->qp_sec = NULL;
+ }
return ret;
}
resp.raw_packet_caps = attr.raw_packet_caps;
resp.response_length += sizeof(resp.raw_packet_caps);
- if (ucore->outlen < resp.response_length + sizeof(resp.xrq_caps))
+ if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
goto end;
- resp.xrq_caps.max_rndv_hdr_size = attr.xrq_caps.max_rndv_hdr_size;
- resp.xrq_caps.max_num_tags = attr.xrq_caps.max_num_tags;
- resp.xrq_caps.max_ops = attr.xrq_caps.max_ops;
- resp.xrq_caps.max_sge = attr.xrq_caps.max_sge;
- resp.xrq_caps.flags = attr.xrq_caps.flags;
- resp.response_length += sizeof(resp.xrq_caps);
+ resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
+ resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags;
+ resp.tm_caps.max_ops = attr.tm_caps.max_ops;
+ resp.tm_caps.max_sge = attr.tm_caps.max_sge;
+ resp.tm_caps.flags = attr.tm_caps.flags;
+ resp.response_length += sizeof(resp.tm_caps);
end:
err = ib_copy_to_udata(ucore, &resp, resp.response_length);
return err;
static int thermal_init(struct hfi1_devdata *dd);
static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
+static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
+ int msecs);
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
int msecs);
static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
u64 regs[CCE_NUM_INT_CSRS];
u32 bit;
int i;
+ irqreturn_t handled = IRQ_NONE;
this_cpu_inc(*dd->int_counter);
for_each_set_bit(bit, (unsigned long *)®s[0],
CCE_NUM_INT_CSRS * 64) {
is_interrupt(dd, bit);
+ handled = IRQ_HANDLED;
}
- return IRQ_HANDLED;
+ return handled;
}
static irqreturn_t sdma_interrupt(int irq, void *data)
write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
}
-void reset_qsfp(struct hfi1_pportdata *ppd)
+int reset_qsfp(struct hfi1_pportdata *ppd)
{
struct hfi1_devdata *dd = ppd->dd;
u64 mask, qsfp_mask;
* for alarms and warnings
*/
set_qsfp_int_n(ppd, 1);
+
+ /*
+ * After the reset, AOC transmitters are enabled by default. They need
+ * to be turned off to complete the QSFP setup before they can be
+ * enabled again.
+ */
+ return set_qsfp_tx(ppd, 0);
}
static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
{
struct hfi1_devdata *dd = ppd->dd;
u32 previous_state;
+ int offline_state_ret;
int ret;
update_lcb_cache(dd);
ppd->offline_disabled_reason =
HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
- /*
- * Wait for offline transition. It can take a while for
- * the link to go down.
- */
- ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000);
- if (ret < 0)
- return ret;
-
- /*
- * Now in charge of LCB - must be after the physical state is
- * offline.quiet and before host_link_state is changed.
- */
- set_host_lcb_access(dd);
- write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
-
- /* make sure the logical state is also down */
- ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
- if (ret)
- force_logical_link_state_down(ppd);
-
- ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
+ offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
+ if (offline_state_ret < 0)
+ return offline_state_ret;
+ /* Disabling AOC transmitters */
if (ppd->port_type == PORT_TYPE_QSFP &&
ppd->qsfp_info.limiting_active &&
qsfp_mod_present(ppd)) {
}
}
+ /*
+ * Wait for the offline.Quiet transition if it hasn't happened yet. It
+ * can take a while for the link to go down.
+ */
+ if (offline_state_ret != PLS_OFFLINE_QUIET) {
+ ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Now in charge of LCB - must be after the physical state is
+ * offline.quiet and before host_link_state is changed.
+ */
+ set_host_lcb_access(dd);
+ write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
+
+ /* make sure the logical state is also down */
+ ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
+ if (ret)
+ force_logical_link_state_down(ppd);
+
+ ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
+
/*
* The LNI has a mandatory wait time after the physical state
* moves to Offline.Quiet. The wait time may be different
& (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
/* went down while attempting link up */
check_lni_states(ppd);
+
+ /* The QSFP doesn't need to be reset on LNI failure */
+ ppd->qsfp_info.reset_needed = 0;
}
/* the active link width (downgrade) is 0 on link down */
return 0;
}
+/*
+ * wait_phys_link_offline_substates - wait for any offline substate
+ * @ppd: port device
+ * @msecs: the number of milliseconds to wait
+ *
+ * Wait up to msecs milliseconds for any offline physical link
+ * state change to occur.
+ * Returns the physical state that was read if at least one offline
+ * substate is reached, otherwise -ETIMEDOUT.
+ */
+static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
+ int msecs)
+{
+ u32 read_state;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(msecs);
+ while (1) {
+ read_state = read_physical_state(ppd->dd);
+ if ((read_state & 0xF0) == PLS_OFFLINE)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dd_dev_err(ppd->dd,
+ "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
+ read_state, msecs);
+ return -ETIMEDOUT;
+ }
+ usleep_range(1950, 2050); /* sleep 2ms-ish */
+ }
+
+ log_state_transition(ppd, read_state);
+ return read_state;
+}
+
#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
#define PLS_OFFLINE_READY_TO_QUIET_LT 0x92
#define PLS_OFFLINE_REPORT_FAILURE 0x93
#define PLS_OFFLINE_READY_TO_QUIET_BCC 0x94
+#define PLS_OFFLINE_QUIET_DURATION 0x95
#define PLS_POLLING 0x20
#define PLS_POLLING_QUIET 0x20
#define PLS_POLLING_ACTIVE 0x21
void handle_link_bounce(struct work_struct *work);
void handle_start_link(struct work_struct *work);
void handle_sma_message(struct work_struct *work);
-void reset_qsfp(struct hfi1_pportdata *ppd);
+int reset_qsfp(struct hfi1_pportdata *ppd);
void qsfp_event(struct work_struct *work);
void start_freeze_handling(struct hfi1_pportdata *ppd, int flags);
int send_idle_sma(struct hfi1_devdata *dd, u64 message);
return ret;
}
-/* magic character sequence that trails an image */
+/* magic character sequence that begins an image */
+#define IMAGE_START_MAGIC "APO="
+
+/* magic character sequence that might trail an image */
#define IMAGE_TRAIL_MAGIC "egamiAPO"
/* EPROM file types */
{
void *buffer;
void *p;
+ u32 length;
int ret;
buffer = kmalloc(P1_SIZE, GFP_KERNEL);
return ret;
}
- /* scan for image magic that may trail the actual data */
- p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE);
- if (!p) {
+ /* config partition is valid only if it starts with IMAGE_START_MAGIC */
+ if (memcmp(buffer, IMAGE_START_MAGIC, strlen(IMAGE_START_MAGIC))) {
kfree(buffer);
return -ENOENT;
}
+ /* scan for image magic that may trail the actual data */
+ p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE);
+ if (p)
+ length = p - buffer;
+ else
+ length = P1_SIZE;
+
*data = buffer;
- *size = p - buffer;
+ *size = length;
return 0;
}
switch (ret) {
case 0:
ret = setup_base_ctxt(fd, uctxt);
- if (uctxt->subctxt_cnt) {
- /*
- * Base context is done (successfully or not), notify
- * anybody using a sub-context that is waiting for
- * this completion.
- */
- clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
- wake_up(&uctxt->wait);
- }
+ if (ret)
+ deallocate_ctxt(uctxt);
break;
case 1:
ret = complete_subctxt(fd);
/* Now allocate the RcvHdr queue and eager buffers. */
ret = hfi1_create_rcvhdrq(dd, uctxt);
if (ret)
- return ret;
+ goto done;
ret = hfi1_setup_eagerbufs(uctxt);
if (ret)
- goto setup_failed;
+ goto done;
/* If sub-contexts are enabled, do the appropriate setup */
if (uctxt->subctxt_cnt)
ret = setup_subctxt(uctxt);
if (ret)
- goto setup_failed;
+ goto done;
ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
if (ret)
- goto setup_failed;
+ goto done;
ret = init_user_ctxt(fd, uctxt);
if (ret)
- goto setup_failed;
+ goto done;
user_init(uctxt);
fd->uctxt = uctxt;
hfi1_rcd_get(uctxt);
- return 0;
+done:
+ if (uctxt->subctxt_cnt) {
+ /*
+ * On error, set the failed bit so sub-contexts will clean up
+ * correctly.
+ */
+ if (ret)
+ set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
-setup_failed:
- /* Set the failed bit so sub-context init can do the right thing */
- set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
- deallocate_ctxt(uctxt);
+ /*
+ * Base context is done (successfully or not), notify anybody
+ * using a sub-context that is waiting for this completion.
+ */
+ clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
+ wake_up(&uctxt->wait);
+ }
return ret;
}
/*
* Code to adjust PCIe capabilities.
*/
-static int tune_pcie_caps(struct hfi1_devdata *);
+static void tune_pcie_caps(struct hfi1_devdata *);
/*
* Do all the common PCIe setup and initialization.
*/
int request_msix(struct hfi1_devdata *dd, u32 msireq)
{
- int nvec, ret;
+ int nvec;
nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq,
PCI_IRQ_MSIX | PCI_IRQ_LEGACY);
return nvec;
}
- ret = tune_pcie_caps(dd);
- if (ret) {
- dd_dev_err(dd, "tune_pcie_caps() failed: %d\n", ret);
- pci_free_irq_vectors(dd->pcidev);
- return ret;
- }
+ tune_pcie_caps(dd);
/* check for legacy IRQ */
if (nvec == 1 && !dd->pcidev->msix_enabled)
module_param_named(aspm, aspm_mode, uint, S_IRUGO);
MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic");
-static int tune_pcie_caps(struct hfi1_devdata *dd)
+static void tune_pcie_caps(struct hfi1_devdata *dd)
{
struct pci_dev *parent;
u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
* Turn on extended tags in DevCtl in case the BIOS has turned it off
* to improve WFR SDMA bandwidth
*/
- ret = pcie_capability_read_word(dd->pcidev,
- PCI_EXP_DEVCTL, &ectl);
- if (ret) {
- dd_dev_err(dd, "Unable to read from PCI config\n");
- return ret;
- }
-
- if (!(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
+ ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
+ if ((!ret) && !(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
dd_dev_info(dd, "Enabling PCIe extended tags\n");
ectl |= PCI_EXP_DEVCTL_EXT_TAG;
ret = pcie_capability_write_word(dd->pcidev,
PCI_EXP_DEVCTL, ectl);
- if (ret) {
- dd_dev_err(dd, "Unable to write to PCI config\n");
- return ret;
- }
+ if (ret)
+ dd_dev_info(dd, "Unable to write to PCI config\n");
}
/* Find out supported and configured values for parent (root) */
parent = dd->pcidev->bus->self;
* The driver cannot perform the tuning if it does not have
* access to the upstream component.
*/
- if (!parent)
- return -EINVAL;
+ if (!parent) {
+ dd_dev_info(dd, "Parent not found\n");
+ return;
+ }
if (!pci_is_root_bus(parent->bus)) {
dd_dev_info(dd, "Parent not root\n");
- return -EINVAL;
+ return;
+ }
+ if (!pci_is_pcie(parent)) {
+ dd_dev_info(dd, "Parent is not PCI Express capable\n");
+ return;
+ }
+ if (!pci_is_pcie(dd->pcidev)) {
+ dd_dev_info(dd, "PCI device is not PCI Express capable\n");
+ return;
}
-
- if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
- return -EINVAL;
rc_mpss = parent->pcie_mpss;
rc_mps = ffs(pcie_get_mps(parent)) - 8;
/* Find out supported and configured values for endpoint (us) */
ep_mrrs = max_mrrs;
pcie_set_readrq(dd->pcidev, ep_mrrs);
}
-
- return 0;
}
/* End of PCIe capability tuning */
* reuse of stale settings established in our previous pass through.
*/
if (ppd->qsfp_info.reset_needed) {
- reset_qsfp(ppd);
+ ret = reset_qsfp(ppd);
+ if (ret)
+ return ret;
refresh_qsfp_cache(ppd, &ppd->qsfp_info);
} else {
ppd->qsfp_info.reset_needed = 1;
}
if (MLX5_CAP_GEN(mdev, tag_matching)) {
- props->xrq_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
- props->xrq_caps.max_num_tags =
+ props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
+ props->tm_caps.max_num_tags =
(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
- props->xrq_caps.flags = IB_TM_CAP_RC;
- props->xrq_caps.max_ops =
+ props->tm_caps.flags = IB_TM_CAP_RC;
+ props->tm_caps.max_ops =
1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
- props->xrq_caps.max_sge = MLX5_TM_MAX_SGE;
+ props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
}
if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
{
unsigned long tmp;
unsigned long m;
- int i, k;
- u64 base = 0;
- int p = 0;
- int skip;
- int mask;
- u64 len;
- u64 pfn;
+ u64 base = ~0, p = 0;
+ u64 len, pfn;
+ int i = 0;
struct scatterlist *sg;
int entry;
unsigned long page_shift = umem->page_shift;
m = find_first_bit(&tmp, BITS_PER_LONG);
if (max_page_shift)
m = min_t(unsigned long, max_page_shift - page_shift, m);
- skip = 1 << m;
- mask = skip - 1;
- i = 0;
+
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
len = sg_dma_len(sg) >> page_shift;
pfn = sg_dma_address(sg) >> page_shift;
- for (k = 0; k < len; k++) {
- if (!(i & mask)) {
- tmp = (unsigned long)pfn;
- m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG));
- skip = 1 << m;
- mask = skip - 1;
- base = pfn;
- p = 0;
- } else {
- if (base + p != pfn) {
- tmp = (unsigned long)p;
- m = find_first_bit(&tmp, BITS_PER_LONG);
- skip = 1 << m;
- mask = skip - 1;
- base = pfn;
- p = 0;
- }
- }
- p++;
- i++;
+ if (base + p != pfn) {
+ /* If either the offset or the new
+ * base is unaligned, update m
+ */
+ tmp = (unsigned long)(pfn | p);
+ if (!IS_ALIGNED(tmp, 1 << m))
+ m = find_first_bit(&tmp, BITS_PER_LONG);
+
+ base = pfn;
+ p = 0;
}
+
+ p += len;
+ i += len;
}
if (i) {
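A minimal illustration of the alignment update above - outside the patch, with __builtin_ctzl() standing in for find_first_bit(): the usable order m shrinks whenever the start pfn of a new block, or the accumulated length p, breaks the current 1 << m alignment.

/*
 * Illustrative only: cap the page order at the lowest set bit of
 * pfn | p once that value is no longer aligned to 1 << m.
 */
static unsigned long cap_page_order(unsigned long m, unsigned long pfn,
				    unsigned long p)
{
	unsigned long tmp = pfn | p;

	if (tmp & ((1UL << m) - 1))		/* no longer 1 << m aligned */
		m = __builtin_ctzl(tmp);	/* index of lowest set bit */
	return m;
}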
#define MLX5_UMR_ALIGN 2048
-static int clean_mr(struct mlx5_ib_mr *mr);
+static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
update_xlt_flags);
+
if (err) {
- mlx5_ib_dereg_mr(&mr->ibmr);
+ dereg_mr(dev, mr);
return ERR_PTR(err);
}
}
err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
&npages, &page_shift, &ncont, &order);
if (err < 0) {
- clean_mr(mr);
+ clean_mr(dev, mr);
return err;
}
}
if (err) {
mlx5_ib_warn(dev, "Failed to rereg UMR\n");
ib_umem_release(mr->umem);
- clean_mr(mr);
+ clean_mr(dev, mr);
return err;
}
}
}
}
-static int clean_mr(struct mlx5_ib_mr *mr)
+static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
int allocated_from_cache = mr->allocated_from_cache;
int err;
return 0;
}
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
- struct mlx5_ib_mr *mr = to_mmr(ibmr);
int npages = mr->npages;
struct ib_umem *umem = mr->umem;
}
#endif
- clean_mr(mr);
+ clean_mr(dev, mr);
if (umem) {
ib_umem_release(umem);
return 0;
}
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+
+ return dereg_mr(dev, mr);
+}
+
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg)
mr->ibmr.iova);
set_wqe_32bit_value(wqe->wqe_words,
NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX,
- mr->ibmr.length);
+ lower_32_bits(mr->ibmr.length));
set_wqe_32bit_value(wqe->wqe_words,
NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0);
set_wqe_32bit_value(wqe->wqe_words,
mr->npages * 8);
nes_debug(NES_DBG_IW_TX, "SQ_REG_MR: iova_start: %llx, "
- "length: %d, rkey: %0x, pgl_paddr: %llx, "
+ "length: %lld, rkey: %0x, pgl_paddr: %llx, "
"page_list_len: %u, wqe_misc: %x\n",
(unsigned long long) mr->ibmr.iova,
mr->ibmr.length,
*/
priv->dev->broadcast[8] = priv->pkey >> 8;
priv->dev->broadcast[9] = priv->pkey & 0xff;
-
- /*
- * Update the broadcast address in the priv->broadcast object,
- * in case it already exists, otherwise no one will do that.
- */
- if (priv->broadcast) {
- spin_lock_irq(&priv->lock);
- memcpy(priv->broadcast->mcmember.mgid.raw,
- priv->dev->broadcast + 4,
- sizeof(union ib_gid));
- spin_unlock_irq(&priv->lock);
- }
-
return 0;
}
{
struct ipoib_dev_priv *priv;
struct ib_port_attr attr;
+ struct rdma_netdev *rn;
int result = -ENOMEM;
priv = ipoib_intf_alloc(hca, port, format);
ipoib_dev_cleanup(priv->dev);
device_init_failed:
- free_netdev(priv->dev);
+ rn = netdev_priv(priv->dev);
+ rn->free_rdma_netdev(priv->dev);
kfree(priv);
alloc_mem_failed:
return;
list_for_each_entry_safe(priv, tmp, dev_list, list) {
- struct rdma_netdev *rn = netdev_priv(priv->dev);
+ struct rdma_netdev *parent_rn = netdev_priv(priv->dev);
ib_unregister_event_handler(&priv->event_handler);
flush_workqueue(ipoib_workqueue);
unregister_netdev(priv->dev);
mutex_unlock(&priv->sysfs_mutex);
- rn->free_rdma_netdev(priv->dev);
+ parent_rn->free_rdma_netdev(priv->dev);
+
+ list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
+ struct rdma_netdev *child_rn;
- list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
+ child_rn = netdev_priv(cpriv->dev);
+ child_rn->free_rdma_netdev(cpriv->dev);
kfree(cpriv);
+ }
kfree(priv);
}
return restart_syscall();
}
- priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
- if (!priv) {
+ if (!down_write_trylock(&ppriv->vlan_rwsem)) {
rtnl_unlock();
mutex_unlock(&ppriv->sysfs_mutex);
- return -ENOMEM;
+ return restart_syscall();
}
- down_write(&ppriv->vlan_rwsem);
+ priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
+ if (!priv) {
+ result = -ENOMEM;
+ goto out;
+ }
/*
* First ensure this isn't a duplicate. We check the parent device and
rtnl_unlock();
mutex_unlock(&ppriv->sysfs_mutex);
- if (result) {
- free_netdev(priv->dev);
+ if (result && priv) {
+ struct rdma_netdev *rn;
+
+ rn = netdev_priv(priv->dev);
+ rn->free_rdma_netdev(priv->dev);
kfree(priv);
}
return restart_syscall();
}
- down_write(&ppriv->vlan_rwsem);
+ if (!down_write_trylock(&ppriv->vlan_rwsem)) {
+ rtnl_unlock();
+ mutex_unlock(&ppriv->sysfs_mutex);
+ return restart_syscall();
+ }
+
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
mutex_unlock(&ppriv->sysfs_mutex);
if (dev) {
- free_netdev(dev);
+ struct rdma_netdev *rn;
+
+ rn = netdev_priv(dev);
+ rn->free_rdma_netdev(priv->dev);
kfree(priv);
return 0;
}
{
int i;
- iser_err("page vec npages %d data length %d\n",
+ iser_err("page vec npages %d data length %lld\n",
page_vec->npages, page_vec->fake_mr.length);
for (i = 0; i < page_vec->npages; i++)
iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]);
hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
entry = (((u64) hi) << 32) + lo;
if (last_entry && last_entry != entry) {
- pr_err("IOMMU:%d should use the same dev table as others!/n",
+ pr_err("IOMMU:%d should use the same dev table as others!\n",
iommu->index);
return false;
}
old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
if (old_devtb_size != dev_table_size) {
- pr_err("The device table size of IOMMU:%d is not expected!/n",
+ pr_err("The device table size of IOMMU:%d is not expected!\n",
iommu->index);
return false;
}
old_devtb_phys = entry & PAGE_MASK;
if (old_devtb_phys >= 0x100000000ULL) {
- pr_err("The address of old device table is above 4G, not trustworthy!/n");
+ pr_err("The address of old device table is above 4G, not trustworthy!\n");
return false;
}
old_devtb = memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
get_order(dev_table_size));
if (old_dev_tbl_cpy == NULL) {
- pr_err("Failed to allocate memory for copying old device table!/n");
+ pr_err("Failed to allocate memory for copying old device table!\n");
return false;
}
static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries,
struct io_pgtable_cfg *cfg)
{
- if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
+ if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)
return;
dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep),
int ret;
spin_lock_irqsave(&dom->pgtlock, flags);
- ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
+ ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32),
+ size, prot);
spin_unlock_irqrestore(&dom->pgtlock, flags);
return ret;
static void gic_unmask_irq(struct irq_data *d)
{
- struct cpumask *affinity = irq_data_get_affinity_mask(d);
unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
unsigned int cpu;
write_gic_smask(intr);
gic_clear_pcpu_masks(intr);
- cpu = cpumask_first_and(affinity, cpu_online_mask);
+ cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
}
irq_hw_number_t hw, unsigned int cpu)
{
int intr = GIC_HWIRQ_TO_SHARED(hw);
+ struct irq_data *data;
unsigned long flags;
+ data = irq_get_irq_data(virq);
+
spin_lock_irqsave(&gic_lock, flags);
write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
gic_clear_pcpu_masks(intr);
set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
+ irq_data_update_effective_affinity(data, cpumask_of(cpu));
spin_unlock_irqrestore(&gic_lock, flags);
return 0;
/* Find the first available CPU vector. */
i = 0;
- reserved = (C_SW0 | C_SW1) >> __fls(C_SW0);
+ reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0);
while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
i++, &cpu_vec))
reserved |= BIT(cpu_vec);
gicconfig = read_gic_config();
gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
- gic_shared_intrs >>= __fls(GIC_CONFIG_NUMINTERRUPTS);
+ gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
gic_shared_intrs = (gic_shared_intrs + 1) * 8;
gic_vpes = gicconfig & GIC_CONFIG_PVPS;
- gic_vpes >>= __fls(GIC_CONFIG_PVPS);
+ gic_vpes >>= __ffs(GIC_CONFIG_PVPS);
gic_vpes = gic_vpes + 1;
if (cpu_has_veic) {
#define AS_PEAK_mA_TO_REG(a) \
((min_t(u32, AS_PEAK_mA_MAX, a) - 1250) / 250)
+/* LED numbers for Devicetree */
+#define AS_LED_FLASH 0
+#define AS_LED_INDICATOR 1
+
enum as_mode {
AS_MODE_EXT_TORCH = 0 << AS_CONTROL_MODE_SETTING_SHIFT,
AS_MODE_INDICATOR = 1 << AS_CONTROL_MODE_SETTING_SHIFT,
struct device_node *node)
{
struct as3645a_config *cfg = &flash->cfg;
+ struct device_node *child;
const char *name;
int rval;
- flash->flash_node = of_get_child_by_name(node, "flash");
+ for_each_child_of_node(node, child) {
+ u32 id = 0;
+
+ of_property_read_u32(child, "reg", &id);
+
+ switch (id) {
+ case AS_LED_FLASH:
+ flash->flash_node = of_node_get(child);
+ break;
+ case AS_LED_INDICATOR:
+ flash->indicator_node = of_node_get(child);
+ break;
+ default:
+ dev_warn(&flash->client->dev,
+ "unknown LED %u encountered, ignoring\n", id);
+ break;
+ }
+ }
+
if (!flash->flash_node) {
dev_err(&flash->client->dev, "can't find flash node\n");
return -ENODEV;
of_property_read_u32(flash->flash_node, "voltage-reference",
&cfg->voltage_reference);
- of_property_read_u32(flash->flash_node, "peak-current-limit",
+ of_property_read_u32(flash->flash_node, "ams,input-max-microamp",
&cfg->peak);
cfg->peak = AS_PEAK_mA_TO_REG(cfg->peak);
- flash->indicator_node = of_get_child_by_name(node, "indicator");
if (!flash->indicator_node) {
dev_warn(&flash->client->dev,
"can't find indicator node\n");
as3645a_set_control(flash, AS_MODE_EXT_TORCH, false);
v4l2_flash_release(flash->vf);
+ v4l2_flash_release(flash->vfind);
led_classdev_flash_unregister(&flash->fled);
led_classdev_unregister(&flash->iled_cdev);
if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
return DM_MAPIO_REQUEUE;
- mddev->pers->make_request(mddev, bio);
+ md_handle_request(mddev, bio);
return DM_MAPIO_SUBMITTED;
}
* call has finished, the bio has been linked into some internal structure
* and so is visible to ->quiesce(), so we don't need the refcount any more.
*/
+void md_handle_request(struct mddev *mddev, struct bio *bio)
+{
+check_suspended:
+ rcu_read_lock();
+ if (mddev->suspended) {
+ DEFINE_WAIT(__wait);
+ for (;;) {
+ prepare_to_wait(&mddev->sb_wait, &__wait,
+ TASK_UNINTERRUPTIBLE);
+ if (!mddev->suspended)
+ break;
+ rcu_read_unlock();
+ schedule();
+ rcu_read_lock();
+ }
+ finish_wait(&mddev->sb_wait, &__wait);
+ }
+ atomic_inc(&mddev->active_io);
+ rcu_read_unlock();
+
+ if (!mddev->pers->make_request(mddev, bio)) {
+ atomic_dec(&mddev->active_io);
+ wake_up(&mddev->sb_wait);
+ goto check_suspended;
+ }
+
+ if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
+ wake_up(&mddev->sb_wait);
+}
+EXPORT_SYMBOL(md_handle_request);
+
static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
bio_endio(bio);
return BLK_QC_T_NONE;
}
-check_suspended:
- rcu_read_lock();
- if (mddev->suspended) {
- DEFINE_WAIT(__wait);
- for (;;) {
- prepare_to_wait(&mddev->sb_wait, &__wait,
- TASK_UNINTERRUPTIBLE);
- if (!mddev->suspended)
-