Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal
author	Linus Torvalds <torvalds@linux-foundation.org>
Fri, 6 Mar 2015 21:43:33 +0000 (13:43 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Fri, 6 Mar 2015 21:43:33 +0000 (13:43 -0800)
Pull thermal management fixes from Eduardo Valentin:
 "Specifics:

   - adding Lukasz as maintainer of samsung thermal driver.
   - driver fixes: exynos and int340x.
   - one fix in the exynos cpufreq driver related to cpu cooling (acked
     by cpufreq maintainer).
   - fix default sysfs attributes of cooling devices

  Note: I am sending this pull on Rui's behalf while he fixes issues in his Linux box"

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal:
  thermal: Make sysfs attributes of cooling devices default attributes
  Thermal/int340x: Fix memleak for aux trip
  MAINTAINERS: Add entry for SAMSUNG THERMAL DRIVER
  cpufreq: exynos: Use simple approach to assess if cpu cooling can be used
  thermal: exynos: Fix wrong control of power down detection mode for Exynos7

215 files changed:
Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
Documentation/power/suspend-and-interrupts.txt
MAINTAINERS
Makefile
arch/arc/include/asm/processor.h
arch/arc/include/asm/stacktrace.h [new file with mode: 0644]
arch/arc/kernel/process.c
arch/arc/kernel/stacktrace.c
arch/arc/kernel/unaligned.c
arch/arc/mm/fault.c
arch/arm/include/asm/kvm_mmu.h
arch/arm/kvm/arm.c
arch/arm/kvm/trace.h
arch/arm/mach-msm/board-halibut.c
arch/arm/mach-msm/board-qsd8x50.c
arch/arm/mach-pxa/idp.c
arch/arm/mach-pxa/lpd270.c
arch/arm/mach-realview/core.c
arch/arm/mach-realview/realview_eb.c
arch/arm/mach-sa1100/neponset.c
arch/arm/mach-sa1100/pleb.c
arch/mips/kvm/tlb.c
arch/mips/kvm/trace.h
arch/powerpc/include/asm/iommu.h
arch/powerpc/include/asm/irq_work.h [new file with mode: 0644]
arch/powerpc/kernel/iommu.c
arch/powerpc/kernel/smp.c
arch/powerpc/platforms/powernv/pci.c
arch/powerpc/platforms/pseries/iommu.c
arch/x86/Kconfig
arch/x86/include/asm/xsave.h
arch/x86/kernel/entry_64.S
arch/x86/kvm/emulate.c
arch/x86/kvm/lapic.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/pci/acpi.c
drivers/acpi/resource.c
drivers/acpi/video.c
drivers/base/power/domain.c
drivers/base/power/wakeup.c
drivers/bluetooth/btusb.c
drivers/clk/at91/pmc.c
drivers/clk/at91/pmc.h
drivers/cpufreq/ppc-corenet-cpufreq.c
drivers/cpuidle/cpuidle.c
drivers/dma-buf/fence.c
drivers/dma-buf/reservation.c
drivers/firmware/dmi_scan.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/gpu/drm/i915/intel_display.c
drivers/isdn/hardware/mISDN/hfcpci.c
drivers/net/Kconfig
drivers/net/appletalk/Kconfig
drivers/net/dsa/bcm_sf2.h
drivers/net/ethernet/8390/axnet_cs.c
drivers/net/ethernet/8390/pcnet_cs.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/pasemi/pasemi_mac.c
drivers/net/ethernet/qlogic/netxen/netxen_nic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/smsc/smc91c92_cs.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/smsc/smc91x.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/davinci_mdio.c
drivers/net/ethernet/xscale/ixp4xx_eth.c
drivers/net/macvtap.c
drivers/net/phy/amd-xgbe-phy.c
drivers/net/phy/phy.c
drivers/net/team/team.c
drivers/net/usb/Kconfig
drivers/net/usb/asix_devices.c
drivers/net/usb/hso.c
drivers/net/usb/plusb.c
drivers/net/wan/cosa.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/xen-netback/netback.c
drivers/pci/host/pci-versatile.c
drivers/rtc/rtc-at91rm9200.c
drivers/rtc/rtc-at91sam9.c
drivers/tty/serial/atmel_serial.c
drivers/vhost/net.c
drivers/watchdog/at91sam9_wdt.c
fs/ecryptfs/ecryptfs_kernel.h
fs/ecryptfs/file.c
fs/ecryptfs/keystore.c
fs/ecryptfs/main.c
fs/locks.c
fs/nfs/client.c
fs/nfs/delegation.c
fs/nfs/dir.c
fs/nfs/file.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs3proc.c
fs/nfs/nfs3xdr.c
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4session.h
fs/nfs/nfs4state.c
fs/nfs/proc.c
fs/nfs/write.c
fs/nfsd/nfs4state.c
include/linux/cpuidle.h
include/linux/interrupt.h
include/linux/irqdesc.h
include/linux/mlx4/qp.h
include/linux/netdevice.h
include/linux/nfs_fs.h
include/linux/rhashtable.h
include/net/caif/cfpkt.h
include/uapi/linux/tc_act/Kbuild
kernel/irq/manage.c
kernel/irq/pm.c
kernel/sched/idle.c
lib/rhashtable.c
lib/test_rhashtable.c
net/bridge/br.c
net/caif/cffrml.c
net/caif/cfpkt_skbuff.c
net/compat.c
net/core/dev.c
net/core/ethtool.c
net/core/gen_stats.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/skbuff.c
net/decnet/dn_route.c
net/hsr/hsr_device.c
net/hsr/hsr_main.c
net/hsr/hsr_slave.c
net/ipv4/ip_fragment.c
net/ipv4/ip_output.c
net/ipv4/tcp_input.c
net/ipv6/addrconf.c
net/ipv6/ip6_output.c
net/irda/ircomm/ircomm_tty.c
net/irda/irnet/irnet_ppp.c
net/mac80211/chan.c
net/mac80211/rc80211_minstrel.c
net/mac80211/tx.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nft_compat.c
net/netfilter/nft_hash.c
net/netfilter/xt_recent.c
net/netfilter/xt_socket.c
net/netlink/af_netlink.c
net/openvswitch/datapath.c
net/openvswitch/flow_netlink.c
net/openvswitch/vport.h
net/packet/af_packet.c
net/rxrpc/ar-ack.c
net/sched/ematch.c
net/sunrpc/auth_gss/gss_rpc_upcall.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/xprt_rdma.h
net/tipc/socket.c
net/wireless/core.c
net/wireless/nl80211.c
net/wireless/reg.c
sound/drivers/opl3/opl3_midi.c
sound/firewire/dice/dice-interface.h
sound/firewire/dice/dice-proc.c
sound/firewire/oxfw/oxfw-stream.c
sound/isa/msnd/msnd_pinnacle_mixer.c
sound/pci/hda/patch_realtek.c
sound/soc/atmel/sam9g20_wm8731.c
sound/soc/cirrus/Kconfig
sound/soc/codecs/Kconfig
sound/soc/codecs/max98357a.c
sound/soc/codecs/rt5670.c
sound/soc/codecs/rt5677.c
sound/soc/codecs/sta32x.c
sound/soc/fsl/fsl_ssi.c
sound/soc/generic/simple-card.c
sound/soc/intel/sst-atom-controls.h
sound/soc/intel/sst/sst.c
sound/soc/omap/omap-hdmi-audio.c
sound/soc/omap/omap-mcbsp.c
sound/soc/omap/omap-pcm.c
sound/soc/samsung/Kconfig
sound/soc/sh/rcar/core.c
sound/usb/line6/playback.c

index 33df3932168e1b8941a5de1565b1cf02f96fb551..8db32384a4866e56094aa96e86ac8765ccdf519c 100644 (file)
@@ -27,6 +27,8 @@ property is used.
 - amd,serdes-cdr-rate: CDR rate speed selection
 - amd,serdes-pq-skew: PQ (data sampling) skew
 - amd,serdes-tx-amp: TX amplitude boost
+- amd,serdes-dfe-tap-config: DFE taps available to run
+- amd,serdes-dfe-tap-enable: DFE taps to enable
 
 Example:
        xgbe_phy@e1240800 {
@@ -41,4 +43,6 @@ Example:
                amd,serdes-cdr-rate = <2>, <2>, <7>;
                amd,serdes-pq-skew = <10>, <10>, <30>;
                amd,serdes-tx-amp = <15>, <15>, <10>;
+               amd,serdes-dfe-tap-config = <3>, <3>, <1>;
+               amd,serdes-dfe-tap-enable = <0>, <0>, <127>;
        };
index 2f9c5a5fcb25ff2d91aa196123952b01d0908e34..8afb29a8604a552c9dc1b1794466bf20933dca35 100644 (file)
@@ -40,8 +40,10 @@ but also to IPIs and to some other special-purpose interrupts.
 
 The IRQF_NO_SUSPEND flag is used to indicate that to the IRQ subsystem when
 requesting a special-purpose interrupt.  It causes suspend_device_irqs() to
-leave the corresponding IRQ enabled so as to allow the interrupt to work all
-the time as expected.
+leave the corresponding IRQ enabled so as to allow the interrupt to work as
+expected during the suspend-resume cycle, but does not guarantee that the
+interrupt will wake the system from a suspended state -- for such cases it is
+necessary to use enable_irq_wake().
 
 Note that the IRQF_NO_SUSPEND flag affects the entire IRQ and not just one
 user of it.  Thus, if the IRQ is shared, all of the interrupt handlers installed
@@ -110,8 +112,9 @@ any special interrupt handling logic for it to work.
 IRQF_NO_SUSPEND and enable_irq_wake()
 -------------------------------------
 
-There are no valid reasons to use both enable_irq_wake() and the IRQF_NO_SUSPEND
-flag on the same IRQ.
+There are very few valid reasons to use both enable_irq_wake() and the
+IRQF_NO_SUSPEND flag on the same IRQ, and it is never valid to use both for the
+same device.
 
 First of all, if the IRQ is not shared, the rules for handling IRQF_NO_SUSPEND
 interrupts (interrupt handlers are invoked after suspend_device_irqs()) are
@@ -120,4 +123,13 @@ handlers are not invoked after suspend_device_irqs()).
 
 Second, both enable_irq_wake() and IRQF_NO_SUSPEND apply to entire IRQs and not
 to individual interrupt handlers, so sharing an IRQ between a system wakeup
-interrupt source and an IRQF_NO_SUSPEND interrupt source does not make sense.
+interrupt source and an IRQF_NO_SUSPEND interrupt source does not generally
+make sense.
+
+In rare cases an IRQ can be shared between a wakeup device driver and an
+IRQF_NO_SUSPEND user. In order for this to be safe, the wakeup device driver
+must be able to discern spurious IRQs from genuine wakeup events (signalling
+the latter to the core with pm_system_wakeup()), must use enable_irq_wake() to
+ensure that the IRQ will function as a wakeup source, and must request the IRQ
+with IRQF_COND_SUSPEND to tell the core that it meets these requirements. If
+these requirements are not met, it is not valid to use IRQF_COND_SUSPEND.
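
The pattern described above can be illustrated with a minimal sketch of a
hypothetical wakeup-capable driver sharing its line with an IRQF_NO_SUSPEND
user (all foo_* names are illustrative, not taken from any existing driver):
the handler rejects spurious interrupts, reports genuine events with
pm_system_wakeup(), and the line is requested with
IRQF_SHARED | IRQF_COND_SUSPEND and armed with enable_irq_wake().

        #include <linux/interrupt.h>
        #include <linux/suspend.h>
        #include <linux/io.h>

        struct foo_device {
                void __iomem *base;     /* hypothetical status register window */
                int irq;
        };

        /* Placeholder: a real driver would decode its own status bits here. */
        static bool foo_event_pending(struct foo_device *foo)
        {
                return readl(foo->base) != 0;
        }

        static irqreturn_t foo_irq_handler(int irq, void *dev_id)
        {
                struct foo_device *foo = dev_id;

                /* The line is shared with an IRQF_NO_SUSPEND user, so it may
                 * fire for reasons unrelated to this device. */
                if (!foo_event_pending(foo))
                        return IRQ_NONE;

                /* Signal a genuine wakeup event to the PM core. */
                pm_system_wakeup();

                return IRQ_HANDLED;
        }

        static int foo_setup_irq(struct foo_device *foo)
        {
                int ret;

                /* IRQF_COND_SUSPEND tells the core this handler copes with
                 * being called while the sharer keeps the IRQ enabled during
                 * system suspend. */
                ret = request_irq(foo->irq, foo_irq_handler,
                                  IRQF_SHARED | IRQF_COND_SUSPEND, "foo", foo);
                if (ret)
                        return ret;

                /* Make sure the IRQ actually works as a wakeup source. */
                return enable_irq_wake(foo->irq);
        }
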
index 76cc5346ef9cb9c66d66aa43066e6e974a5d41f5..6239a305dff0d9f871713651babb9e7398450365 100644 (file)
@@ -2065,7 +2065,7 @@ F:        include/net/bluetooth/
 BONDING DRIVER
 M:     Jay Vosburgh <j.vosburgh@gmail.com>
 M:     Veaceslav Falico <vfalico@gmail.com>
-M:     Andy Gospodarek <andy@greyhouse.net>
+M:     Andy Gospodarek <gospo@cumulusnetworks.com>
 L:     netdev@vger.kernel.org
 W:     http://sourceforge.net/projects/bonding/
 S:     Supported
index 9fab639727c78e5370538afcd6980f167af6fdf6..e6a9b1b94656b6e70cd2735cc1fcdaea32789cf4 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
index 4e547296831d62ea673239dcb224c69ee02cd782..52312cb5dbe21490b48e21343ab8f82b7eecfc0e 100644 (file)
@@ -47,9 +47,6 @@ struct thread_struct {
 /* Forward declaration, a strange C thing */
 struct task_struct;
 
-/* Return saved PC of a blocked thread  */
-unsigned long thread_saved_pc(struct task_struct *t);
-
 #define task_pt_regs(p) \
        ((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1)
 
@@ -72,18 +69,21 @@ unsigned long thread_saved_pc(struct task_struct *t);
 #define release_segments(mm)        do { } while (0)
 
 #define KSTK_EIP(tsk)   (task_pt_regs(tsk)->ret)
+#define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
 
 /*
  * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
  * Look in process.c for details of kernel stack layout
  */
-#define KSTK_ESP(tsk)   (tsk->thread.ksp)
+#define TSK_K_ESP(tsk)         (tsk->thread.ksp)
 
-#define KSTK_REG(tsk, off)     (*((unsigned int *)(KSTK_ESP(tsk) + \
+#define TSK_K_REG(tsk, off)    (*((unsigned int *)(TSK_K_ESP(tsk) + \
                                        sizeof(struct callee_regs) + off)))
 
-#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4)
-#define KSTK_FP(tsk)    KSTK_REG(tsk, 0)
+#define TSK_K_BLINK(tsk)       TSK_K_REG(tsk, 4)
+#define TSK_K_FP(tsk)          TSK_K_REG(tsk, 0)
+
+#define thread_saved_pc(tsk)   TSK_K_BLINK(tsk)
 
 extern void start_thread(struct pt_regs * regs, unsigned long pc,
                         unsigned long usp);
diff --git a/arch/arc/include/asm/stacktrace.h b/arch/arc/include/asm/stacktrace.h
new file mode 100644 (file)
index 0000000..b29b606
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_STACKTRACE_H
+#define __ASM_STACKTRACE_H
+
+#include <linux/sched.h>
+
+/**
+ * arc_unwind_core - Unwind the kernel mode stack for an execution context
+ * @tsk:               NULL for current task, specific task otherwise
+ * @regs:              pt_regs used to seed the unwinder {SP, FP, BLINK, PC}
+ *                     If NULL, use pt_regs of @tsk (if !NULL) otherwise
+ *                     use the current values of {SP, FP, BLINK, PC}
+ * @consumer_fn:       Callback invoked for each frame unwound
+ *                     Returns 0 to continue unwinding, -1 to stop
+ * @arg:               Arg to callback
+ *
+ * Returns the address of first function in stack
+ *
+ * Semantics:
+ *  - synchronous unwinding (e.g. dump_stack): @tsk  NULL, @regs  NULL
+ *  - Asynchronous unwinding of sleeping task: @tsk !NULL, @regs  NULL
+ *  - Asynchronous unwinding of intr/excp etc: @tsk !NULL, @regs !NULL
+ */
+notrace noinline unsigned int arc_unwind_core(
+       struct task_struct *tsk, struct pt_regs *regs,
+       int (*consumer_fn) (unsigned int, void *),
+       void *arg);
+
+#endif /* __ASM_STACKTRACE_H */
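
For orientation only (this is not part of the patch), a consumer callback
matching the prototype documented above might collect unwound return
addresses into a struct stack_trace; the foo_* names below are hypothetical.

        #include <linux/stacktrace.h>
        #include <asm/stacktrace.h>

        /* Hypothetical consumer_fn: record each address, stop when full. */
        static int foo_save_entry(unsigned int address, void *arg)
        {
                struct stack_trace *trace = arg;

                if (trace->skip > 0) {
                        trace->skip--;
                        return 0;       /* keep unwinding */
                }

                trace->entries[trace->nr_entries++] = address;

                /* Return 0 to continue, -1 to stop the unwinder. */
                return trace->nr_entries < trace->max_entries ? 0 : -1;
        }

        /* Synchronous unwind of the current context: @tsk and @regs both NULL. */
        static void foo_dump_current(struct stack_trace *trace)
        {
                arc_unwind_core(NULL, NULL, foo_save_entry, trace);
        }
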
index fdd89715d2d3783f8a1302513c10a15d138ded91..98c00a2d4dd9a57f1c503ac2ebb6d63a3f1a76b4 100644 (file)
@@ -192,29 +192,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
        return 0;
 }
 
-/*
- * API: expected by schedular Code: If thread is sleeping where is that.
- * What is this good for? it will be always the scheduler or ret_from_fork.
- * So we hard code that anyways.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-       struct pt_regs *regs = task_pt_regs(t);
-       unsigned long blink = 0;
-
-       /*
-        * If the thread being queried for in not itself calling this, then it
-        * implies it is not executing, which in turn implies it is sleeping,
-        * which in turn implies it got switched OUT by the schedular.
-        * In that case, it's kernel mode blink can reliably retrieved as per
-        * the picture above (right above pt_regs).
-        */
-       if (t != current && t->state != TASK_RUNNING)
-               blink = *((unsigned int *)regs - 1);
-
-       return blink;
-}
-
 int elf_check_arch(const struct elf32_hdr *x)
 {
        unsigned int eflags;
index 9ce47cfe23037fa12f463a350819731422aadd9b..92320d6f737cf5149d0968f5da3cd73a47af9848 100644 (file)
@@ -43,6 +43,10 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
                                   struct pt_regs *regs,
                                   struct unwind_frame_info *frame_info)
 {
+       /*
+        * synchronous unwinding (e.g. dump_stack)
+        *  - uses current values of SP and friends
+        */
        if (tsk == NULL && regs == NULL) {
                unsigned long fp, sp, blink, ret;
                frame_info->task = current;
@@ -61,12 +65,17 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
                frame_info->regs.r63 = ret;
                frame_info->call_frame = 0;
        } else if (regs == NULL) {
+               /*
+                * Asynchronous unwinding of sleeping task
+                *  - Gets SP etc from task's pt_regs (saved bottom of kernel
+                *    mode stack of task)
+                */
 
                frame_info->task = tsk;
 
-               frame_info->regs.r27 = KSTK_FP(tsk);
-               frame_info->regs.r28 = KSTK_ESP(tsk);
-               frame_info->regs.r31 = KSTK_BLINK(tsk);
+               frame_info->regs.r27 = TSK_K_FP(tsk);
+               frame_info->regs.r28 = TSK_K_ESP(tsk);
+               frame_info->regs.r31 = TSK_K_BLINK(tsk);
                frame_info->regs.r63 = (unsigned int)__switch_to;
 
                /* In the prologue of __switch_to, first FP is saved on stack
@@ -83,6 +92,10 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
                frame_info->call_frame = 0;
 
        } else {
+               /*
+                * Asynchronous unwinding of intr/exception
+                *  - Just uses the pt_regs passed
+                */
                frame_info->task = tsk;
 
                frame_info->regs.r27 = regs->fp;
@@ -95,7 +108,7 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
 
 #endif
 
-static noinline unsigned int
+notrace noinline unsigned int
 arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
                int (*consumer_fn) (unsigned int, void *), void *arg)
 {
index 7ff5b5c183bb026716295c13f7b123de1d67a96f..74db59b6f39269f072ce606700375ae14ab9ec8c 100644 (file)
@@ -12,6 +12,7 @@
  */
 
 #include <linux/types.h>
+#include <linux/perf_event.h>
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
 #include <asm/disasm.h>
@@ -253,6 +254,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
                }
        }
 
+       perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
        return 0;
 
 fault:
index 563cb27e37f55f3f99badc9b99e64aca6ee397b7..6a2e006cbcce1f1cd69866e0f0f9f94463d73dcb 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
 #include <linux/kdebug.h>
+#include <linux/perf_event.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu.h>
 
@@ -139,13 +140,20 @@ good_area:
                        return;
        }
 
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
        if (likely(!(fault & VM_FAULT_ERROR))) {
                if (flags & FAULT_FLAG_ALLOW_RETRY) {
                        /* To avoid updating stats twice for retry case */
-                       if (fault & VM_FAULT_MAJOR)
+                       if (fault & VM_FAULT_MAJOR) {
                                tsk->maj_flt++;
-                       else
+                               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+                                             regs, address);
+                       } else {
                                tsk->min_flt++;
+                               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+                                             regs, address);
+                       }
 
                        if (fault & VM_FAULT_RETRY) {
                                flags &= ~FAULT_FLAG_ALLOW_RETRY;
index 37ca2a4c6f0944598cacb0fbdab33c716258ea4e..bf0fe99e8ca927e8b3894dabaf1e1fca9f079c3d 100644 (file)
@@ -207,7 +207,7 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
 
        bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
 
-       VM_BUG_ON(size & PAGE_MASK);
+       VM_BUG_ON(size & ~PAGE_MASK);
 
        if (!need_flush && !icache_is_pipt())
                goto vipt_cache;
index 07e7eb1d7ab63b0417f8a54203620988b72228c4..5560f74f9eeef1e3e4d2c9c39fc672e539eee93f 100644 (file)
@@ -540,7 +540,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
                vcpu->mode = OUTSIDE_GUEST_MODE;
                kvm_guest_exit();
-               trace_kvm_exit(*vcpu_pc(vcpu));
+               trace_kvm_exit(kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
                /*
                 * We may have taken a host interrupt in HYP mode (ie
                 * while executing the guest). This interrupt is still
index 881874b1a036ce117e01c2c6e124b28fb2506b89..6817664b46b80419047066686a47a8bc7953ebeb 100644 (file)
@@ -25,18 +25,22 @@ TRACE_EVENT(kvm_entry,
 );
 
 TRACE_EVENT(kvm_exit,
-       TP_PROTO(unsigned long vcpu_pc),
-       TP_ARGS(vcpu_pc),
+       TP_PROTO(unsigned int exit_reason, unsigned long vcpu_pc),
+       TP_ARGS(exit_reason, vcpu_pc),
 
        TP_STRUCT__entry(
+               __field(        unsigned int,   exit_reason     )
                __field(        unsigned long,  vcpu_pc         )
        ),
 
        TP_fast_assign(
+               __entry->exit_reason            = exit_reason;
                __entry->vcpu_pc                = vcpu_pc;
        ),
 
-       TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
+       TP_printk("HSR_EC: 0x%04x, PC: 0x%08lx",
+                 __entry->exit_reason,
+                 __entry->vcpu_pc)
 );
 
 TRACE_EVENT(kvm_guest_fault,
index 61bfe584a9d7fad4a7204d3ddb2a0a42afe3a23b..fc832040c6e979f139e272c73a47d167a65d8df5 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/input.h>
 #include <linux/io.h>
 #include <linux/delay.h>
+#include <linux/smc91x.h>
 
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
@@ -46,15 +47,20 @@ static struct resource smc91x_resources[] = {
        [1] = {
                .start  = MSM_GPIO_TO_INT(49),
                .end    = MSM_GPIO_TO_INT(49),
-               .flags  = IORESOURCE_IRQ,
+               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
        },
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+       .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
        .name           = "smc91x",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(smc91x_resources),
        .resource       = smc91x_resources,
+       .dev.platform_data = &smc91x_platdata,
 };
 
 static struct platform_device *devices[] __initdata = {
index 4c748616ef47eb9de63d751072e8d779fc3ffe44..10016a3bc69826351830ea53f822c7a8e3c4f033 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/usb/msm_hsusb.h>
 #include <linux/err.h>
 #include <linux/clkdev.h>
+#include <linux/smc91x.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -49,15 +50,20 @@ static struct resource smc91x_resources[] = {
                .flags = IORESOURCE_MEM,
        },
        [1] = {
-               .flags = IORESOURCE_IRQ,
+               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
        },
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+       .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
        .name           = "smc91x",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(smc91x_resources),
        .resource       = smc91x_resources,
+       .dev.platform_data = &smc91x_platdata,
 };
 
 static int __init msm_init_smc91x(void)
index 343c4e3a7c5d1aceb136a196dcae946a7c8f5a69..7d8eab857a930b34ce9f02d4f48b83b05eaee52c 100644 (file)
@@ -81,11 +81,16 @@ static struct resource smc91x_resources[] = {
        }
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+       .flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
        .name           = "smc91x",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(smc91x_resources),
        .resource       = smc91x_resources,
+       .dev.platform_data = &smc91x_platdata,
 };
 
 static void idp_backlight_power(int on)
index ad777b353bd5234797d93031ab6815747b65e363..28da319d389f2036e8e551b629054db546daecbe 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/pwm_backlight.h>
+#include <linux/smc91x.h>
 
 #include <asm/types.h>
 #include <asm/setup.h>
@@ -189,15 +190,20 @@ static struct resource smc91x_resources[] = {
        [1] = {
                .start  = LPD270_ETHERNET_IRQ,
                .end    = LPD270_ETHERNET_IRQ,
-               .flags  = IORESOURCE_IRQ,
+               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
        },
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+       .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
        .name           = "smc91x",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(smc91x_resources),
        .resource       = smc91x_resources,
+       .dev.platform_data = &smc91x_platdata,
 };
 
 static struct resource lpd270_flash_resources[] = {
index 850e506926dfb8adbc137f652353d8d84cc40197..c309593abdb223e9c9499c0469f080344ea40fd7 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/platform_data/video-clcd-versatile.h>
 #include <linux/io.h>
 #include <linux/smsc911x.h>
+#include <linux/smc91x.h>
 #include <linux/ata_platform.h>
 #include <linux/amba/mmci.h>
 #include <linux/gfp.h>
@@ -94,6 +95,10 @@ static struct smsc911x_platform_config smsc911x_config = {
        .phy_interface  = PHY_INTERFACE_MODE_MII,
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+       .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device realview_eth_device = {
        .name           = "smsc911x",
        .id             = 0,
@@ -107,6 +112,8 @@ int realview_eth_register(const char *name, struct resource *res)
        realview_eth_device.resource = res;
        if (strcmp(realview_eth_device.name, "smsc911x") == 0)
                realview_eth_device.dev.platform_data = &smsc911x_config;
+       else
+               realview_eth_device.dev.platform_data = &smc91x_platdata;
 
        return platform_device_register(&realview_eth_device);
 }
index 64c88d657f9efc6360600380910248a51fd3c73b..b3869cbbcc6858c5ddb6b8ab9808773cde4dfae6 100644 (file)
@@ -234,7 +234,7 @@ static struct resource realview_eb_eth_resources[] = {
        [1] = {
                .start          = IRQ_EB_ETH,
                .end            = IRQ_EB_ETH,
-               .flags          = IORESOURCE_IRQ,
+               .flags          = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
        },
 };
 
index 169262e3040dd77b25ae268bc39880929805f63c..7b0cd3172354dfcfb4b8d710c15949e6dae356ac 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/pm.h>
 #include <linux/serial_core.h>
 #include <linux/slab.h>
+#include <linux/smc91x.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/map.h>
@@ -258,12 +259,17 @@ static int neponset_probe(struct platform_device *dev)
                        0x02000000, "smc91x-attrib"),
                { .flags = IORESOURCE_IRQ },
        };
+       struct smc91x_platdata smc91x_platdata = {
+               .flags = SMC91X_USE_8BIT | SMC91X_IO_SHIFT_2 | SMC91X_NOWAIT,
+       };
        struct platform_device_info smc91x_devinfo = {
                .parent = &dev->dev,
                .name = "smc91x",
                .id = 0,
                .res = smc91x_resources,
                .num_res = ARRAY_SIZE(smc91x_resources),
+               .data = &smc91x_platdata,
+               .size_data = sizeof(smc91x_platdata),
        };
        int ret, irq;
 
index 091261878effde2e56d1b4f81a157f7d696de765..696fd0fe48062590d69a1d9b08e7d85a9f21b143 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/mtd/partitions.h>
+#include <linux/smc91x.h>
 
 #include <mach/hardware.h>
 #include <asm/setup.h>
@@ -43,12 +44,18 @@ static struct resource smc91x_resources[] = {
 #endif
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+       .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
 
 static struct platform_device smc91x_device = {
        .name           = "smc91x",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(smc91x_resources),
        .resource       = smc91x_resources,
+       .dev = {
+               .platform_data  = &smc91x_platdata,
+       },
 };
 
 static struct platform_device *devices[] __initdata = {
index bbcd82242059d36f91f2f4a7dfbd8d22fb479ea6..b6beb0e07b1b3b535f7625d61100e6c0087de00d 100644 (file)
@@ -216,6 +216,7 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
        if (idx > current_cpu_data.tlbsize) {
                kvm_err("%s: Invalid Index: %d\n", __func__, idx);
                kvm_mips_dump_host_tlbs();
+               local_irq_restore(flags);
                return -1;
        }
 
index c1388d40663b0143501bacae8af8f2793e54c815..bd6437f67dc03b01c3a76e57a53a811a61f0ebe7 100644 (file)
@@ -24,18 +24,18 @@ TRACE_EVENT(kvm_exit,
            TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
            TP_ARGS(vcpu, reason),
            TP_STRUCT__entry(
-                       __field(struct kvm_vcpu *, vcpu)
+                       __field(unsigned long, pc)
                        __field(unsigned int, reason)
            ),
 
            TP_fast_assign(
-                       __entry->vcpu = vcpu;
+                       __entry->pc = vcpu->arch.pc;
                        __entry->reason = reason;
            ),
 
            TP_printk("[%s]PC: 0x%08lx",
                      kvm_mips_exit_types_str[__entry->reason],
-                     __entry->vcpu->arch.pc)
+                     __entry->pc)
 );
 
 #endif /* _TRACE_KVM_H */
index 9cfa3706a1b8750942d7e41a91ef44996053a2ad..f1ea5972f6eccddceb7b776961ff67dcf2cb14e6 100644 (file)
@@ -113,6 +113,7 @@ extern void iommu_register_group(struct iommu_table *tbl,
                                 int pci_domain_number, unsigned long pe_num);
 extern int iommu_add_device(struct device *dev);
 extern void iommu_del_device(struct device *dev);
+extern int __init tce_iommu_bus_notifier_init(void);
 #else
 static inline void iommu_register_group(struct iommu_table *tbl,
                                        int pci_domain_number,
@@ -128,6 +129,11 @@ static inline int iommu_add_device(struct device *dev)
 static inline void iommu_del_device(struct device *dev)
 {
 }
+
+static inline int __init tce_iommu_bus_notifier_init(void)
+{
+        return 0;
+}
 #endif /* !CONFIG_IOMMU_API */
 
 static inline void set_iommu_table_base_and_group(struct device *dev,
diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
new file mode 100644 (file)
index 0000000..744fd54
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _ASM_POWERPC_IRQ_WORK_H
+#define _ASM_POWERPC_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+       return true;
+}
+
+#endif /* _ASM_POWERPC_IRQ_WORK_H */
index 5d3968c4d79973a4645f9d7b4069056311fd42cc..b054f33ab1fbcdad3bff7fda332c9e55dcda1d2f 100644 (file)
@@ -1175,4 +1175,30 @@ void iommu_del_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(iommu_del_device);
 
+static int tce_iommu_bus_notifier(struct notifier_block *nb,
+                unsigned long action, void *data)
+{
+        struct device *dev = data;
+
+        switch (action) {
+        case BUS_NOTIFY_ADD_DEVICE:
+                return iommu_add_device(dev);
+        case BUS_NOTIFY_DEL_DEVICE:
+                if (dev->iommu_group)
+                        iommu_del_device(dev);
+                return 0;
+        default:
+                return 0;
+        }
+}
+
+static struct notifier_block tce_iommu_bus_nb = {
+        .notifier_call = tce_iommu_bus_notifier,
+};
+
+int __init tce_iommu_bus_notifier_init(void)
+{
+        bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
+        return 0;
+}
 #endif /* CONFIG_IOMMU_API */
index 6e19afa35a153d2736af94bc845008d724fe735f..ec9ec2058d2d3f3ec6db2dfbe25b35ab1f4cfb0b 100644 (file)
@@ -541,8 +541,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
        if (smp_ops->give_timebase)
                smp_ops->give_timebase();
 
-       /* Wait until cpu puts itself in the online map */
-       while (!cpu_online(cpu))
+       /* Wait until cpu puts itself in the online & active maps */
+       while (!cpu_online(cpu) || !cpu_active(cpu))
                cpu_relax();
 
        return 0;
index e69142f4af089cf986dd84f3fb2731ae705cf1f2..54323d6b5166218fa4e9c548a0511c7ec64884dd 100644 (file)
@@ -836,30 +836,4 @@ void __init pnv_pci_init(void)
 #endif
 }
 
-static int tce_iommu_bus_notifier(struct notifier_block *nb,
-               unsigned long action, void *data)
-{
-       struct device *dev = data;
-
-       switch (action) {
-       case BUS_NOTIFY_ADD_DEVICE:
-               return iommu_add_device(dev);
-       case BUS_NOTIFY_DEL_DEVICE:
-               if (dev->iommu_group)
-                       iommu_del_device(dev);
-               return 0;
-       default:
-               return 0;
-       }
-}
-
-static struct notifier_block tce_iommu_bus_nb = {
-       .notifier_call = tce_iommu_bus_notifier,
-};
-
-static int __init tce_iommu_bus_notifier_init(void)
-{
-       bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
-       return 0;
-}
 machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);
index 1d3d52dc3ff31ed6ce0ab9b554c45c1233d0f039..7803a19adb31822fbe1c679160ff7655ec5c5411 100644 (file)
@@ -1340,3 +1340,5 @@ static int __init disable_multitce(char *str)
 }
 
 __setup("multitce=", disable_multitce);
+
+machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init);
index c2fb8a87dccb2990a794bb8960bfdad85eb9a390..b7d31ca5518744983c77bc8339f30756621dfea0 100644 (file)
@@ -499,6 +499,7 @@ config X86_INTEL_QUARK
        depends on X86_IO_APIC
        select IOSF_MBI
        select INTEL_IMR
+       select COMMON_CLK
        ---help---
          Select to include support for Quark X1000 SoC.
          Say Y here if you have a Quark based system such as the Arduino
index 5fa9770035dc935c0a90899f470e668ab889ec7f..c9a6d68b8d623b84d169f61c4680c194ea8d137a 100644 (file)
@@ -82,18 +82,15 @@ static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
        if (boot_cpu_has(X86_FEATURE_XSAVES))
                asm volatile("1:"XSAVES"\n\t"
                        "2:\n\t"
-                       : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+                            xstate_fault
+                       : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
                        :   "memory");
        else
                asm volatile("1:"XSAVE"\n\t"
                        "2:\n\t"
-                       : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+                            xstate_fault
+                       : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
                        :   "memory");
-
-       asm volatile(xstate_fault
-                    : "0" (0)
-                    : "memory");
-
        return err;
 }
 
@@ -112,18 +109,15 @@ static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
        if (boot_cpu_has(X86_FEATURE_XSAVES))
                asm volatile("1:"XRSTORS"\n\t"
                        "2:\n\t"
-                       : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+                            xstate_fault
+                       : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
                        :   "memory");
        else
                asm volatile("1:"XRSTOR"\n\t"
                        "2:\n\t"
-                       : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+                            xstate_fault
+                       : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
                        :   "memory");
-
-       asm volatile(xstate_fault
-                    : "0" (0)
-                    : "memory");
-
        return err;
 }
 
@@ -149,9 +143,9 @@ static inline int xsave_state(struct xsave_struct *fx, u64 mask)
         */
        alternative_input_2(
                "1:"XSAVE,
-               "1:"XSAVEOPT,
+               XSAVEOPT,
                X86_FEATURE_XSAVEOPT,
-               "1:"XSAVES,
+               XSAVES,
                X86_FEATURE_XSAVES,
                [fx] "D" (fx), "a" (lmask), "d" (hmask) :
                "memory");
@@ -178,7 +172,7 @@ static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
         */
        alternative_input(
                "1: " XRSTOR,
-               "1: " XRSTORS,
+               XRSTORS,
                X86_FEATURE_XSAVES,
                "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
                : "memory");
index 10074ad9ebf85ed82e552f055baebfa6bb3169f2..1d74d161687c9f2a71f334b5530067356310af18 100644 (file)
@@ -269,11 +269,14 @@ ENTRY(ret_from_fork)
        testl $3, CS-ARGOFFSET(%rsp)            # from kernel_thread?
        jz   1f
 
-       testl $_TIF_IA32, TI_flags(%rcx)        # 32-bit compat task needs IRET
-       jnz  int_ret_from_sys_call
-
-       RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
-       jmp ret_from_sys_call                   # go to the SYSRET fastpath
+       /*
+        * By the time we get here, we have no idea whether our pt_regs,
+        * ti flags, and ti status came from the 64-bit SYSCALL fast path,
+        * the slow path, or one of the ia32entry paths.
+        * Use int_ret_from_sys_call to return, since it can safely handle
+        * all of the above.
+        */
+       jmp  int_ret_from_sys_call
 
 1:
        subq $REST_SKIP, %rsp   # leave space for volatiles
index e0b794a84c35cdd7ecc03bc6de6500b0ac237f57..106c01557f2b63706eca28e462a3b072b590f0c5 100644 (file)
@@ -4950,7 +4950,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
                        goto done;
                }
        }
-       ctxt->dst.orig_val = ctxt->dst.val;
+       /* Copy full 64-bit value for CMPXCHG8B.  */
+       ctxt->dst.orig_val64 = ctxt->dst.val64;
 
 special_insn:
 
index e55b5fc344eb911a7b4ed0c490d50a6767532d27..bd4e34de24c7a0860de0adf88d3b85fd94f8ddae 100644 (file)
@@ -1572,7 +1572,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
                apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
        }
        apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm);
-       apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm);
+       apic->isr_count = kvm_x86_ops->hwapic_isr_update ? 1 : 0;
        apic->highest_isr_cache = -1;
        update_divide_count(apic);
        atomic_set(&apic->lapic_timer.pending, 0);
@@ -1782,7 +1782,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
        update_divide_count(apic);
        start_apic_timer(apic);
        apic->irr_pending = true;
-       apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
+       apic->isr_count = kvm_x86_ops->hwapic_isr_update ?
                                1 : count_vectors(apic->regs + APIC_ISR);
        apic->highest_isr_cache = -1;
        if (kvm_x86_ops->hwapic_irr_update)
index d319e0c24758876178aeab46c65fe611cb02126e..cc618c882f900ad21cb4de57d94daa91a5f4ec4c 100644 (file)
@@ -3649,11 +3649,6 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
        return;
 }
 
-static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
-{
-       return;
-}
-
 static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 {
        return;
@@ -4403,7 +4398,6 @@ static struct kvm_x86_ops svm_x86_ops = {
        .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
        .vm_has_apicv = svm_vm_has_apicv,
        .load_eoi_exitmap = svm_load_eoi_exitmap,
-       .hwapic_isr_update = svm_hwapic_isr_update,
        .sync_pir_to_irr = svm_sync_pir_to_irr,
 
        .set_tss_addr = svm_set_tss_addr,
index 14c1a18d206aeee0d59637162b0f1a58056c8941..f7b20b417a3a4296b446ee4d35ee5dfcdef86be4 100644 (file)
@@ -4367,6 +4367,18 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_SMP
+       if (vcpu->mode == IN_GUEST_MODE) {
+               apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
+                               POSTED_INTR_VECTOR);
+               return true;
+       }
+#endif
+       return false;
+}
+
 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
                                                int vector)
 {
@@ -4375,9 +4387,7 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
        if (is_guest_mode(vcpu) &&
            vector == vmx->nested.posted_intr_nv) {
                /* the PIR and ON have been set by L1. */
-               if (vcpu->mode == IN_GUEST_MODE)
-                       apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
-                               POSTED_INTR_VECTOR);
+               kvm_vcpu_trigger_posted_interrupt(vcpu);
                /*
                 * If a posted intr is not recognized by hardware,
                 * we will accomplish it in the next vmentry.
@@ -4409,12 +4419,7 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
 
        r = pi_test_and_set_on(&vmx->pi_desc);
        kvm_make_request(KVM_REQ_EVENT, vcpu);
-#ifdef CONFIG_SMP
-       if (!r && (vcpu->mode == IN_GUEST_MODE))
-               apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
-                               POSTED_INTR_VECTOR);
-       else
-#endif
+       if (r || !kvm_vcpu_trigger_posted_interrupt(vcpu))
                kvm_vcpu_kick(vcpu);
 }
 
index 6ac273832f284635ac1a66bf3f8551de379fa0f0..e4695985f9de85778db5e084b37eda5719d3a82a 100644 (file)
@@ -331,7 +331,7 @@ static void probe_pci_root_info(struct pci_root_info *info,
                                struct list_head *list)
 {
        int ret;
-       struct resource_entry *entry;
+       struct resource_entry *entry, *tmp;
 
        sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);
        info->bridge = device;
@@ -345,8 +345,13 @@ static void probe_pci_root_info(struct pci_root_info *info,
                dev_dbg(&device->dev,
                        "no IO and memory resources present in _CRS\n");
        else
-               resource_list_for_each_entry(entry, list)
-                       entry->res->name = info->name;
+               resource_list_for_each_entry_safe(entry, tmp, list) {
+                       if ((entry->res->flags & IORESOURCE_WINDOW) == 0 ||
+                           (entry->res->flags & IORESOURCE_DISABLED))
+                               resource_list_destroy_entry(entry);
+                       else
+                               entry->res->name = info->name;
+               }
 }
 
 struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
index c723668e3e277def6f8d6309fe1af21b989951fb..5589a6e2a02346e3b2ce48656b3facea1abfc621 100644 (file)
@@ -42,8 +42,10 @@ static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
         * CHECKME: len might be required to check versus a minimum
         * length as well. 1 for io is fine, but for memory it does
         * not make any sense at all.
+        * Note: some BIOSes report incorrect length for ACPI address space
+        * descriptor, so remove check of 'reslen == len' to avoid regression.
         */
-       if (len && reslen && reslen == len && start <= end)
+       if (len && reslen && start <= end)
                return true;
 
        pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
index debd30917010a17697102bc84d1e468c69c94d17..26eb70c8f5184f878ae489c4130e1b382d7394a2 100644 (file)
@@ -2110,7 +2110,8 @@ static int __init intel_opregion_present(void)
 
 int acpi_video_register(void)
 {
-       int result = 0;
+       int ret;
+
        if (register_count) {
                /*
                 * if the function of acpi_video_register is already called,
@@ -2122,9 +2123,9 @@ int acpi_video_register(void)
        mutex_init(&video_list_lock);
        INIT_LIST_HEAD(&video_bus_head);
 
-       result = acpi_bus_register_driver(&acpi_video_bus);
-       if (result < 0)
-               return -ENODEV;
+       ret = acpi_bus_register_driver(&acpi_video_bus);
+       if (ret)
+               return ret;
 
        /*
         * When the acpi_video_bus is loaded successfully, increase
@@ -2176,6 +2177,17 @@ EXPORT_SYMBOL(acpi_video_unregister_backlight);
 
 static int __init acpi_video_init(void)
 {
+       /*
+        * Let the module load even if ACPI is disabled (e.g. due to
+        * a broken BIOS) so that i915.ko can still be loaded on such
+        * old systems without an AcpiOpRegion.
+        *
+        * acpi_video_register() will report -ENODEV later as well due
+        * to acpi_disabled when i915.ko tries to register itself afterwards.
+        */
+       if (acpi_disabled)
+               return 0;
+
        dmi_check_system(video_dmi_table);
 
        if (intel_opregion_present())
index ba4abbe4693c3e29be764d66662295dd300d41c4..45937f88e77c88893f6f05430efcd2dd88449e9f 100644 (file)
@@ -2242,7 +2242,7 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
 }
 
 static int pm_genpd_summary_one(struct seq_file *s,
-               struct generic_pm_domain *gpd)
+                               struct generic_pm_domain *genpd)
 {
        static const char * const status_lookup[] = {
                [GPD_STATE_ACTIVE] = "on",
@@ -2256,26 +2256,26 @@ static int pm_genpd_summary_one(struct seq_file *s,
        struct gpd_link *link;
        int ret;
 
-       ret = mutex_lock_interruptible(&gpd->lock);
+       ret = mutex_lock_interruptible(&genpd->lock);
        if (ret)
                return -ERESTARTSYS;
 
-       if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup)))
+       if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
                goto exit;
-       seq_printf(s, "%-30s  %-15s  ", gpd->name, status_lookup[gpd->status]);
+       seq_printf(s, "%-30s  %-15s  ", genpd->name, status_lookup[genpd->status]);
 
        /*
         * Modifications on the list require holding locks on both
         * master and slave, so we are safe.
-        * Also gpd->name is immutable.
+        * Also genpd->name is immutable.
         */
-       list_for_each_entry(link, &gpd->master_links, master_node) {
+       list_for_each_entry(link, &genpd->master_links, master_node) {
                seq_printf(s, "%s", link->slave->name);
-               if (!list_is_last(&link->master_node, &gpd->master_links))
+               if (!list_is_last(&link->master_node, &genpd->master_links))
                        seq_puts(s, ", ");
        }
 
-       list_for_each_entry(pm_data, &gpd->dev_list, list_node) {
+       list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
                kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
                if (kobj_path == NULL)
                        continue;
@@ -2287,14 +2287,14 @@ static int pm_genpd_summary_one(struct seq_file *s,
 
        seq_puts(s, "\n");
 exit:
-       mutex_unlock(&gpd->lock);
+       mutex_unlock(&genpd->lock);
 
        return 0;
 }
 
 static int pm_genpd_summary_show(struct seq_file *s, void *data)
 {
-       struct generic_pm_domain *gpd;
+       struct generic_pm_domain *genpd;
        int ret = 0;
 
        seq_puts(s, "    domain                      status         slaves\n");
@@ -2305,8 +2305,8 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data)
        if (ret)
                return -ERESTARTSYS;
 
-       list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
-               ret = pm_genpd_summary_one(s, gpd);
+       list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
+               ret = pm_genpd_summary_one(s, genpd);
                if (ret)
                        break;
        }
index c2744b30d5d92e9dde512e492cf9fdf44f21b5ef..aab7158d2afea87c5cee091eb2af3a1ae0a7222a 100644 (file)
@@ -730,6 +730,7 @@ void pm_system_wakeup(void)
        pm_abort_suspend = true;
        freeze_wake();
 }
+EXPORT_SYMBOL_GPL(pm_system_wakeup);
 
 void pm_wakeup_clear(void)
 {
index b876888811432a9bad46ab73a32ca40b04ed2ce4..8bfc4c2bba87b61f46dfbb4778463f393226f735 100644 (file)
@@ -272,6 +272,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
 
        /* Intel Bluetooth devices */
+       { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
        { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
        { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
        { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
index f07c8152e5cc42aa660f5c83c6c6470ee2e68502..3f27d21fb7297e70494bf35743f027f8c8005d73 100644 (file)
@@ -89,12 +89,29 @@ static int pmc_irq_set_type(struct irq_data *d, unsigned type)
        return 0;
 }
 
+static void pmc_irq_suspend(struct irq_data *d)
+{
+       struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
+
+       pmc->imr = pmc_read(pmc, AT91_PMC_IMR);
+       pmc_write(pmc, AT91_PMC_IDR, pmc->imr);
+}
+
+static void pmc_irq_resume(struct irq_data *d)
+{
+       struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
+
+       pmc_write(pmc, AT91_PMC_IER, pmc->imr);
+}
+
 static struct irq_chip pmc_irq = {
        .name = "PMC",
        .irq_disable = pmc_irq_mask,
        .irq_mask = pmc_irq_mask,
        .irq_unmask = pmc_irq_unmask,
        .irq_set_type = pmc_irq_set_type,
+       .irq_suspend = pmc_irq_suspend,
+       .irq_resume = pmc_irq_resume,
 };
 
 static struct lock_class_key pmc_lock_class;
@@ -224,7 +241,8 @@ static struct at91_pmc *__init at91_pmc_init(struct device_node *np,
                goto out_free_pmc;
 
        pmc_write(pmc, AT91_PMC_IDR, 0xffffffff);
-       if (request_irq(pmc->virq, pmc_irq_handler, IRQF_SHARED, "pmc", pmc))
+       if (request_irq(pmc->virq, pmc_irq_handler,
+                       IRQF_SHARED | IRQF_COND_SUSPEND, "pmc", pmc))
                goto out_remove_irqdomain;
 
        return pmc;
index 52d2041fa3f6354a4abfdc8f45ee147a75a370ef..69abb08cf146513b0307a4a78449b2e5da971282 100644 (file)
@@ -33,6 +33,7 @@ struct at91_pmc {
        spinlock_t lock;
        const struct at91_pmc_caps *caps;
        struct irq_domain *irqdomain;
+       u32 imr;
 };
 
 static inline void pmc_lock(struct at91_pmc *pmc)
index bee5df7794d33d1078116c8ac2f3618075230c8c..7cb4b766cf948d3f3e4b813325eda0aa0f0129aa 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/smp.h>
 #include <sysdev/fsl_soc.h>
 
+#include <asm/smp.h>   /* for get_hard_smp_processor_id() in UP configs */
+
 /**
  * struct cpu_data - per CPU data struct
  * @parent: the parent node of cpu clock
index 4d534582514e014b5fdb3fc5e0b9db7e52c7b306..080bd2dbde4ba5408504a451e9454b51497202e3 100644 (file)
@@ -44,6 +44,12 @@ void disable_cpuidle(void)
        off = 1;
 }
 
+bool cpuidle_not_available(struct cpuidle_driver *drv,
+                          struct cpuidle_device *dev)
+{
+       return off || !initialized || !drv || !dev || !dev->enabled;
+}
+
 /**
  * cpuidle_play_dead - cpu off-lining
  *
@@ -66,14 +72,8 @@ int cpuidle_play_dead(void)
        return -ENODEV;
 }
 
-/**
- * cpuidle_find_deepest_state - Find deepest state meeting specific conditions.
- * @drv: cpuidle driver for the given CPU.
- * @dev: cpuidle device for the given CPU.
- * @freeze: Whether or not the state should be suitable for suspend-to-idle.
- */
-static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
-                                     struct cpuidle_device *dev, bool freeze)
+static int find_deepest_state(struct cpuidle_driver *drv,
+                             struct cpuidle_device *dev, bool freeze)
 {
        unsigned int latency_req = 0;
        int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;
@@ -92,6 +92,17 @@ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
        return ret;
 }
 
+/**
+ * cpuidle_find_deepest_state - Find the deepest available idle state.
+ * @drv: cpuidle driver for the given CPU.
+ * @dev: cpuidle device for the given CPU.
+ */
+int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+                              struct cpuidle_device *dev)
+{
+       return find_deepest_state(drv, dev, false);
+}
+
 static void enter_freeze_proper(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev, int index)
 {
@@ -113,15 +124,14 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
 
 /**
  * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
+ * @drv: cpuidle driver for the given CPU.
+ * @dev: cpuidle device for the given CPU.
  *
  * If there are states with the ->enter_freeze callback, find the deepest of
- * them and enter it with frozen tick.  Otherwise, find the deepest state
- * available and enter it normally.
+ * them and enter it with frozen tick.
  */
-void cpuidle_enter_freeze(void)
+int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
-       struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
-       struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int index;
 
        /*
@@ -129,24 +139,11 @@ void cpuidle_enter_freeze(void)
         * that interrupts won't be enabled when it exits and allows the tick to
         * be frozen safely.
         */
-       index = cpuidle_find_deepest_state(drv, dev, true);
-       if (index >= 0) {
-               enter_freeze_proper(drv, dev, index);
-               return;
-       }
-
-       /*
-        * It is not safe to freeze the tick, find the deepest state available
-        * at all and try to enter it normally.
-        */
-       index = cpuidle_find_deepest_state(drv, dev, false);
+       index = find_deepest_state(drv, dev, true);
        if (index >= 0)
-               cpuidle_enter(drv, dev, index);
-       else
-               arch_cpu_idle();
+               enter_freeze_proper(drv, dev, index);
 
-       /* Interrupts are enabled again here. */
-       local_irq_disable();
+       return index;
 }
 
 /**
@@ -205,12 +202,6 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
  */
 int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
-       if (off || !initialized)
-               return -ENODEV;
-
-       if (!drv || !dev || !dev->enabled)
-               return -EBUSY;
-
        return cpuidle_curr_governor->select(drv, dev);
 }
 
index e5541117b3e915de7f08ecc38a58ec9f72bf2c12..50ef8bd8708ba69d93808510c9a8be473d377d7c 100644 (file)
@@ -159,6 +159,9 @@ fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
        if (WARN_ON(timeout < 0))
                return -EINVAL;
 
+       if (timeout == 0)
+               return fence_is_signaled(fence);
+
        trace_fence_wait_start(fence);
        ret = fence->ops->wait(fence, intr, timeout);
        trace_fence_wait_end(fence);
index 3c97c8fa8d02687290e7340f82666f3466fbda2e..39920d77f288d7802c054130acf72caa8f40465d 100644 (file)
@@ -327,6 +327,9 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
        unsigned seq, shared_count, i = 0;
        long ret = timeout;
 
+       if (!timeout)
+               return reservation_object_test_signaled_rcu(obj, wait_all);
+
 retry:
        fence = NULL;
        shared_count = 0;
@@ -402,8 +405,6 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
        int ret = 1;
 
        if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
-               int ret;
-
                fence = fence_get_rcu(lfence);
                if (!fence)
                        return -1;
index c5f7b4e9eb6c6e490454958473893820bccaeb10..69fac068669fde566f41013cefbdf48db023466c 100644 (file)
@@ -78,7 +78,7 @@ static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
  *     We have to be cautious here. We have seen BIOSes with DMI pointers
  *     pointing to completely the wrong place for example
  */
-static void dmi_table(u8 *buf, int len, int num,
+static void dmi_table(u8 *buf, u32 len, int num,
                      void (*decode)(const struct dmi_header *, void *),
                      void *private_data)
 {
@@ -92,12 +92,6 @@ static void dmi_table(u8 *buf, int len, int num,
        while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
                const struct dmi_header *dm = (const struct dmi_header *)data;
 
-               /*
-                * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
-                */
-               if (dm->type == DMI_ENTRY_END_OF_TABLE)
-                       break;
-
                /*
                 *  We want to know the total length (formatted area and
                 *  strings) before decoding to make sure we won't run off the
@@ -108,13 +102,20 @@ static void dmi_table(u8 *buf, int len, int num,
                        data++;
                if (data - buf < len - 1)
                        decode(dm, private_data);
+
+               /*
+                * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
+                */
+               if (dm->type == DMI_ENTRY_END_OF_TABLE)
+                       break;
+
                data += 2;
                i++;
        }
 }
 
 static phys_addr_t dmi_base;
-static u16 dmi_len;
+static u32 dmi_len;
 static u16 dmi_num;
 
 static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
index 2fe195002021d079ec36515a7ebba4385c1f3600..f07d4a67fa76b3a3cb542e31a24a093c6f7aff97 100644 (file)
@@ -179,12 +179,12 @@ again:
                start = desc->phys_addr;
                end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
 
-               if ((start + size) > end || (start + size) > max)
-                       continue;
-
-               if (end - size > max)
+               if (end > max)
                        end = max;
 
+               if ((start + size) > end)
+                       continue;
+
                if (round_down(end - size, align) < start)
                        continue;
 
index 3117679299a65a12989b78ca34091f073ce833e3..e730789b53b7b0c141bada8400b398f67149275b 100644 (file)
@@ -8713,6 +8713,7 @@ retry:
                        old->release_fb->funcs->destroy(old->release_fb);
                goto fail;
        }
+       crtc->primary->crtc = crtc;
 
        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev, intel_crtc->pipe);
index 3c92780bda09e17843f3cea5c7c35161e103c25c..ff48da61c94c849bf06cbb9ab9cb149515dcd626 100644 (file)
@@ -1755,7 +1755,7 @@ init_card(struct hfc_pci *hc)
                enable_hwirq(hc);
                spin_unlock_irqrestore(&hc->lock, flags);
                /* Timeout 80ms */
-               current->state = TASK_UNINTERRUPTIBLE;
+               set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout((80 * HZ) / 1000);
                printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
                       hc->irq, hc->irqcnt);
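
For context, a minimal sketch of the sleep idiom the hunk above converts to; set_current_state() is preferred over a bare assignment to current->state because it includes the memory barrier the scheduler expects. The helper name is illustrative, not from the driver:

#include <linux/sched.h>
#include <linux/jiffies.h>

/* Illustrative only: sleep for roughly 80ms, matching the driver's timeout. */
static void example_sleep_80ms(void)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(msecs_to_jiffies(80));
}
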
index 84673ebcf428846fadf26ae881c39172fd45d612..df51d6025a9017413500046edb78401abe0dfdb3 100644 (file)
@@ -157,7 +157,7 @@ config IPVLAN
       making it transparent to the connected L2 switch.
 
       Ipvlan devices can be added using the "ip" command from the
-      iproute2 package starting with the iproute2-X.Y.ZZ release:
+      iproute2 package starting with the iproute2-3.19 release:
 
       "ip link add link <main-dev> [ NAME ] type ipvlan"
 
index 4ce6ca5f3d365a48ab554c7477b157437fff9357..dc6b78e5342f937e6b3b2d498d4694bc0d0fe6c0 100644 (file)
@@ -40,7 +40,7 @@ config DEV_APPLETALK
 
 config LTPC
        tristate "Apple/Farallon LocalTalk PC support"
-       depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API
+       depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS
        help
          This allows you to use the AppleTalk PC card to connect to LocalTalk
          networks. The card is also known as the Farallon PhoneNet PC card.
index ee9f650d50264bb74771783b65e860b2aabb3d46..7b7053d3c5fad20e07a75eb5ea987743b51484ba 100644 (file)
@@ -105,8 +105,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off)  \
 {                                                                      \
        u32 indir, dir;                                                 \
        spin_lock(&priv->indir_lock);                                   \
-       indir = reg_readl(priv, REG_DIR_DATA_READ);                     \
        dir = __raw_readl(priv->name + off);                            \
+       indir = reg_readl(priv, REG_DIR_DATA_READ);                     \
        spin_unlock(&priv->indir_lock);                                 \
        return (u64)indir << 32 | dir;                                  \
 }                                                                      \
index 7769c05543f17fcc8432cac6145ebdd70c4fc5da..ec6eac1f8c95ab79d33209e272a31f71484b0f62 100644 (file)
@@ -484,11 +484,8 @@ static int axnet_open(struct net_device *dev)
     link->open++;
 
     info->link_status = 0x00;
-    init_timer(&info->watchdog);
-    info->watchdog.function = ei_watchdog;
-    info->watchdog.data = (u_long)dev;
-    info->watchdog.expires = jiffies + HZ;
-    add_timer(&info->watchdog);
+    setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+    mod_timer(&info->watchdog, jiffies + HZ);
 
     return ax_open(dev);
 } /* axnet_open */
index 9fb7b9d4fd6c6595f7642d859678bc3097998750..2777289a26c0419f855926ef028942074ca62a2f 100644 (file)
@@ -918,11 +918,8 @@ static int pcnet_open(struct net_device *dev)
 
     info->phy_id = info->eth_phy;
     info->link_status = 0x00;
-    init_timer(&info->watchdog);
-    info->watchdog.function = ei_watchdog;
-    info->watchdog.data = (u_long)dev;
-    info->watchdog.expires = jiffies + HZ;
-    add_timer(&info->watchdog);
+    setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+    mod_timer(&info->watchdog, jiffies + HZ);
 
     return ei_open(dev);
 } /* pcnet_open */
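
Both PCMCIA hunks above make the same substitution; here is a minimal sketch of the (pre-4.15) timer API they move to, reusing the ei_watchdog callback named in the patch. The wrapper function is illustrative only:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>

/* 'ei_watchdog' is the driver callback referenced in the hunks above. */
static void example_arm_watchdog(struct timer_list *watchdog,
                                 struct net_device *dev)
{
        setup_timer(watchdog, ei_watchdog, (u_long)dev); /* bind handler + data */
        mod_timer(watchdog, jiffies + HZ);               /* (re)arm, one tick-second out */
}
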
index 760c72c6e2acd50ba8472e4b4dd77170c2c381d6..6725dc00750bd6da367396bceb33adaac10842d0 100644 (file)
@@ -376,7 +376,8 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
        u16 pktlength;
        u16 pktstatus;
 
-       while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) {
+       while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
+              (count < limit))  {
                pktstatus = rxstatus >> 16;
                pktlength = rxstatus & 0xffff;
 
@@ -491,28 +492,27 @@ static int tse_poll(struct napi_struct *napi, int budget)
        struct altera_tse_private *priv =
                        container_of(napi, struct altera_tse_private, napi);
        int rxcomplete = 0;
-       int txcomplete = 0;
        unsigned long int flags;
 
-       txcomplete = tse_tx_complete(priv);
+       tse_tx_complete(priv);
 
        rxcomplete = tse_rx(priv, budget);
 
-       if (rxcomplete >= budget || txcomplete > 0)
-               return rxcomplete;
+       if (rxcomplete < budget) {
 
-       napi_gro_flush(napi, false);
-       __napi_complete(napi);
+               napi_gro_flush(napi, false);
+               __napi_complete(napi);
 
-       netdev_dbg(priv->dev,
-                  "NAPI Complete, did %d packets with budget %d\n",
-                  txcomplete+rxcomplete, budget);
+               netdev_dbg(priv->dev,
+                          "NAPI Complete, did %d packets with budget %d\n",
+                          rxcomplete, budget);
 
-       spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
-       priv->dmaops->enable_rxirq(priv);
-       priv->dmaops->enable_txirq(priv);
-       spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
-       return rxcomplete + txcomplete;
+               spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+               priv->dmaops->enable_rxirq(priv);
+               priv->dmaops->enable_txirq(priv);
+               spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+       }
+       return rxcomplete;
 }
 
 /* DMA TX & RX FIFO interrupt routing
@@ -521,7 +521,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
 {
        struct net_device *dev = dev_id;
        struct altera_tse_private *priv;
-       unsigned long int flags;
 
        if (unlikely(!dev)) {
                pr_err("%s: invalid dev pointer\n", __func__);
@@ -529,20 +528,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
        }
        priv = netdev_priv(dev);
 
-       /* turn off desc irqs and enable napi rx */
-       spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+       spin_lock(&priv->rxdma_irq_lock);
+       /* reset IRQs */
+       priv->dmaops->clear_rxirq(priv);
+       priv->dmaops->clear_txirq(priv);
+       spin_unlock(&priv->rxdma_irq_lock);
 
        if (likely(napi_schedule_prep(&priv->napi))) {
+               spin_lock(&priv->rxdma_irq_lock);
                priv->dmaops->disable_rxirq(priv);
                priv->dmaops->disable_txirq(priv);
+               spin_unlock(&priv->rxdma_irq_lock);
                __napi_schedule(&priv->napi);
        }
 
-       /* reset IRQs */
-       priv->dmaops->clear_rxirq(priv);
-       priv->dmaops->clear_txirq(priv);
-
-       spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
 
        return IRQ_HANDLED;
 }
@@ -1399,7 +1398,7 @@ static int altera_tse_probe(struct platform_device *pdev)
        }
 
        if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
-                                &priv->rx_fifo_depth)) {
+                                &priv->tx_fifo_depth)) {
                dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
                ret = -ENXIO;
                goto err_free_netdev;
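
The tse_poll() rework above follows the standard NAPI completion contract: only complete NAPI and re-enable interrupts when the ring was drained within budget. A sketch of that contract, assuming hypothetical process_rx() and reenable_irqs() helpers (they stand in for the driver's own routines and are not part of altera_tse):

#include <linux/netdevice.h>

/* Hypothetical helpers standing in for the driver's rx-processing and
 * irq re-enable routines.
 */
int process_rx(struct napi_struct *napi, int budget);
void reenable_irqs(struct napi_struct *napi);

static int example_poll(struct napi_struct *napi, int budget)
{
        int done = process_rx(napi, budget);

        if (done < budget) {            /* ring drained within budget */
                napi_complete(napi);    /* stop polling ... */
                reenable_irqs(napi);    /* ... and let interrupts resume */
        }
        return done;                    /* never report more than budget */
}
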
index b93d4404d975571f0f4033f06f4de15b576156d3..885b02b5be07f6732fc0540684cb7875aeec1140 100644 (file)
@@ -609,6 +609,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
        }
 }
 
+static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+{
+       struct xgbe_channel *channel;
+       struct net_device *netdev = pdata->netdev;
+       unsigned int i;
+       int ret;
+
+       ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
+                              netdev->name, pdata);
+       if (ret) {
+               netdev_alert(netdev, "error requesting irq %d\n",
+                            pdata->dev_irq);
+               return ret;
+       }
+
+       if (!pdata->per_channel_irq)
+               return 0;
+
+       channel = pdata->channel;
+       for (i = 0; i < pdata->channel_count; i++, channel++) {
+               snprintf(channel->dma_irq_name,
+                        sizeof(channel->dma_irq_name) - 1,
+                        "%s-TxRx-%u", netdev_name(netdev),
+                        channel->queue_index);
+
+               ret = devm_request_irq(pdata->dev, channel->dma_irq,
+                                      xgbe_dma_isr, 0,
+                                      channel->dma_irq_name, channel);
+               if (ret) {
+                       netdev_alert(netdev, "error requesting irq %d\n",
+                                    channel->dma_irq);
+                       goto err_irq;
+               }
+       }
+
+       return 0;
+
+err_irq:
+       /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+       for (i--, channel--; i < pdata->channel_count; i--, channel--)
+               devm_free_irq(pdata->dev, channel->dma_irq, channel);
+
+       devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+       return ret;
+}
+
+static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+{
+       struct xgbe_channel *channel;
+       unsigned int i;
+
+       devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+       if (!pdata->per_channel_irq)
+               return;
+
+       channel = pdata->channel;
+       for (i = 0; i < pdata->channel_count; i++, channel++)
+               devm_free_irq(pdata->dev, channel->dma_irq, channel);
+}
+
 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
 {
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -810,20 +872,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
                return -EINVAL;
        }
 
-       phy_stop(pdata->phydev);
-
        spin_lock_irqsave(&pdata->lock, flags);
 
        if (caller == XGMAC_DRIVER_CONTEXT)
                netif_device_detach(netdev);
 
        netif_tx_stop_all_queues(netdev);
-       xgbe_napi_disable(pdata, 0);
 
-       /* Powerdown Tx/Rx */
        hw_if->powerdown_tx(pdata);
        hw_if->powerdown_rx(pdata);
 
+       xgbe_napi_disable(pdata, 0);
+
+       phy_stop(pdata->phydev);
+
        pdata->power_down = 1;
 
        spin_unlock_irqrestore(&pdata->lock, flags);
@@ -854,14 +916,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
 
        phy_start(pdata->phydev);
 
-       /* Enable Tx/Rx */
+       xgbe_napi_enable(pdata, 0);
+
        hw_if->powerup_tx(pdata);
        hw_if->powerup_rx(pdata);
 
        if (caller == XGMAC_DRIVER_CONTEXT)
                netif_device_attach(netdev);
 
-       xgbe_napi_enable(pdata, 0);
        netif_tx_start_all_queues(netdev);
 
        spin_unlock_irqrestore(&pdata->lock, flags);
@@ -875,6 +937,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 {
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct net_device *netdev = pdata->netdev;
+       int ret;
 
        DBGPR("-->xgbe_start\n");
 
@@ -884,17 +947,31 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 
        phy_start(pdata->phydev);
 
+       xgbe_napi_enable(pdata, 1);
+
+       ret = xgbe_request_irqs(pdata);
+       if (ret)
+               goto err_napi;
+
        hw_if->enable_tx(pdata);
        hw_if->enable_rx(pdata);
 
        xgbe_init_tx_timers(pdata);
 
-       xgbe_napi_enable(pdata, 1);
        netif_tx_start_all_queues(netdev);
 
        DBGPR("<--xgbe_start\n");
 
        return 0;
+
+err_napi:
+       xgbe_napi_disable(pdata, 1);
+
+       phy_stop(pdata->phydev);
+
+       hw_if->exit(pdata);
+
+       return ret;
 }
 
 static void xgbe_stop(struct xgbe_prv_data *pdata)
@@ -907,16 +984,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 
        DBGPR("-->xgbe_stop\n");
 
-       phy_stop(pdata->phydev);
-
        netif_tx_stop_all_queues(netdev);
-       xgbe_napi_disable(pdata, 1);
 
        xgbe_stop_tx_timers(pdata);
 
        hw_if->disable_tx(pdata);
        hw_if->disable_rx(pdata);
 
+       xgbe_free_irqs(pdata);
+
+       xgbe_napi_disable(pdata, 1);
+
+       phy_stop(pdata->phydev);
+
+       hw_if->exit(pdata);
+
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
@@ -931,10 +1013,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 
 static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
 {
-       struct xgbe_channel *channel;
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       unsigned int i;
-
        DBGPR("-->xgbe_restart_dev\n");
 
        /* If not running, "restart" will happen on open */
@@ -942,19 +1020,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
                return;
 
        xgbe_stop(pdata);
-       synchronize_irq(pdata->dev_irq);
-       if (pdata->per_channel_irq) {
-               channel = pdata->channel;
-               for (i = 0; i < pdata->channel_count; i++, channel++)
-                       synchronize_irq(channel->dma_irq);
-       }
 
        xgbe_free_tx_data(pdata);
        xgbe_free_rx_data(pdata);
 
-       /* Issue software reset to device */
-       hw_if->exit(pdata);
-
        xgbe_start(pdata);
 
        DBGPR("<--xgbe_restart_dev\n");
@@ -1283,10 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
 static int xgbe_open(struct net_device *netdev)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
-       struct xgbe_channel *channel = NULL;
-       unsigned int i = 0;
        int ret;
 
        DBGPR("-->xgbe_open\n");
@@ -1329,55 +1395,14 @@ static int xgbe_open(struct net_device *netdev)
        INIT_WORK(&pdata->restart_work, xgbe_restart);
        INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
 
-       /* Request interrupts */
-       ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
-                              netdev->name, pdata);
-       if (ret) {
-               netdev_alert(netdev, "error requesting irq %d\n",
-                            pdata->dev_irq);
-               goto err_rings;
-       }
-
-       if (pdata->per_channel_irq) {
-               channel = pdata->channel;
-               for (i = 0; i < pdata->channel_count; i++, channel++) {
-                       snprintf(channel->dma_irq_name,
-                                sizeof(channel->dma_irq_name) - 1,
-                                "%s-TxRx-%u", netdev_name(netdev),
-                                channel->queue_index);
-
-                       ret = devm_request_irq(pdata->dev, channel->dma_irq,
-                                              xgbe_dma_isr, 0,
-                                              channel->dma_irq_name, channel);
-                       if (ret) {
-                               netdev_alert(netdev,
-                                            "error requesting irq %d\n",
-                                            channel->dma_irq);
-                               goto err_irq;
-                       }
-               }
-       }
-
        ret = xgbe_start(pdata);
        if (ret)
-               goto err_start;
+               goto err_rings;
 
        DBGPR("<--xgbe_open\n");
 
        return 0;
 
-err_start:
-       hw_if->exit(pdata);
-
-err_irq:
-       if (pdata->per_channel_irq) {
-               /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
-               for (i--, channel--; i < pdata->channel_count; i--, channel--)
-                       devm_free_irq(pdata->dev, channel->dma_irq, channel);
-       }
-
-       devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-
 err_rings:
        desc_if->free_ring_resources(pdata);
 
@@ -1399,30 +1424,16 @@ err_phy_init:
 static int xgbe_close(struct net_device *netdev)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
-       struct xgbe_channel *channel;
-       unsigned int i;
 
        DBGPR("-->xgbe_close\n");
 
        /* Stop the device */
        xgbe_stop(pdata);
 
-       /* Issue software reset to device */
-       hw_if->exit(pdata);
-
        /* Free the ring descriptors and buffers */
        desc_if->free_ring_resources(pdata);
 
-       /* Release the interrupts */
-       devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-       if (pdata->per_channel_irq) {
-               channel = pdata->channel;
-               for (i = 0; i < pdata->channel_count; i++, channel++)
-                       devm_free_irq(pdata->dev, channel->dma_irq, channel);
-       }
-
        /* Free the channel and ring structures */
        xgbe_free_channels(pdata);
 
index 5b308a4a4d0eccc35c641967fcf2459ec736f094..783543ad1fcfa1a4976090e0797f7f15f29a1724 100644 (file)
@@ -274,9 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
        /* RBUF misc statistics */
        STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
        STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
-       STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
-       STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
-       STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+       STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+       STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
+       STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
 };
 
 #define BCM_SYSPORT_STATS_LEN  ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -345,6 +345,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
                s = &bcm_sysport_gstrings_stats[i];
                switch (s->type) {
                case BCM_SYSPORT_STAT_NETDEV:
+               case BCM_SYSPORT_STAT_SOFT:
                        continue;
                case BCM_SYSPORT_STAT_MIB_RX:
                case BCM_SYSPORT_STAT_MIB_TX:
index fc19417d82a505dc61c9f25a940f891522763e00..7e3d87a88c76a81e2c36b65559d8b2b0bcf34217 100644 (file)
@@ -570,6 +570,7 @@ enum bcm_sysport_stat_type {
        BCM_SYSPORT_STAT_RUNT,
        BCM_SYSPORT_STAT_RXCHK,
        BCM_SYSPORT_STAT_RBUF,
+       BCM_SYSPORT_STAT_SOFT,
 };
 
 /* Macros to help define ethtool statistics */
@@ -590,6 +591,7 @@ enum bcm_sysport_stat_type {
 #define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
 #define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
 #define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
+#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT)
 
 #define STAT_RXCHK(str, m, ofs) { \
        .stat_string = str, \
index ff83c46bc38961561441814813f24d9b33da42ac..6befde61c203461a27ac0298d619f4f78b7c366e 100644 (file)
@@ -487,6 +487,7 @@ enum bcmgenet_stat_type {
        BCMGENET_STAT_MIB_TX,
        BCMGENET_STAT_RUNT,
        BCMGENET_STAT_MISC,
+       BCMGENET_STAT_SOFT,
 };
 
 struct bcmgenet_stats {
@@ -515,6 +516,7 @@ struct bcmgenet_stats {
 #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
 #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
 #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
+#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
 
 #define STAT_GENET_MISC(str, m, offset) { \
        .stat_string = str, \
@@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
                        UMAC_RBUF_OVFL_CNT),
        STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
        STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
-       STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
-       STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
-       STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+       STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+       STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
+       STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
 };
 
 #define BCMGENET_STATS_LEN     ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
                s = &bcmgenet_gstrings_stats[i];
                switch (s->type) {
                case BCMGENET_STAT_NETDEV:
+               case BCMGENET_STAT_SOFT:
                        continue;
                case BCMGENET_STAT_MIB_RX:
                case BCMGENET_STAT_MIB_TX:
@@ -971,13 +974,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
 }
 
 /* Unlocked version of the reclaim routine */
-static void __bcmgenet_tx_reclaim(struct net_device *dev,
-                                 struct bcmgenet_tx_ring *ring)
+static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
+                                         struct bcmgenet_tx_ring *ring)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        int last_tx_cn, last_c_index, num_tx_bds;
        struct enet_cb *tx_cb_ptr;
        struct netdev_queue *txq;
+       unsigned int pkts_compl = 0;
        unsigned int bds_compl;
        unsigned int c_index;
 
@@ -1005,6 +1009,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
                tx_cb_ptr = ring->cbs + last_c_index;
                bds_compl = 0;
                if (tx_cb_ptr->skb) {
+                       pkts_compl++;
                        bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
                        dev->stats.tx_bytes += tx_cb_ptr->skb->len;
                        dma_unmap_single(&dev->dev,
@@ -1028,23 +1033,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
                last_c_index &= (num_tx_bds - 1);
        }
 
-       if (ring->free_bds > (MAX_SKB_FRAGS + 1))
-               ring->int_disable(priv, ring);
-
-       if (netif_tx_queue_stopped(txq))
-               netif_tx_wake_queue(txq);
+       if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+               if (netif_tx_queue_stopped(txq))
+                       netif_tx_wake_queue(txq);
+       }
 
        ring->c_index = c_index;
+
+       return pkts_compl;
 }
 
-static void bcmgenet_tx_reclaim(struct net_device *dev,
+static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
                                struct bcmgenet_tx_ring *ring)
 {
+       unsigned int released;
        unsigned long flags;
 
        spin_lock_irqsave(&ring->lock, flags);
-       __bcmgenet_tx_reclaim(dev, ring);
+       released = __bcmgenet_tx_reclaim(dev, ring);
        spin_unlock_irqrestore(&ring->lock, flags);
+
+       return released;
+}
+
+static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
+{
+       struct bcmgenet_tx_ring *ring =
+               container_of(napi, struct bcmgenet_tx_ring, napi);
+       unsigned int work_done = 0;
+
+       work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+
+       if (work_done == 0) {
+               napi_complete(napi);
+               ring->int_enable(ring->priv, ring);
+
+               return 0;
+       }
+
+       return budget;
 }
 
 static void bcmgenet_tx_reclaim_all(struct net_device *dev)
@@ -1302,10 +1329,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
        bcmgenet_tdma_ring_writel(priv, ring->index,
                                  ring->prod_index, TDMA_PROD_INDEX);
 
-       if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
+       if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
                netif_tx_stop_queue(txq);
-               ring->int_enable(priv, ring);
-       }
 
 out:
        spin_unlock_irqrestore(&ring->lock, flags);
@@ -1621,6 +1646,7 @@ static int init_umac(struct bcmgenet_priv *priv)
        struct device *kdev = &priv->pdev->dev;
        int ret;
        u32 reg, cpu_mask_clear;
+       int index;
 
        dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
 
@@ -1647,7 +1673,7 @@ static int init_umac(struct bcmgenet_priv *priv)
 
        bcmgenet_intr_disable(priv);
 
-       cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
+       cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
 
        dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
 
@@ -1674,6 +1700,10 @@ static int init_umac(struct bcmgenet_priv *priv)
 
        bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
 
+       for (index = 0; index < priv->hw_params->tx_queues; index++)
+               bcmgenet_intrl2_1_writel(priv, (1 << index),
+                                        INTRL2_CPU_MASK_CLEAR);
+
        /* Enable rx/tx engine.*/
        dev_dbg(kdev, "done init umac\n");
 
@@ -1693,6 +1723,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
        unsigned int first_bd;
 
        spin_lock_init(&ring->lock);
+       ring->priv = priv;
+       netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
        ring->index = index;
        if (index == DESC_INDEX) {
                ring->queue = 0;
@@ -1738,6 +1770,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
                                  TDMA_WRITE_PTR);
        bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
                                  DMA_END_ADDR);
+
+       napi_enable(&ring->napi);
+}
+
+static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
+                                 unsigned int index)
+{
+       struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
+
+       napi_disable(&ring->napi);
+       netif_napi_del(&ring->napi);
 }
 
 /* Initialize a RDMA ring */
@@ -1907,7 +1950,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
        return ret;
 }
 
-static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
        int i;
 
@@ -1926,6 +1969,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
        kfree(priv->tx_cbs);
 }
 
+static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+{
+       int i;
+
+       bcmgenet_fini_tx_ring(priv, DESC_INDEX);
+
+       for (i = 0; i < priv->hw_params->tx_queues; i++)
+               bcmgenet_fini_tx_ring(priv, i);
+
+       __bcmgenet_fini_dma(priv);
+}
+
 /* init_edma: Initialize DMA control register */
 static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 {
@@ -1952,7 +2007,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
        priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
                               GFP_KERNEL);
        if (!priv->tx_cbs) {
-               bcmgenet_fini_dma(priv);
+               __bcmgenet_fini_dma(priv);
                return -ENOMEM;
        }
 
@@ -1975,9 +2030,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
                        struct bcmgenet_priv, napi);
        unsigned int work_done;
 
-       /* tx reclaim */
-       bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
-
        work_done = bcmgenet_desc_rx(priv, budget);
 
        /* Advancing our consumer index*/
@@ -2022,28 +2074,34 @@ static void bcmgenet_irq_task(struct work_struct *work)
 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 {
        struct bcmgenet_priv *priv = dev_id;
+       struct bcmgenet_tx_ring *ring;
        unsigned int index;
 
        /* Save irq status for bottom-half processing. */
        priv->irq1_stat =
                bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
-               ~priv->int1_mask;
+               ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
        /* clear interrupts */
        bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
 
        netif_dbg(priv, intr, priv->dev,
                  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+
        /* Check the MBDONE interrupts.
         * packet is done, reclaim descriptors
         */
-       if (priv->irq1_stat & 0x0000ffff) {
-               index = 0;
-               for (index = 0; index < 16; index++) {
-                       if (priv->irq1_stat & (1 << index))
-                               bcmgenet_tx_reclaim(priv->dev,
-                                                   &priv->tx_rings[index]);
+       for (index = 0; index < priv->hw_params->tx_queues; index++) {
+               if (!(priv->irq1_stat & BIT(index)))
+                       continue;
+
+               ring = &priv->tx_rings[index];
+
+               if (likely(napi_schedule_prep(&ring->napi))) {
+                       ring->int_disable(priv, ring);
+                       __napi_schedule(&ring->napi);
                }
        }
+
        return IRQ_HANDLED;
 }
 
@@ -2075,8 +2133,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
        }
        if (priv->irq0_stat &
                        (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
-               /* Tx reclaim */
-               bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+               struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
+
+               if (likely(napi_schedule_prep(&ring->napi))) {
+                       ring->int_disable(priv, ring);
+                       __napi_schedule(&ring->napi);
+               }
        }
        if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
                                UMAC_IRQ_PHY_DET_F |
index b36ddec0cc0a3c5d1c64f9c2818a4b44464c41f8..0d370d168aee0ea24924fbc3afc011f3a8697841 100644 (file)
@@ -520,6 +520,7 @@ struct bcmgenet_hw_params {
 
 struct bcmgenet_tx_ring {
        spinlock_t      lock;           /* ring lock */
+       struct napi_struct napi;        /* NAPI per tx queue */
        unsigned int    index;          /* ring index */
        unsigned int    queue;          /* queue index */
        struct enet_cb  *cbs;           /* tx ring buffer control block*/
@@ -534,6 +535,7 @@ struct bcmgenet_tx_ring {
                           struct bcmgenet_tx_ring *);
        void (*int_disable)(struct bcmgenet_priv *priv,
                            struct bcmgenet_tx_ring *);
+       struct bcmgenet_priv *priv;
 };
 
 /* device context */
index 9062a843424688beabaa21e46b3e210387658c81..c308429dd9c7fa0aebf2cee3b951f71f3863d939 100644 (file)
@@ -35,10 +35,10 @@ static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
 }
 
 static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
-                                  int addr_len)
+                                  u8 v6)
 {
-       return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) :
-                               ipv6_clip_hash(ctbl, addr);
+       return v6 ? ipv6_clip_hash(ctbl, addr) :
+                       ipv4_clip_hash(ctbl, addr);
 }
 
 static int clip6_get_mbox(const struct net_device *dev,
@@ -78,23 +78,22 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
        struct clip_entry *ce, *cte;
        u32 *addr = (u32 *)lip;
        int hash;
-       int addr_len;
-       int ret = 0;
+       int ret = -1;
 
        if (!ctbl)
                return 0;
 
-       if (v6)
-               addr_len = 16;
-       else
-               addr_len = 4;
-
-       hash = clip_addr_hash(ctbl, addr, addr_len);
+       hash = clip_addr_hash(ctbl, addr, v6);
 
        read_lock_bh(&ctbl->lock);
        list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
-               if (addr_len == cte->addr_len &&
-                   memcmp(lip, cte->addr, cte->addr_len) == 0) {
+               if (cte->addr6.sin6_family == AF_INET6 && v6)
+                       ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+                                    sizeof(struct in6_addr));
+               else if (cte->addr.sin_family == AF_INET && !v6)
+                       ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+                                    sizeof(struct in_addr));
+               if (!ret) {
                        ce = cte;
                        read_unlock_bh(&ctbl->lock);
                        goto found;
@@ -111,15 +110,20 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
                spin_lock_init(&ce->lock);
                atomic_set(&ce->refcnt, 0);
                atomic_dec(&ctbl->nfree);
-               ce->addr_len = addr_len;
-               memcpy(ce->addr, lip, addr_len);
                list_add_tail(&ce->list, &ctbl->hash_list[hash]);
                if (v6) {
+                       ce->addr6.sin6_family = AF_INET6;
+                       memcpy(ce->addr6.sin6_addr.s6_addr,
+                              lip, sizeof(struct in6_addr));
                        ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
                        if (ret) {
                                write_unlock_bh(&ctbl->lock);
                                return ret;
                        }
+               } else {
+                       ce->addr.sin_family = AF_INET;
+                       memcpy((char *)(&ce->addr.sin_addr), lip,
+                              sizeof(struct in_addr));
                }
        } else {
                write_unlock_bh(&ctbl->lock);
@@ -140,19 +144,19 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
        struct clip_entry *ce, *cte;
        u32 *addr = (u32 *)lip;
        int hash;
-       int addr_len;
-
-       if (v6)
-               addr_len = 16;
-       else
-               addr_len = 4;
+       int ret = -1;
 
-       hash = clip_addr_hash(ctbl, addr, addr_len);
+       hash = clip_addr_hash(ctbl, addr, v6);
 
        read_lock_bh(&ctbl->lock);
        list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
-               if (addr_len == cte->addr_len &&
-                   memcmp(lip, cte->addr, cte->addr_len) == 0) {
+               if (cte->addr6.sin6_family == AF_INET6 && v6)
+                       ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+                                    sizeof(struct in6_addr));
+               else if (cte->addr.sin_family == AF_INET && !v6)
+                       ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+                                    sizeof(struct in_addr));
+               if (!ret) {
                        ce = cte;
                        read_unlock_bh(&ctbl->lock);
                        goto found;
@@ -249,10 +253,7 @@ int clip_tbl_show(struct seq_file *seq, void *v)
        for (i = 0 ; i < ctbl->clipt_size;  ++i) {
                list_for_each_entry(ce, &ctbl->hash_list[i], list) {
                        ip[0] = '\0';
-                       if (ce->addr_len == 16)
-                               sprintf(ip, "%pI6c", ce->addr);
-                       else
-                               sprintf(ip, "%pI4c", ce->addr);
+                       sprintf(ip, "%pISc", &ce->addr);
                        seq_printf(seq, "%-25s   %u\n", ip,
                                   atomic_read(&ce->refcnt));
                }
index 2eaba0161cf8104eb8cbf9756112174fd1275b38..35eb43c6bcbbe37e5f934a767154bc4f4fe2f5c7 100644 (file)
@@ -14,8 +14,10 @@ struct clip_entry {
        spinlock_t lock;        /* Hold while modifying clip reference */
        atomic_t refcnt;
        struct list_head list;
-       u32 addr[4];
-       int addr_len;
+       union {
+               struct sockaddr_in addr;
+               struct sockaddr_in6 addr6;
+       };
 };
 
 struct clip_tbl {
index d6cda17efe6ef475a5579d8248e01a3158bbc272..97842d03675b327d65f564b351f24d12bf72c18b 100644 (file)
@@ -1103,7 +1103,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
 #define T4_MEMORY_WRITE        0
 #define T4_MEMORY_READ 1
 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
-                __be32 *buf, int dir);
+                void *buf, int dir);
 static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
                                  u32 len, __be32 *buf)
 {
index 4d643b65265e8ee0ad12ed886cbb2b77b9d2557b..853c38997c822aa8ab48a6bca5e0e88ae075146d 100644 (file)
@@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
  *     @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
  *     @addr: address within indicated memory type
  *     @len: amount of memory to transfer
- *     @buf: host memory buffer
+ *     @hbuf: host memory buffer
  *     @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
  *
  *     Reads/writes an [almost] arbitrary memory region in the firmware: the
@@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
  *     caller's responsibility to perform appropriate byte order conversions.
  */
 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
-                u32 len, __be32 *buf, int dir)
+                u32 len, void *hbuf, int dir)
 {
        u32 pos, offset, resid, memoffset;
        u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
+       u32 *buf;
 
        /* Argument sanity checks ...
         */
-       if (addr & 0x3)
+       if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
                return -EINVAL;
+       buf = (u32 *)hbuf;
 
        /* It's convenient to be able to handle lengths which aren't a
         * multiple of 32-bits because we often end up transferring files to
@@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
 
        /* Transfer data to/from the adapter as long as there's an integral
         * number of 32-bit transfers to complete.
+        *
+        * A note on Endianness issues:
+        *
+        * The "register" reads and writes below from/to the PCI-E Memory
+        * Window invoke the standard adapter Big-Endian to PCI-E Link
+        * Little-Endian "swizzel."  As a result, if we have the following
+        * data in adapter memory:
+        *
+        *     Memory:  ... | b0 | b1 | b2 | b3 | ...
+        *     Address:      i+0  i+1  i+2  i+3
+        *
+        * Then a read of the adapter memory via the PCI-E Memory Window
+        * will yield:
+        *
+        *     x = readl(i)
+        *         31                  0
+        *         [ b3 | b2 | b1 | b0 ]
+        *
+        * If this value is stored into local memory on a Little-Endian system
+        * it will show up correctly in local memory as:
+        *
+        *     ( ..., b0, b1, b2, b3, ... )
+        *
+        * But on a Big-Endian system, the store will show up in memory
+        * incorrectly swizzled as:
+        *
+        *     ( ..., b3, b2, b1, b0, ... )
+        *
+        * So we need to account for this in the reads and writes to the
+        * PCI-E Memory Window below by undoing the register read/write
+        * swizzels.
         */
        while (len > 0) {
                if (dir == T4_MEMORY_READ)
-                       *buf++ = (__force __be32) t4_read_reg(adap,
-                                                       mem_base + offset);
+                       *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
+                                               mem_base + offset));
                else
                        t4_write_reg(adap, mem_base + offset,
-                                    (__force u32) *buf++);
+                                    (__force u32)cpu_to_le32(*buf++));
                offset += sizeof(__be32);
                len -= sizeof(__be32);
 
@@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
         */
        if (resid) {
                union {
-                       __be32 word;
+                       u32 word;
                        char byte[4];
                } last;
                unsigned char *bp;
                int i;
 
                if (dir == T4_MEMORY_READ) {
-                       last.word = (__force __be32) t4_read_reg(adap,
-                                                       mem_base + offset);
+                       last.word = le32_to_cpu(
+                                       (__force __le32)t4_read_reg(adap,
+                                               mem_base + offset));
                        for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
                                bp[i] = last.byte[i];
                } else {
@@ -584,7 +618,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
                        for (i = resid; i < 4; i++)
                                last.byte[i] = 0;
                        t4_write_reg(adap, mem_base + offset,
-                                    (__force u32) last.word);
+                                    (__force u32)cpu_to_le32(last.word));
                }
        }
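
A small sketch of the byte-order handling the new comment in t4_memory_rw() describes: the memory-window read comes back swizzled into host order, so an explicit __le32 round trip keeps the stored bytes in adapter order on both little- and big-endian hosts. t4_read_reg()/t4_write_reg() and T4_MEMORY_READ are the accessors and constant from the patch; the wrapper itself is illustrative only:

#include <asm/byteorder.h>

/* Illustrative fragment, not part of the patch: copy one 32-bit word
 * between host memory and the adapter through the PCI-E memory window.
 */
static void example_copy_word(struct adapter *adap, u32 mem_base, u32 offset,
                              u32 *buf, int dir)
{
        if (dir == T4_MEMORY_READ)
                /* undo the register-read swizzle before storing to memory */
                *buf = le32_to_cpu((__force __le32)t4_read_reg(adap,
                                                        mem_base + offset));
        else
                /* reapply it on the way back out to the adapter */
                t4_write_reg(adap, mem_base + offset,
                             (__force u32)cpu_to_le32(*buf));
}
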
 
index 9cbe038a388ea62a6f4552e7f088dc5816dc5b5c..a5179bfcdc2c1b6124a92f33f9c5f57363751a02 100644 (file)
@@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
        }
 
        if (ENIC_TEST_INTR(pba, notify_intr)) {
-               vnic_intr_return_all_credits(&enic->intr[notify_intr]);
                enic_notify_check(enic);
+               vnic_intr_return_all_credits(&enic->intr[notify_intr]);
        }
 
        if (ENIC_TEST_INTR(pba, err_intr)) {
@@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
        struct enic *enic = data;
        unsigned int intr = enic_msix_notify_intr(enic);
 
-       vnic_intr_return_all_credits(&enic->intr[intr]);
        enic_notify_check(enic);
+       vnic_intr_return_all_credits(&enic->intr[intr]);
 
        return IRQ_HANDLED;
 }
index 43df78882e484e065706bd04c322fa8276d4c424..178e54028d1047eab23af38ba970303bc0cad357 100644 (file)
@@ -3162,8 +3162,8 @@ static void adjust_link(struct net_device *dev)
        struct phy_device *phydev = priv->phydev;
 
        if (unlikely(phydev->link != priv->oldlink ||
-                    phydev->duplex != priv->oldduplex ||
-                    phydev->speed != priv->oldspeed))
+                    (phydev->link && (phydev->duplex != priv->oldduplex ||
+                                      phydev->speed != priv->oldspeed))))
                gfar_update_link_state(priv);
 }
 
index e8a1adb7a96255bf8da1baa87b29514527c2764d..c05e50759621137fa3f9749a55c347381d54ed55 100644 (file)
@@ -3262,6 +3262,139 @@ static void ehea_remove_device_sysfs(struct platform_device *dev)
        device_remove_file(&dev->dev, &dev_attr_remove_port);
 }
 
+static int ehea_reboot_notifier(struct notifier_block *nb,
+                               unsigned long action, void *unused)
+{
+       if (action == SYS_RESTART) {
+               pr_info("Reboot: freeing all eHEA resources\n");
+               ibmebus_unregister_driver(&ehea_driver);
+       }
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block ehea_reboot_nb = {
+       .notifier_call = ehea_reboot_notifier,
+};
+
+static int ehea_mem_notifier(struct notifier_block *nb,
+                            unsigned long action, void *data)
+{
+       int ret = NOTIFY_BAD;
+       struct memory_notify *arg = data;
+
+       mutex_lock(&dlpar_mem_lock);
+
+       switch (action) {
+       case MEM_CANCEL_OFFLINE:
+               pr_info("memory offlining canceled");
+               /* Fall through: re-add canceled memory block */
+
+       case MEM_ONLINE:
+               pr_info("memory is going online");
+               set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+               if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
+                       goto out_unlock;
+               ehea_rereg_mrs();
+               break;
+
+       case MEM_GOING_OFFLINE:
+               pr_info("memory is going offline");
+               set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+               if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
+                       goto out_unlock;
+               ehea_rereg_mrs();
+               break;
+
+       default:
+               break;
+       }
+
+       ehea_update_firmware_handles();
+       ret = NOTIFY_OK;
+
+out_unlock:
+       mutex_unlock(&dlpar_mem_lock);
+       return ret;
+}
+
+static struct notifier_block ehea_mem_nb = {
+       .notifier_call = ehea_mem_notifier,
+};
+
+static void ehea_crash_handler(void)
+{
+       int i;
+
+       if (ehea_fw_handles.arr)
+               for (i = 0; i < ehea_fw_handles.num_entries; i++)
+                       ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
+                                            ehea_fw_handles.arr[i].fwh,
+                                            FORCE_FREE);
+
+       if (ehea_bcmc_regs.arr)
+               for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
+                       ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
+                                             ehea_bcmc_regs.arr[i].port_id,
+                                             ehea_bcmc_regs.arr[i].reg_type,
+                                             ehea_bcmc_regs.arr[i].macaddr,
+                                             0, H_DEREG_BCMC);
+}
+
+static atomic_t ehea_memory_hooks_registered;
+
+/* Register memory hooks on probe of first adapter */
+static int ehea_register_memory_hooks(void)
+{
+       int ret = 0;
+
+       if (atomic_inc_and_test(&ehea_memory_hooks_registered))
+               return 0;
+
+       ret = ehea_create_busmap();
+       if (ret) {
+               pr_info("ehea_create_busmap failed\n");
+               goto out;
+       }
+
+       ret = register_reboot_notifier(&ehea_reboot_nb);
+       if (ret) {
+               pr_info("register_reboot_notifier failed\n");
+               goto out;
+       }
+
+       ret = register_memory_notifier(&ehea_mem_nb);
+       if (ret) {
+               pr_info("register_memory_notifier failed\n");
+               goto out2;
+       }
+
+       ret = crash_shutdown_register(ehea_crash_handler);
+       if (ret) {
+               pr_info("crash_shutdown_register failed\n");
+               goto out3;
+       }
+
+       return 0;
+
+out3:
+       unregister_memory_notifier(&ehea_mem_nb);
+out2:
+       unregister_reboot_notifier(&ehea_reboot_nb);
+out:
+       return ret;
+}
+
+static void ehea_unregister_memory_hooks(void)
+{
+       if (atomic_read(&ehea_memory_hooks_registered))
+               return;
+
+       unregister_reboot_notifier(&ehea_reboot_nb);
+       if (crash_shutdown_unregister(ehea_crash_handler))
+               pr_info("failed unregistering crash handler\n");
+       unregister_memory_notifier(&ehea_mem_nb);
+}
+
 static int ehea_probe_adapter(struct platform_device *dev)
 {
        struct ehea_adapter *adapter;
@@ -3269,6 +3402,10 @@ static int ehea_probe_adapter(struct platform_device *dev)
        int ret;
        int i;
 
+       ret = ehea_register_memory_hooks();
+       if (ret)
+               return ret;
+
        if (!dev || !dev->dev.of_node) {
                pr_err("Invalid ibmebus device probed\n");
                return -EINVAL;
@@ -3392,81 +3529,6 @@ static int ehea_remove(struct platform_device *dev)
        return 0;
 }
 
-static void ehea_crash_handler(void)
-{
-       int i;
-
-       if (ehea_fw_handles.arr)
-               for (i = 0; i < ehea_fw_handles.num_entries; i++)
-                       ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
-                                            ehea_fw_handles.arr[i].fwh,
-                                            FORCE_FREE);
-
-       if (ehea_bcmc_regs.arr)
-               for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
-                       ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
-                                             ehea_bcmc_regs.arr[i].port_id,
-                                             ehea_bcmc_regs.arr[i].reg_type,
-                                             ehea_bcmc_regs.arr[i].macaddr,
-                                             0, H_DEREG_BCMC);
-}
-
-static int ehea_mem_notifier(struct notifier_block *nb,
-                             unsigned long action, void *data)
-{
-       int ret = NOTIFY_BAD;
-       struct memory_notify *arg = data;
-
-       mutex_lock(&dlpar_mem_lock);
-
-       switch (action) {
-       case MEM_CANCEL_OFFLINE:
-               pr_info("memory offlining canceled");
-               /* Readd canceled memory block */
-       case MEM_ONLINE:
-               pr_info("memory is going online");
-               set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
-               if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
-                       goto out_unlock;
-               ehea_rereg_mrs();
-               break;
-       case MEM_GOING_OFFLINE:
-               pr_info("memory is going offline");
-               set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
-               if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
-                       goto out_unlock;
-               ehea_rereg_mrs();
-               break;
-       default:
-               break;
-       }
-
-       ehea_update_firmware_handles();
-       ret = NOTIFY_OK;
-
-out_unlock:
-       mutex_unlock(&dlpar_mem_lock);
-       return ret;
-}
-
-static struct notifier_block ehea_mem_nb = {
-       .notifier_call = ehea_mem_notifier,
-};
-
-static int ehea_reboot_notifier(struct notifier_block *nb,
-                               unsigned long action, void *unused)
-{
-       if (action == SYS_RESTART) {
-               pr_info("Reboot: freeing all eHEA resources\n");
-               ibmebus_unregister_driver(&ehea_driver);
-       }
-       return NOTIFY_DONE;
-}
-
-static struct notifier_block ehea_reboot_nb = {
-       .notifier_call = ehea_reboot_notifier,
-};
-
 static int check_module_parm(void)
 {
        int ret = 0;
@@ -3520,26 +3582,10 @@ static int __init ehea_module_init(void)
        if (ret)
                goto out;
 
-       ret = ehea_create_busmap();
-       if (ret)
-               goto out;
-
-       ret = register_reboot_notifier(&ehea_reboot_nb);
-       if (ret)
-               pr_info("failed registering reboot notifier\n");
-
-       ret = register_memory_notifier(&ehea_mem_nb);
-       if (ret)
-               pr_info("failed registering memory remove notifier\n");
-
-       ret = crash_shutdown_register(ehea_crash_handler);
-       if (ret)
-               pr_info("failed registering crash handler\n");
-
        ret = ibmebus_register_driver(&ehea_driver);
        if (ret) {
                pr_err("failed registering eHEA device driver on ebus\n");
-               goto out2;
+               goto out;
        }
 
        ret = driver_create_file(&ehea_driver.driver,
@@ -3547,32 +3593,22 @@ static int __init ehea_module_init(void)
        if (ret) {
                pr_err("failed to register capabilities attribute, ret=%d\n",
                       ret);
-               goto out3;
+               goto out2;
        }
 
        return ret;
 
-out3:
-       ibmebus_unregister_driver(&ehea_driver);
 out2:
-       unregister_memory_notifier(&ehea_mem_nb);
-       unregister_reboot_notifier(&ehea_reboot_nb);
-       crash_shutdown_unregister(ehea_crash_handler);
+       ibmebus_unregister_driver(&ehea_driver);
 out:
        return ret;
 }
 
 static void __exit ehea_module_exit(void)
 {
-       int ret;
-
        driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
        ibmebus_unregister_driver(&ehea_driver);
-       unregister_reboot_notifier(&ehea_reboot_nb);
-       ret = crash_shutdown_unregister(ehea_crash_handler);
-       if (ret)
-               pr_info("failed unregistering crash handler\n");
-       unregister_memory_notifier(&ehea_mem_nb);
+       ehea_unregister_memory_hooks();
        kfree(ehea_fw_handles.arr);
        kfree(ehea_bcmc_regs.arr);
        ehea_destroy_busmap();
index 21978cc019e7c86dab83968ba994c0e9051c8e33..072426a72745a8fd984fa26dac7922a89ba189bb 100644 (file)
@@ -1327,6 +1327,28 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
        return ret;
 }
 
+static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
+{
+       struct ibmveth_adapter *adapter = netdev_priv(dev);
+       struct sockaddr *addr = p;
+       u64 mac_address;
+       int rc;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       mac_address = ibmveth_encode_mac_addr(addr->sa_data);
+       rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
+       if (rc) {
+               netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
+               return rc;
+       }
+
+       ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+       return 0;
+}
+
 static const struct net_device_ops ibmveth_netdev_ops = {
        .ndo_open               = ibmveth_open,
        .ndo_stop               = ibmveth_close,
@@ -1337,7 +1359,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
        .ndo_fix_features       = ibmveth_fix_features,
        .ndo_set_features       = ibmveth_set_features,
        .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_set_mac_address    = ibmveth_set_mac_addr,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ibmveth_poll_controller,
 #endif
index 11a9ffebf8d88acdfd5b738baab62bd2b34aa30a..6aea65dae5ed654b5da2e7a8885a02a92c75710d 100644 (file)
@@ -868,8 +868,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
         * The grst delay value is in 100ms units, and we'll wait a
         * couple counts longer to be sure we don't just miss the end.
         */
-       grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
-                       >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+       grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+                   I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+                   I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
        for (cnt = 0; cnt < grst_del + 2; cnt++) {
                reg = rd32(hw, I40E_GLGEN_RSTAT);
                if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
@@ -2846,7 +2847,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
 
        status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
-       if (!status)
+       if (!status && filter_index)
                *filter_index = resp->index;
 
        return status;
index 183dcb63ce98e14e5b504bed9874911b35ae18d3..a11c70ca5a2811c84cc094ac425b93ec0b840d5d 100644 (file)
@@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
        u32 val;
 
        val = rd32(hw, I40E_PRTDCB_GENC);
-       *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >>
+       *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
                       I40E_PRTDCB_GENC_PFCLDA_SHIFT);
 }
 
index 61236f983971a1d55955586cd5a6ee288f5c60e7..c17ee77100d3651e254265ae192bd4d3e54c3659 100644 (file)
@@ -989,8 +989,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
        if (!cmd_buf)
                return count;
        bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
-       if (bytes_not_copied < 0)
+       if (bytes_not_copied < 0) {
+               kfree(cmd_buf);
                return bytes_not_copied;
+       }
        if (bytes_not_copied > 0)
                count -= bytes_not_copied;
        cmd_buf[count] = '\0';
index cbe281be1c9f0c1956c4e7d2467b42474fad70c2..dadda3c5d658b950cf64d21c838f5ffd5176f0f6 100644 (file)
@@ -1512,7 +1512,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
        vsi->tc_config.numtc = numtc;
        vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
        /* Number of queues per enabled TC */
-       num_tc_qps = vsi->alloc_queue_pairs/numtc;
+       /* In the MFP case we can have a much lower count of MSI-X
+        * vectors available, so we need to lower the number of
+        * queues used.
+        */
+       qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+       num_tc_qps = qcount / numtc;
        num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
 
        /* Setup queue offset/count for all TCs for given VSI */
@@ -2684,8 +2689,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
        u16 qoffset, qcount;
        int i, n;
 
-       if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
-               return;
+       if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+               /* Reset the TC information */
+               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       rx_ring = vsi->rx_rings[i];
+                       tx_ring = vsi->tx_rings[i];
+                       rx_ring->dcb_tc = 0;
+                       tx_ring->dcb_tc = 0;
+               }
+       }
 
        for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
                if (!(vsi->tc_config.enabled_tc & (1 << n)))
@@ -3830,6 +3842,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
 {
        int i;
 
+       i40e_stop_misc_vector(pf);
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+               synchronize_irq(pf->msix_entries[0].vector);
+               free_irq(pf->msix_entries[0].vector, pf);
+       }
+
        i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
        for (i = 0; i < pf->num_alloc_vsi; i++)
                if (pf->vsi[i])
@@ -5254,8 +5272,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
 
        /* Wait for the PF's Tx queues to be disabled */
        ret = i40e_pf_wait_txq_disabled(pf);
-       if (!ret)
+       if (ret) {
+               /* Schedule PF reset to recover */
+               set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+               i40e_service_event_schedule(pf);
+       } else {
                i40e_pf_unquiesce_all_vsi(pf);
+       }
+
 exit:
        return ret;
 }
@@ -5587,7 +5611,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
        int i, v;
 
        /* If we're down or resetting, just bail */
-       if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
+       if (test_bit(__I40E_DOWN, &pf->state) ||
+           test_bit(__I40E_CONFIG_BUSY, &pf->state))
                return;
 
        /* for each VSI/netdev
@@ -9533,6 +9558,7 @@ static void i40e_remove(struct pci_dev *pdev)
        set_bit(__I40E_DOWN, &pf->state);
        del_timer_sync(&pf->service_timer);
        cancel_work_sync(&pf->service_task);
+       i40e_fdir_teardown(pf);
 
        if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
                i40e_free_vfs(pf);
@@ -9559,12 +9585,6 @@ static void i40e_remove(struct pci_dev *pdev)
        if (pf->vsi[pf->lan_vsi])
                i40e_vsi_release(pf->vsi[pf->lan_vsi]);
 
-       i40e_stop_misc_vector(pf);
-       if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
-               synchronize_irq(pf->msix_entries[0].vector);
-               free_irq(pf->msix_entries[0].vector, pf);
-       }
-
        /* shutdown and destroy the HMC */
        if (pf->hw.hmc.hmc_obj) {
                ret_code = i40e_shutdown_lan_hmc(&pf->hw);
@@ -9718,6 +9738,8 @@ static void i40e_shutdown(struct pci_dev *pdev)
        wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
        wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
 
+       i40e_clear_interrupt_scheme(pf);
+
        if (system_state == SYSTEM_POWER_OFF) {
                pci_wake_from_d3(pdev, pf->wol_en);
                pci_set_power_state(pdev, PCI_D3hot);
index 3e70f2e45a4768986a0a90ee21bdf98790db9b53..5defe0d635141ed5c886cb1086995f2b2d3d31a1 100644 (file)
@@ -679,9 +679,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
 {
        i40e_status status;
        enum i40e_nvmupd_cmd upd_cmd;
+       bool retry_attempt = false;
 
        upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
 
+retry:
        switch (upd_cmd) {
        case I40E_NVMUPD_WRITE_CON:
                status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
@@ -725,6 +727,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
                *errno = -ESRCH;
                break;
        }
+
+       /* In some circumstances, a multi-write transaction takes longer
+        * than the default 3 minute timeout on the write semaphore.  If
+        * the write failed with an EBUSY status, this is likely the problem,
+        * so here we try to reacquire the semaphore then retry the write.
+        * We only do one retry, then give up.
+        */
+       if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+           !retry_attempt) {
+               i40e_status old_status = status;
+               u32 old_asq_status = hw->aq.asq_last_status;
+               u32 gtime;
+
+               gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+               if (gtime >= hw->nvm.hw_semaphore_timeout) {
+                       i40e_debug(hw, I40E_DEBUG_ALL,
+                                  "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
+                                  gtime, hw->nvm.hw_semaphore_timeout);
+                       i40e_release_nvm(hw);
+                       status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+                       if (status) {
+                               i40e_debug(hw, I40E_DEBUG_ALL,
+                                          "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
+                                          hw->aq.asq_last_status);
+                               status = old_status;
+                               hw->aq.asq_last_status = old_asq_status;
+                       } else {
+                               retry_attempt = true;
+                               goto retry;
+                       }
+               }
+       }
+
        return status;
 }
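
Editor's note: the EBUSY handling above follows a one-shot retry idiom — remember the original status, reacquire the write semaphore, and jump back exactly once. A stripped-down sketch of that idiom with generic names, not the i40e NVM helpers:

/* One-shot retry idiom, illustration only (generic stand-in functions). */
#include <stdbool.h>
#include <errno.h>

extern int do_write(void);		/* hypothetical operation that may return -EBUSY */
extern int reacquire_resource(void);	/* hypothetical "get the semaphore back" helper  */

int write_with_one_retry(void)
{
	bool retried = false;
	int err;

retry:
	err = do_write();
	if (err == -EBUSY && !retried && reacquire_resource() == 0) {
		retried = true;
		goto retry;		/* retry exactly once, then give up */
	}
	return err;
}
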
 
index 2206d2d36f0fdf2e9f249c7624e156f8c1b17118..bbf1b1247ac471bb712ed1397956db83cf80e4f3 100644 (file)
@@ -585,6 +585,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
        }
 }
 
+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring:  tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+       void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+       return le32_to_cpu(*(volatile __le32 *)head);
+}
+
 /**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
@@ -594,10 +608,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
  **/
 static u32 i40e_get_tx_pending(struct i40e_ring *ring)
 {
-       u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
-                       ? ring->next_to_use
-                       : ring->next_to_use + ring->count);
-       return ntu - ring->next_to_clean;
+       u32 head, tail;
+
+       head = i40e_get_head(ring);
+       tail = readl(ring->tail);
+
+       if (head != tail)
+               return (head < tail) ?
+                       tail - head : (tail + ring->count - head);
+
+       return 0;
 }
 
 /**
@@ -606,6 +626,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
  **/
 static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
 {
+       u32 tx_done = tx_ring->stats.packets;
+       u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
        u32 tx_pending = i40e_get_tx_pending(tx_ring);
        struct i40e_pf *pf = tx_ring->vsi->back;
        bool ret = false;
@@ -623,41 +645,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
         * run the check_tx_hang logic with a transmit completion
         * pending but without time to complete it yet.
         */
-       if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-           (tx_pending >= I40E_MIN_DESC_PENDING)) {
+       if ((tx_done_old == tx_done) && tx_pending) {
                /* make sure it is true for two checks in a row */
                ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
                                       &tx_ring->state);
-       } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-                  (tx_pending < I40E_MIN_DESC_PENDING) &&
-                  (tx_pending > 0)) {
+       } else if (tx_done_old == tx_done &&
+                  (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
                if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
                        dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
                                 tx_pending, tx_ring->queue_index);
                pf->tx_sluggish_count++;
        } else {
                /* update completed stats and disarm the hang check */
-               tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+               tx_ring->tx_stats.tx_done_old = tx_done;
                clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
        }
 
        return ret;
 }
 
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring:  tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-       void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
-       return le32_to_cpu(*(volatile __le32 *)head);
-}
-
 #define WB_STRIDE 0x3
 
 /**
@@ -2139,6 +2145,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
        return __i40e_maybe_stop_tx(tx_ring, size);
 }
 
+/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb:      send buffer
+ * @tx_flags: collected send information
+ * @hdr_len:  size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+                              const u8 hdr_len)
+{
+       struct skb_frag_struct *frag;
+       bool linearize = false;
+       unsigned int size = 0;
+       u16 num_frags;
+       u16 gso_segs;
+
+       num_frags = skb_shinfo(skb)->nr_frags;
+       gso_segs = skb_shinfo(skb)->gso_segs;
+
+       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+               u16 j = 1;
+
+               if (num_frags < (I40E_MAX_BUFFER_TXD))
+                       goto linearize_chk_done;
+               /* try the simple math, if we have too many frags per segment */
+               if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+                   I40E_MAX_BUFFER_TXD) {
+                       linearize = true;
+                       goto linearize_chk_done;
+               }
+               frag = &skb_shinfo(skb)->frags[0];
+               size = hdr_len;
+               /* we might still have more fragments per segment */
+               do {
+                       size += skb_frag_size(frag);
+                       frag++; j++;
+                       if (j == I40E_MAX_BUFFER_TXD) {
+                               if (size < skb_shinfo(skb)->gso_size) {
+                                       linearize = true;
+                                       break;
+                               }
+                               j = 1;
+                               size -= skb_shinfo(skb)->gso_size;
+                               if (size)
+                                       j++;
+                               size += hdr_len;
+                       }
+                       num_frags--;
+               } while (num_frags);
+       } else {
+               if (num_frags >= I40E_MAX_BUFFER_TXD)
+                       linearize = true;
+       }
+
+linearize_chk_done:
+       return linearize;
+}
+
 /**
  * i40e_tx_map - Build the Tx descriptor
  * @tx_ring:  ring to send buffer on
@@ -2396,6 +2463,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        if (tsyn)
                tx_flags |= I40E_TX_FLAGS_TSYN;
 
+       if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+               if (skb_linearize(skb))
+                       goto out_drop;
+
        skb_tx_timestamp(skb);
 
        /* always enable CRC insertion offload */
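
Editor's note: the reworked i40e_get_tx_pending() earlier in this file reads the head from the write-back location and the tail register and handles ring wrap-around. A worked sketch of the same arithmetic with made-up indices, not driver state:

/* Illustration only: pending entries on a circular ring. */
#include <stdio.h>

static unsigned int ring_pending(unsigned int head, unsigned int tail,
				 unsigned int count)
{
	if (head == tail)
		return 0;
	return (head < tail) ? tail - head : tail + count - head;
}

int main(void)
{
	printf("%u\n", ring_pending(10, 25, 512));	/* 15 - no wrap   */
	printf("%u\n", ring_pending(500, 20, 512));	/* 32 - wrapped   */
	printf("%u\n", ring_pending(7, 7, 512));	/* 0  - idle ring */
	return 0;
}
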
index 18b00231d2f117d714e7e1399aecba0061ead41a..dff0baeb1ecc092e53ef22ea373be46e79df13a2 100644 (file)
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
 
 #define i40e_rx_desc i40e_32byte_rx_desc
 
+#define I40E_MAX_BUFFER_TXD    8
 #define I40E_MIN_TX_LEN                17
 #define I40E_MAX_DATA_PER_TXD  8192
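
Editor's note: the new I40E_MAX_BUFFER_TXD limit of 8 backs the i40e_chk_linearize() helper added above; for TSO it first applies a quick average test, DIV_ROUND_UP(num_frags + gso_segs, gso_segs) > 8, before walking fragments per segment. A worked sketch of that quick test with made-up counts:

/* Illustration only: the quick "too many frags per segment" test. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MAX_BUFFER_TXD		8

int main(void)
{
	unsigned int num_frags = 17, gso_segs = 2;	/* made-up skb shape */

	if (DIV_ROUND_UP(num_frags + gso_segs, gso_segs) > MAX_BUFFER_TXD)
		printf("would linearize\n");		/* 19/2 -> 10 > 8 */
	else
		printf("descriptor chain fits\n");
	return 0;
}
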
 
index 29004382f462ce717fd27b5b004139f5ef0efd1a..708891571dae328299e2b31e83cbf40b42726473 100644 (file)
@@ -125,6 +125,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
        }
 }
 
+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring:  tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+       void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+       return le32_to_cpu(*(volatile __le32 *)head);
+}
+
 /**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
@@ -134,10 +148,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
  **/
 static u32 i40e_get_tx_pending(struct i40e_ring *ring)
 {
-       u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
-                       ? ring->next_to_use
-                       : ring->next_to_use + ring->count);
-       return ntu - ring->next_to_clean;
+       u32 head, tail;
+
+       head = i40e_get_head(ring);
+       tail = readl(ring->tail);
+
+       if (head != tail)
+               return (head < tail) ?
+                       tail - head : (tail + ring->count - head);
+
+       return 0;
 }
 
 /**
@@ -146,6 +166,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
  **/
 static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
 {
+       u32 tx_done = tx_ring->stats.packets;
+       u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
        u32 tx_pending = i40e_get_tx_pending(tx_ring);
        bool ret = false;
 
@@ -162,36 +184,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
         * run the check_tx_hang logic with a transmit completion
         * pending but without time to complete it yet.
         */
-       if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-           (tx_pending >= I40E_MIN_DESC_PENDING)) {
+       if ((tx_done_old == tx_done) && tx_pending) {
                /* make sure it is true for two checks in a row */
                ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
                                       &tx_ring->state);
-       } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) ||
-                  !(tx_pending < I40E_MIN_DESC_PENDING) ||
-                  !(tx_pending > 0)) {
+       } else if (tx_done_old == tx_done &&
+                  (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
                /* update completed stats and disarm the hang check */
-               tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+               tx_ring->tx_stats.tx_done_old = tx_done;
                clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
        }
 
        return ret;
 }
 
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring:  tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-       void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
-       return le32_to_cpu(*(volatile __le32 *)head);
-}
-
 #define WB_STRIDE 0x3
 
 /**
@@ -1206,17 +1212,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       if (protocol == htons(ETH_P_IP)) {
-               iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+       iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+       ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+       if (iph->version == 4) {
                tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
                tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                 0, IPPROTO_TCP, 0);
-       } else if (skb_is_gso_v6(skb)) {
-
-               ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
-                                          : ipv6_hdr(skb);
+       } else if (ipv6h->version == 6) {
                tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
                ipv6h->payload_len = 0;
                tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1274,13 +1279,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                        }
                } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
-                       if (tx_flags & I40E_TX_FLAGS_TSO) {
-                               *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+                       *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+                       if (tx_flags & I40E_TX_FLAGS_TSO)
                                ip_hdr(skb)->check = 0;
-                       } else {
-                               *cd_tunneling |=
-                                        I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-                       }
                }
 
                /* Now set the ctx descriptor fields */
@@ -1290,6 +1291,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                   ((skb_inner_network_offset(skb) -
                                        skb_transport_offset(skb)) >> 1) <<
                                   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+               if (this_ip_hdr->version == 6) {
+                       tx_flags &= ~I40E_TX_FLAGS_IPV4;
+                       tx_flags |= I40E_TX_FLAGS_IPV6;
+               }
+
 
        } else {
                network_hdr_len = skb_network_header_len(skb);
@@ -1380,6 +1386,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
        context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
 }
 
+/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb:      send buffer
+ * @tx_flags: collected send information
+ * @hdr_len:  size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+                              const u8 hdr_len)
+{
+       struct skb_frag_struct *frag;
+       bool linearize = false;
+       unsigned int size = 0;
+       u16 num_frags;
+       u16 gso_segs;
+
+       num_frags = skb_shinfo(skb)->nr_frags;
+       gso_segs = skb_shinfo(skb)->gso_segs;
+
+       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+               u16 j = 1;
+
+               if (num_frags < (I40E_MAX_BUFFER_TXD))
+                       goto linearize_chk_done;
+               /* try the simple math, if we have too many frags per segment */
+               if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+                   I40E_MAX_BUFFER_TXD) {
+                       linearize = true;
+                       goto linearize_chk_done;
+               }
+               frag = &skb_shinfo(skb)->frags[0];
+               size = hdr_len;
+               /* we might still have more fragments per segment */
+               do {
+                       size += skb_frag_size(frag);
+                       frag++; j++;
+                       if (j == I40E_MAX_BUFFER_TXD) {
+                               if (size < skb_shinfo(skb)->gso_size) {
+                                       linearize = true;
+                                       break;
+                               }
+                               j = 1;
+                               size -= skb_shinfo(skb)->gso_size;
+                               if (size)
+                                       j++;
+                               size += hdr_len;
+                       }
+                       num_frags--;
+               } while (num_frags);
+       } else {
+               if (num_frags >= I40E_MAX_BUFFER_TXD)
+                       linearize = true;
+       }
+
+linearize_chk_done:
+       return linearize;
+}
+
 /**
  * i40e_tx_map - Build the Tx descriptor
  * @tx_ring:  ring to send buffer on
@@ -1654,6 +1721,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        else if (tso)
                tx_flags |= I40E_TX_FLAGS_TSO;
 
+       if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+               if (skb_linearize(skb))
+                       goto out_drop;
+
        skb_tx_timestamp(skb);
 
        /* always enable CRC insertion offload */
index 4e15903b2b6ded2a054981999f71e2406b1e80a2..c950a038237c2c63dc66b9bf7be887556b80a5de 100644 (file)
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
 
 #define i40e_rx_desc i40e_32byte_rx_desc
 
+#define I40E_MAX_BUFFER_TXD    8
 #define I40E_MIN_TX_LEN                17
 #define I40E_MAX_DATA_PER_TXD  8192
 
index 2d8ee66138e8ad48cb72daa773a67c1f421e4cac..a61009f4b2df728e05a4e54def4bb1336949f03e 100644 (file)
@@ -81,12 +81,14 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
 {
        u32 loopback_ok = 0;
        int i;
-
+       bool gro_enabled;
 
         priv->loopback_ok = 0;
        priv->validate_loopback = 1;
+       gro_enabled = priv->dev->features & NETIF_F_GRO;
 
        mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+       priv->dev->features &= ~NETIF_F_GRO;
 
        /* xmit */
        if (mlx4_en_test_loopback_xmit(priv)) {
@@ -108,6 +110,10 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
 mlx4_en_test_loopback_exit:
 
        priv->validate_loopback = 0;
+
+       if (gro_enabled)
+               priv->dev->features |= NETIF_F_GRO;
+
        mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
        return !loopback_ok;
 }
index 2bb8553bd9054b25456ec694ee25696e93ebde25..eda29dbbfcd259824f0a0fbec3876975f215d2e2 100644 (file)
@@ -412,7 +412,6 @@ err_icm:
 
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
-#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
 int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
                   enum mlx4_update_qp_attr attr,
                   struct mlx4_update_qp_params *params)
index 486e3d26cd4a9ef4bb6a23995b85ac50cd413776..d97ca88c55b59e039af66991e4ad413f82984158 100644 (file)
@@ -713,7 +713,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
-       int port;
+       int port, err = 0;
 
        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
@@ -738,7 +738,9 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
                        } else {
                                struct mlx4_update_qp_params params = {.flags = 0};
 
-                               mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+                               err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+                               if (err)
+                                       goto out;
                        }
                }
 
@@ -773,7 +775,8 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
-       return 0;
+out:
+       return err;
 }
 
 static int mpt_mask(struct mlx4_dev *dev)
index 44e8d7d255474d30bf48577723caf9032af43854..57a6e6cd74fc3c9c99708530b431cd0e5b768f8c 100644 (file)
@@ -1239,11 +1239,9 @@ static int pasemi_mac_open(struct net_device *dev)
        if (mac->phydev)
                phy_start(mac->phydev);
 
-       init_timer(&mac->tx->clean_timer);
-       mac->tx->clean_timer.function = pasemi_mac_tx_timer;
-       mac->tx->clean_timer.data = (unsigned long)mac->tx;
-       mac->tx->clean_timer.expires = jiffies+HZ;
-       add_timer(&mac->tx->clean_timer);
+       setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
+                   (unsigned long)mac->tx);
+       mod_timer(&mac->tx->clean_timer, jiffies + HZ);
 
        return 0;
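
Editor's note: this hunk (and the similar smc91c92 and stmmac ones later in the series) replaces open-coded init_timer()/add_timer() setup with the setup_timer() and mod_timer() helpers. A minimal sketch of the pattern with a hypothetical callback and private data:

/* Sketch of the conversion pattern (hypothetical callback and data). */
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_timer;

static void my_poll(unsigned long data)
{
	/* periodic work for the object passed in 'data' would go here */
}

static void my_start(unsigned long priv_data)
{
	/* one call instead of init_timer() plus manual .function/.data/.expires */
	setup_timer(&my_timer, my_poll, priv_data);
	mod_timer(&my_timer, jiffies + HZ);
}
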
 
index 6e426ae9469228ed55586bca15a8eef1dcb5e5c4..0a5e204a0179a35c15f52a3dea58729c30c2c31f 100644 (file)
@@ -354,7 +354,7 @@ struct cmd_desc_type0 {
 
 } __attribute__ ((aligned(64)));
 
-/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
 struct rcv_desc {
        __le16 reference_handle;
        __le16 reserved;
@@ -499,7 +499,7 @@ struct uni_data_desc{
 #define NETXEN_IMAGE_START     0x43000 /* compressed image */
 #define NETXEN_SECONDARY_START 0x200000        /* backup images */
 #define NETXEN_PXE_START       0x3E0000        /* PXE boot rom */
-#define NETXEN_USER_START      0x3E8000        /* Firmare info */
+#define NETXEN_USER_START      0x3E8000        /* Firmware info */
 #define NETXEN_FIXED_START     0x3F0000        /* backup of crbinit */
 #define NETXEN_USER_START_OLD  NETXEN_PXE_START /* very old flash */
 
index fa4317611fd63fe81df2e23e47fa307b8c5c5348..f221126a5c4e6789cb2630a07dc58b02f0676239 100644 (file)
@@ -314,7 +314,7 @@ struct qlcnic_fdt {
 #define QLCNIC_BRDCFG_START    0x4000          /* board config */
 #define QLCNIC_BOOTLD_START    0x10000         /* bootld */
 #define QLCNIC_IMAGE_START     0x43000         /* compressed image */
-#define QLCNIC_USER_START      0x3E8000        /* Firmare info */
+#define QLCNIC_USER_START      0x3E8000        /* Firmware info */
 
 #define QLCNIC_FW_VERSION_OFFSET       (QLCNIC_USER_START+0x408)
 #define QLCNIC_FW_SIZE_OFFSET          (QLCNIC_USER_START+0x40c)
index ad0020af2193da8749534c25242047212cccc1a4..c70ab40d86989974d54c9161bf7acd8558d93c74 100644 (file)
@@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
        int rc = -EINVAL;
 
        if (!rtl_fw_format_ok(tp, rtl_fw)) {
-               netif_err(tp, ifup, dev, "invalid firwmare\n");
+               netif_err(tp, ifup, dev, "invalid firmware\n");
                goto out;
        }
 
@@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
        RTL_W8(ChipCmd, CmdReset);
 
        rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
-
-       netdev_reset_queue(tp->dev);
 }
 
 static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -7049,7 +7047,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
        u32 status, len;
        u32 opts[2];
        int frags;
-       bool stop_queue;
 
        if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
                netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7090,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
        txd->opts2 = cpu_to_le32(opts[1]);
 
-       netdev_sent_queue(dev, skb->len);
-
        skb_tx_timestamp(skb);
 
        /* Force memory writes to complete before releasing descriptor */
@@ -7106,16 +7101,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
        tp->cur_tx += frags + 1;
 
-       stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS);
+       RTL_W8(TxPoll, NPQ);
 
-       if (!skb->xmit_more || stop_queue ||
-           netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
-               RTL_W8(TxPoll, NPQ);
-
-               mmiowb();
-       }
+       mmiowb();
 
-       if (stop_queue) {
+       if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
                /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
                 * not miss a ring update when it notices a stopped queue.
                 */
@@ -7198,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
 static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 {
        unsigned int dirty_tx, tx_left;
-       unsigned int bytes_compl = 0, pkts_compl = 0;
 
        dirty_tx = tp->dirty_tx;
        smp_rmb();
@@ -7222,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
                rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
                                     tp->TxDescArray + entry);
                if (status & LastFrag) {
-                       pkts_compl++;
-                       bytes_compl += tx_skb->skb->len;
+                       u64_stats_update_begin(&tp->tx_stats.syncp);
+                       tp->tx_stats.packets++;
+                       tp->tx_stats.bytes += tx_skb->skb->len;
+                       u64_stats_update_end(&tp->tx_stats.syncp);
                        dev_kfree_skb_any(tx_skb->skb);
                        tx_skb->skb = NULL;
                }
@@ -7232,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
        }
 
        if (tp->dirty_tx != dirty_tx) {
-               netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
-
-               u64_stats_update_begin(&tp->tx_stats.syncp);
-               tp->tx_stats.packets += pkts_compl;
-               tp->tx_stats.bytes += bytes_compl;
-               u64_stats_update_end(&tp->tx_stats.syncp);
-
                tp->dirty_tx = dirty_tx;
                /* Sync with rtl8169_start_xmit:
                 * - publish dirty_tx ring index (write barrier)
index 4da8bd263997a17baf89b5fe7a3d2198f186827b..736d5d1624a142e902d6023cf3ee801c5169fa14 100644 (file)
@@ -508,7 +508,6 @@ static struct sh_eth_cpu_data r8a779x_data = {
        .tpauser        = 1,
        .hw_swap        = 1,
        .rmiimode       = 1,
-       .shift_rd0      = 1,
 };
 
 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -1392,6 +1391,9 @@ static void sh_eth_dev_exit(struct net_device *ndev)
        msleep(2); /* max frame time at 10 Mbps < 1250 us */
        sh_eth_get_stats(ndev);
        sh_eth_reset(ndev);
+
+       /* Set MAC address again */
+       update_mac_address(ndev);
 }
 
 /* free Tx skb function */
@@ -1407,6 +1409,8 @@ static int sh_eth_txfree(struct net_device *ndev)
                txdesc = &mdp->tx_ring[entry];
                if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
                        break;
+               /* TACT bit must be checked before all the following reads */
+               rmb();
                /* Free the original skb. */
                if (mdp->tx_skbuff[entry]) {
                        dma_unmap_single(&ndev->dev, txdesc->addr,
@@ -1444,6 +1448,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
        limit = boguscnt;
        rxdesc = &mdp->rx_ring[entry];
        while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
+               /* RACT bit must be checked before all the following reads */
+               rmb();
                desc_status = edmac_to_cpu(mdp, rxdesc->status);
                pkt_len = rxdesc->frame_length;
 
@@ -1455,8 +1461,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 
                /* In case of almost all GETHER/ETHERs, the Receive Frame State
                 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
-                * bit 0. However, in case of the R8A7740, R8A779x, and
-                * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
+                * bit 0. However, in case of the R8A7740 and R7S72100
+                * the RFS bits are from bit 25 to bit 16. So, the
                 * driver needs right shifting by 16.
                 */
                if (mdp->cd->shift_rd0)
@@ -1523,6 +1529,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                        skb_checksum_none_assert(skb);
                        rxdesc->addr = dma_addr;
                }
+               wmb(); /* RACT bit must be set after all the above writes */
                if (entry >= mdp->num_rx_ring - 1)
                        rxdesc->status |=
                                cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
@@ -1535,7 +1542,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
        /* If we don't need to check status, don't. -KDU */
        if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
                /* fix the values for the next receiving if RDE is set */
-               if (intr_status & EESR_RDE) {
+               if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) {
                        u32 count = (sh_eth_read(ndev, RDFAR) -
                                     sh_eth_read(ndev, RDLAR)) >> 4;
 
@@ -2174,7 +2181,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        }
        spin_unlock_irqrestore(&mdp->lock, flags);
 
-       if (skb_padto(skb, ETH_ZLEN))
+       if (skb_put_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;
 
        entry = mdp->cur_tx % mdp->num_tx_ring;
@@ -2192,6 +2199,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        }
        txdesc->buffer_length = skb->len;
 
+       wmb(); /* TACT bit must be set after all the above writes */
        if (entry >= mdp->num_tx_ring - 1)
                txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
        else
index 34389b6aa67cbd26263366ca1bff769f1b27c68a..9fb6948e14c64ef424c032811364aaefc608d4cb 100644 (file)
@@ -1257,9 +1257,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
        u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
 
        if (enable)
-               val |= 1 << rocker_port->lport;
+               val |= 1ULL << rocker_port->lport;
        else
-               val &= ~(1 << rocker_port->lport);
+               val &= ~(1ULL << rocker_port->lport);
        rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
 }
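
Editor's note: the 1ULL change above matters because the plain constant 1 is a 32-bit int, so the shift is done in 32 bits — for an lport of 31 it overflows into the sign bit, and for 32 or more the behaviour is undefined, corrupting the upper half of the 64-bit mask either way. A minimal illustration with a made-up bit index:

/* Illustration only: why the 64-bit constant matters. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ok = 1ULL << 40;	/* well-defined: sets bit 40 */
	/* uint64_t bad = 1 << 40; */	/* undefined: shifts a 32-bit int by 40 */

	printf("%#llx\n", (unsigned long long)ok);
	return 0;
}
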
 
@@ -4201,6 +4201,8 @@ static int rocker_probe_ports(struct rocker *rocker)
 
        alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
        rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
+       if (!rocker->ports)
+               return -ENOMEM;
        for (i = 0; i < rocker->port_count; i++) {
                err = rocker_probe_port(rocker, i);
                if (err)
index 6b33127ab352a43ed6a787af7eedde554241e1b3..3449893aea8d402fb2fc56582df92a04aa157c10 100644 (file)
@@ -1070,11 +1070,8 @@ static int smc_open(struct net_device *dev)
     smc->packets_waiting = 0;
 
     smc_reset(dev);
-    init_timer(&smc->media);
-    smc->media.function = media_check;
-    smc->media.data = (u_long) dev;
-    smc->media.expires = jiffies + HZ;
-    add_timer(&smc->media);
+    setup_timer(&smc->media, media_check, (u_long)dev);
+    mod_timer(&smc->media, jiffies + HZ);
 
     return 0;
 } /* smc_open */
index 88a55f95fe09bc544b0acf0e28bae507086788f8..209ee1b27f8d75aa2a3efa12c74ae28484f465c5 100644 (file)
@@ -91,6 +91,10 @@ static const char version[] =
 
 #include "smc91x.h"
 
+#if defined(CONFIG_ASSABET_NEPONSET)
+#include <mach/neponset.h>
+#endif
+
 #ifndef SMC_NOWAIT
 # define SMC_NOWAIT            0
 #endif
@@ -2355,8 +2359,9 @@ static int smc_drv_probe(struct platform_device *pdev)
        ret = smc_request_attrib(pdev, ndev);
        if (ret)
                goto out_release_io;
-#if defined(CONFIG_SA1100_ASSABET)
-       neponset_ncr_set(NCR_ENET_OSC_EN);
+#if defined(CONFIG_ASSABET_NEPONSET)
+       if (machine_is_assabet() && machine_has_neponset())
+               neponset_ncr_set(NCR_ENET_OSC_EN);
 #endif
        platform_set_drvdata(pdev, ndev);
        ret = smc_enable_device(pdev);
index be67baf5f6778d08df4eaa06216914b77ab8f2b5..3a18501d1068c36816554f953e367ff1439c2a36 100644 (file)
  * Define your architecture specific bus configuration parameters here.
  */
 
-#if defined(CONFIG_ARCH_LUBBOCK) ||\
-    defined(CONFIG_MACH_MAINSTONE) ||\
-    defined(CONFIG_MACH_ZYLONITE) ||\
-    defined(CONFIG_MACH_LITTLETON) ||\
-    defined(CONFIG_MACH_ZYLONITE2) ||\
-    defined(CONFIG_ARCH_VIPER) ||\
-    defined(CONFIG_MACH_STARGATE2) ||\
-    defined(CONFIG_ARCH_VERSATILE)
+#if defined(CONFIG_ARM)
 
 #include <asm/mach-types.h>
 
 /* We actually can't write halfwords properly if not word aligned */
 static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 {
-       if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) {
-               unsigned int v = val << 16;
-               v |= readl(ioaddr + (reg & ~2)) & 0xffff;
-               writel(v, ioaddr + (reg & ~2));
-       } else {
-               writew(val, ioaddr + reg);
-       }
-}
-
-#elif defined(CONFIG_SA1100_PLEB)
-/* We can only do 16-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT       1
-#define SMC_CAN_USE_16BIT      1
-#define SMC_CAN_USE_32BIT      0
-#define SMC_IO_SHIFT           0
-#define SMC_NOWAIT             1
-
-#define SMC_inb(a, r)          readb((a) + (r))
-#define SMC_insb(a, r, p, l)   readsb((a) + (r), p, (l))
-#define SMC_inw(a, r)          readw((a) + (r))
-#define SMC_insw(a, r, p, l)   readsw((a) + (r), p, l)
-#define SMC_outb(v, a, r)      writeb(v, (a) + (r))
-#define SMC_outsb(a, r, p, l)  writesb((a) + (r), p, (l))
-#define SMC_outw(v, a, r)      writew(v, (a) + (r))
-#define SMC_outsw(a, r, p, l)  writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS          (-1)
-
-#elif defined(CONFIG_SA1100_ASSABET)
-
-#include <mach/neponset.h>
-
-/* We can only do 8-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT       1
-#define SMC_CAN_USE_16BIT      0
-#define SMC_CAN_USE_32BIT      0
-#define SMC_NOWAIT             1
-
-/* The first two address lines aren't connected... */
-#define SMC_IO_SHIFT           2
-
-#define SMC_inb(a, r)          readb((a) + (r))
-#define SMC_outb(v, a, r)      writeb(v, (a) + (r))
-#define SMC_insb(a, r, p, l)   readsb((a) + (r), p, (l))
-#define SMC_outsb(a, r, p, l)  writesb((a) + (r), p, (l))
-#define SMC_IRQ_FLAGS          (-1)    /* from resource */
-
-#elif  defined(CONFIG_MACH_LOGICPD_PXA270) ||  \
-       defined(CONFIG_MACH_NOMADIK_8815NHK)
-
-#define SMC_CAN_USE_8BIT       0
-#define SMC_CAN_USE_16BIT      1
-#define SMC_CAN_USE_32BIT      0
-#define SMC_IO_SHIFT           0
-#define SMC_NOWAIT             1
-
-#define SMC_inw(a, r)          readw((a) + (r))
-#define SMC_outw(v, a, r)      writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l)   readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l)  writesw((a) + (r), p, l)
-
-#elif  defined(CONFIG_ARCH_INNOKOM) || \
-       defined(CONFIG_ARCH_PXA_IDP) || \
-       defined(CONFIG_ARCH_RAMSES) || \
-       defined(CONFIG_ARCH_PCM027)
-
-#define SMC_CAN_USE_8BIT       1
-#define SMC_CAN_USE_16BIT      1
-#define SMC_CAN_USE_32BIT      1
-#define SMC_IO_SHIFT           0
-#define SMC_NOWAIT             1
-#define SMC_USE_PXA_DMA                1
-
-#define SMC_inb(a, r)          readb((a) + (r))
-#define SMC_inw(a, r)          readw((a) + (r))
-#define SMC_inl(a, r)          readl((a) + (r))
-#define SMC_outb(v, a, r)      writeb(v, (a) + (r))
-#define SMC_outl(v, a, r)      writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l)   readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l)  writesl((a) + (r), p, l)
-#define SMC_insw(a, r, p, l)   readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l)  writesw((a) + (r), p, l)
-#define SMC_IRQ_FLAGS          (-1)    /* from resource */
-
-/* We actually can't write halfwords properly if not word aligned */
-static inline void
-SMC_outw(u16 val, void __iomem *ioaddr, int reg)
-{
-       if (reg & 2) {
+       if ((machine_is_mainstone() || machine_is_stargate2() ||
+            machine_is_pxa_idp()) && reg & 2) {
                unsigned int v = val << 16;
                v |= readl(ioaddr + (reg & ~2)) & 0xffff;
                writel(v, ioaddr + (reg & ~2));
@@ -237,20 +143,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 #define RPC_LSA_DEFAULT         RPC_LED_100_10
 #define RPC_LSB_DEFAULT         RPC_LED_TX_RX
 
-#elif defined(CONFIG_ARCH_MSM)
-
-#define SMC_CAN_USE_8BIT       0
-#define SMC_CAN_USE_16BIT      1
-#define SMC_CAN_USE_32BIT      0
-#define SMC_NOWAIT             1
-
-#define SMC_inw(a, r)          readw((a) + (r))
-#define SMC_outw(v, a, r)      writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l)   readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l)  writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS          IRQF_TRIGGER_HIGH
-
 #elif defined(CONFIG_COLDFIRE)
 
 #define SMC_CAN_USE_8BIT       0
index 55e89b3838f1cb60df3f2f751ba254eddbef8fa2..a0ea84fe6519badffb8b5cabf0e9892d135ea081 100644 (file)
@@ -310,11 +310,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                spin_lock_irqsave(&priv->lock, flags);
                if (!priv->eee_active) {
                        priv->eee_active = 1;
-                       init_timer(&priv->eee_ctrl_timer);
-                       priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
-                       priv->eee_ctrl_timer.data = (unsigned long)priv;
-                       priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
-                       add_timer(&priv->eee_ctrl_timer);
+                       setup_timer(&priv->eee_ctrl_timer,
+                                   stmmac_eee_ctrl_timer,
+                                   (unsigned long)priv);
+                       mod_timer(&priv->eee_ctrl_timer,
+                                 STMMAC_LPI_T(eee_timer));
 
                        priv->hw->mac->set_eee_timer(priv->hw,
                                                     STMMAC_DEFAULT_LIT_LS,
index 4b51f903fb733cba9b9b8a3fe9539fe3bc811c84..0c5842aeb807014c632a2d713b366133d7021f56 100644 (file)
@@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type)
                *flow_type = IP_USER_FLOW;
                break;
        default:
-               return 0;
+               return -EINVAL;
        }
 
-       return 1;
+       return 0;
 }
 
 static int niu_ethflow_to_class(int flow_type, u64 *class)
@@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
        class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
                TCAM_V4KEY0_CLASS_CODE_SHIFT;
        ret = niu_class_to_ethflow(class, &fsp->flow_type);
-
        if (ret < 0) {
                netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
                            parent->index);
-               ret = -EINVAL;
                goto out;
        }
 
index 7d8dd0d2182ef9f8d94d1e84b3c7a45f3364347c..a1bbaf6352ba379d209c7fc5cac33c8bfcbcb347 100644 (file)
@@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
        cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
                           port_mask, ALE_VLAN, slave->port_vlan, 0);
        cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
-               priv->host_port, ALE_VLAN, slave->port_vlan);
+               priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
 }
 
 static void soft_reset_slave(struct cpsw_slave *slave)
@@ -2466,6 +2466,7 @@ static int cpsw_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int cpsw_suspend(struct device *dev)
 {
        struct platform_device  *pdev = to_platform_device(dev);
@@ -2518,11 +2519,9 @@ static int cpsw_resume(struct device *dev)
        }
        return 0;
 }
+#endif
 
-static const struct dev_pm_ops cpsw_pm_ops = {
-       .suspend        = cpsw_suspend,
-       .resume         = cpsw_resume,
-};
+static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
 
 static const struct of_device_id cpsw_of_mtable[] = {
        { .compatible = "ti,cpsw", },
index 98655b44b97e2d7690ef2fa28156730697098d2b..c00084d689f3ba99fe846c2e50f5b21daec73189 100644 (file)
@@ -423,6 +423,7 @@ static int davinci_mdio_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int davinci_mdio_suspend(struct device *dev)
 {
        struct davinci_mdio_data *data = dev_get_drvdata(dev);
@@ -464,10 +465,10 @@ static int davinci_mdio_resume(struct device *dev)
 
        return 0;
 }
+#endif
 
 static const struct dev_pm_ops davinci_mdio_pm_ops = {
-       .suspend_late   = davinci_mdio_suspend,
-       .resume_early   = davinci_mdio_resume,
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
 };
 
 #if IS_ENABLED(CONFIG_OF)
index f7e0f0f7c2e27dd19b2cbc674644cd4678074c2c..9e16a2819d4850938389c924e18f56184fedd946 100644 (file)
@@ -938,7 +938,7 @@ static void eth_set_mcast_list(struct net_device *dev)
        int i;
        static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
 
-       if (dev->flags & IFF_ALLMULTI) {
+       if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
                for (i = 0; i < ETH_ALEN; i++) {
                        __raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
                        __raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
index e40fdfccc9c10df4ea8676a1dd59275d5d9c6b88..27ecc5c4fa2665cd42ac1ca81717255f85507113 100644 (file)
@@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
        } /* else everything is zero */
 }
 
+/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
+#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
+
 /* Get packet from user space buffer */
 static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                                struct iov_iter *from, int noblock)
 {
-       int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
+       int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
        struct sk_buff *skb;
        struct macvlan_dev *vlan;
        unsigned long total_len = iov_iter_count(from);
@@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                        linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
        }
 
-       skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
+       skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
                                linear, noblock, &err);
        if (!skb)
                goto err;
index 9e3af54c90102a2c113596d326d893670b7e6c24..32efbd48f32642ddabb21126384b0c21e160a403 100644 (file)
@@ -92,6 +92,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define XGBE_PHY_CDR_RATE_PROPERTY     "amd,serdes-cdr-rate"
 #define XGBE_PHY_PQ_SKEW_PROPERTY      "amd,serdes-pq-skew"
 #define XGBE_PHY_TX_AMP_PROPERTY       "amd,serdes-tx-amp"
+#define XGBE_PHY_DFE_CFG_PROPERTY      "amd,serdes-dfe-tap-config"
+#define XGBE_PHY_DFE_ENA_PROPERTY      "amd,serdes-dfe-tap-enable"
 
 #define XGBE_PHY_SPEEDS                        3
 #define XGBE_PHY_SPEED_1000            0
@@ -177,10 +179,12 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define SPEED_10000_BLWC               0
 #define SPEED_10000_CDR                        0x7
 #define SPEED_10000_PLL                        0x1
-#define SPEED_10000_PQ                 0x1e
+#define SPEED_10000_PQ                 0x12
 #define SPEED_10000_RATE               0x0
 #define SPEED_10000_TXAMP              0xa
 #define SPEED_10000_WORD               0x7
+#define SPEED_10000_DFE_TAP_CONFIG     0x1
+#define SPEED_10000_DFE_TAP_ENABLE     0x7f
 
 #define SPEED_2500_BLWC                        1
 #define SPEED_2500_CDR                 0x2
@@ -189,6 +193,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define SPEED_2500_RATE                        0x1
 #define SPEED_2500_TXAMP               0xf
 #define SPEED_2500_WORD                        0x1
+#define SPEED_2500_DFE_TAP_CONFIG      0x3
+#define SPEED_2500_DFE_TAP_ENABLE      0x0
 
 #define SPEED_1000_BLWC                        1
 #define SPEED_1000_CDR                 0x2
@@ -197,16 +203,25 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define SPEED_1000_RATE                        0x3
 #define SPEED_1000_TXAMP               0xf
 #define SPEED_1000_WORD                        0x1
+#define SPEED_1000_DFE_TAP_CONFIG      0x3
+#define SPEED_1000_DFE_TAP_ENABLE      0x0
 
 /* SerDes RxTx register offsets */
+#define RXTX_REG6                      0x0018
 #define RXTX_REG20                     0x0050
+#define RXTX_REG22                     0x0058
 #define RXTX_REG114                    0x01c8
+#define RXTX_REG129                    0x0204
 
 /* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX     8
+#define RXTX_REG6_RESETB_RXD_WIDTH     1
 #define RXTX_REG20_BLWC_ENA_INDEX      2
 #define RXTX_REG20_BLWC_ENA_WIDTH      1
 #define RXTX_REG114_PQ_REG_INDEX       9
 #define RXTX_REG114_PQ_REG_WIDTH       7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
 
 /* Bit setting and getting macros
  *  The get macro will extract the current bit field value from within
@@ -333,6 +348,18 @@ static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
        SPEED_10000_TXAMP,
 };
 
+static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
+       SPEED_1000_DFE_TAP_CONFIG,
+       SPEED_2500_DFE_TAP_CONFIG,
+       SPEED_10000_DFE_TAP_CONFIG,
+};
+
+static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
+       SPEED_1000_DFE_TAP_ENABLE,
+       SPEED_2500_DFE_TAP_ENABLE,
+       SPEED_10000_DFE_TAP_ENABLE,
+};
+
 enum amd_xgbe_phy_an {
        AMD_XGBE_AN_READY = 0,
        AMD_XGBE_AN_PAGE_RECEIVED,
@@ -393,6 +420,8 @@ struct amd_xgbe_phy_priv {
        u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
        u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
        u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
+       u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
+       u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
 
        /* Auto-negotiation state machine support */
        struct mutex an_mutex;
@@ -481,11 +510,16 @@ static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
                status = XSIR0_IOREAD(priv, SIR0_STATUS);
                if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
                    XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
-                       return;
+                       goto rx_reset;
        }
 
        netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
                   status);
+
+rx_reset:
+       /* Perform Rx reset for the DFE changes */
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
 }
 
 static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
@@ -534,6 +568,10 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
                           priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
        XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
                           priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+                          priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
+       XRXTX_IOWRITE(priv, RXTX_REG22,
+                     priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);
 
        amd_xgbe_phy_serdes_complete_ratechange(phydev);
 
@@ -586,6 +624,10 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
                           priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
        XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
                           priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+                          priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
+       XRXTX_IOWRITE(priv, RXTX_REG22,
+                     priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);
 
        amd_xgbe_phy_serdes_complete_ratechange(phydev);
 
@@ -638,6 +680,10 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
                           priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
        XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
                           priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
+       XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+                          priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
+       XRXTX_IOWRITE(priv, RXTX_REG22,
+                     priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);
 
        amd_xgbe_phy_serdes_complete_ratechange(phydev);
 
@@ -1668,6 +1714,38 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
                       sizeof(priv->serdes_tx_amp));
        }
 
+       if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
+               ret = device_property_read_u32_array(phy_dev,
+                                                    XGBE_PHY_DFE_CFG_PROPERTY,
+                                                    priv->serdes_dfe_tap_cfg,
+                                                    XGBE_PHY_SPEEDS);
+               if (ret) {
+                       dev_err(dev, "invalid %s property\n",
+                               XGBE_PHY_DFE_CFG_PROPERTY);
+                       goto err_sir1;
+               }
+       } else {
+               memcpy(priv->serdes_dfe_tap_cfg,
+                      amd_xgbe_phy_serdes_dfe_tap_cfg,
+                      sizeof(priv->serdes_dfe_tap_cfg));
+       }
+
+       if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
+               ret = device_property_read_u32_array(phy_dev,
+                                                    XGBE_PHY_DFE_ENA_PROPERTY,
+                                                    priv->serdes_dfe_tap_ena,
+                                                    XGBE_PHY_SPEEDS);
+               if (ret) {
+                       dev_err(dev, "invalid %s property\n",
+                               XGBE_PHY_DFE_ENA_PROPERTY);
+                       goto err_sir1;
+               }
+       } else {
+               memcpy(priv->serdes_dfe_tap_ena,
+                      amd_xgbe_phy_serdes_dfe_tap_ena,
+                      sizeof(priv->serdes_dfe_tap_ena));
+       }
+
        phydev->priv = priv;
 
        if (!priv->adev || acpi_disabled)
index cdcac6aa4260b32927d7c903e024b42e5d17861e..52cd8db2c57daad2767dec72149f4cdabbcf6917 100644 (file)
@@ -235,6 +235,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
        return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
 }
 
+/**
+ * phy_check_valid - check if there is a valid PHY setting which matches
+ *                  speed, duplex, and feature mask
+ * @speed: speed to match
+ * @duplex: duplex to match
+ * @features: A mask of the valid settings
+ *
+ * Description: Returns true if there is a valid setting, false otherwise.
+ */
+static inline bool phy_check_valid(int speed, int duplex, u32 features)
+{
+       unsigned int idx;
+
+       idx = phy_find_valid(phy_find_setting(speed, duplex), features);
+
+       return settings[idx].speed == speed && settings[idx].duplex == duplex &&
+               (settings[idx].setting & features);
+}
+
 /**
  * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
  * @phydev: the target phy_device struct
@@ -1045,7 +1064,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                int eee_lp, eee_cap, eee_adv;
                u32 lp, cap, adv;
                int status;
-               unsigned int idx;
 
                /* Read phy status to properly get the right settings */
                status = phy_read_status(phydev);
@@ -1077,8 +1095,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
 
                adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
                lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
-               idx = phy_find_setting(phydev->speed, phydev->duplex);
-               if (!(lp & adv & settings[idx].setting))
+               if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
                        goto eee_exit_err;
 
                if (clk_stop_enable) {
index 0e62274e884a89de170d668795b40c3620d41046..f1ee71e2224158088abf0716ef719407c03a20a3 100644 (file)
@@ -43,9 +43,7 @@
 
 static struct team_port *team_port_get_rcu(const struct net_device *dev)
 {
-       struct team_port *port = rcu_dereference(dev->rx_handler_data);
-
-       return team_port_exists(dev) ? port : NULL;
+       return rcu_dereference(dev->rx_handler_data);
 }
 
 static struct team_port *team_port_get_rtnl(const struct net_device *dev)
index 3bd9678315ad651beccd96ca1b5d74bcd1e68c1e..7ba8d0885f120156c47f44884212a2fd73f604b9 100644 (file)
@@ -161,6 +161,7 @@ config USB_NET_AX8817X
            * Linksys USB200M
            * Netgear FA120
            * Sitecom LN-029
+           * Sitecom LN-028
            * Intellinet USB 2.0 Ethernet
            * ST Lab USB 2.0 Ethernet
            * TrendNet TU2-ET100
index bf49792062a2b40c2f1bd2f5a06e6eff8954ab90..1173a24feda38c3af236c84acaf8982f39c0e0b1 100644 (file)
@@ -978,6 +978,10 @@ static const struct usb_device_id  products [] = {
        // Sitecom LN-031 "USB 2.0 10/100/1000 Ethernet adapter"
        USB_DEVICE (0x0df6, 0x0056),
        .driver_info =  (unsigned long) &ax88178_info,
+}, {
+       // Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter"
+       USB_DEVICE (0x0df6, 0x061c),
+       .driver_info =  (unsigned long) &ax88178_info,
 }, {
        // corega FEther USB2-TX
        USB_DEVICE (0x07aa, 0x0017),
index 9cdfb3fe9c156ba775d41a9d6d343ddbb5fc9b60..778e91531fac7f35480208ba35f6ae3e6c9ad5b2 100644 (file)
@@ -1594,7 +1594,7 @@ hso_wait_modem_status(struct hso_serial *serial, unsigned long arg)
                }
                cprev = cnow;
        }
-       current->state = TASK_RUNNING;
+       __set_current_state(TASK_RUNNING);
        remove_wait_queue(&tiocmget->waitq, &wait);
 
        return ret;
index 3d18bb0eee8528ece6509ec3848c3044ff5804ce..1bfe0fcaccf5ba31bf125f898ec6c624f506206e 100644 (file)
@@ -134,6 +134,11 @@ static const struct usb_device_id  products [] = {
 }, {
        USB_DEVICE(0x050d, 0x258a),     /* Belkin F5U258/F5U279 (PL-25A1) */
        .driver_info =  (unsigned long) &prolific_info,
+}, {
+       USB_DEVICE(0x3923, 0x7825),     /* National Instruments USB
+                                        * Host-to-Host Cable
+                                        */
+       .driver_info =  (unsigned long) &prolific_info,
 },
 
        { },            // END
index 83c39e2858bf70a1673cf2c6d9813a92f25ce4d3..88d121d43c08bedf2efc3265964188cf2b7f94a7 100644 (file)
@@ -806,21 +806,21 @@ static ssize_t cosa_read(struct file *file,
        spin_lock_irqsave(&cosa->lock, flags);
        add_wait_queue(&chan->rxwaitq, &wait);
        while (!chan->rx_status) {
-               current->state = TASK_INTERRUPTIBLE;
+               set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_irqrestore(&cosa->lock, flags);
                schedule();
                spin_lock_irqsave(&cosa->lock, flags);
                if (signal_pending(current) && chan->rx_status == 0) {
                        chan->rx_status = 1;
                        remove_wait_queue(&chan->rxwaitq, &wait);
-                       current->state = TASK_RUNNING;
+                       __set_current_state(TASK_RUNNING);
                        spin_unlock_irqrestore(&cosa->lock, flags);
                        mutex_unlock(&chan->rlock);
                        return -ERESTARTSYS;
                }
        }
        remove_wait_queue(&chan->rxwaitq, &wait);
-       current->state = TASK_RUNNING;
+       __set_current_state(TASK_RUNNING);
        kbuf = chan->rxdata;
        count = chan->rxsize;
        spin_unlock_irqrestore(&cosa->lock, flags);
@@ -890,14 +890,14 @@ static ssize_t cosa_write(struct file *file,
        spin_lock_irqsave(&cosa->lock, flags);
        add_wait_queue(&chan->txwaitq, &wait);
        while (!chan->tx_status) {
-               current->state = TASK_INTERRUPTIBLE;
+               set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_irqrestore(&cosa->lock, flags);
                schedule();
                spin_lock_irqsave(&cosa->lock, flags);
                if (signal_pending(current) && chan->tx_status == 0) {
                        chan->tx_status = 1;
                        remove_wait_queue(&chan->txwaitq, &wait);
-                       current->state = TASK_RUNNING;
+                       __set_current_state(TASK_RUNNING);
                        chan->tx_status = 1;
                        spin_unlock_irqrestore(&cosa->lock, flags);
                        up(&chan->wsem);
@@ -905,7 +905,7 @@ static ssize_t cosa_write(struct file *file,
                }
        }
        remove_wait_queue(&chan->txwaitq, &wait);
-       current->state = TASK_RUNNING;
+       __set_current_state(TASK_RUNNING);
        up(&chan->wsem);
        spin_unlock_irqrestore(&cosa->lock, flags);
        kfree(kbuf);
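The hso and cosa conversions above replace open-coded writes to current->state with set_current_state() and __set_current_state(). set_current_state() contains a memory barrier, so the state change cannot be reordered against the following condition re-check, which avoids the classic lost-wakeup race; the __set_current_state() form is enough where no ordering is needed, such as restoring TASK_RUNNING after the wait. A generic wait-loop sketch with hypothetical waitqueue and condition names:

        #include <linux/errno.h>
        #include <linux/sched.h>
        #include <linux/wait.h>

        static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
        static bool example_ready;              /* set by the waker before wake_up() */

        static int example_wait_for_ready(void)
        {
                DECLARE_WAITQUEUE(wait, current);
                int ret = 0;

                add_wait_queue(&example_waitq, &wait);
                while (!example_ready) {
                        /* barrier inside set_current_state() pairs with the waker */
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (example_ready)
                                break;
                        if (signal_pending(current)) {
                                ret = -ERESTARTSYS;
                                break;
                        }
                        schedule();
                }
                __set_current_state(TASK_RUNNING);      /* plain store is enough here */
                remove_wait_queue(&example_waitq, &wait);

                return ret;
        }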
index 4a4c6586a8d2dcda2b6f49a5b767bcc48304e138..8908be6dbc48233db9183247e1928bfab6eaa491 100644 (file)
@@ -946,7 +946,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
                goto nla_put_failure;
 
        genlmsg_end(skb, msg_head);
-       genlmsg_unicast(&init_net, skb, dst_portid);
+       if (genlmsg_unicast(&init_net, skb, dst_portid))
+               goto err_free_txskb;
 
        /* Enqueue the packet */
        skb_queue_tail(&data->pending, my_skb);
@@ -955,6 +956,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
        return;
 
 nla_put_failure:
+       nlmsg_free(skb);
+err_free_txskb:
        printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
        ieee80211_free_txskb(hw, my_skb);
        data->tx_failed++;
index f7a31d2cb3f1819fdf7ebdeb40e0f6bf44aabe0a..c4d68d7684087f6ec66400047be4991c5e51cfe6 100644 (file)
@@ -655,9 +655,15 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
        unsigned long flags;
 
        do {
+               int notify;
+
                spin_lock_irqsave(&queue->response_lock, flags);
                make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+               RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
                spin_unlock_irqrestore(&queue->response_lock, flags);
+               if (notify)
+                       notify_remote_via_irq(queue->tx_irq);
+
                if (cons == end)
                        break;
                txp = RING_GET_REQUEST(&queue->tx, cons++);
@@ -1649,17 +1655,28 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 {
        struct pending_tx_info *pending_tx_info;
        pending_ring_idx_t index;
+       int notify;
        unsigned long flags;
 
        pending_tx_info = &queue->pending_tx_info[pending_idx];
+
        spin_lock_irqsave(&queue->response_lock, flags);
+
        make_tx_response(queue, &pending_tx_info->req, status);
-       index = pending_index(queue->pending_prod);
+
+       /* Release the pending index before pushing the Tx response so
+        * it's available before a new Tx request is pushed by the
+        * frontend.
+        */
+       index = pending_index(queue->pending_prod++);
        queue->pending_ring[index] = pending_idx;
-       /* TX shouldn't use the index before we give it back here */
-       mb();
-       queue->pending_prod++;
+
+       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+
        spin_unlock_irqrestore(&queue->response_lock, flags);
+
+       if (notify)
+               notify_remote_via_irq(queue->tx_irq);
 }
 
 
@@ -1669,7 +1686,6 @@ static void make_tx_response(struct xenvif_queue *queue,
 {
        RING_IDX i = queue->tx.rsp_prod_pvt;
        struct xen_netif_tx_response *resp;
-       int notify;
 
        resp = RING_GET_RESPONSE(&queue->tx, i);
        resp->id     = txp->id;
@@ -1679,9 +1695,6 @@ static void make_tx_response(struct xenvif_queue *queue,
                RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
 
        queue->tx.rsp_prod_pvt = ++i;
-       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
-       if (notify)
-               notify_remote_via_irq(queue->tx_irq);
 }
 
 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
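Both xen-netback hunks converge on the same shape: make_tx_response() now only fills in the response and advances rsp_prod_pvt, while its callers push the responses and test for a needed event-channel notification while still holding response_lock, then notify the frontend outside the lock. An illustrative consolidation of that calling pattern, built only from the helpers already visible above (not an additional function from the patch):

        /* Illustrative only: complete a Tx request, push the response while
         * response_lock is held, and kick the frontend afterwards if required.
         */
        static void example_complete_tx(struct xenvif_queue *queue,
                                        struct xen_netif_tx_request *txp, s8 status)
        {
                unsigned long flags;
                int notify;

                spin_lock_irqsave(&queue->response_lock, flags);
                make_tx_response(queue, txp, status);
                RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
                spin_unlock_irqrestore(&queue->response_lock, flags);

                if (notify)
                        notify_remote_via_irq(queue->tx_irq);
        }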
index 1ec694a52379ea4c753cc1b397c931bc940bb1cc..464bf492ee2ae375e8fe92e0a5446dbdf0461f9d 100644 (file)
@@ -80,7 +80,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
        if (err)
                return err;
 
-       resource_list_for_each_entry(win, res, list) {
+       resource_list_for_each_entry(win, res) {
                struct resource *parent, *res = win->res;
 
                switch (resource_type(res)) {
index 70a5d94cc766af5302dfaf945dfb4a1a36b48236..b4f7744f67510b87be03400d270c7328e6c77912 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/suspend.h>
 #include <linux/uaccess.h>
 
 #include "rtc-at91rm9200.h"
@@ -54,6 +55,10 @@ static void __iomem *at91_rtc_regs;
 static int irq;
 static DEFINE_SPINLOCK(at91_rtc_lock);
 static u32 at91_rtc_shadow_imr;
+static bool suspended;
+static DEFINE_SPINLOCK(suspended_lock);
+static unsigned long cached_events;
+static u32 at91_rtc_imr;
 
 static void at91_rtc_write_ier(u32 mask)
 {
@@ -290,7 +295,9 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
        struct rtc_device *rtc = platform_get_drvdata(pdev);
        unsigned int rtsr;
        unsigned long events = 0;
+       int ret = IRQ_NONE;
 
+       spin_lock(&suspended_lock);
        rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr();
        if (rtsr) {             /* this interrupt is shared!  Is it ours? */
                if (rtsr & AT91_RTC_ALARM)
@@ -304,14 +311,22 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
 
                at91_rtc_write(AT91_RTC_SCCR, rtsr);    /* clear status reg */
 
-               rtc_update_irq(rtc, 1, events);
+               if (!suspended) {
+                       rtc_update_irq(rtc, 1, events);
 
-               dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n", __func__,
-                       events >> 8, events & 0x000000FF);
+                       dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n",
+                               __func__, events >> 8, events & 0x000000FF);
+               } else {
+                       cached_events |= events;
+                       at91_rtc_write_idr(at91_rtc_imr);
+                       pm_system_wakeup();
+               }
 
-               return IRQ_HANDLED;
+               ret = IRQ_HANDLED;
        }
-       return IRQ_NONE;                /* not handled */
+       spin_unlock(&suspended_lock);
+
+       return ret;
 }
 
 static const struct at91_rtc_config at91rm9200_config = {
@@ -401,8 +416,8 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
                                        AT91_RTC_CALEV);
 
        ret = devm_request_irq(&pdev->dev, irq, at91_rtc_interrupt,
-                               IRQF_SHARED,
-                               "at91_rtc", pdev);
+                              IRQF_SHARED | IRQF_COND_SUSPEND,
+                              "at91_rtc", pdev);
        if (ret) {
                dev_err(&pdev->dev, "IRQ %d already in use.\n", irq);
                return ret;
@@ -454,8 +469,6 @@ static void at91_rtc_shutdown(struct platform_device *pdev)
 
 /* AT91RM9200 RTC Power management control */
 
-static u32 at91_rtc_imr;
-
 static int at91_rtc_suspend(struct device *dev)
 {
        /* this IRQ is shared with DBGU and other hardware which isn't
@@ -464,21 +477,42 @@ static int at91_rtc_suspend(struct device *dev)
        at91_rtc_imr = at91_rtc_read_imr()
                        & (AT91_RTC_ALARM|AT91_RTC_SECEV);
        if (at91_rtc_imr) {
-               if (device_may_wakeup(dev))
+               if (device_may_wakeup(dev)) {
+                       unsigned long flags;
+
                        enable_irq_wake(irq);
-               else
+
+                       spin_lock_irqsave(&suspended_lock, flags);
+                       suspended = true;
+                       spin_unlock_irqrestore(&suspended_lock, flags);
+               } else {
                        at91_rtc_write_idr(at91_rtc_imr);
+               }
        }
        return 0;
 }
 
 static int at91_rtc_resume(struct device *dev)
 {
+       struct rtc_device *rtc = dev_get_drvdata(dev);
+
        if (at91_rtc_imr) {
-               if (device_may_wakeup(dev))
+               if (device_may_wakeup(dev)) {
+                       unsigned long flags;
+
+                       spin_lock_irqsave(&suspended_lock, flags);
+
+                       if (cached_events) {
+                               rtc_update_irq(rtc, 1, cached_events);
+                               cached_events = 0;
+                       }
+
+                       suspended = false;
+                       spin_unlock_irqrestore(&suspended_lock, flags);
+
                        disable_irq_wake(irq);
-               else
-                       at91_rtc_write_ier(at91_rtc_imr);
+               }
+               at91_rtc_write_ier(at91_rtc_imr);
        }
        return 0;
 }
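This driver and the AT91 drivers that follow share one pattern for a shared interrupt that is also a wakeup source: the IRQ is requested with IRQF_SHARED | IRQF_COND_SUSPEND, the handler checks a "suspended" flag under a spinlock, and while suspended it only caches the events, masks the peripheral and calls pm_system_wakeup(); the cached events are replayed on resume. A stripped-down sketch of that pattern, with hypothetical names and stub hardware accessors:

        #include <linux/interrupt.h>
        #include <linux/spinlock.h>
        #include <linux/suspend.h>

        static DEFINE_SPINLOCK(example_lock);
        static bool example_suspended;          /* true between suspend and resume */
        static unsigned long example_cached;    /* events seen while suspended */

        /* Stand-ins for reading/acking the status register and reporting events. */
        static unsigned long example_hw_read_and_ack(void) { return 0; }
        static void example_report(unsigned long events) { }

        static irqreturn_t example_irq(int irq, void *dev_id)
        {
                unsigned long events;
                irqreturn_t ret = IRQ_NONE;

                spin_lock(&example_lock);
                events = example_hw_read_and_ack();
                if (events) {
                        if (example_suspended) {
                                example_cached |= events;
                                pm_system_wakeup();     /* turn the IRQ into a wakeup */
                        } else {
                                example_report(events);
                        }
                        ret = IRQ_HANDLED;
                }
                spin_unlock(&example_lock);

                return ret;
        }

        static int example_resume(void)
        {
                unsigned long flags;

                spin_lock_irqsave(&example_lock, flags);
                example_suspended = false;
                if (example_cached) {
                        example_report(example_cached);
                        example_cached = 0;
                }
                spin_unlock_irqrestore(&example_lock, flags);
                return 0;
        }

        /* probe: request_irq(irq, example_irq, IRQF_SHARED | IRQF_COND_SUSPEND, ...) */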
index 2183fd2750abd9d4b388ca4a5f48916600356122..5ccaee32df7223ad1aeb2d8cb74233caa7fae351 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/io.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
+#include <linux/suspend.h>
 #include <linux/clk.h>
 
 /*
@@ -77,6 +78,9 @@ struct sam9_rtc {
        unsigned int            gpbr_offset;
        int                     irq;
        struct clk              *sclk;
+       bool                    suspended;
+       unsigned long           events;
+       spinlock_t              lock;
 };
 
 #define rtt_readl(rtc, field) \
@@ -271,14 +275,9 @@ static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
        return 0;
 }
 
-/*
- * IRQ handler for the RTC
- */
-static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
+static irqreturn_t at91_rtc_cache_events(struct sam9_rtc *rtc)
 {
-       struct sam9_rtc *rtc = _rtc;
        u32 sr, mr;
-       unsigned long events = 0;
 
        /* Shared interrupt may be for another device.  Note: reading
         * SR clears it, so we must only read it in this irq handler!
@@ -290,18 +289,54 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
 
        /* alarm status */
        if (sr & AT91_RTT_ALMS)
-               events |= (RTC_AF | RTC_IRQF);
+               rtc->events |= (RTC_AF | RTC_IRQF);
 
        /* timer update/increment */
        if (sr & AT91_RTT_RTTINC)
-               events |= (RTC_UF | RTC_IRQF);
+               rtc->events |= (RTC_UF | RTC_IRQF);
+
+       return IRQ_HANDLED;
+}
+
+static void at91_rtc_flush_events(struct sam9_rtc *rtc)
+{
+       if (!rtc->events)
+               return;
 
-       rtc_update_irq(rtc->rtcdev, 1, events);
+       rtc_update_irq(rtc->rtcdev, 1, rtc->events);
 
        pr_debug("%s: num=%ld, events=0x%02lx\n", __func__,
-               events >> 8, events & 0x000000FF);
+               rtc->events >> 8, rtc->events & 0x000000FF);
+       rtc->events = 0;
+}
 
-       return IRQ_HANDLED;
+/*
+ * IRQ handler for the RTC
+ */
+static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
+{
+       struct sam9_rtc *rtc = _rtc;
+       int ret;
+
+       spin_lock(&rtc->lock);
+
+       ret = at91_rtc_cache_events(rtc);
+
+       /* We're called in suspended state */
+       if (rtc->suspended) {
+               /* Mask irqs coming from this peripheral */
+               rtt_writel(rtc, MR,
+                          rtt_readl(rtc, MR) &
+                          ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
+               /* Trigger a system wakeup */
+               pm_system_wakeup();
+       } else {
+               at91_rtc_flush_events(rtc);
+       }
+
+       spin_unlock(&rtc->lock);
+
+       return ret;
 }
 
 static const struct rtc_class_ops at91_rtc_ops = {
@@ -421,7 +456,8 @@ static int at91_rtc_probe(struct platform_device *pdev)
 
        /* register irq handler after we know what name we'll use */
        ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt,
-                               IRQF_SHARED, dev_name(&rtc->rtcdev->dev), rtc);
+                              IRQF_SHARED | IRQF_COND_SUSPEND,
+                              dev_name(&rtc->rtcdev->dev), rtc);
        if (ret) {
                dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq);
                return ret;
@@ -482,7 +518,12 @@ static int at91_rtc_suspend(struct device *dev)
        rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
        if (rtc->imr) {
                if (device_may_wakeup(dev) && (mr & AT91_RTT_ALMIEN)) {
+                       unsigned long flags;
+
                        enable_irq_wake(rtc->irq);
+                       spin_lock_irqsave(&rtc->lock, flags);
+                       rtc->suspended = true;
+                       spin_unlock_irqrestore(&rtc->lock, flags);
                        /* don't let RTTINC cause wakeups */
                        if (mr & AT91_RTT_RTTINCIEN)
                                rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
@@ -499,10 +540,18 @@ static int at91_rtc_resume(struct device *dev)
        u32             mr;
 
        if (rtc->imr) {
+               unsigned long flags;
+
                if (device_may_wakeup(dev))
                        disable_irq_wake(rtc->irq);
                mr = rtt_readl(rtc, MR);
                rtt_writel(rtc, MR, mr | rtc->imr);
+
+               spin_lock_irqsave(&rtc->lock, flags);
+               rtc->suspended = false;
+               at91_rtc_cache_events(rtc);
+               at91_rtc_flush_events(rtc);
+               spin_unlock_irqrestore(&rtc->lock, flags);
        }
 
        return 0;
index 846552bff67d6f005c3966e80368134dcd9ab27f..4e959c43f6804d12f8677d916e1f050bdc740457 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/err.h>
 #include <linux/irq.h>
+#include <linux/suspend.h>
 
 #include <asm/io.h>
 #include <asm/ioctls.h>
@@ -173,6 +174,12 @@ struct atmel_uart_port {
        bool                    ms_irq_enabled;
        bool                    is_usart;       /* usart or uart */
        struct timer_list       uart_timer;     /* uart timer */
+
+       bool                    suspended;
+       unsigned int            pending;
+       unsigned int            pending_status;
+       spinlock_t              lock_suspended;
+
        int (*prepare_rx)(struct uart_port *port);
        int (*prepare_tx)(struct uart_port *port);
        void (*schedule_rx)(struct uart_port *port);
@@ -1179,12 +1186,15 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
 {
        struct uart_port *port = dev_id;
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
-       unsigned int status, pending, pass_counter = 0;
+       unsigned int status, pending, mask, pass_counter = 0;
        bool gpio_handled = false;
 
+       spin_lock(&atmel_port->lock_suspended);
+
        do {
                status = atmel_get_lines_status(port);
-               pending = status & UART_GET_IMR(port);
+               mask = UART_GET_IMR(port);
+               pending = status & mask;
                if (!gpio_handled) {
                        /*
                         * Dealing with GPIO interrupt
@@ -1206,11 +1216,21 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
                if (!pending)
                        break;
 
+               if (atmel_port->suspended) {
+                       atmel_port->pending |= pending;
+                       atmel_port->pending_status = status;
+                       UART_PUT_IDR(port, mask);
+                       pm_system_wakeup();
+                       break;
+               }
+
                atmel_handle_receive(port, pending);
                atmel_handle_status(port, pending, status);
                atmel_handle_transmit(port, pending);
        } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
 
+       spin_unlock(&atmel_port->lock_suspended);
+
        return pass_counter ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -1742,7 +1762,8 @@ static int atmel_startup(struct uart_port *port)
        /*
         * Allocate the IRQ
         */
-       retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED,
+       retval = request_irq(port->irq, atmel_interrupt,
+                       IRQF_SHARED | IRQF_COND_SUSPEND,
                        tty ? tty->name : "atmel_serial", port);
        if (retval) {
                dev_err(port->dev, "atmel_startup - Can't get irq\n");
@@ -2513,8 +2534,14 @@ static int atmel_serial_suspend(struct platform_device *pdev,
 
        /* we can not wake up if we're running on slow clock */
        atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
-       if (atmel_serial_clk_will_stop())
+       if (atmel_serial_clk_will_stop()) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&atmel_port->lock_suspended, flags);
+               atmel_port->suspended = true;
+               spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
                device_set_wakeup_enable(&pdev->dev, 0);
+       }
 
        uart_suspend_port(&atmel_uart, port);
 
@@ -2525,6 +2552,18 @@ static int atmel_serial_resume(struct platform_device *pdev)
 {
        struct uart_port *port = platform_get_drvdata(pdev);
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+       unsigned long flags;
+
+       spin_lock_irqsave(&atmel_port->lock_suspended, flags);
+       if (atmel_port->pending) {
+               atmel_handle_receive(port, atmel_port->pending);
+               atmel_handle_status(port, atmel_port->pending,
+                                   atmel_port->pending_status);
+               atmel_handle_transmit(port, atmel_port->pending);
+               atmel_port->pending = 0;
+       }
+       atmel_port->suspended = false;
+       spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
 
        uart_resume_port(&atmel_uart, port);
        device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
@@ -2593,6 +2632,8 @@ static int atmel_serial_probe(struct platform_device *pdev)
        port->backup_imr = 0;
        port->uart.line = ret;
 
+       spin_lock_init(&port->lock_suspended);
+
        ret = atmel_init_gpios(port, &pdev->dev);
        if (ret < 0)
                dev_err(&pdev->dev, "%s",
index afa06d28725dad3960aed1df9dd4f9e9ddc53493..2bbfc25e582cb8b334a1ef4083b22da56c48cc65 100644 (file)
@@ -591,11 +591,6 @@ static void handle_rx(struct vhost_net *net)
                         * TODO: support TSO.
                         */
                        iov_iter_advance(&msg.msg_iter, vhost_hlen);
-               } else {
-                       /* It'll come from socket; we'll need to patch
-                        * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
-                        */
-                       iov_iter_advance(&fixup, sizeof(hdr));
                }
                err = sock->ops->recvmsg(NULL, sock, &msg,
                                         sock_len, MSG_DONTWAIT | MSG_TRUNC);
@@ -609,17 +604,25 @@ static void handle_rx(struct vhost_net *net)
                        continue;
                }
                /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
-               if (unlikely(vhost_hlen) &&
-                   copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) {
-                       vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
-                              vq->iov->iov_base);
-                       break;
+               if (unlikely(vhost_hlen)) {
+                       if (copy_to_iter(&hdr, sizeof(hdr),
+                                        &fixup) != sizeof(hdr)) {
+                               vq_err(vq, "Unable to write vnet_hdr "
+                                      "at addr %p\n", vq->iov->iov_base);
+                               break;
+                       }
+               } else {
+                       /* Header came from socket; we'll need to patch
+                        * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
+                        */
+                       iov_iter_advance(&fixup, sizeof(hdr));
                }
                /* TODO: Should check and handle checksum. */
 
                num_buffers = cpu_to_vhost16(vq, headcount);
                if (likely(mergeable) &&
-                   copy_to_iter(&num_buffers, 2, &fixup) != 2) {
+                   copy_to_iter(&num_buffers, sizeof num_buffers,
+                                &fixup) != sizeof num_buffers) {
                        vq_err(vq, "Failed num_buffers write");
                        vhost_discard_vq_desc(vq, headcount);
                        break;
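The restructured receive path relies on struct iov_iter being copyable by value: a "fixup" copy stays positioned at the virtio-net header slot while the live iterator is advanced past it, and the header (and, for mergeable buffers, num_buffers) is patched through the saved copy once the packet has actually been received. A simplified illustration of that idea, not the driver code itself:

        #include <linux/kernel.h>
        #include <linux/types.h>
        #include <linux/uio.h>

        struct example_hdr {
                u8      flags;
                u16     num_buffers;
        } __packed;

        static void example_fill(struct iov_iter *iter)
        {
                struct example_hdr hdr = { 0 };
                struct iov_iter fixup = *iter;  /* remembers the header position */

                /* Skip the header slot; the payload will be written after it. */
                iov_iter_advance(iter, sizeof(hdr));

                /* ... receive the payload through *iter, learn the final values ... */
                hdr.num_buffers = 1;

                /* Patch the header in place through the saved iterator copy. */
                if (copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr))
                        pr_err("example: short header write\n");
        }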
index 6df940528fd21b0cef8524004645fe472b7f7061..1443b3c391de497c05fe332f1c4cdd067bc5f5c9 100644 (file)
@@ -208,7 +208,8 @@ static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
 
        if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) {
                err = request_irq(wdt->irq, wdt_interrupt,
-                                 IRQF_SHARED | IRQF_IRQPOLL,
+                                 IRQF_SHARED | IRQF_IRQPOLL |
+                                 IRQF_NO_SUSPEND,
                                  pdev->name, wdt);
                if (err)
                        return err;
index 90d1882b306face4c53d10ed08c44e3ec77b726a..5ba029e627cc22db0648317a619685e545977316 100644 (file)
@@ -124,7 +124,7 @@ ecryptfs_get_key_payload_data(struct key *key)
 }
 
 #define ECRYPTFS_MAX_KEYSET_SIZE 1024
-#define ECRYPTFS_MAX_CIPHER_NAME_SIZE 32
+#define ECRYPTFS_MAX_CIPHER_NAME_SIZE 31
 #define ECRYPTFS_MAX_NUM_ENC_KEYS 64
 #define ECRYPTFS_MAX_IV_BYTES 16       /* 128 bits */
 #define ECRYPTFS_SALT_BYTES 2
@@ -237,7 +237,7 @@ struct ecryptfs_crypt_stat {
        struct crypto_ablkcipher *tfm;
        struct crypto_hash *hash_tfm; /* Crypto context for generating
                                       * the initialization vectors */
-       unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE];
+       unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1];
        unsigned char key[ECRYPTFS_MAX_KEY_BYTES];
        unsigned char root_iv[ECRYPTFS_MAX_IV_BYTES];
        struct list_head keysig_list;
index b07731e68c0b4d39cf75a5840033638cf37c123f..fd39bad6f1bdf8bbcb4321a8fc8ff1934d67167c 100644 (file)
@@ -303,9 +303,22 @@ ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        struct file *lower_file = ecryptfs_file_to_lower(file);
        long rc = -ENOTTY;
 
-       if (lower_file->f_op->unlocked_ioctl)
+       if (!lower_file->f_op->unlocked_ioctl)
+               return rc;
+
+       switch (cmd) {
+       case FITRIM:
+       case FS_IOC_GETFLAGS:
+       case FS_IOC_SETFLAGS:
+       case FS_IOC_GETVERSION:
+       case FS_IOC_SETVERSION:
                rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
-       return rc;
+               fsstack_copy_attr_all(file_inode(file), file_inode(lower_file));
+
+               return rc;
+       default:
+               return rc;
+       }
 }
 
 #ifdef CONFIG_COMPAT
@@ -315,9 +328,22 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        struct file *lower_file = ecryptfs_file_to_lower(file);
        long rc = -ENOIOCTLCMD;
 
-       if (lower_file->f_op->compat_ioctl)
+       if (!lower_file->f_op->compat_ioctl)
+               return rc;
+
+       switch (cmd) {
+       case FITRIM:
+       case FS_IOC32_GETFLAGS:
+       case FS_IOC32_SETFLAGS:
+       case FS_IOC32_GETVERSION:
+       case FS_IOC32_SETVERSION:
                rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
-       return rc;
+               fsstack_copy_attr_all(file_inode(file), file_inode(lower_file));
+
+               return rc;
+       default:
+               return rc;
+       }
 }
 #endif
 
index 917bd5c9776aabcff5b482f59bd8d316193ead35..6bd67e2011f083e4e184e4d2d336cb15176d40ee 100644 (file)
@@ -891,7 +891,7 @@ struct ecryptfs_parse_tag_70_packet_silly_stack {
        struct blkcipher_desc desc;
        char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1];
        char iv[ECRYPTFS_MAX_IV_BYTES];
-       char cipher_string[ECRYPTFS_MAX_CIPHER_NAME_SIZE];
+       char cipher_string[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1];
 };
 
 /**
index 1895d60f4122c21d0e2ee267066c6308473ba642..c095d32642599f90cf68a9437b9a10674d5cd3d9 100644 (file)
@@ -407,7 +407,7 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options,
        if (!cipher_name_set) {
                int cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER);
 
-               BUG_ON(cipher_name_len >= ECRYPTFS_MAX_CIPHER_NAME_SIZE);
+               BUG_ON(cipher_name_len > ECRYPTFS_MAX_CIPHER_NAME_SIZE);
                strcpy(mount_crypt_stat->global_default_cipher_name,
                       ECRYPTFS_DEFAULT_CIPHER);
        }
index 365c82e1b3a9a602057e65edc9b857c6adce6b76..f1bad681fc1ca3df14eda4be6542088425e901d2 100644 (file)
@@ -1665,7 +1665,8 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
        }