Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
author Linus Torvalds <torvalds@woody.linux-foundation.org>
Fri, 22 Jun 2007 18:10:34 +0000 (11:10 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Fri, 22 Jun 2007 18:10:34 +0000 (11:10 -0700)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/mlx4: Correct max_srq_wr returned from mlx4_ib_query_device()
  IPoIB/cm: Remove dead definition of struct ipoib_cm_id
  IPoIB/cm: Fix interoperability when MTU doesn't match
  IPoIB/cm: Initialize RX before moving QP to RTR
  IB/umem: Fix possible hang on process exit

36 files changed:
arch/i386/Kconfig.debug
arch/i386/mm/init.c
arch/parisc/kernel/unwind.c
arch/x86_64/Kconfig.debug
arch/x86_64/mm/init.c
drivers/ata/ahci.c
drivers/ata/libata-core.c
drivers/ata/pata_amd.c
drivers/ata/pata_it821x.c
drivers/char/agp/intel-agp.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_sysfs.c
drivers/net/bonding/bonding.h
drivers/net/cxgb3/ael1002.c
drivers/net/cxgb3/cxgb3_main.c
drivers/net/cxgb3/regs.h
drivers/net/cxgb3/sge.c
drivers/net/cxgb3/xgmac.c
drivers/net/forcedeth.c
drivers/net/natsemi.c
drivers/net/spider_net.c
drivers/net/spider_net.h
drivers/net/spider_net_ethtool.c
drivers/parisc/led.c
drivers/s390/net/claw.c
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_eddp.c
drivers/s390/net/qeth_main.c
include/asm-parisc/system.h
include/linux/mm.h
kernel/posix-timers.c
mm/mmap.c
net/ipv4/ipvs/ip_vs_sync.c
net/rxrpc/ar-output.c
net/xfrm/xfrm_state.c

index 6293920cd1be394c32d7161dc05858fedfed6ef3..b31c0802e1ccf53f1e5a677e802c5e34166ebc54 100644
--- a/arch/i386/Kconfig.debug
+++ b/arch/i386/Kconfig.debug
@@ -49,7 +49,6 @@ config DEBUG_PAGEALLOC
 config DEBUG_RODATA
        bool "Write protect kernel read-only data structures"
        depends on DEBUG_KERNEL
-       depends on !KPROBES # temporary for 2.6.22
        help
          Mark the kernel read-only data as write-protected in the pagetables,
          in order to catch accidental (and incorrect) writes to such const
index b22ce8d6b1ba64c76db039bccdf2920ca9a20061..7135946d366322ade4cb529372f7389647a91d31 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -799,6 +799,7 @@ void mark_rodata_ro(void)
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;
 
+#ifndef CONFIG_KPROBES
 #ifdef CONFIG_HOTPLUG_CPU
        /* It must still be possible to apply SMP alternatives. */
        if (num_possible_cpus() <= 1)
@@ -808,7 +809,7 @@ void mark_rodata_ro(void)
                                 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
                printk("Write protecting the kernel text: %luk\n", size >> 10);
        }
-
+#endif
        start += size;
        size = (unsigned long)__end_rodata - start;
        change_page_attr(virt_to_page(start),
index e70f57e27643a0743254cfbf793c4b575b6a415c..322167737de75a7cfa261f12dcd8f31ad724cca4 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -16,6 +16,8 @@
 
 #include <asm/uaccess.h>
 #include <asm/assembly.h>
+#include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 
 #include <asm/unwind.h>
 
@@ -26,6 +28,8 @@
 #define dbg(x...)
 #endif
 
+#define KERNEL_START (KERNEL_BINARY_TEXT_START - 0x1000)
+
 extern struct unwind_table_entry __start___unwind[];
 extern struct unwind_table_entry __stop___unwind[];
 
@@ -197,6 +201,29 @@ static int unwind_init(void)
        return 0;
 }
 
+#ifdef CONFIG_64BIT
+#define get_func_addr(fptr) fptr[2]
+#else
+#define get_func_addr(fptr) fptr[0]
+#endif
+
+static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
+{
+       void handle_interruption(int, struct pt_regs *);
+       static unsigned long *hi = (unsigned long)&handle_interruption;
+
+       if (pc == get_func_addr(hi)) {
+               struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
+               dbg("Unwinding through handle_interruption()\n");
+               info->prev_sp = regs->gr[30];
+               info->prev_ip = regs->iaoq[0];
+
+               return 1;
+       }
+
+       return 0;
+}
+
 static void unwind_frame_regs(struct unwind_frame_info *info)
 {
        const struct unwind_table_entry *e;
@@ -310,13 +337,15 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
                        }
                }
 
-               info->prev_sp = info->sp - frame_size;
-               if (e->Millicode)
-                       info->rp = info->r31;
-               else if (rpoffset)
-                       info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
-               info->prev_ip = info->rp;
-               info->rp = 0;
+               if (!unwind_special(info, e->region_start, frame_size)) {
+                       info->prev_sp = info->sp - frame_size;
+                       if (e->Millicode)
+                               info->rp = info->r31;
+                       else if (rpoffset)
+                               info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
+                       info->prev_ip = info->rp;
+                       info->rp = 0;
+               }
 
                dbg("analyzing func @ %lx, setting prev_sp=%lx "
                    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp, 
index 8a8677518447ca6624fa0e7b9545772ee6a77959..775d211a5cf93983ca1fd97c64f56fc18c3f8213 100644
--- a/arch/x86_64/Kconfig.debug
+++ b/arch/x86_64/Kconfig.debug
@@ -9,7 +9,6 @@ source "lib/Kconfig.debug"
 config DEBUG_RODATA
        bool "Write protect kernel read-only data structures"
        depends on DEBUG_KERNEL
-       depends on !KPROBES # temporary for 2.6.22
        help
         Mark the kernel read-only data as write-protected in the pagetables,
         in order to catch accidental (and incorrect) writes to such const data.
index efb6e845114ec727430860981b03b22495f9d299..9a0e98accf04aae3e11282e3079fd747afa646a3 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -605,6 +605,11 @@ void mark_rodata_ro(void)
        if (num_possible_cpus() > 1)
                start = (unsigned long)_etext;
 #endif
+
+#ifdef CONFIG_KPROBES
+       start = (unsigned long)__start_rodata;
+#endif
+       
        end = (unsigned long)__end_rodata;
        start = (start + PAGE_SIZE - 1) & PAGE_MASK;
        end &= PAGE_MASK;
index 545f330e59a54bd5c6aebfca56a798d8cdd8a431..ca5229d24d8ed8f0692b64afed88d8e14cfbac33 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -527,7 +527,7 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
 
        /* fixup zero port_map */
        if (!port_map) {
-               port_map = (1 << ahci_nr_ports(hpriv->cap)) - 1;
+               port_map = (1 << ahci_nr_ports(cap)) - 1;
                dev_printk(KERN_WARNING, &pdev->dev,
                           "PORTS_IMPL is zero, forcing 0x%x\n", port_map);
 
index 047eabd75363a110d56e88edd54d899624ab0c6b..adfae9d1ceb15394d92dd5e9944750a93ee41915 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3659,7 +3659,7 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
 
 /**
  *     ata_dev_reread_id - Re-read IDENTIFY data
- *     @adev: target ATA device
+ *     @dev: target ATA device
  *     @readid_flags: read ID flags
  *
  *     Re-read IDENTIFY page and make sure @dev is still attached to
@@ -3802,6 +3802,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
        /* Drives which do spurious command completion */
        { "HTS541680J9SA00",    "SB2IC7EP",     ATA_HORKAGE_NONCQ, },
+       { "HTS541612J9SA00",    "SBDIC7JP",     ATA_HORKAGE_NONCQ, },
+       { "WDC WD740ADFD-00NLR1", NULL,         ATA_HORKAGE_NONCQ, },
 
        /* Devices with NCQ limits */
 
index b439351f1fd3bc215994e36ce88d64a20f933864..a16f629b7b384518b8eebfccc7cc4ea9178f39c2 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -693,6 +693,8 @@ static const struct pci_device_id amd[] = {
        { PCI_VDEVICE(NVIDIA,   PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE), 8 },
        { PCI_VDEVICE(NVIDIA,   PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE), 8 },
        { PCI_VDEVICE(NVIDIA,   PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE), 8 },
+       { PCI_VDEVICE(NVIDIA,   PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
+       { PCI_VDEVICE(NVIDIA,   PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
        { PCI_VDEVICE(AMD,      PCI_DEVICE_ID_AMD_CS5536_IDE),          9 },
 
        { },
index b3456d7a592c18701cb762197ca9accfaa4f9087..dab4e7cf8cda0d7a9df9922aea68c5072a608561 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -2,6 +2,7 @@
  * pata_it821x.c       - IT821x PATA for new ATA layer
  *                       (C) 2005 Red Hat Inc
  *                       Alan Cox <alan@redhat.com>
+ *                       (C) 2007 Bartlomiej Zolnierkiewicz
  *
  * based upon
  *
@@ -79,7 +80,7 @@
 
 
 #define DRV_NAME "pata_it821x"
-#define DRV_VERSION "0.3.6"
+#define DRV_VERSION "0.3.7"
 
 struct it821x_dev
 {
@@ -460,14 +461,8 @@ static unsigned int it821x_passthru_qc_issue_prot(struct ata_queued_cmd *qc)
 
 static int it821x_smart_set_mode(struct ata_port *ap, struct ata_device **unused)
 {
-       int dma_enabled = 0;
        int i;
 
-       /* Bits 5 and 6 indicate if DMA is active on master/slave */
-       /* It is possible that BMDMA isn't allocated */
-       if (ap->ioaddr.bmdma_addr)
-               dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-
        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                struct ata_device *dev = &ap->device[i];
                if (ata_dev_enabled(dev)) {
@@ -476,7 +471,7 @@ static int it821x_smart_set_mode(struct ata_port *ap, struct ata_device **unused
                        dev->dma_mode = XFER_MW_DMA_0;
                        /* We do need the right mode information for DMA or PIO
                           and this comes from the current configuration flags */
-                       if (dma_enabled & (1 << (5 + i))) {
+                       if (ata_id_has_dma(dev->id)) {
                                ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
                                dev->xfer_mode = XFER_MW_DMA_0;
                                dev->xfer_shift = ATA_SHIFT_MWDMA;
@@ -799,7 +794,7 @@ MODULE_VERSION(DRV_VERSION);
 
 
 module_param_named(noraid, it8212_noraid, int, S_IRUGO);
-MODULE_PARM_DESC(it8212_noraid, "Force card into bypass mode");
+MODULE_PARM_DESC(noraid, "Force card into bypass mode");
 
 module_init(it821x_init);
 module_exit(it821x_exit);
index 0439ee951a113ab3fbed364860ddb9b4c6cdcd1d..a1240603912c2608f0137fa73ded8aa86a00e9e1 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -1843,35 +1843,35 @@ static const struct intel_driver_description {
                &intel_845_driver, &intel_830_driver },
        { PCI_DEVICE_ID_INTEL_82875_HB, 0, 0, "i875", &intel_845_driver, NULL },
        { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, 0, "915G",
-               &intel_845_driver, &intel_915_driver },
+               NULL, &intel_915_driver },
        { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, 0, "915GM",
-               &intel_845_driver, &intel_915_driver },
+               NULL, &intel_915_driver },
        { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G",
-               &intel_845_driver, &intel_915_driver },
+               NULL, &intel_915_driver },
        { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 1, "945GM",
-               &intel_845_driver, &intel_915_driver },
+               NULL, &intel_915_driver },
        { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME",
-               &intel_845_driver, &intel_915_driver },
+               NULL, &intel_915_driver },
        { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ",
-               &intel_845_driver, &intel_i965_driver },
+               NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_82965G_1_HB, PCI_DEVICE_ID_INTEL_82965G_1_IG, 0, "965G",
-               &intel_845_driver, &intel_i965_driver },
+               NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q",
-               &intel_845_driver, &intel_i965_driver },
+               NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G",
-               &intel_845_driver, &intel_i965_driver },
+               NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 1, "965GM",
-               &intel_845_driver, &intel_i965_driver },
+               NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE",
-               &intel_845_driver, &intel_i965_driver },
+               NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL },
        { PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL },
        { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, 0, "G33",
-               &intel_845_driver, &intel_g33_driver },
+               NULL, &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, 0, "Q35",
-               &intel_845_driver, &intel_g33_driver },
+               NULL, &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
-               &intel_845_driver, &intel_g33_driver },
+               NULL, &intel_g33_driver },
        { 0, 0, 0, NULL, NULL, NULL }
 };
 
@@ -1917,8 +1917,11 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
        }
 
        if (bridge->driver == NULL) {
-               printk(KERN_WARNING PFX "Failed to find bridge device "
-                       "(chip_id: %04x)\n", intel_agp_chipsets[i].gmch_chip_id);
+               /* bridge has no AGP and no IGD detected */
+               if (cap_ptr)
+                       printk(KERN_WARNING PFX "Failed to find bridge device "
+                               "(chip_id: %04x)\n",
+                               intel_agp_chipsets[i].gmch_chip_id);
                agp_put_bridge(bridge);
                return -ENODEV;
         }
index 7e03f41ae2c2c817f17a776ba02d0c7731f75236..f829e4ad8b4970aacd1cc893e0c65a7855f751bd 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2303,19 +2303,18 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 }
 
 /*
- * set link state for bonding master: if we have an active partnered
+ * set link state for bonding master: if we have an active 
  * aggregator, we're up, if not, we're down.  Presumes that we cannot
  * have an active aggregator if there are no slaves with link up.
  *
+ * This behavior complies with IEEE 802.3 section 43.3.9.
+ *
  * Called by bond_set_carrier(). Return zero if carrier state does not
  * change, nonzero if it does.
  */
 int bond_3ad_set_carrier(struct bonding *bond)
 {
-       struct aggregator *agg;
-
-       agg = __get_active_agg(&(SLAVE_AD_INFO(bond->first_slave).aggregator));
-       if (agg && MAC_ADDRESS_COMPARE(&agg->partner_system, &null_mac_addr)) {
+       if (__get_active_agg(&(SLAVE_AD_INFO(bond->first_slave).aggregator))) {
                if (!netif_carrier_ok(bond->dev)) {
                        netif_carrier_on(bond->dev);
                        return 1;
index 223517dcbcfd48ba48c75710d99b3eeb22ceaa46..6287ffbda7f794c4d2312bc421d4f12fb1b32456 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4345,8 +4345,8 @@ static void bond_free_all(void)
                bond_mc_list_destroy(bond);
                /* Release the bonded slaves */
                bond_release_all(bond_dev);
-               unregister_netdevice(bond_dev);
                bond_deinit(bond_dev);
+               unregister_netdevice(bond_dev);
        }
 
 #ifdef CONFIG_PROC_FS
index a122baa5c7bb937ebcc5c436d5710fe785ae3b61..60cccf2aa9594e5c98eefd4e93622fdf5257bec9 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -164,9 +164,9 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t
                                printk(KERN_INFO DRV_NAME
                                        ": %s is being deleted...\n",
                                        bond->dev->name);
-                               unregister_netdevice(bond->dev);
                                bond_deinit(bond->dev);
                                bond_destroy_sysfs_entry(bond);
+                               unregister_netdevice(bond->dev);
                                rtnl_unlock();
                                goto out;
                        }
index 41aa78bf1f783af9d86928db4c1216de406dd6d4..a89102116ccb22e670f383ea9b7df0d6091db9eb 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -22,8 +22,8 @@
 #include "bond_3ad.h"
 #include "bond_alb.h"
 
-#define DRV_VERSION    "3.1.2"
-#define DRV_RELDATE    "January 20, 2007"
+#define DRV_VERSION    "3.1.3"
+#define DRV_RELDATE    "June 13, 2007"
 #define DRV_NAME       "bonding"
 #define DRV_DESCRIPTION        "Ethernet Channel Bonding Driver"
 
index 73a41e6a5bfc8fbf0fd7803a38d6b7612b963dc9..ee140e63ddc5e337a3b2e22ce8c7a128395b1fd6 100644
--- a/drivers/net/cxgb3/ael1002.c
+++ b/drivers/net/cxgb3/ael1002.c
@@ -219,7 +219,13 @@ static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok,
                unsigned int status;
 
                status = t3_read_reg(phy->adapter,
-                                    XGM_REG(A_XGM_SERDES_STAT0, phy->addr));
+                                    XGM_REG(A_XGM_SERDES_STAT0, phy->addr)) |
+                   t3_read_reg(phy->adapter,
+                               XGM_REG(A_XGM_SERDES_STAT1, phy->addr)) |
+                   t3_read_reg(phy->adapter,
+                               XGM_REG(A_XGM_SERDES_STAT2, phy->addr)) |
+                   t3_read_reg(phy->adapter,
+                               XGM_REG(A_XGM_SERDES_STAT3, phy->addr));
                *link_ok = !(status & F_LOWSIG0);
        }
        if (speed)
@@ -247,5 +253,5 @@ static struct cphy_ops xaui_direct_ops = {
 void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
                             int phy_addr, const struct mdio_ops *mdio_ops)
 {
-       cphy_init(phy, adapter, 1, &xaui_direct_ops, mdio_ops);
+       cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops);
 }
index 1b20f4060e2d7a1fbdb00b9154d88103a4127625..d8a1f5452c51513092a2504548a6c8998d038a39 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -2071,10 +2071,20 @@ static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 static void cxgb_netpoll(struct net_device *dev)
 {
        struct adapter *adapter = dev->priv;
-       struct sge_qset *qs = dev2qset(dev);
+       struct port_info *pi = netdev_priv(dev);
+       int qidx;
 
-       t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
-                                                   adapter);
+       for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
+               struct sge_qset *qs = &adapter->sge.qs[qidx];
+               void *source;
+               
+               if (adapter->flags & USING_MSIX)
+                       source = qs;
+               else
+                       source = adapter;
+
+               t3_intr_handler(adapter, qs->rspq.polling) (0, source);
+       }
 }
 #endif
 
index e5a553410e24c6038668b688ce0498cbf0fb3f00..020859c855d7c6836657ab133484f0d7d6aa4ef5 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
 #define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES)
 #define F_COPYALLFRAMES    V_COPYALLFRAMES(1U)
 
+#define S_DISBCAST    1
+#define V_DISBCAST(x) ((x) << S_DISBCAST)
+#define F_DISBCAST    V_DISBCAST(1U)
+
 #define A_XGM_RX_HASH_LOW 0x814
 
 #define A_XGM_RX_HASH_HIGH 0x818
 #define F_RESETPLL01    V_RESETPLL01(1U)
 
 #define A_XGM_SERDES_STAT0 0x8f0
+#define A_XGM_SERDES_STAT1 0x8f4
+#define A_XGM_SERDES_STAT2 0x8f8
 
 #define S_LOWSIG0    0
 #define V_LOWSIG0(x) ((x) << S_LOWSIG0)
index 3666586a4831d530d765d4e280e24ad8ec826a21..a60ec4d4707c50c2919bf2f99cd5f5051f3d483a 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -1690,8 +1690,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
        struct port_info *pi;
 
        skb_pull(skb, sizeof(*p) + pad);
-       skb->dev->last_rx = jiffies;
        skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
+       skb->dev->last_rx = jiffies;
        pi = netdev_priv(skb->dev);
        if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
            !p->fragment) {
@@ -2217,7 +2217,6 @@ irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
        struct sge_rspq *q = &qs->rspq;
 
        spin_lock(&q->lock);
-       BUG_ON(napi_is_scheduled(qs->netdev));
 
        if (handle_responses(adap, q) < 0)
                q->unhandled_irqs++;
index a506792f9575a6d5bfd60a15bbdbd570579a226d..b261be147e7b071590e89d6fd673de996943ccb3 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/cxgb3/xgmac.c
@@ -231,6 +231,28 @@ int t3_mac_set_num_ucast(struct cmac *mac, int n)
        return 0;
 }
 
+static void disable_exact_filters(struct cmac *mac)
+{
+       unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_LOW_1;
+
+       for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
+               u32 v = t3_read_reg(mac->adapter, reg);
+               t3_write_reg(mac->adapter, reg, v);
+       }
+       t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1);  /* flush */
+}
+
+static void enable_exact_filters(struct cmac *mac)
+{
+       unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_HIGH_1;
+
+       for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
+               u32 v = t3_read_reg(mac->adapter, reg);
+               t3_write_reg(mac->adapter, reg, v);
+       }
+       t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1);  /* flush */
+}
+
 /* Calculate the RX hash filter index of an Ethernet address */
 static int hash_hw_addr(const u8 * addr)
 {
@@ -281,6 +303,14 @@ int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
        return 0;
 }
 
+static int rx_fifo_hwm(int mtu)
+{
+       int hwm;
+
+       hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, (MAC_RXFIFO_SIZE * 38) / 100);
+       return min(hwm, MAC_RXFIFO_SIZE - 8192);
+}
+
 int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
 {
        int hwm, lwm;
@@ -306,11 +336,38 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
        lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
 
        v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
+       if (adap->params.rev == T3_REV_B2 &&
+           (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) {
+               disable_exact_filters(mac);
+               t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + mac->offset,
+                                F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST);
+
+               /* drain rx FIFO */
+               if (t3_wait_op_done(adap,
+                                   A_XGM_RX_MAX_PKT_SIZE_ERR_CNT +
+                                   mac->offset,
+                                   1 << 31, 1, 20, 5)) {
+                       t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
+                       enable_exact_filters(mac);
+                       return -EIO;
+               }
+               t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
+               enable_exact_filters(mac);
+       } else
+               t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
+
+       /*
+        * Adjust the PAUSE frame watermarks.  We always set the LWM, and the
+        * HWM only if flow-control is enabled.
+        */
+       hwm = rx_fifo_hwm(mtu);
+       lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
        v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
        v |= V_RXFIFOPAUSELWM(lwm / 8);
        if (G_RXFIFOPAUSEHWM(v))
                v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
                    V_RXFIFOPAUSEHWM(hwm / 8);
+
        t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
 
        /* Adjust the TX FIFO threshold based on the MTU */
@@ -329,7 +386,6 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
                             (hwm - lwm) * 4 / 8);
        t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
                     MAC_RXFIFO_SIZE * 4 * 8 / 512);
-
        return 0;
 }
 
@@ -357,6 +413,15 @@ int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
                                 V_PORTSPEED(M_PORTSPEED), val);
        }
 
+       val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
+       val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
+       if (fc & PAUSE_TX)
+               val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(
+                                               t3_read_reg(adap,
+                                               A_XGM_RX_MAX_PKT_SIZE
+                                               + oft)) / 8);
+       t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
+
        t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
                         (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
        return 0;
@@ -436,6 +501,10 @@ int t3b2_mac_watchdog_task(struct cmac *mac)
        unsigned int rx_xcnt;
        int status;
 
+       status = 0;
+       tx_xcnt = 1;            /* By default tx_xcnt is making progress */
+       tx_tcnt = mac->tx_tcnt; /* If tx_mcnt is progressing ignore tx_tcnt */
+       rx_xcnt = 1;            /* By default rx_xcnt is making progress */
        if (tx_mcnt == mac->tx_mcnt) {
                tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
                                                A_XGM_TX_SPI4_SOP_EOP_CNT +
@@ -446,37 +515,44 @@ int t3b2_mac_watchdog_task(struct cmac *mac)
                        tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
                                                      A_TP_PIO_DATA)));
                } else {
-                       mac->toggle_cnt = 0;
-                       return 0;
+                       goto rxcheck;
                }
        } else {
                mac->toggle_cnt = 0;
-               return 0;
+               goto rxcheck;
        }
 
        if (((tx_tcnt != mac->tx_tcnt) &&
             (tx_xcnt == 0) && (mac->tx_xcnt == 0)) ||
            ((mac->tx_mcnt == tx_mcnt) &&
             (tx_xcnt != 0) && (mac->tx_xcnt != 0))) {
-               if (mac->toggle_cnt > 4)
+               if (mac->toggle_cnt > 4) {
                        status = 2;
-               else 
+                       goto out;
+               } else {
                        status = 1;
+                       goto out;
+               }
        } else {
                mac->toggle_cnt = 0;
-               return 0;
+               goto rxcheck;
        }
 
+rxcheck:
        if (rx_mcnt != mac->rx_mcnt)
                rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
                                                A_XGM_RX_SPI4_SOP_EOP_CNT +
                                                mac->offset)));
-       else 
-               return 0;
+       else
+               goto out;
 
-       if (mac->rx_mcnt != s->rx_frames && rx_xcnt == 0 && mac->rx_xcnt == 0) 
+       if (mac->rx_mcnt != s->rx_frames && rx_xcnt == 0 &&
+           mac->rx_xcnt == 0) {
                status = 2;
-       
+               goto out;
+       }
+
+out:
        mac->tx_tcnt = tx_tcnt;
        mac->tx_xcnt = tx_xcnt;
        mac->tx_mcnt = s->tx_frames;
index 32788ca40d25c4cbc726d96451d181038128c003..42ba1c012ee2c3ad0f20ec14588bd5751507e8f3 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -4825,8 +4825,10 @@ static int nv_close(struct net_device *dev)
 
        drain_ring(dev);
 
-       if (np->wolenabled)
+       if (np->wolenabled) {
+               writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
                nv_start_rx(dev);
+       }
 
        /* FIXME: power down nic */
 
index 4cf0d3fcb519d741559d9e016178f455a75884c3..460a08718c69f8c740fcb5716720ea44219b14f6 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -690,7 +690,7 @@ static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
 {
        struct netdev_private *np = netdev_priv(to_net_dev(dev));
        int new_setting;
-       u32 flags;
+       unsigned long flags;
 
         /* Find out the new setting */
         if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
index b47ad1df2e0cd8c6cafe7d4e51a00cfa27945013..7a4aa6a9f94913245f2eb7b155a363c33c1399af 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -460,13 +460,9 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
                hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
        } else {
                hwdescr->buf_addr = buf;
-               hwdescr->next_descr_addr = 0;
                wmb();
                hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
                                         SPIDER_NET_DMAC_NOINTR_COMPLETE;
-
-               wmb();
-               descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
        }
 
        return 0;
@@ -541,12 +537,16 @@ spider_net_refill_rx_chain(struct spider_net_card *card)
 static int
 spider_net_alloc_rx_skbs(struct spider_net_card *card)
 {
-       int result;
-       struct spider_net_descr_chain *chain;
+       struct spider_net_descr_chain *chain = &card->rx_chain;
+       struct spider_net_descr *start = chain->tail;
+       struct spider_net_descr *descr = start;
 
-       result = -ENOMEM;
+       /* Link up the hardware chain pointers */
+       do {
+               descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
+               descr = descr->next;
+       } while (descr != start);
 
-       chain = &card->rx_chain;
        /* Put at least one buffer into the chain. if this fails,
         * we've got a problem. If not, spider_net_refill_rx_chain
         * will do the rest at the end of this function. */
@@ -563,7 +563,7 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card)
 
 error:
        spider_net_free_rx_chain_contents(card);
-       return result;
+       return -ENOMEM;
 }
 
 /**
@@ -718,7 +718,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
                        SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
        spin_unlock_irqrestore(&chain->lock, flags);
 
-       if (skb->protocol == htons(ETH_P_IP) && skb->ip_summed == CHECKSUM_PARTIAL)
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_TCP:
                        hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
@@ -1050,6 +1050,66 @@ static void show_rx_chain(struct spider_net_card *card)
 }
 #endif
 
+/**
+ * spider_net_resync_head_ptr - Advance head ptr past empty descrs
+ *
+ * If the driver fails to keep up and empty the queue, then the
+ * hardware wil run out of room to put incoming packets. This
+ * will cause the hardware to skip descrs that are full (instead
+ * of halting/retrying). Thus, once the driver runs, it wil need
+ * to "catch up" to where the hardware chain pointer is at.
+ */
+static void spider_net_resync_head_ptr(struct spider_net_card *card)
+{
+       unsigned long flags;
+       struct spider_net_descr_chain *chain = &card->rx_chain;
+       struct spider_net_descr *descr;
+       int i, status;
+
+       /* Advance head pointer past any empty descrs */
+       descr = chain->head;
+       status = spider_net_get_descr_status(descr->hwdescr);
+
+       if (status == SPIDER_NET_DESCR_NOT_IN_USE)
+               return;
+
+       spin_lock_irqsave(&chain->lock, flags);
+
+       descr = chain->head;
+       status = spider_net_get_descr_status(descr->hwdescr);
+       for (i=0; i<chain->num_desc; i++) {
+               if (status != SPIDER_NET_DESCR_CARDOWNED) break;
+               descr = descr->next;
+               status = spider_net_get_descr_status(descr->hwdescr);
+       }
+       chain->head = descr;
+
+       spin_unlock_irqrestore(&chain->lock, flags);
+}
+
+static int spider_net_resync_tail_ptr(struct spider_net_card *card)
+{
+       struct spider_net_descr_chain *chain = &card->rx_chain;
+       struct spider_net_descr *descr;
+       int i, status;
+
+       /* Advance tail pointer past any empty and reaped descrs */
+       descr = chain->tail;
+       status = spider_net_get_descr_status(descr->hwdescr);
+
+       for (i=0; i<chain->num_desc; i++) {
+               if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
+                   (status != SPIDER_NET_DESCR_NOT_IN_USE)) break;
+               descr = descr->next;
+               status = spider_net_get_descr_status(descr->hwdescr);
+       }
+       chain->tail = descr;
+
+       if ((i == chain->num_desc) || (i == 0))
+               return 1;
+       return 0;
+}
+
 /**
  * spider_net_decode_one_descr - processes an RX descriptor
  * @card: card structure
@@ -1112,7 +1172,7 @@ spider_net_decode_one_descr(struct spider_net_card *card)
                goto bad_desc;
        }
 
-       if (hwdescr->dmac_cmd_status & 0xfefe) {
+       if (hwdescr->dmac_cmd_status & 0xfcf4) {
                pr_err("%s: bad status, cmd_status=x%08x\n",
                               card->netdev->name,
                               hwdescr->dmac_cmd_status);
@@ -1131,6 +1191,7 @@ spider_net_decode_one_descr(struct spider_net_card *card)
 
        /* Ok, we've got a packet in descr */
        spider_net_pass_skb_up(descr, card);
+       descr->skb = NULL;
        hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
        return 1;
 
@@ -1174,6 +1235,12 @@ spider_net_poll(struct net_device *netdev, int *budget)
                }
        }
 
+       if ((packets_done == 0) && (card->num_rx_ints != 0)) {
+               no_more_packets = spider_net_resync_tail_ptr(card);
+               spider_net_resync_head_ptr(card);
+       }
+       card->num_rx_ints = 0;
+
        netdev->quota -= packets_done;
        *budget -= packets_done;
        spider_net_refill_rx_chain(card);
@@ -1184,6 +1251,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
        if (no_more_packets) {
                netif_rx_complete(netdev);
                spider_net_rx_irq_on(card);
+               card->ignore_rx_ramfull = 0;
                return 0;
        }
 
@@ -1417,11 +1485,15 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
        case SPIDER_NET_GRFBFLLINT: /* fallthrough */
        case SPIDER_NET_GRFAFLLINT: /* fallthrough */
        case SPIDER_NET_GRMFLLINT:
-               if (netif_msg_intr(card) && net_ratelimit())
-                       pr_err("Spider RX RAM full, incoming packets "
-                              "might be discarded!\n");
-               spider_net_rx_irq_off(card);
-               netif_rx_schedule(card->netdev);
+               /* Could happen when rx chain is full */
+               if (card->ignore_rx_ramfull == 0) {
+                       card->ignore_rx_ramfull = 1;
+                       spider_net_resync_head_ptr(card);
+                       spider_net_refill_rx_chain(card);
+                       spider_net_enable_rxdmac(card);
+                       card->num_rx_ints ++;
+                       netif_rx_schedule(card->netdev);
+               }
                show_error = 0;
                break;
 
@@ -1436,12 +1508,11 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
        case SPIDER_NET_GDCDCEINT: /* fallthrough */
        case SPIDER_NET_GDBDCEINT: /* fallthrough */
        case SPIDER_NET_GDADCEINT:
-               if (netif_msg_intr(card) && net_ratelimit())
-                       pr_err("got descriptor chain end interrupt, "
-                              "restarting DMAC %c.\n",
-                              'D'-(i-SPIDER_NET_GDDDCEINT)/3);
+               spider_net_resync_head_ptr(card);
                spider_net_refill_rx_chain(card);
                spider_net_enable_rxdmac(card);
+               card->num_rx_ints ++;
+               netif_rx_schedule(card->netdev);
                show_error = 0;
                break;
 
@@ -1450,9 +1521,12 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
        case SPIDER_NET_GDCINVDINT: /* fallthrough */
        case SPIDER_NET_GDBINVDINT: /* fallthrough */
        case SPIDER_NET_GDAINVDINT:
-               /* could happen when rx chain is full */
+               /* Could happen when rx chain is full */
+               spider_net_resync_head_ptr(card);
                spider_net_refill_rx_chain(card);
                spider_net_enable_rxdmac(card);
+               card->num_rx_ints ++;
+               netif_rx_schedule(card->netdev);
                show_error = 0;
                break;
 
@@ -1545,6 +1619,7 @@ spider_net_interrupt(int irq, void *ptr)
        if (status_reg & SPIDER_NET_RXINT ) {
                spider_net_rx_irq_off(card);
                netif_rx_schedule(netdev);
+               card->num_rx_ints ++;
        }
        if (status_reg & SPIDER_NET_TXINT)
                netif_rx_schedule(netdev);
@@ -2185,11 +2260,13 @@ spider_net_setup_netdev(struct spider_net_card *card)
 
        spider_net_setup_netdev_ops(netdev);
 
-       netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX;
+       netdev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX;
        /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
         *              NETIF_F_HW_VLAN_FILTER */
 
        netdev->irq = card->pdev->irq;
+       card->num_rx_ints = 0;
+       card->ignore_rx_ramfull = 0;
 
        dn = pci_device_to_OF_node(card->pdev);
        if (!dn)
index 4a1e0d28a502b1be6e532bb2fafd0357aafce93c..1d054aa715049779fde4e9b04fcd68d161e3e2d6 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -25,7 +25,7 @@
 #ifndef _SPIDER_NET_H
 #define _SPIDER_NET_H
 
-#define VERSION "2.0 A"
+#define VERSION "2.0 B"
 
 #include "sungem_phy.h"
 
@@ -222,6 +222,7 @@ extern char spider_net_driver_name[];
 #define SPIDER_NET_GDTBSTA             0x00000300
 #define SPIDER_NET_GDTDCEIDIS          0x00000002
 #define SPIDER_NET_DMA_TX_VALUE        SPIDER_NET_TX_DMA_EN | \
+                                       SPIDER_NET_GDTDCEIDIS | \
                                        SPIDER_NET_GDTBSTA
 
 #define SPIDER_NET_DMA_TX_FEND_VALUE   0x00030003
@@ -332,8 +333,7 @@ enum spider_net_int2_status {
        SPIDER_NET_GRISPDNGINT
 };
 
-#define SPIDER_NET_TXINT       ( (1 << SPIDER_NET_GDTFDCINT) | \
-                             (1 << SPIDER_NET_GDTDCEINT) )
+#define SPIDER_NET_TXINT       (1 << SPIDER_NET_GDTFDCINT)
 
 /* We rely on flagged descriptor interrupts */
 #define SPIDER_NET_RXINT       ( (1 << SPIDER_NET_GDAFDCINT) )
@@ -461,6 +461,8 @@ struct spider_net_card {
        struct work_struct tx_timeout_task;
        atomic_t tx_timeout_task_counter;
        wait_queue_head_t waitq;
+       int num_rx_ints;
+       int ignore_rx_ramfull;
 
        /* for ethtool */
        int msg_enable;
index 6bcf03fc89be75f1b1524e841b137d3861ddfef2..d940474e024a19b0eed798443b5f92a7feec2adc 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/spider_net_ethtool.c
@@ -134,22 +134,6 @@ spider_net_ethtool_set_rx_csum(struct net_device *netdev, u32 n)
        return 0;
 }
 
-static uint32_t
-spider_net_ethtool_get_tx_csum(struct net_device *netdev)
-{
-        return (netdev->features & NETIF_F_HW_CSUM) != 0;
-}
-
-static int
-spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data)
-{
-        if (data)
-                netdev->features |= NETIF_F_HW_CSUM;
-        else
-                netdev->features &= ~NETIF_F_HW_CSUM;
-
-        return 0;
-}
 
 static void
 spider_net_ethtool_get_ringparam(struct net_device *netdev,
@@ -200,11 +184,12 @@ const struct ethtool_ops spider_net_ethtool_ops = {
        .get_wol                = spider_net_ethtool_get_wol,
        .get_msglevel           = spider_net_ethtool_get_msglevel,
        .set_msglevel           = spider_net_ethtool_set_msglevel,
+       .get_link               = ethtool_op_get_link,
        .nway_reset             = spider_net_ethtool_nway_reset,
        .get_rx_csum            = spider_net_ethtool_get_rx_csum,
        .set_rx_csum            = spider_net_ethtool_set_rx_csum,
-       .get_tx_csum            = spider_net_ethtool_get_tx_csum,
-       .set_tx_csum            = spider_net_ethtool_set_tx_csum,
+       .get_tx_csum            = ethtool_op_get_tx_csum,
+       .set_tx_csum            = ethtool_op_set_tx_csum,
        .get_ringparam          = spider_net_ethtool_get_ringparam,
        .get_strings            = spider_net_get_strings,
        .get_stats_count        = spider_net_get_stats_count,
index 98be2880757d90fe48a5e2947bada642568f3261..e5d7ed92d6f780ba746b177c14cf6eab9b814bfe 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -195,12 +195,6 @@ static int led_proc_write(struct file *file, const char *buf,
 
        cur = lbuf;
 
-       /* skip initial spaces */
-       while (*cur && isspace(*cur))
-       {
-               cur++;
-       }
-
        switch ((long)data)
        {
        case LED_NOLCD:
index 6dd64d0c8d45df40490c08c43fde4ab974496ac1..348bb7b82771a231cb5aae013d6d64158095275a 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -3912,6 +3912,7 @@ static int
 add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
 {
        struct chbk *p_ch;
+       struct ccw_dev_id dev_id;
 
 #ifdef FUNCTRACE
         printk(KERN_INFO "%s:%s Enter\n",cdev->dev.bus_id,__FUNCTION__);
@@ -3921,7 +3922,8 @@ add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
        p_ch = &privptr->channel[i];
        p_ch->cdev = cdev;
        snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", cdev->dev.bus_id);
-       sscanf(cdev->dev.bus_id+4,"%x",&p_ch->devno);
+       ccw_device_get_id(cdev, &dev_id);
+       p_ch->devno = dev_id.devno;
        if ((p_ch->irb = kmalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
                printk(KERN_WARNING "%s Out of memory in %s for irb\n",
                        p_ch->id,__FUNCTION__);
@@ -3955,6 +3957,7 @@ claw_new_device(struct ccwgroup_device *cgdev)
        struct claw_env *p_env;
        struct net_device *dev;
        int ret;
+       struct ccw_dev_id dev_id;
 
        pr_debug("%s() called\n", __FUNCTION__);
        printk(KERN_INFO "claw: add for %s\n",cgdev->cdev[READ]->dev.bus_id);
@@ -3965,10 +3968,10 @@ claw_new_device(struct ccwgroup_device *cgdev)
        if (!privptr)
                return -ENODEV;
        p_env = privptr->p_env;
-       sscanf(cgdev->cdev[READ]->dev.bus_id+4,"%x",
-               &p_env->devno[READ]);
-        sscanf(cgdev->cdev[WRITE]->dev.bus_id+4,"%x",
-               &p_env->devno[WRITE]);
+       ccw_device_get_id(cgdev->cdev[READ], &dev_id);
+       p_env->devno[READ] = dev_id.devno;
+       ccw_device_get_id(cgdev->cdev[WRITE], &dev_id);
+       p_env->devno[WRITE] = dev_id.devno;
        ret = add_channel(cgdev->cdev[0],0,privptr);
        if (ret == 0)
                ret = add_channel(cgdev->cdev[1],1,privptr);
index c358764f3264b56a478b88dce21f6ae94cc96e82..3d28e1a5bf799ea0369afeec25405e594dfd21e4 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -134,18 +134,6 @@ PRINT_##importance(header "%02x %02x %02x %02x  %02x %02x %02x %02x  " \
                   *(((char*)ptr)+28),*(((char*)ptr)+29), \
                   *(((char*)ptr)+30),*(((char*)ptr)+31));
 
-static inline void iucv_hex_dump(unsigned char *buf, size_t len)
-{
-       size_t i;
-
-       for (i = 0; i < len; i++) {
-               if (i && !(i % 16))
-                       printk("\n");
-               printk("%02x ", *(buf + i));
-       }
-       printk("\n");
-}
-
 #define PRINTK_HEADER " iucv: "       /* for debugging */
 
 static struct device_driver netiucv_driver = {
@@ -212,7 +200,7 @@ struct iucv_connection {
  */
 static struct list_head iucv_connection_list =
        LIST_HEAD_INIT(iucv_connection_list);
-static rwlock_t iucv_connection_rwlock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(iucv_connection_rwlock);
 
 /**
  * Representation of event-data for the
@@ -280,7 +268,7 @@ static u8 iucvMagic[16] = {
  *
  * @returns The printable string (static data!!)
  */
-static inline char *netiucv_printname(char *name)
+static char *netiucv_printname(char *name)
 {
        static char tmp[9];
        char *p = tmp;
@@ -1315,7 +1303,8 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
         * and throw away packet.
         */
        if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
-               fsm_event(privptr->fsm, DEV_EVENT_START, dev);
+               if (!in_atomic())
+                       fsm_event(privptr->fsm, DEV_EVENT_START, dev);
                dev_kfree_skb(skb);
                privptr->stats.tx_dropped++;
                privptr->stats.tx_errors++;
@@ -1729,7 +1718,7 @@ static struct attribute_group netiucv_stat_attr_group = {
        .attrs = netiucv_stat_attrs,
 };
 
-static inline int netiucv_add_files(struct device *dev)
+static int netiucv_add_files(struct device *dev)
 {
        int ret;
 
@@ -1743,7 +1732,7 @@ static inline int netiucv_add_files(struct device *dev)
        return ret;
 }
 
-static inline void netiucv_remove_files(struct device *dev)
+static void netiucv_remove_files(struct device *dev)
 {
        IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
        sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
index 4640f32daae5832ed75e410a5bcc131bfc180098..70108fb1690653f58c08d8a4e09e0f79834041af 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_eddp.c
@@ -424,8 +424,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                /* prepare qdio hdr */
                if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
                        eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
-                                                    eddp->nhl + eddp->thl -
-                                                    sizeof(struct qeth_hdr);
+                                                    eddp->nhl + eddp->thl;
 #ifdef CONFIG_QETH_VLAN
                        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                                eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
index 0b96d49dd636bc55f6f8f8f360472ab9c18fede1..86b0c44165c198a73a3dc9fca8ef390aab71782a 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -986,15 +986,15 @@ qeth_recover(void *ptr)
        card->use_hard_stop = 1;
        __qeth_set_offline(card->gdev,1);
        rc = __qeth_set_online(card->gdev,1);
+       /* don't run another scheduled recovery */
+       qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+       qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
        if (!rc)
                PRINT_INFO("Device %s successfully recovered!\n",
                           CARD_BUS_ID(card));
        else
                PRINT_INFO("Device %s could not be recovered!\n",
                           CARD_BUS_ID(card));
-       /* don't run another scheduled recovery */
-       qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
-       qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
        return 0;
 }
 
@@ -2176,13 +2176,6 @@ qeth_ulp_enable(struct qeth_card *card)
 
 }
 
-static inline __u16
-__raw_devno_from_bus_id(char *id)
-{
-       id += (strlen(id) - 4);
-       return (__u16) simple_strtoul(id, &id, 16);
-}
-
 static int
 qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
                  unsigned long data)
@@ -2205,6 +2198,7 @@ qeth_ulp_setup(struct qeth_card *card)
        int rc;
        __u16 temp;
        struct qeth_cmd_buffer *iob;
+       struct ccw_dev_id dev_id;
 
        QETH_DBF_TEXT(setup,2,"ulpsetup");
 
@@ -2218,8 +2212,8 @@ qeth_ulp_setup(struct qeth_card *card)
        memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
               &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
 
-       temp = __raw_devno_from_bus_id(CARD_DDEV_ID(card));
-       memcpy(QETH_ULP_SETUP_CUA(iob->data), &temp, 2);
+       ccw_device_get_id(CARD_DDEV(card), &dev_id);
+       memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
        temp = (card->info.cula << 8) + card->info.unit_addr2;
        memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
        rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
@@ -5850,9 +5844,9 @@ qeth_add_vlan_mc6(struct qeth_card *card)
                in_dev = in6_dev_get(netdev);
                if (!in_dev)
                        continue;
-               read_lock(&in_dev->lock);
+               read_lock_bh(&in_dev->lock);
                qeth_add_mc6(card,in_dev);
-               read_unlock(&in_dev->lock);
+               read_unlock_bh(&in_dev->lock);
                in6_dev_put(in_dev);
        }
 #endif /* CONFIG_QETH_VLAN */
@@ -5869,10 +5863,10 @@ qeth_add_multicast_ipv6(struct qeth_card *card)
        in6_dev = in6_dev_get(card->dev);
        if (in6_dev == NULL)
                return;
-       read_lock(&in6_dev->lock);
+       read_lock_bh(&in6_dev->lock);
        qeth_add_mc6(card, in6_dev);
        qeth_add_vlan_mc6(card);
-       read_unlock(&in6_dev->lock);
+       read_unlock_bh(&in6_dev->lock);
        in6_dev_put(in6_dev);
 }
 #endif /* CONFIG_QETH_IPV6 */
@@ -7476,11 +7470,11 @@ qeth_softsetup_card(struct qeth_card *card)
                QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
                if (rc == 0xe080){
                        PRINT_WARN("LAN on card %s if offline! "
-                                  "Continuing softsetup.\n",
+                                  "Waiting for STARTLAN from card.\n",
                                   CARD_BUS_ID(card));
                        card->lan_online = 0;
-               } else
-                       return rc;
+               }
+               return rc;
        } else
                card->lan_online = 1;
        if (card->info.type==QETH_CARD_TYPE_OSN)
@@ -7797,15 +7791,17 @@ qeth_print_status_message(struct qeth_card *card)
                }
                /* fallthrough */
        case QETH_CARD_TYPE_IQD:
-               card->info.mcl_level[0] = (char) _ebcasc[(__u8)
-                       card->info.mcl_level[0]];
-               card->info.mcl_level[1] = (char) _ebcasc[(__u8)
-                       card->info.mcl_level[1]];
-               card->info.mcl_level[2] = (char) _ebcasc[(__u8)
-                       card->info.mcl_level[2]];
-               card->info.mcl_level[3] = (char) _ebcasc[(__u8)
-                       card->info.mcl_level[3]];
-               card->info.mcl_level[QETH_MCL_LENGTH] = 0;
+               if (card->info.guestlan) {
+                       card->info.mcl_level[0] = (char) _ebcasc[(__u8)
+                               card->info.mcl_level[0]];
+                       card->info.mcl_level[1] = (char) _ebcasc[(__u8)
+                               card->info.mcl_level[1]];
+                       card->info.mcl_level[2] = (char) _ebcasc[(__u8)
+                               card->info.mcl_level[2]];
+                       card->info.mcl_level[3] = (char) _ebcasc[(__u8)
+                               card->info.mcl_level[3]];
+                       card->info.mcl_level[QETH_MCL_LENGTH] = 0;
+               }
                break;
        default:
                memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
index 7e9afa720d4389b31e5a4fc1e1164ec23df5def0..21fbfc5afd027a2ca6dc5e6ea7ad4415cfe03d1f 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -188,7 +188,6 @@ static inline void set_eiem(unsigned long val)
 # define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
 #endif
 
-#define KERNEL_START (0x10100000 - 0x1000)
 #define arch_align_stack(x) (x)
 
 #endif
index e4183c6c7de3dca209ed8f99eb852719e3f31bfb..1c1207472bb45657553ff349cf081c44840b310c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -603,6 +603,10 @@ static inline struct address_space *page_mapping(struct page *page)
 
        if (unlikely(PageSwapCache(page)))
                mapping = &swapper_space;
+#ifdef CONFIG_SLUB
+       else if (unlikely(PageSlab(page)))
+               mapping = NULL;
+#endif
        else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
                mapping = NULL;
        return mapping;
index 588c99da030792babb3c643b89e645c8abebdb2b..329ce0172074a8270b662e3850118d08d42c0dc0 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -353,9 +353,40 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
                 * it should be restarted.
                 */
                if (timr->it.real.interval.tv64 != 0) {
+                       ktime_t now = hrtimer_cb_get_time(timer);
+
+                       /*
+                        * FIXME: What we really want, is to stop this
+                        * timer completely and restart it in case the
+                        * SIG_IGN is removed. This is a non trivial
+                        * change which involves sighand locking
+                        * (sigh !), which we don't want to do late in
+                        * the release cycle.
+                        *
+                        * For now we just let timers with an interval
+                        * less than a jiffie expire every jiffie to
+                        * avoid softirq starvation in case of SIG_IGN
+                        * and a very small interval, which would put
+                        * the timer right back on the softirq pending
+                        * list. By moving now ahead of time we trick
+                        * hrtimer_forward() to expire the timer
+                        * later, while we still maintain the overrun
+                        * accuracy, but have some inconsistency in
+                        * the timer_gettime() case. This is at least
+                        * better than a starved softirq. A more
+                        * complex fix which solves also another related
+                        * inconsistency is already in the pipeline.
+                        */
+#ifdef CONFIG_HIGH_RES_TIMERS
+                       {
+                               ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);
+
+                               if (timr->it.real.interval.tv64 < kj.tv64)
+                                       now = ktime_add(now, kj);
+                       }
+#endif
                        timr->it_overrun +=
-                               hrtimer_forward(timer,
-                                               hrtimer_cb_get_time(timer),
+                               hrtimer_forward(timer, now,
                                                timr->it.real.interval);
                        ret = HRTIMER_RESTART;
                        ++timr->it_requeue_pending;
index 68b9ad2ef1d6917c28721419627fa9fd4a29e3b3..906ed402f7cabda336a73d6d0d297b862b0d2932 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1536,9 +1536,14 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
         * vma->vm_start/vm_end cannot change under us because the caller
         * is required to hold the mmap_sem in read mode.  We need the
         * anon_vma lock to serialize against concurrent expand_stacks.
+        * Also guard against wrapping around to address 0.
         */
-       address += 4 + PAGE_SIZE - 1;
-       address &= PAGE_MASK;
+       if (address < PAGE_ALIGN(address+4))
+               address = PAGE_ALIGN(address+4);
+       else {
+               anon_vma_unlock(vma);
+               return -ENOMEM;
+       }
        error = 0;
 
        /* Somebody else might have raced and expanded it already */
index 7ea2d981a9328490df6dc684b6f1768fe0304f3d..356f067484e393634176357f193b9565cfdb2814 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -67,6 +67,11 @@ struct ip_vs_sync_conn_options {
        struct ip_vs_seq        out_seq;        /* outgoing seq. struct */
 };
 
+struct ip_vs_sync_thread_data {
+       struct completion *startup;
+       int state;
+};
+
 #define IP_VS_SYNC_CONN_TIMEOUT (3*60*HZ)
 #define SIMPLE_CONN_SIZE  (sizeof(struct ip_vs_sync_conn))
 #define FULL_CONN_SIZE  \
@@ -751,6 +756,7 @@ static int sync_thread(void *startup)
        mm_segment_t oldmm;
        int state;
        const char *name;
+       struct ip_vs_sync_thread_data *tinfo = startup;
 
        /* increase the module use count */
        ip_vs_use_count_inc();
@@ -789,7 +795,14 @@ static int sync_thread(void *startup)
        add_wait_queue(&sync_wait, &wait);
 
        set_sync_pid(state, current->pid);
-       complete((struct completion *)startup);
+       complete(tinfo->startup);
+
+       /*
+        * once we call the completion queue above, we should
+        * null out that reference, since its allocated on the
+        * stack of the creating kernel thread
+        */
+       tinfo->startup = NULL;
 
        /* processing master/backup loop here */
        if (state == IP_VS_STATE_MASTER)
@@ -801,6 +814,14 @@ static int sync_thread(void *startup)
        remove_wait_queue(&sync_wait, &wait);
 
        /* thread exits */
+
+       /*
+        * If we weren't explicitly stopped, then we
+        * exited in error, and should undo our state
+        */
+       if ((!stop_master_sync) && (!stop_backup_sync))
+               ip_vs_sync_state -= tinfo->state;
+
        set_sync_pid(state, 0);
        IP_VS_INFO("sync thread stopped!\n");
 
@@ -812,6 +833,11 @@ static int sync_thread(void *startup)
        set_stop_sync(state, 0);
        wake_up(&stop_sync_wait);
 
+       /*
+        * we need to free the structure that was allocated
+        * for us in start_sync_thread
+        */
+       kfree(tinfo);
        return 0;
 }
 
@@ -838,11 +864,19 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
 {
        DECLARE_COMPLETION_ONSTACK(startup);
        pid_t pid;
+       struct ip_vs_sync_thread_data *tinfo;
 
        if ((state == IP_VS_STATE_MASTER && sync_master_pid) ||
            (state == IP_VS_STATE_BACKUP && sync_backup_pid))
                return -EEXIST;
 
+       /*
+        * Note that tinfo will be freed in sync_thread on exit
+        */
+       tinfo = kmalloc(sizeof(struct ip_vs_sync_thread_data), GFP_KERNEL);
+       if (!tinfo)
+               return -ENOMEM;
+
        IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid);
        IP_VS_DBG(7, "Each ip_vs_sync_conn entry need %Zd bytes\n",
                  sizeof(struct ip_vs_sync_conn));
@@ -858,8 +892,11 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
                ip_vs_backup_syncid = syncid;
        }
 
+       tinfo->state = state;
+       tinfo->startup = &startup;
+
   repeat:
-       if ((pid = kernel_thread(fork_sync_thread, &startup, 0)) < 0) {
+       if ((pid = kernel_thread(fork_sync_thread, tinfo, 0)) < 0) {
                IP_VS_ERR("could not create fork_sync_thread due to %d... "
                          "retrying.\n", pid);
                msleep_interruptible(1000);
index 591c4422205e4bb9e76d3b7c71ca1d1c13fc8795..cc9102c5b588319e09b9769302bd5704e9bfb8e3 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -640,6 +640,7 @@ static int rxrpc_send_data(struct kiocb *iocb,
                        goto efault;
                sp->remain -= copy;
                skb->mark += copy;
+               copied += copy;
 
                len -= copy;
                segment -= copy;
@@ -709,6 +710,8 @@ static int rxrpc_send_data(struct kiocb *iocb,
 
        } while (segment > 0);
 
+success:
+       ret = copied;
 out:
        call->tx_pending = skb;
        _leave(" = %d", ret);
@@ -725,7 +728,7 @@ call_aborted:
 
 maybe_error:
        if (copied)
-               ret = copied;
+               goto success;
        goto out;
 
 efault:
index 85f3f43a6cca402a3339aa4ef8317d7b41eaaa00..dfacb9c2a6e3861837b1a25b2d919bcc19b88d5a 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1729,7 +1729,7 @@ int xfrm_state_mtu(struct xfrm_state *x, int mtu)
            x->type && x->type->get_mtu)
                res = x->type->get_mtu(x, mtu);
        else
-               res = mtu;
+               res = mtu - x->props.header_len;
        spin_unlock_bh(&x->lock);
        return res;
 }