u32 vlanctl_bits;
u32 driver_data;
u32 register_size;
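+ /* user-visible rx checksum offload state; NVREG_TXRXCTL_RXCHECK itself
+  * may remain set even when this is 0, since vlan support depends on
+  * rx checksum offload (see nv_set_rx_csum).
+  */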
+ int rx_csum;
void __iomem *base;
tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
else
#endif
- tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
+ tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
+ NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
/* vlan tag */
if (np->vlangrp && vlan_tx_tag_present(skb)) {
}
}
}
- if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) {
+ if (np->rx_csum) {
flags &= NV_RX2_CHECKSUMMASK;
if (flags == NV_RX2_CHECKSUMOK1 ||
flags == NV_RX2_CHECKSUMOK2 ||
memset(mask, 0, sizeof(mask));
if (dev->flags & IFF_PROMISC) {
- printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
pff |= NVREG_PFF_PROMISC;
} else {
pff |= NVREG_PFF_MYADDR;
dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}
-static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
+static irqreturn_t nv_nic_irq(int foo, void *data)
{
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
return IRQ_RETVAL(i);
}
-static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
+static irqreturn_t nv_nic_irq_tx(int foo, void *data)
{
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 events;
int i;
+ unsigned long flags;
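+ /* flags for spin_lock_irqsave() below: presumably needed because this
+  * handler can be reached from contexts where interrupts are already
+  * disabled, so a plain spin_lock_irq()/spin_unlock_irq() pair would
+  * re-enable them unconditionally.
+  */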
dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
if (!(events & np->irqmask))
break;
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
nv_tx_done(dev);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
if (events & (NVREG_IRQ_TX_ERR)) {
dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
dev->name, events);
}
if (i > max_interrupt_work) {
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
/* disable interrupts on the nic */
writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
pci_push(base);
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
break;
}
#endif
#ifdef CONFIG_FORCEDETH_NAPI
-static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
+static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
struct net_device *dev = (struct net_device *) data;
u8 __iomem *base = get_hwbase(dev);
return IRQ_HANDLED;
}
#else
-static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
+static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 events;
int i;
+ unsigned long flags;
dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
nv_rx_process(dev, dev->weight);
if (nv_alloc_rx(dev)) {
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
}
if (i > max_interrupt_work) {
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
/* disable interrupts on the nic */
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
pci_push(base);
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
break;
}
}
}
#endif
-static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
+static irqreturn_t nv_nic_irq_other(int foo, void *data)
{
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 events;
int i;
+ unsigned long flags;
dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
break;
if (events & NVREG_IRQ_LINK) {
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
nv_link_irq(dev);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
}
if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
nv_linkchange(dev);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
np->link_timeout = jiffies + LINK_TIMEOUT;
}
if (events & (NVREG_IRQ_UNKNOWN)) {
dev->name, events);
}
if (i > max_interrupt_work) {
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
/* disable interrupts on the nic */
writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
pci_push(base);
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
break;
}
return IRQ_RETVAL(i);
}
-static irqreturn_t nv_nic_irq_test(int foo, void *data, struct pt_regs *regs)
+static irqreturn_t nv_nic_irq_test(int foo, void *data)
{
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
pci_push(base);
if (!using_multi_irqs(dev)) {
- nv_nic_irq(0, dev, NULL);
+ nv_nic_irq(0, dev);
if (np->msi_flags & NV_MSI_X_ENABLED)
enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
else
enable_irq_lockdep(dev->irq);
} else {
if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
- nv_nic_irq_rx(0, dev, NULL);
+ nv_nic_irq_rx(0, dev);
enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
}
if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
- nv_nic_irq_tx(0, dev, NULL);
+ nv_nic_irq_tx(0, dev);
enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
}
if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
- nv_nic_irq_other(0, dev, NULL);
+ nv_nic_irq_other(0, dev);
enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
}
}
static u32 nv_get_rx_csum(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
- return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0;
+ return (np->rx_csum) != 0;
}
static int nv_set_rx_csum(struct net_device *dev, u32 data)
int retcode = 0;
if (np->driver_data & DEV_HAS_CHECKSUM) {
-
- if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) ||
- (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) {
- /* already set or unset */
- return 0;
- }
-
if (data) {
+ np->rx_csum = 1;
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
- } else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) {
- np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
} else {
- printk(KERN_INFO "Can not disable rx checksum if vlan is enabled\n");
- return -EINVAL;
+ np->rx_csum = 0;
+ /* vlan is dependent on rx checksum offload */
+ if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
+ np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
}
-
if (netif_running(dev)) {
spin_lock_irq(&np->lock);
writel(np->txrxctl_bits, base + NvRegTxRxControl);
/* setup packet for tx */
pkt_len = ETH_DATA_LEN;
tx_skb = dev_alloc_skb(pkt_len);
+ if (!tx_skb) {
+ printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
+ " of %s\n", dev->name);
+ ret = 0;
+ goto out;
+ }
pkt_data = skb_put(tx_skb, pkt_len);
for (i = 0; i < pkt_len; i++)
pkt_data[i] = (u8)(i & 0xff);
tx_skb->end-tx_skb->data,
PCI_DMA_TODEVICE);
dev_kfree_skb_any(tx_skb);
-
+ out:
/* stop engines */
nv_stop_rx(dev);
nv_stop_tx(dev);
}
}
-static struct ethtool_ops ops = {
+static const struct ethtool_ops ops = {
.get_drvinfo = nv_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_wol = nv_get_wol,
np->pkt_limit = NV_PKTLIMIT_2;
if (id->driver_data & DEV_HAS_CHECKSUM) {
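+ /* rx checksum offload defaults to on for hardware with DEV_HAS_CHECKSUM */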
+ np->rx_csum = 1;
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
#ifdef NETIF_F_TSO