X-Git-Url: http://git.samba.org/samba.git/?a=blobdiff_plain;f=drivers%2Fnet%2Fpcnet32.c;h=fe0558242835c908eeeda19059f4185889aa5c43;hb=df27f4a610e22e8c8c740286368cc13e0600f22c;hp=fc08c4af506ca8194b7f4d3854181fdc5eed43b4;hpb=1323523f505606cfd24af6122369afddefc3b09d;p=sfrench%2Fcifs-2.6.git

diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index fc08c4af506c..fe0558242835 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -58,18 +58,15 @@ static const char *const version =
  * PCI device identifiers for "new style" Linux PCI Device Drivers
  */
 static struct pci_device_id pcnet32_pci_tbl[] = {
-	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
-	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
 
 	/*
 	 * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
 	 * the incorrect vendor id.
 	 */
-	{ PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE,
-	  PCI_ANY_ID, PCI_ANY_ID,
-	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0},
+	{ PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
+	  .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
 
 	{ }	/* terminate list */
 };
@@ -188,6 +185,23 @@ static int homepna[MAX_UNITS];
 
 #define PCNET32_TOTAL_SIZE	0x20
 
+#define CSR0		0
+#define CSR0_INIT	0x1
+#define CSR0_START	0x2
+#define CSR0_STOP	0x4
+#define CSR0_TXPOLL	0x8
+#define CSR0_INTEN	0x40
+#define CSR0_IDON	0x0100
+#define CSR0_NORMAL	(CSR0_START | CSR0_INTEN)
+#define PCNET32_INIT_LOW	1
+#define PCNET32_INIT_HIGH	2
+#define CSR3		3
+#define CSR4		4
+#define CSR5		5
+#define CSR5_SUSPEND	0x0001
+#define CSR15		15
+#define PCNET32_MC_FILTER	8
+
 /* The PCNET32 Rx and Tx ring descriptors. */
 struct pcnet32_rx_head {
 	u32	base;
@@ -277,7 +291,6 @@ struct pcnet32_private {
 	u32	phymask;
 };
 
-static void pcnet32_probe_vlbus(void);
 static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
 static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
 static int pcnet32_open(struct net_device *);
@@ -309,12 +322,6 @@ static int pcnet32_alloc_ring(struct net_device *dev, char *name);
 static void pcnet32_free_ring(struct net_device *dev);
 static void pcnet32_check_media(struct net_device *dev, int verbose);
 
-enum pci_flags_bit {
-	PCI_USES_IO = 1, PCI_USES_MEM = 2, PCI_USES_MASTER = 4,
-	PCI_ADDR0 = 0x10 << 0, PCI_ADDR1 = 0x10 << 1, PCI_ADDR2 =
-	    0x10 << 2, PCI_ADDR3 = 0x10 << 3,
-};
-
 static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
 {
 	outw(index, addr + PCNET32_WIO_RAP);
@@ -425,6 +432,219 @@ static struct pcnet32_access pcnet32_dwio = {
 	.reset = pcnet32_dwio_reset
 };
 
+static void pcnet32_netif_stop(struct net_device *dev)
+{
+	dev->trans_start = jiffies;
+	netif_poll_disable(dev);
+	netif_tx_disable(dev);
+}
+
+static void pcnet32_netif_start(struct net_device *dev)
+{
+	netif_wake_queue(dev);
+	netif_poll_enable(dev);
+}
+
+/*
+ * Allocate space for the new sized tx ring.
+ * Free old resources
+ * Save new resources.
+ * Any failure keeps old resources.
+ * Must be called with lp->lock held.
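+ * (size is log2 of the descriptor count: e.g. a request for 100 Tx
+ * descriptors is rounded up by pcnet32_set_ringparam to size = 7,
+ * so the new ring has 1 << 7 = 128 entries and tx_len_bits = 7 << 12.)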
+ */
+static void pcnet32_realloc_tx_ring(struct net_device *dev,
+				    struct pcnet32_private *lp,
+				    unsigned int size)
+{
+	dma_addr_t new_ring_dma_addr;
+	dma_addr_t *new_dma_addr_list;
+	struct pcnet32_tx_head *new_tx_ring;
+	struct sk_buff **new_skb_list;
+
+	pcnet32_purge_tx_ring(dev);
+
+	new_tx_ring = pci_alloc_consistent(lp->pci_dev,
+					   sizeof(struct pcnet32_tx_head) *
+					   (1 << size),
+					   &new_ring_dma_addr);
+	if (new_tx_ring == NULL) {
+		if (netif_msg_drv(lp))
+			printk("\n" KERN_ERR
+			       "%s: Consistent memory allocation failed.\n",
+			       dev->name);
+		return;
+	}
+	memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
+
+	new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
+				    GFP_ATOMIC);
+	if (!new_dma_addr_list) {
+		if (netif_msg_drv(lp))
+			printk("\n" KERN_ERR
+			       "%s: Memory allocation failed.\n", dev->name);
+		goto free_new_tx_ring;
+	}
+
+	new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
+			       GFP_ATOMIC);
+	if (!new_skb_list) {
+		if (netif_msg_drv(lp))
+			printk("\n" KERN_ERR
+			       "%s: Memory allocation failed.\n", dev->name);
+		goto free_new_lists;
+	}
+
+	kfree(lp->tx_skbuff);
+	kfree(lp->tx_dma_addr);
+	pci_free_consistent(lp->pci_dev,
+			    sizeof(struct pcnet32_tx_head) *
+			    lp->tx_ring_size, lp->tx_ring,
+			    lp->tx_ring_dma_addr);
+
+	lp->tx_ring_size = (1 << size);
+	lp->tx_mod_mask = lp->tx_ring_size - 1;
+	lp->tx_len_bits = (size << 12);
+	lp->tx_ring = new_tx_ring;
+	lp->tx_ring_dma_addr = new_ring_dma_addr;
+	lp->tx_dma_addr = new_dma_addr_list;
+	lp->tx_skbuff = new_skb_list;
+	return;
+
+      free_new_lists:
+	kfree(new_dma_addr_list);
+      free_new_tx_ring:
+	pci_free_consistent(lp->pci_dev,
+			    sizeof(struct pcnet32_tx_head) *
+			    (1 << size),
+			    new_tx_ring,
+			    new_ring_dma_addr);
+	return;
+}
+
+/*
+ * Allocate space for the new sized rx ring.
+ * Re-use old receive buffers.
+ *   alloc extra buffers
+ *   and free unneeded buffers
+ * Save new resources.
+ * Any failure keeps old resources.
+ * Must be called with lp->lock held.
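+ * (Buffers in the overlap of the old and new rings are carried over
+ * as-is; only the extra entries get freshly allocated skbs, and on
+ * failure those new skbs are released again.)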
+ */
+static void pcnet32_realloc_rx_ring(struct net_device *dev,
+				    struct pcnet32_private *lp,
+				    unsigned int size)
+{
+	dma_addr_t new_ring_dma_addr;
+	dma_addr_t *new_dma_addr_list;
+	struct pcnet32_rx_head *new_rx_ring;
+	struct sk_buff **new_skb_list;
+	int new, overlap;
+
+	new_rx_ring = pci_alloc_consistent(lp->pci_dev,
+					   sizeof(struct pcnet32_rx_head) *
+					   (1 << size),
+					   &new_ring_dma_addr);
+	if (new_rx_ring == NULL) {
+		if (netif_msg_drv(lp))
+			printk("\n" KERN_ERR
+			       "%s: Consistent memory allocation failed.\n",
+			       dev->name);
+		return;
+	}
+	memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
+
+	new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
+				    GFP_ATOMIC);
+	if (!new_dma_addr_list) {
+		if (netif_msg_drv(lp))
+			printk("\n" KERN_ERR
+			       "%s: Memory allocation failed.\n", dev->name);
+		goto free_new_rx_ring;
+	}
+
+	new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
+			       GFP_ATOMIC);
+	if (!new_skb_list) {
+		if (netif_msg_drv(lp))
+			printk("\n" KERN_ERR
+			       "%s: Memory allocation failed.\n", dev->name);
+		goto free_new_lists;
+	}
+
+	/* first copy the current receive buffers */
+	overlap = min(size, lp->rx_ring_size);
+	for (new = 0; new < overlap; new++) {
+		new_rx_ring[new] = lp->rx_ring[new];
+		new_dma_addr_list[new] = lp->rx_dma_addr[new];
+		new_skb_list[new] = lp->rx_skbuff[new];
+	}
+	/* now allocate any new buffers needed */
+	for (; new < size; new++) {
+		struct sk_buff *rx_skbuff;
+		new_skb_list[new] = dev_alloc_skb(PKT_BUF_SZ);
+		if (!(rx_skbuff = new_skb_list[new])) {
+			/* keep the original lists and buffers */
+			if (netif_msg_drv(lp))
+				printk(KERN_ERR
+				       "%s: pcnet32_realloc_rx_ring dev_alloc_skb failed.\n",
+				       dev->name);
+			goto free_all_new;
+		}
+		skb_reserve(rx_skbuff, 2);
+
+		new_dma_addr_list[new] =
+		    pci_map_single(lp->pci_dev, rx_skbuff->data,
+				   PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+		new_rx_ring[new].base = (u32) le32_to_cpu(new_dma_addr_list[new]);
+		new_rx_ring[new].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
+		new_rx_ring[new].status = le16_to_cpu(0x8000);
+	}
+	/* and free any unneeded buffers */
+	for (; new < lp->rx_ring_size; new++) {
+		if (lp->rx_skbuff[new]) {
+			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
+					 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(lp->rx_skbuff[new]);
+		}
+	}
+
+	kfree(lp->rx_skbuff);
+	kfree(lp->rx_dma_addr);
+	pci_free_consistent(lp->pci_dev,
+			    sizeof(struct pcnet32_rx_head) *
+			    lp->rx_ring_size, lp->rx_ring,
+			    lp->rx_ring_dma_addr);
+
+	lp->rx_ring_size = (1 << size);
+	lp->rx_mod_mask = lp->rx_ring_size - 1;
+	lp->rx_len_bits = (size << 4);
+	lp->rx_ring = new_rx_ring;
+	lp->rx_ring_dma_addr = new_ring_dma_addr;
+	lp->rx_dma_addr = new_dma_addr_list;
+	lp->rx_skbuff = new_skb_list;
+	return;
+
+      free_all_new:
+	for (; --new >= lp->rx_ring_size;) {
+		if (new_skb_list[new]) {
+			pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
+					 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(new_skb_list[new]);
+		}
+	}
+	kfree(new_skb_list);
+      free_new_lists:
+	kfree(new_dma_addr_list);
+      free_new_rx_ring:
+	pci_free_consistent(lp->pci_dev,
+			    sizeof(struct pcnet32_rx_head) *
+			    (1 << size),
+			    new_rx_ring,
+			    new_ring_dma_addr);
+	return;
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void pcnet32_poll_controller(struct net_device *dev)
 {
@@ -525,10 +745,10 @@ static void pcnet32_get_ringparam(struct net_device *dev,
 {
 	struct pcnet32_private *lp = dev->priv;
 
-	ering->tx_max_pending = TX_MAX_RING_SIZE - 1;
-	ering->tx_pending = lp->tx_ring_size - 1;
-	ering->rx_max_pending = RX_MAX_RING_SIZE - 1;
-	ering->rx_pending = lp->rx_ring_size - 1;
+	ering->tx_max_pending = TX_MAX_RING_SIZE;
+	ering->tx_pending = lp->tx_ring_size;
+	ering->rx_max_pending = RX_MAX_RING_SIZE;
+	ering->rx_pending = lp->rx_ring_size;
 }
 
 static int pcnet32_set_ringparam(struct net_device *dev,
@@ -536,56 +756,53 @@ static int pcnet32_set_ringparam(struct net_device *dev,
 {
 	struct pcnet32_private *lp = dev->priv;
 	unsigned long flags;
+	unsigned int size;
+	ulong ioaddr = dev->base_addr;
 	int i;
 
 	if (ering->rx_mini_pending || ering->rx_jumbo_pending)
 		return -EINVAL;
 
 	if (netif_running(dev))
-		pcnet32_close(dev);
+		pcnet32_netif_stop(dev);
 
 	spin_lock_irqsave(&lp->lock, flags);
-	pcnet32_free_ring(dev);
-	lp->tx_ring_size =
-	    min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
-	lp->rx_ring_size =
-	    min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
+	lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);	/* stop the chip */
+
+	size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
 
 	/* set the minimum ring size to 4, to allow the loopback test to work
 	 * unchanged.
 	 */
 	for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
-		if (lp->tx_ring_size <= (1 << i))
+		if (size <= (1 << i))
 			break;
 	}
-	lp->tx_ring_size = (1 << i);
-	lp->tx_mod_mask = lp->tx_ring_size - 1;
-	lp->tx_len_bits = (i << 12);
-
+	if ((1 << i) != lp->tx_ring_size)
+		pcnet32_realloc_tx_ring(dev, lp, i);
+
+	size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
 	for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
-		if (lp->rx_ring_size <= (1 << i))
+		if (size <= (1 << i))
 			break;
 	}
-	lp->rx_ring_size = (1 << i);
-	lp->rx_mod_mask = lp->rx_ring_size - 1;
-	lp->rx_len_bits = (i << 4);
+	if ((1 << i) != lp->rx_ring_size)
+		pcnet32_realloc_rx_ring(dev, lp, i);
+
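+	/* keep the receive ->poll weight consistent with the new ring size */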
+	dev->weight = lp->rx_ring_size / 2;
 
-	if (pcnet32_alloc_ring(dev, dev->name)) {
-		pcnet32_free_ring(dev);
-		spin_unlock_irqrestore(&lp->lock, flags);
-		return -ENOMEM;
+	if (netif_running(dev)) {
+		pcnet32_netif_start(dev);
+		pcnet32_restart(dev, CSR0_NORMAL);
 	}
 
 	spin_unlock_irqrestore(&lp->lock, flags);
 
-	if (pcnet32_debug & NETIF_MSG_DRV)
-		printk(KERN_INFO PFX
+	if (netif_msg_drv(lp))
+		printk(KERN_INFO
 		       "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name,
 		       lp->rx_ring_size, lp->tx_ring_size);
 
-	if (netif_running(dev))
-		pcnet32_open(dev);
-
 	return 0;
 }
 
@@ -845,6 +1062,43 @@ static int pcnet32_phys_id(struct net_device *dev, u32 data)
 	return 0;
 }
 
+/*
+ * lp->lock must be held.
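+ * (Returns 1 once the chip acknowledges SPND in CSR5, or 0 if the bit
+ * has not latched after roughly 200 polls of 1 ms each; the lock is
+ * dropped and retaken around each wait, hence the flags pointer.)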
+ */
+static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
+		int can_sleep)
+{
+	int csr5;
+	struct pcnet32_private *lp = dev->priv;
+	struct pcnet32_access *a = &lp->a;
+	ulong ioaddr = dev->base_addr;
+	int ticks;
+
+	/* set SUSPEND (SPND) - CSR5 bit 0 */
+	csr5 = a->read_csr(ioaddr, CSR5);
+	a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
+
+	/* poll waiting for bit to be set */
+	ticks = 0;
+	while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
+		spin_unlock_irqrestore(&lp->lock, *flags);
+		if (can_sleep)
+			msleep(1);
+		else
+			mdelay(1);
+		spin_lock_irqsave(&lp->lock, *flags);
+		ticks++;
+		if (ticks > 200) {
+			if (netif_msg_hw(lp))
+				printk(KERN_DEBUG
+				       "%s: Error getting into suspend!\n",
+				       dev->name);
+			return 0;
+		}
+	}
+	return 1;
+}
+
 #define PCNET32_REGS_PER_PHY 32
 #define PCNET32_MAX_PHYS 32
 static int pcnet32_get_regs_len(struct net_device *dev)
@@ -863,32 +1117,13 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 	struct pcnet32_private *lp = dev->priv;
 	struct pcnet32_access *a = &lp->a;
 	ulong ioaddr = dev->base_addr;
-	int ticks;
 	unsigned long flags;
 
 	spin_lock_irqsave(&lp->lock, flags);
 
-	csr0 = a->read_csr(ioaddr, 0);
-	if (!(csr0 & 0x0004)) {	/* If not stopped */
-		/* set SUSPEND (SPND) - CSR5 bit 0 */
-		a->write_csr(ioaddr, 5, 0x0001);
-
-		/* poll waiting for bit to be set */
-		ticks = 0;
-		while (!(a->read_csr(ioaddr, 5) & 0x0001)) {
-			spin_unlock_irqrestore(&lp->lock, flags);
-			mdelay(1);
-			spin_lock_irqsave(&lp->lock, flags);
-			ticks++;
-			if (ticks > 200) {
-				if (netif_msg_hw(lp))
-					printk(KERN_DEBUG
-					       "%s: Error getting into suspend!\n",
-					       dev->name);
-				break;
-			}
-		}
-	}
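+	/* suspend the chip rather than stop it, so a register dump
+	 * does not disturb normal operation; ethtool context may sleep */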
+	csr0 = a->read_csr(ioaddr, CSR0);
+	if (!(csr0 & CSR0_STOP))	/* If not stopped */
+		pcnet32_suspend(dev, &flags, 1);
 
 	/* read address PROM */
 	for (i = 0; i < 16; i += 2)
@@ -925,9 +1160,12 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 		}
 	}
 
-	if (!(csr0 & 0x0004)) {	/* If not stopped */
+	if (!(csr0 & CSR0_STOP)) {	/* If not stopped */
+		int csr5;
+
 		/* clear SUSPEND (SPND) - CSR5 bit 0 */
-		a->write_csr(ioaddr, 5, 0x0000);
+		csr5 = a->read_csr(ioaddr, CSR5);
+		a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
 	}
 
 	spin_unlock_irqrestore(&lp->lock, flags);
@@ -958,7 +1196,7 @@ static struct ethtool_ops pcnet32_ethtool_ops = {
 
 /* only probes for non-PCI devices, the rest are handled by
  * pci_register_driver via pcnet32_probe_pci */
-static void __devinit pcnet32_probe_vlbus(void)
+static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
 {
 	unsigned int *port, ioaddr;
 
@@ -1442,7 +1680,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, char *name)
 					   lp->tx_ring_size, &lp->tx_ring_dma_addr);
 	if (lp->tx_ring == NULL) {
-		if (pcnet32_debug & NETIF_MSG_DRV)
+		if (netif_msg_drv(lp))
 			printk("\n" KERN_ERR PFX
 			       "%s: Consistent memory allocation failed.\n",
 			       name);
@@ -1454,52 +1692,48 @@ static int pcnet32_alloc_ring(struct net_device *dev, char *name)
 					   lp->rx_ring_size, &lp->rx_ring_dma_addr);
 	if (lp->rx_ring == NULL) {
-		if (pcnet32_debug & NETIF_MSG_DRV)
+		if (netif_msg_drv(lp))
 			printk("\n" KERN_ERR PFX
 			       "%s: Consistent memory allocation failed.\n",
 			       name);
 		return -ENOMEM;
 	}
 
-	lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size,
+	lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
 				  GFP_ATOMIC);
 	if (!lp->tx_dma_addr) {
-		if (pcnet32_debug & NETIF_MSG_DRV)
+		if (netif_msg_drv(lp))
 			printk("\n" KERN_ERR PFX
 			       "%s: Memory allocation failed.\n", name);
 		return -ENOMEM;
 	}
-	memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
 
-	lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size,
+	lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
 				  GFP_ATOMIC);
 	if (!lp->rx_dma_addr) {
-		if (pcnet32_debug & NETIF_MSG_DRV)
+		if (netif_msg_drv(lp))
 			printk("\n" KERN_ERR PFX
 			       "%s: Memory allocation failed.\n", name);
 		return -ENOMEM;
 	}
-	memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
 
-	lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size,
+	lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
 				GFP_ATOMIC);
 	if (!lp->tx_skbuff) {
-		if (pcnet32_debug & NETIF_MSG_DRV)
+		if (netif_msg_drv(lp))
 			printk("\n" KERN_ERR PFX
 			       "%s: Memory allocation failed.\n", name);
 		return -ENOMEM;
 	}
-	memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
 
-	lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size,
+	lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
 				GFP_ATOMIC);
 	if (!lp->rx_skbuff) {
-		if (pcnet32_debug & NETIF_MSG_DRV)
+		if (netif_msg_drv(lp))
 			printk("\n" KERN_ERR PFX
 			       "%s: Memory allocation failed.\n", name);
 		return -ENOMEM;
 	}
-	memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
 
 	return 0;
 }
@@ -1547,7 +1781,7 @@ static int pcnet32_open(struct net_device *dev)
 	unsigned long flags;
 
 	if (request_irq(dev->irq, &pcnet32_interrupt,
-			lp->shared_irq ? SA_SHIRQ : 0, dev->name,
+			lp->shared_irq ? IRQF_SHARED : 0, dev->name,
 			(void *)dev)) {
 		return -EAGAIN;
 	}
@@ -2439,6 +2673,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
 	volatile struct pcnet32_init_block *ib = &lp->init_block;
 	volatile u16 *mcast_table = (u16 *) & ib->filter;
 	struct dev_mc_list *dmi = dev->mc_list;
+	unsigned long ioaddr = dev->base_addr;
 	char *addrs;
 	int i;
 	u32 crc;
@@ -2447,6 +2682,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
 	if (dev->flags & IFF_ALLMULTI) {
 		ib->filter[0] = 0xffffffff;
 		ib->filter[1] = 0xffffffff;
+		lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
+		lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
+		lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
+		lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
 		return;
 	}
 	/* clear the multicast filter */
@@ -2468,6 +2707,9 @@ static void pcnet32_load_multicast(struct net_device *dev)
 		    le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) |
 				(1 << (crc & 0xf)));
 	}
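+	/* push the updated logical address filter (CSR8-11) to the chip */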
+	for (i = 0; i < 4; i++)
+		lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
+				le16_to_cpu(mcast_table[i]));
 	return;
 }
 
@@ -2478,8 +2720,11 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
 {
 	unsigned long ioaddr = dev->base_addr, flags;
 	struct pcnet32_private *lp = dev->priv;
+	int csr15, suspended;
 
 	spin_lock_irqsave(&lp->lock, flags);
+	suspended = pcnet32_suspend(dev, &flags, 0);
+	csr15 = lp->a.read_csr(ioaddr, CSR15);
 	if (dev->flags & IFF_PROMISC) {
 		/* Log any net taps. */
 		if (netif_msg_hw(lp))
@@ -2488,15 +2733,24 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
 		lp->init_block.mode =
 		    le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
 				7);
+		lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
 	} else {
 		lp->init_block.mode =
 		    le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
+		lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
 		pcnet32_load_multicast(dev);
 	}
 
-	lp->a.write_csr(ioaddr, 0, 0x0004);	/* Temporarily stop the lance. */
-	pcnet32_restart(dev, 0x0042);	/* Resume normal operation */
-	netif_wake_queue(dev);
+	if (suspended) {
+		int csr5;
+
+		/* clear SUSPEND (SPND) - CSR5 bit 0 */
+		csr5 = lp->a.read_csr(ioaddr, CSR5);
+		lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
+	} else {
+		lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
+		pcnet32_restart(dev, CSR0_NORMAL);
+		netif_wake_queue(dev);
+	}
 
 	spin_unlock_irqrestore(&lp->lock, flags);
 }
@@ -2736,7 +2990,7 @@ static int __init pcnet32_init_module(void)
 
 	/* should we find any remaining VLbus devices ? */
 	if (pcnet32vlb)
-		pcnet32_probe_vlbus(pcnet32_portlist);
 
 	if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
 		printk(KERN_INFO PFX "%d cards_found.\n", cards_found);
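
Usage note: a quick way to exercise the new pcnet32_set_ringparam() path
is the ETHTOOL_SRINGPARAM ioctl, which is what `ethtool -G` issues. The
sketch below is illustrative only: the interface name "eth0" and the
ring values are placeholders, and error handling is trimmed.

	/* resize the pcnet32 rx/tx rings from userspace */
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_ringparam ering;
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ering, 0, sizeof(ering));	/* rx_mini/rx_jumbo must stay 0 */
		ering.cmd = ETHTOOL_SRINGPARAM;
		ering.rx_pending = 128;	/* rounded up to a power of two, minimum 4 */
		ering.tx_pending = 100;	/* capped at TX_MAX_RING_SIZE, then rounded */

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&ering;

		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			perror("SIOCETHTOOL");
		return 0;
	}

With this patch applied, the resize stops and restarts the chip in place
instead of going through a full pcnet32_close()/pcnet32_open() cycle, so
the request no longer tears the interface down.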