seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
seq += keyptr->count;
- seq += ktime_get_real().tv64;
+ seq += ktime_to_ns(ktime_get_real());
return seq;
}
* overlaps less than one time per MSL (2 minutes).
* Choosing a clock of 64 ns period is OK. (period of 274 s)
*/
- seq += ktime_get_real().tv64 >> 6;
+ seq += ktime_to_ns(ktime_get_real()) >> 6;
#if 0
printk("init_seq(%lx, %lx, %d, %d) = %d\n",
saddr, daddr, sport, dport, seq);
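The "64 ns period" comment above checks out: shifting a nanosecond clock right by 6 yields a 64 ns tick, and a 32-bit sequence-number clock then wraps every 2^32 ticks, about 274.9 s, comfortably above the 2-minute MSL. A standalone sketch of the arithmetic, plain userspace C rather than driver code:

/* Verify the 64 ns tick / ~274 s wrap figures quoted above. */
#include <stdio.h>

int main(void)
{
	unsigned long long tick_ns = 1ULL << 6;		/* ns clock >> 6 */
	unsigned long long wrap_ns = (1ULL << 32) * tick_ns;

	/* 4294967296 * 64 ns = 274877906944 ns, i.e. ~274.9 s */
	printf("tick %llu ns, wrap every ~%llu s\n",
	       tick_ns, wrap_ns / 1000000000ULL);
	return 0;
}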
seq = half_md4_transform(hash, keyptr->secret);
seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
- seq += ktime_get_real().tv64;
+ seq += ktime_to_ns(ktime_get_real());
seq &= (1ull << 48) - 1;
#if 0
printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
int get_card_from_id(int driver);
int indicate_status(int card, int event, ulong Channel, char *Data);
irqreturn_t interrupt_handler(int interrupt, void *cardptr);
-int sndpkt(int devId, int channel, struct sk_buff *data);
+int sndpkt(int devId, int channel, int ack, struct sk_buff *data);
void rcvpkt(int card, RspMessage *rcvmsg);
int command(isdn_ctrl *cmd);
int reset(int card);
#include "message.h"
#include "card.h"
-int sndpkt(int devId, int channel, struct sk_buff *data)
+int sndpkt(int devId, int channel, int ack, struct sk_buff *data)
{
LLData ReqLnkWrite;
int status;
outb(((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80,
sc_adapter[card]->ioport[sc_adapter[card]->shmem_pgport]);
- memcpy_toio(sc_adapter[card]->rambase + dest_rem, src, n);
+ memcpy_toio((void __iomem *)(sc_adapter[card]->rambase + dest_rem), src, n);
spin_unlock_irqrestore(&sc_adapter[card]->lock, flags);
pr_debug("%s: set page to %#x\n",sc_adapter[card]->devicename,
((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE)>>14)|0x80);
if (status & REG_INTSTS_RX) {
spin_lock(&ep->rx_lock);
- if (likely(__netif_rx_schedule_prep(dev, &ep->napi))) {
+ if (likely(netif_rx_schedule_prep(dev, &ep->napi))) {
wrl(ep, REG_INTEN, REG_INTEN_TX);
__netif_rx_schedule(dev, &ep->napi);
}
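The renamed prep call is, at heart, an atomic test-and-set gate: the context that wins it disables further RX interrupts and schedules the poll routine exactly once (the double-underscore variant apparently did not survive the 2.6.24 NAPI rework, hence the build fix). A minimal userspace sketch of that gate, with hypothetical names rather than the kernel API:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag sched = ATOMIC_FLAG_INIT;

/* Nonzero when the caller won the right to schedule polling. */
static int rx_schedule_prep(void)
{
	return !atomic_flag_test_and_set(&sched);
}

static void rx_poll_done(void)
{
	atomic_flag_clear(&sched);	/* re-arm, like netif_rx_complete() */
}

int main(void)
{
	if (rx_schedule_prep())
		printf("scheduled poll\n");
	if (!rx_schedule_prep())
		printf("already scheduled, skipped\n");
	rx_poll_done();
	return 0;
}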
*/
void bond_destroy(struct bonding *bond)
{
- unregister_netdevice(bond->dev);
bond_deinit(bond->dev);
bond_destroy_sysfs_entry(bond);
+ unregister_netdevice(bond->dev);
}
/*
bond_mc_list_destroy(bond);
/* Release the bonded slaves */
bond_release_all(bond_dev);
- unregister_netdevice(bond_dev);
bond_deinit(bond_dev);
+ unregister_netdevice(bond_dev);
}
#ifdef CONFIG_PROC_FS
spin_unlock_irqrestore(&adapter->stats_lock, flags);
return -EIO;
}
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
if (adapter->hw.media_type == e1000_media_type_copper) {
switch (data->reg_num) {
case PHY_CTRL:
DUPLEX_HALF;
retval = e1000_set_spd_dplx(adapter,
spddplx);
- if (retval) {
- spin_unlock_irqrestore(
- &adapter->stats_lock,
- flags);
+ if (retval)
return retval;
- }
}
if (netif_running(adapter->netdev))
e1000_reinit_locked(adapter);
break;
case M88E1000_PHY_SPEC_CTRL:
case M88E1000_EXT_PHY_SPEC_CTRL:
- if (e1000_phy_reset(&adapter->hw)) {
- spin_unlock_irqrestore(
- &adapter->stats_lock, flags);
+ if (e1000_phy_reset(&adapter->hw))
return -EIO;
- }
break;
}
} else {
break;
}
}
- spin_unlock_irqrestore(&adapter->stats_lock, flags);
break;
default:
return -EOPNOTSUPP;
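The e1000 hunks above replace per-branch unlocks with a single unlock point right after the locked PHY access, so every later error return runs unlocked and needs no cleanup. A userspace sketch of the same refactor with a pthread mutex; read_phy_reg() and the return codes are stand-ins, not driver code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the access that actually needs the lock. */
static int read_phy_reg(int reg, int *val) { *val = reg; return 0; }

static int mii_ioctl(int reg)
{
	int val;

	pthread_mutex_lock(&stats_lock);
	if (read_phy_reg(reg, &val)) {
		pthread_mutex_unlock(&stats_lock);
		return -1;
	}
	pthread_mutex_unlock(&stats_lock);	/* single unlock point */

	/* Everything below runs unlocked, so the error paths no longer
	 * carry their own unlock calls. */
	if (val < 0)
		return -1;
	return val;
}

int main(void)
{
	printf("%d\n", mii_ioctl(3));
	return 0;
}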
tristate "Freescale Ethernet Driver"
depends on CPM1 || CPM2
select MII
+ select PHYLIB
config FS_ENET_HAS_SCC
bool "Chip has an SCC usable for ethernet"
config FS_ENET_HAS_FCC
bool "Chip has an FCC usable for ethernet"
depends on FS_ENET && CPM2
- select MDIO_BITBANG
default y
config FS_ENET_HAS_FEC
bool "Chip has an FEC usable for ethernet"
depends on FS_ENET && CPM1
+ select FS_ENET_MDIO_FEC
default y
+config FS_ENET_MDIO_FEC
+ tristate "MDIO driver for FEC"
+ depends on FS_ENET && CPM1
+
+config FS_ENET_MDIO_FCC
+ tristate "MDIO driver for FCC"
+ depends on FS_ENET && CPM2
+ select MDIO_BITBANG
obj-$(CONFIG_FS_ENET) += fs_enet.o
-obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o mii-fec.o
-obj-$(CONFIG_CPM2) += mac-fcc.o mii-bitbang.o
+fs_enet-$(CONFIG_FS_ENET_HAS_SCC) += mac-scc.o
+fs_enet-$(CONFIG_FS_ENET_HAS_FEC) += mac-fec.o
+fs_enet-$(CONFIG_FS_ENET_HAS_FCC) += mac-fcc.o
-fs_enet-objs := fs_enet-main.o
+ifeq ($(CONFIG_PPC_CPM_NEW_BINDING),y)
+obj-$(CONFIG_FS_ENET_MDIO_FEC) += mii-fec.o
+obj-$(CONFIG_FS_ENET_MDIO_FCC) += mii-bitbang.o
+else
+fs_enet-$(CONFIG_FS_ENET_MDIO_FEC) += mii-fec.o
+fs_enet-$(CONFIG_FS_ENET_MDIO_FCC) += mii-bitbang.o
+endif
+
+fs_enet-objs := fs_enet-main.o $(fs_enet-m)
FIFO_PTR_FRAMELEN(len));
ndev->trans_start = jiffies;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
netif_stop_queue(ndev);
spin_unlock_irq(&priv->lock);
if (unlikely(skb == NULL)) {
printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
ndev->name);
- dev->stats.rx_dropped++;
+ ndev->stats.rx_dropped++;
return;
}
#include "s2io.h"
#include "s2io-regs.h"
-#define DRV_VERSION "2.0.26.5"
+#define DRV_VERSION "2.0.26.6"
/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
return err;
}
+
+static void remove_msix_isr(struct s2io_nic *sp)
+{
+ int i;
+ u16 msi_control;
+
+ for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
+ if (sp->s2io_entries[i].in_use ==
+ MSIX_REGISTERED_SUCCESS) {
+ int vector = sp->entries[i].vector;
+ void *arg = sp->s2io_entries[i].arg;
+ free_irq(vector, arg);
+ }
+ }
+
+ kfree(sp->entries);
+ kfree(sp->s2io_entries);
+ sp->entries = NULL;
+ sp->s2io_entries = NULL;
+
+ pci_read_config_word(sp->pdev, 0x42, &msi_control);
+ msi_control &= 0xFFFE; /* Disable MSI */
+ pci_write_config_word(sp->pdev, 0x42, msi_control);
+
+ pci_disable_msix(sp->pdev);
+}
+
+static void remove_inta_isr(struct s2io_nic *sp)
+{
+ struct net_device *dev = sp->dev;
+
+ free_irq(sp->pdev->irq, dev);
+}
+
/* ********************************************************* *
* Functions defined below concern the OS part of the driver *
* ********************************************************* */
int ret = s2io_enable_msi_x(sp);
if (!ret) {
- u16 msi_control;
-
ret = s2io_test_msi(sp);
-
/* rollback MSI-X, will re-enable during add_isr() */
- kfree(sp->entries);
- sp->mac_control.stats_info->sw_stat.mem_freed +=
- (MAX_REQUESTED_MSI_X *
- sizeof(struct msix_entry));
- kfree(sp->s2io_entries);
- sp->mac_control.stats_info->sw_stat.mem_freed +=
- (MAX_REQUESTED_MSI_X *
- sizeof(struct s2io_msix_entry));
- sp->entries = NULL;
- sp->s2io_entries = NULL;
-
- pci_read_config_word(sp->pdev, 0x42, &msi_control);
- msi_control &= 0xFFFE; /* Disable MSI */
- pci_write_config_word(sp->pdev, 0x42, msi_control);
-
- pci_disable_msix(sp->pdev);
-
+ remove_msix_isr(sp);
}
if (ret) {
}
}
if (err) {
+ remove_msix_isr(sp);
DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
"failed\n", dev->name, i);
- DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
- return -1;
+ DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n",
+ dev->name);
+ sp->config.intr_type = INTA;
+ break;
}
sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
}
- printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
- printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
+ if (!err) {
+ printk(KERN_INFO "MSI-X-TX %d entries enabled\n",
+ msix_tx_cnt);
+ printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
+ msix_rx_cnt);
+ }
}
if (sp->config.intr_type == INTA) {
err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
}
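Note the new failure shape in the MSI-X path above: a partial registration is rolled back with remove_msix_isr() and the driver falls back to INTA rather than aborting the open. A standalone sketch of that fall-back pattern (the names and the simulated failure are illustrative):

#include <stdio.h>

enum intr_type { INTA, MSI_X };

static int request_vector(int i) { return i == 2 ? -1 : 0; }	/* 3rd fails */
static void undo_vectors(int n) { printf("freed %d vectors\n", n); }

int main(void)
{
	enum intr_type type = MSI_X;
	int i, registered = 0;

	for (i = 0; i < 4; i++) {
		if (request_vector(i)) {
			undo_vectors(registered);	/* like remove_msix_isr() */
			type = INTA;			/* fall back, don't fail */
			break;
		}
		registered++;
	}
	printf("using %s\n", type == MSI_X ? "MSI-X" : "INTA");
	return 0;
}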
static void s2io_rem_isr(struct s2io_nic * sp)
{
- struct net_device *dev = sp->dev;
- struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
-
- if (sp->config.intr_type == MSI_X) {
- int i;
- u16 msi_control;
-
- for (i=1; (sp->s2io_entries[i].in_use ==
- MSIX_REGISTERED_SUCCESS); i++) {
- int vector = sp->entries[i].vector;
- void *arg = sp->s2io_entries[i].arg;
-
- synchronize_irq(vector);
- free_irq(vector, arg);
- }
-
- kfree(sp->entries);
- stats->mem_freed +=
- (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
- kfree(sp->s2io_entries);
- stats->mem_freed +=
- (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
- sp->entries = NULL;
- sp->s2io_entries = NULL;
-
- pci_read_config_word(sp->pdev, 0x42, &msi_control);
- msi_control &= 0xFFFE; /* Disable MSI */
- pci_write_config_word(sp->pdev, 0x42, msi_control);
-
- pci_disable_msix(sp->pdev);
- } else {
- synchronize_irq(sp->pdev->irq);
- free_irq(sp->pdev->irq, dev);
- }
+ if (sp->config.intr_type == MSI_X)
+ remove_msix_isr(sp);
+ else
+ remove_inta_isr(sp);
}
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
/* Hardware disappeared */
IWL_WARNING("HARDWARE GONE?? INTA == 0x%080x\n", inta);
- goto none;
+ goto unplugged;
}
IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
/* iwl_irq_tasklet() will service interrupts and re-enable them */
tasklet_schedule(&priv->irq_tasklet);
+unplugged:
spin_unlock(&priv->lock);
return IRQ_HANDLED;
gfp_t gfp)
{
struct sk_buff *skb;
- int hdr_len;
- hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header);
- skb = alloc_skb_fclone(size + hdr_len, gfp);
+ skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
if (skb) {
skb->truesize += mem;
if (sk_stream_wmem_schedule(sk, skb->truesize)) {
- skb_reserve(skb, hdr_len);
+ /*
+ * Make sure that we have exactly size bytes
+ * available to the caller, no more, no less.
+ */
+ skb_reserve(skb, skb_tailroom(skb) - size);
return skb;
}
__kfree_skb(skb);
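The replacement reserve is the heart of this fix: alloc_skb_fclone() can round the allocation up, so reserving skb_tailroom(skb) - size advances the data pointer until exactly size bytes of tailroom remain, as the new comment promises. A toy model of the pointer arithmetic, using a hypothetical miniature skb rather than the kernel's struct sk_buff:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct mini_skb {
	unsigned char *head, *data, *tail, *end;
};

static size_t tailroom(const struct mini_skb *skb)
{
	return (size_t)(skb->end - skb->tail);
}

static void reserve(struct mini_skb *skb, size_t len)
{
	skb->data += len;
	skb->tail += len;
}

int main(void)
{
	size_t size = 100, rounded = 192;	/* allocator over-provisioned */
	unsigned char *buf = malloc(rounded);
	struct mini_skb skb = { buf, buf, buf, buf + rounded };

	reserve(&skb, tailroom(&skb) - size);
	assert(tailroom(&skb) == size);		/* exactly size bytes left */
	printf("tailroom: %zu\n", tailroom(&skb));
	free(buf);
	return 0;
}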
{ NET_ROSE, "rose", trans_net_rose_table },
{ NET_IPV6, "ipv6", trans_net_ipv6_table },
{ NET_X25, "x25", trans_net_x25_table },
- { NET_TR, "tr", trans_net_tr_table },
+ { NET_TR, "token-ring", trans_net_tr_table },
{ NET_DECNET, "decnet", trans_net_decnet_table },
/* NET_ECONET not used */
{ NET_SCTP, "sctp", trans_net_sctp_table },
nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
}
}
+
+ raw_notifier_chain_unregister(&netdev_chain, nb);
goto unlock;
}
i = (i + 1) & rt_hash_mask;
rthp = &rt_hash_table[i].chain;
+ if (need_resched())
+ cond_resched();
+
if (*rthp == NULL)
continue;
spin_lock_bh(rt_hash_lock_addr(i));
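rt_check_expire() walks the entire route hash table, so the added check lets the scan yield between buckets when a reschedule is pending, and it runs before the per-bucket lock is taken. Loosely analogous in userspace to yielding periodically inside a long scan; sched_yield() here is only a rough stand-in for cond_resched():

#include <sched.h>
#include <stdio.h>

int main(void)
{
	long i, sum = 0;

	for (i = 0; i < 1L << 20; i++) {
		sum += i;			/* stand-in for per-bucket work */
		if ((i & 0x3ff) == 0)
			sched_yield();		/* give up the CPU periodically */
	}
	printf("%ld\n", sum);
	return 0;
}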
if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
return 0;
+ if (!tp->packets_out)
+ goto out;
+
/* SACK fastpath:
* if the only SACK change is the increase of the end_seq of
* the first block then only apply that SACK block
(!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
tcp_update_reordering(sk, tp->fackets_out - reord, 0);
+out:
+
#if FASTRETRANS_DEBUG > 0
BUG_TRAP((int)tp->sacked_out >= 0);
BUG_TRAP((int)tp->lost_out >= 0);
}
tcp_verify_left_out(tp);
+ /* Too bad if TCP was application limited */
+ tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
+
/* Earlier loss recovery underway (see RFC4138; Appendix B).
* The last condition is necessary at least in tp->frto_counter case.
*/
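The clamp added above matters when the sender was application-limited: snd_cwnd can be stale and far larger than what is actually outstanding, so FRTO pins it to in-flight + 1. A worked example using the kernel's in-flight formula, packets_out - (sacked_out + lost_out) + retrans_out; the numbers are made up:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int packets_out = 3, sacked_out = 1, lost_out = 0,
		     retrans_out = 0;
	unsigned int in_flight = packets_out - (sacked_out + lost_out)
				 + retrans_out;
	unsigned int snd_cwnd = 40;	/* stale, application-limited value */

	snd_cwnd = min_u(snd_cwnd, in_flight + 1);
	printf("clamped cwnd: %u\n", snd_cwnd);	/* prints 3 */
	return 0;
}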
tcp_for_write_queue(skb, sk) {
if (skb == tcp_send_head(sk))
break;
+
+ TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
/*
* Count the retransmission made on RTO correctly (only when
* waiting for the first ACK and did not get it)...
} else {
if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
tp->undo_marker = 0;
- TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
+ TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
}
/* Don't lost mark skbs that were fwd transmitted after RTO */
/* See if we can take anything off of the retransmit queue. */
flag |= tcp_clean_rtx_queue(sk, &seq_rtt, prior_fackets);
+ if (tp->frto_counter)
+ frto_cwnd = tcp_process_frto(sk, flag);
/* Guarantee sacktag reordering detection against wrap-arounds */
if (before(tp->frto_highmark, tp->snd_una))
tp->frto_highmark = 0;
- if (tp->frto_counter)
- frto_cwnd = tcp_process_frto(sk, flag);
if (tcp_ack_is_dubious(sk, flag)) {
/* Advance CWND, if state allows this. */
{
struct Qdisc *q = dev->qdisc;
struct sk_buff *skb;
- int ret;
+ int ret = NETDEV_TX_BUSY;
/* Dequeue packet */
if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
spin_unlock(&dev->queue_lock);
HARD_TX_LOCK(dev, smp_processor_id());
- ret = dev_hard_start_xmit(skb, dev);
+ if (!netif_subqueue_stopped(dev, skb))
+ ret = dev_hard_start_xmit(skb, dev);
HARD_TX_UNLOCK(dev);
spin_lock(&dev->queue_lock);
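The two changes in this last hunk cooperate: ret now defaults to NETDEV_TX_BUSY, so when the packet's subqueue is stopped the driver transmit is skipped entirely and the busy status makes the caller requeue the skb instead of dropping it. A standalone sketch of the default-to-busy shape; the constants and names are illustrative:

#include <stdio.h>

#define TX_OK	0
#define TX_BUSY	1

static int subqueue_stopped = 1;		/* pretend the queue is full */
static int hard_start_xmit(void) { return TX_OK; }

int main(void)
{
	int ret = TX_BUSY;	/* assume busy unless we really transmit */

	if (!subqueue_stopped)
		ret = hard_start_xmit();

	if (ret == TX_BUSY)
		printf("requeue packet\n");	/* no drop, no leak */
	return 0;
}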