dev->nls = nls;
- dev->cn_queue = create_workqueue(dev->name);
+ dev->cn_queue = create_singlethread_workqueue(dev->name);
if (!dev->cn_queue) {
kfree(dev);
return NULL;
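
For context, a minimal sketch (not from this patch; names are illustrative) of why create_singlethread_workqueue() matters here: it serializes all queued work on one kernel thread, so two callbacks queued on the same connector device can never run concurrently.

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *cb_wq;	/* illustrative name */
static struct work_struct cb_work;

static void cb_func(struct work_struct *work)
{
	/* runs strictly serialized on the workqueue's single thread */
}

static int example_init(void)
{
	cb_wq = create_singlethread_workqueue("example");
	if (!cb_wq)
		return -ENOMEM;
	INIT_WORK(&cb_work, cb_func);
	queue_work(cb_wq, &cb_work);
	return 0;
}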
#include "bnx2x.h"
#include "bnx2x_init.h"
-#define DRV_MODULE_VERSION "1.40.22"
-#define DRV_MODULE_RELDATE "2007/11/27"
+#define DRV_MODULE_VERSION "1.42.3"
+#define DRV_MODULE_RELDATE "2008/3/9"
#define BNX2X_BC_VER 0x040200
/* Time in jiffies before concluding the transmitter is hung. */
cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
- switch (cmd->port) {
- case PORT_TP:
- if (!(bp->supported & SUPPORTED_TP)) {
- DP(NETIF_MSG_LINK, "TP not supported\n");
- return -EINVAL;
- }
-
- if (bp->phy_flags & PHY_XGXS_FLAG) {
- bnx2x_link_reset(bp);
- bnx2x_link_settings_supported(bp, SWITCH_CFG_1G);
- bnx2x_phy_deassert(bp);
- }
- break;
-
- case PORT_FIBRE:
- if (!(bp->supported & SUPPORTED_FIBRE)) {
- DP(NETIF_MSG_LINK, "FIBRE not supported\n");
- return -EINVAL;
- }
-
- if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
- bnx2x_link_reset(bp);
- bnx2x_link_settings_supported(bp, SWITCH_CFG_10G);
- bnx2x_phy_deassert(bp);
- }
- break;
-
- default:
- DP(NETIF_MSG_LINK, "Unknown port type\n");
- return -EINVAL;
- }
-
if (cmd->autoneg == AUTONEG_ENABLE) {
if (!(bp->supported & SUPPORTED_Autoneg)) {
DP(NETIF_MSG_LINK, "Aotoneg not supported\n");
* rx ring - must call napi_disable(), which
* schedule_timeout()'s if polling is already disabled.
*/
- work_done += gem_rx(gp, budget);
+ work_done += gem_rx(gp, budget - work_done);
if (work_done >= budget)
return work_done;
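
The fix above keeps the cumulative work_done within the NAPI budget. A schematic poll loop (illustrative, not the sungem code; process_rx() and more_rx_pending() are stand-ins) showing why each rx pass must be given only the remaining budget:

static int process_rx(int limit);	/* stand-in for gem_rx() */
static int more_rx_pending(void);	/* stand-in for a ring check */

static int example_poll(int budget)
{
	int work_done = 0;

	do {
		/* pass only the *remaining* budget, or a second loop
		 * iteration could overshoot the overall limit */
		work_done += process_rx(budget - work_done);
		if (work_done >= budget)
			return work_done;	/* stay on the poll list */
	} while (more_rx_pending());

	return work_done;			/* done; re-enable IRQs */
}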
#else /* #ifdef defined(CONFIG_IPV6) */
-static inline void sctp_v6_pf_init(void) { return 0; }
+static inline void sctp_v6_pf_init(void) { return; }
static inline void sctp_v6_pf_exit(void) { return; }
static inline int sctp_v6_protosw_init(void) { return 0; }
static inline void sctp_v6_protosw_exit(void) { return; }
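
The bug fixed above is a void stub returning a value. A minimal illustration of the pattern (CONFIG_FOO and the foo_* names are hypothetical): each stub's signature must mirror the real function exactly, or builds without the config option break.

#ifdef CONFIG_FOO			/* hypothetical config symbol */
int  foo_init(void);			/* real implementations */
void foo_exit(void);
#else
/* stubs must match the real return types: an int stub returns a
 * value, a void stub must not */
static inline int  foo_init(void) { return 0; }
static inline void foo_exit(void) { }
#endif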
static void p9_mux_flush_cb(struct p9_req *freq, void *a)
{
- p9_conn_req_callback cb;
int tag;
struct p9_conn *m;
struct p9_req *req, *rreq, *rptr;
freq->tcall->params.tflush.oldtag);
spin_lock(&m->lock);
- cb = NULL;
tag = freq->tcall->params.tflush.oldtag;
req = NULL;
list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
};
#endif
+static void atm_clip_exit_noproc(void);
+
static int __init atm_clip_init(void)
{
neigh_table_init_no_netlink(&clip_tbl);
struct proc_dir_entry *p;
p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops);
+ if (!p) {
+ printk(KERN_ERR "Unable to initialize "
+ "/proc/net/atm/arp\n");
+ atm_clip_exit_noproc();
+ return -ENOMEM;
+ }
}
#endif
return 0;
}
-static void __exit atm_clip_exit(void)
+static void atm_clip_exit_noproc(void)
{
struct net_device *dev, *next;
- remove_proc_entry("arp", atm_proc_root);
-
unregister_inetaddr_notifier(&clip_inet_notifier);
unregister_netdevice_notifier(&clip_dev_notifier);
clip_tbl_hook = NULL;
}
+static void __exit atm_clip_exit(void)
+{
+ remove_proc_entry("arp", atm_proc_root);
+
+ atm_clip_exit_noproc();
+}
+
module_init(atm_clip_init);
module_exit(atm_clip_exit);
MODULE_AUTHOR("Werner Almesberger");
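
The refactor above splits teardown so the init error path can undo everything except the /proc entry it never created. A condensed sketch of the pattern (the example_* names are hypothetical):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/errno.h>

static const struct file_operations example_fops;

static void example_exit_noproc(void)
{
	/* undo everything registered before proc_create() */
}

static int __init example_init(void)
{
	struct proc_dir_entry *p;

	/* ... earlier registrations ... */
	p = proc_create("example", S_IRUGO, NULL, &example_fops);
	if (!p) {
		example_exit_noproc();	/* unwind; no proc entry yet */
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_exit(void)
{
	remove_proc_entry("example", NULL);
	example_exit_noproc();
}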
struct proc_dir_entry *p;
p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops);
+ if (!p) {
+ printk(KERN_ERR "Unable to initialize /proc/net/atm/lec\n");
+ return -ENOMEM;
+ }
#endif
register_atm_ioctl(&lane_ioctl_ops);
return rcu_dereference(ret);
}
+/* Same as rcu_assign_pointer(),
+ * but that macro assumes the value is a pointer.
+ */
static inline void node_set_parent(struct node *node, struct tnode *ptr)
{
- rcu_assign_pointer(node->parent,
- (unsigned long)ptr | NODE_TYPE(node));
+ smp_wmb();
+ node->parent = (unsigned long)ptr | NODE_TYPE(node);
}
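
rcu_assign_pointer() can't be used here because node->parent is an unsigned long carrying a type tag in its low bits, not a plain pointer; the open-coded smp_wmb() provides the same publish ordering. A generic kernel-style sketch of the publish/read pairing (names illustrative, not fib_trie code):

static unsigned long tagged_parent;	/* pointer | low-bit tag */

static void publish_parent(void *ptr, unsigned long tag)
{
	/* *ptr must be fully initialized before this point */
	smp_wmb();			/* order init before publish */
	tagged_parent = (unsigned long)ptr | tag;
}

static void *read_parent(void)
{
	unsigned long v = tagged_parent;

	smp_read_barrier_depends();	/* pairs with the smp_wmb() */
	return (void *)(v & ~1UL);	/* strip the tag bit */
}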
static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
- net = skb->dev->nd_net;
+ net = skb->dev ? skb->dev->nd_net : skb->dst->dev->nd_net;
/* Start by cleaning up the memory. */
if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
ip_evictor(net);
if (!(psize -= copy))
goto out;
- if (skb->len < mss_now || (flags & MSG_OOB))
+ if (skb->len < size_goal || (flags & MSG_OOB))
continue;
if (forced_push(tp)) {
if ((seglen -= copy) == 0 && iovlen == 0)
goto out;
- if (skb->len < mss_now || (flags & MSG_OOB))
+ if (skb->len < size_goal || (flags & MSG_OOB))
continue;
if (forced_push(tp)) {
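
With TSO, one skb handed to the NIC can span many MSS-sized segments, so pushing at mss_now would defeat coalescing; size_goal is the larger per-skb target. A userspace analog of the idea (GOAL, flush() and send_bytes() are illustrative, not TCP code):

#include <string.h>

enum { GOAL = 64 * 1024 };		/* illustrative TSO-sized goal */

static char buf[GOAL];
static size_t used;

static void flush(void);		/* stand-in for pushing frames */

static void send_bytes(const char *p, size_t len)
{
	while (len) {
		size_t n = len < GOAL - used ? len : GOAL - used;

		memcpy(buf + used, p, n);
		used += n;
		p += n;
		len -= n;
		if (used >= GOAL) {	/* only push at the goal size */
			flush();
			used = 0;
		}
	}
}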
u8 *opt;
int rd_len;
int err;
- int hlen;
u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
dev = skb->dev;
return;
}
- hlen = 0;
skb_reserve(buff, LL_RESERVED_SPACE(dev));
ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr,
*/
static psched_time_t htb_do_events(struct htb_sched *q, int level)
{
- int i;
-
- for (i = 0; i < 500; i++) {
+ /* don't run for longer than 2 jiffies; 2 is used instead of
+    1 to simplify things when the jiffies counter is about to
+    be incremented anyway */
+ unsigned long stop_at = jiffies + 2;
+ while (time_before(jiffies, stop_at)) {
struct htb_class *cl;
long diff;
struct rb_node *p = rb_first(&q->wait_pq[level]);
if (cl->cmode != HTB_CAN_SEND)
htb_add_to_wait_tree(q, cl, diff);
}
- if (net_ratelimit())
- printk(KERN_WARNING "htb: too many events !\n");
- return q->now + PSCHED_TICKS_PER_SEC / 10;
+ /* too much load - let's continue on the next jiffy */
+ return q->now + PSCHED_TICKS_PER_SEC / HZ;
}
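
time_before() handles jiffies wraparound, so the bound above stays correct across a counter overflow. A minimal sketch of the bounded-work pattern (work_fn() is a stand-in):

#include <linux/jiffies.h>

static int work_fn(void);		/* stand-in: returns 0 when idle */

static void bounded_work(void)
{
	/* 2 jiffies instead of 1: if jiffies ticks right after we
	 * sample it, a 1-jiffy budget could be almost zero time */
	unsigned long stop_at = jiffies + 2;

	while (time_before(jiffies, stop_at)) {
		if (!work_fn())
			break;		/* nothing left to do */
	}
	/* if the budget ran out, reschedule instead of looping on */
}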
/* Returns class->node+prio from id-tree where class's id is >= id. NULL
if (!dlci_ioctl_hook)
request_module("dlci");
- if (dlci_ioctl_hook) {
- mutex_lock(&dlci_ioctl_mutex);
+ mutex_lock(&dlci_ioctl_mutex);
+ if (dlci_ioctl_hook)
err = dlci_ioctl_hook(cmd, argp);
- mutex_unlock(&dlci_ioctl_mutex);
- }
+ mutex_unlock(&dlci_ioctl_mutex);
break;
default:
err = sock->ops->ioctl(sock, cmd, arg);
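
The fix above re-tests the hook under the mutex, closing the window where the module providing it could unload between the check and the call. A generic sketch of the check-under-lock pattern (hook names hypothetical):

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(hook_mutex);
static int (*hook_fn)(unsigned int cmd);	/* set by a module */

static int call_hook(unsigned int cmd)
{
	int err = -ENOPKG;

	/* take the lock *before* testing the pointer: the hook must
	 * not be cleared (module unloaded) between check and call */
	mutex_lock(&hook_mutex);
	if (hook_fn)
		err = hook_fn(cmd);
	mutex_unlock(&hook_mutex);
	return err;
}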