Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
author	David Howells <dhowells@redhat.com>
	Tue, 5 Dec 2006 14:37:56 +0000 (14:37 +0000)
committer	David Howells <dhowells@warthog.cambridge.redhat.com>
	Tue, 5 Dec 2006 14:37:56 +0000 (14:37 +0000)
Conflicts:

drivers/infiniband/core/iwcm.c
drivers/net/chelsio/cxgb2.c
drivers/net/wireless/bcm43xx/bcm43xx_main.c
drivers/net/wireless/prism54/islpci_eth.c
drivers/usb/core/hub.h
drivers/usb/input/hid-core.c
net/core/netpoll.c

Fix up merge failures with Linus's head and fix new compilation failures.

Signed-off-by: David Howells <dhowells@redhat.com>
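
Most of the fix-ups below are fallout from the workqueue API change merged via Linus's tree: work handlers now receive the struct work_struct itself instead of a caller-supplied void *, recover their context with container_of(), and INIT_WORK()/DECLARE_WORK() lose their data argument. A minimal before/after sketch of the pattern (the struct and function names are illustrative, not taken from this merge):

    struct my_ctx {
            struct work_struct work;
            /* driver-private state */
    };

    /* Old API: context passed as an opaque pointer at init time. */
    static void my_handler_old(void *data)
    {
            struct my_ctx *ctx = data;
            /* ... */
    }
    /* INIT_WORK(&ctx->work, my_handler_old, ctx); */

    /* New API: the handler gets the work item and walks back to the
     * enclosing structure with container_of(). */
    static void my_handler(struct work_struct *work)
    {
            struct my_ctx *ctx = container_of(work, struct my_ctx, work);
            /* ... */
    }
    /* INIT_WORK(&ctx->work, my_handler); */
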
87 files changed:
block/as-iosched.c
block/cfq-iosched.c
block/ll_rw_blk.c
drivers/ata/libata-scsi.c
drivers/char/random.c
drivers/char/tpm/tpm.c
drivers/char/tty_io.c
drivers/char/vt.c
drivers/infiniband/core/addr.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/iwcm.c
drivers/infiniband/core/mad.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/media/dvb/cinergyT2/cinergyT2.c
drivers/media/video/saa6588.c
drivers/mmc/mmc.c
drivers/mmc/mmc_sysfs.c
drivers/net/bnx2.c
drivers/net/cassini.c
drivers/net/chelsio/common.h
drivers/net/chelsio/cxgb2.c
drivers/net/e100.c
drivers/net/e1000/e1000_main.c
drivers/net/ixgb/ixgb_main.c
drivers/net/myri10ge/myri10ge.c
drivers/net/phy/phy.c
drivers/net/r8169.c
drivers/net/skge.c
drivers/net/spider_net.c
drivers/net/sungem.c
drivers/net/wireless/bcm43xx/bcm43xx.h
drivers/net/wireless/bcm43xx/bcm43xx_main.c
drivers/net/wireless/ipw2100.c
drivers/net/wireless/ipw2200.c
drivers/net/wireless/prism54/isl_ioctl.c
drivers/net/wireless/prism54/isl_ioctl.h
drivers/net/wireless/prism54/islpci_dev.c
drivers/net/wireless/prism54/islpci_eth.c
drivers/net/wireless/prism54/islpci_eth.h
drivers/net/wireless/prism54/islpci_mgt.c
drivers/net/wireless/zd1211rw/zd_mac.c
drivers/net/wireless/zd1211rw/zd_mac.h
drivers/pcmcia/ds.c
drivers/rtc/rtc-dev.c
drivers/usb/atm/speedtch.c
drivers/usb/atm/ueagle-atm.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/hub.c
drivers/usb/core/message.c
drivers/usb/core/usb.c
drivers/usb/gadget/ether.c
drivers/usb/host/u132-hcd.c
drivers/usb/input/hid-core.c
drivers/usb/misc/ftdi-elan.c
drivers/usb/misc/phidgetkit.c
drivers/usb/misc/phidgetmotorcontrol.c
drivers/usb/net/pegasus.c
drivers/usb/net/usbnet.c
drivers/usb/serial/aircable.c
drivers/usb/serial/digi_acceleport.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/usb-serial.c
fs/aio.c
fs/bio.c
fs/reiserfs/journal.c
include/linux/mmc/host.h
include/linux/reiserfs_fs_sb.h
include/linux/tty.h
include/linux/usb.h
include/net/sctp/structs.h
include/scsi/libsas.h
kernel/kmod.c
net/atm/lec.c
net/atm/lec.h
net/core/netpoll.c
net/dccp/minisocks.c
net/ieee80211/softmac/ieee80211softmac_auth.c
net/ieee80211/softmac/ieee80211softmac_scan.c
net/ipv4/tcp_minisocks.c
net/sctp/associola.c
net/sctp/endpointola.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c

diff --combined block/as-iosched.c
index f371c9359999aa7627be436940ceee1d14c7164e,00242111a457e3b461d59e4542bdd86d8501db44..5934c4bfd52a3d9b71331785ac6fafc26ddef992
@@@ -1274,10 -1274,9 +1274,10 @@@ static void as_merged_requests(request_
   *
   * FIXME! dispatch queue is not a queue at all!
   */
 -static void as_work_handler(void *data)
 +static void as_work_handler(struct work_struct *work)
  {
 -      struct request_queue *q = data;
 +      struct as_data *ad = container_of(work, struct as_data, antic_work);
 +      struct request_queue *q = ad->q;
        unsigned long flags;
  
        spin_lock_irqsave(q->queue_lock, flags);
@@@ -1318,7 -1317,7 +1318,7 @@@ static void as_exit_queue(elevator_t *e
  /*
   * initialize elevator private data (as_data).
   */
- static void *as_init_queue(request_queue_t *q, elevator_t *e)
+ static void *as_init_queue(request_queue_t *q)
  {
        struct as_data *ad;
  
        ad->antic_timer.function = as_antic_timeout;
        ad->antic_timer.data = (unsigned long)q;
        init_timer(&ad->antic_timer);
 -      INIT_WORK(&ad->antic_work, as_work_handler, q);
 +      INIT_WORK(&ad->antic_work, as_work_handler);
  
        INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
        INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
diff --combined block/cfq-iosched.c
index 6cec3a1dccb8c4af57f3740357b90d7d55be7b49,e9019ed39b7352a5416aa38a9a15623be744293d..84e9be073180253baab4cfddb69fbc2a4a14e764
@@@ -1464,8 -1464,7 +1464,7 @@@ cfq_update_io_thinktime(struct cfq_dat
  }
  
  static void
- cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
-                      struct request *rq)
+ cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
  {
        sector_t sdist;
        u64 total;
@@@ -1617,7 -1616,7 +1616,7 @@@ cfq_rq_enqueued(struct cfq_data *cfqd, 
        }
  
        cfq_update_io_thinktime(cfqd, cic);
-       cfq_update_io_seektime(cfqd, cic, rq);
+       cfq_update_io_seektime(cic, rq);
        cfq_update_idle_window(cfqd, cfqq, cic);
  
        cic->last_queue = jiffies;
@@@ -1770,7 -1769,7 +1769,7 @@@ static int cfq_may_queue(request_queue_
  /*
   * queue lock held here
   */
- static void cfq_put_request(request_queue_t *q, struct request *rq)
+ static void cfq_put_request(struct request *rq)
  {
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
  
@@@ -1841,11 -1840,9 +1840,11 @@@ queue_fail
        return 1;
  }
  
 -static void cfq_kick_queue(void *data)
 +static void cfq_kick_queue(struct work_struct *work)
  {
 -      request_queue_t *q = data;
 +      struct cfq_data *cfqd =
 +              container_of(work, struct cfq_data, unplug_work);
 +      request_queue_t *q = cfqd->queue;
        unsigned long flags;
  
        spin_lock_irqsave(q->queue_lock, flags);
@@@ -1953,7 -1950,7 +1952,7 @@@ static void cfq_exit_queue(elevator_t *
        kfree(cfqd);
  }
  
- static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
+ static void *cfq_init_queue(request_queue_t *q)
  {
        struct cfq_data *cfqd;
        int i;
        cfqd->idle_class_timer.function = cfq_idle_class_timer;
        cfqd->idle_class_timer.data = (unsigned long) cfqd;
  
 -      INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
 +      INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
  
        cfqd->cfq_quantum = cfq_quantum;
        cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
diff --combined block/ll_rw_blk.c
index eb4cf6df7374213cfac39f7409b7b6629d838454,0f82e12f7b678553b51189374bfb562c676de919..cc6e95f8e5d9b05820251f8ef948f492d4ff6d33
@@@ -34,7 -34,7 +34,7 @@@
   */
  #include <scsi/scsi_cmnd.h>
  
 -static void blk_unplug_work(void *data);
 +static void blk_unplug_work(struct work_struct *work);
  static void blk_unplug_timeout(unsigned long data);
  static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
  static void init_request_from_bio(struct request *req, struct bio *bio);
@@@ -227,7 -227,7 +227,7 @@@ void blk_queue_make_request(request_que
        if (q->unplug_delay == 0)
                q->unplug_delay = 1;
  
 -      INIT_WORK(&q->unplug_work, blk_unplug_work, q);
 +      INIT_WORK(&q->unplug_work, blk_unplug_work);
  
        q->unplug_timer.function = blk_unplug_timeout;
        q->unplug_timer.data = (unsigned long)q;
@@@ -1631,9 -1631,9 +1631,9 @@@ static void blk_backing_dev_unplug(stru
        }
  }
  
 -static void blk_unplug_work(void *data)
 +static void blk_unplug_work(struct work_struct *work)
  {
 -      request_queue_t *q = data;
 +      request_queue_t *q = container_of(work, request_queue_t, unplug_work);
  
        blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
                                q->rq.count[READ] + q->rq.count[WRITE]);
@@@ -2322,6 -2322,84 +2322,84 @@@ void blk_insert_request(request_queue_
  
  EXPORT_SYMBOL(blk_insert_request);
  
+ static int __blk_rq_unmap_user(struct bio *bio)
+ {
+       int ret = 0;
+
+       if (bio) {
+               if (bio_flagged(bio, BIO_USER_MAPPED))
+                       bio_unmap_user(bio);
+               else
+                       ret = bio_uncopy_user(bio);
+       }
+
+       return ret;
+ }
+
+ static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
+                            void __user *ubuf, unsigned int len)
+ {
+       unsigned long uaddr;
+       struct bio *bio, *orig_bio;
+       int reading, ret;
+
+       reading = rq_data_dir(rq) == READ;
+
+       /*
+        * if alignment requirement is satisfied, map in user pages for
+        * direct dma. else, set up kernel bounce buffers
+        */
+       uaddr = (unsigned long) ubuf;
+       if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+               bio = bio_map_user(q, NULL, uaddr, len, reading);
+       else
+               bio = bio_copy_user(q, uaddr, len, reading);
+
+       if (IS_ERR(bio)) {
+               return PTR_ERR(bio);
+       }
+       orig_bio = bio;
+       blk_queue_bounce(q, &bio);
+
+       /*
+        * We link the bounce buffer in and could have to traverse it
+        * later so we have to get a ref to prevent it from being freed
+        */
+       bio_get(bio);
+
+       /*
+        * for most (all? don't know of any) queues we could
+        * skip grabbing the queue lock here. only drivers with
+        * funky private ->back_merge_fn() function could be
+        * problematic.
+        */
+       spin_lock_irq(q->queue_lock);
+       if (!rq->bio)
+               blk_rq_bio_prep(q, rq, bio);
+       else if (!q->back_merge_fn(q, rq, bio)) {
+               ret = -EINVAL;
+               spin_unlock_irq(q->queue_lock);
+               goto unmap_bio;
+       } else {
+               rq->biotail->bi_next = bio;
+               rq->biotail = bio;
+               rq->nr_sectors += bio_sectors(bio);
+               rq->hard_nr_sectors = rq->nr_sectors;
+               rq->data_len += bio->bi_size;
+       }
+       spin_unlock_irq(q->queue_lock);
+
+       return bio->bi_size;
+
+ unmap_bio:
+       /* if it was bounced we must call the end io function */
+       bio_endio(bio, bio->bi_size, 0);
+       __blk_rq_unmap_user(orig_bio);
+       bio_put(bio);
+       return ret;
+ }
+
  /**
   * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
   * @q:                request queue where request should be inserted
   *    unmapping.
   */
  int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
-                   unsigned int len)
+                   unsigned long len)
  {
-       unsigned long uaddr;
-       struct bio *bio;
-       int reading;
+       unsigned long bytes_read = 0;
+       int ret;
  
        if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !ubuf)
                return -EINVAL;
  
-       reading = rq_data_dir(rq) == READ;
+       while (bytes_read != len) {
+               unsigned long map_len, end, start;
  
-       /*
-        * if alignment requirement is satisfied, map in user pages for
-        * direct dma. else, set up kernel bounce buffers
-        */
-       uaddr = (unsigned long) ubuf;
-       if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
-               bio = bio_map_user(q, NULL, uaddr, len, reading);
-       else
-               bio = bio_copy_user(q, uaddr, len, reading);
+               map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
+               end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
+                                                               >> PAGE_SHIFT;
+               start = (unsigned long)ubuf >> PAGE_SHIFT;
  
-       if (!IS_ERR(bio)) {
-               rq->bio = rq->biotail = bio;
-               blk_rq_bio_prep(q, rq, bio);
+               /*
+                * A bad offset could cause us to require BIO_MAX_PAGES + 1
+                * pages. If this happens we just lower the requested
+                * mapping len by a page so that we can fit
+                */
+               if (end - start > BIO_MAX_PAGES)
+                       map_len -= PAGE_SIZE;
  
-               rq->buffer = rq->data = NULL;
-               rq->data_len = len;
-               return 0;
+               ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+               if (ret < 0)
+                       goto unmap_rq;
+               bytes_read += ret;
+               ubuf += ret;
        }
  
-       /*
-        * bio is the err-ptr
-        */
-       return PTR_ERR(bio);
+       rq->buffer = rq->data = NULL;
+       return 0;
+ unmap_rq:
+       blk_rq_unmap_user(rq);
+       return ret;
  }
  
  EXPORT_SYMBOL(blk_rq_map_user);
   *    unmapping.
   */
  int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
-                       struct sg_iovec *iov, int iov_count)
+                       struct sg_iovec *iov, int iov_count, unsigned int len)
  {
        struct bio *bio;
  
        if (IS_ERR(bio))
                return PTR_ERR(bio);
  
-       rq->bio = rq->biotail = bio;
+       if (bio->bi_size != len) {
+               bio_endio(bio, bio->bi_size, 0);
+               bio_unmap_user(bio);
+               return -EINVAL;
+       }
+       bio_get(bio);
        blk_rq_bio_prep(q, rq, bio);
        rq->buffer = rq->data = NULL;
-       rq->data_len = bio->bi_size;
        return 0;
  }
  
@@@ -2429,23 -2514,26 +2514,26 @@@ EXPORT_SYMBOL(blk_rq_map_user_iov)
  
  /**
   * blk_rq_unmap_user - unmap a request with user data
-  * @bio:      bio to be unmapped
-  * @ulen:     length of user buffer
+  * @rq:               rq to be unmapped
   *
   * Description:
-  *    Unmap a bio previously mapped by blk_rq_map_user().
+  *    Unmap a rq previously mapped by blk_rq_map_user().
+  *    rq->bio must be set to the original head of the request.
   */
- int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
+ int blk_rq_unmap_user(struct request *rq)
  {
-       int ret = 0;
+       struct bio *bio, *mapped_bio;
  
-       if (bio) {
-               if (bio_flagged(bio, BIO_USER_MAPPED))
-                       bio_unmap_user(bio);
+       while ((bio = rq->bio)) {
+               if (bio_flagged(bio, BIO_BOUNCED))
+                       mapped_bio = bio->bi_private;
                else
-                       ret = bio_uncopy_user(bio);
-       }
+                       mapped_bio = bio;
  
+               __blk_rq_unmap_user(mapped_bio);
+               rq->bio = bio->bi_next;
+               bio_put(bio);
+       }
        return 0;
  }
  
@@@ -2476,11 -2564,8 +2564,8 @@@ int blk_rq_map_kern(request_queue_t *q
        if (rq_data_dir(rq) == WRITE)
                bio->bi_rw |= (1 << BIO_RW);
  
-       rq->bio = rq->biotail = bio;
        blk_rq_bio_prep(q, rq, bio);
        rq->buffer = rq->data = NULL;
-       rq->data_len = len;
        return 0;
  }
  
@@@ -3495,6 -3580,7 +3580,7 @@@ void blk_rq_bio_prep(request_queue_t *q
        rq->hard_cur_sectors = rq->current_nr_sectors;
        rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
        rq->buffer = bio_data(bio);
+       rq->data_len = bio->bi_size;
  
        rq->bio = rq->biotail = bio;
  }
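
The ll_rw_blk.c side of the merge also rewires user-space mapping: blk_rq_map_user() now builds the request from multiple bios (in BIO_MAX_SIZE chunks), and blk_rq_unmap_user() takes the request rather than a bio and a length. A hedged sketch of an SG_IO-style caller under the new signatures (names such as my_sg_io, q, bdev, ubuf are illustrative, and command setup is omitted):

    static int my_sg_io(request_queue_t *q, struct block_device *bdev,
                        void __user *ubuf, unsigned long len)
    {
            struct request *rq;
            int ret;

            rq = blk_get_request(q, READ, GFP_KERNEL);
            if (!rq)
                    return -ENOMEM;

            /* May now chain several bios onto the request internally. */
            ret = blk_rq_map_user(q, rq, ubuf, len);
            if (ret)
                    goto out;

            ret = blk_execute_rq(q, bdev->bd_disk, rq, 0);

            /* Unmapping walks rq->bio itself; no bio/length arguments. */
            blk_rq_unmap_user(rq);
    out:
            blk_put_request(rq);
            return ret;
    }
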
diff --combined drivers/ata/libata-scsi.c
index c872b324dbd389a6d4b31398b7ccb0a79718ddd8,47ea111d5acee837d20d8939b9c52c6a46f0357e..4c32d93d44b1a0b0e5d1d985c76a670502a21ace
@@@ -1451,6 -1451,7 +1451,7 @@@ nothing_to_do
  
  static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
  {
+       struct ata_port *ap = qc->ap;
        struct scsi_cmnd *cmd = qc->scsicmd;
        u8 *cdb = cmd->cmnd;
        int need_sense = (qc->err_mask != 0);
         * schedule EH_REVALIDATE operation to update the IDENTIFY DEVICE
         * cache
         */
-       if (!need_sense && (qc->tf.command == ATA_CMD_SET_FEATURES) &&
+       if (ap->ops->error_handler &&
+           !need_sense && (qc->tf.command == ATA_CMD_SET_FEATURES) &&
            ((qc->tf.feature == SETFEATURES_WC_ON) ||
             (qc->tf.feature == SETFEATURES_WC_OFF))) {
-               qc->ap->eh_info.action |= ATA_EH_REVALIDATE;
-               ata_port_schedule_eh(qc->ap);
+               ap->eh_info.action |= ATA_EH_REVALIDATE;
+               ata_port_schedule_eh(ap);
        }
  
        /* For ATA pass thru (SAT) commands, generate a sense block if
                }
        }
  
-       if (need_sense && !qc->ap->ops->error_handler)
-               ata_dump_status(qc->ap->id, &qc->result_tf);
+       if (need_sense && !ap->ops->error_handler)
+               ata_dump_status(ap->id, &qc->result_tf);
  
        qc->scsidone(cmd);
  
@@@ -3079,7 -3081,7 +3081,7 @@@ static void ata_scsi_remove_dev(struct 
  
  /**
   *    ata_scsi_hotplug - SCSI part of hotplug
 - *    @data: Pointer to ATA port to perform SCSI hotplug on
 + *    @work: Pointer to ATA port to perform SCSI hotplug on
   *
   *    Perform SCSI part of hotplug.  It's executed from a separate
   *    workqueue after EH completes.  This is necessary because SCSI
   *    LOCKING:
   *    Kernel thread context (may sleep).
   */
 -void ata_scsi_hotplug(void *data)
 +void ata_scsi_hotplug(struct work_struct *work)
  {
 -      struct ata_port *ap = data;
 +      struct ata_port *ap =
 +              container_of(work, struct ata_port, hotplug_task.work);
        int i;
  
        if (ap->pflags & ATA_PFLAG_UNLOADING) {
@@@ -3191,7 -3192,7 +3193,7 @@@ static int ata_scsi_user_scan(struct Sc
  
  /**
   *    ata_scsi_dev_rescan - initiate scsi_rescan_device()
 - *    @data: Pointer to ATA port to perform scsi_rescan_device()
 + *    @work: Pointer to ATA port to perform scsi_rescan_device()
   *
   *    After ATA pass thru (SAT) commands are executed successfully,
   *    libata need to propagate the changes to SCSI layer.  This
   *    LOCKING:
   *    Kernel thread context (may sleep).
   */
 -void ata_scsi_dev_rescan(void *data)
 +void ata_scsi_dev_rescan(struct work_struct *work)
  {
 -      struct ata_port *ap = data;
 +      struct ata_port *ap =
 +              container_of(work, struct ata_port, scsi_rescan_task);
        struct ata_device *dev;
        unsigned int i;
  
@@@ -3347,20 -3347,23 +3349,23 @@@ EXPORT_SYMBOL_GPL(ata_sas_slave_configu
   *    @ap:    ATA port to which the command is being sent
   *
   *    RETURNS:
-  *    Zero.
+  *    Return value from __ata_scsi_queuecmd() if @cmd can be queued,
+  *    0 otherwise.
   */
  
  int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
                     struct ata_port *ap)
  {
+       int rc = 0;
+
        ata_scsi_dump_cdb(ap, cmd);
  
        if (likely(ata_scsi_dev_enabled(ap->device)))
-               __ata_scsi_queuecmd(cmd, done, ap->device);
+               rc = __ata_scsi_queuecmd(cmd, done, ap->device);
        else {
                cmd->result = (DID_BAD_TARGET << 16);
                done(cmd);
        }
-       return 0;
+       return rc;
  }
  EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
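
ata_sas_queuecmd() now returns whatever __ata_scsi_queuecmd() returned instead of unconditionally returning zero, so a SAS LLDD can pass midlayer busy status straight through from its queuecommand hook. Roughly, for a hypothetical driver:

    static int my_sas_queuecommand(struct scsi_cmnd *cmd,
                                   void (*done)(struct scsi_cmnd *))
    {
            struct ata_port *ap = my_lookup_ata_port(cmd);  /* hypothetical */

            /* Nonzero (e.g. SCSI_MLQUEUE_HOST_BUSY) now propagates. */
            return ata_sas_queuecmd(cmd, done, ap);
    }
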
diff --combined drivers/char/random.c
index fa764688cad17e18a70b87d0f07e20fb2d0c9de3,d40df30c2b10014a220ec0739f33bf05d4f603a2..4c6782a1ecdba4f5682ecfed4b8ffa45f7e40ec2
@@@ -1422,9 -1422,9 +1422,9 @@@ static struct keydata 
  
  static unsigned int ip_cnt;
  
 -static void rekey_seq_generator(void *private_);
 +static void rekey_seq_generator(struct work_struct *work);
  
 -static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL);
 +static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
  
  /*
   * Lock avoidance:
   * happen, and even if that happens only a not perfectly compliant
   * ISN is generated, nothing fatal.
   */
 -static void rekey_seq_generator(void *private_)
 +static void rekey_seq_generator(struct work_struct *work)
  {
        struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];
  
@@@ -1466,8 -1466,8 +1466,8 @@@ static __init int seqgen_init(void
  late_initcall(seqgen_init);
  
  #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- __u32 secure_tcpv6_sequence_number(__u32 *saddr, __u32 *daddr,
-                                  __u16 sport, __u16 dport)
+ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+                                  __be16 sport, __be16 dport)
  {
        struct timeval tv;
        __u32 seq;
         */
  
        memcpy(hash, saddr, 16);
-       hash[4]=(sport << 16) + dport;
+       hash[4]=((__force u16)sport << 16) + (__force u16)dport;
        memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7);
  
-       seq = twothirdsMD4Transform(daddr, hash) & HASH_MASK;
+       seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
        seq += keyptr->count;
  
        do_gettimeofday(&tv);
@@@ -1496,7 -1496,7 +1496,7 @@@ EXPORT_SYMBOL(secure_tcpv6_sequence_num
  /*  The code below is shamelessly stolen from secure_tcp_sequence_number().
   *  All blames to Andrey V. Savochkin <saw@msu.ru>.
   */
- __u32 secure_ip_id(__u32 daddr)
+ __u32 secure_ip_id(__be32 daddr)
  {
        struct keydata *keyptr;
        __u32 hash[4];
         *  The dest ip address is placed in the starting vector,
         *  which is then hashed with random data.
         */
-       hash[0] = daddr;
+       hash[0] = (__force __u32)daddr;
        hash[1] = keyptr->secret[9];
        hash[2] = keyptr->secret[10];
        hash[3] = keyptr->secret[11];
  
  #ifdef CONFIG_INET
  
- __u32 secure_tcp_sequence_number(__u32 saddr, __u32 daddr,
-                                __u16 sport, __u16 dport)
+ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+                                __be16 sport, __be16 dport)
  {
        struct timeval tv;
        __u32 seq;
         *  Note that the words are placed into the starting vector, which is
         *  then mixed with a partial MD4 over random data.
         */
-       hash[0]=saddr;
-       hash[1]=daddr;
-       hash[2]=(sport << 16) + dport;
+       hash[0]=(__force u32)saddr;
+       hash[1]=(__force u32)daddr;
+       hash[2]=((__force u16)sport << 16) + (__force u16)dport;
        hash[3]=keyptr->secret[11];
  
        seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
  EXPORT_SYMBOL(secure_tcp_sequence_number);
  
  /* Generate secure starting point for ephemeral IPV4 transport port search */
- u32 secure_ipv4_port_ephemeral(__u32 saddr, __u32 daddr, __u16 dport)
+ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
  {
        struct keydata *keyptr = get_keyptr();
        u32 hash[4];
         *  Pick a unique starting offset for each ephemeral port search
         *  (saddr, daddr, dport) and 48bits of random data.
         */
-       hash[0] = saddr;
-       hash[1] = daddr;
-       hash[2] = dport ^ keyptr->secret[10];
+       hash[0] = (__force u32)saddr;
+       hash[1] = (__force u32)daddr;
+       hash[2] = (__force u32)dport ^ keyptr->secret[10];
        hash[3] = keyptr->secret[11];
  
        return half_md4_transform(hash, keyptr->secret);
  }
  
  #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- u32 secure_ipv6_port_ephemeral(const __u32 *saddr, const __u32 *daddr, __u16 dport)
+ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport)
  {
        struct keydata *keyptr = get_keyptr();
        u32 hash[12];
  
        memcpy(hash, saddr, 16);
-       hash[4] = dport;
+       hash[4] = (__force u32)dport;
        memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7);
  
-       return twothirdsMD4Transform(daddr, hash);
+       return twothirdsMD4Transform((const __u32 *)daddr, hash);
  }
  #endif
  
   * bit's 32-47 increase every key exchange
   *       0-31  hash(source, dest)
   */
- u64 secure_dccp_sequence_number(__u32 saddr, __u32 daddr,
-                               __u16 sport, __u16 dport)
+ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
+                               __be16 sport, __be16 dport)
  {
        struct timeval tv;
        u64 seq;
        __u32 hash[4];
        struct keydata *keyptr = get_keyptr();
  
-       hash[0] = saddr;
-       hash[1] = daddr;
-       hash[2] = (sport << 16) + dport;
+       hash[0] = (__force u32)saddr;
+       hash[1] = (__force u32)daddr;
+       hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
        hash[3] = keyptr->secret[11];
  
        seq = half_md4_transform(hash, keyptr->secret);
@@@ -1641,7 -1641,7 +1641,7 @@@ unsigned int get_random_int(void
         * drain on it), and uses halfMD4Transform within the second. We
         * also mix it with jiffies and the PID:
         */
-       return secure_ip_id(current->pid + jiffies);
+       return secure_ip_id((__force __be32)(current->pid + jiffies));
  }
  
  /*
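
The drivers/char/random.c hunks belong to the tree-wide endianness annotation effort: values carried in network byte order are typed __be32/__be16 so sparse can flag accidental mixing with host-order arithmetic, and intentional raw reinterpretations are marked __force. A small illustration of the convention (not from this patch):

    static u32 seed_from_port(__be16 wire_port)
    {
            /* Convert explicitly when host-order arithmetic is wanted... */
            u16 host_port = ntohs(wire_port);

            /* ...or tell sparse a raw reinterpretation is deliberate. */
            return ((__force u16)wire_port << 16) ^ host_port;
    }
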
diff --combined drivers/char/tpm/tpm.c
index 36f91a655275a6f7481648b8b6273d2537b40e96,6e1329d404d201ce50f5ff981fede21cbf317d95..774fa861169ad5af8392639178366db761356679
@@@ -325,9 -325,9 +325,9 @@@ static void user_reader_timeout(unsigne
        schedule_work(&chip->work);
  }
  
 -static void timeout_work(void *ptr)
 +static void timeout_work(struct work_struct *work)
  {
 -      struct tpm_chip *chip = ptr;
 +      struct tpm_chip *chip = container_of(work, struct tpm_chip, work);
  
        down(&chip->buffer_mutex);
        atomic_set(&chip->data_pending, 0);
@@@ -1105,7 -1105,7 +1105,7 @@@ struct tpm_chip *tpm_register_hardware(
        init_MUTEX(&chip->tpm_mutex);
        INIT_LIST_HEAD(&chip->list);
  
 -      INIT_WORK(&chip->work, timeout_work, chip);
 +      INIT_WORK(&chip->work, timeout_work);
  
        init_timer(&chip->user_read_timer);
        chip->user_read_timer.function = user_reader_timeout;
        scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num);
        chip->vendor.miscdev.name = devname;
  
-       chip->vendor.miscdev.dev = dev;
+       chip->vendor.miscdev.parent = dev;
        chip->dev = get_device(dev);
  
        if (misc_register(&chip->vendor.miscdev)) {
diff --combined drivers/char/tty_io.c
index 83e9e7d9b58c2150205e5b2db202a04a88d20b67,50dc49205a231a2c5854a0e9699e39e5db9e6bed..b3cfc8bc613c7bbfeee7ef7ce037e638793f32a0
@@@ -1254,7 -1254,7 +1254,7 @@@ EXPORT_SYMBOL_GPL(tty_ldisc_flush)
        
  /**
   *    do_tty_hangup           -       actual handler for hangup events
 - *    @data: tty device
 + *    @work: tty device
   *
   *    This can be called by the "eventd" kernel thread.  That is process
   *    synchronous but doesn't hold any locks, so we need to make sure we
   *            tasklist_lock to walk task list for hangup event
   *
   */
 -static void do_tty_hangup(void *data)
 +static void do_tty_hangup(struct work_struct *work)
  {
 -      struct tty_struct *tty = (struct tty_struct *) data;
 +      struct tty_struct *tty =
 +              container_of(work, struct tty_struct, hangup_work);
        struct file * cons_filp = NULL;
        struct file *filp, *f = NULL;
        struct task_struct *p;
@@@ -1434,7 -1433,7 +1434,7 @@@ void tty_vhangup(struct tty_struct * tt
  
        printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
  #endif
 -      do_tty_hangup((void *) tty);
 +      do_tty_hangup(&tty->hangup_work);
  }
  EXPORT_SYMBOL(tty_vhangup);
  
@@@ -3305,13 -3304,12 +3305,13 @@@ int tty_ioctl(struct inode * inode, str
   * Nasty bug: do_SAK is being called in interrupt context.  This can
   * deadlock.  We punt it up to process context.  AKPM - 16Mar2001
   */
 -static void __do_SAK(void *arg)
 +static void __do_SAK(struct work_struct *work)
  {
 +      struct tty_struct *tty =
 +              container_of(work, struct tty_struct, SAK_work);
  #ifdef TTY_SOFT_SAK
        tty_hangup(tty);
  #else
 -      struct tty_struct *tty = arg;
        struct task_struct *g, *p;
        int session;
        int             i;
@@@ -3390,7 -3388,7 +3390,7 @@@ void do_SAK(struct tty_struct *tty
  {
        if (!tty)
                return;
 -      PREPARE_WORK(&tty->SAK_work, __do_SAK, tty);
 +      PREPARE_WORK(&tty->SAK_work, __do_SAK);
        schedule_work(&tty->SAK_work);
  }
  
@@@ -3398,7 -3396,7 +3398,7 @@@ EXPORT_SYMBOL(do_SAK)
  
  /**
   *    flush_to_ldisc
 - *    @private_: tty structure passed from work queue.
 + *    @work: tty structure passed from work queue.
   *
   *    This routine is called out of the software interrupt to flush data
   *    from the buffer chain to the line discipline.
   *    receive_buf method is single threaded for each tty instance.
   */
   
 -static void flush_to_ldisc(void *private_)
 +static void flush_to_ldisc(struct work_struct *work)
  {
 -      struct tty_struct *tty = (struct tty_struct *) private_;
 +      struct tty_struct *tty =
 +              container_of(work, struct tty_struct, buf.work.work);
        unsigned long   flags;
        struct tty_ldisc *disc;
        struct tty_buffer *tbuf, *head;
@@@ -3556,7 -3553,7 +3556,7 @@@ void tty_flip_buffer_push(struct tty_st
        spin_unlock_irqrestore(&tty->buf.lock, flags);
  
        if (tty->low_latency)
 -              flush_to_ldisc((void *) tty);
 +              flush_to_ldisc(&tty->buf.work.work);
        else
                schedule_delayed_work(&tty->buf.work, 1);
  }
@@@ -3583,17 -3580,17 +3583,17 @@@ static void initialize_tty_struct(struc
        tty->overrun_time = jiffies;
        tty->buf.head = tty->buf.tail = NULL;
        tty_buffer_init(tty);
 -      INIT_WORK(&tty->buf.work, flush_to_ldisc, tty);
 +      INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
        init_MUTEX(&tty->buf.pty_sem);
        mutex_init(&tty->termios_mutex);
        init_waitqueue_head(&tty->write_wait);
        init_waitqueue_head(&tty->read_wait);
 -      INIT_WORK(&tty->hangup_work, do_tty_hangup, tty);
 +      INIT_WORK(&tty->hangup_work, do_tty_hangup);
        mutex_init(&tty->atomic_read_lock);
        mutex_init(&tty->atomic_write_lock);
        spin_lock_init(&tty->read_lock);
        INIT_LIST_HEAD(&tty->tty_files);
 -      INIT_WORK(&tty->SAK_work, NULL, NULL);
 +      INIT_WORK(&tty->SAK_work, NULL);
  }
  
  /*
@@@ -3615,7 -3612,8 +3615,8 @@@ static struct class *tty_class
   *            This field is optional, if there is no known struct device
   *            for this tty device it can be set to NULL safely.
   *
-  *    Returns a pointer to the class device (or ERR_PTR(-EFOO) on error).
+  *    Returns a pointer to the struct device for this tty device
+  *    (or ERR_PTR(-EFOO) on error).
   *
   *    This call is required to be made to register an individual tty device
   *    if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set.  If
   *    Locking: ??
   */
  
- struct class_device *tty_register_device(struct tty_driver *driver,
-                                        unsigned index, struct device *device)
+ struct device *tty_register_device(struct tty_driver *driver, unsigned index,
+                                  struct device *device)
  {
        char name[64];
        dev_t dev = MKDEV(driver->major, driver->minor_start) + index;
        else
                tty_line_name(driver, index, name);
  
-       return class_device_create(tty_class, NULL, dev, device, "%s", name);
+       return device_create(tty_class, device, dev, name);
  }
  
  /**
  
  void tty_unregister_device(struct tty_driver *driver, unsigned index)
  {
-       class_device_destroy(tty_class, MKDEV(driver->major, driver->minor_start) + index);
+       device_destroy(tty_class, MKDEV(driver->major, driver->minor_start) + index);
  }
  
  EXPORT_SYMBOL(tty_register_device);
@@@ -3898,20 -3896,20 +3899,20 @@@ static int __init tty_init(void
        if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
            register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
                panic("Couldn't register /dev/tty driver\n");
-       class_device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 0), NULL, "tty");
+       device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 0), "tty");
  
        cdev_init(&console_cdev, &console_fops);
        if (cdev_add(&console_cdev, MKDEV(TTYAUX_MAJOR, 1), 1) ||
            register_chrdev_region(MKDEV(TTYAUX_MAJOR, 1), 1, "/dev/console") < 0)
                panic("Couldn't register /dev/console driver\n");
-       class_device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 1), NULL, "console");
+       device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 1), "console");
  
  #ifdef CONFIG_UNIX98_PTYS
        cdev_init(&ptmx_cdev, &ptmx_fops);
        if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
            register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
                panic("Couldn't register /dev/ptmx driver\n");
-       class_device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 2), NULL, "ptmx");
+       device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 2), "ptmx");
  #endif
  
  #ifdef CONFIG_VT
        if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
            register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1, "/dev/vc/0") < 0)
                panic("Couldn't register /dev/tty0 driver\n");
-       class_device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), NULL, "tty0");
+       device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), "tty0");
  
        vty_init();
  #endif
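
tty buffer flushing becomes delayed work, which is why flush_to_ldisc() digs the tty out via buf.work.work: the work_struct sits inside a struct delayed_work, which in turn sits inside the tty. The same two-level pattern for a hypothetical driver:

    struct my_dev {
            struct delayed_work dwork;
            /* ... */
    };

    static void my_delayed_handler(struct work_struct *work)
    {
            struct delayed_work *dwork =
                    container_of(work, struct delayed_work, work);
            struct my_dev *dev = container_of(dwork, struct my_dev, dwork);
            /* ... */
    }

    /* INIT_DELAYED_WORK(&dev->dwork, my_delayed_handler);
     * schedule_delayed_work(&dev->dwork, HZ); */
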
diff --combined drivers/char/vt.c
index 8ee04adc37f06e5b80092dc318f420322736e402,87587b4385abcb88dc72d4a113052d17baae9c5e..75ff0286e1adee4bd7ac0b5f067a4a28339ef4a5
  struct con_driver {
        const struct consw *con;
        const char *desc;
-       struct class_device *class_dev;
+       struct device *dev;
        int node;
        int first;
        int last;
@@@ -155,7 -155,7 +155,7 @@@ static void con_flush_chars(struct tty_
  static void set_vesa_blanking(char __user *p);
  static void set_cursor(struct vc_data *vc);
  static void hide_cursor(struct vc_data *vc);
 -static void console_callback(void *ignored);
 +static void console_callback(struct work_struct *ignored);
  static void blank_screen_t(unsigned long dummy);
  static void set_palette(struct vc_data *vc);
  
@@@ -174,7 -174,7 +174,7 @@@ static int vesa_blank_mode; /* 0:none 1
  static int blankinterval = 10*60*HZ;
  static int vesa_off_interval;
  
 -static DECLARE_WORK(console_work, console_callback, NULL);
 +static DECLARE_WORK(console_work, console_callback);
  
  /*
   * fg_console is the current virtual console,
@@@ -2154,7 -2154,7 +2154,7 @@@ out
   * with other console code and prevention of re-entrancy is
   * ensured with console_sem.
   */
 -static void console_callback(void *ignored)
 +static void console_callback(struct work_struct *ignored)
  {
        acquire_console_sem();
  
@@@ -3023,10 -3023,10 +3023,10 @@@ static inline int vt_unbind(struct con_
  }
  #endif /* CONFIG_VT_HW_CONSOLE_BINDING */
  
- static ssize_t store_bind(struct class_device *class_device,
+ static ssize_t store_bind(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
  {
-       struct con_driver *con = class_get_devdata(class_device);
+       struct con_driver *con = dev_get_drvdata(dev);
        int bind = simple_strtoul(buf, NULL, 0);
  
        if (bind)
        return count;
  }
  
- static ssize_t show_bind(struct class_device *class_device, char *buf)
+ static ssize_t show_bind(struct device *dev, struct device_attribute *attr,
+                        char *buf)
  {
-       struct con_driver *con = class_get_devdata(class_device);
+       struct con_driver *con = dev_get_drvdata(dev);
        int bind = con_is_bound(con->con);
  
        return snprintf(buf, PAGE_SIZE, "%i\n", bind);
  }
  
- static ssize_t show_name(struct class_device *class_device, char *buf)
+ static ssize_t show_name(struct device *dev, struct device_attribute *attr,
+                        char *buf)
  {
-       struct con_driver *con = class_get_devdata(class_device);
+       struct con_driver *con = dev_get_drvdata(dev);
  
        return snprintf(buf, PAGE_SIZE, "%s %s\n",
                        (con->flag & CON_DRIVER_FLAG_MODULE) ? "(M)" : "(S)",
  
  }
  
- static struct class_device_attribute class_device_attrs[] = {
+ static struct device_attribute device_attrs[] = {
        __ATTR(bind, S_IRUGO|S_IWUSR, show_bind, store_bind),
        __ATTR(name, S_IRUGO, show_name, NULL),
  };
  
- static int vtconsole_init_class_device(struct con_driver *con)
+ static int vtconsole_init_device(struct con_driver *con)
  {
        int i;
        int error = 0;
  
        con->flag |= CON_DRIVER_FLAG_ATTR;
-       class_set_devdata(con->class_dev, con);
-       for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) {
-               error = class_device_create_file(con->class_dev,
-                                        &class_device_attrs[i]);
+       dev_set_drvdata(con->dev, con);
+       for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
+               error = device_create_file(con->dev, &device_attrs[i]);
                if (error)
                        break;
        }
  
        if (error) {
                while (--i >= 0)
-                       class_device_remove_file(con->class_dev,
-                                        &class_device_attrs[i]);
+                       device_remove_file(con->dev, &device_attrs[i]);
                con->flag &= ~CON_DRIVER_FLAG_ATTR;
        }
  
        return error;
  }
  
- static void vtconsole_deinit_class_device(struct con_driver *con)
+ static void vtconsole_deinit_device(struct con_driver *con)
  {
        int i;
  
        if (con->flag & CON_DRIVER_FLAG_ATTR) {
-               for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
-                       class_device_remove_file(con->class_dev,
-                                                &class_device_attrs[i]);
+               for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
+                       device_remove_file(con->dev, &device_attrs[i]);
                con->flag &= ~CON_DRIVER_FLAG_ATTR;
        }
  }
@@@ -3179,18 -3178,17 +3178,17 @@@ int register_con_driver(const struct co
        if (retval)
                goto err;
  
-       con_driver->class_dev = class_device_create(vtconsole_class, NULL,
-                                                   MKDEV(0, con_driver->node),
-                                                   NULL, "vtcon%i",
-                                                   con_driver->node);
+       con_driver->dev = device_create(vtconsole_class, NULL,
+                                       MKDEV(0, con_driver->node),
+                                       "vtcon%i", con_driver->node);
  
-       if (IS_ERR(con_driver->class_dev)) {
-               printk(KERN_WARNING "Unable to create class_device for %s; "
+       if (IS_ERR(con_driver->dev)) {
+               printk(KERN_WARNING "Unable to create device for %s; "
                       "errno = %ld\n", con_driver->desc,
-                      PTR_ERR(con_driver->class_dev));
-               con_driver->class_dev = NULL;
+                      PTR_ERR(con_driver->dev));
+               con_driver->dev = NULL;
        } else {
-               vtconsole_init_class_device(con_driver);
+               vtconsole_init_device(con_driver);
        }
  
  err:
@@@ -3226,12 -3224,12 +3224,12 @@@ int unregister_con_driver(const struct 
  
                if (con_driver->con == csw &&
                    con_driver->flag & CON_DRIVER_FLAG_MODULE) {
-                       vtconsole_deinit_class_device(con_driver);
-                       class_device_destroy(vtconsole_class,
-                                            MKDEV(0, con_driver->node));
+                       vtconsole_deinit_device(con_driver);
+                       device_destroy(vtconsole_class,
+                                      MKDEV(0, con_driver->node));
                        con_driver->con = NULL;
                        con_driver->desc = NULL;
-                       con_driver->class_dev = NULL;
+                       con_driver->dev = NULL;
                        con_driver->node = 0;
                        con_driver->flag = 0;
                        con_driver->first = 0;
@@@ -3289,19 -3287,18 +3287,18 @@@ static int __init vtconsole_class_init(
        for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
                struct con_driver *con = &registered_con_driver[i];
  
-               if (con->con && !con->class_dev) {
-                       con->class_dev =
-                               class_device_create(vtconsole_class, NULL,
-                                                   MKDEV(0, con->node), NULL,
-                                                   "vtcon%i", con->node);
+               if (con->con && !con->dev) {
+                       con->dev = device_create(vtconsole_class, NULL,
+                                                MKDEV(0, con->node),
+                                                "vtcon%i", con->node);
  
-                       if (IS_ERR(con->class_dev)) {
+                       if (IS_ERR(con->dev)) {
                                printk(KERN_WARNING "Unable to create "
-                                      "class_device for %s; errno = %ld\n",
-                                      con->desc, PTR_ERR(con->class_dev));
-                               con->class_dev = NULL;
+                                      "device for %s; errno = %ld\n",
+                                      con->desc, PTR_ERR(con->dev));
+                               con->dev = NULL;
                        } else {
-                               vtconsole_init_class_device(con);
+                               vtconsole_init_device(con);
                        }
                }
        }
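
The vt.c changes are part of the class_device to struct device conversion: sysfs attributes become struct device_attribute, whose show/store methods take the device and the attribute. The shape of an attribute pair under the new convention (struct my_driver_data and its foo field are hypothetical):

    struct my_driver_data {
            int foo;
    };

    static ssize_t show_foo(struct device *dev, struct device_attribute *attr,
                            char *buf)
    {
            struct my_driver_data *data = dev_get_drvdata(dev);

            return snprintf(buf, PAGE_SIZE, "%d\n", data->foo);
    }

    static ssize_t store_foo(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
    {
            struct my_driver_data *data = dev_get_drvdata(dev);

            data->foo = simple_strtoul(buf, NULL, 0);
            return count;
    }

    static DEVICE_ATTR(foo, S_IRUGO | S_IWUSR, show_foo, store_foo);
    /* device_create_file(dev, &dev_attr_foo); */
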
index 84b2f5cb37226d7cb778ede69688a9b334e07222,7767a11b6890d4d88d2c61d1a3adcb3730a9c2e4..af939796750dba07fd713f83dc3cf473ce52c43e
@@@ -55,11 -55,11 +55,11 @@@ struct addr_req 
        int status;
  };
  
 -static void process_req(void *data);
 +static void process_req(struct work_struct *work);
  
  static DEFINE_MUTEX(lock);
  static LIST_HEAD(req_list);
 -static DECLARE_WORK(work, process_req, NULL);
 +static DECLARE_DELAYED_WORK(work, process_req);
  static struct workqueue_struct *addr_wq;
  
  void rdma_addr_register_client(struct rdma_addr_client *client)
@@@ -139,7 -139,7 +139,7 @@@ static void queue_req(struct addr_req *
  
        mutex_lock(&lock);
        list_for_each_entry_reverse(temp_req, &req_list, list) {
-               if (time_after(req->timeout, temp_req->timeout))
+               if (time_after_eq(req->timeout, temp_req->timeout))
                        break;
        }
  
@@@ -215,7 -215,7 +215,7 @@@ out
        return ret;
  }
  
 -static void process_req(void *data)
 +static void process_req(struct work_struct *work)
  {
        struct addr_req *req, *temp_req;
        struct sockaddr_in *src_in, *dst_in;
  
        mutex_lock(&lock);
        list_for_each_entry_safe(req, temp_req, &req_list, list) {
-               if (req->status) {
+               if (req->status == -ENODATA) {
                        src_in = (struct sockaddr_in *) &req->src_addr;
                        dst_in = (struct sockaddr_in *) &req->dst_addr;
                        req->status = addr_resolve_remote(src_in, dst_in,
                                                          req->addr);
+                       if (req->status && time_after_eq(jiffies, req->timeout))
+                               req->status = -ETIMEDOUT;
+                       else if (req->status == -ENODATA)
+                               continue;
                }
-               if (req->status && time_after(jiffies, req->timeout))
-                       req->status = -ETIMEDOUT;
-               else if (req->status == -ENODATA)
-                       continue;
-               list_del(&req->list);
-               list_add_tail(&req->list, &done_list);
+               list_move_tail(&req->list, &done_list);
        }
  
        if (!list_empty(&req_list)) {
@@@ -347,8 -345,7 +345,7 @@@ void rdma_addr_cancel(struct rdma_dev_a
                if (req->addr == addr) {
                        req->status = -ECANCELED;
                        req->timeout = jiffies;
-                       list_del(&req->list);
-                       list_add(&req->list, &req_list);
+                       list_move(&req->list, &req_list);
                        set_timeout(req->timeout);
                        break;
                }
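
Note the time_after() to time_after_eq() switch in queue_req(): requests with equal timeouts now keep their insertion order. These macros compare jiffies values safely across counter wraparound, e.g.:

    unsigned long timeout = jiffies + msecs_to_jiffies(500);

    /* Correct even if the jiffies counter wraps in between. */
    if (time_after_eq(jiffies, timeout))
            /* handle the expiry ... */;
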
diff --combined drivers/infiniband/core/cm.c
index e1990f531d0ad4a9acfe653441ae31cf4a8a599c,e5dc4530808aac8c8e8d410ca32f47ae60cf5cfe..79c937bf696259096fd0a168cfed6528e2fe4e5f
@@@ -101,7 -101,7 +101,7 @@@ struct cm_av 
  };
  
  struct cm_work {
 -      struct work_struct work;
 +      struct delayed_work work;
        struct list_head list;
        struct cm_port *port;
        struct ib_mad_recv_wc *mad_recv_wc;     /* Received MADs */
@@@ -147,12 -147,12 +147,12 @@@ struct cm_id_private 
        __be32 rq_psn;
        int timeout_ms;
        enum ib_mtu path_mtu;
+       __be16 pkey;
        u8 private_data_len;
        u8 max_cm_retries;
        u8 peer_to_peer;
        u8 responder_resources;
        u8 initiator_depth;
-       u8 local_ack_timeout;
        u8 retry_count;
        u8 rnr_retry_count;
        u8 service_timeout;
        atomic_t work_count;
  };
  
 -static void cm_work_handler(void *data);
 +static void cm_work_handler(struct work_struct *work);
  
  static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
  {
@@@ -240,11 -240,10 +240,10 @@@ static void * cm_copy_private_data(cons
        if (!private_data || !private_data_len)
                return NULL;
  
-       data = kmalloc(private_data_len, GFP_KERNEL);
+       data = kmemdup(private_data, private_data_len, GFP_KERNEL);
        if (!data)
                return ERR_PTR(-ENOMEM);
  
-       memcpy(data, private_data, private_data_len);
        return data;
  }
  
@@@ -669,7 -668,8 +668,7 @@@ static struct cm_timewait_info * cm_cre
                return ERR_PTR(-ENOMEM);
  
        timewait_info->work.local_id = local_id;
 -      INIT_WORK(&timewait_info->work.work, cm_work_handler,
 -                &timewait_info->work);
 +      INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
        timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
        return timewait_info;
  }
@@@ -690,7 -690,7 +689,7 @@@ static void cm_enter_timewait(struct cm
         * timewait before notifying the user that we've exited timewait.
         */
        cm_id_priv->id.state = IB_CM_TIMEWAIT;
-       wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
+       wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
        queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
                           msecs_to_jiffies(wait_time));
        cm_id_priv->timewait_info = NULL;
@@@ -1009,6 -1009,7 +1008,7 @@@ int ib_send_cm_req(struct ib_cm_id *cm_
        cm_id_priv->responder_resources = param->responder_resources;
        cm_id_priv->retry_count = param->retry_count;
        cm_id_priv->path_mtu = param->primary_path->mtu;
+       cm_id_priv->pkey = param->primary_path->pkey;
        cm_id_priv->qp_type = param->qp_type;
  
        ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
  
        cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
-       cm_id_priv->local_ack_timeout =
-                               cm_req_get_primary_local_ack_timeout(req_msg);
  
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        ret = ib_post_send_mad(cm_id_priv->msg, NULL);
@@@ -1409,9 -1408,8 +1407,8 @@@ static int cm_req_handler(struct cm_wor
        cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
        cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
        cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
+       cm_id_priv->pkey = req_msg->pkey;
        cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
-       cm_id_priv->local_ack_timeout =
-                               cm_req_get_primary_local_ack_timeout(req_msg);
        cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
        cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
        cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
@@@ -1715,7 -1713,7 +1712,7 @@@ static int cm_establish_handler(struct 
        unsigned long flags;
        int ret;
  
-       /* See comment in ib_cm_establish about lookup. */
+       /* See comment in cm_establish about lookup. */
        cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
        if (!cm_id_priv)
                return -EINVAL;
@@@ -2401,11 -2399,16 +2398,16 @@@ int ib_send_cm_lap(struct ib_cm_id *cm_
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_ESTABLISHED ||
-           cm_id->lap_state != IB_CM_LAP_IDLE) {
+           (cm_id->lap_state != IB_CM_LAP_UNINIT &&
+            cm_id->lap_state != IB_CM_LAP_IDLE)) {
                ret = -EINVAL;
                goto out;
        }
  
+       ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+       if (ret)
+               goto out;
+
        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;
@@@ -2430,7 -2433,8 +2432,8 @@@ out:    spin_unlock_irqrestore(&cm_id_priv
  }
  EXPORT_SYMBOL(ib_send_cm_lap);
  
- static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
+ static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
+                                   struct ib_sa_path_rec *path,
                                    struct cm_lap_msg *lap_msg)
  {
        memset(path, 0, sizeof *path);
        path->hop_limit = lap_msg->alt_hop_limit;
        path->traffic_class = cm_lap_get_traffic_class(lap_msg);
        path->reversible = 1;
-       /* pkey is same as in REQ */
+       path->pkey = cm_id_priv->pkey;
        path->sl = cm_lap_get_sl(lap_msg);
        path->mtu_selector = IB_SA_EQ;
-       /* mtu is same as in REQ */
+       path->mtu = cm_id_priv->path_mtu;
        path->rate_selector = IB_SA_EQ;
        path->rate = cm_lap_get_packet_rate(lap_msg);
        path->packet_life_time_selector = IB_SA_EQ;
@@@ -2471,7 -2475,7 +2474,7 @@@ static int cm_lap_handler(struct cm_wor
  
        param = &work->cm_event.param.lap_rcvd;
        param->alternate_path = &work->path[0];
-       cm_format_path_from_lap(param->alternate_path, lap_msg);
+       cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
        work->cm_event.private_data = &lap_msg->private_data;
  
        spin_lock_irqsave(&cm_id_priv->lock, flags);
                goto unlock;
  
        switch (cm_id_priv->id.lap_state) {
+       case IB_CM_LAP_UNINIT:
        case IB_CM_LAP_IDLE:
                break;
        case IB_CM_MRA_LAP_SENT:
  
        cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
        cm_id_priv->tid = lap_msg->hdr.tid;
+       cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+                               work->mad_recv_wc->recv_buf.grh,
+                               &cm_id_priv->av);
+       cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
@@@ -2986,9 -2995,9 +2994,9 @@@ static void cm_send_handler(struct ib_m
        }
  }
  
 -static void cm_work_handler(void *data)
 +static void cm_work_handler(struct work_struct *_work)
  {
 -      struct cm_work *work = data;
 +      struct cm_work *work = container_of(_work, struct cm_work, work.work);
        int ret;
  
        switch (work->cm_event.event) {
                cm_free_work(work);
  }
  
- int ib_cm_establish(struct ib_cm_id *cm_id)
+ static int cm_establish(struct ib_cm_id *cm_id)
  {
        struct cm_id_private *cm_id_priv;
        struct cm_work *work;
         * we need to find the cm_id once we're in the context of the
         * worker thread, rather than holding a reference on it.
         */
 -      INIT_WORK(&work->work, cm_work_handler, work);
 +      INIT_DELAYED_WORK(&work->work, cm_work_handler);
        work->local_id = cm_id->local_id;
        work->remote_id = cm_id->remote_id;
        work->mad_recv_wc = NULL;
        work->cm_event.event = IB_CM_USER_ESTABLISHED;
 -      queue_work(cm.wq, &work->work);
 +      queue_delayed_work(cm.wq, &work->work, 0);
  out:
        return ret;
  }
- EXPORT_SYMBOL(ib_cm_establish);
+ static int cm_migrate(struct ib_cm_id *cm_id)
+ {
+       struct cm_id_private *cm_id_priv;
+       unsigned long flags;
+       int ret = 0;
+
+       cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+       spin_lock_irqsave(&cm_id_priv->lock, flags);
+       if (cm_id->state == IB_CM_ESTABLISHED &&
+           (cm_id->lap_state == IB_CM_LAP_UNINIT ||
+            cm_id->lap_state == IB_CM_LAP_IDLE)) {
+               cm_id->lap_state = IB_CM_LAP_IDLE;
+               cm_id_priv->av = cm_id_priv->alt_av;
+       } else
+               ret = -EINVAL;
+       spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+       return ret;
+ }
+
+ int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
+ {
+       int ret;
+
+       switch (event) {
+       case IB_EVENT_COMM_EST:
+               ret = cm_establish(cm_id);
+               break;
+       case IB_EVENT_PATH_MIG:
+               ret = cm_migrate(cm_id);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+       return ret;
+ }
+ EXPORT_SYMBOL(ib_cm_notify);
  
  static void cm_recv_handler(struct ib_mad_agent *mad_agent,
                            struct ib_mad_recv_wc *mad_recv_wc)
                return;
        }
  
 -      INIT_WORK(&work->work, cm_work_handler, work);
 +      INIT_DELAYED_WORK(&work->work, cm_work_handler);
        work->cm_event.event = event;
        work->mad_recv_wc = mad_recv_wc;
        work->port = (struct cm_port *)mad_agent->context;
 -      queue_work(cm.wq, &work->work);
 +      queue_delayed_work(cm.wq, &work->work, 0);
  }
  
  static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
        case IB_CM_ESTABLISHED:
                *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
                                IB_QP_PKEY_INDEX | IB_QP_PORT;
-               qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
-                                          IB_ACCESS_REMOTE_WRITE;
+               qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
                if (cm_id_priv->responder_resources)
                        qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
                                                    IB_ACCESS_REMOTE_ATOMIC;
@@@ -3221,6 -3266,9 +3265,9 @@@ static int cm_init_qp_rtr_attr(struct c
                if (cm_id_priv->alt_av.ah_attr.dlid) {
                        *qp_attr_mask |= IB_QP_ALT_PATH;
                        qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+                       qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+                       qp_attr->alt_timeout =
+                                       cm_id_priv->alt_av.packet_life_time + 1;
                        qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
                }
                ret = 0;
@@@ -3247,19 -3295,31 +3294,31 @@@ static int cm_init_qp_rts_attr(struct c
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
        case IB_CM_ESTABLISHED:
-               *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
-               qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
-               if (cm_id_priv->qp_type == IB_QPT_RC) {
-                       *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
-                                        IB_QP_RNR_RETRY |
-                                        IB_QP_MAX_QP_RD_ATOMIC;
-                       qp_attr->timeout = cm_id_priv->local_ack_timeout;
-                       qp_attr->retry_cnt = cm_id_priv->retry_count;
-                       qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
-                       qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
-               }
-               if (cm_id_priv->alt_av.ah_attr.dlid) {
-                       *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+               if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
+                       *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
+                       qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
+                       if (cm_id_priv->qp_type == IB_QPT_RC) {
+                               *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
+                                                IB_QP_RNR_RETRY |
+                                                IB_QP_MAX_QP_RD_ATOMIC;
+                               qp_attr->timeout =
+                                       cm_id_priv->av.packet_life_time + 1;
+                               qp_attr->retry_cnt = cm_id_priv->retry_count;
+                               qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
+                               qp_attr->max_rd_atomic =
+                                       cm_id_priv->initiator_depth;
+                       }
+                       if (cm_id_priv->alt_av.ah_attr.dlid) {
+                               *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
+                               qp_attr->path_mig_state = IB_MIG_REARM;
+                       }
+               } else {
+                       *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
+                       qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
+                       qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
+                       qp_attr->alt_timeout =
+                               cm_id_priv->alt_av.packet_life_time + 1;
+                       qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
                        qp_attr->path_mig_state = IB_MIG_REARM;
                }
                ret = 0;
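
The ib_cm hunks above are part of the tree-wide 2.6.20-era workqueue conversion: a handler now receives the work_struct pointer itself instead of an opaque data argument, recovers its enclosing object with container_of(), and anything that may be deferred becomes a struct delayed_work. A minimal sketch of the new pattern, using a made-up struct foo rather than anything from cm.c:

    #include <linux/workqueue.h>

    struct foo {
            int value;
            struct delayed_work dwork;
    };

    static void foo_handler(struct work_struct *work)
    {
            /* struct delayed_work embeds a work_struct named "work". */
            struct foo *f = container_of(work, struct foo, dwork.work);

            printk(KERN_INFO "foo: value %d\n", f->value);
    }

    static void foo_init(struct foo *f)
    {
            INIT_DELAYED_WORK(&f->dwork, foo_handler);
            schedule_delayed_work(&f->dwork, 0);    /* delay 0 = run ASAP */
    }
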
index 189f73f3f721f4744f2b78abc2d5f698ce3adc4d,cf48f269743449b6a4797a2727e84148fe1bf176..985a6b564d8feec4781cefd7f3316cf1793013ce
@@@ -344,7 -344,7 +344,7 @@@ static int cma_init_ib_qp(struct rdma_i
                return ret;
  
        qp_attr.qp_state = IB_QPS_INIT;
-       qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+       qp_attr.qp_access_flags = 0;
        qp_attr.port_num = id_priv->id.port_num;
        return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
                                          IB_QP_PKEY_INDEX | IB_QP_PORT);
@@@ -935,13 -935,8 +935,8 @@@ static int cma_req_handler(struct ib_cm
        mutex_lock(&lock);
        ret = cma_acquire_dev(conn_id);
        mutex_unlock(&lock);
-       if (ret) {
-               ret = -ENODEV;
-               cma_exch(conn_id, CMA_DESTROYING);
-               cma_release_remove(conn_id);
-               rdma_destroy_id(&conn_id->id);
-               goto out;
-       }
+       if (ret)
+               goto release_conn_id;
  
        conn_id->cm_id.ib = cm_id;
        cm_id->context = conn_id;
        ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
                              ib_event->private_data + offset,
                              IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
-       if (ret) {
-               /* Destroy the CM ID by returning a non-zero value. */
-               conn_id->cm_id.ib = NULL;
-               cma_exch(conn_id, CMA_DESTROYING);
-               cma_release_remove(conn_id);
-               rdma_destroy_id(&conn_id->id);
-       }
+       if (!ret)
+               goto out;
+       /* Destroy the CM ID by returning a non-zero value. */
+       conn_id->cm_id.ib = NULL;
+ release_conn_id:
+       cma_exch(conn_id, CMA_DESTROYING);
+       cma_release_remove(conn_id);
+       rdma_destroy_id(&conn_id->id);
  out:
        cma_release_remove(listen_id);
        return ret;
@@@ -1341,9 -1340,9 +1340,9 @@@ static int cma_query_ib_route(struct rd
        return (id_priv->query_id < 0) ? id_priv->query_id : 0;
  }
  
 -static void cma_work_handler(void *data)
 +static void cma_work_handler(struct work_struct *_work)
  {
 -      struct cma_work *work = data;
 +      struct cma_work *work = container_of(_work, struct cma_work, work);
        struct rdma_id_private *id_priv = work->id;
        int destroy = 0;
  
@@@ -1374,7 -1373,7 +1373,7 @@@ static int cma_resolve_ib_route(struct 
                return -ENOMEM;
  
        work->id = id_priv;
 -      INIT_WORK(&work->work, cma_work_handler, work);
 +      INIT_WORK(&work->work, cma_work_handler);
        work->old_state = CMA_ROUTE_QUERY;
        work->new_state = CMA_ROUTE_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@@ -1431,7 -1430,7 +1430,7 @@@ static int cma_resolve_iw_route(struct 
                return -ENOMEM;
  
        work->id = id_priv;
 -      INIT_WORK(&work->work, cma_work_handler, work);
 +      INIT_WORK(&work->work, cma_work_handler);
        work->old_state = CMA_ROUTE_QUERY;
        work->new_state = CMA_ROUTE_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@@ -1481,19 -1480,18 +1480,18 @@@ static int cma_bind_loopback(struct rdm
        u8 p;
  
        mutex_lock(&lock);
+       if (list_empty(&dev_list)) {
+               ret = -ENODEV;
+               goto out;
+       }
        list_for_each_entry(cma_dev, &dev_list, list)
                for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
-                       if (!ib_query_port (cma_dev->device, p, &port_attr) &&
+                       if (!ib_query_port(cma_dev->device, p, &port_attr) &&
                            port_attr.state == IB_PORT_ACTIVE)
                                goto port_found;
  
-       if (!list_empty(&dev_list)) {
-               p = 1;
-               cma_dev = list_entry(dev_list.next, struct cma_device, list);
-       } else {
-               ret = -ENODEV;
-               goto out;
-       }
+       p = 1;
+       cma_dev = list_entry(dev_list.next, struct cma_device, list);
  
  port_found:
        ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
@@@ -1585,7 -1583,7 +1583,7 @@@ static int cma_resolve_loopback(struct 
        }
  
        work->id = id_priv;
 -      INIT_WORK(&work->work, cma_work_handler, work);
 +      INIT_WORK(&work->work, cma_work_handler);
        work->old_state = CMA_ADDR_QUERY;
        work->new_state = CMA_ADDR_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
@@@ -2123,8 -2121,6 +2121,6 @@@ static void cma_add_one(struct ib_devic
  
        cma_dev->device = device;
        cma_dev->node_guid = device->node_guid;
-       if (!cma_dev->node_guid)
-               goto err;
  
        init_completion(&cma_dev->comp);
        atomic_set(&cma_dev->refcount, 1);
        list_for_each_entry(id_priv, &listen_any_list, list)
                cma_listen_on_dev(id_priv, cma_dev);
        mutex_unlock(&lock);
-       return;
- err:
-       kfree(cma_dev);
  }
  
  static int cma_remove_id_dev(struct rdma_id_private *id_priv)
index 9bfa785252dc746a59b02267952cd400205b3846,cf797d7aea09a0673617a1e04aa56da384509d8c..1039ad57d53b4b0f3ef2d252c51fb9a47bfd68f0
@@@ -80,7 -80,7 +80,7 @@@ struct iwcm_work 
   * 1) in the event upcall, cm_event_handler(), for a listening cm_id.  If
   *    the backlog is exceeded, then no more connection request events will
   *    be processed.  cm_event_handler() returns -ENOMEM in this case.  It's up
-  *    to the provider to reject the connectino request.
+  *    to the provider to reject the connection request.
   * 2) in the connection request workqueue handler, cm_conn_req_handler().
   *    If work elements cannot be allocated for the new connect request cm_id,
   *    then IWCM will call the provider reject method.  This is ok since
@@@ -131,26 -131,25 +131,25 @@@ static int alloc_work_entries(struct iw
  }
  
  /*
-  * Save private data from incoming connection requests in the
-  * cm_id_priv so the low level driver doesn't have to.  Adjust
+  * Save private data from incoming connection requests to
+  * iw_cm_event, so the low level driver doesn't have to. Adjust
   * the event ptr to point to the local copy.
   */
- static int copy_private_data(struct iwcm_id_private *cm_id_priv,
-                      struct iw_cm_event *event)
+ static int copy_private_data(struct iw_cm_event *event)
  {
        void *p;
  
-       p = kmalloc(event->private_data_len, GFP_ATOMIC);
+       p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
        if (!p)
                return -ENOMEM;
-       memcpy(p, event->private_data, event->private_data_len);
        event->private_data = p;
        return 0;
  }
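
kmemdup(), used above, was added in 2.6.19 as a one-call replacement for exactly the kmalloc()+memcpy() pair it displaces here; a hedged sketch with illustrative src/len variables:

    #include <linux/string.h>
    #include <linux/slab.h>

    /* Illustrative only: duplicate "len" bytes from "src" atomically. */
    void *copy = kmemdup(src, len, GFP_ATOMIC);
    if (!copy)
            return -ENOMEM;     /* allocation failed; "src" is untouched */
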
  
  /*
-  * Release a reference on cm_id. If the last reference is being removed
-  * and iw_destroy_cm_id is waiting, wake up the waiting thread.
+  * Release a reference on cm_id. When the last reference is
+  * released, wake up any thread waiting in iw_destroy_cm_id and
+  * return 1; otherwise return 0.
   */
  static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
  {
@@@ -243,7 -242,7 +242,7 @@@ static int iwcm_modify_qp_sqd(struct ib
  /*
   * CM_ID <-- CLOSING
   *
-  * Block if a passive or active connection is currenlty being processed. Then
+  * Block if a passive or active connection is currently being processed. Then
   * process the event as follows:
   * - If we are ESTABLISHED, move to CLOSING and modify the QP state
   *   based on the abrupt flag
@@@ -408,7 -407,7 +407,7 @@@ int iw_cm_listen(struct iw_cm_id *cm_id
  {
        struct iwcm_id_private *cm_id_priv;
        unsigned long flags;
-       int ret = 0;
+       int ret;
  
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
  
@@@ -535,7 -534,7 +534,7 @@@ EXPORT_SYMBOL(iw_cm_accept)
  int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
  {
        struct iwcm_id_private *cm_id_priv;
-       int ret = 0;
+       int ret;
        unsigned long flags;
        struct ib_qp *qp;
  
@@@ -620,7 -619,7 +619,7 @@@ static void cm_conn_req_handler(struct 
        spin_lock_irqsave(&listen_id_priv->lock, flags);
        if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
                spin_unlock_irqrestore(&listen_id_priv->lock, flags);
-               return;
+               goto out;
        }
        spin_unlock_irqrestore(&listen_id_priv->lock, flags);
  
                                listen_id_priv->id.context);
        /* If the cm_id could not be created, ignore the request */
        if (IS_ERR(cm_id))
-               return;
+               goto out;
  
        cm_id->provider_data = iw_event->provider_data;
        cm_id->local_addr = iw_event->local_addr;
        if (ret) {
                iw_cm_reject(cm_id, NULL, 0);
                iw_destroy_cm_id(cm_id);
-               return;
+               goto out;
        }
  
        /* Call the client CM handler */
                        kfree(cm_id);
        }
  
+ out:
        if (iw_event->private_data_len)
                kfree(iw_event->private_data);
  }
@@@ -674,7 -674,7 +674,7 @@@ static int cm_conn_est_handler(struct i
                               struct iw_cm_event *iw_event)
  {
        unsigned long flags;
-       int ret = 0;
+       int ret;
  
        spin_lock_irqsave(&cm_id_priv->lock, flags);
  
@@@ -704,7 -704,7 +704,7 @@@ static int cm_conn_rep_handler(struct i
                               struct iw_cm_event *iw_event)
  {
        unsigned long flags;
-       int ret = 0;
+       int ret;
  
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        /*
@@@ -828,10 -828,10 +828,10 @@@ static int process_event(struct iwcm_id
   * thread asleep on the destroy_comp list vs. an object destroyed
   * here synchronously when the last reference is removed.
   */
 -static void cm_work_handler(void *arg)
 +static void cm_work_handler(struct work_struct *_work)
  {
-       struct iwcm_work lwork, *work =
-               container_of(_work, struct iwcm_work, work);
 -      struct iwcm_work *work = arg;
++      struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
+       struct iw_cm_event levent;
        struct iwcm_id_private *cm_id_priv = work->cm_id;
        unsigned long flags;
        int empty;
                                  struct iwcm_work, list);
                list_del_init(&work->list);
                empty = list_empty(&cm_id_priv->work_list);
-               lwork = *work;
+               levent = work->event;
                put_work(work);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  
-               ret = process_event(cm_id_priv, &work->event);
+               ret = process_event(cm_id_priv, &levent);
                if (ret) {
                        set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
                        destroy_cm_id(&cm_id_priv->id);
@@@ -900,14 -900,14 +900,14 @@@ static int cm_event_handler(struct iw_c
                goto out;
        }
  
 -      INIT_WORK(&work->work, cm_work_handler, work);
 +      INIT_WORK(&work->work, cm_work_handler);
        work->cm_id = cm_id_priv;
        work->event = *iw_event;
  
        if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
             work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
            work->event.private_data_len) {
-               ret = copy_private_data(cm_id_priv, &work->event);
+               ret = copy_private_data(&work->event);
                if (ret) {
                        put_work(work);
                        goto out;
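
The cm_work_handler() rework above also fixes a subtle use-after-free: the old code passed &work->event to process_event() after put_work() had already returned the work element to the free list. The corrected ordering, using the names from the hunk:

    struct iw_cm_event levent;

    levent = work->event;   /* copy out what we still need ...       */
    put_work(work);         /* ... before the element is recycled    */
    spin_unlock_irqrestore(&cm_id_priv->lock, flags);

    ret = process_event(cm_id_priv, &levent);
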
index 5a54ac35e9612abfe3a104098e482fda478fc8ff,3f9c16232c4d7f95971ba8298f6cef738cd1b3cb..15f38d94b3a853b3a86f002cd11c8e0333a12ba8
@@@ -46,7 -46,7 +46,7 @@@ MODULE_DESCRIPTION("kernel IB MAD API")
  MODULE_AUTHOR("Hal Rosenstock");
  MODULE_AUTHOR("Sean Hefty");
  
- static kmem_cache_t *ib_mad_cache;
+ static struct kmem_cache *ib_mad_cache;
  
  static struct list_head ib_mad_port_list;
  static u32 ib_mad_client_id = 0;
@@@ -65,8 -65,8 +65,8 @@@ static struct ib_mad_agent_private *fin
  static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                    struct ib_mad_private *mad);
  static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
 -static void timeout_sends(void *data);
 -static void local_completions(void *data);
 +static void timeout_sends(struct work_struct *work);
 +static void local_completions(struct work_struct *work);
  static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                              struct ib_mad_agent_private *agent_priv,
                              u8 mgmt_class);
@@@ -356,9 -356,10 +356,9 @@@ struct ib_mad_agent *ib_register_mad_ag
        INIT_LIST_HEAD(&mad_agent_priv->wait_list);
        INIT_LIST_HEAD(&mad_agent_priv->done_list);
        INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
 -      INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
 +      INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
        INIT_LIST_HEAD(&mad_agent_priv->local_list);
 -      INIT_WORK(&mad_agent_priv->local_work, local_completions,
 -                 mad_agent_priv);
 +      INIT_WORK(&mad_agent_priv->local_work, local_completions);
        atomic_set(&mad_agent_priv->refcount, 1);
        init_completion(&mad_agent_priv->comp);
  
@@@ -2197,12 -2198,12 +2197,12 @@@ static void mad_error_handler(struct ib
  /*
   * IB MAD completion callback
   */
 -static void ib_mad_completion_handler(void *data)
 +static void ib_mad_completion_handler(struct work_struct *work)
  {
        struct ib_mad_port_private *port_priv;
        struct ib_wc wc;
  
 -      port_priv = (struct ib_mad_port_private *)data;
 +      port_priv = container_of(work, struct ib_mad_port_private, work);
        ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
  
        while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
@@@ -2323,7 -2324,7 +2323,7 @@@ void ib_cancel_mad(struct ib_mad_agent 
  }
  EXPORT_SYMBOL(ib_cancel_mad);
  
 -static void local_completions(void *data)
 +static void local_completions(struct work_struct *work)
  {
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_local_private *local;
        struct ib_wc wc;
        struct ib_mad_send_wc mad_send_wc;
  
 -      mad_agent_priv = (struct ib_mad_agent_private *)data;
 +      mad_agent_priv =
 +              container_of(work, struct ib_mad_agent_private, local_work);
  
        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        while (!list_empty(&mad_agent_priv->local_list)) {
@@@ -2434,15 -2434,14 +2434,15 @@@ static int retry_send(struct ib_mad_sen
        return ret;
  }
  
 -static void timeout_sends(void *data)
 +static void timeout_sends(struct work_struct *work)
  {
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;
        struct ib_mad_send_wc mad_send_wc;
        unsigned long flags, delay;
  
 -      mad_agent_priv = (struct ib_mad_agent_private *)data;
 +      mad_agent_priv = container_of(work, struct ib_mad_agent_private,
 +                                    timed_work.work);
        mad_send_wc.vendor_err = 0;
  
        spin_lock_irqsave(&mad_agent_priv->lock, flags);
@@@ -2800,7 -2799,7 +2800,7 @@@ static int ib_mad_port_open(struct ib_d
                ret = -ENOMEM;
                goto error8;
        }
 -      INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
 +      INIT_WORK(&port_priv->work, ib_mad_completion_handler);
  
        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        list_add_tail(&port_priv->port_list, &ib_mad_port_list);
index 3b2ee0eba05e308568a4016350e36bd071db0b64,f2b61851a49c5b3e5838cb193fe983e87e36246e..99547996aba2e742e2c8f1aea10106f7ebeccbe1
@@@ -136,11 -136,11 +136,11 @@@ struct ipoib_dev_priv 
        struct list_head multicast_list;
        struct rb_root multicast_tree;
  
 -      struct work_struct pkey_task;
 -      struct work_struct mcast_task;
 +      struct delayed_work pkey_task;
 +      struct delayed_work mcast_task;
        struct work_struct flush_task;
        struct work_struct restart_task;
 -      struct work_struct ah_reap_task;
 +      struct delayed_work ah_reap_task;
  
        struct ib_device *ca;
        u8                port;
@@@ -233,7 -233,7 +233,7 @@@ static inline struct ipoib_neigh **to_i
  }
  
  struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh);
- void ipoib_neigh_free(struct ipoib_neigh *neigh);
+ void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh);
  
  extern struct workqueue_struct *ipoib_workqueue;
  
@@@ -254,13 -254,13 +254,13 @@@ int ipoib_add_pkey_attr(struct net_devi
  
  void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                struct ipoib_ah *address, u32 qpn);
 -void ipoib_reap_ah(void *dev_ptr);
 +void ipoib_reap_ah(struct work_struct *work);
  
  void ipoib_flush_paths(struct net_device *dev);
  struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
  
  int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
 -void ipoib_ib_dev_flush(void *dev);
 +void ipoib_ib_dev_flush(struct work_struct *work);
  void ipoib_ib_dev_cleanup(struct net_device *dev);
  
  int ipoib_ib_dev_open(struct net_device *dev);
@@@ -271,10 -271,10 +271,10 @@@ int ipoib_ib_dev_stop(struct net_devic
  int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
  void ipoib_dev_cleanup(struct net_device *dev);
  
 -void ipoib_mcast_join_task(void *dev_ptr);
 +void ipoib_mcast_join_task(struct work_struct *work);
  void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
  
 -void ipoib_mcast_restart_task(void *dev_ptr);
 +void ipoib_mcast_restart_task(struct work_struct *work);
  int ipoib_mcast_start_thread(struct net_device *dev);
  int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
  
@@@ -312,7 -312,7 +312,7 @@@ void ipoib_event(struct ib_event_handle
  int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
  int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
  
 -void ipoib_pkey_poll(void *dev);
 +void ipoib_pkey_poll(struct work_struct *work);
  int ipoib_pkey_dev_delay_open(struct net_device *dev);
  
  #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
index 71114a8c12b940eef670827ee701a81e8bdebc4d,5ba3154320b4f9cb3c2dd8a80cd635e65a0d7636..c092802437263ca5c7ea1f5a05bcd2c37e5fa2f7
@@@ -264,7 -264,7 +264,7 @@@ static void path_free(struct net_devic
                if (neigh->ah)
                        ipoib_put_ah(neigh->ah);
  
-               ipoib_neigh_free(neigh);
+               ipoib_neigh_free(dev, neigh);
        }
  
        spin_unlock_irqrestore(&priv->lock, flags);
@@@ -525,10 -525,11 +525,11 @@@ static void neigh_add_path(struct sk_bu
                ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb->dst->neighbour->ha));
        } else {
                neigh->ah  = NULL;
-               __skb_queue_tail(&neigh->queue, skb);
  
                if (!path->query && path_rec_start(dev, path))
                        goto err_list;
+               __skb_queue_tail(&neigh->queue, skb);
        }
  
        spin_unlock(&priv->lock);
@@@ -538,7 -539,7 +539,7 @@@ err_list
        list_del(&neigh->list);
  
  err_path:
-       ipoib_neigh_free(neigh);
+       ipoib_neigh_free(dev, neigh);
        ++priv->stats.tx_dropped;
        dev_kfree_skb_any(skb);
  
@@@ -655,7 -656,7 +656,7 @@@ static int ipoib_start_xmit(struct sk_b
                                 */
                                ipoib_put_ah(neigh->ah);
                                list_del(&neigh->list);
-                               ipoib_neigh_free(neigh);
+                               ipoib_neigh_free(dev, neigh);
                                spin_unlock(&priv->lock);
                                ipoib_path_lookup(skb, dev);
                                goto out;
@@@ -786,7 -787,7 +787,7 @@@ static void ipoib_neigh_destructor(stru
                if (neigh->ah)
                        ah = neigh->ah;
                list_del(&neigh->list);
-               ipoib_neigh_free(neigh);
+               ipoib_neigh_free(n->dev, neigh);
        }
  
        spin_unlock_irqrestore(&priv->lock, flags);
@@@ -809,9 -810,15 +810,15 @@@ struct ipoib_neigh *ipoib_neigh_alloc(s
        return neigh;
  }
  
- void ipoib_neigh_free(struct ipoib_neigh *neigh)
+ void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
  {
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       struct sk_buff *skb;
        *to_ipoib_neigh(neigh->neighbour) = NULL;
+       while ((skb = __skb_dequeue(&neigh->queue))) {
+               ++priv->stats.tx_dropped;
+               dev_kfree_skb_any(skb);
+       }
        kfree(neigh);
  }
  
@@@ -933,11 -940,11 +940,11 @@@ static void ipoib_setup(struct net_devi
        INIT_LIST_HEAD(&priv->dead_ahs);
        INIT_LIST_HEAD(&priv->multicast_list);
  
 -      INIT_WORK(&priv->pkey_task,    ipoib_pkey_poll,          priv->dev);
 -      INIT_WORK(&priv->mcast_task,   ipoib_mcast_join_task,    priv->dev);
 -      INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush,       priv->dev);
 -      INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
 -      INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah,            priv->dev);
 +      INIT_DELAYED_WORK(&priv->pkey_task,    ipoib_pkey_poll);
 +      INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
 +      INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush);
 +      INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
 +      INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
  }
  
  struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
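
ipoib_neigh_free() now takes the net_device so it can drain any packets still parked on the dying neighbour's queue and account them as TX drops, instead of leaking them at kfree() time. The drain loop, restated as a generic sketch with the hunk's own names:

    struct sk_buff *skb;

    /* Empty the private skb queue, counting each packet as a drop. */
    while ((skb = __skb_dequeue(&neigh->queue))) {
            ++priv->stats.tx_dropped;
            dev_kfree_skb_any(skb);     /* safe even in irq context */
    }
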
index f0a4fac1a21516dec1ee7b22376db90292192841,d282d65e3ee00bb312634b0a62a93c0188d68afc..b04b72ca32eda5816e79780ed5553f51d4e53d1e
@@@ -114,7 -114,7 +114,7 @@@ static void ipoib_mcast_free(struct ipo
                 */
                if (neigh->ah)
                        ipoib_put_ah(neigh->ah);
-               ipoib_neigh_free(neigh);
+               ipoib_neigh_free(dev, neigh);
        }
  
        spin_unlock_irqrestore(&priv->lock, flags);
@@@ -399,8 -399,7 +399,8 @@@ static void ipoib_mcast_join_complete(i
                mcast->backoff = 1;
                mutex_lock(&mcast_mutex);
                if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
 -                      queue_work(ipoib_workqueue, &priv->mcast_task);
 +                      queue_delayed_work(ipoib_workqueue,
 +                                         &priv->mcast_task, 0);
                mutex_unlock(&mcast_mutex);
                complete(&mcast->done);
                return;
  
        if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
                if (status == -ETIMEDOUT)
 -                      queue_work(ipoib_workqueue, &priv->mcast_task);
 +                      queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
 +                                         0);
                else
                        queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
                                           mcast->backoff * HZ);
@@@ -519,11 -517,10 +519,11 @@@ static void ipoib_mcast_join(struct net
                mcast->query_id = ret;
  }
  
 -void ipoib_mcast_join_task(void *dev_ptr)
 +void ipoib_mcast_join_task(struct work_struct *work)
  {
 -      struct net_device *dev = dev_ptr;
 -      struct ipoib_dev_priv *priv = netdev_priv(dev);
 +      struct ipoib_dev_priv *priv =
 +              container_of(work, struct ipoib_dev_priv, mcast_task.work);
 +      struct net_device *dev = priv->dev;
  
        if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
                return;
@@@ -613,7 -610,7 +613,7 @@@ int ipoib_mcast_start_thread(struct net
  
        mutex_lock(&mcast_mutex);
        if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
 -              queue_work(ipoib_workqueue, &priv->mcast_task);
 +              queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
        mutex_unlock(&mcast_mutex);
  
        spin_lock_irq(&priv->lock);
@@@ -821,11 -818,10 +821,11 @@@ void ipoib_mcast_dev_flush(struct net_d
        }
  }
  
 -void ipoib_mcast_restart_task(void *dev_ptr)
 +void ipoib_mcast_restart_task(struct work_struct *work)
  {
 -      struct net_device *dev = dev_ptr;
 -      struct ipoib_dev_priv *priv = netdev_priv(dev);
 +      struct ipoib_dev_priv *priv =
 +              container_of(work, struct ipoib_dev_priv, restart_task);
 +      struct net_device *dev = priv->dev;
        struct dev_mc_list *mclist;
        struct ipoib_mcast *mcast, *tmcast;
        LIST_HEAD(remove_list);
index 214f66195af23e6699584c49273f44dfec32d026,64ab5fc7cca38db98ad503dbf4e8cf2a5fbd7eb3..a6289595557b704bc1a21d478e77047f6526c194
@@@ -390,10 -390,9 +390,10 @@@ static void srp_disconnect_target(struc
        wait_for_completion(&target->done);
  }
  
 -static void srp_remove_work(void *target_ptr)
 +static void srp_remove_work(struct work_struct *work)
  {
 -      struct srp_target_port *target = target_ptr;
 +      struct srp_target_port *target =
 +              container_of(work, struct srp_target_port, work);
  
        spin_lock_irq(target->scsi_host->host_lock);
        if (target->state != SRP_TARGET_DEAD) {
@@@ -576,7 -575,7 +576,7 @@@ err
        spin_lock_irq(target->scsi_host->host_lock);
        if (target->state == SRP_TARGET_CONNECTING) {
                target->state = SRP_TARGET_DEAD;
 -              INIT_WORK(&target->work, srp_remove_work, target);
 +              INIT_WORK(&target->work, srp_remove_work);
                schedule_work(&target->work);
        }
        spin_unlock_irq(target->scsi_host->host_lock);
@@@ -1177,9 -1176,11 +1177,11 @@@ static int srp_cm_handler(struct ib_cm_
                        break;
                }
  
-               target->status = srp_alloc_iu_bufs(target);
-               if (target->status)
-                       break;
+               if (!target->rx_ring[0]) {
+                       target->status = srp_alloc_iu_bufs(target);
+                       if (target->status)
+                               break;
+               }
  
                qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
                if (!qp_attr) {
@@@ -1717,7 -1718,8 +1719,8 @@@ static ssize_t srp_create_target(struc
        if (!target_host)
                return -ENOMEM;
  
-       target_host->max_lun = SRP_MAX_LUN;
+       target_host->max_lun     = SRP_MAX_LUN;
+       target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
  
        target = host_to_target(target_host);
  
index dd0bcbe140bde61a8e6518941d02f4a951987374,55bc891768c29e0a8ba0a07d12b58ab91667f646..8458ff3f351e265b15affc9f71541b9f7802503c
@@@ -127,7 -127,7 +127,7 @@@ struct cinergyt2 
  
        struct dvbt_set_parameters_msg param;
        struct dvbt_get_status_msg status;
 -      struct work_struct query_work;
 +      struct delayed_work query_work;
  
        wait_queue_head_t poll_wq;
        int pending_fe_events;
  #ifdef ENABLE_RC
        struct input_dev *rc_input_dev;
        char phys[64];
 -      struct work_struct rc_query_work;
 +      struct delayed_work rc_query_work;
        int rc_input_event;
        u32 rc_last_code;
        unsigned long last_event_jiffies;
@@@ -275,8 -275,7 +275,7 @@@ static void cinergyt2_free_stream_urbs 
        int i;
  
        for (i=0; i<STREAM_URB_COUNT; i++)
-               if (cinergyt2->stream_urb[i])
-                       usb_free_urb(cinergyt2->stream_urb[i]);
+               usb_free_urb(cinergyt2->stream_urb[i]);
  
        usb_buffer_free(cinergyt2->udev, STREAM_URB_COUNT*STREAM_BUF_SIZE,
                            cinergyt2->streambuf, cinergyt2->streambuf_dmahandle);
@@@ -320,8 -319,7 +319,7 @@@ static void cinergyt2_stop_stream_xfer 
        cinergyt2_control_stream_transfer(cinergyt2, 0);
  
        for (i=0; i<STREAM_URB_COUNT; i++)
-               if (cinergyt2->stream_urb[i])
-                       usb_kill_urb(cinergyt2->stream_urb[i]);
+               usb_kill_urb(cinergyt2->stream_urb[i]);
  }
  
  static int cinergyt2_start_stream_xfer (struct cinergyt2 *cinergyt2)
@@@ -724,10 -722,9 +722,10 @@@ static struct dvb_device cinergyt2_fe_t
  
  #ifdef ENABLE_RC
  
 -static void cinergyt2_query_rc (void *data)
 +static void cinergyt2_query_rc (struct work_struct *work)
  {
 -      struct cinergyt2 *cinergyt2 = data;
 +      struct cinergyt2 *cinergyt2 =
 +              container_of(work, struct cinergyt2, rc_query_work.work);
        char buf[1] = { CINERGYT2_EP1_GET_RC_EVENTS };
        struct cinergyt2_rc_event rc_events[12];
        int n, len, i;
@@@ -808,7 -805,7 +806,7 @@@ static int cinergyt2_register_rc(struc
        strlcat(cinergyt2->phys, "/input0", sizeof(cinergyt2->phys));
        cinergyt2->rc_input_event = KEY_MAX;
        cinergyt2->rc_last_code = ~0;
 -      INIT_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc, cinergyt2);
 +      INIT_DELAYED_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc);
  
        input_dev->name = DRIVER_NAME " remote control";
        input_dev->phys = cinergyt2->phys;
@@@ -849,10 -846,9 +847,10 @@@ static inline void cinergyt2_resume_rc(
  
  #endif /* ENABLE_RC */
  
 -static void cinergyt2_query (void *data)
 +static void cinergyt2_query (struct work_struct *work)
  {
 -      struct cinergyt2 *cinergyt2 = (struct cinergyt2 *) data;
 +      struct cinergyt2 *cinergyt2 =
 +              container_of(work, struct cinergyt2, query_work.work);
        char cmd [] = { CINERGYT2_EP1_GET_TUNER_STATUS };
        struct dvbt_get_status_msg *s = &cinergyt2->status;
        uint8_t lock_bits;
@@@ -896,7 -892,7 +894,7 @@@ static int cinergyt2_probe (struct usb_
  
        mutex_init(&cinergyt2->sem);
        init_waitqueue_head (&cinergyt2->poll_wq);
 -      INIT_WORK(&cinergyt2->query_work, cinergyt2_query, cinergyt2);
 +      INIT_DELAYED_WORK(&cinergyt2->query_work, cinergyt2_query);
  
        cinergyt2->udev = interface_to_usbdev(intf);
        cinergyt2->param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
index 8ba05c214ca7a5222771fd73f3d4ce09a7d4aa71,7b9859c33018e41803be47f094626cd6b17409c2..92eabf88a09b72e336ce55ea1b4f4901b432ce18
@@@ -212,8 -212,10 +212,10 @@@ static void read_from_buf(struct saa658
        if (rd_blocks > s->block_count)
                rd_blocks = s->block_count;
  
-       if (!rd_blocks)
+       if (!rd_blocks) {
+               spin_unlock_irqrestore(&s->lock, flags);
                return;
+       }
  
        for (i = 0; i < rd_blocks; i++) {
                if (block_to_user_buf(s, buf_ptr)) {
@@@ -322,9 -324,9 +324,9 @@@ static void saa6588_timer(unsigned lon
        schedule_work(&s->work);
  }
  
 -static void saa6588_work(void *data)
 +static void saa6588_work(struct work_struct *work)
  {
 -      struct saa6588 *s = (struct saa6588 *)data;
 +      struct saa6588 *s = container_of(work, struct saa6588, work);
  
        saa6588_i2c_poll(s);
        mod_timer(&s->timer, jiffies + msecs_to_jiffies(20));
@@@ -417,7 -419,7 +419,7 @@@ static int saa6588_attach(struct i2c_ad
        saa6588_configure(s);
  
        /* start polling via eventd */
 -      INIT_WORK(&s->work, saa6588_work, s);
 +      INIT_WORK(&s->work, saa6588_work);
        init_timer(&s->timer);
        s->timer.function = saa6588_timer;
        s->timer.data = (unsigned long)s;
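
The saa6588 polling above bounces between a timer and the shared workqueue on purpose: timer callbacks run in softirq context, where the I2C transfer must not sleep, so the timer only schedules the work item, and the work handler polls in process context and then re-arms the timer. The round trip, condensed from the hunks above:

    static void saa6588_timer(unsigned long data)
    {
            struct saa6588 *s = (struct saa6588 *)data;

            schedule_work(&s->work);            /* punt to process context */
    }

    static void saa6588_work(struct work_struct *work)
    {
            struct saa6588 *s = container_of(work, struct saa6588, work);

            saa6588_i2c_poll(s);                /* may sleep: fine here */
            mod_timer(&s->timer, jiffies + msecs_to_jiffies(20));
    }
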
diff --combined drivers/mmc/mmc.c
index 21fd39e4a20f2b437cd05b0c3ae0ee8e42cf83c1,9d190022a4905f8d3682699ab2c87ec67e693ff6..6f2a282e2b9759c0511cf5501a0bd4e9bd501e3f
@@@ -4,6 -4,7 +4,7 @@@
   *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
   *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
   *  SD support Copyright (C) 2005 Pierre Ossman, All Rights Reserved.
+  *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 as
@@@ -396,23 -397,23 +397,23 @@@ static int mmc_select_card(struct mmc_h
                return err;
  
        /*
-        * Default bus width is 1 bit.
-        */
-       host->ios.bus_width = MMC_BUS_WIDTH_1;
-       /*
-        * We can only change the bus width of the selected
-        * card so therefore we have to put the handling
+        * We can only change the bus width of SD cards when
+        * they are selected so we have to put the handling
         * here.
+        *
+        * The card is in 1 bit mode by default so
+        * we only need to change it if it supports the
+        * wider version.
         */
-       if (host->caps & MMC_CAP_4_BIT_DATA) {
+       if (mmc_card_sd(card) &&
+               (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
                /*
-                * The card is in 1 bit mode by default so
-                * we only need to change if it supports the
-                * wider version.
-                */
-               if (mmc_card_sd(card) &&
-                       (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
+               * Default bus width is 1 bit.
+               */
+               host->ios.bus_width = MMC_BUS_WIDTH_1;
+               if (host->caps & MMC_CAP_4_BIT_DATA) {
                        struct mmc_command cmd;
                        cmd.opcode = SD_APP_SET_BUS_WIDTH;
                        cmd.arg = SD_BUS_WIDTH_4;
@@@ -453,11 -454,11 +454,11 @@@ static void mmc_deselect_cards(struct m
  
  static inline void mmc_delay(unsigned int ms)
  {
-       if (ms < HZ / 1000) {
-               yield();
+       if (ms < 1000 / HZ) {
+               cond_resched();
                mdelay(ms);
        } else {
-               msleep_interruptible (ms);
+               msleep(ms);
        }
  }
  
@@@ -953,6 -954,137 +954,137 @@@ static void mmc_read_csds(struct mmc_ho
        }
  }
  
+ static void mmc_process_ext_csds(struct mmc_host *host)
+ {
+       int err;
+       struct mmc_card *card;
+       struct mmc_request mrq;
+       struct mmc_command cmd;
+       struct mmc_data data;
+       struct scatterlist sg;
+       /*
+        * As the ext_csd is so large and mostly unused, we don't store the
+        * raw block in mmc_card.
+        */
+       u8 *ext_csd;
+       ext_csd = kmalloc(512, GFP_KERNEL);
+       if (!ext_csd) {
+               printk("%s: could not allocate a buffer to receive the ext_csd."
+                      "mmc v4 cards will be treated as v3.\n",
+                       mmc_hostname(host));
+               return;
+       }
+       list_for_each_entry(card, &host->cards, node) {
+               if (card->state & (MMC_STATE_DEAD|MMC_STATE_PRESENT))
+                       continue;
+               if (mmc_card_sd(card))
+                       continue;
+               if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
+                       continue;
+               err = mmc_select_card(host, card);
+               if (err != MMC_ERR_NONE) {
+                       mmc_card_set_dead(card);
+                       continue;
+               }
+               memset(&cmd, 0, sizeof(struct mmc_command));
+               cmd.opcode = MMC_SEND_EXT_CSD;
+               cmd.arg = 0;
+               cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+               memset(&data, 0, sizeof(struct mmc_data));
+               mmc_set_data_timeout(&data, card, 0);
+               data.blksz = 512;
+               data.blocks = 1;
+               data.flags = MMC_DATA_READ;
+               data.sg = &sg;
+               data.sg_len = 1;
+               memset(&mrq, 0, sizeof(struct mmc_request));
+               mrq.cmd = &cmd;
+               mrq.data = &data;
+               sg_init_one(&sg, ext_csd, 512);
+               mmc_wait_for_req(host, &mrq);
+               if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
+                       mmc_card_set_dead(card);
+                       continue;
+               }
+               switch (ext_csd[EXT_CSD_CARD_TYPE]) {
+               case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
+                       card->ext_csd.hs_max_dtr = 52000000;
+                       break;
+               case EXT_CSD_CARD_TYPE_26:
+                       card->ext_csd.hs_max_dtr = 26000000;
+                       break;
+               default:
+                       /* MMC v4 spec says this cannot happen */
+                       printk("%s: card is mmc v4 but doesn't support "
+                              "any high-speed modes.\n",
+                               mmc_hostname(card->host));
+                       mmc_card_set_bad(card);
+                       continue;
+               }
+               /* Activate highspeed support. */
+               cmd.opcode = MMC_SWITCH;
+               cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+                         (EXT_CSD_HS_TIMING << 16) |
+                         (1 << 8) |
+                         EXT_CSD_CMD_SET_NORMAL;
+               cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+               err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
+               if (err != MMC_ERR_NONE) {
+                       printk("%s: failed to switch card to mmc v4 "
+                              "high-speed mode.\n",
+                              mmc_hostname(card->host));
+                       continue;
+               }
+               mmc_card_set_highspeed(card);
+               /* Check for host support for wide-bus modes. */
+               if (!(host->caps & MMC_CAP_4_BIT_DATA))
+                       continue;
+               /* Activate 4-bit support. */
+               cmd.opcode = MMC_SWITCH;
+               cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+                         (EXT_CSD_BUS_WIDTH << 16) |
+                         (EXT_CSD_BUS_WIDTH_4 << 8) |
+                         EXT_CSD_CMD_SET_NORMAL;
+               cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+               err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
+               if (err != MMC_ERR_NONE) {
+                       printk("%s: failed to switch card to "
+                              "mmc v4 4-bit bus mode.\n",
+                              mmc_hostname(card->host));
+                       continue;
+               }
+               host->ios.bus_width = MMC_BUS_WIDTH_4;
+       }
+       kfree(ext_csd);
+       mmc_deselect_cards(host);
+ }
  static void mmc_read_scrs(struct mmc_host *host)
  {
        int err;
        mmc_deselect_cards(host);
  }
  
+ static void mmc_read_switch_caps(struct mmc_host *host)
+ {
+       int err;
+       struct mmc_card *card;
+       struct mmc_request mrq;
+       struct mmc_command cmd;
+       struct mmc_data data;
+       unsigned char *status;
+       struct scatterlist sg;
+       status = kmalloc(64, GFP_KERNEL);
+       if (!status) {
+               printk(KERN_WARNING "%s: Unable to allocate buffer for "
+                       "reading switch capabilities.\n",
+                       mmc_hostname(host));
+               return;
+       }
+       list_for_each_entry(card, &host->cards, node) {
+               if (card->state & (MMC_STATE_DEAD|MMC_STATE_PRESENT))
+                       continue;
+               if (!mmc_card_sd(card))
+                       continue;
+               if (card->scr.sda_vsn < SCR_SPEC_VER_1)
+                       continue;
+               err = mmc_select_card(host, card);
+               if (err != MMC_ERR_NONE) {
+                       mmc_card_set_dead(card);
+                       continue;
+               }
+               memset(&cmd, 0, sizeof(struct mmc_command));
+               cmd.opcode = SD_SWITCH;
+               cmd.arg = 0x00FFFFF1;
+               cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+               memset(&data, 0, sizeof(struct mmc_data));
+               mmc_set_data_timeout(&data, card, 0);
+               data.blksz = 64;
+               data.blocks = 1;
+               data.flags = MMC_DATA_READ;
+               data.sg = &sg;
+               data.sg_len = 1;
+               memset(&mrq, 0, sizeof(struct mmc_request));
+               mrq.cmd = &cmd;
+               mrq.data = &data;
+               sg_init_one(&sg, status, 64);
+               mmc_wait_for_req(host, &mrq);
+               if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
+                       mmc_card_set_dead(card);
+                       continue;
+               }
+               if (status[13] & 0x02)
+                       card->sw_caps.hs_max_dtr = 50000000;
+               memset(&cmd, 0, sizeof(struct mmc_command));
+               cmd.opcode = SD_SWITCH;
+               cmd.arg = 0x80FFFFF1;
+               cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+               memset(&data, 0, sizeof(struct mmc_data));
+               mmc_set_data_timeout(&data, card, 0);
+               data.blksz = 64;
+               data.blocks = 1;
+               data.flags = MMC_DATA_READ;
+               data.sg = &sg;
+               data.sg_len = 1;
+               memset(&mrq, 0, sizeof(struct mmc_request));
+               mrq.cmd = &cmd;
+               mrq.data = &data;
+               sg_init_one(&sg, status, 64);
+               mmc_wait_for_req(host, &mrq);
+               if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
+                       mmc_card_set_dead(card);
+                       continue;
+               }
+               if ((status[16] & 0xF) != 1) {
+                       printk(KERN_WARNING "%s: Problem switching card "
+                               "into high-speed mode!\n",
+                               mmc_hostname(host));
+                       continue;
+               }
+               mmc_card_set_highspeed(card);
+       }
+       kfree(status);
+       mmc_deselect_cards(host);
+ }
  static unsigned int mmc_calculate_clock(struct mmc_host *host)
  {
        struct mmc_card *card;
        unsigned int max_dtr = host->f_max;
  
        list_for_each_entry(card, &host->cards, node)
-               if (!mmc_card_dead(card) && max_dtr > card->csd.max_dtr)
-                       max_dtr = card->csd.max_dtr;
+               if (!mmc_card_dead(card)) {
+                       if (mmc_card_highspeed(card) && mmc_card_sd(card)) {
+                               if (max_dtr > card->sw_caps.hs_max_dtr)
+                                       max_dtr = card->sw_caps.hs_max_dtr;
+                       } else if (mmc_card_highspeed(card) && !mmc_card_sd(card)) {
+                               if (max_dtr > card->ext_csd.hs_max_dtr)
+                                       max_dtr = card->ext_csd.hs_max_dtr;
+                       } else if (max_dtr > card->csd.max_dtr) {
+                               max_dtr = card->csd.max_dtr;
+                       }
+               }
  
        pr_debug("%s: selected %d.%03dMHz transfer rate\n",
                 mmc_hostname(host),
@@@ -1150,8 -1401,11 +1401,11 @@@ static void mmc_setup(struct mmc_host *
  
        mmc_read_csds(host);
  
-       if (host->mode == MMC_MODE_SD)
+       if (host->mode == MMC_MODE_SD) {
                mmc_read_scrs(host);
+               mmc_read_switch_caps(host);
+       } else
+               mmc_process_ext_csds(host);
  }
  
  
   */
  void mmc_detect_change(struct mmc_host *host, unsigned long delay)
  {
 -      if (delay)
 -              mmc_schedule_delayed_work(&host->detect, delay);
 -      else
 -              mmc_schedule_work(&host->detect);
 +      mmc_schedule_delayed_work(&host->detect, delay);
  }
  
  EXPORT_SYMBOL(mmc_detect_change);
  
  
 -static void mmc_rescan(void *data)
 +static void mmc_rescan(struct work_struct *work)
  {
 -      struct mmc_host *host = data;
 +      struct mmc_host *host =
 +              container_of(work, struct mmc_host, detect.work);
        struct list_head *l, *n;
        unsigned char power_mode;
  
@@@ -1257,7 -1513,7 +1511,7 @@@ struct mmc_host *mmc_alloc_host(int ext
                spin_lock_init(&host->lock);
                init_waitqueue_head(&host->wq);
                INIT_LIST_HEAD(&host->cards);
 -              INIT_WORK(&host->detect, mmc_rescan, host);
 +              INIT_DELAYED_WORK(&host->detect, mmc_rescan);
  
                /*
                 * By default, hosts do not support SGIO or large requests.
@@@ -1355,7 -1611,7 +1609,7 @@@ EXPORT_SYMBOL(mmc_suspend_host)
   */
  int mmc_resume_host(struct mmc_host *host)
  {
 -      mmc_rescan(host);
 +      mmc_rescan(&host->detect.work);
  
        return 0;
  }
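
The MMC_SWITCH (CMD6) arguments built in mmc_process_ext_csds() pack four byte-wide fields: the access mode in bits 31:24, the EXT_CSD byte index in 23:16, the value to write in 15:8, and the command set in 7:0. A hypothetical helper, not part of the driver, that makes the layout explicit:

    static u32 mmc_switch_arg(u8 index, u8 value)
    {
            /* [31:24] mode, [23:16] index, [15:8] value, [7:0] cmd set */
            return (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
                   (index << 16) |
                   (value << 8)  |
                   EXT_CSD_CMD_SET_NORMAL;
    }

    /* e.g. mmc_switch_arg(EXT_CSD_HS_TIMING, 1) requests high-speed mode;
     * mmc_switch_arg(EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_4) a 4-bit bus. */
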
diff --combined drivers/mmc/mmc_sysfs.c
index fd9a5fc6db7becf7ab9d490af15e400c8f6e7767,ac53296360453c6067f6c565ee0b2857b7fc049d..e334acd045bced56ac88edf5850bde81297d8087
@@@ -199,7 -199,7 +199,7 @@@ void mmc_init_card(struct mmc_card *car
        memset(card, 0, sizeof(struct mmc_card));
        card->host = host;
        device_initialize(&card->dev);
-       card->dev.parent = card->host->dev;
+       card->dev.parent = mmc_dev(host);
        card->dev.bus = &mmc_bus_type;
        card->dev.release = mmc_release_card;
  }
@@@ -242,7 -242,7 +242,7 @@@ void mmc_remove_card(struct mmc_card *c
  }
  
  
- static void mmc_host_classdev_release(struct class_device *dev)
+ static void mmc_host_classdev_release(struct device *dev)
  {
        struct mmc_host *host = cls_dev_to_mmc_host(dev);
        kfree(host);
  
  static struct class mmc_host_class = {
        .name           = "mmc_host",
-       .release        = mmc_host_classdev_release,
+       .dev_release    = mmc_host_classdev_release,
  };
  
  static DEFINE_IDR(mmc_host_idr);
@@@ -267,10 -267,10 +267,10 @@@ struct mmc_host *mmc_alloc_host_sysfs(i
        if (host) {
                memset(host, 0, sizeof(struct mmc_host) + extra);
  
-               host->dev = dev;
-               host->class_dev.dev = host->dev;
+               host->parent = dev;
+               host->class_dev.parent = dev;
                host->class_dev.class = &mmc_host_class;
-               class_device_initialize(&host->class_dev);
+               device_initialize(&host->class_dev);
        }
  
        return host;
@@@ -292,10 -292,10 +292,10 @@@ int mmc_add_host_sysfs(struct mmc_host 
        if (err)
                return err;
  
-       snprintf(host->class_dev.class_id, BUS_ID_SIZE,
+       snprintf(host->class_dev.bus_id, BUS_ID_SIZE,
                 "mmc%d", host->index);
  
-       return class_device_add(&host->class_dev);
+       return device_add(&host->class_dev);
  }
  
  /*
   */
  void mmc_remove_host_sysfs(struct mmc_host *host)
  {
-       class_device_del(&host->class_dev);
+       device_del(&host->class_dev);
  
        spin_lock(&mmc_host_lock);
        idr_remove(&mmc_host_idr, host->index);
   */
  void mmc_free_host_sysfs(struct mmc_host *host)
  {
-       class_device_put(&host->class_dev);
+       put_device(&host->class_dev);
  }
  
  static struct workqueue_struct *workqueue;
  
 -/*
 - * Internal function. Schedule work in the MMC work queue.
 - */
 -int mmc_schedule_work(struct work_struct *work)
 -{
 -      return queue_work(workqueue, work);
 -}
 -
  /*
   * Internal function. Schedule delayed work in the MMC work queue.
   */
 -int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay)
 +int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay)
  {
        return queue_delayed_work(workqueue, work, delay);
  }
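
The mmc_sysfs.c hunks track the driver-core migration away from class_device: the host now embeds a plain struct device, and the lifecycle calls map one-for-one, as the conversion above shows:

    /* class_device_initialize()  ->  device_initialize()
     * class_device_add()         ->  device_add()
     * class_device_del()         ->  device_del()
     * class_device_put()         ->  put_device()
     * struct class .release      ->  struct class .dev_release
     * class_dev.class_id         ->  class_dev.bus_id
     */
    device_initialize(&host->class_dev);
    host->class_dev.parent = dev;
    host->class_dev.class  = &mmc_host_class;
    snprintf(host->class_dev.bus_id, BUS_ID_SIZE, "mmc%d", host->index);
    err = device_add(&host->class_dev);
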
diff --combined drivers/net/bnx2.c
index b12cc4596b8d0168019ea03b3be0a2f58bd4d471,fc2f1d1c7ead437f4665539b9b7e61c461ac527f..5bacb7587df41b77cdc414b278e85750279f4477
  
  #include "bnx2.h"
  #include "bnx2_fw.h"
+ #include "bnx2_fw2.h"
  
  #define DRV_MODULE_NAME               "bnx2"
  #define PFX DRV_MODULE_NAME   ": "
- #define DRV_MODULE_VERSION    "1.4.45"
- #define DRV_MODULE_RELDATE    "September 29, 2006"
+ #define DRV_MODULE_VERSION    "1.5.1"
+ #define DRV_MODULE_RELDATE    "November 15, 2006"
  
  #define RUN_AT(x) (jiffies + (x))
  
@@@ -85,6 -86,7 +86,7 @@@ typedef enum 
        NC370F,
        BCM5708,
        BCM5708S,
+       BCM5709,
  } board_t;
  
  /* indexed by board_t, above */
@@@ -98,6 -100,7 +100,7 @@@ static const struct 
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
+       { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        };
  
  static struct pci_device_id bnx2_pci_tbl[] = {
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
+       { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { 0, }
  };
  
@@@ -236,8 -241,23 +241,23 @@@ static voi
  bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
  {
        offset += cid_addr;
-       REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
-       REG_WR(bp, BNX2_CTX_DATA, val);
+       if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+               int i;
+               REG_WR(bp, BNX2_CTX_CTX_DATA, val);
+               REG_WR(bp, BNX2_CTX_CTX_CTRL,
+                      offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
+               for (i = 0; i < 5; i++) {
+                       u32 val;
+                       val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
+                       if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
+                               break;
+                       udelay(5);
+               }
+       } else {
+               REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
+               REG_WR(bp, BNX2_CTX_DATA, val);
+       }
  }
  
  static int
@@@ -403,6 -423,14 +423,14 @@@ bnx2_free_mem(struct bnx2 *bp
  {
        int i;
  
+       for (i = 0; i < bp->ctx_pages; i++) {
+               if (bp->ctx_blk[i]) {
+                       pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
+                                           bp->ctx_blk[i],
+                                           bp->ctx_blk_mapping[i]);
+                       bp->ctx_blk[i] = NULL;
+               }
+       }
        if (bp->status_blk) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bp->status_blk, bp->status_blk_mapping);
@@@ -481,6 -509,18 +509,18 @@@ bnx2_alloc_mem(struct bnx2 *bp
  
        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
  
+       if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+               bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
+               if (bp->ctx_pages == 0)
+                       bp->ctx_pages = 1;
+               for (i = 0; i < bp->ctx_pages; i++) {
+                       bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
+                                               BCM_PAGE_SIZE,
+                                               &bp->ctx_blk_mapping[i]);
+                       if (bp->ctx_blk[i] == NULL)
+                               goto alloc_mem_err;
+               }
+       }
        return 0;
  
  alloc_mem_err:
@@@ -803,13 -843,13 +843,13 @@@ bnx2_set_mac_link(struct bnx2 *bp
  
        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
-               BNX2_EMAC_MODE_25G);
+               BNX2_EMAC_MODE_25G_MODE);
  
        if (bp->link_up) {
                switch (bp->line_speed) {
                        case SPEED_10:
-                               if (CHIP_NUM(bp) == CHIP_NUM_5708) {
-                                       val |= BNX2_EMAC_MODE_PORT_MII_10;
+                               if (CHIP_NUM(bp) != CHIP_NUM_5706) {
+                                       val |= BNX2_EMAC_MODE_PORT_MII_10M;
                                        break;
                                }
                                /* fall through */
                                val |= BNX2_EMAC_MODE_PORT_MII;
                                break;
                        case SPEED_2500:
-                               val |= BNX2_EMAC_MODE_25G;
+                               val |= BNX2_EMAC_MODE_25G_MODE;
                                /* fall through */
                        case SPEED_1000:
                                val |= BNX2_EMAC_MODE_PORT_GMII;
@@@ -860,7 -900,7 +900,7 @@@ bnx2_set_link(struct bnx2 *bp
        u32 bmsr;
        u8 link_up;
  
-       if (bp->loopback == MAC_LOOPBACK) {
+       if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }
                        u32 bmcr;
  
                        bnx2_read_phy(bp, MII_BMCR, &bmcr);
+                       bmcr &= ~BCM5708S_BMCR_FORCE_2500;
                        if (!(bmcr & BMCR_ANENABLE)) {
                                bnx2_write_phy(bp, MII_BMCR, bmcr |
                                        BMCR_ANENABLE);
@@@ -988,7 -1029,21 +1029,21 @@@ bnx2_setup_serdes_phy(struct bnx2 *bp
                u32 new_bmcr;
                int force_link_down = 0;
  
-               if (CHIP_NUM(bp) == CHIP_NUM_5708) {
+               bnx2_read_phy(bp, MII_ADVERTISE, &adv);
+               adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
+               bnx2_read_phy(bp, MII_BMCR, &bmcr);
+               new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
+               new_bmcr |= BMCR_SPEED1000;
+               if (bp->req_line_speed == SPEED_2500) {
+                       new_bmcr |= BCM5708S_BMCR_FORCE_2500;
+                       bnx2_read_phy(bp, BCM5708S_UP1, &up1);
+                       if (!(up1 & BCM5708S_UP1_2G5)) {
+                               up1 |= BCM5708S_UP1_2G5;
+                               bnx2_write_phy(bp, BCM5708S_UP1, up1);
+                               force_link_down = 1;
+                       }
+               } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                        bnx2_read_phy(bp, BCM5708S_UP1, &up1);
                        if (up1 & BCM5708S_UP1_2G5) {
                                up1 &= ~BCM5708S_UP1_2G5;
                        }
                }
  
-               bnx2_read_phy(bp, MII_ADVERTISE, &adv);
-               adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
-               bnx2_read_phy(bp, MII_BMCR, &bmcr);
-               new_bmcr = bmcr & ~BMCR_ANENABLE;
-               new_bmcr |= BMCR_SPEED1000;
                if (bp->req_duplex == DUPLEX_FULL) {
                        adv |= ADVERTISE_1000XFULL;
                        new_bmcr |= BMCR_FULLDPLX;
                                bp->link_up = 0;
                                netif_carrier_off(bp->dev);
                                bnx2_write_phy(bp, MII_BMCR, new_bmcr);
+                               bnx2_report_link(bp);
                        }
                        bnx2_write_phy(bp, MII_ADVERTISE, adv);
                        bnx2_write_phy(bp, MII_BMCR, new_bmcr);
        if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
                /* Force a link down visible on the other side */
                if (bp->link_up) {
-                       int i;
                        bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
-                       for (i = 0; i < 110; i++) {
-                               udelay(100);
-                       }
+                       spin_unlock_bh(&bp->phy_lock);
+                       msleep(20);
+                       spin_lock_bh(&bp->phy_lock);
                }
  
                bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
                bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
                        BMCR_ANENABLE);
-               if (CHIP_NUM(bp) == CHIP_NUM_5706) {
-                       /* Speed up link-up time when the link partner
-                        * does not autonegotiate which is very common
-                        * in blade servers. Some blade servers use
-                        * IPMI for keyboard input and it's important
-                        * to minimize link disruptions. Autoneg. involves
-                        * exchanging base pages plus 3 next pages and
-                        * normally completes in about 120 msec.
-                        */
-                       bp->current_interval = SERDES_AN_TIMEOUT;
-                       bp->serdes_an_pending = 1;
-                       mod_timer(&bp->timer, jiffies + bp->current_interval);
-               }
+               /* Speed up link-up time when the link partner
+                * does not autonegotiate which is very common
+                * in blade servers. Some blade servers use
+                * IPMI for keyboard input and it's important
+                * to minimize link disruptions. Autoneg. involves
+                * exchanging base pages plus 3 next pages and
+                * normally completes in about 120 msec.
+                */
+               bp->current_interval = SERDES_AN_TIMEOUT;
+               bp->serdes_an_pending = 1;
+               mod_timer(&bp->timer, jiffies + bp->current_interval);
        }
  
        return 0;
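
The hunk above swaps a udelay() busy loop for msleep(), which may schedule, so bp->phy_lock has to be dropped across the sleep; the same pattern recurs in the bnx2_setup_copper_phy() hunk below. A minimal sketch of the pattern, assuming a bh-disabling spinlock and using a hypothetical helper name:

#include <linux/delay.h>
#include <linux/spinlock.h>

/* Illustration only: force the link down so the peer notices,
 * sleeping instead of spinning.  msleep() can schedule, so the
 * spinlock taken with spin_lock_bh() must be released around it. */
static void example_force_link_down(spinlock_t *phy_lock)
{
	spin_lock_bh(phy_lock);
	/* ... write BMCR_LOOPBACK to the PHY ... */
	spin_unlock_bh(phy_lock);	/* never sleep under a spinlock */
	msleep(20);			/* replaces 110 * udelay(100) */
	spin_lock_bh(phy_lock);
	/* ... restore advertisement and restart autoneg ... */
	spin_unlock_bh(phy_lock);
}
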
@@@ -1153,7 -1199,6 +1199,6 @@@ bnx2_setup_copper_phy(struct bnx2 *bp
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;
-               int i = 0;
  
                bnx2_read_phy(bp, MII_BMSR, &bmsr);
                bnx2_read_phy(bp, MII_BMSR, &bmsr);
                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
-                       do {
-                               udelay(100);
-                               bnx2_read_phy(bp, MII_BMSR, &bmsr);
-                               bnx2_read_phy(bp, MII_BMSR, &bmsr);
-                               i++;
-                       } while ((bmsr & BMSR_LSTATUS) && (i < 620));
+                       spin_unlock_bh(&bp->phy_lock);
+                       msleep(50);
+                       spin_lock_bh(&bp->phy_lock);
+                       bnx2_read_phy(bp, MII_BMSR, &bmsr);
+                       bnx2_read_phy(bp, MII_BMSR, &bmsr);
                }
  
                bnx2_write_phy(bp, MII_BMCR, new_bmcr);
@@@ -1258,9 -1303,8 +1303,8 @@@ bnx2_init_5706s_phy(struct bnx2 *bp
  {
        bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
  
-       if (CHIP_NUM(bp) == CHIP_NUM_5706) {
-               REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
-       }
+       if (CHIP_NUM(bp) == CHIP_NUM_5706)
+               REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
  
        if (bp->dev->mtu > 1500) {
                u32 val;
@@@ -1397,13 -1441,13 +1441,13 @@@ bnx2_set_phy_loopback(struct bnx2 *bp
        for (i = 0; i < 10; i++) {
                if (bnx2_test_link(bp) == 0)
                        break;
-               udelay(10);
+               msleep(100);
        }
  
        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
-                     BNX2_EMAC_MODE_25G);
+                     BNX2_EMAC_MODE_25G_MODE);
  
        mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
@@@ -1454,6 -1498,40 +1498,40 @@@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_d
        return 0;
  }
  
+ static int
+ bnx2_init_5709_context(struct bnx2 *bp)
+ {
+       int i, ret = 0;
+       u32 val;
+       val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
+       val |= (BCM_PAGE_BITS - 8) << 16;
+       REG_WR(bp, BNX2_CTX_COMMAND, val);
+       for (i = 0; i < bp->ctx_pages; i++) {
+               int j;
+               REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
+                      (bp->ctx_blk_mapping[i] & 0xffffffff) |
+                      BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
+               REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
+                      (u64) bp->ctx_blk_mapping[i] >> 32);
+               REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
+                      BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
+               for (j = 0; j < 10; j++) {
+                       val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
+                       if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
+                               break;
+                       udelay(5);
+               }
+               if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
+                       ret = -EBUSY;
+                       break;
+               }
+       }
+       return ret;
+ }
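
bnx2_init_5709_context() above programs the host page table with a bounded write-and-poll idiom. A stripped-down sketch of just that idiom, reusing the driver's REG_RD() and udelay() but with a hypothetical helper name:

/* Poll a self-clearing request bit a fixed number of times and
 * report -EBUSY if the hardware never consumes the request. */
static int example_poll_write_req(struct bnx2 *bp, u32 reg, u32 req_bit)
{
	u32 val;
	int j;

	for (j = 0; j < 10; j++) {
		val = REG_RD(bp, reg);
		if (!(val & req_bit))
			return 0;	/* request consumed */
		udelay(5);
	}
	return -EBUSY;			/* request bit stuck */
}
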
  static void
  bnx2_init_context(struct bnx2 *bp)
  {
@@@ -1576,9 -1654,8 +1654,8 @@@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 
                return -ENOMEM;
        }
  
-       if (unlikely((align = (unsigned long) skb->data & 0x7))) {
-               skb_reserve(skb, 8 - align);
-       }
+       if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
+               skb_reserve(skb, BNX2_RX_ALIGN - align);
  
        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                PCI_DMA_FROMDEVICE);
@@@ -2040,7 -2117,8 +2117,8 @@@ bnx2_set_rx_mode(struct net_device *dev
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
-               sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
+               sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
+                            BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
@@@ -2208,11 -2286,12 +2286,12 @@@ load_rv2p_fw(struct bnx2 *bp, u32 *rv2p
        }
  }
  
- static void
+ static int
  load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
  {
        u32 offset;
        u32 val;
+       int rc;
  
        /* Halt the CPU. */
        val = REG_RD_IND(bp, cpu_reg->mode);
  
        /* Load the Text area. */
        offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
-       if (fw->text) {
+       if (fw->gz_text) {
+               u32 text_len;
+               void *text;
+               rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
+                                &text_len);
+               if (rc)
+                       return rc;
+               fw->text = text;
+       }
+       if (fw->gz_text) {
                int j;
  
                for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
        val &= ~cpu_reg->mode_value_halt;
        REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
        REG_WR_IND(bp, cpu_reg->mode, val);
+       return 0;
  }
  
  static int
  bnx2_init_cpus(struct bnx2 *bp)
  {
        struct cpu_reg cpu_reg;
-       struct fw_info fw;
+       struct fw_info *fw;
        int rc = 0;
        void *text;
        u32 text_len;
        cpu_reg.spad_base = BNX2_RXP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;
  
-       fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
-       fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
-       fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
-       fw.start_addr = bnx2_RXP_b06FwStartAddr;
-       fw.text_addr = bnx2_RXP_b06FwTextAddr;
-       fw.text_len = bnx2_RXP_b06FwTextLen;
-       fw.text_index = 0;
+       if (CHIP_NUM(bp) == CHIP_NUM_5709)
+               fw = &bnx2_rxp_fw_09;
+       else
+               fw = &bnx2_rxp_fw_06;
  
-       rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
-                        &text, &text_len);
+       rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;
  
-       fw.text = text;
-       fw.data_addr = bnx2_RXP_b06FwDataAddr;
-       fw.data_len = bnx2_RXP_b06FwDataLen;
-       fw.data_index = 0;
-       fw.data = bnx2_RXP_b06FwData;
-       fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
-       fw.sbss_len = bnx2_RXP_b06FwSbssLen;
-       fw.sbss_index = 0;
-       fw.sbss = bnx2_RXP_b06FwSbss;
-       fw.bss_addr = bnx2_RXP_b06FwBssAddr;
-       fw.bss_len = bnx2_RXP_b06FwBssLen;
-       fw.bss_index = 0;
-       fw.bss = bnx2_RXP_b06FwBss;
-       fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
-       fw.rodata_len = bnx2_RXP_b06FwRodataLen;
-       fw.rodata_index = 0;
-       fw.rodata = bnx2_RXP_b06FwRodata;
-       load_cpu_fw(bp, &cpu_reg, &fw);
        /* Initialize the TX Processor. */
        cpu_reg.mode = BNX2_TXP_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
        cpu_reg.spad_base = BNX2_TXP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;
  
-       fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
-       fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
-       fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
-       fw.start_addr = bnx2_TXP_b06FwStartAddr;
-       fw.text_addr = bnx2_TXP_b06FwTextAddr;
-       fw.text_len = bnx2_TXP_b06FwTextLen;
-       fw.text_index = 0;
+       if (CHIP_NUM(bp) == CHIP_NUM_5709)
+               fw = &bnx2_txp_fw_09;
+       else
+               fw = &bnx2_txp_fw_06;
  
-       rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
-                        &text, &text_len);
+       rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;
  
-       fw.text = text;
-       fw.data_addr = bnx2_TXP_b06FwDataAddr;
-       fw.data_len = bnx2_TXP_b06FwDataLen;
-       fw.data_index = 0;
-       fw.data = bnx2_TXP_b06FwData;
-       fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
-       fw.sbss_len = bnx2_TXP_b06FwSbssLen;
-       fw.sbss_index = 0;
-       fw.sbss = bnx2_TXP_b06FwSbss;
-       fw.bss_addr = bnx2_TXP_b06FwBssAddr;
-       fw.bss_len = bnx2_TXP_b06FwBssLen;
-       fw.bss_index = 0;
-       fw.bss = bnx2_TXP_b06FwBss;
-       fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
-       fw.rodata_len = bnx2_TXP_b06FwRodataLen;
-       fw.rodata_index = 0;
-       fw.rodata = bnx2_TXP_b06FwRodata;
-       load_cpu_fw(bp, &cpu_reg, &fw);
        /* Initialize the TX Patch-up Processor. */
        cpu_reg.mode = BNX2_TPAT_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
        cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;
  
-       fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
-       fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
-       fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
-       fw.start_addr = bnx2_TPAT_b06FwStartAddr;
-       fw.text_addr = bnx2_TPAT_b06FwTextAddr;
-       fw.text_len = bnx2_TPAT_b06FwTextLen;
-       fw.text_index = 0;
+       if (CHIP_NUM(bp) == CHIP_NUM_5709)
+               fw = &bnx2_tpat_fw_09;
+       else
+               fw = &bnx2_tpat_fw_06;
  
-       rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
-                        &text, &text_len);
+       rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;
  
-       fw.text = text;
-       fw.data_addr = bnx2_TPAT_b06FwDataAddr;
-       fw.data_len = bnx2_TPAT_b06FwDataLen;
-       fw.data_index = 0;
-       fw.data = bnx2_TPAT_b06FwData;
-       fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
-       fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
-       fw.sbss_index = 0;
-       fw.sbss = bnx2_TPAT_b06FwSbss;
-       fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
-       fw.bss_len = bnx2_TPAT_b06FwBssLen;
-       fw.bss_index = 0;
-       fw.bss = bnx2_TPAT_b06FwBss;
-       fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
-       fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
-       fw.rodata_index = 0;
-       fw.rodata = bnx2_TPAT_b06FwRodata;
-       load_cpu_fw(bp, &cpu_reg, &fw);
        /* Initialize the Completion Processor. */
        cpu_reg.mode = BNX2_COM_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
        cpu_reg.spad_base = BNX2_COM_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;
  
-       fw.ver_major = bnx2_COM_b06FwReleaseMajor;
-       fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
-       fw.ver_fix = bnx2_COM_b06FwReleaseFix;
-       fw.start_addr = bnx2_COM_b06FwStartAddr;
-       fw.text_addr = bnx2_COM_b06FwTextAddr;
-       fw.text_len = bnx2_COM_b06FwTextLen;
-       fw.text_index = 0;
+       if (CHIP_NUM(bp) == CHIP_NUM_5709)
+               fw = &bnx2_com_fw_09;
+       else
+               fw = &bnx2_com_fw_06;
  
-       rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
-                        &text, &text_len);
+       rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;
  
-       fw.text = text;
-       fw.data_addr = bnx2_COM_b06FwDataAddr;
-       fw.data_len = bnx2_COM_b06FwDataLen;
-       fw.data_index = 0;
-       fw.data = bnx2_COM_b06FwData;
-       fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
-       fw.sbss_len = bnx2_COM_b06FwSbssLen;
-       fw.sbss_index = 0;
-       fw.sbss = bnx2_COM_b06FwSbss;
-       fw.bss_addr = bnx2_COM_b06FwBssAddr;
-       fw.bss_len = bnx2_COM_b06FwBssLen;
-       fw.bss_index = 0;
-       fw.bss = bnx2_COM_b06FwBss;
-       fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
-       fw.rodata_len = bnx2_COM_b06FwRodataLen;
-       fw.rodata_index = 0;
-       fw.rodata = bnx2_COM_b06FwRodata;
+       /* Initialize the Command Processor. */
+       cpu_reg.mode = BNX2_CP_CPU_MODE;
+       cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
+       cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
+       cpu_reg.state = BNX2_CP_CPU_STATE;
+       cpu_reg.state_value_clear = 0xffffff;
+       cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
+       cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
+       cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
+       cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
+       cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
+       cpu_reg.spad_base = BNX2_CP_SCRATCH;
+       cpu_reg.mips_view_base = 0x8000000;
  
-       load_cpu_fw(bp, &cpu_reg, &fw);
+       if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+               fw = &bnx2_cp_fw_09;
  
+               rc = load_cpu_fw(bp, &cpu_reg, fw);
+               if (rc)
+                       goto init_cpu_err;
+       }
  init_cpu_err:
        bnx2_gunzip_end(bp);
        return rc;
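
The rewritten bnx2_init_cpus() above replaces the per-processor fw_info field stuffing with a lookup keyed on the chip, and load_cpu_fw() now gunzips the text section itself, so each call's return code must be checked. A condensed sketch with a hypothetical wrapper name:

static int example_load_one_cpu(struct bnx2 *bp, struct cpu_reg *cpu_reg,
				struct fw_info *fw_09, struct fw_info *fw_06)
{
	struct fw_info *fw;

	/* Pick the 09-series image on the 5709, the 06-series otherwise. */
	fw = (CHIP_NUM(bp) == CHIP_NUM_5709) ? fw_09 : fw_06;

	/* load_cpu_fw() decompresses fw->gz_text and can fail, so its
	 * result must be propagated rather than ignored. */
	return load_cpu_fw(bp, cpu_reg, fw);
}
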
@@@ -3288,31 -3285,44 +3285,44 @@@ bnx2_reset_chip(struct bnx2 *bp, u32 re
         * before we issue a reset. */
        val = REG_RD(bp, BNX2_MISC_ID);
  
-       val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
-             BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
-             BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
+       if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+               REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
+               REG_RD(bp, BNX2_MISC_COMMAND);
+               udelay(5);
  
-       /* Chip reset. */
-       REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
+               val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+                     BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
  
-       if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
-           (CHIP_ID(bp) == CHIP_ID_5706_A1))
-               msleep(15);
+               pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
  
-       /* Reset takes approximately 30 usec */
-       for (i = 0; i < 10; i++) {
-               val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
-               if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
-                           BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
-                       break;
+       } else {
+               val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
+                     BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+                     BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
+               /* Chip reset. */
+               REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
+               if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
+                   (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
+                       current->state = TASK_UNINTERRUPTIBLE;
+                       schedule_timeout(HZ / 50);
                }
-               udelay(10);
-       }
  
-       if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
-                  BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
-               printk(KERN_ERR PFX "Chip reset did not complete\n");
-               return -EBUSY;
+               /* Reset takes approximately 30 usec */
+               for (i = 0; i < 10; i++) {
+                       val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
+                       if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
+                                   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
+                               break;
+                       udelay(10);
+               }
+               if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
+                          BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
+                       printk(KERN_ERR PFX "Chip reset did not complete\n");
+                       return -EBUSY;
+               }
        }
  
        /* Make sure byte swapping is properly configured. */
@@@ -3390,7 -3400,10 +3400,10 @@@ bnx2_init_chip(struct bnx2 *bp
  
        /* Initialize context mapping and zero out the quick contexts.  The
         * context block must have already been enabled. */
-       bnx2_init_context(bp);
+       if (CHIP_NUM(bp) == CHIP_NUM_5709)
+               bnx2_init_5709_context(bp);
+       else
+               bnx2_init_context(bp);
  
        if ((rc = bnx2_init_cpus(bp)) != 0)
                return rc;
        return rc;
  }
  
+ static void
+ bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
+ {
+       u32 val, offset0, offset1, offset2, offset3;
+       if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+               offset0 = BNX2_L2CTX_TYPE_XI;
+               offset1 = BNX2_L2CTX_CMD_TYPE_XI;
+               offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
+               offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
+       } else {
+               offset0 = BNX2_L2CTX_TYPE;
+               offset1 = BNX2_L2CTX_CMD_TYPE;
+               offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
+               offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
+       }
+       val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
+       CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
+       val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
+       CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
+       val = (u64) bp->tx_desc_mapping >> 32;
+       CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
+       val = (u64) bp->tx_desc_mapping & 0xffffffff;
+       CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
+ }
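
bnx2_init_tx_context() above selects the L2 context offsets once up front, keeping the CTX_WR() sequence identical for both register layouts (the 5709 uses the _XI variants). A reduced sketch of the same selection, trimmed to one field and a hypothetical function name:

static void example_write_ctx_type(struct bnx2 *bp, u32 cid)
{
	u32 offset0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		offset0 = BNX2_L2CTX_TYPE_XI;	/* 5709 layout */
	else
		offset0 = BNX2_L2CTX_TYPE;	/* 5706/5708 layout */

	CTX_WR(bp, GET_CID_ADDR(cid), offset0,
	       BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2);
}
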
  
  static void
  bnx2_init_tx_ring(struct bnx2 *bp)
  {
        struct tx_bd *txbd;
-       u32 val;
+       u32 cid;
  
        bp->tx_wake_thresh = bp->tx_ring_size / 2;
  
        bp->hw_tx_cons = 0;
        bp->tx_prod_bseq = 0;
  
-       val = BNX2_L2CTX_TYPE_TYPE_L2;
-       val |= BNX2_L2CTX_TYPE_SIZE_L2;
-       CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
+       cid = TX_CID;
+       bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
+       bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
  
-       val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
-       val |= 8 << 16;
-       CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
-       val = (u64) bp->tx_desc_mapping >> 32;
-       CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
-       val = (u64) bp->tx_desc_mapping & 0xffffffff;
-       CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
+       bnx2_init_tx_context(bp, cid);
  }
  
  static void
@@@ -3545,8 -3578,8 +3578,8 @@@ bnx2_init_rx_ring(struct bnx2 *bp
  
        /* 8 for CRC and VLAN */
        bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
-       /* 8 for alignment */
-       bp->rx_buf_size = bp->rx_buf_use_size + 8;
+       /* hw alignment */
+       bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
  
        ring_prod = prod = bp->rx_prod = 0;
        bp->rx_cons = 0;
@@@ -3712,7 -3745,9 +3745,9 @@@ bnx2_init_nic(struct bnx2 *bp
        if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
                return rc;
  
+       spin_lock_bh(&bp->phy_lock);
        bnx2_init_phy(bp);
+       spin_unlock_bh(&bp->phy_lock);
        bnx2_set_link(bp);
        return 0;
  }
@@@ -3952,7 -3987,7 +3987,7 @@@ bnx2_run_loopback(struct bnx2 *bp, int 
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
-               bp->loopback = 0;
+               bp->loopback = PHY_LOOPBACK;
                bnx2_set_phy_loopback(bp);
        }
        else
        bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
        bp->tx_prod_bseq += pkt_size;
  
-       REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
-       REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
+       REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
+       REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
  
        udelay(100);
  
@@@ -4162,80 -4197,117 +4197,117 @@@ bnx2_test_intr(struct bnx2 *bp
  }
  
  static void
- bnx2_timer(unsigned long data)
+ bnx2_5706_serdes_timer(struct bnx2 *bp)
  {
-       struct bnx2 *bp = (struct bnx2 *) data;
-       u32 msg;
+       spin_lock(&bp->phy_lock);
+       if (bp->serdes_an_pending)
+               bp->serdes_an_pending--;
+       else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
+               u32 bmcr;
  
-       if (!netif_running(bp->dev))
-               return;
+               bp->current_interval = bp->timer_interval;
  
-       if (atomic_read(&bp->intr_sem) != 0)
-               goto bnx2_restart_timer;
+               bnx2_read_phy(bp, MII_BMCR, &bmcr);
  
-       msg = (u32) ++bp->fw_drv_pulse_wr_seq;
-       REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
+               if (bmcr & BMCR_ANENABLE) {
+                       u32 phy1, phy2;
  
-       bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
+                       bnx2_write_phy(bp, 0x1c, 0x7c00);
+                       bnx2_read_phy(bp, 0x1c, &phy1);
  
-       if ((bp->phy_flags & PHY_SERDES_FLAG) &&
-           (CHIP_NUM(bp) == CHIP_NUM_5706)) {
+                       bnx2_write_phy(bp, 0x17, 0x0f01);
+                       bnx2_read_phy(bp, 0x15, &phy2);
+                       bnx2_write_phy(bp, 0x17, 0x0f01);
+                       bnx2_read_phy(bp, 0x15, &phy2);
  
-               spin_lock(&bp->phy_lock);
-               if (bp->serdes_an_pending) {
-                       bp->serdes_an_pending--;
+                       if ((phy1 & 0x10) &&    /* SIGNAL DETECT */
+                               !(phy2 & 0x20)) {       /* no CONFIG */
+                               bmcr &= ~BMCR_ANENABLE;
+                               bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
+                               bnx2_write_phy(bp, MII_BMCR, bmcr);
+                               bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
+                       }
                }
-               else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
-                       u32 bmcr;
+       }
+       else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
+                (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
+               u32 phy2;
  
-                       bp->current_interval = bp->timer_interval;
+               bnx2_write_phy(bp, 0x17, 0x0f01);
+               bnx2_read_phy(bp, 0x15, &phy2);
+               if (phy2 & 0x20) {
+                       u32 bmcr;
  
                        bnx2_read_phy(bp, MII_BMCR, &bmcr);
+                       bmcr |= BMCR_ANENABLE;
+                       bnx2_write_phy(bp, MII_BMCR, bmcr);
  
-                       if (bmcr & BMCR_ANENABLE) {
-                               u32 phy1, phy2;
+                       bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
+               }
+       } else
+               bp->current_interval = bp->timer_interval;
  
-                               bnx2_write_phy(bp, 0x1c, 0x7c00);
-                               bnx2_read_phy(bp, 0x1c, &phy1);
+       spin_unlock(&bp->phy_lock);
+ }
  
-                               bnx2_write_phy(bp, 0x17, 0x0f01);
-                               bnx2_read_phy(bp, 0x15, &phy2);
-                               bnx2_write_phy(bp, 0x17, 0x0f01);
-                               bnx2_read_phy(bp, 0x15, &phy2);
+ static void
+ bnx2_5708_serdes_timer(struct bnx2 *bp)
+ {
+       if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
+               bp->serdes_an_pending = 0;
+               return;
+       }
  
-                               if ((phy1 & 0x10) &&    /* SIGNAL DETECT */
-                                       !(phy2 & 0x20)) {       /* no CONFIG */
+       spin_lock(&bp->phy_lock);
+       if (bp->serdes_an_pending)
+               bp->serdes_an_pending--;
+       else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
+               u32 bmcr;
  
-                                       bmcr &= ~BMCR_ANENABLE;
-                                       bmcr |= BMCR_SPEED1000 |
-                                               BMCR_FULLDPLX;
-                                       bnx2_write_phy(bp, MII_BMCR, bmcr);
-                                       bp->phy_flags |=
-                                               PHY_PARALLEL_DETECT_FLAG;
-                               }
-                       }
+               bnx2_read_phy(bp, MII_BMCR, &bmcr);
+               if (bmcr & BMCR_ANENABLE) {
+                       bmcr &= ~BMCR_ANENABLE;
+                       bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
+                       bnx2_write_phy(bp, MII_BMCR, bmcr);
+                       bp->current_interval = SERDES_FORCED_TIMEOUT;
+               } else {
+                       bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
+                       bmcr |= BMCR_ANENABLE;
+                       bnx2_write_phy(bp, MII_BMCR, bmcr);
+                       bp->serdes_an_pending = 2;
+                       bp->current_interval = bp->timer_interval;
                }
-               else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
-                       (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
-                       u32 phy2;
  
-                       bnx2_write_phy(bp, 0x17, 0x0f01);
-                       bnx2_read_phy(bp, 0x15, &phy2);
-                       if (phy2 & 0x20) {
-                               u32 bmcr;
+       } else
+               bp->current_interval = bp->timer_interval;
  
-                               bnx2_read_phy(bp, MII_BMCR, &bmcr);
-                               bmcr |= BMCR_ANENABLE;
-                               bnx2_write_phy(bp, MII_BMCR, bmcr);
+       spin_unlock(&bp->phy_lock);
+ }
+ 
+ static void
+ bnx2_timer(unsigned long data)
+ {
+       struct bnx2 *bp = (struct bnx2 *) data;
+       u32 msg;
  
-                               bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
+       if (!netif_running(bp->dev))
+               return;
  
-                       }
-               }
-               else
-                       bp->current_interval = bp->timer_interval;
+       if (atomic_read(&bp->intr_sem) != 0)
+               goto bnx2_restart_timer;
  
-               spin_unlock(&bp->phy_lock);
+       msg = (u32) ++bp->fw_drv_pulse_wr_seq;
+       REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
+       bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
+       if (bp->phy_flags & PHY_SERDES_FLAG) {
+               if (CHIP_NUM(bp) == CHIP_NUM_5706)
+                       bnx2_5706_serdes_timer(bp);
+               else if (CHIP_NUM(bp) == CHIP_NUM_5708)
+                       bnx2_5708_serdes_timer(bp);
        }
  
  bnx2_restart_timer:
@@@ -4339,9 -4411,9 +4411,9 @@@ bnx2_open(struct net_device *dev
  }
  
  static void
 -bnx2_reset_task(void *data)
 +bnx2_reset_task(struct work_struct *work)
  {
 -      struct bnx2 *bp = data;
 +      struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
  
        if (!netif_running(bp->dev))
                return;
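
The bnx2_reset_task() lines above are this merge's half of the workqueue API conversion: handlers now receive a struct work_struct pointer and recover their container with container_of(), and INIT_WORK() no longer takes a data argument. A self-contained sketch of the pattern with hypothetical names:

#include <linux/workqueue.h>

struct example_dev {			/* hypothetical driver state */
	struct work_struct reset_task;
	int reset_count;
};

static void example_reset_task(struct work_struct *work)
{
	struct example_dev *dev =
		container_of(work, struct example_dev, reset_task);

	dev->reset_count++;		/* act on the owning structure */
}

static void example_setup(struct example_dev *dev)
{
	INIT_WORK(&dev->reset_task, example_reset_task);  /* two args now */
}
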
@@@ -4508,8 -4580,8 +4580,8 @@@ bnx2_start_xmit(struct sk_buff *skb, st
        prod = NEXT_TX_BD(prod);
        bp->tx_prod_bseq += skb->len;
  
-       REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
-       REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
+       REG_WR16(bp, bp->tx_bidx_addr, prod);
+       REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
  
        mmiowb();
  
@@@ -4743,10 -4815,14 +4815,14 @@@ bnx2_set_settings(struct net_device *de
        }
        else {
                if (bp->phy_flags & PHY_SERDES_FLAG) {
-                       if ((cmd->speed != SPEED_1000) ||
-                               (cmd->duplex != DUPLEX_FULL)) {
+                       if ((cmd->speed != SPEED_1000 &&
+                            cmd->speed != SPEED_2500) ||
+                           (cmd->duplex != DUPLEX_FULL))
+                               return -EINVAL;
+                       if (cmd->speed == SPEED_2500 &&
+                           !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
                                return -EINVAL;
-                       }
                }
                else if (cmd->speed == SPEED_1000) {
                        return -EINVAL;
@@@ -4903,11 -4979,10 +4979,10 @@@ bnx2_nway_reset(struct net_device *dev
                msleep(20);
  
                spin_lock_bh(&bp->phy_lock);
-               if (CHIP_NUM(bp) == CHIP_NUM_5706) {
-                       bp->current_interval = SERDES_AN_TIMEOUT;
-                       bp->serdes_an_pending = 1;
-                       mod_timer(&bp->timer, jiffies + bp->current_interval);
-               }
+               bp->current_interval = SERDES_AN_TIMEOUT;
+               bp->serdes_an_pending = 1;
+               mod_timer(&bp->timer, jiffies + bp->current_interval);
        }
  
        bnx2_read_phy(bp, MII_BMCR, &bmcr);
@@@ -5288,6 -5363,8 +5363,8 @@@ bnx2_self_test(struct net_device *dev, 
  
        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
+               int i;
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);
                }
  
                /* wait for link up */
-               msleep_interruptible(3000);
-               if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
-                       msleep_interruptible(4000);
+               for (i = 0; i < 7; i++) {
+                       if (bp->link_up)
+                               break;
+                       msleep_interruptible(1000);
+               }
        }
  
        if (bnx2_test_nvram(bp) != 0) {
@@@ -5604,13 -5683,6 +5683,6 @@@ bnx2_init_board(struct pci_dev *pdev, s
                goto err_out_release;
        }
  
-       bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
-       if (bp->pcix_cap == 0) {
-               dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
-               rc = -EIO;
-               goto err_out_release;
-       }
        if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
                bp->flags |= USING_DAC_FLAG;
                if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
        bp->pdev = pdev;
  
        spin_lock_init(&bp->phy_lock);
 -      INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
 +      INIT_WORK(&bp->reset_task, bnx2_reset_task);
  
        dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
-       mem_len = MB_GET_CID_ADDR(17);
+       mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
        dev->mem_end = dev->mem_start + mem_len;
        dev->irq = pdev->irq;
  
  
        bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
  
+       if (CHIP_NUM(bp) != CHIP_NUM_5709) {
+               bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
+               if (bp->pcix_cap == 0) {
+                       dev_err(&pdev->dev,
+                               "Cannot find PCIX capability, aborting.\n");
+                       rc = -EIO;
+                       goto err_out_unmap;
+               }
+       }
        /* Get bus information. */
        reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
        if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
        bp->phy_addr = 1;
  
        /* Disable WOL support if we are running on a SERDES chip. */
-       if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
+       if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+               if (CHIP_BOND_ID(bp) != BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
+                       bp->phy_flags |= PHY_SERDES_FLAG;
+       } else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
                bp->phy_flags |= PHY_SERDES_FLAG;
+       if (bp->phy_flags & PHY_SERDES_FLAG) {
                bp->flags |= NO_WOL_FLAG;
-               if (CHIP_NUM(bp) == CHIP_NUM_5708) {
+               if (CHIP_NUM(bp) != CHIP_NUM_5706) {
                        bp->phy_addr = 2;
                        reg = REG_RD_IND(bp, bp->shmem_base +
                                         BNX2_SHARED_HW_CFG_CONFIG);
diff --combined drivers/net/cassini.c
index fe08f3845491179aa70265f6bfb8ac3d5165021f,fd2cc13f7d97b90d200d741207c0f87a61413159..c8126484c2be019350ea65d625676d7444d0af69
@@@ -2825,7 -2825,7 +2825,7 @@@ static inline int cas_xmit_tx_ringN(str
                u64 csum_start_off, csum_stuff_off;
  
                csum_start_off = (u64) (skb->h.raw - skb->data);
-               csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
+               csum_stuff_off = csum_start_off + skb->csum_offset;
  
                ctrl =  TX_DESC_CSUM_EN |
                        CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
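
The cassini hunk above adapts to the sk_buff change in which skb->csum holds only the partial checksum, while the new skb->csum_offset locates the field to be stuffed relative to the transport header. A minimal sketch of the new offset computation (sk_buff layout of this kernel era; helper name hypothetical):

#include <linux/skbuff.h>

static inline void example_csum_offsets(const struct sk_buff *skb,
					u64 *start, u64 *stuff)
{
	*start = (u64)(skb->h.raw - skb->data);	/* where summing starts */
	*stuff = *start + skb->csum_offset;	/* where the result goes */
}
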
@@@ -4066,9 -4066,9 +4066,9 @@@ static int cas_alloc_rxds(struct cas *c
        return 0;
  }
  
 -static void cas_reset_task(void *data)
 +static void cas_reset_task(struct work_struct *work)
  {
 -      struct cas *cp = (struct cas *) data;
 +      struct cas *cp = container_of(work, struct cas, reset_task);
  #if 0
        int pending = atomic_read(&cp->reset_task_pending);
  #else
@@@ -5006,7 -5006,7 +5006,7 @@@ static int __devinit cas_init_one(struc
        atomic_set(&cp->reset_task_pending_spare, 0);
        atomic_set(&cp->reset_task_pending_mtu, 0);
  #endif
 -      INIT_WORK(&cp->reset_task, cas_reset_task, cp);
 +      INIT_WORK(&cp->reset_task, cas_reset_task);
  
        /* Default link parameters */
        if (link_mode >= 0 && link_mode <= 6)
index 8b1bedbce0d51984ceb02e002477719de954aaa0,b265941e137220a90eb3d8d1588c61cb04ee717d..74758d2c7af8c368ff79b7ab4dc29aa04c7f0ffb
@@@ -45,6 -45,7 +45,7 @@@
  #include <linux/delay.h>
  #include <linux/pci.h>
  #include <linux/ethtool.h>
+ #include <linux/if_vlan.h>
  #include <linux/mii.h>
  #include <linux/crc32.h>
  #include <linux/init.h>
  
  #define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
  #define DRV_NAME "cxgb"
- #define DRV_VERSION "2.1.1"
+ #define DRV_VERSION "2.2"
  #define PFX      DRV_NAME ": "
  
  #define CH_ERR(fmt, ...)   printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
  #define CH_WARN(fmt, ...)  printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
  #define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)
  
+ /*
+  * More powerful macro that selectively prints messages based on msg_enable.
+  * For info and debugging messages.
+  */
+ #define CH_MSG(adapter, level, category, fmt, ...) do { \
+       if ((adapter)->msg_enable & NETIF_MSG_##category) \
+               printk(KERN_##level PFX "%s: " fmt, (adapter)->name, \
+                      ## __VA_ARGS__); \
+ } while (0)
+ #ifdef DEBUG
+ # define CH_DBG(adapter, category, fmt, ...) \
+       CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
+ #else
+ # define CH_DBG(fmt, ...)
+ #endif
  #define CH_DEVICE(devid, ssid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
  
  
  typedef struct adapter adapter_t;
  
- void t1_elmer0_ext_intr(adapter_t *adapter);
- void t1_link_changed(adapter_t *adapter, int port_id, int link_status,
-                       int speed, int duplex, int fc);
  struct t1_rx_mode {
        struct net_device *dev;
        u32 idx;
@@@ -97,26 -111,53 +111,53 @@@ static inline u8 *t1_get_next_mcaddr(st
  }
  
  #define       MAX_NPORTS 4
+ #define PORT_MASK ((1 << MAX_NPORTS) - 1)
+ #define NMTUS      8
+ #define TCB_SIZE   128
  
  #define SPEED_INVALID 0xffff
  #define DUPLEX_INVALID 0xff
  
  enum {
        CHBT_BOARD_N110,
-       CHBT_BOARD_N210
+       CHBT_BOARD_N210,
+       CHBT_BOARD_7500,
+       CHBT_BOARD_8000,
+       CHBT_BOARD_CHT101,
+       CHBT_BOARD_CHT110,
+       CHBT_BOARD_CHT210,
+       CHBT_BOARD_CHT204,
+       CHBT_BOARD_CHT204V,
+       CHBT_BOARD_CHT204E,
+       CHBT_BOARD_CHN204,
+       CHBT_BOARD_COUGAR,
+       CHBT_BOARD_6800,
+       CHBT_BOARD_SIMUL,
  };
  
  enum {
+       CHBT_TERM_FPGA,
        CHBT_TERM_T1,
-       CHBT_TERM_T2
+       CHBT_TERM_T2,
+       CHBT_TERM_T3
  };
  
  enum {
+       CHBT_MAC_CHELSIO_A,
+       CHBT_MAC_IXF1010,
        CHBT_MAC_PM3393,
+       CHBT_MAC_VSC7321,
+       CHBT_MAC_DUMMY
  };
  
  enum {
+       CHBT_PHY_88E1041,
+       CHBT_PHY_88E1111,
        CHBT_PHY_88X2010,
+       CHBT_PHY_XPAK,
+       CHBT_PHY_MY3126,
+       CHBT_PHY_8244,
+       CHBT_PHY_DUMMY
  };
  
  enum {
@@@ -150,16 -191,44 +191,44 @@@ struct chelsio_pci_params 
        unsigned char  is_pcix;
  };
  
+ struct tp_params {
+       unsigned int pm_size;
+       unsigned int cm_size;
+       unsigned int pm_rx_base;
+       unsigned int pm_tx_base;
+       unsigned int pm_rx_pg_size;
+       unsigned int pm_tx_pg_size;
+       unsigned int pm_rx_num_pgs;
+       unsigned int pm_tx_num_pgs;
+       unsigned int rx_coalescing_size;
+       unsigned int use_5tuple_mode;
+ };
+ 
+ struct mc5_params {
+       unsigned int mode;       /* selects MC5 width */
+       unsigned int nservers;   /* size of server region */
+       unsigned int nroutes;    /* size of routing region */
+ };
+ 
+ /* Default MC5 region sizes */
+ #define DEFAULT_SERVER_REGION_LEN 256
+ #define DEFAULT_RT_REGION_LEN 1024
  struct adapter_params {
        struct sge_params sge;
+       struct mc5_params mc5;
+       struct tp_params  tp;
        struct chelsio_pci_params pci;
  
        const struct board_info *brd_info;
  
+       unsigned short mtus[NMTUS];
        unsigned int   nports;          /* # of ethernet ports */
        unsigned int   stats_update_period;
        unsigned short chip_revision;
        unsigned char  chip_version;
+       unsigned char  is_asic;
+       unsigned char  has_msi;
  };
  
  struct link_config {
@@@ -207,17 -276,20 +276,20 @@@ struct adapter 
        /* Terminator modules. */
        struct sge    *sge;
        struct peespi *espi;
+       struct petp   *tp;
  
        struct port_info port[MAX_NPORTS];
 -      struct work_struct stats_update_task;
 +      struct delayed_work stats_update_task;
        struct timer_list stats_update_timer;
  
-       struct semaphore mib_mutex;
        spinlock_t tpi_lock;
        spinlock_t work_lock;
+       spinlock_t mac_lock;
        /* guards async operations */
        spinlock_t async_lock ____cacheline_aligned;
        u32 slow_intr_mask;
+       int t1powersave;
  };
  
  enum {                                           /* adapter flags */
@@@ -256,6 -328,11 +328,11 @@@ struct board_info 
        const char             *desc;
  };
  
+ static inline int t1_is_asic(const adapter_t *adapter)
+ {
+       return adapter->params.is_asic;
+ }
  extern struct pci_device_id t1_pci_tbl[];
  
  static inline int adapter_matches_type(const adapter_t *adapter,
@@@ -285,13 -362,15 +362,15 @@@ static inline unsigned int core_ticks_p
        return board_info(adap)->clock_core / 1000000;
  }
  
+ extern int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
+ extern int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
  extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
  extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
  
  extern void t1_interrupts_enable(adapter_t *adapter);
  extern void t1_interrupts_disable(adapter_t *adapter);
  extern void t1_interrupts_clear(adapter_t *adapter);
- extern int elmer0_ext_intr_handler(adapter_t *adapter);
+ extern int t1_elmer0_ext_intr_handler(adapter_t *adapter);
  extern int t1_slow_intr_handler(adapter_t *adapter);
  
  extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
@@@ -305,9 -384,7 +384,7 @@@ extern int t1_init_hw_modules(adapter_
  extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
  extern void t1_free_sw_modules(adapter_t *adapter);
  extern void t1_fatal_err(adapter_t *adapter);
- extern void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable);
- extern void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable);
- extern void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable);
+ extern void t1_link_changed(adapter_t *adapter, int port_id);
+ extern void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
+                           int speed, int duplex, int pause);
  #endif /* _CXGB_COMMON_H_ */
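
The new CH_MSG()/CH_DBG() macros added to common.h above gate output on the adapter's msg_enable bitmap by NETIF_MSG_* category. A hypothetical call site showing the intended usage:

static void example_report(adapter_t *adapter, int port_id)
{
	/* Printed only if NETIF_MSG_LINK is set in msg_enable. */
	CH_MSG(adapter, INFO, LINK, "port %d link up\n", port_id);

	/* Compiled away entirely unless DEBUG is defined. */
	CH_DBG(adapter, INTR, "slow interrupt serviced\n");
}
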
index f607cc6a276bbf3ed39a1c9e735dbe2f0753ef8c,53bec6739812366fdf0c1537b54149409d9ae1ec..de48eadddbc4af9fa09452fea78b22ec7f17cf9c
@@@ -45,7 -45,6 +45,6 @@@
  #include <linux/if_vlan.h>
  #include <linux/mii.h>
  #include <linux/sockios.h>
- #include <linux/proc_fs.h>
  #include <linux/dma-mapping.h>
  #include <asm/uaccess.h>
  
  #include "gmac.h"
  #include "cphy.h"
  #include "sge.h"
+ #include "tp.h"
  #include "espi.h"
+ #include "elmer0.h"
  
- #ifdef work_struct
- #include <linux/tqueue.h>
- #define INIT_WORK INIT_TQUEUE
- #define schedule_work schedule_task
- #define flush_scheduled_work flush_scheduled_tasks
- static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
- {
-       mod_timer(&ap->stats_update_timer, jiffies + secs * HZ);
- }
- static inline void cancel_mac_stats_update(struct adapter *ap)
- {
-       del_timer_sync(&ap->stats_update_timer);
-       flush_scheduled_tasks();
- }
- /*
-  * Stats update timer for 2.4.  It schedules a task to do the actual update as
-  * we need to access MAC statistics in process context.
-  */
- static void mac_stats_timer(unsigned long data)
- {
-       struct adapter *ap = (struct adapter *)data;
-       schedule_task(&ap->stats_update_task);
- }
- #else
  #include <linux/workqueue.h>
  
  static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
@@@ -95,7 -68,6 +68,6 @@@ static inline void cancel_mac_stats_upd
  {
        cancel_delayed_work(&ap->stats_update_task);
  }
- #endif
  
  #define MAX_CMDQ_ENTRIES 16384
  #define MAX_CMDQ1_ENTRIES 1024
  #define MAX_RX_JUMBO_BUFFERS 16384
  #define MAX_TX_BUFFERS_HIGH   16384U
  #define MAX_TX_BUFFERS_LOW    1536U
+ #define MAX_TX_BUFFERS                1460U
  #define MIN_FL_ENTRIES 32
  
- #define PORT_MASK ((1 << MAX_NPORTS) - 1)
  #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@@ -124,8 -95,21 +95,21 @@@ MODULE_LICENSE("GPL")
  static int dflt_msg_enable = DFLT_MSG_ENABLE;
  
  module_param(dflt_msg_enable, int, 0);
- MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");
+ MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
+ #define HCLOCK 0x0
+ #define LCLOCK 0x1
+ /* T1 cards powersave mode */
+ static int t1_clock(struct adapter *adapter, int mode);
+ static int t1powersave = 1;   /* HW default is powersave mode. */
  
+ module_param(t1powersave, int, 0);
+ MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
+ static int disable_msi = 0;
+ module_param(disable_msi, int, 0);
+ MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
  
  static const char pci_speed[][4] = {
        "33", "66", "100", "133"
@@@ -149,7 -133,7 +133,7 @@@ static void t1_set_rxmode(struct net_de
  static void link_report(struct port_info *p)
  {
        if (!netif_carrier_ok(p->dev))
-               printk(KERN_INFO "%s: link down\n", p->dev->name);
+               printk(KERN_INFO "%s: link down\n", p->dev->name);
        else {
                const char *s = "10Mbps";
  
                        case SPEED_100:   s = "100Mbps"; break;
                }
  
-         printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
+       printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
                       p->dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
  }
  
- void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
+ void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
  {
        struct port_info *p = &adapter->port[port_id];
                        netif_carrier_off(p->dev);
                link_report(p);
  
+               /* multi-ports: inform the TOE (TCP offload engine) */
+               if ((speed > 0) && (adapter->params.nports > 1)) {
+                       unsigned int sched_speed = 10;
+                       switch (speed) {
+                       case SPEED_1000:
+                               sched_speed = 1000;
+                               break;
+                       case SPEED_100:
+                               sched_speed = 100;
+                               break;
+                       case SPEED_10:
+                               sched_speed = 10;
+                               break;
+                       }
+                       t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
+               }
        }
  }
  
@@@ -195,8 -195,10 +195,10 @@@ static void link_start(struct port_inf
  static void enable_hw_csum(struct adapter *adapter)
  {
        if (adapter->flags & TSO_CAPABLE)
-               t1_tp_set_ip_checksum_offload(adapter, 1); /* for TSO only */
-       t1_tp_set_tcp_checksum_offload(adapter, 1);
+               t1_tp_set_ip_checksum_offload(adapter->tp, 1);  /* for TSO only */
+       if (adapter->flags & UDP_CSUM_CAPABLE)
+               t1_tp_set_udp_checksum_offload(adapter->tp, 1);
+       t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
  }
  
  /*
@@@ -217,11 -219,19 +219,19 @@@ static int cxgb_up(struct adapter *adap
        }
  
        t1_interrupts_clear(adapter);
-       if ((err = request_irq(adapter->pdev->irq,
-                              t1_select_intr_handler(adapter), IRQF_SHARED,
-                              adapter->name, adapter))) {
+       adapter->params.has_msi = !disable_msi && pci_enable_msi(adapter->pdev) == 0;
+       err = request_irq(adapter->pdev->irq,
+                         t1_select_intr_handler(adapter),
+                         adapter->params.has_msi ? 0 : IRQF_SHARED,
+                         adapter->name, adapter);
+       if (err) {
+               if (adapter->params.has_msi)
+                       pci_disable_msi(adapter->pdev);
                goto out_err;
        }
        t1_sge_start(adapter->sge);
        t1_interrupts_enable(adapter);
   out_err:
@@@ -236,6 -246,8 +246,8 @@@ static void cxgb_down(struct adapter *a
        t1_sge_stop(adapter->sge);
        t1_interrupts_disable(adapter);
        free_irq(adapter->pdev->irq, adapter);
+       if (adapter->params.has_msi)
+               pci_disable_msi(adapter->pdev);
  }
  
  static int cxgb_open(struct net_device *dev)
@@@ -290,7 -302,7 +302,7 @@@ static struct net_device_stats *t1_get_
  
        /* Do a full update of the MAC stats */
        pstats = p->mac->ops->statistics_update(p->mac,
-                                                     MAC_STATS_UPDATE_FULL);
+                                               MAC_STATS_UPDATE_FULL);
  
        ns->tx_packets = pstats->TxUnicastFramesOK +
                pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
@@@ -344,47 -356,53 +356,53 @@@ static void set_msglevel(struct net_dev
  }
  
  static char stats_strings[][ETH_GSTRING_LEN] = {
-         "TxOctetsOK",
-         "TxOctetsBad",
-         "TxUnicastFramesOK",
-         "TxMulticastFramesOK",
-         "TxBroadcastFramesOK",
-         "TxPauseFrames",
-         "TxFramesWithDeferredXmissions",
-         "TxLateCollisions",
-         "TxTotalCollisions",
-         "TxFramesAbortedDueToXSCollisions",
-         "TxUnderrun",
-         "TxLengthErrors",
-         "TxInternalMACXmitError",
-         "TxFramesWithExcessiveDeferral",
-         "TxFCSErrors",
-         "RxOctetsOK",
-         "RxOctetsBad",
-         "RxUnicastFramesOK",
-         "RxMulticastFramesOK",
-         "RxBroadcastFramesOK",
-         "RxPauseFrames",
-         "RxFCSErrors",
-         "RxAlignErrors",
-         "RxSymbolErrors",
-         "RxDataErrors",
-         "RxSequenceErrors",
-         "RxRuntErrors",
-         "RxJabberErrors",
-         "RxInternalMACRcvError",
-         "RxInRangeLengthErrors",
-         "RxOutOfRangeLengthField",
-         "RxFrameTooLongErrors",
-       "TSO",
-       "VLANextractions",
-       "VLANinsertions",
+       "TxOctetsOK",
+       "TxOctetsBad",
+       "TxUnicastFramesOK",
+       "TxMulticastFramesOK",
+       "TxBroadcastFramesOK",
+       "TxPauseFrames",
+       "TxFramesWithDeferredXmissions",
+       "TxLateCollisions",
+       "TxTotalCollisions",
+       "TxFramesAbortedDueToXSCollisions",
+       "TxUnderrun",
+       "TxLengthErrors",
+       "TxInternalMACXmitError",
+       "TxFramesWithExcessiveDeferral",
+       "TxFCSErrors",
+       "RxOctetsOK",
+       "RxOctetsBad",
+       "RxUnicastFramesOK",
+       "RxMulticastFramesOK",
+       "RxBroadcastFramesOK",
+       "RxPauseFrames",
+       "RxFCSErrors",
+       "RxAlignErrors",
+       "RxSymbolErrors",
+       "RxDataErrors",
+       "RxSequenceErrors",
+       "RxRuntErrors",
+       "RxJabberErrors",
+       "RxInternalMACRcvError",
+       "RxInRangeLengthErrors",
+       "RxOutOfRangeLengthField",
+       "RxFrameTooLongErrors",
+       /* Port stats */
+       "RxPackets",
        "RxCsumGood",
+       "TxPackets",
        "TxCsumOffload",
-       "RxDrops"
+       "TxTso",
+       "RxVlan",
+       "TxVlan",
+       /* Interrupt stats */
+       "rx drops",
+       "pure_rsps",
+       "unhandled irqs",
        "respQ_empty",
        "respQ_overflow",
        "freelistQ_empty",
        "pkt_mismatch",
        "cmdQ_full0",
        "cmdQ_full1",
-       "tx_ipfrags",
-       "tx_reg_pkts",
-       "tx_lso_pkts",
-       "tx_do_cksum",
-       
        "espi_DIP2ParityErr",
        "espi_DIP4Err",
        "espi_RxDrops",
        "espi_RxOvfl",
        "espi_ParityErr"
  };
-  
  #define T2_REGMAP_SIZE (3 * 1024)
  
  static int get_regs_len(struct net_device *dev)
@@@ -439,65 -453,77 +453,77 @@@ static void get_stats(struct net_devic
        struct adapter *adapter = dev->priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        const struct cmac_statistics *s;
-       const struct sge_port_stats *ss;
        const struct sge_intr_counts *t;
+       struct sge_port_stats ss;
  
        s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
-       ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
-       t = t1_sge_get_intr_counts(adapter->sge);
  
-         *data++ = s->TxOctetsOK;
-         *data++ = s->TxOctetsBad;
-         *data++ = s->TxUnicastFramesOK;
-         *data++ = s->TxMulticastFramesOK;
-         *data++ = s->TxBroadcastFramesOK;
-         *data++ = s->TxPauseFrames;
-         *data++ = s->TxFramesWithDeferredXmissions;
-         *data++ = s->TxLateCollisions;
-         *data++ = s->TxTotalCollisions;
-         *data++ = s->TxFramesAbortedDueToXSCollisions;
-         *data++ = s->TxUnderrun;
-         *data++ = s->TxLengthErrors;
-         *data++ = s->TxInternalMACXmitError;
-         *data++ = s->TxFramesWithExcessiveDeferral;
-         *data++ = s->TxFCSErrors;
-         *data++ = s->RxOctetsOK;
-         *data++ = s->RxOctetsBad;
-         *data++ = s->RxUnicastFramesOK;
-         *data++ = s->RxMulticastFramesOK;
-         *data++ = s->RxBroadcastFramesOK;
-         *data++ = s->RxPauseFrames;
-         *data++ = s->RxFCSErrors;
-         *data++ = s->RxAlignErrors;
-         *data++ = s->RxSymbolErrors;
-         *data++ = s->RxDataErrors;
-         *data++ = s->RxSequenceErrors;
-         *data++ = s->RxRuntErrors;
-         *data++ = s->RxJabberErrors;
-         *data++ = s->RxInternalMACRcvError;
-         *data++ = s->RxInRangeLengthErrors;
-         *data++ = s->RxOutOfRangeLengthField;
-         *data++ = s->RxFrameTooLongErrors;
-       *data++ = ss->tso;
-       *data++ = ss->vlan_xtract;
-       *data++ = ss->vlan_insert;
-       *data++ = ss->rx_cso_good;
-       *data++ = ss->tx_cso;
-       *data++ = ss->rx_drops;
-       *data++ = (u64)t->respQ_empty;
-       *data++ = (u64)t->respQ_overflow;
-       *data++ = (u64)t->freelistQ_empty;
-       *data++ = (u64)t->pkt_too_big;
-       *data++ = (u64)t->pkt_mismatch;
-       *data++ = (u64)t->cmdQ_full[0];
-       *data++ = (u64)t->cmdQ_full[1];
-       *data++ = (u64)t->tx_ipfrags;
-       *data++ = (u64)t->tx_reg_pkts;
-       *data++ = (u64)t->tx_lso_pkts;
-       *data++ = (u64)t->tx_do_cksum;
+       *data++ = s->TxOctetsOK;
+       *data++ = s->TxOctetsBad;
+       *data++ = s->TxUnicastFramesOK;
+       *data++ = s->TxMulticastFramesOK;
+       *data++ = s->TxBroadcastFramesOK;
+       *data++ = s->TxPauseFrames;
+       *data++ = s->TxFramesWithDeferredXmissions;
+       *data++ = s->TxLateCollisions;
+       *data++ = s->TxTotalCollisions;
+       *data++ = s->TxFramesAbortedDueToXSCollisions;
+       *data++ = s->TxUnderrun;
+       *data++ = s->TxLengthErrors;
+       *data++ = s->TxInternalMACXmitError;
+       *data++ = s->TxFramesWithExcessiveDeferral;
+       *data++ = s->TxFCSErrors;
+       *data++ = s->RxOctetsOK;
+       *data++ = s->RxOctetsBad;
+       *data++ = s->RxUnicastFramesOK;
+       *data++ = s->RxMulticastFramesOK;
+       *data++ = s->RxBroadcastFramesOK;
+       *data++ = s->RxPauseFrames;
+       *data++ = s->RxFCSErrors;
+       *data++ = s->RxAlignErrors;
+       *data++ = s->RxSymbolErrors;
+       *data++ = s->RxDataErrors;
+       *data++ = s->RxSequenceErrors;
+       *data++ = s->RxRuntErrors;
+       *data++ = s->RxJabberErrors;
+       *data++ = s->RxInternalMACRcvError;
+       *data++ = s->RxInRangeLengthErrors;
+       *data++ = s->RxOutOfRangeLengthField;
+       *data++ = s->RxFrameTooLongErrors;
+       t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
+       *data++ = ss.rx_packets;
+       *data++ = ss.rx_cso_good;
+       *data++ = ss.tx_packets;
+       *data++ = ss.tx_cso;
+       *data++ = ss.tx_tso;
+       *data++ = ss.vlan_xtract;
+       *data++ = ss.vlan_insert;
+       t = t1_sge_get_intr_counts(adapter->sge);
+       *data++ = t->rx_drops;
+       *data++ = t->pure_rsps;
+       *data++ = t->unhandled_irqs;
+       *data++ = t->respQ_empty;
+       *data++ = t->respQ_overflow;
+       *data++ = t->freelistQ_empty;
+       *data++ = t->pkt_too_big;
+       *data++ = t->pkt_mismatch;
+       *data++ = t->cmdQ_full[0];
+       *data++ = t->cmdQ_full[1];
+       if (adapter->espi) {
+               const struct espi_intr_counts *e;
+               e = t1_espi_get_intr_counts(adapter->espi);
+               *data++ = e->DIP2_parity_err;
+               *data++ = e->DIP4_err;
+               *data++ = e->rx_drops;
+               *data++ = e->tx_drops;
+               *data++ = e->rx_ovflw;
+               *data++ = e->parity_err;
+       }
  }
  
  static inline void reg_block_dump(struct adapter *ap, void *buf,
@@@ -521,6 -547,15 +547,15 @@@ static void get_regs(struct net_device 
  
        memset(buf, 0, T2_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
+       reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
+       reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
+       reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
+       reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
+       reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
+       reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
+       reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
+       reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
+       reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
  }
  
  static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                cmd->duplex = -1;
        }
  
-         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
-         cmd->phy_address = p->phy->addr;
-         cmd->transceiver = XCVR_EXTERNAL;
-         cmd->autoneg = p->link_config.autoneg;
-         cmd->maxtxpkt = 0;
-         cmd->maxrxpkt = 0;
+       cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+       cmd->phy_address = p->phy->addr;
+       cmd->transceiver = XCVR_EXTERNAL;
+       cmd->autoneg = p->link_config.autoneg;
+       cmd->maxtxpkt = 0;
+       cmd->maxrxpkt = 0;
        return 0;
  }
  
@@@ -715,7 -750,7 +750,7 @@@ static int set_sge_param(struct net_dev
                return -EINVAL;
  
        if (adapter->flags & FULL_INIT_DONE)
-         return -EBUSY;
+               return -EBUSY;
  
        adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
        adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
@@@ -759,7 -794,9 +794,9 @@@ static int get_coalesce(struct net_devi
  
  static int get_eeprom_len(struct net_device *dev)
  {
-     return EEPROM_SIZE;
+       struct adapter *adapter = dev->priv;
+       return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
  }
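
(Returning 0 from get_eeprom_len() tells the ethtool core there is no
readable EEPROM; presumably this is the right answer for the non-ASIC
boards that t1_is_asic() screens out.)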
  
  #define EEPROM_MAGIC(ap) \
@@@ -809,47 -846,36 +846,36 @@@ static const struct ethtool_ops t1_etht
        .set_tso           = set_tso,
  };
  
- static void cxgb_proc_cleanup(struct adapter *adapter,
-                                       struct proc_dir_entry *dir)
- {
-       const char *name;
-       name = adapter->name;
-       remove_proc_entry(name, dir);
- }
- //#define chtoe_setup_toedev(adapter) NULL
- #define update_mtu_tab(adapter)
- #define write_smt_entry(adapter, idx)
  static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
  {
-         struct adapter *adapter = dev->priv;
-         struct mii_ioctl_data *data = if_mii(req);
+       struct adapter *adapter = dev->priv;
+       struct mii_ioctl_data *data = if_mii(req);
  
        switch (cmd) {
-         case SIOCGMIIPHY:
-                 data->phy_id = adapter->port[dev->if_port].phy->addr;
-                 /* FALLTHRU */
-         case SIOCGMIIREG: {
+       case SIOCGMIIPHY:
+               data->phy_id = adapter->port[dev->if_port].phy->addr;
+               /* FALLTHRU */
+       case SIOCGMIIREG: {
                struct cphy *phy = adapter->port[dev->if_port].phy;
                u32 val;
  
                if (!phy->mdio_read)
-             return -EOPNOTSUPP;
+                       return -EOPNOTSUPP;
                phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
                               &val);
-                 data->val_out = val;
-                 break;
+               data->val_out = val;
+               break;
        }
-         case SIOCSMIIREG: {
+       case SIOCSMIIREG: {
                struct cphy *phy = adapter->port[dev->if_port].phy;
  
-                 if (!capable(CAP_NET_ADMIN))
-                     return -EPERM;
+               if (!capable(CAP_NET_ADMIN))
+                       return -EPERM;
                if (!phy->mdio_write)
-             return -EOPNOTSUPP;
+                       return -EOPNOTSUPP;
                phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
                                data->val_in);
-                 break;
+               break;
        }
  
        default:
@@@ -865,9 -891,9 +891,9 @@@ static int t1_change_mtu(struct net_dev
        struct cmac *mac = adapter->port[dev->if_port].mac;
  
        if (!mac->ops->set_mtu)
-         return -EOPNOTSUPP;
+               return -EOPNOTSUPP;
        if (new_mtu < 68)
-         return -EINVAL;
+               return -EINVAL;
        if ((ret = mac->ops->set_mtu(mac, new_mtu)))
                return ret;
        dev->mtu = new_mtu;
@@@ -918,7 -944,7 +944,7 @@@ static void t1_netpoll(struct net_devic
        struct adapter *adapter = dev->priv;
  
        local_irq_save(flags);
-         t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter);
+       t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter);
        local_irq_restore(flags);
  }
  #endif
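
The SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG cases in t1_ioctl() above are
the stock MII ioctls, so the PHY can be queried from user space through
a struct ifreq.  A minimal user-space sketch (the device name "eth0" and
the datagram socket are assumptions; error handling elided):

        #include <string.h>
        #include <sys/ioctl.h>
        #include <sys/socket.h>
        #include <net/if.h>
        #include <linux/mii.h>
        #include <linux/sockios.h>

        int main(void)
        {
                struct ifreq ifr;
                /* the MII data block lives inside the ifreq itself */
                struct mii_ioctl_data *mii =
                        (struct mii_ioctl_data *)&ifr.ifr_data;
                int fd = socket(AF_INET, SOCK_DGRAM, 0);

                memset(&ifr, 0, sizeof(ifr));
                strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
                ioctl(fd, SIOCGMIIPHY, &ifr);  /* fills in mii->phy_id */
                mii->reg_num = MII_BMSR;       /* basic mode status register */
                ioctl(fd, SIOCGMIIREG, &ifr);  /* PHY reg -> mii->val_out */
                return mii->val_out & BMSR_LSTATUS ? 0 : 1;  /* link up? */
        }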
   * Periodic accumulation of MAC statistics.  This is used only if the MAC
   * does not have any other way to prevent stats counter overflow.
   */
 -static void mac_stats_task(void *data)
 +static void mac_stats_task(struct work_struct *work)
  {
        int i;
 -      struct adapter *adapter = data;
 +      struct adapter *adapter =
 +              container_of(work, struct adapter, stats_update_task.work);
  
        for_each_port(adapter, i) {
                struct port_info *p = &adapter->port[i];
  /*
   * Processes elmer0 external interrupts in process context.
   */
 -static void ext_intr_task(void *data)
 +static void ext_intr_task(struct work_struct *work)
  {
 -      struct adapter *adapter = data;
 +      struct adapter *adapter =
 +              container_of(work, struct adapter, ext_intr_handler_task);
  
-       elmer0_ext_intr_handler(adapter);
+       t1_elmer0_ext_intr_handler(adapter);
  
        /* Now reenable external interrupts */
        spin_lock_irq(&adapter->async_lock);
        adapter->slow_intr_mask |= F_PL_INTR_EXT;
        writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
        writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
-                    adapter->regs + A_PL_ENABLE);
+                  adapter->regs + A_PL_ENABLE);
        spin_unlock_irq(&adapter->async_lock);
  }
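
These handler conversions (mac_stats_task and ext_intr_task here, plus
the e100/e1000/ixgb/myri10ge equivalents further down) all follow the
same workqueue API rework this merge fixes up against: a work handler
now receives the work_struct itself and recovers its context via
container_of(), replacing the old void *data argument to INIT_WORK().
A minimal before/after sketch with a hypothetical struct foo:

        struct foo {
                struct work_struct task;
        };

        /* old style: the handler was passed an opaque pointer */
        static void foo_task_old(void *data)
        {
                struct foo *f = data;
                /* ... use f ... */
        }

        /* new style: the handler derives its container from the work item */
        static void foo_task(struct work_struct *work)
        {
                struct foo *f = container_of(work, struct foo, task);
                /* ... use f ... */
        }

        static void foo_setup(struct foo *f)
        {
                /* was: INIT_WORK(&f->task, foo_task_old, f); */
                INIT_WORK(&f->task, foo_task);
        }

Delayed work follows the same pattern with INIT_DELAYED_WORK() and a
struct delayed_work member, which is why mac_stats_task recovers its
adapter through stats_update_task.work.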
  
@@@ -980,7 -1004,7 +1006,7 @@@ void t1_elmer0_ext_intr(struct adapter 
         */
        adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
        writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
-                    adapter->regs + A_PL_ENABLE);
+                  adapter->regs + A_PL_ENABLE);
        schedule_work(&adapter->ext_intr_handler_task);
  }
  
@@@ -1013,7 -1037,7 +1039,7 @@@ static int __devinit init_one(struct pc
  
        err = pci_enable_device(pdev);
        if (err)
-         return err;
+               return err;
  
        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                CH_ERR("%s: cannot find PCI device memory base address\n",
  
        pci_set_master(pdev);
  
-     mmio_start = pci_resource_start(pdev, 0);
+       mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
        bi = t1_get_board_info(ent->driver_data);
  
                        adapter->msg_enable = dflt_msg_enable;
                        adapter->mmio_len = mmio_len;
  
-                       init_MUTEX(&adapter->mib_mutex);
                        spin_lock_init(&adapter->tpi_lock);
                        spin_lock_init(&adapter->work_lock);
                        spin_lock_init(&adapter->async_lock);
+                       spin_lock_init(&adapter->mac_lock);
  
                        INIT_WORK(&adapter->ext_intr_handler_task,
 -                                ext_intr_task, adapter);
 -                      INIT_WORK(&adapter->stats_update_task, mac_stats_task,
 -                                adapter);
 +                                ext_intr_task);
 +                      INIT_DELAYED_WORK(&adapter->stats_update_task,
 +                                        mac_stats_task);
- #ifdef work_struct
-                       init_timer(&adapter->stats_update_timer);
-                       adapter->stats_update_timer.function = mac_stats_timer;
-                       adapter->stats_update_timer.data =
-                               (unsigned long)adapter;
- #endif
  
                        pci_set_drvdata(pdev, netdev);
                }
                        netdev->vlan_rx_register = vlan_rx_register;
                        netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
  #endif
-                       adapter->flags |= TSO_CAPABLE;
-                       netdev->features |= NETIF_F_TSO;
+                       /* T204: disable TSO */
+                       if (!is_T2(adapter) || bi->port_number != 4) {
+                               adapter->flags |= TSO_CAPABLE;
+                               netdev->features |= NETIF_F_TSO;
+                       }
                }
  
                netdev->open = cxgb_open;
                netdev->stop = cxgb_close;
                netdev->hard_start_xmit = t1_start_xmit;
                netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
-                       sizeof(struct cpl_tx_pkt_lso) :
-                       sizeof(struct cpl_tx_pkt);
+                       sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
                netdev->get_stats = t1_get_stats;
                netdev->set_multicast_list = t1_set_rxmode;
                netdev->do_ioctl = t1_ioctl;
  #endif
                netdev->weight = 64;
  
-         SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
+               SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
        }
  
        if (t1_init_sw_modules(adapter, bi) < 0) {
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i].dev->name;
  
-                 __set_bit(i, &adapter->registered_device_map);
+                       __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
               bi->desc, adapter->params.chip_revision,
               adapter->params.pci.is_pcix ? "PCIX" : "PCI",
               adapter->params.pci.speed, adapter->params.pci.width);
+       /*
+        * Set the T1B ASIC and memory clocks.
+        */
+       if (t1powersave)
+               adapter->t1powersave = LCLOCK;  /* HW default is powersave mode. */
+       else
+               adapter->t1powersave = HCLOCK;
+       if (t1_is_T1B(adapter))
+               t1_clock(adapter, t1powersave);
        return 0;
  
   out_release_adapter_res:
        t1_free_sw_modules(adapter);
   out_free_dev:
        if (adapter) {
-               if (adapter->regs) iounmap(adapter->regs);
+               if (adapter->regs)
+                       iounmap(adapter->regs);
                for (i = bi->port_number - 1; i >= 0; --i)
-                       if (adapter->port[i].dev) {
-                               cxgb_proc_cleanup(adapter, proc_root_driver);
-                               kfree(adapter->port[i].dev);
-                       }
+                       if (adapter->port[i].dev)
+                               free_netdev(adapter->port[i].dev);
        }
        pci_release_regions(pdev);
   out_disable_pdev:
        return err;
  }
  
+ static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
+ {
+       int data;
+       int i;
+       u32 val;
+       enum {
+               S_CLOCK = 1 << 3,
+               S_DATA = 1 << 4
+       };
+       for (i = (nbits - 1); i > -1; i--) {
+               udelay(50);
+               data = ((bitdata >> i) & 0x1);
+               __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+               if (data)
+                       val |= S_DATA;
+               else
+                       val &= ~S_DATA;
+               udelay(50);
+               /* Set SCLOCK low */
+               val &= ~S_CLOCK;
+               __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+               udelay(50);
+               /* Write SCLOCK high */
+               val |= S_CLOCK;
+               __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+       }
+ }
+ static int t1_clock(struct adapter *adapter, int mode)
+ {
+       u32 val;
+       int M_CORE_VAL;
+       int M_MEM_VAL;
+       enum {
+               M_CORE_BITS = 9,
+               T_CORE_VAL = 0,
+               T_CORE_BITS = 2,
+               N_CORE_VAL = 0,
+               N_CORE_BITS = 2,
+               M_MEM_BITS = 9,
+               T_MEM_VAL = 0,
+               T_MEM_BITS = 2,
+               N_MEM_VAL = 0,
+               N_MEM_BITS = 2,
+               NP_LOAD = 1 << 17,
+               S_LOAD_MEM = 1 << 5,
+               S_LOAD_CORE = 1 << 6,
+               S_CLOCK = 1 << 3
+       };
+       if (!t1_is_T1B(adapter))
+               return -ENODEV; /* Can't re-clock this chip. */
+       if (mode & 2) {
+               return 0;       /* show current mode. */
+       }
+       if ((adapter->t1powersave & 1) == (mode & 1))
+               return -EALREADY;       /* ASIC already running in mode. */
+       if ((mode & 1) == HCLOCK) {
+               M_CORE_VAL = 0x14;
+               M_MEM_VAL = 0x18;
+               adapter->t1powersave = HCLOCK;  /* overclock */
+       } else {
+               M_CORE_VAL = 0xe;
+               M_MEM_VAL = 0x10;
+               adapter->t1powersave = LCLOCK;  /* underclock */
+       }
+       /* Don't interrupt this serial stream! */
+       spin_lock(&adapter->tpi_lock);
+       /* Initialize for ASIC core */
+       __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+       val |= NP_LOAD;
+       udelay(50);
+       __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+       udelay(50);
+       __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+       val &= ~S_LOAD_CORE;
+       val &= ~S_CLOCK;
+       __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+       udelay(50);
+       /* Serial program the ASIC clock synthesizer */
+       bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
+       bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
+       bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
+       udelay(50);
+       /* Finish ASIC core */
+       __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+       val |= S_LOAD_CORE;
+       udelay(50);
+       __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+       udelay(50);
+       __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+       val &= ~S_LOAD_CORE;
+       udelay(50);
+       __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+       udelay(50);
+       /* Initialize for memory */
+       __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+       val |= NP_LOAD;
+       udelay(50);
+       __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+       udelay(50);
+       __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+       val &= ~S_LOAD_MEM;
+       val &= ~S_CLOCK;
+       udelay(50);
+       __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+       udelay(50);
+       /* Serial program the memory clock synthesizer */
+       bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
+       bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
+       bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
+       udelay(50);
+       /* Finish memory */
+       __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+       val |= S_LOAD_MEM;
+       udelay(50);
+       __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+       udelay(50);
+       __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+       val &= ~S_LOAD_MEM;
+       udelay(50);
+       __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+       spin_unlock(&adapter->tpi_lock);
+       return 0;
+ }
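
Read together, bit_bang() and t1_clock() implement a serial load of the
T1B's two clock synthesizers over the ELMER0 GPO bits: NP_LOAD is pulsed
to arm a synthesizer, the T, N and M divisor fields are clocked in
MSB-first on S_DATA with S_CLOCK, and the result is latched with
S_LOAD_CORE or S_LOAD_MEM.  Only the M values differ between modes:
0x14/0x18 (core/memory) for full-speed HCLOCK versus 0xe/0x10 for the
power-saving LCLOCK that a T1B defaults to at reset.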
  static inline void t1_sw_reset(struct pci_dev *pdev)
  {
        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
@@@ -1225,10 -1405,9 +1407,9 @@@ static void __devexit remove_one(struc
                t1_free_sw_modules(adapter);
                iounmap(adapter->regs);
                while (--i >= 0)
-                       if (adapter->port[i].dev) {
-                               cxgb_proc_cleanup(adapter, proc_root_driver);
-                               kfree(adapter->port[i].dev);
-                       }
+                       if (adapter->port[i].dev)
+                               free_netdev(adapter->port[i].dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
diff --combined drivers/net/e100.c
index e7737d02bb055b694a9cd3bd9da7dd3166f7a361,3a8df479cbdad660351a676b54912074fb597062..03bf164f9e8db32f3eb1fdafe1e3fffb0b3d26fb
@@@ -1215,7 -1215,7 +1215,7 @@@ static void e100_setup_ucode(struct ni
  *  the literal in the instruction before the code is loaded, the
  *  driver can change the algorithm.
  *
- *  INTDELAY - This loads the dead-man timer with its inital value.
+ *  INTDELAY - This loads the dead-man timer with its initial value.
  *    When this timer expires the interrupt is asserted, and the
  *    timer is reset each time a new packet is received.  (see
  *    BUNDLEMAX below to set the limit on number of chained packets)
@@@ -2102,10 -2102,9 +2102,10 @@@ static void e100_tx_timeout(struct net_
        schedule_work(&nic->tx_timeout_task);
  }
  
 -static void e100_tx_timeout_task(struct net_device *netdev)
 +static void e100_tx_timeout_task(struct work_struct *work)
  {
 -      struct nic *nic = netdev_priv(netdev);
 +      struct nic *nic = container_of(work, struct nic, tx_timeout_task);
 +      struct net_device *netdev = nic->netdev;
  
        DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
                readb(&nic->csr->scb.status));
@@@ -2638,7 -2637,8 +2638,7 @@@ static int __devinit e100_probe(struct 
        nic->blink_timer.function = e100_blink_led;
        nic->blink_timer.data = (unsigned long)nic;
  
 -      INIT_WORK(&nic->tx_timeout_task,
 -              (void (*)(void *))e100_tx_timeout_task, netdev);
 +      INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
  
        if((err = e100_alloc(nic))) {
                DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
index 03294400bc90a0fb2d77a5974d59df90da3b7c31,32dde0adb6839c107fad7ce9cbd32ec7686674e1..73f3a85fd2384f82b3f57ea83722e7adb9969de2
@@@ -27,6 -27,7 +27,7 @@@
  *******************************************************************************/
  
  #include "e1000.h"
+ #include <net/ip6_checksum.h>
  
  char e1000_driver_name[] = "e1000";
  static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
@@@ -35,7 -36,7 +36,7 @@@
  #else
  #define DRIVERNAPI "-NAPI"
  #endif
- #define DRV_VERSION "7.2.9-k4"DRIVERNAPI
+ #define DRV_VERSION "7.3.15-k2"DRIVERNAPI
  char e1000_driver_version[] = DRV_VERSION;
  static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
  
@@@ -103,6 -104,9 +104,9 @@@ static struct pci_device_id e1000_pci_t
        INTEL_E1000_ETHERNET_DEVICE(0x10B9),
        INTEL_E1000_ETHERNET_DEVICE(0x10BA),
        INTEL_E1000_ETHERNET_DEVICE(0x10BB),
+       INTEL_E1000_ETHERNET_DEVICE(0x10BC),
+       INTEL_E1000_ETHERNET_DEVICE(0x10C4),
+       INTEL_E1000_ETHERNET_DEVICE(0x10C5),
        /* required last entry */
        {0,}
  };
@@@ -154,6 -158,9 +158,9 @@@ static struct net_device_stats * e1000_
  static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
  static int e1000_set_mac(struct net_device *netdev, void *p);
  static irqreturn_t e1000_intr(int irq, void *data);
+ #ifdef CONFIG_PCI_MSI
+ static irqreturn_t e1000_intr_msi(int irq, void *data);
+ #endif
  static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
                                      struct e1000_tx_ring *tx_ring);
  #ifdef CONFIG_E1000_NAPI
@@@ -183,7 -190,7 +190,7 @@@ void e1000_set_ethtool_ops(struct net_d
  static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
  static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
  static void e1000_tx_timeout(struct net_device *dev);
 -static void e1000_reset_task(struct net_device *dev);
 +static void e1000_reset_task(struct work_struct *work);
  static void e1000_smartspeed(struct e1000_adapter *adapter);
  static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                         struct sk_buff *skb);
@@@ -285,7 -292,7 +292,7 @@@ static int e1000_request_irq(struct e10
  
        flags = IRQF_SHARED;
  #ifdef CONFIG_PCI_MSI
-       if (adapter->hw.mac_type > e1000_82547_rev_2) {
+       if (adapter->hw.mac_type >= e1000_82571) {
                adapter->have_msi = TRUE;
                if ((err = pci_enable_msi(adapter->pdev))) {
                        DPRINTK(PROBE, ERR,
                        adapter->have_msi = FALSE;
                }
        }
-       if (adapter->have_msi)
+       if (adapter->have_msi) {
                flags &= ~IRQF_SHARED;
+               err = request_irq(adapter->pdev->irq, &e1000_intr_msi, flags,
+                                 netdev->name, netdev);
+               if (err)
+                       DPRINTK(PROBE, ERR,
+                              "Unable to allocate interrupt Error: %d\n", err);
+       } else
  #endif
        if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
                               netdev->name, netdev)))
@@@ -375,7 -388,7 +388,7 @@@ e1000_update_mng_vlan(struct e1000_adap
   * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
   * For ASF and Pass Through versions of f/w this means that the
   * driver is no longer loaded. For AMT version (only with 82573)
-  * of the f/w this means that the netowrk i/f is closed.
+  * of the f/w this means that the network i/f is closed.
   *
   **/
  
@@@ -416,7 -429,7 +429,7 @@@ e1000_release_hw_control(struct e1000_a
   * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
   * For ASF and Pass Through versions of f/w this means that
   * the driver is loaded. For AMT version (only with 82573)
-  * of the f/w this means that the netowrk i/f is open.
+  * of the f/w this means that the network i/f is open.
   *
   **/
  
@@@ -426,6 -439,7 +439,7 @@@ e1000_get_hw_control(struct e1000_adapt
        uint32_t ctrl_ext;
        uint32_t swsm;
        uint32_t extcnf;
        /* Let firmware know the driver has taken over */
        switch (adapter->hw.mac_type) {
        case e1000_82571:
@@@ -601,9 -615,6 +615,6 @@@ voi
  e1000_reset(struct e1000_adapter *adapter)
  {
        uint32_t pba, manc;
- #ifdef DISABLE_MULR
-       uint32_t tctl;
- #endif
        uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
  
        /* Repartition Pba for greater than 9k mtu
        e1000_reset_hw(&adapter->hw);
        if (adapter->hw.mac_type >= e1000_82544)
                E1000_WRITE_REG(&adapter->hw, WUC, 0);
- #ifdef DISABLE_MULR
-       /* disable Multiple Reads in Transmit Control Register for debugging */
-       tctl = E1000_READ_REG(hw, TCTL);
-       E1000_WRITE_REG(hw, TCTL, tctl & ~E1000_TCTL_MULR);
  
- #endif
        if (e1000_init_hw(&adapter->hw))
                DPRINTK(PROBE, ERR, "Hardware Error\n");
        e1000_update_mng_vlan(adapter);
@@@ -851,9 -857,9 +857,9 @@@ e1000_probe(struct pci_dev *pdev
           (adapter->hw.mac_type != e1000_82547))
                netdev->features |= NETIF_F_TSO;
  
- #ifdef NETIF_F_TSO_IPV6
+ #ifdef NETIF_F_TSO6
        if (adapter->hw.mac_type > e1000_82547_rev_2)
-               netdev->features |= NETIF_F_TSO_IPV6;
+               netdev->features |= NETIF_F_TSO6;
  #endif
  #endif
        if (pci_using_dac)
        adapter->phy_info_timer.function = &e1000_update_phy_info;
        adapter->phy_info_timer.data = (unsigned long) adapter;
  
 -      INIT_WORK(&adapter->reset_task,
 -              (void (*)(void *))e1000_reset_task, netdev);
 +      INIT_WORK(&adapter->reset_task, e1000_reset_task);
  
        e1000_check_options(adapter);
  
                break;
        case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
        case E1000_DEV_ID_82571EB_QUAD_COPPER:
+       case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
                /* if quad port adapter, disable WoL on all but port A */
                if (global_quad_port_a != 0)
                        adapter->eeprom_wol = 0;
@@@ -1278,12 -1286,10 +1285,10 @@@ e1000_open(struct net_device *netdev
                return -EBUSY;
  
        /* allocate transmit descriptors */
        if ((err = e1000_setup_all_tx_resources(adapter)))
                goto err_setup_tx;
  
        /* allocate receive descriptors */
        if ((err = e1000_setup_all_rx_resources(adapter)))
                goto err_setup_rx;
  
@@@ -1568,6 -1574,8 +1573,8 @@@ e1000_configure_tx(struct e1000_adapte
  
        if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
                tarc = E1000_READ_REG(hw, TARC0);
+               /* set the speed mode bit, we'll clear it if we're not at
+                * gigabit link later */
                tarc |= (1 << 21);
                E1000_WRITE_REG(hw, TARC0, tarc);
        } else if (hw->mac_type == e1000_80003es2lan) {
        e1000_config_collision_dist(hw);
  
        /* Setup Transmit Descriptor Settings for eop descriptor */
-       adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
-               E1000_TXD_CMD_IFCS;
+       adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+       /* only set IDE if we are delaying interrupts using the timers */
+       if (adapter->tx_int_delay)
+               adapter->txd_cmd |= E1000_TXD_CMD_IDE;
  
        if (hw->mac_type < e1000_82543)
                adapter->txd_cmd |= E1000_TXD_CMD_RPS;
@@@ -1820,8 -1831,11 +1830,11 @@@ e1000_setup_rctl(struct e1000_adapter *
                /* Configure extra packet-split registers */
                rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
                rfctl |= E1000_RFCTL_EXTEN;
-               /* disable IPv6 packet split support */
-               rfctl |= E1000_RFCTL_IPV6_DIS;
+               /* disable packet split support for IPv6 extension headers,
+                * because some malformed IPv6 headers can hang the RX */
+               rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
+                         E1000_RFCTL_NEW_IPV6_EXT_DIS);
                E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
  
                rctl |= E1000_RCTL_DTYP_PS;
@@@ -1884,7 -1898,7 +1897,7 @@@ e1000_configure_rx(struct e1000_adapte
  
        if (hw->mac_type >= e1000_82540) {
                E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
-               if (adapter->itr > 1)
+               if (adapter->itr_setting != 0)
                        E1000_WRITE_REG(hw, ITR,
                                1000000000 / (adapter->itr * 256));
        }
                /* Reset delay timers after every interrupt */
                ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
  #ifdef CONFIG_E1000_NAPI
-               /* Auto-Mask interrupts upon ICR read. */
+               /* Auto-Mask interrupts upon ICR access */
                ctrl_ext |= E1000_CTRL_EXT_IAME;
+               E1000_WRITE_REG(hw, IAM, 0xffffffff);
  #endif
                E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
-               E1000_WRITE_REG(hw, IAM, ~0);
                E1000_WRITE_FLUSH(hw);
        }
  
                E1000_WRITE_REG(hw, RXCSUM, rxcsum);
        }
  
+       /* enable early receives on 82573; only takes effect with a total
+        * frame size > 2048 bytes, i.e. effectively only for jumbo frames */
+ #define E1000_ERT_2048 0x100
+       if (hw->mac_type == e1000_82573)
+               E1000_WRITE_REG(hw, ERT, E1000_ERT_2048);
        /* Enable Receives */
        E1000_WRITE_REG(hw, RCTL, rctl);
  }
@@@ -1990,10 -2010,13 +2009,13 @@@ e1000_unmap_and_free_tx_resource(struc
                                buffer_info->dma,
                                buffer_info->length,
                                PCI_DMA_TODEVICE);
+               buffer_info->dma = 0;
        }
-       if (buffer_info->skb)
+       if (buffer_info->skb) {
                dev_kfree_skb_any(buffer_info->skb);
-       memset(buffer_info, 0, sizeof(struct e1000_buffer));
+               buffer_info->skb = NULL;
+       }
+       /* buffer_info must be completely set up in the transmit path */
  }
  
  /**
@@@ -2417,6 -2440,7 +2439,7 @@@ e1000_watchdog(unsigned long data
                DPRINTK(LINK, INFO,
                        "Gigabit has been disabled, downgrading speed\n");
        }
        if (adapter->hw.mac_type == e1000_82573) {
                e1000_enable_tx_pkt_filtering(&adapter->hw);
                if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
                        if ((adapter->hw.mac_type == e1000_82571 ||
                             adapter->hw.mac_type == e1000_82572) &&
                            txb2b == 0) {
- #define SPEED_MODE_BIT (1 << 21)
                                uint32_t tarc0;
                                tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
-                               tarc0 &= ~SPEED_MODE_BIT;
+                               tarc0 &= ~(1 << 21);
                                E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
                        }
-                               
  #ifdef NETIF_F_TSO
                        /* disable TSO for pcie and 10/100 speeds, to avoid
                         * some hardware issues */
                                        DPRINTK(PROBE,INFO,
                                        "10/100 speed: disabling TSO\n");
                                        netdev->features &= ~NETIF_F_TSO;
+ #ifdef NETIF_F_TSO6
+                                       netdev->features &= ~NETIF_F_TSO6;
+ #endif
                                        break;
                                case SPEED_1000:
                                        netdev->features |= NETIF_F_TSO;
+ #ifdef NETIF_F_TSO6
+                                       netdev->features |= NETIF_F_TSO6;
+ #endif
                                        break;
                                default:
                                        /* oops */
                }
        }
  
-       /* Dynamic mode for Interrupt Throttle Rate (ITR) */
-       if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
-               /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
-                * asymmetrical Tx or Rx gets ITR=8000; everyone
-                * else is between 2000-8000. */
-               uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
-               uint32_t dif = (adapter->gotcl > adapter->gorcl ?
-                       adapter->gotcl - adapter->gorcl :
-                       adapter->gorcl - adapter->gotcl) / 10000;
-               uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
-               E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
-       }
        /* Cause software interrupt to ensure rx ring is cleaned */
        E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
  
        mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
  }
  
+ enum latency_range {
+       lowest_latency = 0,
+       low_latency = 1,
+       bulk_latency = 2,
+       latency_invalid = 255
+ };
+ /**
+  * e1000_update_itr - update the dynamic ITR value based on statistics
+  *      Stores a new ITR value based on packets and byte
+  *      counts during the last interrupt.  The advantage of per interrupt
+  *      computation is faster updates and more accurate ITR for the current
+  *      traffic pattern.  Constants in this function were computed
+  *      based on theoretical maximum wire speed and thresholds were set based
+  *      on testing data as well as attempting to minimize response time
+  *      while increasing bulk throughput.
+  *      This functionality is controlled by the InterruptThrottleRate module
+  *      parameter (see e1000_param.c)
+  * @adapter: pointer to adapter
+  * @itr_setting: current adapter->itr
+  * @packets: the number of packets during this measurement interval
+  * @bytes: the number of bytes during this measurement interval
+  **/
+ static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
+                                    uint16_t itr_setting,
+                                    int packets,
+                                    int bytes)
+ {
+       unsigned int retval = itr_setting;
+       struct e1000_hw *hw = &adapter->hw;
+       if (unlikely(hw->mac_type < e1000_82540))
+               goto update_itr_done;
+       if (packets == 0)
+               goto update_itr_done;
+       switch (itr_setting) {
+       case lowest_latency:
+               if ((packets < 5) && (bytes > 512))
+                       retval = low_latency;
+               break;
+       case low_latency:  /* 50 usec aka 20000 ints/s */
+               if (bytes > 10000) {
+                       if ((packets < 10) ||
+                            ((bytes/packets) > 1200))
+                               retval = bulk_latency;
+                       else if (packets > 35)
+                               retval = lowest_latency;
+               } else if (packets <= 2 && bytes < 512)
+                       retval = lowest_latency;
+               break;
+       case bulk_latency: /* 250 usec aka 4000 ints/s */
+               if (bytes > 25000) {
+                       if (packets > 35)
+                               retval = low_latency;
+               } else {
+                       if (bytes < 6000)
+                               retval = low_latency;
+               }
+               break;
+       }
+ update_itr_done:
+       return retval;
+ }
+ static void e1000_set_itr(struct e1000_adapter *adapter)
+ {
+       struct e1000_hw *hw = &adapter->hw;
+       uint16_t current_itr;
+       uint32_t new_itr = adapter->itr;
+       if (unlikely(hw->mac_type < e1000_82540))
+               return;
+       /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+       if (unlikely(adapter->link_speed != SPEED_1000)) {
+               current_itr = 0;
+               new_itr = 4000;
+               goto set_itr_now;
+       }
+       adapter->tx_itr = e1000_update_itr(adapter,
+                                   adapter->tx_itr,
+                                   adapter->total_tx_packets,
+                                   adapter->total_tx_bytes);
+       adapter->rx_itr = e1000_update_itr(adapter,
+                                   adapter->rx_itr,
+                                   adapter->total_rx_packets,
+                                   adapter->total_rx_bytes);
+       current_itr = max(adapter->rx_itr, adapter->tx_itr);
+       /* conservative mode eliminates the lowest_latency setting */
+       if (current_itr == lowest_latency && (adapter->itr_setting == 3))
+               current_itr = low_latency;
+       switch (current_itr) {
+       /* counts and packets in update_itr are dependent on these numbers */
+       case lowest_latency:
+               new_itr = 70000;
+               break;
+       case low_latency:
+               new_itr = 20000; /* aka hwitr = ~200 */
+               break;
+       case bulk_latency:
+               new_itr = 4000;
+               break;
+       default:
+               break;
+       }
+ set_itr_now:
+       if (new_itr != adapter->itr) {
+               /* this attempts to bias the interrupt rate towards Bulk
+                * by adding intermediate steps when interrupt rate is
+                * increasing */
+               new_itr = new_itr > adapter->itr ?
+                            min(adapter->itr + (new_itr >> 2), new_itr) :
+                            new_itr;
+               adapter->itr = new_itr;
+               E1000_WRITE_REG(hw, ITR, 1000000000 / (new_itr * 256));
+       }
+       return;
+ }
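
A worked example of the machinery above: in the low_latency state with,
say, 8 packets and 12000 bytes in an interval, bytes > 10000 and
packets < 10, so e1000_update_itr() escalates to bulk_latency and
e1000_set_itr() selects new_itr = 4000 ints/s.  The final register write
converts interrupts-per-second into the ITR's 256 ns units: for
new_itr = 20000, 1000000000 / (20000 * 256) = 195, i.e. a minimum
inter-interrupt gap of 195 * 256 ns ~= 50 us, matching the "50 usec aka
20000 ints/s" annotation in e1000_update_itr().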
  #define E1000_TX_FLAGS_CSUM           0x00000001
  #define E1000_TX_FLAGS_VLAN           0x00000002
  #define E1000_TX_FLAGS_TSO            0x00000004
@@@ -2616,7 -2761,7 +2760,7 @@@ e1000_tso(struct e1000_adapter *adapter
                                                   0);
                        cmd_length = E1000_TXD_CMD_IP;
                        ipcse = skb->h.raw - skb->data - 1;
- #ifdef NETIF_F_TSO_IPV6
+ #ifdef NETIF_F_TSO6
                } else if (skb->protocol == htons(ETH_P_IPV6)) {
                        skb->nh.ipv6h->payload_len = 0;
                        skb->h.th->check =
                context_desc->cmd_and_length = cpu_to_le32(cmd_length);
  
                buffer_info->time_stamp = jiffies;
+               buffer_info->next_to_watch = i;
  
                if (++i == tx_ring->count) i = 0;
                tx_ring->next_to_use = i;
@@@ -2680,12 -2826,13 +2825,13 @@@ e1000_tx_csum(struct e1000_adapter *ada
                context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
  
                context_desc->upper_setup.tcp_fields.tucss = css;
-               context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
+               context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
                context_desc->upper_setup.tcp_fields.tucse = 0;
                context_desc->tcp_seg_setup.data = 0;
                context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
  
                buffer_info->time_stamp = jiffies;
+               buffer_info->next_to_watch = i;
  
                if (unlikely(++i == tx_ring->count)) i = 0;
                tx_ring->next_to_use = i;
@@@ -2754,6 -2901,7 +2900,7 @@@ e1000_tx_map(struct e1000_adapter *adap
                                size,
                                PCI_DMA_TODEVICE);
                buffer_info->time_stamp = jiffies;
+               buffer_info->next_to_watch = i;
  
                len -= size;
                offset += size;
                                        size,
                                        PCI_DMA_TODEVICE);
                        buffer_info->time_stamp = jiffies;
+                       buffer_info->next_to_watch = i;
  
                        len -= size;
                        offset += size;
@@@ -2858,6 -3007,9 +3006,9 @@@ e1000_tx_queue(struct e1000_adapter *ad
  
        tx_ring->next_to_use = i;
        writel(i, adapter->hw.hw_addr + tx_ring->tdt);
+       /* we need this if more than one processor can write to our tail
+        * at a time; it synchronizes IO on IA64/Altix systems */
+       mmiowb();
  }
  
  /**
@@@ -2951,6 -3103,7 +3102,7 @@@ static int __e1000_maybe_stop_tx(struc
  
        /* A reprieve! */
        netif_start_queue(netdev);
+       ++adapter->restart_queue;
        return 0;
  }
  
@@@ -3009,9 -3162,9 +3161,9 @@@ e1000_xmit_frame(struct sk_buff *skb, s
                max_per_txd = min(mss << 2, max_per_txd);
                max_txd_pwr = fls(max_per_txd) - 1;
  
-       /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
-        * points to just header, pull a few bytes of payload from
-        * frags into skb->data */
+               /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
+                * points to just header, pull a few bytes of payload from
+                * frags into skb->data */
                hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
                if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
                        switch (adapter->hw.mac_type) {
@@@ -3153,10 -3306,9 +3305,10 @@@ e1000_tx_timeout(struct net_device *net
  }
  
  static void
 -e1000_reset_task(struct net_device *netdev)
 +e1000_reset_task(struct work_struct *work)
  {
 -      struct e1000_adapter *adapter = netdev_priv(netdev);
 +      struct e1000_adapter *adapter =
 +              container_of(work, struct e1000_adapter, reset_task);
  
        e1000_reinit_locked(adapter);
  }
@@@ -3316,12 -3468,12 +3468,12 @@@ e1000_update_stats(struct e1000_adapte
        adapter->stats.roc += E1000_READ_REG(hw, ROC);
  
        if (adapter->hw.mac_type != e1000_ich8lan) {
-       adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
-       adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
-       adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
-       adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
-       adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
-       adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
+               adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
+               adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
+               adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
+               adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
+               adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
+               adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
        }
  
        adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
        adapter->stats.tpr += E1000_READ_REG(hw, TPR);
  
        if (adapter->hw.mac_type != e1000_ich8lan) {
-       adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
-       adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
-       adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
-       adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
-       adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
-       adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
+               adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
+               adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
+               adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
+               adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
+               adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
+               adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
        }
  
        adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
                adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
  
                if (adapter->hw.mac_type != e1000_ich8lan) {
-               adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
-               adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
-               adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
-               adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
-               adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
-               adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
-               adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
+                       adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
+                       adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
+                       adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
+                       adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
+                       adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
+                       adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
+                       adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
                }
        }
  
        /* Fill out the OS statistics structure */
        adapter->net_stats.rx_packets = adapter->stats.gprc;
        adapter->net_stats.tx_packets = adapter->stats.gptc;
        adapter->net_stats.rx_bytes = adapter->stats.gorcl;
        /* Tx Dropped needs to be maintained elsewhere */
  
        /* Phy Stats */
        if (hw->media_type == e1000_media_type_copper) {
                if ((adapter->link_speed == SPEED_1000) &&
                   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
  
        spin_unlock_irqrestore(&adapter->stats_lock, flags);
  }
+ #ifdef CONFIG_PCI_MSI
+ /**
+  * e1000_intr_msi - Interrupt Handler
+  * @irq: interrupt number
+  * @data: pointer to a network interface device structure
+  **/
+ static
+ irqreturn_t e1000_intr_msi(int irq, void *data)
+ {
+       struct net_device *netdev = data;
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+ #ifndef CONFIG_E1000_NAPI
+       int i;
+ #endif
+       /* this code avoids the read of ICR but may need up to 1000
+        * interrupts after a link change event before it notices the change */
+       if (++adapter->detect_link >= 1000) {
+               uint32_t icr = E1000_READ_REG(hw, ICR);
+ #ifdef CONFIG_E1000_NAPI
+               /* read ICR disables interrupts using IAM, so keep up with our
+                * enable/disable accounting */
+               atomic_inc(&adapter->irq_sem);
+ #endif
+               adapter->detect_link = 0;
+               if ((icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) &&
+                   (icr & E1000_ICR_INT_ASSERTED)) {
+                       hw->get_link_status = 1;
+                       /* 80003ES2LAN workaround--
+                       * For packet buffer work-around on link down event;
+                       * disable receives here in the ISR and
+                       * reset adapter in watchdog
+                       */
+                       if (netif_carrier_ok(netdev) &&
+                           (adapter->hw.mac_type == e1000_80003es2lan)) {
+                               /* disable receives */
+                               uint32_t rctl = E1000_READ_REG(hw, RCTL);
+                               E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
+                       }
+                       /* guard against interrupt when we're going down */
+                       if (!test_bit(__E1000_DOWN, &adapter->flags))
+                               mod_timer(&adapter->watchdog_timer,
+                                         jiffies + 1);
+               }
+       } else {
+               E1000_WRITE_REG(hw, ICR, (0xffffffff & ~(E1000_ICR_RXSEQ |
+                                                        E1000_ICR_LSC)));
+               /* bummer we have to flush here, but things break otherwise as
+                * some event appears to be lost or delayed and throughput
+                * drops.  In almost all tests this flush is unnecessary */
+               E1000_WRITE_FLUSH(hw);
+ #ifdef CONFIG_E1000_NAPI
+               /* Interrupt Auto-Mask (IAM)...upon writing ICR, interrupts are
+                * masked.  No need for the IMC write, but it does mean we
+                * should account for it ASAP. */
+               atomic_inc(&adapter->irq_sem);
+ #endif
+       }
+ #ifdef CONFIG_E1000_NAPI
+       if (likely(netif_rx_schedule_prep(netdev))) {
+               adapter->total_tx_bytes = 0;
+               adapter->total_tx_packets = 0;
+               adapter->total_rx_bytes = 0;
+               adapter->total_rx_packets = 0;
+               __netif_rx_schedule(netdev);
+       } else
+               e1000_irq_enable(adapter);
+ #else
+       adapter->total_tx_bytes = 0;
+       adapter->total_rx_bytes = 0;
+       adapter->total_tx_packets = 0;
+       adapter->total_rx_packets = 0;
+       for (i = 0; i < E1000_MAX_INTR; i++)
+               if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+                  !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
+                       break;
+       if (likely(adapter->itr_setting & 3))
+               e1000_set_itr(adapter);
+ #endif
+       return IRQ_HANDLED;
+ }
+ #endif
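
The MSI fast path above exploits the fact that message-signalled
interrupts are never shared: the handler always returns IRQ_HANDLED and
normally skips the (clearing) ICR read altogether, acknowledging
everything except the RXSEQ/LSC link bits with a direct ICR write.  A
latched link-change event therefore survives until the every-1000th
interrupt takes the slow path, reads ICR and kicks the watchdog timer.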
  
  /**
   * e1000_intr - Interrupt Handler
@@@ -3458,7 -3697,17 +3697,17 @@@ e1000_intr(int irq, void *data
        uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
  #ifndef CONFIG_E1000_NAPI
        int i;
- #else
+ #endif
+       if (unlikely(!icr))
+               return IRQ_NONE;  /* Not our interrupt */
+ #ifdef CONFIG_E1000_NAPI
+       /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+        * not set, then the adapter didn't send an interrupt */
+       if (unlikely(hw->mac_type >= e1000_82571 &&
+                    !(icr & E1000_ICR_INT_ASSERTED)))
+               return IRQ_NONE;
        /* Interrupt Auto-Mask...upon reading ICR,
         * interrupts are masked.  No need for the
         * IMC write, but it does mean we should
                atomic_inc(&adapter->irq_sem);
  #endif
  
-       if (unlikely(!icr)) {
- #ifdef CONFIG_E1000_NAPI
-               if (hw->mac_type >= e1000_82571)
-                       e1000_irq_enable(adapter);
- #endif
-               return IRQ_NONE;  /* Not our interrupt */
-       }
        if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
                hw->get_link_status = 1;
                /* 80003ES2LAN workaround--
  
  #ifdef CONFIG_E1000_NAPI
        if (unlikely(hw->mac_type < e1000_82571)) {
+               /* disable interrupts, without the synchronize_irq bit */
                atomic_inc(&adapter->irq_sem);
                E1000_WRITE_REG(hw, IMC, ~0);
                E1000_WRITE_FLUSH(hw);
        }
-       if (likely(netif_rx_schedule_prep(netdev)))
+       if (likely(netif_rx_schedule_prep(netdev))) {
+               adapter->total_tx_bytes = 0;
+               adapter->total_tx_packets = 0;
+               adapter->total_rx_bytes = 0;
+               adapter->total_rx_packets = 0;
                __netif_rx_schedule(netdev);
-       else
+       } else
+               /* this really should not happen! if it does it is basically a
+                * bug, but not a hard error, so enable ints and continue */
                e1000_irq_enable(adapter);
  #else
        /* Writing IMC and IMS is needed for 82547.
                E1000_WRITE_REG(hw, IMC, ~0);
        }
  
+       adapter->total_tx_bytes = 0;
+       adapter->total_rx_bytes = 0;
+       adapter->total_tx_packets = 0;
+       adapter->total_rx_packets = 0;
        for (i = 0; i < E1000_MAX_INTR; i++)
                if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
                   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
                        break;
  
+       if (likely(adapter->itr_setting & 3))
+               e1000_set_itr(adapter);
        if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
                e1000_irq_enable(adapter);
  
  #endif
        return IRQ_HANDLED;
  }
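
Two behavioural fixes ride along in the e1000_intr() hunks above: the
!icr test now runs before any NAPI irq_sem accounting, so an interrupt
that is not ours leaves the enable/disable state untouched, and on
82571 and later parts an ICR read without INT_ASSERTED set is treated
as another device's shared interrupt and answered with IRQ_NONE.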
  
@@@ -3572,6 -3827,8 +3827,8 @@@ e1000_clean(struct net_device *poll_dev
        if ((!tx_cleaned && (work_done == 0)) ||
           !netif_running(poll_dev)) {
  quit_polling:
+               if (likely(adapter->itr_setting & 3))
+                       e1000_set_itr(adapter);
                netif_rx_complete(poll_dev);
                e1000_irq_enable(adapter);
                return 0;
@@@ -3598,6 -3855,7 +3855,7 @@@ e1000_clean_tx_irq(struct e1000_adapte
        unsigned int count = 0;
  #endif
        boolean_t cleaned = FALSE;
+       unsigned int total_tx_bytes = 0, total_tx_packets = 0;
  
        i = tx_ring->next_to_clean;
        eop = tx_ring->buffer_info[i].next_to_watch;
                        buffer_info = &tx_ring->buffer_info[i];
                        cleaned = (i == eop);
  
+                       if (cleaned) {
+                               /* this packet count is wrong for TSO but has a
+                                * tendency to make dynamic ITR change more
+                                * towards bulk */
+                               total_tx_packets++;
+                               total_tx_bytes += buffer_info->skb->len;
+                       }
                        e1000_unmap_and_free_tx_resource(adapter, buffer_info);
-                       memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
+                       tx_desc->upper.data = 0;
  
                        if (unlikely(++i == tx_ring->count)) i = 0;
                }
  
                eop = tx_ring->buffer_info[i].next_to_watch;
                eop_desc = E1000_TX_DESC(*tx_ring, eop);
  #ifdef CONFIG_E1000_NAPI
                 * sees the new next_to_clean.
                 */
                smp_mb();
-               if (netif_queue_stopped(netdev))
+               if (netif_queue_stopped(netdev)) {
                        netif_wake_queue(netdev);
+                       ++adapter->restart_queue;
+               }
        }
  
        if (adapter->detect_tx_hung) {
                        netif_stop_queue(netdev);
                }
        }
+       adapter->total_tx_bytes += total_tx_bytes;
+       adapter->total_tx_packets += total_tx_packets;
        return cleaned;
  }
  
@@@ -3752,6 -4020,7 +4020,7 @@@ e1000_clean_rx_irq(struct e1000_adapte
        unsigned int i;
        int cleaned_count = 0;
        boolean_t cleaned = FALSE;
+       unsigned int total_rx_bytes = 0, total_rx_packets = 0;
  
        i = rx_ring->next_to_clean;
        rx_desc = E1000_RX_DESC(*rx_ring, i);
        while (rx_desc->status & E1000_RXD_STAT_DD) {
                struct sk_buff *skb;
                u8 status;
  #ifdef CONFIG_E1000_NAPI
                if (*work_done >= work_to_do)
                        break;
                 * done after the TBI_ACCEPT workaround above */
                length -= 4;
  
+               /* probably a little skewed due to removing CRC */
+               total_rx_bytes += length;
+               total_rx_packets++;
                /* code added for copybreak, this should improve
                 * performance for small packets with large amounts
                 * of reassembly being done in the stack */
                                /* save the skb in buffer_info as good */
                                buffer_info->skb = skb;
                                skb = new_skb;
-                               skb_put(skb, length);
                        }
-               } else
-                       skb_put(skb, length);
+                       /* else just continue with the old one */
+               }
                /* end copybreak code */
+               skb_put(skb, length);
  
                /* Receive Checksum Offload */
                e1000_rx_checksum(adapter,
@@@ -3886,6 -4159,8 +4159,8 @@@ next_desc
        if (cleaned_count)
                adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
  
+       adapter->total_rx_packets += total_rx_packets;
+       adapter->total_rx_bytes += total_rx_bytes;
        return cleaned;
  }
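
The copybreak fragment in this hunk (bracketed by the "code added for
copybreak" and "end copybreak code" comments) copies short frames into a
freshly allocated small skb and keeps the original full-size buffer in
buffer_info for reuse.  A self-contained sketch of the idea, with a
hypothetical helper and threshold rather than the driver's actual code:

        #define COPYBREAK 256   /* assumed small-packet threshold */

        /* Return the skb to hand up the stack; for short frames this is a
         * fresh copy, so the caller can recycle the original rx buffer. */
        static struct sk_buff *maybe_copybreak(struct net_device *dev,
                                               struct sk_buff *big,
                                               unsigned int len)
        {
                struct sk_buff *small;

                if (len >= COPYBREAK)
                        return big;     /* hand the original up unchanged */

                small = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
                if (!small)
                        return big;     /* allocation failed: fall back */

                skb_reserve(small, NET_IP_ALIGN);       /* align IP header */
                memcpy(skb_put(small, len), big->data, len);
                return small;
        }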
  
@@@ -3915,6 -4190,7 +4190,7 @@@ e1000_clean_rx_irq_ps(struct e1000_adap
        uint32_t length, staterr;
        int cleaned_count = 0;
        boolean_t cleaned = FALSE;
+       unsigned int total_rx_bytes = 0, total_rx_packets = 0;
  
        i = rx_ring->next_to_clean;
        rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
                        goto copydone;
                } /* if */
                }
-               
                for (j = 0; j < adapter->rx_ps_pages; j++) {
                        if (!(length= le16_to_cpu(rx_desc->wb.upper.length[j])))
                                break;
                pskb_trim(skb, skb->len - 4);
  
  copydone:
+               total_rx_bytes += skb->len;
+               total_rx_packets++;
                e1000_rx_checksum(adapter, staterr,
                                  le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
                skb->protocol = eth_type_trans(skb, netdev);
@@@ -4067,6 -4346,8 +4346,8 @@@ next_desc
        if (cleaned_count)
                adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
  
+       adapter->total_rx_packets += total_rx_packets;
+       adapter->total_rx_bytes += total_rx_bytes;
        return cleaned;
  }
  
@@@ -4234,7 -4515,7 +4515,7 @@@ e1000_alloc_rx_buffers_ps(struct e1000_
                }
  
                skb = netdev_alloc_skb(netdev,
-                                      adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+                                      adapter->rx_ps_bsize0 + NET_IP_ALIGN);
  
                if (unlikely(!skb)) {
                        adapter->alloc_rx_buff_failed++;
@@@ -4511,7 -4792,6 +4792,6 @@@ e1000_read_pcie_cap_reg(struct e1000_h
      return E1000_SUCCESS;
  }
  
  void
  e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
  {
@@@ -4534,12 -4814,12 +4814,12 @@@ e1000_vlan_rx_register(struct net_devic
                E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
  
                if (adapter->hw.mac_type != e1000_ich8lan) {
-               /* enable VLAN receive filtering */
-               rctl = E1000_READ_REG(&adapter->hw, RCTL);
-               rctl |= E1000_RCTL_VFE;
-               rctl &= ~E1000_RCTL_CFIEN;
-               E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
-               e1000_update_mng_vlan(adapter);
+                       /* enable VLAN receive filtering */
+                       rctl = E1000_READ_REG(&adapter->hw, RCTL);
+                       rctl |= E1000_RCTL_VFE;
+                       rctl &= ~E1000_RCTL_CFIEN;
+                       E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+                       e1000_update_mng_vlan(adapter);
                }
        } else {
                /* disable VLAN tag insert/strip */
                E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
  
                if (adapter->hw.mac_type != e1000_ich8lan) {
-               /* disable VLAN filtering */
-               rctl = E1000_READ_REG(&adapter->hw, RCTL);
-               rctl &= ~E1000_RCTL_VFE;
-               E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
-               if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
-                       e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
-                       adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
-               }
+                       /* disable VLAN filtering */
+                       rctl = E1000_READ_REG(&adapter->hw, RCTL);
+                       rctl &= ~E1000_RCTL_VFE;
+                       E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+                       if (adapter->mng_vlan_id !=
+                           (uint16_t)E1000_MNG_VLAN_NONE) {
+                               e1000_vlan_rx_kill_vid(netdev,
+                                                      adapter->mng_vlan_id);
+                               adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+                       }
                }
        }
  
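
The e1000 hunks above accumulate receive totals in locals and fold them into the adapter-wide counters once per cleanup pass (the counters feed the driver's interrupt-moderation accounting). A minimal user-space sketch of that accumulate-then-publish shape; "adapter_stats" and the other names are invented here:

/* Per-pass totals live in locals; shared counters are written once. */
#include <stdio.h>

struct adapter_stats {
	unsigned long long total_rx_bytes;
	unsigned long long total_rx_packets;
};

static void clean_rx_batch(struct adapter_stats *stats,
			   const unsigned int *lengths, int n)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	int i;

	for (i = 0; i < n; i++) {
		total_rx_bytes += lengths[i];	/* skb->len in the driver */
		total_rx_packets++;
	}

	/* One write-back after the loop, as the hunk does. */
	stats->total_rx_bytes += total_rx_bytes;
	stats->total_rx_packets += total_rx_packets;
}

int main(void)
{
	struct adapter_stats stats = { 0, 0 };
	unsigned int frames[] = { 64, 1514, 128 };

	clean_rx_batch(&stats, frames, 3);
	printf("%llu packets, %llu bytes\n",
	       stats.total_rx_packets, stats.total_rx_bytes);
	return 0;
}
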
index d1ebb91ed2789fa59ed0dc5757dc91da48a05ac8,7b127212e62b579107881901e5d4e00e07d13464..e628126c9c49178211032801e1da1a17a64af338
@@@ -106,7 -106,7 +106,7 @@@ static boolean_t ixgb_clean_rx_irq(stru
  static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
  void ixgb_set_ethtool_ops(struct net_device *netdev);
  static void ixgb_tx_timeout(struct net_device *dev);
 -static void ixgb_tx_timeout_task(struct net_device *dev);
 +static void ixgb_tx_timeout_task(struct work_struct *work);
  static void ixgb_vlan_rx_register(struct net_device *netdev,
                                  struct vlan_group *grp);
  static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
@@@ -489,7 -489,8 +489,7 @@@ ixgb_probe(struct pci_dev *pdev
        adapter->watchdog_timer.function = &ixgb_watchdog;
        adapter->watchdog_timer.data = (unsigned long)adapter;
  
 -      INIT_WORK(&adapter->tx_timeout_task,
 -                (void (*)(void *))ixgb_tx_timeout_task, netdev);
 +      INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
  
        strcpy(netdev->name, "eth%d");
        if((err = register_netdev(netdev)))
@@@ -1248,7 -1249,7 +1248,7 @@@ ixgb_tx_csum(struct ixgb_adapter *adapt
        if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                struct ixgb_buffer *buffer_info;
                css = skb->h.raw - skb->data;
-               cso = (skb->h.raw + skb->csum) - skb->data;
+               cso = css + skb->csum_offset;
  
                i = adapter->tx_ring.next_to_use;
                context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
@@@ -1492,10 -1493,9 +1492,10 @@@ ixgb_tx_timeout(struct net_device *netd
  }
  
  static void
 -ixgb_tx_timeout_task(struct net_device *netdev)
 +ixgb_tx_timeout_task(struct work_struct *work)
  {
 -      struct ixgb_adapter *adapter = netdev_priv(netdev);
 +      struct ixgb_adapter *adapter =
 +              container_of(work, struct ixgb_adapter, tx_timeout_task);
  
        adapter->tx_timeout_count++;
        ixgb_down(adapter, TRUE);
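
The ixgb conversion above is the pattern repeated throughout this merge: work handlers now take the struct work_struct itself, and the owning object is recovered with container_of() instead of being smuggled through a void * cookie. A runnable plain-C sketch of the mechanism, with reduced stand-ins for the kernel types:

/* The kernel's container_of from <linux/kernel.h> behaves like this. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };	/* stand-in for the kernel type */

struct ixgb_adapter {			/* reduced to what the sketch needs */
	int tx_timeout_count;
	struct work_struct tx_timeout_task;
};

/* New-style handler: takes the work item, not a driver-chosen cookie. */
static void ixgb_tx_timeout_task(struct work_struct *work)
{
	struct ixgb_adapter *adapter =
		container_of(work, struct ixgb_adapter, tx_timeout_task);

	adapter->tx_timeout_count++;
}

int main(void)
{
	struct ixgb_adapter adapter = { 0, { 0 } };

	ixgb_tx_timeout_task(&adapter.tx_timeout_task);	/* as keventd would */
	printf("timeouts: %d\n", adapter.tx_timeout_count);
	return 0;
}
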
index 98703e086ee71bd55581008f110e7c3cb462df2f,36350e6db1c1bec6a6aac55eb3c578a1846b659e..38df428023863f899e6680a9ae5972ba51b7d999
@@@ -1955,7 -1955,7 +1955,7 @@@ again
        flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                cksum_offset = (skb->h.raw - skb->data);
-               pseudo_hdr_offset = (skb->h.raw + skb->csum) - skb->data;
+               pseudo_hdr_offset = cksum_offset + skb->csum_offset;
                /* If the headers are excessively large, then we must
                 * fall back to a software checksum */
                if (unlikely(cksum_offset > 255 || pseudo_hdr_offset > 127)) {
@@@ -2615,10 -2615,9 +2615,10 @@@ static u32 myri10ge_read_reboot(struct 
   * This watchdog is used to check whether the board has suffered
   * from a parity error and needs to be recovered.
   */
 -static void myri10ge_watchdog(void *arg)
 +static void myri10ge_watchdog(struct work_struct *work)
  {
 -      struct myri10ge_priv *mgp = arg;
 +      struct myri10ge_priv *mgp =
 +              container_of(work, struct myri10ge_priv, watchdog_work);
        u32 reboot;
        int status;
        u16 cmd, vendor;
@@@ -2888,7 -2887,7 +2888,7 @@@ static int myri10ge_probe(struct pci_de
                    (unsigned long)mgp);
  
        SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
 -      INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog, mgp);
 +      INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
        status = register_netdev(netdev);
        if (status != 0) {
                dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
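
Both ixgb_tx_csum() and the myri10ge transmit path above (and skge/sungem further down) stop deriving the checksum-stuff position from skb->csum and use csum_start plus skb->csum_offset instead. A sketch of the arithmetic with a reduced sk_buff stand-in; the 14/20/16-byte offsets assume an Ethernet + IPv4 + TCP frame, and the field names are local inventions:

#include <stdio.h>

struct fake_skb {
	unsigned char data[128];	/* start of the frame */
	unsigned char *h_raw;		/* start of the transport header */
	unsigned int csum_offset;	/* checksum field offset within it */
};

int main(void)
{
	struct fake_skb skb;
	unsigned int cksum_start, cksum_stuff;

	skb.h_raw = skb.data + 14 + 20;	/* Ethernet + IPv4 headers */
	skb.csum_offset = 16;		/* TCP checksum sits at +16 */

	cksum_start = skb.h_raw - skb.data;		/* unchanged */
	cksum_stuff = cksum_start + skb.csum_offset;	/* new: no skb->csum */

	printf("checksum from byte %u, stored at byte %u\n",
	       cksum_start, cksum_stuff);
	return 0;
}
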
diff --combined drivers/net/phy/phy.c
index a443976d5dcf9d7a2d15e259d79bdfd2daa669ab,88237bdb525503d7f9fa9d53462ec6cc6ab75bd7..4044bb1ada8655ebd3a7daa9af2395068efeb34d
@@@ -7,6 -7,7 +7,7 @@@
   * Author: Andy Fleming
   *
   * Copyright (c) 2004 Freescale Semiconductor, Inc.
+  * Copyright (c) 2006  Maciej W. Rozycki
   *
   * This program is free software; you can redistribute  it and/or modify it
   * under  the terms of  the GNU General  Public License as published by the
@@@ -32,6 -33,8 +33,8 @@@
  #include <linux/mii.h>
  #include <linux/ethtool.h>
  #include <linux/phy.h>
+ #include <linux/timer.h>
+ #include <linux/workqueue.h>
  
  #include <asm/io.h>
  #include <asm/irq.h>
@@@ -394,7 -397,7 +397,7 @@@ out_unlock
  EXPORT_SYMBOL(phy_start_aneg);
  
  
 -static void phy_change(void *data);
 +static void phy_change(struct work_struct *work);
  static void phy_timer(unsigned long data);
  
  /* phy_start_machine:
@@@ -484,6 -487,9 +487,9 @@@ static irqreturn_t phy_interrupt(int ir
  {
        struct phy_device *phydev = phy_dat;
  
+       if (PHY_HALTED == phydev->state)
+               return IRQ_NONE;                /* It can't be ours.  */
        /* The MDIO bus is not allowed to be written in interrupt
         * context, so we need to disable the irq here.  A work
         * queue will write the PHY to disable and clear the
@@@ -549,7 -555,7 +555,7 @@@ int phy_start_interrupts(struct phy_dev
  {
        int err = 0;
  
 -      INIT_WORK(&phydev->phy_queue, phy_change, phydev);
 +      INIT_WORK(&phydev->phy_queue, phy_change);
  
        if (request_irq(phydev->irq, phy_interrupt,
                                IRQF_SHARED,
@@@ -577,6 -583,13 +583,13 @@@ int phy_stop_interrupts(struct phy_devi
        if (err)
                phy_error(phydev);
  
+       /*
+        * Finish any pending work; we might have been scheduled
+        * to be called from keventd ourselves, though.
+        */
+       if (!current_is_keventd())
+               flush_scheduled_work();
        free_irq(phydev->irq, phydev);
  
        return err;
@@@ -585,11 -598,10 +598,11 @@@ EXPORT_SYMBOL(phy_stop_interrupts)
  
  
  /* Scheduled by the phy_interrupt/timer to handle PHY changes */
 -static void phy_change(void *data)
 +static void phy_change(struct work_struct *work)
  {
        int err;
 -      struct phy_device *phydev = data;
 +      struct phy_device *phydev =
 +              container_of(work, struct phy_device, phy_queue);
  
        err = phy_disable_interrupts(phydev);
  
        enable_irq(phydev->irq);
  
        /* Reenable interrupts */
-       err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
+       if (PHY_HALTED != phydev->state)
+               err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
  
        if (err)
                goto irq_enable_err;
@@@ -625,18 -638,24 +639,24 @@@ void phy_stop(struct phy_device *phydev
        if (PHY_HALTED == phydev->state)
                goto out_unlock;
  
-       if (phydev->irq != PHY_POLL) {
-               /* Clear any pending interrupts */
-               phy_clear_interrupt(phydev);
+       phydev->state = PHY_HALTED;
  
+       if (phydev->irq != PHY_POLL) {
                /* Disable PHY Interrupts */
                phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
-       }
  
-       phydev->state = PHY_HALTED;
+               /* Clear any pending interrupts */
+               phy_clear_interrupt(phydev);
+       }
  
  out_unlock:
        spin_unlock(&phydev->lock);
+       /*
+        * Cannot call flush_scheduled_work() here as desired because
+        * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
+        * will not reenable interrupts.
+        */
  }
  
  
@@@ -694,60 -713,57 +714,57 @@@ static void phy_timer(unsigned long dat
  
                        break;
                case PHY_AN:
+                       err = phy_read_status(phydev);
+                       if (err < 0)
+                               break;
+                       /* If the link is down, give up on
+                        * negotiation for now */
+                       if (!phydev->link) {
+                               phydev->state = PHY_NOLINK;
+                               netif_carrier_off(phydev->attached_dev);
+                               phydev->adjust_link(phydev->attached_dev);
+                               break;
+                       }
                        /* Check if negotiation is done.  Break
                         * if there's an error */
                        err = phy_aneg_done(phydev);
                        if (err < 0)
                                break;
  
-                       /* If auto-negotiation is done, we change to
-                        * either RUNNING, or NOLINK */
+                       /* If AN is done, we're running */
                        if (err > 0) {
-                               err = phy_read_status(phydev);
+                               phydev->state = PHY_RUNNING;
+                               netif_carrier_on(phydev->attached_dev);
+                               phydev->adjust_link(phydev->attached_dev);
  
-                               if (err)
+                       } else if (0 == phydev->link_timeout--) {
+                               int idx;
+                               needs_aneg = 1;
+                               /* If we have the magic_aneg bit,
+                                * we try again */
+                               if (phydev->drv->flags & PHY_HAS_MAGICANEG)
                                        break;
  
-                               if (phydev->link) {
-                                       phydev->state = PHY_RUNNING;
-                                       netif_carrier_on(phydev->attached_dev);
-                               } else {
-                                       phydev->state = PHY_NOLINK;
-                                       netif_carrier_off(phydev->attached_dev);
-                               }
+                               /* The timer expired, and we still
+                                * don't have a setting, so we try
+                                * forcing it until we find one that
+                                * works, starting from the fastest speed,
+                                * and working our way down */
+                               idx = phy_find_valid(0, phydev->supported);
  
-                               phydev->adjust_link(phydev->attached_dev);
+                               phydev->speed = settings[idx].speed;
+                               phydev->duplex = settings[idx].duplex;
  
-                       } else if (0 == phydev->link_timeout--) {
-                               /* The counter expired, so either we
-                                * switch to forced mode, or the
-                                * magic_aneg bit exists, and we try aneg
-                                * again */
-                               if (!(phydev->drv->flags & PHY_HAS_MAGICANEG)) {
-                                       int idx;
-                                       /* We'll start from the
-                                        * fastest speed, and work
-                                        * our way down */
-                                       idx = phy_find_valid(0,
-                                                       phydev->supported);
-                                       phydev->speed = settings[idx].speed;
-                                       phydev->duplex = settings[idx].duplex;
-                                       
-                                       phydev->autoneg = AUTONEG_DISABLE;
-                                       phydev->state = PHY_FORCING;
-                                       phydev->link_timeout =
-                                               PHY_FORCE_TIMEOUT;
-                                       pr_info("Trying %d/%s\n",
-                                                       phydev->speed,
-                                                       DUPLEX_FULL ==
-                                                       phydev->duplex ?
-                                                       "FULL" : "HALF");
-                               }
+                               phydev->autoneg = AUTONEG_DISABLE;
  
-                               needs_aneg = 1;
+                               pr_info("Trying %d/%s\n", phydev->speed,
+                                               DUPLEX_FULL ==
+                                               phydev->duplex ?
+                                               "FULL" : "HALF");
                        }
                        break;
                case PHY_NOLINK:
                        }
                        break;
                case PHY_FORCING:
-                       err = phy_read_status(phydev);
+                       err = genphy_update_link(phydev);
  
                        if (err)
                                break;
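
phy_stop() above now sets PHY_HALTED before touching the interrupt hardware, and phy_change() tests the state before re-enabling, since flush_scheduled_work() cannot be called there under rtnl_lock(). A condensed sketch of that ordering; types are reduced, locking is omitted, and flag flips stand in for hardware access:

#include <stdio.h>

enum phy_state { PHY_RUNNING, PHY_HALTED };

struct phy_device {
	enum phy_state state;
	int irq_enabled;
};

static void phy_stop(struct phy_device *phydev)
{
	if (PHY_HALTED == phydev->state)
		return;

	phydev->state = PHY_HALTED;	/* set first, so late work sees it */
	phydev->irq_enabled = 0;	/* then disable interrupts */
}

/* Deferred work that may still run after phy_stop() returns. */
static void phy_change(struct phy_device *phydev)
{
	if (PHY_HALTED != phydev->state)
		phydev->irq_enabled = 1;	/* reenable only if still live */
}

int main(void)
{
	struct phy_device phydev = { PHY_RUNNING, 1 };

	phy_stop(&phydev);
	phy_change(&phydev);		/* a straggler work item */
	printf("irq_enabled = %d\n", phydev.irq_enabled);
	return 0;
}
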
diff --combined drivers/net/r8169.c
index 1f9663a70823112a32b08d74230ae564db9f8d97,45d3ca431957bc43070cd95400a99cd67bf58537..85a392fab5cc4dbc1541033cc35d5aa5401dc2b6
@@@ -424,7 -424,6 +424,7 @@@ struct ring_info 
  struct rtl8169_private {
        void __iomem *mmio_addr;        /* memory map physical address */
        struct pci_dev *pci_dev;        /* Index of PCI device */
 +      struct net_device *dev;
        struct net_device_stats stats;  /* statistics of net device */
        spinlock_t lock;                /* spin lock flag */
        u32 msg_enable;
        void (*phy_reset_enable)(void __iomem *);
        unsigned int (*phy_reset_pending)(void __iomem *);
        unsigned int (*link_ok)(void __iomem *);
 -      struct work_struct task;
 +      struct delayed_work task;
        unsigned wol_enabled : 1;
  };
  
@@@ -572,8 -571,8 +572,8 @@@ static void rtl8169_xmii_reset_enable(v
  {
        unsigned int val;
  
-       val = (mdio_read(ioaddr, MII_BMCR) | BMCR_RESET) & 0xffff;
-       mdio_write(ioaddr, MII_BMCR, val);
+       mdio_write(ioaddr, MII_BMCR, BMCR_RESET);
+       val = mdio_read(ioaddr, MII_BMCR);
  }
  
  static void rtl8169_check_link_status(struct net_device *dev,
@@@ -1407,6 -1406,22 +1407,22 @@@ static void rtl8169_release_board(struc
        free_netdev(dev);
  }
  
+ static void rtl8169_phy_reset(struct net_device *dev,
+                             struct rtl8169_private *tp)
+ {
+       void __iomem *ioaddr = tp->mmio_addr;
+       int i;
+
+       tp->phy_reset_enable(ioaddr);
+       for (i = 0; i < 100; i++) {
+               if (!tp->phy_reset_pending(ioaddr))
+                       return;
+               msleep(1);
+       }
+       if (netif_msg_link(tp))
+               printk(KERN_ERR "%s: PHY reset failed.\n", dev->name);
+ }
+
  static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
  {
        void __iomem *ioaddr = tp->mmio_addr;
  
        rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);
  
+       rtl8169_phy_reset(dev, tp);
        rtl8169_set_speed(dev, autoneg, speed, duplex);
  
        if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
@@@ -1474,8 -1491,8 +1492,8 @@@ rtl8169_init_one(struct pci_dev *pdev, 
        struct rtl8169_private *tp;
        struct net_device *dev;
        void __iomem *ioaddr;
-       unsigned int i, pm_cap;
-       int rc;
+       unsigned int pm_cap;
+       int i, rc;
  
        if (netif_msg_drv(&debug)) {
                printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
        SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
        tp = netdev_priv(dev);
 +      tp->dev = dev;
        tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
  
        /* enable device (incl. PCI PM wakeup and hotplug setup) */
@@@ -1766,7 -1782,7 +1784,7 @@@ static int rtl8169_open(struct net_devi
        if (retval < 0)
                goto err_free_rx;
  
 -      INIT_WORK(&tp->task, NULL, dev);
 +      INIT_DELAYED_WORK(&tp->task, NULL);
  
        rtl8169_hw_start(dev);
  
@@@ -2089,11 -2105,11 +2107,11 @@@ static void rtl8169_tx_clear(struct rtl
        tp->cur_tx = tp->dirty_tx = 0;
  }
  
 -static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *))
 +static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
  {
        struct rtl8169_private *tp = netdev_priv(dev);
  
 -      PREPARE_WORK(&tp->task, task, dev);
 +      PREPARE_DELAYED_WORK(&tp->task, task);
        schedule_delayed_work(&tp->task, 4);
  }
  
@@@ -2112,11 -2128,9 +2130,11 @@@ static void rtl8169_wait_for_quiescence
        netif_poll_enable(dev);
  }
  
 -static void rtl8169_reinit_task(void *_data)
 +static void rtl8169_reinit_task(struct work_struct *work)
  {
 -      struct net_device *dev = _data;
 +      struct rtl8169_private *tp =
 +              container_of(work, struct rtl8169_private, task.work);
 +      struct net_device *dev = tp->dev;
        int ret;
  
        if (netif_running(dev)) {
        }
  }
  
 -static void rtl8169_reset_task(void *_data)
 +static void rtl8169_reset_task(struct work_struct *work)
  {
 -      struct net_device *dev = _data;
 -      struct rtl8169_private *tp = netdev_priv(dev);
 +      struct rtl8169_private *tp =
 +              container_of(work, struct rtl8169_private, task.work);
 +      struct net_device *dev = tp->dev;
  
        if (!netif_running(dev))
                return;
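
The tp->dev back-pointer added above exists because of the same conversion: with delayed_work the handler receives the embedded work_struct, container_of() recovers only the private struct, and the net_device must now be reachable from it. A runnable sketch with reduced stand-ins for the kernel types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };
struct delayed_work { struct work_struct work; };
struct net_device { char name[16]; };

struct rtl8169_private {
	struct net_device *dev;		/* the new back-pointer */
	struct delayed_work task;
};

static void rtl8169_reset_task(struct work_struct *work)
{
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, task.work);
	struct net_device *dev = tp->dev;

	printf("reset scheduled for %s\n", dev->name);
}

int main(void)
{
	struct net_device dev = { "eth0" };
	struct rtl8169_private tp = { &dev, { { 0 } } };

	rtl8169_reset_task(&tp.task.work);	/* as the workqueue would */
	return 0;
}
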
diff --combined drivers/net/skge.c
index 3b67614372a7cc150f16a9502ef8b3adff85bd06,5513907e8393d4188dd727aeea1f484d7598bd92..b60f0451f6cdaddff325b599ae5f37e85a6e5c4e
@@@ -1327,11 -1327,10 +1327,11 @@@ static void xm_check_link(struct net_de
   * Since internal PHY is wired to a level triggered pin, can't
   * get an interrupt when carrier is detected.
   */
 -static void xm_link_timer(void *arg)
 +static void xm_link_timer(struct work_struct *work)
  {
 -      struct net_device *dev = arg;
 -      struct skge_port *skge = netdev_priv(arg);
 +      struct skge_port *skge =
 +              container_of(work, struct skge_port, link_thread.work);
 +      struct net_device *dev = skge->netdev;
        struct skge_hw *hw = skge->hw;
        int port = skge->port;
  
@@@ -2155,8 -2154,6 +2155,6 @@@ static void yukon_link_down(struct skge
        int port = skge->port;
        u16 ctrl;
  
-       gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
        ctrl = gma_read16(hw, port, GM_GP_CTRL);
        ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
        gma_write16(hw, port, GM_GP_CTRL, ctrl);
                gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
        }
  
-       yukon_reset(hw, port);
        skge_link_down(skge);
  
        yukon_init(hw, port);
@@@ -2256,6 -2252,7 +2253,7 @@@ static void skge_phy_reset(struct skge_
  {
        struct skge_hw *hw = skge->hw;
        int port = skge->port;
+       struct net_device *dev = hw->dev[port];
  
        netif_stop_queue(skge->netdev);
        netif_carrier_off(skge->netdev);
                yukon_init(hw, port);
        }
        mutex_unlock(&hw->phy_mutex);
+       dev->set_multicast_list(dev);
  }
  
  /* Basic MII support */
@@@ -2566,7 -2565,7 +2566,7 @@@ static int skge_xmit_frame(struct sk_bu
  
                td->csum_offs = 0;
                td->csum_start = offset;
-               td->csum_write = offset + skb->csum;
+               td->csum_write = offset + skb->csum_offset;
        } else
                control = BMU_CHECK;
  
@@@ -3073,9 -3072,9 +3073,9 @@@ static void skge_error_irq(struct skge_
   * because accessing phy registers requires spin wait which might
   * cause excess interrupt latency.
   */
 -static void skge_extirq(void *arg)
 +static void skge_extirq(struct work_struct *work)
  {
 -      struct skge_hw *hw = arg;
 +      struct skge_hw *hw = container_of(work, struct skge_hw, phy_work);
        int port;
  
        mutex_lock(&hw->phy_mutex);
@@@ -3457,7 -3456,7 +3457,7 @@@ static struct net_device *skge_devinit(
        skge->port = port;
  
        /* Only used for Genesis XMAC */
 -      INIT_WORK(&skge->link_thread, xm_link_timer, dev);
 +      INIT_DELAYED_WORK(&skge->link_thread, xm_link_timer);
  
        if (hw->chip_id != CHIP_ID_GENESIS) {
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
@@@ -3544,7 -3543,7 +3544,7 @@@ static int __devinit skge_probe(struct 
  
        hw->pdev = pdev;
        mutex_init(&hw->phy_mutex);
 -      INIT_WORK(&hw->phy_work, skge_extirq, hw);
 +      INIT_WORK(&hw->phy_work, skge_extirq);
        spin_lock_init(&hw->hw_lock);
  
        hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
diff --combined drivers/net/spider_net.c
index f88fcac0e46a13c96789d26331916f3c681c3cb7,cef7e6671c49b658d9ce618eafad16213b7b09f9..f16f696c1ff28f07a2317780f87483aef2bdc752
@@@ -644,20 -644,12 +644,12 @@@ spider_net_prepare_tx_descr(struct spid
        struct spider_net_descr *descr;
        dma_addr_t buf;
        unsigned long flags;
-       int length;
  
-       length = skb->len;
-       if (length < ETH_ZLEN) {
-               if (skb_pad(skb, ETH_ZLEN-length))
-                       return 0;
-               length = ETH_ZLEN;
-       }
-       buf = pci_map_single(card->pdev, skb->data, length, PCI_DMA_TODEVICE);
+       buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(buf)) {
                if (netif_msg_tx_err(card) && net_ratelimit())
                        pr_err("could not iommu-map packet (%p, %i). "
-                                 "Dropping packet\n", skb->data, length);
+                                 "Dropping packet\n", skb->data, skb->len);
                card->spider_stats.tx_iommu_map_error++;
                return -ENOMEM;
        }
        card->tx_chain.head = descr->next;
  
        descr->buf_addr = buf;
-       descr->buf_size = length;
+       descr->buf_size = skb->len;
        descr->next_descr_addr = 0;
        descr->skb = skb;
        descr->data_status = 0;
@@@ -802,8 -794,8 +794,8 @@@ spider_net_release_tx_chain(struct spid
  
                /* unmap the skb */
                if (skb) {
-                       int len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
-                       pci_unmap_single(card->pdev, buf_addr, len, PCI_DMA_TODEVICE);
+                       pci_unmap_single(card->pdev, buf_addr, skb->len,
+                                       PCI_DMA_TODEVICE);
                        dev_kfree_skb(skb);
                }
        }
@@@ -1641,7 -1633,7 +1633,7 @@@ spider_net_enable_card(struct spider_ne
                             SPIDER_NET_INT2_MASK_VALUE);
  
        spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
-                            SPIDER_NET_GDTBSTA | SPIDER_NET_GDTDCEIDIS);
+                            SPIDER_NET_GDTBSTA);
  }
  
  /**
@@@ -1945,11 -1937,10 +1937,11 @@@ spider_net_stop(struct net_device *netd
   * called as task when tx hangs, resets interface (if interface is up)
   */
  static void
 -spider_net_tx_timeout_task(void *data)
 +spider_net_tx_timeout_task(struct work_struct *work)
  {
 -      struct net_device *netdev = data;
 -      struct spider_net_card *card = netdev_priv(netdev);
 +      struct spider_net_card *card =
 +              container_of(work, struct spider_net_card, tx_timeout_task);
 +      struct net_device *netdev = card->netdev;
  
        if (!(netdev->flags & IFF_UP))
                goto out;
@@@ -2123,7 -2114,7 +2115,7 @@@ spider_net_alloc_card(void
        card = netdev_priv(netdev);
        card->netdev = netdev;
        card->msg_enable = SPIDER_NET_DEFAULT_MSG;
 -      INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task, netdev);
 +      INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
        init_waitqueue_head(&card->waitq);
        atomic_set(&card->tx_timeout_task_counter, 0);
  
diff --combined drivers/net/sungem.c
index 004d651681ad9da83ce91a157044d816a5cfdb1c,334c6cfd659573b627ac25e875cc5c12ed0d5538..d03a9a849c064f9c969999e1f897b785d58ada9b
@@@ -1030,7 -1030,7 +1030,7 @@@ static int gem_start_xmit(struct sk_buf
                u64 csum_start_off, csum_stuff_off;
  
                csum_start_off = (u64) (skb->h.raw - skb->data);
-               csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
+               csum_stuff_off = csum_start_off + skb->csum_offset;
  
                ctrl = (TXDCTRL_CENAB |
                        (csum_start_off << 15) |
@@@ -2281,9 -2281,9 +2281,9 @@@ static void gem_do_stop(struct net_devi
        }
  }
  
 -static void gem_reset_task(void *data)
 +static void gem_reset_task(struct work_struct *work)
  {
 -      struct gem *gp = (struct gem *) data;
 +      struct gem *gp = container_of(work, struct gem, reset_task);
  
        mutex_lock(&gp->pm_mutex);
  
@@@ -3043,7 -3043,7 +3043,7 @@@ static int __devinit gem_init_one(struc
        gp->link_timer.function = gem_link_timer;
        gp->link_timer.data = (unsigned long) gp;
  
 -      INIT_WORK(&gp->reset_task, gem_reset_task, gp);
 +      INIT_WORK(&gp->reset_task, gem_reset_task);
  
        gp->lstate = link_down;
        gp->timer_ticks = 0;
index fbc0c087f53c831317e7a50106db12c118655d9d,94dfb92fab5c2f501624e9dbda8c582b29167754..8286678513b937db84bbfebff057191a62a0bdab
  
  /* Chipcommon registers. */
  #define BCM43xx_CHIPCOMMON_CAPABILITIES       0x04
+ #define BCM43xx_CHIPCOMMON_CTL                        0x28
  #define BCM43xx_CHIPCOMMON_PLLONDELAY         0xB0
  #define BCM43xx_CHIPCOMMON_FREFSELDELAY               0xB4
  #define BCM43xx_CHIPCOMMON_SLOWCLKCTL         0xB8
  /* SBTOPCI2 values. */
  #define BCM43xx_SBTOPCI2_PREFETCH     0x4
  #define BCM43xx_SBTOPCI2_BURST                0x8
+ #define BCM43xx_SBTOPCI2_MEMREAD_MULTI        0x20
+ /* PCI-E core registers. */
+ #define BCM43xx_PCIECORE_REG_ADDR      0x0130
+ #define BCM43xx_PCIECORE_REG_DATA      0x0134
+ #define BCM43xx_PCIECORE_MDIO_CTL      0x0128
+ #define BCM43xx_PCIECORE_MDIO_DATA     0x012C
+ /* PCI-E registers. */
+ #define BCM43xx_PCIE_TLP_WORKAROUND    0x0004
+ #define BCM43xx_PCIE_DLLP_LINKCTL      0x0100
+ /* PCI-E MDIO bits. */
+ #define BCM43xx_PCIE_MDIO_ST   0x40000000
+ #define BCM43xx_PCIE_MDIO_WT   0x10000000
+ #define BCM43xx_PCIE_MDIO_DEV  22
+ #define BCM43xx_PCIE_MDIO_REG  18
+ #define BCM43xx_PCIE_MDIO_TA   0x00020000
+ #define BCM43xx_PCIE_MDIO_TC   0x0100
+ /* MDIO devices. */
+ #define BCM43xx_MDIO_SERDES_RX        0x1F
+ /* SERDES RX registers. */
+ #define BCM43xx_SERDES_RXTIMER        0x2
+ #define BCM43xx_SERDES_CDR    0x6
+ #define BCM43xx_SERDES_CDR_BW 0x7
  
  /* Chipcommon capabilities. */
  #define BCM43xx_CAPABILITIES_PCTL             0x00040000
  #define BCM43xx_COREID_USB20_HOST       0x819
  #define BCM43xx_COREID_USB20_DEV        0x81a
  #define BCM43xx_COREID_SDIO_HOST        0x81b
+ #define BCM43xx_COREID_PCIE           0x820
  
  /* Core Information Registers */
  #define BCM43xx_CIR_BASE              0xf00
  #define BCM43xx_DEFAULT_SHORT_RETRY_LIMIT     7
  #define BCM43xx_DEFAULT_LONG_RETRY_LIMIT      4
  
+ /* FIXME: the next line is a guess as to what the maximum RSSI value might be */
+ #define RX_RSSI_MAX                           60
  /* Max size of a security key */
  #define BCM43xx_SEC_KEYSIZE                   16
  /* Security algorithms. */
@@@ -787,7 -819,7 +819,7 @@@ struct bcm43xx_private 
        struct tasklet_struct isr_tasklet;
  
        /* Periodic tasks */
 -      struct work_struct periodic_work;
 +      struct delayed_work periodic_work;
        unsigned int periodic_state;
  
        struct work_struct restart_work;
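
Of the new PCI-E MDIO constants above, BCM43xx_PCIE_MDIO_DEV and _REG are shift counts while ST/WT/TA are mask bits; bcm43xx_pcie_mdio_write() in the companion .c file ORs them into one command word. A stand-alone sketch of that assembly (values copied from the header; the printed word is illustrative only):

#include <stdint.h>
#include <stdio.h>

#define BCM43xx_PCIE_MDIO_ST	0x40000000
#define BCM43xx_PCIE_MDIO_WT	0x10000000
#define BCM43xx_PCIE_MDIO_DEV	22
#define BCM43xx_PCIE_MDIO_REG	18
#define BCM43xx_PCIE_MDIO_TA	0x00020000

static uint32_t mdio_write_word(uint8_t dev, uint8_t reg, uint16_t data)
{
	return BCM43xx_PCIE_MDIO_ST | BCM43xx_PCIE_MDIO_WT |
	       ((uint32_t)dev << BCM43xx_PCIE_MDIO_DEV) |
	       ((uint32_t)reg << BCM43xx_PCIE_MDIO_REG) |
	       BCM43xx_PCIE_MDIO_TA | data;
}

int main(void)
{
	/* SERDES RX CDR bandwidth write from the setup code: */
	printf("0x%08x\n", mdio_write_word(0x1F, 0x7, 0x1466));
	return 0;
}
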
index 728a9b789fdf54fa569e0fa0a96eec9f2c4ad686,5b3c27359a1840ba2c2bf59bd1264da401fd720e..2ec2e5afce67dd67d6e62e235e4ac68db37fb76c
@@@ -130,6 -130,10 +130,10 @@@ MODULE_PARM_DESC(fwpostfix, "Postfix fo
        { PCI_VENDOR_ID_BROADCOM, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        /* Broadcom 4307 802.11b */
        { PCI_VENDOR_ID_BROADCOM, 0x4307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+       /* Broadcom 4311 802.11(a)/b/g */
+       { PCI_VENDOR_ID_BROADCOM, 0x4311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+       /* Broadcom 4312 802.11a/b/g */
+       { PCI_VENDOR_ID_BROADCOM, 0x4312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        /* Broadcom 4318 802.11b/g */
        { PCI_VENDOR_ID_BROADCOM, 0x4318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        /* Broadcom 4319 802.11a/b/g */
@@@ -2600,8 -2604,9 +2604,9 @@@ static int bcm43xx_probe_cores(struct b
        /* fetch sb_id_hi from core information registers */
        sb_id_hi = bcm43xx_read32(bcm, BCM43xx_CIR_SB_ID_HI);
  
-       core_id = (sb_id_hi & 0xFFF0) >> 4;
-       core_rev = (sb_id_hi & 0xF);
+       core_id = (sb_id_hi & 0x8FF0) >> 4;
+       core_rev = (sb_id_hi & 0x7000) >> 8;
+       core_rev |= (sb_id_hi & 0xF);
        core_vendor = (sb_id_hi & 0xFFFF0000) >> 16;
  
        /* if present, chipcommon is always core 0; read the chipid from it */
                bcm->chip_id, bcm->chip_rev);
        dprintk(KERN_INFO PFX "Number of cores: %d\n", core_count);
        if (bcm->core_chipcommon.available) {
-               dprintk(KERN_INFO PFX "Core 0: ID 0x%x, rev 0x%x, vendor 0x%x, %s\n",
-                       core_id, core_rev, core_vendor,
-                       bcm43xx_core_enabled(bcm) ? "enabled" : "disabled");
-       }
-       if (bcm->core_chipcommon.available)
+               dprintk(KERN_INFO PFX "Core 0: ID 0x%x, rev 0x%x, vendor 0x%x\n",
+                       core_id, core_rev, core_vendor);
                current_core = 1;
-       else
+       } else
                current_core = 0;
        for ( ; current_core < core_count; current_core++) {
                struct bcm43xx_coreinfo *core;
                core_rev = (sb_id_hi & 0xF);
                core_vendor = (sb_id_hi & 0xFFFF0000) >> 16;
  
-               dprintk(KERN_INFO PFX "Core %d: ID 0x%x, rev 0x%x, vendor 0x%x, %s\n",
-                       current_core, core_id, core_rev, core_vendor,
-                       bcm43xx_core_enabled(bcm) ? "enabled" : "disabled" );
+               dprintk(KERN_INFO PFX "Core %d: ID 0x%x, rev 0x%x, vendor 0x%x\n",
+                       current_core, core_id, core_rev, core_vendor);
  
                core = NULL;
                switch (core_id) {
                case BCM43xx_COREID_PCI:
+               case BCM43xx_COREID_PCIE:
                        core = &bcm->core_pci;
                        if (core->available) {
                                printk(KERN_WARNING PFX "Multiple PCI cores found.\n");
                        case 6:
                        case 7:
                        case 9:
+                       case 10:
                                break;
                        default:
-                               printk(KERN_ERR PFX "Error: Unsupported 80211 core revision %u\n",
+                               printk(KERN_WARNING PFX
+                                      "Unsupported 80211 core revision %u\n",
                                       core_rev);
-                               err = -ENODEV;
-                               goto out;
                        }
                        bcm->nr_80211_available++;
                        core->priv = ext_80211;
@@@ -2868,16 -2869,11 +2869,11 @@@ static int bcm43xx_wireless_core_init(s
        u32 sbimconfiglow;
        u8 limit;
  
-       if (bcm->chip_rev < 5) {
+       if (bcm->core_pci.rev <= 5 && bcm->core_pci.id != BCM43xx_COREID_PCIE) {
                sbimconfiglow = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMCONFIGLOW);
                sbimconfiglow &= ~ BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK;
                sbimconfiglow &= ~ BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK;
-               if (bcm->bustype == BCM43xx_BUSTYPE_PCI)
-                       sbimconfiglow |= 0x32;
-               else if (bcm->bustype == BCM43xx_BUSTYPE_SB)
-                       sbimconfiglow |= 0x53;
-               else
-                       assert(0);
+               sbimconfiglow |= 0x32;
                bcm43xx_write32(bcm, BCM43xx_CIR_SBIMCONFIGLOW, sbimconfiglow);
        }
  
@@@ -3004,22 -3000,64 +3000,64 @@@ static void bcm43xx_pcicore_broadcast_v
  
  static int bcm43xx_pcicore_commit_settings(struct bcm43xx_private *bcm)
  {
-       int err;
-       struct bcm43xx_coreinfo *old_core;
+       int err = 0;
  
-       old_core = bcm->current_core;
-       err = bcm43xx_switch_core(bcm, &bcm->core_pci);
-       if (err)
-               goto out;
+       bcm->irq_savedstate = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
  
-       bcm43xx_pcicore_broadcast_value(bcm, 0xfd8, 0x00000000);
+       if (bcm->core_chipcommon.available) {
+               err = bcm43xx_switch_core(bcm, &bcm->core_chipcommon);
+               if (err)
+                       goto out;
+               bcm43xx_pcicore_broadcast_value(bcm, 0xfd8, 0x00000000);
+               /* this function is always called when a PCI core is mapped */
+               err = bcm43xx_switch_core(bcm, &bcm->core_pci);
+               if (err)
+                       goto out;
+       } else
+               bcm43xx_pcicore_broadcast_value(bcm, 0xfd8, 0x00000000);
+       bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate);
  
-       bcm43xx_switch_core(bcm, old_core);
-       assert(err == 0);
  out:
        return err;
  }
  
+ static u32 bcm43xx_pcie_reg_read(struct bcm43xx_private *bcm, u32 address)
+ {
+       bcm43xx_write32(bcm, BCM43xx_PCIECORE_REG_ADDR, address);
+       return bcm43xx_read32(bcm, BCM43xx_PCIECORE_REG_DATA);
+ }
+
+ static void bcm43xx_pcie_reg_write(struct bcm43xx_private *bcm, u32 address,
+                                   u32 data)
+ {
+       bcm43xx_write32(bcm, BCM43xx_PCIECORE_REG_ADDR, address);
+       bcm43xx_write32(bcm, BCM43xx_PCIECORE_REG_DATA, data);
+ }
+
+ static void bcm43xx_pcie_mdio_write(struct bcm43xx_private *bcm, u8 dev, u8 reg,
+                                   u16 data)
+ {
+       int i;
+
+       bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_CTL, 0x0082);
+       bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_DATA, BCM43xx_PCIE_MDIO_ST |
+                       BCM43xx_PCIE_MDIO_WT | (dev << BCM43xx_PCIE_MDIO_DEV) |
+                       (reg << BCM43xx_PCIE_MDIO_REG) | BCM43xx_PCIE_MDIO_TA |
+                       data);
+       udelay(10);
+       for (i = 0; i < 10; i++) {
+               if (bcm43xx_read32(bcm, BCM43xx_PCIECORE_MDIO_CTL) &
+                   BCM43xx_PCIE_MDIO_TC)
+                       break;
+               msleep(1);
+       }
+       bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_CTL, 0);
+ }
+
  /* Make an I/O Core usable. "core_mask" is the bitmask of the cores to enable.
   * To enable core 0, pass a core_mask of 1<<0
   */
@@@ -3039,7 -3077,8 +3077,8 @@@ static int bcm43xx_setup_backplane_pci_
        if (err)
                goto out;
  
-       if (bcm->core_pci.rev < 6) {
+       if (bcm->current_core->rev < 6 ||
+               bcm->current_core->id == BCM43xx_COREID_PCI) {
                value = bcm43xx_read32(bcm, BCM43xx_CIR_SBINTVEC);
                value |= (1 << backplane_flag_nr);
                bcm43xx_write32(bcm, BCM43xx_CIR_SBINTVEC, value);
                }
        }
  
-       value = bcm43xx_read32(bcm, BCM43xx_PCICORE_SBTOPCI2);
-       value |= BCM43xx_SBTOPCI2_PREFETCH | BCM43xx_SBTOPCI2_BURST;
-       bcm43xx_write32(bcm, BCM43xx_PCICORE_SBTOPCI2, value);
-       if (bcm->core_pci.rev < 5) {
-               value = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMCONFIGLOW);
-               value |= (2 << BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_SHIFT)
-                        & BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK;
-               value |= (3 << BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_SHIFT)
-                        & BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK;
-               bcm43xx_write32(bcm, BCM43xx_CIR_SBIMCONFIGLOW, value);
-               err = bcm43xx_pcicore_commit_settings(bcm);
-               assert(err == 0);
+       if (bcm->current_core->id == BCM43xx_COREID_PCI) {
+               value = bcm43xx_read32(bcm, BCM43xx_PCICORE_SBTOPCI2);
+               value |= BCM43xx_SBTOPCI2_PREFETCH | BCM43xx_SBTOPCI2_BURST;
+               bcm43xx_write32(bcm, BCM43xx_PCICORE_SBTOPCI2, value);
+               if (bcm->current_core->rev < 5) {
+                       value = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMCONFIGLOW);
+                       value |= (2 << BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_SHIFT)
+                                & BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK;
+                       value |= (3 << BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_SHIFT)
+                                & BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK;
+                       bcm43xx_write32(bcm, BCM43xx_CIR_SBIMCONFIGLOW, value);
+                       err = bcm43xx_pcicore_commit_settings(bcm);
+                       assert(err == 0);
+               } else if (bcm->current_core->rev >= 11) {
+                       value = bcm43xx_read32(bcm, BCM43xx_PCICORE_SBTOPCI2);
+                       value |= BCM43xx_SBTOPCI2_MEMREAD_MULTI;
+                       bcm43xx_write32(bcm, BCM43xx_PCICORE_SBTOPCI2, value);
+               }
+       } else {
+               if (bcm->current_core->rev == 0 || bcm->current_core->rev == 1) {
+                       value = bcm43xx_pcie_reg_read(bcm, BCM43xx_PCIE_TLP_WORKAROUND);
+                       value |= 0x8;
+                       bcm43xx_pcie_reg_write(bcm, BCM43xx_PCIE_TLP_WORKAROUND,
+                                              value);
+               }
+               if (bcm->current_core->rev == 0) {
+                       bcm43xx_pcie_mdio_write(bcm, BCM43xx_MDIO_SERDES_RX,
+                                               BCM43xx_SERDES_RXTIMER, 0x8128);
+                       bcm43xx_pcie_mdio_write(bcm, BCM43xx_MDIO_SERDES_RX,
+                                               BCM43xx_SERDES_CDR, 0x0100);
+                       bcm43xx_pcie_mdio_write(bcm, BCM43xx_MDIO_SERDES_RX,
+                                               BCM43xx_SERDES_CDR_BW, 0x1466);
+               } else if (bcm->current_core->rev == 1) {
+                       value = bcm43xx_pcie_reg_read(bcm, BCM43xx_PCIE_DLLP_LINKCTL);
+                       value |= 0x40;
+                       bcm43xx_pcie_reg_write(bcm, BCM43xx_PCIE_DLLP_LINKCTL,
+                                              value);
+               }
        }
  out_switch_back:
        err = bcm43xx_switch_core(bcm, old_core);
  out:
@@@ -3140,56 -3204,27 +3204,28 @@@ static void bcm43xx_periodic_every15sec
  
  static void do_periodic_work(struct bcm43xx_private *bcm)
  {
-       unsigned int state;
-       state = bcm->periodic_state;
-       if (state % 8 == 0)
+       if (bcm->periodic_state % 8 == 0)
                bcm43xx_periodic_every120sec(bcm);
-       if (state % 4 == 0)
+       if (bcm->periodic_state % 4 == 0)
                bcm43xx_periodic_every60sec(bcm);
-       if (state % 2 == 0)
+       if (bcm->periodic_state % 2 == 0)
                bcm43xx_periodic_every30sec(bcm);
-       if (state % 1 == 0)
-               bcm43xx_periodic_every15sec(bcm);
-       bcm->periodic_state = state + 1;
+       bcm43xx_periodic_every15sec(bcm);
  
        schedule_delayed_work(&bcm->periodic_work, HZ * 15);
  }
  
- /* Estimate a "Badness" value based on the periodic work
-  * state-machine state. "Badness" is worse (bigger), if the
-  * periodic work will take longer.
-  */
- static int estimate_periodic_work_badness(unsigned int state)
- {
-       int badness = 0;
-       if (state % 8 == 0) /* every 120 sec */
-               badness += 10;
-       if (state % 4 == 0) /* every 60 sec */
-               badness += 5;
-       if (state % 2 == 0) /* every 30 sec */
-               badness += 1;
-       if (state % 1 == 0) /* every 15 sec */
-               badness += 1;
- #define BADNESS_LIMIT 4
-       return badness;
- }
 -static void bcm43xx_periodic_work_handler(void *d)
 +static void bcm43xx_periodic_work_handler(struct work_struct *work)
  {
 -      struct bcm43xx_private *bcm = d;
 +      struct bcm43xx_private *bcm =
 +              container_of(work, struct bcm43xx_private, periodic_work.work);
        struct net_device *net_dev = bcm->net_dev;
        unsigned long flags;
        u32 savedirqs = 0;
-       int badness;
        unsigned long orig_trans_start = 0;
  
        mutex_lock(&bcm->mutex);
-       badness = estimate_periodic_work_badness(bcm->periodic_state);
-       if (badness > BADNESS_LIMIT) {
+       if (unlikely(bcm->periodic_state % 4 == 0)) {
                /* Periodic work will take a long time, so we want it to
                 * be preemptible.
                 */
  
        do_periodic_work(bcm);
  
-       if (badness > BADNESS_LIMIT) {
+       if (unlikely(bcm->periodic_state % 4 == 0)) {
                spin_lock_irqsave(&bcm->irq_lock, flags);
                tasklet_enable(&bcm->isr_tasklet);
                bcm43xx_interrupt_enable(bcm, savedirqs);
                net_dev->trans_start = orig_trans_start;
        }
        mmiowb();
+       bcm->periodic_state++;
        spin_unlock_irqrestore(&bcm->irq_lock, flags);
        mutex_unlock(&bcm->mutex);
  }
@@@ -3243,11 -3279,11 +3280,11 @@@ void bcm43xx_periodic_tasks_delete(stru
  
  void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm)
  {
 -      struct work_struct *work = &(bcm->periodic_work);
 +      struct delayed_work *work = &bcm->periodic_work;
  
        assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
 -      INIT_WORK(work, bcm43xx_periodic_work_handler, bcm);
 -      schedule_work(work);
 +      INIT_DELAYED_WORK(work, bcm43xx_periodic_work_handler);
 +      schedule_delayed_work(work, 0);
  }
  
  static void bcm43xx_security_init(struct bcm43xx_private *bcm)
@@@ -3599,7 -3635,7 +3636,7 @@@ static int bcm43xx_init_board(struct bc
        bcm43xx_periodic_tasks_setup(bcm);
  
        /*FIXME: This should be handled by softmac instead. */
 -      schedule_work(&bcm->softmac->associnfo.work);
 +      schedule_delayed_work(&bcm->softmac->associnfo.work, 0);
  
  out:
        mutex_unlock(&(bcm)->mutex);
@@@ -3677,7 -3713,7 +3714,7 @@@ static int bcm43xx_read_phyinfo(struct 
                bcm->ieee->freq_band = IEEE80211_24GHZ_BAND;
                break;
        case BCM43xx_PHYTYPE_G:
-               if (phy_rev > 7)
+               if (phy_rev > 8)
                        phy_rev_ok = 0;
                bcm->ieee->modulation = IEEE80211_OFDM_MODULATION |
                                        IEEE80211_CCK_MODULATION;
                       phy_type);
                return -ENODEV;
        };
+       bcm->ieee->perfect_rssi = RX_RSSI_MAX;
+       bcm->ieee->worst_rssi = 0;
        if (!phy_rev_ok) {
                printk(KERN_WARNING PFX "Invalid PHY Revision %x\n",
                       phy_rev);
@@@ -3975,11 -4013,6 +4014,6 @@@ static int bcm43xx_ieee80211_hard_start
        return NETDEV_TX_OK;
  }
  
- static struct net_device_stats * bcm43xx_net_get_stats(struct net_device *net_dev)
- {
-       return &(bcm43xx_priv(net_dev)->ieee->stats);
- }
  static void bcm43xx_net_tx_timeout(struct net_device *net_dev)
  {
        struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
@@@ -4093,7 -4126,6 +4127,6 @@@ static int __devinit bcm43xx_init_one(s
  
        net_dev->open = bcm43xx_net_open;
        net_dev->stop = bcm43xx_net_stop;
-       net_dev->get_stats = bcm43xx_net_get_stats;
        net_dev->tx_timeout = bcm43xx_net_tx_timeout;
  #ifdef CONFIG_NET_POLL_CONTROLLER
        net_dev->poll_controller = bcm43xx_net_poll_controller;
@@@ -4150,10 -4182,9 +4183,10 @@@ static void __devexit bcm43xx_remove_on
  /* Hard-reset the chip. Do not call this directly.
   * Use bcm43xx_controller_restart()
   */
 -static void bcm43xx_chip_reset(void *_bcm)
 +static void bcm43xx_chip_reset(struct work_struct *work)
  {
 -      struct bcm43xx_private *bcm = _bcm;
 +      struct bcm43xx_private *bcm =
 +              container_of(work, struct bcm43xx_private, restart_work);
        struct bcm43xx_phyinfo *phy;
        int err = -ENODEV;
  
@@@ -4180,7 -4211,7 +4213,7 @@@ void bcm43xx_controller_restart(struct 
        if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
                return;
        printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason);
 -      INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset, bcm);
 +      INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset);
        schedule_work(&bcm->restart_work);
  }
  
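
The periodic-work rewrite above replaces the "badness" estimator with a single periodic_state counter: every 15-second tick runs the base work, and the modulo tests select the 30/60/120-second tiers. A sketch of the dispatch, one loop iteration standing in for one tick of the self-rearming delayed work:

#include <stdio.h>

static void tick(unsigned int periodic_state)
{
	if (periodic_state % 8 == 0)
		printf("%2u: every-120s work\n", periodic_state);
	if (periodic_state % 4 == 0)
		printf("%2u: every-60s work\n", periodic_state);
	if (periodic_state % 2 == 0)
		printf("%2u: every-30s work\n", periodic_state);
	printf("%2u: every-15s work\n", periodic_state);
}

int main(void)
{
	unsigned int periodic_state;

	/* do_periodic_work() runs once per 15 s, then the state advances. */
	for (periodic_state = 0; periodic_state < 8; periodic_state++)
		tick(periodic_state);
	return 0;
}
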
index 0f554373a60dc677e0aaa848bd4e4ea8f1335a4c,79607b8b877ce74e849e87a22c1a6290ce3c8376..1bcd352a813bc24e71284a09f877cd66451d031d
@@@ -316,7 -316,7 +316,7 @@@ static void ipw2100_release_firmware(st
                                     struct ipw2100_fw *fw);
  static int ipw2100_ucode_download(struct ipw2100_priv *priv,
                                  struct ipw2100_fw *fw);
 -static void ipw2100_wx_event_work(struct ipw2100_priv *priv);
 +static void ipw2100_wx_event_work(struct work_struct *work);
  static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev);
  static struct iw_handler_def ipw2100_wx_handler_def;
  
@@@ -679,8 -679,7 +679,8 @@@ static void schedule_reset(struct ipw21
                        queue_delayed_work(priv->workqueue, &priv->reset_work,
                                           priv->reset_backoff * HZ);
                else
 -                      queue_work(priv->workqueue, &priv->reset_work);
 +                      queue_delayed_work(priv->workqueue, &priv->reset_work,
 +                                         0);
  
                if (priv->reset_backoff < MAX_RESET_BACKOFF)
                        priv->reset_backoff++;
@@@ -1874,10 -1873,8 +1874,10 @@@ static void ipw2100_down(struct ipw2100
        netif_stop_queue(priv->net_dev);
  }
  
 -static void ipw2100_reset_adapter(struct ipw2100_priv *priv)
 +static void ipw2100_reset_adapter(struct work_struct *work)
  {
 +      struct ipw2100_priv *priv =
 +              container_of(work, struct ipw2100_priv, reset_work.work);
        unsigned long flags;
        union iwreq_data wrqu = {
                .ap_addr = {
@@@ -2074,9 -2071,9 +2074,9 @@@ static void isr_indicate_association_lo
                return;
  
        if (priv->status & STATUS_SECURITY_UPDATED)
 -              queue_work(priv->workqueue, &priv->security_work);
 +              queue_delayed_work(priv->workqueue, &priv->security_work, 0);
  
 -      queue_work(priv->workqueue, &priv->wx_event_work);
 +      queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0);
  }
  
  static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
@@@ -5527,11 -5524,8 +5527,11 @@@ static int ipw2100_configure_security(s
        return err;
  }
  
 -static void ipw2100_security_work(struct ipw2100_priv *priv)
 +static void ipw2100_security_work(struct work_struct *work)
  {
 +      struct ipw2100_priv *priv =
 +              container_of(work, struct ipw2100_priv, security_work.work);
 +
        /* If we happen to have reconnected before we get a chance to
         * process this, then update the security settings--which causes
         * a disassociation to occur */
@@@ -5754,7 -5748,7 +5754,7 @@@ static int ipw2100_set_address(struct n
  
        priv->reset_backoff = 0;
        mutex_unlock(&priv->action_mutex);
 -      ipw2100_reset_adapter(priv);
 +      ipw2100_reset_adapter(&priv->reset_work.work);
        return 0;
  
        done:
@@@ -5833,19 -5827,6 +5833,6 @@@ static void ipw2100_tx_timeout(struct n
        schedule_reset(priv);
  }
  
- /*
-  * TODO: reimplement it so that it reads statistics
-  *       from the adapter using ordinal tables
-  *       instead of/in addition to collecting them
-  *       in the driver
-  */
- static struct net_device_stats *ipw2100_stats(struct net_device *dev)
- {
-       struct ipw2100_priv *priv = ieee80211_priv(dev);
-       return &priv->ieee->stats;
- }
  static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
  {
        /* This is called when wpa_supplicant loads and closes the driver
@@@ -5929,10 -5910,9 +5916,10 @@@ static const struct ethtool_ops ipw2100
        .get_drvinfo = ipw_ethtool_get_drvinfo,
  };
  
 -static void ipw2100_hang_check(void *adapter)
 +static void ipw2100_hang_check(struct work_struct *work)
  {
 -      struct ipw2100_priv *priv = adapter;
 +      struct ipw2100_priv *priv =
 +              container_of(work, struct ipw2100_priv, hang_check.work);
        unsigned long flags;
        u32 rtc = 0xa5a5a5a5;
        u32 len = sizeof(rtc);
        spin_unlock_irqrestore(&priv->low_lock, flags);
  }
  
 -static void ipw2100_rf_kill(void *adapter)
 +static void ipw2100_rf_kill(struct work_struct *work)
  {
 -      struct ipw2100_priv *priv = adapter;
 +      struct ipw2100_priv *priv =
 +              container_of(work, struct ipw2100_priv, rf_kill.work);
        unsigned long flags;
  
        spin_lock_irqsave(&priv->low_lock, flags);
@@@ -6030,7 -6009,6 +6017,6 @@@ static struct net_device *ipw2100_alloc
        dev->open = ipw2100_open;
        dev->stop = ipw2100_close;
        dev->init = ipw2100_net_init;
-       dev->get_stats = ipw2100_stats;
        dev->ethtool_ops = &ipw2100_ethtool_ops;
        dev->tx_timeout = ipw2100_tx_timeout;
        dev->wireless_handlers = &ipw2100_wx_handler_def;
  
        priv->workqueue = create_workqueue(DRV_NAME);
  
 -      INIT_WORK(&priv->reset_work,
 -                (void (*)(void *))ipw2100_reset_adapter, priv);
 -      INIT_WORK(&priv->security_work,
 -                (void (*)(void *))ipw2100_security_work, priv);
 -      INIT_WORK(&priv->wx_event_work,
 -                (void (*)(void *))ipw2100_wx_event_work, priv);
 -      INIT_WORK(&priv->hang_check, ipw2100_hang_check, priv);
 -      INIT_WORK(&priv->rf_kill, ipw2100_rf_kill, priv);
 +      INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter);
 +      INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work);
 +      INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
 +      INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check);
 +      INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
  
        tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
                     ipw2100_irq_tasklet, (unsigned long)priv);
@@@ -6428,6 -6409,7 +6414,7 @@@ static int ipw2100_resume(struct pci_de
  {
        struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
        struct net_device *dev = priv->net_dev;
+       int err;
        u32 val;
  
        if (IPW2100_PM_DISABLED)
        IPW_DEBUG_INFO("%s: Coming out of suspend...\n", dev->name);
  
        pci_set_power_state(pci_dev, PCI_D0);
-       pci_enable_device(pci_dev);
+       err = pci_enable_device(pci_dev);
+       if (err) {
+               printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
+                      dev->name);
+               return err;
+       }
        pci_restore_state(pci_dev);
  
        /*
@@@ -7573,11 -7560,10 +7565,10 @@@ static int ipw2100_wx_set_genie(struct 
                return -EINVAL;
  
        if (wrqu->data.length) {
-               buf = kmalloc(wrqu->data.length, GFP_KERNEL);
+               buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
                if (buf == NULL)
                        return -ENOMEM;
  
-               memcpy(buf, extra, wrqu->data.length);
                kfree(ieee->wpa_ie);
                ieee->wpa_ie = buf;
                ieee->wpa_ie_len = wrqu->data.length;
@@@ -8295,10 -8281,8 +8286,10 @@@ static struct iw_handler_def ipw2100_wx
        .get_wireless_stats = ipw2100_wx_wireless_stats,
  };
  
 -static void ipw2100_wx_event_work(struct ipw2100_priv *priv)
 +static void ipw2100_wx_event_work(struct work_struct *work)
  {
 +      struct ipw2100_priv *priv =
 +              container_of(work, struct ipw2100_priv, wx_event_work.work);
        union iwreq_data wrqu;
        int len = ETH_ALEN;
  
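
ipw2100_set_address() above invokes the converted handler synchronously as ipw2100_reset_adapter(&priv->reset_work.work): with delayed_work, the address of the embedded work member is exactly the pointer the handler's container_of() expects, whether keventd or the driver itself makes the call. A runnable sketch, with the same reduced types as the earlier sketches:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };
struct delayed_work { struct work_struct work; };

struct ipw2100_priv {
	int resets;
	struct delayed_work reset_work;
};

static void ipw2100_reset_adapter(struct work_struct *work)
{
	struct ipw2100_priv *priv =
		container_of(work, struct ipw2100_priv, reset_work.work);

	priv->resets++;
}

int main(void)
{
	struct ipw2100_priv priv = { 0, { { 0 } } };

	ipw2100_reset_adapter(&priv.reset_work.work);	/* direct call */
	printf("resets: %d\n", priv.resets);
	return 0;
}
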
index 587a0918fa52fe1750756600cd84e321938934a2,c692d01a76ca528c8830b9ba30c392769b76eb0b..e82e56bb85e14071859055624c6ee53018294d04
@@@ -187,9 -187,9 +187,9 @@@ static struct ipw_rx_queue *ipw_rx_queu
  static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
  static void ipw_rx_queue_replenish(void *);
  static int ipw_up(struct ipw_priv *);
 -static void ipw_bg_up(void *);
 +static void ipw_bg_up(struct work_struct *work);
  static void ipw_down(struct ipw_priv *);
 -static void ipw_bg_down(void *);
 +static void ipw_bg_down(struct work_struct *work);
  static int ipw_config(struct ipw_priv *);
  static int init_supported_rates(struct ipw_priv *priv,
                                struct ipw_supported_rates *prates);
@@@ -862,12 -862,11 +862,12 @@@ static void ipw_led_link_on(struct ipw_
        spin_unlock_irqrestore(&priv->lock, flags);
  }
  
 -static void ipw_bg_led_link_on(void *data)
 +static void ipw_bg_led_link_on(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, led_link_on.work);
        mutex_lock(&priv->mutex);
 -      ipw_led_link_on(data);
 +      ipw_led_link_on(priv);
        mutex_unlock(&priv->mutex);
  }
  
@@@ -907,12 -906,11 +907,12 @@@ static void ipw_led_link_off(struct ipw
        spin_unlock_irqrestore(&priv->lock, flags);
  }
  
 -static void ipw_bg_led_link_off(void *data)
 +static void ipw_bg_led_link_off(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, led_link_off.work);
        mutex_lock(&priv->mutex);
 -      ipw_led_link_off(data);
 +      ipw_led_link_off(priv);
        mutex_unlock(&priv->mutex);
  }
  
@@@ -987,12 -985,11 +987,12 @@@ static void ipw_led_activity_off(struc
        spin_unlock_irqrestore(&priv->lock, flags);
  }
  
 -static void ipw_bg_led_activity_off(void *data)
 +static void ipw_bg_led_activity_off(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, led_act_off.work);
        mutex_lock(&priv->mutex);
 -      ipw_led_activity_off(data);
 +      ipw_led_activity_off(priv);
        mutex_unlock(&priv->mutex);
  }
  
@@@ -2231,12 -2228,11 +2231,12 @@@ static void ipw_adapter_restart(void *a
        }
  }
  
 -static void ipw_bg_adapter_restart(void *data)
 +static void ipw_bg_adapter_restart(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, adapter_restart);
        mutex_lock(&priv->mutex);
 -      ipw_adapter_restart(data);
 +      ipw_adapter_restart(priv);
        mutex_unlock(&priv->mutex);
  }
  
@@@ -2253,12 -2249,11 +2253,12 @@@ static void ipw_scan_check(void *data
        }
  }
  
 -static void ipw_bg_scan_check(void *data)
 +static void ipw_bg_scan_check(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, scan_check.work);
        mutex_lock(&priv->mutex);
 -      ipw_scan_check(data);
 +      ipw_scan_check(priv);
        mutex_unlock(&priv->mutex);
  }
  
@@@ -3836,19 -3831,17 +3836,19 @@@ static int ipw_disassociate(void *data
        return 1;
  }
  
 -static void ipw_bg_disassociate(void *data)
 +static void ipw_bg_disassociate(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, disassociate);
        mutex_lock(&priv->mutex);
 -      ipw_disassociate(data);
 +      ipw_disassociate(priv);
        mutex_unlock(&priv->mutex);
  }
  
 -static void ipw_system_config(void *data)
 +static void ipw_system_config(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, system_config);
  
  #ifdef CONFIG_IPW2200_PROMISCUOUS
        if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
@@@ -4215,12 -4208,11 +4215,12 @@@ static void ipw_gather_stats(struct ipw
                           IPW_STATS_INTERVAL);
  }
  
 -static void ipw_bg_gather_stats(void *data)
 +static void ipw_bg_gather_stats(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, gather_stats.work);
        mutex_lock(&priv->mutex);
 -      ipw_gather_stats(data);
 +      ipw_gather_stats(priv);
        mutex_unlock(&priv->mutex);
  }
  
@@@ -4276,8 -4268,8 +4276,8 @@@ static void ipw_handle_missed_beacon(st
                if (!(priv->status & STATUS_ROAMING)) {
                        priv->status |= STATUS_ROAMING;
                        if (!(priv->status & STATUS_SCANNING))
 -                              queue_work(priv->workqueue,
 -                                         &priv->request_scan);
 +                              queue_delayed_work(priv->workqueue,
 +                                                 &priv->request_scan, 0);
                }
                return;
        }
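
Because priv->request_scan is now a struct delayed_work, each former
queue_work() call site becomes queue_delayed_work() with a delay in jiffies;
a delay of 0 queues the scan for immediate execution, as in this and the
following hunks. The two call shapes used in this file:

	/* run as soon as the workqueue thread gets to it */
	queue_delayed_work(priv->workqueue, &priv->request_scan, 0);

	/* run roughly one second from now */
	queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);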
@@@ -4615,8 -4607,8 +4615,8 @@@ static void ipw_rx_notification(struct 
  #ifdef CONFIG_IPW2200_MONITOR
                        if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
                                priv->status |= STATUS_SCAN_FORCED;
 -                              queue_work(priv->workqueue,
 -                                         &priv->request_scan);
 +                              queue_delayed_work(priv->workqueue,
 +                                                 &priv->request_scan, 0);
                                break;
                        }
                        priv->status &= ~STATUS_SCAN_FORCED;
                                        /* Don't schedule if we aborted the scan */
                                        priv->status &= ~STATUS_ROAMING;
                        } else if (priv->status & STATUS_SCAN_PENDING)
 -                              queue_work(priv->workqueue,
 -                                         &priv->request_scan);
 +                              queue_delayed_work(priv->workqueue,
 +                                                 &priv->request_scan, 0);
                        else if (priv->config & CFG_BACKGROUND_SCAN
                                 && priv->status & STATUS_ASSOCIATED)
                                queue_delayed_work(priv->workqueue,
@@@ -5063,12 -5055,11 +5063,12 @@@ static void ipw_rx_queue_replenish(voi
        ipw_rx_queue_restock(priv);
  }
  
 -static void ipw_bg_rx_queue_replenish(void *data)
 +static void ipw_bg_rx_queue_replenish(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, rx_replenish);
        mutex_lock(&priv->mutex);
 -      ipw_rx_queue_replenish(data);
 +      ipw_rx_queue_replenish(priv);
        mutex_unlock(&priv->mutex);
  }
  
@@@ -5498,10 -5489,9 +5498,10 @@@ static int ipw_find_adhoc_network(struc
        return 1;
  }
  
 -static void ipw_merge_adhoc_network(void *data)
 +static void ipw_merge_adhoc_network(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, merge_networks);
        struct ieee80211_network *network = NULL;
        struct ipw_network_match match = {
                .network = priv->assoc_network
@@@ -5958,12 -5948,11 +5958,12 @@@ static void ipw_adhoc_check(void *data
                           priv->assoc_request.beacon_interval);
  }
  
 -static void ipw_bg_adhoc_check(void *data)
 +static void ipw_bg_adhoc_check(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, adhoc_check.work);
        mutex_lock(&priv->mutex);
 -      ipw_adhoc_check(data);
 +      ipw_adhoc_check(priv);
        mutex_unlock(&priv->mutex);
  }
  
@@@ -6310,26 -6299,19 +6310,26 @@@ done
        return err;
  }
  
 -static int ipw_request_passive_scan(struct ipw_priv *priv) {
 -      return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
 +static void ipw_request_passive_scan(struct work_struct *work)
 +{
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, request_passive_scan);
 +      ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
  }
  
 -static int ipw_request_scan(struct ipw_priv *priv) {
 -      return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
 +static void ipw_request_scan(struct work_struct *work)
 +{
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, request_scan.work);
 +      ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
  }
  
 -static void ipw_bg_abort_scan(void *data)
 +static void ipw_bg_abort_scan(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, abort_scan);
        mutex_lock(&priv->mutex);
 -      ipw_abort_scan(data);
 +      ipw_abort_scan(priv);
        mutex_unlock(&priv->mutex);
  }
  
@@@ -6938,8 -6920,8 +6938,8 @@@ static int ipw_qos_association(struct i
  }
  
  /*
- * handling the beaconing responces. if we get different QoS setting
- * of the network from the the associated setting adjust the QoS
+ * handling the beaconing responses. if we get a different QoS setting
+ * from the network than the associated setting, adjust the QoS
  * setting
  */
  static int ipw_qos_association_resp(struct ipw_priv *priv,
@@@ -7102,10 -7084,9 +7102,10 @@@ static int ipw_qos_set_tx_queue_command
  /*
  * background support to run QoS activate functionality
  */
 -static void ipw_bg_qos_activate(void *data)
 +static void ipw_bg_qos_activate(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, qos_activate);
  
        if (priv == NULL)
                return;
@@@ -7413,12 -7394,11 +7413,12 @@@ static void ipw_roam(void *data
        priv->status &= ~STATUS_ROAMING;
  }
  
 -static void ipw_bg_roam(void *data)
 +static void ipw_bg_roam(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, roam);
        mutex_lock(&priv->mutex);
 -      ipw_roam(data);
 +      ipw_roam(priv);
        mutex_unlock(&priv->mutex);
  }
  
@@@ -7499,8 -7479,8 +7499,8 @@@ static int ipw_associate(void *data
                                                   &priv->request_scan,
                                                   SCAN_INTERVAL);
                        else
 -                              queue_work(priv->workqueue,
 -                                         &priv->request_scan);
 +                              queue_delayed_work(priv->workqueue,
 +                                                 &priv->request_scan, 0);
                }
  
                return 0;
        return 1;
  }
  
 -static void ipw_bg_associate(void *data)
 +static void ipw_bg_associate(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, associate);
        mutex_lock(&priv->mutex);
 -      ipw_associate(data);
 +      ipw_associate(priv);
        mutex_unlock(&priv->mutex);
  }
  
@@@ -9431,7 -9410,7 +9431,7 @@@ static int ipw_wx_set_scan(struct net_d
  
        IPW_DEBUG_WX("Start scan\n");
  
 -      queue_work(priv->workqueue, &priv->request_scan);
 +      queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
  
        return 0;
  }
@@@ -10568,12 -10547,11 +10568,12 @@@ static void ipw_rf_kill(void *adapter
        spin_unlock_irqrestore(&priv->lock, flags);
  }
  
 -static void ipw_bg_rf_kill(void *data)
 +static void ipw_bg_rf_kill(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, rf_kill.work);
        mutex_lock(&priv->mutex);
 -      ipw_rf_kill(data);
 +      ipw_rf_kill(priv);
        mutex_unlock(&priv->mutex);
  }
  
@@@ -10604,12 -10582,11 +10604,12 @@@ static void ipw_link_up(struct ipw_pri
                queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
  }
  
 -static void ipw_bg_link_up(void *data)
 +static void ipw_bg_link_up(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, link_up);
        mutex_lock(&priv->mutex);
 -      ipw_link_up(data);
 +      ipw_link_up(priv);
        mutex_unlock(&priv->mutex);
  }
  
@@@ -10629,16 -10606,15 +10629,16 @@@ static void ipw_link_down(struct ipw_pr
  
        if (!(priv->status & STATUS_EXIT_PENDING)) {
                /* Queue up another scan... */
 -              queue_work(priv->workqueue, &priv->request_scan);
 +              queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
        }
  }
  
 -static void ipw_bg_link_down(void *data)
 +static void ipw_bg_link_down(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, link_down);
        mutex_lock(&priv->mutex);
 -      ipw_link_down(data);
 +      ipw_link_down(priv);
        mutex_unlock(&priv->mutex);
  }
  
@@@ -10650,30 -10626,38 +10650,30 @@@ static int ipw_setup_deferred_work(stru
        init_waitqueue_head(&priv->wait_command_queue);
        init_waitqueue_head(&priv->wait_state);
  
 -      INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
 -      INIT_WORK(&priv->associate, ipw_bg_associate, priv);
 -      INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
 -      INIT_WORK(&priv->system_config, ipw_system_config, priv);
 -      INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
 -      INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
 -      INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
 -      INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
 -      INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
 -      INIT_WORK(&priv->request_scan,
 -                (void (*)(void *))ipw_request_scan, priv);
 -      INIT_WORK(&priv->request_passive_scan,
 -                (void (*)(void *))ipw_request_passive_scan, priv);
 -      INIT_WORK(&priv->gather_stats,
 -                (void (*)(void *))ipw_bg_gather_stats, priv);
 -      INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
 -      INIT_WORK(&priv->roam, ipw_bg_roam, priv);
 -      INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
 -      INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
 -      INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
 -      INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
 -                priv);
 -      INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
 -                priv);
 -      INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
 -                priv);
 -      INIT_WORK(&priv->merge_networks,
 -                (void (*)(void *))ipw_merge_adhoc_network, priv);
 +      INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
 +      INIT_WORK(&priv->associate, ipw_bg_associate);
 +      INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
 +      INIT_WORK(&priv->system_config, ipw_system_config);
 +      INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
 +      INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
 +      INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
 +      INIT_WORK(&priv->up, ipw_bg_up);
 +      INIT_WORK(&priv->down, ipw_bg_down);
 +      INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
 +      INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
 +      INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
 +      INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
 +      INIT_WORK(&priv->roam, ipw_bg_roam);
 +      INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
 +      INIT_WORK(&priv->link_up, ipw_bg_link_up);
 +      INIT_WORK(&priv->link_down, ipw_bg_link_down);
 +      INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
 +      INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
 +      INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
 +      INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
  
  #ifdef CONFIG_IPW2200_QOS
 -      INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
 -                priv);
 +      INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
  #endif                                /* CONFIG_IPW2200_QOS */
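
The INIT_WORK()/INIT_DELAYED_WORK() macros likewise lost their third (data)
argument, and since all handlers now share the prototype
void (*)(struct work_struct *), the old (void (*)(void *)) casts go away.
Continuing the illustrative foo_* sketch from above:

	static void foo_setup_deferred_work(struct foo_priv *priv)
	{
		INIT_WORK(&priv->reset, foo_reset_worker);
		INIT_DELAYED_WORK(&priv->scan, foo_scan_worker);
	}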
  
        tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
@@@ -11206,8 -11190,7 +11206,8 @@@ static int ipw_up(struct ipw_priv *priv
  
                        /* If configure to try and auto-associate, kick
                         * off a scan. */
 -                      queue_work(priv->workqueue, &priv->request_scan);
 +                      queue_delayed_work(priv->workqueue,
 +                                         &priv->request_scan, 0);
  
                        return 0;
                }
        return -EIO;
  }
  
 -static void ipw_bg_up(void *data)
 +static void ipw_bg_up(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, up);
        mutex_lock(&priv->mutex);
 -      ipw_up(data);
 +      ipw_up(priv);
        mutex_unlock(&priv->mutex);
  }
  
@@@ -11300,12 -11282,11 +11300,12 @@@ static void ipw_down(struct ipw_priv *p
        ipw_led_radio_off(priv);
  }
  
 -static void ipw_bg_down(void *data)
 +static void ipw_bg_down(struct work_struct *work)
  {
 -      struct ipw_priv *priv = data;
 +      struct ipw_priv *priv =
 +              container_of(work, struct ipw_priv, down);
        mutex_lock(&priv->mutex);
 -      ipw_down(data);
 +      ipw_down(priv);
        mutex_unlock(&priv->mutex);
  }
  
@@@ -11746,12 -11727,18 +11746,18 @@@ static int ipw_pci_resume(struct pci_de
  {
        struct ipw_priv *priv = pci_get_drvdata(pdev);
        struct net_device *dev = priv->net_dev;
+       int err;
        u32 val;
  
        printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
  
        pci_set_power_state(pdev, PCI_D0);
-       pci_enable_device(pdev);
+       err = pci_enable_device(pdev);
+       if (err) {
+               printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
+                      dev->name);
+               return err;
+       }
        pci_restore_state(pdev);
  
        /*
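
Unrelated to the workqueue conversion, the resume path grows error handling:
pci_enable_device() returns a negative errno that must not be ignored. A
minimal sketch of the pattern for a 2.6-era .resume hook, with a hypothetical
foo_ driver name:

	static int foo_pci_resume(struct pci_dev *pdev)
	{
		int err;

		pci_set_power_state(pdev, PCI_D0);
		err = pci_enable_device(pdev);
		if (err)
			return err;	/* stay down; the core sees the failure */
		pci_restore_state(pdev);
		/* ... reprogram the hardware ... */
		return 0;
	}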
index e7700b4257ebf4ceffedd256aa4ecd673e082d39,4a20e45de3cab20caced617449070d994fa5272e..a87eb51886c89e3d5122a20ace2506897bbbbd23
@@@ -1,5 -1,4 +1,4 @@@
  /*
-  *  
   *  Copyright (C) 2002 Intersil Americas Inc.
   *            (C) 2003,2004 Aurelien Alleaume <slts@free.fr>
   *            (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
@@@ -55,12 -54,12 +54,12 @@@ static const unsigned char scan_rate_li
   * prism54_mib_mode_helper - MIB change mode helper function
   * @mib: the &struct islpci_mib object to modify
   * @iw_mode: new mode (%IW_MODE_*)
-  * 
+  *
   *  This is a helper function, hence it does not lock. Make sure
-  *  caller deals with locking *if* necessary. This function sets the 
-  *  mode-dependent mib values and does the mapping of the Linux 
-  *  Wireless API modes to Device firmware modes. It also checks for 
-  *  correct valid Linux wireless modes. 
+  *  caller deals with locking *if* necessary. This function sets the
+  *  mode-dependent mib values and does the mapping of the Linux
+  *  Wireless API modes to Device firmware modes. It also checks for
+  *  correct valid Linux wireless modes.
   */
  static int
  prism54_mib_mode_helper(islpci_private *priv, u32 iw_mode)
   *
   *  this function initializes the struct given as @mib with defaults,
   *  of which many are retrieved from the global module parameter
-  *  variables.  
+  *  variables.
   */
  
  void
@@@ -134,7 -133,7 +133,7 @@@ prism54_mib_init(islpci_private *priv
        authen = CARD_DEFAULT_AUTHEN;
        wep = CARD_DEFAULT_WEP;
        filter = CARD_DEFAULT_FILTER; /* (0) Do not filter un-encrypted data */
-       dot1x = CARD_DEFAULT_DOT1X; 
+       dot1x = CARD_DEFAULT_DOT1X;
        mlme = CARD_DEFAULT_MLME_MODE;
        conformance = CARD_DEFAULT_CONFORMANCE;
        power = 127;
   * schedule_work(), thus we can as well use sleeping semaphore
   * locking */
  void
 -prism54_update_stats(islpci_private *priv)
 +prism54_update_stats(struct work_struct *work)
  {
 +      islpci_private *priv = container_of(work, islpci_private, stats_work);
        char *data;
        int j;
        struct obj_bss bss, *bss2;
@@@ -229,7 -227,7 +228,7 @@@ prism54_get_wireless_stats(struct net_d
        } else
                priv->iwstatistics.qual.updated = 0;
  
-       /* Update our wireless stats, but do not schedule to often 
+       /* Update our wireless stats, but do not schedule too often
         * (max 1 HZ) */
        if ((priv->stats_timestamp == 0) ||
            time_after(jiffies, priv->stats_timestamp + 1 * HZ)) {
@@@ -706,7 -704,7 +705,7 @@@ prism54_get_scan(struct net_device *nde
        * Starting with WE-17, the buffer can be as big as needed.
        * But the device won't report anything if you change the value
        * of IWMAX_BSS=24. */
-       
        rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r);
        bsslist = r.ptr;
  
@@@ -786,7 -784,7 +785,7 @@@ prism54_get_essid(struct net_device *nd
        return rvalue;
  }
  
- /* Provides no functionality, just completes the ioctl. In essence this is a 
+ /* Provides no functionality, just completes the ioctl. In essence this is
   * just a cosmetic ioctl.
   */
  static int
@@@ -1105,7 -1103,7 +1104,7 @@@ prism54_set_encode(struct net_device *n
                                            &key);
                }
                /*
-                * If a valid key is set, encryption should be enabled 
+                * If a valid key is set, encryption should be enabled
                 * (user may turn it off later).
                 * This is also how "iwconfig ethX key on" works
                 */
        }
        /* now read the flags */
        if (dwrq->flags & IW_ENCODE_DISABLED) {
-               /* Encoding disabled, 
+               /* Encoding disabled,
                 * authen = DOT11_AUTH_OS;
                 * invoke = 0;
                 * exunencrypt = 0; */
@@@ -1215,7 -1213,7 +1214,7 @@@ prism54_get_txpower(struct net_device *
        vwrq->value = (s32) r.u / 4;
        vwrq->fixed = 1;
        /* radio is not turned off
-        * btw: how is possible to turn off only the radio 
+        * btw: how is it possible to turn off only the radio
         */
        vwrq->disabled = 0;
  
@@@ -2355,17 -2353,17 +2354,17 @@@ prism54_process_trap_helper(islpci_priv
                handle_request(priv, mlme, oid);
                send_formatted_event(priv, "Authenticate request (ex)", mlme, 1);
  
-               if (priv->iw_mode != IW_MODE_MASTER 
+               if (priv->iw_mode != IW_MODE_MASTER
                                && mlmeex->state != DOT11_STATE_AUTHING)
                        break;
  
                confirm = kmalloc(sizeof(struct obj_mlmeex) + 6, GFP_ATOMIC);
  
-               if (!confirm) 
+               if (!confirm)
                        break;
  
                memcpy(&confirm->address, mlmeex->address, ETH_ALEN);
-               printk(KERN_DEBUG "Authenticate from: address:\t%02x:%02x:%02x:%02x:%02x:%02x\n", 
+               printk(KERN_DEBUG "Authenticate from: address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
                                mlmeex->address[0],
                                mlmeex->address[1],
                                mlmeex->address[2],
                handle_request(priv, mlme, oid);
                send_formatted_event(priv, "Associate request (ex)", mlme, 1);
  
-               if (priv->iw_mode != IW_MODE_MASTER 
+               if (priv->iw_mode != IW_MODE_MASTER
                                && mlmeex->state != DOT11_STATE_ASSOCING)
                        break;
-               
                confirm = kmalloc(sizeof(struct obj_mlmeex), GFP_ATOMIC);
  
                if (!confirm)
  
                if (!wpa_ie_len) {
                        printk(KERN_DEBUG "No WPA IE found from "
-                                       "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n", 
+                                       "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
                                mlmeex->address[0],
                                mlmeex->address[1],
                                mlmeex->address[2],
                mgt_set_varlen(priv, oid, confirm, wpa_ie_len);
  
                kfree(confirm);
-               
                break;
  
        case DOT11_OID_REASSOCIATEEX:
                handle_request(priv, mlme, oid);
                send_formatted_event(priv, "Reassociate request (ex)", mlme, 1);
  
-               if (priv->iw_mode != IW_MODE_MASTER 
+               if (priv->iw_mode != IW_MODE_MASTER
                                && mlmeex->state != DOT11_STATE_ASSOCING)
                        break;
  
  
                if (!wpa_ie_len) {
                        printk(KERN_DEBUG "No WPA IE found from "
-                                       "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n", 
+                                       "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
                                mlmeex->address[0],
                                mlmeex->address[1],
                                mlmeex->address[2],
                        break;
                }
  
-               confirm->size = wpa_ie_len; 
+               confirm->size = wpa_ie_len;
                memcpy(&confirm->data, wpa_ie, wpa_ie_len);
  
                mgt_set_varlen(priv, oid, confirm, wpa_ie_len);
  
                kfree(confirm);
-               
                break;
  
        default:
   * interrupt context, no locks held.
   */
  void
 -prism54_process_trap(void *data)
 +prism54_process_trap(struct work_struct *work)
  {
 -      struct islpci_mgmtframe *frame = data;
 +      struct islpci_mgmtframe *frame =
 +              container_of(work, struct islpci_mgmtframe, ws);
        struct net_device *ndev = frame->ndev;
        enum oid_num_t n = mgt_oidtonum(frame->header->oid);
  
@@@ -2547,10 -2544,10 +2546,10 @@@ enum 
  #define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
  ((int) (&((struct prism2_hostapd_param *) 0)->u.generic_elem.data))
  
- /* Maximum length for algorithm names (-1 for nul termination) 
+ /* Maximum length for algorithm names (-1 for nul termination)
   * used in ioctl() */
  #define HOSTAP_CRYPT_ALG_NAME_LEN 16
-       
  struct prism2_hostapd_param {
        u32 cmd;
        u8 sta_addr[ETH_ALEN];
@@@ -2623,7 -2620,7 +2622,7 @@@ prism2_ioctl_set_encryption(struct net_
                                            &key);
                }
                /*
-                * If a valid key is set, encryption should be enabled 
+                * If a valid key is set, encryption should be enabled
                 * (user may turn it off later).
                 * This is also how "iwconfig ethX key on" works
                 */
        }
        /* now read the flags */
        if (param->u.crypt.flags & IW_ENCODE_DISABLED) {
-               /* Encoding disabled, 
+               /* Encoding disabled,
                 * authen = DOT11_AUTH_OS;
                 * invoke = 0;
                 * exunencrypt = 0; */
@@@ -2712,7 -2709,7 +2711,7 @@@ prism2_ioctl_set_generic_element(struc
  
               ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
  
-              if (ret == 0) 
+              if (ret == 0)
                       printk(KERN_DEBUG "%s: WPA IE Attachment was set\n",
                                       ndev->name);
         }
@@@ -2872,7 -2869,7 +2871,7 @@@ prism54_set_wpa(struct net_device *ndev
                        mlme = DOT11_MLME_AUTO;
                        printk("%s: Disabling WPA\n", ndev->name);
                        break;
-               case 2: 
+               case 2:
                case 1: /* WPA */
                        printk("%s: Enabling WPA\n", ndev->name);
                        break;
index 0802fa64996f11588f40987753a5d7e2c9b2242a,e8183d30c52eca1e2c9e1a6f21caa29d5feae2ed..bcfbfb9281d27c3e23dfc3de3619923c07304bd0
@@@ -1,5 -1,4 +1,4 @@@
  /*
-  *  
   *  Copyright (C) 2002 Intersil Americas Inc.
   *            (C) 2003 Aurelien Alleaume <slts@free.fr>
   *            (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
  void prism54_mib_init(islpci_private *);
  
  struct iw_statistics *prism54_get_wireless_stats(struct net_device *);
 -void prism54_update_stats(islpci_private *);
 +void prism54_update_stats(struct work_struct *);
  
  void prism54_acl_init(struct islpci_acl *);
  void prism54_acl_clean(struct islpci_acl *);
  
 -void prism54_process_trap(void *);
 +void prism54_process_trap(struct work_struct *);
  
  void prism54_wpa_bss_ie_init(islpci_private *priv);
  void prism54_wpa_bss_ie_clean(islpci_private *priv);
index e35fcb2543c44f09ac800b02ff8f93b6c2f99922,1e0603ca436c01b1c2ac49b42537307b251677b7..f057fd9fcd79edcf2288451ec21c51aa09ad3f6b
@@@ -1,5 -1,4 +1,4 @@@
  /*
-  *  
   *  Copyright (C) 2002 Intersil Americas Inc.
   *  Copyright (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
   *  Copyright (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
@@@ -413,7 -412,7 +412,7 @@@ prism54_bring_down(islpci_private *priv
        islpci_set_state(priv, PRV_STATE_PREBOOT);
  
        /* disable all device interrupts in case they weren't */
-       isl38xx_disable_interrupts(priv->device_base);  
+       isl38xx_disable_interrupts(priv->device_base);
  
        /* For safety reasons, we may want to ensure that no DMA transfer is
         * currently in progress by emptying the TX and RX queues. */
@@@ -480,7 -479,7 +479,7 @@@ islpci_reset_if(islpci_private *priv
  
        DEFINE_WAIT(wait);
        prepare_to_wait(&priv->reset_done, &wait, TASK_UNINTERRUPTIBLE);
-       
        /* now the last step is to reset the interface */
        isl38xx_interface_reset(priv->device_base, priv->device_host_address);
        islpci_set_state(priv, PRV_STATE_PREINIT);
          for(count = 0; count < 2 && result; count++) {
                /* The software reset acknowledge needs about 220 msec here.
                 * Be conservative and wait for up to one second. */
-       
                remaining = schedule_timeout_uninterruptible(HZ);
  
                if(remaining > 0) {
                        break;
                }
  
-               /* If we're here it's because our IRQ hasn't yet gone through. 
+               /* If we're here it's because our IRQ hasn't yet gone through.
                 * Retry a bit more...
                 */
                printk(KERN_ERR "%s: no 'reset complete' IRQ seen - retrying\n",
  
        /* Now that the device is 100% up, let's allow
         * for the other interrupts --
-        * NOTE: this is not *yet* true since we've only allowed the 
+        * NOTE: this is not *yet* true since we've only allowed the
         * INIT interrupt on the IRQ line. We can perhaps poll
         * the IRQ line until we know for sure the reset went through */
        isl38xx_enable_common_interrupts(priv->device_base);
@@@ -716,7 -715,7 +715,7 @@@ islpci_alloc_memory(islpci_private *pri
  
        prism54_acl_init(&priv->acl);
        prism54_wpa_bss_ie_init(priv);
-       if (mgt_init(priv)) 
+       if (mgt_init(priv))
                goto out_free;
  
        return 0;
@@@ -861,10 -860,11 +860,10 @@@ islpci_setup(struct pci_dev *pdev
        priv->state_off = 1;
  
        /* initialize workqueues */
 -      INIT_WORK(&priv->stats_work,
 -                (void (*)(void *)) prism54_update_stats, priv);
 +      INIT_WORK(&priv->stats_work, prism54_update_stats);
        priv->stats_timestamp = 0;
  
 -      INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake, priv);
 +      INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake);
        priv->reset_task_pending = 0;
  
        /* allocate various memory areas */
index 103a378777331123c282c8635ad485edbfd63d43,676d83813dc8526699c732d3eccc36d583ad536f..b1122912ee2d25dcaffe7024bd78c00b5bcc6b75
@@@ -1,5 -1,4 +1,4 @@@
  /*
-  *  
   *  Copyright (C) 2002 Intersil Americas Inc.
   *  Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
   *  This program is free software; you can redistribute it and/or modify
@@@ -48,7 -47,7 +47,7 @@@ islpci_eth_cleanup_transmit(islpci_priv
                /* read the index of the first fragment to be freed */
                index = priv->free_data_tx % ISL38XX_CB_TX_QSIZE;
  
-               /* check for holes in the arrays caused by multi fragment frames 
+               /* check for holes in the arrays caused by multi fragment frames
                 * searching for the last fragment of a frame */
                if (priv->pci_map_tx_address[index] != (dma_addr_t) NULL) {
                        /* entry is the last fragment of a frame
@@@ -253,6 -252,7 +252,7 @@@ islpci_monitor_rx(islpci_private *priv
         * header and without the FCS. But there is a bit that
         * indicates if the packet is corrupted :-) */
        struct rfmon_header *hdr = (struct rfmon_header *) (*skb)->data;
        if (hdr->flags & 0x01)
                /* This one is bad. Drop it ! */
                return -1;
                    (struct avs_80211_1_header *) skb_push(*skb,
                                                           sizeof (struct
                                                                   avs_80211_1_header));
-               
                avs->version = cpu_to_be32(P80211CAPTURE_VERSION);
                avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header));
                avs->mactime = cpu_to_be64(le64_to_cpu(clock));
@@@ -390,7 -390,7 +390,7 @@@ islpci_eth_receive(islpci_private *priv
                        struct rx_annex_header *annex =
                            (struct rx_annex_header *) skb->data;
                        wstats.level = annex->rfmon.rssi;
-                       /* The noise value can be a bit outdated if nobody's 
+                       /* The noise value can be a bit outdated if nobody's
                         * reading wireless stats... */
                        wstats.noise = priv->local_iwstatistics.qual.noise;
                        wstats.qual = wstats.level - wstats.noise;
                        break;
                }
                /* update the fragment address */
-               control_block->rx_data_low[index].address = cpu_to_le32((u32)
-                                                                       priv->
-                                                                       pci_map_rx_address
-                                                                       [index]);
+               control_block->rx_data_low[index].address =
+                       cpu_to_le32((u32)priv->pci_map_rx_address[index]);
                wmb();
  
                /* increment the driver read pointer */
  }
  
  void
 -islpci_do_reset_and_wake(void *data)
 +islpci_do_reset_and_wake(struct work_struct *work)
  {
 -      islpci_private *priv = data;
 +      islpci_private *priv = container_of(work, islpci_private, reset_task);
        islpci_reset(priv, 1);
-       netif_wake_queue(priv->ndev);
        priv->reset_task_pending = 0;
+       smp_wmb();
+       netif_wake_queue(priv->ndev);
  }
  
  void
@@@ -499,12 -499,14 +499,14 @@@ islpci_eth_tx_timeout(struct net_devic
        /* increment the transmit error counter */
        statistics->tx_errors++;
  
-       printk(KERN_WARNING "%s: tx_timeout", ndev->name);
        if (!priv->reset_task_pending) {
-               priv->reset_task_pending = 1;
-               printk(", scheduling a reset");
+               printk(KERN_WARNING
+                       "%s: tx_timeout, scheduling reset\n", ndev->name);
                netif_stop_queue(ndev);
+               priv->reset_task_pending = 1;
                schedule_work(&priv->reset_task);
+       } else {
+               printk(KERN_WARNING
+                       "%s: tx_timeout, waiting for reset\n", ndev->name);
        }
-       printk("\n");
  }
index 99d37eda9f012873a1b6843f0e210bfd417580c7,26789454067ccbc40193e3bf27f5dbae1d171487..5bf820defbd01b73b906d295bfe3ce40f1e02e80
@@@ -1,5 -1,4 +1,4 @@@
  /*
-  *  
   *  Copyright (C) 2002 Intersil Americas Inc.
   *
   *  This program is free software; you can redistribute it and/or modify
@@@ -68,6 -67,6 +67,6 @@@ void islpci_eth_cleanup_transmit(islpci
  int islpci_eth_transmit(struct sk_buff *, struct net_device *);
  int islpci_eth_receive(islpci_private *);
  void islpci_eth_tx_timeout(struct net_device *);
 -void islpci_do_reset_and_wake(void *data);
 +void islpci_do_reset_and_wake(struct work_struct *);
  
  #endif                                /* _ISL_GEN_H */
index 656ec9fa71289848819be2390b4bbc74899735f8,036a875054c99fa5041c0f23181962ec0e67558c..2246f7930b4edfd45aeac66a5bdf4c194ce91a96
@@@ -1,5 -1,4 +1,4 @@@
  /*
-  *  
   *  Copyright (C) 2002 Intersil Americas Inc.
   *  Copyright 2004 Jens Maurer <Jens.Maurer@gmx.net>
   *
@@@ -387,7 -386,7 +386,7 @@@ islpci_mgt_receive(struct net_device *n
  
                        /* Create work to handle trap out of interrupt
                         * context. */
 -                      INIT_WORK(&frame->ws, prism54_process_trap, frame);
 +                      INIT_WORK(&frame->ws, prism54_process_trap);
                        schedule_work(&frame->ws);
  
                } else {
@@@ -502,7 -501,7 +501,7 @@@ islpci_mgt_transaction(struct net_devic
        printk(KERN_WARNING "%s: timeout waiting for mgmt response\n",
               ndev->name);
  
-       /* TODO: we should reset the device here */     
+       /* TODO: we should reset the device here */
   out:
        finish_wait(&priv->mgmt_wqueue, &wait);
        up(&priv->mgmt_sem);
index 5e4f4b707375887baefd988a7532712783dc50e2,2696f95b92781d94c67b0258b91699afe7aee685..44f3cfd4cc1de6bc242c58a629ad6eed87cfd08d
@@@ -32,6 -32,8 +32,8 @@@
  
  static void ieee_init(struct ieee80211_device *ieee);
  static void softmac_init(struct ieee80211softmac_device *sm);
+ static void set_rts_cts_work(void *d);
+ static void set_basic_rates_work(void *d);
  
  static void housekeeping_init(struct zd_mac *mac);
  static void housekeeping_enable(struct zd_mac *mac);
@@@ -46,6 -48,8 +48,8 @@@ int zd_mac_init(struct zd_mac *mac
        memset(mac, 0, sizeof(*mac));
        spin_lock_init(&mac->lock);
        mac->netdev = netdev;
+       INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work, mac);
+       INIT_WORK(&mac->set_basic_rates_work, set_basic_rates_work, mac);
  
        ieee_init(ieee);
        softmac_init(ieee80211_priv(netdev));
@@@ -213,6 -217,13 +217,13 @@@ int zd_mac_stop(struct net_device *netd
        housekeeping_disable(mac);
        ieee80211softmac_stop(netdev);
  
+       /* Ensure no work items are running or queued from this point */
+       cancel_delayed_work(&mac->set_rts_cts_work);
+       cancel_delayed_work(&mac->set_basic_rates_work);
+       flush_workqueue(zd_workqueue);
+       mac->updating_rts_rate = 0;
+       mac->updating_basic_rates = 0;
        zd_chip_disable_hwint(chip);
        zd_chip_switch_radio_off(chip);
        zd_chip_disable_int(chip);
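
The added teardown in zd_mac_stop() follows the usual quiesce-before-poweroff
order: dequeue anything pending, flush the workqueue so an already-running
item finishes, and only then reset the in-progress flags and shut the chip
down. These lines come from the pre-rework side of the merge, where every
work_struct still embedded a timer, which is why cancel_delayed_work() can be
applied to the plain work items above. The ordering in miniature:

	cancel_delayed_work(&mac->set_rts_cts_work);	/* dequeue if pending */
	flush_workqueue(zd_workqueue);		/* wait out a running item */
	mac->updating_rts_rate = 0;		/* now safe to reset the flag */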
@@@ -286,6 -297,186 +297,186 @@@ u8 zd_mac_get_regdomain(struct zd_mac *
        return regdomain;
  }
  
+ /* Fallback to lowest rate, if rate is unknown. */
+ static u8 rate_to_zd_rate(u8 rate)
+ {
+       switch (rate) {
+       case IEEE80211_CCK_RATE_2MB:
+               return ZD_CCK_RATE_2M;
+       case IEEE80211_CCK_RATE_5MB:
+               return ZD_CCK_RATE_5_5M;
+       case IEEE80211_CCK_RATE_11MB:
+               return ZD_CCK_RATE_11M;
+       case IEEE80211_OFDM_RATE_6MB:
+               return ZD_OFDM_RATE_6M;
+       case IEEE80211_OFDM_RATE_9MB:
+               return ZD_OFDM_RATE_9M;
+       case IEEE80211_OFDM_RATE_12MB:
+               return ZD_OFDM_RATE_12M;
+       case IEEE80211_OFDM_RATE_18MB:
+               return ZD_OFDM_RATE_18M;
+       case IEEE80211_OFDM_RATE_24MB:
+               return ZD_OFDM_RATE_24M;
+       case IEEE80211_OFDM_RATE_36MB:
+               return ZD_OFDM_RATE_36M;
+       case IEEE80211_OFDM_RATE_48MB:
+               return ZD_OFDM_RATE_48M;
+       case IEEE80211_OFDM_RATE_54MB:
+               return ZD_OFDM_RATE_54M;
+       }
+       return ZD_CCK_RATE_1M;
+ }
+ 
+ static u16 rate_to_cr_rate(u8 rate)
+ {
+       switch (rate) {
+       case IEEE80211_CCK_RATE_2MB:
+               return CR_RATE_1M;
+       case IEEE80211_CCK_RATE_5MB:
+               return CR_RATE_5_5M;
+       case IEEE80211_CCK_RATE_11MB:
+               return CR_RATE_11M;
+       case IEEE80211_OFDM_RATE_6MB:
+               return CR_RATE_6M;
+       case IEEE80211_OFDM_RATE_9MB:
+               return CR_RATE_9M;
+       case IEEE80211_OFDM_RATE_12MB:
+               return CR_RATE_12M;
+       case IEEE80211_OFDM_RATE_18MB:
+               return CR_RATE_18M;
+       case IEEE80211_OFDM_RATE_24MB:
+               return CR_RATE_24M;
+       case IEEE80211_OFDM_RATE_36MB:
+               return CR_RATE_36M;
+       case IEEE80211_OFDM_RATE_48MB:
+               return CR_RATE_48M;
+       case IEEE80211_OFDM_RATE_54MB:
+               return CR_RATE_54M;
+       }
+       return CR_RATE_1M;
+ }
+ 
+ static void try_enable_tx(struct zd_mac *mac)
+ {
+       unsigned long flags;
+       spin_lock_irqsave(&mac->lock, flags);
+       if (mac->updating_rts_rate == 0 && mac->updating_basic_rates == 0)
+               netif_wake_queue(mac->netdev);
+       spin_unlock_irqrestore(&mac->lock, flags);
+ }
+ 
+ static void set_rts_cts_work(void *d)
+ {
+       struct zd_mac *mac = d;
+       unsigned long flags;
+       u8 rts_rate;
+       unsigned int short_preamble;
+       mutex_lock(&mac->chip.mutex);
+       spin_lock_irqsave(&mac->lock, flags);
+       mac->updating_rts_rate = 0;
+       rts_rate = mac->rts_rate;
+       short_preamble = mac->short_preamble;
+       spin_unlock_irqrestore(&mac->lock, flags);
+       zd_chip_set_rts_cts_rate_locked(&mac->chip, rts_rate, short_preamble);
+       mutex_unlock(&mac->chip.mutex);
+       try_enable_tx(mac);
+ }
+ 
+ static void set_basic_rates_work(void *d)
+ {
+       struct zd_mac *mac = d;
+       unsigned long flags;
+       u16 basic_rates;
+       mutex_lock(&mac->chip.mutex);
+       spin_lock_irqsave(&mac->lock, flags);
+       mac->updating_basic_rates = 0;
+       basic_rates = mac->basic_rates;
+       spin_unlock_irqrestore(&mac->lock, flags);
+       zd_chip_set_basic_rates_locked(&mac->chip, basic_rates);
+       mutex_unlock(&mac->chip.mutex);
+       try_enable_tx(mac);
+ }
+ 
+ static void bssinfo_change(struct net_device *netdev, u32 changes)
+ {
+       struct zd_mac *mac = zd_netdev_mac(netdev);
+       struct ieee80211softmac_device *softmac = ieee80211_priv(netdev);
+       struct ieee80211softmac_bss_info *bssinfo = &softmac->bssinfo;
+       int need_set_rts_cts = 0;
+       int need_set_rates = 0;
+       u16 basic_rates;
+       unsigned long flags;
+       dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes);
+       if (changes & IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE) {
+               spin_lock_irqsave(&mac->lock, flags);
+               mac->short_preamble = bssinfo->short_preamble;
+               spin_unlock_irqrestore(&mac->lock, flags);
+               need_set_rts_cts = 1;
+       }
+       if (changes & IEEE80211SOFTMAC_BSSINFOCHG_RATES) {
+               /* Set RTS rate to highest available basic rate */
+               u8 rate = ieee80211softmac_highest_supported_rate(softmac,
+                       &bssinfo->supported_rates, 1);
+               rate = rate_to_zd_rate(rate);
+               spin_lock_irqsave(&mac->lock, flags);
+               if (rate != mac->rts_rate) {
+                       mac->rts_rate = rate;
+                       need_set_rts_cts = 1;
+               }
+               spin_unlock_irqrestore(&mac->lock, flags);
+               /* Set basic rates */
+               need_set_rates = 1;
+               if (bssinfo->supported_rates.count == 0) {
+                       /* Allow the device to be flexible */
+                       basic_rates = CR_RATES_80211B | CR_RATES_80211G;
+               } else {
+                       int i = 0;
+                       basic_rates = 0;
+                       for (i = 0; i < bssinfo->supported_rates.count; i++) {
+                               u16 rate = bssinfo->supported_rates.rates[i];
+                               if ((rate & IEEE80211_BASIC_RATE_MASK) == 0)
+                                       continue;
+                               rate &= ~IEEE80211_BASIC_RATE_MASK;
+                               basic_rates |= rate_to_cr_rate(rate);
+                       }
+               }
+               spin_lock_irqsave(&mac->lock, flags);
+               mac->basic_rates = basic_rates;
+               spin_unlock_irqrestore(&mac->lock, flags);
+       }
+       /* Schedule any changes we made above */
+       spin_lock_irqsave(&mac->lock, flags);
+       if (need_set_rts_cts && !mac->updating_rts_rate) {
+               mac->updating_rts_rate = 1;
+               netif_stop_queue(mac->netdev);
+               queue_work(zd_workqueue, &mac->set_rts_cts_work);
+       }
+       if (need_set_rates && !mac->updating_basic_rates) {
+               mac->updating_basic_rates = 1;
+               netif_stop_queue(mac->netdev);
+               queue_work(zd_workqueue, &mac->set_basic_rates_work);
+       }
+       spin_unlock_irqrestore(&mac->lock, flags);
+ }
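
Taken together, try_enable_tx(), the two *_work() handlers and
bssinfo_change() implement a deferred-update scheme: the softmac callback may
run in atomic context and cannot take the sleeping chip mutex, so it only
records the new settings under the spinlock, marks an update as in progress,
stops the TX queue and queues a work item; the work item does the chip
access, clears its flag, and the queue wakes again only once neither update
is pending. The request side of the pattern, reduced to a single flag
(new_rate stands in for whatever value the caller computed):

	spin_lock_irqsave(&mac->lock, flags);
	mac->rts_rate = new_rate;		/* record the desired setting */
	if (!mac->updating_rts_rate) {
		mac->updating_rts_rate = 1;	/* claim the update */
		netif_stop_queue(mac->netdev);	/* no TX with stale rates */
		queue_work(zd_workqueue, &mac->set_rts_cts_work);
	}
	spin_unlock_irqrestore(&mac->lock, flags);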
  static void set_channel(struct net_device *netdev, u8 channel)
  {
        struct zd_mac *mac = zd_netdev_mac(netdev);
        zd_chip_set_channel(&mac->chip, channel);
  }
  
- /* TODO: Should not work in Managed mode. */
  int zd_mac_request_channel(struct zd_mac *mac, u8 channel)
  {
        unsigned long lock_flags;
                return 0;
  }
  
- int zd_mac_get_channel(struct zd_mac *mac, u8 *channel, u8 *flags)
+ u8 zd_mac_get_channel(struct zd_mac *mac)
  {
-       struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
+       u8 channel = zd_chip_get_channel(&mac->chip);
  
-       *channel = zd_chip_get_channel(&mac->chip);
-       if (ieee->iw_mode != IW_MODE_INFRA) {
-               spin_lock_irq(&mac->lock);
-               *flags = *channel == mac->requested_channel ?
-                       MAC_FIXED_CHANNEL : 0;
-               spin_unlock(&mac->lock);
-       } else {
-               *flags = 0;
-       }
-       dev_dbg_f(zd_mac_dev(mac), "channel %u flags %u\n", *channel, *flags);
-       return 0;
+       dev_dbg_f(zd_mac_dev(mac), "channel %u\n", channel);
+       return channel;
  }
  
  /* If a wrong rate is given, we fall back to the slowest rate: 1 MBit/s */
- static u8 cs_typed_rate(u8 cs_rate)
+ static u8 zd_rate_typed(u8 zd_rate)
  {
        static const u8 typed_rates[16] = {
-               [ZD_CS_CCK_RATE_1M]     = ZD_CS_CCK|ZD_CS_CCK_RATE_1M,
-               [ZD_CS_CCK_RATE_2M]     = ZD_CS_CCK|ZD_CS_CCK_RATE_2M,
-               [ZD_CS_CCK_RATE_5_5M]   = ZD_CS_CCK|ZD_CS_CCK_RATE_5_5M,
-               [ZD_CS_CCK_RATE_11M]    = ZD_CS_CCK|ZD_CS_CCK_RATE_11M,
+               [ZD_CCK_RATE_1M]        = ZD_CS_CCK|ZD_CCK_RATE_1M,
+               [ZD_CCK_RATE_2M]        = ZD_CS_CCK|ZD_CCK_RATE_2M,
+               [ZD_CCK_RATE_5_5M]      = ZD_CS_CCK|ZD_CCK_RATE_5_5M,
+               [ZD_CCK_RATE_11M]       = ZD_CS_CCK|ZD_CCK_RATE_11M,
                [ZD_OFDM_RATE_6M]       = ZD_CS_OFDM|ZD_OFDM_RATE_6M,
                [ZD_OFDM_RATE_9M]       = ZD_CS_OFDM|ZD_OFDM_RATE_9M,
                [ZD_OFDM_RATE_12M]      = ZD_CS_OFDM|ZD_OFDM_RATE_12M,
        };
  
        ZD_ASSERT(ZD_CS_RATE_MASK == 0x0f);
-       return typed_rates[cs_rate & ZD_CS_RATE_MASK];
- }
- /* Fallback to lowest rate, if rate is unknown. */
- static u8 rate_to_cs_rate(u8 rate)
- {
-       switch (rate) {
-       case IEEE80211_CCK_RATE_2MB:
-               return ZD_CS_CCK_RATE_2M;
-       case IEEE80211_CCK_RATE_5MB:
-               return ZD_CS_CCK_RATE_5_5M;
-       case IEEE80211_CCK_RATE_11MB:
-               return ZD_CS_CCK_RATE_11M;
-       case IEEE80211_OFDM_RATE_6MB:
-               return ZD_OFDM_RATE_6M;
-       case IEEE80211_OFDM_RATE_9MB:
-               return ZD_OFDM_RATE_9M;
-       case IEEE80211_OFDM_RATE_12MB:
-               return ZD_OFDM_RATE_12M;
-       case IEEE80211_OFDM_RATE_18MB:
-               return ZD_OFDM_RATE_18M;
-       case IEEE80211_OFDM_RATE_24MB:
-               return ZD_OFDM_RATE_24M;
-       case IEEE80211_OFDM_RATE_36MB:
-               return ZD_OFDM_RATE_36M;
-       case IEEE80211_OFDM_RATE_48MB:
-               return ZD_OFDM_RATE_48M;
-       case IEEE80211_OFDM_RATE_54MB:
-               return ZD_OFDM_RATE_54M;
-       }
-       return ZD_CS_CCK_RATE_1M;
+       return typed_rates[zd_rate & ZD_CS_RATE_MASK];
  }
  
  int zd_mac_set_mode(struct zd_mac *mac, u32 mode)
@@@ -484,13 -635,13 +635,13 @@@ int zd_mac_get_range(struct zd_mac *mac
        return 0;
  }
  
- static int zd_calc_tx_length_us(u8 *service, u8 cs_rate, u16 tx_length)
+ static int zd_calc_tx_length_us(u8 *service, u8 zd_rate, u16 tx_length)
  {
        static const u8 rate_divisor[] = {
-               [ZD_CS_CCK_RATE_1M]     =  1,
-               [ZD_CS_CCK_RATE_2M]     =  2,
-               [ZD_CS_CCK_RATE_5_5M]   = 11, /* bits must be doubled */
-               [ZD_CS_CCK_RATE_11M]    = 11,
+               [ZD_CCK_RATE_1M]        =  1,
+               [ZD_CCK_RATE_2M]        =  2,
+               [ZD_CCK_RATE_5_5M]      = 11, /* bits must be doubled */
+               [ZD_CCK_RATE_11M]       = 11,
                [ZD_OFDM_RATE_6M]       =  6,
                [ZD_OFDM_RATE_9M]       =  9,
                [ZD_OFDM_RATE_12M]      = 12,
        u32 bits = (u32)tx_length * 8;
        u32 divisor;
  
-       divisor = rate_divisor[cs_rate];
+       divisor = rate_divisor[zd_rate];
        if (divisor == 0)
                return -EINVAL;
  
-       switch (cs_rate) {
-       case ZD_CS_CCK_RATE_5_5M:
+       switch (zd_rate) {
+       case ZD_CCK_RATE_5_5M:
                bits = (2*bits) + 10; /* round up to the next integer */
                break;
-       case ZD_CS_CCK_RATE_11M:
+       case ZD_CCK_RATE_11M:
                if (service) {
                        u32 t = bits % 11;
                        *service &= ~ZD_PLCP_SERVICE_LENGTH_EXTENSION;
@@@ -532,16 -683,16 +683,16 @@@ enum 
        R2M_11A            = 0x02,
  };
  
- static u8 cs_rate_to_modulation(u8 cs_rate, int flags)
+ static u8 zd_rate_to_modulation(u8 zd_rate, int flags)
  {
        u8 modulation;
  
-       modulation = cs_typed_rate(cs_rate);
+       modulation = zd_rate_typed(zd_rate);
        if (flags & R2M_SHORT_PREAMBLE) {
                switch (ZD_CS_RATE(modulation)) {
-               case ZD_CS_CCK_RATE_2M:
-               case ZD_CS_CCK_RATE_5_5M:
-               case ZD_CS_CCK_RATE_11M:
+               case ZD_CCK_RATE_2M:
+               case ZD_CCK_RATE_5_5M:
+               case ZD_CCK_RATE_11M:
                        modulation |= ZD_CS_CCK_PREA_SHORT;
                        return modulation;
                }
@@@ -558,39 -709,36 +709,36 @@@ static void cs_set_modulation(struct zd
  {
        struct ieee80211softmac_device *softmac = ieee80211_priv(mac->netdev);
        u16 ftype = WLAN_FC_GET_TYPE(le16_to_cpu(hdr->frame_ctl));
-       u8 rate, cs_rate;
+       u8 rate, zd_rate;
        int is_mgt = (ftype == IEEE80211_FTYPE_MGMT) != 0;
+       int is_multicast = is_multicast_ether_addr(hdr->addr1);
+       int short_preamble = ieee80211softmac_short_preamble_ok(softmac,
+               is_multicast, is_mgt);
+       int flags = 0;
+       /* FIXME: 802.11a? */
+       rate = ieee80211softmac_suggest_txrate(softmac, is_multicast, is_mgt);
  
-       /* FIXME: 802.11a? short preamble? */
-       rate = ieee80211softmac_suggest_txrate(softmac,
-               is_multicast_ether_addr(hdr->addr1), is_mgt);
+       if (short_preamble)
+               flags |= R2M_SHORT_PREAMBLE;
  
-       cs_rate = rate_to_cs_rate(rate);
-       cs->modulation = cs_rate_to_modulation(cs_rate, 0);
+       zd_rate = rate_to_zd_rate(rate);
+       cs->modulation = zd_rate_to_modulation(zd_rate, flags);
  }
  
  static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
                           struct ieee80211_hdr_4addr *header)
  {
+       struct ieee80211softmac_device *softmac = ieee80211_priv(mac->netdev);
        unsigned int tx_length = le16_to_cpu(cs->tx_length);
        u16 fctl = le16_to_cpu(header->frame_ctl);
        u16 ftype = WLAN_FC_GET_TYPE(fctl);
        u16 stype = WLAN_FC_GET_STYPE(fctl);
  
        /*
-        * CONTROL:
-        * - start at 0x00
-        * - if fragment 0, enable bit 0
+        * CONTROL TODO:
         * - if backoff needed, enable bit 0
         * - if burst (backoff not needed) disable bit 0
-        * - if multicast, enable bit 1
-        * - if PS-POLL frame, enable bit 2
-        * - if in INDEPENDENT_BSS mode and zd1205_DestPowerSave, then enable
-        *   bit 4 (FIXME: wtf)
-        * - if frag_len > RTS threshold, set bit 5 as long if it isnt
-        *   multicast or mgt
-        * - if bit 5 is set, and we are in OFDM mode, unset bit 5 and set bit
-        *   7
         */
  
        cs->control = 0;
        if (stype == IEEE80211_STYPE_PSPOLL)
                cs->control |= ZD_CS_PS_POLL_FRAME;
  
+       /* Unicast data frames over the threshold should have RTS */
        if (!is_multicast_ether_addr(header->addr1) &&
-           ftype != IEEE80211_FTYPE_MGMT &&
-           tx_length > zd_netdev_ieee80211(mac->netdev)->rts)
-       {
-               /* FIXME: check the logic */
-               if (ZD_CS_TYPE(cs->modulation) == ZD_CS_OFDM) {
-                       /* 802.11g */
-                       cs->control |= ZD_CS_SELF_CTS;
-               } else { /* 802.11b */
-                       cs->control |= ZD_CS_RTS;
-               }
+               ftype != IEEE80211_FTYPE_MGMT &&
+                   tx_length > zd_netdev_ieee80211(mac->netdev)->rts)
+               cs->control |= ZD_CS_RTS;
+       /* Use CTS-to-self protection if required */
+       if (ZD_CS_TYPE(cs->modulation) == ZD_CS_OFDM &&
+                       ieee80211softmac_protection_needed(softmac)) {
+               /* FIXME: avoid sending RTS *and* self-CTS, is that correct? */
+               cs->control &= ~ZD_CS_RTS;
+               cs->control |= ZD_CS_SELF_CTS;
        }
  
        /* FIXME: Management frame? */
@@@ -721,7 -870,7 +870,7 @@@ struct zd_rt_hdr 
        u8  rt_rate;
        u16 rt_channel;
        u16 rt_chbitmask;
- };
+ } __attribute__((packed));
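
The new __attribute__((packed)) annotations matter because zd_rt_hdr (and the
rx_* structures in zd_mac.h below) describe byte streams rather than
in-memory objects: without packed the compiler is free to insert alignment
padding, and sizeof() and the field offsets would stop matching the
radiotap/hardware layout. A hypothetical illustration:

	struct wire_hdr {
		u8  type;
		u16 len;	/* sits at byte offset 1 on the wire */
	} __attribute__((packed));

	/* sizeof(struct wire_hdr) == 3 when packed; typically 4 without */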
  
  static void fill_rt_header(void *buffer, struct zd_mac *mac,
                           const struct ieee80211_rx_stats *stats,
@@@ -782,9 -931,11 +931,11 @@@ static int is_data_packet_for_us(struc
               (netdev->flags & IFF_PROMISC);
  }
  
- /* Filters receiving packets. If it returns 1 send it to ieee80211_rx, if 0
-  * return. If an error is detected -EINVAL is returned. ieee80211_rx_mgt() is
-  * called here.
+ /* Filters received packets. The function returns 1 if the packet should be
+  * forwarded to ieee80211_rx(). If the packet should be ignored the function
+  * returns 0. If an invalid packet is found the function returns -EINVAL.
+  *
+  * The function calls ieee80211_rx_mgt() directly.
   *
   * It has been based on ieee80211_rx_any.
   */
@@@ -810,9 -961,9 +961,9 @@@ static int filter_rx(struct ieee80211_d
                ieee80211_rx_mgt(ieee, hdr, stats);
                return 0;
        case IEEE80211_FTYPE_CTL:
-               /* Ignore invalid short buffers */
                return 0;
        case IEEE80211_FTYPE_DATA:
+               /* Ignore invalid short buffers */
                if (length < sizeof(struct ieee80211_hdr_3addr))
                        return -EINVAL;
                return is_data_packet_for_us(ieee, hdr);
@@@ -993,6 -1144,7 +1144,7 @@@ static void ieee_init(struct ieee80211_
  static void softmac_init(struct ieee80211softmac_device *sm)
  {
        sm->set_channel = set_channel;
+       sm->bssinfo_change = bssinfo_change;
  }
  
  struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev)
        return iw_stats;
  }
  
- #ifdef DEBUG
- static const char* decryption_types[] = {
-       [ZD_RX_NO_WEP] = "none",
-       [ZD_RX_WEP64] = "WEP64",
-       [ZD_RX_TKIP] = "TKIP",
-       [ZD_RX_AES] = "AES",
-       [ZD_RX_WEP128] = "WEP128",
-       [ZD_RX_WEP256] = "WEP256",
- };
- static const char *decryption_type_string(u8 type)
- {
-       const char *s;
-       if (type < ARRAY_SIZE(decryption_types)) {
-               s = decryption_types[type];
-       } else {
-               s = NULL;
-       }
-       return s ? s : "unknown";
- }
- static int is_ofdm(u8 frame_status)
- {
-       return (frame_status & ZD_RX_OFDM);
- }
- void zd_dump_rx_status(const struct rx_status *status)
- {
-       const char* modulation;
-       u8 quality;
-       if (is_ofdm(status->frame_status)) {
-               modulation = "ofdm";
-               quality = status->signal_quality_ofdm;
-       } else {
-               modulation = "cck";
-               quality = status->signal_quality_cck;
-       }
-       pr_debug("rx status %s strength %#04x qual %#04x decryption %s\n",
-               modulation, status->signal_strength, quality,
-               decryption_type_string(status->decryption_type));
-       if (status->frame_status & ZD_RX_ERROR) {
-               pr_debug("rx error %s%s%s%s%s%s\n",
-                       (status->frame_status & ZD_RX_TIMEOUT_ERROR) ?
-                               "timeout " : "",
-                       (status->frame_status & ZD_RX_FIFO_OVERRUN_ERROR) ?
-                               "fifo " : "",
-                       (status->frame_status & ZD_RX_DECRYPTION_ERROR) ?
-                               "decryption " : "",
-                       (status->frame_status & ZD_RX_CRC32_ERROR) ?
-                               "crc32 " : "",
-                       (status->frame_status & ZD_RX_NO_ADDR1_MATCH_ERROR) ?
-                               "addr1 " : "",
-                       (status->frame_status & ZD_RX_CRC16_ERROR) ?
-                               "crc16" : "");
-       }
- }
- #endif /* DEBUG */
  #define LINK_LED_WORK_DELAY HZ
  
 -static void link_led_handler(void *p)
 +static void link_led_handler(struct work_struct *work)
  {
 -      struct zd_mac *mac = p;
 +      struct zd_mac *mac =
 +              container_of(work, struct zd_mac, housekeeping.link_led_work.work);
        struct zd_chip *chip = &mac->chip;
        struct ieee80211softmac_device *sm = ieee80211_priv(mac->netdev);
        int is_associated;
  
  static void housekeeping_init(struct zd_mac *mac)
  {
 -      INIT_WORK(&mac->housekeeping.link_led_work, link_led_handler, mac);
 +      INIT_DELAYED_WORK(&mac->housekeeping.link_led_work, link_led_handler);
  }
  
  static void housekeeping_enable(struct zd_mac *mac)
index 7957cac3de25c452ecaad56a5b07988e5305923b,5dcfb251f02ec6b2d2eb9c5cc5641300ac5b2648..08d6b8c08e75a1a8702275c738753c931e6d8842
@@@ -20,6 -20,7 +20,7 @@@
  
  #include <linux/wireless.h>
  #include <linux/kernel.h>
+ #include <linux/workqueue.h>
  #include <net/ieee80211.h>
  #include <net/ieee80211softmac.h>
  
@@@ -48,10 -49,11 +49,11 @@@ struct zd_ctrlset 
  #define ZD_CS_CCK             0x00
  #define ZD_CS_OFDM            0x10
  
- #define ZD_CS_CCK_RATE_1M     0x00
- #define ZD_CS_CCK_RATE_2M     0x01
- #define ZD_CS_CCK_RATE_5_5M   0x02
- #define ZD_CS_CCK_RATE_11M    0x03
+ /* These are referred to as zd_rates */
+ #define ZD_CCK_RATE_1M        0x00
+ #define ZD_CCK_RATE_2M        0x01
+ #define ZD_CCK_RATE_5_5M      0x02
+ #define ZD_CCK_RATE_11M       0x03
  /* The rates for OFDM are encoded as in the PLCP header. Use ZD_OFDM_RATE_*.
   */
  
@@@ -82,7 -84,7 +84,7 @@@
  struct rx_length_info {
        __le16 length[3];
        __le16 tag;
- };
+ } __attribute__((packed));
  
  #define RX_LENGTH_INFO_TAG            0x697e
  
@@@ -93,7 -95,7 +95,7 @@@ struct rx_status 
        u8 signal_quality_ofdm;
        u8 decryption_type;
        u8 frame_status;
- };
+ } __attribute__((packed));
  
  /* rx_status field decryption_type */
  #define ZD_RX_NO_WEP  0
  #define ZD_RX_CRC16_ERROR             0x40
  #define ZD_RX_ERROR                   0x80
  
- enum mac_flags {
-       MAC_FIXED_CHANNEL = 0x01,
- };
  struct housekeeping {
 -      struct work_struct link_led_work;
 +      struct delayed_work link_led_work;
  };
  
  #define ZD_MAC_STATS_BUFFER_SIZE 16
@@@ -130,15 -128,33 +128,33 @@@ struct zd_mac 
        struct zd_chip chip;
        spinlock_t lock;
        struct net_device *netdev;
        /* Unlocked reading possible */
        struct iw_statistics iw_stats;
        struct housekeeping housekeeping;
+       struct work_struct set_rts_cts_work;
+       struct work_struct set_basic_rates_work;
        unsigned int stats_count;
        u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE];
        u8 rssi_buffer[ZD_MAC_STATS_BUFFER_SIZE];
        u8 regdomain;
        u8 default_regdomain;
        u8 requested_channel;
+       /* A bitpattern of cr_rates */
+       u16 basic_rates;
+       /* A zd_rate */
+       u8 rts_rate;
+       /* Short preamble (used for RTS/CTS) */
+       unsigned int short_preamble:1;
+       /* flags to indicate update in progress */
+       unsigned int updating_rts_rate:1;
+       unsigned int updating_basic_rates:1;
  };
  
  static inline struct ieee80211_device *zd_mac_to_ieee80211(struct zd_mac *mac)
@@@ -180,7 -196,7 +196,7 @@@ int zd_mac_set_regdomain(struct zd_mac 
  u8 zd_mac_get_regdomain(struct zd_mac *zd_mac);
  
  int zd_mac_request_channel(struct zd_mac *mac, u8 channel);
- int zd_mac_get_channel(struct zd_mac *mac, u8 *channel, u8 *flags);
+ u8 zd_mac_get_channel(struct zd_mac *mac);
  
  int zd_mac_set_mode(struct zd_mac *mac, u32 mode);
  int zd_mac_get_mode(struct zd_mac *mac, u32 *mode);
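
The __attribute__((packed)) annotations matter because rx_length_info and rx_status mirror an on-air byte layout. The sketch below uses made-up field widths chosen so that compiler padding is actually visible; in the real structs, which are all u8 and __le16 fields, the attribute mostly documents intent and guards against future additions:

#include <stdio.h>

struct tag_padded { unsigned short length[3]; unsigned int tag; };
struct tag_packed { unsigned short length[3]; unsigned int tag; }
        __attribute__((packed));

int main(void)
{
        /* The compiler may insert padding before 'tag' to align it;
         * the packed variant matches a wire format byte for byte. */
        printf("padded: %zu bytes, packed: %zu bytes\n",
               sizeof(struct tag_padded), sizeof(struct tag_packed));
        return 0;
}
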
diff --combined drivers/pcmcia/ds.c
index e469a46a388b79c639b789396d59318fe0890031,21d83a895b21c2ed9d97c7bcaea5750c7671fb79..ff14fd8f0cd16c65eabfb5ddf2bcb4c3b2a5afe3
@@@ -698,10 -698,9 +698,10 @@@ static int pcmcia_card_add(struct pcmci
  }
  
  
 -static void pcmcia_delayed_add_pseudo_device(void *data)
 +static void pcmcia_delayed_add_pseudo_device(struct work_struct *work)
  {
 -      struct pcmcia_socket *s = data;
 +      struct pcmcia_socket *s =
 +              container_of(work, struct pcmcia_socket, device_add);
        pcmcia_device_add(s, 0);
        s->pcmcia_state.device_add_pending = 0;
  }
@@@ -1247,7 -1246,7 +1247,7 @@@ static int __devinit pcmcia_bus_add_soc
        init_waitqueue_head(&socket->queue);
  #endif
        INIT_LIST_HEAD(&socket->devices_list);
 -      INIT_WORK(&socket->device_add, pcmcia_delayed_add_pseudo_device, socket);
 +      INIT_WORK(&socket->device_add, pcmcia_delayed_add_pseudo_device);
        memset(&socket->pcmcia_state, 0, sizeof(u8));
        socket->device_count = 0;
  
@@@ -1273,7 -1272,9 +1273,9 @@@ static void pcmcia_bus_remove_socket(st
        pccard_register_pcmcia(socket, NULL);
  
        /* unregister any unbound devices */
+       mutex_lock(&socket->skt_mutex);
        pcmcia_card_remove(socket, NULL);
+       mutex_unlock(&socket->skt_mutex);
  
        pcmcia_put_socket(socket);
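
Taking skt_mutex around pcmcia_card_remove() serializes removal against the delayed pseudo-device add scheduled through socket->device_add. A toy pthread model of the guard; the type and field names are illustrative:

#include <pthread.h>
#include <stdio.h>

struct socket_ctx {
        pthread_mutex_t skt_mutex;      /* models socket->skt_mutex */
        int device_count;
};

static void card_remove(struct socket_ctx *s)
{
        /* Unregisters unbound devices; must not race a concurrent add. */
        s->device_count = 0;
}

int main(void)
{
        struct socket_ctx s = { PTHREAD_MUTEX_INITIALIZER, 3 };

        pthread_mutex_lock(&s.skt_mutex);
        card_remove(&s);
        pthread_mutex_unlock(&s.skt_mutex);
        printf("%d devices left\n", s.device_count);
        return 0;
}
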
  
diff --combined drivers/rtc/rtc-dev.c
index dcf5f86461f711cbeda9584f4a82c4aef6ac0b54,814b9e1873f55ef8061b4abfc66ab5776da38dc1..828b329e08e0ca62a23617863825a325ca3511d7
@@@ -53,16 -53,17 +53,18 @@@ static int rtc_dev_open(struct inode *i
   * Routine to poll RTC seconds field for change as often as possible,
   * after first RTC_UIE use timer to reduce polling
   */
 -static void rtc_uie_task(void *data)
 +static void rtc_uie_task(struct work_struct *work)
  {
 -      struct rtc_device *rtc = data;
 +      struct rtc_device *rtc =
 +              container_of(work, struct rtc_device, uie_task);
        struct rtc_time tm;
        int num = 0;
        int err;
  
        err = rtc_read_time(&rtc->class_dev, &tm);
-       spin_lock_irq(&rtc->irq_lock);
+       local_irq_disable();
+       spin_lock(&rtc->irq_lock);
        if (rtc->stop_uie_polling || err) {
                rtc->uie_task_active = 0;
        } else if (rtc->oldsecs != tm.tm_sec) {
        } else if (schedule_work(&rtc->uie_task) == 0) {
                rtc->uie_task_active = 0;
        }
-       spin_unlock_irq(&rtc->irq_lock);
+       spin_unlock(&rtc->irq_lock);
        if (num)
                rtc_update_irq(&rtc->class_dev, num, RTC_UF | RTC_IRQF);
+       local_irq_enable();
  }
  static void rtc_uie_timer(unsigned long data)
  {
        struct rtc_device *rtc = (struct rtc_device *)data;
@@@ -215,7 -216,7 +217,7 @@@ static int rtc_dev_ioctl(struct inode *
        struct rtc_wkalrm alarm;
        void __user *uarg = (void __user *) arg;
  
-       /* check that the calles has appropriate permissions
+       /* check that the calling task has appropriate permissions
         * for certain ioctls. doing this check here is useful
         * to avoid duplicate code in each driver.
         */
  
        /* avoid conflicting IRQ users */
        if (cmd == RTC_PIE_ON || cmd == RTC_PIE_OFF || cmd == RTC_IRQP_SET) {
-               spin_lock(&rtc->irq_task_lock);
+               spin_lock_irq(&rtc->irq_task_lock);
                if (rtc->irq_task)
                        err = -EBUSY;
-               spin_unlock(&rtc->irq_task_lock);
+               spin_unlock_irq(&rtc->irq_task_lock);
  
                if (err < 0)
                        return err;
  
                err = rtc_set_time(class_dev, &tm);
                break;
+       case RTC_IRQP_READ:
+               if (ops->irq_set_freq)
+                       err = put_user(rtc->irq_freq, (unsigned long *) arg);
+               break;
+       case RTC_IRQP_SET:
+               if (ops->irq_set_freq)
+                       err = rtc_irq_set_freq(class_dev, rtc->irq_task, arg);
+               break;
  #if 0
        case RTC_EPOCH_SET:
  #ifndef rtc_epoch
@@@ -399,7 -411,7 +412,7 @@@ static int rtc_dev_add_device(struct cl
        spin_lock_init(&rtc->irq_lock);
        init_waitqueue_head(&rtc->irq_queue);
  #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
 -      INIT_WORK(&rtc->uie_task, rtc_uie_task, rtc);
 +      INIT_WORK(&rtc->uie_task, rtc_uie_task);
        setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
  #endif
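
The locking change in rtc_uie_task() splits spin_lock_irq() into local_irq_disable() plus a plain spin_lock(), apparently so that interrupts remain off across the rtc_update_irq() call even though irq_lock has already been released. A compile-only sketch of that shape, with no-op userspace stand-ins for the kernel primitives:

#include <stdio.h>

static void local_irq_disable(void) { }         /* stand-in */
static void local_irq_enable(void)  { }         /* stand-in */
static void spin_lock(int *l)   { *l = 1; }
static void spin_unlock(int *l) { *l = 0; }

static int irq_lock;
static int num = 1;

static void update_irq(void)
{
        /* Runs with interrupts off, but without irq_lock held. */
        printf("uie irq delivered\n");
}

int main(void)
{
        local_irq_disable();
        spin_lock(&irq_lock);
        /* ...inspect and update the polling state... */
        spin_unlock(&irq_lock);
        if (num)
                update_irq();
        local_irq_enable();
        return 0;
}
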
  
diff --combined drivers/usb/atm/speedtch.c
index 7ed34bb1c50f508899271fa632965c532d99f619,a823486495c3a0950ba99cc5d47096927865be07..8ed6c75adf0f3ff640aa8265be7262d37bd31082
@@@ -142,7 -142,7 +142,7 @@@ struct speedtch_instance_data 
  
        struct speedtch_params params; /* set in probe, constant afterwards */
  
 -      struct work_struct status_checker;
 +      struct delayed_work status_checker;
  
        unsigned char last_status;
  
@@@ -498,11 -498,8 +498,11 @@@ static int speedtch_start_synchro(struc
        return ret;
  }
  
 -static void speedtch_check_status(struct speedtch_instance_data *instance)
 +static void speedtch_check_status(struct work_struct *work)
  {
 +      struct speedtch_instance_data *instance =
 +              container_of(work, struct speedtch_instance_data,
 +                           status_checker.work);
        struct usbatm_data *usbatm = instance->usbatm;
        struct atm_dev *atm_dev = usbatm->atm_dev;
        unsigned char *buf = instance->scratch_buffer;
@@@ -579,7 -576,7 +579,7 @@@ static void speedtch_status_poll(unsign
  {
        struct speedtch_instance_data *instance = (void *)data;
  
 -      schedule_work(&instance->status_checker);
 +      schedule_delayed_work(&instance->status_checker, 0);
  
        /* The following check is racy, but the race is harmless */
        if (instance->poll_delay < MAX_POLL_DELAY)
@@@ -599,7 -596,7 +599,7 @@@ static void speedtch_resubmit_int(unsig
        if (int_urb) {
                ret = usb_submit_urb(int_urb, GFP_ATOMIC);
                if (!ret)
 -                      schedule_work(&instance->status_checker);
 +                      schedule_delayed_work(&instance->status_checker, 0);
                else {
                        atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
                        mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY));
@@@ -643,7 -640,7 +643,7 @@@ static void speedtch_handle_int(struct 
  
        if ((int_urb = instance->int_urb)) {
                ret = usb_submit_urb(int_urb, GFP_ATOMIC);
 -              schedule_work(&instance->status_checker);
 +              schedule_delayed_work(&instance->status_checker, 0);
                if (ret < 0) {
                        atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
                        goto fail;
@@@ -837,8 -834,8 +837,8 @@@ static int speedtch_bind(struct usbatm_
                        const struct usb_endpoint_descriptor *endpoint_desc = &desc->endpoint[i].desc;
  
                        if ((endpoint_desc->bEndpointAddress == target_address)) {
-                               use_isoc = (endpoint_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
-                                       USB_ENDPOINT_XFER_ISOC;
+                               use_isoc =
+                                       usb_endpoint_xfer_isoc(endpoint_desc);
                                break;
                        }
                }
  
        usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
  
 -      INIT_WORK(&instance->status_checker, (void *)speedtch_check_status, instance);
 +      INIT_DELAYED_WORK(&instance->status_checker, speedtch_check_status);
  
        instance->status_checker.timer.function = speedtch_status_poll;
        instance->status_checker.timer.data = (unsigned long)instance;
diff --combined drivers/usb/atm/ueagle-atm.c
index e39bb09f5af99af8f2d4e45923252ff4f4d6521f,c137c041f7a43057d355b40654b31816a7092a50..f2d196fa1e8b0a613fac5dca91caf043b14bbed2
@@@ -401,9 -401,8 +401,8 @@@ static int uea_send_modem_cmd(struct us
        int ret = -ENOMEM;
        u8 *xfer_buff;
  
-       xfer_buff = kmalloc(size, GFP_KERNEL);
+       xfer_buff = kmemdup(buff, size, GFP_KERNEL);
        if (xfer_buff) {
-               memcpy(xfer_buff, buff, size);
                ret = usb_control_msg(usb,
                                      usb_sndctrlpipe(usb, 0),
                                      LOAD_INTERNAL,
@@@ -595,14 -594,12 +594,12 @@@ static int uea_idma_write(struct uea_so
        u8 *xfer_buff;
        int bytes_read;
  
-       xfer_buff = kmalloc(size, GFP_KERNEL);
+       xfer_buff = kmemdup(data, size, GFP_KERNEL);
        if (!xfer_buff) {
                uea_err(INS_TO_USBDEV(sc), "can't allocate xfer_buff\n");
                return ret;
        }
  
-       memcpy(xfer_buff, data, size);
        ret = usb_bulk_msg(sc->usb_dev,
                         usb_sndbulkpipe(sc->usb_dev, UEA_IDMA_PIPE),
                         xfer_buff, size, &bytes_read, BULK_TIMEOUT);
@@@ -658,9 -655,9 +655,9 @@@ static int request_dsp(struct uea_soft
  /*
   * The uea_load_page() function must be called within a process context
   */
 -static void uea_load_page(void *xsc)
 +static void uea_load_page(struct work_struct *work)
  {
 -      struct uea_softc *sc = xsc;
 +      struct uea_softc *sc = container_of(work, struct uea_softc, task);
        u16 pageno = sc->pageno;
        u16 ovl = sc->ovl;
        struct block_info bi;
@@@ -765,12 -762,11 +762,11 @@@ static int uea_request(struct uea_soft
        u8 *xfer_buff;
        int ret = -ENOMEM;
  
-       xfer_buff = kmalloc(size, GFP_KERNEL);
+       xfer_buff = kmemdup(data, size, GFP_KERNEL);
        if (!xfer_buff) {
                uea_err(INS_TO_USBDEV(sc), "can't allocate xfer_buff\n");
                return ret;
        }
-       memcpy(xfer_buff, data, size);
  
        ret = usb_control_msg(sc->usb_dev, usb_sndctrlpipe(sc->usb_dev, 0),
                              UCDC_SEND_ENCAPSULATED_COMMAND,
@@@ -1352,7 -1348,7 +1348,7 @@@ static int uea_boot(struct uea_softc *s
  
        uea_enters(INS_TO_USBDEV(sc));
  
 -      INIT_WORK(&sc->task, uea_load_page, sc);
 +      INIT_WORK(&sc->task, uea_load_page);
        init_waitqueue_head(&sc->sync_q);
        init_waitqueue_head(&sc->cmv_ack_wait);
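
Each kmalloc()-plus-memcpy() pair in this driver collapses into a single kmemdup() call. A userspace model of the helper, assuming nothing beyond libc:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace model of kmemdup(): allocate and copy in one step. */
static void *memdup(const void *src, size_t len)
{
        void *p = malloc(len);

        if (p)
                memcpy(p, src, len);
        return p;
}

int main(void)
{
        const unsigned char cmd[] = { 0x01, 0x02, 0x03 };
        unsigned char *xfer_buff = memdup(cmd, sizeof(cmd));

        if (!xfer_buff)
                return 1;
        printf("%02x %02x %02x\n", xfer_buff[0], xfer_buff[1], xfer_buff[2]);
        free(xfer_buff);
        return 0;
}

Besides saving a line per call site, the combined call leaves no window in which the buffer exists uninitialized.
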
  
diff --combined drivers/usb/class/cdc-acm.c
index 6408e10fdbf8e3d5bcf1ef326607e57dd7a8a346,ec3438dc8ee5e99d68346c7d428c7fae75b4c148..7f1fa956dcdb6b0c558f8bb9a835a07222f22867
@@@ -421,9 -421,9 +421,9 @@@ static void acm_write_bulk(struct urb *
                schedule_work(&acm->work);
  }
  
 -static void acm_softint(void *private)
 +static void acm_softint(struct work_struct *work)
  {
 -      struct acm *acm = private;
 +      struct acm *acm = container_of(work, struct acm, work);
        dbg("Entering acm_softint.");
        
        if (!ACM_READY(acm))
@@@ -892,7 -892,7 +892,7 @@@ skip_normal_probe
  
  
        /* workaround for switched endpoints */
-       if ((epread->bEndpointAddress & USB_DIR_IN) != USB_DIR_IN) {
+       if (!usb_endpoint_dir_in(epread)) {
                /* descriptors are swapped */
                struct usb_endpoint_descriptor *t;
                dev_dbg(&intf->dev,"The data interface has switched endpoints");
        acm->rx_buflimit = num_rx_buf;
        acm->urb_task.func = acm_rx_tasklet;
        acm->urb_task.data = (unsigned long) acm;
 -      INIT_WORK(&acm->work, acm_softint, acm);
 +      INIT_WORK(&acm->work, acm_softint);
        spin_lock_init(&acm->throttle_lock);
        spin_lock_init(&acm->write_lock);
        spin_lock_init(&acm->read_lock);
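
usb_endpoint_dir_in(), used above in place of the open-coded bEndpointAddress test, is exactly the mask-and-compare being deleted from drivers/usb/core/usb.c further down in this diff. A standalone sketch; the struct is a stand-in, the mask values are the standard USB ones:

#include <stdio.h>

#define USB_DIR_IN              0x80
#define USB_ENDPOINT_DIR_MASK   0x80

struct ep_desc { unsigned char bEndpointAddress; };

static int endpoint_dir_in(const struct ep_desc *epd)
{
        return (epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN;
}

int main(void)
{
        struct ep_desc ep = { .bEndpointAddress = 0x81 };       /* EP 1 IN */

        printf("IN endpoint: %d\n", endpoint_dir_in(&ep));      /* 1 */
        return 0;
}
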
diff --combined drivers/usb/core/hub.c
index ad0ffbe8f7d78c2832514427d4c4aa9a76bb92e1,0ce393eb3c4b0bea98e9a6ec8156ca5272160018..39186db1015fab9ebe7ad7d0fd0a3c605913f72e
  #include "hcd.h"
  #include "hub.h"
  
+ struct usb_hub {
+       struct device           *intfdev;       /* the "interface" device */
+       struct usb_device       *hdev;
+       struct urb              *urb;           /* for interrupt polling pipe */
+       /* buffer for urb ... with extra space in case of babble */
+       char                    (*buffer)[8];
+       dma_addr_t              buffer_dma;     /* DMA address for buffer */
+       union {
+               struct usb_hub_status   hub;
+               struct usb_port_status  port;
+       }                       *status;        /* buffer for status reports */
+       int                     error;          /* last reported error */
+       int                     nerrors;        /* track consecutive errors */
+       struct list_head        event_list;     /* hubs w/data or errs ready */
+       unsigned long           event_bits[1];  /* status change bitmask */
+       unsigned long           change_bits[1]; /* ports with logical connect
+                                                       status change */
+       unsigned long           busy_bits[1];   /* ports being reset or
+                                                       resumed */
+ #if USB_MAXCHILDREN > 31 /* 8*sizeof(unsigned long) - 1 */
+ #error event_bits[] is too short!
+ #endif
+       struct usb_hub_descriptor *descriptor;  /* class descriptor */
+       struct usb_tt           tt;             /* Transaction Translator */
+       unsigned                mA_per_port;    /* current for each child */
+       unsigned                limited_power:1;
+       unsigned                quiescing:1;
+       unsigned                activating:1;
+       unsigned                has_indicators:1;
+       u8                      indicator[USB_MAXCHILDREN];
+       struct work_struct      leds;
+ };
  /* Protect struct usb_device->state and ->children members
   * Note: Both are also protected by ->dev.sem, except that ->state can
   * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
@@@ -45,6 -86,16 +86,16 @@@ static DECLARE_WAIT_QUEUE_HEAD(khubd_wa
  
  static struct task_struct *khubd_task;
  
+ /* multithreaded probe logic */
+ static int multithread_probe =
+ #ifdef CONFIG_USB_MULTITHREAD_PROBE
+       1;
+ #else
+       0;
+ #endif
+ module_param(multithread_probe, bool, S_IRUGO);
+ MODULE_PARM_DESC(multithread_probe, "Run each USB device probe in a new thread");
  /* cycle leds on hubs that aren't blinking for attention */
  static int blinkenlights = 0;
  module_param (blinkenlights, bool, S_IRUGO);
@@@ -167,10 -218,9 +218,10 @@@ static void set_port_led
  
  #define       LED_CYCLE_PERIOD        ((2*HZ)/3)
  
 -static void led_work (void *__hub)
 +static void led_work (struct work_struct *work)
  {
 -      struct usb_hub          *hub = __hub;
 +      struct usb_hub          *hub =
 +              container_of(work, struct usb_hub, leds.work);
        struct usb_device       *hdev = hub->hdev;
        unsigned                i;
        unsigned                changed = 0;
@@@ -277,6 -327,9 +328,9 @@@ static void kick_khubd(struct usb_hub *
  {
        unsigned long   flags;
  
+       /* Suppress autosuspend until khubd runs */
+       to_usb_interface(hub->intfdev)->pm_usage_cnt = 1;
        spin_lock_irqsave(&hub_event_lock, flags);
        if (list_empty(&hub->event_list)) {
                list_add_tail(&hub->event_list, &hub_event_list);
@@@ -352,10 -405,9 +406,10 @@@ hub_clear_tt_buffer (struct usb_device 
   * talking to TTs must queue control transfers (not just bulk and iso), so
   * both can talk to the same hub concurrently.
   */
 -static void hub_tt_kevent (void *arg)
 +static void hub_tt_kevent (struct work_struct *work)
  {
 -      struct usb_hub          *hub = arg;
 +      struct usb_hub          *hub =
 +              container_of(work, struct usb_hub, tt.kevent);
        unsigned long           flags;
  
        spin_lock_irqsave (&hub->tt.lock, flags);
@@@ -459,7 -511,6 +513,6 @@@ static void hub_quiesce(struct usb_hub 
        /* (nonblocking) khubd and related activity won't re-trigger */
        hub->quiescing = 1;
        hub->activating = 0;
-       hub->resume_root_hub = 0;
  
        /* (blocking) stop khubd and related activity */
        usb_kill_urb(hub->urb);
@@@ -475,7 -526,7 +528,7 @@@ static void hub_activate(struct usb_hu
  
        hub->quiescing = 0;
        hub->activating = 1;
-       hub->resume_root_hub = 0;
        status = usb_submit_urb(hub->urb, GFP_NOIO);
        if (status < 0)
                dev_err(hub->intfdev, "activate --> %d\n", status);
@@@ -643,7 -694,7 +696,7 @@@ static int hub_configure(struct usb_hu
  
        spin_lock_init (&hub->tt.lock);
        INIT_LIST_HEAD (&hub->tt.clear_list);
 -      INIT_WORK (&hub->tt.kevent, hub_tt_kevent, hub);
 +      INIT_WORK (&hub->tt.kevent, hub_tt_kevent);
        switch (hdev->descriptor.bDeviceProtocol) {
                case 0:
                        break;
                dev_dbg(hub_dev, "%sover-current condition exists\n",
                        (hubstatus & HUB_STATUS_OVERCURRENT) ? "" : "no ");
  
-       /* set up the interrupt endpoint */
+       /* set up the interrupt endpoint
+        * We use the EP's maxpacket size instead of (PORTS+1+7)/8
+        * bytes as USB2.0[11.12.3] says because some hubs are known
+        * to send more data (and thus cause overflow). For root hubs,
+        * maxpktsize is defined in hcd.c's fake endpoint descriptors
+        * to be big enough for at least USB_MAXCHILDREN ports. */
        pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress);
        maxp = usb_maxpacket(hdev, pipe, usb_pipeout(pipe));
  
@@@ -882,9 -938,10 +940,10 @@@ descriptor_error
        INIT_LIST_HEAD(&hub->event_list);
        hub->intfdev = &intf->dev;
        hub->hdev = hdev;
 -      INIT_WORK(&hub->leds, led_work, hub);
 +      INIT_DELAYED_WORK(&hub->leds, led_work);
  
        usb_set_intfdata (intf, hub);
+       intf->needs_remote_wakeup = 1;
  
        if (hdev->speed == USB_SPEED_HIGH)
                highspeed_hubs++;
@@@ -982,6 -1039,8 +1041,8 @@@ static void recursively_mark_NOTATTACHE
                if (udev->children[i])
                        recursively_mark_NOTATTACHED(udev->children[i]);
        }
+       if (udev->state == USB_STATE_SUSPENDED)
+               udev->discon_suspended = 1;
        udev->state = USB_STATE_NOTATTACHED;
  }
  
@@@ -1171,6 -1230,14 +1232,14 @@@ void usb_disconnect(struct usb_device *
        *pdev = NULL;
        spin_unlock_irq(&device_state_lock);
  
+       /* Decrement the parent's count of unsuspended children */
+       if (udev->parent) {
+               usb_pm_lock(udev);
+               if (!udev->discon_suspended)
+                       usb_autosuspend_device(udev->parent);
+               usb_pm_unlock(udev);
+       }
        put_device(&udev->dev);
  }
  
@@@ -1193,29 -1260,17 +1262,17 @@@ static inline void show_string(struct u
  static int __usb_port_suspend(struct usb_device *, int port1);
  #endif
  
- /**
-  * usb_new_device - perform initial device setup (usbcore-internal)
-  * @udev: newly addressed device (in ADDRESS state)
-  *
-  * This is called with devices which have been enumerated, but not yet
-  * configured.  The device descriptor is available, but not descriptors
-  * for any device configuration.  The caller must have locked either
-  * the parent hub (if udev is a normal device) or else the
-  * usb_bus_list_lock (if udev is a root hub).  The parent's pointer to
-  * udev has already been installed, but udev is not yet visible through
-  * sysfs or other filesystem code.
-  *
-  * Returns 0 for success (device is configured and listed, with its
-  * interfaces, in sysfs); else a negative errno value.
-  *
-  * This call is synchronous, and may not be used in an interrupt context.
-  *
-  * Only the hub driver or root-hub registrar should ever call this.
-  */
- int usb_new_device(struct usb_device *udev)
+ static int __usb_new_device(void *void_data)
  {
+       struct usb_device *udev = void_data;
        int err;
  
+       /* Pin this module in memory so that a probe sequence sleeping
+        * in a new thread cannot have the module unloaded out from
+        * under it.
+        */
+       if (!try_module_get(THIS_MODULE))
+               return -EINVAL;
        err = usb_get_configuration(udev);
        if (err < 0) {
                dev_err(&udev->dev, "can't read configurations, error %d\n",
                goto fail;
        }
  
-       return 0;
+       /* Increment the parent's count of unsuspended children */
+       if (udev->parent)
+               usb_autoresume_device(udev->parent);
+ exit:
+       module_put(THIS_MODULE);
+       return err;
  
  fail:
        usb_set_device_state(udev, USB_STATE_NOTATTACHED);
-       return err;
+       goto exit;
  }
  
+ /**
+  * usb_new_device - perform initial device setup (usbcore-internal)
+  * @udev: newly addressed device (in ADDRESS state)
+  *
+  * This is called with devices which have been enumerated, but not yet
+  * configured.  The device descriptor is available, but not descriptors
+  * for any device configuration.  The caller must have locked either
+  * the parent hub (if udev is a normal device) or else the
+  * usb_bus_list_lock (if udev is a root hub).  The parent's pointer to
+  * udev has already been installed, but udev is not yet visible through
+  * sysfs or other filesystem code.
+  *
+  * The return value of this function depends on whether the
+  * multithread_probe variable is set.  If it is set, this returns
+  * whether the probe thread was created successfully, and the result
+  * of the probe itself is reported asynchronously.  If it is not set,
+  * this returns whether the device was configured properly (0 if the
+  * device is configured and listed, with its interfaces, in sysfs);
+  * else a negative errno value.
+  *
+  * This call is synchronous, and may not be used in an interrupt context.
+  *
+  * Only the hub driver or root-hub registrar should ever call this.
+  */
+ int usb_new_device(struct usb_device *udev)
+ {
+       struct task_struct *probe_task;
+       int ret = 0;
+       if (multithread_probe) {
+               probe_task = kthread_run(__usb_new_device, udev,
+                                        "usb-probe-%d", udev->devnum);
+               if (IS_ERR(probe_task))
+                       ret = PTR_ERR(probe_task);
+       } else
+               ret = __usb_new_device(udev);
+       return ret;
+ }
  
  static int hub_port_status(struct usb_hub *hub, int port1,
                               u16 *status, u16 *change)
        int ret;
  
        ret = get_port_status(hub->hdev, port1, &hub->status->port);
-       if (ret < 0)
+       if (ret < 4) {
                dev_err (hub->intfdev,
                        "%s failed (err = %d)\n", __FUNCTION__, ret);
-       else {
+               if (ret >= 0)
+                       ret = -EIO;
+       } else {
                *status = le16_to_cpu(hub->status->port.wPortStatus);
                *change = le16_to_cpu(hub->status->port.wPortChange); 
                ret = 0;
@@@ -1676,6 -1776,12 +1778,12 @@@ static in
  hub_port_resume(struct usb_hub *hub, int port1, struct usb_device *udev)
  {
        int     status;
+       u16     portchange, portstatus;
+       /* Skip the initial Clear-Suspend step for a remote wakeup */
+       status = hub_port_status(hub, port1, &portstatus, &portchange);
+       if (status == 0 && !(portstatus & USB_PORT_STAT_SUSPEND))
+               goto SuspendCleared;
  
        // dev_dbg(hub->intfdev, "resume port %d\n", port1);
  
                        "can't resume port %d, status %d\n",
                        port1, status);
        } else {
-               u16             devstatus;
-               u16             portchange;
                /* drive resume for at least 20 msec */
                if (udev)
                        dev_dbg(&udev->dev, "usb %sresume\n",
                 * stop resume signaling.  Then finish the resume
                 * sequence.
                 */
-               devstatus = portchange = 0;
-               status = hub_port_status(hub, port1,
-                               &devstatus, &portchange);
+               status = hub_port_status(hub, port1, &portstatus, &portchange);
+ SuspendCleared:
                if (status < 0
-                               || (devstatus & LIVE_FLAGS) != LIVE_FLAGS
-                               || (devstatus & USB_PORT_STAT_SUSPEND) != 0
+                               || (portstatus & LIVE_FLAGS) != LIVE_FLAGS
+                               || (portstatus & USB_PORT_STAT_SUSPEND) != 0
                                ) {
                        dev_dbg(hub->intfdev,
                                "port %d status %04x.%04x after resume, %d\n",
-                               port1, portchange, devstatus, status);
+                               port1, portchange, portstatus, status);
                        if (status >= 0)
                                status = -ENODEV;
                } else {
@@@ -1776,23 -1878,16 +1880,16 @@@ static int remote_wakeup(struct usb_dev
  {
        int     status = 0;
  
-       /* All this just to avoid sending a port-resume message
-        * to the parent hub! */
        usb_lock_device(udev);
-       usb_pm_lock(udev);
        if (udev->state == USB_STATE_SUSPENDED) {
                dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-");
-               /* TRSMRCY = 10 msec */
-               msleep(10);
-               status = finish_port_resume(udev);
+               status = usb_autoresume_device(udev);
+               /* Give the interface drivers a chance to do something,
+                * then autosuspend the device again. */
                if (status == 0)
-                       udev->dev.power.power_state.event = PM_EVENT_ON;
+                       usb_autosuspend_device(udev);
        }
-       usb_pm_unlock(udev);
-       if (status == 0)
-               usb_autoresume_device(udev, 0);
        usb_unlock_device(udev);
        return status;
  }
@@@ -1856,6 -1951,8 +1953,8 @@@ static int hub_suspend(struct usb_inter
                }
        }
  
+       dev_dbg(&intf->dev, "%s\n", __FUNCTION__);
        /* "global suspend" of the downstream HC-to-USB interface */
        if (!hdev->parent) {
                struct usb_bus  *bus = hdev->bus;
  
  static int hub_resume(struct usb_interface *intf)
  {
-       struct usb_device       *hdev = interface_to_usbdev(intf);
        struct usb_hub          *hub = usb_get_intfdata (intf);
+       struct usb_device       *hdev = hub->hdev;
        int                     status;
  
+       dev_dbg(&intf->dev, "%s\n", __FUNCTION__);
        /* "global resume" of the downstream HC-to-USB interface */
        if (!hdev->parent) {
                struct usb_bus  *bus = hdev->bus;
@@@ -1920,7 -2019,6 +2021,6 @@@ void usb_resume_root_hub(struct usb_dev
  {
        struct usb_hub *hub = hdev_to_hub(hdev);
  
-       hub->resume_root_hub = 1;
        kick_khubd(hub);
  }
  
@@@ -2283,7 -2381,7 +2383,7 @@@ check_highspeed (struct usb_hub *hub, s
                /* hub LEDs are probably harder to miss than syslog */
                if (hub->has_indicators) {
                        hub->indicator[port1-1] = INDICATOR_GREEN_BLINK;
 -                      schedule_work (&hub->leds);
 +                      schedule_delayed_work (&hub->leds, 0);
                }
        }
        kfree(qual);
@@@ -2457,7 -2555,7 +2557,7 @@@ static void hub_port_connect_change(str
                                if (hub->has_indicators) {
                                        hub->indicator[port1-1] =
                                                INDICATOR_AMBER_BLINK;
 -                                      schedule_work (&hub->leds);
 +                                      schedule_delayed_work (&hub->leds, 0);
                                }
                                status = -ENOTCONN;     /* Don't retry */
                                goto loop_disable;
@@@ -2557,16 -2655,13 +2657,13 @@@ static void hub_events(void
                intf = to_usb_interface(hub->intfdev);
                hub_dev = &intf->dev;
  
-               i = hub->resume_root_hub;
-               dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x%s\n",
+               dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
                                hdev->state, hub->descriptor
                                        ? hub->descriptor->bNbrPorts
                                        : 0,
                                /* NOTE: expects max 15 ports... */
                                (u16) hub->change_bits[0],
-                               (u16) hub->event_bits[0],
-                               i ? ", resume root" : "");
+                               (u16) hub->event_bits[0]);
  
                usb_get_intf(intf);
                spin_unlock_irq(&hub_event_lock);
                        goto loop;
                }
  
-               /* Is this is a root hub wanting to reactivate the downstream
-                * ports?  If so, be sure the interface resumes even if its
-                * stub "device" node was never suspended.
-                */
-               if (i)
-                       usb_autoresume_device(hdev, 0);
+               /* Autoresume */
+               ret = usb_autopm_get_interface(intf);
+               if (ret) {
+                       dev_dbg(hub_dev, "Can't autoresume: %d\n", ret);
+                       goto loop;
+               }
  
-               /* If this is an inactive or suspended hub, do nothing */
+               /* If this is an inactive hub, do nothing */
                if (hub->quiescing)
-                       goto loop;
+                       goto loop_autopm;
  
                if (hub->error) {
                        dev_dbg (hub_dev, "resetting for error %d\n",
                        if (ret) {
                                dev_dbg (hub_dev,
                                        "error resetting hub: %d\n", ret);
-                               goto loop;
+                               goto loop_autopm;
                        }
  
                        hub->nerrors = 0;
                if (!hdev->parent && !hub->busy_bits[0])
                        usb_enable_root_hub_irq(hdev->bus);
  
+ loop_autopm:
+               /* Allow autosuspend if we're not going to run again */
+               if (list_empty(&hub->event_list))
+                       usb_autopm_enable(intf);
  loop:
                usb_unlock_device(hdev);
                usb_put_intf(intf);
@@@ -2775,6 -2874,7 +2876,7 @@@ static struct usb_driver hub_driver = 
        .post_reset =   hub_post_reset,
        .ioctl =        hub_ioctl,
        .id_table =     hub_id_table,
+       .supports_autosuspend = 1,
  };
  
  int usb_hub_init(void)
@@@ -2999,7 -3099,7 +3101,7 @@@ int usb_reset_composite_device(struct u
        }
  
        /* Prevent autosuspend during the reset */
-       usb_autoresume_device(udev, 1);
+       usb_autoresume_device(udev);
  
        if (iface && iface->condition != USB_INTERFACE_BINDING)
                iface = NULL;
                }
        }
  
-       usb_autosuspend_device(udev, 1);
+       usb_autosuspend_device(udev);
        return ret;
  }
  EXPORT_SYMBOL(usb_reset_composite_device);
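
With multithread_probe set, usb_new_device() reports only whether the probe thread could be started; the probe's own outcome arrives asynchronously. A userspace sketch of that contract using pthreads in place of kthread_run(); the device name is hypothetical and the join exists only so the demo does not exit before the thread runs:

#include <pthread.h>
#include <stdio.h>

static int multithread_probe = 1;

static void *probe_fn(void *dev)
{
        /* Slow device setup runs here without blocking the caller. */
        printf("probing %s\n", (const char *)dev);
        return NULL;
}

int main(void)
{
        pthread_t t;
        char dev[] = "usb1-1";

        if (multithread_probe) {
                /* Caller learns only whether the thread started. */
                if (pthread_create(&t, NULL, probe_fn, dev) != 0)
                        return 1;
                pthread_join(t, NULL);
        } else {
                probe_fn(dev);          /* synchronous path */
        }
        return 0;
}
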
diff --combined drivers/usb/core/message.c
index 89572bc021b10544786866cab369f07300800115,29b0fa9ff9d0a7e2776e24217cc9619cd33f4450..7390b67c609d043667ed4c76de460815f106a769
@@@ -764,7 -764,7 +764,7 @@@ int usb_string(struct usb_device *dev, 
                        err = -EINVAL;
                        goto errout;
                } else {
-                       dev->have_langid = -1;
+                       dev->have_langid = 1;
                        dev->string_langid = tbuf[2] | (tbuf[3]<< 8);
                                /* always use the first langid listed */
                        dev_dbg (&dev->dev, "default language 0x%04x\n",
@@@ -1398,7 -1398,7 +1398,7 @@@ free_interfaces
        }
  
        /* Wake up the device so we can send it the Set-Config request */
-       ret = usb_autoresume_device(dev, 1);
+       ret = usb_autoresume_device(dev);
        if (ret)
                goto free_interfaces;
  
        dev->actconfig = cp;
        if (!cp) {
                usb_set_device_state(dev, USB_STATE_ADDRESS);
-               usb_autosuspend_device(dev, 1);
+               usb_autosuspend_device(dev);
                goto free_interfaces;
        }
        usb_set_device_state(dev, USB_STATE_CONFIGURED);
                usb_create_sysfs_intf_files (intf);
        }
  
-       usb_autosuspend_device(dev, 1);
+       usb_autosuspend_device(dev);
        return 0;
  }
  
@@@ -1501,10 -1501,9 +1501,10 @@@ struct set_config_request 
  };
  
  /* Worker routine for usb_driver_set_configuration() */
 -static void driver_set_config_work(void *_req)
 +static void driver_set_config_work(struct work_struct *work)
  {
 -      struct set_config_request *req = _req;
 +      struct set_config_request *req =
 +              container_of(work, struct set_config_request, work);
  
        usb_lock_device(req->udev);
        usb_set_configuration(req->udev, req->config);
@@@ -1542,7 -1541,7 +1542,7 @@@ int usb_driver_set_configuration(struc
                return -ENOMEM;
        req->udev = udev;
        req->config = config;
 -      INIT_WORK(&req->work, driver_set_config_work, req);
 +      INIT_WORK(&req->work, driver_set_config_work);
  
        usb_get_dev(udev);
        if (!schedule_work(&req->work)) {
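
usb_driver_set_configuration() defers the real Set-Config to a work item, taking a device reference before scheduling and dropping it inside the worker so the device cannot disappear in between. A refcount-only model of that handoff; names and types are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct dev { int refs; };

struct set_config_request { struct dev *udev; int config; };

static void get_dev(struct dev *d) { d->refs++; }
static void put_dev(struct dev *d)
{
        if (--d->refs == 0)
                printf("device freed\n");
}

/* Worker: consumes the request and the reference the scheduler took. */
static void config_work(struct set_config_request *req)
{
        printf("set config %d\n", req->config);
        put_dev(req->udev);
        free(req);
}

int main(void)
{
        struct dev d = { .refs = 1 };
        struct set_config_request *req = malloc(sizeof(*req));

        if (!req)
                return 1;
        req->udev = &d;
        req->config = 1;
        get_dev(&d);            /* pin the device until the work runs */
        config_work(req);       /* a real workqueue would run this later */
        put_dev(&d);            /* caller's own reference */
        return 0;
}
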
diff --combined drivers/usb/core/usb.c
index ab2f68fc7d2d1ab2cf9647ca69e6010ac2b6d087,81cb52564e681eb1e4a4b35bbb8a7e6d2d4e2562..02426d0b9a347f514f69b4c4f706bb9c5a48caef
@@@ -200,20 -200,12 +200,13 @@@ static void ksuspend_usb_cleanup(void
        destroy_workqueue(ksuspend_usb_wq);
  }
  
- #else
- #define ksuspend_usb_init()   0
- #define ksuspend_usb_cleanup()        do {} while (0)
- #endif
  #ifdef        CONFIG_USB_SUSPEND
  
  /* usb_autosuspend_work - callback routine to autosuspend a USB device */
 -static void usb_autosuspend_work(void *_udev)
 +static void usb_autosuspend_work(struct work_struct *work)
  {
 -      struct usb_device       *udev = _udev;
 +      struct usb_device *udev =
 +              container_of(work, struct usb_device, autosuspend.work);
  
        usb_pm_lock(udev);
        udev->auto_pm = 1;
  
  #else
  
 -static void usb_autosuspend_work(void *_udev)
 +static void usb_autosuspend_work(struct work_struct *work)
  {}
  
- #endif
+ #endif        /* CONFIG_USB_SUSPEND */
+ #else
+ #define ksuspend_usb_init()   0
+ #define ksuspend_usb_cleanup()        do {} while (0)
+ #endif        /* CONFIG_PM */
  
  /**
   * usb_alloc_dev - usb device constructor (usbcore-internal)
@@@ -305,7 -304,7 +305,7 @@@ usb_alloc_dev(struct usb_device *parent
  
  #ifdef        CONFIG_PM
        mutex_init(&dev->pm_mutex);
 -      INIT_WORK(&dev->autosuspend, usb_autosuspend_work, dev);
 +      INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
  #endif
        return dev;
  }
@@@ -538,138 -537,6 +538,6 @@@ int usb_get_current_frame_number(struc
        return usb_hcd_get_frame_number (dev);
  }
  
- /**
-  * usb_endpoint_dir_in - check if the endpoint has IN direction
-  * @epd: endpoint to be checked
-  *
-  * Returns true if the endpoint is of type IN, otherwise it returns false.
-  */
- int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
- {
-       return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN);
- }
- /**
-  * usb_endpoint_dir_out - check if the endpoint has OUT direction
-  * @epd: endpoint to be checked
-  *
-  * Returns true if the endpoint is of type OUT, otherwise it returns false.
-  */
- int usb_endpoint_dir_out(const struct usb_endpoint_descriptor *epd)
- {
-       return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
- }
- /**
-  * usb_endpoint_xfer_bulk - check if the endpoint has bulk transfer type
-  * @epd: endpoint to be checked
-  *
-  * Returns true if the endpoint is of type bulk, otherwise it returns false.
-  */
- int usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd)
- {
-       return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
-               USB_ENDPOINT_XFER_BULK);
- }
- /**
-  * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type
-  * @epd: endpoint to be checked
-  *
-  * Returns true if the endpoint is of type interrupt, otherwise it returns
-  * false.
-  */
- int usb_endpoint_xfer_int(const struct usb_endpoint_descriptor *epd)
- {
-       return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
-               USB_ENDPOINT_XFER_INT);
- }
- /**
-  * usb_endpoint_xfer_isoc - check if the endpoint has isochronous transfer type
-  * @epd: endpoint to be checked
-  *
-  * Returns true if the endpoint is of type isochronous, otherwise it returns
-  * false.
-  */
- int usb_endpoint_xfer_isoc(const struct usb_endpoint_descriptor *epd)
- {
-       return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
-               USB_ENDPOINT_XFER_ISOC);
- }
- /**
-  * usb_endpoint_is_bulk_in - check if the endpoint is bulk IN
-  * @epd: endpoint to be checked
-  *
-  * Returns true if the endpoint has bulk transfer type and IN direction,
-  * otherwise it returns false.
-  */
- int usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd)
- {
-       return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd));
- }
- /**
-  * usb_endpoint_is_bulk_out - check if the endpoint is bulk OUT
-  * @epd: endpoint to be checked
-  *
-  * Returns true if the endpoint has bulk transfer type and OUT direction,
-  * otherwise it returns false.
-  */
- int usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd)
- {
-       return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_out(epd));
- }
- /**
-  * usb_endpoint_is_int_in - check if the endpoint is interrupt IN
-  * @epd: endpoint to be checked
-  *
-  * Returns true if the endpoint has interrupt transfer type and IN direction,
-  * otherwise it returns false.
-  */
- int usb_endpoint_is_int_in(const struct usb_endpoint_descriptor *epd)
- {
-       return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_in(epd));
- }
- /**
-  * usb_endpoint_is_int_out - check if the endpoint is interrupt OUT
-  * @epd: endpoint to be checked
-  *
-  * Returns true if the endpoint has interrupt transfer type and OUT direction,
-  * otherwise it returns false.
-  */
- int usb_endpoint_is_int_out(const struct usb_endpoint_descriptor *epd)
- {
-       return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_out(epd));
- }
- /**
-  * usb_endpoint_is_isoc_in - check if the endpoint is isochronous IN
-  * @epd: endpoint to be checked
-  *
-  * Returns true if the endpoint has isochronous transfer type and IN direction,
-  * otherwise it returns false.
-  */
- int usb_endpoint_is_isoc_in(const struct usb_endpoint_descriptor *epd)
- {
-       return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_in(epd));
- }
- /**
-  * usb_endpoint_is_isoc_out - check if the endpoint is isochronous OUT
-  * @epd: endpoint to be checked
-  *
-  * Returns true if the endpoint has isochronous transfer type and OUT direction,
-  * otherwise it returns false.
-  */
- int usb_endpoint_is_isoc_out(const struct usb_endpoint_descriptor *epd)
- {
-       return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_out(epd));
- }
  /*-------------------------------------------------------------------*/
  /*
   * __usb_get_extra_descriptor() finds a descriptor of specific type in the
@@@ -1103,18 -970,6 +971,6 @@@ EXPORT_SYMBOL(__usb_get_extra_descripto
  EXPORT_SYMBOL(usb_find_device);
  EXPORT_SYMBOL(usb_get_current_frame_number);
  
- EXPORT_SYMBOL_GPL(usb_endpoint_dir_in);
- EXPORT_SYMBOL_GPL(usb_endpoint_dir_out);
- EXPORT_SYMBOL_GPL(usb_endpoint_xfer_bulk);
- EXPORT_SYMBOL_GPL(usb_endpoint_xfer_int);
- EXPORT_SYMBOL_GPL(usb_endpoint_xfer_isoc);
- EXPORT_SYMBOL_GPL(usb_endpoint_is_bulk_in);
- EXPORT_SYMBOL_GPL(usb_endpoint_is_bulk_out);
- EXPORT_SYMBOL_GPL(usb_endpoint_is_int_in);
- EXPORT_SYMBOL_GPL(usb_endpoint_is_int_out);
- EXPORT_SYMBOL_GPL(usb_endpoint_is_isoc_in);
- EXPORT_SYMBOL_GPL(usb_endpoint_is_isoc_out);
  EXPORT_SYMBOL (usb_buffer_alloc);
  EXPORT_SYMBOL (usb_buffer_free);
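
The #ifdef rearrangement at the top of this file nests CONFIG_USB_SUSPEND inside CONFIG_PM, so the no-op ksuspend stubs exist only when power management is absent altogether. A compile-time sketch of the resulting shape; macro names are shortened, and defining CONFIG_PM locally is the assumption being demonstrated:

#include <stdio.h>

#define CONFIG_PM 1                     /* assume PM enabled */
/* CONFIG_USB_SUSPEND deliberately left undefined */

#ifdef CONFIG_PM
# ifdef CONFIG_USB_SUSPEND
static void autosuspend_work(void) { puts("real autosuspend"); }
# else
static void autosuspend_work(void) { }  /* stub: PM on, suspend off */
# endif
#else
# define ksuspend_init()        0
# define ksuspend_cleanup()     do {} while (0)
#endif

int main(void)
{
        autosuspend_work();
        return 0;
}
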
  
diff --combined drivers/usb/gadget/ether.c
index 107119c54301cf97c28291f2c0dcda20812db937,3bd1dfe565c1bc8a86cdd872115020477ba3e8db..d15bf22b9a030833991bb5b53fa589ed67c48f2b
@@@ -1833,9 -1833,9 +1833,9 @@@ static void rx_fill (struct eth_dev *de
        spin_unlock_irqrestore(&dev->req_lock, flags);
  }
  
 -static void eth_work (void *_dev)
 +static void eth_work (struct work_struct *work)
  {
 -      struct eth_dev          *dev = _dev;
 +      struct eth_dev  *dev = container_of(work, struct eth_dev, work);
  
        if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) {
                if (netif_running (dev->net))
@@@ -1894,13 -1894,13 +1894,13 @@@ static int eth_start_xmit (struct sk_bu
        if (!eth_is_promisc (dev)) {
                u8              *dest = skb->data;
  
-               if (dest [0] & 0x01) {
+               if (is_multicast_ether_addr(dest)) {
                        u16     type;
  
                        /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
                         * SET_ETHERNET_MULTICAST_FILTERS requests
                         */
-                       if (memcmp (dest, net->broadcast, ETH_ALEN) == 0)
+                       if (is_broadcast_ether_addr(dest))
                                type = USB_CDC_PACKET_TYPE_BROADCAST;
                        else
                                type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
@@@ -2398,7 -2398,7 +2398,7 @@@ autoconf_fail
        dev = netdev_priv(net);
        spin_lock_init (&dev->lock);
        spin_lock_init (&dev->req_lock);
 -      INIT_WORK (&dev->work, eth_work, dev);
 +      INIT_WORK (&dev->work, eth_work);
        INIT_LIST_HEAD (&dev->tx_reqs);
        INIT_LIST_HEAD (&dev->rx_reqs);
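
is_multicast_ether_addr() and is_broadcast_ether_addr() replace the literal dest[0] & 0x01 and memcmp-against-broadcast checks above. Userspace models of the two predicates, matching the semantics of the old open-coded tests:

#include <stdio.h>

/* The I/G bit of the first octet marks multicast (broadcast included). */
static int is_multicast(const unsigned char *addr)
{
        return addr[0] & 0x01;
}

static int is_broadcast(const unsigned char *addr)
{
        int i;

        for (i = 0; i < 6; i++)
                if (addr[i] != 0xff)
                        return 0;
        return 1;
}

int main(void)
{
        unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        unsigned char ucast[6] = { 0x00, 0x16, 0x3e, 0x00, 0x00, 0x01 };

        printf("%d %d %d\n", is_multicast(bcast), is_broadcast(bcast),
               is_multicast(ucast));                    /* 1 1 0 */
        return 0;
}
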
  
diff --combined drivers/usb/host/u132-hcd.c
index 4f95a249c9137ff17c1b1a4ba766d55021436822,ef54e310bfc49033203cb88cc2aea69e380fe6bd..a9d7119e3176921958aeafcf977aa4cb5bd17573
@@@ -71,7 -71,7 +71,7 @@@ static int distrust_firmware = 1
  module_param(distrust_firmware, bool, 0);
  MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurren"
          "t setup");
- DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
+ static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
  /*
  * u132_module_lock exists to protect access to global variables
  *
@@@ -163,7 -163,7 +163,7 @@@ struct u132_endp 
          u16 queue_next;
          struct urb *urb_list[ENDP_QUEUE_SIZE];
          struct list_head urb_more;
 -        struct work_struct scheduler;
 +        struct delayed_work scheduler;
  };
  struct u132_ring {
          unsigned in_use:1;
          u8 number;
          struct u132 *u132;
          struct u132_endp *curr_endp;
 -        struct work_struct scheduler;
 +        struct delayed_work scheduler;
  };
  #define OHCI_QUIRK_AMD756 0x01
  #define OHCI_QUIRK_SUPERIO 0x02
@@@ -198,20 -198,16 +198,16 @@@ struct u132 
          u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
          int flags;
          unsigned long next_statechange;
 -        struct work_struct monitor;
 +        struct delayed_work monitor;
          int num_endpoints;
          struct u132_addr addr[MAX_U132_ADDRS];
          struct u132_udev udev[MAX_U132_UDEVS];
          struct u132_port port[MAX_U132_PORTS];
          struct u132_endp *endp[MAX_U132_ENDPS];
  };
- int usb_ftdi_elan_read_reg(struct platform_device *pdev, u32 *data);
- int usb_ftdi_elan_read_pcimem(struct platform_device *pdev, u8 addressofs,
-         u8 width, u32 *data);
- int usb_ftdi_elan_write_pcimem(struct platform_device *pdev, u8 addressofs,
-         u8 width, u32 data);
  /*
- * these can not be inlines because we need the structure offset!!
+ * these cannot be inlines because we need the structure offset!!
  * Does anyone have a better way?????
  */
  #define u132_read_pcimem(u132, member, data) \
@@@ -314,7 -310,7 +310,7 @@@ static void u132_ring_requeue_work(stru
          if (delta > 0) {
                  if (queue_delayed_work(workqueue, &ring->scheduler, delta))
                          return;
 -        } else if (queue_work(workqueue, &ring->scheduler))
 +        } else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
                  return;
          kref_put(&u132->kref, u132_hcd_delete);
          return;
@@@ -393,8 -389,12 +389,8 @@@ static inline void u132_endp_init_kref(
  static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
          unsigned int delta)
  {
 -        if (delta > 0) {
 -                if (queue_delayed_work(workqueue, &endp->scheduler, delta))
 -                        kref_get(&endp->kref);
 -        } else if (queue_work(workqueue, &endp->scheduler))
 -                kref_get(&endp->kref);
 -        return;
 +      if (queue_delayed_work(workqueue, &endp->scheduler, delta))
 +              kref_get(&endp->kref);
  }
  
  static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
@@@ -410,14 -410,24 +406,14 @@@ static inline void u132_monitor_put_kre
  
  static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
  {
 -        if (delta > 0) {
 -                if (queue_delayed_work(workqueue, &u132->monitor, delta)) {
 -                        kref_get(&u132->kref);
 -                }
 -        } else if (queue_work(workqueue, &u132->monitor))
 -                kref_get(&u132->kref);
 -        return;
 +      if (queue_delayed_work(workqueue, &u132->monitor, delta))
 +              kref_get(&u132->kref);
  }
  
  static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
  {
 -        if (delta > 0) {
 -                if (queue_delayed_work(workqueue, &u132->monitor, delta))
 -                        return;
 -        } else if (queue_work(workqueue, &u132->monitor))
 -                return;
 -        kref_put(&u132->kref, u132_hcd_delete);
 -        return;
 +      if (!queue_delayed_work(workqueue, &u132->monitor, delta))
 +              kref_put(&u132->kref, u132_hcd_delete);
  }
  
  static void u132_monitor_cancel_work(struct u132 *u132)
@@@ -479,9 -489,9 +475,9 @@@ static int read_roothub_info(struct u13
          return 0;
  }
  
 -static void u132_hcd_monitor_work(void *data)
 +static void u132_hcd_monitor_work(struct work_struct *work)
  {
 -        struct u132 *u132 = data;
 +        struct u132 *u132 = container_of(work, struct u132, monitor.work);
          if (u132->going > 1) {
                  dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
                          , u132->going);
@@@ -1305,14 -1315,15 +1301,14 @@@ static void u132_hcd_initial_setup_sent
          }
  }
  
 -static void u132_hcd_ring_work_scheduler(void *data);
 -static void u132_hcd_endp_work_scheduler(void *data);
  /*
  * this work function is only executed from the work queue
  *
  */
 -static void u132_hcd_ring_work_scheduler(void *data)
 +static void u132_hcd_ring_work_scheduler(struct work_struct *work)
  {
 -        struct u132_ring *ring = data;
 +        struct u132_ring *ring =
 +              container_of(work, struct u132_ring, scheduler.work);
          struct u132 *u132 = ring->u132;
          down(&u132->scheduler_lock);
          if (ring->in_use) {
          }
  }
  
 -static void u132_hcd_endp_work_scheduler(void *data)
 +static void u132_hcd_endp_work_scheduler(struct work_struct *work)
  {
          struct u132_ring *ring;
 -        struct u132_endp *endp = data;
 +        struct u132_endp *endp =
 +              container_of(work, struct u132_endp, scheduler.work);
          struct u132 *u132 = endp->u132;
          down(&u132->scheduler_lock);
          ring = endp->ring;
@@@ -1933,7 -1943,7 +1929,7 @@@ static int create_endpoint_and_queue_in
          if (!endp) {
                  return -ENOMEM;
          }
 -        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
 +        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
          spin_lock_init(&endp->queue_lock.slock);
          INIT_LIST_HEAD(&endp->urb_more);
          ring = endp->ring = &u132->ring[0];
@@@ -2022,7 -2032,7 +2018,7 @@@ static int create_endpoint_and_queue_bu
          if (!endp) {
                  return -ENOMEM;
          }
 -        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
 +        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
          spin_lock_init(&endp->queue_lock.slock);
          INIT_LIST_HEAD(&endp->urb_more);
          endp->dequeueing = 0;
@@@ -2107,7 -2117,7 +2103,7 @@@ static int create_endpoint_and_queue_co
          if (!endp) {
                  return -ENOMEM;
          }
 -        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
 +        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
          spin_lock_init(&endp->queue_lock.slock);
          INIT_LIST_HEAD(&endp->urb_more);
          ring = endp->ring = &u132->ring[0];
@@@ -3031,7 -3041,7 +3027,7 @@@ static struct hc_driver u132_hc_driver 
  * This function may be called by the USB core whilst the "usb_all_devices_rwsem"
  * is held for writing, thus this module must not call usb_remove_hcd()
  * synchronously - but instead should immediately stop activity to the
- * device and ansynchronously call usb_remove_hcd()
+ * device and asynchronously call usb_remove_hcd()
  */
  static int __devexit u132_remove(struct platform_device *pdev)
  {
@@@ -3086,10 -3096,10 +3082,10 @@@ static void u132_initialise(struct u13
                  ring->number = rings + 1;
                  ring->length = 0;
                  ring->curr_endp = NULL;
 -                INIT_WORK(&ring->scheduler, u132_hcd_ring_work_scheduler,
 -                        (void *)ring);
 +                INIT_DELAYED_WORK(&ring->scheduler,
 +                                u132_hcd_ring_work_scheduler);
          } down(&u132->sw_lock);
 -        INIT_WORK(&u132->monitor, u132_hcd_monitor_work, (void *)u132);
 +        INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
          while (ports-- > 0) {
                  struct u132_port *port = &u132->port[ports];
                  port->u132 = u132;
@@@ -3227,7 -3237,7 +3223,7 @@@ static int u132_resume(struct platform_
  #define u132_resume NULL
  #endif
  /*
- * this driver is loaded explicitely by ftdi_u132
+ * this driver is loaded explicitly by ftdi_u132
  *
  * the platform_driver struct is static because it is per type of module
  */
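
The requeue helpers in this driver all simplify the same way: once the work item is a delayed_work, queue_delayed_work(wq, work, 0) covers what queue_work() used to do, so a single call and a single failure path remain, and the kref is dropped exactly when queueing fails. A toy model of that drop-on-failure contract:

#include <stdio.h>

struct obj { int refs; };

/* Stand-in for queue_delayed_work(): returns 0 when already pending. */
static int queue_delayed(struct obj *o, unsigned int delta)
{
        (void)o;
        (void)delta;
        return 0;               /* pretend the item was already queued */
}

static void put_ref(struct obj *o)
{
        if (--o->refs == 0)
                printf("deleted\n");
}

/* One branch now handles both delta == 0 and delta > 0. */
static void requeue(struct obj *o, unsigned int delta)
{
        if (!queue_delayed(o, delta))
                put_ref(o);
}

int main(void)
{
        struct obj o = { .refs = 1 };

        requeue(&o, 0);         /* queueing fails, reference is dropped */
        return 0;
}
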
diff --combined drivers/usb/input/hid-core.c
index ebc9e823a46e6bb3727d5fe01872346d77a5c2d5,a49644b7c58e1492a19d21d533105254138f78f7..4295bab4f1e2653a69bba6a5326488d782e64e0f
@@@ -968,21 -968,29 +968,30 @@@ static void hid_retry_timeout(unsigned 
                hid_io_error(hid);
  }
  
- /* Workqueue routine to reset the device */
+ /* Workqueue routine to reset the device or clear a halt */
 -static void hid_reset(void *_hid)
 +static void hid_reset(struct work_struct *work)
  {
 -      struct hid_device *hid = (struct hid_device *) _hid;
 +      struct hid_device *hid =
 +              container_of(work, struct hid_device, reset_work);
-       int rc_lock, rc;
-       dev_dbg(&hid->intf->dev, "resetting device\n");
-       rc = rc_lock = usb_lock_device_for_reset(hid->dev, hid->intf);
-       if (rc_lock >= 0) {
-               rc = usb_reset_composite_device(hid->dev, hid->intf);
-               if (rc_lock)
-                       usb_unlock_device(hid->dev);
+       int rc_lock, rc = 0;
+       if (test_bit(HID_CLEAR_HALT, &hid->iofl)) {
+               dev_dbg(&hid->intf->dev, "clear halt\n");
+               rc = usb_clear_halt(hid->dev, hid->urbin->pipe);
+               clear_bit(HID_CLEAR_HALT, &hid->iofl);
+               hid_start_in(hid);
+       }
+       else if (test_bit(HID_RESET_PENDING, &hid->iofl)) {
+               dev_dbg(&hid->intf->dev, "resetting device\n");
+               rc = rc_lock = usb_lock_device_for_reset(hid->dev, hid->intf);
+               if (rc_lock >= 0) {
+                       rc = usb_reset_composite_device(hid->dev, hid->intf);
+                       if (rc_lock)
+                               usb_unlock_device(hid->dev);
+               }
+               clear_bit(HID_RESET_PENDING, &hid->iofl);
        }
-       clear_bit(HID_RESET_PENDING, &hid->iofl);
  
        switch (rc) {
        case 0:
@@@ -1024,9 -1032,8 +1033,8 @@@ static void hid_io_error(struct hid_dev
  
                /* Retries failed, so do a port reset */
                if (!test_and_set_bit(HID_RESET_PENDING, &hid->iofl)) {
-                       if (schedule_work(&hid->reset_work))
-                               goto done;
-                       clear_bit(HID_RESET_PENDING, &hid->iofl);
+                       schedule_work(&hid->reset_work);
+                       goto done;
                }
        }
  
@@@ -1050,6 -1057,11 +1058,11 @@@ static void hid_irq_in(struct urb *urb
                        hid->retry_delay = 0;
                        hid_input_report(HID_INPUT_REPORT, urb, 1);
                        break;
+               case -EPIPE:            /* stall */
+                       clear_bit(HID_IN_RUNNING, &hid->iofl);
+                       set_bit(HID_CLEAR_HALT, &hid->iofl);
+                       schedule_work(&hid->reset_work);
+                       return;
                case -ECONNRESET:       /* unlink */
                case -ENOENT:
                case -ESHUTDOWN:        /* unplug */
@@@ -1628,6 -1640,19 +1641,19 @@@ void hid_init_reports(struct hid_devic
  
  #define USB_VENDOR_ID_APPLE           0x05ac
  #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE       0x0304
+ #define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI     0x020e
+ #define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO      0x020f
+ #define USB_DEVICE_ID_APPLE_GEYSER_ANSI       0x0214
+ #define USB_DEVICE_ID_APPLE_GEYSER_ISO        0x0215
+ #define USB_DEVICE_ID_APPLE_GEYSER_JIS        0x0216
+ #define USB_DEVICE_ID_APPLE_GEYSER3_ANSI      0x0217
+ #define USB_DEVICE_ID_APPLE_GEYSER3_ISO       0x0218
+ #define USB_DEVICE_ID_APPLE_GEYSER3_JIS       0x0219
+ #define USB_DEVICE_ID_APPLE_GEYSER4_ANSI      0x021a
+ #define USB_DEVICE_ID_APPLE_GEYSER4_ISO       0x021b
+ #define USB_DEVICE_ID_APPLE_GEYSER4_JIS       0x021c
+ #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY  0x030a
+ #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY   0x030b
  
  #define USB_VENDOR_ID_CHERRY          0x046a
  #define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023
@@@ -1795,17 -1820,19 +1821,19 @@@ static const struct hid_blacklist 
  
        { USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION, HID_QUIRK_CYMOTION },
  
-       { USB_VENDOR_ID_APPLE, 0x020E, HID_QUIRK_POWERBOOK_HAS_FN },
-       { USB_VENDOR_ID_APPLE, 0x020F, HID_QUIRK_POWERBOOK_HAS_FN },
-       { USB_VENDOR_ID_APPLE, 0x0214, HID_QUIRK_POWERBOOK_HAS_FN },
-       { USB_VENDOR_ID_APPLE, 0x0215, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
-       { USB_VENDOR_ID_APPLE, 0x0216, HID_QUIRK_POWERBOOK_HAS_FN },
-       { USB_VENDOR_ID_APPLE, 0x0217, HID_QUIRK_POWERBOOK_HAS_FN },
-       { USB_VENDOR_ID_APPLE, 0x0218, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
-       { USB_VENDOR_ID_APPLE, 0x0219, HID_QUIRK_POWERBOOK_HAS_FN },
-       { USB_VENDOR_ID_APPLE, 0x021B, HID_QUIRK_POWERBOOK_HAS_FN },
-       { USB_VENDOR_ID_APPLE, 0x030A, HID_QUIRK_POWERBOOK_HAS_FN },
-       { USB_VENDOR_ID_APPLE, 0x030B, HID_QUIRK_POWERBOOK_HAS_FN },
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI, HID_QUIRK_POWERBOOK_HAS_FN },
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO, HID_QUIRK_POWERBOOK_HAS_FN },
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI, HID_QUIRK_POWERBOOK_HAS_FN },
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS, HID_QUIRK_POWERBOOK_HAS_FN },
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI, HID_QUIRK_POWERBOOK_HAS_FN },
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS, HID_QUIRK_POWERBOOK_HAS_FN },
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI, HID_QUIRK_POWERBOOK_HAS_FN },
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO, HID_QUIRK_POWERBOOK_HAS_FN },
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_POWERBOOK_HAS_FN },
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN },
+       { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN },
  
        { USB_VENDOR_ID_PANJIT, 0x0001, HID_QUIRK_IGNORE },
        { USB_VENDOR_ID_PANJIT, 0x0002, HID_QUIRK_IGNORE },
@@@ -1986,7 -2013,7 +2014,7 @@@ static struct hid_device *usb_hid_confi
                if (hid->collection->usage == HID_GD_MOUSE && hid_mousepoll_interval > 0)
                        interval = hid_mousepoll_interval;
  
-               if (endpoint->bEndpointAddress & USB_DIR_IN) {
+               if (usb_endpoint_dir_in(endpoint)) {
                        if (hid->urbin)
                                continue;
                        if (!(hid->urbin = usb_alloc_urb(0, GFP_KERNEL)))
  
        init_waitqueue_head(&hid->wait);
  
 -      INIT_WORK(&hid->reset_work, hid_reset, hid);
 +      INIT_WORK(&hid->reset_work, hid_reset);
        setup_timer(&hid->io_retry, hid_retry_timeout, (unsigned long) hid);
  
        spin_lock_init(&hid->inlock);
        return hid;
  
  fail:
-       if (hid->urbin)
-               usb_free_urb(hid->urbin);
-       if (hid->urbout)
-               usb_free_urb(hid->urbout);
-       if (hid->urbctrl)
-               usb_free_urb(hid->urbctrl);
+       usb_free_urb(hid->urbin);
+       usb_free_urb(hid->urbout);
+       usb_free_urb(hid->urbctrl);
        hid_free_buffers(dev, hid);
        hid_free_device(hid);
  
@@@ -2105,8 -2128,7 +2129,7 @@@ static void hid_disconnect(struct usb_i
  
        usb_free_urb(hid->urbin);
        usb_free_urb(hid->urbctrl);
-       if (hid->urbout)
-               usb_free_urb(hid->urbout);
+       usb_free_urb(hid->urbout);
  
        hid_free_buffers(hid->dev, hid);
        hid_free_device(hid);
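Nearly every hunk in this merge follows the same conversion: the workqueue rework drops the void *data argument, so a handler is now a work_func_t taking the work_struct itself and recovers its owning object with container_of(). A minimal sketch of the before/after shape (hypothetical my_dev/my_reset names, not code from the tree):

        #include <linux/workqueue.h>

        struct my_dev {
                struct work_struct reset_work;  /* embedded in the owning object */
        };

        /* new-style handler: work_func_t, i.e. void (*)(struct work_struct *) */
        static void my_reset(struct work_struct *work)
        {
                struct my_dev *d = container_of(work, struct my_dev, reset_work);
                /* ... operate on d; no void *data needed ... */
        }

        static void my_dev_start(struct my_dev *d)
        {
                /* old API: INIT_WORK(&d->reset_work, my_reset, d); */
                INIT_WORK(&d->reset_work, my_reset);
                schedule_work(&d->reset_work);
        }
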
diff --combined drivers/usb/misc/ftdi-elan.c
index e4e2cf2ba91508397798aea20581b86a2926afa4,cb0ba3107d7f76949687f268fd1037fcd0ea7e48..18b1925032a859fa4ce6b436359a15c7c81cb463
@@@ -156,9 -156,9 +156,9 @@@ struct usb_ftdi 
          struct usb_device *udev;
          struct usb_interface *interface;
          struct usb_class_driver *class;
 -        struct work_struct status_work;
 -        struct work_struct command_work;
 -        struct work_struct respond_work;
 +        struct delayed_work status_work;
 +        struct delayed_work command_work;
 +        struct delayed_work respond_work;
          struct u132_platform_data platform_data;
          struct resource resources[0];
          struct platform_device platform_dev;
@@@ -210,14 -210,23 +210,14 @@@ static void ftdi_elan_init_kref(struct 
  
  static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
  {
 -        if (delta > 0) {
 -                if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
 -                        return;
 -        } else if (queue_work(status_queue, &ftdi->status_work))
 -                return;
 -        kref_put(&ftdi->kref, ftdi_elan_delete);
 -        return;
 +      if (!queue_delayed_work(status_queue, &ftdi->status_work, delta))
 +              kref_put(&ftdi->kref, ftdi_elan_delete);
  }
  
  static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
  {
 -        if (delta > 0) {
 -                if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
 -                        kref_get(&ftdi->kref);
 -        } else if (queue_work(status_queue, &ftdi->status_work))
 -                kref_get(&ftdi->kref);
 -        return;
 +      if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
 +              kref_get(&ftdi->kref);
  }
  
  static void ftdi_status_cancel_work(struct usb_ftdi *ftdi)
  
  static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
  {
 -        if (delta > 0) {
 -                if (queue_delayed_work(command_queue, &ftdi->command_work,
 -                        delta))
 -                        return;
 -        } else if (queue_work(command_queue, &ftdi->command_work))
 -                return;
 -        kref_put(&ftdi->kref, ftdi_elan_delete);
 -        return;
 +      if (!queue_delayed_work(command_queue, &ftdi->command_work, delta))
 +              kref_put(&ftdi->kref, ftdi_elan_delete);
  }
  
  static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
  {
 -        if (delta > 0) {
 -                if (queue_delayed_work(command_queue, &ftdi->command_work,
 -                        delta))
 -                        kref_get(&ftdi->kref);
 -        } else if (queue_work(command_queue, &ftdi->command_work))
 -                kref_get(&ftdi->kref);
 -        return;
 +      if (queue_delayed_work(command_queue, &ftdi->command_work, delta))
 +              kref_get(&ftdi->kref);
  }
  
  static void ftdi_command_cancel_work(struct usb_ftdi *ftdi)
  static void ftdi_response_requeue_work(struct usb_ftdi *ftdi,
          unsigned int delta)
  {
 -        if (delta > 0) {
 -                if (queue_delayed_work(respond_queue, &ftdi->respond_work,
 -                        delta))
 -                        return;
 -        } else if (queue_work(respond_queue, &ftdi->respond_work))
 -                return;
 -        kref_put(&ftdi->kref, ftdi_elan_delete);
 -        return;
 +      if (!queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
 +              kref_put(&ftdi->kref, ftdi_elan_delete);
  }
  
  static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
  {
 -        if (delta > 0) {
 -                if (queue_delayed_work(respond_queue, &ftdi->respond_work,
 -                        delta))
 -                        kref_get(&ftdi->kref);
 -        } else if (queue_work(respond_queue, &ftdi->respond_work))
 -                kref_get(&ftdi->kref);
 -        return;
 +      if (queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
 +              kref_get(&ftdi->kref);
  }
  
  static void ftdi_response_cancel_work(struct usb_ftdi *ftdi)
@@@ -272,7 -303,7 +272,7 @@@ void ftdi_elan_gone_away(struct platfor
  
  
  EXPORT_SYMBOL_GPL(ftdi_elan_gone_away);
- void ftdi_release_platform_dev(struct device *dev)
+ static void ftdi_release_platform_dev(struct device *dev)
  {
          dev->parent = NULL;
  }
@@@ -444,11 -475,9 +444,11 @@@ static void ftdi_elan_kick_command_queu
          return;
  }
  
 -static void ftdi_elan_command_work(void *data)
 +static void ftdi_elan_command_work(struct work_struct *work)
  {
 -        struct usb_ftdi *ftdi = data;
 +        struct usb_ftdi *ftdi =
 +              container_of(work, struct usb_ftdi, command_work.work);
 +
          if (ftdi->disconnected > 0) {
                  ftdi_elan_put_kref(ftdi);
                  return;
@@@ -471,10 -500,9 +471,10 @@@ static void ftdi_elan_kick_respond_queu
          return;
  }
  
 -static void ftdi_elan_respond_work(void *data)
 +static void ftdi_elan_respond_work(struct work_struct *work)
  {
 -        struct usb_ftdi *ftdi = data;
 +        struct usb_ftdi *ftdi =
 +              container_of(work, struct usb_ftdi, respond_work.work);
          if (ftdi->disconnected > 0) {
                  ftdi_elan_put_kref(ftdi);
                  return;
  * after the FTDI has been synchronized
  *
  */
 -static void ftdi_elan_status_work(void *data)
 +static void ftdi_elan_status_work(struct work_struct *work)
  {
 -        struct usb_ftdi *ftdi = data;
 +        struct usb_ftdi *ftdi =
 +              container_of(work, struct usb_ftdi, status_work.work);
          int work_delay_in_msec = 0;
          if (ftdi->disconnected > 0) {
                  ftdi_elan_put_kref(ftdi);
@@@ -1399,14 -1426,6 +1399,6 @@@ static int ftdi_elan_read_reg(struct us
          }
  }
  
- int usb_ftdi_elan_read_reg(struct platform_device *pdev, u32 *data)
- {
-         struct usb_ftdi *ftdi = platform_device_to_usb_ftdi(pdev);
-         return ftdi_elan_read_reg(ftdi, data);
- }
- EXPORT_SYMBOL_GPL(usb_ftdi_elan_read_reg);
  static int ftdi_elan_read_config(struct usb_ftdi *ftdi, int config_offset,
          u8 width, u32 *data)
  {
@@@ -2606,10 -2625,7 +2598,7 @@@ static int ftdi_elan_probe(struct usb_i
          for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
                  endpoint = &iface_desc->endpoint[i].desc;
                  if (!ftdi->bulk_in_endpointAddr &&
-                         ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
-                         == USB_DIR_IN) && ((endpoint->bmAttributes &
-                         USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK))
-                         {
+                   usb_endpoint_is_bulk_in(endpoint)) {
                          buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
                          ftdi->bulk_in_size = buffer_size;
                          ftdi->bulk_in_endpointAddr = endpoint->bEndpointAddress;
                          }
                  }
                  if (!ftdi->bulk_out_endpointAddr &&
-                         ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
-                         == USB_DIR_OUT) && ((endpoint->bmAttributes &
-                         USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK))
-                         {
+                   usb_endpoint_is_bulk_out(endpoint)) {
                          ftdi->bulk_out_endpointAddr =
                                  endpoint->bEndpointAddress;
                  }
                  ftdi->class = NULL;
                  dev_info(&ftdi->udev->dev, "USB FTDI=%p ELAN interface %d now a"
                          "ctivated\n", ftdi, iface_desc->desc.bInterfaceNumber);
 -                INIT_WORK(&ftdi->status_work, ftdi_elan_status_work,
 -                        (void *)ftdi);
 -                INIT_WORK(&ftdi->command_work, ftdi_elan_command_work,
 -                        (void *)ftdi);
 -                INIT_WORK(&ftdi->respond_work, ftdi_elan_respond_work,
 -                        (void *)ftdi);
 +                INIT_DELAYED_WORK(&ftdi->status_work, ftdi_elan_status_work);
 +                INIT_DELAYED_WORK(&ftdi->command_work, ftdi_elan_command_work);
 +                INIT_DELAYED_WORK(&ftdi->respond_work, ftdi_elan_respond_work);
                  ftdi_status_queue_work(ftdi, msecs_to_jiffies(3 *1000));
                  return 0;
          } else {
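The requeue helpers above collapse to single calls: queue_delayed_work() itself accepts a delay of 0, and its return value (nonzero if newly queued, 0 if the work was already pending) decides whether the kref is taken or dropped. The idiom, as a hedged sketch with assumed my_* names:

        /* assumes: struct my_ftdi { struct delayed_work status_work;
         *                           struct kref kref; };
         * plus a workqueue my_wq and a kref release function my_delete() */
        static void my_status_requeue(struct my_ftdi *f, unsigned int delta)
        {
                if (!queue_delayed_work(my_wq, &f->status_work, delta))
                        kref_put(&f->kref, my_delete);  /* not queued: drop our ref */
        }

        static void my_status_queue(struct my_ftdi *f, unsigned int delta)
        {
                if (queue_delayed_work(my_wq, &f->status_work, delta))
                        kref_get(&f->kref);             /* queued: hold a ref for it */
        }
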
diff --combined drivers/usb/misc/phidgetkit.c
index 33e716c6a79b4931f2e4baaab33a4a98ccd6b1d3,9110793f81d38dc0132cfb90035342d572b10a7a..9659c79e187e16f65bc0036a2b8cad74f59c2db2
@@@ -81,8 -81,8 +81,8 @@@ struct interfacekit 
        unsigned char *data;
        dma_addr_t data_dma;
  
 -      struct work_struct do_notify;
 -      struct work_struct do_resubmit;
 +      struct delayed_work do_notify;
 +      struct delayed_work do_resubmit;
        unsigned long input_events;
        unsigned long sensor_events;
  };
@@@ -374,7 -374,7 +374,7 @@@ static void interfacekit_irq(struct ur
        }
  
        if (kit->input_events || kit->sensor_events)
 -              schedule_work(&kit->do_notify);
 +              schedule_delayed_work(&kit->do_notify, 0);
  
  resubmit:
        status = usb_submit_urb(urb, SLAB_ATOMIC);
                        kit->udev->devpath, status);
  }
  
 -static void do_notify(void *data)
 +static void do_notify(struct work_struct *work)
  {
 -      struct interfacekit *kit = data;
 +      struct interfacekit *kit =
 +              container_of(work, struct interfacekit, do_notify.work);
        int i;
        char sysfs_file[8];
  
        }
  }
  
 -static void do_resubmit(void *data)
 +static void do_resubmit(struct work_struct *work)
  {
 -      set_outputs(data);
 +      struct interfacekit *kit =
 +              container_of(work, struct interfacekit, do_resubmit.work);
 +      set_outputs(kit);
  }
  
  #define show_set_output(value)                \
@@@ -554,7 -551,7 +554,7 @@@ static int interfacekit_probe(struct us
                return -ENODEV;
  
        endpoint = &interface->endpoint[0].desc;
-       if (!(endpoint->bEndpointAddress & 0x80)) 
+       if (!usb_endpoint_dir_in(endpoint))
                return -ENODEV;
        /*
         * bmAttributes
  
        kit->udev = usb_get_dev(dev);
        kit->intf = intf;
 -      INIT_WORK(&kit->do_notify, do_notify, kit);
 -      INIT_WORK(&kit->do_resubmit, do_resubmit, kit);
 +      INIT_DELAYED_WORK(&kit->do_notify, do_notify);
 +      INIT_DELAYED_WORK(&kit->do_resubmit, do_resubmit);
        usb_fill_int_urb(kit->irq, kit->udev, pipe, kit->data,
                        maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp,
                        interfacekit_irq, kit, endpoint->bInterval);
@@@ -653,8 -650,7 +653,7 @@@ out2
                device_remove_file(kit->dev, &dev_output_attrs[i]);
  out:
        if (kit) {
-               if (kit->irq)
-                       usb_free_urb(kit->irq);
+               usb_free_urb(kit->irq);
                if (kit->data)
                        usb_buffer_free(dev, URB_INT_SIZE, kit->data, kit->data_dma);
                if (kit->dev)
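Note that a struct delayed_work has no non-delayed submission helper, so call sites that previously used schedule_work() pass an explicit delay of 0, which queues the work for immediate execution; a nonzero delay arms the embedded timer first. Illustrative calls (the delay values are examples only):

        schedule_delayed_work(&kit->do_notify, 0);              /* run now */
        schedule_delayed_work(&kit->do_resubmit, HZ / 2);       /* in ~500 ms */
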
diff --combined drivers/usb/misc/phidgetmotorcontrol.c
index 0385ffcc74194206575b7c66b138613ee97a1ea4,c3469b0a67c2d6d63059230741f74e12f60f13ad..2bb4fa572bb72492f3f6aa6b60c18e841bd56605
@@@ -41,7 -41,7 +41,7 @@@ struct motorcontrol 
        unsigned char *data;
        dma_addr_t data_dma;
  
 -      struct work_struct do_notify;
 +      struct delayed_work do_notify;
        unsigned long input_events;
        unsigned long speed_events;
        unsigned long exceed_events;
@@@ -148,7 -148,7 +148,7 @@@ static void motorcontrol_irq(struct ur
                set_bit(1, &mc->exceed_events);
  
        if (mc->input_events || mc->exceed_events || mc->speed_events)
 -              schedule_work(&mc->do_notify);
 +              schedule_delayed_work(&mc->do_notify, 0);
  
  resubmit:
        status = usb_submit_urb(urb, SLAB_ATOMIC);
                        mc->udev->devpath, status);
  }
  
 -static void do_notify(void *data)
 +static void do_notify(struct work_struct *work)
  {
 -      struct motorcontrol *mc = data;
 +      struct motorcontrol *mc =
 +              container_of(work, struct motorcontrol, do_notify.work);
        int i;
        char sysfs_file[8];
  
@@@ -324,7 -323,7 +324,7 @@@ static int motorcontrol_probe(struct us
                return -ENODEV;
  
        endpoint = &interface->endpoint[0].desc;
-       if (!(endpoint->bEndpointAddress & 0x80))
+       if (!usb_endpoint_dir_in(endpoint))
                return -ENODEV;
  
        /*
        mc->udev = usb_get_dev(dev);
        mc->intf = intf;
        mc->acceleration[0] = mc->acceleration[1] = 10;
 -      INIT_WORK(&mc->do_notify, do_notify, mc);
 +      INIT_DELAYED_WORK(&mc->do_notify, do_notify);
        usb_fill_int_urb(mc->irq, mc->udev, pipe, mc->data,
                        maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp,
                        motorcontrol_irq, mc, endpoint->bInterval);
@@@ -393,8 -392,7 +393,7 @@@ out2
                device_remove_file(mc->dev, &dev_attrs[i]);
  out:
        if (mc) {
-               if (mc->irq)
-                       usb_free_urb(mc->irq);
+               usb_free_urb(mc->irq);
                if (mc->data)
                        usb_buffer_free(dev, URB_INT_SIZE, mc->data, mc->data_dma);
                if (mc->dev)
diff --combined drivers/usb/net/pegasus.c
index 78cf6f091285ff08f663200a0e9d20b199b51531,69eb0db399df09f1c4ff5cb5dc9ec92d03974412..b5690b3834e3250606c026dcc7bffe5a5b1118bd
@@@ -163,6 -163,7 +163,7 @@@ static int get_registers(pegasus_t * pe
  
        /* using ATOMIC, we'd never wake up if we slept */
        if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
+               set_current_state(TASK_RUNNING);
                if (ret == -ENODEV)
                        netif_device_detach(pegasus->net);
                if (netif_msg_drv(pegasus))
@@@ -1280,9 -1281,9 +1281,9 @@@ static inline void setup_pegasus_II(peg
  static struct workqueue_struct *pegasus_workqueue = NULL;
  #define CARRIER_CHECK_DELAY (2 * HZ)
  
 -static void check_carrier(void *data)
 +static void check_carrier(struct work_struct *work)
  {
 -      pegasus_t *pegasus = data;
 +      pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work);
        set_carrier(pegasus->net);
        if (!(pegasus->flags & PEGASUS_UNPLUG)) {
                queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
@@@ -1318,7 -1319,7 +1319,7 @@@ static int pegasus_probe(struct usb_int
  
        tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus);
  
 -      INIT_WORK(&pegasus->carrier_check, check_carrier, pegasus);
 +      INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier);
  
        pegasus->intf = intf;
        pegasus->usb = dev;
diff --combined drivers/usb/net/usbnet.c
index 79b5474fe2343593b41f1ef61d218fe42d159f65,7672e11c94c41b17cb9181101562c2dfbeb8d7fc..327f97555679df645334d981c5ef15ed7de802ff
@@@ -116,7 -116,7 +116,7 @@@ int usbnet_get_endpoints(struct usbnet 
                        e = alt->endpoint + ep;
                        switch (e->desc.bmAttributes) {
                        case USB_ENDPOINT_XFER_INT:
-                               if (!(e->desc.bEndpointAddress & USB_DIR_IN))
+                               if (!usb_endpoint_dir_in(&e->desc))
                                        continue;
                                intr = 1;
                                /* FALLTHROUGH */
                        default:
                                continue;
                        }
-                       if (e->desc.bEndpointAddress & USB_DIR_IN) {
+                       if (usb_endpoint_dir_in(&e->desc)) {
                                if (!intr && !in)
                                        in = e;
                                else if (intr && !status)
@@@ -782,10 -782,9 +782,10 @@@ static struct ethtool_ops usbnet_ethtoo
   * especially now that control transfers can be queued.
   */
  static void
 -kevent (void *data)
 +kevent (struct work_struct *work)
  {
 -      struct usbnet           *dev = data;
 +      struct usbnet           *dev =
 +              container_of(work, struct usbnet, kevent);
        int                     status;
  
        /* usb_clear_halt() needs a thread context */
@@@ -1147,7 -1146,7 +1147,7 @@@ usbnet_probe (struct usb_interface *ude
        skb_queue_head_init (&dev->done);
        dev->bh.func = usbnet_bh;
        dev->bh.data = (unsigned long) dev;
 -      INIT_WORK (&dev->kevent, kevent, dev);
 +      INIT_WORK (&dev->kevent, kevent);
        dev->delay.function = usbnet_bh;
        dev->delay.data = (unsigned long) dev;
        init_timer (&dev->delay);
diff --combined drivers/usb/serial/aircable.c
index 2a4ac9bd6a3ace2657057d795c4696181e80722a,b1b5707bc99af029e1e7f1f859357fcd507c12da..86bcf63b6ba5fa6e3e7e277f0db7de0a6367b032
@@@ -92,7 -92,6 +92,7 @@@ struct aircable_private 
        struct circ_buf *rx_buf;        /* read buffer */
        int rx_flags;                   /* for throttling */
        struct work_struct rx_work;     /* work queue for the receiving line */
 +      struct usb_serial_port *port;   /* USB port with which associated */
  };
  
  /* Private methods */
@@@ -252,11 -251,10 +252,11 @@@ static void aircable_send(struct usb_se
        schedule_work(&port->work);
  }
  
 -static void aircable_read(void *params)
 +static void aircable_read(struct work_struct *work)
  {
 -      struct usb_serial_port *port = params;
 -      struct aircable_private *priv = usb_get_serial_port_data(port);
 +      struct aircable_private *priv =
 +              container_of(work, struct aircable_private, rx_work);
 +      struct usb_serial_port *port = priv->port;
        struct tty_struct *tty;
        unsigned char *data;
        int count;
         */
        tty = port->tty;
  
-       if (!tty)
+       if (!tty) {
                schedule_work(&priv->rx_work);
+               err("%s - No tty available", __FUNCTION__);
+               return;
+       }
  
        count = min(64, serial_buf_data_avail(priv->rx_buf));
  
@@@ -307,9 -308,7 +310,7 @@@ static int aircable_probe(struct usb_se
  
        for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
                endpoint = &iface_desc->endpoint[i].desc;
-               if (((endpoint->bEndpointAddress & 0x80) == 0x00) &&
-                       ((endpoint->bmAttributes & 3) == 0x02)) {
-                       /* we found our bulk out endpoint */
+               if (usb_endpoint_is_bulk_out(endpoint)) {
                        dbg("found bulk out on endpoint %d", i);
                        ++num_bulk_out;
                }
@@@ -350,8 -349,7 +351,8 @@@ static int aircable_attach (struct usb_
        }
  
        priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED);
 -      INIT_WORK(&priv->rx_work, aircable_read, port);
 +      priv->port = port;
 +      INIT_WORK(&priv->rx_work, aircable_read);
  
        usb_set_serial_port_data(serial->port[0], priv);
  
@@@ -518,7 -516,7 +519,7 @@@ static void aircable_read_bulk_callback
                                        package_length - shift);
                        }
                }
 -              aircable_read(port);
 +              aircable_read(&priv->rx_work);
        }
  
        /* Schedule the next read _if_ we are still open */
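Since the handler no longer receives the port as its argument, the driver adds a priv->port back-pointer: container_of() reaches the private struct from the work item, and the stored pointer reaches the associated usb_serial_port. The pattern, condensed from the hunks above:

        struct aircable_private {
                struct work_struct rx_work;
                struct usb_serial_port *port;   /* back-pointer for the handler */
        };

        static void aircable_read(struct work_struct *work)
        {
                struct aircable_private *priv =
                        container_of(work, struct aircable_private, rx_work);
                struct usb_serial_port *port = priv->port;
                /* ... push data from priv->rx_buf to port->tty ... */
        }
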
diff --combined drivers/usb/serial/digi_acceleport.c
index fd159b040bfb6742c69dab9be635073b881a4793,5e3ac281a2f8798515a08d0f083103effa070b53..83d0e21145b01cf9b1b46352eae664b51799a164
  *       to TASK_RUNNING will be lost and write_chan's subsequent call to
  *       schedule() will never return (unless it catches a signal).
  *       This race condition occurs because write_bulk_callback() (and thus
- *       the wakeup) are called asynchonously from an interrupt, rather than
+ *       the wakeup) are called asynchronously from an interrupt, rather than
  *       from the scheduler.  We can avoid the race by calling the wakeup
  *       from the scheduler queue and that's our fix:  Now, at the end of
  *       write_bulk_callback() we queue up a wakeup call on the scheduler
@@@ -430,14 -430,13 +430,14 @@@ struct digi_port 
        int dp_in_close;                        /* close in progress */
        wait_queue_head_t dp_close_wait;        /* wait queue for close */
        struct work_struct dp_wakeup_work;
 +      struct usb_serial_port *dp_port;
  };
  
  
  /* Local Function Declarations */
  
  static void digi_wakeup_write( struct usb_serial_port *port );
 -static void digi_wakeup_write_lock(void *);
 +static void digi_wakeup_write_lock(struct work_struct *work);
  static int digi_write_oob_command( struct usb_serial_port *port,
        unsigned char *buf, int count, int interruptible );
  static int digi_write_inb_command( struct usb_serial_port *port,
@@@ -599,12 -598,11 +599,12 @@@ static inline long cond_wait_interrupti
  *  on writes.
  */
  
 -static void digi_wakeup_write_lock(void *arg)
 +static void digi_wakeup_write_lock(struct work_struct *work)
  {
 -      struct usb_serial_port *port = arg;
 +      struct digi_port *priv =
 +              container_of(work, struct digi_port, dp_wakeup_work);
 +      struct usb_serial_port *port = priv->dp_port;
        unsigned long flags;
 -      struct digi_port *priv = usb_get_serial_port_data(port);
  
  
        spin_lock_irqsave( &priv->dp_port_lock, flags );
@@@ -1704,8 -1702,8 +1704,8 @@@ dbg( "digi_startup: TOP" )
                init_waitqueue_head( &priv->dp_flush_wait );
                priv->dp_in_close = 0;
                init_waitqueue_head( &priv->dp_close_wait );
 -              INIT_WORK(&priv->dp_wakeup_work,
 -                              digi_wakeup_write_lock, serial->port[i]);
 +              INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
 +              priv->dp_port = serial->port[i];
  
                /* initialize write wait queue for this port */
                init_waitqueue_head( &serial->port[i]->write_wait );
diff --combined drivers/usb/serial/ftdi_sio.c
index 88ed5c1d236ccced34c9d0fd02356f108280506c,89ce2775be15c0fbd285a82861d65b75257d4508..72e4d48f51e9c18e9e6f4130dda00ad504261c0e
@@@ -559,8 -559,7 +559,8 @@@ struct ftdi_private 
        char prev_status, diff_status;        /* Used for TIOCMIWAIT */
        __u8 rx_flags;          /* receive state flags (throttling) */
        spinlock_t rx_lock;     /* spinlock for receive state */
 -      struct work_struct rx_work;
 +      struct delayed_work rx_work;
 +      struct usb_serial_port *port;
        int rx_processed;
        unsigned long rx_bytes;
  
@@@ -594,7 -593,7 +594,7 @@@ static int  ftdi_write_room                (struct us
  static int  ftdi_chars_in_buffer      (struct usb_serial_port *port);
  static void ftdi_write_bulk_callback  (struct urb *urb);
  static void ftdi_read_bulk_callback   (struct urb *urb);
 -static void ftdi_process_read         (void *param);
 +static void ftdi_process_read         (struct work_struct *work);
  static void ftdi_set_termios          (struct usb_serial_port *port, struct termios * old);
  static int  ftdi_tiocmget               (struct usb_serial_port *port, struct file *file);
  static int  ftdi_tiocmset             (struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear);
@@@ -1202,8 -1201,7 +1202,8 @@@ static int ftdi_sio_attach (struct usb_
                port->read_urb->transfer_buffer_length = BUFSZ;
        }
  
 -      INIT_WORK(&priv->rx_work, ftdi_process_read, port);
 +      INIT_DELAYED_WORK(&priv->rx_work, ftdi_process_read);
 +      priv->port = port;
  
        /* Free port's existing write urb and transfer buffer. */
        if (port->write_urb) {
@@@ -1390,8 -1388,7 +1390,7 @@@ static void ftdi_close (struct usb_seri
        flush_scheduled_work();
  
        /* shutdown our bulk read */
-       if (port->read_urb)
-               usb_kill_urb(port->read_urb);
+       usb_kill_urb(port->read_urb);
  } /* ftdi_close */
  
  
@@@ -1643,18 -1640,17 +1642,18 @@@ static void ftdi_read_bulk_callback (st
        priv->rx_bytes += countread;
        spin_unlock_irqrestore(&priv->rx_lock, flags);
  
 -      ftdi_process_read(port);
 +      ftdi_process_read(&priv->rx_work.work);
  
  } /* ftdi_read_bulk_callback */
  
  
 -static void ftdi_process_read (void *param)
 +static void ftdi_process_read (struct work_struct *work)
  { /* ftdi_process_read */
 -      struct usb_serial_port *port = (struct usb_serial_port*)param;
 +      struct ftdi_private *priv =
 +              container_of(work, struct ftdi_private, rx_work.work);
 +      struct usb_serial_port *port = priv->port;
        struct urb *urb;
        struct tty_struct *tty;
 -      struct ftdi_private *priv;
        char error_flag;
        unsigned char *data;
  
@@@ -2183,7 -2179,7 +2182,7 @@@ static void ftdi_unthrottle (struct usb
        spin_unlock_irqrestore(&priv->rx_lock, flags);
  
        if (actually_throttled)
 -              schedule_work(&priv->rx_work);
 +              schedule_delayed_work(&priv->rx_work, 0);
  }
  
  static int __init ftdi_init (void)
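When the embedded item is a struct delayed_work, the handler's work pointer addresses the inner work member, so container_of() must name rx_work.work; the same spelling lets ftdi_read_bulk_callback() run the handler synchronously via &priv->rx_work.work. In sketch form:

        struct ftdi_private {
                struct delayed_work rx_work;    /* wraps a struct work_struct */
                struct usb_serial_port *port;
        };

        static void ftdi_process_read(struct work_struct *work)
        {
                struct ftdi_private *priv =
                        container_of(work, struct ftdi_private, rx_work.work);
                /* ... */
        }
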
diff --combined drivers/usb/serial/usb-serial.c
index 2cfba8488a93afa3c053248495375e8094bd17ea,c1257d5292f54d71565d1991726d18af03cbcfb9..3d5072f14b8d049081fbc2c88fad03bb157f6e9b
@@@ -533,10 -533,9 +533,10 @@@ void usb_serial_port_softint(struct usb
        schedule_work(&port->work);
  }
  
 -static void usb_serial_port_work(void *private)
 +static void usb_serial_port_work(struct work_struct *work)
  {
 -      struct usb_serial_port *port = private;
 +      struct usb_serial_port *port =
 +              container_of(work, struct usb_serial_port, work);
        struct tty_struct *tty;
  
        dbg("%s - port %d", __FUNCTION__, port->number);
@@@ -800,7 -799,7 +800,7 @@@ int usb_serial_probe(struct usb_interfa
                port->serial = serial;
                spin_lock_init(&port->lock);
                mutex_init(&port->mutex);
 -              INIT_WORK(&port->work, usb_serial_port_work, port);
 +              INIT_WORK(&port->work, usb_serial_port_work);
                serial->port[i] = port;
        }
  
@@@ -953,32 -952,28 +953,28 @@@ probe_error
                port = serial->port[i];
                if (!port)
                        continue;
-               if (port->read_urb)
-                       usb_free_urb (port->read_urb);
+               usb_free_urb(port->read_urb);
                kfree(port->bulk_in_buffer);
        }
        for (i = 0; i < num_bulk_out; ++i) {
                port = serial->port[i];
                if (!port)
                        continue;
-               if (port->write_urb)
-                       usb_free_urb (port->write_urb);
+               usb_free_urb(port->write_urb);
                kfree(port->bulk_out_buffer);
        }
        for (i = 0; i < num_interrupt_in; ++i) {
                port = serial->port[i];
                if (!port)
                        continue;
-               if (port->interrupt_in_urb)
-                       usb_free_urb (port->interrupt_in_urb);
+               usb_free_urb(port->interrupt_in_urb);
                kfree(port->interrupt_in_buffer);
        }
        for (i = 0; i < num_interrupt_out; ++i) {
                port = serial->port[i];
                if (!port)
                        continue;
-               if (port->interrupt_out_urb)
-                       usb_free_urb (port->interrupt_out_urb);
+               usb_free_urb(port->interrupt_out_urb);
                kfree(port->interrupt_out_buffer);
        }
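The probe_error cleanup sheds its NULL checks because usb_free_urb() accepts a NULL urb and returns quietly, the same convention as kfree() (and as usb_kill_urb() in the ftdi_close hunk earlier). Error paths can therefore free unconditionally:

        usb_free_urb(port->read_urb);   /* fine even if never allocated */
        kfree(port->bulk_in_buffer);    /* kfree(NULL) is likewise a no-op */
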
  
diff --combined fs/aio.c
index ca1c5180a17f1b5eed1b31a0b79c779e6349c38d,277a5f2d18ad7b8ca66a3aef65551379054fc724..287a1bc7a1828c16c118e811b050390e74556771
--- a/fs/aio.c
+++ b/fs/aio.c
@@@ -53,13 -53,13 +53,13 @@@ static kmem_cache_t        *kioctx_cachep
  static struct workqueue_struct *aio_wq;
  
  /* Used for rare fput completion. */
 -static void aio_fput_routine(void *);
 -static DECLARE_WORK(fput_work, aio_fput_routine, NULL);
 +static void aio_fput_routine(struct work_struct *);
 +static DECLARE_WORK(fput_work, aio_fput_routine);
  
  static DEFINE_SPINLOCK(fput_lock);
  static LIST_HEAD(fput_head);
  
 -static void aio_kick_handler(void *);
 +static void aio_kick_handler(struct work_struct *);
  static void aio_queue_work(struct kioctx *);
  
  /* aio_setup
@@@ -227,7 -227,7 +227,7 @@@ static struct kioctx *ioctx_alloc(unsig
  
        INIT_LIST_HEAD(&ctx->active_reqs);
        INIT_LIST_HEAD(&ctx->run_list);
 -      INIT_WORK(&ctx->wq, aio_kick_handler, ctx);
 +      INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
  
        if (aio_setup_ring(ctx) < 0)
                goto out_freectx;
@@@ -367,8 -367,7 +367,7 @@@ void fastcall __put_ioctx(struct kioct
  {
        unsigned nr_events = ctx->max_reqs;
  
-       if (unlikely(ctx->reqs_active))
-               BUG();
+       BUG_ON(ctx->reqs_active);
  
        cancel_delayed_work(&ctx->wq);
        flush_workqueue(aio_wq);
@@@ -470,7 -469,7 +469,7 @@@ static inline void really_put_req(struc
                wake_up(&ctx->wait);
  }
  
 -static void aio_fput_routine(void *data)
 +static void aio_fput_routine(struct work_struct *data)
  {
        spin_lock_irq(&fput_lock);
        while (likely(!list_empty(&fput_head))) {
@@@ -505,8 -504,7 +504,7 @@@ static int __aio_put_req(struct kioctx 
        assert_spin_locked(&ctx->ctx_lock);
  
        req->ki_users --;
-       if (unlikely(req->ki_users < 0))
-               BUG();
+       BUG_ON(req->ki_users < 0);
        if (likely(req->ki_users))
                return 0;
        list_del(&req->ki_list);                /* remove from active_reqs */
@@@ -859,9 -857,9 +857,9 @@@ static inline void aio_run_all_iocbs(st
   *      space.
   * Run on aiod's context.
   */
 -static void aio_kick_handler(void *data)
 +static void aio_kick_handler(struct work_struct *work)
  {
 -      struct kioctx *ctx = data;
 +      struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
        mm_segment_t oldfs = get_fs();
        int requeue;
  
         * we're in a worker thread already, don't use queue_delayed_work,
         */
        if (requeue)
 -              queue_work(aio_wq, &ctx->wq);
 +              queue_delayed_work(aio_wq, &ctx->wq, 0);
  }
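Alongside the workqueue changes, the aio hunks fold open-coded checks into BUG_ON(), the idiomatic spelling that keeps the unlikely() hint inside the macro:

        /* before */
        if (unlikely(req->ki_users < 0))
                BUG();

        /* after */
        BUG_ON(req->ki_users < 0);
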
  
  
diff --combined fs/bio.c
index c6c07ca5b5a9d36e2306197a1b6d007f78f293e8,aa4d09bd4e711872589b5b7a5e5ccf8b88952713..50c40ce2cead2078d528d48112d09b062fb32eb0
--- a/fs/bio.c
+++ b/fs/bio.c
@@@ -560,10 -560,8 +560,8 @@@ struct bio *bio_copy_user(request_queue
                        break;
                }
  
-               if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
-                       ret = -EINVAL;
+               if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
                        break;
-               }
  
                len -= bytes;
        }
@@@ -622,10 -620,9 +620,9 @@@ static struct bio *__bio_map_user_iov(r
  
                nr_pages += end - start;
                /*
-                * transfer and buffer must be aligned to at least hardsector
-                * size for now, in the future we can relax this restriction
+                * buffer must be aligned to at least hardsector size for now
                 */
-               if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+               if (uaddr & queue_dma_alignment(q))
                        return ERR_PTR(-EINVAL);
        }
  
@@@ -751,7 -748,6 +748,6 @@@ struct bio *bio_map_user_iov(request_qu
                             int write_to_vm)
  {
        struct bio *bio;
-       int len = 0, i;
  
        bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
  
         */
        bio_get(bio);
  
-       for (i = 0; i < iov_count; i++)
-               len += iov[i].iov_len;
-       if (bio->bi_size == len)
-               return bio;
-       /*
-        * don't support partial mappings
-        */
-       bio_endio(bio, bio->bi_size, 0);
-       bio_unmap_user(bio);
-       return ERR_PTR(-EINVAL);
+       return bio;
  }
  
  static void __bio_unmap_user(struct bio *bio)
@@@ -955,16 -940,16 +940,16 @@@ static void bio_release_pages(struct bi
   * run one bio_put() against the BIO.
   */
  
 -static void bio_dirty_fn(void *data);
 +static void bio_dirty_fn(struct work_struct *work);
  
 -static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
 +static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
  static DEFINE_SPINLOCK(bio_dirty_lock);
  static struct bio *bio_dirty_list;
  
  /*
   * This runs in process context
   */
 -static void bio_dirty_fn(void *data)
 +static void bio_dirty_fn(struct work_struct *work)
  {
        unsigned long flags;
        struct bio *bio;
diff --combined fs/reiserfs/journal.c
index cd1bb75ceb24a459810120bfa361eece65cec42c,ac93174c96398a5b6cdc3a21be94026b1f375765..7280a23ef3444aa4b8370dcb99b8ee46ee246dec
@@@ -104,7 -104,7 +104,7 @@@ static int release_journal_dev(struct s
                               struct reiserfs_journal *journal);
  static int dirty_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl);
 -static void flush_async_commits(void *p);
 +static void flush_async_commits(struct work_struct *work);
  static void queue_log_writer(struct super_block *s);
  
  /* values for join in do_journal_begin_r */
@@@ -1464,7 -1464,7 +1464,7 @@@ static int flush_journal_list(struct su
                }
  
                /* if someone has this block in a newer transaction, just make
-                ** sure they are commited, and don't try writing it to disk
+                ** sure they are committed, and don't try writing it to disk
                 */
                if (pjl) {
                        if (atomic_read(&pjl->j_commit_left))
@@@ -2836,8 -2836,7 +2836,8 @@@ int journal_init(struct super_block *p_
        if (reiserfs_mounted_fs_count <= 1)
                commit_wq = create_workqueue("reiserfs");
  
 -      INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
 +      INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
 +      journal->j_work_sb = p_s_sb;
        return 0;
        free_and_return:
        free_journal_ram(p_s_sb);
@@@ -3385,7 -3384,7 +3385,7 @@@ static int remove_from_transaction(stru
  
  /*
  ** for any cnode in a journal list, it can only be dirtied of all the
- ** transactions that include it are commited to disk.
+ ** transactions that include it are committed to disk.
  ** this checks through each transaction, and returns 1 if you are allowed to dirty,
  ** and 0 if you aren't
  **
@@@ -3427,7 -3426,7 +3427,7 @@@ static int can_dirty(struct reiserfs_jo
  }
  
  /* syncs the commit blocks, but does not force the real buffers to disk
- ** will wait until the current transaction is done/commited before returning 
+ ** will wait until the current transaction is done/committed before returning 
  */
  int journal_end_sync(struct reiserfs_transaction_handle *th,
                     struct super_block *p_s_sb, unsigned long nblocks)
  /*
  ** writeback the pending async commits to disk
  */
 -static void flush_async_commits(void *p)
 +static void flush_async_commits(struct work_struct *work)
  {
 -      struct super_block *p_s_sb = p;
 -      struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
 +      struct reiserfs_journal *journal =
 +              container_of(work, struct reiserfs_journal, j_work.work);
 +      struct super_block *p_s_sb = journal->j_work_sb;
        struct reiserfs_journal_list *jl;
        struct list_head *entry;
  
diff --combined include/linux/mmc/host.h
index 8b08ef3820f2623c2ac32c9c899713740c38e054,528e7d3fecb18123fc3f98bd022bcc5df97efc5f..c15ae1986b9833e9727a3833be875e3daac93030
@@@ -74,8 -74,8 +74,8 @@@ struct mmc_card
  struct device;
  
  struct mmc_host {
-       struct device           *dev;
-       struct class_device     class_dev;
+       struct device           *parent;
+       struct device           class_dev;
        int                     index;
        const struct mmc_host_ops *ops;
        unsigned int            f_min;
        struct mmc_card         *card_busy;     /* the MMC card claiming host */
        struct mmc_card         *card_selected; /* the selected MMC card */
  
 -      struct work_struct      detect;
 +      struct delayed_work     detect;
  
        unsigned long           private[0] ____cacheline_aligned;
  };
@@@ -125,8 -125,8 +125,8 @@@ static inline void *mmc_priv(struct mmc
        return (void *)host->private;
  }
  
- #define mmc_dev(x)    ((x)->dev)
- #define mmc_hostname(x)       ((x)->class_dev.class_id)
+ #define mmc_dev(x)    ((x)->parent)
+ #define mmc_hostname(x)       ((x)->class_dev.bus_id)
  
  extern int mmc_suspend_host(struct mmc_host *, pm_message_t);
  extern int mmc_resume_host(struct mmc_host *);
diff --combined include/linux/reiserfs_fs_sb.h
index 6610103f23e15790c62ff8c6bfe7ac95eee731c3,62a7169aed8b12d1a4e231f57b93d68bd4ec040e..3a28742d86f96ce04faaad22f0b296619724da89
@@@ -249,8 -249,7 +249,8 @@@ struct reiserfs_journal 
        int j_errno;
  
        /* when flushing ordered buffers, throttle new ordered writers */
 -      struct work_struct j_work;
 +      struct delayed_work j_work;
 +      struct super_block *j_work_sb;
        atomic_t j_async_throttle;
  };
  
@@@ -430,7 -429,7 +430,7 @@@ enum reiserfs_mount_options 
  /* -o hash={tea, rupasov, r5, detect} is meant for properly mounting 
  ** reiserfs disks from 3.5.19 or earlier.  99% of the time, this option
  ** is not required.  If the normal autodetection code can't determine which
- ** hash to use (because both hases had the same value for a file)
+ ** hash to use (because both hashes had the same value for a file)
  ** use this option to force a specific hash.  It won't allow you to override
  ** the existing hash on the FS, so if you have a tea hash disk, and mount
  ** with -o hash=rupasov, the mount will fail.
diff --combined include/linux/tty.h
index c1f716446161282539591f6a86f8508e6fc45e1e,65321f911c1e4be2769e9c8417dd919140446e90..f717f0898238c33236415dabbc362d80e4f712bf
@@@ -53,7 -53,7 +53,7 @@@ struct tty_buffer 
  };
  
  struct tty_bufhead {
 -      struct work_struct              work;
 +      struct delayed_work work;
        struct semaphore pty_sem;
        spinlock_t lock;
        struct tty_buffer *head;        /* Queue head */
@@@ -276,9 -276,8 +276,8 @@@ extern int tty_register_ldisc(int disc
  extern int tty_unregister_ldisc(int disc);
  extern int tty_register_driver(struct tty_driver *driver);
  extern int tty_unregister_driver(struct tty_driver *driver);
- extern struct class_device *tty_register_device(struct tty_driver *driver,
-                                               unsigned index,
-                                               struct device *dev);
+ extern struct device *tty_register_device(struct tty_driver *driver,
+                                         unsigned index, struct device *dev);
  extern void tty_unregister_device(struct tty_driver *driver, unsigned index);
  extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp,
                             int buflen);
diff --combined include/linux/usb.h
index 06ce7a6260408057d64fd1353d3f14e772fda429,0cd73edeef139f3d0a4d2bd8d1b43151d8b51eb4..aab5b1b720218b36b8ecb602f1bf44931b8c9c31
@@@ -313,8 -313,13 +313,13 @@@ struct usb_bus 
  /* This is arbitrary.
   * From USB 2.0 spec Table 11-13, offset 7, a hub can
   * have up to 255 ports. The most yet reported is 10.
+  *
+  * Current Wireless USB host hardware (Intel i1480 for example) allows
+  * up to 22 devices to connect. Upcoming hardware might raise that
+  * limit. Because the arrays need to add a bit for hub status data, we
+  * do 31, so plus one evens out to four bytes.
   */
- #define USB_MAXCHILDREN               (16)
+ #define USB_MAXCHILDREN               (31)
  
  struct usb_tt;
  
@@@ -357,7 -362,8 +362,8 @@@ struct usb_device 
        u8 portnum;                     /* Parent port number (origin 1) */
        u8 level;                       /* Number of USB hub ancestors */
  
-       int have_langid;                /* whether string_langid is valid */
+       unsigned discon_suspended:1;    /* Disconnected while suspended */
+       unsigned have_langid:1;         /* whether string_langid is valid */
        int string_langid;              /* language ID for strings */
  
        /* static strings from the device */
  
        int pm_usage_cnt;               /* usage counter for autosuspend */
  #ifdef CONFIG_PM
 -      struct work_struct autosuspend; /* for delayed autosuspends */
 +      struct delayed_work autosuspend; /* for delayed autosuspends */
        struct mutex pm_mutex;          /* protects PM operations */
  
        unsigned auto_pm:1;             /* autosuspend/resume in progress */
@@@ -410,14 -416,37 +416,37 @@@ extern struct usb_device *usb_find_devi
  
  /* USB autosuspend and autoresume */
  #ifdef CONFIG_USB_SUSPEND
+ extern int usb_autopm_set_interface(struct usb_interface *intf);
  extern int usb_autopm_get_interface(struct usb_interface *intf);
  extern void usb_autopm_put_interface(struct usb_interface *intf);
  
+ static inline void usb_autopm_enable(struct usb_interface *intf)
+ {
+       intf->pm_usage_cnt = 0;
+       usb_autopm_set_interface(intf);
+ }
+ static inline void usb_autopm_disable(struct usb_interface *intf)
+ {
+       intf->pm_usage_cnt = 1;
+       usb_autopm_set_interface(intf);
+ }
  #else
- #define usb_autopm_get_interface(intf)                0
- #define usb_autopm_put_interface(intf)                do {} while (0)
- #endif
  
+ static inline int usb_autopm_set_interface(struct usb_interface *intf)
+ { return 0; }
+ static inline int usb_autopm_get_interface(struct usb_interface *intf)
+ { return 0; }
+ static inline void usb_autopm_put_interface(struct usb_interface *intf)
+ { }
+ static inline void usb_autopm_enable(struct usb_interface *intf)
+ { }
+ static inline void usb_autopm_disable(struct usb_interface *intf)
+ { }
+ #endif
  
  /*-------------------------------------------------------------------------*/
  
@@@ -490,17 -519,137 +519,137 @@@ static inline int usb_make_path (struc
  
  /*-------------------------------------------------------------------------*/
  
- extern int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_dir_out(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_xfer_int(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_xfer_isoc(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_is_int_in(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_is_int_out(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_is_isoc_in(const struct usb_endpoint_descriptor *epd);
- extern int usb_endpoint_is_isoc_out(const struct usb_endpoint_descriptor *epd);
+ /**
+  * usb_endpoint_dir_in - check if the endpoint has IN direction
+  * @epd: endpoint to be checked
+  *
+  * Returns true if the endpoint is of type IN, otherwise it returns false.
+  */
+ static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
+ {
+       return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN);
+ }
+ /**
+  * usb_endpoint_dir_out - check if the endpoint has OUT direction
+  * @epd: endpoint to be checked
+  *
+  * Returns true if the endpoint is of type OUT, otherwise it returns false.
+  */
+ static inline int usb_endpoint_dir_out(const struct usb_endpoint_descriptor *epd)
+ {
+       return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
+ }
+ /**
+  * usb_endpoint_xfer_bulk - check if the endpoint has bulk transfer type
+  * @epd: endpoint to be checked
+  *
+  * Returns true if the endpoint is of type bulk, otherwise it returns false.
+  */
+ static inline int usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd)
+ {
+       return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+               USB_ENDPOINT_XFER_BULK);
+ }
+ /**
+  * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type
+  * @epd: endpoint to be checked
+  *
+  * Returns true if the endpoint is of type interrupt, otherwise it returns
+  * false.
+  */
+ static inline int usb_endpoint_xfer_int(const struct usb_endpoint_descriptor *epd)
+ {
+       return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+               USB_ENDPOINT_XFER_INT);
+ }
+ /**
+  * usb_endpoint_xfer_isoc - check if the endpoint has isochronous transfer type
+  * @epd: endpoint to be checked
+  *
+  * Returns true if the endpoint is of type isochronous, otherwise it returns
+  * false.
+  */
+ static inline int usb_endpoint_xfer_isoc(const struct usb_endpoint_descriptor *epd)
+ {
+       return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+               USB_ENDPOINT_XFER_ISOC);
+ }
+ /**
+  * usb_endpoint_is_bulk_in - check if the endpoint is bulk IN
+  * @epd: endpoint to be checked
+  *
+  * Returns true if the endpoint has bulk transfer type and IN direction,
+  * otherwise it returns false.
+  */
+ static inline int usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd)
+ {
+       return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd));
+ }
+ /**
+  * usb_endpoint_is_bulk_out - check if the endpoint is bulk OUT
+  * @epd: endpoint to be checked
+  *
+  * Returns true if the endpoint has bulk transfer type and OUT direction,
+  * otherwise it returns false.
+  */
+ static inline int usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd)
+ {
+       return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_out(epd));
+ }
+ /**
+  * usb_endpoint_is_int_in - check if the endpoint is interrupt IN
+  * @epd: endpoint to be checked
+  *
+  * Returns true if the endpoint has interrupt transfer type and IN direction,
+  * otherwise it returns false.
+  */
+ static inline int usb_endpoint_is_int_in(const struct usb_endpoint_descriptor *epd)
+ {
+       return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_in(epd));
+ }
+ /**
+  * usb_endpoint_is_int_out - check if the endpoint is interrupt OUT
+  * @epd: endpoint to be checked
+  *
+  * Returns true if the endpoint has interrupt transfer type and OUT direction,
+  * otherwise it returns false.
+  */
+ static inline int usb_endpoint_is_int_out(const struct usb_endpoint_descriptor *epd)
+ {
+       return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_out(epd));
+ }
+ /**
+  * usb_endpoint_is_isoc_in - check if the endpoint is isochronous IN
+  * @epd: endpoint to be checked
+  *
+  * Returns true if the endpoint has isochronous transfer type and IN direction,
+  * otherwise it returns false.
+  */
+ static inline int usb_endpoint_is_isoc_in(const struct usb_endpoint_descriptor *epd)
+ {
+       return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_in(epd));
+ }
+ /**
+  * usb_endpoint_is_isoc_out - check if the endpoint is isochronous OUT
+  * @epd: endpoint to be checked
+  *
+  * Returns true if the endpoint has isochronous transfer type and OUT direction,
+  * otherwise it returns false.
+  */
+ static inline int usb_endpoint_is_isoc_out(const struct usb_endpoint_descriptor *epd)
+ {
+       return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_out(epd));
+ }
  
  /*-------------------------------------------------------------------------*/
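These helpers replace the open-coded mask tests seen throughout this merge (ftdi-elan, aircable, usbnet, the phidget drivers). A probe loop using them might look like this, with my_dev and its bulk_in/bulk_out fields as hypothetical driver names:

        static void my_find_endpoints(struct usb_interface *intf,
                                      struct my_dev *dev)
        {
                struct usb_host_interface *alt = intf->cur_altsetting;
                int i;

                for (i = 0; i < alt->desc.bNumEndpoints; i++) {
                        const struct usb_endpoint_descriptor *epd =
                                &alt->endpoint[i].desc;

                        if (usb_endpoint_is_bulk_in(epd))
                                dev->bulk_in = epd->bEndpointAddress;
                        else if (usb_endpoint_is_bulk_out(epd))
                                dev->bulk_out = epd->bEndpointAddress;
                }
        }
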
  
diff --combined include/net/sctp/structs.h
index 60b684470db8b33819daba6f028648e866edcf95,f8cbe40f52c00aa7215a35af4ee49ec082ce3ba7..c089f93ba591e24d674fe9292a2c5a5c9639f034
@@@ -537,7 -537,7 +537,7 @@@ struct sctp_af 
                                          struct net_device *);
        void            (*dst_saddr)    (union sctp_addr *saddr,
                                         struct dst_entry *dst,
-                                        unsigned short port);
+                                        __be16 port);
        int             (*cmp_addr)     (const union sctp_addr *addr1,
                                         const union sctp_addr *addr2);
        void            (*addr_copy)    (union sctp_addr *dst,
                                         struct sock *sk);
        void            (*from_addr_param) (union sctp_addr *,
                                            union sctp_addr_param *,
-                                           __u16 port, int iif);       
+                                           __be16 port, int iif);
        int             (*to_addr_param) (const union sctp_addr *,
                                          union sctp_addr_param *); 
        int             (*addr_valid)   (union sctp_addr *,
                                         struct sctp_sock *,
                                         const struct sk_buff *);
        sctp_scope_t    (*scope) (union sctp_addr *);
-       void            (*inaddr_any)   (union sctp_addr *, unsigned short);
+       void            (*inaddr_any)   (union sctp_addr *, __be16);
        int             (*is_any)       (const union sctp_addr *);
        int             (*available)    (union sctp_addr *,
                                         struct sctp_sock *);
@@@ -587,7 -587,7 +587,7 @@@ struct sctp_pf 
                          struct sctp_sock *);
        int  (*bind_verify) (struct sctp_sock *, union sctp_addr *);
        int  (*send_verify) (struct sctp_sock *, union sctp_addr *);
-       int  (*supported_addrs)(const struct sctp_sock *, __u16 *);
+       int  (*supported_addrs)(const struct sctp_sock *, __be16 *);
        struct sock *(*create_accept_sk) (struct sock *sk,
                                          struct sctp_association *asoc);
        void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
@@@ -1030,7 -1030,7 +1030,7 @@@ void sctp_inq_init(struct sctp_inq *)
  void sctp_inq_free(struct sctp_inq *);
  void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet);
  struct sctp_chunk *sctp_inq_pop(struct sctp_inq *);
 -void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *);
 +void sctp_inq_set_th_handler(struct sctp_inq *, work_func_t);
  
  /* This is the structure we use to hold outbound chunks.  You push
   * chunks in and they automatically pop out the other end as bundled
@@@ -1270,7 -1270,7 +1270,7 @@@ struct sctp_endpoint 
         *          this here so we pre-allocate this once and can re-use
         *          on every receive.
         */
-       __u8 digest[SCTP_SIGNATURE_SIZE];
+       __u8 *digest;
   
        /* sendbuf acct. policy.        */
        __u32 sndbuf_policy;
@@@ -1314,6 -1314,13 +1314,13 @@@ int sctp_process_init(struct sctp_assoc
  __u32 sctp_generate_tag(const struct sctp_endpoint *);
  __u32 sctp_generate_tsn(const struct sctp_endpoint *);
  
+ struct sctp_inithdr_host {
+       __u32 init_tag;
+       __u32 a_rwnd;
+       __u16 num_outbound_streams;
+       __u16 num_inbound_streams;
+       __u32 initial_tsn;
+ };
  
  /* RFC2960
   *
@@@ -1482,9 -1489,9 +1489,9 @@@ struct sctp_association 
                /* This mask is used to disable sending the ASCONF chunk
                 * with specified parameter to peer.
                 */
-               __u16 addip_disabled_mask;
+               __be16 addip_disabled_mask;
  
-               struct sctp_inithdr i;
+               struct sctp_inithdr_host i;
                int cookie_len;
                void *cookie;
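The sctp changes swap host-order types for __be16 where values travel on the wire; with sparse endian checking enabled (__CHECK_ENDIAN__), mixing them with plain integers then warns. Conversions happen explicitly at the boundary, e.g. (illustrative values):

        __be16 wire = htons(2905);      /* host -> network order, sparse-clean */
        __u16  host = ntohs(wire);      /* back to host order for arithmetic */
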
  
diff --combined include/scsi/libsas.h
index 9ccc0365aa896db40bd2ca8b8fb1c4f0df1923ac,1d77b63c5ea4c64882b3e7d37d0c650941729f4a..1f989fb42c7007e17215962bde4f52aa86aef7d9
@@@ -35,6 -35,7 +35,7 @@@
  #include <scsi/scsi_device.h>
  #include <scsi/scsi_cmnd.h>
  #include <scsi/scsi_transport_sas.h>
+ #include <asm/scatterlist.h>
  
  struct block_device;
  
@@@ -200,14 -201,9 +201,14 @@@ struct domain_device 
          void *lldd_dev;
  };
  
 +struct sas_discovery_event {
 +      struct work_struct work;
 +      struct asd_sas_port *port;
 +};
 +
  struct sas_discovery {
        spinlock_t disc_event_lock;
 -      struct work_struct disc_work[DISC_NUM_EVENTS];
 +      struct sas_discovery_event disc_work[DISC_NUM_EVENTS];
        unsigned long    pending;
        u8     fanout_sas_addr[8];
        u8     eeds_a[8];
@@@ -253,19 -249,14 +254,19 @@@ struct asd_sas_port 
        void *lldd_port;          /* not touched by the sas class code */
  };
  
 +struct asd_sas_event {
 +      struct work_struct work;
 +      struct asd_sas_phy *phy;
 +};
 +
  /* The phy pretty much is controlled by the LLDD.
   * The class only reads those fields.
   */
  struct asd_sas_phy {
  /* private: */
        /* protected by ha->event_lock */
 -      struct work_struct   port_events[PORT_NUM_EVENTS];
 -      struct work_struct   phy_events[PHY_NUM_EVENTS];
 +      struct asd_sas_event   port_events[PORT_NUM_EVENTS];
 +      struct asd_sas_event   phy_events[PHY_NUM_EVENTS];
  
        unsigned long port_events_pending;
        unsigned long phy_events_pending;
@@@ -317,15 -308,10 +318,15 @@@ struct scsi_core 
        int               queue_thread_kill;
  };
  
 +struct sas_ha_event {
 +      struct work_struct work;
 +      struct sas_ha_struct *ha;
 +};
 +
  struct sas_ha_struct {
  /* private: */
        spinlock_t       event_lock;
 -      struct work_struct ha_events[HA_NUM_EVENTS];
 +      struct sas_ha_event ha_events[HA_NUM_EVENTS];
        unsigned long    pending;
  
        struct scsi_core core;
diff --combined kernel/kmod.c
index 7dc7a9dad6acc7ffb45439d28ba3b140561c1f54,2b76dee284964c82ca42a81b2ddd1463f72d001d..8d2bea09a4ec9b44f86f5574defa6af2a043917d
@@@ -114,7 -114,6 +114,7 @@@ EXPORT_SYMBOL(request_module)
  #endif /* CONFIG_KMOD */
  
  struct subprocess_info {
 +      struct work_struct work;
        struct completion *complete;
        char *path;
        char **argv;
@@@ -222,10 -221,9 +222,10 @@@ static int wait_for_helper(void *data
  }
  
  /* This is run by khelper thread  */
 -static void __call_usermodehelper(void *data)
 +static void __call_usermodehelper(struct work_struct *work)
  {
 -      struct subprocess_info *sub_info = data;
 +      struct subprocess_info *sub_info =
 +              container_of(work, struct subprocess_info, work);
        pid_t pid;
        int wait = sub_info->wait;
  
@@@ -266,8 -264,6 +266,8 @@@ int call_usermodehelper_keys(char *path
  {
        DECLARE_COMPLETION_ONSTACK(done);
        struct subprocess_info sub_info = {
 +              .work           = __WORK_INITIALIZER(sub_info.work,
 +                                                   __call_usermodehelper),
                .complete       = &done,
                .path           = path,
                .argv           = argv,
                .wait           = wait,
                .retval         = 0,
        };
 -      DECLARE_WORK(work, __call_usermodehelper, &sub_info);
  
        if (!khelper_wq)
                return -EBUSY;
        if (path[0] == '\0')
                return 0;
  
 -      queue_work(khelper_wq, &work);
 +      queue_work(khelper_wq, &sub_info.work);
        wait_for_completion(&done);
        return sub_info.retval;
  }
@@@ -294,8 -291,6 +294,8 @@@ int call_usermodehelper_pipe(char *path
  {
        DECLARE_COMPLETION(done);
        struct subprocess_info sub_info = {
 +              .work           = __WORK_INITIALIZER(sub_info.work,
 +                                                   __call_usermodehelper),
                .complete       = &done,
                .path           = path,
                .argv           = argv,
                .retval         = 0,
        };
        struct file *f;
 -      DECLARE_WORK(work, __call_usermodehelper, &sub_info);
  
        if (!khelper_wq)
                return -EBUSY;
        if (path[0] == '\0')
                return 0;
  
        f = create_write_pipe();
-       if (!f)
-               return -ENOMEM;
+       if (IS_ERR(f))
+               return PTR_ERR(f);
        *filp = f;
  
        f = create_read_pipe(f);
-       if (!f) {
+       if (IS_ERR(f)) {
                free_write_pipe(*filp);
-               return -ENOMEM;
+               return PTR_ERR(f);
        }
        sub_info.stdin = f;
  
 -      queue_work(khelper_wq, &work);
 +      queue_work(khelper_wq, &sub_info.work);
        wait_for_completion(&done);
        return sub_info.retval;
  }
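
The pipe-creation hunk above switches the error checks from NULL tests to the ERR_PTR convention, so the caller propagates the real errno instead of assuming -ENOMEM. A minimal sketch of that convention, with a hypothetical struct and helpers:

#include <linux/err.h>
#include <linux/slab.h>

struct foo {				/* hypothetical */
	int x;
};

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */
	return f;
}

static int foo_use(void)
{
	struct foo *f = foo_create();

	if (IS_ERR(f))
		return PTR_ERR(f);	/* propagate whatever errno was encoded */
	kfree(f);
	return 0;
}
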
diff --combined net/atm/lec.c
index e801fff69dc0d0abf77b6d110f972fc1ef81e0f6,5946ec63724f3634abc4b9cd819b3e6d6a9ac6f4..3fc0abeeaf344fac1d514b4f84b58339b6707703
@@@ -204,9 -204,9 +204,9 @@@ static unsigned char *get_tr_dst(unsign
        memset(rdesc, 0, ETH_ALEN);
        /* offset 4 comes from LAN destination field in LE control frames */
        if (trh->rcf & htons((uint16_t) TR_RCF_DIR_BIT))
-               memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(uint16_t));
+               memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(__be16));
        else {
-               memcpy(&rdesc[4], &trh->rseg[1], sizeof(uint16_t));
+               memcpy(&rdesc[4], &trh->rseg[1], sizeof(__be16));
                rdesc[5] = ((ntohs(trh->rseg[0]) & 0x000f) | (rdesc[5] & 0xf0));
        }
  
@@@ -775,7 -775,7 +775,7 @@@ static void lec_push(struct atm_vcc *vc
                unsigned char *src, *dst;
  
                atm_return(vcc, skb->truesize);
-               if (*(uint16_t *) skb->data == htons(priv->lecid) ||
+               if (*(__be16 *) skb->data == htons(priv->lecid) ||
                    !priv->lecd || !(dev->flags & IFF_UP)) {
                        /*
                         * Probably looping back, or if lecd is missing,
@@@ -1321,11 -1321,10 +1321,10 @@@ static int lane2_resolve(struct net_dev
                if (table == NULL)
                        return -1;
  
-               *tlvs = kmalloc(table->sizeoftlvs, GFP_ATOMIC);
+               *tlvs = kmemdup(table->tlvs, table->sizeoftlvs, GFP_ATOMIC);
                if (*tlvs == NULL)
                        return -1;
  
-               memcpy(*tlvs, table->tlvs, table->sizeoftlvs);
                *sizeoftlvs = table->sizeoftlvs;
  
                return 0;
@@@ -1364,11 -1363,10 +1363,10 @@@ static int lane2_associate_req(struct n
  
        kfree(priv->tlvs);      /* NULL if there was no previous association */
  
-       priv->tlvs = kmalloc(sizeoftlvs, GFP_KERNEL);
+       priv->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
        if (priv->tlvs == NULL)
                return (0);
        priv->sizeoftlvs = sizeoftlvs;
-       memcpy(priv->tlvs, tlvs, sizeoftlvs);
  
        skb = alloc_skb(sizeoftlvs, GFP_ATOMIC);
        if (skb == NULL)
@@@ -1409,12 -1407,10 +1407,10 @@@ static void lane2_associate_ind(struct 
  
        kfree(entry->tlvs);
  
-       entry->tlvs = kmalloc(sizeoftlvs, GFP_KERNEL);
+       entry->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
        if (entry->tlvs == NULL)
                return;
        entry->sizeoftlvs = sizeoftlvs;
-       memcpy(entry->tlvs, tlvs, sizeoftlvs);
  #endif
  #if 0
        printk("lec.c: lane2_associate_ind()\n");
  
  #define LEC_ARP_REFRESH_INTERVAL (3*HZ)
  
 -static void lec_arp_check_expire(void *data);
 +static void lec_arp_check_expire(struct work_struct *work);
  static void lec_arp_expire_arp(unsigned long data);
  
  /* 
@@@ -1481,7 -1477,7 +1477,7 @@@ static void lec_arp_init(struct lec_pri
          INIT_HLIST_HEAD(&priv->lec_no_forward);
          INIT_HLIST_HEAD(&priv->mcast_fwds);
        spin_lock_init(&priv->lec_arp_lock);
 -      INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv);
 +      INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire);
        schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
  }
  
@@@ -1879,11 -1875,10 +1875,11 @@@ static void lec_arp_expire_vcc(unsigne
   *       to ESI_FORWARD_DIRECT. This causes the flush period to end
   *       regardless of the progress of the flush protocol.
   */
 -static void lec_arp_check_expire(void *data)
 +static void lec_arp_check_expire(struct work_struct *work)
  {
        unsigned long flags;
 -      struct lec_priv *priv = data;
 +      struct lec_priv *priv =
 +              container_of(work, struct lec_priv, lec_arp_work.work);
        struct hlist_node *node, *next;
        struct lec_arp_table *entry;
        unsigned long now;
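
Three hunks above collapse a kmalloc() plus memcpy() pair into kmemdup(), which allocates and copies in one call and, like kmalloc(), returns NULL on failure. A one-function sketch with a hypothetical helper name:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

static u8 *dup_tlvs(const u8 *tlvs, size_t len)
{
	/* replaces: p = kmalloc(len, GFP_KERNEL); if (p) memcpy(p, tlvs, len); */
	return kmemdup(tlvs, len, GFP_KERNEL);
}

Besides saving a line, the single call removes the chance of ordering the copy before the allocation check, the bug class the open-coded form invited.
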
diff --combined net/atm/lec.h
index 984e8e6e083ae9b2ee03f5d631f61b38ab652910,24cc95f86741bdb0728de9222e2297dedecb0879..99136babd5357a273a8c758b5d47116fc59d6dcf
  #define LEC_HEADER_LEN 16
  
  struct lecdatahdr_8023 {
-       unsigned short le_header;
+       __be16 le_header;
        unsigned char h_dest[ETH_ALEN];
        unsigned char h_source[ETH_ALEN];
-       unsigned short h_type;
+       __be16 h_type;
  };
  
  struct lecdatahdr_8025 {
-       unsigned short le_header;
+       __be16 le_header;
        unsigned char ac_pad;
        unsigned char fc;
        unsigned char h_dest[ETH_ALEN];
@@@ -92,7 -92,7 +92,7 @@@ struct lec_priv 
        spinlock_t lec_arp_lock;
        struct atm_vcc *mcast_vcc;              /* Default Multicast Send VCC */
        struct atm_vcc *lecd;
 -      struct work_struct lec_arp_work;        /* C10 */
 +      struct delayed_work lec_arp_work;       /* C10 */
        unsigned int maximum_unknown_frame_count;
                                                /*
                                                 * Within the period of time defined by this variable, the client will send
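
lec_arp_work above becomes a struct delayed_work because the new API splits delayed scheduling out of plain work_struct. A delayed_work embeds a work_struct as its .work member, which is why the lec.c handler earlier uses container_of(work, struct lec_priv, lec_arp_work.work). A minimal sketch with hypothetical names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_priv {
	struct delayed_work refresh;
};

static void my_refresh(struct work_struct *work)
{
	/* name the .work member to step out to the delayed_work's owner */
	struct my_priv *priv =
		container_of(work, struct my_priv, refresh.work);

	/* ... periodic housekeeping ... */
	schedule_delayed_work(&priv->refresh, 3 * HZ);	/* rearm */
}

Setup is INIT_DELAYED_WORK(&priv->refresh, my_refresh) followed by an initial schedule_delayed_work(); a delay of 0 queues the work to run as soon as possible.
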
diff --combined net/core/netpoll.c
index 63f24c914ddb561b240bcabf7a8e66574d368c5f,3c58846fcaa5694a8b6c1396e221a02fc1c7d1c1..b3c559b9ac35cb9495edb39e4e77c8fedd5f26cf
  #define MAX_UDP_CHUNK 1460
  #define MAX_SKBS 32
  #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
- #define MAX_RETRIES 20000
  
- static DEFINE_SPINLOCK(skb_list_lock);
- static int nr_skbs;
- static struct sk_buff *skbs;
- static DEFINE_SPINLOCK(queue_lock);
- static int queue_depth;
- static struct sk_buff *queue_head, *queue_tail;
+ static struct sk_buff_head skb_pool;
  
  static atomic_t trapped;
  
+ #define USEC_PER_POLL 50
  #define NETPOLL_RX_ENABLED  1
  #define NETPOLL_RX_DROP     2
  
  static void zap_completion_queue(void);
  static void arp_reply(struct sk_buff *skb);
  
 -static void queue_process(void *p)
 +static void queue_process(struct work_struct *work)
  {
-       unsigned long flags;
 -      struct netpoll_info *npinfo = p;
++      struct netpoll_info *npinfo =
++              container_of(work, struct netpoll_info, tx_work.work);
        struct sk_buff *skb;
  
-       while (queue_head) {
-               spin_lock_irqsave(&queue_lock, flags);
-               skb = queue_head;
-               queue_head = skb->next;
-               if (skb == queue_tail)
-                       queue_head = NULL;
-               queue_depth--;
-               spin_unlock_irqrestore(&queue_lock, flags);
-               dev_queue_xmit(skb);
-       }
- }
+       while ((skb = skb_dequeue(&npinfo->txq))) {
+               struct net_device *dev = skb->dev;
  
- static DECLARE_WORK(send_queue, queue_process);
+               if (!netif_device_present(dev) || !netif_running(dev)) {
+                       __kfree_skb(skb);
+                       continue;
+               }
  
- void netpoll_queue(struct sk_buff *skb)
- {
-       unsigned long flags;
+               netif_tx_lock_bh(dev);
+               if (netif_queue_stopped(dev) ||
+                   dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+                       skb_queue_head(&npinfo->txq, skb);
+                       netif_tx_unlock_bh(dev);
  
-       if (queue_depth == MAX_QUEUE_DEPTH) {
-               __kfree_skb(skb);
-               return;
+                       schedule_delayed_work(&npinfo->tx_work, HZ/10);
+                       return;
+               }
 -
 -              netif_tx_unlock_bh(dev);
        }
-       spin_lock_irqsave(&queue_lock, flags);
-       if (!queue_head)
-               queue_head = skb;
-       else
-               queue_tail->next = skb;
-       queue_tail = skb;
-       queue_depth++;
-       spin_unlock_irqrestore(&queue_lock, flags);
-       schedule_work(&send_queue);
  }
  
- static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
-                            unsigned short ulen, u32 saddr, u32 daddr)
+ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
+                           unsigned short ulen, __be32 saddr, __be32 daddr)
  {
-       unsigned int psum;
+       __wsum psum;
  
        if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY)
                return 0;
        psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
  
        if (skb->ip_summed == CHECKSUM_COMPLETE &&
-           !(u16)csum_fold(csum_add(psum, skb->csum)))
+           !csum_fold(csum_add(psum, skb->csum)))
                return 0;
  
        skb->csum = psum;
@@@ -167,12 -144,11 +143,11 @@@ static void service_arp_queue(struct ne
                arp_reply(skb);
                skb = skb_dequeue(&npi->arp_tx);
        }
-       return;
  }
  
  void netpoll_poll(struct netpoll *np)
  {
-       if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
+       if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
                return;
  
        /* Process pending work on NIC */
@@@ -190,17 -166,15 +165,15 @@@ static void refill_skbs(void
        struct sk_buff *skb;
        unsigned long flags;
  
-       spin_lock_irqsave(&skb_list_lock, flags);
-       while (nr_skbs < MAX_SKBS) {
+       spin_lock_irqsave(&skb_pool.lock, flags);
+       while (skb_pool.qlen < MAX_SKBS) {
                skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                if (!skb)
                        break;
  
-               skb->next = skbs;
-               skbs = skb;
-               nr_skbs++;
+               __skb_queue_tail(&skb_pool, skb);
        }
-       spin_unlock_irqrestore(&skb_list_lock, flags);
+       spin_unlock_irqrestore(&skb_pool.lock, flags);
  }
  
  static void zap_completion_queue(void)
                while (clist != NULL) {
                        struct sk_buff *skb = clist;
                        clist = clist->next;
-                       if(skb->destructor)
+                       if (skb->destructor)
                                dev_kfree_skb_any(skb); /* put this one back */
                        else
                                __kfree_skb(skb);
        put_cpu_var(softnet_data);
  }
  
- static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve)
+ static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
  {
-       int once = 1, count = 0;
-       unsigned long flags;
-       struct sk_buff *skb = NULL;
+       int count = 0;
+       struct sk_buff *skb;
  
        zap_completion_queue();
+       refill_skbs();
  repeat:
-       if (nr_skbs < MAX_SKBS)
-               refill_skbs();
  
        skb = alloc_skb(len, GFP_ATOMIC);
+       if (!skb)
+               skb = skb_dequeue(&skb_pool);
  
        if (!skb) {
-               spin_lock_irqsave(&skb_list_lock, flags);
-               skb = skbs;
-               if (skb) {
-                       skbs = skb->next;
-                       skb->next = NULL;
-                       nr_skbs--;
+               if (++count < 10) {
+                       netpoll_poll(np);
+                       goto repeat;
                }
-               spin_unlock_irqrestore(&skb_list_lock, flags);
-       }
-       if(!skb) {
-               count++;
-               if (once && (count == 1000000)) {
-                       printk("out of netpoll skbs!\n");
-                       once = 0;
-               }
-               netpoll_poll(np);
-               goto repeat;
+               return NULL;
        }
  
        atomic_set(&skb->users, 1);
  
  static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
  {
-       int status;
-       struct netpoll_info *npinfo;
-       if (!np || !np->dev || !netif_running(np->dev)) {
-               __kfree_skb(skb);
-               return;
-       }
-       npinfo = np->dev->npinfo;
-       /* avoid recursion */
-       if (npinfo->poll_owner == smp_processor_id() ||
-           np->dev->xmit_lock_owner == smp_processor_id()) {
-               if (np->drop)
-                       np->drop(skb);
-               else
-                       __kfree_skb(skb);
-               return;
-       }
-       do {
-               npinfo->tries--;
-               netif_tx_lock(np->dev);
+       int status = NETDEV_TX_BUSY;
+       unsigned long tries;
+       struct net_device *dev = np->dev;
+       struct netpoll_info *npinfo = np->dev->npinfo;
+       if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
+               __kfree_skb(skb);
+               return;
+       }
+       /* don't get messages out of order, and no recursion */
+       if (skb_queue_len(&npinfo->txq) == 0 &&
+           npinfo->poll_owner != smp_processor_id() &&
+           netif_tx_trylock(dev)) {
+               /* try until next clock tick */
+               for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) {
+                       if (!netif_queue_stopped(dev))
+                               status = dev->hard_start_xmit(skb, dev);
  
-               /*
-                * network drivers do not expect to be called if the queue is
-                * stopped.
-                */
-               status = NETDEV_TX_BUSY;
-               if (!netif_queue_stopped(np->dev))
-                       status = np->dev->hard_start_xmit(skb, np->dev);
+                       if (status == NETDEV_TX_OK)
+                               break;
  
-               netif_tx_unlock(np->dev);
+               /* tickle the device: maybe there is some cleanup pending */
+                       netpoll_poll(np);
  
-               /* success */
-               if(!status) {
-                       npinfo->tries = MAX_RETRIES; /* reset */
-                       return;
+                       udelay(USEC_PER_POLL);
                }
+               netif_tx_unlock(dev);
+       }
  
-               /* transmit busy */
-               netpoll_poll(np);
-               udelay(50);
-       } while (npinfo->tries > 0);
+       if (status != NETDEV_TX_OK) {
+               skb_queue_tail(&npinfo->txq, skb);
 -              schedule_work(&npinfo->tx_work);
++              schedule_delayed_work(&npinfo->tx_work, 0);
+       }
  }
  
  void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
                                        udp_len, IPPROTO_UDP,
                                        csum_partial((unsigned char *)udph, udp_len, 0));
        if (udph->check == 0)
-               udph->check = -1;
+               udph->check = CSUM_MANGLED_0;
  
        skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
  
@@@ -379,7 -330,7 +329,7 @@@ static void arp_reply(struct sk_buff *s
        struct arphdr *arp;
        unsigned char *arp_ptr;
        int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
-       u32 sip, tip;
+       __be32 sip, tip;
        struct sk_buff *send_skb;
        struct netpoll *np = NULL;
  
  
        if (np->dev->hard_header &&
            np->dev->hard_header(send_skb, skb->dev, ptype,
-                                      np->remote_mac, np->local_mac,
-                                      send_skb->len) < 0) {
+                                np->remote_mac, np->local_mac,
+                                send_skb->len) < 0) {
                kfree_skb(send_skb);
                return;
        }
@@@ -470,7 -421,6 +420,6 @@@ int __netpoll_rx(struct sk_buff *skb
        struct netpoll_info *npi = skb->dev->npinfo;
        struct netpoll *np = npi->rx_np;
  
        if (!np)
                goto out;
        if (skb->dev->type != ARPHRD_ETHER)
@@@ -543,47 -493,47 +492,47 @@@ int netpoll_parse_options(struct netpol
  {
        char *cur=opt, *delim;
  
-       if(*cur != '@') {
+       if (*cur != '@') {
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
-               *delim=0;
-               np->local_port=simple_strtol(cur, NULL, 10);
-               cur=delim;
+               *delim = 0;
+               np->local_port = simple_strtol(cur, NULL, 10);
+               cur = delim;
        }
        cur++;
        printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port);
  
-       if(*cur != '/') {
+       if (*cur != '/') {
                if ((delim = strchr(cur, '/')) == NULL)
                        goto parse_failed;
-               *delim=0;
-               np->local_ip=ntohl(in_aton(cur));
-               cur=delim;
+               *delim = 0;
+               np->local_ip = ntohl(in_aton(cur));
+               cur = delim;
  
                printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
                       np->name, HIPQUAD(np->local_ip));
        }
        cur++;
  
-       if ( *cur != ',') {
+       if (*cur != ',') {
                /* parse out dev name */
                if ((delim = strchr(cur, ',')) == NULL)
                        goto parse_failed;
-               *delim=0;
+               *delim = 0;
                strlcpy(np->dev_name, cur, sizeof(np->dev_name));
-               cur=delim;
+               cur = delim;
        }
        cur++;
  
        printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name);
  
-       if ( *cur != '@' ) {
+       if (*cur != '@') {
                /* dst port */
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
-               *delim=0;
-               np->remote_port=simple_strtol(cur, NULL, 10);
-               cur=delim;
+               *delim = 0;
+               np->remote_port = simple_strtol(cur, NULL, 10);
+               cur = delim;
        }
        cur++;
        printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port);
        /* dst ip */
        if ((delim = strchr(cur, '/')) == NULL)
                goto parse_failed;
-       *delim=0;
-       np->remote_ip=ntohl(in_aton(cur));
-       cur=delim+1;
+       *delim = 0;
+       np->remote_ip = ntohl(in_aton(cur));
+       cur = delim + 1;
  
        printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
-                      np->name, HIPQUAD(np->remote_ip));
+              np->name, HIPQUAD(np->remote_ip));
  
-       if( *cur != 0 )
-       {
+       if (*cur != 0) {
                /* MAC address */
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
-               *delim=0;
-               np->remote_mac[0]=simple_strtol(cur, NULL, 16);
-               cur=delim+1;
+               *delim = 0;
+               np->remote_mac[0] = simple_strtol(cur, NULL, 16);
+               cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
-               *delim=0;
-               np->remote_mac[1]=simple_strtol(cur, NULL, 16);
-               cur=delim+1;
+               *delim = 0;
+               np->remote_mac[1] = simple_strtol(cur, NULL, 16);
+               cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
-               *delim=0;
-               np->remote_mac[2]=simple_strtol(cur, NULL, 16);
-               cur=delim+1;
+               *delim = 0;
+               np->remote_mac[2] = simple_strtol(cur, NULL, 16);
+               cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
-               *delim=0;
-               np->remote_mac[3]=simple_strtol(cur, NULL, 16);
-               cur=delim+1;
+               *delim = 0;
+               np->remote_mac[3] = simple_strtol(cur, NULL, 16);
+               cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
-               *delim=0;
-               np->remote_mac[4]=simple_strtol(cur, NULL, 16);
-               cur=delim+1;
-               np->remote_mac[5]=simple_strtol(cur, NULL, 16);
+               *delim = 0;
+               np->remote_mac[4] = simple_strtol(cur, NULL, 16);
+               cur = delim + 1;
+               np->remote_mac[5] = simple_strtol(cur, NULL, 16);
        }
  
        printk(KERN_INFO "%s: remote ethernet address "
@@@ -653,34 -602,44 +601,44 @@@ int netpoll_setup(struct netpoll *np
        struct in_device *in_dev;
        struct netpoll_info *npinfo;
        unsigned long flags;
+       int err;
  
        if (np->dev_name)
                ndev = dev_get_by_name(np->dev_name);
        if (!ndev) {
                printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
                       np->name, np->dev_name);
-               return -1;
+               return -ENODEV;
        }
  
        np->dev = ndev;
        if (!ndev->npinfo) {
                npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
-               if (!npinfo)
+               if (!npinfo) {
+                       err = -ENOMEM;
                        goto release;
+               }
  
                npinfo->rx_flags = 0;
                npinfo->rx_np = NULL;
                spin_lock_init(&npinfo->poll_lock);
                npinfo->poll_owner = -1;
-               npinfo->tries = MAX_RETRIES;
                spin_lock_init(&npinfo->rx_lock);
                skb_queue_head_init(&npinfo->arp_tx);
-       } else
+               skb_queue_head_init(&npinfo->txq);
 -              INIT_WORK(&npinfo->tx_work, queue_process, npinfo);
++              INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
+               atomic_set(&npinfo->refcnt, 1);
+       } else {
                npinfo = ndev->npinfo;
+               atomic_inc(&npinfo->refcnt);
+       }
  
        if (!ndev->poll_controller) {
                printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
                       np->name, np->dev_name);
+               err = -ENOTSUPP;
                goto release;
        }
  
                       np->name, np->dev_name);
  
                rtnl_lock();
-               if (dev_change_flags(ndev, ndev->flags | IFF_UP) < 0) {
+               err = dev_open(ndev);
+               rtnl_unlock();
+               if (err) {
                        printk(KERN_ERR "%s: failed to open %s\n",
-                              np->name, np->dev_name);
-                       rtnl_unlock();
+                              np->name, ndev->name);
                        goto release;
                }
-               rtnl_unlock();
  
                atleast = jiffies + HZ/10;
                atmost = jiffies + 4*HZ;
                        rcu_read_unlock();
                        printk(KERN_ERR "%s: no IP address for %s, aborting\n",
                               np->name, np->dev_name);
+                       err = -EDESTADDRREQ;
                        goto release;
                }
  
                kfree(npinfo);
        np->dev = NULL;
        dev_put(ndev);
-       return -1;
+       return err;
  }
  
+ static int __init netpoll_init(void)
+ {
+       skb_queue_head_init(&skb_pool);
+       return 0;
+ }
+ core_initcall(netpoll_init);
  void netpoll_cleanup(struct netpoll *np)
  {
        struct netpoll_info *npinfo;
  
        if (np->dev) {
                npinfo = np->dev->npinfo;
-               if (npinfo && npinfo->rx_np == np) {
-                       spin_lock_irqsave(&npinfo->rx_lock, flags);
-                       npinfo->rx_np = NULL;
-                       npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
-                       spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+               if (npinfo) {
+                       if (npinfo->rx_np == np) {
+                               spin_lock_irqsave(&npinfo->rx_lock, flags);
+                               npinfo->rx_np = NULL;
+                               npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
+                               spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+                       }
+                       np->dev->npinfo = NULL;
+                       if (atomic_dec_and_test(&npinfo->refcnt)) {
+                               skb_queue_purge(&npinfo->arp_tx);
+                               skb_queue_purge(&npinfo->txq);
+                               cancel_rearming_delayed_work(&npinfo->tx_work);
+                               flush_scheduled_work();
+                               kfree(npinfo);
+                       }
                }
                dev_put(np->dev);
        }
  
@@@ -809,4 -790,3 +789,3 @@@ EXPORT_SYMBOL(netpoll_setup)
  EXPORT_SYMBOL(netpoll_cleanup);
  EXPORT_SYMBOL(netpoll_send_udp);
  EXPORT_SYMBOL(netpoll_poll);
- EXPORT_SYMBOL(netpoll_queue);
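
The netpoll rewrite above also replaces the hand-rolled, separately locked skb list (skb_list_lock, nr_skbs, skbs) with a struct sk_buff_head, which bundles the list head, length and spinlock into one object. A minimal sketch of the pool shape it adopts:

#include <linux/skbuff.h>

static struct sk_buff_head pool;

static void pool_init(void)
{
	skb_queue_head_init(&pool);	/* initialises list head and lock */
}

static void pool_put(struct sk_buff *skb)
{
	skb_queue_tail(&pool, skb);	/* takes pool.lock internally */
}

static struct sk_buff *pool_get(void)
{
	return skb_dequeue(&pool);	/* NULL when the pool is empty */
}

The qlen field replaces the hand-counted nr_skbs, and the __skb_queue_tail() variant used in refill_skbs() above skips the internal locking because the caller already holds pool.lock.
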
diff --combined net/dccp/minisocks.c
index 36db5be2a9e9d693ae88cafa527e94c3ed03e90c,7b52f2a03eefd291deffba6b45f0c9efc2fcad63..4c9e26775f72ddad7a1c0f9f53c7c9656247e91f
@@@ -11,6 -11,7 +11,7 @@@
   */
  
  #include <linux/dccp.h>
+ #include <linux/kernel.h>
  #include <linux/skbuff.h>
  #include <linux/timer.h>
  
@@@ -31,7 -32,8 +32,7 @@@ struct inet_timewait_death_row dccp_dea
        .tw_timer       = TIMER_INITIALIZER(inet_twdr_hangman, 0,
                                            (unsigned long)&dccp_death_row),
        .twkill_work    = __WORK_INITIALIZER(dccp_death_row.twkill_work,
 -                                           inet_twdr_twkill_work,
 -                                           &dccp_death_row),
 +                                           inet_twdr_twkill_work),
  /* Short-time timewait calendar */
  
        .twcal_hand     = -1,
@@@ -82,8 -84,7 +83,7 @@@ void dccp_time_wait(struct sock *sk, in
                 * socket up.  We've got bigger problems than
                 * non-graceful socket closings.
                 */
-               LIMIT_NETDEBUG(KERN_INFO "DCCP: time wait bucket "
-                                        "table overflow\n");
+               DCCP_WARN("time wait bucket table overflow\n");
        }
  
        dccp_done(sk);
@@@ -96,8 -97,8 +96,8 @@@ struct sock *dccp_create_openreq_child(
        /*
         * Step 3: Process LISTEN state
         *
-        * // Generate a new socket and switch to that socket
-        * Set S := new socket for this port pair
+        *   (* Generate a new socket and switch to that socket *)
+        *   Set S := new socket for this port pair
         */
        struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
  
@@@ -146,9 -147,9 +146,9 @@@ out_free
                /*
                 * Step 3: Process LISTEN state
                 *
-                *      Choose S.ISS (initial seqno) or set from Init Cookie
-                *      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init
-                *      Cookie
+                *    Choose S.ISS (initial seqno) or set from Init Cookies
+                *    Initialize S.GAR := S.ISS
+                *    Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
                 */
  
                /* See dccp_v4_conn_request */
@@@ -194,15 -195,17 +194,17 @@@ struct sock *dccp_check_req(struct soc
  
        /* Check for retransmitted REQUEST */
        if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
-               if (after48(DCCP_SKB_CB(skb)->dccpd_seq,
-                           dccp_rsk(req)->dreq_isr)) {
-                       struct dccp_request_sock *dreq = dccp_rsk(req);
+               struct dccp_request_sock *dreq = dccp_rsk(req);
  
+               if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_isr)) {
                        dccp_pr_debug("Retransmitted REQUEST\n");
-                       /* Send another RESPONSE packet */
-                       dccp_set_seqno(&dreq->dreq_iss, dreq->dreq_iss + 1);
-                       dccp_set_seqno(&dreq->dreq_isr,
-                                      DCCP_SKB_CB(skb)->dccpd_seq);
+                       dreq->dreq_isr = DCCP_SKB_CB(skb)->dccpd_seq;
+                       /*
+                        * Send another RESPONSE packet
+                        * To protect against Request floods, increment retrans
+                        * counter (backoff, monitored by dccp_response_timer).
+                        */
+                       req->retrans++;
                        req->rsk_ops->rtx_syn_ack(sk, req, NULL);
                }
                /* Network Duplicate, discard packet */
@@@ -242,7 -245,7 +244,7 @@@ listen_overflow
        DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
  drop:
        if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
-               req->rsk_ops->send_reset(skb);
+               req->rsk_ops->send_reset(sk, skb);
  
        inet_csk_reqsk_queue_drop(sk, req, prev);
        goto out;
@@@ -282,3 -285,19 +284,19 @@@ int dccp_child_process(struct sock *par
  }
  
  EXPORT_SYMBOL_GPL(dccp_child_process);
+ void dccp_reqsk_send_ack(struct sk_buff *skb, struct request_sock *rsk)
+ {
+       DCCP_BUG("DCCP-ACK packets are never sent in LISTEN/RESPOND state");
+ }
+ EXPORT_SYMBOL_GPL(dccp_reqsk_send_ack);
+ void dccp_reqsk_init(struct request_sock *req, struct sk_buff *skb)
+ {
+       inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport;
+       inet_rsk(req)->acked    = 0;
+       req->rcv_wnd            = sysctl_dccp_feat_sequence_window;
+ }
+ EXPORT_SYMBOL_GPL(dccp_reqsk_init);
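
The twkill_work hunk above shows the static-initialiser side of the workqueue conversion: __WORK_INITIALIZER() drops its old data argument, so a handler for statically allocated work recovers its context with container_of() like everyone else. A sketch with hypothetical names:

#include <linux/workqueue.h>

static void my_handler(struct work_struct *work);

struct my_state {
	int counter;
	struct work_struct work;
};

static struct my_state my_state = {
	.counter = 0,
	/* two-argument form: the work item names itself, no data pointer */
	.work = __WORK_INITIALIZER(my_state.work, my_handler),
};

static void my_handler(struct work_struct *work)
{
	struct my_state *s = container_of(work, struct my_state, work);

	s->counter++;
}
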
diff --combined net/ieee80211/softmac/ieee80211softmac_auth.c
index 2ae1833b657ab658e1c3f2a1074dad0c625020a9,0612015f1c782688602e4965e23532a121e5eed6..6012705aa4f8bba10fcc4905c726730339b72d43
@@@ -26,7 -26,7 +26,7 @@@
  
  #include "ieee80211softmac_priv.h"
  
 -static void ieee80211softmac_auth_queue(void *data);
 +static void ieee80211softmac_auth_queue(struct work_struct *work);
  
  /* Queues an auth request to the desired AP */
  int
@@@ -54,14 -54,14 +54,14 @@@ ieee80211softmac_auth_req(struct ieee80
        auth->mac = mac;
        auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT;
        auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST;
 -      INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth);
 +      INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue);
        
        /* Lock (for list) */
        spin_lock_irqsave(&mac->lock, flags);
  
        /* add to list */
        list_add_tail(&auth->list, &mac->auth_queue);
 -      schedule_work(&auth->work);
 +      schedule_delayed_work(&auth->work, 0);
        spin_unlock_irqrestore(&mac->lock, flags);
        
        return 0;
  
  /* Sends an auth request to the desired AP and handles timeouts */
  static void
 -ieee80211softmac_auth_queue(void *data)
 +ieee80211softmac_auth_queue(struct work_struct *work)
  {
        struct ieee80211softmac_device *mac;
        struct ieee80211softmac_auth_queue_item *auth;
        struct ieee80211softmac_network *net;
        unsigned long flags;
  
 -      auth = (struct ieee80211softmac_auth_queue_item *)data;
 +      auth = container_of(work, struct ieee80211softmac_auth_queue_item,
 +                          work.work);
        net = auth->net;
        mac = auth->mac;
  
  
  /* Sends a response to an auth challenge (for shared key auth). */
  static void
 -ieee80211softmac_auth_challenge_response(void *_aq)
 +ieee80211softmac_auth_challenge_response(struct work_struct *work)
  {
 -      struct ieee80211softmac_auth_queue_item *aq = _aq;
 +      struct ieee80211softmac_auth_queue_item *aq =
 +              container_of(work, struct ieee80211softmac_auth_queue_item,
 +                           work.work);
  
        /* Send our response */
        ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
@@@ -161,7 -158,7 +161,7 @@@ ieee80211softmac_auth_resp(struct net_d
        /* Make sure that we've got an auth queue item for this request */
        if(aq == NULL)
        {
-               printkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but no queue item exists.\n", MAC_ARG(auth->header.addr2));
+               dprintkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but no queue item exists.\n", MAC_ARG(auth->header.addr2));
                /* Error #? */
                return -1;
        }                       
        /* Check for out of order authentication */
        if(!net->authenticating)
        {
-               printkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but did not request authentication.\n",MAC_ARG(auth->header.addr2));
+               dprintkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but did not request authentication.\n",MAC_ARG(auth->header.addr2));
                return -1;
        }
  
                        net->challenge_len = *data++;   
                        if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN)
                                net->challenge_len = WLAN_AUTH_CHALLENGE_LEN;
-                       if (net->challenge != NULL)
-                               kfree(net->challenge);
-                       net->challenge = kmalloc(net->challenge_len, GFP_ATOMIC);
-                       memcpy(net->challenge, data, net->challenge_len);
+                       kfree(net->challenge);
+                       net->challenge = kmemdup(data, net->challenge_len,
+                                                GFP_ATOMIC);
+                       if (net->challenge == NULL) {
+                               printkl(KERN_NOTICE PFX "Shared Key "
+                                       "Authentication failed due to "
+                                       "memory shortage.\n");
+                               spin_unlock_irqrestore(&mac->lock, flags);
+                               break;
+                       }
                        aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE; 
  
                        /* We reuse the work struct from the auth request here,
                         * since we have obviously already sent the initial auth
                         * request. */
                        cancel_delayed_work(&aq->work);
 -                      INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq);
 -                      schedule_work(&aq->work);
 +                      INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response);
 +                      schedule_delayed_work(&aq->work, 0);
                        spin_unlock_irqrestore(&mac->lock, flags);
                        return 0;
                case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
@@@ -345,7 -348,7 +351,7 @@@ ieee80211softmac_deauth_req(struct ieee
        /* Make sure the network is authenticated */
        if (!net->authenticated)
        {
-               printkl(KERN_DEBUG PFX "Can't send deauthentication packet, network is not authenticated.\n");
+               dprintkl(KERN_DEBUG PFX "Can't send deauthentication packet, network is not authenticated.\n");
                /* Error okay? */
                return -EPERM;
        }
@@@ -379,7 -382,7 +385,7 @@@ ieee80211softmac_deauth_resp(struct net
        net = ieee80211softmac_get_network_by_bssid(mac, deauth->header.addr2);
        
        if (net == NULL) {
-               printkl(KERN_DEBUG PFX "Received deauthentication packet from "MAC_FMT", but that network is unknown.\n",
+               dprintkl(KERN_DEBUG PFX "Received deauthentication packet from "MAC_FMT", but that network is unknown.\n",
                        MAC_ARG(deauth->header.addr2));
                return 0;
        }
        /* Make sure the network is authenticated */
        if(!net->authenticated)
        {
-               printkl(KERN_DEBUG PFX "Can't perform deauthentication, network is not authenticated.\n");
+               dprintkl(KERN_DEBUG PFX "Can't perform deauthentication, network is not authenticated.\n");
                /* Error okay? */
                return -EPERM;
        }
        ieee80211softmac_deauth_from_net(mac, net);
  
        /* let's try to re-associate */
 -      schedule_work(&mac->associnfo.work);
 +      schedule_delayed_work(&mac->associnfo.work, 0);
        return 0;
  }
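
The auth path above reuses one delayed_work for two handlers: it cancels the pending request work, re-initialises the same item with the challenge-response function, and queues it with a zero delay. A sketch of that hand-off, mirroring the softmac flow with hypothetical names:

#include <linux/workqueue.h>

struct my_req {
	struct delayed_work work;
};

static void second_step(struct work_struct *work);

static void first_step(struct work_struct *work)
{
	struct my_req *req = container_of(work, struct my_req, work.work);

	/* hand the same work item over to the next handler */
	cancel_delayed_work(&req->work);
	INIT_DELAYED_WORK(&req->work, second_step);
	schedule_delayed_work(&req->work, 0);	/* delay 0: run soon */
}

static void second_step(struct work_struct *work)
{
	/* recover req with container_of() as above and send the response */
}
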
diff --combined net/ieee80211/softmac/ieee80211softmac_scan.c
index a8326076581aa0a596c6f7a5330c55a5dcf28637,5507feab32de420aef36e4f0796530cf869d34a2..0c85d6c24cdbca332a5e865fdbb2d51f0ab05621
@@@ -47,7 -47,6 +47,6 @@@ ieee80211softmac_start_scan(struct ieee
        sm->scanning = 1;
        spin_unlock_irqrestore(&sm->lock, flags);
  
-       netif_tx_disable(sm->ieee->dev);
        ret = sm->start_scan(sm->dev);
        if (ret) {
                spin_lock_irqsave(&sm->lock, flags);
@@@ -91,14 -90,12 +90,14 @@@ ieee80211softmac_wait_for_scan(struct i
  
  
  /* internal scanning implementation follows */
 -void ieee80211softmac_scan(void *d)
 +void ieee80211softmac_scan(struct work_struct *work)
  {
        int invalid_channel;
        u8 current_channel_idx;
 -      struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d;
 -      struct ieee80211softmac_scaninfo *si = sm->scaninfo;
 +      struct ieee80211softmac_scaninfo *si =
 +              container_of(work, struct ieee80211softmac_scaninfo,
 +                           softmac_scan.work);
 +      struct ieee80211softmac_device *sm = si->mac;
        unsigned long flags;
  
        while (!(si->stop) && (si->current_channel_idx < si->number_channels)) {
        si->started = 0;
        spin_unlock_irqrestore(&sm->lock, flags);
  
-       dprintk(PFX "Scanning finished\n");
+       dprintk(PFX "Scanning finished: scanned %d channels starting with channel %d\n",
+                    sm->scaninfo->number_channels, sm->scaninfo->channels[0].channel);
        ieee80211softmac_scan_finished(sm);
        complete_all(&sm->scaninfo->finished);
  }
@@@ -148,8 -146,7 +148,8 @@@ static inline struct ieee80211softmac_s
        struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC);
        if (unlikely(!info))
                return NULL;
 -      INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac);
 +      INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan);
 +      info->mac = mac;
        init_completion(&info->finished);
        return info;
  }
@@@ -186,13 -183,11 +186,11 @@@ int ieee80211softmac_start_scan_impleme
                sm->scaninfo->channels = sm->ieee->geo.bg;
                sm->scaninfo->number_channels = sm->ieee->geo.bg_channels;
        }
-       dprintk(PFX "Start scanning with channel: %d\n", sm->scaninfo->channels[0].channel);
-       dprintk(PFX "Scanning %d channels\n", sm->scaninfo->number_channels);
        sm->scaninfo->current_channel_idx = 0;
        sm->scaninfo->started = 1;
        sm->scaninfo->stop = 0;
        INIT_COMPLETION(sm->scaninfo->finished);
 -      schedule_work(&sm->scaninfo->softmac_scan);
 +      schedule_delayed_work(&sm->scaninfo->softmac_scan, 0);
        spin_unlock_irqrestore(&sm->lock, flags);
        return 0;
  }
@@@ -251,7 -246,6 +249,6 @@@ void ieee80211softmac_scan_finished(str
                if (net)
                        sm->set_channel(sm->dev, net->channel);
        }
-       netif_wake_queue(sm->ieee->dev);
        ieee80211softmac_call_events(sm, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, NULL);
  }
  EXPORT_SYMBOL_GPL(ieee80211softmac_scan_finished);
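
ieee80211softmac_scaninfo grows a mac back-pointer above because the scan handler can no longer be handed the softmac device as a void *: it must reach everything through the structure the work item is embedded in. A sketch of adding such a back-pointer at allocation time, hypothetical names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct owner;

struct scan_ctx {
	struct delayed_work work;
	struct owner *owner;		/* back-pointer, as info->mac above */
};

struct owner {
	int scans_done;
};

static void scan_fn(struct work_struct *work)
{
	struct scan_ctx *ctx = container_of(work, struct scan_ctx, work.work);

	ctx->owner->scans_done++;	/* context the old API passed as data */
}

static struct scan_ctx *scan_ctx_alloc(struct owner *o)
{
	struct scan_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);

	if (!ctx)
		return NULL;
	INIT_DELAYED_WORK(&ctx->work, scan_fn);
	ctx->owner = o;
	return ctx;
}
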
diff --combined net/ipv4/tcp_minisocks.c
index af7b2c986b1fd33804f64a44399901d4e9116071,6dddf59c1fb934ce9af25f2060ff8ce9b7a6556b..4a3889dd194352a0e26f1f1c1b486ccad4c36a5c
@@@ -45,7 -45,8 +45,7 @@@ struct inet_timewait_death_row tcp_deat
        .tw_timer       = TIMER_INITIALIZER(inet_twdr_hangman, 0,
                                            (unsigned long)&tcp_death_row),
        .twkill_work    = __WORK_INITIALIZER(tcp_death_row.twkill_work,
 -                                           inet_twdr_twkill_work,
 -                                           &tcp_death_row),
 +                                           inet_twdr_twkill_work),
  /* Short-time timewait calendar */
  
        .twcal_hand     = -1,
@@@ -305,6 -306,28 +305,28 @@@ void tcp_time_wait(struct sock *sk, in
                        tw->tw_ipv6only = np->ipv6only;
                }
  #endif
+ #ifdef CONFIG_TCP_MD5SIG
+               /*
+                * The timewait bucket does not have the key DB from the
+                * sock structure. We just make a quick copy of the
+                * md5 key being used (if indeed we are using one)
+                * so the timewait ack generating code has the key.
+                */
+               do {
+                       struct tcp_md5sig_key *key;
+                       memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key));
+                       tcptw->tw_md5_keylen = 0;
+                       key = tp->af_specific->md5_lookup(sk, sk);
+                       if (key != NULL) {
+                               memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
+                               tcptw->tw_md5_keylen = key->keylen;
+                               if (tcp_alloc_md5sig_pool() == NULL)
+                                       BUG();
+                       }
+               } while(0);
+ #endif
                /* Linkage updates. */
                __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
  
                 * socket up.  We've got bigger problems than
                 * non-graceful socket closings.
                 */
-               if (net_ratelimit())
-                       printk(KERN_INFO "TCP: time wait bucket table overflow\n");
+               LIMIT_NETDEBUG(KERN_INFO "TCP: time wait bucket table overflow\n");
        }
  
        tcp_update_metrics(sk);
        tcp_done(sk);
  }
  
+ void tcp_twsk_destructor(struct sock *sk)
+ {
+ #ifdef CONFIG_TCP_MD5SIG
+       struct tcp_timewait_sock *twsk = tcp_twsk(sk);
+       if (twsk->tw_md5_keylen)
+               tcp_put_md5sig_pool();
+ #endif
+ }
+ EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
  /* This is not only more efficient than what we used to do, it eliminates
   * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
   *
@@@ -434,6 -467,11 +466,11 @@@ struct sock *tcp_create_openreq_child(s
                        newtp->rx_opt.ts_recent_stamp = 0;
                        newtp->tcp_header_len = sizeof(struct tcphdr);
                }
+ #ifdef CONFIG_TCP_MD5SIG
+               newtp->md5sig_info = NULL;      /*XXX*/
+               if (newtp->af_specific->md5_lookup(sk, newsk))
+                       newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
+ #endif
                if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
                        newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
                newtp->rx_opt.mss_clamp = req->mss;
@@@ -454,7 -492,7 +491,7 @@@ struct sock *tcp_check_req(struct sock 
                           struct request_sock **prev)
  {
        struct tcphdr *th = skb->h.th;
-       u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
+       __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        int paws_reject = 0;
        struct tcp_options_received tmp_opt;
        struct sock *child;
                                                                 req, NULL);
                if (child == NULL)
                        goto listen_overflow;
+ #ifdef CONFIG_TCP_MD5SIG
+               else {
+                       /* Copy over the MD5 key from the original socket */
+                       struct tcp_md5sig_key *key;
+                       struct tcp_sock *tp = tcp_sk(sk);
+                       key = tp->af_specific->md5_lookup(sk, child);
+                       if (key != NULL) {
+                               /*
+                                * We're using one, so create a matching key on the
+                                * newsk structure. If we fail to get memory then we
+                                * end up not copying the key across. Shucks.
+                                */
+                               char *newkey = kmemdup(key->key, key->keylen,
+                                                      GFP_ATOMIC);
+                               if (newkey) {
+                                       if (!tcp_alloc_md5sig_pool())
+                                               BUG();
+                                       tp->af_specific->md5_add(child, child,
+                                                                newkey,
+                                                                key->keylen);
+                               }
+                       }
+               }
+ #endif
  
                inet_csk_reqsk_queue_unlink(sk, req, prev);
                inet_csk_reqsk_queue_removed(sk, req);
        embryonic_reset:
                NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
                if (!(flg & TCP_FLAG_RST))
-                       req->rsk_ops->send_reset(skb);
+                       req->rsk_ops->send_reset(sk, skb);
  
                inet_csk_reqsk_queue_drop(sk, req, prev);
                return NULL;
diff --combined net/sctp/associola.c
index 88124696ba60e505e453fe8e794c996eaf1e2950,39471d3b31b974b93e7710de9a5156dc047f554f..ad0057db0f91884b39a393b06759bdc383c9776b
@@@ -61,7 -61,7 +61,7 @@@
  #include <net/sctp/sm.h>
  
  /* Forward declarations for internal functions. */
 -static void sctp_assoc_bh_rcv(struct sctp_association *asoc);
 +static void sctp_assoc_bh_rcv(struct work_struct *work);
  
  
  /* 1st Level Abstractions. */
@@@ -269,7 -269,9 +269,7 @@@ static struct sctp_association *sctp_as
  
        /* Create an input queue.  */
        sctp_inq_init(&asoc->base.inqueue);
 -      sctp_inq_set_th_handler(&asoc->base.inqueue,
 -                                  (void (*)(void *))sctp_assoc_bh_rcv,
 -                                  asoc);
 +      sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
  
        /* Create an output queue.  */
        sctp_outq_init(asoc, &asoc->outqueue);
@@@ -486,7 -488,7 +486,7 @@@ void sctp_assoc_rm_peer(struct sctp_ass
                                 " port: %d\n",
                                 asoc,
                                 (&peer->ipaddr),
-                                peer->ipaddr.v4.sin_port);
+                                ntohs(peer->ipaddr.v4.sin_port));
  
        /* If we are to remove the current retran_path, update it
         * to the next peer before removing this peer from the list.
@@@ -535,13 -537,13 +535,13 @@@ struct sctp_transport *sctp_assoc_add_p
        sp = sctp_sk(asoc->base.sk);
  
        /* AF_INET and AF_INET6 share common port field. */
-       port = addr->v4.sin_port;
+       port = ntohs(addr->v4.sin_port);
  
        SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
                                 " port: %d state:%d\n",
                                 asoc,
                                 addr,
-                                addr->v4.sin_port,
+                                port,
                                 peer_state);
  
        /* Set the port if it has not been set yet.  */
@@@ -707,6 -709,7 +707,7 @@@ void sctp_assoc_control_transport(struc
        struct sctp_transport *first;
        struct sctp_transport *second;
        struct sctp_ulpevent *event;
+       struct sockaddr_storage addr;
        struct list_head *pos;
        int spc_state = 0;
  
        /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
         * user.
         */
-       event = sctp_ulpevent_make_peer_addr_change(asoc,
-                               (struct sockaddr_storage *) &transport->ipaddr,
+       memset(&addr, 0, sizeof(struct sockaddr_storage));
+       memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len);
+       event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
                                0, spc_state, error, GFP_ATOMIC);
        if (event)
                sctp_ulpq_tail_event(&asoc->ulpq, event);
@@@ -866,7 -870,7 +868,7 @@@ struct sctp_transport *sctp_assoc_looku
        struct list_head *entry, *pos;
        struct sctp_transport *transport;
        struct sctp_chunk *chunk;
-       __u32 key = htonl(tsn);
+       __be32 key = htonl(tsn);
  
        match = NULL;
  
@@@ -924,8 -928,8 +926,8 @@@ struct sctp_transport *sctp_assoc_is_ma
  
        sctp_read_lock(&asoc->base.addr_lock);
  
-       if ((asoc->base.bind_addr.port == laddr->v4.sin_port) &&
-           (asoc->peer.port == paddr->v4.sin_port)) {
+       if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
+           (htons(asoc->peer.port) == paddr->v4.sin_port)) {
                transport = sctp_assoc_lookup_paddr(asoc, paddr);
                if (!transport)
                        goto out;
@@@ -942,11 -946,8 +944,11 @@@ out
  }
  
  /* Do delayed input processing.  This is scheduled by sctp_rcv(). */
 -static void sctp_assoc_bh_rcv(struct sctp_association *asoc)
 +static void sctp_assoc_bh_rcv(struct work_struct *work)
  {
 +      struct sctp_association *asoc =
 +              container_of(work, struct sctp_association,
 +                           base.inqueue.immediate);
        struct sctp_endpoint *ep;
        struct sctp_chunk *chunk;
        struct sock *sk;
@@@ -1136,7 -1137,7 +1138,7 @@@ void sctp_assoc_update_retran_path(stru
                                 " port: %d\n",
                                 asoc,
                                 (&t->ipaddr),
-                                t->ipaddr.v4.sin_port);
+                                ntohs(t->ipaddr.v4.sin_port));
  }
  
  /* Choose the transport for sending a INIT packet.  */
@@@ -1161,7 -1162,7 +1163,7 @@@ struct sctp_transport *sctp_assoc_choos
                                 " port: %d\n",
                                 asoc,
                                 (&t->ipaddr),
-                                t->ipaddr.v4.sin_port);
+                                ntohs(t->ipaddr.v4.sin_port));
  
        return t;
  }
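
The port fixes above all enforce one rule: SCTP keeps asoc->peer.port and bind_addr.port in host order, while sockaddr sin_port fields are big-endian, so every comparison or printout must convert exactly one side. A minimal sketch of comparing across that boundary, hypothetical names:

#include <linux/types.h>
#include <asm/byteorder.h>

struct peer {
	u16 port;			/* host order, internal state */
};

static bool peer_port_matches(const struct peer *p, __be16 sin_port)
{
	/* convert one side so both operands are big-endian */
	return htons(p->port) == sin_port;
}

Either internal representation works; the bug the hunks fix is mixing the two representations on opposite sides of one comparison.
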
diff --combined net/sctp/endpointola.c
index a2b5537215144473b9362e4ef94cc050c5373527,33a42e90c32f509f7dfb06827ff1a7296d60ddf7..129756908da49992b5a42e07b84f7521a778e891
@@@ -61,7 -61,7 +61,7 @@@
  #include <net/sctp/sm.h>
  
  /* Forward declarations for internal helpers. */
 -static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep);
 +static void sctp_endpoint_bh_rcv(struct work_struct *work);
  
  /*
   * Initialize the base fields of the endpoint structure.
@@@ -72,6 -72,10 +72,10 @@@ static struct sctp_endpoint *sctp_endpo
  {
        memset(ep, 0, sizeof(struct sctp_endpoint));
  
+       ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);
+       if (!ep->digest)
+               return NULL;
        /* Initialize the base structure. */
        /* What type of endpoint are we?  */
        ep->base.type = SCTP_EP_TYPE_SOCKET;
@@@ -85,7 -89,8 +89,7 @@@
        sctp_inq_init(&ep->base.inqueue);
  
        /* Set its top-half handler */
 -      sctp_inq_set_th_handler(&ep->base.inqueue,
 -                              (void (*)(void *))sctp_endpoint_bh_rcv, ep);
 +      sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);
  
        /* Initialize the bind addr area */
        sctp_bind_addr_init(&ep->base.bind_addr, 0);
@@@ -181,6 -186,9 +185,9 @@@ static void sctp_endpoint_destroy(struc
        /* Free up the HMAC transform. */
        crypto_free_hash(sctp_sk(ep->base.sk)->hmac);
  
+       /* Free the digest buffer */
+       kfree(ep->digest);
        /* Cleanup. */
        sctp_inq_free(&ep->base.inqueue);
        sctp_bind_addr_free(&ep->base.bind_addr);
@@@ -222,7 -230,7 +229,7 @@@ struct sctp_endpoint *sctp_endpoint_is_
        struct sctp_endpoint *retval;
  
        sctp_read_lock(&ep->base.addr_lock);
-       if (ep->base.bind_addr.port == laddr->v4.sin_port) {
+       if (htons(ep->base.bind_addr.port) == laddr->v4.sin_port) {
                if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
                                         sctp_sk(ep->base.sk))) {
                        retval = ep;
@@@ -250,7 -258,7 +257,7 @@@ static struct sctp_association *__sctp_
        struct sctp_association *asoc;
        struct list_head *pos;
  
-       rport = paddr->v4.sin_port;
+       rport = ntohs(paddr->v4.sin_port);
  
        list_for_each(pos, &ep->asocs) {
                asoc = list_entry(pos, struct sctp_association, asocs);
@@@ -310,11 -318,8 +317,11 @@@ int sctp_endpoint_is_peeled_off(struct 
  /* Do delayed input processing.  This is scheduled by sctp_rcv().
   * This may be called on BH or task time.
   */
 -static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep)
 +static void sctp_endpoint_bh_rcv(struct work_struct *work)
  {
 +      struct sctp_endpoint *ep =
 +              container_of(work, struct sctp_endpoint,
 +                           base.inqueue.immediate);
        struct sctp_association *asoc;
        struct sock *sk;
        struct sctp_transport *transport;
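
The endpoint above gains a separately allocated digest buffer, with the kzalloc() in init paired against a kfree() in destroy. A minimal sketch of that pairing; the names and size are illustrative only:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_ep {
	u8 *digest;
};

static int my_ep_init(struct my_ep *ep, gfp_t gfp)
{
	ep->digest = kzalloc(64, gfp);	/* zeroed buffer, illustrative size */
	if (!ep->digest)
		return -ENOMEM;
	return 0;
}

static void my_ep_destroy(struct my_ep *ep)
{
	kfree(ep->digest);		/* kfree(NULL) would be a no-op */
	ep->digest = NULL;
}
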
diff --combined net/xfrm/xfrm_policy.c
index ba924d40df7d250298b53bea90bc83835a0fd9c1,64d3938f74c46d1d13753c32a3b37da63fd27918..f6c77bd36fddf7f8d597121c156b8d4c0d879c73
@@@ -50,6 -50,40 +50,40 @@@ static void xfrm_policy_put_afinfo(stru
  static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family);
  static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo);
  
+ static inline int
+ __xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
+ {
+       return  addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
+               addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
+               !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
+               !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
+               (fl->proto == sel->proto || !sel->proto) &&
+               (fl->oif == sel->ifindex || !sel->ifindex);
+ }
+ static inline int
+ __xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
+ {
+       return  addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
+               addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
+               !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
+               !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
+               (fl->proto == sel->proto || !sel->proto) &&
+               (fl->oif == sel->ifindex || !sel->ifindex);
+ }
+ int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
+                   unsigned short family)
+ {
+       switch (family) {
+       case AF_INET:
+               return __xfrm4_selector_match(sel, fl);
+       case AF_INET6:
+               return __xfrm6_selector_match(sel, fl);
+       }
+       return 0;
+ }
  int xfrm_register_type(struct xfrm_type *type, unsigned short family)
  {
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
@@@ -358,7 -392,7 +392,7 @@@ static void xfrm_policy_gc_kill(struct 
        xfrm_pol_put(policy);
  }
  
 -static void xfrm_policy_gc_task(void *data)
 +static void xfrm_policy_gc_task(struct work_struct *work)
  {
        struct xfrm_policy *policy;
        struct hlist_node *entry, *tmp;
@@@ -546,7 -580,7 +580,7 @@@ static inline int xfrm_byidx_should_res
  
  static DEFINE_MUTEX(hash_resize_mutex);
  
 -static void xfrm_hash_resize(void *__unused)
 +static void xfrm_hash_resize(struct work_struct *__unused)
  {
        int dir, total;
  
        mutex_unlock(&hash_resize_mutex);
  }
  
 -static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
 +static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
  
  /* Generate new index... KAME seems to generate them ordered by cost
   * of an absolute unpredictability of ordering of rules. This will not pass. */
@@@ -1177,6 -1211,7 +1211,7 @@@ xfrm_tmpl_resolve_one(struct xfrm_polic
                if (tmpl->mode == XFRM_MODE_TUNNEL) {
                        remote = &tmpl->id.daddr;
                        local = &tmpl->saddr;
+                       family = tmpl->encap_family;
                        if (xfrm_addr_any(local, family)) {
                                error = xfrm_get_saddr(&tmp, remote, family);
                                if (error)
@@@ -1894,7 -1929,8 +1929,8 @@@ int xfrm_bundle_ok(struct xfrm_policy *
  
                if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
                        return 0;
-               if (fl && !security_xfrm_flow_state_match(fl, dst->xfrm, pol))
+               if (fl && pol &&
+                   !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
                        return 0;
                if (dst->xfrm->km.state != XFRM_STATE_VALID)
                        return 0;
@@@ -2080,7 -2116,7 +2116,7 @@@ static void __init xfrm_policy_init(voi
                        panic("XFRM: failed to allocate bydst hash\n");
        }
  
 -      INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
 +      INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
        register_netdevice_notifier(&xfrm_dev_notifier);
  }
  
diff --combined net/xfrm/xfrm_state.c
index 40c5271798431e0c37495051d7972daa12bc1644,864962bbda902a513e04c2a32d7414bdcfce3733..da54a64ccfa3b9ffb4d4401ad3fac881591ce2dd
@@@ -115,7 -115,7 +115,7 @@@ static unsigned long xfrm_hash_new_size
  
  static DEFINE_MUTEX(hash_resize_mutex);
  
 -static void xfrm_hash_resize(void *__unused)
 +static void xfrm_hash_resize(struct work_struct *__unused)
  {
        struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
        unsigned long nsize, osize;
@@@ -168,7 -168,7 +168,7 @@@ out_unlock
        mutex_unlock(&hash_resize_mutex);
  }
  
 -static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
 +static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
  
  DECLARE_WAIT_QUEUE_HEAD(km_waitq);
  EXPORT_SYMBOL(km_waitq);
@@@ -207,7 -207,7 +207,7 @@@ static void xfrm_state_gc_destroy(struc
        kfree(x);
  }
  
 -static void xfrm_state_gc_task(void *data)
 +static void xfrm_state_gc_task(struct work_struct *data)
  {
        struct xfrm_state *x;
        struct hlist_node *entry, *tmp;
@@@ -1304,7 -1304,7 +1304,7 @@@ int km_query(struct xfrm_state *x, stru
  }
  EXPORT_SYMBOL(km_query);
  
- int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
+ int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
  {
        int err = -EINVAL;
        struct xfrm_mgr *km;
@@@ -1568,6 -1568,6 +1568,6 @@@ void __init xfrm_state_init(void
                panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
        xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
  
 -      INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
 +      INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
  }