Merge master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6
author Linus Torvalds <torvalds@woody.linux-foundation.org>
Thu, 7 Jun 2007 16:36:55 +0000 (09:36 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Thu, 7 Jun 2007 16:36:55 +0000 (09:36 -0700)
* master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6:
  sh: Fix se73180 platform device registration.
  sh: ioremap() through PMB needs asm/mmu.h.
  sh: voyagergx: Fix build warnings.
  sh: Fix SH4-202 clock fwk set_rate() mismatch.
  sh: microdev: Fix compile warnings.
  sh: Fix in_nmi symbol build error.

79 files changed:
Documentation/powerpc/booting-without-of.txt
Makefile
arch/i386/kernel/cpu/mtrr/main.c
arch/powerpc/boot/crt0.S
arch/powerpc/kernel/of_platform.c
arch/powerpc/platforms/cell/cbe_cpufreq.c
arch/powerpc/platforms/cell/spufs/context.c
arch/powerpc/platforms/cell/spufs/file.c
arch/powerpc/platforms/cell/spufs/inode.c
arch/powerpc/platforms/cell/spufs/sched.c
arch/powerpc/platforms/cell/spufs/spufs.h
arch/powerpc/platforms/celleb/Makefile
arch/powerpc/platforms/pasemi/iommu.c
arch/sparc64/Kconfig
arch/sparc64/kernel/Makefile
arch/sparc64/kernel/entry.S
arch/sparc64/kernel/mdesc.c
arch/sparc64/kernel/prom.c
arch/sparc64/kernel/setup.c
arch/sparc64/kernel/smp.c
arch/sparc64/kernel/sysfs.c [new file with mode: 0644]
drivers/ata/libata-core.c
drivers/ata/pata_via.c
drivers/atm/firestream.c
drivers/input/evdev.c
drivers/input/joydev.c
drivers/input/joystick/db9.c
drivers/input/mouse/Kconfig
drivers/input/mousedev.c
drivers/input/tsdev.c
drivers/mfd/ucb1x00-ts.c
drivers/mtd/Makefile
drivers/mtd/maps/uclinux.c
drivers/mtd/mtdsuper.c [new file with mode: 0644]
drivers/net/tg3.c
drivers/scsi/Kconfig
drivers/scsi/aacraid/linit.c
drivers/scsi/atari_NCR5380.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_scan.c
drivers/spi/spi.c
drivers/video/Kconfig
drivers/video/console/Makefile
drivers/video/ffb.c
drivers/video/sunxvr2500.c
drivers/video/sunxvr500.c
fs/ioctl.c
fs/jffs2/readinode.c
fs/jffs2/super.c
fs/jffs2/xattr.c
include/asm-sparc64/cpudata.h
include/asm-sparc64/dma-mapping.h
include/asm-sparc64/hypervisor.h
include/asm-sparc64/smp.h
include/asm-sparc64/topology.h
include/linux/fs.h
include/linux/libata.h
include/linux/mtd/super.h [new file with mode: 0644]
include/net/af_unix.h
kernel/signal.c
net/8021q/vlan.c
net/core/sock.c
net/dccp/probe.c
net/ipv4/datagram.c
net/ipv4/icmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/udp.c
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
net/sched/act_pedit.c
net/sched/sch_generic.c
net/sctp/debug.c
net/sctp/sm_statetable.c
net/unix/af_unix.c
net/wanrouter/wanmain.c
scripts/checkpatch.pl [changed mode: 0644->0755]

index b49ce169a63aa27d75271a45948b0c1ed8d15ecd..d42d98107d494d906ded560a3e61bfad829e8911 100644 (file)
@@ -1,7 +1,6 @@
            Booting the Linux/ppc kernel without Open Firmware
            --------------------------------------------------
 
-
 (c) 2005 Benjamin Herrenschmidt <benh at kernel.crashing.org>,
     IBM Corp.
 (c) 2005 Becky Bruce <becky.bruce at freescale.com>,
@@ -9,6 +8,62 @@
 (c) 2006 MontaVista Software, Inc.
     Flash chip node definition
 
+Table of Contents
+=================
+
+  I - Introduction
+    1) Entry point for arch/powerpc
+    2) Board support
+
+  II - The DT block format
+    1) Header
+    2) Device tree generalities
+    3) Device tree "structure" block
+    4) Device tree "strings" block
+
+  III - Required content of the device tree
+    1) Note about cells and address representation
+    2) Note about "compatible" properties
+    3) Note about "name" properties
+    4) Note about node and property names and character set
+    5) Required nodes and properties
+      a) The root node
+      b) The /cpus node
+      c) The /cpus/* nodes
+      d) the /memory node(s)
+      e) The /chosen node
+      f) the /soc<SOCname> node
+
+  IV - "dtc", the device tree compiler
+
+  V - Recommendations for a bootloader
+
+  VI - System-on-a-chip devices and nodes
+    1) Defining child nodes of an SOC
+    2) Representing devices without a current OF specification
+      a) MDIO IO device
+      c) PHY nodes
+      b) Gianfar-compatible ethernet nodes
+      d) Interrupt controllers
+      e) I2C
+      f) Freescale SOC USB controllers
+      g) Freescale SOC SEC Security Engines
+      h) Board Control and Status (BCSR)
+      i) Freescale QUICC Engine module (QE)
+      g) Flash chip nodes
+
+  VII - Specifying interrupt information for devices
+    1) interrupts property
+    2) interrupt-parent property
+    3) OpenPIC Interrupt Controllers
+    4) ISA Interrupt Controllers
+
+  Appendix A - Sample SOC node for MPC8540
+
+
+Revision Information
+====================
+
    May 18, 2005: Rev 0.1 - Initial draft, no chapter III yet.
 
    May 19, 2005: Rev 0.2 - Add chapter III and bits & pieces here or
@@ -1687,7 +1742,7 @@ platforms are moved over to use the flattened-device-tree model.
                };
        };
 
-    g) Flash chip nodes
+    j) Flash chip nodes
 
     Flash chips (Memory Technology Devices) are often used for solid state
     file systems on embedded devices.
index 562a90902cf698cc34e6e804ffdd9e20ed54828f..30d685b629a46a84f5ccbd022a4ac52eb97fdefe 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 22
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Jeff Thinks I Should Change This, But To What?
 
 # *DOCUMENTATION*
index 1cf466df330ad567c26355aeda582b98196534ae..7202b98aac4f2f53516e421ef935545538e5466a 100644 (file)
@@ -734,10 +734,13 @@ void mtrr_ap_init(void)
  */
 void mtrr_save_state(void)
 {
-       if (smp_processor_id() == 0)
+       int cpu = get_cpu();
+
+       if (cpu == 0)
                mtrr_save_fixed_ranges(NULL);
        else
                smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
+       put_cpu();
 }
 
 static int __init mtrr_init_finialize(void)
index 5a4215c4b0146aa063fe87c5bb503a398232f6d3..f1c4dfc635be20380d6a718d420cb83b6e4a6068 100644 (file)
@@ -13,6 +13,7 @@
 
        .text
        /* a procedure descriptor used when booting this as a COFF file */
+       .globl  _zimage_start_opd
 _zimage_start_opd:
        .long   _zimage_start, 0, 0, 0
 
index d501c23e5159559ca493caf512e888ca6671e73a..d454f61c9c7c7b5a84c00ef3f66b730e436171ee 100644 (file)
@@ -433,7 +433,7 @@ static int __devinit of_pci_phb_probe(struct of_device *dev,
         * Note also that we don't do ISA, this will also be fixed with a
         * more massive rework.
         */
-       pci_setup_phb_io(phb, 0);
+       pci_setup_phb_io(phb, pci_io_base == 0);
 
        /* Init pci_dn data structures */
        pci_devs_phb_init_dynamic(phb);
index f9ac3fe3be9767fbf04a73296dfc365d3efbbec7..ac445998d8313d087964e0e6133f272a6b080ab8 100644 (file)
@@ -67,6 +67,7 @@ static u64 MIC_Slow_Next_Timer_table[] = {
        0x00003FC000000000ull,
 };
 
+static unsigned int pmi_frequency_limit = 0;
 /*
  * hardware specific functions
  */
@@ -164,7 +165,6 @@ static int set_pmode(int cpu, unsigned int slow_mode) {
 
 static void cbe_cpufreq_handle_pmi(struct of_device *dev, pmi_message_t pmi_msg)
 {
-       struct cpufreq_policy policy;
        u8 cpu;
        u8 cbe_pmode_new;
 
@@ -173,15 +173,27 @@ static void cbe_cpufreq_handle_pmi(struct of_device *dev, pmi_message_t pmi_msg)
        cpu = cbe_node_to_cpu(pmi_msg.data1);
        cbe_pmode_new = pmi_msg.data2;
 
-       cpufreq_get_policy(&policy, cpu);
+       pmi_frequency_limit = cbe_freqs[cbe_pmode_new].frequency;
 
-       policy.max = min(policy.max, cbe_freqs[cbe_pmode_new].frequency);
-       policy.min = min(policy.min, policy.max);
+       pr_debug("cbe_handle_pmi: max freq=%d\n", pmi_frequency_limit);
+}
+
+static int pmi_notifier(struct notifier_block *nb,
+                                      unsigned long event, void *data)
+{
+       struct cpufreq_policy *policy = data;
 
-       pr_debug("cbe_handle_pmi: new policy.min=%d policy.max=%d\n", policy.min, policy.max);
-       cpufreq_set_policy(&policy);
+       if (event != CPUFREQ_INCOMPATIBLE)
+               return 0;
+
+       cpufreq_verify_within_limits(policy, 0, pmi_frequency_limit);
+       return 0;
 }
 
+static struct notifier_block pmi_notifier_block = {
+       .notifier_call = pmi_notifier,
+};
+
 static struct pmi_handler cbe_pmi_handler = {
        .type                   = PMI_TYPE_FREQ_CHANGE,
        .handle_pmi_message     = cbe_cpufreq_handle_pmi,
@@ -238,12 +250,21 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
        cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
 
+       if (pmi_dev) {
+               /* frequency might get limited later, initialize limit with max_freq */
+               pmi_frequency_limit = max_freq;
+               cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
+       }
+
        /* this ensures that policy->cpuinfo_min and policy->cpuinfo_max are set correctly */
        return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
 }
 
 static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
+       if (pmi_dev)
+               cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
+
        cpufreq_frequency_table_put_attr(policy->cpu);
        return 0;
 }
index 8654749e317bae0aa67795dda4ba2eebdae50c3f..7c51cb54bca1ca8ba78c5074631247465dc784e7 100644 (file)
@@ -39,7 +39,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
        if (spu_init_csa(&ctx->csa))
                goto out_free;
        spin_lock_init(&ctx->mmio_lock);
-       spin_lock_init(&ctx->mapping_lock);
+       mutex_init(&ctx->mapping_lock);
        kref_init(&ctx->kref);
        mutex_init(&ctx->state_mutex);
        mutex_init(&ctx->run_mutex);
@@ -103,6 +103,7 @@ void spu_forget(struct spu_context *ctx)
 
 void spu_unmap_mappings(struct spu_context *ctx)
 {
+       mutex_lock(&ctx->mapping_lock);
        if (ctx->local_store)
                unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
        if (ctx->mfc)
@@ -117,6 +118,7 @@ void spu_unmap_mappings(struct spu_context *ctx)
                unmap_mapping_range(ctx->mss, 0, 0x1000, 1);
        if (ctx->psmap)
                unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
+       mutex_unlock(&ctx->mapping_lock);
 }
 
 /**
index 45614c73c7841824d1a1f47834ebc2838bfbbe0a..b1e7e2f8a2e9256f4263a1ee72a589de6c592408 100644 (file)
@@ -45,11 +45,11 @@ spufs_mem_open(struct inode *inode, struct file *file)
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
 
-       spin_lock(&ctx->mapping_lock);
+       mutex_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->local_store = inode->i_mapping;
-       spin_unlock(&ctx->mapping_lock);
+       mutex_unlock(&ctx->mapping_lock);
        return 0;
 }
 
@@ -59,10 +59,10 @@ spufs_mem_release(struct inode *inode, struct file *file)
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
 
-       spin_lock(&ctx->mapping_lock);
+       mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->local_store = NULL;
-       spin_unlock(&ctx->mapping_lock);
+       mutex_unlock(&ctx->mapping_lock);
        return 0;
 }
 
@@ -217,6 +217,7 @@ unsigned long spufs_get_unmapped_area(struct file *file, unsigned long addr,
 
 static const struct file_operations spufs_mem_fops = {
        .open                   = spufs_mem_open,
+       .release                = spufs_mem_release,
        .read                   = spufs_mem_read,
        .write                  = spufs_mem_write,
        .llseek                 = generic_file_llseek,
@@ -309,11 +310,11 @@ static int spufs_cntl_open(struct inode *inode, struct file *file)
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
 
-       spin_lock(&ctx->mapping_lock);
+       mutex_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->cntl = inode->i_mapping;
-       spin_unlock(&ctx->mapping_lock);
+       mutex_unlock(&ctx->mapping_lock);
        return simple_attr_open(inode, file, spufs_cntl_get,
                                        spufs_cntl_set, "0x%08lx");
 }
@@ -326,10 +327,10 @@ spufs_cntl_release(struct inode *inode, struct file *file)
 
        simple_attr_close(inode, file);
 
-       spin_lock(&ctx->mapping_lock);
+       mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->cntl = NULL;
-       spin_unlock(&ctx->mapping_lock);
+       mutex_unlock(&ctx->mapping_lock);
        return 0;
 }
 
@@ -812,11 +813,11 @@ static int spufs_signal1_open(struct inode *inode, struct file *file)
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
 
-       spin_lock(&ctx->mapping_lock);
+       mutex_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->signal1 = inode->i_mapping;
-       spin_unlock(&ctx->mapping_lock);
+       mutex_unlock(&ctx->mapping_lock);
        return nonseekable_open(inode, file);
 }
 
@@ -826,10 +827,10 @@ spufs_signal1_release(struct inode *inode, struct file *file)
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
 
-       spin_lock(&ctx->mapping_lock);
+       mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->signal1 = NULL;
-       spin_unlock(&ctx->mapping_lock);
+       mutex_unlock(&ctx->mapping_lock);
        return 0;
 }
 
@@ -936,11 +937,11 @@ static int spufs_signal2_open(struct inode *inode, struct file *file)
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
 
-       spin_lock(&ctx->mapping_lock);
+       mutex_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->signal2 = inode->i_mapping;
-       spin_unlock(&ctx->mapping_lock);
+       mutex_unlock(&ctx->mapping_lock);
        return nonseekable_open(inode, file);
 }
 
@@ -950,10 +951,10 @@ spufs_signal2_release(struct inode *inode, struct file *file)
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
 
-       spin_lock(&ctx->mapping_lock);
+       mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->signal2 = NULL;
-       spin_unlock(&ctx->mapping_lock);
+       mutex_unlock(&ctx->mapping_lock);
        return 0;
 }
 
@@ -1154,10 +1155,10 @@ static int spufs_mss_open(struct inode *inode, struct file *file)
 
        file->private_data = i->i_ctx;
 
-       spin_lock(&ctx->mapping_lock);
+       mutex_lock(&ctx->mapping_lock);
        if (!i->i_openers++)
                ctx->mss = inode->i_mapping;
-       spin_unlock(&ctx->mapping_lock);
+       mutex_unlock(&ctx->mapping_lock);
        return nonseekable_open(inode, file);
 }
 
@@ -1167,10 +1168,10 @@ spufs_mss_release(struct inode *inode, struct file *file)
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
 
-       spin_lock(&ctx->mapping_lock);
+       mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->mss = NULL;
-       spin_unlock(&ctx->mapping_lock);
+       mutex_unlock(&ctx->mapping_lock);
        return 0;
 }
 
@@ -1211,11 +1212,11 @@ static int spufs_psmap_open(struct inode *inode, struct file *file)
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
 
-       spin_lock(&ctx->mapping_lock);
+       mutex_lock(&ctx->mapping_lock);
        file->private_data = i->i_ctx;
        if (!i->i_openers++)
                ctx->psmap = inode->i_mapping;
-       spin_unlock(&ctx->mapping_lock);
+       mutex_unlock(&ctx->mapping_lock);
        return nonseekable_open(inode, file);
 }
 
@@ -1225,10 +1226,10 @@ spufs_psmap_release(struct inode *inode, struct file *file)
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
 
-       spin_lock(&ctx->mapping_lock);
+       mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->psmap = NULL;
-       spin_unlock(&ctx->mapping_lock);
+       mutex_unlock(&ctx->mapping_lock);
        return 0;
 }
 
@@ -1281,11 +1282,11 @@ static int spufs_mfc_open(struct inode *inode, struct file *file)
        if (atomic_read(&inode->i_count) != 1)
                return -EBUSY;
 
-       spin_lock(&ctx->mapping_lock);
+       mutex_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->mfc = inode->i_mapping;
-       spin_unlock(&ctx->mapping_lock);
+       mutex_unlock(&ctx->mapping_lock);
        return nonseekable_open(inode, file);
 }
 
@@ -1295,10 +1296,10 @@ spufs_mfc_release(struct inode *inode, struct file *file)
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
 
-       spin_lock(&ctx->mapping_lock);
+       mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->mfc = NULL;
-       spin_unlock(&ctx->mapping_lock);
+       mutex_unlock(&ctx->mapping_lock);
        return 0;
 }
 
index 7150730e2ff11b9896039bd46a05ef83c8d26931..9807206e02196d479b1ea171ecc754fae9c7a746 100644 (file)
@@ -177,7 +177,7 @@ static int spufs_rmdir(struct inode *parent, struct dentry *dir)
 static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
                          int mode, struct spu_context *ctx)
 {
-       struct dentry *dentry;
+       struct dentry *dentry, *tmp;
        int ret;
 
        while (files->name && files->name[0]) {
@@ -193,7 +193,20 @@ static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
        }
        return 0;
 out:
-       spufs_prune_dir(dir);
+       /*
+        * remove all children from dir. dir->inode is not set so don't
+        * just simply use spufs_prune_dir() and panic afterwards :)
+        * dput() looks like it will do the right thing:
+        * - dec parent's ref counter
+        * - remove child from parent's child list
+        * - free child's inode if possible
+        * - free child
+        */
+       list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
+               dput(dentry);
+       }
+
+       shrink_dcache_parent(dir);
        return ret;
 }
 
@@ -274,6 +287,7 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
        goto out;
 
 out_free_ctx:
+       spu_forget(ctx);
        put_spu_context(ctx);
 out_iput:
        iput(inode);
@@ -349,37 +363,6 @@ out:
        return ret;
 }
 
-static int spufs_rmgang(struct inode *root, struct dentry *dir)
-{
-       /* FIXME: this fails if the dir is not empty,
-                 which causes a leak of gangs. */
-       return simple_rmdir(root, dir);
-}
-
-static int spufs_gang_close(struct inode *inode, struct file *file)
-{
-       struct inode *parent;
-       struct dentry *dir;
-       int ret;
-
-       dir = file->f_path.dentry;
-       parent = dir->d_parent->d_inode;
-
-       ret = spufs_rmgang(parent, dir);
-       WARN_ON(ret);
-
-       return dcache_dir_close(inode, file);
-}
-
-const struct file_operations spufs_gang_fops = {
-       .open           = dcache_dir_open,
-       .release        = spufs_gang_close,
-       .llseek         = dcache_dir_lseek,
-       .read           = generic_read_dir,
-       .readdir        = dcache_readdir,
-       .fsync          = simple_sync_file,
-};
-
 static int
 spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
 {
@@ -407,7 +390,6 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
        inode->i_fop = &simple_dir_operations;
 
        d_instantiate(dentry, inode);
-       dget(dentry);
        dir->i_nlink++;
        dentry->d_inode->i_nlink++;
        return ret;
@@ -437,7 +419,7 @@ static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
                goto out;
        }
 
-       filp->f_op = &spufs_gang_fops;
+       filp->f_op = &simple_dir_operations;
        fd_install(ret, filp);
 out:
        return ret;
@@ -458,8 +440,10 @@ static int spufs_create_gang(struct inode *inode,
         * in error path of *_open().
         */
        ret = spufs_gang_open(dget(dentry), mntget(mnt));
-       if (ret < 0)
-               WARN_ON(spufs_rmgang(inode, dentry));
+       if (ret < 0) {
+               int err = simple_rmdir(inode, dentry);
+               WARN_ON(err);
+       }
 
 out:
        mutex_unlock(&inode->i_mutex);
@@ -600,6 +584,10 @@ spufs_create_root(struct super_block *sb, void *data)
        struct inode *inode;
        int ret;
 
+       ret = -ENODEV;
+       if (!spu_management_ops)
+               goto out;
+
        ret = -ENOMEM;
        inode = spufs_new_inode(sb, S_IFDIR | 0775);
        if (!inode)
index b6ecb30e7d58b71ef8530f13bd02ea4bfe21039e..3b831e07f1ed740eabf0cd95fcafb985f105a8f2 100644 (file)
@@ -93,43 +93,6 @@ void spu_stop_tick(struct spu_context *ctx)
        }
 }
 
-void spu_sched_tick(struct work_struct *work)
-{
-       struct spu_context *ctx =
-               container_of(work, struct spu_context, sched_work.work);
-       struct spu *spu;
-       int preempted = 0;
-
-       /*
-        * If this context is being stopped avoid rescheduling from the
-        * scheduler tick because we would block on the state_mutex.
-        * The caller will yield the spu later on anyway.
-        */
-       if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
-               return;
-
-       mutex_lock(&ctx->state_mutex);
-       spu = ctx->spu;
-       if (spu) {
-               int best = sched_find_first_bit(spu_prio->bitmap);
-               if (best <= ctx->prio) {
-                       spu_deactivate(ctx);
-                       preempted = 1;
-               }
-       }
-       mutex_unlock(&ctx->state_mutex);
-
-       if (preempted) {
-               /*
-                * We need to break out of the wait loop in spu_run manually
-                * to ensure this context gets put on the runqueue again
-                * ASAP.
-                */
-               wake_up(&ctx->stop_wq);
-       } else
-               spu_start_tick(ctx);
-}
-
 /**
  * spu_add_to_active_list - add spu to active list
  * @spu:       spu to add to the active list
@@ -273,34 +236,6 @@ static void spu_prio_wait(struct spu_context *ctx)
        remove_wait_queue(&ctx->stop_wq, &wait);
 }
 
-/**
- * spu_reschedule - try to find a runnable context for a spu
- * @spu:       spu available
- *
- * This function is called whenever a spu becomes idle.  It looks for the
- * most suitable runnable spu context and schedules it for execution.
- */
-static void spu_reschedule(struct spu *spu)
-{
-       int best;
-
-       spu_free(spu);
-
-       spin_lock(&spu_prio->runq_lock);
-       best = sched_find_first_bit(spu_prio->bitmap);
-       if (best < MAX_PRIO) {
-               struct list_head *rq = &spu_prio->runq[best];
-               struct spu_context *ctx;
-
-               BUG_ON(list_empty(rq));
-
-               ctx = list_entry(rq->next, struct spu_context, rq);
-               __spu_del_from_rq(ctx);
-               wake_up(&ctx->stop_wq);
-       }
-       spin_unlock(&spu_prio->runq_lock);
-}
-
 static struct spu *spu_get_idle(struct spu_context *ctx)
 {
        struct spu *spu = NULL;
@@ -428,6 +363,51 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
        return -ERESTARTSYS;
 }
 
+/**
+ * grab_runnable_context - try to find a runnable context
+ *
+ * Remove the highest priority context on the runqueue and return it
+ * to the caller.  Returns %NULL if no runnable context was found.
+ */
+static struct spu_context *grab_runnable_context(int prio)
+{
+       struct spu_context *ctx = NULL;
+       int best;
+
+       spin_lock(&spu_prio->runq_lock);
+       best = sched_find_first_bit(spu_prio->bitmap);
+       if (best < prio) {
+               struct list_head *rq = &spu_prio->runq[best];
+
+               BUG_ON(list_empty(rq));
+
+               ctx = list_entry(rq->next, struct spu_context, rq);
+               __spu_del_from_rq(ctx);
+       }
+       spin_unlock(&spu_prio->runq_lock);
+
+       return ctx;
+}
+
+static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
+{
+       struct spu *spu = ctx->spu;
+       struct spu_context *new = NULL;
+
+       if (spu) {
+               new = grab_runnable_context(max_prio);
+               if (new || force) {
+                       spu_unbind_context(spu, ctx);
+                       spu_free(spu);
+                       if (new)
+                               wake_up(&new->stop_wq);
+               }
+
+       }
+
+       return new != NULL;
+}
+
 /**
  * spu_deactivate - unbind a context from its physical spu
  * @ctx:       spu context to unbind
@@ -437,12 +417,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
  */
 void spu_deactivate(struct spu_context *ctx)
 {
-       struct spu *spu = ctx->spu;
-
-       if (spu) {
-               spu_unbind_context(spu, ctx);
-               spu_reschedule(spu);
-       }
+       __spu_deactivate(ctx, 1, MAX_PRIO);
 }
 
 /**
@@ -455,21 +430,43 @@ void spu_deactivate(struct spu_context *ctx)
  */
 void spu_yield(struct spu_context *ctx)
 {
-       struct spu *spu;
-
-       if (mutex_trylock(&ctx->state_mutex)) {
-               if ((spu = ctx->spu) != NULL) {
-                       int best = sched_find_first_bit(spu_prio->bitmap);
-                       if (best < MAX_PRIO) {
-                               pr_debug("%s: yielding SPU %d NODE %d\n",
-                                        __FUNCTION__, spu->number, spu->node);
-                               spu_deactivate(ctx);
-                       }
-               }
+       if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
+               mutex_lock(&ctx->state_mutex);
+               __spu_deactivate(ctx, 0, MAX_PRIO);
                mutex_unlock(&ctx->state_mutex);
        }
 }
 
+void spu_sched_tick(struct work_struct *work)
+{
+       struct spu_context *ctx =
+               container_of(work, struct spu_context, sched_work.work);
+       int preempted;
+
+       /*
+        * If this context is being stopped avoid rescheduling from the
+        * scheduler tick because we would block on the state_mutex.
+        * The caller will yield the spu later on anyway.
+        */
+       if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
+               return;
+
+       mutex_lock(&ctx->state_mutex);
+       preempted = __spu_deactivate(ctx, 0, ctx->prio + 1);
+       mutex_unlock(&ctx->state_mutex);
+
+       if (preempted) {
+               /*
+                * We need to break out of the wait loop in spu_run manually
+                * to ensure this context gets put on the runqueue again
+                * ASAP.
+                */
+               wake_up(&ctx->stop_wq);
+       } else {
+               spu_start_tick(ctx);
+       }
+}
+
 int __init spu_sched_init(void)
 {
        int i;
index 0a947fd7de57c3da11e2444b64d5d020637a9323..47617e8014a5acc402f1c0222dfb1d1c6c3bf972 100644 (file)
@@ -55,7 +55,7 @@ struct spu_context {
        struct address_space *signal2;     /* 'signal2' area mappings. */
        struct address_space *mss;         /* 'mss' area mappings. */
        struct address_space *psmap;       /* 'psmap' area mappings. */
-       spinlock_t mapping_lock;
+       struct mutex mapping_lock;
        u64 object_id;             /* user space pointer for oprofile */
 
        enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
index f4f82520dc4f59aa1d0f35d387f43637185251d8..5240046d86715275156d7adf6b41434f28b1155a 100644 (file)
@@ -4,5 +4,5 @@ obj-y                           += interrupt.o iommu.o setup.o \
 
 obj-$(CONFIG_SMP)              += smp.o
 obj-$(CONFIG_PPC_UDBG_BEAT)    += udbg_beat.o
-obj-$(CONFIG_HAS_TXX9_SERIAL)  += scc_sio.o
+obj-$(CONFIG_SERIAL_TXX9)      += scc_sio.o
 obj-$(CONFIG_SPU_BASE)         += spu_priv1.o
index 95fa6a7d15ee4d11aab9e8543d8ac59d11f4c1f7..f33b21b9f5d4338d9ad9c01ba610a90250b7e132 100644 (file)
@@ -31,8 +31,6 @@
 #define IOBMAP_PAGE_SIZE       (1 << IOBMAP_PAGE_SHIFT)
 #define IOBMAP_PAGE_MASK       (IOBMAP_PAGE_SIZE - 1)
 
-#define IOBMAP_PAGE_FACTOR     (PAGE_SHIFT - IOBMAP_PAGE_SHIFT)
-
 #define IOB_BASE               0xe0000000
 #define IOB_SIZE               0x3000
 /* Configuration registers */
@@ -97,9 +95,6 @@ static void iobmap_build(struct iommu_table *tbl, long index,
 
        bus_addr = (tbl->it_offset + index) << PAGE_SHIFT;
 
-       npages <<= IOBMAP_PAGE_FACTOR;
-       index <<= IOBMAP_PAGE_FACTOR;
-
        ip = ((u32 *)tbl->it_base) + index;
 
        while (npages--) {
@@ -125,9 +120,6 @@ static void iobmap_free(struct iommu_table *tbl, long index,
 
        bus_addr = (tbl->it_offset + index) << PAGE_SHIFT;
 
-       npages <<= IOBMAP_PAGE_FACTOR;
-       index <<= IOBMAP_PAGE_FACTOR;
-
        ip = ((u32 *)tbl->it_base) + index;
 
        while (npages--) {
index bd00f89eed1ed3c4dcaa7a5cb39cd6ef1a132505..89a1b469b93df149b21c1c65c5d1723d36e97e09 100644 (file)
@@ -396,6 +396,15 @@ config SCHED_SMT
          when dealing with UltraSPARC cpus at a cost of slightly increased
          overhead in some places. If unsure say N here.
 
+config SCHED_MC
+       bool "Multi-core scheduler support"
+       depends on SMP
+       default y
+       help
+         Multi-core scheduler support improves the CPU scheduler's decision
+         making when dealing with multi-core CPU chips at a cost of slightly
+         increased overhead in some places. If unsure say N here.
+
 source "kernel/Kconfig.preempt"
 
 config CMDLINE_BOOL
index d8d19093d12fd0a650f1196f2d44f490895fccc3..f964bf28d21a2568fed3a436af906c3e78d238ea 100644 (file)
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.70 2002/02/09 19:49:30 davem Exp $
+#
 # Makefile for the linux kernel.
 #
 
@@ -8,7 +8,7 @@ EXTRA_CFLAGS := -Werror
 extra-y                := head.o init_task.o vmlinux.lds
 
 obj-y          := process.o setup.o cpu.o idprom.o \
-                  traps.o auxio.o una_asm.o \
+                  traps.o auxio.o una_asm.o sysfs.o \
                   irq.o ptrace.o time.o sys_sparc.o signal.o \
                   unaligned.o central.o pci.o starfire.o semaphore.o \
                   power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \
index ed712e0b337238199b52aadc646b6123a585aef6..7d1a11822a1e2c5407300ac346792397af9d21cf 100644 (file)
@@ -2514,9 +2514,9 @@ sun4v_ncs_request:
         nop
        .size   sun4v_ncs_request, .-sun4v_ncs_request
 
-       .globl  sun4v_scv_send
-       .type   sun4v_scv_send,#function
-sun4v_scv_send:
+       .globl  sun4v_svc_send
+       .type   sun4v_svc_send,#function
+sun4v_svc_send:
        save    %sp, -192, %sp
        mov     %i0, %o0
        mov     %i1, %o1
@@ -2526,11 +2526,11 @@ sun4v_scv_send:
        stx     %o1, [%i3]
        ret
        restore
-       .size   sun4v_scv_send, .-sun4v_scv_send
+       .size   sun4v_svc_send, .-sun4v_svc_send
 
-       .globl  sun4v_scv_recv
-       .type   sun4v_scv_recv,#function
-sun4v_scv_recv:
+       .globl  sun4v_svc_recv
+       .type   sun4v_svc_recv,#function
+sun4v_svc_recv:
        save    %sp, -192, %sp
        mov     %i0, %o0
        mov     %i1, %o1
@@ -2540,33 +2540,55 @@ sun4v_scv_recv:
        stx     %o1, [%i3]
        ret
        restore
-       .size   sun4v_scv_recv, .-sun4v_scv_recv
+       .size   sun4v_svc_recv, .-sun4v_svc_recv
 
-       .globl  sun4v_scv_getstatus
-       .type   sun4v_scv_getstatus,#function
-sun4v_scv_getstatus:
+       .globl  sun4v_svc_getstatus
+       .type   sun4v_svc_getstatus,#function
+sun4v_svc_getstatus:
        mov     HV_FAST_SVC_GETSTATUS, %o5
        mov     %o1, %o4
        ta      HV_FAST_TRAP
        stx     %o1, [%o4]
        retl
         nop
-       .size   sun4v_scv_getstatus, .-sun4v_scv_getstatus
+       .size   sun4v_svc_getstatus, .-sun4v_svc_getstatus
 
-       .globl  sun4v_scv_setstatus
-       .type   sun4v_scv_setstatus,#function
-sun4v_scv_setstatus:
+       .globl  sun4v_svc_setstatus
+       .type   sun4v_svc_setstatus,#function
+sun4v_svc_setstatus:
        mov     HV_FAST_SVC_SETSTATUS, %o5
        ta      HV_FAST_TRAP
        retl
         nop
-       .size   sun4v_scv_setstatus, .-sun4v_scv_setstatus
+       .size   sun4v_svc_setstatus, .-sun4v_svc_setstatus
 
-       .globl  sun4v_scv_clrstatus
-       .type   sun4v_scv_clrstatus,#function
-sun4v_scv_clrstatus:
+       .globl  sun4v_svc_clrstatus
+       .type   sun4v_svc_clrstatus,#function
+sun4v_svc_clrstatus:
        mov     HV_FAST_SVC_CLRSTATUS, %o5
        ta      HV_FAST_TRAP
        retl
         nop
-       .size   sun4v_scv_clrstatus, .-sun4v_scv_clrstatus
+       .size   sun4v_svc_clrstatus, .-sun4v_svc_clrstatus
+
+       .globl  sun4v_mmustat_conf
+       .type   sun4v_mmustat_conf,#function
+sun4v_mmustat_conf:
+       mov     %o1, %o4
+       mov     HV_FAST_MMUSTAT_CONF, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%o4]
+       retl
+        nop
+       .size   sun4v_mmustat_conf, .-sun4v_mmustat_conf
+
+       .globl  sun4v_mmustat_info
+       .type   sun4v_mmustat_info,#function
+sun4v_mmustat_info:
+       mov     %o0, %o4
+       mov     HV_FAST_MMUSTAT_INFO, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%o4]
+       retl
+        nop
+       .size   sun4v_mmustat_info, .-sun4v_mmustat_info
index 9246c2cf95747685fba8106d7cb83dcd99d6a60b..f0e16045fb1693f0f8ba9e8e507413833aa05b9f 100644 (file)
@@ -473,6 +473,53 @@ static void __init set_core_ids(void)
        }
 }
 
+static void __init mark_proc_ids(struct mdesc_node *mp, int proc_id)
+{
+       int i;
+
+       for (i = 0; i < mp->num_arcs; i++) {
+               struct mdesc_node *t = mp->arcs[i].arc;
+               const u64 *id;
+
+               if (strcmp(mp->arcs[i].name, "back"))
+                       continue;
+
+               if (strcmp(t->name, "cpu"))
+                       continue;
+
+               id = md_get_property(t, "id", NULL);
+               if (*id < NR_CPUS)
+                       cpu_data(*id).proc_id = proc_id;
+       }
+}
+
+static void __init __set_proc_ids(const char *exec_unit_name)
+{
+       struct mdesc_node *mp;
+       int idx;
+
+       idx = 0;
+       md_for_each_node_by_name(mp, exec_unit_name) {
+               const char *type;
+               int len;
+
+               type = md_get_property(mp, "type", &len);
+               if (!find_in_proplist(type, "int", len) &&
+                   !find_in_proplist(type, "integer", len))
+                       continue;
+
+               mark_proc_ids(mp, idx);
+
+               idx++;
+       }
+}
+
+static void __init set_proc_ids(void)
+{
+       __set_proc_ids("exec_unit");
+       __set_proc_ids("exec-unit");
+}
+
 static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def)
 {
        u64 val;
@@ -574,9 +621,15 @@ static void __init mdesc_fill_in_cpu_data(void)
 #endif
 
                c->core_id = 0;
+               c->proc_id = -1;
        }
 
+#ifdef CONFIG_SMP
+       sparc64_multi_core = 1;
+#endif
+
        set_core_ids();
+       set_proc_ids();
 
        smp_fill_in_sib_core_maps();
 }
index dad4b3ba705f5aaab2e354692db7eb229a490a4c..6f4a5284b0ea5dc2117833a05d1fae19997eaf90 100644 (file)
@@ -1781,6 +1781,10 @@ static void __init of_fill_in_cpu_data(void)
                        }
 
                        cpu_data(cpuid).core_id = portid + 1;
+                       cpu_data(cpuid).proc_id = portid;
+#ifdef CONFIG_SMP
+                       sparc64_multi_core = 1;
+#endif
                } else {
                        cpu_data(cpuid).dcache_size =
                                of_getintprop_default(dp, "dcache-size", 16 * 1024);
@@ -1799,6 +1803,7 @@ static void __init of_fill_in_cpu_data(void)
                                of_getintprop_default(dp, "ecache-line-size", 64);
 
                        cpu_data(cpuid).core_id = 0;
+                       cpu_data(cpuid).proc_id = -1;
                }
 
 #ifdef CONFIG_SMP
index de9b4c13f1c71deee9d62cd69f41eb04d5e76a65..7490cc670a530e495a81e1d5d7d762b8228f7f22 100644 (file)
@@ -513,22 +513,3 @@ void sun_do_break(void)
 
 int serial_console = -1;
 int stop_a_enabled = 1;
-
-static int __init topology_init(void)
-{
-       int i, err;
-
-       err = -ENOMEM;
-
-       for_each_possible_cpu(i) {
-               struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
-               if (p) {
-                       register_cpu(p, i);
-                       err = 0;
-               }
-       }
-
-       return err;
-}
-
-subsys_initcall(topology_init);
index c550bba3490a528fb240f3dc4f5b565a0f59fdc4..4dcd7d0b60f2d8abb8e8ad7630df207cfc39ad21 100644 (file)
@@ -44,6 +44,8 @@
 
 extern void calibrate_delay(void);
 
+int sparc64_multi_core __read_mostly;
+
 /* Please don't make this stuff initdata!!!  --DaveM */
 unsigned char boot_cpu_id;
 
@@ -51,6 +53,8 @@ cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
        { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
+       { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 static cpumask_t smp_commenced_mask;
 static cpumask_t cpu_callout_map;
 
@@ -1217,13 +1221,28 @@ void __devinit smp_fill_in_sib_core_maps(void)
                unsigned int j;
 
                if (cpu_data(i).core_id == 0) {
-                       cpu_set(i, cpu_sibling_map[i]);
+                       cpu_set(i, cpu_core_map[i]);
                        continue;
                }
 
                for_each_possible_cpu(j) {
                        if (cpu_data(i).core_id ==
                            cpu_data(j).core_id)
+                               cpu_set(j, cpu_core_map[i]);
+               }
+       }
+
+       for_each_possible_cpu(i) {
+               unsigned int j;
+
+               if (cpu_data(i).proc_id == -1) {
+                       cpu_set(i, cpu_sibling_map[i]);
+                       continue;
+               }
+
+               for_each_possible_cpu(j) {
+                       if (cpu_data(i).proc_id ==
+                           cpu_data(j).proc_id)
                                cpu_set(j, cpu_sibling_map[i]);
                }
        }
diff --git a/arch/sparc64/kernel/sysfs.c b/arch/sparc64/kernel/sysfs.c
new file mode 100644 (file)
index 0000000..cdb1477
--- /dev/null
@@ -0,0 +1,297 @@
+/* sysfs.c: Topology sysfs support code for sparc64.
+ *
+ * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
+ */
+#include <linux/sysdev.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+
+#include <asm/hypervisor.h>
+#include <asm/spitfire.h>
+
+static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64)));
+
+#define SHOW_MMUSTAT_ULONG(NAME) \
+static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
+{ \
+       struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \
+       return sprintf(buf, "%lu\n", p->NAME); \
+} \
+static SYSDEV_ATTR(NAME, 0444, show_##NAME, NULL)
+
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_64k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_64k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_4mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_4mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_256mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_256mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_8k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_8k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_64k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_64k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_4mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_4mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_256mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_256mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_8k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_8k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_64k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_64k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_4mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_4mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_256mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_256mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_8k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_8k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_64k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_64k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_4mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_4mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_256mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte);
+
+static struct attribute *mmu_stat_attrs[] = {
+       &attr_immu_tsb_hits_ctx0_8k_tte.attr,
+       &attr_immu_tsb_ticks_ctx0_8k_tte.attr,
+       &attr_immu_tsb_hits_ctx0_64k_tte.attr,
+       &attr_immu_tsb_ticks_ctx0_64k_tte.attr,
+       &attr_immu_tsb_hits_ctx0_4mb_tte.attr,
+       &attr_immu_tsb_ticks_ctx0_4mb_tte.attr,
+       &attr_immu_tsb_hits_ctx0_256mb_tte.attr,
+       &attr_immu_tsb_ticks_ctx0_256mb_tte.attr,
+       &attr_immu_tsb_hits_ctxnon0_8k_tte.attr,
+       &attr_immu_tsb_ticks_ctxnon0_8k_tte.attr,
+       &attr_immu_tsb_hits_ctxnon0_64k_tte.attr,
+       &attr_immu_tsb_ticks_ctxnon0_64k_tte.attr,
+       &attr_immu_tsb_hits_ctxnon0_4mb_tte.attr,
+       &attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr,
+       &attr_immu_tsb_hits_ctxnon0_256mb_tte.attr,
+       &attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr,
+       &attr_dmmu_tsb_hits_ctx0_8k_tte.attr,
+       &attr_dmmu_tsb_ticks_ctx0_8k_tte.attr,
+       &attr_dmmu_tsb_hits_ctx0_64k_tte.attr,
+       &attr_dmmu_tsb_ticks_ctx0_64k_tte.attr,
+       &attr_dmmu_tsb_hits_ctx0_4mb_tte.attr,
+       &attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr,
+       &attr_dmmu_tsb_hits_ctx0_256mb_tte.attr,
+       &attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr,
+       &attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr,
+       &attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr,
+       &attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr,
+       &attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr,
+       &attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr,
+       &attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr,
+       &attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr,
+       &attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr,
+       NULL,
+};
+
+static struct attribute_group mmu_stat_group = {
+       .attrs = mmu_stat_attrs,
+       .name = "mmu_stats",
+};
+
+/* XXX convert to rusty's on_one_cpu */
+static unsigned long run_on_cpu(unsigned long cpu,
+                               unsigned long (*func)(unsigned long),
+                               unsigned long arg)
+{
+       cpumask_t old_affinity = current->cpus_allowed;
+       unsigned long ret;
+
+       /* should return -EINVAL to userspace */
+       if (set_cpus_allowed(current, cpumask_of_cpu(cpu)))
+               return 0;
+
+       ret = func(arg);
+
+       set_cpus_allowed(current, old_affinity);
+
+       return ret;
+}
+
+static unsigned long read_mmustat_enable(unsigned long junk)
+{
+       unsigned long ra = 0;
+
+       sun4v_mmustat_info(&ra);
+
+       return ra != 0;
+}
+
+static unsigned long write_mmustat_enable(unsigned long val)
+{
+       unsigned long ra, orig_ra;
+
+       if (val)
+               ra = __pa(&per_cpu(mmu_stats, smp_processor_id()));
+       else
+               ra = 0UL;
+
+       return sun4v_mmustat_conf(ra, &orig_ra);
+}
+
+static ssize_t show_mmustat_enable(struct sys_device *s, char *buf)
+{
+       unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0);
+       return sprintf(buf, "%lx\n", val);
+}
+
+static ssize_t store_mmustat_enable(struct sys_device *s, const char *buf, size_t count)
+{
+       unsigned long val, err;
+       int ret = sscanf(buf, "%ld", &val);
+
+       if (ret != 1)
+               return -EINVAL;
+
+       err = run_on_cpu(s->id, write_mmustat_enable, val);
+       if (err)
+               return -EIO;
+
+       return count;
+}
+
+static SYSDEV_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable);
+
+static int mmu_stats_supported;
+
+static int register_mmu_stats(struct sys_device *s)
+{
+       if (!mmu_stats_supported)
+               return 0;
+       sysdev_create_file(s, &attr_mmustat_enable);
+       return sysfs_create_group(&s->kobj, &mmu_stat_group);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void unregister_mmu_stats(struct sys_device *s)
+{
+       if (!mmu_stats_supported)
+               return;
+       sysfs_remove_group(&s->kobj, &mmu_stat_group);
+       sysdev_remove_file(s, &attr_mmustat_enable);
+}
+#endif
+
+#define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \
+static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
+{ \
+       cpuinfo_sparc *c = &cpu_data(dev->id); \
+       return sprintf(buf, "%lu\n", c->MEMBER); \
+}
+
+#define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \
+static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
+{ \
+       cpuinfo_sparc *c = &cpu_data(dev->id); \
+       return sprintf(buf, "%u\n", c->MEMBER); \
+}
+
+SHOW_CPUDATA_ULONG_NAME(clock_tick, clock_tick);
+SHOW_CPUDATA_ULONG_NAME(udelay_val, udelay_val);
+SHOW_CPUDATA_UINT_NAME(l1_dcache_size, dcache_size);
+SHOW_CPUDATA_UINT_NAME(l1_dcache_line_size, dcache_line_size);
+SHOW_CPUDATA_UINT_NAME(l1_icache_size, icache_size);
+SHOW_CPUDATA_UINT_NAME(l1_icache_line_size, icache_line_size);
+SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size);
+SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size);
+
+static struct sysdev_attribute cpu_core_attrs[] = {
+       _SYSDEV_ATTR(clock_tick,          0444, show_clock_tick, NULL),
+       _SYSDEV_ATTR(udelay_val,          0444, show_udelay_val, NULL),
+       _SYSDEV_ATTR(l1_dcache_size,      0444, show_l1_dcache_size, NULL),
+       _SYSDEV_ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL),
+       _SYSDEV_ATTR(l1_icache_size,      0444, show_l1_icache_size, NULL),
+       _SYSDEV_ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL),
+       _SYSDEV_ATTR(l2_cache_size,       0444, show_l2_cache_size, NULL),
+       _SYSDEV_ATTR(l2_cache_line_size,  0444, show_l2_cache_line_size, NULL),
+};
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static void register_cpu_online(unsigned int cpu)
+{
+       struct cpu *c = &per_cpu(cpu_devices, cpu);
+       struct sys_device *s = &c->sysdev;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
+               sysdev_create_file(s, &cpu_core_attrs[i]);
+
+       register_mmu_stats(s);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void unregister_cpu_online(unsigned int cpu)
+{
+       struct cpu *c = &per_cpu(cpu_devices, cpu);
+       struct sys_device *s = &c->sysdev;
+       int i;
+
+       unregister_mmu_stats(s);
+       for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
+               sysdev_remove_file(s, &cpu_core_attrs[i]);
+}
+#endif
+
+static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
+                                     unsigned long action, void *hcpu)
+{
+       unsigned int cpu = (unsigned int)(long)hcpu;
+
+       switch (action) {
+       case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
+               register_cpu_online(cpu);
+               break;
+#ifdef CONFIG_HOTPLUG_CPU
+       case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
+               unregister_cpu_online(cpu);
+               break;
+#endif
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
+       .notifier_call  = sysfs_cpu_notify,
+};
+
+static void __init check_mmu_stats(void)
+{
+       unsigned long dummy1, err;
+
+       if (tlb_type != hypervisor)
+               return;
+
+       err = sun4v_mmustat_info(&dummy1);
+       if (!err)
+               mmu_stats_supported = 1;
+}
+
+static int __init topology_init(void)
+{
+       int cpu;
+
+       check_mmu_stats();
+
+       register_cpu_notifier(&sysfs_cpu_nb);
+
+       for_each_possible_cpu(cpu) {
+               struct cpu *c = &per_cpu(cpu_devices, cpu);
+
+               register_cpu(c, cpu);
+               if (cpu_online(cpu))
+                       register_cpu_online(cpu);
+       }
+
+       return 0;
+}
+
+subsys_initcall(topology_init);
index af625147df6274a0e18de69dfb90e393760fb88b..4733f009c7c9cbf476798f151065d236619fb7bc 100644 (file)
@@ -3933,10 +3933,13 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
        /* set up set-features taskfile */
        DPRINTK("set features - xfer mode\n");
 
+       /* Some controllers and ATAPI devices show flaky interrupt
+        * behavior after setting xfer mode.  Use polling instead.
+        */
        ata_tf_init(dev, &tf);
        tf.command = ATA_CMD_SET_FEATURES;
        tf.feature = SETFEATURES_XFER;
-       tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+       tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
        tf.protocol = ATA_PROT_NODATA;
        tf.nsect = dev->xfer_mode;
 
@@ -5414,14 +5417,6 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
                }
        }
 
-       /* Some controllers show flaky interrupt behavior after
-        * setting xfer mode.  Use polling instead.
-        */
-       if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
-                    qc->tf.feature == SETFEATURES_XFER) &&
-           (ap->flags & ATA_FLAG_SETXFER_POLLING))
-               qc->tf.flags |= ATA_TFLAG_POLLING;
-
        /* select the device */
        ata_dev_select(ap, qc->dev->devno, 1, 0);
 
index a8462f1e890b569adeabde5e7beddbbed188ba37..63eca299c62bce1a80c0446c9ec277075d8c839b 100644 (file)
@@ -452,7 +452,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        /* Early VIA without UDMA support */
        static const struct ata_port_info via_mwdma_info = {
                .sht = &via_sht,
-               .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
+               .flags = ATA_FLAG_SLAVE_POSS,
                .pio_mask = 0x1f,
                .mwdma_mask = 0x07,
                .port_ops = &via_port_ops
@@ -460,7 +460,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        /* Ditto with IRQ masking required */
        static const struct ata_port_info via_mwdma_info_borked = {
                .sht = &via_sht,
-               .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
+               .flags = ATA_FLAG_SLAVE_POSS,
                .pio_mask = 0x1f,
                .mwdma_mask = 0x07,
                .port_ops = &via_port_ops_noirq,
@@ -468,7 +468,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        /* VIA UDMA 33 devices (and borked 66) */
        static const struct ata_port_info via_udma33_info = {
                .sht = &via_sht,
-               .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
+               .flags = ATA_FLAG_SLAVE_POSS,
                .pio_mask = 0x1f,
                .mwdma_mask = 0x07,
                .udma_mask = 0x7,
@@ -477,7 +477,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        /* VIA UDMA 66 devices */
        static const struct ata_port_info via_udma66_info = {
                .sht = &via_sht,
-               .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
+               .flags = ATA_FLAG_SLAVE_POSS,
                .pio_mask = 0x1f,
                .mwdma_mask = 0x07,
                .udma_mask = 0x1f,
@@ -486,7 +486,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        /* VIA UDMA 100 devices */
        static const struct ata_port_info via_udma100_info = {
                .sht = &via_sht,
-               .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
+               .flags = ATA_FLAG_SLAVE_POSS,
                .pio_mask = 0x1f,
                .mwdma_mask = 0x07,
                .udma_mask = 0x3f,
@@ -495,7 +495,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        /* UDMA133 with bad AST (All current 133) */
        static const struct ata_port_info via_udma133_info = {
                .sht = &via_sht,
-               .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING,
+               .flags = ATA_FLAG_SLAVE_POSS,
                .pio_mask = 0x1f,
                .mwdma_mask = 0x07,
                .udma_mask = 0x7f,      /* FIXME: should check north bridge */
index 9c67df5ccfa432df69acf4c335ca0707724e7363..7f6d02ce1b5f53da024bddd9e1ef0976c593ad01 100644 (file)
@@ -1475,6 +1475,7 @@ static void top_off_fp (struct fs_dev *dev, struct freepool *fp,
        struct FS_BPENTRY *qe, *ne;
        struct sk_buff *skb;
        int n = 0;
+       u32 qe_tmp;
 
        fs_dprintk (FS_DEBUG_QUEUE, "Topping off queue at %x (%d-%d/%d)\n", 
                    fp->offset, read_fs (dev, FP_CNT (fp->offset)), fp->n, 
@@ -1502,10 +1503,16 @@ static void top_off_fp (struct fs_dev *dev, struct freepool *fp,
                ne->skb = skb;
                ne->fp = fp;
 
-               qe = (struct FS_BPENTRY *) (read_fs (dev, FP_EA(fp->offset)));
-               fs_dprintk (FS_DEBUG_QUEUE, "link at %p\n", qe);
-               if (qe) {
-                       qe = bus_to_virt ((long) qe);
+               /*
+                * FIXME: following code encodes and decodes
+                * machine pointers (could be 64-bit) into a
+                * 32-bit register.
+                */
+
+               qe_tmp = read_fs (dev, FP_EA(fp->offset));
+               fs_dprintk (FS_DEBUG_QUEUE, "link at %x\n", qe_tmp);
+               if (qe_tmp) {
+                       qe = bus_to_virt ((long) qe_tmp);
                        qe->next = virt_to_bus(ne);
                        qe->flags &= ~FP_FLAGS_EPI;
                } else
index b234729706be800789dced2c155629e2bb448d03..be6b93c20f606b9e477746e81688d7abe25bcdda 100644 (file)
@@ -699,9 +699,9 @@ static void evdev_disconnect(struct input_handle *handle)
        if (evdev->open) {
                input_flush_device(handle, NULL);
                input_close_device(handle);
-               wake_up_interruptible(&evdev->wait);
                list_for_each_entry(client, &evdev->client_list, node)
                        kill_fasync(&client->fasync, SIGIO, POLL_HUP);
+               wake_up_interruptible(&evdev->wait);
        } else
                evdev_free(evdev);
 }
index 06f0541b24daaaed730d448d7f5f68bfa2b05c40..10e3b7bc925fd7b68ca8e7bb418d39471caa776c 100644 (file)
@@ -594,9 +594,9 @@ static void joydev_disconnect(struct input_handle *handle)
 
        if (joydev->open) {
                input_close_device(handle);
-               wake_up_interruptible(&joydev->wait);
                list_for_each_entry(client, &joydev->client_list, node)
                        kill_fasync(&client->fasync, SIGIO, POLL_HUP);
+               wake_up_interruptible(&joydev->wait);
        } else
                joydev_free(joydev);
 }
index 86ad1027e12a1ceabeedc5df8911fdfd30fdbc12..b069ee18e35312078ec3cff1d6910d102dd851a7 100644 (file)
@@ -54,7 +54,7 @@ static struct db9_config db9_cfg[DB9_MAX_PORTS] __initdata;
 
 module_param_array_named(dev, db9_cfg[0].args, int, &db9_cfg[0].nargs, 0);
 MODULE_PARM_DESC(dev, "Describes first attached device (<parport#>,<type>)");
-module_param_array_named(dev2, db9_cfg[1].args, int, &db9_cfg[0].nargs, 0);
+module_param_array_named(dev2, db9_cfg[1].args, int, &db9_cfg[1].nargs, 0);
 MODULE_PARM_DESC(dev2, "Describes second attached device (<parport#>,<type>)");
 module_param_array_named(dev3, db9_cfg[2].args, int, &db9_cfg[2].nargs, 0);
 MODULE_PARM_DESC(dev3, "Describes third attached device (<parport#>,<type>)");
index eb0167e9f0cbe66a591ba65c90dffadb5103daff..50e06e8dd05d764a2af87fe7aa8d5c5e6f52db94 100644 (file)
@@ -48,7 +48,7 @@ config MOUSE_PS2_ALPS
          If unsure, say Y.
 
 config MOUSE_PS2_LOGIPS2PP
-       bool "Logictech PS/2++ mouse protocol extension" if EMBEDDED
+       bool "Logitech PS/2++ mouse protocol extension" if EMBEDDED
        default y
        depends on MOUSE_PS2
        help
index 8675f95093935069718d6fe8cf49d8c80ac1fbd9..3f4866d8d18c85a8dae983273ba578e2e1df3d8f 100644 (file)
@@ -766,9 +766,9 @@ static void mousedev_disconnect(struct input_handle *handle)
 
        if (mousedev->open) {
                input_close_device(handle);
-               wake_up_interruptible(&mousedev->wait);
                list_for_each_entry(client, &mousedev->client_list, node)
                        kill_fasync(&client->fasync, SIGIO, POLL_HUP);
+               wake_up_interruptible(&mousedev->wait);
        } else
                mousedev_free(mousedev);
 }
index 8238b13874c200f33ee5ff7534dc7264e9d81919..2db364898e15f434e852c304bbcb855e271de00b 100644 (file)
@@ -476,9 +476,9 @@ static void tsdev_disconnect(struct input_handle *handle)
 
        if (tsdev->open) {
                input_close_device(handle);
-               wake_up_interruptible(&tsdev->wait);
                list_for_each_entry(client, &tsdev->client_list, node)
                        kill_fasync(&client->fasync, SIGIO, POLL_HUP);
+               wake_up_interruptible(&tsdev->wait);
        } else
                tsdev_free(tsdev);
 }
index 7772bd1d92b4be3af84623819e69947deb6e160c..38e815a2e87135227f16095607a2a588a38d6aab 100644 (file)
@@ -291,7 +291,7 @@ static void ucb1x00_ts_irq(int idx, void *id)
 
 static int ucb1x00_ts_open(struct input_dev *idev)
 {
-       struct ucb1x00_ts *ts = idev->private;
+       struct ucb1x00_ts *ts = input_get_drvdata(idev);
        int ret = 0;
 
        BUG_ON(ts->rtask);
@@ -328,7 +328,7 @@ static int ucb1x00_ts_open(struct input_dev *idev)
  */
 static void ucb1x00_ts_close(struct input_dev *idev)
 {
-       struct ucb1x00_ts *ts = idev->private;
+       struct ucb1x00_ts *ts = input_get_drvdata(idev);
 
        if (ts->rtask)
                kthread_stop(ts->rtask);
@@ -380,7 +380,6 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
        ts->idev = idev;
        ts->adcsync = adcsync ? UCB_SYNC : UCB_NOSYNC;
 
-       idev->private    = ts;
        idev->name       = "Touchscreen panel";
        idev->id.product = ts->ucb->id;
        idev->open       = ucb1x00_ts_open;
@@ -391,6 +390,8 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
        __set_bit(ABS_Y, idev->absbit);
        __set_bit(ABS_PRESSURE, idev->absbit);
 
+       input_set_drvdata(idev, ts);
+
        err = input_register_device(idev);
        if (err)
                goto fail;
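The ucb1x00-ts hunks above switch from writing idev->private directly to the input core's drvdata accessors, and set the data just before input_register_device() so that ->open() never runs against unset state. Below is a minimal sketch of the same pattern; the example_* names are invented and error handling is trimmed for brevity, while the input_* calls are the real accessors the conversion targets.

    #include <linux/errno.h>
    #include <linux/input.h>
    #include <linux/slab.h>

    struct example_priv {
            int opened;
    };

    static int example_open(struct input_dev *idev)
    {
            struct example_priv *priv = input_get_drvdata(idev);   /* was idev->private */

            priv->opened = 1;
            return 0;
    }

    static int example_setup(void)
    {
            struct input_dev *idev = input_allocate_device();
            struct example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

            if (!idev || !priv)
                    return -ENOMEM; /* real code would free whichever allocation succeeded */

            idev->name = "example";
            idev->open = example_open;
            input_set_drvdata(idev, priv);  /* was idev->private = priv */

            return input_register_device(idev);
    }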
index 92055405cb3041cc699deb734637e9fffc6bdd30..451adcc52b3c5c281da118b50768ccd338a482a8 100644 (file)
@@ -1,10 +1,9 @@
 #
 # Makefile for the memory technology device drivers.
 #
-# $Id: Makefile.common,v 1.7 2005/07/11 10:39:27 gleixner Exp $
 
 # Core functionality.
-mtd-y                          := mtdcore.o
+mtd-y                          := mtdcore.o mtdsuper.o
 mtd-$(CONFIG_MTD_PARTITIONS)   += mtdpart.o
 obj-$(CONFIG_MTD)              += $(mtd-y)
 
index 389fea28b9a6da2e05de4a0bf6be59a4c0fdfe1f..14ffb1a9302a51e8dc2271f7311116b1f7b635f3 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/kernel.h>
 #include <linux/fs.h>
 #include <linux/major.h>
-#include <linux/root_dev.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/map.h>
 #include <linux/mtd/partitions.h>
@@ -89,10 +88,6 @@ int __init uclinux_mtd_init(void)
        uclinux_ram_mtdinfo = mtd;
        add_mtd_partitions(mtd, uclinux_romfs, NUM_PARTITIONS);
 
-       printk("uclinux[mtd]: set %s to be root filesystem\n",
-               uclinux_romfs[0].name);
-       ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, 0);
-
        return(0);
 }
 
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
new file mode 100644 (file)
index 0000000..aca3319
--- /dev/null
@@ -0,0 +1,232 @@
+/* MTD-based superblock management
+ *
+ * Copyright © 2001-2007 Red Hat, Inc. All Rights Reserved.
+ * Written by:  David Howells <dhowells@redhat.com>
+ *              David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/mtd/super.h>
+#include <linux/namei.h>
+#include <linux/ctype.h>
+
+/*
+ * compare superblocks to see if they're equivalent
+ * - they are if the underlying MTD device is the same
+ */
+static int get_sb_mtd_compare(struct super_block *sb, void *_mtd)
+{
+       struct mtd_info *mtd = _mtd;
+
+       if (sb->s_mtd == mtd) {
+               DEBUG(2, "MTDSB: Match on device %d (\"%s\")\n",
+                     mtd->index, mtd->name);
+               return 1;
+       }
+
+       DEBUG(2, "MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n",
+             sb->s_mtd->index, sb->s_mtd->name, mtd->index, mtd->name);
+       return 0;
+}
+
+/*
+ * mark the superblock by the MTD device it is using
+ * - set the device number to be the correct MTD block device for persistence
+ *   of NFS exports
+ */
+static int get_sb_mtd_set(struct super_block *sb, void *_mtd)
+{
+       struct mtd_info *mtd = _mtd;
+
+       sb->s_mtd = mtd;
+       sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
+       return 0;
+}
+
+/*
+ * get a superblock on an MTD-backed filesystem
+ */
+static int get_sb_mtd_aux(struct file_system_type *fs_type, int flags,
+                         const char *dev_name, void *data,
+                         struct mtd_info *mtd,
+                         int (*fill_super)(struct super_block *, void *, int),
+                         struct vfsmount *mnt)
+{
+       struct super_block *sb;
+       int ret;
+
+       sb = sget(fs_type, get_sb_mtd_compare, get_sb_mtd_set, mtd);
+       if (IS_ERR(sb))
+               goto out_error;
+
+       if (sb->s_root)
+               goto already_mounted;
+
+       /* fresh new superblock */
+       DEBUG(1, "MTDSB: New superblock for device %d (\"%s\")\n",
+             mtd->index, mtd->name);
+
+       ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
+       if (ret < 0) {
+               up_write(&sb->s_umount);
+               deactivate_super(sb);
+               return ret;
+       }
+
+       /* go */
+       sb->s_flags |= MS_ACTIVE;
+       return simple_set_mnt(mnt, sb);
+
+       /* new mountpoint for an already mounted superblock */
+already_mounted:
+       DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n",
+             mtd->index, mtd->name);
+       ret = simple_set_mnt(mnt, sb);
+       goto out_put;
+
+out_error:
+       ret = PTR_ERR(sb);
+out_put:
+       put_mtd_device(mtd);
+       return ret;
+}
+
+/*
+ * get a superblock on an MTD-backed filesystem by MTD device number
+ */
+static int get_sb_mtd_nr(struct file_system_type *fs_type, int flags,
+                        const char *dev_name, void *data, int mtdnr,
+                        int (*fill_super)(struct super_block *, void *, int),
+                        struct vfsmount *mnt)
+{
+       struct mtd_info *mtd;
+
+       mtd = get_mtd_device(NULL, mtdnr);
+       if (IS_ERR(mtd)) {
+               DEBUG(0, "MTDSB: Device #%u doesn't appear to exist\n", mtdnr);
+               return PTR_ERR(mtd);
+       }
+
+       return get_sb_mtd_aux(fs_type, flags, dev_name, data, mtd, fill_super,
+                             mnt);
+}
+
+/*
+ * set up an MTD-based superblock
+ */
+int get_sb_mtd(struct file_system_type *fs_type, int flags,
+              const char *dev_name, void *data,
+              int (*fill_super)(struct super_block *, void *, int),
+              struct vfsmount *mnt)
+{
+       struct nameidata nd;
+       int mtdnr, ret;
+
+       if (!dev_name)
+               return -EINVAL;
+
+       DEBUG(2, "MTDSB: dev_name \"%s\"\n", dev_name);
+
+       /* the preferred way of mounting in future; especially when
+        * CONFIG_BLOCK=n - we specify the underlying MTD device by number or
+        * by name, so that we don't require block device support to be present
+        * in the kernel. */
+       if (dev_name[0] == 'm' && dev_name[1] == 't' && dev_name[2] == 'd') {
+               if (dev_name[3] == ':') {
+                       struct mtd_info *mtd;
+
+                       /* mount by MTD device name */
+                       DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n",
+                             dev_name + 4);
+
+                       for (mtdnr = 0; mtdnr < MAX_MTD_DEVICES; mtdnr++) {
+                               mtd = get_mtd_device(NULL, mtdnr);
+                               if (!IS_ERR(mtd)) {
+                                       if (!strcmp(mtd->name, dev_name + 4))
+                                               return get_sb_mtd_aux(
+                                                       fs_type, flags,
+                                                       dev_name, data, mtd,
+                                                       fill_super, mnt);
+
+                                       put_mtd_device(mtd);
+                               }
+                       }
+
+                       printk(KERN_NOTICE "MTD:"
+                              " MTD device with name \"%s\" not found.\n",
+                              dev_name + 4);
+
+               } else if (isdigit(dev_name[3])) {
+                       /* mount by MTD device number ("mtd<N>") */
+                       char *endptr;
+
+                       mtdnr = simple_strtoul(dev_name + 3, &endptr, 0);
+                       if (!*endptr) {
+                               /* It was a valid number */
+                               DEBUG(1, "MTDSB: mtd%%d, mtdnr %d\n",
+                                     mtdnr);
+                               return get_sb_mtd_nr(fs_type, flags,
+                                                    dev_name, data,
+                                                    mtdnr, fill_super, mnt);
+                       }
+               }
+       }
+
+       /* try the old way - the hack where we allowed users to mount
+        * /dev/mtdblock$(n) but didn't actually _use_ the blockdev
+        */
+       ret = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
+
+       DEBUG(1, "MTDSB: path_lookup() returned %d, inode %p\n",
+             ret, nd.dentry ? nd.dentry->d_inode : NULL);
+
+       if (ret)
+               return ret;
+
+       ret = -EINVAL;
+
+       if (!S_ISBLK(nd.dentry->d_inode->i_mode))
+               goto out;
+
+       if (nd.mnt->mnt_flags & MNT_NODEV) {
+               ret = -EACCES;
+               goto out;
+       }
+
+       if (imajor(nd.dentry->d_inode) != MTD_BLOCK_MAJOR)
+               goto not_an_MTD_device;
+
+       mtdnr = iminor(nd.dentry->d_inode);
+       path_release(&nd);
+
+       return get_sb_mtd_nr(fs_type, flags, dev_name, data, mtdnr, fill_super,
+                            mnt);
+
+not_an_MTD_device:
+       if (!(flags & MS_SILENT))
+               printk(KERN_NOTICE
+                      "MTD: Attempt to mount non-MTD device \"%s\"\n",
+                      dev_name);
+out:
+       path_release(&nd);
+       return ret;
+
+}
+
+EXPORT_SYMBOL_GPL(get_sb_mtd);
+
+/*
+ * destroy an MTD-based superblock
+ */
+void kill_mtd_super(struct super_block *sb)
+{
+       generic_shutdown_super(sb);
+       put_mtd_device(sb->s_mtd);
+       sb->s_mtd = NULL;
+}
+
+EXPORT_SYMBOL_GPL(kill_mtd_super);
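get_sb_mtd() and kill_mtd_super() are meant to be wired straight into a filesystem's file_system_type, which is exactly what the jffs2 conversion later in this patch does. A hedged sketch of that wiring under an invented "examplefs" name is below; the ->get_sb/->kill_sb hooks and the helper signatures are the 2.6-era interfaces shown above, everything else is illustrative.

    #include <linux/module.h>
    #include <linux/fs.h>
    #include <linux/mtd/super.h>

    /* called by get_sb_mtd_aux() once sb->s_mtd and sb->s_dev are set */
    static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
    {
            /* a real filesystem would parse options and build sb->s_root here */
            return 0;
    }

    static int examplefs_get_sb(struct file_system_type *fs_type, int flags,
                                const char *dev_name, void *data,
                                struct vfsmount *mnt)
    {
            return get_sb_mtd(fs_type, flags, dev_name, data,
                              examplefs_fill_super, mnt);
    }

    static void examplefs_kill_sb(struct super_block *sb)
    {
            kill_mtd_super(sb);     /* shuts down the super and releases the MTD device */
    }

    static struct file_system_type examplefs_fs_type = {
            .owner   = THIS_MODULE,
            .name    = "examplefs",
            .get_sb  = examplefs_get_sb,
            .kill_sb = examplefs_kill_sb,
    };

    static int __init examplefs_init(void)
    {
            return register_filesystem(&examplefs_fs_type);
    }
    module_init(examplefs_init);

With that in place the filesystem mounts as, for example, "mount -t examplefs mtd:flash0 /mnt" or "mount -t examplefs mtd0 /mnt", exercising the name/number parsing in get_sb_mtd() above; the mount device names here are illustrative.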
index 023779a581fd14609f5d66be52315d9f47f2a036..2f3184184ad9b201c82d5c38260de72d4a5f2d6a 100644 (file)
@@ -64,8 +64,8 @@
 
 #define DRV_MODULE_NAME                "tg3"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "3.76"
-#define DRV_MODULE_RELDATE     "May 5, 2007"
+#define DRV_MODULE_VERSION     "3.77"
+#define DRV_MODULE_RELDATE     "May 31, 2007"
 
 #define TG3_DEF_MAC_MODE       0
 #define TG3_DEF_RX_MODE                0
@@ -10961,6 +10961,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
         * upon subsystem IDs.
         */
        if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
            !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
                                  TG3_FLAG_USE_LINKCHG_REG);
index 572034ceb1434ebfe09e7281aeb020ca9d4e333c..2b2f5c12019b82b94f7115088f8de140d10d271a 100644 (file)
@@ -1532,6 +1532,7 @@ source "drivers/scsi/arm/Kconfig"
 config JAZZ_ESP
        bool "MIPS JAZZ FAS216 SCSI support"
        depends on MACH_JAZZ && SCSI
+       select SCSI_SPI_ATTRS
        help
          This is the driver for the onboard SCSI host adapter of MIPS Magnum
          4000, Acer PICA, Olivetti M700-10 and a few other identical OEM
@@ -1756,6 +1757,7 @@ config SUN3X_ESP
 config SCSI_SUNESP
        tristate "Sparc ESP Scsi Driver"
        depends on SBUS && SCSI
+       select SCSI_SPI_ATTRS
        help
          This is the driver for the Sun ESP SCSI host adapter. The ESP
          chipset is present in most SPARC SBUS-based computers.
index 350ea7feb61d635a50c5cb09d2674a73b1f2bdcf..5c487ff096c7dca723374944c7751ebdf03d7629 100644 (file)
@@ -863,6 +863,14 @@ static struct scsi_host_template aac_driver_template = {
        .emulated                       = 1,
 };
 
+static void __aac_shutdown(struct aac_dev * aac)
+{
+       kthread_stop(aac->thread);
+       aac_send_shutdown(aac);
+       aac_adapter_disable_int(aac);
+       free_irq(aac->pdev->irq, aac);
+}
+
 static int __devinit aac_probe_one(struct pci_dev *pdev,
                const struct pci_device_id *id)
 {
@@ -1015,10 +1023,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
        return 0;
 
  out_deinit:
-       kthread_stop(aac->thread);
-       aac_send_shutdown(aac);
-       aac_adapter_disable_int(aac);
-       free_irq(pdev->irq, aac);
+       __aac_shutdown(aac);
  out_unmap:
        aac_fib_map_free(aac);
        pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
@@ -1038,7 +1043,8 @@ static void aac_shutdown(struct pci_dev *dev)
 {
        struct Scsi_Host *shost = pci_get_drvdata(dev);
        struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
-       aac_send_shutdown(aac);
+       scsi_block_requests(shost);
+       __aac_shutdown(aac);
 }
 
 static void __devexit aac_remove_one(struct pci_dev *pdev)
@@ -1048,16 +1054,12 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
 
        scsi_remove_host(shost);
 
-       kthread_stop(aac->thread);
-
-       aac_send_shutdown(aac);
-       aac_adapter_disable_int(aac);
+       __aac_shutdown(aac);
        aac_fib_map_free(aac);
        pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
                        aac->comm_phys);
        kfree(aac->queues);
 
-       free_irq(pdev->irq, aac);
        aac_adapter_ioremap(aac, 0);
        
        kfree(aac->fibs);
index eff846ae0aff62ffd15394f0b35de6ba8cad8728..03dbe60c264aa5c4490654a9c528f59766b5faab 100644 (file)
@@ -893,45 +893,6 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
        return 0;
 }
 
-/*
- * our own old-style timeout update
- */
-/*
- * The strategy is to cause the timer code to call scsi_times_out()
- * when the soonest timeout is pending.
- * The arguments are used when we are queueing a new command, because
- * we do not want to subtract the time used from this time, but when we
- * set the timer, we want to take this value into account.
- */
-
-int atari_scsi_update_timeout(Scsi_Cmnd * SCset, int timeout)
-{
-       int rtn;
-
-       /*
-        * We are using the new error handling code to actually register/deregister
-        * timers for timeout.
-        */
-
-       if (!timer_pending(&SCset->eh_timeout))
-               rtn = 0;
-       else
-               rtn = SCset->eh_timeout.expires - jiffies;
-
-       if (timeout == 0) {
-               del_timer(&SCset->eh_timeout);
-               SCset->eh_timeout.data = (unsigned long)NULL;
-               SCset->eh_timeout.expires = 0;
-       } else {
-               if (SCset->eh_timeout.data != (unsigned long)NULL)
-                       del_timer(&SCset->eh_timeout);
-               SCset->eh_timeout.data = (unsigned long)SCset;
-               SCset->eh_timeout.expires = jiffies + timeout;
-               add_timer(&SCset->eh_timeout);
-       }
-       return rtn;
-}
-
 /*
  * Function : int NCR5380_queue_command (Scsi_Cmnd *cmd,
  *     void (*done)(Scsi_Cmnd *))
@@ -956,7 +917,6 @@ static int NCR5380_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
        Scsi_Cmnd *tmp;
        int oldto;
        unsigned long flags;
-       // extern int update_timeout(Scsi_Cmnd * SCset, int timeout);
 
 #if (NDEBUG & NDEBUG_NO_WRITE)
        switch (cmd->cmnd[0]) {
@@ -1029,9 +989,9 @@ static int NCR5380_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
         * alter queues and touch the lock.
         */
        if (!IS_A_TT()) {
-               oldto = atari_scsi_update_timeout(cmd, 0);
+               /* perhaps stop command timer here */
                falcon_get_lock();
-               atari_scsi_update_timeout(cmd, oldto);
+               /* perhaps restart command timer here */
        }
        if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
                LIST(cmd, hostdata->issue_queue);
index dd076da86a465d668d6b251512492176a65e81be..b98136adaaaebe4784040a05eebcdbd92a490b63 100644 (file)
@@ -2590,7 +2590,7 @@ qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
                        return 0;
                if (msleep_interruptible(step))
                        break;
-       } while (--iterations >= 0);
+       } while (--iterations > 0);
 
        return -ETIMEDOUT;
 }
index a67f315244d7c17de1d3acd2697c2b78463e07ea..662577fbe7a87ddd93a66e37e66bbedbeb64e73d 100644 (file)
@@ -184,6 +184,15 @@ int scsi_complete_async_scans(void)
 /* Only exported for the benefit of scsi_wait_scan */
 EXPORT_SYMBOL_GPL(scsi_complete_async_scans);
 
+#ifndef MODULE
+/*
+ * For async scanning we need to wait for all the scans to complete before
+ * trying to mount the root fs.  Otherwise non-modular drivers may not be ready
+ * yet.
+ */
+late_initcall(scsi_complete_async_scans);
+#endif
+
 /**
  * scsi_unlock_floptical - unlock device via a special MODE SENSE command
  * @sdev:      scsi device to send command to
index c3219b29b5ac70b87190c36b195c4a6139dc385b..4831edbae2d589a0442021df9ee78aeaec10430f 100644 (file)
@@ -411,7 +411,7 @@ EXPORT_SYMBOL_GPL(spi_alloc_master);
  */
 int spi_register_master(struct spi_master *master)
 {
-       static atomic_t         dyn_bus_id = ATOMIC_INIT((1<<16) - 1);
+       static atomic_t         dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
        struct device           *dev = master->cdev.dev;
        int                     status = -ENODEV;
        int                     dynamic = 0;
index 6e1f1ea21b38ddae33dc1747f0acffd3826dd1ce..403dac787ebf0f98975802648455470b9d8edcac 100644 (file)
@@ -755,7 +755,7 @@ config FB_LEO
 
 config FB_IGA
        bool "IGA 168x display support"
-       depends on FB && SPARC32
+       depends on (FB = y) && SPARC32
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
@@ -765,7 +765,7 @@ config FB_IGA
 
 config FB_XVR500
        bool "Sun XVR-500 3DLABS Wildcat support"
-       depends on FB && PCI && SPARC64
+       depends on (FB = y) && PCI && SPARC64
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
@@ -778,7 +778,7 @@ config FB_XVR500
 
 config FB_XVR2500
        bool "Sun XVR-2500 3DLABS Wildcat support"
-       depends on FB && PCI && SPARC64
+       depends on (FB = y) && PCI && SPARC64
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
index 9b26dda18a38c1e13fde4b3b92610a36d840d7ef..ac46cc3f6a2a1286586d51cc3bec3dce23ff0d19 100644 (file)
@@ -47,7 +47,7 @@ targets := promcon_tbl.c
 quiet_cmd_conmakehash = CNMKHSH $@
       cmd_conmakehash = scripts/conmakehash $< | \
                sed -e '/\#include <[^>]*>/p' -e 's/types/init/' \
-               -e 's/dfont\(_uni.*\]\)/promfont\1 __initdata/' > $@
+               -e 's/dfont\(_uni.*\]\)/promfont\1 /' > $@
 
 $(obj)/promcon_tbl.c: $(src)/prom.uni
        $(call cmd,conmakehash)
index 1d4e8354b56159b407783eb073a70a4c464fcbf2..3f6c98fad437fca8f542dbd11a29a19e55d52bce 100644 (file)
@@ -656,7 +656,7 @@ static int ffb_setcolreg(unsigned regno,
 {
        u32 value;
 
-       if (regno >= 256)
+       if (regno >= 16)
                return 1;
 
        red >>= 8;
@@ -903,7 +903,7 @@ ffb_init_fix(struct fb_info *info)
 struct all_info {
        struct fb_info info;
        struct ffb_par par;
-       u32 pseudo_palette[256];
+       u32 pseudo_palette[16];
 };
 
 static int ffb_init_one(struct of_device *op)
index 4316c7fe8e21c52d92fa4cca203bb796d7594a54..c3869a96ab5877162ea6e72960f7d0fdf73e9cd1 100644 (file)
@@ -28,7 +28,7 @@ struct s3d_info {
        unsigned int            depth;
        unsigned int            fb_size;
 
-       u32                     pseudo_palette[256];
+       u32                     pseudo_palette[16];
 };
 
 static int __devinit s3d_get_props(struct s3d_info *sp)
@@ -52,15 +52,14 @@ static int s3d_setcolreg(unsigned regno,
 {
        u32 value;
 
-       if (regno >= 256)
-               return 1;
+       if (regno < 16) {
+               red >>= 8;
+               green >>= 8;
+               blue >>= 8;
 
-       red >>= 8;
-       green >>= 8;
-       blue >>= 8;
-
-       value = (blue << 24) | (green << 16) | (red << 8);
-       ((u32 *)info->pseudo_palette)[regno] = value;
+               value = (blue << 24) | (green << 16) | (red << 8);
+               ((u32 *)info->pseudo_palette)[regno] = value;
+       }
 
        return 0;
 }
index 08880a62bfa3302e3983169b73dd461f6c50913d..71bf3f1f00bcf1b9f2aa018281494a9a2a6b0e72 100644 (file)
@@ -50,7 +50,7 @@ struct e3d_info {
        u32                     fb8_0_off;
        u32                     fb8_1_off;
 
-       u32                     pseudo_palette[256];
+       u32                     pseudo_palette[16];
 };
 
 static int __devinit e3d_get_props(struct e3d_info *ep)
@@ -126,7 +126,9 @@ static int e3d_setcolreg(unsigned regno,
        blue_8 = blue >> 8;
 
        value = (blue_8 << 24) | (green_8 << 16) | (red_8 << 8);
-       ((u32 *)info->pseudo_palette)[regno] = value;
+
+       if (info->fix.visual == FB_VISUAL_TRUECOLOR && regno < 16)
+               ((u32 *)info->pseudo_palette)[regno] = value;
 
 
        red_10 = red >> 6;
index 479c1038ed4a4f7d3354e5939f6eae6a910e1d4f..8c90cbc903fa812e562984dab11ccc4e9212f2d2 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/fs.h>
 #include <linux/security.h>
 #include <linux/module.h>
+#include <linux/kallsyms.h>
 
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
@@ -20,6 +21,7 @@ static long do_ioctl(struct file *filp, unsigned int cmd,
                unsigned long arg)
 {
        int error = -ENOTTY;
+       void *f;
 
        if (!filp->f_op)
                goto out;
@@ -29,10 +31,16 @@ static long do_ioctl(struct file *filp, unsigned int cmd,
                if (error == -ENOIOCTLCMD)
                        error = -EINVAL;
                goto out;
-       } else if (filp->f_op->ioctl) {
+       } else if ((f = filp->f_op->ioctl)) {
                lock_kernel();
-               error = filp->f_op->ioctl(filp->f_path.dentry->d_inode,
-                                         filp, cmd, arg);
+               if (!filp->f_op->ioctl) {
+                       printk("%s: ioctl %p disappeared\n", __FUNCTION__, f);
+                       print_symbol("symbol: %s\n", (unsigned long)f);
+                       dump_stack();
+               } else {
+                       error = filp->f_op->ioctl(filp->f_path.dentry->d_inode,
+                                                 filp, cmd, arg);
+               }
                unlock_kernel();
        }
 
index 4884d5edfe658282d626c6674497231a9ff4d20a..12e83f67eee493cfe625430e14e1e7a938d09395 100644 (file)
@@ -229,9 +229,16 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
           check anyway. */
        if (!tn->fn->size) {
                if (rii->mdata_tn) {
-                       /* We had a candidate mdata node already */
-                       dbg_readinode("kill old mdata with ver %d\n", rii->mdata_tn->version);
-                       jffs2_kill_tn(c, rii->mdata_tn);
+                       if (rii->mdata_tn->version < tn->version) {
+                               /* We had a candidate mdata node already */
+                               dbg_readinode("kill old mdata with ver %d\n", rii->mdata_tn->version);
+                               jffs2_kill_tn(c, rii->mdata_tn);
+                       } else {
+                               dbg_readinode("kill new mdata with ver %d (older than existing %d)\n",
+                                             tn->version, rii->mdata_tn->version);
+                               jffs2_kill_tn(c, tn);
+                               return 0;
+                       }
                }
                rii->mdata_tn = tn;
                dbg_readinode("keep new mdata with ver %d\n", tn->version);
@@ -1044,7 +1051,8 @@ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_inf
 
                case JFFS2_NODETYPE_DIRENT:
 
-                       if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent)) {
+                       if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent) &&
+                           len < sizeof(struct jffs2_raw_dirent)) {
                                err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf);
                                if (unlikely(err))
                                        goto free_out;
@@ -1058,7 +1066,8 @@ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_inf
 
                case JFFS2_NODETYPE_INODE:
 
-                       if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode)) {
+                       if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode) &&
+                           len < sizeof(struct jffs2_raw_inode)) {
                                err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf);
                                if (unlikely(err))
                                        goto free_out;
@@ -1071,7 +1080,8 @@ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_inf
                        break;
 
                default:
-                       if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node)) {
+                       if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node) &&
+                           len < sizeof(struct jffs2_unknown_node)) {
                                err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf);
                                if (unlikely(err))
                                        goto free_out;
index 6488af43bc9b757e5de859184aacd987404d866f..e220d3bd610de5ae2bc3eb96441d0c9c3c1c8d28 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/mount.h>
 #include <linux/jffs2.h>
 #include <linux/pagemap.h>
-#include <linux/mtd/mtd.h>
+#include <linux/mtd/super.h>
 #include <linux/ctype.h>
 #include <linux/namei.h>
 #include "compr.h"
@@ -75,69 +75,27 @@ static const struct super_operations jffs2_super_operations =
        .sync_fs =      jffs2_sync_fs,
 };
 
-static int jffs2_sb_compare(struct super_block *sb, void *data)
-{
-       struct jffs2_sb_info *p = data;
-       struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
-
-       /* The superblocks are considered to be equivalent if the underlying MTD
-          device is the same one */
-       if (c->mtd == p->mtd) {
-               D1(printk(KERN_DEBUG "jffs2_sb_compare: match on device %d (\"%s\")\n", p->mtd->index, p->mtd->name));
-               return 1;
-       } else {
-               D1(printk(KERN_DEBUG "jffs2_sb_compare: No match, device %d (\"%s\"), device %d (\"%s\")\n",
-                         c->mtd->index, c->mtd->name, p->mtd->index, p->mtd->name));
-               return 0;
-       }
-}
-
-static int jffs2_sb_set(struct super_block *sb, void *data)
-{
-       struct jffs2_sb_info *p = data;
-
-       /* For persistence of NFS exports etc. we use the same s_dev
-          each time we mount the device, don't just use an anonymous
-          device */
-       sb->s_fs_info = p;
-       p->os_priv = sb;
-       sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, p->mtd->index);
-
-       return 0;
-}
-
-static int jffs2_get_sb_mtd(struct file_system_type *fs_type,
-                           int flags, const char *dev_name,
-                           void *data, struct mtd_info *mtd,
-                           struct vfsmount *mnt)
+/*
+ * fill in the superblock
+ */
+static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
 {
-       struct super_block *sb;
        struct jffs2_sb_info *c;
-       int ret;
+
+       D1(printk(KERN_DEBUG "jffs2_fill_super():"
+                 " New superblock for device %d (\"%s\")\n",
+                 sb->s_mtd->index, sb->s_mtd->name));
 
        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;
-       c->mtd = mtd;
-
-       sb = sget(fs_type, jffs2_sb_compare, jffs2_sb_set, c);
-
-       if (IS_ERR(sb))
-               goto out_error;
-
-       if (sb->s_root) {
-               /* New mountpoint for JFFS2 which is already mounted */
-               D1(printk(KERN_DEBUG "jffs2_get_sb_mtd(): Device %d (\"%s\") is already mounted\n",
-                         mtd->index, mtd->name));
-               ret = simple_set_mnt(mnt, sb);
-               goto out_put;
-       }
 
-       D1(printk(KERN_DEBUG "jffs2_get_sb_mtd(): New superblock for device %d (\"%s\")\n",
-                 mtd->index, mtd->name));
+       c->mtd = sb->s_mtd;
+       c->os_priv = sb;
+       sb->s_fs_info = c;
 
-       /* Initialize JFFS2 superblock locks, the further initialization will be
-        * done later */
+       /* Initialize JFFS2 superblock locks, the further initialization will
+        * be done later */
        init_MUTEX(&c->alloc_sem);
        init_MUTEX(&c->erase_free_sem);
        init_waitqueue_head(&c->erase_wait);
@@ -146,133 +104,20 @@ static int jffs2_get_sb_mtd(struct file_system_type *fs_type,
        spin_lock_init(&c->inocache_lock);
 
        sb->s_op = &jffs2_super_operations;
-       sb->s_flags = flags | MS_NOATIME;
+       sb->s_flags |= MS_NOATIME;
        sb->s_xattr = jffs2_xattr_handlers;
 #ifdef CONFIG_JFFS2_FS_POSIX_ACL
        sb->s_flags |= MS_POSIXACL;
 #endif
-       ret = jffs2_do_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
-
-       if (ret) {
-               /* Failure case... */
-               up_write(&sb->s_umount);
-               deactivate_super(sb);
-               return ret;
-       }
-
-       sb->s_flags |= MS_ACTIVE;
-       return simple_set_mnt(mnt, sb);
-
-out_error:
-       ret = PTR_ERR(sb);
- out_put:
-       kfree(c);
-       put_mtd_device(mtd);
-
-       return ret;
-}
-
-static int jffs2_get_sb_mtdnr(struct file_system_type *fs_type,
-                             int flags, const char *dev_name,
-                             void *data, int mtdnr,
-                             struct vfsmount *mnt)
-{
-       struct mtd_info *mtd;
-
-       mtd = get_mtd_device(NULL, mtdnr);
-       if (IS_ERR(mtd)) {
-               D1(printk(KERN_DEBUG "jffs2: MTD device #%u doesn't appear to exist\n", mtdnr));
-               return PTR_ERR(mtd);
-       }
-
-       return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd, mnt);
+       return jffs2_do_fill_super(sb, data, silent);
 }
 
 static int jffs2_get_sb(struct file_system_type *fs_type,
                        int flags, const char *dev_name,
                        void *data, struct vfsmount *mnt)
 {
-       int err;
-       struct nameidata nd;
-       int mtdnr;
-
-       if (!dev_name)
-               return -EINVAL;
-
-       D1(printk(KERN_DEBUG "jffs2_get_sb(): dev_name \"%s\"\n", dev_name));
-
-       /* The preferred way of mounting in future; especially when
-          CONFIG_BLK_DEV is implemented - we specify the underlying
-          MTD device by number or by name, so that we don't require
-          block device support to be present in the kernel. */
-
-       /* FIXME: How to do the root fs this way? */
-
-       if (dev_name[0] == 'm' && dev_name[1] == 't' && dev_name[2] == 'd') {
-               /* Probably mounting without the blkdev crap */
-               if (dev_name[3] == ':') {
-                       struct mtd_info *mtd;
-
-                       /* Mount by MTD device name */
-                       D1(printk(KERN_DEBUG "jffs2_get_sb(): mtd:%%s, name \"%s\"\n", dev_name+4));
-                       for (mtdnr = 0; mtdnr < MAX_MTD_DEVICES; mtdnr++) {
-                               mtd = get_mtd_device(NULL, mtdnr);
-                               if (!IS_ERR(mtd)) {
-                                       if (!strcmp(mtd->name, dev_name+4))
-                                               return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd, mnt);
-                                       put_mtd_device(mtd);
-                               }
-                       }
-                       printk(KERN_NOTICE "jffs2_get_sb(): MTD device with name \"%s\" not found.\n", dev_name+4);
-               } else if (isdigit(dev_name[3])) {
-                       /* Mount by MTD device number name */
-                       char *endptr;
-
-                       mtdnr = simple_strtoul(dev_name+3, &endptr, 0);
-                       if (!*endptr) {
-                               /* It was a valid number */
-                               D1(printk(KERN_DEBUG "jffs2_get_sb(): mtd%%d, mtdnr %d\n", mtdnr));
-                               return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr, mnt);
-                       }
-               }
-       }
-
-       /* Try the old way - the hack where we allowed users to mount
-          /dev/mtdblock$(n) but didn't actually _use_ the blkdev */
-
-       err = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
-
-       D1(printk(KERN_DEBUG "jffs2_get_sb(): path_lookup() returned %d, inode %p\n",
-                 err, nd.dentry->d_inode));
-
-       if (err)
-               return err;
-
-       err = -EINVAL;
-
-       if (!S_ISBLK(nd.dentry->d_inode->i_mode))
-               goto out;
-
-       if (nd.mnt->mnt_flags & MNT_NODEV) {
-               err = -EACCES;
-               goto out;
-       }
-
-       if (imajor(nd.dentry->d_inode) != MTD_BLOCK_MAJOR) {
-               if (!(flags & MS_SILENT))
-                       printk(KERN_NOTICE "Attempt to mount non-MTD device \"%s\" as JFFS2\n",
-                              dev_name);
-               goto out;
-       }
-
-       mtdnr = iminor(nd.dentry->d_inode);
-       path_release(&nd);
-
-       return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr, mnt);
-
-out:
-       path_release(&nd);
-       return err;
+       return get_sb_mtd(fs_type, flags, dev_name, data, jffs2_fill_super,
+                         mnt);
 }
 
 static void jffs2_put_super (struct super_block *sb)
@@ -307,8 +152,7 @@ static void jffs2_kill_sb(struct super_block *sb)
        struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
        if (!(sb->s_flags & MS_RDONLY))
                jffs2_stop_garbage_collect_thread(c);
-       generic_shutdown_super(sb);
-       put_mtd_device(c->mtd);
+       kill_mtd_super(sb);
        kfree(c);
 }
 
index 78fc08893a6c781ed94594e96f0408d4be21cbb7..e48665984cb3393d1f859e27db42ba29a6937345 100644 (file)
@@ -754,6 +754,10 @@ void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c)
                list_del(&xd->xindex);
                jffs2_free_xattr_datum(xd);
        }
+       list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) {
+               list_del(&xd->xindex);
+               jffs2_free_xattr_datum(xd);
+       }
 }
 
 #define XREF_TMPHASH_SIZE      (128)
@@ -825,7 +829,7 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
                           ref->xd and ref->ic are not valid yet. */
                        xd = jffs2_find_xattr_datum(c, ref->xid);
                        ic = jffs2_get_ino_cache(c, ref->ino);
-                       if (!xd || !ic) {
+                       if (!xd || !ic || !ic->nlink) {
                                dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) is orphan.\n",
                                          ref->ino, ref->xid, ref->xseqno);
                                ref->xseqno |= XREF_DELETE_MARKER;
index 03c385de7619e517373fd9216670761354938473..445026fbec35e4619ca3b5a1b005a88f9841d5d6 100644 (file)
@@ -31,7 +31,7 @@ typedef struct {
        unsigned int    ecache_size;
        unsigned int    ecache_line_size;
        int             core_id;
-       unsigned int    __pad3;
+       int             proc_id;
 } cpuinfo_sparc;
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
index 9329429fb7f6af6ae2c50ad92e24e83482b752f4..4e21c2f3065ca16988802fab9067f80c43e2dddc 100644 (file)
@@ -162,6 +162,22 @@ dma_mapping_error(dma_addr_t dma_addr)
 #else
 
 struct device;
+struct page;
+struct scatterlist;
+
+static inline int
+dma_supported(struct device *dev, u64 mask)
+{
+       BUG();
+       return 0;
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 dma_mask)
+{
+       BUG();
+       return 0;
+}
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flag)
@@ -176,6 +192,52 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
        BUG();
 }
 
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+              enum dma_data_direction direction)
+{
+       BUG();
+       return 0;
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+                enum dma_data_direction direction)
+{
+       BUG();
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+            unsigned long offset, size_t size,
+            enum dma_data_direction direction)
+{
+       BUG();
+       return 0;
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+              enum dma_data_direction direction)
+{
+       BUG();
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+          enum dma_data_direction direction)
+{
+       BUG();
+       return 0;
+}
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+            enum dma_data_direction direction)
+{
+       BUG();
+}
+
 static inline void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
@@ -190,6 +252,27 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t siz
        BUG();
 }
 
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+                   enum dma_data_direction direction)
+{
+       BUG();
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+                      enum dma_data_direction direction)
+{
+       BUG();
+}
+
+static inline int
+dma_mapping_error(dma_addr_t dma_addr)
+{
+       BUG();
+       return 0;
+}
+
 #endif /* PCI */
 
 
index 4a43075a0619c5b5ccca6bdb4fc2516b39ed269e..5c2f9d4b9f06070813dad3cfe907aca906bbe080 100644 (file)
@@ -2798,6 +2798,11 @@ struct hv_mmu_statistics {
  */
 #define HV_FAST_MMUSTAT_INFO           0x103
 
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_mmustat_conf(unsigned long ra, unsigned long *orig_ra);
+extern unsigned long sun4v_mmustat_info(unsigned long *ra);
+#endif
+
 /* NCS crypto services  */
 
 /* ncs_request() sub-function numbers */
index f76e1492add5f34aebfb331a86de57aef81083ef..4fb8c4bfb84869bf58c302f811bdb2857aecd69d 100644 (file)
@@ -33,6 +33,8 @@ extern cpumask_t phys_cpu_present_map;
 #define cpu_possible_map phys_cpu_present_map
 
 extern cpumask_t cpu_sibling_map[NR_CPUS];
+extern cpumask_t cpu_core_map[NR_CPUS];
+extern int sparc64_multi_core;
 
 /*
  *     General functions that each host system must provide.
index e0d450d600ec2a7890cc7bca91572f8e56fd472a..290ac75f385bee443a24eb9bba11fb70783edc46 100644 (file)
@@ -1,12 +1,17 @@
 #ifndef _ASM_SPARC64_TOPOLOGY_H
 #define _ASM_SPARC64_TOPOLOGY_H
 
-#include <asm/spitfire.h>
-#define smt_capable()  (tlb_type == hypervisor)
+#ifdef CONFIG_SMP
+#define topology_physical_package_id(cpu)      (cpu_data(cpu).proc_id)
+#define topology_core_id(cpu)                  (cpu_data(cpu).core_id)
+#define topology_core_siblings(cpu)            (cpu_core_map[cpu])
+#define topology_thread_siblings(cpu)          (cpu_sibling_map[cpu])
+#define mc_capable()                           (sparc64_multi_core)
+#define smt_capable()                          (sparc64_multi_core)
+#endif /* CONFIG_SMP */
 
 #include <asm-generic/topology.h>
 
-#define topology_core_id(cpu)                  (cpu_data(cpu).core_id)
-#define topology_thread_siblings(cpu)          (cpu_sibling_map[cpu])
+#define cpu_coregroup_map(cpu)                 (cpu_core_map[cpu])
 
 #endif /* _ASM_SPARC64_TOPOLOGY_H */
index 7cf0c54a46a7fff64365b3184ea492d572dac9b0..b3ae77cccbb6723499e094ec01cd4660607fcee7 100644 (file)
@@ -938,6 +938,7 @@ struct super_block {
        struct list_head        s_files;
 
        struct block_device     *s_bdev;
+       struct mtd_info         *s_mtd;
        struct list_head        s_instances;
        struct quota_info       s_dquot;        /* Diskquota specific options */
 
index 85f7b1bd1482ae7094c1d00eeec00b72b2b88d5f..a6a3113120a482f1174d50bcb00c9e7dab522ca6 100644 (file)
@@ -171,7 +171,6 @@ enum {
        ATA_FLAG_SKIP_D2H_BSY   = (1 << 12), /* can't wait for the first D2H
                                              * Register FIS clearing BSY */
        ATA_FLAG_DEBUGMSG       = (1 << 13),
-       ATA_FLAG_SETXFER_POLLING= (1 << 14), /* use polling for SETXFER */
        ATA_FLAG_IGN_SIMPLEX    = (1 << 15), /* ignore SIMPLEX */
        ATA_FLAG_NO_IORDY       = (1 << 16), /* controller lacks iordy */
        ATA_FLAG_ACPI_SATA      = (1 << 17), /* need native SATA ACPI layout */
diff --git a/include/linux/mtd/super.h b/include/linux/mtd/super.h
new file mode 100644 (file)
index 0000000..4016dd6
--- /dev/null
@@ -0,0 +1,30 @@
+/* MTD-based superblock handling
+ *
+ * Copyright © 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __MTD_SUPER_H__
+#define __MTD_SUPER_H__
+
+#ifdef __KERNEL__
+
+#include <linux/mtd/mtd.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+
+extern int get_sb_mtd(struct file_system_type *fs_type, int flags,
+                     const char *dev_name, void *data,
+                     int (*fill_super)(struct super_block *, void *, int),
+                     struct vfsmount *mnt);
+extern void kill_mtd_super(struct super_block *sb);
+
+
+#endif /* __KERNEL__ */
+
+#endif /* __MTD_SUPER_H__ */
index c0398f5a8cb98667d77f979ac863ed2a7822ad10..65f49fd7deff1620c3f6fdc9102c717f8f2dedae 100644 (file)
@@ -62,13 +62,11 @@ struct unix_skb_parms {
 #define UNIXCREDS(skb) (&UNIXCB((skb)).creds)
 #define UNIXSID(skb)   (&UNIXCB((skb)).secid)
 
-#define unix_state_rlock(s)    spin_lock(&unix_sk(s)->lock)
-#define unix_state_runlock(s)  spin_unlock(&unix_sk(s)->lock)
-#define unix_state_wlock(s)    spin_lock(&unix_sk(s)->lock)
-#define unix_state_wlock_nested(s) \
+#define unix_state_lock(s)     spin_lock(&unix_sk(s)->lock)
+#define unix_state_unlock(s)   spin_unlock(&unix_sk(s)->lock)
+#define unix_state_lock_nested(s) \
                                spin_lock_nested(&unix_sk(s)->lock, \
                                SINGLE_DEPTH_NESTING)
-#define unix_state_wunlock(s)  spin_unlock(&unix_sk(s)->lock)
 
 #ifdef __KERNEL__
 /* The AF_UNIX socket */
index acdfc0549c6fd88c254f8d07009edcc123804bfb..fe590e00db8df24823280b0bbf94a1daead4cfb5 100644 (file)
@@ -105,7 +105,11 @@ static int recalc_sigpending_tsk(struct task_struct *t)
                set_tsk_thread_flag(t, TIF_SIGPENDING);
                return 1;
        }
-       clear_tsk_thread_flag(t, TIF_SIGPENDING);
+       /*
+        * We must never clear the flag in another thread, or in current
+        * when it's possible the current syscall is returning -ERESTART*.
+        * So we don't clear it here; clearing is left to callers that know it is safe.
+        */
        return 0;
 }
 
@@ -121,7 +125,9 @@ void recalc_sigpending_and_wake(struct task_struct *t)
 
 void recalc_sigpending(void)
 {
-       recalc_sigpending_tsk(current);
+       if (!recalc_sigpending_tsk(current))
+               clear_thread_flag(TIF_SIGPENDING);
+
 }
 
 /* Given the mask, find the first available signal that should be serviced. */
@@ -385,7 +391,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
                        }
                }
        }
-       recalc_sigpending_tsk(tsk);
+       if (likely(tsk == current))
+               recalc_sigpending();
        if (signr && unlikely(sig_kernel_stop(signr))) {
                /*
                 * Set a marker that we have dequeued a stop signal.  Our
@@ -1580,8 +1587,9 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
        /*
         * Queued signals ignored us while we were stopped for tracing.
         * So check for any that we should take before resuming user mode.
+        * This sets TIF_SIGPENDING, but never clears it.
         */
-       recalc_sigpending();
+       recalc_sigpending_tsk(current);
 }
 
 void ptrace_notify(int exit_code)
index ceef57c9ab3260737e4909ce83c1eddfb529b7e4..de78c9dd713bbfb0bb0942bea0f98bd5ffb6179a 100644 (file)
@@ -736,8 +736,7 @@ static int vlan_ioctl_handler(void __user *arg)
        case SET_VLAN_NAME_TYPE_CMD:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
-               if ((args.u.name_type >= 0) &&
-                   (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
+               if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
                        vlan_name_type = args.u.name_type;
                        err = 0;
                } else {
index 7e51d3a5e4f6c5af74f0e782e28acdcdb08c4d57..c14ce0198d25d7be42762ee3dae5df76ba9ec8d6 100644 (file)
@@ -998,7 +998,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
        __sk_dst_set(sk, dst);
        sk->sk_route_caps = dst->dev->features;
        if (sk->sk_route_caps & NETIF_F_GSO)
-               sk->sk_route_caps |= NETIF_F_GSO_MASK;
+               sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
        if (sk_can_gso(sk)) {
                if (dst->header_len)
                        sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
index 1f5e3ba62065e76bf8c48b39b7a7a6a174800919..43a3adb027e7593ba9b15b366f3ae7d1b7843325 100644 (file)
@@ -128,7 +128,7 @@ static ssize_t dccpprobe_read(struct file *file, char __user *buf,
        int error = 0, cnt = 0;
        unsigned char *tbuf;
 
-       if (!buf || len < 0)
+       if (!buf)
                return -EINVAL;
 
        if (len == 0)
index dd02a45d0f675945c8c85799ac99b4fbea4c9c04..0301dd468cf47638fdd7d434c35ac44a7b96cae6 100644 (file)
@@ -50,8 +50,12 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                               RT_CONN_FLAGS(sk), oif,
                               sk->sk_protocol,
                               inet->sport, usin->sin_port, sk, 1);
-       if (err)
+       if (err) {
+               if (err == -ENETUNREACH)
+                       IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
                return err;
+       }
+
        if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) {
                ip_rt_put(rt);
                return -EACCES;
index e238b17f554cc150e36e1430f238af194f9e9b55..02a899bec1969fcb6511722d41fe20009b6f2dbc 100644 (file)
@@ -514,12 +514,15 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 
        saddr = iph->daddr;
        if (!(rt->rt_flags & RTCF_LOCAL)) {
-               /* This is broken, skb_in->dev points to the outgoing device
-                * after the packet passes through ip_output().
-                */
-               if (skb_in->dev && sysctl_icmp_errors_use_inbound_ifaddr)
-                       saddr = inet_select_addr(skb_in->dev, 0, RT_SCOPE_LINK);
-               else
+               struct net_device *dev = NULL;
+
+               if (rt->fl.iif && sysctl_icmp_errors_use_inbound_ifaddr)
+                       dev = dev_get_by_index(rt->fl.iif);
+
+               if (dev) {
+                       saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
+                       dev_put(dev);
+               } else
                        saddr = 0;
        }
 
index 43fb1600f1f03904846bc3d53a98c73e0f729a61..fbe7714f21d08593beecb3945f2f85848b506be8 100644 (file)
@@ -31,10 +31,8 @@ EXPORT_SYMBOL(inet_csk_timer_bug_msg);
 
 /*
  * This array holds the first and last local port number.
- * For high-usage systems, use sysctl to change this to
- * 32768-61000
  */
-int sysctl_local_port_range[2] = { 1024, 4999 };
+int sysctl_local_port_range[2] = { 32768, 61000 };
 
 int inet_csk_bind_conflict(const struct sock *sk,
                           const struct inet_bind_bucket *tb)
index 766314505c096b80ede892a483b5df9d35239da0..cd3c7e95de9e07296de8583fdc5b3df73aa45cdf 100644 (file)
@@ -2464,13 +2464,10 @@ void __init tcp_init(void)
                        order++)
                ;
        if (order >= 4) {
-               sysctl_local_port_range[0] = 32768;
-               sysctl_local_port_range[1] = 61000;
                tcp_death_row.sysctl_max_tw_buckets = 180000;
                sysctl_tcp_max_orphans = 4096 << (order - 4);
                sysctl_max_syn_backlog = 1024;
        } else if (order < 3) {
-               sysctl_local_port_range[0] = 1024 * (3 - order);
                tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
                sysctl_tcp_max_orphans >>= (3 - order);
                sysctl_max_syn_backlog = 128;
index 38cb25b48bf3b9961e1ba737db9ea194c448c3b1..74683d81c3f18f8c3450b3100ff89373179ea101 100644 (file)
@@ -2407,8 +2407,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
        struct sk_buff *skb;
        __u32 now = tcp_time_stamp;
        int acked = 0;
+       int prior_packets = tp->packets_out;
        __s32 seq_rtt = -1;
-       u32 pkts_acked = 0;
        ktime_t last_ackt = ktime_set(0,0);
 
        while ((skb = tcp_write_queue_head(sk)) &&
@@ -2437,7 +2437,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
                 */
                if (!(scb->flags & TCPCB_FLAG_SYN)) {
                        acked |= FLAG_DATA_ACKED;
-                       ++pkts_acked;
                } else {
                        acked |= FLAG_SYN_ACKED;
                        tp->retrans_stamp = 0;
@@ -2481,6 +2480,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
        }
 
        if (acked&FLAG_ACKED) {
+               u32 pkts_acked = prior_packets - tp->packets_out;
                const struct tcp_congestion_ops *ca_ops
                        = inet_csk(sk)->icsk_ca_ops;
 
index 5a3e7f839fc52f838122a88a40209566ce2e3ab7..47c61055eb601ae73a68b899fdccf5bb8e1921b2 100644 (file)
@@ -192,8 +192,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                               RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
                               IPPROTO_TCP,
                               inet->sport, usin->sin_port, sk, 1);
-       if (tmp < 0)
+       if (tmp < 0) {
+               if (tmp == -ENETUNREACH)
+                       IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
                return tmp;
+       }
 
        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                ip_rt_put(rt);
index 4c7e95fa090d181234e3dbb1a2d1934a259c317f..5da703e699da243afcf6e660d0e6a3df09979981 100644 (file)
@@ -722,8 +722,11 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                                                 .dport = dport } } };
                security_sk_classify_flow(sk, &fl);
                err = ip_route_output_flow(&rt, &fl, sk, 1);
-               if (err)
+               if (err) {
+                       if (err == -ENETUNREACH)
+                               IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
                        goto out;
+               }
 
                err = -EACCES;
                if ((rt->rt_flags & RTCF_BROADCAST) &&
index 6d2a0820511104994dc90a08b4a14b8b52759741..dc442fb791b0c2c7dcc58a2c8d45eca3dc76ff4a 100644 (file)
@@ -177,8 +177,7 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
 
        protoff = nf_ct_ipv6_skip_exthdr(*pskb, extoff, &pnum,
                                         (*pskb)->len - extoff);
-       if (protoff < 0 || protoff > (*pskb)->len ||
-           pnum == NEXTHDR_FRAGMENT) {
+       if (protoff > (*pskb)->len || pnum == NEXTHDR_FRAGMENT) {
                DEBUGP("proto header not found\n");
                return NF_ACCEPT;
        }
index 0be790d250f9c239f6f1977927fd53b3604537e4..8814b95b232630604eb4e568d7771bcda8e47cb6 100644 (file)
@@ -168,8 +168,7 @@ icmpv6_error_message(struct sk_buff *skb,
                                           skb->len - inip6off
                                                    - sizeof(struct ipv6hdr));
 
-       if ((inprotoff < 0) || (inprotoff > skb->len) ||
-           (inprotonum == NEXTHDR_FRAGMENT)) {
+       if ((inprotoff > skb->len) || (inprotonum == NEXTHDR_FRAGMENT)) {
                DEBUGP("icmpv6_error: Can't get protocol header in ICMPv6 payload.\n");
                return -NF_ACCEPT;
        }
index 45b3cda86a21413e6103c3c18ab9ca0116f407f8..6f8684b5617e5653e2fe5bcfd09aee084c7d9134 100644 (file)
@@ -164,8 +164,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
                                printk("offset must be on 32 bit boundaries\n");
                                goto bad;
                        }
-                       if (skb->len < 0 ||
-                           (offset > 0 && offset > skb->len)) {
+                       if (offset > 0 && offset > skb->len) {
                                printk("offset %d cant exceed pkt length %d\n",
                                       offset, skb->len);
                                goto bad;
index cbefe225581e0b203d68b087653e81f18972dcd8..f4d34480a093d126061920e6f53e7b4fc8dfe83b 100644 (file)
@@ -224,7 +224,8 @@ void __netdev_watchdog_up(struct net_device *dev)
        if (dev->tx_timeout) {
                if (dev->watchdog_timeo <= 0)
                        dev->watchdog_timeo = 5*HZ;
-               if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
+               if (!mod_timer(&dev->watchdog_timer,
+                              round_jiffies(jiffies + dev->watchdog_timeo)))
                        dev_hold(dev);
        }
 }
index e8c0f7435d7f68be0eefd21b1baac4f1a05b1636..80f70aa533867317e5dcbdbadc7333d29c519901 100644 (file)
@@ -77,8 +77,6 @@ static const char *sctp_cid_tbl[SCTP_NUM_BASE_CHUNK_TYPES] = {
 /* Lookup "chunk type" debug name. */
 const char *sctp_cname(const sctp_subtype_t cid)
 {
-       if (cid.chunk < 0)
-               return "illegal chunk id";
        if (cid.chunk <= SCTP_CID_BASE_MAX)
                return sctp_cid_tbl[cid.chunk];
 
@@ -146,8 +144,6 @@ static const char *sctp_primitive_tbl[SCTP_NUM_PRIMITIVE_TYPES] = {
 /* Lookup primitive debug name. */
 const char *sctp_pname(const sctp_subtype_t id)
 {
-       if (id.primitive < 0)
-               return "illegal primitive";
        if (id.primitive <= SCTP_EVENT_PRIMITIVE_MAX)
                return sctp_primitive_tbl[id.primitive];
        return "unknown_primitive";
@@ -161,8 +157,6 @@ static const char *sctp_other_tbl[] = {
 /* Lookup "other" debug name. */
 const char *sctp_oname(const sctp_subtype_t id)
 {
-       if (id.other < 0)
-               return "illegal 'other' event";
        if (id.other <= SCTP_EVENT_OTHER_MAX)
                return sctp_other_tbl[id.other];
        return "unknown 'other' event";
@@ -184,8 +178,6 @@ static const char *sctp_timer_tbl[] = {
 /* Lookup timer debug name. */
 const char *sctp_tname(const sctp_subtype_t id)
 {
-       if (id.timeout < 0)
-               return "illegal 'timer' event";
        if (id.timeout <= SCTP_EVENT_TIMEOUT_MAX)
                return sctp_timer_tbl[id.timeout];
        return "unknown_timer";
index 523071c7902fbb5a163f1418c17e85fcb0d4f503..70a91ece3c49444de91b41b7db7f4b486a16e7de 100644 (file)
@@ -960,7 +960,7 @@ static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
        if (state > SCTP_STATE_MAX)
                return &bug;
 
-       if (cid >= 0 && cid <= SCTP_CID_BASE_MAX)
+       if (cid <= SCTP_CID_BASE_MAX)
                return &chunk_event_table[cid][state];
 
        if (sctp_prsctp_enable) {
index fc12ba51c1fc8223672c744e2c3eb06239741303..87c794d8fa2d55a272934e72330abc346335e5e5 100644 (file)
@@ -174,11 +174,11 @@ static struct sock *unix_peer_get(struct sock *s)
 {
        struct sock *peer;
 
-       unix_state_rlock(s);
+       unix_state_lock(s);
        peer = unix_peer(s);
        if (peer)
                sock_hold(peer);
-       unix_state_runlock(s);
+       unix_state_unlock(s);
        return peer;
 }
 
@@ -369,7 +369,7 @@ static int unix_release_sock (struct sock *sk, int embrion)
        unix_remove_socket(sk);
 
        /* Clear state */
-       unix_state_wlock(sk);
+       unix_state_lock(sk);
        sock_orphan(sk);
        sk->sk_shutdown = SHUTDOWN_MASK;
        dentry       = u->dentry;
@@ -378,7 +378,7 @@ static int unix_release_sock (struct sock *sk, int embrion)
        u->mnt       = NULL;
        state = sk->sk_state;
        sk->sk_state = TCP_CLOSE;
-       unix_state_wunlock(sk);
+       unix_state_unlock(sk);
 
        wake_up_interruptible_all(&u->peer_wait);
 
@@ -386,12 +386,12 @@ static int unix_release_sock (struct sock *sk, int embrion)
 
        if (skpair!=NULL) {
                if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
-                       unix_state_wlock(skpair);
+                       unix_state_lock(skpair);
                        /* No more writes */
                        skpair->sk_shutdown = SHUTDOWN_MASK;
                        if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
                                skpair->sk_err = ECONNRESET;
-                       unix_state_wunlock(skpair);
+                       unix_state_unlock(skpair);
                        skpair->sk_state_change(skpair);
                        read_lock(&skpair->sk_callback_lock);
                        sk_wake_async(skpair,1,POLL_HUP);
@@ -448,7 +448,7 @@ static int unix_listen(struct socket *sock, int backlog)
        err = -EINVAL;
        if (!u->addr)
                goto out;                       /* No listens on an unbound socket */
-       unix_state_wlock(sk);
+       unix_state_lock(sk);
        if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
                goto out_unlock;
        if (backlog > sk->sk_max_ack_backlog)
@@ -462,7 +462,7 @@ static int unix_listen(struct socket *sock, int backlog)
        err = 0;
 
 out_unlock:
-       unix_state_wunlock(sk);
+       unix_state_unlock(sk);
 out:
        return err;
 }
@@ -858,6 +858,31 @@ out_mknod_parent:
        goto out_up;
 }
 
+static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
+{
+       if (unlikely(sk1 == sk2) || !sk2) {
+               unix_state_lock(sk1);
+               return;
+       }
+       if (sk1 < sk2) {
+               unix_state_lock(sk1);
+               unix_state_lock_nested(sk2);
+       } else {
+               unix_state_lock(sk2);
+               unix_state_lock_nested(sk1);
+       }
+}
+
+static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
+{
+       if (unlikely(sk1 == sk2) || !sk2) {
+               unix_state_unlock(sk1);
+               return;
+       }
+       unix_state_unlock(sk1);
+       unix_state_unlock(sk2);
+}
+
 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
                              int alen, int flags)
 {
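
The hunk above adds unix_state_double_lock()/unix_state_double_unlock(); together with the conversion of unix_state_rlock()/unix_state_wlock() to a single per-socket unix_state_lock() throughout this file, it lets unix_dgram_connect() hold both sockets' locks at once without deadlocking against a caller that locks the same pair in the opposite order, because the lower-addressed socket is always locked first. The sketch below restates that ordering idea with generic spinlocks; lock_pair() is an assumed name, not a helper from this patch:

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	/* Lock two objects in a fixed order - lowest address first - so
	 * callers passing the same pair in either order cannot deadlock.
	 */
	static void lock_pair(spinlock_t *a, spinlock_t *b)
	{
		if (a == b) {
			spin_lock(a);
			return;
		}
		if (a < b) {
			spin_lock(a);
			spin_lock_nested(b, SINGLE_DEPTH_NESTING);
		} else {
			spin_lock(b);
			spin_lock_nested(a, SINGLE_DEPTH_NESTING);
		}
	}
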
@@ -877,11 +902,19 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
                    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
                        goto out;
 
+restart:
                other=unix_find_other(sunaddr, alen, sock->type, hash, &err);
                if (!other)
                        goto out;
 
-               unix_state_wlock(sk);
+               unix_state_double_lock(sk, other);
+
+               /* Apparently VFS overslept socket death. Retry. */
+               if (sock_flag(other, SOCK_DEAD)) {
+                       unix_state_double_unlock(sk, other);
+                       sock_put(other);
+                       goto restart;
+               }
 
                err = -EPERM;
                if (!unix_may_send(sk, other))
@@ -896,7 +929,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
                 *      1003.1g breaking connected state with AF_UNSPEC
                 */
                other = NULL;
-               unix_state_wlock(sk);
+               unix_state_double_lock(sk, other);
        }
 
        /*
@@ -905,19 +938,19 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
        if (unix_peer(sk)) {
                struct sock *old_peer = unix_peer(sk);
                unix_peer(sk)=other;
-               unix_state_wunlock(sk);
+               unix_state_double_unlock(sk, other);
 
                if (other != old_peer)
                        unix_dgram_disconnected(sk, old_peer);
                sock_put(old_peer);
        } else {
                unix_peer(sk)=other;
-               unix_state_wunlock(sk);
+               unix_state_double_unlock(sk, other);
        }
        return 0;
 
 out_unlock:
-       unix_state_wunlock(sk);
+       unix_state_double_unlock(sk, other);
        sock_put(other);
 out:
        return err;
@@ -936,7 +969,7 @@ static long unix_wait_for_peer(struct sock *other, long timeo)
                (skb_queue_len(&other->sk_receive_queue) >
                 other->sk_max_ack_backlog);
 
-       unix_state_runlock(other);
+       unix_state_unlock(other);
 
        if (sched)
                timeo = schedule_timeout(timeo);
@@ -994,11 +1027,11 @@ restart:
                goto out;
 
        /* Latch state of peer */
-       unix_state_rlock(other);
+       unix_state_lock(other);
 
        /* Apparently VFS overslept socket death. Retry. */
        if (sock_flag(other, SOCK_DEAD)) {
-               unix_state_runlock(other);
+               unix_state_unlock(other);
                sock_put(other);
                goto restart;
        }
@@ -1048,18 +1081,18 @@ restart:
                goto out_unlock;
        }
 
-       unix_state_wlock_nested(sk);
+       unix_state_lock_nested(sk);
 
        if (sk->sk_state != st) {
-               unix_state_wunlock(sk);
-               unix_state_runlock(other);
+               unix_state_unlock(sk);
+               unix_state_unlock(other);
                sock_put(other);
                goto restart;
        }
 
        err = security_unix_stream_connect(sock, other->sk_socket, newsk);
        if (err) {
-               unix_state_wunlock(sk);
+               unix_state_unlock(sk);
                goto out_unlock;
        }
 
@@ -1096,7 +1129,7 @@ restart:
        smp_mb__after_atomic_inc();     /* sock_hold() does an atomic_inc() */
        unix_peer(sk)   = newsk;
 
-       unix_state_wunlock(sk);
+       unix_state_unlock(sk);
 
        /* take ten and send info to listening sock */
        spin_lock(&other->sk_receive_queue.lock);
@@ -1105,14 +1138,14 @@ restart:
         * is installed to listening socket. */
        atomic_inc(&newu->inflight);
        spin_unlock(&other->sk_receive_queue.lock);
-       unix_state_runlock(other);
+       unix_state_unlock(other);
        other->sk_data_ready(other, 0);
        sock_put(other);
        return 0;
 
 out_unlock:
        if (other)
-               unix_state_runlock(other);
+               unix_state_unlock(other);
 
 out:
        if (skb)
@@ -1178,10 +1211,10 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
        wake_up_interruptible(&unix_sk(sk)->peer_wait);
 
        /* attach accepted sock to socket */
-       unix_state_wlock(tsk);
+       unix_state_lock(tsk);
        newsock->state = SS_CONNECTED;
        sock_graft(tsk, newsock);
-       unix_state_wunlock(tsk);
+       unix_state_unlock(tsk);
        return 0;
 
 out:
@@ -1208,7 +1241,7 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_
        }
 
        u = unix_sk(sk);
-       unix_state_rlock(sk);
+       unix_state_lock(sk);
        if (!u->addr) {
                sunaddr->sun_family = AF_UNIX;
                sunaddr->sun_path[0] = 0;
@@ -1219,7 +1252,7 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_
                *uaddr_len = addr->len;
                memcpy(sunaddr, addr->name, *uaddr_len);
        }
-       unix_state_runlock(sk);
+       unix_state_unlock(sk);
        sock_put(sk);
 out:
        return err;
@@ -1337,7 +1370,7 @@ restart:
                        goto out_free;
        }
 
-       unix_state_rlock(other);
+       unix_state_lock(other);
        err = -EPERM;
        if (!unix_may_send(sk, other))
                goto out_unlock;
@@ -1347,20 +1380,20 @@ restart:
                 *      Check with 1003.1g - what should
                 *      datagram error
                 */
-               unix_state_runlock(other);
+               unix_state_unlock(other);
                sock_put(other);
 
                err = 0;
-               unix_state_wlock(sk);
+               unix_state_lock(sk);
                if (unix_peer(sk) == other) {
                        unix_peer(sk)=NULL;
-                       unix_state_wunlock(sk);
+                       unix_state_unlock(sk);
 
                        unix_dgram_disconnected(sk, other);
                        sock_put(other);
                        err = -ECONNREFUSED;
                } else {
-                       unix_state_wunlock(sk);
+                       unix_state_unlock(sk);
                }
 
                other = NULL;
@@ -1397,14 +1430,14 @@ restart:
        }
 
        skb_queue_tail(&other->sk_receive_queue, skb);
-       unix_state_runlock(other);
+       unix_state_unlock(other);
        other->sk_data_ready(other, len);
        sock_put(other);
        scm_destroy(siocb->scm);
        return len;
 
 out_unlock:
-       unix_state_runlock(other);
+       unix_state_unlock(other);
 out_free:
        kfree_skb(skb);
 out:
@@ -1494,14 +1527,14 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
                        goto out_err;
                }
 
-               unix_state_rlock(other);
+               unix_state_lock(other);
 
                if (sock_flag(other, SOCK_DEAD) ||
                    (other->sk_shutdown & RCV_SHUTDOWN))
                        goto pipe_err_free;
 
                skb_queue_tail(&other->sk_receive_queue, skb);
-               unix_state_runlock(other);
+               unix_state_unlock(other);
                other->sk_data_ready(other, size);
                sent+=size;
        }
@@ -1512,7 +1545,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
        return sent;
 
 pipe_err_free:
-       unix_state_runlock(other);
+       unix_state_unlock(other);
        kfree_skb(skb);
 pipe_err:
        if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL))
@@ -1641,7 +1674,7 @@ static long unix_stream_data_wait(struct sock * sk, long timeo)
 {
        DEFINE_WAIT(wait);
 
-       unix_state_rlock(sk);
+       unix_state_lock(sk);
 
        for (;;) {
                prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
@@ -1654,14 +1687,14 @@ static long unix_stream_data_wait(struct sock * sk, long timeo)
                        break;
 
                set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
-               unix_state_runlock(sk);
+               unix_state_unlock(sk);
                timeo = schedule_timeout(timeo);
-               unix_state_rlock(sk);
+               unix_state_lock(sk);
                clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        }
 
        finish_wait(sk->sk_sleep, &wait);
-       unix_state_runlock(sk);
+       unix_state_unlock(sk);
        return timeo;
 }
 
@@ -1816,12 +1849,12 @@ static int unix_shutdown(struct socket *sock, int mode)
        mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
 
        if (mode) {
-               unix_state_wlock(sk);
+               unix_state_lock(sk);
                sk->sk_shutdown |= mode;
                other=unix_peer(sk);
                if (other)
                        sock_hold(other);
-               unix_state_wunlock(sk);
+               unix_state_unlock(sk);
                sk->sk_state_change(sk);
 
                if (other &&
@@ -1833,9 +1866,9 @@ static int unix_shutdown(struct socket *sock, int mode)
                                peer_mode |= SEND_SHUTDOWN;
                        if (mode&SEND_SHUTDOWN)
                                peer_mode |= RCV_SHUTDOWN;
-                       unix_state_wlock(other);
+                       unix_state_lock(other);
                        other->sk_shutdown |= peer_mode;
-                       unix_state_wunlock(other);
+                       unix_state_unlock(other);
                        other->sk_state_change(other);
                        read_lock(&other->sk_callback_lock);
                        if (peer_mode == SHUTDOWN_MASK)
@@ -1973,7 +2006,7 @@ static int unix_seq_show(struct seq_file *seq, void *v)
        else {
                struct sock *s = v;
                struct unix_sock *u = unix_sk(s);
-               unix_state_rlock(s);
+               unix_state_lock(s);
 
                seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
                        s,
@@ -2001,7 +2034,7 @@ static int unix_seq_show(struct seq_file *seq, void *v)
                        for ( ; i < len; i++)
                                seq_putc(seq, u->addr->name->sun_path[i]);
                }
-               unix_state_runlock(s);
+               unix_state_unlock(s);
                seq_putc(seq, '\n');
        }
 
index 7a19e0ede2891238b910ecae4f416cfd6c6b49a7..849cc06bd9141a62da44061a230e7bc70c472761 100644 (file)
@@ -454,7 +454,7 @@ static int wanrouter_device_setup(struct wan_device *wandev,
        }
 
        if (conf->data_size && conf->data) {
-               if (conf->data_size > 128000 || conf->data_size < 0) {
+               if (conf->data_size > 128000) {
                        printk(KERN_INFO
                            "%s: ERROR, Invalid firmware data size %i !\n",
                                        wandev->name, conf->data_size);
old mode 100644 (file)
new mode 100755 (executable)