Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
author Linus Torvalds <torvalds@woody.osdl.org>
Wed, 10 Jan 2007 03:48:15 +0000 (19:48 -0800)
committer Linus Torvalds <torvalds@woody.osdl.org>
Wed, 10 Jan 2007 03:48:15 +0000 (19:48 -0800)
* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IB/mthca: Don't execute QUERY_QP firmware command for QP in RESET state
  IB/ehca: Use proper GFP_ flags for get_zeroed_page()
  IB/mthca: Fix PRM compliance problem in atomic-send completions
  RDMA/ucma: Don't report events with invalid user context
  RDMA/ucma: Fix struct ucma_event leak when backlog is full
  RDMA/iwcm: iWARP connection timeouts shouldn't be reported as rejects
  IB/iser: Return error code when PDUs may not be sent
  IB/mthca: Fix off-by-one in FMR handling on memfree

190 files changed:
Documentation/cachetlb.txt
Documentation/feature-removal-schedule.txt
Documentation/powerpc/mpc52xx-device-tree-bindings.txt
Documentation/usb/acm.txt
Documentation/x86_64/boot-options.txt
MAINTAINERS
Makefile
arch/arm/kernel/entry-armv.S
arch/arm/kernel/time.c
arch/arm/kernel/traps.c
arch/arm/mm/flush.c
arch/i386/Kconfig
arch/i386/kernel/cpu/common.c
arch/i386/kernel/smpboot.c
arch/i386/kernel/trampoline.S
arch/mips/kernel/mips_ksyms.c
arch/mips/kernel/time.c
arch/mips/lib/Makefile
arch/mips/lib/csum_partial.S
arch/mips/lib/csum_partial_copy.c [deleted file]
arch/mips/mips-boards/generic/time.c
arch/mips/mips-boards/malta/malta_mtd.c [new file with mode: 0644]
arch/mips/mips-boards/sead/sead_int.c
arch/mips/mm/pg-r4k.c
arch/mips/pci/ops-pnx8550.c
arch/mips/philips/pnx8550/common/time.c
arch/powerpc/Kconfig
arch/powerpc/boot/dts/lite5200.dts
arch/powerpc/boot/dts/lite5200b.dts
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/platforms/52xx/lite5200.c
arch/powerpc/platforms/52xx/mpc52xx_common.c
arch/powerpc/platforms/iseries/lpevents.c
arch/powerpc/platforms/iseries/mf.c
arch/powerpc/platforms/iseries/proc.c
arch/powerpc/platforms/iseries/setup.c
arch/powerpc/platforms/iseries/viopath.c
arch/powerpc/platforms/maple/setup.c
arch/powerpc/platforms/pasemi/setup.c
arch/powerpc/platforms/pseries/hvCall.S
arch/powerpc/platforms/pseries/hvCall_inst.c
arch/powerpc/platforms/pseries/xics.c
arch/powerpc/sysdev/Makefile
arch/s390/kernel/head31.S
arch/s390/kernel/head64.S
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/lib/uaccess_pt.c
arch/s390/lib/uaccess_std.c
arch/x86_64/kernel/early-quirks.c
arch/x86_64/kernel/io_apic.c
drivers/acpi/toshiba_acpi.c
drivers/ata/Kconfig
drivers/ata/pata_hpt37x.c
drivers/block/pktcdvd.c
drivers/bluetooth/hci_usb.c
drivers/char/ip2/i2ellis.h
drivers/connector/cn_proc.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-mv64xxx.c
drivers/i2c/busses/i2c-pnx.c
drivers/i2c/chips/m41t00.c
drivers/i2c/i2c-core.c
drivers/ide/pci/atiixp.c
drivers/ide/pci/via82cxxx.c
drivers/kvm/kvm.h
drivers/kvm/kvm_main.c
drivers/kvm/mmu.c
drivers/kvm/paging_tmpl.h
drivers/kvm/svm.c
drivers/kvm/vmx.c
drivers/kvm/x86_emulate.c
drivers/leds/leds-s3c24xx.c
drivers/macintosh/via-pmu.c
drivers/mmc/mmci.c
drivers/net/Space.c
drivers/net/bnx2.c
drivers/net/chelsio/my3126.c
drivers/net/e1000/e1000_main.c
drivers/net/forcedeth.c
drivers/net/ifb.c
drivers/net/ixgb/ixgb.h
drivers/net/ixgb/ixgb_ethtool.c
drivers/net/ixgb/ixgb_hw.c
drivers/net/ixgb/ixgb_main.c
drivers/net/loopback.c
drivers/net/pcmcia/pcnet_cs.c
drivers/net/qla3xxx.c
drivers/net/sungem.c
drivers/net/sungem_phy.c
drivers/net/sungem_phy.h
drivers/net/tg3.c
drivers/net/tg3.h
drivers/net/wireless/ipw2100.c
drivers/pci/Kconfig
drivers/pci/quirks.c
drivers/pci/search.c
drivers/rtc/rtc-at91rm9200.c
drivers/rtc/rtc-rs5c372.c
drivers/s390/char/vmcp.c
drivers/s390/cio/cio.c
drivers/s390/net/Kconfig
drivers/s390/net/qeth.h
drivers/s390/net/qeth_main.c
drivers/serial/mpc52xx_uart.c
drivers/usb/class/usblp.c
drivers/usb/core/endpoint.c
drivers/usb/gadget/omap_udc.c
drivers/usb/gadget/omap_udc.h
drivers/usb/host/uhci-hcd.c
drivers/usb/misc/sisusbvga/sisusb_con.c
drivers/usb/net/asix.c
drivers/usb/serial/Kconfig
drivers/usb/serial/option.c
drivers/usb/storage/unusual_devs.h
fs/adfs/dir_f.c
fs/bad_inode.c
fs/binfmt_elf.c
fs/ufs/balloc.c
fs/ufs/inode.c
include/asm-arm/arch-iop32x/iop32x.h
include/asm-arm/cacheflush.h
include/asm-arm/hardware/iop3xx.h
include/asm-i386/boot.h
include/asm-mips/checksum.h
include/asm-mips/irq.h
include/asm-parisc/cacheflush.h
include/asm-powerpc/bug.h
include/asm-powerpc/hvcall.h
include/asm-powerpc/mpc52xx.h
include/asm-s390/futex.h
include/linux/highmem.h
include/linux/kvm.h
include/linux/magic.h
include/linux/swap.h
include/net/ieee80211.h
include/net/tcp.h
include/net/x25.h
include/sound/version.h
init/main.c
kernel/module.c
kernel/params.c
kernel/power/swap.c
kernel/power/user.c
kernel/profile.c
mm/memory.c
mm/oom_kill.c
mm/page_alloc.c
mm/slab.c
mm/swapfile.c
mm/vmscan.c
net/bluetooth/cmtp/capi.c
net/bluetooth/hci_sysfs.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/rfcomm/tty.c
net/bridge/netfilter/ebtables.c
net/core/pktgen.c
net/ipv4/af_inet.c
net/ipv4/devinet.c
net/ipv4/netfilter.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_MASQUERADE.c
net/ipv4/tcp_ipv4.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/netfilter/Kconfig
net/netfilter/nf_conntrack_netbios_ns.c
net/netfilter/xt_hashlimit.c
net/netlabel/netlabel_cipso_v4.c
net/netlink/af_netlink.c
net/x25/af_x25.c
net/x25/x25_facilities.c
net/xfrm/xfrm_user.c
scripts/kconfig/qconf.cc
scripts/kconfig/qconf.h
security/selinux/ss/context.h
security/selinux/ss/mls.c
security/selinux/ss/mls.h
security/selinux/ss/services.c
sound/pci/cmipci.c
sound/pci/echoaudio/midi.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/sparc/cs4231.c
sound/usb/usbaudio.c
sound/usb/usbmixer.c

index 73e794f0ff0924e2432e305cb5520345f776160b..debf6813934af05e878863c4a8c53bbf6ae64e62 100644 (file)
@@ -373,14 +373,15 @@ maps this page at its virtual address.
        likely that you will need to flush the instruction cache
        for copy_to_user_page().
 
-  void flush_anon_page(struct page *page, unsigned long vmaddr)
+  void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+                       unsigned long vmaddr)
        When the kernel needs to access the contents of an anonymous
        page, it calls this function (currently only
        get_user_pages()).  Note: flush_dcache_page() deliberately
        doesn't work for an anonymous page.  The default
        implementation is a nop (and should remain so for all coherent
        architectures).  For incoherent architectures, it should flush
-       the cache of the page at vmaddr in the current user process.
+       the cache of the page at vmaddr.
 
   void flush_kernel_dcache_page(struct page *page)
        When the kernel needs to modify a user page it has obtained
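
For context, the sequence the updated text (and the new ARM implementation further down) expects around an anonymous page is roughly the sketch below.  This is illustrative only, not code from this merge; in the tree itself the flush_anon_page() call sits inside get_user_pages(), and the function and variable names here are hypothetical.

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Illustrative sketch only -- not part of this merge. */
static int write_to_user_page(struct task_struct *tsk, struct mm_struct *mm,
                              unsigned long vmaddr, const void *buf, int len)
{
        struct vm_area_struct *vma;
        struct page *page;
        void *maddr;

        /*
         * get_user_pages() pins the page; for an anonymous page it now
         * calls flush_anon_page(vma, page, vmaddr) -- the new signature.
         */
        if (get_user_pages(tsk, mm, vmaddr, 1, 1, 1, &page, &vma) <= 0)
                return -EFAULT;

        maddr = kmap(page);
        /* assumes buf fits within this page */
        memcpy(maddr + (vmaddr & ~PAGE_MASK), buf, len);
        flush_dcache_page(page);        /* the writer flushes afterwards */
        kunmap(page);
        page_cache_release(page);
        return len;
}
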
index 30f3c8c9c12aeb60f66c925949be8648a0bafd91..f2024df7ebe52985461912c4bab25179c9e956b1 100644 (file)
@@ -226,6 +226,23 @@ Who:       Jean Delvare <khali@linux-fr.org>
 
 ---------------------------
 
+What:  i2c_adapter.dev
+       i2c_adapter.list
+When:  July 2007
+Why:   Superfluous, given i2c_adapter.class_dev:
+         * The "dev" was a stand-in for the physical device node that legacy
+           drivers would not have; but now it's almost always present.  Any
+           remaining legacy drivers must upgrade (they now trigger warnings).
+         * The "list" duplicates class device children.
+       The delay in removing this is so upgraded lm_sensors and libsensors
+       can get deployed.  (Removal causes minor changes in the sysfs layout,
+       notably the location of the adapter type name and parenting the i2c
+       client hardware directly from their controller.)
+Who:   Jean Delvare <khali@linux-fr.org>,
+       David Brownell <dbrownell@users.sourceforge.net>
+
+---------------------------
+
 What:  IPv4 only connection tracking/NAT/helpers
 When:  2.6.22
 Why:   The new layer 3 independent connection tracking replaces the old
index d077d764f82b0ce73148063e694f842e7ab00ff9..7fb3b8a44eb649f1060af13c474c812e181b0f22 100644 (file)
@@ -157,8 +157,8 @@ rtc@<addr>  rtc             *-rtc           Real time clock
 mscan@<addr>   mscan           *-mscan         CAN bus controller
 pci@<addr>     pci             *-pci           PCI bridge
 serial@<addr>  serial          *-psc-uart      PSC in serial mode
-i2s@<addr>     i2s             *-psc-i2s       PSC in i2s mode
-ac97@<addr>    ac97            *-psc-ac97      PSC in ac97 mode
+i2s@<addr>     sound           *-psc-i2s       PSC in i2s mode
+ac97@<addr>    sound           *-psc-ac97      PSC in ac97 mode
 spi@<addr>     spi             *-psc-spi       PSC in spi mode
 irda@<addr>    irda            *-psc-irda      PSC in IrDA mode
 spi@<addr>     spi             *-spi           MPC52xx spi device
index 737d6104c3f39fb294774fc5c16de0cb99612a58..17f5c2e1a5708bb493edd779730e5f40d0dc988d 100644 (file)
@@ -46,6 +46,10 @@ Abstract Control Model (USB CDC ACM) specification.
 
        3Com USR ISDN Pro TA
 
+  Some cell phones also connect via USB. I know the following phones work:
+
+       SonyEricsson K800i
+
   Unfortunately many modems and most ISDN TAs use proprietary interfaces and
 thus won't work with this driver. Check for ACM compliance before buying.
 
index dbdcaf68e3ea382304784bf66fce502216f1d1f1..5c86ed6f0448a8e2378a4ba62d6cedecf236215c 100644 (file)
@@ -52,6 +52,10 @@ APICs
                 apicmaintimer. Useful when your PIT timer is totally
                 broken.
 
+   disable_8254_timer / enable_8254_timer
+                Enable interrupt 0 timer routing over the 8254 in addition to over
+                the IO-APIC. The kernel tries to set a sensible default.
+
 Early Console
 
    syntax: earlyprintk=vga
index 0f6d13bd4688fc6a11a471dba16e3a1401837765..4ccc5fa06d092cf83d8034293050bd3c1973bead 100644 (file)
@@ -412,20 +412,32 @@ S:      Maintained
 ARM/INTEL IOP32X ARM ARCHITECTURE
 P:     Lennert Buytenhek
 M:     kernel@wantstofly.org
+P:     Dan Williams
+M:     dan.j.williams@intel.com
 L:     linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
-S:     Maintained
+S:     Supported
+
+ARM/INTEL IOP33X ARM ARCHITECTURE
+P:     Dan Williams
+M:     dan.j.williams@intel.com
+L:     linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+S:     Supported
 
 ARM/INTEL IOP13XX ARM ARCHITECTURE
 P:     Lennert Buytenhek
 M:     kernel@wantstofly.org
+P:     Dan Williams
+M:     dan.j.williams@intel.com
 L:     linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
-S:     Maintained
+S:     Supported
 
 ARM/INTEL IQ81342EX MACHINE SUPPORT
 P:     Lennert Buytenhek
 M:     kernel@wantstofly.org
+P:     Dan Williams
+M:     dan.j.williams@intel.com
 L:     linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
-S:     Maintained
+S:     Supported
 
 ARM/INTEL IXP2000 ARM ARCHITECTURE
 P:     Lennert Buytenhek
@@ -448,8 +460,10 @@ S: Maintained
 ARM/INTEL XSC3 (MANZANO) ARM CORE
 P:     Lennert Buytenhek
 M:     kernel@wantstofly.org
+P:     Dan Williams
+M:     dan.j.williams@intel.com
 L:     linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
-S:     Maintained
+S:     Supported
 
 ARM/IP FABRICS DOUBLE ESPRESSO MACHINE SUPPORT
 P:     Lennert Buytenhek
@@ -2579,6 +2593,12 @@ P:       Adam Belay
 M:     ambx1@neo.rr.com
 S:     Maintained
 
+PNXxxxx I2C DRIVER
+P:     Vitaly Wool
+M:     vitalywool@gmail.com
+L:     i2c@lm-sensors.org
+S:     Maintained
+
 PPP PROTOCOL DRIVERS AND COMPRESSORS
 P:     Paul Mackerras
 M:     paulus@samba.org
index 0e9eee7682888df40ad5de15c1c1f56fcd91a9e5..fb5b3ef9ab11bfa086047b3d7ef5d30718e805e7 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 20
-EXTRAVERSION =-rc3
+EXTRAVERSION =-rc4
 NAME = Homicidal Dwarf Hamster
 
 # *DOCUMENTATION*
index 2db42b18f53f060a1040ba65cf01114793535ad9..8517c3c3eb3393c8ba539add942342757c117f2b 100644 (file)
@@ -436,7 +436,7 @@ __und_usr:
        usr_entry
 
        tst     r3, #PSR_T_BIT                  @ Thumb mode?
-       bne     fpundefinstr                    @ ignore FP
+       bne     __und_usr_unknown               @ ignore FP
        sub     r4, r2, #4
 
        @
@@ -448,7 +448,7 @@ __und_usr:
        @
 1:     ldrt    r0, [r4]
        adr     r9, ret_from_exception
-       adr     lr, fpundefinstr
+       adr     lr, __und_usr_unknown
        @
        @ fallthrough to call_fpe
        @
@@ -476,7 +476,9 @@ __und_usr:
  * Emulators may wish to make use of the following registers:
  *  r0  = instruction opcode.
  *  r2  = PC+4
+ *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure.
+ *  lr  = unrecognised instruction return address
  */
 call_fpe:
        tst     r0, #0x08000000                 @ only CDP/CPRT/LDC/STC have bit 27
@@ -545,10 +547,12 @@ do_fpe:
 
        .data
 ENTRY(fp_enter)
-       .word   fpundefinstr
+       .word   no_fp
        .text
 
-fpundefinstr:
+no_fp: mov     pc, lr
+
+__und_usr_unknown:
        mov     r0, sp
        adr     lr, ret_from_exception
        b       do_undefinstr
index 6ff5e3ff6cb57a781a672b631596f924c75f3ee4..3c8cdcfe8d4a9f72c9ffb3762a72b260be85d73c 100644 (file)
@@ -29,6 +29,8 @@
 #include <linux/timer.h>
 #include <linux/irq.h>
 
+#include <linux/mc146818rtc.h>
+
 #include <asm/leds.h>
 #include <asm/thread_info.h>
 #include <asm/mach/time.h>
@@ -85,6 +87,17 @@ unsigned long long __attribute__((weak)) sched_clock(void)
        return (unsigned long long)jiffies * (1000000000 / HZ);
 }
 
+/*
+ * An implementation of printk_clock() independent from
+ * sched_clock().  This avoids non-bootable kernels when
+ * printk_clock is enabled.
+ */
+unsigned long long printk_clock(void)
+{
+       return (unsigned long long)(jiffies - INITIAL_JIFFIES) *
+                       (1000000000 / HZ);
+}
+
 static unsigned long next_rtc_update;
 
 /*
index 042a12982e980c43bb0520e289477df525a50fc2..908915675edcb3f66520c9a3765441d388ef29c0 100644 (file)
@@ -27,6 +27,7 @@
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/traps.h>
+#include <asm/io.h>
 
 #include "ptrace.h"
 #include "signal.h"
index 628348c9f6c5f17ded5fa62ec700bdb8666e85a7..9df507d36e0b9b0f0291395da5cecd007b627f0f 100644 (file)
@@ -202,3 +202,42 @@ void flush_dcache_page(struct page *page)
        }
 }
 EXPORT_SYMBOL(flush_dcache_page);
+
+/*
+ * Flush an anonymous page so that users of get_user_pages()
+ * can safely access the data.  The expected sequence is:
+ *
+ *  get_user_pages()
+ *    -> flush_anon_page
+ *  memcpy() to/from page
+ *  if written to page, flush_dcache_page()
+ */
+void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
+{
+       unsigned long pfn;
+
+       /* VIPT non-aliasing caches need do nothing */
+       if (cache_is_vipt_nonaliasing())
+               return;
+
+       /*
+        * Write back and invalidate userspace mapping.
+        */
+       pfn = page_to_pfn(page);
+       if (cache_is_vivt()) {
+               flush_cache_page(vma, vmaddr, pfn);
+       } else {
+               /*
+                * For aliasing VIPT, we can flush an alias of the
+                * userspace address only.
+                */
+               flush_pfn_alias(pfn, vmaddr);
+       }
+
+       /*
+        * Invalidate kernel mapping.  No data should be contained
+        * in this mapping of the page.  FIXME: this is overkill
+        * since we actually ask for a write-back and invalidate.
+        */
+       __cpuc_flush_dcache_page(page_address(page));
+}
index 0d67a0a1151e8ef263ae67d1489ea5ad98f8a1d4..0dfee812811a9c75427551ef8177b45f2ca7a998 100644 (file)
@@ -777,6 +777,47 @@ config CRASH_DUMP
           PHYSICAL_START.
          For more details see Documentation/kdump/kdump.txt
 
+config PHYSICAL_START
+       hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
+       default "0x100000"
+       help
+         This gives the physical address where the kernel is loaded.
+
+         If the kernel is not relocatable (CONFIG_RELOCATABLE=n) then
+         bzImage will decompress itself to the above physical address and
+         run from there. Otherwise, bzImage will run from the address where
+         it has been loaded by the boot loader and will ignore the above
+         physical address.
+
+         In normal kdump cases one does not have to set/change this option
+         as now bzImage can be compiled as a completely relocatable image
+         (CONFIG_RELOCATABLE=y) and be used to load and run from a different
+         address. This option is mainly useful for the folks who don't want
+         to use a bzImage for capturing the crash dump and want to use a
+         vmlinux instead. vmlinux is not relocatable hence a kernel needs
+         to be specifically compiled to run from a specific memory area
+         (normally a reserved region) and this option comes handy.
+
+         So if you are using bzImage for capturing the crash dump, leave
+         the value here unchanged to 0x100000 and set CONFIG_RELOCATABLE=y.
+         Otherwise if you plan to use vmlinux for capturing the crash dump
+         change this value to start of the reserved region (Typically 16MB
+         0x1000000). In other words, it can be set based on the "X" value as
+         specified in the "crashkernel=YM@XM" command line boot parameter
+         passed to the panic-ed kernel. Typically this parameter is set as
+         crashkernel=64M@16M. Please take a look at
+         Documentation/kdump/kdump.txt for more details about crash dumps.
+
+         Usage of bzImage for capturing the crash dump is recommended as
+         one does not have to build two kernels. Same kernel can be used
+         as production kernel and capture kernel. Above option should have
+         gone away after relocatable bzImage support is introduced. But it
+         is present because there are users out there who continue to use
+         vmlinux for dump capture. This option should go away down the
+         line.
+
+         Don't change this unless you know what you are doing.
+
 config RELOCATABLE
        bool "Build a relocatable kernel(EXPERIMENTAL)"
        depends on EXPERIMENTAL
index 1b34c56f8123ac7f4ccb7a7e677bee6ef8f7cb6f..8689d62abd4adc66729c5a58185612ddfd6271d6 100644 (file)
@@ -54,7 +54,7 @@ static struct cpu_dev __cpuinitdata default_cpu = {
        .c_init = default_init,
        .c_vendor = "Unknown",
 };
-static struct cpu_dev * this_cpu = &default_cpu;
+static struct cpu_dev * this_cpu __cpuinitdata = &default_cpu;
 
 static int __init cachesize_setup(char *str)
 {
index aef39be813614f0e2591836a4f1fa7aefb5ee9d3..300d9b38d02ec449ebbe9817743b5bf8a639743b 100644 (file)
@@ -227,7 +227,7 @@ static struct {
        atomic_t count_start;
        atomic_t count_stop;
        unsigned long long values[NR_CPUS];
-} tsc __initdata = {
+} tsc __cpuinitdata = {
        .start_flag = ATOMIC_INIT(0),
        .count_start = ATOMIC_INIT(0),
        .count_stop = ATOMIC_INIT(0),
@@ -332,7 +332,7 @@ static void __init synchronize_tsc_bp(void)
                printk("passed.\n");
 }
 
-static void __init synchronize_tsc_ap(void)
+static void __cpuinit synchronize_tsc_ap(void)
 {
        int i;
 
index fcce0e61b0e78835f07c85c089d76ddc913a4b24..2f1814c5cfd78d8d57dbbf97d2464e6e2b32fa6d 100644 (file)
 
 .data
 
+/* We can free up trampoline after bootup if cpu hotplug is not supported. */
+#ifndef CONFIG_HOTPLUG_CPU
+.section ".init.data","aw",@progbits
+#endif
+
 .code16
 
 ENTRY(trampoline_data)
index f44a01357adae3fcc1f560d747a3d5d74ba232ec..2ef857c3ee53ec23462906a56c5e19c6b591030a 100644 (file)
@@ -46,5 +46,7 @@ EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
 EXPORT_SYMBOL(__strnlen_user_asm);
 
 EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+EXPORT_SYMBOL(__csum_partial_copy_user);
 
 EXPORT_SYMBOL(invalid_pte_table);
index 11aab6d6bfe5e462b63f849b1dee631439a9a546..8aa544f73a5ea27fd85eb007ac9ec59938e0ff35 100644 (file)
@@ -94,10 +94,8 @@ static void c0_timer_ack(void)
 {
        unsigned int count;
 
-#ifndef CONFIG_SOC_PNX8550     /* pnx8550 resets to zero */
        /* Ack this timer interrupt and set the next one.  */
        expirelo += cycles_per_jiffy;
-#endif
        write_c0_compare(expirelo);
 
        /* Check to see if we have missed any timer interrupts.  */
index 888b61ea12feb4d58354ff62a28e87d2d5914c56..989c900b8b14a5f4b3b1422552044fcd9ac24f52 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y  += csum_partial.o csum_partial_copy.o memcpy.o promlib.o \
+lib-y  += csum_partial.o memcpy.o promlib.o \
           strlen_user.o strncpy_user.o strnlen_user.o uncached.o
 
 obj-y  += iomap.o
index 9db357294be1efdbea0021db9fe653e5e872054d..c0a77fe038befda70c6f4c67c06557bd335a89b4 100644 (file)
@@ -8,7 +8,9 @@
  * Copyright (C) 1998, 1999 Ralf Baechle
  * Copyright (C) 1999 Silicon Graphics, Inc.
  */
+#include <linux/errno.h>
 #include <asm/asm.h>
+#include <asm/asm-offsets.h>
 #include <asm/regdef.h>
 
 #ifdef CONFIG_64BIT
@@ -271,3 +273,443 @@ small_csumcpy:
        jr      ra
        .set    noreorder
        END(csum_partial)
+
+
+/*
+ * checksum and copy routines based on memcpy.S
+ *
+ *     csum_partial_copy_nocheck(src, dst, len, sum)
+ *     __csum_partial_copy_user(src, dst, len, sum, errp)
+ *
+ * See "Spec" in memcpy.S for details.  Unlike __copy_user, all
+ * functions in this file use the standard calling convention.
+ */
+
+#define src a0
+#define dst a1
+#define len a2
+#define psum a3
+#define sum v0
+#define odd t8
+#define errptr t9
+
+/*
+ * The exception handler for loads requires that:
+ *  1- AT contain the address of the byte just past the end of the source
+ *     of the copy,
+ *  2- src_entry <= src < AT, and
+ *  3- (dst - src) == (dst_entry - src_entry),
+ * The _entry suffix denotes values when __copy_user was called.
+ *
+ * (1) is set up by __csum_partial_copy_from_user and maintained by
+ *     not writing AT in __csum_partial_copy
+ * (2) is met by incrementing src by the number of bytes copied
+ * (3) is met by not doing loads between a pair of increments of dst and src
+ *
+ * The exception handlers for stores store -EFAULT to errptr and return.
+ * These handlers do not need to overwrite any data.
+ */
+
+#define EXC(inst_reg,addr,handler)             \
+9:     inst_reg, addr;                         \
+       .section __ex_table,"a";                \
+       PTR     9b, handler;                    \
+       .previous
+
+#ifdef USE_DOUBLE
+
+#define LOAD   ld
+#define LOADL  ldl
+#define LOADR  ldr
+#define STOREL sdl
+#define STORER sdr
+#define STORE  sd
+#define ADD    daddu
+#define SUB    dsubu
+#define SRL    dsrl
+#define SLL    dsll
+#define SLLV   dsllv
+#define SRLV   dsrlv
+#define NBYTES 8
+#define LOG_NBYTES 3
+
+#else
+
+#define LOAD   lw
+#define LOADL  lwl
+#define LOADR  lwr
+#define STOREL swl
+#define STORER swr
+#define STORE  sw
+#define ADD    addu
+#define SUB    subu
+#define SRL    srl
+#define SLL    sll
+#define SLLV   sllv
+#define SRLV   srlv
+#define NBYTES 4
+#define LOG_NBYTES 2
+
+#endif /* USE_DOUBLE */
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define LDFIRST LOADR
+#define LDREST  LOADL
+#define STFIRST STORER
+#define STREST  STOREL
+#define SHIFT_DISCARD SLLV
+#define SHIFT_DISCARD_REVERT SRLV
+#else
+#define LDFIRST LOADL
+#define LDREST  LOADR
+#define STFIRST STOREL
+#define STREST  STORER
+#define SHIFT_DISCARD SRLV
+#define SHIFT_DISCARD_REVERT SLLV
+#endif
+
+#define FIRST(unit) ((unit)*NBYTES)
+#define REST(unit)  (FIRST(unit)+NBYTES-1)
+
+#define ADDRMASK (NBYTES-1)
+
+       .set    noat
+
+LEAF(__csum_partial_copy_user)
+       PTR_ADDU        AT, src, len    /* See (1) above. */
+#ifdef CONFIG_64BIT
+       move    errptr, a4
+#else
+       lw      errptr, 16(sp)
+#endif
+FEXPORT(csum_partial_copy_nocheck)
+       move    sum, zero
+       move    odd, zero
+       /*
+        * Note: dst & src may be unaligned, len may be 0
+        * Temps
+        */
+       /*
+        * The "issue break"s below are very approximate.
+        * Issue delays for dcache fills will perturb the schedule, as will
+        * load queue full replay traps, etc.
+        *
+        * If len < NBYTES use byte operations.
+        */
+       sltu    t2, len, NBYTES
+       and     t1, dst, ADDRMASK
+       bnez    t2, copy_bytes_checklen
+        and    t0, src, ADDRMASK
+       andi    odd, dst, 0x1                   /* odd buffer? */
+       bnez    t1, dst_unaligned
+        nop
+       bnez    t0, src_unaligned_dst_aligned
+       /*
+        * use delay slot for fall-through
+        * src and dst are aligned; need to compute rem
+        */
+both_aligned:
+        SRL    t0, len, LOG_NBYTES+3    # +3 for 8 units/iter
+       beqz    t0, cleanup_both_aligned # len < 8*NBYTES
+        nop
+       SUB     len, 8*NBYTES           # subtract here for bgez loop
+       .align  4
+1:
+EXC(   LOAD    t0, UNIT(0)(src),       l_exc)
+EXC(   LOAD    t1, UNIT(1)(src),       l_exc_copy)
+EXC(   LOAD    t2, UNIT(2)(src),       l_exc_copy)
+EXC(   LOAD    t3, UNIT(3)(src),       l_exc_copy)
+EXC(   LOAD    t4, UNIT(4)(src),       l_exc_copy)
+EXC(   LOAD    t5, UNIT(5)(src),       l_exc_copy)
+EXC(   LOAD    t6, UNIT(6)(src),       l_exc_copy)
+EXC(   LOAD    t7, UNIT(7)(src),       l_exc_copy)
+       SUB     len, len, 8*NBYTES
+       ADD     src, src, 8*NBYTES
+EXC(   STORE   t0, UNIT(0)(dst),       s_exc)
+       ADDC(sum, t0)
+EXC(   STORE   t1, UNIT(1)(dst),       s_exc)
+       ADDC(sum, t1)
+EXC(   STORE   t2, UNIT(2)(dst),       s_exc)
+       ADDC(sum, t2)
+EXC(   STORE   t3, UNIT(3)(dst),       s_exc)
+       ADDC(sum, t3)
+EXC(   STORE   t4, UNIT(4)(dst),       s_exc)
+       ADDC(sum, t4)
+EXC(   STORE   t5, UNIT(5)(dst),       s_exc)
+       ADDC(sum, t5)
+EXC(   STORE   t6, UNIT(6)(dst),       s_exc)
+       ADDC(sum, t6)
+EXC(   STORE   t7, UNIT(7)(dst),       s_exc)
+       ADDC(sum, t7)
+       bgez    len, 1b
+        ADD    dst, dst, 8*NBYTES
+       ADD     len, 8*NBYTES           # revert len (see above)
+
+       /*
+        * len == the number of bytes left to copy < 8*NBYTES
+        */
+cleanup_both_aligned:
+#define rem t7
+       beqz    len, done
+        sltu   t0, len, 4*NBYTES
+       bnez    t0, less_than_4units
+        and    rem, len, (NBYTES-1)    # rem = len % NBYTES
+       /*
+        * len >= 4*NBYTES
+        */
+EXC(   LOAD    t0, UNIT(0)(src),       l_exc)
+EXC(   LOAD    t1, UNIT(1)(src),       l_exc_copy)
+EXC(   LOAD    t2, UNIT(2)(src),       l_exc_copy)
+EXC(   LOAD    t3, UNIT(3)(src),       l_exc_copy)
+       SUB     len, len, 4*NBYTES
+       ADD     src, src, 4*NBYTES
+EXC(   STORE   t0, UNIT(0)(dst),       s_exc)
+       ADDC(sum, t0)
+EXC(   STORE   t1, UNIT(1)(dst),       s_exc)
+       ADDC(sum, t1)
+EXC(   STORE   t2, UNIT(2)(dst),       s_exc)
+       ADDC(sum, t2)
+EXC(   STORE   t3, UNIT(3)(dst),       s_exc)
+       ADDC(sum, t3)
+       beqz    len, done
+        ADD    dst, dst, 4*NBYTES
+less_than_4units:
+       /*
+        * rem = len % NBYTES
+        */
+       beq     rem, len, copy_bytes
+        nop
+1:
+EXC(   LOAD    t0, 0(src),             l_exc)
+       ADD     src, src, NBYTES
+       SUB     len, len, NBYTES
+EXC(   STORE   t0, 0(dst),             s_exc)
+       ADDC(sum, t0)
+       bne     rem, len, 1b
+        ADD    dst, dst, NBYTES
+
+       /*
+        * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
+        * A loop would do only a byte at a time with possible branch
+        * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
+        * because can't assume read-access to dst.  Instead, use
+        * STREST dst, which doesn't require read access to dst.
+        *
+        * This code should perform better than a simple loop on modern,
+        * wide-issue mips processors because the code has fewer branches and
+        * more instruction-level parallelism.
+        */
+#define bits t2
+       beqz    len, done
+        ADD    t1, dst, len    # t1 is just past last byte of dst
+       li      bits, 8*NBYTES
+       SLL     rem, len, 3     # rem = number of bits to keep
+EXC(   LOAD    t0, 0(src),             l_exc)
+       SUB     bits, bits, rem # bits = number of bits to discard
+       SHIFT_DISCARD t0, t0, bits
+EXC(   STREST  t0, -1(t1),             s_exc)
+       SHIFT_DISCARD_REVERT t0, t0, bits
+       .set reorder
+       ADDC(sum, t0)
+       b       done
+       .set noreorder
+dst_unaligned:
+       /*
+        * dst is unaligned
+        * t0 = src & ADDRMASK
+        * t1 = dst & ADDRMASK; T1 > 0
+        * len >= NBYTES
+        *
+        * Copy enough bytes to align dst
+        * Set match = (src and dst have same alignment)
+        */
+#define match rem
+EXC(   LDFIRST t3, FIRST(0)(src),      l_exc)
+       ADD     t2, zero, NBYTES
+EXC(   LDREST  t3, REST(0)(src),       l_exc_copy)
+       SUB     t2, t2, t1      # t2 = number of bytes copied
+       xor     match, t0, t1
+EXC(   STFIRST t3, FIRST(0)(dst),      s_exc)
+       SLL     t4, t1, 3               # t4 = number of bits to discard
+       SHIFT_DISCARD t3, t3, t4
+       /* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
+       ADDC(sum, t3)
+       beq     len, t2, done
+        SUB    len, len, t2
+       ADD     dst, dst, t2
+       beqz    match, both_aligned
+        ADD    src, src, t2
+
+src_unaligned_dst_aligned:
+       SRL     t0, len, LOG_NBYTES+2    # +2 for 4 units/iter
+       beqz    t0, cleanup_src_unaligned
+        and    rem, len, (4*NBYTES-1)   # rem = len % 4*NBYTES
+1:
+/*
+ * Avoid consecutive LD*'s to the same register since some mips
+ * implementations can't issue them in the same cycle.
+ * It's OK to load FIRST(N+1) before REST(N) because the two addresses
+ * are to the same unit (unless src is aligned, but it's not).
+ */
+EXC(   LDFIRST t0, FIRST(0)(src),      l_exc)
+EXC(   LDFIRST t1, FIRST(1)(src),      l_exc_copy)
+       SUB     len, len, 4*NBYTES
+EXC(   LDREST  t0, REST(0)(src),       l_exc_copy)
+EXC(   LDREST  t1, REST(1)(src),       l_exc_copy)
+EXC(   LDFIRST t2, FIRST(2)(src),      l_exc_copy)
+EXC(   LDFIRST t3, FIRST(3)(src),      l_exc_copy)
+EXC(   LDREST  t2, REST(2)(src),       l_exc_copy)
+EXC(   LDREST  t3, REST(3)(src),       l_exc_copy)
+       ADD     src, src, 4*NBYTES
+#ifdef CONFIG_CPU_SB1
+       nop                             # improves slotting
+#endif
+EXC(   STORE   t0, UNIT(0)(dst),       s_exc)
+       ADDC(sum, t0)
+EXC(   STORE   t1, UNIT(1)(dst),       s_exc)
+       ADDC(sum, t1)
+EXC(   STORE   t2, UNIT(2)(dst),       s_exc)
+       ADDC(sum, t2)
+EXC(   STORE   t3, UNIT(3)(dst),       s_exc)
+       ADDC(sum, t3)
+       bne     len, rem, 1b
+        ADD    dst, dst, 4*NBYTES
+
+cleanup_src_unaligned:
+       beqz    len, done
+        and    rem, len, NBYTES-1  # rem = len % NBYTES
+       beq     rem, len, copy_bytes
+        nop
+1:
+EXC(   LDFIRST t0, FIRST(0)(src),      l_exc)
+EXC(   LDREST  t0, REST(0)(src),       l_exc_copy)
+       ADD     src, src, NBYTES
+       SUB     len, len, NBYTES
+EXC(   STORE   t0, 0(dst),             s_exc)
+       ADDC(sum, t0)
+       bne     len, rem, 1b
+        ADD    dst, dst, NBYTES
+
+copy_bytes_checklen:
+       beqz    len, done
+        nop
+copy_bytes:
+       /* 0 < len < NBYTES  */
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define SHIFT_START 0
+#define SHIFT_INC 8
+#else
+#define SHIFT_START 8*(NBYTES-1)
+#define SHIFT_INC -8
+#endif
+       move    t2, zero        # partial word
+       li      t3, SHIFT_START # shift
+/* use l_exc_copy here to return correct sum on fault */
+#define COPY_BYTE(N)                   \
+EXC(   lbu     t0, N(src), l_exc_copy);        \
+       SUB     len, len, 1;            \
+EXC(   sb      t0, N(dst), s_exc);     \
+       SLLV    t0, t0, t3;             \
+       addu    t3, SHIFT_INC;          \
+       beqz    len, copy_bytes_done;   \
+        or     t2, t0
+
+       COPY_BYTE(0)
+       COPY_BYTE(1)
+#ifdef USE_DOUBLE
+       COPY_BYTE(2)
+       COPY_BYTE(3)
+       COPY_BYTE(4)
+       COPY_BYTE(5)
+#endif
+EXC(   lbu     t0, NBYTES-2(src), l_exc_copy)
+       SUB     len, len, 1
+EXC(   sb      t0, NBYTES-2(dst), s_exc)
+       SLLV    t0, t0, t3
+       or      t2, t0
+copy_bytes_done:
+       ADDC(sum, t2)
+done:
+       /* fold checksum */
+#ifdef USE_DOUBLE
+       dsll32  v1, sum, 0
+       daddu   sum, v1
+       sltu    v1, sum, v1
+       dsra32  sum, sum, 0
+       addu    sum, v1
+#endif
+       sll     v1, sum, 16
+       addu    sum, v1
+       sltu    v1, sum, v1
+       srl     sum, sum, 16
+       addu    sum, v1
+
+       /* odd buffer alignment? */
+       beqz    odd, 1f
+        nop
+       sll     v1, sum, 8
+       srl     sum, sum, 8
+       or      sum, v1
+       andi    sum, 0xffff
+1:
+       .set reorder
+       ADDC(sum, psum)
+       jr      ra
+       .set noreorder
+
+l_exc_copy:
+       /*
+        * Copy bytes from src until faulting load address (or until a
+        * lb faults)
+        *
+        * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
+        * may be more than a byte beyond the last address.
+        * Hence, the lb below may get an exception.
+        *
+        * Assumes src < THREAD_BUADDR($28)
+        */
+       LOAD    t0, TI_TASK($28)
+        li     t2, SHIFT_START
+       LOAD    t0, THREAD_BUADDR(t0)
+1:
+EXC(   lbu     t1, 0(src),     l_exc)
+       ADD     src, src, 1
+       sb      t1, 0(dst)      # can't fault -- we're copy_from_user
+       SLLV    t1, t1, t2
+       addu    t2, SHIFT_INC
+       ADDC(sum, t1)
+       bne     src, t0, 1b
+        ADD    dst, dst, 1
+l_exc:
+       LOAD    t0, TI_TASK($28)
+        nop
+       LOAD    t0, THREAD_BUADDR(t0)   # t0 is just past last good address
+        nop
+       SUB     len, AT, t0             # len number of uncopied bytes
+       /*
+        * Here's where we rely on src and dst being incremented in tandem,
+        *   See (3) above.
+        * dst += (fault addr - src) to put dst at first byte to clear
+        */
+       ADD     dst, t0                 # compute start address in a1
+       SUB     dst, src
+       /*
+        * Clear len bytes starting at dst.  Can't call __bzero because it
+        * might modify len.  An inefficient loop for these rare times...
+        */
+       beqz    len, done
+        SUB    src, len, 1
+1:     sb      zero, 0(dst)
+       ADD     dst, dst, 1
+       bnez    src, 1b
+        SUB    src, src, 1
+       li      v1, -EFAULT
+       b       done
+        sw     v1, (errptr)
+
+s_exc:
+       li      v0, -1 /* invalid checksum */
+       li      v1, -EFAULT
+       jr      ra
+        sw     v1, (errptr)
+       END(__csum_partial_copy_user)
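
For readers less familiar with MIPS assembly, the fold and byte swap performed at the "done:" label above correspond roughly to the C below.  This is an illustrative rendering only, not code from the patch; the 64-bit (USE_DOUBLE) pre-fold and the final ADDC of the caller's initial sum (psum) are omitted.

/* Illustrative rendering of the "done:" fold -- not code from this merge. */
static inline unsigned int fold_and_swap(unsigned int sum, int odd)
{
        /* Fold 32 bits down to 16 with end-around carry. */
        sum = (sum >> 16) + (sum & 0xffff);
        sum = (sum & 0xffff) + (sum >> 16);

        /* If dst started on an odd byte, swap the two checksum bytes. */
        if (odd)
                sum = ((sum & 0xff) << 8) | ((sum >> 8) & 0xff);

        return sum & 0xffff;    /* psum is then added with end-around carry */
}
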
diff --git a/arch/mips/lib/csum_partial_copy.c b/arch/mips/lib/csum_partial_copy.c
deleted file mode 100644 (file)
index 0677104..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1994, 1995 Waldorf Electronics GmbH
- * Copyright (C) 1998, 1999 Ralf Baechle
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-#include <asm/string.h>
-#include <asm/uaccess.h>
-#include <net/checksum.h>
-
-/*
- * copy while checksumming, otherwise like csum_partial
- */
-__wsum csum_partial_copy_nocheck(const void *src,
-       void *dst, int len, __wsum sum)
-{
-       /*
-        * It's 2:30 am and I don't feel like doing it real ...
-        * This is lots slower than the real thing (tm)
-        */
-       sum = csum_partial(src, len, sum);
-       memcpy(dst, src, len);
-
-       return sum;
-}
-
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-
-/*
- * Copy from userspace and compute checksum.  If we catch an exception
- * then zero the rest of the buffer.
- */
-__wsum csum_partial_copy_from_user (const void __user *src,
-       void *dst, int len, __wsum sum, int *err_ptr)
-{
-       int missing;
-
-       might_sleep();
-       missing = copy_from_user(dst, src, len);
-       if (missing) {
-               memset(dst + len - missing, 0, missing);
-               *err_ptr = -EFAULT;
-       }
-
-       return csum_partial(dst, len, sum);
-}
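
The deleted C fallback above did a plain copy_from_user() followed by csum_partial(); the new assembly entry point __csum_partial_copy_user checksums during the copy and reports faults through an error pointer.  Below is a sketch of how such an entry point is typically wrapped for callers; this is an assumption about the include/asm-mips/checksum.h side of the change, not a copy of it.

#include <linux/types.h>
#include <linux/kernel.h>

/* Sketch only -- the real wrappers live in include/asm-mips/checksum.h. */
__wsum __csum_partial_copy_user(const void *src, void *dst,
                                int len, __wsum sum, int *err_ptr);

static inline __wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
                            __wsum sum, int *err_ptr)
{
        might_sleep();
        return __csum_partial_copy_user((__force void *)src, dst,
                                        len, sum, err_ptr);
}
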
index e4604c73f02e9dd75099e5c9a51af8d9fcd6d759..a3c3a1d462b272f39900d9c9910ac1f1d8bde455 100644 (file)
@@ -47,6 +47,9 @@
 #ifdef CONFIG_MIPS_MALTA
 #include <asm/mips-boards/maltaint.h>
 #endif
+#ifdef CONFIG_MIPS_SEAD
+#include <asm/mips-boards/seadint.h>
+#endif
 
 unsigned long cpu_khz;
 
@@ -263,11 +266,13 @@ void __init mips_time_init(void)
 
 void __init plat_timer_setup(struct irqaction *irq)
 {
+#ifdef MSC01E_INT_BASE
        if (cpu_has_veic) {
                set_vi_handler (MSC01E_INT_CPUCTR, mips_timer_dispatch);
                mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
-       }
-       else {
+       } else
+#endif
+       {
                if (cpu_has_vint)
                        set_vi_handler (MIPSCPU_INT_CPUCTR, mips_timer_dispatch);
                mips_cpu_timer_irq = MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR;
diff --git a/arch/mips/mips-boards/malta/malta_mtd.c b/arch/mips/mips-boards/malta/malta_mtd.c
new file mode 100644 (file)
index 0000000..8ad9bdf
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 MIPS Technologies, Inc.
+ *     written by Ralf Baechle <ralf@linux-mips.org>
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <mtd/mtd-abi.h>
+
+static struct mtd_partition malta_mtd_partitions[] = {
+       {
+               .name =         "YAMON",
+               .offset =       0x0,
+               .size =         0x100000,
+               .mask_flags =   MTD_WRITEABLE
+       }, {
+               .name =         "User FS",
+               .offset =       0x100000,
+               .size =         0x2e0000
+       }, {
+               .name =         "Board Config",
+               .offset =       0x3e0000,
+               .size =         0x020000,
+               .mask_flags =   MTD_WRITEABLE
+       }
+};
+
+static struct physmap_flash_data malta_flash_data = {
+       .width          = 4,
+       .nr_parts       = ARRAY_SIZE(malta_mtd_partitions),
+       .parts          = malta_mtd_partitions
+};
+
+static struct resource malta_flash_resource = {
+       .start          = 0x1e000000,
+       .end            = 0x1e3fffff,
+       .flags          = IORESOURCE_MEM
+};
+
+static struct platform_device malta_flash = {
+       .name           = "physmap-flash",
+       .id             = 0,
+       .dev            = {
+               .platform_data  = &malta_flash_data,
+       },
+       .num_resources  = 1,
+       .resource       = &malta_flash_resource,
+};
+
+static int __init malta_mtd_init(void)
+{
+       platform_device_register(&malta_flash);
+
+       return 0;
+}
+
+module_init(malta_mtd_init)
index f445fcddfdfd925bc9ca4525db4c970998b35e29..874ccb0066b8aa7b6266ee8e377d16671d987408 100644 (file)
@@ -21,7 +21,7 @@
  * Sead board.
  */
 #include <linux/init.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 
 #include <asm/irq_cpu.h>
 #include <asm/mipsregs.h>
@@ -108,7 +108,7 @@ asmlinkage void plat_irq_dispatch(void)
        if (irq >= 0)
                do_IRQ(MIPSCPU_INT_BASE + irq);
        else
-               spurious_interrupt(regs);
+               spurious_interrupt();
 }
 
 void __init arch_init_irq(void)
index d41fc5885e875f67ec9de7cf3fc2057b6c7c2419..dc795be62807d1d1bf22af966ad961d0918794bf 100644 (file)
@@ -243,11 +243,10 @@ static void __init __build_store_reg(int reg)
 
 static inline void build_store_reg(int reg)
 {
-       if (cpu_has_prefetch)
-               if (reg)
-                       build_dst_pref(pref_offset_copy);
-               else
-                       build_dst_pref(pref_offset_clear);
+       int pref_off = cpu_has_prefetch ?
+               (reg ? pref_offset_copy : pref_offset_clear) : 0;
+       if (pref_off)
+               build_dst_pref(pref_off);
        else if (cpu_has_cache_cdex_s)
                build_cdex_s();
        else if (cpu_has_cache_cdex_p)
index 454b65cc335431bdf8543e96c9b00adaf8dc1682..f556b7a8dccdd29318aaaedf855f2704a7ca90bf 100644 (file)
@@ -202,7 +202,7 @@ write_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 val)
                break;
        }
 
-       err = config_access(PCI_CMD_CONFIG_READ, bus, devfn, where, ~(1 << (where & 3)), &data);
+       err = config_access(PCI_CMD_CONFIG_WRITE, bus, devfn, where, ~(1 << (where & 3)), &data);
 
        return err;
 }
index 65c440e8480b088e46660c50fd46025444cb9430..f80acae07ceecebb0927ca759f918ea484dc1d5a 100644 (file)
 #include <int.h>
 #include <cm.h>
 
-extern unsigned int mips_hpt_frequency;
+static unsigned long cpj;
+
+static cycle_t hpt_read(void)
+{
+       return read_c0_count2();
+}
+
+static void timer_ack(void)
+{
+       write_c0_compare(cpj);
+}
 
 /*
  * pnx8550_time_init() - it does the following things:
@@ -68,27 +78,47 @@ void pnx8550_time_init(void)
         * HZ timer interrupts per second.
         */
        mips_hpt_frequency = 27UL * ((1000000UL * n)/(m * pow2p));
+       cpj = (mips_hpt_frequency + HZ / 2) / HZ;
+       timer_ack();
+
+       /* Setup Timer 2 */
+       write_c0_count2(0);
+       write_c0_compare2(0xffffffff);
+
+       clocksource_mips.read = hpt_read;
+       mips_timer_ack = timer_ack;
+}
+
+static irqreturn_t monotonic_interrupt(int irq, void *dev_id)
+{
+       /* Timer 2 clear interrupt */
+       write_c0_compare2(-1);
+       return IRQ_HANDLED;
 }
 
+static struct irqaction monotonic_irqaction = {
+       .handler = monotonic_interrupt,
+       .flags = IRQF_DISABLED,
+       .name = "Monotonic timer",
+};
+
 void __init plat_timer_setup(struct irqaction *irq)
 {
        int configPR;
 
        setup_irq(PNX8550_INT_TIMER1, irq);
+       setup_irq(PNX8550_INT_TIMER2, &monotonic_irqaction);
 
-       /* Start timer1 */
+       /* Timer 1 start */
        configPR = read_c0_config7();
        configPR &= ~0x00000008;
        write_c0_config7(configPR);
 
-       /* Timer 2 stop */
+       /* Timer 2 start */
        configPR = read_c0_config7();
-       configPR |= 0x00000010;
+       configPR &= ~0x00000010;
        write_c0_config7(configPR);
 
-       write_c0_count2(0);
-       write_c0_compare2(0xffffffff);
-
        /* Timer 3 stop */
        configPR = read_c0_config7();
        configPR |= 0x00000020;
index 8699dadcd0966028ed7207b333486b2295a4fc12..0855d55c194d7490fab02d6aad844d238d5a0a9e 100644 (file)
@@ -436,7 +436,7 @@ config PPC_EFIKA
        select RTAS_PROC
        select PPC_MPC52xx
        select PPC_NATIVE
-       default y
+       default n
 
 config PPC_LITE5200
        bool "Freescale Lite5200 Eval Board"
@@ -471,7 +471,7 @@ config PPC_PREP
        select PPC_INDIRECT_PCI
        select PPC_UDBG_16550
        select PPC_NATIVE
-       default y
+       default n
 
 config PPC_MAPLE
        depends on PPC_MULTIPLATFORM && PPC64
index 8bc0d259796d3ba918d09e4e98924c921aa04b8d..a8efb59f5dd7a8feb3a90584fa5033aa9c256689 100644 (file)
 
                // PSC3 in CODEC mode example
                i2s@2400 {              // PSC3
-                       device_type = "i2s";
+                       device_type = "sound";
                        compatible = "mpc5200-psc-i2s\0mpc52xx-psc-i2s";
                        reg = <2400 100>;
                        interrupts = <2 3 0>;
 
                // PSC6 in AC97 mode example
                ac97@2c00 {             // PSC6
-                       device_type = "ac97";
+                       device_type = "sound";
                        compatible = "mpc5200-psc-ac97\0mpc52xx-psc-ac97";
                        reg = <2c00 100>;
                        interrupts = <2 4 0>;
index 81cb76418a7816e3eab636b096d980b3930a597d..1aabee432d86f13200fdfe37aa5d77ceae897470 100644 (file)
 
                // PSC3 in CODEC mode example
                i2s@2400 {              // PSC3
-                       device_type = "i2s";
+                       device_type = "sound";
                        compatible = "mpc5200b-psc-i2s\0mpc52xx-psc-i2s";
                        reg = <2400 100>;
                        interrupts = <2 3 0>;
 
                // PSC6 in AC97 mode example
                ac97@2c00 {             // PSC6
-                       device_type = "ac97";
+                       device_type = "sound";
                        compatible = "mpc5200b-psc-ac97\0mpc52xx-psc-ac97";
                        reg = <2c00 100>;
                        interrupts = <2 4 0>;
index 1c009651f9250f18eb19bcc8a759c46e4307a8ce..340376a470017c173d06c0a6e7521079c6d430fc 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.18-rc6
-# Sun Sep 10 10:45:11 2006
+# Linux kernel version: 2.6.20-rc3
+# Tue Jan  2 15:32:44 2007
 #
 CONFIG_PPC64=y
 CONFIG_64BIT=y
@@ -10,6 +10,8 @@ CONFIG_MMU=y
 CONFIG_GENERIC_HARDIRQS=y
 CONFIG_IRQ_PER_CPU=y
 CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+CONFIG_ARCH_HAS_ILOG2_U64=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
 CONFIG_GENERIC_FIND_NEXT_BIT=y
@@ -22,6 +24,8 @@ CONFIG_ARCH_MAY_HAVE_PC_FDC=y
 CONFIG_PPC_OF=y
 CONFIG_PPC_UDBG_16550=y
 CONFIG_GENERIC_TBSYNC=y
+CONFIG_AUDIT_ARCH=y
+CONFIG_GENERIC_BUG=y
 # CONFIG_DEFAULT_UIMAGE is not set
 
 #
@@ -31,6 +35,10 @@ CONFIG_GENERIC_TBSYNC=y
 CONFIG_POWER3=y
 CONFIG_POWER4=y
 CONFIG_PPC_FPU=y
+# CONFIG_PPC_DCR_NATIVE is not set
+CONFIG_PPC_DCR_MMIO=y
+CONFIG_PPC_DCR=y
+CONFIG_PPC_OF_PLATFORM_PCI=y
 CONFIG_ALTIVEC=y
 CONFIG_PPC_STD_MMU=y
 CONFIG_VIRT_CPU_ACCOUNTING=y
@@ -52,19 +60,24 @@ CONFIG_LOCALVERSION=""
 CONFIG_LOCALVERSION_AUTO=y
 CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
+# CONFIG_IPC_NS is not set
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_BSD_PROCESS_ACCT is not set
 CONFIG_TASKSTATS=y
 CONFIG_TASK_DELAY_ACCT=y
-CONFIG_SYSCTL=y
+# CONFIG_UTS_NS is not set
 # CONFIG_AUDIT is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_CPUSETS=y
+CONFIG_SYSFS_DEPRECATED=y
 CONFIG_RELAY=y
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_TASK_XACCT is not set
+CONFIG_SYSCTL=y
 # CONFIG_EMBEDDED is not set
+CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -73,12 +86,12 @@ CONFIG_PRINTK=y
 CONFIG_BUG=y
 CONFIG_ELF_CORE=y
 CONFIG_BASE_FULL=y
-CONFIG_RT_MUTEXES=y
 CONFIG_FUTEX=y
 CONFIG_EPOLL=y
 CONFIG_SHMEM=y
 CONFIG_SLAB=y
 CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 # CONFIG_SLOB is not set
@@ -97,6 +110,7 @@ CONFIG_STOP_MACHINE=y
 #
 # Block layer
 #
+CONFIG_BLOCK=y
 CONFIG_BLK_DEV_IO_TRACE=y
 
 #
@@ -116,16 +130,20 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
 # Platform support
 #
 CONFIG_PPC_MULTIPLATFORM=y
-# CONFIG_PPC_ISERIES is not set
 # CONFIG_EMBEDDED6xx is not set
 # CONFIG_APUS is not set
 CONFIG_PPC_PSERIES=y
+CONFIG_PPC_ISERIES=y
+# CONFIG_PPC_MPC52xx is not set
 CONFIG_PPC_PMAC=y
 CONFIG_PPC_PMAC64=y
 CONFIG_PPC_MAPLE=y
+# CONFIG_PPC_PASEMI is not set
 CONFIG_PPC_CELL=y
 CONFIG_PPC_CELL_NATIVE=y
 CONFIG_PPC_IBM_CELL_BLADE=y
+# CONFIG_PPC_PS3 is not set
+CONFIG_PPC_NATIVE=y
 CONFIG_UDBG_RTAS_CONSOLE=y
 CONFIG_XICS=y
 CONFIG_U3_DART=y
@@ -139,6 +157,8 @@ CONFIG_IBMVIO=y
 # CONFIG_IBMEBUS is not set
 # CONFIG_PPC_MPC106 is not set
 CONFIG_PPC_970_NAP=y
+CONFIG_PPC_INDIRECT_IO=y
+CONFIG_GENERIC_IOMAP=y
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_TABLE=y
 # CONFIG_CPU_FREQ_DEBUG is not set
@@ -160,14 +180,16 @@ CONFIG_MPIC=y
 #
 CONFIG_SPU_FS=m
 CONFIG_SPU_BASE=y
-CONFIG_SPUFS_MMAP=y
 CONFIG_CBE_RAS=y
+CONFIG_CBE_THERM=m
+CONFIG_CBE_CPUFREQ=m
 
 #
 # Kernel options
 #
 # CONFIG_HZ_100 is not set
 CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
 # CONFIG_HZ_1000 is not set
 CONFIG_HZ=250
 CONFIG_PREEMPT_NONE=y
@@ -192,6 +214,7 @@ CONFIG_ARCH_SELECT_MEMORY_MODEL=y
 CONFIG_ARCH_FLATMEM_ENABLE=y
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
 CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_SELECT_MEMORY_MODEL=y
 # CONFIG_FLATMEM_MANUAL is not set
 # CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -201,6 +224,7 @@ CONFIG_HAVE_MEMORY_PRESENT=y
 # CONFIG_SPARSEMEM_STATIC is not set
 CONFIG_SPARSEMEM_EXTREME=y
 CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_SPARSE=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 CONFIG_RESOURCES_64BIT=y
 CONFIG_ARCH_MEMORY_PROBE=y
@@ -222,6 +246,7 @@ CONFIG_PPC_I8259=y
 CONFIG_PCI=y
 CONFIG_PCI_DOMAINS=y
 # CONFIG_PCIEPORTBUS is not set
+# CONFIG_PCI_MULTITHREAD_PROBE is not set
 # CONFIG_PCI_DEBUG is not set
 
 #
@@ -254,6 +279,7 @@ CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM=y
 CONFIG_XFRM_USER=m
+# CONFIG_XFRM_SUB_POLICY is not set
 CONFIG_NET_KEY=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
@@ -272,10 +298,13 @@ CONFIG_INET_XFRM_TUNNEL=m
 CONFIG_INET_TUNNEL=y
 CONFIG_INET_XFRM_MODE_TRANSPORT=y
 CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
 CONFIG_INET_DIAG=y
 CONFIG_INET_TCP_DIAG=y
 # CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_TCP_CONG_BIC=y
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
 
 #
 # IP: Virtual Server Configuration
@@ -294,25 +323,31 @@ CONFIG_NETFILTER=y
 CONFIG_NETFILTER_NETLINK=y
 CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK_ENABLED=m
+CONFIG_NF_CONNTRACK_SUPPORT=y
+# CONFIG_IP_NF_CONNTRACK_SUPPORT is not set
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CT_ACCT=y
+CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_GRE=m
+CONFIG_NF_CT_PROTO_SCTP=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
 # CONFIG_NETFILTER_XTABLES is not set
 
 #
 # IP: Netfilter Configuration
 #
-CONFIG_IP_NF_CONNTRACK=m
-CONFIG_IP_NF_CT_ACCT=y
-CONFIG_IP_NF_CONNTRACK_MARK=y
-CONFIG_IP_NF_CONNTRACK_EVENTS=y
-CONFIG_IP_NF_CONNTRACK_NETLINK=m
-CONFIG_IP_NF_CT_PROTO_SCTP=m
-CONFIG_IP_NF_FTP=m
-CONFIG_IP_NF_IRC=m
-# CONFIG_IP_NF_NETBIOS_NS is not set
-CONFIG_IP_NF_TFTP=m
-CONFIG_IP_NF_AMANDA=m
-# CONFIG_IP_NF_PPTP is not set
-# CONFIG_IP_NF_H323 is not set
-CONFIG_IP_NF_SIP=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
 CONFIG_IP_NF_QUEUE=m
 
 #
@@ -339,7 +374,6 @@ CONFIG_LLC=y
 # CONFIG_ATALK is not set
 # CONFIG_X25 is not set
 # CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
 
@@ -411,6 +445,12 @@ CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CDROM_PKTCDVD is not set
 # CONFIG_ATA_OVER_ETH is not set
 
+#
+# Misc devices
+#
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+
 #
 # ATA/ATAPI/MFM/RLL support
 #
@@ -438,7 +478,6 @@ CONFIG_IDEPCI_SHARE_IRQ=y
 # CONFIG_BLK_DEV_OFFBOARD is not set
 CONFIG_BLK_DEV_GENERIC=y
 # CONFIG_BLK_DEV_OPTI621 is not set
-CONFIG_BLK_DEV_SL82C105=y
 CONFIG_BLK_DEV_IDEDMA_PCI=y
 # CONFIG_BLK_DEV_IDEDMA_FORCED is not set
 CONFIG_IDEDMA_PCI_AUTO=y
@@ -453,6 +492,7 @@ CONFIG_BLK_DEV_AMD74XX=y
 # CONFIG_BLK_DEV_CS5530 is not set
 # CONFIG_BLK_DEV_HPT34X is not set
 # CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_JMICRON is not set
 # CONFIG_BLK_DEV_SC1200 is not set
 # CONFIG_BLK_DEV_PIIX is not set
 # CONFIG_BLK_DEV_IT821X is not set
@@ -461,6 +501,7 @@ CONFIG_BLK_DEV_AMD74XX=y
 # CONFIG_BLK_DEV_PDC202XX_NEW is not set
 # CONFIG_BLK_DEV_SVWKS is not set
 # CONFIG_BLK_DEV_SIIMAGE is not set
+CONFIG_BLK_DEV_SL82C105=y
 # CONFIG_BLK_DEV_SLC90E66 is not set
 # CONFIG_BLK_DEV_TRM290 is not set
 # CONFIG_BLK_DEV_VIA82CXXX is not set
@@ -478,6 +519,8 @@ CONFIG_IDEDMA_AUTO=y
 #
 # CONFIG_RAID_ATTRS is not set
 CONFIG_SCSI=y
+# CONFIG_SCSI_TGT is not set
+CONFIG_SCSI_NETLINK=y
 CONFIG_SCSI_PROC_FS=y
 
 #
@@ -497,14 +540,16 @@ CONFIG_CHR_DEV_SG=y
 CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 # CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
 
 #
-# SCSI Transport Attributes
+# SCSI Transports
 #
 CONFIG_SCSI_SPI_ATTRS=y
 CONFIG_SCSI_FC_ATTRS=y
 CONFIG_SCSI_ISCSI_ATTRS=m
 # CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
 
 #
 # SCSI low-level drivers
@@ -517,26 +562,12 @@ CONFIG_SCSI_ISCSI_ATTRS=m
 # CONFIG_SCSI_AIC7XXX is not set
 # CONFIG_SCSI_AIC7XXX_OLD is not set
 # CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_ARCMSR is not set
 # CONFIG_MEGARAID_NEWGEN is not set
 # CONFIG_MEGARAID_LEGACY is not set
 # CONFIG_MEGARAID_SAS is not set
-CONFIG_ATA=y
-# CONFIG_SATA_AHCI is not set
-CONFIG_SATA_SVW=y
-# CONFIG_SCSI_ATA_PIIX is not set
-# CONFIG_SATA_MV is not set
-# CONFIG_SATA_NV is not set
-# CONFIG_SCSI_PDC_ADMA is not set
 # CONFIG_SCSI_HPTIOP is not set
-# CONFIG_SATA_QSTOR is not set
-# CONFIG_SATA_PROMISE is not set
-# CONFIG_SATA_SX4 is not set
-# CONFIG_SATA_SIL is not set
-# CONFIG_SATA_SIL24 is not set
-# CONFIG_SATA_SIS is not set
-# CONFIG_SATA_ULI is not set
-# CONFIG_SATA_VIA is not set
-# CONFIG_SATA_VITESSE is not set
 # CONFIG_SCSI_BUSLOGIC is not set
 # CONFIG_SCSI_DMX3191D is not set
 # CONFIG_SCSI_EATA is not set
@@ -546,6 +577,7 @@ CONFIG_SATA_SVW=y
 CONFIG_SCSI_IBMVSCSI=y
 # CONFIG_SCSI_INITIO is not set
 # CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_STEX is not set
 CONFIG_SCSI_SYM53C8XX_2=y
 CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
 CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
@@ -556,10 +588,66 @@ CONFIG_SCSI_IPR_TRACE=y
 CONFIG_SCSI_IPR_DUMP=y
 # CONFIG_SCSI_QLOGIC_1280 is not set
 # CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
 CONFIG_SCSI_LPFC=m
 # CONFIG_SCSI_DC395x is not set
 # CONFIG_SCSI_DC390T is not set
 CONFIG_SCSI_DEBUG=m
+# CONFIG_SCSI_SRP is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+CONFIG_ATA=y
+# CONFIG_SATA_AHCI is not set
+CONFIG_SATA_SVW=y
+# CONFIG_ATA_PIIX is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SX4 is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIL24 is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_ATA_GENERIC is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
 
 #
 # Multi-device support (RAID and LVM)
@@ -575,6 +663,7 @@ CONFIG_MD_RAID5_RESHAPE=y
 CONFIG_MD_MULTIPATH=m
 CONFIG_MD_FAULTY=m
 CONFIG_BLK_DEV_DM=y
+# CONFIG_DM_DEBUG is not set
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_MIRROR=m
@@ -630,11 +719,13 @@ CONFIG_IEEE1394_RAWIO=y
 CONFIG_ADB_PMU=y
 # CONFIG_ADB_PMU_LED is not set
 CONFIG_PMAC_SMU=y
+# CONFIG_MAC_EMUMOUSEBTN is not set
 CONFIG_THERM_PM72=y
 CONFIG_WINDFARM=y
 CONFIG_WINDFARM_PM81=y
 CONFIG_WINDFARM_PM91=y
 CONFIG_WINDFARM_PM112=y
+# CONFIG_PMAC_RACKMETER is not set
 
 #
 # Network device support
@@ -675,6 +766,7 @@ CONFIG_VORTEX=y
 CONFIG_IBMVETH=m
 CONFIG_NET_PCI=y
 CONFIG_PCNET32=y
+# CONFIG_PCNET32_NAPI is not set
 # CONFIG_AMD8111_ETH is not set
 # CONFIG_ADAPTEC_STARFIRE is not set
 # CONFIG_B44 is not set
@@ -713,7 +805,7 @@ CONFIG_E1000=y
 CONFIG_TIGON3=y
 # CONFIG_BNX2 is not set
 CONFIG_SPIDER_NET=m
-# CONFIG_MV643XX_ETH is not set
+# CONFIG_QLA3XXX is not set
 
 #
 # Ethernet (10000 Mbit)
@@ -723,6 +815,7 @@ CONFIG_IXGB=m
 # CONFIG_IXGB_NAPI is not set
 # CONFIG_S2IO is not set
 # CONFIG_MYRI10GE is not set
+# CONFIG_NETXEN_NIC is not set
 
 #
 # Token Ring devices
@@ -741,6 +834,7 @@ CONFIG_IBMOL=y
 # Wan interfaces
 #
 # CONFIG_WAN is not set
+CONFIG_ISERIES_VETH=m
 # CONFIG_FDDI is not set
 # CONFIG_HIPPI is not set
 CONFIG_PPP=m
@@ -753,6 +847,7 @@ CONFIG_PPP_BSDCOMP=m
 # CONFIG_PPP_MPPE is not set
 CONFIG_PPPOE=m
 # CONFIG_SLIP is not set
+CONFIG_SLHC=m
 # CONFIG_NET_FC is not set
 # CONFIG_SHAPER is not set
 CONFIG_NETCONSOLE=y
@@ -775,6 +870,7 @@ CONFIG_NET_POLL_CONTROLLER=y
 # Input device support
 #
 CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
 
 #
 # Userland interfaces
@@ -797,6 +893,7 @@ CONFIG_KEYBOARD_ATKBD=y
 # CONFIG_KEYBOARD_LKKBD is not set
 # CONFIG_KEYBOARD_XTKBD is not set
 # CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
 CONFIG_INPUT_MOUSE=y
 CONFIG_MOUSE_PS2=y
 # CONFIG_MOUSE_SERIAL is not set
@@ -850,6 +947,7 @@ CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=256
 CONFIG_HVC_DRIVER=y
 CONFIG_HVC_CONSOLE=y
+CONFIG_HVC_ISERIES=y
 CONFIG_HVC_RTAS=y
 CONFIG_HVCS=m
 
@@ -868,10 +966,6 @@ CONFIG_GEN_RTC=y
 # CONFIG_DTLK is not set
 # CONFIG_R3964 is not set
 # CONFIG_APPLICOM is not set
-
-#
-# Ftape, the floppy tape device driver
-#
 # CONFIG_AGP is not set
 # CONFIG_DRM is not set
 CONFIG_RAW_DRIVER=y
@@ -882,7 +976,6 @@ CONFIG_MAX_RAW_DEVS=256
 # TPM devices
 #
 # CONFIG_TCG_TPM is not set
-# CONFIG_TELCLOCK is not set
 
 #
 # I2C support
@@ -947,6 +1040,7 @@ CONFIG_I2C_POWERMAC=y
 #
 # Dallas's 1-wire bus
 #
+# CONFIG_W1 is not set
 
 #
 # Hardware Monitoring support
@@ -954,15 +1048,10 @@ CONFIG_I2C_POWERMAC=y
 # CONFIG_HWMON is not set
 # CONFIG_HWMON_VID is not set
 
-#
-# Misc devices
-#
-
 #
 # Multimedia devices
 #
 # CONFIG_VIDEO_DEV is not set
-CONFIG_VIDEO_V4L2=y
 
 #
 # Digital Video Broadcasting Devices
@@ -975,6 +1064,7 @@ CONFIG_VIDEO_V4L2=y
 #
 CONFIG_FIRMWARE_EDID=y
 CONFIG_FB=y
+CONFIG_FB_DDC=y
 CONFIG_FB_CFB_FILLRECT=y
 CONFIG_FB_CFB_COPYAREA=y
 CONFIG_FB_CFB_IMAGEBLIT=y
@@ -1011,6 +1101,7 @@ CONFIG_FB_RADEON_I2C=y
 # CONFIG_FB_3DFX is not set
 # CONFIG_FB_VOODOO1 is not set
 # CONFIG_FB_TRIDENT is not set
+CONFIG_FB_IBM_GXT4500=y
 # CONFIG_FB_VIRTUAL is not set
 
 #
@@ -1158,6 +1249,11 @@ CONFIG_SND_AOA_SOUNDBUS_I2S=m
 #
 # CONFIG_SOUND_PRIME is not set
 
+#
+# HID Devices
+#
+CONFIG_HID=y
+
 #
 # USB support
 #
@@ -1173,6 +1269,7 @@ CONFIG_USB=y
 CONFIG_USB_DEVICEFS=y
 # CONFIG_USB_BANDWIDTH is not set
 # CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_MULTITHREAD_PROBE is not set
 # CONFIG_USB_OTG is not set
 
 #
@@ -1214,13 +1311,13 @@ CONFIG_USB_STORAGE=m
 # CONFIG_USB_STORAGE_JUMPSHOT is not set
 # CONFIG_USB_STORAGE_ALAUDA is not set
 # CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
 # CONFIG_USB_LIBUSUAL is not set
 
 #
 # USB Input Devices
 #
 CONFIG_USB_HID=y
-CONFIG_USB_HIDINPUT=y
 # CONFIG_USB_HIDINPUT_POWERBOOK is not set
 # CONFIG_HID_FF is not set
 CONFIG_USB_HIDDEV=y
@@ -1250,6 +1347,7 @@ CONFIG_USB_HIDDEV=y
 # CONFIG_USB_KAWETH is not set
 # CONFIG_USB_PEGASUS is not set
 # CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET_MII is not set
 # CONFIG_USB_USBNET is not set
 # CONFIG_USB_MON is not set
 
@@ -1267,6 +1365,7 @@ CONFIG_USB_HIDDEV=y
 #
 # CONFIG_USB_EMI62 is not set
 # CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
 # CONFIG_USB_AUERSWALD is not set
 # CONFIG_USB_RIO500 is not set
 # CONFIG_USB_LEGOTOWER is not set
@@ -1274,12 +1373,13 @@ CONFIG_USB_HIDDEV=y
 # CONFIG_USB_LED is not set
 # CONFIG_USB_CYPRESS_CY7C63 is not set
 # CONFIG_USB_CYTHERM is not set
-# CONFIG_USB_PHIDGETKIT is not set
-# CONFIG_USB_PHIDGETSERVO is not set
+# CONFIG_USB_PHIDGET is not set
 # CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
 CONFIG_USB_APPLEDISPLAY=m
 # CONFIG_USB_SISUSBVGA is not set
 # CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
 # CONFIG_USB_TEST is not set
 
 #
@@ -1318,6 +1418,7 @@ CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_ADDR_TRANS=y
 CONFIG_INFINIBAND_MTHCA=m
 CONFIG_INFINIBAND_MTHCA_DEBUG=y
+# CONFIG_INFINIBAND_AMSO1100 is not set
 CONFIG_INFINIBAND_IPOIB=m
 CONFIG_INFINIBAND_IPOIB_DEBUG=y
 # CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
@@ -1346,6 +1447,10 @@ CONFIG_INFINIBAND_ISER=m
 # DMA Devices
 #
 
+#
+# Virtualization
+#
+
 #
 # File systems
 #
@@ -1359,6 +1464,7 @@ CONFIG_EXT3_FS=y
 CONFIG_EXT3_FS_XATTR=y
 CONFIG_EXT3_FS_POSIX_ACL=y
 CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4DEV_FS is not set
 CONFIG_JBD=y
 # CONFIG_JBD_DEBUG is not set
 CONFIG_FS_MBCACHE=y
@@ -1379,6 +1485,7 @@ CONFIG_XFS_FS=m
 CONFIG_XFS_SECURITY=y
 CONFIG_XFS_POSIX_ACL=y
 # CONFIG_XFS_RT is not set
+# CONFIG_GFS2_FS is not set
 # CONFIG_OCFS2_FS is not set
 # CONFIG_MINIX_FS is not set
 # CONFIG_ROMFS_FS is not set
@@ -1414,8 +1521,10 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
 #
 CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
 CONFIG_HUGETLBFS=y
 CONFIG_HUGETLB_PAGE=y
 CONFIG_RAMFS=y
@@ -1539,9 +1648,24 @@ CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
 CONFIG_NLS_UTF8=m
 
+#
+# Distributed Lock Manager
+#
+# CONFIG_DLM is not set
+
+#
+# iSeries device drivers
+#
+# CONFIG_VIOCONS is not set
+CONFIG_VIODASD=y
+CONFIG_VIOCD=m
+CONFIG_VIOTAPE=m
+CONFIG_VIOPATH=y
+
 #
 # Library routines
 #
+CONFIG_BITREVERSE=y
 CONFIG_CRC_CCITT=m
 # CONFIG_CRC16 is not set
 CONFIG_CRC32=y
@@ -1551,6 +1675,7 @@ CONFIG_ZLIB_DEFLATE=m
 CONFIG_TEXTSEARCH=y
 CONFIG_TEXTSEARCH_KMP=m
 CONFIG_PLIST=y
+CONFIG_IOMAP_COPY=y
 
 #
 # Instrumentation Support
@@ -1563,8 +1688,11 @@ CONFIG_OPROFILE=y
 # Kernel hacking
 #
 # CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_MUST_CHECK=y
 CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
 CONFIG_DEBUG_KERNEL=y
 CONFIG_LOG_BUF_SHIFT=17
 CONFIG_DETECT_SOFTLOCKUP=y
@@ -1578,16 +1706,19 @@ CONFIG_DEBUG_MUTEXES=y
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
 # CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
 # CONFIG_DEBUG_INFO is not set
-CONFIG_DEBUG_FS=y
 # CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_LIST is not set
 CONFIG_FORCED_INLINING=y
 # CONFIG_RCU_TORTURE_TEST is not set
 CONFIG_DEBUG_STACKOVERFLOW=y
 CONFIG_DEBUG_STACK_USAGE=y
+# CONFIG_HCALL_STATS is not set
 CONFIG_DEBUGGER=y
 CONFIG_XMON=y
 # CONFIG_XMON_DEFAULT is not set
+CONFIG_XMON_DISASSEMBLY=y
 CONFIG_IRQSTACKS=y
 CONFIG_BOOTX_TEXT=y
 # CONFIG_PPC_EARLY_DEBUG is not set
@@ -1602,7 +1733,12 @@ CONFIG_BOOTX_TEXT=y
 # Cryptographic options
 #
 CONFIG_CRYPTO=y
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=y
@@ -1611,9 +1747,14 @@ CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_TGR192=m
+# CONFIG_CRYPTO_GF128MUL is not set
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_LRW is not set
 CONFIG_CRYPTO_DES=y
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_CAST5=m
index e96521530d21b3add856b834d8ace4750234e42e..030d300cd71c8be2e04000b089363e4787ad9d1e 100644 (file)
@@ -303,5 +303,8 @@ int main(void)
        DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
        DEFINE(CLOCK_REALTIME_RES, TICK_NSEC);
 
+#ifdef CONFIG_BUG
+       DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
+#endif
        return 0;
 }
index 1a3d4de197d2bac728cc2492e3ba9565551d5717..2551c0884afcd178d957ce02304ccef119e5870d 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/cputable.h>
 #include <asm/firmware.h>
+#include <asm/bug.h>
 
 /*
  * System calls.
@@ -634,19 +635,15 @@ _GLOBAL(enter_rtas)
        li      r0,0
        mtcr    r0
 
+#ifdef CONFIG_BUG      
        /* There is no way it is acceptable to get here with interrupts enabled,
         * check it with the asm equivalent of WARN_ON
         */
        lbz     r0,PACASOFTIRQEN(r13)
 1:     tdnei   r0,0
-.section __bug_table,"a"
-       .llong  1b,__LINE__ + 0x1000000, 1f, 2f
-.previous
-.section .rodata,"a"
-1:     .asciz  __FILE__
-2:     .asciz "enter_rtas"
-.previous
-
+       EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
+#endif
+       
        /* Hard-disable interrupts */
        mfmsr   r6
        rldicl  r7,r6,48,1
index 89c836d548096a96110c020a0383f5a711bf40fa..1bb20d841080cb0cbdb739bfd0facd9f5ef496bd 100644 (file)
@@ -744,7 +744,8 @@ static int htlb_check_hinted_area(unsigned long addr, unsigned long len)
        struct vm_area_struct *vma;
 
        vma = find_vma(current->mm, addr);
-       if (!vma || ((addr + len) <= vma->vm_start))
+       if (TASK_SIZE - len >= addr &&
+           (!vma || ((addr + len) <= vma->vm_start)))
                return 0;
 
        return -ENOMEM;
@@ -815,6 +816,8 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                return -EINVAL;
        if (len & ~HPAGE_MASK)
                return -EINVAL;
+       if (len > TASK_SIZE)
+               return -ENOMEM;
 
        if (!cpu_has_feature(CPU_FTR_16M_PAGE))
                return -EINVAL;
@@ -823,9 +826,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        BUG_ON((addr + len)  < addr);
 
        if (test_thread_flag(TIF_32BIT)) {
-               /* Paranoia, caller should have dealt with this */
-               BUG_ON((addr + len) > 0x100000000UL);
-
                curareas = current->mm->context.low_htlb_areas;
 
                /* First see if we can use the hint address */
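
The two hugetlbpage.c hunks above add an explicit "len > TASK_SIZE" rejection and rewrite the hinted-area test to the overflow-safe form "TASK_SIZE - len >= addr" instead of relying on "addr + len" alone. A minimal standalone sketch of that pattern, using hypothetical names (hint_fits, task_size) rather than the kernel's own helpers:

	/* Reject a hinted mapping range without risking unsigned wraparound. */
	static int hint_fits(unsigned long addr, unsigned long len,
			     unsigned long task_size)
	{
		if (len > task_size)		/* task_size - len would wrap */
			return 0;
		return task_size - len >= addr;	/* implies addr + len <= task_size,
						 * with no overflow in the check */
	}

Checking the subtraction on the bounded side avoids the case where a huge len makes addr + len wrap around to a small value and slip past a naive comparison.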
index eaff71e74fb0f8a783465715f73560c877a81c69..0f21bab33f6ce31f7006f487e5b55467fb7d4ff2 100644 (file)
@@ -153,6 +153,7 @@ define_machine(lite52xx) {
        .name           = "lite52xx",
        .probe          = lite52xx_probe,
        .setup_arch     = lite52xx_setup_arch,
+       .init           = mpc52xx_declare_of_platform_devices,
        .init_IRQ       = mpc52xx_init_irq,
        .get_irq        = mpc52xx_get_irq,
        .show_cpuinfo   = lite52xx_show_cpuinfo,
index 8331ff457770dde2063e5571fdae04baba19aaca..cc40889074bd1f6c88e07a619f40186a047546d3 100644 (file)
@@ -116,11 +116,12 @@ unmap_regs:
        if (xlb) iounmap(xlb);
 }
 
-static int __init
+void __init
 mpc52xx_declare_of_platform_devices(void)
 {
        /* Find every child of the SOC node and add it to of_platform */
-       return of_platform_bus_probe(NULL, NULL, NULL);
+       if (of_platform_bus_probe(NULL, NULL, NULL))
+               printk(KERN_ERR __FILE__ ": "
+                       "Error while probing of_platform bus\n");
 }
 
-device_initcall(mpc52xx_declare_of_platform_devices);
index e3e929e1b4606cada11840a13f89aed3c5108805..c1f4502a3c6a02827773db41befced91abce8d8a 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <asm/system.h>
 #include <asm/paca.h>
+#include <asm/firmware.h>
 #include <asm/iseries/it_lp_queue.h>
 #include <asm/iseries/hv_lp_event.h>
 #include <asm/iseries/hv_call_event.h>
@@ -318,6 +319,9 @@ static int __init proc_lpevents_init(void)
 {
        struct proc_dir_entry *e;
 
+       if (!firmware_has_feature(FW_FEATURE_ISERIES))
+               return 0;
+
        e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
        if (e)
                e->proc_fops = &proc_lpevents_operations;
index cff15ae24f6ba8bf1184ce7cf2ad71e5f5a063f8..1ad0e4aaad1a0d5ba514e4d1d136c9cf7d40907c 100644 (file)
@@ -38,6 +38,7 @@
 #include <asm/uaccess.h>
 #include <asm/paca.h>
 #include <asm/abs_addr.h>
+#include <asm/firmware.h>
 #include <asm/iseries/vio.h>
 #include <asm/iseries/mf.h>
 #include <asm/iseries/hv_lp_config.h>
@@ -1235,6 +1236,9 @@ static int __init mf_proc_init(void)
        char name[2];
        int i;
 
+       if (!firmware_has_feature(FW_FEATURE_ISERIES))
+               return 0;
+
        mf_proc_root = proc_mkdir("iSeries/mf", NULL);
        if (!mf_proc_root)
                return 1;
index c241413629ac155266cb7fcddb8fa68da759a3c6..b54e37101e6972ba75650692385af460012c0179 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/processor.h>
 #include <asm/time.h>
 #include <asm/lppaca.h>
+#include <asm/firmware.h>
 #include <asm/iseries/hv_call_xm.h>
 
 #include "processor_vpd.h"
 
 static int __init iseries_proc_create(void)
 {
-       struct proc_dir_entry *e = proc_mkdir("iSeries", 0);
+       struct proc_dir_entry *e;
+
+       if (!firmware_has_feature(FW_FEATURE_ISERIES))
+               return 0;
+
+       e = proc_mkdir("iSeries", 0);
        if (!e)
                return 1;
 
@@ -106,6 +112,9 @@ static int __init iseries_proc_init(void)
 {
        struct proc_dir_entry *e;
 
+       if (!firmware_has_feature(FW_FEATURE_ISERIES))
+               return 0;
+
        e = create_proc_entry("iSeries/titanTod", S_IFREG|S_IRUGO, NULL);
        if (e)
                e->proc_fops = &proc_titantod_operations;
index bdf2afbb60c1c23556e0f4c38a6e7a082ce2e580..cce7e309340c99f933390dd5cf81f65a0fb0ab39 100644 (file)
@@ -527,7 +527,8 @@ static void __init iSeries_fixup_klimit(void)
 static int __init iSeries_src_init(void)
 {
         /* clear the progress line */
-        ppc_md.progress(" ", 0xffff);
+       if (firmware_has_feature(FW_FEATURE_ISERIES))
+               ppc_md.progress(" ", 0xffff);
         return 0;
 }
 
index 84e7ee2c086f59f21bec9a99b0ce368b07b11676..a6799ed34a66c07555c71e910586328112eb8f79 100644 (file)
@@ -42,6 +42,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/prom.h>
+#include <asm/firmware.h>
 #include <asm/iseries/hv_types.h>
 #include <asm/iseries/hv_lp_event.h>
 #include <asm/iseries/hv_lp_config.h>
@@ -183,6 +184,9 @@ static int __init vio_proc_init(void)
 {
        struct proc_dir_entry *e;
 
+       if (!firmware_has_feature(FW_FEATURE_ISERIES))
+               return 0;
+
        e = create_proc_entry("iSeries/config", 0, NULL);
        if (e)
                e->proc_fops = &proc_viopath_operations;
index f12d5c69e74dea275d772308cdcf311da4f54956..50855d4fd5a08204d1cfcabdc4bc98ada2005fd6 100644 (file)
@@ -254,7 +254,6 @@ static void __init maple_init_IRQ(void)
                printk(KERN_DEBUG "OpenPIC addr: %lx, has ISUs: %d\n",
                       openpic_addr, has_isus);
        }
-       of_node_put(root);
 
        BUG_ON(openpic_addr == 0);
 
index 89d6e295dbf7171011ffd278fbf93efc06f45eb7..bea7d1bb1a3b48e5c24c05c15068430b9c8859c2 100644 (file)
@@ -129,7 +129,6 @@ static __init void pas_init_IRQ(void)
        }
        openpic_addr = of_read_number(opprop, naddr);
        printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
-       of_node_put(root);
 
        mpic = mpic_alloc(mpic_node, openpic_addr, MPIC_PRIMARY, 0, 0,
                          " PAS-OPIC  ");
index c00cfed7af2c0a8b2531ff8e62f620ba971e21c2..5c7e38789897b4da98d3fae627d4881e4bde39a1 100644 (file)
@@ -26,7 +26,7 @@
 BEGIN_FTR_SECTION;                                             \
        mfspr   r0,SPRN_PURR;           /* get PURR and */      \
        std     r0,STK_PARM(r6)(r1);    /* save for later */    \
-END_FTR_SECTION_IFCLR(CPU_FTR_PURR);
+END_FTR_SECTION_IFSET(CPU_FTR_PURR);
        
 /*
  * postcall is performed immediately before function return which
@@ -43,7 +43,7 @@ BEGIN_FTR_SECTION;                                            \
        mfspr   r8,SPRN_PURR;           /* PURR after */        \
        ld      r6,STK_PARM(r6)(r1);    /* PURR before */       \
        subf    r6,r6,r8;               /* delta */             \
-END_FTR_SECTION_IFCLR(CPU_FTR_PURR);                           \
+END_FTR_SECTION_IFSET(CPU_FTR_PURR);                           \
        ld      r5,STK_PARM(r5)(r1);    /* timebase before */   \
        subf    r5,r5,r7;               /* time delta */        \
                                                                \
@@ -66,7 +66,7 @@ BEGIN_FTR_SECTION;                                            \
        ld      r7,HCALL_STAT_PURR(r4); /* PURR */              \
        add     r7,r7,r6;                                       \
        std     r7,HCALL_STAT_PURR(r4);                         \
-END_FTR_SECTION_IFCLR(CPU_FTR_PURR);                           \
+END_FTR_SECTION_IFSET(CPU_FTR_PURR);                           \
 1:
 #else
 #define HCALL_INST_PRECALL
@@ -145,6 +145,7 @@ _GLOBAL(plpar_hcall9)
 
        HVSC                            /* invoke the hypervisor */
 
+       mr      r0,r12
        ld      r12,STK_PARM(r4)(r1)
        std     r4,  0(r12)
        std     r5,  8(r12)
@@ -154,7 +155,7 @@ _GLOBAL(plpar_hcall9)
        std     r9, 40(r12)
        std     r10,48(r12)
        std     r11,56(r12)
-       std     r12,64(r12)
+       std     r0, 64(r12)
 
        HCALL_INST_POSTCALL
 
index 80181c4c49ebd7c356506ca45d8625b316c2c509..3ddc04925d50d01f85266379ffe6d96a0a406aa0 100644 (file)
@@ -34,7 +34,7 @@ DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats);
  */
 static void *hc_start(struct seq_file *m, loff_t *pos)
 {
-       if ((int)*pos < HCALL_STAT_ARRAY_SIZE)
+       if ((int)*pos < (HCALL_STAT_ARRAY_SIZE-1))
                return (void *)(unsigned long)(*pos + 1);
 
        return NULL;
@@ -57,7 +57,7 @@ static int hc_show(struct seq_file *m, void *p)
        struct hcall_stats *hs = (struct hcall_stats *)m->private;
 
        if (hs[h_num].num_calls) {
-               if (!cpu_has_feature(CPU_FTR_PURR))
+               if (cpu_has_feature(CPU_FTR_PURR))
                        seq_printf(m, "%lu %lu %lu %lu\n", h_num<<2,
                                   hs[h_num].num_calls,
                                   hs[h_num].tb_total,
index b5b2b1103de8d58dd2f49b6b766fe4d429913c46..81d172d650389ee206eef128d912028a7bcd4933 100644 (file)
@@ -224,7 +224,6 @@ static void xics_unmask_irq(unsigned int virq)
 static void xics_mask_real_irq(unsigned int irq)
 {
        int call_status;
-       unsigned int server;
 
        if (irq == XICS_IPI)
                return;
@@ -236,9 +235,9 @@ static void xics_mask_real_irq(unsigned int irq)
                return;
        }
 
-       server = get_irq_server(irq);
        /* Have to set XIVE to 0xff to be able to remove a slot */
-       call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
+       call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq,
+                               default_server, 0xff);
        if (call_status != 0) {
                printk(KERN_ERR "xics_disable_irq: irq=%u: ibm_set_xive(0xff)"
                       " returned %d\n", irq, call_status);
index 04d4917eb3035c4140d184e1c740b6fc9eb535cd..2621a7e72d2d1f0ec7839c0ca1ba7b309600a198 100644 (file)
@@ -12,7 +12,6 @@ obj-$(CONFIG_MMIO_NVRAM)      += mmio_nvram.o
 obj-$(CONFIG_FSL_SOC)          += fsl_soc.o
 obj-$(CONFIG_TSI108_BRIDGE)    += tsi108_pci.o tsi108_dev.o
 obj-$(CONFIG_QUICC_ENGINE)     += qe_lib/
-obj-$(CONFIG_MTD)              += rom.o
 
 ifeq ($(CONFIG_PPC_MERGE),y)
 obj-$(CONFIG_PPC_I8259)                += i8259.o
@@ -21,5 +20,6 @@ endif
 
 # Temporary hack until we have migrated to asm-powerpc
 ifeq ($(ARCH),powerpc)
+obj-$(CONFIG_MTD)              += rom.o
 obj-$(CONFIG_CPM2)             += cpm2_common.o cpm2_pic.o
 endif
index 4388b3309e0cbac3d3879e504d18021c32733378..eca507050e47db9e575df233afbda59b0ed45cc7 100644 (file)
@@ -164,11 +164,14 @@ startup_continue:
        srl     %r7,28
        clr     %r6,%r7                 # compare cc with last access code
        be      .Lsame-.LPG1(%r13)
-       b       .Lchkmem-.LPG1(%r13)
+       lhi     %r8,0                   # no program checks
+       b       .Lsavchk-.LPG1(%r13)
 .Lsame:
        ar      %r5,%r1                 # add 128KB to end of chunk
        bno     .Lloop-.LPG1(%r13)      # r1 < 0x80000000 -> loop
 .Lchkmem:                              # > 2GB or tprot got a program check
+       lhi     %r8,1                   # set program check flag
+.Lsavchk:
        clr     %r4,%r5                 # chunk size > 0?
        be      .Lchkloop-.LPG1(%r13)
        st      %r4,0(%r3)              # store start address of chunk
@@ -190,8 +193,15 @@ startup_continue:
        je      .Ldonemem               # if not, leave
        chi     %r10,0                  # do we have chunks left?
        je      .Ldonemem
+       chi     %r8,1                   # program check ?
+       je      .Lpgmchk
+       lr      %r4,%r5                 # potential new chunk
+       alr     %r5,%r1                 # add 128KB to end of chunk
+       j       .Llpcnt
+.Lpgmchk:
        alr     %r5,%r1                 # add 128KB to end of chunk
        lr      %r4,%r5                 # potential new chunk
+.Llpcnt:
        clr     %r5,%r9                 # should we go on?
        jl      .Lloop
 .Ldonemem:
index c526279e11239dc65e5c8c8a0b585d3ca1556a04..6ba3f4512dd1a1ad26a4d4e0993c9b6feaaf6bc6 100644 (file)
@@ -172,12 +172,15 @@ startup_continue:
        srl     %r7,28
        clr     %r6,%r7                 # compare cc with last access code
        je      .Lsame
-       j       .Lchkmem
+       lghi    %r8,0                   # no program checks
+       j       .Lsavchk
 .Lsame:
        algr    %r5,%r1                 # add 128KB to end of chunk
                                        # no need to check here,
        brc     12,.Lloop               # this is the same chunk
 .Lchkmem:                              # > 16EB or tprot got a program check
+       lghi    %r8,1                   # set program check flag
+.Lsavchk:
        clgr    %r4,%r5                 # chunk size > 0?
        je      .Lchkloop
        stg     %r4,0(%r3)              # store start address of chunk
@@ -204,8 +207,15 @@ startup_continue:
        chi     %r10, 0                 # do we have chunks left?
        je      .Ldonemem
 .Lhsaskip:
+       chi     %r8,1                   # program check ?
+       je      .Lpgmchk
+       lgr     %r4,%r5                 # potential new chunk
+       algr    %r5,%r1                 # add 128KB to end of chunk
+       j       .Llpcnt
+.Lpgmchk:
        algr    %r5,%r1                 # add 128KB to end of chunk
        lgr     %r4,%r5                 # potential new chunk
+.Llpcnt:
        clgr    %r5,%r9                 # should we go on?
        jl      .Lloop
 .Ldonemem:
index 49ef206ec8806c6a6dc4ed0b0d9e211979065cf1..5d8ee3baac147e0b153dab2d4ad0152ab22629f9 100644 (file)
@@ -476,7 +476,7 @@ static void __init setup_memory_end(void)
        int i;
 
        memory_size = real_size = 0;
-       max_phys = VMALLOC_END - VMALLOC_MIN_SIZE;
+       max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE;
        memory_end &= PAGE_MASK;
 
        max_mem = memory_end ? min(max_phys, memory_end) : max_phys;
index 19090f7d4f517e6799619bf1b4398d750994285c..c0cd255fddbd04ccce6895bac1cd08b940be8535 100644 (file)
@@ -794,7 +794,10 @@ static int __init topology_init(void)
        int ret;
 
        for_each_possible_cpu(cpu) {
-               ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
+               struct cpu *c = &per_cpu(cpu_devices, cpu);
+
+               c->hotpluggable = 1;
+               ret = register_cpu(c, cpu);
                if (ret)
                        printk(KERN_WARNING "topology_init: register_cpu %d "
                               "failed (%d)\n", cpu, ret);
index 633249c3ba9180ea2aa75534b0b64458a6825191..49c3e46b406573011d591bbcbd36e6825deb0578 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/hardirq.h>
 #include <linux/mm.h>
 #include <asm/uaccess.h>
 #include <asm/futex.h>
@@ -18,6 +19,8 @@ static inline int __handle_fault(struct mm_struct *mm, unsigned long address,
        struct vm_area_struct *vma;
        int ret = -EFAULT;
 
+       if (in_atomic())
+               return ret;
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (unlikely(!vma))
index bbaca66fa29356af1b0c7ea5589622f28aec4519..56a0214e9928c371c6cce379e599aaf0f61ef8cd 100644 (file)
@@ -258,8 +258,6 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
 {
        int oldval = 0, newval, ret;
 
-       pagefault_disable();
-
        switch (op) {
        case FUTEX_OP_SET:
                __futex_atomic_op("lr %2,%5\n",
@@ -284,7 +282,6 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
        default:
                ret = -ENOSYS;
        }
-       pagefault_enable();
        *old = oldval;
        return ret;
 }
index 829698f6d0490962c387f884f550678b90f8db19..49802f1bee9487484b066915160388d5d683234a 100644 (file)
@@ -69,6 +69,11 @@ static void nvidia_bugs(void)
 
 static void ati_bugs(void)
 {
+       if (timer_over_8254 == 1) {
+               timer_over_8254 = 0;
+               printk(KERN_INFO
+               "ATI board detected. Disabling timer routing over 8254.\n");
+       }
 }
 
 static void intel_bugs(void)
index 2a1dcd5f69c2599e3b3e4b41b355a10f672f79d7..d7bad90a5ad80d6b4d651f23d87d30e2dfec8580 100644 (file)
@@ -55,6 +55,10 @@ int sis_apic_bug; /* not actually supported, dummy for compile */
 
 static int no_timer_check;
 
+static int disable_timer_pin_1 __initdata;
+
+int timer_over_8254 __initdata = 1;
+
 /* Where if anywhere is the i8259 connect in external int mode */
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
@@ -350,6 +354,29 @@ static int __init disable_ioapic_setup(char *str)
 }
 early_param("noapic", disable_ioapic_setup);
 
+/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
+static int __init disable_timer_pin_setup(char *arg)
+{
+       disable_timer_pin_1 = 1;
+       return 1;
+}
+__setup("disable_timer_pin_1", disable_timer_pin_setup);
+
+static int __init setup_disable_8254_timer(char *s)
+{
+       timer_over_8254 = -1;
+       return 1;
+}
+static int __init setup_enable_8254_timer(char *s)
+{
+       timer_over_8254 = 2;
+       return 1;
+}
+
+__setup("disable_8254_timer", setup_disable_8254_timer);
+__setup("enable_8254_timer", setup_enable_8254_timer);
+
+
 /*
  * Find the IRQ entry number of a certain pin.
  */
@@ -1568,33 +1595,10 @@ static inline void unlock_ExtINT_logic(void)
  * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
  * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
  * fanatically on his truly buggy board.
+ *
+ * FIXME: really need to revamp this for modern platforms only.
  */
-
-static int try_apic_pin(int apic, int pin, char *msg)
-{
-       apic_printk(APIC_VERBOSE, KERN_INFO
-                   "..TIMER: trying IO-APIC=%d PIN=%d %s",
-                   apic, pin, msg);
-
-       /*
-        * Ok, does IRQ0 through the IOAPIC work?
-        */
-       if (!no_timer_check && timer_irq_works()) {
-               nmi_watchdog_default();
-               if (nmi_watchdog == NMI_IO_APIC) {
-                       disable_8259A_irq(0);
-                       setup_nmi();
-                       enable_8259A_irq(0);
-               }
-               return 1;
-       }
-       clear_IO_APIC_pin(apic, pin);
-       apic_printk(APIC_QUIET, KERN_ERR " .. failed\n");
-       return 0;
-}
-
-/* The function from hell */
-static void check_timer(void)
+static inline void check_timer(void)
 {
        int apic1, pin1, apic2, pin2;
        int vector;
@@ -1615,43 +1619,61 @@ static void check_timer(void)
         */
        apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
        init_8259A(1);
+       if (timer_over_8254 > 0)
+               enable_8259A_irq(0);
 
        pin1  = find_isa_irq_pin(0, mp_INT);
        apic1 = find_isa_irq_apic(0, mp_INT);
        pin2  = ioapic_i8259.pin;
        apic2 = ioapic_i8259.apic;
 
-       /* Do this first, otherwise we get double interrupts on ATI boards */
-       if ((pin1 != -1) && try_apic_pin(apic1, pin1,"with 8259 IRQ0 disabled"))
-               return;
+       apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
+               vector, apic1, pin1, apic2, pin2);
 
-       /* Now try again with IRQ0 8259A enabled.
-          Assumes timer is on IO-APIC 0 ?!? */
-       enable_8259A_irq(0);
-       unmask_IO_APIC_irq(0);
-       if (try_apic_pin(apic1, pin1, "with 8259 IRQ0 enabled"))
-               return;
-       disable_8259A_irq(0);
-
-       /* Always try pin0 and pin2 on APIC 0 to handle buggy timer overrides
-          on Nvidia boards */
-       if (!(apic1 == 0 && pin1 == 0) &&
-           try_apic_pin(0, 0, "fallback with 8259 IRQ0 disabled"))
-               return;
-       if (!(apic1 == 0 && pin1 == 2) &&
-           try_apic_pin(0, 2, "fallback with 8259 IRQ0 disabled"))
-               return;
+       if (pin1 != -1) {
+               /*
+                * Ok, does IRQ0 through the IOAPIC work?
+                */
+               unmask_IO_APIC_irq(0);
+               if (!no_timer_check && timer_irq_works()) {
+                       nmi_watchdog_default();
+                       if (nmi_watchdog == NMI_IO_APIC) {
+                               disable_8259A_irq(0);
+                               setup_nmi();
+                               enable_8259A_irq(0);
+                       }
+                       if (disable_timer_pin_1 > 0)
+                               clear_IO_APIC_pin(0, pin1);
+                       return;
+               }
+               clear_IO_APIC_pin(apic1, pin1);
+               apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
+                               "connected to IO-APIC\n");
+       }
 
-       /* Then try pure 8259A routing on the 8259 as reported by BIOS*/
-       enable_8259A_irq(0);
+       apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
+                               "through the 8259A ... ");
        if (pin2 != -1) {
+               apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
+                       apic2, pin2);
+               /*
+                * legacy devices should be connected to IO APIC #0
+                */
                setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
-               if (try_apic_pin(apic2,pin2,"8259A broadcast ExtINT from BIOS"))
+               if (timer_irq_works()) {
+                       apic_printk(APIC_VERBOSE," works.\n");
+                       nmi_watchdog_default();
+                       if (nmi_watchdog == NMI_IO_APIC) {
+                               setup_nmi();
+                       }
                        return;
+               }
+               /*
+                * Cleanup, just in case ...
+                */
+               clear_IO_APIC_pin(apic2, pin2);
        }
-
-       /* Tried all possibilities to go through the IO-APIC. Now come the
-          really cheesy fallbacks. */
+       apic_printk(APIC_VERBOSE," failed.\n");
 
        if (nmi_watchdog == NMI_IO_APIC) {
                printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
index 88aeccbafaaf9e46ed4678b573e0f28cc6a6f5f2..d9b651ffcdc0c21335f1c15e1e7ef7e3d86361cf 100644 (file)
@@ -321,13 +321,16 @@ static int set_lcd_status(struct backlight_device *bd)
 static unsigned long write_lcd(const char *buffer, unsigned long count)
 {
        int value;
-       int ret = count;
+       int ret;
 
        if (sscanf(buffer, " brightness : %i", &value) == 1 &&
-           value >= 0 && value < HCI_LCD_BRIGHTNESS_LEVELS)
+           value >= 0 && value < HCI_LCD_BRIGHTNESS_LEVELS) {
                ret = set_lcd(value);
-       else
+               if (ret == 0)
+                       ret = count;
+       } else {
                ret = -EINVAL;
+       }
        return ret;
 }
 
index b34e0a958d0f37f3ef799c1659a55f4558eeb298..da21552d2b1c0a1013bb5942d583c4b8038230c0 100644 (file)
@@ -381,7 +381,7 @@ config PATA_OPTI
          If unsure, say N.
 
 config PATA_OPTIDMA
-       tristate "OPTI FireStar PATA support (Veyr Experimental)"
+       tristate "OPTI FireStar PATA support (Very Experimental)"
        depends on PCI && EXPERIMENTAL
        help
          This option enables DMA/PIO support for the later OPTi
index 47082df7199e113ecae36156604f1b7a39c2549c..dfb306057cf465c9e5f48eff8abdb55e633e6f6a 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME       "pata_hpt37x"
-#define DRV_VERSION    "0.5.1"
+#define DRV_VERSION    "0.5.2"
 
 struct hpt_clock {
        u8      xfer_speed;
@@ -416,7 +416,7 @@ static const char *bad_ata100_5[] = {
 
 static unsigned long hpt370_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
 {
-       if (adev->class != ATA_DEV_ATA) {
+       if (adev->class == ATA_DEV_ATA) {
                if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
                        mask &= ~ATA_MASK_UDMA;
                if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
@@ -749,7 +749,7 @@ static void hpt37x_bmdma_stop(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-       int mscreg = 0x50 + 2 * ap->port_no;
+       int mscreg = 0x50 + 4 * ap->port_no;
        u8 bwsr_stat, msc_stat;
 
        pci_read_config_byte(pdev, 0x6A, &bwsr_stat);
index 7c95c762950fde3efa8253e134662f2db9fc739b..62462190e07e877634053cf20e41643c8ea227cf 100644 (file)
@@ -765,47 +765,34 @@ static inline struct bio *pkt_get_list_first(struct bio **list_head, struct bio
  */
 static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
 {
-       char sense[SCSI_SENSE_BUFFERSIZE];
-       request_queue_t *q;
+       request_queue_t *q = bdev_get_queue(pd->bdev);
        struct request *rq;
-       DECLARE_COMPLETION_ONSTACK(wait);
-       int err = 0;
+       int ret = 0;
 
-       q = bdev_get_queue(pd->bdev);
+       rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
+                            WRITE : READ, __GFP_WAIT);
+
+       if (cgc->buflen) {
+               if (blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, __GFP_WAIT))
+                       goto out;
+       }
+
+       rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+       memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
+       if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
+               memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
 
-       rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? WRITE : READ,
-                            __GFP_WAIT);
-       rq->errors = 0;
-       rq->rq_disk = pd->bdev->bd_disk;
-       rq->bio = NULL;
-       rq->buffer = NULL;
        rq->timeout = 60*HZ;
-       rq->data = cgc->buffer;
-       rq->data_len = cgc->buflen;
-       rq->sense = sense;
-       memset(sense, 0, sizeof(sense));
-       rq->sense_len = 0;
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->cmd_flags |= REQ_HARDBARRIER;
        if (cgc->quiet)
                rq->cmd_flags |= REQ_QUIET;
-       memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
-       if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
-               memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
-       rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
-
-       rq->ref_count++;
-       rq->end_io_data = &wait;
-       rq->end_io = blk_end_sync_rq;
-       elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
-       generic_unplug_device(q);
-       wait_for_completion(&wait);
-
-       if (rq->errors)
-               err = -EIO;
 
+       blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
+       ret = rq->errors;
+out:
        blk_put_request(rq);
-       return err;
+       return ret;
 }
 
 /*
index aeefec97fdee2ca40e706e0c4348f0fca68ca892..6bdf593081d8d23f20dc0a5692c8dfec6d29390a 100644 (file)
@@ -117,10 +117,17 @@ static struct usb_device_id blacklist_ids[] = {
 
        /* IBM/Lenovo ThinkPad with Broadcom chip */
        { USB_DEVICE(0x0a5c, 0x201e), .driver_info = HCI_WRONG_SCO_MTU },
+       { USB_DEVICE(0x0a5c, 0x2110), .driver_info = HCI_WRONG_SCO_MTU },
 
        /* ANYCOM Bluetooth USB-200 and USB-250 */
        { USB_DEVICE(0x0a5c, 0x2111), .driver_info = HCI_RESET },
 
+       /* HP laptop with Broadcom chip */
+       { USB_DEVICE(0x03f0, 0x171d), .driver_info = HCI_WRONG_SCO_MTU },
+
+       /* Dell laptop with Broadcom chip */
+       { USB_DEVICE(0x413c, 0x8126), .driver_info = HCI_WRONG_SCO_MTU },
+
        /* Microsoft Wireless Transceiver for Bluetooth 2.0 */
        { USB_DEVICE(0x045e, 0x009c), .driver_info = HCI_RESET },
 
index 5eabe47b0bc842f4abc60599b561dfa7229fbcdf..433305062fb8aa4f7c070d4c8ae7e968d766be1b 100644 (file)
@@ -606,9 +606,9 @@ static int iiDownloadAll(i2eBordStrPtr, loadHdrStrPtr, int, int);
 // code and returning.
 //
 #define COMPLETE(pB,code) \
-       if(1){ \
+       do { \
                 pB->i2eError = code; \
                 return (code == I2EE_GOOD);\
-       }
+       } while (0)
 
 #endif   // I2ELLIS_H
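
The hunk above rewrites the COMPLETE() macro from an "if(1){ ... }" wrapper to the usual "do { ... } while (0)" idiom. A small illustrative sketch of why the do/while(0) form is preferred for statement-like macros (the names here are hypothetical, not from the driver):

	void thing_a(int);
	void thing_b(int);

	#define DO_THING(x)	do { thing_a(x); thing_b(x); } while (0)

	void example(int cond, int x)
	{
		if (cond)
			DO_THING(x);	/* expands to exactly one statement,  */
		else			/* so the else pairs with this if      */
			thing_b(x);
	}

With the if(1) form, the semicolon after the macro call would leave the following else without a matching if (a compile error), and without the semicolon the else could silently bind to the macro's internal if instead.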
index 3ece6923134359a9e70c841e8e592b589ea864ab..5c9f67f98d10b43e58b2736d07ca76b8f498faac 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/init.h>
 #include <linux/connector.h>
 #include <asm/atomic.h>
+#include <asm/unaligned.h>
 
 #include <linux/cn_proc.h>
 
@@ -60,7 +61,7 @@ void proc_fork_connector(struct task_struct *task)
        ev = (struct proc_event*)msg->data;
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
-       ev->timestamp_ns = timespec_to_ns(&ts);
+       put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
        ev->what = PROC_EVENT_FORK;
        ev->event_data.fork.parent_pid = task->real_parent->pid;
        ev->event_data.fork.parent_tgid = task->real_parent->tgid;
@@ -88,7 +89,7 @@ void proc_exec_connector(struct task_struct *task)
        ev = (struct proc_event*)msg->data;
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
-       ev->timestamp_ns = timespec_to_ns(&ts);
+       put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
        ev->what = PROC_EVENT_EXEC;
        ev->event_data.exec.process_pid = task->pid;
        ev->event_data.exec.process_tgid = task->tgid;
@@ -124,7 +125,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
                return;
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
-       ev->timestamp_ns = timespec_to_ns(&ts);
+       put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
 
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
@@ -146,7 +147,7 @@ void proc_exit_connector(struct task_struct *task)
        ev = (struct proc_event*)msg->data;
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
-       ev->timestamp_ns = timespec_to_ns(&ts);
+       put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
        ev->what = PROC_EVENT_EXIT;
        ev->event_data.exit.process_pid = task->pid;
        ev->event_data.exit.process_tgid = task->tgid;
@@ -181,7 +182,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
        ev = (struct proc_event*)msg->data;
        msg->seq = rcvd_seq;
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
-       ev->timestamp_ns = timespec_to_ns(&ts);
+       put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
        ev->cpu = -1;
        ev->what = PROC_EVENT_NONE;
        ev->event_data.ack.err = err;
index e1989f3a268404226155f0b9ef93bcce4c5fc747..9367c4cfe936f533f9500cd10f73a2b293dfd538 100644 (file)
@@ -564,13 +564,4 @@ config I2C_PNX
          This driver can also be built as a module.  If so, the module
          will be called i2c-pnx.
 
-config I2C_PNX_EARLY
-       bool "Early initialization for I2C on PNXxxxx"
-       depends on I2C_PNX=y
-       help
-         Under certain circumstances one may need to make sure I2C on PNXxxxx
-         is initialized earlier than some other driver that depends on it
-         (for instance, that might be USB in case of PNX4008). With this
-         option turned on you can guarantee that.
-
 endmenu
index bbc8e3a7ff556c1eb8306b56169f2e36c28d0f14..490173611d6b615013b2d87a7984af9aec7b04d2 100644 (file)
@@ -529,6 +529,8 @@ mv64xxx_i2c_probe(struct platform_device *pd)
        platform_set_drvdata(pd, drv_data);
        i2c_set_adapdata(&drv_data->adapter, drv_data);
 
+       mv64xxx_i2c_hw_init(drv_data);
+
        if (request_irq(drv_data->irq, mv64xxx_i2c_intr, 0,
                        MV64XXX_I2C_CTLR_NAME, drv_data)) {
                dev_err(&drv_data->adapter.dev,
@@ -542,8 +544,6 @@ mv64xxx_i2c_probe(struct platform_device *pd)
                goto exit_free_irq;
        }
 
-       mv64xxx_i2c_hw_init(drv_data);
-
        return 0;
 
        exit_free_irq:
index de0bca77e92697acb0fb6b10aa9d4c3a63d92e18..17376feb1acc6b95c29196ac538515b9a58ead4d 100644 (file)
@@ -305,8 +305,7 @@ static int i2c_pnx_master_rcv(struct i2c_adapter *adap)
        return 0;
 }
 
-static irqreturn_t
-i2c_pnx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
 {
        u32 stat, ctl;
        struct i2c_adapter *adap = dev_id;
@@ -699,10 +698,6 @@ MODULE_AUTHOR("Vitaly Wool, Dennis Kovalev <source@mvista.com>");
 MODULE_DESCRIPTION("I2C driver for Philips IP3204-based I2C busses");
 MODULE_LICENSE("GPL");
 
-#ifdef CONFIG_I2C_PNX_EARLY
 /* We need to make sure I2C is initialized before USB */
 subsys_initcall(i2c_adap_pnx_init);
-#else
-mudule_init(i2c_adap_pnx_init);
-#endif
 module_exit(i2c_adap_pnx_exit);
index 420377c86422b229008e11361e079cca6556346e..3fcb646e2073649ca8d330da786259dd400df071 100644 (file)
@@ -209,6 +209,7 @@ m41t00_set(void *arg)
        buf[m41t00_chip->hour] = (buf[m41t00_chip->hour] & ~0x3f) | (hour& 0x3f);
        buf[m41t00_chip->day] = (buf[m41t00_chip->day] & ~0x3f) | (day & 0x3f);
        buf[m41t00_chip->mon] = (buf[m41t00_chip->mon] & ~0x1f) | (mon & 0x1f);
+       buf[m41t00_chip->year] = year;
 
        if (i2c_master_send(save_client, wbuf, 9) < 0)
                dev_err(&save_client->dev, "m41t00_set: Write error\n");
index 3e31f1d265c9e7d403d57a19ac87d7a4c4c443bd..b05378a3d673fb67de094c3c0e4c33e9c7836d87 100644 (file)
@@ -95,16 +95,32 @@ struct device_driver i2c_adapter_driver = {
        .bus = &i2c_bus_type,
 };
 
+/* ------------------------------------------------------------------------- */
+
+/* I2C bus adapters -- one roots each I2C or SMBUS segment */
+
 static void i2c_adapter_class_dev_release(struct class_device *dev)
 {
        struct i2c_adapter *adap = class_dev_to_i2c_adapter(dev);
        complete(&adap->class_dev_released);
 }
 
+static ssize_t i2c_adapter_show_name(struct class_device *cdev, char *buf)
+{
+       struct i2c_adapter *adap = class_dev_to_i2c_adapter(cdev);
+       return sprintf(buf, "%s\n", adap->name);
+}
+
+static struct class_device_attribute i2c_adapter_attrs[] = {
+       __ATTR(name, S_IRUGO, i2c_adapter_show_name, NULL),
+       { },
+};
+
 struct class i2c_adapter_class = {
-       .owner =        THIS_MODULE,
-       .name =         "i2c-adapter",
-       .release =      &i2c_adapter_class_dev_release,
+       .owner                  = THIS_MODULE,
+       .name                   = "i2c-adapter",
+       .class_dev_attrs        = i2c_adapter_attrs,
+       .release                = &i2c_adapter_class_dev_release,
 };
 
 static ssize_t show_adapter_name(struct device *dev, struct device_attribute *attr, char *buf)
@@ -175,8 +191,12 @@ int i2c_add_adapter(struct i2c_adapter *adap)
         * If the parent pointer is not set up,
         * we add this adapter to the host bus.
         */
-       if (adap->dev.parent == NULL)
+       if (adap->dev.parent == NULL) {
                adap->dev.parent = &platform_bus;
+               printk(KERN_WARNING "**WARNING** I2C adapter driver [%s] "
+                      "forgot to specify physical device; fix it!\n",
+                      adap->name);
+       }
        sprintf(adap->dev.bus_id, "i2c-%d", adap->nr);
        adap->dev.driver = &i2c_adapter_driver;
        adap->dev.release = &i2c_adapter_dev_release;
index ffdffb6379efa944b8972bea3c8cbea7ad6194a2..524e65de4398b73cd9e97324e7f8ecdd1b591376 100644 (file)
@@ -46,6 +46,8 @@ static atiixp_ide_timing mdma_timing[] = {
 
 static int save_mdma_mode[4];
 
+static DEFINE_SPINLOCK(atiixp_lock);
+
 /**
  *     atiixp_ratemask         -       compute rate mask for ATIIXP IDE
  *     @drive: IDE drive to compute for
@@ -105,7 +107,7 @@ static int atiixp_ide_dma_host_on(ide_drive_t *drive)
        unsigned long flags;
        u16 tmp16;
 
-       spin_lock_irqsave(&ide_lock, flags);
+       spin_lock_irqsave(&atiixp_lock, flags);
 
        pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
        if (save_mdma_mode[drive->dn])
@@ -114,7 +116,7 @@ static int atiixp_ide_dma_host_on(ide_drive_t *drive)
                tmp16 |= (1 << drive->dn);
        pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
 
-       spin_unlock_irqrestore(&ide_lock, flags);
+       spin_unlock_irqrestore(&atiixp_lock, flags);
 
        return __ide_dma_host_on(drive);
 }
@@ -125,13 +127,13 @@ static int atiixp_ide_dma_host_off(ide_drive_t *drive)
        unsigned long flags;
        u16 tmp16;
 
-       spin_lock_irqsave(&ide_lock, flags);
+       spin_lock_irqsave(&atiixp_lock, flags);
 
        pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
        tmp16 &= ~(1 << drive->dn);
        pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
 
-       spin_unlock_irqrestore(&ide_lock, flags);
+       spin_unlock_irqrestore(&atiixp_lock, flags);
 
        return __ide_dma_host_off(drive);
 }
@@ -152,7 +154,7 @@ static void atiixp_tuneproc(ide_drive_t *drive, u8 pio)
        u32 pio_timing_data;
        u16 pio_mode_data;
 
-       spin_lock_irqsave(&ide_lock, flags);
+       spin_lock_irqsave(&atiixp_lock, flags);
 
        pci_read_config_word(dev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
        pio_mode_data &= ~(0x07 << (drive->dn * 4));
@@ -165,7 +167,7 @@ static void atiixp_tuneproc(ide_drive_t *drive, u8 pio)
                 (pio_timing[pio].command_width << (timing_shift + 4));
        pci_write_config_dword(dev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
 
-       spin_unlock_irqrestore(&ide_lock, flags);
+       spin_unlock_irqrestore(&atiixp_lock, flags);
 }
 
 /**
@@ -189,7 +191,7 @@ static int atiixp_speedproc(ide_drive_t *drive, u8 xferspeed)
 
        speed = ide_rate_filter(atiixp_ratemask(drive), xferspeed);
 
-       spin_lock_irqsave(&ide_lock, flags);
+       spin_lock_irqsave(&atiixp_lock, flags);
 
        save_mdma_mode[drive->dn] = 0;
        if (speed >= XFER_UDMA_0) {
@@ -208,7 +210,7 @@ static int atiixp_speedproc(ide_drive_t *drive, u8 xferspeed)
                }
        }
 
-       spin_unlock_irqrestore(&ide_lock, flags);
+       spin_unlock_irqrestore(&atiixp_lock, flags);
 
        if (speed >= XFER_SW_DMA_0)
                pio = atiixp_dma_2_pio(speed);
index 61f1a9665a7f911729ae17faad0492a5b89309fe..381cc6f101ce073c9307469122230da9870fbab4 100644 (file)
@@ -123,7 +123,7 @@ struct via82cxxx_dev
 static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing)
 {
        struct pci_dev *dev = hwif->pci_dev;
-       struct via82cxxx_dev *vdev = ide_get_hwifdata(hwif);
+       struct via82cxxx_dev *vdev = pci_get_drvdata(hwif->pci_dev);
        u8 t;
 
        if (~vdev->via_config->flags & VIA_BAD_AST) {
@@ -162,7 +162,7 @@ static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing)
 static int via_set_drive(ide_drive_t *drive, u8 speed)
 {
        ide_drive_t *peer = HWIF(drive)->drives + (~drive->dn & 1);
-       struct via82cxxx_dev *vdev = ide_get_hwifdata(drive->hwif);
+       struct via82cxxx_dev *vdev = pci_get_drvdata(drive->hwif->pci_dev);
        struct ide_timing t, p;
        unsigned int T, UT;
 
@@ -225,7 +225,7 @@ static void via82cxxx_tune_drive(ide_drive_t *drive, u8 pio)
 static int via82cxxx_ide_dma_check (ide_drive_t *drive)
 {
        ide_hwif_t *hwif = HWIF(drive);
-       struct via82cxxx_dev *vdev = ide_get_hwifdata(hwif);
+       struct via82cxxx_dev *vdev = pci_get_drvdata(hwif->pci_dev);
        u16 w80 = hwif->udma_four;
 
        u16 speed = ide_find_best_mode(drive,
@@ -262,6 +262,53 @@ static struct via_isa_bridge *via_config_find(struct pci_dev **isa)
        return via_config;
 }
 
+/*
+ * Check and handle 80-wire cable presence
+ */
+static void __devinit via_cable_detect(struct via82cxxx_dev *vdev, u32 u)
+{
+       int i;
+
+       switch (vdev->via_config->flags & VIA_UDMA) {
+               case VIA_UDMA_66:
+                       for (i = 24; i >= 0; i -= 8)
+                               if (((u >> (i & 16)) & 8) &&
+                                   ((u >> i) & 0x20) &&
+                                    (((u >> i) & 7) < 2)) {
+                                       /*
+                                        * 2x PCI clock and
+                                        * UDMA w/ < 3T/cycle
+                                        */
+                                       vdev->via_80w |= (1 << (1 - (i >> 4)));
+                               }
+                       break;
+
+               case VIA_UDMA_100:
+                       for (i = 24; i >= 0; i -= 8)
+                               if (((u >> i) & 0x10) ||
+                                   (((u >> i) & 0x20) &&
+                                    (((u >> i) & 7) < 4))) {
+                                       /* BIOS 80-wire bit or
+                                        * UDMA w/ < 60ns/cycle
+                                        */
+                                       vdev->via_80w |= (1 << (1 - (i >> 4)));
+                               }
+                       break;
+
+               case VIA_UDMA_133:
+                       for (i = 24; i >= 0; i -= 8)
+                               if (((u >> i) & 0x10) ||
+                                   (((u >> i) & 0x20) &&
+                                    (((u >> i) & 7) < 6))) {
+                                       /* BIOS 80-wire bit or
+                                        * UDMA w/ < 60ns/cycle
+                                        */
+                                       vdev->via_80w |= (1 << (1 - (i >> 4)));
+                               }
+                       break;
+       }
+}
+
 /**
  *     init_chipset_via82cxxx  -       initialization handler
  *     @dev: PCI device
@@ -274,14 +321,22 @@ static struct via_isa_bridge *via_config_find(struct pci_dev **isa)
 static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const char *name)
 {
        struct pci_dev *isa = NULL;
+       struct via82cxxx_dev *vdev;
        struct via_isa_bridge *via_config;
        u8 t, v;
-       unsigned int u;
+       u32 u;
+
+       vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+       if (!vdev) {
+               printk(KERN_ERR "VP_IDE: out of memory :(\n");
+               return -ENOMEM;
+       }
+       pci_set_drvdata(dev, vdev);
 
        /*
         * Find the ISA bridge to see how good the IDE is.
         */
-       via_config = via_config_find(&isa);
+       vdev->via_config = via_config = via_config_find(&isa);
 
        /* We checked this earlier so if it fails here deeep badness
           is involved */
@@ -289,16 +344,17 @@ static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const
        BUG_ON(!via_config->id);
 
        /*
-        * Setup or disable Clk66 if appropriate
+        * Detect cable and configure Clk66
         */
+       pci_read_config_dword(dev, VIA_UDMA_TIMING, &u);
+
+       via_cable_detect(vdev, u);
 
        if ((via_config->flags & VIA_UDMA) == VIA_UDMA_66) {
                /* Enable Clk66 */
-               pci_read_config_dword(dev, VIA_UDMA_TIMING, &u);
                pci_write_config_dword(dev, VIA_UDMA_TIMING, u|0x80008);
        } else if (via_config->flags & VIA_BAD_CLK66) {
                /* Would cause trouble on 596a and 686 */
-               pci_read_config_dword(dev, VIA_UDMA_TIMING, &u);
                pci_write_config_dword(dev, VIA_UDMA_TIMING, u & ~0x80008);
        }
 
@@ -367,75 +423,11 @@ static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const
        return 0;
 }
 
-/*
- * Check and handle 80-wire cable presence
- */
-static void __devinit via_cable_detect(struct pci_dev *dev, struct via82cxxx_dev *vdev)
-{
-       unsigned int u;
-       int i;
-       pci_read_config_dword(dev, VIA_UDMA_TIMING, &u);
-
-       switch (vdev->via_config->flags & VIA_UDMA) {
-
-               case VIA_UDMA_66:
-                       for (i = 24; i >= 0; i -= 8)
-                               if (((u >> (i & 16)) & 8) &&
-                                   ((u >> i) & 0x20) &&
-                                    (((u >> i) & 7) < 2)) {
-                                       /*
-                                        * 2x PCI clock and
-                                        * UDMA w/ < 3T/cycle
-                                        */
-                                       vdev->via_80w |= (1 << (1 - (i >> 4)));
-                               }
-                       break;
-
-               case VIA_UDMA_100:
-                       for (i = 24; i >= 0; i -= 8)
-                               if (((u >> i) & 0x10) ||
-                                   (((u >> i) & 0x20) &&
-                                    (((u >> i) & 7) < 4))) {
-                                       /* BIOS 80-wire bit or
-                                        * UDMA w/ < 60ns/cycle
-                                        */
-                                       vdev->via_80w |= (1 << (1 - (i >> 4)));
-                               }
-                       break;
-
-               case VIA_UDMA_133:
-                       for (i = 24; i >= 0; i -= 8)
-                               if (((u >> i) & 0x10) ||
-                                   (((u >> i) & 0x20) &&
-                                    (((u >> i) & 7) < 6))) {
-                                       /* BIOS 80-wire bit or
-                                        * UDMA w/ < 60ns/cycle
-                                        */
-                                       vdev->via_80w |= (1 << (1 - (i >> 4)));
-                               }
-                       break;
-
-       }
-}
-
 static void __devinit init_hwif_via82cxxx(ide_hwif_t *hwif)
 {
-       struct via82cxxx_dev *vdev = kmalloc(sizeof(struct via82cxxx_dev),
-               GFP_KERNEL);
-       struct pci_dev *isa = NULL;
+       struct via82cxxx_dev *vdev = pci_get_drvdata(hwif->pci_dev);
        int i;
 
-       if (vdev == NULL) {
-               printk(KERN_ERR "VP_IDE: out of memory :(\n");
-               return;
-       }
-
-       memset(vdev, 0, sizeof(struct via82cxxx_dev));
-       ide_set_hwifdata(hwif, vdev);
-
-       vdev->via_config = via_config_find(&isa);
-       via_cable_detect(hwif->pci_dev, vdev);
-
        hwif->autodma = 0;
 
        hwif->tuneproc = &via82cxxx_tune_drive;
index 100df6f38d920759291e5b442f6f0995061e4f60..91e0c75aca8f40b1402eb29c8c94140979fdcca5 100644 (file)
@@ -52,6 +52,8 @@
 #define KVM_MAX_VCPUS 1
 #define KVM_MEMORY_SLOTS 4
 #define KVM_NUM_MMU_PAGES 256
+#define KVM_MIN_FREE_MMU_PAGES 5
+#define KVM_REFILL_PAGES 25
 
 #define FX_IMAGE_SIZE 512
 #define FX_IMAGE_ALIGN 16
@@ -89,14 +91,54 @@ typedef unsigned long  hva_t;
 typedef u64            hpa_t;
 typedef unsigned long  hfn_t;
 
+#define NR_PTE_CHAIN_ENTRIES 5
+
+struct kvm_pte_chain {
+       u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
+       struct hlist_node link;
+};
+
+/*
+ * kvm_mmu_page_role, below, is defined as:
+ *
+ *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
+ *   bits 4:7 - page table level for this shadow (1-4)
+ *   bits 8:9 - page table quadrant for 2-level guests
+ *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
+ */
+union kvm_mmu_page_role {
+       unsigned word;
+       struct {
+               unsigned glevels : 4;
+               unsigned level : 4;
+               unsigned quadrant : 2;
+               unsigned pad_for_nice_hex_output : 6;
+               unsigned metaphysical : 1;
+       };
+};
+
 struct kvm_mmu_page {
        struct list_head link;
+       struct hlist_node hash_link;
+
+       /*
+        * The following two entries are used to key the shadow page in the
+        * hash table.
+        */
+       gfn_t gfn;
+       union kvm_mmu_page_role role;
+
        hpa_t page_hpa;
        unsigned long slot_bitmap; /* One bit set per slot which has memory
                                    * in this shadow page.
                                    */
        int global;              /* Set if all ptes in this page are global */
-       u64 *parent_pte;
+       int multimapped;         /* More than one parent_pte? */
+       int root_count;          /* Currently serving as active root */
+       union {
+               u64 *parent_pte;               /* !multimapped */
+               struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
+       };
 };
 
 struct vmcs {
@@ -117,14 +159,26 @@ struct kvm_vcpu;
 struct kvm_mmu {
        void (*new_cr3)(struct kvm_vcpu *vcpu);
        int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
-       void (*inval_page)(struct kvm_vcpu *vcpu, gva_t gva);
        void (*free)(struct kvm_vcpu *vcpu);
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
        hpa_t root_hpa;
        int root_level;
        int shadow_root_level;
+
+       u64 *pae_root;
+};
+
+#define KVM_NR_MEM_OBJS 20
+
+struct kvm_mmu_memory_cache {
+       int nobjs;
+       void *objects[KVM_NR_MEM_OBJS];
 };
 
+/*
+ * We don't want allocation failures within the mmu code, so we preallocate
+ * enough memory for a single page fault in a cache.
+ */
 struct kvm_guest_debug {
        int enabled;
        unsigned long bp[4];
@@ -173,6 +227,7 @@ struct kvm_vcpu {
        struct mutex mutex;
        int   cpu;
        int   launched;
+       int interrupt_window_open;
        unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
 #define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
        unsigned long irq_pending[NR_IRQ_WORDS];
@@ -184,6 +239,7 @@ struct kvm_vcpu {
        unsigned long cr3;
        unsigned long cr4;
        unsigned long cr8;
+       u64 pdptrs[4]; /* pae */
        u64 shadow_efer;
        u64 apic_base;
        int nmsrs;
@@ -194,6 +250,12 @@ struct kvm_vcpu {
        struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
        struct kvm_mmu mmu;
 
+       struct kvm_mmu_memory_cache mmu_pte_chain_cache;
+       struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
+
+       gfn_t last_pt_write_gfn;
+       int   last_pt_write_count;
+
        struct kvm_guest_debug guest_debug;
 
        char fx_buf[FX_BUF_SIZE];
@@ -231,10 +293,16 @@ struct kvm {
        spinlock_t lock; /* protects everything except vcpus */
        int nmemslots;
        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
+       /*
+        * Hash table of struct kvm_mmu_page.
+        */
        struct list_head active_mmu_pages;
+       int n_free_mmu_pages;
+       struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
        int memory_config_version;
        int busy;
+       unsigned long rmap_overflow;
 };
 
 struct kvm_stat {
@@ -247,6 +315,9 @@ struct kvm_stat {
        u32 io_exits;
        u32 mmio_exits;
        u32 signal_exits;
+       u32 irq_window_exits;
+       u32 halt_exits;
+       u32 request_irq_exits;
        u32 irq_exits;
 };
 
@@ -279,6 +350,7 @@ struct kvm_arch_ops {
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
+       void (*decache_cr0_cr4_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        void (*set_cr0_no_modeswitch)(struct kvm_vcpu *vcpu,
                                      unsigned long cr0);
@@ -323,7 +395,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
-void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
+void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot);
 
 hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
 #define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
@@ -396,6 +468,19 @@ int kvm_write_guest(struct kvm_vcpu *vcpu,
 
 unsigned long segment_base(u16 selector);
 
+void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
+void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
+int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
+void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
+
+static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
+                                    u32 error_code)
+{
+       if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
+               kvm_mmu_free_some_pages(vcpu);
+       return vcpu->mmu.page_fault(vcpu, gva, error_code);
+}
+
 static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
        struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
@@ -541,19 +626,4 @@ static inline u32 get_rdx_init_val(void)
 #define TSS_REDIRECTION_SIZE (256 / 8)
 #define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
 
-#ifdef CONFIG_X86_64
-
-/*
- * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.  Therefore
- * we need to allocate shadow page tables in the first 4GB of memory, which
- * happens to fit the DMA32 zone.
- */
-#define GFP_KVM_MMU (GFP_KERNEL | __GFP_DMA32)
-
-#else
-
-#define GFP_KVM_MMU GFP_KERNEL
-
-#endif
-
 #endif
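
Aside: the kvm_mmu_page_role union added earlier in this header packs the shadow-page lookup key into a single word, so kvm_mmu_get_page() can compare (gfn, role.word) pairs cheaply. A stand-alone sketch of the same bitfield layout follows; the field values are made up for illustration.

#include <stdio.h>

/* Same layout as union kvm_mmu_page_role above; the packed 'word' is what
 * the hash-bucket walk compares against each candidate shadow page.
 */
union mmu_page_role {
        unsigned word;
        struct {
                unsigned glevels : 4;
                unsigned level : 4;
                unsigned quadrant : 2;
                unsigned pad_for_nice_hex_output : 6;
                unsigned metaphysical : 1;
        };
};

int main(void)
{
        union mmu_page_role role = { .word = 0 };

        role.glevels = 2;       /* hypothetical: 2-level (32-bit) guest paging */
        role.level = 1;         /* leaf shadow page table */
        role.quadrant = 3;      /* which quarter of the guest page this shadow maps */

        /* Two shadow pages match only if both gfn and role.word are equal. */
        printf("role.word = %#x\n", role.word);
        return 0;
}
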
index ce7fe640f18dfc29b909af99bc80ac6aac1a3290..67c1154960f0a308f72e41c7a5e2f54f2afc2894 100644 (file)
@@ -58,6 +58,9 @@ static struct kvm_stats_debugfs_item {
        { "io_exits", &kvm_stat.io_exits },
        { "mmio_exits", &kvm_stat.mmio_exits },
        { "signal_exits", &kvm_stat.signal_exits },
+       { "irq_window", &kvm_stat.irq_window_exits },
+       { "halt_exits", &kvm_stat.halt_exits },
+       { "request_irq", &kvm_stat.request_irq_exits },
        { "irq_exits", &kvm_stat.irq_exits },
        { 0, 0 }
 };
@@ -227,6 +230,7 @@ static int kvm_dev_open(struct inode *inode, struct file *filp)
                struct kvm_vcpu *vcpu = &kvm->vcpus[i];
 
                mutex_init(&vcpu->mutex);
+               vcpu->kvm = kvm;
                vcpu->mmu.root_hpa = INVALID_PAGE;
                INIT_LIST_HEAD(&vcpu->free_pages);
        }
@@ -268,8 +272,8 @@ static void kvm_free_physmem(struct kvm *kvm)
 
 static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
 {
-       kvm_arch_ops->vcpu_free(vcpu);
        kvm_mmu_destroy(vcpu);
+       kvm_arch_ops->vcpu_free(vcpu);
 }
 
 static void kvm_free_vcpus(struct kvm *kvm)
@@ -295,14 +299,17 @@ static void inject_gp(struct kvm_vcpu *vcpu)
        kvm_arch_ops->inject_gp(vcpu, 0);
 }
 
-static int pdptrs_have_reserved_bits_set(struct kvm_vcpu *vcpu,
-                                        unsigned long cr3)
+/*
+ * Load the pae pdptrs.  Return true if they are all valid.
+ */
+static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
-       unsigned offset = (cr3 & (PAGE_SIZE-1)) >> 5;
+       unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
        int i;
        u64 pdpte;
        u64 *pdpt;
+       int ret;
        struct kvm_memory_slot *memslot;
 
        spin_lock(&vcpu->kvm->lock);
@@ -310,16 +317,23 @@ static int pdptrs_have_reserved_bits_set(struct kvm_vcpu *vcpu,
        /* FIXME: !memslot - emulate? 0xff? */
        pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn), KM_USER0);
 
+       ret = 1;
        for (i = 0; i < 4; ++i) {
                pdpte = pdpt[offset + i];
-               if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull))
-                       break;
+               if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull)) {
+                       ret = 0;
+                       goto out;
+               }
        }
 
+       for (i = 0; i < 4; ++i)
+               vcpu->pdptrs[i] = pdpt[offset + i];
+
+out:
        kunmap_atomic(pdpt, KM_USER0);
        spin_unlock(&vcpu->kvm->lock);
 
-       return i != 4;
+       return ret;
 }
 
 void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
@@ -365,8 +379,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                        }
                } else
 #endif
-               if (is_pae(vcpu) &&
-                           pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
+               if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
                        printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
                               "reserved bits\n");
                        inject_gp(vcpu);
@@ -387,6 +400,7 @@ EXPORT_SYMBOL_GPL(set_cr0);
 
 void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
+       kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
        set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
 }
 EXPORT_SYMBOL_GPL(lmsw);
@@ -407,7 +421,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                        return;
                }
        } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK)
-                  && pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
+                  && !load_pdptrs(vcpu, vcpu->cr3)) {
                printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                inject_gp(vcpu);
        }
@@ -439,7 +453,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                        return;
                }
                if (is_paging(vcpu) && is_pae(vcpu) &&
-                   pdptrs_have_reserved_bits_set(vcpu, cr3)) {
+                   !load_pdptrs(vcpu, cr3)) {
                        printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
                               "reserved bits\n");
                        inject_gp(vcpu);
@@ -449,7 +463,19 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
        vcpu->cr3 = cr3;
        spin_lock(&vcpu->kvm->lock);
-       vcpu->mmu.new_cr3(vcpu);
+       /*
+        * Does the new cr3 value map to physical memory? (Note, we
+        * catch an invalid cr3 even in real-mode, because it would
+        * cause trouble later on when we turn on paging anyway.)
+        *
+        * A real CPU would silently accept an invalid cr3 and would
+        * attempt to use it - with largely undefined (and often hard
+        * to debug) behavior on the guest side.
+        */
+       if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
+               inject_gp(vcpu);
+       else
+               vcpu->mmu.new_cr3(vcpu);
        spin_unlock(&vcpu->kvm->lock);
 }
 EXPORT_SYMBOL_GPL(set_cr3);
@@ -517,7 +543,6 @@ static int kvm_dev_ioctl_create_vcpu(struct kvm *kvm, int n)
        vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
 
        vcpu->cpu = -1;  /* First load will set up TR */
-       vcpu->kvm = kvm;
        r = kvm_arch_ops->vcpu_create(vcpu);
        if (r < 0)
                goto out_free_vcpus;
@@ -634,6 +659,7 @@ raced:
                                                     | __GFP_ZERO);
                        if (!new.phys_mem[i])
                                goto out_free;
+                       new.phys_mem[i]->private = 0;
                }
        }
 
@@ -688,6 +714,13 @@ out:
        return r;
 }
 
+static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
+{
+       spin_lock(&vcpu->kvm->lock);
+       kvm_mmu_slot_remove_write_access(vcpu, slot);
+       spin_unlock(&vcpu->kvm->lock);
+}
+
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
@@ -697,6 +730,7 @@ static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
        struct kvm_memory_slot *memslot;
        int r, i;
        int n;
+       int cleared;
        unsigned long any = 0;
 
        spin_lock(&kvm->lock);
@@ -727,15 +761,17 @@ static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
 
 
        if (any) {
-               spin_lock(&kvm->lock);
-               kvm_mmu_slot_remove_write_access(kvm, log->slot);
-               spin_unlock(&kvm->lock);
-               memset(memslot->dirty_bitmap, 0, n);
+               cleared = 0;
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                        struct kvm_vcpu *vcpu = vcpu_load(kvm, i);
 
                        if (!vcpu)
                                continue;
+                       if (!cleared) {
+                               do_remove_write_access(vcpu, log->slot);
+                               memset(memslot->dirty_bitmap, 0, n);
+                               cleared = 1;
+                       }
                        kvm_arch_ops->tlb_flush(vcpu);
                        vcpu_put(vcpu);
                }
@@ -863,6 +899,27 @@ static int emulator_read_emulated(unsigned long addr,
        }
 }
 
+static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+                              unsigned long val, int bytes)
+{
+       struct kvm_memory_slot *m;
+       struct page *page;
+       void *virt;
+
+       if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
+               return 0;
+       m = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
+       if (!m)
+               return 0;
+       page = gfn_to_page(m, gpa >> PAGE_SHIFT);
+       kvm_mmu_pre_write(vcpu, gpa, bytes);
+       virt = kmap_atomic(page, KM_USER0);
+       memcpy(virt + offset_in_page(gpa), &val, bytes);
+       kunmap_atomic(virt, KM_USER0);
+       kvm_mmu_post_write(vcpu, gpa, bytes);
+       return 1;
+}
+
 static int emulator_write_emulated(unsigned long addr,
                                   unsigned long val,
                                   unsigned int bytes,
@@ -874,6 +931,9 @@ static int emulator_write_emulated(unsigned long addr,
        if (gpa == UNMAPPED_GVA)
                return X86EMUL_PROPAGATE_FAULT;
 
+       if (emulator_write_phys(vcpu, gpa, val, bytes))
+               return X86EMUL_CONTINUE;
+
        vcpu->mmio_needed = 1;
        vcpu->mmio_phys_addr = gpa;
        vcpu->mmio_size = bytes;
@@ -898,6 +958,30 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
        return emulator_write_emulated(addr, new, bytes, ctxt);
 }
 
+#ifdef CONFIG_X86_32
+
+static int emulator_cmpxchg8b_emulated(unsigned long addr,
+                                      unsigned long old_lo,
+                                      unsigned long old_hi,
+                                      unsigned long new_lo,
+                                      unsigned long new_hi,
+                                      struct x86_emulate_ctxt *ctxt)
+{
+       static int reported;
+       int r;
+
+       if (!reported) {
+               reported = 1;
+               printk(KERN_WARNING "kvm: emulating exchange8b as write\n");
+       }
+       r = emulator_write_emulated(addr, new_lo, 4, ctxt);
+       if (r != X86EMUL_CONTINUE)
+               return r;
+       return emulator_write_emulated(addr+4, new_hi, 4, ctxt);
+}
+
+#endif
+
 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
 {
        return kvm_arch_ops->get_segment_base(vcpu, seg);
@@ -905,18 +989,15 @@ static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
 
 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 {
-       spin_lock(&vcpu->kvm->lock);
-       vcpu->mmu.inval_page(vcpu, address);
-       spin_unlock(&vcpu->kvm->lock);
-       kvm_arch_ops->invlpg(vcpu, address);
        return X86EMUL_CONTINUE;
 }
 
 int emulate_clts(struct kvm_vcpu *vcpu)
 {
-       unsigned long cr0 = vcpu->cr0;
+       unsigned long cr0;
 
-       cr0 &= ~CR0_TS_MASK;
+       kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
+       cr0 = vcpu->cr0 & ~CR0_TS_MASK;
        kvm_arch_ops->set_cr0(vcpu, cr0);
        return X86EMUL_CONTINUE;
 }
@@ -975,6 +1056,9 @@ struct x86_emulate_ops emulate_ops = {
        .read_emulated       = emulator_read_emulated,
        .write_emulated      = emulator_write_emulated,
        .cmpxchg_emulated    = emulator_cmpxchg_emulated,
+#ifdef CONFIG_X86_32
+       .cmpxchg8b_emulated  = emulator_cmpxchg8b_emulated,
+#endif
 };
 
 int emulate_instruction(struct kvm_vcpu *vcpu,
@@ -1024,6 +1108,8 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
        }
 
        if (r) {
+               if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
+                       return EMULATE_DONE;
                if (!vcpu->mmio_needed) {
                        report_emulation_failure(&emulate_ctxt);
                        return EMULATE_FAIL;
@@ -1069,6 +1155,7 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
 
 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
 {
+       kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
        switch (cr) {
        case 0:
                return vcpu->cr0;
@@ -1403,6 +1490,7 @@ static int kvm_dev_ioctl_get_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
        sregs->gdt.limit = dt.limit;
        sregs->gdt.base = dt.base;
 
+       kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
        sregs->cr0 = vcpu->cr0;
        sregs->cr2 = vcpu->cr2;
        sregs->cr3 = vcpu->cr3;
@@ -1467,11 +1555,15 @@ static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
 #endif
        vcpu->apic_base = sregs->apic_base;
 
+       kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
+
        mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
        kvm_arch_ops->set_cr0_no_modeswitch(vcpu, sregs->cr0);
 
        mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
        kvm_arch_ops->set_cr4(vcpu, sregs->cr4);
+       if (!is_long_mode(vcpu) && is_pae(vcpu))
+               load_pdptrs(vcpu, vcpu->cr3);
 
        if (mmu_reset_needed)
                kvm_mmu_reset_context(vcpu);
@@ -1693,12 +1785,12 @@ static long kvm_dev_ioctl(struct file *filp,
                if (copy_from_user(&kvm_run, (void *)arg, sizeof kvm_run))
                        goto out;
                r = kvm_dev_ioctl_run(kvm, &kvm_run);
-               if (r < 0)
+               if (r < 0 &&  r != -EINTR)
                        goto out;
-               r = -EFAULT;
-               if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run))
+               if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run)) {
+                       r = -EFAULT;
                        goto out;
-               r = 0;
+               }
                break;
        }
        case KVM_GET_REGS: {
@@ -1842,6 +1934,7 @@ static long kvm_dev_ioctl(struct file *filp,
                                 num_msrs_to_save * sizeof(u32)))
                        goto out;
                r = 0;
+               break;
        }
        default:
                ;
@@ -1944,17 +2037,17 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
                return -EEXIST;
        }
 
-       kvm_arch_ops = ops;
-
-       if (!kvm_arch_ops->cpu_has_kvm_support()) {
+       if (!ops->cpu_has_kvm_support()) {
                printk(KERN_ERR "kvm: no hardware support\n");
                return -EOPNOTSUPP;
        }
-       if (kvm_arch_ops->disabled_by_bios()) {
+       if (ops->disabled_by_bios()) {
                printk(KERN_ERR "kvm: disabled by bios\n");
                return -EOPNOTSUPP;
        }
 
+       kvm_arch_ops = ops;
+
        r = kvm_arch_ops->hardware_setup();
        if (r < 0)
            return r;
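
Aside: the PDPTE validity test used by load_pdptrs() earlier in this file rejects an entry only when it is present (bit 0) and has a reserved bit from the 0xfffffff0000001e6 mask set; not-present entries are skipped. A minimal user-space sketch with hypothetical entry values:

#include <stdio.h>
#include <stdint.h>

/* Same test as load_pdptrs() above: a PDPTE is rejected only when it is
 * present (bit 0) *and* has a reserved bit set.  Not-present entries pass.
 */
static int pdpte_is_valid(uint64_t pdpte)
{
        return !((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull));
}

int main(void)
{
        /* Hypothetical entries: present and clean, not present, and present
         * with a reserved bit (bit 1) set.
         */
        uint64_t entries[] = { 0x0000000012345001ull,
                               0x0000000000000000ull,
                               0x0000000012345003ull };
        int i;

        for (i = 0; i < 3; ++i)
                printf("pdpte %d: %s\n", i,
                       pdpte_is_valid(entries[i]) ? "ok" : "reserved bits set");
        return 0;
}
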
index 790423c5f23d2dab18dcf98eba55330c93177935..c6f972914f082e9af20ca6af54d155104ef5d2ba 100644 (file)
 #include "vmx.h"
 #include "kvm.h"
 
+#undef MMU_DEBUG
+
+#undef AUDIT
+
+#ifdef AUDIT
+static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
+#else
+static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
+#endif
+
+#ifdef MMU_DEBUG
+
+#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
+#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
+
+#else
+
 #define pgprintk(x...) do { } while (0)
+#define rmap_printk(x...) do { } while (0)
+
+#endif
+
+#if defined(MMU_DEBUG) || defined(AUDIT)
+static int dbg = 1;
+#endif
 
 #define ASSERT(x)                                                      \
        if (!(x)) {                                                     \
                printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
                       __FILE__, __LINE__, #x);                         \
        }
 
-#define PT64_ENT_PER_PAGE 512
-#define PT32_ENT_PER_PAGE 1024
+#define PT64_PT_BITS 9
+#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
+#define PT32_PT_BITS 10
+#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
 
 #define PT_WRITABLE_SHIFT 1
 
 #define PT_DIRECTORY_LEVEL 2
 #define PT_PAGE_TABLE_LEVEL 1
 
+#define RMAP_EXT 4
+
+struct kvm_rmap_desc {
+       u64 *shadow_ptes[RMAP_EXT];
+       struct kvm_rmap_desc *more;
+};
+
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
        return vcpu->cr0 & CR0_WP_MASK;
@@ -150,32 +183,272 @@ static int is_io_pte(unsigned long pte)
        return pte & PT_SHADOW_IO_MARK;
 }
 
+static int is_rmap_pte(u64 pte)
+{
+       return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
+               == (PT_WRITABLE_MASK | PT_PRESENT_MASK);
+}
+
+static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
+                                 size_t objsize, int min)
+{
+       void *obj;
+
+       if (cache->nobjs >= min)
+               return 0;
+       while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
+               obj = kzalloc(objsize, GFP_NOWAIT);
+               if (!obj)
+                       return -ENOMEM;
+               cache->objects[cache->nobjs++] = obj;
+       }
+       return 0;
+}
+
+static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+{
+       while (mc->nobjs)
+               kfree(mc->objects[--mc->nobjs]);
+}
+
+static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
+{
+       int r;
+
+       r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
+                                  sizeof(struct kvm_pte_chain), 4);
+       if (r)
+               goto out;
+       r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
+                                  sizeof(struct kvm_rmap_desc), 1);
+out:
+       return r;
+}
+
+static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
+{
+       mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
+       mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
+}
+
+static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
+                                   size_t size)
+{
+       void *p;
+
+       BUG_ON(!mc->nobjs);
+       p = mc->objects[--mc->nobjs];
+       memset(p, 0, size);
+       return p;
+}
+
+static void mmu_memory_cache_free(struct kvm_mmu_memory_cache *mc, void *obj)
+{
+       if (mc->nobjs < KVM_NR_MEM_OBJS)
+               mc->objects[mc->nobjs++] = obj;
+       else
+               kfree(obj);
+}
+
+static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
+{
+       return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
+                                     sizeof(struct kvm_pte_chain));
+}
+
+static void mmu_free_pte_chain(struct kvm_vcpu *vcpu,
+                              struct kvm_pte_chain *pc)
+{
+       mmu_memory_cache_free(&vcpu->mmu_pte_chain_cache, pc);
+}
+
+static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
+{
+       return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
+                                     sizeof(struct kvm_rmap_desc));
+}
+
+static void mmu_free_rmap_desc(struct kvm_vcpu *vcpu,
+                              struct kvm_rmap_desc *rd)
+{
+       mmu_memory_cache_free(&vcpu->mmu_rmap_desc_cache, rd);
+}
+
+/*
+ * Reverse mapping data structures:
+ *
+ * If page->private bit zero is zero, then page->private points to the
+ * shadow page table entry that points to page_address(page).
+ *
+ * If page->private bit zero is one, (then page->private & ~1) points
+ * to a struct kvm_rmap_desc containing more mappings.
+ */
+static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
+{
+       struct page *page;
+       struct kvm_rmap_desc *desc;
+       int i;
+
+       if (!is_rmap_pte(*spte))
+               return;
+       page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
+       if (!page->private) {
+               rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
+               page->private = (unsigned long)spte;
+       } else if (!(page->private & 1)) {
+               rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
+               desc = mmu_alloc_rmap_desc(vcpu);
+               desc->shadow_ptes[0] = (u64 *)page->private;
+               desc->shadow_ptes[1] = spte;
+               page->private = (unsigned long)desc | 1;
+       } else {
+               rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
+               desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+               while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
+                       desc = desc->more;
+               if (desc->shadow_ptes[RMAP_EXT-1]) {
+                       desc->more = mmu_alloc_rmap_desc(vcpu);
+                       desc = desc->more;
+               }
+               for (i = 0; desc->shadow_ptes[i]; ++i)
+                       ;
+               desc->shadow_ptes[i] = spte;
+       }
+}
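
The page->private encoding described in the comment above (bit zero as a tag telling a single spte pointer apart from a kvm_rmap_desc chain) can be illustrated outside the kernel. A minimal user-space sketch of the same tagged-pointer idea, with hypothetical types in place of page->private and struct kvm_rmap_desc:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the rmap encoding above: a single back-pointer is stored
 * directly, a descriptor chain is stored with bit zero set as a tag.
 */
struct desc {
        void *ptes[4];
        struct desc *more;
};

static void describe(unsigned long priv)
{
        if (!priv)
                printf("no reverse mappings\n");
        else if (!(priv & 1))
                printf("single spte at %p\n", (void *)priv);
        else
                printf("descriptor chain at %p\n", (void *)(priv & ~1ul));
}

int main(void)
{
        unsigned long spte = 0x1000;            /* pretend spte address */
        struct desc *d = calloc(1, sizeof(*d));

        describe(0);
        describe(spte);                         /* untagged: one mapping  */
        describe((unsigned long)d | 1);         /* tagged: many mappings  */
        free(d);
        return 0;
}
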
+
+static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
+                                  struct page *page,
+                                  struct kvm_rmap_desc *desc,
+                                  int i,
+                                  struct kvm_rmap_desc *prev_desc)
+{
+       int j;
+
+       for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
+               ;
+       desc->shadow_ptes[i] = desc->shadow_ptes[j];
+       desc->shadow_ptes[j] = 0;
+       if (j != 0)
+               return;
+       if (!prev_desc && !desc->more)
+               page->private = (unsigned long)desc->shadow_ptes[0];
+       else
+               if (prev_desc)
+                       prev_desc->more = desc->more;
+               else
+                       page->private = (unsigned long)desc->more | 1;
+       mmu_free_rmap_desc(vcpu, desc);
+}
+
+static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
+{
+       struct page *page;
+       struct kvm_rmap_desc *desc;
+       struct kvm_rmap_desc *prev_desc;
+       int i;
+
+       if (!is_rmap_pte(*spte))
+               return;
+       page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
+       if (!page->private) {
+               printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
+               BUG();
+       } else if (!(page->private & 1)) {
+               rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
+               if ((u64 *)page->private != spte) {
+                       printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
+                              spte, *spte);
+                       BUG();
+               }
+               page->private = 0;
+       } else {
+               rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
+               desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+               prev_desc = NULL;
+               while (desc) {
+                       for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
+                               if (desc->shadow_ptes[i] == spte) {
+                                       rmap_desc_remove_entry(vcpu, page,
+                                                              desc, i,
+                                                              prev_desc);
+                                       return;
+                               }
+                       prev_desc = desc;
+                       desc = desc->more;
+               }
+               BUG();
+       }
+}
+
+static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
+{
+       struct kvm *kvm = vcpu->kvm;
+       struct page *page;
+       struct kvm_memory_slot *slot;
+       struct kvm_rmap_desc *desc;
+       u64 *spte;
+
+       slot = gfn_to_memslot(kvm, gfn);
+       BUG_ON(!slot);
+       page = gfn_to_page(slot, gfn);
+
+       while (page->private) {
+               if (!(page->private & 1))
+                       spte = (u64 *)page->private;
+               else {
+                       desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+                       spte = desc->shadow_ptes[0];
+               }
+               BUG_ON(!spte);
+               BUG_ON((*spte & PT64_BASE_ADDR_MASK) !=
+                      page_to_pfn(page) << PAGE_SHIFT);
+               BUG_ON(!(*spte & PT_PRESENT_MASK));
+               BUG_ON(!(*spte & PT_WRITABLE_MASK));
+               rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
+               rmap_remove(vcpu, spte);
+               kvm_arch_ops->tlb_flush(vcpu);
+               *spte &= ~(u64)PT_WRITABLE_MASK;
+       }
+}
+
+static int is_empty_shadow_page(hpa_t page_hpa)
+{
+       u64 *pos;
+       u64 *end;
+
+       for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u64);
+                     pos != end; pos++)
+               if (*pos != 0) {
+                       printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
+                              pos, *pos);
+                       return 0;
+               }
+       return 1;
+}
+
 static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
 {
        struct kvm_mmu_page *page_head = page_header(page_hpa);
 
+       ASSERT(is_empty_shadow_page(page_hpa));
        list_del(&page_head->link);
        page_head->page_hpa = page_hpa;
        list_add(&page_head->link, &vcpu->free_pages);
+       ++vcpu->kvm->n_free_mmu_pages;
 }
 
-static int is_empty_shadow_page(hpa_t page_hpa)
+static unsigned kvm_page_table_hashfn(gfn_t gfn)
 {
-       u32 *pos;
-       u32 *end;
-       for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u32);
-                     pos != end; pos++)
-               if (*pos != 0)
-                       return 0;
-       return 1;
+       return gfn;
 }
 
-static hpa_t kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, u64 *parent_pte)
+static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
+                                              u64 *parent_pte)
 {
        struct kvm_mmu_page *page;
 
        if (list_empty(&vcpu->free_pages))
-               return INVALID_PAGE;
+               return NULL;
 
        page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
        list_del(&page->link);
@@ -183,8 +456,239 @@ static hpa_t kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, u64 *parent_pte)
        ASSERT(is_empty_shadow_page(page->page_hpa));
        page->slot_bitmap = 0;
        page->global = 1;
+       page->multimapped = 0;
        page->parent_pte = parent_pte;
-       return page->page_hpa;
+       --vcpu->kvm->n_free_mmu_pages;
+       return page;
+}
+
+static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
+                                   struct kvm_mmu_page *page, u64 *parent_pte)
+{
+       struct kvm_pte_chain *pte_chain;
+       struct hlist_node *node;
+       int i;
+
+       if (!parent_pte)
+               return;
+       if (!page->multimapped) {
+               u64 *old = page->parent_pte;
+
+               if (!old) {
+                       page->parent_pte = parent_pte;
+                       return;
+               }
+               page->multimapped = 1;
+               pte_chain = mmu_alloc_pte_chain(vcpu);
+               INIT_HLIST_HEAD(&page->parent_ptes);
+               hlist_add_head(&pte_chain->link, &page->parent_ptes);
+               pte_chain->parent_ptes[0] = old;
+       }
+       hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
+               if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
+                       continue;
+               for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
+                       if (!pte_chain->parent_ptes[i]) {
+                               pte_chain->parent_ptes[i] = parent_pte;
+                               return;
+                       }
+       }
+       pte_chain = mmu_alloc_pte_chain(vcpu);
+       BUG_ON(!pte_chain);
+       hlist_add_head(&pte_chain->link, &page->parent_ptes);
+       pte_chain->parent_ptes[0] = parent_pte;
+}
+
+static void mmu_page_remove_parent_pte(struct kvm_vcpu *vcpu,
+                                      struct kvm_mmu_page *page,
+                                      u64 *parent_pte)
+{
+       struct kvm_pte_chain *pte_chain;
+       struct hlist_node *node;
+       int i;
+
+       if (!page->multimapped) {
+               BUG_ON(page->parent_pte != parent_pte);
+               page->parent_pte = NULL;
+               return;
+       }
+       hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
+               for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
+                       if (!pte_chain->parent_ptes[i])
+                               break;
+                       if (pte_chain->parent_ptes[i] != parent_pte)
+                               continue;
+                       while (i + 1 < NR_PTE_CHAIN_ENTRIES
+                               && pte_chain->parent_ptes[i + 1]) {
+                               pte_chain->parent_ptes[i]
+                                       = pte_chain->parent_ptes[i + 1];
+                               ++i;
+                       }
+                       pte_chain->parent_ptes[i] = NULL;
+                       if (i == 0) {
+                               hlist_del(&pte_chain->link);
+                               mmu_free_pte_chain(vcpu, pte_chain);
+                               if (hlist_empty(&page->parent_ptes)) {
+                                       page->multimapped = 0;
+                                       page->parent_pte = NULL;
+                               }
+                       }
+                       return;
+               }
+       BUG();
+}
+
+static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
+                                               gfn_t gfn)
+{
+       unsigned index;
+       struct hlist_head *bucket;
+       struct kvm_mmu_page *page;
+       struct hlist_node *node;
+
+       pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
+       index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+       bucket = &vcpu->kvm->mmu_page_hash[index];
+       hlist_for_each_entry(page, node, bucket, hash_link)
+               if (page->gfn == gfn && !page->role.metaphysical) {
+                       pgprintk("%s: found role %x\n",
+                                __FUNCTION__, page->role.word);
+                       return page;
+               }
+       return NULL;
+}
+
+static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
+                                            gfn_t gfn,
+                                            gva_t gaddr,
+                                            unsigned level,
+                                            int metaphysical,
+                                            u64 *parent_pte)
+{
+       union kvm_mmu_page_role role;
+       unsigned index;
+       unsigned quadrant;
+       struct hlist_head *bucket;
+       struct kvm_mmu_page *page;
+       struct hlist_node *node;
+
+       role.word = 0;
+       role.glevels = vcpu->mmu.root_level;
+       role.level = level;
+       role.metaphysical = metaphysical;
+       if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
+               quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
+               quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
+               role.quadrant = quadrant;
+       }
+       pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
+                gfn, role.word);
+       index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+       bucket = &vcpu->kvm->mmu_page_hash[index];
+       hlist_for_each_entry(page, node, bucket, hash_link)
+               if (page->gfn == gfn && page->role.word == role.word) {
+                       mmu_page_add_parent_pte(vcpu, page, parent_pte);
+                       pgprintk("%s: found\n", __FUNCTION__);
+                       return page;
+               }
+       page = kvm_mmu_alloc_page(vcpu, parent_pte);
+       if (!page)
+               return page;
+       pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
+       page->gfn = gfn;
+       page->role = role;
+       hlist_add_head(&page->hash_link, bucket);
+       if (!metaphysical)
+               rmap_write_protect(vcpu, gfn);
+       return page;
+}
+
+static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
+                                        struct kvm_mmu_page *page)
+{
+       unsigned i;
+       u64 *pt;
+       u64 ent;
+
+       pt = __va(page->page_hpa);
+
+       if (page->role.level == PT_PAGE_TABLE_LEVEL) {
+               for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+                       if (pt[i] & PT_PRESENT_MASK)
+                               rmap_remove(vcpu, &pt[i]);
+                       pt[i] = 0;
+               }
+               kvm_arch_ops->tlb_flush(vcpu);
+               return;
+       }
+
+       for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+               ent = pt[i];
+
+               pt[i] = 0;
+               if (!(ent & PT_PRESENT_MASK))
+                       continue;
+               ent &= PT64_BASE_ADDR_MASK;
+               mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]);
+       }
+}
+
+static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
+                            struct kvm_mmu_page *page,
+                            u64 *parent_pte)
+{
+       mmu_page_remove_parent_pte(vcpu, page, parent_pte);
+}
+
+static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
+                            struct kvm_mmu_page *page)
+{
+       u64 *parent_pte;
+
+       while (page->multimapped || page->parent_pte) {
+               if (!page->multimapped)
+                       parent_pte = page->parent_pte;
+               else {
+                       struct kvm_pte_chain *chain;
+
+                       chain = container_of(page->parent_ptes.first,
+                                            struct kvm_pte_chain, link);
+                       parent_pte = chain->parent_ptes[0];
+               }
+               BUG_ON(!parent_pte);
+               kvm_mmu_put_page(vcpu, page, parent_pte);
+               *parent_pte = 0;
+       }
+       kvm_mmu_page_unlink_children(vcpu, page);
+       if (!page->root_count) {
+               hlist_del(&page->hash_link);
+               kvm_mmu_free_page(vcpu, page->page_hpa);
+       } else {
+               list_del(&page->link);
+               list_add(&page->link, &vcpu->kvm->active_mmu_pages);
+       }
+}
+
+static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+       unsigned index;
+       struct hlist_head *bucket;
+       struct kvm_mmu_page *page;
+       struct hlist_node *node, *n;
+       int r;
+
+       pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
+       r = 0;
+       index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+       bucket = &vcpu->kvm->mmu_page_hash[index];
+       hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
+               if (page->gfn == gfn && !page->role.metaphysical) {
+                       pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
+                                page->role.word);
+                       kvm_mmu_zap_page(vcpu, page);
+                       r = 1;
+               }
+       return r;
 }
 
 static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
@@ -225,35 +729,6 @@ hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
        return gpa_to_hpa(vcpu, gpa);
 }
 
-
-static void release_pt_page_64(struct kvm_vcpu *vcpu, hpa_t page_hpa,
-                              int level)
-{
-       ASSERT(vcpu);
-       ASSERT(VALID_PAGE(page_hpa));
-       ASSERT(level <= PT64_ROOT_LEVEL && level > 0);
-
-       if (level == 1)
-               memset(__va(page_hpa), 0, PAGE_SIZE);
-       else {
-               u64 *pos;
-               u64 *end;
-
-               for (pos = __va(page_hpa), end = pos + PT64_ENT_PER_PAGE;
-                    pos != end; pos++) {
-                       u64 current_ent = *pos;
-
-                       *pos = 0;
-                       if (is_present_pte(current_ent))
-                               release_pt_page_64(vcpu,
-                                                 current_ent &
-                                                 PT64_BASE_ADDR_MASK,
-                                                 level - 1);
-               }
-       }
-       kvm_mmu_free_page(vcpu, page_hpa);
-}
-
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
@@ -266,52 +741,109 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
        for (; ; level--) {
                u32 index = PT64_INDEX(v, level);
                u64 *table;
+               u64 pte;
 
                ASSERT(VALID_PAGE(table_addr));
                table = __va(table_addr);
 
                if (level == 1) {
+                       pte = table[index];
+                       if (is_present_pte(pte) && is_writeble_pte(pte))
+                               return 0;
                        mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
                        page_header_update_slot(vcpu->kvm, table, v);
                        table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
                                                                PT_USER_MASK;
+                       rmap_add(vcpu, &table[index]);
                        return 0;
                }
 
                if (table[index] == 0) {
-                       hpa_t new_table = kvm_mmu_alloc_page(vcpu,
-                                                            &table[index]);
-
-                       if (!VALID_PAGE(new_table)) {
+                       struct kvm_mmu_page *new_table;
+                       gfn_t pseudo_gfn;
+
+                       pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
+                               >> PAGE_SHIFT;
+                       new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
+                                                    v, level - 1,
+                                                    1, &table[index]);
+                       if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
                                return -ENOMEM;
                        }
 
-                       if (level == PT32E_ROOT_LEVEL)
-                               table[index] = new_table | PT_PRESENT_MASK;
-                       else
-                               table[index] = new_table | PT_PRESENT_MASK |
-                                               PT_WRITABLE_MASK | PT_USER_MASK;
+                       table[index] = new_table->page_hpa | PT_PRESENT_MASK
+                               | PT_WRITABLE_MASK | PT_USER_MASK;
                }
                table_addr = table[index] & PT64_BASE_ADDR_MASK;
        }
 }
 
-static void nonpaging_flush(struct kvm_vcpu *vcpu)
+static void mmu_free_roots(struct kvm_vcpu *vcpu)
 {
-       hpa_t root = vcpu->mmu.root_hpa;
+       int i;
+       struct kvm_mmu_page *page;
 
-       ++kvm_stat.tlb_flush;
-       pgprintk("nonpaging_flush\n");
-       ASSERT(VALID_PAGE(root));
-       release_pt_page_64(vcpu, root, vcpu->mmu.shadow_root_level);
-       root = kvm_mmu_alloc_page(vcpu, NULL);
-       ASSERT(VALID_PAGE(root));
-       vcpu->mmu.root_hpa = root;
-       if (is_paging(vcpu))
-               root |= (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK));
-       kvm_arch_ops->set_cr3(vcpu, root);
-       kvm_arch_ops->tlb_flush(vcpu);
+#ifdef CONFIG_X86_64
+       if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+               hpa_t root = vcpu->mmu.root_hpa;
+
+               ASSERT(VALID_PAGE(root));
+               page = page_header(root);
+               --page->root_count;
+               vcpu->mmu.root_hpa = INVALID_PAGE;
+               return;
+       }
+#endif
+       for (i = 0; i < 4; ++i) {
+               hpa_t root = vcpu->mmu.pae_root[i];
+
+               ASSERT(VALID_PAGE(root));
+               root &= PT64_BASE_ADDR_MASK;
+               page = page_header(root);
+               --page->root_count;
+               vcpu->mmu.pae_root[i] = INVALID_PAGE;
+       }
+       vcpu->mmu.root_hpa = INVALID_PAGE;
+}
+
+static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
+{
+       int i;
+       gfn_t root_gfn;
+       struct kvm_mmu_page *page;
+
+       root_gfn = vcpu->cr3 >> PAGE_SHIFT;
+
+#ifdef CONFIG_X86_64
+       if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+               hpa_t root = vcpu->mmu.root_hpa;
+
+               ASSERT(!VALID_PAGE(root));
+               page = kvm_mmu_get_page(vcpu, root_gfn, 0,
+                                       PT64_ROOT_LEVEL, 0, NULL);
+               root = page->page_hpa;
+               ++page->root_count;
+               vcpu->mmu.root_hpa = root;
+               return;
+       }
+#endif
+       for (i = 0; i < 4; ++i) {
+               hpa_t root = vcpu->mmu.pae_root[i];
+
+               ASSERT(!VALID_PAGE(root));
+               if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL)
+                       root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
+               else if (vcpu->mmu.root_level == 0)
+                       root_gfn = 0;
+               page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
+                                       PT32_ROOT_LEVEL, !is_paging(vcpu),
+                                       NULL);
+               root = page->page_hpa;
+               ++page->root_count;
+               vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
+       }
+       vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
 }
 
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
@@ -322,43 +854,29 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                               u32 error_code)
 {
-       int ret;
        gpa_t addr = gva;
+       hpa_t paddr;
+       int r;
+
+       r = mmu_topup_memory_caches(vcpu);
+       if (r)
+               return r;
 
        ASSERT(vcpu);
        ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
 
-       for (;;) {
-            hpa_t paddr;
-
-            paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK);
 
-            if (is_error_hpa(paddr))
-                    return 1;
+       paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK);
 
-            ret = nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
-            if (ret) {
-                    nonpaging_flush(vcpu);
-                    continue;
-            }
-            break;
-       }
-       return ret;
-}
+       if (is_error_hpa(paddr))
+               return 1;
 
-static void nonpaging_inval_page(struct kvm_vcpu *vcpu, gva_t addr)
-{
+       return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
 }
 
 static void nonpaging_free(struct kvm_vcpu *vcpu)
 {
-       hpa_t root;
-
-       ASSERT(vcpu);
-       root = vcpu->mmu.root_hpa;
-       if (VALID_PAGE(root))
-               release_pt_page_64(vcpu, root, vcpu->mmu.shadow_root_level);
-       vcpu->mmu.root_hpa = INVALID_PAGE;
+       mmu_free_roots(vcpu);
 }
 
 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
@@ -367,40 +885,31 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 
        context->new_cr3 = nonpaging_new_cr3;
        context->page_fault = nonpaging_page_fault;
-       context->inval_page = nonpaging_inval_page;
        context->gva_to_gpa = nonpaging_gva_to_gpa;
        context->free = nonpaging_free;
-       context->root_level = PT32E_ROOT_LEVEL;
+       context->root_level = 0;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
-       context->root_hpa = kvm_mmu_alloc_page(vcpu, NULL);
+       mmu_alloc_roots(vcpu);
        ASSERT(VALID_PAGE(context->root_hpa));
        kvm_arch_ops->set_cr3(vcpu, context->root_hpa);
        return 0;
 }
 
-
 static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
-       struct kvm_mmu_page *page, *npage;
-
-       list_for_each_entry_safe(page, npage, &vcpu->kvm->active_mmu_pages,
-                                link) {
-               if (page->global)
-                       continue;
-
-               if (!page->parent_pte)
-                       continue;
-
-               *page->parent_pte = 0;
-               release_pt_page_64(vcpu, page->page_hpa, 1);
-       }
        ++kvm_stat.tlb_flush;
        kvm_arch_ops->tlb_flush(vcpu);
 }
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
 {
+       pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
+       mmu_free_roots(vcpu);
+       if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
+               kvm_mmu_free_some_pages(vcpu);
+       mmu_alloc_roots(vcpu);
        kvm_mmu_flush_tlb(vcpu);
+       kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
 }
 
 static void mark_pagetable_nonglobal(void *shadow_pte)
@@ -412,7 +921,8 @@ static inline void set_pte_common(struct kvm_vcpu *vcpu,
                             u64 *shadow_pte,
                             gpa_t gaddr,
                             int dirty,
-                            u64 access_bits)
+                            u64 access_bits,
+                            gfn_t gfn)
 {
        hpa_t paddr;
 
@@ -420,13 +930,10 @@ static inline void set_pte_common(struct kvm_vcpu *vcpu,
        if (!dirty)
                access_bits &= ~PT_WRITABLE_MASK;
 
-       if (access_bits & PT_WRITABLE_MASK)
-               mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
+       paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
 
        *shadow_pte |= access_bits;
 
-       paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
-
        if (!(*shadow_pte & PT_GLOBAL_MASK))
                mark_pagetable_nonglobal(shadow_pte);
 
@@ -434,10 +941,31 @@ static inline void set_pte_common(struct kvm_vcpu *vcpu,
                *shadow_pte |= gaddr;
                *shadow_pte |= PT_SHADOW_IO_MARK;
                *shadow_pte &= ~PT_PRESENT_MASK;
-       } else {
-               *shadow_pte |= paddr;
-               page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
+               return;
+       }
+
+       *shadow_pte |= paddr;
+
+       if (access_bits & PT_WRITABLE_MASK) {
+               struct kvm_mmu_page *shadow;
+
+               shadow = kvm_mmu_lookup_page(vcpu, gfn);
+               if (shadow) {
+                       pgprintk("%s: found shadow page for %lx, marking ro\n",
+                                __FUNCTION__, gfn);
+                       access_bits &= ~PT_WRITABLE_MASK;
+                       if (is_writeble_pte(*shadow_pte)) {
+                                   *shadow_pte &= ~PT_WRITABLE_MASK;
+                                   kvm_arch_ops->tlb_flush(vcpu);
+                       }
+               }
        }
+
+       if (access_bits & PT_WRITABLE_MASK)
+               mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
+
+       page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
+       rmap_add(vcpu, shadow_pte);
 }
 
 static void inject_page_fault(struct kvm_vcpu *vcpu,
@@ -474,41 +1002,6 @@ static int may_access(u64 pte, int write, int user)
        return 1;
 }
 
-/*
- * Remove a shadow pte.
- */
-static void paging_inval_page(struct kvm_vcpu *vcpu, gva_t addr)
-{
-       hpa_t page_addr = vcpu->mmu.root_hpa;
-       int level = vcpu->mmu.shadow_root_level;
-
-       ++kvm_stat.invlpg;
-
-       for (; ; level--) {
-               u32 index = PT64_INDEX(addr, level);
-               u64 *table = __va(page_addr);
-
-               if (level == PT_PAGE_TABLE_LEVEL ) {
-                       table[index] = 0;
-                       return;
-               }
-
-               if (!is_present_pte(table[index]))
-                       return;
-
-               page_addr = table[index] & PT64_BASE_ADDR_MASK;
-
-               if (level == PT_DIRECTORY_LEVEL &&
-                         (table[index] & PT_SHADOW_PS_MARK)) {
-                       table[index] = 0;
-                       release_pt_page_64(vcpu, page_addr, PT_PAGE_TABLE_LEVEL);
-
-                       kvm_arch_ops->tlb_flush(vcpu);
-                       return;
-               }
-       }
-}
-
 static void paging_free(struct kvm_vcpu *vcpu)
 {
        nonpaging_free(vcpu);
@@ -522,37 +1015,40 @@ static void paging_free(struct kvm_vcpu *vcpu)
 #include "paging_tmpl.h"
 #undef PTTYPE
 
-static int paging64_init_context(struct kvm_vcpu *vcpu)
+static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 {
        struct kvm_mmu *context = &vcpu->mmu;
 
        ASSERT(is_pae(vcpu));
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging64_page_fault;
-       context->inval_page = paging_inval_page;
        context->gva_to_gpa = paging64_gva_to_gpa;
        context->free = paging_free;
-       context->root_level = PT64_ROOT_LEVEL;
-       context->shadow_root_level = PT64_ROOT_LEVEL;
-       context->root_hpa = kvm_mmu_alloc_page(vcpu, NULL);
+       context->root_level = level;
+       context->shadow_root_level = level;
+       mmu_alloc_roots(vcpu);
        ASSERT(VALID_PAGE(context->root_hpa));
        kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
                    (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
        return 0;
 }
 
+static int paging64_init_context(struct kvm_vcpu *vcpu)
+{
+       return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
+}
+
 static int paging32_init_context(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu *context = &vcpu->mmu;
 
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging32_page_fault;
-       context->inval_page = paging_inval_page;
        context->gva_to_gpa = paging32_gva_to_gpa;
        context->free = paging_free;
        context->root_level = PT32_ROOT_LEVEL;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
-       context->root_hpa = kvm_mmu_alloc_page(vcpu, NULL);
+       mmu_alloc_roots(vcpu);
        ASSERT(VALID_PAGE(context->root_hpa));
        kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
                    (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
@@ -561,14 +1057,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
 
 static int paging32E_init_context(struct kvm_vcpu *vcpu)
 {
-       int ret;
-
-       if ((ret = paging64_init_context(vcpu)))
-               return ret;
-
-       vcpu->mmu.root_level = PT32E_ROOT_LEVEL;
-       vcpu->mmu.shadow_root_level = PT32E_ROOT_LEVEL;
-       return 0;
+       return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
 }
 
 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
@@ -597,41 +1086,161 @@ static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
 {
+       int r;
+
        destroy_kvm_mmu(vcpu);
-       return init_kvm_mmu(vcpu);
+       r = init_kvm_mmu(vcpu);
+       if (r < 0)
+               goto out;
+       r = mmu_topup_memory_caches(vcpu);
+out:
+       return r;
 }
 
-static void free_mmu_pages(struct kvm_vcpu *vcpu)
+void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
 {
-       while (!list_empty(&vcpu->free_pages)) {
+       gfn_t gfn = gpa >> PAGE_SHIFT;
+       struct kvm_mmu_page *page;
+       struct kvm_mmu_page *child;
+       struct hlist_node *node, *n;
+       struct hlist_head *bucket;
+       unsigned index;
+       u64 *spte;
+       u64 pte;
+       unsigned offset = offset_in_page(gpa);
+       unsigned pte_size;
+       unsigned page_offset;
+       unsigned misaligned;
+       int level;
+       int flooded = 0;
+
+       pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+       if (gfn == vcpu->last_pt_write_gfn) {
+               ++vcpu->last_pt_write_count;
+               if (vcpu->last_pt_write_count >= 3)
+                       flooded = 1;
+       } else {
+               vcpu->last_pt_write_gfn = gfn;
+               vcpu->last_pt_write_count = 1;
+       }
+       index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+       bucket = &vcpu->kvm->mmu_page_hash[index];
+       hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
+               if (page->gfn != gfn || page->role.metaphysical)
+                       continue;
+               pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
+               misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
+               if (misaligned || flooded) {
+                       /*
+                        * Misaligned accesses are too much trouble to fix
+                        * up; also, they usually indicate a page is not used
+                        * as a page table.
+                        *
+                        * If we're seeing too many writes to a page,
+                        * it may no longer be a page table, or we may be
+                        * forking, in which case it is better to unmap the
+                        * page.
+                        */
+                       pgprintk("misaligned: gpa %llx bytes %d role %x\n",
+                                gpa, bytes, page->role.word);
+                       kvm_mmu_zap_page(vcpu, page);
+                       continue;
+               }
+               page_offset = offset;
+               level = page->role.level;
+               if (page->role.glevels == PT32_ROOT_LEVEL) {
+                       page_offset <<= 1;          /* 32->64 */
+                       page_offset &= ~PAGE_MASK;
+               }
+               spte = __va(page->page_hpa);
+               spte += page_offset / sizeof(*spte);
+               pte = *spte;
+               if (is_present_pte(pte)) {
+                       if (level == PT_PAGE_TABLE_LEVEL)
+                               rmap_remove(vcpu, spte);
+                       else {
+                               child = page_header(pte & PT64_BASE_ADDR_MASK);
+                               mmu_page_remove_parent_pte(vcpu, child, spte);
+                       }
+               }
+               *spte = 0;
+       }
+}
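
The misaligned test above is worth a worked example. For illustration only, a stand-alone sketch with hypothetical offsets, assuming 8-byte ptes:

#include <stdio.h>

/* A write is "misaligned" when it does not fall entirely inside a single
 * pte of pte_size bytes, mirroring the check in kvm_mmu_pre_write() above.
 */
static unsigned misaligned(unsigned offset, int bytes, unsigned pte_size)
{
        return (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
}

int main(void)
{
        printf("%u\n", misaligned(0x804, 4, 8)); /* 0: stays within one pte */
        printf("%u\n", misaligned(0x806, 4, 8)); /* nonzero: straddles two ptes */
        return 0;
}
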
+
+void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
+{
+}
+
+int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
+{
+       gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+
+       return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
+}
+
+void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
+{
+       while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
                struct kvm_mmu_page *page;
 
+               page = container_of(vcpu->kvm->active_mmu_pages.prev,
+                                   struct kvm_mmu_page, link);
+               kvm_mmu_zap_page(vcpu, page);
+       }
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages);
+
+static void free_mmu_pages(struct kvm_vcpu *vcpu)
+{
+       struct kvm_mmu_page *page;
+
+       while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
+               page = container_of(vcpu->kvm->active_mmu_pages.next,
+                                   struct kvm_mmu_page, link);
+               kvm_mmu_zap_page(vcpu, page);
+       }
+       while (!list_empty(&vcpu->free_pages)) {
                page = list_entry(vcpu->free_pages.next,
                                  struct kvm_mmu_page, link);
                list_del(&page->link);
                __free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
                page->page_hpa = INVALID_PAGE;
        }
+       free_page((unsigned long)vcpu->mmu.pae_root);
 }
 
 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 {
+       struct page *page;
        int i;
 
        ASSERT(vcpu);
 
        for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
-               struct page *page;
                struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];
 
                INIT_LIST_HEAD(&page_header->link);
-               if ((page = alloc_page(GFP_KVM_MMU)) == NULL)
+               if ((page = alloc_page(GFP_KERNEL)) == NULL)
                        goto error_1;
                page->private = (unsigned long)page_header;
                page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
                memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
                list_add(&page_header->link, &vcpu->free_pages);
+               ++vcpu->kvm->n_free_mmu_pages;
        }
+
+       /*
+        * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
+        * Therefore we need to allocate shadow page tables in the first
+        * 4GB of memory, which happens to fit the DMA32 zone.
+        */
+       page = alloc_page(GFP_KERNEL | __GFP_DMA32);
+       if (!page)
+               goto error_1;
+       vcpu->mmu.pae_root = page_address(page);
+       for (i = 0; i < 4; ++i)
+               vcpu->mmu.pae_root[i] = INVALID_PAGE;
+
        return 0;
 
 error_1:
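
As a sketch of why the __GFP_DMA32 allocation matters: with PAE paging the processor loads the PDPT through a 32-bit cr3, so the root page must sit below 4GB. The helper below is hypothetical, illustration only:

#include <linux/types.h>
#include <linux/mm.h>

/* Hypothetical sanity check: a PAE root page must be reachable through a
 * 32-bit cr3, which __GFP_DMA32 guarantees on x86_64.
 */
static int pae_root_addressable(struct page *page)
{
        u64 phys = (u64)page_to_pfn(page) << PAGE_SHIFT;

        return phys + PAGE_SIZE <= (1ULL << 32);
}
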
@@ -663,10 +1272,12 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 
        destroy_kvm_mmu(vcpu);
        free_mmu_pages(vcpu);
+       mmu_free_memory_caches(vcpu);
 }
 
-void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
+void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
 {
+       struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_page *page;
 
        list_for_each_entry(page, &kvm->active_mmu_pages, link) {
@@ -679,8 +1290,169 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
                pt = __va(page->page_hpa);
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                        /* avoid RMW */
-                       if (pt[i] & PT_WRITABLE_MASK)
+                       if (pt[i] & PT_WRITABLE_MASK) {
+                               rmap_remove(vcpu, &pt[i]);
                                pt[i] &= ~PT_WRITABLE_MASK;
+                       }
+       }
+}
+
+#ifdef AUDIT
+
+static const char *audit_msg;
+
+static gva_t canonicalize(gva_t gva)
+{
+#ifdef CONFIG_X86_64
+       gva = (long long)(gva << 16) >> 16;
+#endif
+       return gva;
+}
 
+static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
+                               gva_t va, int level)
+{
+       u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
+       int i;
+       gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
+
+       for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
+               u64 ent = pt[i];
+
+               if (!(ent & PT_PRESENT_MASK))

+                       continue;
+
+               va = canonicalize(va);
+               if (level > 1)
+                       audit_mappings_page(vcpu, ent, va, level - 1);
+               else {
+                       gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
+                       hpa_t hpa = gpa_to_hpa(vcpu, gpa);
+
+                       if ((ent & PT_PRESENT_MASK)
+                           && (ent & PT64_BASE_ADDR_MASK) != hpa)
+                               printk(KERN_ERR "audit error: (%s) levels %d"
+                                      " gva %lx gpa %llx hpa %llx ent %llx\n",
+                                      audit_msg, vcpu->mmu.root_level,
+                                      va, gpa, hpa, ent);
+               }
        }
 }
+
+static void audit_mappings(struct kvm_vcpu *vcpu)
+{
+       int i;
+
+       if (vcpu->mmu.root_level == 4)
+               audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
+       else
+               for (i = 0; i < 4; ++i)
+                       if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
+                               audit_mappings_page(vcpu,
+                                                   vcpu->mmu.pae_root[i],
+                                                   i << 30,
+                                                   2);
+}
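
The va_delta shift above encodes how much address space one entry covers at each level; a stand-alone sketch of that arithmetic, assuming 4KB pages and 9 index bits per level:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        int level;

        /* Each entry at level N maps 1 << (PAGE_SHIFT + 9 * (N - 1)) bytes:
         * 4KB at level 1, 2MB at level 2, 1GB at level 3, 512GB at level 4.
         */
        for (level = 1; level <= 4; ++level)
                printf("level %d: %llu bytes per entry\n", level,
                       1ULL << (PAGE_SHIFT + 9 * (level - 1)));
        return 0;
}
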
+
+static int count_rmaps(struct kvm_vcpu *vcpu)
+{
+       int nmaps = 0;
+       int i, j, k;
+
+       for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
+               struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
+               struct kvm_rmap_desc *d;
+
+               for (j = 0; j < m->npages; ++j) {
+                       struct page *page = m->phys_mem[j];
+
+                       if (!page->private)
+                               continue;
+                       if (!(page->private & 1)) {
+                               ++nmaps;
+                               continue;
+                       }
+                       d = (struct kvm_rmap_desc *)(page->private & ~1ul);
+                       while (d) {
+                               for (k = 0; k < RMAP_EXT; ++k)
+                                       if (d->shadow_ptes[k])
+                                               ++nmaps;
+                                       else
+                                               break;
+                               d = d->more;
+                       }
+               }
+       }
+       return nmaps;
+}
+
+static int count_writable_mappings(struct kvm_vcpu *vcpu)
+{
+       int nmaps = 0;
+       struct kvm_mmu_page *page;
+       int i;
+
+       list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
+               u64 *pt = __va(page->page_hpa);
+
+               if (page->role.level != PT_PAGE_TABLE_LEVEL)
+                       continue;
+
+               for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+                       u64 ent = pt[i];
+
+                       if (!(ent & PT_PRESENT_MASK))
+                               continue;
+                       if (!(ent & PT_WRITABLE_MASK))
+                               continue;
+                       ++nmaps;
+               }
+       }
+       return nmaps;
+}
+
+static void audit_rmap(struct kvm_vcpu *vcpu)
+{
+       int n_rmap = count_rmaps(vcpu);
+       int n_actual = count_writable_mappings(vcpu);
+
+       if (n_rmap != n_actual)
+               printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
+                      __FUNCTION__, audit_msg, n_rmap, n_actual);
+}
+
+static void audit_write_protection(struct kvm_vcpu *vcpu)
+{
+       struct kvm_mmu_page *page;
+
+       list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
+               hfn_t hfn;
+               struct page *pg;
+
+               if (page->role.metaphysical)
+                       continue;
+
+               hfn = gpa_to_hpa(vcpu, (gpa_t)page->gfn << PAGE_SHIFT)
+                       >> PAGE_SHIFT;
+               pg = pfn_to_page(hfn);
+               if (pg->private)
+                       printk(KERN_ERR "%s: (%s) shadow page has writable"
+                              " mappings: gfn %lx role %x\n",
+                              __FUNCTION__, audit_msg, page->gfn,
+                              page->role.word);
+       }
+}
+
+static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
+{
+       int olddbg = dbg;
+
+       dbg = 0;
+       audit_msg = msg;
+       audit_rmap(vcpu);
+       audit_write_protection(vcpu);
+       audit_mappings(vcpu);
+       dbg = olddbg;
+}
+
+#endif
index 09bb9b4ed12d1918d5843dfae6a53328a98500a7..2dbf4307ed9ed1e5ea7629bcd551c4b32f3eed08 100644 (file)
        #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
        #define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
+       #ifdef CONFIG_X86_64
+       #define PT_MAX_FULL_LEVELS 4
+       #else
+       #define PT_MAX_FULL_LEVELS 2
+       #endif
 #elif PTTYPE == 32
        #define pt_element_t u32
        #define guest_walker guest_walker32
@@ -42,6 +47,7 @@
        #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
        #define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
+       #define PT_MAX_FULL_LEVELS 2
 #else
        #error Invalid PTTYPE value
 #endif
  */
 struct guest_walker {
        int level;
+       gfn_t table_gfn[PT_MAX_FULL_LEVELS];
        pt_element_t *table;
+       pt_element_t *ptep;
        pt_element_t inherited_ar;
+       gfn_t gfn;
 };
 
-static void FNAME(init_walker)(struct guest_walker *walker,
-                              struct kvm_vcpu *vcpu)
+/*
+ * Fetch a guest pte for a guest virtual address
+ */
+static void FNAME(walk_addr)(struct guest_walker *walker,
+                            struct kvm_vcpu *vcpu, gva_t addr)
 {
        hpa_t hpa;
        struct kvm_memory_slot *slot;
+       pt_element_t *ptep;
+       pt_element_t root;
+       gfn_t table_gfn;
 
+       pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
        walker->level = vcpu->mmu.root_level;
-       slot = gfn_to_memslot(vcpu->kvm,
-                             (vcpu->cr3 & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
-       hpa = safe_gpa_to_hpa(vcpu, vcpu->cr3 & PT64_BASE_ADDR_MASK);
+       walker->table = NULL;
+       root = vcpu->cr3;
+#if PTTYPE == 64
+       if (!is_long_mode(vcpu)) {
+               walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
+               root = *walker->ptep;
+               if (!(root & PT_PRESENT_MASK))
+                       return;
+               --walker->level;
+       }
+#endif
+       table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
+       walker->table_gfn[walker->level - 1] = table_gfn;
+       pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
+                walker->level - 1, table_gfn);
+       slot = gfn_to_memslot(vcpu->kvm, table_gfn);
+       hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
        walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);
 
        ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
               (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);
 
-       walker->table = (pt_element_t *)( (unsigned long)walker->table |
-               (unsigned long)(vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) );
        walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;
+
+       for (;;) {
+               int index = PT_INDEX(addr, walker->level);
+               hpa_t paddr;
+
+               ptep = &walker->table[index];
+               ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
+                      ((unsigned long)ptep & PAGE_MASK));
+
+               if (is_present_pte(*ptep) && !(*ptep & PT_ACCESSED_MASK))
+                       *ptep |= PT_ACCESSED_MASK;
+
+               if (!is_present_pte(*ptep))
+                       break;
+
+               if (walker->level == PT_PAGE_TABLE_LEVEL) {
+                       walker->gfn = (*ptep & PT_BASE_ADDR_MASK)
+                               >> PAGE_SHIFT;
+                       break;
+               }
+
+               if (walker->level == PT_DIRECTORY_LEVEL
+                   && (*ptep & PT_PAGE_SIZE_MASK)
+                   && (PTTYPE == 64 || is_pse(vcpu))) {
+                       walker->gfn = (*ptep & PT_DIR_BASE_ADDR_MASK)
+                               >> PAGE_SHIFT;
+                       walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
+                       break;
+               }
+
+               if (walker->level != 3 || is_long_mode(vcpu))
+                       walker->inherited_ar &= walker->table[index];
+               table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
+               paddr = safe_gpa_to_hpa(vcpu, *ptep & PT_BASE_ADDR_MASK);
+               kunmap_atomic(walker->table, KM_USER0);
+               walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
+                                           KM_USER0);
+               --walker->level;
+               walker->table_gfn[walker->level - 1] = table_gfn;
+               pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
+                        walker->level - 1, table_gfn);
+       }
+       walker->ptep = ptep;
+       pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep);
 }
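
The walker pulls a 9-bit index out of the guest virtual address at every level it descends; a stand-alone sketch of that index extraction for a 4-level layout, mirroring the 64-bit PT_INDEX() arithmetic, with a made-up example address:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Index of addr within the page table at the given level (9 bits per level). */
static unsigned pt_index(unsigned long long addr, int level)
{
        return (addr >> (PAGE_SHIFT + 9 * (level - 1))) & 0x1ff;
}

int main(void)
{
        unsigned long long addr = 0x00007f1234567000ULL;  /* hypothetical gva */
        int level;

        for (level = 4; level >= 1; --level)
                printf("level %d index %u\n", level, pt_index(addr, level));
        return 0;
}
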
 
 static void FNAME(release_walker)(struct guest_walker *walker)
 {
-       kunmap_atomic(walker->table, KM_USER0);
+       if (walker->table)
+               kunmap_atomic(walker->table, KM_USER0);
 }
 
 static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
-                          u64 *shadow_pte, u64 access_bits)
+                          u64 *shadow_pte, u64 access_bits, gfn_t gfn)
 {
        ASSERT(*shadow_pte == 0);
        access_bits &= guest_pte;
        *shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
        set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
-                      guest_pte & PT_DIRTY_MASK, access_bits);
+                      guest_pte & PT_DIRTY_MASK, access_bits, gfn);
 }
 
 static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
-                          u64 *shadow_pte, u64 access_bits,
-                          int index)
+                          u64 *shadow_pte, u64 access_bits, gfn_t gfn)
 {
        gpa_t gaddr;
 
        ASSERT(*shadow_pte == 0);
        access_bits &= guest_pde;
-       gaddr = (guest_pde & PT_DIR_BASE_ADDR_MASK) + PAGE_SIZE * index;
+       gaddr = (gpa_t)gfn << PAGE_SHIFT;
        if (PTTYPE == 32 && is_cpuid_PSE36())
                gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
                        (32 - PT32_DIR_PSE36_SHIFT);
        *shadow_pte = guest_pde & PT_PTE_COPY_MASK;
        set_pte_common(vcpu, shadow_pte, gaddr,
-                      guest_pde & PT_DIRTY_MASK, access_bits);
-}
-
-/*
- * Fetch a guest pte from a specific level in the paging hierarchy.
- */
-static pt_element_t *FNAME(fetch_guest)(struct kvm_vcpu *vcpu,
-                                       struct guest_walker *walker,
-                                       int level,
-                                       gva_t addr)
-{
-
-       ASSERT(level > 0  && level <= walker->level);
-
-       for (;;) {
-               int index = PT_INDEX(addr, walker->level);
-               hpa_t paddr;
-
-               ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
-                      ((unsigned long)&walker->table[index] & PAGE_MASK));
-               if (level == walker->level ||
-                   !is_present_pte(walker->table[index]) ||
-                   (walker->level == PT_DIRECTORY_LEVEL &&
-                    (walker->table[index] & PT_PAGE_SIZE_MASK) &&
-                    (PTTYPE == 64 || is_pse(vcpu))))
-                       return &walker->table[index];
-               if (walker->level != 3 || is_long_mode(vcpu))
-                       walker->inherited_ar &= walker->table[index];
-               paddr = safe_gpa_to_hpa(vcpu, walker->table[index] & PT_BASE_ADDR_MASK);
-               kunmap_atomic(walker->table, KM_USER0);
-               walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
-                                           KM_USER0);
-               --walker->level;
-       }
+                      guest_pde & PT_DIRTY_MASK, access_bits, gfn);
 }
 
 /*
@@ -150,15 +189,26 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
        hpa_t shadow_addr;
        int level;
        u64 *prev_shadow_ent = NULL;
+       pt_element_t *guest_ent = walker->ptep;
+
+       if (!is_present_pte(*guest_ent))
+               return NULL;
 
        shadow_addr = vcpu->mmu.root_hpa;
        level = vcpu->mmu.shadow_root_level;
+       if (level == PT32E_ROOT_LEVEL) {
+               shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
+               shadow_addr &= PT64_BASE_ADDR_MASK;
+               --level;
+       }
 
        for (; ; level--) {
                u32 index = SHADOW_PT_INDEX(addr, level);
                u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
-               pt_element_t *guest_ent;
+               struct kvm_mmu_page *shadow_page;
                u64 shadow_pte;
+               int metaphysical;
+               gfn_t table_gfn;
 
                if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
                        if (level == PT_PAGE_TABLE_LEVEL)
@@ -168,21 +218,6 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                        continue;
                }
 
-               if (PTTYPE == 32 && level > PT32_ROOT_LEVEL) {
-                       ASSERT(level == PT32E_ROOT_LEVEL);
-                       guest_ent = FNAME(fetch_guest)(vcpu, walker,
-                                                      PT32_ROOT_LEVEL, addr);
-               } else
-                       guest_ent = FNAME(fetch_guest)(vcpu, walker,
-                                                      level, addr);
-
-               if (!is_present_pte(*guest_ent))
-                       return NULL;
-
-               /* Don't set accessed bit on PAE PDPTRs */
-               if (vcpu->mmu.root_level != 3 || walker->level != 3)
-                       *guest_ent |= PT_ACCESSED_MASK;
-
                if (level == PT_PAGE_TABLE_LEVEL) {
 
                        if (walker->level == PT_DIRECTORY_LEVEL) {
@@ -190,21 +225,30 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                                        *prev_shadow_ent |= PT_SHADOW_PS_MARK;
                                FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
                                               walker->inherited_ar,
-                                         PT_INDEX(addr, PT_PAGE_TABLE_LEVEL));
+                                              walker->gfn);
                        } else {
                                ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
-                               FNAME(set_pte)(vcpu, *guest_ent, shadow_ent, walker->inherited_ar);
+                               FNAME(set_pte)(vcpu, *guest_ent, shadow_ent,
+                                              walker->inherited_ar,
+                                              walker->gfn);
                        }
                        return shadow_ent;
                }
 
-               shadow_addr = kvm_mmu_alloc_page(vcpu, shadow_ent);
-               if (!VALID_PAGE(shadow_addr))
-                       return ERR_PTR(-ENOMEM);
-               shadow_pte = shadow_addr | PT_PRESENT_MASK;
-               if (vcpu->mmu.root_level > 3 || level != 3)
-                       shadow_pte |= PT_ACCESSED_MASK
-                               | PT_WRITABLE_MASK | PT_USER_MASK;
+               if (level - 1 == PT_PAGE_TABLE_LEVEL
+                   && walker->level == PT_DIRECTORY_LEVEL) {
+                       metaphysical = 1;
+                       table_gfn = (*guest_ent & PT_BASE_ADDR_MASK)
+                               >> PAGE_SHIFT;
+               } else {
+                       metaphysical = 0;
+                       table_gfn = walker->table_gfn[level - 2];
+               }
+               shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
+                                              metaphysical, shadow_ent);
+               shadow_addr = shadow_page->page_hpa;
+               shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
+                       | PT_WRITABLE_MASK | PT_USER_MASK;
                *shadow_ent = shadow_pte;
                prev_shadow_ent = shadow_ent;
        }
@@ -221,11 +265,13 @@ static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
                               u64 *shadow_ent,
                               struct guest_walker *walker,
                               gva_t addr,
-                              int user)
+                              int user,
+                              int *write_pt)
 {
        pt_element_t *guest_ent;
        int writable_shadow;
        gfn_t gfn;
+       struct kvm_mmu_page *page;
 
        if (is_writeble_pte(*shadow_ent))
                return 0;
@@ -250,17 +296,35 @@ static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
                        *shadow_ent &= ~PT_USER_MASK;
                }
 
-       guest_ent = FNAME(fetch_guest)(vcpu, walker, PT_PAGE_TABLE_LEVEL, addr);
+       guest_ent = walker->ptep;
 
        if (!is_present_pte(*guest_ent)) {
                *shadow_ent = 0;
                return 0;
        }
 
-       gfn = (*guest_ent & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
+       gfn = walker->gfn;
+
+       if (user) {
+               /*
+                * Usermode page faults won't be for page table updates.
+                */
+               while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
+                       pgprintk("%s: zap %lx %x\n",
+                                __FUNCTION__, gfn, page->role.word);
+                       kvm_mmu_zap_page(vcpu, page);
+               }
+       } else if (kvm_mmu_lookup_page(vcpu, gfn)) {
+               pgprintk("%s: found shadow page for %lx, marking ro\n",
+                        __FUNCTION__, gfn);
+               *guest_ent |= PT_DIRTY_MASK;
+               *write_pt = 1;
+               return 0;
+       }
        mark_page_dirty(vcpu->kvm, gfn);
        *shadow_ent |= PT_WRITABLE_MASK;
        *guest_ent |= PT_DIRTY_MASK;
+       rmap_add(vcpu, shadow_ent);
 
        return 1;
 }
@@ -276,7 +340,8 @@ static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
  *   - normal guest page fault due to the guest pte marked not present, not
  *     writable, or not executable
  *
- *  Returns: 1 if we need to emulate the instruction, 0 otherwise
+ *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
+ *           a negative value on error.
  */
 static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                               u32 error_code)
@@ -287,39 +352,47 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
        struct guest_walker walker;
        u64 *shadow_pte;
        int fixed;
+       int write_pt = 0;
+       int r;
+
+       pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
+       kvm_mmu_audit(vcpu, "pre page fault");
+
+       r = mmu_topup_memory_caches(vcpu);
+       if (r)
+               return r;
 
        /*
         * Look up the shadow pte for the faulting address.
         */
-       for (;;) {
-               FNAME(init_walker)(&walker, vcpu);
-               shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
-               if (IS_ERR(shadow_pte)) {  /* must be -ENOMEM */
-                       nonpaging_flush(vcpu);
-                       FNAME(release_walker)(&walker);
-                       continue;
-               }
-               break;
-       }
+       FNAME(walk_addr)(&walker, vcpu, addr);
+       shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
 
        /*
         * The page is not mapped by the guest.  Let the guest handle it.
         */
        if (!shadow_pte) {
+               pgprintk("%s: not mapped\n", __FUNCTION__);
                inject_page_fault(vcpu, addr, error_code);
                FNAME(release_walker)(&walker);
                return 0;
        }
 
+       pgprintk("%s: shadow pte %p %llx\n", __FUNCTION__,
+                shadow_pte, *shadow_pte);
+
        /*
         * Update the shadow pte.
         */
        if (write_fault)
                fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
-                                           user_fault);
+                                           user_fault, &write_pt);
        else
                fixed = fix_read_pf(shadow_pte);
 
+       pgprintk("%s: updated shadow pte %p %llx\n", __FUNCTION__,
+                shadow_pte, *shadow_pte);
+
        FNAME(release_walker)(&walker);
 
        /*
@@ -331,20 +404,23 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                pgprintk("%s: io work, no access\n", __FUNCTION__);
                inject_page_fault(vcpu, addr,
                                  error_code | PFERR_PRESENT_MASK);
+               kvm_mmu_audit(vcpu, "post page fault (io)");
                return 0;
        }
 
        /*
         * pte not present, guest page fault.
         */
-       if (pte_present && !fixed) {
+       if (pte_present && !fixed && !write_pt) {
                inject_page_fault(vcpu, addr, error_code);
+               kvm_mmu_audit(vcpu, "post page fault (guest)");
                return 0;
        }
 
        ++kvm_stat.pf_fixed;
+       kvm_mmu_audit(vcpu, "post page fault (fixed)");
 
-       return 0;
+       return write_pt;
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
@@ -353,9 +429,8 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
        pt_element_t guest_pte;
        gpa_t gpa;
 
-       FNAME(init_walker)(&walker, vcpu);
-       guest_pte = *FNAME(fetch_guest)(vcpu, &walker, PT_PAGE_TABLE_LEVEL,
-                                       vaddr);
+       FNAME(walk_addr)(&walker, vcpu, vaddr);
+       guest_pte = *walker.ptep;
        FNAME(release_walker)(&walker);
 
        if (!is_present_pte(guest_pte))
@@ -389,3 +464,4 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
 #undef PT_PTE_COPY_MASK
 #undef PT_NON_PTE_COPY_MASK
 #undef PT_DIR_BASE_ADDR_MASK
+#undef PT_MAX_FULL_LEVELS
index fa042873571711b1f453489a881f46ce2723188c..ccc06b1b91b56e5d198b78e3c70e0f4a211b19d8 100644 (file)
@@ -235,6 +235,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 
        vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip;
        vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
+
+       vcpu->interrupt_window_open = 1;
 }
 
 static int has_svm(void)
@@ -495,7 +497,6 @@ static void init_vmcb(struct vmcb *vmcb)
                /*              (1ULL << INTERCEPT_SELECTIVE_CR0) | */
                                (1ULL << INTERCEPT_CPUID) |
                                (1ULL << INTERCEPT_HLT) |
-                               (1ULL << INTERCEPT_INVLPG) |
                                (1ULL << INTERCEPT_INVLPGA) |
                                (1ULL << INTERCEPT_IOIO_PROT) |
                                (1ULL << INTERCEPT_MSR_PROT) |
@@ -700,6 +701,10 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
        vcpu->svm->vmcb->save.gdtr.base = dt->base ;
 }
 
+static void svm_decache_cr0_cr4_guest_bits(struct kvm_vcpu *vcpu)
+{
+}
+
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 #ifdef CONFIG_X86_64
@@ -847,6 +852,7 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        u64 fault_address;
        u32 error_code;
        enum emulation_result er;
+       int r;
 
        if (is_external_interrupt(exit_int_info))
                push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
@@ -855,7 +861,12 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
        fault_address  = vcpu->svm->vmcb->control.exit_info_2;
        error_code = vcpu->svm->vmcb->control.exit_info_1;
-       if (!vcpu->mmu.page_fault(vcpu, fault_address, error_code)) {
+       r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
+       if (r < 0) {
+               spin_unlock(&vcpu->kvm->lock);
+               return r;
+       }
+       if (!r) {
                spin_unlock(&vcpu->kvm->lock);
                return 1;
        }
@@ -1031,10 +1042,11 @@ static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
        skip_emulated_instruction(vcpu);
-       if (vcpu->irq_summary && (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF))
+       if (vcpu->irq_summary)
                return 1;
 
        kvm_run->exit_reason = KVM_EXIT_HLT;
+       ++kvm_stat.halt_exits;
        return 0;
 }
 
@@ -1186,6 +1198,23 @@ static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                return rdmsr_interception(vcpu, kvm_run);
 }
 
+static int interrupt_window_interception(struct kvm_vcpu *vcpu,
+                                  struct kvm_run *kvm_run)
+{
+       /*
+        * If user space is waiting to inject interrupts, exit to user space
+        * as soon as possible.
+        */
+       if (kvm_run->request_interrupt_window &&
+           !vcpu->irq_summary) {
+               ++kvm_stat.irq_window_exits;
+               kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+               return 0;
+       }
+
+       return 1;
+}
+
 static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
                                      struct kvm_run *kvm_run) = {
        [SVM_EXIT_READ_CR0]                     = emulate_on_interception,
@@ -1210,6 +1239,7 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
        [SVM_EXIT_NMI]                          = nop_on_interception,
        [SVM_EXIT_SMI]                          = nop_on_interception,
        [SVM_EXIT_INIT]                         = nop_on_interception,
+       [SVM_EXIT_VINTR]                        = interrupt_window_interception,
        /* [SVM_EXIT_CR0_SEL_WRITE]             = emulate_on_interception, */
        [SVM_EXIT_CPUID]                        = cpuid_interception,
        [SVM_EXIT_HLT]                          = halt_interception,
@@ -1278,15 +1308,11 @@ static void pre_svm_run(struct kvm_vcpu *vcpu)
 }
 
 
-static inline void kvm_try_inject_irq(struct kvm_vcpu *vcpu)
+static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
 {
        struct vmcb_control_area *control;
 
-       if (!vcpu->irq_summary)
-               return;
-
        control = &vcpu->svm->vmcb->control;
-
        control->int_vector = pop_irq(vcpu);
        control->int_ctl &= ~V_INTR_PRIO_MASK;
        control->int_ctl |= V_IRQ_MASK |
@@ -1301,6 +1327,59 @@ static void kvm_reput_irq(struct kvm_vcpu *vcpu)
                control->int_ctl &= ~V_IRQ_MASK;
                push_irq(vcpu, control->int_vector);
        }
+
+       vcpu->interrupt_window_open =
+               !(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
+}
+
+static void do_interrupt_requests(struct kvm_vcpu *vcpu,
+                                      struct kvm_run *kvm_run)
+{
+       struct vmcb_control_area *control = &vcpu->svm->vmcb->control;
+
+       vcpu->interrupt_window_open =
+               (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
+                (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
+
+       if (vcpu->interrupt_window_open && vcpu->irq_summary)
+               /*
+                * If interrupts are enabled and not blocked by sti or mov ss, inject now.
+                */
+               kvm_do_inject_irq(vcpu);
+
+       /*
+        * Interrupts blocked.  Wait for unblock.
+        */
+       if (!vcpu->interrupt_window_open &&
+           (vcpu->irq_summary || kvm_run->request_interrupt_window)) {
+               control->intercept |= 1ULL << INTERCEPT_VINTR;
+       } else
+               control->intercept &= ~(1ULL << INTERCEPT_VINTR);
+}
+
+static void post_kvm_run_save(struct kvm_vcpu *vcpu,
+                             struct kvm_run *kvm_run)
+{
+       kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
+                                                 vcpu->irq_summary == 0);
+       kvm_run->if_flag = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
+       kvm_run->cr8 = vcpu->cr8;
+       kvm_run->apic_base = vcpu->apic_base;
+}
+
+/*
+ * Check if userspace requested an interrupt window, and that the
+ * interrupt window is open.
+ *
+ * No need to exit to userspace if we already have an interrupt queued.
+ */
+static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
+                                         struct kvm_run *kvm_run)
+{
+       return (!vcpu->irq_summary &&
+               kvm_run->request_interrupt_window &&
+               vcpu->interrupt_window_open &&
+               (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
 }
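
The run loop on the other side of this interface is expected to look roughly like the sketch below; run_vcpu(), inject_irq(), pending_guest_irq() and next_pending_irq() are hypothetical wrappers, only the kvm_run fields and the exit reason come from the code above:

/* Sketch of the intended user-space flow, error handling omitted. */
void vcpu_loop(struct kvm_run *run, int vcpu_fd)
{
        for (;;) {
                /* ask for an exit as soon as injection becomes possible */
                run->request_interrupt_window = pending_guest_irq();     /* hypothetical */
                run_vcpu(vcpu_fd, run);                                  /* hypothetical */

                if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN ||
                    run->ready_for_interrupt_injection)
                        inject_irq(vcpu_fd, next_pending_irq());         /* hypothetical */
        }
}
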
 
 static void save_db_regs(unsigned long *db_regs)
@@ -1324,9 +1403,10 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        u16 fs_selector;
        u16 gs_selector;
        u16 ldt_selector;
+       int r;
 
 again:
-       kvm_try_inject_irq(vcpu);
+       do_interrupt_requests(vcpu, kvm_run);
 
        clgi();
 
@@ -1487,18 +1567,28 @@ again:
        if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
                kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
                kvm_run->exit_reason = vcpu->svm->vmcb->control.exit_code;
+               post_kvm_run_save(vcpu, kvm_run);
                return 0;
        }
 
-       if (handle_exit(vcpu, kvm_run)) {
+       r = handle_exit(vcpu, kvm_run);
+       if (r > 0) {
                if (signal_pending(current)) {
                        ++kvm_stat.signal_exits;
+                       post_kvm_run_save(vcpu, kvm_run);
+                       return -EINTR;
+               }
+
+               if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+                       ++kvm_stat.request_irq_exits;
+                       post_kvm_run_save(vcpu, kvm_run);
                        return -EINTR;
                }
                kvm_resched(vcpu);
                goto again;
        }
-       return 0;
+       post_kvm_run_save(vcpu, kvm_run);
+       return r;
 }
 
 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
@@ -1565,6 +1655,7 @@ static struct kvm_arch_ops svm_arch_ops = {
        .get_segment = svm_get_segment,
        .set_segment = svm_set_segment,
        .get_cs_db_l_bits = svm_get_cs_db_l_bits,
+       .decache_cr0_cr4_guest_bits = svm_decache_cr0_cr4_guest_bits,
        .set_cr0 = svm_set_cr0,
        .set_cr0_no_modeswitch = svm_set_cr0,
        .set_cr3 = svm_set_cr3,
index d0a2c2d5342a3320be18a6e315bf5d9d66c3d916..d4701cb4c65424e6b5d9267f2347c42445632ccf 100644 (file)
@@ -116,7 +116,7 @@ static void vmcs_clear(struct vmcs *vmcs)
 static void __vcpu_clear(void *arg)
 {
        struct kvm_vcpu *vcpu = arg;
-       int cpu = smp_processor_id();
+       int cpu = raw_smp_processor_id();
 
        if (vcpu->cpu == cpu)
                vmcs_clear(vcpu->vmcs);
@@ -152,15 +152,21 @@ static u64 vmcs_read64(unsigned long field)
 #endif
 }
 
+static noinline void vmwrite_error(unsigned long field, unsigned long value)
+{
+       printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
+              field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
+       dump_stack();
+}
+
 static void vmcs_writel(unsigned long field, unsigned long value)
 {
        u8 error;
 
        asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
                       : "=q"(error) : "a"(value), "d"(field) : "cc" );
-       if (error)
-               printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
-                      field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
+       if (unlikely(error))
+               vmwrite_error(field, value);
 }
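
Splitting the cold printk out into a noinline helper guarded by unlikely() keeps the hot vmcs_writel() path small; a generic stand-alone sketch of the same pattern, all names made up:

#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

/* Cold path: kept out of line so the fast path stays small. */
static __attribute__((noinline)) void report_error(int err)
{
        fprintf(stderr, "operation failed: %d\n", err);
}

static int do_operation(int input)
{
        int err = (input < 0);          /* stand-in for the real failure check */

        if (unlikely(err))
                report_error(err);
        return err;
}

int main(void)
{
        do_operation(1);
        do_operation(-1);
        return 0;
}
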
 
 static void vmcs_write16(unsigned long field, u16 value)
@@ -263,6 +269,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
        if (interruptibility & 3)
                vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
                             interruptibility & ~3);
+       vcpu->interrupt_window_open = 1;
 }
 
 static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
@@ -541,7 +548,7 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
 
 static struct vmcs *alloc_vmcs(void)
 {
-       return alloc_vmcs_cpu(smp_processor_id());
+       return alloc_vmcs_cpu(raw_smp_processor_id());
 }
 
 static void free_vmcs(struct vmcs *vmcs)
@@ -736,6 +743,15 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
 
 #endif
 
+static void vmx_decache_cr0_cr4_guest_bits(struct kvm_vcpu *vcpu)
+{
+       vcpu->cr0 &= KVM_GUEST_CR0_MASK;
+       vcpu->cr0 |= vmcs_readl(GUEST_CR0) & ~KVM_GUEST_CR0_MASK;
+
+       vcpu->cr4 &= KVM_GUEST_CR4_MASK;
+       vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
+}
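
The decache helper is the usual mask merge: bits the guest owns are read back from the hardware register, everything else keeps the cached value. A worked example with a hypothetical mask:

#include <stdio.h>

int main(void)
{
        /* mask is hypothetical: bit 3 is the only guest-owned bit here */
        unsigned long mask   = ~0x8UL;
        unsigned long cached = 0x8005003bUL;    /* KVM's cached register value */
        unsigned long hw     = 0x80050033UL;    /* value read back from the VMCS */
        unsigned long merged = (cached & mask) | (hw & ~mask);

        printf("%#lx\n", merged);   /* 0x80050033: the guest's change to bit 3 shows up */
        return 0;
}
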
+
 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
        if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
@@ -1011,8 +1027,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
        vmcs_writel(GUEST_RIP, 0xfff0);
        vmcs_writel(GUEST_RSP, 0);
 
-       vmcs_writel(GUEST_CR3, 0);
-
        //todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0
        vmcs_writel(GUEST_DR7, 0x400);
 
@@ -1049,7 +1063,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
                               | CPU_BASED_CR8_LOAD_EXITING    /* 20.6.2 */
                               | CPU_BASED_CR8_STORE_EXITING   /* 20.6.2 */
                               | CPU_BASED_UNCOND_IO_EXITING   /* 20.6.2 */
-                              | CPU_BASED_INVDPG_EXITING
                               | CPU_BASED_MOV_DR_EXITING
                               | CPU_BASED_USE_TSC_OFFSETING   /* 21.3 */
                        );
@@ -1094,14 +1107,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
        rdmsrl(MSR_IA32_SYSENTER_EIP, a);
        vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */
 
-       ret = -ENOMEM;
-       vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!vcpu->guest_msrs)
-               goto out;
-       vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!vcpu->host_msrs)
-               goto out_free_guest_msrs;
-
        for (i = 0; i < NR_VMX_MSR; ++i) {
                u32 index = vmx_msr_index[i];
                u32 data_low, data_high;
@@ -1155,8 +1160,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 
        return 0;
 
-out_free_guest_msrs:
-       kfree(vcpu->guest_msrs);
 out:
        return ret;
 }
@@ -1224,21 +1227,34 @@ static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
                        irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
-static void kvm_try_inject_irq(struct kvm_vcpu *vcpu)
+
+static void do_interrupt_requests(struct kvm_vcpu *vcpu,
+                                      struct kvm_run *kvm_run)
 {
-       if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF)
-           && (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0)
+       u32 cpu_based_vm_exec_control;
+
+       vcpu->interrupt_window_open =
+               ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+                (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
+
+       if (vcpu->interrupt_window_open &&
+           vcpu->irq_summary &&
+           !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
                /*
-                * Interrupts enabled, and not blocked by sti or mov ss. Good.
+                * If interrupts are enabled and not blocked by sti or mov ss, inject now.
                 */
                kvm_do_inject_irq(vcpu);
-       else
+
+       cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+       if (!vcpu->interrupt_window_open &&
+           (vcpu->irq_summary || kvm_run->request_interrupt_window))
                /*
                 * Interrupts blocked.  Wait for unblock.
                 */
-               vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
-                            vmcs_read32(CPU_BASED_VM_EXEC_CONTROL)
-                            | CPU_BASED_VIRTUAL_INTR_PENDING);
+               cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
+       else
+               cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
+       vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 }
 
 static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
@@ -1277,6 +1293,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        unsigned long cr2, rip;
        u32 vect_info;
        enum emulation_result er;
+       int r;
 
        vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
        intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
@@ -1305,7 +1322,12 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                cr2 = vmcs_readl(EXIT_QUALIFICATION);
 
                spin_lock(&vcpu->kvm->lock);
-               if (!vcpu->mmu.page_fault(vcpu, cr2, error_code)) {
+               r = kvm_mmu_page_fault(vcpu, cr2, error_code);
+               if (r < 0) {
+                       spin_unlock(&vcpu->kvm->lock);
+                       return r;
+               }
+               if (!r) {
                        spin_unlock(&vcpu->kvm->lock);
                        return 1;
                }
@@ -1425,17 +1447,6 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return 0;
 }
 
-static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-       u64 address = vmcs_read64(EXIT_QUALIFICATION);
-       int instruction_length = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
-       spin_lock(&vcpu->kvm->lock);
-       vcpu->mmu.inval_page(vcpu, address);
-       spin_unlock(&vcpu->kvm->lock);
-       vmcs_writel(GUEST_RIP, vmcs_readl(GUEST_RIP) + instruction_length);
-       return 1;
-}
-
 static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        u64 exit_qualification;
@@ -1575,23 +1586,40 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return 1;
 }
 
+static void post_kvm_run_save(struct kvm_vcpu *vcpu,
+                             struct kvm_run *kvm_run)
+{
+       kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
+       kvm_run->cr8 = vcpu->cr8;
+       kvm_run->apic_base = vcpu->apic_base;
+       kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
+                                                 vcpu->irq_summary == 0);
+}
+
 static int handle_interrupt_window(struct kvm_vcpu *vcpu,
                                   struct kvm_run *kvm_run)
 {
-       /* Turn off interrupt window reporting. */
-       vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
-                    vmcs_read32(CPU_BASED_VM_EXEC_CONTROL)
-                    & ~CPU_BASED_VIRTUAL_INTR_PENDING);
+       /*
+        * If user space is waiting to inject interrupts, exit to user space
+        * as soon as possible.
+        */
+       if (kvm_run->request_interrupt_window &&
+           !vcpu->irq_summary) {
+               kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+               ++kvm_stat.irq_window_exits;
+               return 0;
+       }
        return 1;
 }
 
 static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        skip_emulated_instruction(vcpu);
-       if (vcpu->irq_summary && (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF))
+       if (vcpu->irq_summary)
                return 1;
 
        kvm_run->exit_reason = KVM_EXIT_HLT;
+       ++kvm_stat.halt_exits;
        return 0;
 }
 
@@ -1605,7 +1633,6 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
        [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
        [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
        [EXIT_REASON_IO_INSTRUCTION]          = handle_io,
-       [EXIT_REASON_INVLPG]                  = handle_invlpg,
        [EXIT_REASON_CR_ACCESS]               = handle_cr,
        [EXIT_REASON_DR_ACCESS]               = handle_dr,
        [EXIT_REASON_CPUID]                   = handle_cpuid,
@@ -1642,11 +1669,27 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        return 0;
 }
 
+/*
+ * Check if userspace requested an interrupt window, and that the
+ * interrupt window is open.
+ *
+ * No need to exit to userspace if we already have an interrupt queued.
+ */
+static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
+                                         struct kvm_run *kvm_run)
+{
+       return (!vcpu->irq_summary &&
+               kvm_run->request_interrupt_window &&
+               vcpu->interrupt_window_open &&
+               (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
+}
+
 static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        u8 fail;
        u16 fs_sel, gs_sel, ldt_sel;
        int fs_gs_ldt_reload_needed;
+       int r;
 
 again:
        /*
@@ -1673,9 +1716,7 @@ again:
        vmcs_writel(HOST_GS_BASE, segment_base(gs_sel));
 #endif
 
-       if (vcpu->irq_summary &&
-           !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
-               kvm_try_inject_irq(vcpu);
+       do_interrupt_requests(vcpu, kvm_run);
 
        if (vcpu->guest_debug.enabled)
                kvm_guest_debug_pre(vcpu);
@@ -1812,6 +1853,7 @@ again:
 
        fx_save(vcpu->guest_fx_image);
        fx_restore(vcpu->host_fx_image);
+       vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
 
 #ifndef CONFIG_X86_64
        asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
@@ -1821,6 +1863,7 @@ again:
        if (fail) {
                kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
                kvm_run->exit_reason = vmcs_read32(VM_INSTRUCTION_ERROR);
+               r = 0;
        } else {
                if (fs_gs_ldt_reload_needed) {
                        load_ldt(ldt_sel);
@@ -1840,17 +1883,28 @@ again:
                }
                vcpu->launched = 1;
                kvm_run->exit_type = KVM_EXIT_TYPE_VM_EXIT;
-               if (kvm_handle_exit(kvm_run, vcpu)) {
+               r = kvm_handle_exit(kvm_run, vcpu);
+               if (r > 0) {
                        /* Give scheduler a chance to reschedule. */
                        if (signal_pending(current)) {
                                ++kvm_stat.signal_exits;
+                               post_kvm_run_save(vcpu, kvm_run);
+                               return -EINTR;
+                       }
+
+                       if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+                               ++kvm_stat.request_irq_exits;
+                               post_kvm_run_save(vcpu, kvm_run);
                                return -EINTR;
                        }
+
                        kvm_resched(vcpu);
                        goto again;
                }
        }
-       return 0;
+
+       post_kvm_run_save(vcpu, kvm_run);
+       return r;
 }
 
 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
@@ -1906,13 +1960,33 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 {
        struct vmcs *vmcs;
 
+       vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!vcpu->guest_msrs)
+               return -ENOMEM;
+
+       vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!vcpu->host_msrs)
+               goto out_free_guest_msrs;
+
        vmcs = alloc_vmcs();
        if (!vmcs)
-               return -ENOMEM;
+               goto out_free_msrs;
+
        vmcs_clear(vmcs);
        vcpu->vmcs = vmcs;
        vcpu->launched = 0;
+
        return 0;
+
+out_free_msrs:
+       kfree(vcpu->host_msrs);
+       vcpu->host_msrs = NULL;
+
+out_free_guest_msrs:
+       kfree(vcpu->guest_msrs);
+       vcpu->guest_msrs = NULL;
+
+       return -ENOMEM;
 }
 
 static struct kvm_arch_ops vmx_arch_ops = {
@@ -1936,6 +2010,7 @@ static struct kvm_arch_ops vmx_arch_ops = {
        .get_segment = vmx_get_segment,
        .set_segment = vmx_set_segment,
        .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
+       .decache_cr0_cr4_guest_bits = vmx_decache_cr0_cr4_guest_bits,
        .set_cr0 = vmx_set_cr0,
        .set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch,
        .set_cr3 = vmx_set_cr3,
index 1bff3e925fda5686e0bb0042a2189a5593ec8188..be70795b4822073500ff823d7697a3f26818f95d 100644 (file)
@@ -1323,7 +1323,7 @@ twobyte_special_insn:
                                                         ctxt)) != 0))
                                goto done;
                        if ((old_lo != _regs[VCPU_REGS_RAX])
-                           || (old_hi != _regs[VCPU_REGS_RDI])) {
+                           || (old_hi != _regs[VCPU_REGS_RDX])) {
                                _regs[VCPU_REGS_RAX] = old_lo;
                                _regs[VCPU_REGS_RDX] = old_hi;
                                _eflags &= ~EFLG_ZF;
index fb1edc1c9edbd5556fbf86e97a8e67ce0c3dbfb2..50914439d861d74072bf9af8d7ac0024469eb2fe 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 
-#include <asm/arch/hardware.h>
+#include <asm/hardware.h>
 #include <asm/arch/regs-gpio.h>
 #include <asm/arch/leds-gpio.h>
 
index c8558d4ed5064416e88b69d7a8d0a8e655939ee4..8ca75e52f637021ea33f49d3c6a4175506313ecc 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/sysdev.h>
 #include <linux/freezer.h>
 #include <linux/syscalls.h>
+#include <linux/suspend.h>
 #include <linux/cpu.h>
 #include <asm/prom.h>
 #include <asm/machdep.h>
index e9b80e9202664bbfa8cd84a6b302b48ae9154560..ccfe6561be240ec65dadbbab2ea56ce0166cd001 100644 (file)
@@ -42,6 +42,8 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
 {
        writel(0, host->base + MMCICOMMAND);
 
+       BUG_ON(host->data);
+
        host->mrq = NULL;
        host->cmd = NULL;
 
@@ -198,6 +200,8 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
        }
 
        if (!cmd->data || cmd->error != MMC_ERR_NONE) {
+               if (host->data)
+                       mmci_stop_data(host);
                mmci_request_end(host, cmd->mrq);
        } else if (!(cmd->data->flags & MMC_DATA_READ)) {
                mmci_start_data(host, cmd->data);
index 602ed31a5dd9309e2b9b66cf9684cb654007f9fc..9305eb9b1b98fde55e17e580f7997b2ea77129b6 100644 (file)
@@ -349,22 +349,11 @@ static void __init trif_probe2(int unit)
 #endif
 
 
-/*
- *     The loopback device is global so it can be directly referenced
- *     by the network code. Also, it must be first on device list.
- */
-extern int loopback_init(void);
-
 /*  Statically configured drivers -- order matters here. */
 static int __init net_olddevs_init(void)
 {
        int num;
 
-       if (loopback_init()) {
-               printk(KERN_ERR "Network loopback device setup failed\n");
-       }
-
-
 #ifdef CONFIG_SBNI
        for (num = 0; num < 8; ++num)
                sbni_probe(num);
index ada5e9b9988cda07654ca92e2f244543f97abed2..ca5acc4736df2bd80134e1663464fd6ae4cd51fe 100644 (file)
@@ -57,8 +57,8 @@
 
 #define DRV_MODULE_NAME                "bnx2"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "1.5.2"
-#define DRV_MODULE_RELDATE     "December 13, 2006"
+#define DRV_MODULE_VERSION     "1.5.3"
+#define DRV_MODULE_RELDATE     "January 8, 2007"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -1345,8 +1345,6 @@ bnx2_init_copper_phy(struct bnx2 *bp)
 {
        u32 val;
 
-       bp->phy_flags |= PHY_CRC_FIX_FLAG;
-
        if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
@@ -3085,7 +3083,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
                int buf_size)
 {
        u32 written, offset32, len32;
-       u8 *buf, start[4], end[4], *flash_buffer = NULL;
+       u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
        int rc = 0;
        int align_start, align_end;
 
@@ -3113,16 +3111,17 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
        }
 
        if (align_start || align_end) {
-               buf = kmalloc(len32, GFP_KERNEL);
-               if (buf == NULL)
+               align_buf = kmalloc(len32, GFP_KERNEL);
+               if (align_buf == NULL)
                        return -ENOMEM;
                if (align_start) {
-                       memcpy(buf, start, 4);
+                       memcpy(align_buf, start, 4);
                }
                if (align_end) {
-                       memcpy(buf + len32 - 4, end, 4);
+                       memcpy(align_buf + len32 - 4, end, 4);
                }
-               memcpy(buf + align_start, data_buf, buf_size);
+               memcpy(align_buf + align_start, data_buf, buf_size);
+               buf = align_buf;
        }
 
        if (bp->flash_info->buffered == 0) {
@@ -3256,11 +3255,8 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
        }
 
 nvram_write_end:
-       if (bp->flash_info->buffered == 0)
-               kfree(flash_buffer);
-
-       if (align_start || align_end)
-               kfree(buf);
+       kfree(flash_buffer);
+       kfree(align_buf);
        return rc;
 }
 
@@ -5645,6 +5641,44 @@ poll_bnx2(struct net_device *dev)
 }
 #endif
 
+static void __devinit
+bnx2_get_5709_media(struct bnx2 *bp)
+{
+       u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
+       u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
+       u32 strap;
+
+       if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
+               return;
+       else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
+               bp->phy_flags |= PHY_SERDES_FLAG;
+               return;
+       }
+
+       if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
+               strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
+       else
+               strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
+
+       if (PCI_FUNC(bp->pdev->devfn) == 0) {
+               switch (strap) {
+               case 0x4:
+               case 0x5:
+               case 0x6:
+                       bp->phy_flags |= PHY_SERDES_FLAG;
+                       return;
+               }
+       } else {
+               switch (strap) {
+               case 0x1:
+               case 0x2:
+               case 0x4:
+                       bp->phy_flags |= PHY_SERDES_FLAG;
+                       return;
+               }
+       }
+}
+
 static int __devinit
 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 {
@@ -5865,10 +5899,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        bp->phy_addr = 1;
 
        /* Disable WOL support if we are running on a SERDES chip. */
-       if (CHIP_NUM(bp) == CHIP_NUM_5709) {
-               if (CHIP_BOND_ID(bp) != BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
-                       bp->phy_flags |= PHY_SERDES_FLAG;
-       } else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
+       if (CHIP_NUM(bp) == CHIP_NUM_5709)
+               bnx2_get_5709_media(bp);
+       else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
                bp->phy_flags |= PHY_SERDES_FLAG;
 
        if (bp->phy_flags & PHY_SERDES_FLAG) {
@@ -5880,7 +5913,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
                        if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
                                bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
                }
-       }
+       } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
+                  CHIP_NUM(bp) == CHIP_NUM_5708)
+               bp->phy_flags |= PHY_CRC_FIX_FLAG;
 
        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
index c7731b6f9de319c24487c4ea4a500d243925734c..82fed1dd5005525b1fdbc44d78db6de46e33d6f6 100644 (file)
@@ -170,9 +170,10 @@ static struct cphy *my3126_phy_create(adapter_t *adapter,
 {
        struct cphy *cphy = kzalloc(sizeof (*cphy), GFP_KERNEL);
 
-       if (cphy)
-               cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops);
+       if (!cphy)
+               return NULL;
 
+       cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops);
        INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
        cphy->bmsr = 0;
 
index 4c1ff752048c9982d52bf4a2e4e2079ee26eb81b..c6259c7127f6bb9aaa4dbda53ee783ed428c1e0e 100644 (file)
@@ -995,12 +995,6 @@ e1000_probe(struct pci_dev *pdev,
           (adapter->hw.mac_type != e1000_82547))
                netdev->features |= NETIF_F_TSO;
 
-#ifdef CONFIG_DEBUG_SLAB
-       /* 82544's work arounds do not play nicely with DEBUG SLAB */
-       if (adapter->hw.mac_type == e1000_82544)
-               netdev->features &= ~NETIF_F_TSO;
-#endif
-
 #ifdef NETIF_F_TSO6
        if (adapter->hw.mac_type > e1000_82547_rev_2)
                netdev->features |= NETIF_F_TSO6;
index 2f48fe9a29a7eb160e69ad908433d485dd383ee7..93f2b7a2216096dfb5be8462da913df2b3417bef 100644 (file)
@@ -234,6 +234,7 @@ enum {
 #define NVREG_XMITCTL_HOST_SEMA_MASK   0x0000f000
 #define NVREG_XMITCTL_HOST_SEMA_ACQ    0x0000f000
 #define NVREG_XMITCTL_HOST_LOADED      0x00004000
+#define NVREG_XMITCTL_TX_PATH_EN       0x01000000
        NvRegTransmitterStatus = 0x088,
 #define NVREG_XMITSTAT_BUSY    0x01
 
@@ -249,6 +250,7 @@ enum {
 #define NVREG_OFFLOAD_NORMAL   RX_NIC_BUFSIZE
        NvRegReceiverControl = 0x094,
 #define NVREG_RCVCTL_START     0x01
+#define NVREG_RCVCTL_RX_PATH_EN        0x01000000
        NvRegReceiverStatus = 0x98,
 #define NVREG_RCVSTAT_BUSY     0x01
 
@@ -1169,16 +1171,21 @@ static void nv_start_rx(struct net_device *dev)
 {
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
+       u32 rx_ctrl = readl(base + NvRegReceiverControl);
 
        dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
        /* Already running? Stop it. */
-       if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
-               writel(0, base + NvRegReceiverControl);
+       if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
+               rx_ctrl &= ~NVREG_RCVCTL_START;
+               writel(rx_ctrl, base + NvRegReceiverControl);
                pci_push(base);
        }
        writel(np->linkspeed, base + NvRegLinkSpeed);
        pci_push(base);
-       writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
+       rx_ctrl |= NVREG_RCVCTL_START;
+       if (np->mac_in_use)
+               rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
+       writel(rx_ctrl, base + NvRegReceiverControl);
        dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
                                dev->name, np->duplex, np->linkspeed);
        pci_push(base);
@@ -1186,39 +1193,59 @@ static void nv_start_rx(struct net_device *dev)
 
 static void nv_stop_rx(struct net_device *dev)
 {
+       struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
+       u32 rx_ctrl = readl(base + NvRegReceiverControl);
 
        dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
-       writel(0, base + NvRegReceiverControl);
+       if (!np->mac_in_use)
+               rx_ctrl &= ~NVREG_RCVCTL_START;
+       else
+               rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
+       writel(rx_ctrl, base + NvRegReceiverControl);
        reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
                        NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
                        KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
 
        udelay(NV_RXSTOP_DELAY2);
-       writel(0, base + NvRegLinkSpeed);
+       if (!np->mac_in_use)
+               writel(0, base + NvRegLinkSpeed);
 }
 
 static void nv_start_tx(struct net_device *dev)
 {
+       struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
+       u32 tx_ctrl = readl(base + NvRegTransmitterControl);
 
        dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
-       writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
+       tx_ctrl |= NVREG_XMITCTL_START;
+       if (np->mac_in_use)
+               tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
+       writel(tx_ctrl, base + NvRegTransmitterControl);
        pci_push(base);
 }
 
 static void nv_stop_tx(struct net_device *dev)
 {
+       struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
+       u32 tx_ctrl = readl(base + NvRegTransmitterControl);
 
        dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
-       writel(0, base + NvRegTransmitterControl);
+       if (!np->mac_in_use)
+               tx_ctrl &= ~NVREG_XMITCTL_START;
+       else
+               tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
+       writel(tx_ctrl, base + NvRegTransmitterControl);
        reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
                        NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
                        KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
 
        udelay(NV_TXSTOP_DELAY2);
-       writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
+       if (!np->mac_in_use)
+               writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
+                      base + NvRegTransmitPoll);
 }
 
 static void nv_txrx_reset(struct net_device *dev)
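
The reworked start/stop helpers read the control register first and only flip the bits they own: when np->mac_in_use is set (a management unit shares the MAC), the driver toggles the new *_PATH_EN bits instead of the global start bit, and it no longer clears NvRegLinkSpeed or NvRegTransmitPoll on stop. A standalone sketch of that decision with illustrative bit values; the polarity (path bit cleared on start, set on stop) follows the patch:

#include <stdint.h>

#define CTL_START    0x00000001u    /* illustrative: global start bit */
#define CTL_PATH_EN  0x01000000u    /* illustrative: per-host path gate, as in the patch */

/* 'shared' models np->mac_in_use.  On stop, the global start bit is left
 * alone whenever the MAC is shared; only the path bit is touched. */
static uint32_t ctrl_on_start(uint32_t ctrl, int shared)
{
        ctrl |= CTL_START;
        if (shared)
                ctrl &= ~CTL_PATH_EN;
        return ctrl;
}

static uint32_t ctrl_on_stop(uint32_t ctrl, int shared)
{
        if (!shared)
                ctrl &= ~CTL_START;
        else
                ctrl |= CTL_PATH_EN;
        return ctrl;
}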
@@ -4148,20 +4175,6 @@ static int nv_mgmt_acquire_sema(struct net_device *dev)
        return 0;
 }
 
-/* Indicate to mgmt unit whether driver is loaded or not */
-static void nv_mgmt_driver_loaded(struct net_device *dev, int loaded)
-{
-       u8 __iomem *base = get_hwbase(dev);
-       u32 tx_ctrl;
-
-       tx_ctrl = readl(base + NvRegTransmitterControl);
-       if (loaded)
-               tx_ctrl |= NVREG_XMITCTL_HOST_LOADED;
-       else
-               tx_ctrl &= ~NVREG_XMITCTL_HOST_LOADED;
-       writel(tx_ctrl, base + NvRegTransmitterControl);
-}
-
 static int nv_open(struct net_device *dev)
 {
        struct fe_priv *np = netdev_priv(dev);
@@ -4659,33 +4672,24 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
 
        if (id->driver_data & DEV_HAS_MGMT_UNIT) {
-               writel(0x1, base + 0x204); pci_push(base);
-               msleep(500);
                /* management unit running on the mac? */
-               np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
-               if (np->mac_in_use) {
-                       u32 mgmt_sync;
-                       /* management unit setup the phy already? */
-                       mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK;
-                       if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY) {
-                               if (!nv_mgmt_acquire_sema(dev)) {
-                                       for (i = 0; i < 5000; i++) {
-                                               msleep(1);
-                                               mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK;
-                                               if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY)
-                                                       continue;
-                                               if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT)
-                                                       phyinitialized = 1;
-                                               break;
+               if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
+                       np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
+                       dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
+                       for (i = 0; i < 5000; i++) {
+                               msleep(1);
+                               if (nv_mgmt_acquire_sema(dev)) {
+                                       /* management unit setup the phy already? */
+                                       if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
+                                           NVREG_XMITCTL_SYNC_PHY_INIT) {
+                                               /* phy is inited by mgmt unit */
+                                               phyinitialized = 1;
+                                               dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
+                                       } else {
+                                               /* we need to init the phy */
                                        }
-                               } else {
-                                       /* we need to init the phy */
+                                       break;
                                }
-                       } else if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) {
-                               /* phy is inited by SMU */
-                               phyinitialized = 1;
-                       } else {
-                               /* we need to init the phy */
                        }
                }
        }
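
During probe the driver now checks NVREG_XMITCTL_SYNC_PHY_INIT up front and then polls for up to about five seconds, trying to grab the management semaphore once per millisecond; only once the semaphore is held does it decide whether the management unit has already initialized the PHY. A standalone sketch of that bounded acquire-then-check loop; the callbacks stand in for msleep(), nv_mgmt_acquire_sema() and the NVREG_XMITCTL_SYNC_MASK test:

#include <stdbool.h>

/* Poll for roughly 'attempts' milliseconds; returns true once the
 * semaphore is held and records whether the PHY was already set up. */
static bool wait_for_mgmt(int attempts, bool *phy_inited,
                          void (*sleep_ms)(int),
                          bool (*try_acquire)(void),
                          bool (*phy_ready)(void))
{
        int i;

        for (i = 0; i < attempts; i++) {
                sleep_ms(1);
                if (try_acquire()) {
                        *phy_inited = phy_ready();
                        return true;
                }
        }
        return false;
}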
@@ -4724,10 +4728,12 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        if (!phyinitialized) {
                /* reset it */
                phy_init(dev);
-       }
-
-       if (id->driver_data & DEV_HAS_MGMT_UNIT) {
-               nv_mgmt_driver_loaded(dev, 1);
+       } else {
+               /* see if it is a gigabit phy */
+               u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+               if (mii_status & PHY_GIGABIT) {
+                       np->gigabit = PHY_GIGABIT;
+               }
        }
 
        /* set default link speed settings */
@@ -4749,8 +4755,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 out_error:
        if (phystate_orig)
                writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
-       if (np->mac_in_use)
-               nv_mgmt_driver_loaded(dev, 0);
        pci_set_drvdata(pci_dev, NULL);
 out_freering:
        free_rings(dev);
@@ -4780,9 +4784,6 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
        writel(np->orig_mac[0], base + NvRegMacAddrA);
        writel(np->orig_mac[1], base + NvRegMacAddrB);
 
-       if (np->mac_in_use)
-               nv_mgmt_driver_loaded(dev, 0);
-
        /* free all structures */
        free_rings(dev);
        iounmap(get_hwbase(dev));
index c26a4b8e552a52274832b551a45c2d47ca357da7..ca2b21f9d44489f4008761764f520f90a8956053 100644 (file)
@@ -154,8 +154,8 @@ static int ifb_xmit(struct sk_buff *skb, struct net_device *dev)
        int ret = 0;
        u32 from = G_TC_FROM(skb->tc_verd);
 
-       stats->tx_packets++;
-       stats->tx_bytes+=skb->len;
+       stats->rx_packets++;
+       stats->rx_bytes+=skb->len;
 
        if (!from || !skb->input_dev) {
 dropped:
index 50ffe90488ff4584c033618cb86e2e14859867fc..f4aba4355b19fc2e5deddbba263823b774ad5af2 100644 (file)
@@ -171,6 +171,7 @@ struct ixgb_adapter {
 
        /* TX */
        struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
+       unsigned int restart_queue;
        unsigned long timeo_start;
        uint32_t tx_cmd_type;
        uint64_t hw_csum_tx_good;
index cd22523fb03521535b2fd71c074e9984599d9278..82c044d6e08af339a01c047bf4e60279cc455b8e 100644 (file)
@@ -79,6 +79,7 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
        {"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)},
        {"tx_deferred_ok", IXGB_STAT(stats.dc)},
        {"tx_timeout_count", IXGB_STAT(tx_timeout_count) },
+       {"tx_restart_queue", IXGB_STAT(restart_queue) },
        {"rx_long_length_errors", IXGB_STAT(stats.roc)},
        {"rx_short_length_errors", IXGB_STAT(stats.ruc)},
 #ifdef NETIF_F_TSO
index 02089b64e42c55f6672cd178790c7ba6a9bb63f2..ecbf45861c6825c34c91adad1322c72d6137107d 100644 (file)
@@ -399,8 +399,9 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
        /* Zero out the other 15 receive addresses. */
        DEBUGOUT("Clearing RAR[1-15]\n");
        for(i = 1; i < IXGB_RAR_ENTRIES; i++) {
-               IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+               /* Write high reg first to disable the AV bit first */
                IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+               IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
        }
 
        return;
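
The receive-address registers are now cleared high word first because the high word carries the address-valid bit; dropping it before zeroing the low word closes the window in which the filter could match a half-cleared entry. A small standalone sketch of the ordering; the one-low-word, one-high-word layout per entry follows the patch, the array representation is illustrative:

#include <stdint.h>

/* Each entry is a low/high pair of 32-bit words; the address-valid flag
 * is assumed to live in the high word, as in the patch. */
static void clear_rar_entries(volatile uint32_t ra[][2], int first, int count)
{
        int i;

        for (i = first; i < first + count; i++) {
                ra[i][1] = 0;   /* high word first: drops the valid bit */
                ra[i][0] = 0;   /* then the low word */
        }
}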
index e628126c9c49178211032801e1da1a17a64af338..a083a9189230d8f0a3dda97f7b3eba485862f1d1 100644 (file)
@@ -36,7 +36,7 @@ static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION            "1.0.117-k2"DRIVERNAPI
+#define DRV_VERSION            "1.0.126-k2"DRIVERNAPI
 char ixgb_driver_version[] = DRV_VERSION;
 static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -1287,6 +1287,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
        struct ixgb_buffer *buffer_info;
        int len = skb->len;
        unsigned int offset = 0, size, count = 0, i;
+       unsigned int mss = skb_shinfo(skb)->gso_size;
 
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;
@@ -1298,6 +1299,11 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
        while(len) {
                buffer_info = &tx_ring->buffer_info[i];
                size = min(len, IXGB_MAX_DATA_PER_TXD);
+               /* Workaround for premature desc write-backs
+                * in TSO mode.  Append 4-byte sentinel desc */
+               if (unlikely(mss && !nr_frags && size == len && size > 8))
+                       size -= 4;
+
                buffer_info->length = size;
                WARN_ON(buffer_info->dma != 0);
                buffer_info->dma =
@@ -1324,6 +1330,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
                while(len) {
                        buffer_info = &tx_ring->buffer_info[i];
                        size = min(len, IXGB_MAX_DATA_PER_TXD);
+
+                       /* Workaround for premature desc write-backs
+                        * in TSO mode.  Append 4-byte sentinel desc */
+                       if (unlikely(mss && !nr_frags && size == len
+                                    && size > 8))
+                               size -= 4;
+
                        buffer_info->length = size;
                        buffer_info->dma =
                                pci_map_page(adapter->pdev,
@@ -1398,11 +1411,43 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
        IXGB_WRITE_REG(&adapter->hw, TDT, i);
 }
 
+static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
+{
+       struct ixgb_adapter *adapter = netdev_priv(netdev);
+       struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+
+       netif_stop_queue(netdev);
+       /* Herbert's original patch had:
+        *  smp_mb__after_netif_stop_queue();
+        * but since that doesn't exist yet, just open code it. */
+       smp_mb();
+
+       /* We need to check again in a case another CPU has just
+        * made room available. */
+       if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
+               return -EBUSY;
+
+       /* A reprieve! */
+       netif_start_queue(netdev);
+       ++adapter->restart_queue;
+       return 0;
+}
+
+static int ixgb_maybe_stop_tx(struct net_device *netdev,
+                              struct ixgb_desc_ring *tx_ring, int size)
+{
+       if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
+               return 0;
+       return __ixgb_maybe_stop_tx(netdev, size);
+}
+
+
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
                         (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
-       MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1
+#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
+       MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
+       + 1 /* one more needed for sentinel TSO workaround */
 
 static int
 ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -1430,7 +1475,8 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        spin_lock_irqsave(&adapter->tx_lock, flags);
 #endif
 
-       if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
+       if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
+                     DESC_NEEDED))) {
                netif_stop_queue(netdev);
                spin_unlock_irqrestore(&adapter->tx_lock, flags);
                return NETDEV_TX_BUSY;
@@ -1468,8 +1514,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 #ifdef NETIF_F_LLTX
        /* Make sure there is space in the ring for the next send. */
-       if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED))
-               netif_stop_queue(netdev);
+       ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
 
        spin_unlock_irqrestore(&adapter->tx_lock, flags);
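
ixgb_maybe_stop_tx() is the usual race-free queue-stop pattern: stop the queue, issue a full barrier, then re-check the free descriptor count, so a completion racing with the stop either sees the stopped queue or frees enough room for the check below to restart it. A hedged sketch of the shape of that check; 'struct ring' and unused() are placeholders for the driver's tx ring and its IXGB_DESC_UNUSED() macro, while netif_stop_queue(), netif_start_queue() and smp_mb() are the kernel helpers the patch relies on:

static int maybe_stop_tx(struct net_device *netdev, struct ring *ring, int needed)
{
        if (likely(unused(ring) >= needed))
                return 0;                       /* fast path: plenty of room */

        netif_stop_queue(netdev);
        smp_mb();                               /* order the stop against the re-check */

        if (likely(unused(ring) < needed))
                return -EBUSY;                  /* still full: stay stopped */

        netif_start_queue(netdev);              /* a completion freed space meanwhile */
        return 0;
}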
 
index 82c10dec1b5ac9d739578aa4d4874573d4ad11c4..2b739fd584f1ff9042d9034548a5740f84c6aed0 100644 (file)
@@ -229,9 +229,11 @@ struct net_device loopback_dev = {
 };
 
 /* Setup and register the loopback device. */
-int __init loopback_init(void)
+static int __init loopback_init(void)
 {
        return register_netdev(&loopback_dev);
 };
 
+module_init(loopback_init);
+
 EXPORT_SYMBOL(loopback_dev);
index 2b1238e2dbdbe077ed41a045829fda5b13bfda58..d88e9b2e93cf919a66edff259be28b5e2214c402 100644 (file)
@@ -1617,6 +1617,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
        PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9),
        PCMCIA_DEVICE_PROD_ID12("Corega,K.K.", "Ethernet LAN Card", 0x110d26d9, 0x9fd2f0a2),
        PCMCIA_DEVICE_PROD_ID12("corega,K.K.", "Ethernet LAN Card", 0x9791a90e, 0x9fd2f0a2),
+       PCMCIA_DEVICE_PROD_ID12("corega K.K.", "(CG-LAPCCTXD)", 0x5261440f, 0x73ec0d88),
        PCMCIA_DEVICE_PROD_ID12("CouplerlessPCMCIA", "100BASE", 0xee5af0ad, 0x7c2add04),
        PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-010", 0x77008979, 0x9d8d445d),
        PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-110E 10/100M LAN Card", 0x77008979, 0xfd184814),
@@ -1667,6 +1668,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
        PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737),
        PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TE", 0x88fcdeda, 0x0e714bee),
        PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922),
+       PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN10TE", 0x88fcdeda, 0xc1e2521c),
        PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0),
        PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578),
        PCMCIA_DEVICE_PROD_ID12("Macsense", "MPC-10", 0xd830297f, 0xd265c307),
index d79d141a601d572ab255e12c8b5bacd3f4b8946b..8844c20eac2d0cee6d87367b5b28e23155fb3aa4 100644 (file)
@@ -208,6 +208,15 @@ static void ql_write_common_reg(struct ql3_adapter *qdev,
        return;
 }
 
+static void ql_write_nvram_reg(struct ql3_adapter *qdev,
+                               u32 __iomem *reg, u32 value)
+{
+       writel(value, reg);
+       readl(reg);
+       udelay(1);
+       return;
+}
+
 static void ql_write_page0_reg(struct ql3_adapter *qdev,
                               u32 __iomem *reg, u32 value)
 {
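
The EEPROM bit-bang path gets a dedicated write helper that flushes the posted write with a read-back and then waits a microsecond, giving the serial interface time to latch each clock edge; ql_write_common_reg() stays unchanged for everything else. A minimal sketch of that write, flush, delay idiom using the kernel's writel(), readl() and udelay():

/* Sketch: 'reg' is an ioremapped register.  The read-back forces the
 * posted write out to the device before the settle delay starts. */
static void write_slow_reg(void __iomem *reg, u32 value)
{
        writel(value, reg);
        readl(reg);     /* flush the posted write */
        udelay(1);      /* let the serial EEPROM catch the edge */
}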
@@ -336,9 +345,9 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
                        qdev->mem_map_registers;
 
        qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
-       ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+       ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                            ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
-       ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+       ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                            ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
 }
 
@@ -355,14 +364,14 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
                        qdev->mem_map_registers;
 
        /* Clock in a zero, then do the start bit */
-       ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+       ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                            ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                            AUBURN_EEPROM_DO_1);
-       ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+       ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                            ISP_NVRAM_MASK | qdev->
                            eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
                            AUBURN_EEPROM_CLK_RISE);
-       ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+       ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                            ISP_NVRAM_MASK | qdev->
                            eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
                            AUBURN_EEPROM_CLK_FALL);
@@ -378,20 +387,20 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
                         * If the bit changed, then change the DO state to
                         * match
                         */
-                       ql_write_common_reg(qdev,
+                       ql_write_nvram_reg(qdev,
                                            &port_regs->CommonRegs.
                                            serialPortInterfaceReg,
                                            ISP_NVRAM_MASK | qdev->
                                            eeprom_cmd_data | dataBit);
                        previousBit = dataBit;
                }
-               ql_write_common_reg(qdev,
+               ql_write_nvram_reg(qdev,
                                    &port_regs->CommonRegs.
                                    serialPortInterfaceReg,
                                    ISP_NVRAM_MASK | qdev->
                                    eeprom_cmd_data | dataBit |
                                    AUBURN_EEPROM_CLK_RISE);
-               ql_write_common_reg(qdev,
+               ql_write_nvram_reg(qdev,
                                    &port_regs->CommonRegs.
                                    serialPortInterfaceReg,
                                    ISP_NVRAM_MASK | qdev->
@@ -412,20 +421,20 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
                         * If the bit changed, then change the DO state to
                         * match
                         */
-                       ql_write_common_reg(qdev,
+                       ql_write_nvram_reg(qdev,
                                            &port_regs->CommonRegs.
                                            serialPortInterfaceReg,
                                            ISP_NVRAM_MASK | qdev->
                                            eeprom_cmd_data | dataBit);
                        previousBit = dataBit;
                }
-               ql_write_common_reg(qdev,
+               ql_write_nvram_reg(qdev,
                                    &port_regs->CommonRegs.
                                    serialPortInterfaceReg,
                                    ISP_NVRAM_MASK | qdev->
                                    eeprom_cmd_data | dataBit |
                                    AUBURN_EEPROM_CLK_RISE);
-               ql_write_common_reg(qdev,
+               ql_write_nvram_reg(qdev,
                                    &port_regs->CommonRegs.
                                    serialPortInterfaceReg,
                                    ISP_NVRAM_MASK | qdev->
@@ -443,7 +452,7 @@ static void fm93c56a_deselect(struct ql3_adapter *qdev)
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
-       ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+       ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                            ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
 }
 
@@ -461,12 +470,12 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
        /* Read the data bits */
        /* The first bit is a dummy.  Clock right over it. */
        for (i = 0; i < dataBits; i++) {
-               ql_write_common_reg(qdev,
+               ql_write_nvram_reg(qdev,
                                    &port_regs->CommonRegs.
                                    serialPortInterfaceReg,
                                    ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                                    AUBURN_EEPROM_CLK_RISE);
-               ql_write_common_reg(qdev,
+               ql_write_nvram_reg(qdev,
                                    &port_regs->CommonRegs.
                                    serialPortInterfaceReg,
                                    ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
@@ -3370,7 +3379,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
        SET_MODULE_OWNER(ndev);
        SET_NETDEV_DEV(ndev, &pdev->dev);
 
-       ndev->features = NETIF_F_LLTX;
        if (pci_using_dac)
                ndev->features |= NETIF_F_HIGHDMA;
 
index 785e4a535f9ed7543467001281327cc76eb46cb8..616be8d0fa854712445c668eb4f249cf89bebad6 100644 (file)
@@ -90,7 +90,8 @@
 
 #define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
                         SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
-                        SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
+                        SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \
+                        SUPPORTED_Pause | SUPPORTED_Autoneg)
 
 #define DRV_NAME       "sungem"
 #define DRV_VERSION    "0.98"
index 49800b25907db7d9abd2f4672a456dac6399227c..d21991ee88c492fc5899ff449e038a8dc68f9d1d 100644 (file)
@@ -3,10 +3,9 @@
  *
  * This file could be shared with other drivers.
  *
- * (c) 2002, Benjamin Herrenscmidt (benh@kernel.crashing.org)
+ * (c) 2002-2007, Benjamin Herrenscmidt (benh@kernel.crashing.org)
  *
  * TODO:
- *  - Implement WOL
  *  - Add support for PHYs that provide an IRQ line
  *  - Eventually moved the entire polling state machine in
  *    there (out of the eth driver), so that it can easily be
@@ -152,6 +151,44 @@ static int bcm5221_suspend(struct mii_phy* phy)
        return 0;
 }
 
+static int bcm5241_init(struct mii_phy* phy)
+{
+       u16 data;
+
+       data = phy_read(phy, MII_BCM5221_TEST);
+       phy_write(phy, MII_BCM5221_TEST,
+               data | MII_BCM5221_TEST_ENABLE_SHADOWS);
+
+       data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2);
+       phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2,
+               data | MII_BCM5221_SHDOW_AUX_STAT2_APD);
+
+       data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
+       phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
+               data & ~MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR);
+
+       data = phy_read(phy, MII_BCM5221_TEST);
+       phy_write(phy, MII_BCM5221_TEST,
+               data & ~MII_BCM5221_TEST_ENABLE_SHADOWS);
+
+       return 0;
+}
+
+static int bcm5241_suspend(struct mii_phy* phy)
+{
+       u16 data;
+
+       data = phy_read(phy, MII_BCM5221_TEST);
+       phy_write(phy, MII_BCM5221_TEST,
+               data | MII_BCM5221_TEST_ENABLE_SHADOWS);
+
+       data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
+       phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
+                 data | MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR);
+
+       return 0;
+}
+
 static int bcm5400_init(struct mii_phy* phy)
 {
        u16 data;
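
The BCM5241 hooks follow the usual Broadcom shadow-register sequence: set the enable-shadows bit in the TEST register, read-modify-write the shadow of interest, then clear the enable bit again. A standalone sketch of that sequence; the register numbers and bit values are illustrative rather than 5241 datasheet values, and rd()/wr() stand in for phy_read()/phy_write():

#include <stdint.h>

#define REG_TEST            0x1f    /* illustrative register numbers and bits */
#define TEST_ENABLE_SHADOWS 0x0080
#define REG_SHADOW          0x1a

static void shadow_update(uint16_t (*rd)(int), void (*wr)(int, uint16_t),
                          uint16_t set, uint16_t clear)
{
        wr(REG_TEST, rd(REG_TEST) | TEST_ENABLE_SHADOWS);   /* open the shadows */
        wr(REG_SHADOW, (rd(REG_SHADOW) | set) & ~clear);    /* adjust one shadow */
        wr(REG_TEST, rd(REG_TEST) & ~TEST_ENABLE_SHADOWS);  /* close them again */
}

For the init path this would be called with the auto-power-down bit in 'set' and the standby-power bit in 'clear'; suspend reverses that, as in the functions above.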
@@ -373,6 +410,10 @@ static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise)
                adv |= ADVERTISE_100HALF;
        if (advertise & ADVERTISED_100baseT_Full)
                adv |= ADVERTISE_100FULL;
+       if (advertise & ADVERTISED_Pause)
+               adv |= ADVERTISE_PAUSE_CAP;
+       if (advertise & ADVERTISED_Asym_Pause)
+               adv |= ADVERTISE_PAUSE_ASYM;
        phy_write(phy, MII_ADVERTISE, adv);
 
        /* Setup 1000BT advertise */
@@ -436,12 +477,15 @@ static int bcm54xx_read_link(struct mii_phy *phy)
                val = phy_read(phy, MII_BCM5400_AUXSTATUS);
                link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >>
                             MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT);
-               phy->duplex = phy_BCM5400_link_table[link_mode][0] ? DUPLEX_FULL : DUPLEX_HALF;
+               phy->duplex = phy_BCM5400_link_table[link_mode][0] ?
+                       DUPLEX_FULL : DUPLEX_HALF;
                phy->speed = phy_BCM5400_link_table[link_mode][2] ?
                                SPEED_1000 :
-                               (phy_BCM5400_link_table[link_mode][1] ? SPEED_100 : SPEED_10);
+                               (phy_BCM5400_link_table[link_mode][1] ?
+                                SPEED_100 : SPEED_10);
                val = phy_read(phy, MII_LPA);
-               phy->pause = ((val & LPA_PAUSE) != 0);
+               phy->pause = (phy->duplex == DUPLEX_FULL) &&
+                       ((val & LPA_PAUSE) != 0);
        }
        /* On non-aneg, we assume what we put in BMCR is the speed,
         * though magic-aneg shouldn't prevent this case from occurring
@@ -450,6 +494,28 @@ static int bcm54xx_read_link(struct mii_phy *phy)
        return 0;
 }
 
+static int marvell88e1111_init(struct mii_phy* phy)
+{
+       u16 rev;
+
+       /* magic init sequence for rev 0 */
+       rev = phy_read(phy, MII_PHYSID2) & 0x000f;
+       if (rev == 0) {
+               phy_write(phy, 0x1d, 0x000a);
+               phy_write(phy, 0x1e, 0x0821);
+
+               phy_write(phy, 0x1d, 0x0006);
+               phy_write(phy, 0x1e, 0x8600);
+
+               phy_write(phy, 0x1d, 0x000b);
+               phy_write(phy, 0x1e, 0x0100);
+
+               phy_write(phy, 0x1d, 0x0004);
+               phy_write(phy, 0x1e, 0x4850);
+       }
+       return 0;
+}
+
 static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise)
 {
        u16 ctl, adv;
@@ -471,6 +537,10 @@ static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise)
                adv |= ADVERTISE_100HALF;
        if (advertise & ADVERTISED_100baseT_Full)
                adv |= ADVERTISE_100FULL;
+       if (advertise & ADVERTISED_Pause)
+               adv |= ADVERTISE_PAUSE_CAP;
+       if (advertise & ADVERTISED_Asym_Pause)
+               adv |= ADVERTISE_PAUSE_ASYM;
        phy_write(phy, MII_ADVERTISE, adv);
 
        /* Setup 1000BT advertise & enable crossover detect
@@ -549,7 +619,7 @@ static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd)
 
 static int marvell_read_link(struct mii_phy *phy)
 {
-       u16 status;
+       u16 status, pmask;
 
        if (phy->autoneg) {
                status = phy_read(phy, MII_M1011_PHY_SPEC_STATUS);
@@ -565,7 +635,9 @@ static int marvell_read_link(struct mii_phy *phy)
                        phy->duplex = DUPLEX_FULL;
                else
                        phy->duplex = DUPLEX_HALF;
-               phy->pause = 0; /* XXX Check against spec ! */
+               pmask = MII_M1011_PHY_SPEC_STATUS_TX_PAUSE |
+                       MII_M1011_PHY_SPEC_STATUS_RX_PAUSE;
+               phy->pause = (status & pmask) == pmask;
        }
        /* On non-aneg, we assume what we put in BMCR is the speed,
         * though magic-aneg shouldn't prevent this case from occurring
@@ -595,6 +667,10 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
                adv |= ADVERTISE_100HALF;
        if (advertise & ADVERTISED_100baseT_Full)
                adv |= ADVERTISE_100FULL;
+       if (advertise & ADVERTISED_Pause)
+               adv |= ADVERTISE_PAUSE_CAP;
+       if (advertise & ADVERTISED_Asym_Pause)
+               adv |= ADVERTISE_PAUSE_ASYM;
        phy_write(phy, MII_ADVERTISE, adv);
 
        /* Start/Restart aneg */
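
All three setup_aneg() paths gain the same translation of the ethtool ADVERTISED_Pause and ADVERTISED_Asym_Pause flags into the ADVERTISE_PAUSE_CAP and ADVERTISE_PAUSE_ASYM bits of the MII advertisement register. A standalone sketch of that mapping, with the flag and bit values copied from the standard ethtool.h and mii.h definitions so it compiles on its own:

#include <stdint.h>

#define ADVERTISED_Pause        (1u << 13)      /* ethtool flags */
#define ADVERTISED_Asym_Pause   (1u << 14)
#define ADVERTISE_PAUSE_CAP     0x0400          /* MII advertisement bits */
#define ADVERTISE_PAUSE_ASYM    0x0800

static uint16_t pause_adv_bits(uint32_t advertise)
{
        uint16_t adv = 0;

        if (advertise & ADVERTISED_Pause)
                adv |= ADVERTISE_PAUSE_CAP;
        if (advertise & ADVERTISED_Asym_Pause)
                adv |= ADVERTISE_PAUSE_ASYM;
        return adv;
}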
@@ -666,7 +742,8 @@ static int genmii_read_link(struct mii_phy *phy)
                        phy->speed = SPEED_100;
                else
                        phy->speed = SPEED_10;
-               phy->pause = 0;
+               phy->pause = (phy->duplex == DUPLEX_FULL) &&
+                       ((lpa & LPA_PAUSE) != 0);
        }
        /* On non-aneg, we assume what we put in BMCR is the speed,
         * though magic-aneg shouldn't prevent this case from occurring
@@ -676,11 +753,19 @@ static int genmii_read_link(struct mii_phy *phy)
 }
 
 
-#define MII_BASIC_FEATURES     (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
-                                SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
-                                SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII)
-#define MII_GBIT_FEATURES      (MII_BASIC_FEATURES | \
-                                SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
+#define MII_BASIC_FEATURES \
+       (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |      \
+        SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |    \
+        SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII |     \
+        SUPPORTED_Pause)
+
+/* On gigabit capable PHYs, we advertise Pause support but not asym pause
+ * support for now as I'm not sure it's supported and Darwin doesn't do
+ * it either. --BenH.
+ */
+#define MII_GBIT_FEATURES \
+       (MII_BASIC_FEATURES |   \
+        SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
 
 /* Broadcom BCM 5201 */
 static struct mii_phy_ops bcm5201_phy_ops = {
@@ -720,6 +805,24 @@ static struct mii_phy_def bcm5221_phy_def = {
        .ops            = &bcm5221_phy_ops
 };
 
+/* Broadcom BCM 5241 */
+static struct mii_phy_ops bcm5241_phy_ops = {
+       .suspend        = bcm5241_suspend,
+       .init           = bcm5241_init,
+       .setup_aneg     = genmii_setup_aneg,
+       .setup_forced   = genmii_setup_forced,
+       .poll_link      = genmii_poll_link,
+       .read_link      = genmii_read_link,
+};
+static struct mii_phy_def bcm5241_phy_def = {
+       .phy_id         = 0x0143bc30,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "BCM5241",
+       .features       = MII_BASIC_FEATURES,
+       .magic_aneg     = 1,
+       .ops            = &bcm5241_phy_ops
+};
+
 /* Broadcom BCM 5400 */
 static struct mii_phy_ops bcm5400_phy_ops = {
        .init           = bcm5400_init,
@@ -854,11 +957,8 @@ static struct mii_phy_def bcm5462V_phy_def = {
        .ops            = &bcm5462V_phy_ops
 };
 
-/* Marvell 88E1101 (Apple seem to deal with 2 different revs,
- * I masked out the 8 last bits to get both, but some specs
- * would be useful here) --BenH.
- */
-static struct mii_phy_ops marvell_phy_ops = {
+/* Marvell 88E1101 and 88E1111 */
+static struct mii_phy_ops marvell88e1101_phy_ops = {
        .suspend        = generic_suspend,
        .setup_aneg     = marvell_setup_aneg,
        .setup_forced   = marvell_setup_forced,
@@ -866,13 +966,41 @@ static struct mii_phy_ops marvell_phy_ops = {
        .read_link      = marvell_read_link
 };
 
-static struct mii_phy_def marvell_phy_def = {
-       .phy_id         = 0x01410c00,
-       .phy_id_mask    = 0xffffff00,
-       .name           = "Marvell 88E1101",
+static struct mii_phy_ops marvell88e1111_phy_ops = {
+       .init           = marvell88e1111_init,
+       .suspend        = generic_suspend,
+       .setup_aneg     = marvell_setup_aneg,
+       .setup_forced   = marvell_setup_forced,
+       .poll_link      = genmii_poll_link,
+       .read_link      = marvell_read_link
+};
+
+/* two revs in darwin for the 88e1101 ... I could use a datasheet
+ * to get the proper names...
+ */
+static struct mii_phy_def marvell88e1101v1_phy_def = {
+       .phy_id         = 0x01410c20,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Marvell 88E1101v1",
+       .features       = MII_GBIT_FEATURES,
+       .magic_aneg     = 1,
+       .ops            = &marvell88e1101_phy_ops
+};
+static struct mii_phy_def marvell88e1101v2_phy_def = {
+       .phy_id         = 0x01410c60,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Marvell 88E1101v2",
+       .features       = MII_GBIT_FEATURES,
+       .magic_aneg     = 1,
+       .ops            = &marvell88e1101_phy_ops
+};
+static struct mii_phy_def marvell88e1111_phy_def = {
+       .phy_id         = 0x01410cc0,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Marvell 88E1111",
        .features       = MII_GBIT_FEATURES,
        .magic_aneg     = 1,
-       .ops            = &marvell_phy_ops
+       .ops            = &marvell88e1111_phy_ops
 };
 
 /* Generic implementation for most 10/100 PHYs */
@@ -895,6 +1023,7 @@ static struct mii_phy_def genmii_phy_def = {
 static struct mii_phy_def* mii_phy_table[] = {
        &bcm5201_phy_def,
        &bcm5221_phy_def,
+       &bcm5241_phy_def,
        &bcm5400_phy_def,
        &bcm5401_phy_def,
        &bcm5411_phy_def,
@@ -902,7 +1031,9 @@ static struct mii_phy_def* mii_phy_table[] = {
        &bcm5421k2_phy_def,
        &bcm5461_phy_def,
        &bcm5462V_phy_def,
-       &marvell_phy_def,
+       &marvell88e1101v1_phy_def,
+       &marvell88e1101v2_phy_def,
+       &marvell88e1111_phy_def,
        &genmii_phy_def,
        NULL
 };
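
Splitting the single 88E1101 entry into two revision-specific entries plus the new 88E1111 relies on the probe loop matching (phy_id & phy_id_mask) == phy_id for each table entry; narrowing the mask from 0xffffff00 to 0xfffffff0 keeps the three Marvell entries from shadowing one another. A standalone sketch of that matching loop; the struct layout and the NULL-name terminator are illustrative:

#include <stdint.h>
#include <stddef.h>

struct phy_def {
        uint32_t id;        /* expected value of (phy_id & mask) */
        uint32_t mask;
        const char *name;   /* NULL terminates the table */
};

static const struct phy_def *match_phy(const struct phy_def *tbl, uint32_t phy_id)
{
        for (; tbl->name; tbl++)
                if ((phy_id & tbl->mask) == tbl->id)
                        return tbl;
        return NULL;
}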
index 8ee1ca0471cf9dda841fc3a1d3e313b02dfb19d3..1d70ba6f9f10e65648484d6f99a9bd63a7682848 100644 (file)
@@ -30,7 +30,7 @@ struct mii_phy_def
 struct mii_phy
 {
        struct mii_phy_def*     def;
-       int                     advertising;
+       u32                     advertising;
        int                     mii_id;
 
        /* 1: autoneg enabled, 0: disabled */
@@ -85,6 +85,9 @@ extern int mii_phy_probe(struct mii_phy *phy, int mii_id);
 #define MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE   0x0001
 #define MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR   0x0004
 
+/* MII BCM5241 Additional registers */
+#define MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR 0x0008
+
 /* MII BCM5400 1000-BASET Control register */
 #define MII_BCM5400_GB_CONTROL                 0x09
 #define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP   0x0200
@@ -115,5 +118,7 @@ extern int mii_phy_probe(struct mii_phy *phy, int mii_id);
 #define MII_M1011_PHY_SPEC_STATUS_SPD_MASK     0xc000
 #define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX   0x2000
 #define MII_M1011_PHY_SPEC_STATUS_RESOLVED     0x0800
+#define MII_M1011_PHY_SPEC_STATUS_TX_PAUSE     0x0008
+#define MII_M1011_PHY_SPEC_STATUS_RX_PAUSE     0x0004
 
 #endif /* __SUNGEM_PHY_H__ */
index 4056ba1ff3c7899f53c3dad2f1fef9439b5e5555..f4bf62c2a7a5dea42a175509d4aff8ff84e5b596 100644 (file)
@@ -68,8 +68,8 @@
 
 #define DRV_MODULE_NAME                "tg3"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "3.71"
-#define DRV_MODULE_RELDATE     "December 15, 2006"
+#define DRV_MODULE_VERSION     "3.72"
+#define DRV_MODULE_RELDATE     "January 8, 2007"
 
 #define TG3_DEF_MAC_MODE       0
 #define TG3_DEF_RX_MODE                0
@@ -1015,7 +1015,12 @@ out:
        else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
-               tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
+               if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
+                       tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
+                       tg3_writephy(tp, MII_TG3_TEST1,
+                                    MII_TG3_TEST1_TRIM_EN | 0x4);
+               } else
+                       tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that */
@@ -10803,9 +10808,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 
        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
                        tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
-               else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
+                       if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
+                               tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
+               } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
                        tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
        }
 
index cf78a7e5997b1734ef6655896817c6d8a7d3cb1a..80f59ac7ec58b3fd8ff7c7783bf2ee5e57da3038 100644 (file)
 #define MII_TG3_EPHY_TEST              0x1f /* 5906 PHY register */
 #define MII_TG3_EPHY_SHADOW_EN         0x80
 
+#define MII_TG3_TEST1                  0x1e
+#define MII_TG3_TEST1_TRIM_EN          0x0010
+
 /* There are two ways to manage the TX descriptors on the tigon3.
  * Either the descriptors are in host DMA'able memory, or they
  * exist only in the cards on-chip SRAM.  All 16 send bds are under
@@ -2256,6 +2259,7 @@ struct tg3 {
 #define TG3_FLG2_1SHOT_MSI             0x10000000
 #define TG3_FLG2_PHY_JITTER_BUG                0x20000000
 #define TG3_FLG2_NO_FWARE_REPORTED     0x40000000
+#define TG3_FLG2_PHY_ADJUST_TRIM       0x80000000
 
        u32                             split_mode_max_reqs;
 #define SPLIT_MODE_5704_MAX_REQ                3
index 0e94fbbf7a941801eb1bbf2051323fe15f793eb4..b85857a848706fcde3c25d251fac7d280aea9201 100644 (file)
@@ -2664,7 +2664,7 @@ static void __ipw2100_rx_process(struct ipw2100_priv *priv)
                                break;
                        }
 #endif
-                       if (stats.len < sizeof(u->rx_data.header))
+                       if (stats.len < sizeof(struct ieee80211_hdr_3addr))
                                break;
                        switch (WLAN_FC_GET_TYPE(u->rx_data.header.frame_ctl)) {
                        case IEEE80211_FTYPE_MGMT:
index f1dd81a1d592eabe12865903211f6d2e5e0b9c95..3cfb0a3575e660938abc6070c9048563ddd7c287 100644 (file)
@@ -19,7 +19,7 @@ config PCI_MSI
 
 config PCI_MULTITHREAD_PROBE
        bool "PCI Multi-threaded probe (EXPERIMENTAL)"
-       depends on PCI && EXPERIMENTAL
+       depends on PCI && EXPERIMENTAL && BROKEN
        help
          Say Y here if you want the PCI core to spawn a new thread for
          every PCI device that is probed.  This can cause a huge
index 8f0322d6f3bf473fd60094073bbd353ed834eaa3..0535efc4f184ac26bcc7cd04447b142fb8db60db 100644 (file)
@@ -1117,10 +1117,11 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_ICH6_1,     asus_h
 static void quirk_sis_96x_smbus(struct pci_dev *dev)
 {
        u8 val = 0;
-       printk(KERN_INFO "Enabling SiS 96x SMBus.\n");
-       pci_read_config_byte(dev, 0x77, &val);
-       pci_write_config_byte(dev, 0x77, val & ~0x10);
        pci_read_config_byte(dev, 0x77, &val);
+       if (val & 0x10) {
+               printk(KERN_INFO "Enabling SiS 96x SMBus.\n");
+               pci_write_config_byte(dev, 0x77, val & ~0x10);
+       }
 }
 
 /*
@@ -1152,11 +1153,12 @@ static void quirk_sis_503(struct pci_dev *dev)
        printk(KERN_WARNING "Uncovering SIS%x that hid as a SIS503 (compatible=%d)\n", devid, sis_96x_compatible);
 
        /*
-        * Ok, it now shows up as a 96x.. The 96x quirks are after
-        * the 503 quirk in the quirk table, so they'll automatically
-        * run and enable things like the SMBus device
+        * Ok, it now shows up as a 96x.. run the 96x quirk by
+        * hand in case it has already been processed.
+        * (depends on link order, which is apparently not guaranteed)
         */
        dev->device = devid;
+       quirk_sis_96x_smbus(dev);
 }
 
 static void __init quirk_sis_96x_compatible(struct pci_dev *dev)
index 45f2b20ef513d39612dcfd64cc838a65d85973d9..fab381ed853c9d20726671b99ec1199392e50ce7 100644 (file)
@@ -193,6 +193,18 @@ static struct pci_dev * pci_find_subsys(unsigned int vendor,
        struct pci_dev *dev;
 
        WARN_ON(in_interrupt());
+
+       /*
+        * pci_find_subsys() can be called on the ide_setup() path, super-early
+        * in boot.  But the down_read() will enable local interrupts, which
+        * can cause some machines to crash.  So here we detect and flag that
+        * situation and bail out early.
+        */
+       if (unlikely(list_empty(&pci_devices))) {
+               printk(KERN_INFO "pci_find_subsys() called while pci_devices "
+                               "is still empty\n");
+               return NULL;
+       }
        down_read(&pci_bus_sem);
        n = from ? from->global_list.next : pci_devices.next;
 
@@ -259,6 +271,18 @@ pci_get_subsys(unsigned int vendor, unsigned int device,
        struct pci_dev *dev;
 
        WARN_ON(in_interrupt());
+
+       /*
+        * pci_get_subsys() can potentially be called by drivers super-early
+        * in boot.  But the down_read() will enable local interrupts, which
+        * can cause some machines to crash.  So here we detect and flag that
+        * situation and bail out early.
+        */
+       if (unlikely(list_empty(&pci_devices))) {
+               printk(KERN_NOTICE "pci_get_subsys() called while pci_devices "
+                               "is still empty\n");
+               return NULL;
+       }
        down_read(&pci_bus_sem);
        n = from ? from->global_list.next : pci_devices.next;
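
Both lookups now bail out before taking pci_bus_sem when pci_devices is still empty, since down_read() can enable local interrupts during very early boot and crash some machines, and an empty list cannot match anything anyway. A hedged sketch of the guard-before-lock shape; list_empty(), the rwsem calls and the pci_devices list are the kernel objects the patch uses, the surrounding function is illustrative:

static struct pci_dev *sketch_pci_lookup(void)
{
        if (unlikely(list_empty(&pci_devices)))
                return NULL;            /* too early in boot: nothing registered yet */

        down_read(&pci_bus_sem);
        /* ... walk pci_devices looking for a match ... */
        up_read(&pci_bus_sem);
        return NULL;
}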
 
index 4f654c901c64d8c37278970576d13cdd829de904..a724ab49a7973bfd474681b2d1d0680d540b27e5 100644 (file)
@@ -33,6 +33,8 @@
 
 #include <asm/mach/time.h>
 
+#include <asm/arch/at91_rtc.h>
+
 
 #define AT91_RTC_FREQ          1
 #define AT91_RTC_EPOCH         1900UL  /* just like arch/arm/common/rtctime.c */
index 1460f6b769f2e92975206485bedc9da19be2625e..e7851e3739abd11030cdf0a724da2436315164dd 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * An I2C driver for the Ricoh RS5C372 RTC
+ * An I2C driver for Ricoh RS5C372 and RV5C38[67] RTCs
  *
  * Copyright (C) 2005 Pavel Mironchik <pmironchik@optifacio.net>
  * Copyright (C) 2006 Tower Technologies
@@ -13,7 +13,7 @@
 #include <linux/rtc.h>
 #include <linux/bcd.h>
 
-#define DRV_VERSION "0.3"
+#define DRV_VERSION "0.4"
 
 /* Addresses to scan */
 static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END };
@@ -21,6 +21,13 @@ static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END };
 /* Insmod parameters */
 I2C_CLIENT_INSMOD;
 
+
+/*
+ * Ricoh has a family of I2C based RTCs, which differ only slightly from
+ * each other.  Differences center on pinout (e.g. how many interrupts,
+ * output clock, etc) and how the control registers are used.  The '372
+ * is significant only because that's the one this driver first supported.
+ */
 #define RS5C372_REG_SECS       0
 #define RS5C372_REG_MINS       1
 #define RS5C372_REG_HOURS      2
@@ -29,59 +36,142 @@ I2C_CLIENT_INSMOD;
 #define RS5C372_REG_MONTH      5
 #define RS5C372_REG_YEAR       6
 #define RS5C372_REG_TRIM       7
+#      define RS5C372_TRIM_XSL         0x80
+#      define RS5C372_TRIM_MASK        0x7F
+
+#define RS5C_REG_ALARM_A_MIN   8                       /* or ALARM_W */
+#define RS5C_REG_ALARM_A_HOURS 9
+#define RS5C_REG_ALARM_A_WDAY  10
+
+#define RS5C_REG_ALARM_B_MIN   11                      /* or ALARM_D */
+#define RS5C_REG_ALARM_B_HOURS 12
+#define RS5C_REG_ALARM_B_WDAY  13                      /* (ALARM_B only) */
+
+#define RS5C_REG_CTRL1         14
+#      define RS5C_CTRL1_AALE          (1 << 7)        /* or WALE */
+#      define RS5C_CTRL1_BALE          (1 << 6)        /* or DALE */
+#      define RV5C387_CTRL1_24         (1 << 5)
+#      define RS5C372A_CTRL1_SL1       (1 << 5)
+#      define RS5C_CTRL1_CT_MASK       (7 << 0)
+#      define RS5C_CTRL1_CT0           (0 << 0)        /* no periodic irq */
+#      define RS5C_CTRL1_CT4           (4 << 0)        /* 1 Hz level irq */
+#define RS5C_REG_CTRL2         15
+#      define RS5C372_CTRL2_24         (1 << 5)
+#      define RS5C_CTRL2_XSTP          (1 << 4)
+#      define RS5C_CTRL2_CTFG          (1 << 2)
+#      define RS5C_CTRL2_AAFG          (1 << 1)        /* or WAFG */
+#      define RS5C_CTRL2_BAFG          (1 << 0)        /* or DAFG */
+
+
+/* to read (style 1) or write registers starting at R */
+#define RS5C_ADDR(R)           (((R) << 4) | 0)
+
+
+enum rtc_type {
+       rtc_undef = 0,
+       rtc_rs5c372a,
+       rtc_rs5c372b,
+       rtc_rv5c386,
+       rtc_rv5c387a,
+};
 
-#define RS5C372_TRIM_XSL       0x80
-#define RS5C372_TRIM_MASK      0x7F
+/* REVISIT:  this assumes that:
+ *  - we're in the 21st century, so it's safe to ignore the century
+ *    bit for rv5c38[67] (REG_MONTH bit 7);
+ *  - we should use ALARM_A not ALARM_B (may be wrong on some boards)
+ */
+struct rs5c372 {
+       struct i2c_client       *client;
+       struct rtc_device       *rtc;
+       enum rtc_type           type;
+       unsigned                time24:1;
+       unsigned                has_irq:1;
+       char                    buf[17];
+       char                    *regs;
+
+       /* on conversion to a "new style" i2c driver, this vanishes */
+       struct i2c_client       dev;
+};
 
-#define RS5C372_REG_BASE       0
+static int rs5c_get_regs(struct rs5c372 *rs5c)
+{
+       struct i2c_client       *client = rs5c->client;
+       struct i2c_msg          msgs[] = {
+               { client->addr, I2C_M_RD, sizeof rs5c->buf, rs5c->buf },
+       };
+
+       /* This implements the third reading method from the datasheet, using
+        * an internal address that's reset after each transaction (by STOP)
+        * to 0x0f ... so we read extra registers, and skip the first one.
+        *
+        * The first method doesn't work with the iop3xx adapter driver, on at
+        * least 80219 chips; this works around that bug.
+        */
+       if ((i2c_transfer(client->adapter, msgs, 1)) != 1) {
+               pr_debug("%s: can't read registers\n", rs5c->rtc->name);
+               return -EIO;
+       }
 
-static int rs5c372_attach(struct i2c_adapter *adapter);
-static int rs5c372_detach(struct i2c_client *client);
-static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind);
+       dev_dbg(&client->dev,
+               "%02x %02x %02x (%02x) %02x %02x %02x (%02x), "
+               "%02x %02x %02x, %02x %02x %02x; %02x %02x\n",
+               rs5c->regs[0],  rs5c->regs[1],  rs5c->regs[2],  rs5c->regs[3],
+               rs5c->regs[4],  rs5c->regs[5],  rs5c->regs[6],  rs5c->regs[7],
+               rs5c->regs[8],  rs5c->regs[9],  rs5c->regs[10], rs5c->regs[11],
+               rs5c->regs[12], rs5c->regs[13], rs5c->regs[14], rs5c->regs[15]);
 
-struct rs5c372 {
-       u8 reg_addr;
-       u8 regs[17];
-       struct i2c_msg msg[1];
-       struct i2c_client client;
-       struct rtc_device *rtc;
-};
+       return 0;
+}
 
-static struct i2c_driver rs5c372_driver = {
-       .driver         = {
-               .name   = "rs5c372",
-       },
-       .attach_adapter = &rs5c372_attach,
-       .detach_client  = &rs5c372_detach,
-};
+static unsigned rs5c_reg2hr(struct rs5c372 *rs5c, unsigned reg)
+{
+       unsigned        hour;
 
-static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm)
+       if (rs5c->time24)
+               return BCD2BIN(reg & 0x3f);
+
+       hour = BCD2BIN(reg & 0x1f);
+       if (hour == 12)
+               hour = 0;
+       if (reg & 0x20)
+               hour += 12;
+       return hour;
+}
+
+static unsigned rs5c_hr2reg(struct rs5c372 *rs5c, unsigned hour)
 {
+       if (rs5c->time24)
+               return BIN2BCD(hour);
+
+       if (hour > 12)
+               return 0x20 | BIN2BCD(hour - 12);
+       if (hour == 12)
+               return 0x20 | BIN2BCD(12);
+       if (hour == 0)
+               return BIN2BCD(12);
+       return BIN2BCD(hour);
+}
 
-       struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
-       u8 *buf = &(rs5c372->regs[1]);
+static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm)
+{
+       struct rs5c372  *rs5c = i2c_get_clientdata(client);
+       int             status = rs5c_get_regs(rs5c);
 
-       /* this implements the 3rd reading method, according
-        * to the datasheet. rs5c372 defaults to internal
-        * address 0xF, so 0x0 is in regs[1]
-        */
+       if (status < 0)
+               return status;
 
-       if ((i2c_transfer(client->adapter, rs5c372->msg, 1)) != 1) {
-               dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
-               return -EIO;
-       }
+       tm->tm_sec = BCD2BIN(rs5c->regs[RS5C372_REG_SECS] & 0x7f);
+       tm->tm_min = BCD2BIN(rs5c->regs[RS5C372_REG_MINS] & 0x7f);
+       tm->tm_hour = rs5c_reg2hr(rs5c, rs5c->regs[RS5C372_REG_HOURS]);
 
-       tm->tm_sec = BCD2BIN(buf[RS5C372_REG_SECS] & 0x7f);
-       tm->tm_min = BCD2BIN(buf[RS5C372_REG_MINS] & 0x7f);
-       tm->tm_hour = BCD2BIN(buf[RS5C372_REG_HOURS] & 0x3f);
-       tm->tm_wday = BCD2BIN(buf[RS5C372_REG_WDAY] & 0x07);
-       tm->tm_mday = BCD2BIN(buf[RS5C372_REG_DAY] & 0x3f);
+       tm->tm_wday = BCD2BIN(rs5c->regs[RS5C372_REG_WDAY] & 0x07);
+       tm->tm_mday = BCD2BIN(rs5c->regs[RS5C372_REG_DAY] & 0x3f);
 
        /* tm->tm_mon is zero-based */
-       tm->tm_mon = BCD2BIN(buf[RS5C372_REG_MONTH] & 0x1f) - 1;
+       tm->tm_mon = BCD2BIN(rs5c->regs[RS5C372_REG_MONTH] & 0x1f) - 1;
 
        /* year is 1900 + tm->tm_year */
-       tm->tm_year = BCD2BIN(buf[RS5C372_REG_YEAR]) + 100;
+       tm->tm_year = BCD2BIN(rs5c->regs[RS5C372_REG_YEAR]) + 100;
 
        dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
                "mday=%d, mon=%d, year=%d, wday=%d\n",
@@ -89,22 +179,25 @@ static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm)
                tm->tm_sec, tm->tm_min, tm->tm_hour,
                tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
 
-       return 0;
+       /* rtc might need initialization */
+       return rtc_valid_tm(tm);
 }
 
 static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm)
 {
-       unsigned char buf[8] = { RS5C372_REG_BASE };
+       struct rs5c372  *rs5c = i2c_get_clientdata(client);
+       unsigned char   buf[8];
 
-       dev_dbg(&client->dev,
-               "%s: secs=%d, mins=%d, hours=%d "
+       dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d "
                "mday=%d, mon=%d, year=%d, wday=%d\n",
-               __FUNCTION__, tm->tm_sec, tm->tm_min, tm->tm_hour,
+               __FUNCTION__,
+               tm->tm_sec, tm->tm_min, tm->tm_hour,
                tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
 
+       buf[0] = RS5C_ADDR(RS5C372_REG_SECS);
        buf[1] = BIN2BCD(tm->tm_sec);
        buf[2] = BIN2BCD(tm->tm_min);
-       buf[3] = BIN2BCD(tm->tm_hour);
+       buf[3] = rs5c_hr2reg(rs5c, tm->tm_hour);
        buf[4] = BIN2BCD(tm->tm_wday);
        buf[5] = BIN2BCD(tm->tm_mday);
        buf[6] = BIN2BCD(tm->tm_mon + 1);
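
rs5c_reg2hr() and rs5c_hr2reg() hide the chip's two hour encodings behind one interface: 24-hour mode is plain BCD, while in 12-hour mode bit 5 is the PM flag and the value 12 stands for 12 o'clock, so midnight reads back as hour 0 and noon as hour 12. A standalone restatement of the same conversion, with the BCD helpers written out so it compiles on its own:

#include <stdint.h>

static unsigned bcd2bin(uint8_t v)  { return (v & 0x0f) + (v >> 4) * 10; }
static uint8_t  bin2bcd(unsigned v) { return (uint8_t)(((v / 10) << 4) | (v % 10)); }

static unsigned reg_to_hour(uint8_t reg, int time24)
{
        unsigned hour;

        if (time24)
                return bcd2bin(reg & 0x3f);

        hour = bcd2bin(reg & 0x1f);
        if (hour == 12)
                hour = 0;               /* "12" without PM is midnight */
        if (reg & 0x20)
                hour += 12;             /* PM bit */
        return hour;
}

static uint8_t hour_to_reg(unsigned hour, int time24)
{
        if (time24)
                return bin2bcd(hour);
        if (hour > 12)
                return 0x20 | bin2bcd(hour - 12);
        if (hour == 12)
                return 0x20 | bin2bcd(12);      /* noon */
        if (hour == 0)
                return bin2bcd(12);             /* midnight */
        return bin2bcd(hour);
}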
@@ -118,21 +211,43 @@ static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm)
        return 0;
 }
 
+#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
+#define        NEED_TRIM
+#endif
+
+#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
+#define        NEED_TRIM
+#endif
+
+#ifdef NEED_TRIM
 static int rs5c372_get_trim(struct i2c_client *client, int *osc, int *trim)
 {
        struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
-       u8 tmp = rs5c372->regs[RS5C372_REG_TRIM + 1];
+       u8 tmp = rs5c372->regs[RS5C372_REG_TRIM];
 
        if (osc)
                *osc = (tmp & RS5C372_TRIM_XSL) ? 32000 : 32768;
 
        if (trim) {
-               *trim = tmp & RS5C372_TRIM_MASK;
-               dev_dbg(&client->dev, "%s: raw trim=%x\n", __FUNCTION__, *trim);
+               dev_dbg(&client->dev, "%s: raw trim=%x\n", __FUNCTION__, tmp);
+               tmp &= RS5C372_TRIM_MASK;
+               if (tmp & 0x3e) {
+                       int t = tmp & 0x3f;
+
+                       if (tmp & 0x40)
+                               t = (~t | (s8)0xc0) + 1;
+                       else
+                               t = t - 1;
+
+                       tmp = t * 2;
+               } else
+                       tmp = 0;
+               *trim = tmp;
        }
 
        return 0;
 }
+#endif
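/* Editorial note (not part of the patch): a worked example of the trim
 * decoding above, assuming RS5C372_TRIM_MASK covers the low seven bits
 * of the register.  For a raw register value of 0x07:
 *
 *      tmp & 0x3e  -> 0x06, non-zero, so the value gets decoded
 *      bit 0x40    -> clear, so t = (0x07 & 0x3f) - 1 = 6
 *      *trim       -> t * 2 = 12
 *
 * Raw values with none of bits 1..5 set (e.g. 0x00 or 0x01) report a
 * trim of 0; when bit 0x40 is set, the (~t | (s8)0xc0) + 1 expression
 * treats the adjustment as negative.
 */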
 
 static int rs5c372_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
@@ -144,25 +259,190 @@ static int rs5c372_rtc_set_time(struct device *dev, struct rtc_time *tm)
        return rs5c372_set_datetime(to_i2c_client(dev), tm);
 }
 
+#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
+
+static int
+rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+       struct i2c_client       *client = to_i2c_client(dev);
+       struct rs5c372          *rs5c = i2c_get_clientdata(client);
+       unsigned char           buf[2];
+       int                     status;
+
+       buf[1] = rs5c->regs[RS5C_REG_CTRL1];
+       switch (cmd) {
+       case RTC_UIE_OFF:
+       case RTC_UIE_ON:
+               /* some 372a modes use a different IRQ pin for 1Hz irqs */
+               if (rs5c->type == rtc_rs5c372a
+                               && (buf[1] & RS5C372A_CTRL1_SL1))
+                       return -ENOIOCTLCMD;
+       case RTC_AIE_OFF:
+       case RTC_AIE_ON:
+               /* these irq management calls only make sense for chips
+                * which are wired up to an IRQ.
+                */
+               if (!rs5c->has_irq)
+                       return -ENOIOCTLCMD;
+               break;
+       default:
+               return -ENOIOCTLCMD;
+       }
+
+       status = rs5c_get_regs(rs5c);
+       if (status < 0)
+               return status;
+
+       buf[0] = RS5C_ADDR(RS5C_REG_CTRL1);
+       switch (cmd) {
+       case RTC_AIE_OFF:       /* alarm off */
+               buf[1] &= ~RS5C_CTRL1_AALE;
+               break;
+       case RTC_AIE_ON:        /* alarm on */
+               buf[1] |= RS5C_CTRL1_AALE;
+               break;
+       case RTC_UIE_OFF:       /* update off */
+               buf[1] &= ~RS5C_CTRL1_CT_MASK;
+               break;
+       case RTC_UIE_ON:        /* update on */
+               buf[1] &= ~RS5C_CTRL1_CT_MASK;
+               buf[1] |= RS5C_CTRL1_CT4;
+               break;
+       }
+       if ((i2c_master_send(client, buf, 2)) != 2) {
+               printk(KERN_WARNING "%s: can't update alarm\n",
+                       rs5c->rtc->name);
+               status = -EIO;
+       } else
+               rs5c->regs[RS5C_REG_CTRL1] = buf[1];
+       return status;
+}
+
+#else
+#define        rs5c_rtc_ioctl  NULL
+#endif
+
+
+/* NOTE:  Since RTC_WKALM_{RD,SET} were originally defined for EFI,
+ * which only exposes a polled programming interface; and since
+ * these calls map directly to those EFI requests; we don't demand
+ * we have an IRQ for this chip when we go through this API.
+ *
+ * The older x86_pc derived RTC_ALM_{READ,SET} calls require irqs
+ * though, managed through RTC_AIE_{ON,OFF} requests.
+ */
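/* Editorial illustration (not part of the patch): a minimal userspace
 * sketch of exercising the alarm path below through the standard RTC
 * character device.  The /dev/rtc0 node name is an assumption; the
 * ioctls and struct rtc_wkalrm come from <linux/rtc.h>, and error
 * checking is omitted for brevity.
 *
 *      #include <linux/rtc.h>
 *      #include <sys/ioctl.h>
 *      #include <fcntl.h>
 *
 *      int fd = open("/dev/rtc0", O_RDONLY);
 *      struct rtc_wkalrm alrm = { .enabled = 1 };
 *
 *      ioctl(fd, RTC_RD_TIME, &alrm.time);     // current time
 *      alrm.time.tm_min += 5;                  // ~5 minutes out (no overflow handling)
 *      ioctl(fd, RTC_WKALM_SET, &alrm);        // works without an IRQ
 *      ioctl(fd, RTC_AIE_ON, 0);               // this one needs rs5c->has_irq
 */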
+
+static int rs5c_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+{
+       struct i2c_client       *client = to_i2c_client(dev);
+       struct rs5c372          *rs5c = i2c_get_clientdata(client);
+       int                     status;
+
+       status = rs5c_get_regs(rs5c);
+       if (status < 0)
+               return status;
+
+       /* report alarm time */
+       t->time.tm_sec = 0;
+       t->time.tm_min = BCD2BIN(rs5c->regs[RS5C_REG_ALARM_A_MIN] & 0x7f);
+       t->time.tm_hour = rs5c_reg2hr(rs5c, rs5c->regs[RS5C_REG_ALARM_A_HOURS]);
+       t->time.tm_mday = -1;
+       t->time.tm_mon = -1;
+       t->time.tm_year = -1;
+       t->time.tm_wday = -1;
+       t->time.tm_yday = -1;
+       t->time.tm_isdst = -1;
+
+       /* ... and status */
+       t->enabled = !!(rs5c->regs[RS5C_REG_CTRL1] & RS5C_CTRL1_AALE);
+       t->pending = !!(rs5c->regs[RS5C_REG_CTRL2] & RS5C_CTRL2_AAFG);
+
+       return 0;
+}
+
+static int rs5c_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+{
+       struct i2c_client       *client = to_i2c_client(dev);
+       struct rs5c372          *rs5c = i2c_get_clientdata(client);
+       int                     status;
+       unsigned char           buf[4];
+
+       /* only handle up to 24 hours in the future, like RTC_ALM_SET */
+       if (t->time.tm_mday != -1
+                       || t->time.tm_mon != -1
+                       || t->time.tm_year != -1)
+               return -EINVAL;
+
+       /* REVISIT: round up tm_sec */
+
+       /* if needed, disable irq (clears pending status) */
+       status = rs5c_get_regs(rs5c);
+       if (status < 0)
+               return status;
+       if (rs5c->regs[RS5C_REG_CTRL1] & RS5C_CTRL1_AALE) {
+               buf[0] = RS5C_ADDR(RS5C_REG_CTRL1);
+               buf[1] = rs5c->regs[RS5C_REG_CTRL1] & ~RS5C_CTRL1_AALE;
+               if (i2c_master_send(client, buf, 2) != 2) {
+                       pr_debug("%s: can't disable alarm\n", rs5c->rtc->name);
+                       return -EIO;
+               }
+               rs5c->regs[RS5C_REG_CTRL1] = buf[1];
+       }
+
+       /* set alarm */
+       buf[0] = RS5C_ADDR(RS5C_REG_ALARM_A_MIN);
+       buf[1] = BIN2BCD(t->time.tm_min);
+       buf[2] = rs5c_hr2reg(rs5c, t->time.tm_hour);
+       buf[3] = 0x7f;  /* any/all days */
+       if ((i2c_master_send(client, buf, 4)) != 4) {
+               pr_debug("%s: can't set alarm time\n", rs5c->rtc->name);
+               return -EIO;
+       }
+
+       /* ... and maybe enable its irq */
+       if (t->enabled) {
+               buf[0] = RS5C_ADDR(RS5C_REG_CTRL1);
+               buf[1] = rs5c->regs[RS5C_REG_CTRL1] | RS5C_CTRL1_AALE;
+               if ((i2c_master_send(client, buf, 2)) != 2)
+                       printk(KERN_WARNING "%s: can't enable alarm\n",
+                               rs5c->rtc->name);
+               rs5c->regs[RS5C_REG_CTRL1] = buf[1];
+       }
+
+       return 0;
+}
+
+#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
+
 static int rs5c372_rtc_proc(struct device *dev, struct seq_file *seq)
 {
        int err, osc, trim;
 
        err = rs5c372_get_trim(to_i2c_client(dev), &osc, &trim);
        if (err == 0) {
-               seq_printf(seq, "%d.%03d KHz\n", osc / 1000, osc % 1000);
-               seq_printf(seq, "trim\t: %d\n", trim);
+               seq_printf(seq, "crystal\t\t: %d.%03d KHz\n",
+                               osc / 1000, osc % 1000);
+               seq_printf(seq, "trim\t\t: %d\n", trim);
        }
 
        return 0;
 }
 
+#else
+#define        rs5c372_rtc_proc        NULL
+#endif
+
 static const struct rtc_class_ops rs5c372_rtc_ops = {
        .proc           = rs5c372_rtc_proc,
+       .ioctl          = rs5c_rtc_ioctl,
        .read_time      = rs5c372_rtc_read_time,
        .set_time       = rs5c372_rtc_set_time,
+       .read_alarm     = rs5c_read_alarm,
+       .set_alarm      = rs5c_set_alarm,
 };
 
+#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
+
 static ssize_t rs5c372_sysfs_show_trim(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
@@ -172,7 +452,7 @@ static ssize_t rs5c372_sysfs_show_trim(struct device *dev,
        if (err)
                return err;
 
-       return sprintf(buf, "0x%2x\n", trim);
+       return sprintf(buf, "%d\n", trim);
 }
 static DEVICE_ATTR(trim, S_IRUGO, rs5c372_sysfs_show_trim, NULL);
 
@@ -189,16 +469,35 @@ static ssize_t rs5c372_sysfs_show_osc(struct device *dev,
 }
 static DEVICE_ATTR(osc, S_IRUGO, rs5c372_sysfs_show_osc, NULL);
 
-static int rs5c372_attach(struct i2c_adapter *adapter)
+static int rs5c_sysfs_register(struct device *dev)
 {
-       return i2c_probe(adapter, &addr_data, rs5c372_probe);
+       int err;
+
+       err = device_create_file(dev, &dev_attr_trim);
+       if (err)
+               return err;
+       err = device_create_file(dev, &dev_attr_osc);
+       if (err)
+               device_remove_file(dev, &dev_attr_trim);
+
+       return err;
+}
+
+#else
+static int rs5c_sysfs_register(struct device *dev)
+{
+       return 0;
 }
+#endif /* SYSFS */
+
+static struct i2c_driver rs5c372_driver;
 
 static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
 {
        int err = 0;
        struct i2c_client *client;
        struct rs5c372 *rs5c372;
+       struct rtc_time tm;
 
        dev_dbg(adapter->class_dev.dev, "%s\n", __FUNCTION__);
 
@@ -211,7 +510,15 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
                err = -ENOMEM;
                goto exit;
        }
-       client = &rs5c372->client;
+
+       /* we read registers 0x0f then 0x00-0x0f; skip the first one */
+       rs5c372->regs = &rs5c372->buf[1];
+
+       /* On conversion to a "new style" i2c driver, we'll be handed
+        * the i2c_client (we won't create it)
+        */
+       client = &rs5c372->dev;
+       rs5c372->client = client;
 
        /* I2C client */
        client->addr = address;
@@ -222,16 +529,99 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
 
        i2c_set_clientdata(client, rs5c372);
 
-       rs5c372->msg[0].addr = address;
-       rs5c372->msg[0].flags = I2C_M_RD;
-       rs5c372->msg[0].len = sizeof(rs5c372->regs);
-       rs5c372->msg[0].buf = rs5c372->regs;
-
        /* Inform the i2c layer */
        if ((err = i2c_attach_client(client)))
                goto exit_kfree;
 
-       dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
+       err = rs5c_get_regs(rs5c372);
+       if (err < 0)
+               goto exit_detach;
+
+       /* For "new style" drivers, irq is in i2c_client and chip type
+        * info comes from i2c_client.dev.platform_data.  Meanwhile:
+        *
+        * STICK BOARD-SPECIFIC SETUP CODE RIGHT HERE
+        */
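/* Editorial illustration (not part of the patch): what such board-specific
 * setup might look like.  machine_is_myboard() is purely hypothetical; a
 * real board would use its own platform check and chip type here.
 *
 *      if (machine_is_myboard()) {
 *              rs5c372->type = rtc_rv5c387a;
 *              rs5c372->has_irq = 1;
 *      }
 */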
+       if (rs5c372->type == rtc_undef) {
+               rs5c372->type = rtc_rs5c372b;
+               dev_warn(&client->dev, "assuming rs5c372b\n");
+       }
+
+       /* clock may be set for am/pm or 24 hr time */
+       switch (rs5c372->type) {
+       case rtc_rs5c372a:
+       case rtc_rs5c372b:
+               /* alarm uses ALARM_A; and nINTRA on 372a, nINTR on 372b.
+                * so does periodic irq, except some 372a modes.
+                */
+               if (rs5c372->regs[RS5C_REG_CTRL2] & RS5C372_CTRL2_24)
+                       rs5c372->time24 = 1;
+               break;
+       case rtc_rv5c386:
+       case rtc_rv5c387a:
+               if (rs5c372->regs[RS5C_REG_CTRL1] & RV5C387_CTRL1_24)
+                       rs5c372->time24 = 1;
+               /* alarm uses ALARM_W; and nINTRB for alarm and periodic
+                * irq, on both 386 and 387
+                */
+               break;
+       default:
+               dev_err(&client->dev, "unknown RTC type\n");
+               goto exit_detach;
+       }
+
+       /* if the oscillator lost power and no other software (like
+        * the bootloader) set it up, do it here.
+        */
+       if (rs5c372->regs[RS5C_REG_CTRL2] & RS5C_CTRL2_XSTP) {
+               unsigned char buf[3];
+
+               rs5c372->regs[RS5C_REG_CTRL2] &= ~RS5C_CTRL2_XSTP;
+
+               buf[0] = RS5C_ADDR(RS5C_REG_CTRL1);
+               buf[1] = rs5c372->regs[RS5C_REG_CTRL1];
+               buf[2] = rs5c372->regs[RS5C_REG_CTRL2];
+
+               /* use 24hr mode */
+               switch (rs5c372->type) {
+               case rtc_rs5c372a:
+               case rtc_rs5c372b:
+                       buf[2] |= RS5C372_CTRL2_24;
+                       rs5c372->time24 = 1;
+                       break;
+               case rtc_rv5c386:
+               case rtc_rv5c387a:
+                       buf[1] |= RV5C387_CTRL1_24;
+                       rs5c372->time24 = 1;
+                       break;
+               default:
+                       /* impossible */
+                       break;
+               }
+
+               if ((i2c_master_send(client, buf, 3)) != 3) {
+                       dev_err(&client->dev, "setup error\n");
+                       goto exit_detach;
+               }
+               rs5c372->regs[RS5C_REG_CTRL1] = buf[1];
+               rs5c372->regs[RS5C_REG_CTRL2] = buf[2];
+       }
+
+       if (rs5c372_get_datetime(client, &tm) < 0)
+               dev_warn(&client->dev, "clock needs to be set\n");
+
+       dev_info(&client->dev, "%s found, %s, driver version " DRV_VERSION "\n",
+                       ({ char *s; switch (rs5c372->type) {
+                       case rtc_rs5c372a:      s = "rs5c372a"; break;
+                       case rtc_rs5c372b:      s = "rs5c372b"; break;
+                       case rtc_rv5c386:       s = "rv5c386"; break;
+                       case rtc_rv5c387a:      s = "rv5c387a"; break;
+                       default:                s = "chip"; break;
+                       }; s;}),
+                       rs5c372->time24 ? "24hr" : "am/pm"
+                       );
+
+       /* FIXME when client->irq exists, use it to register alarm irq */
 
        rs5c372->rtc = rtc_device_register(rs5c372_driver.driver.name,
                                &client->dev, &rs5c372_rtc_ops, THIS_MODULE);
@@ -241,18 +631,12 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
                goto exit_detach;
        }
 
-       err = device_create_file(&client->dev, &dev_attr_trim);
+       err = rs5c_sysfs_register(&client->dev);
        if (err)
                goto exit_devreg;
-       err = device_create_file(&client->dev, &dev_attr_osc);
-       if (err)
-               goto exit_trim;
 
        return 0;
 
-exit_trim:
-       device_remove_file(&client->dev, &dev_attr_trim);
-
 exit_devreg:
        rtc_device_unregister(rs5c372->rtc);
 
@@ -266,6 +650,11 @@ exit:
        return err;
 }
 
+static int rs5c372_attach(struct i2c_adapter *adapter)
+{
+       return i2c_probe(adapter, &addr_data, rs5c372_probe);
+}
+
 static int rs5c372_detach(struct i2c_client *client)
 {
        int err;
@@ -274,6 +663,8 @@ static int rs5c372_detach(struct i2c_client *client)
        if (rs5c372->rtc)
                rtc_device_unregister(rs5c372->rtc);
 
+       /* REVISIT properly destroy the sysfs files ... */
+
        if ((err = i2c_detach_client(client)))
                return err;
 
@@ -281,6 +672,14 @@ static int rs5c372_detach(struct i2c_client *client)
        return 0;
 }
 
+static struct i2c_driver rs5c372_driver = {
+       .driver         = {
+               .name   = "rtc-rs5c372",
+       },
+       .attach_adapter = &rs5c372_attach,
+       .detach_client  = &rs5c372_detach,
+};
+
 static __init int rs5c372_init(void)
 {
        return i2c_add_driver(&rs5c372_driver);
index 1678b6c757ec717d40d298c32ebe3cdceb669cd8..a420cd099041b1f660d1d5fee056bf501b911096 100644 (file)
@@ -117,7 +117,7 @@ vmcp_write(struct file *file, const char __user * buff, size_t count,
                return -ENOMEM;
        }
        debug_text_event(vmcp_debug, 1, cmd);
-       session->resp_size = __cpcmd(cmd, session->response,
+       session->resp_size = cpcmd(cmd, session->response,
                                     session->bufsize,
                                     &session->resp_code);
        up(&session->mutex);
index b471ac4a1bf66a1a50b4a33a1f8ca5619648a1ae..ae1bf231d08945bcad13903266f44ad1a3a00be6 100644 (file)
@@ -880,19 +880,15 @@ static void cio_reset_pgm_check_handler(void)
 static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr)
 {
        int rc;
-       register struct subchannel_id reg1 asm ("1") = schid;
 
        pgm_check_occured = 0;
        s390_reset_pgm_handler = cio_reset_pgm_check_handler;
+       rc = stsch(schid, addr);
+       s390_reset_pgm_handler = NULL;
 
-       asm volatile(
-               "       stsch   0(%2)\n"
-               "       ipm     %0\n"
-               "       srl     %0,28"
-               : "=d" (rc)
-               : "d" (reg1), "a" (addr), "m" (*addr) : "memory", "cc");
+       /* The program check handler could have changed pgm_check_occured */
+       barrier();
 
-       s390_reset_pgm_handler = NULL;
        if (pgm_check_occured)
                return -EIO;
        else
index 1a93fa684e9fc42e1a4539f1b5a5509ec8facda8..52625153a4f09572b6311a1f2324e8f9add7b0f7 100644 (file)
@@ -27,10 +27,7 @@ config IUCV
        help
          Select this option if you want to use inter-user communication
          under VM or VIF. If unsure, say "Y" to enable a fast communication
-         link between VM guests. At boot time the user ID of the guest needs
-         to be passed to the kernel. Note that both kernels need to be
-         compiled with this option and both need to be booted with the user ID
-         of the other VM guest.
+         link between VM guests.
 
 config NETIUCV
        tristate "IUCV network device support (VM only)"
index 53c358c7d368ba4413447ee16bee91cd7274cde9..e95c281f1e36ac2fe97ab580fab739040c084e7f 100644 (file)
@@ -710,7 +710,7 @@ struct qeth_reply {
        int (*callback)(struct qeth_card *,struct qeth_reply *,unsigned long);
        u32 seqno;
        unsigned long offset;
-       int received;
+       atomic_t received;
        int rc;
        void *param;
        struct qeth_card *card;
index 2bde4f1fb9c2f320783f9ae08ced9b64fc7cf31f..d2efa5ff125def3fc2ea205165c071a6d43a9c64 100644 (file)
@@ -471,7 +471,7 @@ qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
            channel->state == CH_STATE_UP)
                qeth_issue_next_read(card);
 
-       tasklet_schedule(&channel->irq_tasklet);
+       qeth_irq_tasklet((unsigned long)channel);
        return;
 out:
        wake_up(&card->wait_q);
@@ -950,40 +950,6 @@ qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
        return rc;
 }
 
-static int
-qeth_register_ip_addresses(void *ptr)
-{
-       struct qeth_card *card;
-
-       card = (struct qeth_card *) ptr;
-       daemonize("qeth_reg_ip");
-       QETH_DBF_TEXT(trace,4,"regipth1");
-       if (!qeth_do_run_thread(card, QETH_SET_IP_THREAD))
-               return 0;
-       QETH_DBF_TEXT(trace,4,"regipth2");
-       qeth_set_ip_addr_list(card);
-       qeth_clear_thread_running_bit(card, QETH_SET_IP_THREAD);
-       return 0;
-}
-
-/*
- * Drive the SET_PROMISC_MODE thread
- */
-static int
-qeth_set_promisc_mode(void *ptr)
-{
-       struct qeth_card *card = (struct qeth_card *) ptr;
-
-       daemonize("qeth_setprm");
-       QETH_DBF_TEXT(trace,4,"setprm1");
-       if (!qeth_do_run_thread(card, QETH_SET_PROMISC_MODE_THREAD))
-               return 0;
-       QETH_DBF_TEXT(trace,4,"setprm2");
-       qeth_setadp_promisc_mode(card);
-       qeth_clear_thread_running_bit(card, QETH_SET_PROMISC_MODE_THREAD);
-       return 0;
-}
-
 static int
 qeth_recover(void *ptr)
 {
@@ -1047,11 +1013,6 @@ qeth_start_kernel_thread(struct work_struct *work)
        if (card->read.state != CH_STATE_UP &&
            card->write.state != CH_STATE_UP)
                return;
-
-       if (qeth_do_start_thread(card, QETH_SET_IP_THREAD))
-               kernel_thread(qeth_register_ip_addresses, (void *)card,SIGCHLD);
-       if (qeth_do_start_thread(card, QETH_SET_PROMISC_MODE_THREAD))
-               kernel_thread(qeth_set_promisc_mode, (void *)card, SIGCHLD);
        if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
                kernel_thread(qeth_recover, (void *) card, SIGCHLD);
 }
@@ -1074,7 +1035,7 @@ qeth_set_intial_options(struct qeth_card *card)
                card->options.layer2 = 1;
        else
                card->options.layer2 = 0;
-       card->options.performance_stats = 1;
+       card->options.performance_stats = 0;
 }
 
 /**
@@ -1613,8 +1574,6 @@ qeth_issue_next_read(struct qeth_card *card)
                return -ENOMEM;
        }
        qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
-       wait_event(card->wait_q,
-                  atomic_cmpxchg(&card->read.irq_pending, 0, 1) == 0);
        QETH_DBF_TEXT(trace, 6, "noirqpnd");
        rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
                              (addr_t) iob, 0, 0);
@@ -1635,6 +1594,7 @@ qeth_alloc_reply(struct qeth_card *card)
        reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
        if (reply){
                atomic_set(&reply->refcnt, 1);
+               atomic_set(&reply->received, 0);
                reply->card = card;
        };
        return reply;
@@ -1655,31 +1615,6 @@ qeth_put_reply(struct qeth_reply *reply)
                kfree(reply);
 }
 
-static void
-qeth_cmd_timeout(unsigned long data)
-{
-       struct qeth_reply *reply, *list_reply, *r;
-       unsigned long flags;
-
-       reply = (struct qeth_reply *) data;
-       spin_lock_irqsave(&reply->card->lock, flags);
-       list_for_each_entry_safe(list_reply, r,
-                                &reply->card->cmd_waiter_list, list) {
-               if (reply == list_reply){
-                       qeth_get_reply(reply);
-                       list_del_init(&reply->list);
-                       spin_unlock_irqrestore(&reply->card->lock, flags);
-                       reply->rc = -ETIME;
-                       reply->received = 1;
-                       wake_up(&reply->wait_q);
-                       qeth_put_reply(reply);
-                       return;
-               }
-       }
-       spin_unlock_irqrestore(&reply->card->lock, flags);
-}
-
-
 static struct qeth_ipa_cmd *
 qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
 {
@@ -1745,7 +1680,7 @@ qeth_clear_ipacmd_list(struct qeth_card *card)
        list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
                qeth_get_reply(reply);
                reply->rc = -EIO;
-               reply->received = 1;
+               atomic_inc(&reply->received);
                list_del_init(&reply->list);
                wake_up(&reply->wait_q);
                qeth_put_reply(reply);
@@ -1814,7 +1749,7 @@ qeth_send_control_data_cb(struct qeth_channel *channel,
                                              &card->cmd_waiter_list);
                                spin_unlock_irqrestore(&card->lock, flags);
                        } else {
-                               reply->received = 1;
+                               atomic_inc(&reply->received);
                                wake_up(&reply->wait_q);
                        }
                        qeth_put_reply(reply);
@@ -1858,7 +1793,7 @@ qeth_send_control_data(struct qeth_card *card, int len,
        int rc;
        unsigned long flags;
        struct qeth_reply *reply = NULL;
-       struct timer_list timer;
+       unsigned long timeout;
 
        QETH_DBF_TEXT(trace, 2, "sendctl");
 
@@ -1873,21 +1808,20 @@ qeth_send_control_data(struct qeth_card *card, int len,
                reply->seqno = QETH_IDX_COMMAND_SEQNO;
        else
                reply->seqno = card->seqno.ipa++;
-       init_timer(&timer);
-       timer.function = qeth_cmd_timeout;
-       timer.data = (unsigned long) reply;
        init_waitqueue_head(&reply->wait_q);
        spin_lock_irqsave(&card->lock, flags);
        list_add_tail(&reply->list, &card->cmd_waiter_list);
        spin_unlock_irqrestore(&card->lock, flags);
        QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
-       wait_event(card->wait_q,
-                  atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
+
+       while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
        qeth_prepare_control_data(card, len, iob);
+
        if (IS_IPA(iob->data))
-               timer.expires = jiffies + QETH_IPA_TIMEOUT;
+               timeout = jiffies + QETH_IPA_TIMEOUT;
        else
-               timer.expires = jiffies + QETH_TIMEOUT;
+               timeout = jiffies + QETH_TIMEOUT;
+
        QETH_DBF_TEXT(trace, 6, "noirqpnd");
        spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
        rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
@@ -1906,9 +1840,16 @@ qeth_send_control_data(struct qeth_card *card, int len,
                wake_up(&card->wait_q);
                return rc;
        }
-       add_timer(&timer);
-       wait_event(reply->wait_q, reply->received);
-       del_timer_sync(&timer);
+       while (!atomic_read(&reply->received)) {
+               if (time_after(jiffies, timeout)) {
+                       spin_lock_irqsave(&reply->card->lock, flags);
+                       list_del_init(&reply->list);
+                       spin_unlock_irqrestore(&reply->card->lock, flags);
+                       reply->rc = -ETIME;
+                       atomic_inc(&reply->received);
+                       wake_up(&reply->wait_q);
+               }
+       }
        rc = reply->rc;
        qeth_put_reply(reply);
        return rc;
@@ -2466,32 +2407,17 @@ qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
                qeth_rebuild_skb_fake_ll_eth(card, skb, hdr);
 }
 
-static inline __u16
+static inline void
 qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
                        struct qeth_hdr *hdr)
 {
-       unsigned short vlan_id = 0;
-#ifdef CONFIG_QETH_VLAN
-       struct vlan_hdr *vhdr;
-#endif
-
        skb->pkt_type = PACKET_HOST;
        skb->protocol = qeth_type_trans(skb, skb->dev);
        if (card->options.checksum_type == NO_CHECKSUMMING)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb->ip_summed = CHECKSUM_NONE;
-#ifdef CONFIG_QETH_VLAN
-       if (hdr->hdr.l2.flags[2] & (QETH_LAYER2_FLAG_VLAN)) {
-               vhdr = (struct vlan_hdr *) skb->data;
-               skb->protocol =
-                       __constant_htons(vhdr->h_vlan_encapsulated_proto);
-               vlan_id = hdr->hdr.l2.vlan_id;
-               skb_pull(skb, VLAN_HLEN);
-       }
-#endif
        *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
-       return vlan_id;
 }
 
 static inline __u16
@@ -2560,7 +2486,6 @@ qeth_process_inbound_buffer(struct qeth_card *card,
        int offset;
        int rxrc;
        __u16 vlan_tag = 0;
-       __u16 *vlan_addr;
 
        /* get first element of current buffer */
        element = (struct qdio_buffer_element *)&buf->buffer->element[0];
@@ -2571,7 +2496,7 @@ qeth_process_inbound_buffer(struct qeth_card *card,
                                       &offset, &hdr))) {
                skb->dev = card->dev;
                if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
-                       vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr);
+                       qeth_layer2_rebuild_skb(card, skb, hdr);
                else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
                        vlan_tag = qeth_rebuild_skb(card, skb, hdr);
                else { /*in case of OSN*/
@@ -3968,13 +3893,22 @@ static inline struct sk_buff *
 qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
                 struct qeth_hdr **hdr, int ipv)
 {
-       struct sk_buff *new_skb;
+       struct sk_buff *new_skb, *new_skb2;
        
        QETH_DBF_TEXT(trace, 6, "prepskb");
-
-        new_skb = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
-               if (new_skb == NULL)
+       new_skb = skb;
+       new_skb = qeth_pskb_unshare(skb, GFP_ATOMIC);
+       if (!new_skb)
+               return NULL;
+       new_skb2 = qeth_realloc_headroom(card, new_skb,
+                                        sizeof(struct qeth_hdr));
+       if (!new_skb2) {
+               __qeth_free_new_skb(skb, new_skb);
                return NULL;
+       }
+       if (new_skb != skb)
+               __qeth_free_new_skb(new_skb2, new_skb);
+       new_skb = new_skb2;
        *hdr = __qeth_prepare_skb(card, new_skb, ipv);
        if (*hdr == NULL) {
                __qeth_free_new_skb(skb, new_skb);
@@ -4844,9 +4778,11 @@ qeth_arp_query(struct qeth_card *card, char __user *udata)
                           "(0x%x/%d)\n",
                           QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
                           tmp, tmp);
-               copy_to_user(udata, qinfo.udata, 4);
+               if (copy_to_user(udata, qinfo.udata, 4))
+                       rc = -EFAULT;
        } else {
-               copy_to_user(udata, qinfo.udata, qinfo.udata_len);
+               if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
+                       rc = -EFAULT;
        }
        kfree(qinfo.udata);
        return rc;
@@ -4992,8 +4928,10 @@ qeth_snmp_command(struct qeth_card *card, char __user *udata)
        if (rc)
                PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
                           QETH_CARD_IFNAME(card), rc);
-        else
-               copy_to_user(udata, qinfo.udata, qinfo.udata_len);
+       else {
+               if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
+                       rc = -EFAULT;
+       }
 
        kfree(ureq);
        kfree(qinfo.udata);
@@ -5544,12 +5482,10 @@ qeth_set_multicast_list(struct net_device *dev)
        qeth_add_multicast_ipv6(card);
 #endif
 out:
-       if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
-               schedule_work(&card->kernel_thread_starter);
+       qeth_set_ip_addr_list(card);
        if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
                return;
-       if (qeth_set_thread_start_bit(card, QETH_SET_PROMISC_MODE_THREAD)==0)
-               schedule_work(&card->kernel_thread_starter);
+       qeth_setadp_promisc_mode(card);
 }
 
 static int
@@ -6350,6 +6286,42 @@ static struct ethtool_ops qeth_ethtool_ops = {
        .set_tso     = qeth_ethtool_set_tso,
 };
 
+static int
+qeth_hard_header_parse(struct sk_buff *skb, unsigned char *haddr)
+{
+       struct qeth_card *card;
+       struct ethhdr *eth;
+
+       card = qeth_get_card_from_dev(skb->dev);
+       if (card->options.layer2)
+               goto haveheader;
+#ifdef CONFIG_QETH_IPV6
+       /* because of the manipulated ARP constructor and the ARP
+          flag for OSAE devices we have some nasty exceptions */
+       if (card->info.type == QETH_CARD_TYPE_OSAE) {
+               if (!card->options.fake_ll) {
+                       if ((skb->pkt_type==PACKET_OUTGOING) &&
+                           (skb->protocol==ETH_P_IPV6))
+                               goto haveheader;
+                       else
+                               return 0;
+               } else {
+                       if ((skb->pkt_type==PACKET_OUTGOING) &&
+                           (skb->protocol==ETH_P_IP))
+                               return 0;
+                       else
+                               goto haveheader;
+               }
+       }
+#endif
+       if (!card->options.fake_ll)
+               return 0;
+haveheader:
+       eth = eth_hdr(skb);
+       memcpy(haddr, eth->h_source, ETH_ALEN);
+       return ETH_ALEN;
+}
+
 static int
 qeth_netdev_init(struct net_device *dev)
 {
@@ -6388,7 +6360,10 @@ qeth_netdev_init(struct net_device *dev)
        if (card->options.fake_ll &&
                (qeth_get_netdev_flags(card) & IFF_NOARP))
                        dev->hard_header = qeth_fake_header;
-       dev->hard_header_parse = NULL;
+       if (dev->type == ARPHRD_IEEE802_TR)
+               dev->hard_header_parse = NULL;
+       else
+               dev->hard_header_parse = qeth_hard_header_parse;
        dev->set_mac_address = qeth_layer2_set_mac_address;
        dev->flags |= qeth_get_netdev_flags(card);
        if ((card->options.fake_broadcast) ||
@@ -8235,8 +8210,7 @@ qeth_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
        }
        if (!qeth_add_ip(card, ipaddr))
                kfree(ipaddr);
-       if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
-               schedule_work(&card->kernel_thread_starter);
+       qeth_set_ip_addr_list(card);
        return rc;
 }
 
@@ -8264,8 +8238,7 @@ qeth_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
                return;
        if (!qeth_delete_ip(card, ipaddr))
                kfree(ipaddr);
-       if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
-               schedule_work(&card->kernel_thread_starter);
+       qeth_set_ip_addr_list(card);
 }
 
 /*
@@ -8308,8 +8281,7 @@ qeth_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
        }
        if (!qeth_add_ip(card, ipaddr))
                kfree(ipaddr);
-       if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
-               schedule_work(&card->kernel_thread_starter);
+       qeth_set_ip_addr_list(card);
        return 0;
 }
 
@@ -8337,8 +8309,7 @@ qeth_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
                return;
        if (!qeth_delete_ip(card, ipaddr))
                kfree(ipaddr);
-       if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
-               schedule_work(&card->kernel_thread_starter);
+       qeth_set_ip_addr_list(card);
 }
 
 /**
@@ -8380,8 +8351,7 @@ qeth_ip_event(struct notifier_block *this,
        default:
                break;
        }
-       if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
-               schedule_work(&card->kernel_thread_starter);
+       qeth_set_ip_addr_list(card);
 out:
        return NOTIFY_DONE;
 }
@@ -8433,8 +8403,7 @@ qeth_ip6_event(struct notifier_block *this,
        default:
                break;
        }
-       if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
-               schedule_work(&card->kernel_thread_starter);
+       qeth_set_ip_addr_list(card);
 out:
        return NOTIFY_DONE;
 }
index 9d11a75663e668462ca871851ac647529d9d8ad2..3c4b6c24371254b1a8a6bcfa27fa922a6069fc9d 100644 (file)
@@ -789,7 +789,9 @@ static struct console mpc52xx_console = {
 static int __init
 mpc52xx_console_init(void)
 {
+#if defined(CONFIG_PPC_MERGE)
        mpc52xx_uart_of_enumerate();
+#endif
        register_console(&mpc52xx_console);
        return 0;
 }
index 24ee8be359f5e3d86fa6846fef17d4e9e8152df4..6377db1b446ddf5b4be500e96619152b500e412a 100644 (file)
@@ -217,6 +217,7 @@ static const struct quirk_printer_struct quirk_printers[] = {
        { 0x0409, 0xbef4, USBLP_QUIRK_BIDIR }, /* NEC Picty760 (HP OEM) */
        { 0x0409, 0xf0be, USBLP_QUIRK_BIDIR }, /* NEC Picty920 (HP OEM) */
        { 0x0409, 0xf1be, USBLP_QUIRK_BIDIR }, /* NEC Picty800 (HP OEM) */
+       { 0x0482, 0x0010, USBLP_QUIRK_BIDIR }, /* Kyocera Mita FS 820, by zut <kernel@zut.de> */
        { 0, 0 }
 };
 
index c505b767cee108e7af7af7f2fb0220e75d54c0d5..5e628ae3aec714ba9469df02ed3f2bae4989c129 100644 (file)
@@ -268,6 +268,7 @@ static void ep_device_release(struct device *dev)
        struct ep_device *ep_dev = to_ep_device(dev);
 
        dev_dbg(dev, "%s called for %s\n", __FUNCTION__, dev->bus_id);
+       endpoint_free_minor(ep_dev);
        kfree(ep_dev);
 }
 
@@ -349,7 +350,6 @@ void usb_remove_ep_files(struct usb_host_endpoint *endpoint)
                sprintf(name, "ep_%02x", endpoint->desc.bEndpointAddress);
                sysfs_remove_link(&ep_dev->dev.parent->kobj, name);
                sysfs_remove_group(&ep_dev->dev.kobj, &ep_dev_attr_grp);
-               endpoint_free_minor(ep_dev);
                device_unregister(&ep_dev->dev);
                endpoint->ep_dev = NULL;
                destroy_endpoint_class();
index 15d77c30793093e18014c9fafd63daa6376e0095..cdcfd42843d46053deda8fe3863a67004079877a 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/usb_gadget.h>
 #include <linux/usb/otg.h>
 #include <linux/dma-mapping.h>
+#include <linux/clk.h>
 
 #include <asm/byteorder.h>
 #include <asm/io.h>
 /* bulk DMA seems to be behaving for both IN and OUT */
 #define        USE_DMA
 
+/* FIXME: OMAP2 currently has problems in DMA mode */
+#ifdef CONFIG_ARCH_OMAP2
+#undef USE_DMA
+#endif
+
 /* ISO too */
 #define        USE_ISO
 
@@ -99,7 +105,7 @@ static unsigned fifo_mode = 0;
  * boot parameter "omap_udc:fifo_mode=42"
  */
 module_param (fifo_mode, uint, 0);
-MODULE_PARM_DESC (fifo_mode, "endpoint setup (0 == default)");
+MODULE_PARM_DESC (fifo_mode, "endpoint configuration");
 
 #ifdef USE_DMA
 static unsigned use_dma = 1;
@@ -122,7 +128,7 @@ static const char driver_desc [] = DRIVER_DESC;
 /*-------------------------------------------------------------------------*/
 
 /* there's a notion of "current endpoint" for modifying endpoint
- * state, and PIO access to its FIFO.  
+ * state, and PIO access to its FIFO.
  */
 
 static void use_ep(struct omap_ep *ep, u16 select)
@@ -391,7 +397,7 @@ done(struct omap_ep *ep, struct omap_req *req, int status)
 #define FIFO_EMPTY     (UDC_NON_ISO_FIFO_EMPTY | UDC_ISO_FIFO_EMPTY)
 #define FIFO_UNREADABLE (UDC_EP_HALTED | FIFO_EMPTY)
 
-static inline int 
+static inline int
 write_packet(u8 *buf, struct omap_req *req, unsigned max)
 {
        unsigned        len;
@@ -456,7 +462,7 @@ static int write_fifo(struct omap_ep *ep, struct omap_req *req)
        return is_last;
 }
 
-static inline int 
+static inline int
 read_packet(u8 *buf, struct omap_req *req, unsigned avail)
 {
        unsigned        len;
@@ -542,9 +548,9 @@ static inline dma_addr_t dma_csac(unsigned lch)
        /* omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
         * read before the DMA controller finished disabling the channel.
         */
-       csac = omap_readw(OMAP_DMA_CSAC(lch));
+       csac = OMAP_DMA_CSAC_REG(lch);
        if (csac == 0)
-               csac = omap_readw(OMAP_DMA_CSAC(lch));
+               csac = OMAP_DMA_CSAC_REG(lch);
        return csac;
 }
 
@@ -555,9 +561,9 @@ static inline dma_addr_t dma_cdac(unsigned lch)
        /* omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
         * read before the DMA controller finished disabling the channel.
         */
-       cdac = omap_readw(OMAP_DMA_CDAC(lch));
+       cdac = OMAP_DMA_CDAC_REG(lch);
        if (cdac == 0)
-               cdac = omap_readw(OMAP_DMA_CDAC(lch));
+               cdac = OMAP_DMA_CDAC_REG(lch);
        return cdac;
 }
 
@@ -582,7 +588,7 @@ static u16 dma_src_len(struct omap_ep *ep, dma_addr_t start)
 }
 
 #define DMA_DEST_LAST(x) (cpu_is_omap15xx() \
-               ? omap_readw(OMAP_DMA_CSAC(x)) /* really: CPC */ \
+               ? OMAP_DMA_CSAC_REG(x) /* really: CPC */ \
                : dma_cdac(x))
 
 static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start)
@@ -620,17 +626,19 @@ static void next_in_dma(struct omap_ep *ep, struct omap_req *req)
                        || (cpu_is_omap15xx() && length < ep->maxpacket)) {
                txdma_ctrl = UDC_TXN_EOT | length;
                omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8,
-                               length, 1, sync_mode);
+                               length, 1, sync_mode, 0, 0);
        } else {
                length = min(length / ep->maxpacket,
                                (unsigned) UDC_TXN_TSC + 1);
-               txdma_ctrl = length;
+               txdma_ctrl = length;
                omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16,
-                               ep->ep.maxpacket >> 1, length, sync_mode);
+                               ep->ep.maxpacket >> 1, length, sync_mode,
+                               0, 0);
                length *= ep->maxpacket;
        }
        omap_set_dma_src_params(ep->lch, OMAP_DMA_PORT_EMIFF,
-               OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual);
+               OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
+               0, 0);
 
        omap_start_dma(ep->lch);
        ep->dma_counter = dma_csac(ep->lch);
@@ -675,9 +683,11 @@ static void next_out_dma(struct omap_ep *ep, struct omap_req *req)
        req->dma_bytes = packets * ep->ep.maxpacket;
        omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16,
                        ep->ep.maxpacket >> 1, packets,
-                       OMAP_DMA_SYNC_ELEMENT);
+                       OMAP_DMA_SYNC_ELEMENT,
+                       0, 0);
        omap_set_dma_dest_params(ep->lch, OMAP_DMA_PORT_EMIFF,
-               OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual);
+               OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
+               0, 0);
        ep->dma_counter = DMA_DEST_LAST(ep->lch);
 
        UDC_RXDMA_REG(ep->dma_channel) = UDC_RXN_STOP | (packets - 1);
@@ -820,7 +830,8 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
                        omap_set_dma_dest_params(ep->lch,
                                OMAP_DMA_PORT_TIPB,
                                OMAP_DMA_AMODE_CONSTANT,
-                               (unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG));
+                               (unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG),
+                               0, 0);
                }
        } else {
                status = omap_request_dma(OMAP_DMA_USB_W2FC_RX0 - 1 + channel,
@@ -831,7 +842,8 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
                        omap_set_dma_src_params(ep->lch,
                                OMAP_DMA_PORT_TIPB,
                                OMAP_DMA_AMODE_CONSTANT,
-                               (unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG));
+                               (unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG),
+                               0, 0);
                        /* EMIFF */
                        omap_set_dma_dest_burst_mode(ep->lch,
                                                OMAP_DMA_DATA_BURST_4);
@@ -846,7 +858,7 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
 
                /* channel type P: hw synch (fifo) */
                if (!cpu_is_omap15xx())
-                       omap_writew(2, OMAP_DMA_LCH_CTRL(ep->lch));
+                       OMAP1_DMA_LCH_CTRL_REG(ep->lch) = 2;
        }
 
 just_restart:
@@ -893,7 +905,7 @@ static void dma_channel_release(struct omap_ep *ep)
        else
                req = NULL;
 
-       active = ((1 << 7) & omap_readl(OMAP_DMA_CCR(ep->lch))) != 0;
+       active = ((1 << 7) & OMAP_DMA_CCR_REG(ep->lch)) != 0;
 
        DBG("%s release %s %cxdma%d %p\n", ep->ep.name,
                        active ? "active" : "idle",
@@ -1117,7 +1129,7 @@ static int omap_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
                 */
                dma_channel_release(ep);
                dma_channel_claim(ep, channel);
-       } else 
+       } else
                done(ep, req, -ECONNRESET);
        spin_unlock_irqrestore(&ep->udc->lock, flags);
        return 0;
@@ -1153,7 +1165,7 @@ static int omap_ep_set_halt(struct usb_ep *_ep, int value)
 
                /* IN endpoints must already be idle */
                if ((ep->bEndpointAddress & USB_DIR_IN)
-                               && !list_empty(&ep->queue)) { 
+                               && !list_empty(&ep->queue)) {
                        status = -EAGAIN;
                        goto done;
                }
@@ -1298,6 +1310,23 @@ static void pullup_disable(struct omap_udc *udc)
        UDC_SYSCON1_REG &= ~UDC_PULLUP_EN;
 }
 
+static struct omap_udc *udc;
+
+static void omap_udc_enable_clock(int enable)
+{
+       if (udc == NULL || udc->dc_clk == NULL || udc->hhc_clk == NULL)
+               return;
+
+       if (enable) {
+               clk_enable(udc->dc_clk);
+               clk_enable(udc->hhc_clk);
+               udelay(100);
+       } else {
+               clk_disable(udc->hhc_clk);
+               clk_disable(udc->dc_clk);
+       }
+}
+
 /*
  * Called by whatever detects VBUS sessions:  external transceiver
  * driver, or maybe GPIO0 VBUS IRQ.  May request 48 MHz clock.
@@ -1318,10 +1347,22 @@ static int omap_vbus_session(struct usb_gadget *gadget, int is_active)
                else
                        FUNC_MUX_CTRL_0_REG &= ~VBUS_CTRL_1510;
        }
+       if (udc->dc_clk != NULL && is_active) {
+               if (!udc->clk_requested) {
+                       omap_udc_enable_clock(1);
+                       udc->clk_requested = 1;
+               }
+       }
        if (can_pullup(udc))
                pullup_enable(udc);
        else
                pullup_disable(udc);
+       if (udc->dc_clk != NULL && !is_active) {
+               if (udc->clk_requested) {
+                       omap_udc_enable_clock(0);
+                       udc->clk_requested = 0;
+               }
+       }
        spin_unlock_irqrestore(&udc->lock, flags);
        return 0;
 }
@@ -1441,7 +1482,7 @@ static void ep0_irq(struct omap_udc *udc, u16 irq_src)
                }
        }
 
-       /* IN/OUT packets mean we're in the DATA or STATUS stage.  
+       /* IN/OUT packets mean we're in the DATA or STATUS stage.
         * This driver only uses protocol stalls (ep0 never halts),
         * and if we got this far the gadget driver already had a
         * chance to stall.  Tries to be forgiving of host oddities.
@@ -1509,7 +1550,7 @@ static void ep0_irq(struct omap_udc *udc, u16 irq_src)
                                } else if (stat == 0)
                                        UDC_CTRL_REG = UDC_SET_FIFO_EN;
                                UDC_EP_NUM_REG = 0;
-                               
+
                                /* activate status stage */
                                if (stat == 1) {
                                        done(ep0, req, 0);
@@ -1866,7 +1907,7 @@ static void pio_out_timer(unsigned long _ep)
 
        spin_lock_irqsave(&ep->udc->lock, flags);
        if (!list_empty(&ep->queue) && ep->ackwait) {
-               use_ep(ep, 0);
+               use_ep(ep, UDC_EP_SEL);
                stat_flg = UDC_STAT_FLG_REG;
 
                if ((stat_flg & UDC_ACK) && (!(stat_flg & UDC_FIFO_EN)
@@ -1876,12 +1917,12 @@ static void pio_out_timer(unsigned long _ep)
                        VDBG("%s: lose, %04x\n", ep->ep.name, stat_flg);
                        req = container_of(ep->queue.next,
                                        struct omap_req, queue);
-                       UDC_EP_NUM_REG = ep->bEndpointAddress | UDC_EP_SEL;
                        (void) read_fifo(ep, req);
                        UDC_EP_NUM_REG = ep->bEndpointAddress;
                        UDC_CTRL_REG = UDC_SET_FIFO_EN;
                        ep->ackwait = 1 + ep->double_buf;
-               }
+               } else
+                       deselect_ep();
        }
        mod_timer(&ep->timer, PIO_OUT_TIMEOUT);
        spin_unlock_irqrestore(&ep->udc->lock, flags);
@@ -2028,7 +2069,17 @@ static irqreturn_t omap_udc_iso_irq(int irq, void *_dev)
 
 /*-------------------------------------------------------------------------*/
 
-static struct omap_udc *udc;
+static inline int machine_needs_vbus_session(void)
+{
+       return (machine_is_omap_innovator()
+               || machine_is_omap_osk()
+               || machine_is_omap_apollon()
+#ifndef CONFIG_MACH_OMAP_H4_OTG
+               || machine_is_omap_h4()
+#endif
+               || machine_is_sx1()
+               );
+}
 
 int usb_gadget_register_driver (struct usb_gadget_driver *driver)
 {
@@ -2070,6 +2121,9 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
        udc->gadget.dev.driver = &driver->driver;
        spin_unlock_irqrestore(&udc->lock, flags);
 
+       if (udc->dc_clk != NULL)
+               omap_udc_enable_clock(1);
+
        status = driver->bind (&udc->gadget);
        if (status) {
                DBG("bind to %s --> %d\n", driver->driver.name, status);
@@ -2103,10 +2157,12 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
        /* boards that don't have VBUS sensing can't autogate 48MHz;
         * can't enter deep sleep while a gadget driver is active.
         */
-       if (machine_is_omap_innovator() || machine_is_omap_osk())
+       if (machine_needs_vbus_session())
                omap_vbus_session(&udc->gadget, 1);
 
 done:
+       if (udc->dc_clk != NULL)
+               omap_udc_enable_clock(0);
        return status;
 }
 EXPORT_SYMBOL(usb_gadget_register_driver);
@@ -2121,7 +2177,10 @@ int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
        if (!driver || driver != udc->driver || !driver->unbind)
                return -EINVAL;
 
-       if (machine_is_omap_innovator() || machine_is_omap_osk())
+       if (udc->dc_clk != NULL)
+               omap_udc_enable_clock(1);
+
+       if (machine_needs_vbus_session())
                omap_vbus_session(&udc->gadget, 0);
 
        if (udc->transceiver)
@@ -2137,6 +2196,8 @@ int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
        udc->gadget.dev.driver = NULL;
        udc->driver = NULL;
 
+       if (udc->dc_clk != NULL)
+               omap_udc_enable_clock(0);
        DBG("unregistered driver '%s'\n", driver->driver.name);
        return status;
 }
@@ -2219,7 +2280,7 @@ static char *trx_mode(unsigned m, int enabled)
        case 0:         return enabled ? "*6wire" : "unused";
        case 1:         return "4wire";
        case 2:         return "3wire";
-       case 3:         return "6wire";
+       case 3:         return "6wire";
        default:        return "unknown";
        }
 }
@@ -2228,11 +2289,18 @@ static int proc_otg_show(struct seq_file *s)
 {
        u32             tmp;
        u32             trans;
+       char            *ctrl_name;
 
        tmp = OTG_REV_REG;
-       trans = USB_TRANSCEIVER_CTRL_REG;
-       seq_printf(s, "\nOTG rev %d.%d, transceiver_ctrl %05x\n",
-               tmp >> 4, tmp & 0xf, trans);
+       if (cpu_is_omap24xx()) {
+               ctrl_name = "control_devconf";
+               trans = CONTROL_DEVCONF_REG;
+       } else {
+               ctrl_name = "tranceiver_ctrl";
+               trans = USB_TRANSCEIVER_CTRL_REG;
+       }
+       seq_printf(s, "\nOTG rev %d.%d, %s %05x\n",
+               tmp >> 4, tmp & 0xf, ctrl_name, trans);
        tmp = OTG_SYSCON_1_REG;
        seq_printf(s, "otg_syscon1 %08x usb2 %s, usb1 %s, usb0 %s,"
                        FOURBITS "\n", tmp,
@@ -2307,7 +2375,7 @@ static int proc_udc_show(struct seq_file *s, void *_)
                driver_desc,
                use_dma ?  " (dma)" : "");
 
-       tmp = UDC_REV_REG & 0xff; 
+       tmp = UDC_REV_REG & 0xff;
        seq_printf(s,
                "UDC rev %d.%d, fifo mode %d, gadget %s\n"
                "hmc %d, transceiver %s\n",
@@ -2315,11 +2383,16 @@ static int proc_udc_show(struct seq_file *s, void *_)
                fifo_mode,
                udc->driver ? udc->driver->driver.name : "(none)",
                HMC,
-               udc->transceiver ? udc->transceiver->label : "(none)");
-       seq_printf(s, "ULPD control %04x req %04x status %04x\n",
-               __REG16(ULPD_CLOCK_CTRL),
-               __REG16(ULPD_SOFT_REQ),
-               __REG16(ULPD_STATUS_REQ));
+               udc->transceiver
+                       ? udc->transceiver->label
+                       : ((cpu_is_omap1710() || cpu_is_omap24xx())
+                               ? "external" : "(none)"));
+       if (cpu_class_is_omap1()) {
+               seq_printf(s, "ULPD control %04x req %04x status %04x\n",
+                       __REG16(ULPD_CLOCK_CTRL),
+                       __REG16(ULPD_SOFT_REQ),
+                       __REG16(ULPD_STATUS_REQ));
+       }
 
        /* OTG controller registers */
        if (!cpu_is_omap15xx())
@@ -2504,9 +2577,10 @@ omap_ep_setup(char *name, u8 addr, u8 type,
                dbuf = 1;
        } else {
                /* double-buffering "not supported" on 15xx,
-                * and ignored for PIO-IN on 16xx
+                * and ignored for PIO-IN on newer chips
+                * (for more reliable behavior)
                 */
-               if (!use_dma || cpu_is_omap15xx())
+               if (!use_dma || cpu_is_omap15xx() || cpu_is_omap24xx())
                        dbuf = 0;
 
                switch (maxp) {
@@ -2549,7 +2623,7 @@ omap_ep_setup(char *name, u8 addr, u8 type,
        ep->bEndpointAddress = addr;
        ep->bmAttributes = type;
        ep->double_buf = dbuf;
-       ep->udc = udc; 
+       ep->udc = udc;
 
        ep->ep.name = ep->name;
        ep->ep.ops = &omap_ep_ops;
@@ -2709,15 +2783,37 @@ static int __init omap_udc_probe(struct platform_device *pdev)
        struct otg_transceiver  *xceiv = NULL;
        const char              *type = NULL;
        struct omap_usb_config  *config = pdev->dev.platform_data;
+       struct clk              *dc_clk;
+       struct clk              *hhc_clk;
 
        /* NOTE:  "knows" the order of the resources! */
-       if (!request_mem_region(pdev->resource[0].start, 
+       if (!request_mem_region(pdev->resource[0].start,
                        pdev->resource[0].end - pdev->resource[0].start + 1,
                        driver_name)) {
                DBG("request_mem_region failed\n");
                return -EBUSY;
        }
 
+       if (cpu_is_omap16xx()) {
+               dc_clk = clk_get(&pdev->dev, "usb_dc_ck");
+               hhc_clk = clk_get(&pdev->dev, "usb_hhc_ck");
+               BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
+               /* can't use omap_udc_enable_clock yet */
+               clk_enable(dc_clk);
+               clk_enable(hhc_clk);
+               udelay(100);
+       }
+
+       if (cpu_is_omap24xx()) {
+               dc_clk = clk_get(&pdev->dev, "usb_fck");
+               hhc_clk = clk_get(&pdev->dev, "usb_l4_ick");
+               BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
+               /* can't use omap_udc_enable_clock yet */
+               clk_enable(dc_clk);
+               clk_enable(hhc_clk);
+               udelay(100);
+       }
+
        INFO("OMAP UDC rev %d.%d%s\n",
                UDC_REV_REG >> 4, UDC_REV_REG & 0xf,
                config->otg ? ", Mini-AB" : "");
@@ -2727,7 +2823,7 @@ static int __init omap_udc_probe(struct platform_device *pdev)
                hmc = HMC_1510;
                type = "(unknown)";
 
-               if (machine_is_omap_innovator()) {
+               if (machine_is_omap_innovator() || machine_is_sx1()) {
                        /* just set up software VBUS detect, and then
                         * later rig it so we always report VBUS.
                         * FIXME without really sensing VBUS, we can't
@@ -2756,6 +2852,15 @@ static int __init omap_udc_probe(struct platform_device *pdev)
                }
 
                hmc = HMC_1610;
+
+               if (cpu_is_omap24xx()) {
+                       /* this could be transceiverless in one of the
+                        * "we don't need to know" modes.
+                        */
+                       type = "external";
+                       goto known;
+               }
+
                switch (hmc) {
                case 0:                 /* POWERUP DEFAULT == 0 */
                case 4:
@@ -2794,6 +2899,7 @@ bad_on_1710:
                        goto cleanup0;
                }
        }
+known:
        INFO("hmc mode %d, %s transceiver\n", hmc, type);
 
        /* a "gadget" abstracts/virtualizes the controller */
@@ -2818,8 +2924,8 @@ bad_on_1710:
        status = request_irq(pdev->resource[1].start, omap_udc_irq,
                        IRQF_SAMPLE_RANDOM, driver_name, udc);
        if (status != 0) {
-               ERR( "can't get irq %ld, err %d\n",
-                       pdev->resource[1].start, status);
+               ERR("can't get irq %d, err %d\n",
+                       (int) pdev->resource[1].start, status);
                goto cleanup1;
        }
 
@@ -2827,24 +2933,41 @@ bad_on_1710:
        status = request_irq(pdev->resource[2].start, omap_udc_pio_irq,
                        IRQF_SAMPLE_RANDOM, "omap_udc pio", udc);
        if (status != 0) {
-               ERR( "can't get irq %ld, err %d\n",
-                       pdev->resource[2].start, status);
+               ERR("can't get irq %d, err %d\n",
+                       (int) pdev->resource[2].start, status);
                goto cleanup2;
        }
 #ifdef USE_ISO
        status = request_irq(pdev->resource[3].start, omap_udc_iso_irq,
                        IRQF_DISABLED, "omap_udc iso", udc);
        if (status != 0) {
-               ERR("can't get irq %ld, err %d\n",
-                       pdev->resource[3].start, status);
+               ERR("can't get irq %d, err %d\n",
+                       (int) pdev->resource[3].start, status);
                goto cleanup3;
        }
 #endif
+       if (cpu_is_omap16xx()) {
+               udc->dc_clk = dc_clk;
+               udc->hhc_clk = hhc_clk;
+               clk_disable(hhc_clk);
+               clk_disable(dc_clk);
+       }
+
+       if (cpu_is_omap24xx()) {
+               udc->dc_clk = dc_clk;
+               udc->hhc_clk = hhc_clk;
+               /* FIXME: on OMAP2 we don't release the hhc & dc clocks yet */
+#if 0
+               clk_disable(hhc_clk);
+               clk_disable(dc_clk);
+#endif
+       }
 
        create_proc_file();
-       device_add(&udc->gadget.dev);
-       return 0;
-
+       status = device_add(&udc->gadget.dev);
+       if (!status)
+               return status;
+       /* If it fails, fall through to the cleanup path below */
 #ifdef USE_ISO
 cleanup3:
        free_irq(pdev->resource[2].start, udc);
@@ -2860,8 +2983,17 @@ cleanup1:
 cleanup0:
        if (xceiv)
                put_device(xceiv->dev);
+
+       if (cpu_is_omap16xx() || cpu_is_omap24xx()) {
+               clk_disable(hhc_clk);
+               clk_disable(dc_clk);
+               clk_put(hhc_clk);
+               clk_put(dc_clk);
+       }
+
        release_mem_region(pdev->resource[0].start,
                        pdev->resource[0].end - pdev->resource[0].start + 1);
+
        return status;
 }
 
@@ -2891,6 +3023,13 @@ static int __exit omap_udc_remove(struct platform_device *pdev)
        free_irq(pdev->resource[2].start, udc);
        free_irq(pdev->resource[1].start, udc);
 
+       if (udc->dc_clk) {
+               if (udc->clk_requested)
+                       omap_udc_enable_clock(0);
+               clk_put(udc->hhc_clk);
+               clk_put(udc->dc_clk);
+       }
+
        release_mem_region(pdev->resource[0].start,
                        pdev->resource[0].end - pdev->resource[0].start + 1);
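For reference, a minimal sketch of the clock acquisition pattern this probe path relies on. The helper name example_udc_get_clocks is made up; the clock names are the omap16xx ones used above, and unlike the BUG_ON() in the patch the sketch propagates clk_get() failures to the caller.

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Grab and enable the two UDC clocks, or fail cleanly. */
static int example_udc_get_clocks(struct platform_device *pdev,
                                  struct clk **dc_clk, struct clk **hhc_clk)
{
        *dc_clk = clk_get(&pdev->dev, "usb_dc_ck");
        if (IS_ERR(*dc_clk))
                return PTR_ERR(*dc_clk);

        *hhc_clk = clk_get(&pdev->dev, "usb_hhc_ck");
        if (IS_ERR(*hhc_clk)) {
                clk_put(*dc_clk);
                return PTR_ERR(*hhc_clk);
        }

        clk_enable(*dc_clk);
        clk_enable(*hhc_clk);
        udelay(100);            /* same settling delay as the probe code above */
        return 0;
}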
 
index 652ee462734470345eed415a474c096fbe4a7909..1dc398bb9ab2ae408f0d77ee2c24d3d9c966c838 100644 (file)
@@ -175,6 +175,9 @@ struct omap_udc {
        unsigned                        ep0_reset_config:1;
        unsigned                        ep0_setup:1;
        struct completion               *done;
+       struct clk                      *dc_clk;
+       struct clk                      *hhc_clk;
+       unsigned                        clk_requested:1;
 };
 
 /*-------------------------------------------------------------------------*/
index acd101caeeeb4760a983c34cea5cf3c865476d67..e0d4c2358b39653dc52fb06fde98e12d312a1775 100644 (file)
@@ -209,24 +209,16 @@ static int resume_detect_interrupts_are_broken(struct uhci_hcd *uhci)
 
 static int remote_wakeup_is_broken(struct uhci_hcd *uhci)
 {
-       static struct dmi_system_id broken_wakeup_table[] = {
-               {
-                       .ident = "Asus A7V8X",
-                       .matches = {
-                               DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK"),
-                               DMI_MATCH(DMI_BOARD_NAME, "A7V8X"),
-                               DMI_MATCH(DMI_BOARD_VERSION, "REV 1.xx"),
-                       }
-               },
-               { }
-       };
        int port;
+       char *sys_info;
+       static char bad_Asus_board[] = "A7V8X";
 
        /* One of Asus's motherboards has a bug which causes it to
         * wake up immediately from suspend-to-RAM if any of the ports
         * are connected.  In such cases we will not set EGSM.
         */
-       if (dmi_check_system(broken_wakeup_table)) {
+       sys_info = dmi_get_system_info(DMI_BOARD_NAME);
+       if (sys_info && !strcmp(sys_info, bad_Asus_board)) {
                for (port = 0; port < uhci->rh_numports; ++port) {
                        if (inw(uhci->io_addr + USBPORTSC1 + port * 2) &
                                        USBPORTSC_CCS)
@@ -265,7 +257,9 @@ __acquires(uhci->lock)
        int_enable = USBINTR_RESUME;
        if (remote_wakeup_is_broken(uhci))
                egsm_enable = 0;
-       if (resume_detect_interrupts_are_broken(uhci) || !egsm_enable)
+       if (resume_detect_interrupts_are_broken(uhci) || !egsm_enable ||
+                       !device_may_wakeup(
+                               &uhci_to_hcd(uhci)->self.root_hub->dev))
                uhci->working_RD = int_enable = 0;
 
        outw(int_enable, uhci->io_addr + USBINTR);
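For illustration, the same board-name test expressed as a reusable predicate; example_board_needs_quirk is a made-up name, and dmi_get_system_info() can return NULL when no DMI data is present, so the check guards against that first.

#include <linux/dmi.h>
#include <linux/string.h>

/* True if the running board is the one that needs the wakeup quirk. */
static int example_board_needs_quirk(void)
{
        const char *board = dmi_get_system_info(DMI_BOARD_NAME);

        return board && !strcmp(board, "A7V8X");
}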
index bf26c3c56990f2ee36c9216b00fa430fc8662d22..9148694627d586d16c0445b083dfbfdda8e0f3a8 100644 (file)
@@ -403,7 +403,7 @@ sisusbcon_putc(struct vc_data *c, int ch, int y, int x)
 
 
        sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(x, y),
-                               (u32)SISUSB_HADDR(x, y), 2, &written);
+                               (long)SISUSB_HADDR(x, y), 2, &written);
 
        mutex_unlock(&sisusb->lock);
 }
@@ -438,7 +438,7 @@ sisusbcon_putcs(struct vc_data *c, const unsigned short *s,
        }
 
        sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(x, y),
-                               (u32)SISUSB_HADDR(x, y), count * 2, &written);
+                               (long)SISUSB_HADDR(x, y), count * 2, &written);
 
        mutex_unlock(&sisusb->lock);
 }
@@ -492,7 +492,7 @@ sisusbcon_clear(struct vc_data *c, int y, int x, int height, int width)
 
 
        sisusb_copy_memory(sisusb, (unsigned char *)SISUSB_VADDR(x, y),
-                               (u32)SISUSB_HADDR(x, y), length, &written);
+                               (long)SISUSB_HADDR(x, y), length, &written);
 
        mutex_unlock(&sisusb->lock);
 }
@@ -564,7 +564,7 @@ sisusbcon_bmove(struct vc_data *c, int sy, int sx,
 
 
        sisusb_copy_memory(sisusb, (unsigned char *)SISUSB_VADDR(dx, dy),
-                               (u32)SISUSB_HADDR(dx, dy), length, &written);
+                               (long)SISUSB_HADDR(dx, dy), length, &written);
 
        mutex_unlock(&sisusb->lock);
 }
@@ -612,7 +612,7 @@ sisusbcon_switch(struct vc_data *c)
                                                                length);
 
        sisusb_copy_memory(sisusb, (unsigned char *)c->vc_origin,
-                               (u32)SISUSB_HADDR(0, 0),
+                               (long)SISUSB_HADDR(0, 0),
                                length, &written);
 
        mutex_unlock(&sisusb->lock);
@@ -939,7 +939,7 @@ sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb,
        }
 
        sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(0, t),
-                               (u32)SISUSB_HADDR(0, t), length, &written);
+                               (long)SISUSB_HADDR(0, t), length, &written);
 
        mutex_unlock(&sisusb->lock);
 
index 95e682e2c9d658892f6995da890e51243a333c69..f538013965b003d7d622b8920e86967ce3bd8f3b 100644 (file)
@@ -920,7 +920,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
                goto out2;
 
        if ((ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT,
-                               0x0000, 0, 0, buf)) < 0) {
+                               1, 0, 0, buf)) < 0) {
                dbg("Select PHY #1 failed: %d", ret);
                goto out2;
        }
index 2f4d303ee36f927b649cbedb37f7301e3ee564af..c8999ae586520f4f621a56892f4818375666f3ef 100644 (file)
@@ -170,7 +170,7 @@ config USB_SERIAL_FTDI_SIO
 
 config USB_SERIAL_FUNSOFT
        tristate "USB Fundamental Software Dongle Driver"
-       depends on USB_SERIAL
+       depends on USB_SERIAL && !(SPARC || SPARC64)
        ---help---
          Say Y here if you want to use the Fundamental Software dongle.
 
index 819266b7e2f8e3368ddb6f20201918d44a14a889..5ca04e82ea196a491f9b20c1f1cd3e82c9c35eeb 100644 (file)
@@ -625,6 +625,9 @@ static int option_send_setup(struct usb_serial_port *port)
 
        dbg("%s", __FUNCTION__);
 
+       if (port->number != 0)
+               return 0;
+
        portdata = usb_get_serial_port_data(port);
 
        if (port->tty) {
index 5fe7ff441a094def351508a8fa8e069364a04b89..cddef3efba0a9e770934f6d9c2a89d2e475e148f 100644 (file)
@@ -728,7 +728,7 @@ UNUSUAL_DEV( 0x05ac, 0x1204, 0x0000, 0x9999,
                "Apple",
                "iPod",
                US_SC_DEVICE, US_PR_DEVICE, NULL,
-               US_FL_FIX_CAPACITY ),
+               US_FL_FIX_CAPACITY | US_FL_NOT_LOCKABLE ),
 
 UNUSUAL_DEV( 0x05ac, 0x1205, 0x0000, 0x9999,
                "Apple",
@@ -1358,6 +1358,21 @@ UNUSUAL_DEV(  0x1370, 0x6828, 0x0110, 0x0110,
                US_SC_DEVICE, US_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE ),
 
+/* Reported by Francesco Foresti <frafore@tiscali.it> */
+UNUSUAL_DEV(  0x14cd, 0x6600, 0x0201, 0x0201,
+               "Super Top",
+               "IDE DEVICE",
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+               US_FL_IGNORE_RESIDUE ),
+
+/* Reported by Robert Schedel <r.schedel@yahoo.de>
+ * Note: this is a 'super top' device like the above 14cd/6600 device */
+UNUSUAL_DEV(  0x1652, 0x6600, 0x0201, 0x0201,
+               "Teac",
+               "HD-35PUK-B",
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+               US_FL_IGNORE_RESIDUE ),
+
 /* patch submitted by Davide Perini <perini.davide@dpsoftware.org>
  * and Renato Perini <rperini@email.it>
  */
index bbfc862592721fe4b79d8f60c36a13386f6c1269..b9b2b27b68c30f3dd97a7717d0f557e5f3f9615d 100644 (file)
@@ -53,7 +53,7 @@ static inline int adfs_readname(char *buf, char *ptr, int maxlen)
 {
        char *old_buf = buf;
 
-       while (*ptr >= ' ' && maxlen--) {
+       while ((unsigned char)*ptr >= ' ' && maxlen--) {
                if (*ptr == '/')
                        *buf++ = '.';
                else
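The adfs hunk above casts *ptr to unsigned char before comparing with ' ': plain char may be signed, so bytes of 0x80 and above would otherwise compare as negative and terminate the copy early. A small self-contained demonstration of the pitfall (hypothetical data, userspace C):

#include <stdio.h>

int main(void)
{
        char name[] = "caf\xe9";        /* 0xe9 is negative where char is signed */
        int plain = 0, fixed = 0;
        char *p;

        for (p = name; *p; p++) {
                if (*p >= ' ')                  /* may reject the 0xe9 byte */
                        plain++;
                if ((unsigned char)*p >= ' ')   /* always accepts it */
                        fixed++;
        }
        printf("plain=%d fixed=%d\n", plain, fixed);    /* 3 vs 4 on signed-char ABIs */
        return 0;
}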
index 34e6d7b220c322b9399c32a701fe4910c256946f..869f5193ecc248321d1c2468d7de6090cb70954b 100644 (file)
 #include <linux/time.h>
 #include <linux/smp_lock.h>
 #include <linux/namei.h>
+#include <linux/poll.h>
 
-static int return_EIO(void)
+
+static loff_t bad_file_llseek(struct file *file, loff_t offset, int origin)
+{
+       return -EIO;
+}
+
+static ssize_t bad_file_read(struct file *filp, char __user *buf,
+                       size_t size, loff_t *ppos)
+{
+        return -EIO;
+}
+
+static ssize_t bad_file_write(struct file *filp, const char __user *buf,
+                       size_t siz, loff_t *ppos)
+{
+        return -EIO;
+}
+
+static ssize_t bad_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
+                       unsigned long nr_segs, loff_t pos)
+{
+       return -EIO;
+}
+
+static ssize_t bad_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+                       unsigned long nr_segs, loff_t pos)
+{
+       return -EIO;
+}
+
+static int bad_file_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+       return -EIO;
+}
+
+static unsigned int bad_file_poll(struct file *filp, poll_table *wait)
+{
+       return POLLERR;
+}
+
+static int bad_file_ioctl (struct inode *inode, struct file *filp,
+                       unsigned int cmd, unsigned long arg)
+{
+       return -EIO;
+}
+
+static long bad_file_unlocked_ioctl(struct file *file, unsigned cmd,
+                       unsigned long arg)
+{
+       return -EIO;
+}
+
+static long bad_file_compat_ioctl(struct file *file, unsigned int cmd,
+                       unsigned long arg)
+{
+       return -EIO;
+}
+
+static int bad_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       return -EIO;
+}
+
+static int bad_file_open(struct inode *inode, struct file *filp)
+{
+       return -EIO;
+}
+
+static int bad_file_flush(struct file *file, fl_owner_t id)
+{
+       return -EIO;
+}
+
+static int bad_file_release(struct inode *inode, struct file *filp)
+{
+       return -EIO;
+}
+
+static int bad_file_fsync(struct file *file, struct dentry *dentry,
+                       int datasync)
+{
+       return -EIO;
+}
+
+static int bad_file_aio_fsync(struct kiocb *iocb, int datasync)
+{
+       return -EIO;
+}
+
+static int bad_file_fasync(int fd, struct file *filp, int on)
+{
+       return -EIO;
+}
+
+static int bad_file_lock(struct file *file, int cmd, struct file_lock *fl)
+{
+       return -EIO;
+}
+
+static ssize_t bad_file_sendfile(struct file *in_file, loff_t *ppos,
+                       size_t count, read_actor_t actor, void *target)
+{
+       return -EIO;
+}
+
+static ssize_t bad_file_sendpage(struct file *file, struct page *page,
+                       int off, size_t len, loff_t *pos, int more)
+{
+       return -EIO;
+}
+
+static unsigned long bad_file_get_unmapped_area(struct file *file,
+                               unsigned long addr, unsigned long len,
+                               unsigned long pgoff, unsigned long flags)
+{
+       return -EIO;
+}
+
+static int bad_file_check_flags(int flags)
 {
        return -EIO;
 }
 
-#define EIO_ERROR ((void *) (return_EIO))
+static int bad_file_dir_notify(struct file *file, unsigned long arg)
+{
+       return -EIO;
+}
+
+static int bad_file_flock(struct file *filp, int cmd, struct file_lock *fl)
+{
+       return -EIO;
+}
+
+static ssize_t bad_file_splice_write(struct pipe_inode_info *pipe,
+                       struct file *out, loff_t *ppos, size_t len,
+                       unsigned int flags)
+{
+       return -EIO;
+}
+
+static ssize_t bad_file_splice_read(struct file *in, loff_t *ppos,
+                       struct pipe_inode_info *pipe, size_t len,
+                       unsigned int flags)
+{
+       return -EIO;
+}
 
 static const struct file_operations bad_file_ops =
 {
-       .llseek         = EIO_ERROR,
-       .aio_read       = EIO_ERROR,
-       .read           = EIO_ERROR,
-       .write          = EIO_ERROR,
-       .aio_write      = EIO_ERROR,
-       .readdir        = EIO_ERROR,
-       .poll           = EIO_ERROR,
-       .ioctl          = EIO_ERROR,
-       .mmap           = EIO_ERROR,
-       .open           = EIO_ERROR,
-       .flush          = EIO_ERROR,
-       .release        = EIO_ERROR,
-       .fsync          = EIO_ERROR,
-       .aio_fsync      = EIO_ERROR,
-       .fasync         = EIO_ERROR,
-       .lock           = EIO_ERROR,
-       .sendfile       = EIO_ERROR,
-       .sendpage       = EIO_ERROR,
-       .get_unmapped_area = EIO_ERROR,
+       .llseek         = bad_file_llseek,
+       .read           = bad_file_read,
+       .write          = bad_file_write,
+       .aio_read       = bad_file_aio_read,
+       .aio_write      = bad_file_aio_write,
+       .readdir        = bad_file_readdir,
+       .poll           = bad_file_poll,
+       .ioctl          = bad_file_ioctl,
+       .unlocked_ioctl = bad_file_unlocked_ioctl,
+       .compat_ioctl   = bad_file_compat_ioctl,
+       .mmap           = bad_file_mmap,
+       .open           = bad_file_open,
+       .flush          = bad_file_flush,
+       .release        = bad_file_release,
+       .fsync          = bad_file_fsync,
+       .aio_fsync      = bad_file_aio_fsync,
+       .fasync         = bad_file_fasync,
+       .lock           = bad_file_lock,
+       .sendfile       = bad_file_sendfile,
+       .sendpage       = bad_file_sendpage,
+       .get_unmapped_area = bad_file_get_unmapped_area,
+       .check_flags    = bad_file_check_flags,
+       .dir_notify     = bad_file_dir_notify,
+       .flock          = bad_file_flock,
+       .splice_write   = bad_file_splice_write,
+       .splice_read    = bad_file_splice_read,
 };
 
+static int bad_inode_create (struct inode *dir, struct dentry *dentry,
+               int mode, struct nameidata *nd)
+{
+       return -EIO;
+}
+
+static struct dentry *bad_inode_lookup(struct inode *dir,
+                       struct dentry *dentry, struct nameidata *nd)
+{
+       return ERR_PTR(-EIO);
+}
+
+static int bad_inode_link (struct dentry *old_dentry, struct inode *dir,
+               struct dentry *dentry)
+{
+       return -EIO;
+}
+
+static int bad_inode_unlink(struct inode *dir, struct dentry *dentry)
+{
+       return -EIO;
+}
+
+static int bad_inode_symlink (struct inode *dir, struct dentry *dentry,
+               const char *symname)
+{
+       return -EIO;
+}
+
+static int bad_inode_mkdir(struct inode *dir, struct dentry *dentry,
+                       int mode)
+{
+       return -EIO;
+}
+
+static int bad_inode_rmdir (struct inode *dir, struct dentry *dentry)
+{
+       return -EIO;
+}
+
+static int bad_inode_mknod (struct inode *dir, struct dentry *dentry,
+                       int mode, dev_t rdev)
+{
+       return -EIO;
+}
+
+static int bad_inode_rename (struct inode *old_dir, struct dentry *old_dentry,
+               struct inode *new_dir, struct dentry *new_dentry)
+{
+       return -EIO;
+}
+
+static int bad_inode_readlink(struct dentry *dentry, char __user *buffer,
+               int buflen)
+{
+       return -EIO;
+}
+
+static int bad_inode_permission(struct inode *inode, int mask,
+                       struct nameidata *nd)
+{
+       return -EIO;
+}
+
+static int bad_inode_getattr(struct vfsmount *mnt, struct dentry *dentry,
+                       struct kstat *stat)
+{
+       return -EIO;
+}
+
+static int bad_inode_setattr(struct dentry *direntry, struct iattr *attrs)
+{
+       return -EIO;
+}
+
+static int bad_inode_setxattr(struct dentry *dentry, const char *name,
+               const void *value, size_t size, int flags)
+{
+       return -EIO;
+}
+
+static ssize_t bad_inode_getxattr(struct dentry *dentry, const char *name,
+                       void *buffer, size_t size)
+{
+       return -EIO;
+}
+
+static ssize_t bad_inode_listxattr(struct dentry *dentry, char *buffer,
+                       size_t buffer_size)
+{
+       return -EIO;
+}
+
+static int bad_inode_removexattr(struct dentry *dentry, const char *name)
+{
+       return -EIO;
+}
+
 static struct inode_operations bad_inode_ops =
 {
-       .create         = EIO_ERROR,
-       .lookup         = EIO_ERROR,
-       .link           = EIO_ERROR,
-       .unlink         = EIO_ERROR,
-       .symlink        = EIO_ERROR,
-       .mkdir          = EIO_ERROR,
-       .rmdir          = EIO_ERROR,
-       .mknod          = EIO_ERROR,
-       .rename         = EIO_ERROR,
-       .readlink       = EIO_ERROR,
+       .create         = bad_inode_create,
+       .lookup         = bad_inode_lookup,
+       .link           = bad_inode_link,
+       .unlink         = bad_inode_unlink,
+       .symlink        = bad_inode_symlink,
+       .mkdir          = bad_inode_mkdir,
+       .rmdir          = bad_inode_rmdir,
+       .mknod          = bad_inode_mknod,
+       .rename         = bad_inode_rename,
+       .readlink       = bad_inode_readlink,
        /* follow_link must be no-op, otherwise unmounting this inode
           won't work */
-       .truncate       = EIO_ERROR,
-       .permission     = EIO_ERROR,
-       .getattr        = EIO_ERROR,
-       .setattr        = EIO_ERROR,
-       .setxattr       = EIO_ERROR,
-       .getxattr       = EIO_ERROR,
-       .listxattr      = EIO_ERROR,
-       .removexattr    = EIO_ERROR,
+       /* put_link returns void */
+       /* truncate returns void */
+       .permission     = bad_inode_permission,
+       .getattr        = bad_inode_getattr,
+       .setattr        = bad_inode_setattr,
+       .setxattr       = bad_inode_setxattr,
+       .getxattr       = bad_inode_getxattr,
+       .listxattr      = bad_inode_listxattr,
+       .removexattr    = bad_inode_removexattr,
+       /* truncate_range returns void */
 };
 
 
@@ -88,7 +336,7 @@ static struct inode_operations bad_inode_ops =
  *     on it to fail from this point on.
  */
  
-void make_bad_inode(struct inode * inode) 
+void make_bad_inode(struct inode *inode)
 {
        remove_inode_hash(inode);
 
@@ -113,7 +361,7 @@ EXPORT_SYMBOL(make_bad_inode);
  *     Returns true if the inode in question has been marked as bad.
  */
  
-int is_bad_inode(struct inode * inode) 
+int is_bad_inode(struct inode *inode)
 {
        return (inode->i_op == &bad_inode_ops); 
 }
index d3adfd353ff99eec4277b672088024eee9da22ac..7cb28720f90e363a4b74b2da8b575233ff5a6da1 100644 (file)
@@ -854,13 +854,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                         * default mmap base, as well as whatever program they
                         * might try to exec.  This is because the brk will
                         * follow the loader, and is not movable.  */
-                       if (current->flags & PF_RANDOMIZE)
-                               load_bias = randomize_range(0x10000,
-                                                           ELF_ET_DYN_BASE,
-                                                           0);
-                       else
-                               load_bias = ELF_ET_DYN_BASE;
-                       load_bias = ELF_PAGESTART(load_bias - vaddr);
+                       load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
                }
 
                error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
index b82381475779b59ecdd52685eafc05db391d6bb2..2e0021e8f3662e2f9fad8313efcd002ca56d6862 100644 (file)
@@ -275,6 +275,25 @@ static void ufs_change_blocknr(struct inode *inode, unsigned int baseblk,
        UFSD("EXIT\n");
 }
 
+static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
+                           int sync)
+{
+       struct buffer_head *bh;
+       sector_t end = beg + n;
+
+       for (; beg < end; ++beg) {
+               bh = sb_getblk(inode->i_sb, beg);
+               lock_buffer(bh);
+               memset(bh->b_data, 0, inode->i_sb->s_blocksize);
+               set_buffer_uptodate(bh);
+               mark_buffer_dirty(bh);
+               unlock_buffer(bh);
+               if (IS_SYNC(inode) || sync)
+                       sync_dirty_buffer(bh);
+               brelse(bh);
+       }
+}
+
 unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
                           unsigned goal, unsigned count, int * err, struct page *locked_page)
 {
@@ -350,6 +369,8 @@ unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
                        *p = cpu_to_fs32(sb, result);
                        *err = 0;
                        UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
+                       ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
+                                       locked_page != NULL);
                }
                unlock_super(sb);
                UFSD("EXIT, result %u\n", result);
@@ -363,6 +384,8 @@ unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
        if (result) {
                *err = 0;
                UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
+               ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
+                               locked_page != NULL);
                unlock_super(sb);
                UFSD("EXIT, result %u\n", result);
                return result;
@@ -398,6 +421,8 @@ unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
                *p = cpu_to_fs32(sb, result);
                *err = 0;
                UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
+               ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
+                               locked_page != NULL);
                unlock_super(sb);
                if (newcount < request)
                        ufs_free_fragments (inode, result + newcount, request - newcount);
index ee1eaa6f4ec2187f0f669324d2aaa7253932318a..2fbab0aab6883e749f87716440434a46467af37f 100644 (file)
@@ -156,36 +156,6 @@ out:
        return ret;
 }
 
-static void ufs_clear_frag(struct inode *inode, struct buffer_head *bh)
-{
-       lock_buffer(bh);
-       memset(bh->b_data, 0, inode->i_sb->s_blocksize);
-       set_buffer_uptodate(bh);
-       mark_buffer_dirty(bh);
-       unlock_buffer(bh);
-       if (IS_SYNC(inode))
-               sync_dirty_buffer(bh);
-}
-
-static struct buffer_head *
-ufs_clear_frags(struct inode *inode, sector_t beg,
-               unsigned int n, sector_t want)
-{
-       struct buffer_head *res = NULL, *bh;
-       sector_t end = beg + n;
-
-       for (; beg < end; ++beg) {
-               bh = sb_getblk(inode->i_sb, beg);
-               ufs_clear_frag(inode, bh);
-               if (want != beg)
-                       brelse(bh);
-               else
-                       res = bh;
-       }
-       BUG_ON(!res);
-       return res;
-}
-
 /**
  * ufs_inode_getfrag() - allocate new fragment(s)
  * @inode - pointer to inode
@@ -302,7 +272,7 @@ repeat:
        }
 
        if (!phys) {
-               result = ufs_clear_frags(inode, tmp, required, tmp + blockoff);
+               result = sb_getblk(sb, tmp + blockoff);
        } else {
                *phys = tmp + blockoff;
                result = NULL;
@@ -403,8 +373,7 @@ repeat:
 
 
        if (!phys) {
-               result = ufs_clear_frags(inode, tmp, uspi->s_fpb,
-                                        tmp + blockoff);
+               result = sb_getblk(sb, tmp + blockoff);
        } else {
                *phys = tmp + blockoff;
                *new = 1;
@@ -471,13 +440,13 @@ int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head
 #define GET_INODE_DATABLOCK(x) \
        ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new, bh_result->b_page)
 #define GET_INODE_PTR(x) \
-       ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL, bh_result->b_page)
+       ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL, NULL)
 #define GET_INDIRECT_DATABLOCK(x) \
        ufs_inode_getblock(inode, bh, x, fragment,      \
-                         &err, &phys, &new, bh_result->b_page);
+                         &err, &phys, &new, bh_result->b_page)
 #define GET_INDIRECT_PTR(x) \
        ufs_inode_getblock(inode, bh, x, fragment,      \
-                         &err, NULL, NULL, bh_result->b_page);
+                         &err, NULL, NULL, NULL)
 
        if (ptr < UFS_NDIR_FRAGMENT) {
                bh = GET_INODE_DATABLOCK(ptr);
index 4bbd85f3ed2a67acd6cc15f45ab3c360dda2e575..2e9469047eb14a0cc22dc4432e7bc0522337c92a 100644 (file)
@@ -19,7 +19,7 @@
  * Peripherals that are shared between the iop32x and iop33x but
  * located at different addresses.
  */
-#define IOP3XX_GPIO_REG(reg)   (IOP3XX_PERIPHERAL_VIRT_BASE + 0x07c0 + (reg))
+#define IOP3XX_GPIO_REG(reg)   (IOP3XX_PERIPHERAL_VIRT_BASE + 0x07c4 + (reg))
 #define IOP3XX_TIMER_REG(reg)  (IOP3XX_PERIPHERAL_VIRT_BASE + 0x07e0 + (reg))
 
 #include <asm/hardware/iop3xx.h>
index d51049522cd0f764bc3f120e64032090f0626024..5f531ea03059d003bab3e86643ed2580a36133cf 100644 (file)
@@ -357,6 +357,16 @@ extern void flush_dcache_page(struct page *);
 
 extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
 
+#define ARCH_HAS_FLUSH_ANON_PAGE
+static inline void flush_anon_page(struct vm_area_struct *vma,
+                        struct page *page, unsigned long vmaddr)
+{
+       extern void __flush_anon_page(struct vm_area_struct *vma,
+                               struct page *, unsigned long);
+       if (PageAnon(page))
+               __flush_anon_page(vma, page, vmaddr);
+}
+
 #define flush_dcache_mmap_lock(mapping) \
        write_lock_irq(&(mapping)->tree_lock)
 #define flush_dcache_mmap_unlock(mapping) \
index 1018a7486ab76fc717ec2e92096642e54c6b7a05..13ac8a4cd01f6ba7fc070d88a6aa9023ac442f4f 100644 (file)
@@ -168,9 +168,9 @@ extern void gpio_line_set(int line, int value);
 #define IOP3XX_PERCR0          (volatile u32 *)IOP3XX_REG_ADDR(0x0710)
 
 /* General Purpose I/O  */
-#define IOP3XX_GPOE            (volatile u32 *)IOP3XX_GPIO_REG(0x0004)
-#define IOP3XX_GPID            (volatile u32 *)IOP3XX_GPIO_REG(0x0008)
-#define IOP3XX_GPOD            (volatile u32 *)IOP3XX_GPIO_REG(0x000c)
+#define IOP3XX_GPOE            (volatile u32 *)IOP3XX_GPIO_REG(0x0000)
+#define IOP3XX_GPID            (volatile u32 *)IOP3XX_GPIO_REG(0x0004)
+#define IOP3XX_GPOD            (volatile u32 *)IOP3XX_GPIO_REG(0x0008)
 
 /* Timers  */
 #define IOP3XX_TU_TMR0         (volatile u32 *)IOP3XX_TIMER_REG(0x0000)
index 8ce79a6fa8919a7bf7557f39aa60fb6e71d1af39..e7686d0a84136be4e4756a5a6bfdc762f55e35a0 100644 (file)
@@ -13,7 +13,8 @@
 #define ASK_VGA                0xfffd          /* ask for it at bootup */
 
 /* Physical address where kernel should be loaded. */
-#define LOAD_PHYSICAL_ADDR ((0x100000 + CONFIG_PHYSICAL_ALIGN - 1) \
+#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+                               + (CONFIG_PHYSICAL_ALIGN - 1)) \
                                & ~(CONFIG_PHYSICAL_ALIGN - 1))
 
 #endif /* _LINUX_BOOT_H */
index 9b768c3b96b332ee7139827b8ed4c1b734785201..24cdcc6eaab887b5adb08772ad0c2d53abbba11c 100644 (file)
  */
 __wsum csum_partial(const void *buff, int len, __wsum sum);
 
+__wsum __csum_partial_copy_user(const void *src, void *dst,
+                               int len, __wsum sum, int *err_ptr);
+
 /*
  * this is a new version of the above that records errors it finds in *errp,
  * but continues and zeros the rest of the buffer.
  */
-__wsum csum_partial_copy_from_user(const void __user *src,
-                                        void *dst, int len,
-                                        __wsum sum, int *errp);
+static inline
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+                                  __wsum sum, int *err_ptr)
+{
+       might_sleep();
+       return __csum_partial_copy_user((__force void *)src, dst,
+                                       len, sum, err_ptr);
+}
 
 /*
  * Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-static inline __wsum csum_and_copy_to_user (const void *src, void __user *dst,
-                                                 int len, __wsum sum,
-                                                 int *err_ptr)
+static inline
+__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
+                            __wsum sum, int *err_ptr)
 {
        might_sleep();
-       sum = csum_partial(src, len, sum);
-
-       if (copy_to_user(dst, src, len)) {
+       if (access_ok(VERIFY_WRITE, dst, len))
+               return __csum_partial_copy_user(src, (__force void *)dst,
+                                               len, sum, err_ptr);
+       if (len)
                *err_ptr = -EFAULT;
-               return (__force __wsum)-1;
-       }
 
-       return sum;
+       return (__force __wsum)-1; /* invalid checksum */
 }
 
 /*
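The MIPS change above folds copy and checksum into a single pass over the data (__csum_partial_copy_user) instead of csum_partial() followed by copy_to_user(). A hedged userspace sketch of the single-pass idea using the ordinary Internet-checksum fold; it only illustrates the technique and is not the MIPS assembly routine:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy len bytes while accumulating a 16-bit ones'-complement sum,
 * so the source buffer is walked only once.
 */
static uint16_t copy_and_csum(void *dst, const void *src, size_t len)
{
        const uint8_t *s = src;
        uint8_t *d = dst;
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i + 1 < len; i += 2) {
                d[i] = s[i];
                d[i + 1] = s[i + 1];
                sum += (uint32_t)(s[i] << 8 | s[i + 1]);
        }
        if (i < len) {                          /* odd trailing byte */
                d[i] = s[i];
                sum += (uint32_t)s[i] << 8;
        }
        while (sum >> 16)                       /* fold the carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        char src[] = "copy and checksum in one pass";
        char dst[sizeof(src)];
        size_t len = strlen(src);
        uint16_t csum = copy_and_csum(dst, src, len);

        dst[len] = '\0';
        printf("csum=0x%04x copied=\"%s\"\n", (unsigned)csum, dst);
        return 0;
}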
index 67657089efa7264b207c9555e7b186ee36648d51..386da82e57747e4cc08b9351fe71f23e7be6a412 100644 (file)
@@ -31,14 +31,14 @@ static inline int irq_canonicalize(int irq)
  * functions will take over re-enabling the low-level mask.
  * Otherwise it will be done on return from exception.
  */
-#define __DO_IRQ_SMTC_HOOK()                                           \
+#define __DO_IRQ_SMTC_HOOK(irq)                                                \
 do {                                                                   \
        if (irq_hwmask[irq] & 0x0000ff00)                               \
                write_c0_tccontext(read_c0_tccontext() &                \
                                   ~(irq_hwmask[irq] & 0x0000ff00));    \
 } while (0)
 #else
-#define __DO_IRQ_SMTC_HOOK() do { } while (0)
+#define __DO_IRQ_SMTC_HOOK(irq) do { } while (0)
 #endif
 
 /*
@@ -52,7 +52,7 @@ do {                                                                  \
 #define do_IRQ(irq)                                                    \
 do {                                                                   \
        irq_enter();                                                    \
-       __DO_IRQ_SMTC_HOOK();                                           \
+       __DO_IRQ_SMTC_HOOK(irq);                                        \
        generic_handle_irq(irq);                                        \
        irq_exit();                                                     \
 } while (0)
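The __DO_IRQ_SMTC_HOOK() change makes the macro take irq as an explicit parameter instead of silently picking up a variable of that name from the caller's scope, which keeps the real version and the empty stub interchangeable. A small illustration with made-up macro names:

#include <stdio.h>

/* Fragile: only compiles if the caller happens to have a variable named irq. */
#define LOG_IRQ_IMPLICIT()      printf("irq %d\n", irq)

/* Robust: the dependency is explicit, and the no-op stub still "uses" it. */
#ifdef VERBOSE_IRQ
#define LOG_IRQ(irq)            printf("irq %d\n", (irq))
#else
#define LOG_IRQ(irq)            do { (void)(irq); } while (0)
#endif

int main(void)
{
        int line = 7;

        /* LOG_IRQ_IMPLICIT(); would not compile here: no variable called irq */
        LOG_IRQ(line);
        return 0;
}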
index aedb0512cb049bac19f8b7616101abf6d9c4302f..a799dd8ef395a94f9a923848ffa6384384eb1efc 100644 (file)
@@ -186,7 +186,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 }
 
 static inline void
-flush_anon_page(struct page *page, unsigned long vmaddr)
+flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
 {
        if (PageAnon(page))
                flush_user_dcache_page(vmaddr);
index 709568879f731e2367009c778465b639c997775a..f6fa39474846457f9960c6072f0a43aff16dc5ac 100644 (file)
 #define BUG_OPCODE .long 0x00b00b00  /* For asm */
 #define BUG_ILLEGAL_INSTR "0x00b00b00" /* For BUG macro */
 
-#ifndef __ASSEMBLY__
-
 #ifdef CONFIG_BUG
 
+#ifdef __ASSEMBLY__
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+.macro EMIT_BUG_ENTRY addr,file,line,flags
+        .section __bug_table,"a"
+5001:   PPC_LONG \addr, 5002f
+        .short \line, \flags
+        .org 5001b+BUG_ENTRY_SIZE
+        .previous
+        .section .rodata,"a"
+5002:   .asciz "\file"
+        .previous
+.endm
+#else
+ .macro EMIT_BUG_ENTRY addr,file,line,flags
+        .section __bug_table,"a"
+5001:   PPC_LONG \addr
+        .short \flags
+        .org 5001b+BUG_ENTRY_SIZE
+        .previous
+.endm
+#endif /* verbose */
+
+#else /* !__ASSEMBLY__ */
 /* _EMIT_BUG_ENTRY expects args %0,%1,%2,%3 to be FILE, LINE, flags and
    sizeof(struct bug_entry), respectively */
 #ifdef CONFIG_DEBUG_BUGVERBOSE
 #define HAVE_ARCH_BUG
 #define HAVE_ARCH_BUG_ON
 #define HAVE_ARCH_WARN_ON
-#endif /* CONFIG_BUG */
 #endif /* __ASSEMBLY __ */
+#endif /* CONFIG_BUG */
 
 #include <asm-generic/bug.h>
 
index 257d1cecb8c9699f910df53b5f1192dd53b67382..7a500732b671db290c9bee9a595e9ad7ceec8329 100644 (file)
@@ -252,8 +252,6 @@ struct hcall_stats {
        unsigned long   tb_total;       /* total wall time (mftb) of calls. */
        unsigned long   purr_total;     /* total cpu time (PURR) of calls. */
 };
-void update_hcall_stats(unsigned long opcode, unsigned long tb_delta,
-                       unsigned long purr_delta);
 #define HCALL_STAT_ARRAY_SIZE  ((MAX_HCALL_OPCODE >> 2) + 1)
 
 #endif /* __ASSEMBLY__ */
index 4a28a850998c052bdd88ce597d5e2d33d064a4e3..4560d72fc75875ecd547a6e33a67131c83646228 100644 (file)
@@ -244,6 +244,7 @@ struct mpc52xx_cdm {
 extern void __iomem * mpc52xx_find_and_map(const char *);
 extern unsigned int mpc52xx_find_ipb_freq(struct device_node *node);
 extern void mpc52xx_setup_cpu(void);
+extern void mpc52xx_declare_of_platform_devices(void);
 
 extern void mpc52xx_init_irq(void);
 extern unsigned int mpc52xx_get_irq(void);
index 5e261e1de6719bb99a638421e63eec8c02e5f79a..5c5d02de49e9ae53fe5728fef181f1657fd4c3dc 100644 (file)
@@ -4,8 +4,8 @@
 #ifdef __KERNEL__
 
 #include <linux/futex.h>
+#include <linux/uaccess.h>
 #include <asm/errno.h>
-#include <asm/uaccess.h>
 
 static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 {
@@ -21,7 +21,9 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
+       pagefault_disable();
        ret = uaccess.futex_atomic_op(op, uaddr, oparg, &oldval);
+       pagefault_enable();
 
        if (!ret) {
                switch (cmp) {
index ca9a602cffd7a2b8ea03c3d7fb0a60c4a1a3acb2..645d440807c229c0e5deba21f3ed3260f0416007 100644 (file)
@@ -8,7 +8,7 @@
 #include <asm/cacheflush.h>
 
 #ifndef ARCH_HAS_FLUSH_ANON_PAGE
-static inline void flush_anon_page(struct page *page, unsigned long vmaddr)
+static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
 {
 }
 #endif
index 28fdce1ac1db3e6c4d3db845989ec5f502ad25a3..bc8b4616bad702f073b49fbb60d3b3c833e3076c 100644 (file)
@@ -11,7 +11,7 @@
 #include <asm/types.h>
 #include <linux/ioctl.h>
 
-#define KVM_API_VERSION 1
+#define KVM_API_VERSION 2
 
 /*
  * Architectural interrupt line count, and the size of the bitmap needed
@@ -45,6 +45,7 @@ enum kvm_exit_reason {
        KVM_EXIT_DEBUG            = 4,
        KVM_EXIT_HLT              = 5,
        KVM_EXIT_MMIO             = 6,
+       KVM_EXIT_IRQ_WINDOW_OPEN  = 7,
 };
 
 /* for KVM_RUN */
@@ -53,11 +54,19 @@ struct kvm_run {
        __u32 vcpu;
        __u32 emulated;  /* skip current instruction */
        __u32 mmio_completed; /* mmio request completed */
+       __u8 request_interrupt_window;
+       __u8 padding1[3];
 
        /* out */
        __u32 exit_type;
        __u32 exit_reason;
        __u32 instruction_length;
+       __u8 ready_for_interrupt_injection;
+       __u8 if_flag;
+       __u16 padding2;
+       __u64 cr8;
+       __u64 apic_base;
+
        union {
                /* KVM_EXIT_UNKNOWN */
                struct {
index 156c40fc664e26420f07cd6985efbc8dfa62113c..b78bbf42135ac4287e8be81efee7ce3ebcddb3a7 100644 (file)
@@ -3,6 +3,7 @@
 
 #define ADFS_SUPER_MAGIC       0xadf5
 #define AFFS_SUPER_MAGIC       0xadff
+#define AFS_SUPER_MAGIC                0x5346414F
 #define AUTOFS_SUPER_MAGIC     0x0187
 #define CODA_SUPER_MAGIC       0x73757245
 #define EFS_SUPER_MAGIC                0x414A53
index add51cebc8d9dad3999f2b9842a55f7fcce24f47..5423559a44a6833b336e5b29b5863abdd5ccfe6a 100644 (file)
@@ -245,7 +245,7 @@ extern int swap_duplicate(swp_entry_t);
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern void swap_free(swp_entry_t);
 extern void free_swap_and_cache(swp_entry_t);
-extern int swap_type_of(dev_t, sector_t);
+extern int swap_type_of(dev_t, sector_t, struct block_device **);
 extern unsigned int count_swap_pages(int, int);
 extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
 extern sector_t swapdev_block(int, pgoff_t);
index e6af381e206db9e7ac6682b5b0c7fc9c1ae301db..e02d85f56e601d79c0ccb32ea4157ee890909cd3 100644 (file)
@@ -218,7 +218,7 @@ struct ieee80211_snap_hdr {
 #define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE)
 
 #define WLAN_GET_SEQ_FRAG(seq) ((seq) & IEEE80211_SCTL_FRAG)
-#define WLAN_GET_SEQ_SEQ(seq)  ((seq) & IEEE80211_SCTL_SEQ)
+#define WLAN_GET_SEQ_SEQ(seq)  (((seq) & IEEE80211_SCTL_SEQ) >> 4)
 
 /* Authentication algorithms */
 #define WLAN_AUTH_OPEN 0
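WLAN_GET_SEQ_SEQ now shifts right by 4 after masking: the 802.11 sequence-control field packs a 4-bit fragment number in the low bits and the 12-bit sequence number above it, so masking alone returned the sequence number still multiplied by 16. A short demonstration of mask-then-shift extraction (constants renamed for the example):

#include <stdint.h>
#include <stdio.h>

#define SCTL_FRAG 0x000F
#define SCTL_SEQ  0xFFF0

int main(void)
{
        uint16_t seq_ctl = (1234 << 4) | 5;     /* sequence 1234, fragment 5 */

        printf("frag=%d mask_only=%d seq=%d\n",
               seq_ctl & SCTL_FRAG,             /* 5 */
               seq_ctl & SCTL_SEQ,              /* 19744, i.e. 1234 << 4 */
               (seq_ctl & SCTL_SEQ) >> 4);      /* 1234 */
        return 0;
}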
index b7d8317f22acf9d256d79c48eabd8c7b2e723c83..cd8fa0c858aedfd0a7295a9c9b84130fc8f339e7 100644 (file)
@@ -242,7 +242,7 @@ extern int tcp_memory_pressure;
 
 static inline int before(__u32 seq1, __u32 seq2)
 {
-        return (__s32)(seq2-seq1) > 0;
+        return (__s32)(seq1-seq2) < 0;
 }
 #define after(seq2, seq1)      before(seq1, seq2)
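The before() change keeps the wraparound-safe signed-difference comparison but computes (__s32)(seq1 - seq2) < 0 directly; the two forms only disagree for sequence numbers exactly 2^31 apart. A self-contained check of why the modular comparison is needed at all (userspace C):

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "seq1 comes earlier than seq2" for 32-bit sequence numbers. */
static int before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
        uint32_t a = 0xfffffff0u;       /* just before the 32-bit wrap */
        uint32_t b = 0x00000010u;       /* just after it */

        /* A naive "<" gets the wrapped case backwards. */
        printf("naive a<b: %d   before(a,b): %d\n", a < b, before(a, b));
        printf("naive b<a: %d   before(b,a): %d\n", b < a, before(b, a));
        return 0;
}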
 
index 0ad90ebcf86ecbf3a5da59e01abe3fe12b154864..e47fe440d9d70c6533a16ffb98290095d3f9f77a 100644 (file)
@@ -259,6 +259,7 @@ extern int  x25_decode(struct sock *, struct sk_buff *, int *, int *, int *, int
 extern void x25_disconnect(struct sock *, int, unsigned char, unsigned char);
 
 /* x25_timer.c */
+extern void x25_init_timers(struct sock *sk);
 extern void x25_start_heartbeat(struct sock *);
 extern void x25_start_t2timer(struct sock *);
 extern void x25_start_t21timer(struct sock *);
index 2949b9b991b52f79786a630af0d5da42cf9d8c4d..20f7babad514109312bf46a3aada728d80af2e27 100644 (file)
@@ -1,3 +1,3 @@
 /* include/version.h.  Generated by alsa/ksync script.  */
 #define CONFIG_SND_VERSION "1.0.14rc1"
-#define CONFIG_SND_DATE " (Wed Dec 20 08:11:48 2006 UTC)"
+#define CONFIG_SND_DATE " (Tue Jan 09 09:56:17 2007 UTC)"
index 2b1cdaab45e654bd1dd9c7d772c48e226e6ec20d..bc27d72bbb198fe8ea00dab91502711be811d3e8 100644 (file)
@@ -538,6 +538,11 @@ asmlinkage void __init start_kernel(void)
        parse_args("Booting kernel", command_line, __start___param,
                   __stop___param - __start___param,
                   &unknown_bootoption);
+       if (!irqs_disabled()) {
+               printk(KERN_WARNING "start_kernel(): bug: interrupts were "
+                               "enabled *very* early, fixing it\n");
+               local_irq_disable();
+       }
        sort_main_extable();
        trap_init();
        rcu_init();
index dbce132b354c9d9b89f9c8e957f7af6f729c1952..d0f2260a02105902fba2217e51451349777ca21d 100644 (file)
@@ -1148,10 +1148,10 @@ static int mod_sysfs_setup(struct module *mod,
        kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
        return 0;
 
-out_unreg_drivers:
-       kobject_unregister(mod->drivers_dir);
 out_unreg_param:
        module_param_sysfs_remove(mod);
+out_unreg_drivers:
+       kobject_unregister(mod->drivers_dir);
 out_unreg:
        kobject_del(&mod->mkobj.kobj);
        kobject_put(&mod->mkobj.kobj);
@@ -2327,8 +2327,22 @@ void print_modules(void)
        printk("\n");
 }
 
+static char *make_driver_name(struct device_driver *drv)
+{
+       char *driver_name;
+
+       driver_name = kmalloc(strlen(drv->name) + strlen(drv->bus->name) + 2,
+                             GFP_KERNEL);
+       if (!driver_name)
+               return NULL;
+
+       sprintf(driver_name, "%s:%s", drv->bus->name, drv->name);
+       return driver_name;
+}
+
 void module_add_driver(struct module *mod, struct device_driver *drv)
 {
+       char *driver_name;
        int no_warn;
 
        if (!mod || !drv)
@@ -2336,17 +2350,31 @@ void module_add_driver(struct module *mod, struct device_driver *drv)
 
        /* Don't check return codes; these calls are idempotent */
        no_warn = sysfs_create_link(&drv->kobj, &mod->mkobj.kobj, "module");
-       no_warn = sysfs_create_link(mod->drivers_dir, &drv->kobj, drv->name);
+       driver_name = make_driver_name(drv);
+       if (driver_name) {
+               no_warn = sysfs_create_link(mod->drivers_dir, &drv->kobj,
+                                           driver_name);
+               kfree(driver_name);
+       }
 }
 EXPORT_SYMBOL(module_add_driver);
 
 void module_remove_driver(struct device_driver *drv)
 {
+       char *driver_name;
+
        if (!drv)
                return;
+
        sysfs_remove_link(&drv->kobj, "module");
-       if (drv->owner && drv->owner->drivers_dir)
-               sysfs_remove_link(drv->owner->drivers_dir, drv->name);
+       if (drv->owner && drv->owner->drivers_dir) {
+               driver_name = make_driver_name(drv);
+               if (driver_name) {
+                       sysfs_remove_link(drv->owner->drivers_dir,
+                                         driver_name);
+                       kfree(driver_name);
+               }
+       }
 }
 EXPORT_SYMBOL(module_remove_driver);
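module_add_driver() now links drivers under a "bus:driver" key, so two drivers with the same name on different buses no longer collide in the module's drivers/ directory. A userspace sketch of the name construction; the sample driver names are made up, and the kernel version uses kmalloc()/sprintf() as shown above:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build "bus:driver", the key used for the sysfs link; caller frees it. */
static char *make_driver_name(const char *bus, const char *drv)
{
        size_t len = strlen(bus) + strlen(drv) + 2;     /* ':' plus '\0' */
        char *name = malloc(len);

        if (!name)
                return NULL;
        snprintf(name, len, "%s:%s", bus, drv);
        return name;
}

int main(void)
{
        char *a = make_driver_name("pci", "ehci_hcd");
        char *b = make_driver_name("platform", "ehci_hcd");

        if (a && b)
                printf("%s and %s no longer collide\n", a, b);
        free(a);
        free(b);
        return 0;
}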
 
index f406655d66536b8a373b62c082b9ef64364d2c20..718945da8f58cf6a42046c05fe3dfc712e65d42f 100644 (file)
@@ -143,9 +143,15 @@ int parse_args(const char *name,
 
        while (*args) {
                int ret;
+               int irq_was_disabled;
 
                args = next_arg(args, &param, &val);
+               irq_was_disabled = irqs_disabled();
                ret = parse_one(param, val, params, num, unknown);
+               if (irq_was_disabled && !irqs_disabled()) {
+                       printk(KERN_WARNING "parse_args(): option '%s' enabled "
+                                       "irq's!\n", param);
+               }
                switch (ret) {
                case -ENOENT:
                        printk(KERN_ERR "%s: Unknown parameter `%s'\n",
index f133d4a6d817bacfc2219476d72be55fb5d8face..3581f8f86acdc430d4fdd8edb09dfc8d31d2b5c0 100644 (file)
@@ -165,14 +165,15 @@ static int swsusp_swap_check(void) /* This is called before saving image */
 {
        int res;
 
-       res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
+       res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
+                       &resume_bdev);
        if (res < 0)
                return res;
 
        root_swap = res;
-       resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_WRITE);
-       if (IS_ERR(resume_bdev))
-               return PTR_ERR(resume_bdev);
+       res = blkdev_get(resume_bdev, FMODE_WRITE, O_RDWR);
+       if (res)
+               return res;
 
        res = set_blocksize(resume_bdev, PAGE_SIZE);
        if (res < 0)
index 89443b85163baa012d5a785c3491c21fe047ec75..f7b7a785a5c60a35e133daa036f7b9667ce3d718 100644 (file)
@@ -57,7 +57,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
        memset(&data->handle, 0, sizeof(struct snapshot_handle));
        if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
                data->swap = swsusp_resume_device ?
-                               swap_type_of(swsusp_resume_device, 0) : -1;
+                       swap_type_of(swsusp_resume_device, 0, NULL) : -1;
                data->mode = O_RDONLY;
        } else {
                data->swap = -1;
@@ -268,7 +268,8 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
                         * so we need to recode them
                         */
                        if (old_decode_dev(arg)) {
-                               data->swap = swap_type_of(old_decode_dev(arg), 0);
+                               data->swap = swap_type_of(old_decode_dev(arg),
+                                                       0, NULL);
                                if (data->swap < 0)
                                        error = -ENODEV;
                        } else {
@@ -365,7 +366,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
                        swdev = old_decode_dev(swap_area.dev);
                        if (swdev) {
                                offset = swap_area.offset;
-                               data->swap = swap_type_of(swdev, offset);
+                               data->swap = swap_type_of(swdev, offset, NULL);
                                if (data->swap < 0)
                                        error = -ENODEV;
                        } else {
index fb5e03d57e9dac86b7aadb3b7c5496a467d5ba15..11550b2290b69a20ed624e5966438a70fbd617d0 100644 (file)
@@ -63,7 +63,7 @@ static int __init profile_setup(char * str)
                printk(KERN_INFO
                        "kernel sleep profiling enabled (shift: %ld)\n",
                        prof_shift);
-       } else if (!strncmp(str, sleepstr, strlen(sleepstr))) {
+       } else if (!strncmp(str, schedstr, strlen(schedstr))) {
                prof_on = SCHED_PROFILING;
                if (str[strlen(schedstr)] == ',')
                        str += strlen(schedstr) + 1;
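The profile.c fix makes the string that is matched and the string whose length is consumed the same (schedstr in both places). One way to avoid that class of pasted-constant mismatch is to route both uses through a single argument, as in this small made-up helper:

#include <stdio.h>
#include <string.h>

/* Match "<name>," at the start of str and return the text after the comma,
 * or NULL.  Using one parameter for both the comparison and the length
 * keeps the two from drifting apart.
 */
static const char *match_option(const char *str, const char *name)
{
        size_t n = strlen(name);

        if (strncmp(str, name, n) == 0 && str[n] == ',')
                return str + n + 1;
        return NULL;
}

int main(void)
{
        const char *arg = match_option("schedule,5", "schedule");

        printf("value: %s\n", arg ? arg : "(no match)");
        return 0;
}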
index 563792f4f687e6e6dde9f98aaf87d3116b82daff..af227d26e104674e324f99c5f135f3f34e8b4d22 100644 (file)
@@ -1091,7 +1091,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        if (pages) {
                                pages[i] = page;
 
-                               flush_anon_page(page, start);
+                               flush_anon_page(vma, page, start);
                                flush_dcache_page(page);
                        }
                        if (vmas)
index 6969cfb33901e1e21defc992eda34cd0d6363264..b278b8d60eee4ac13bba50222c478901cff39a60 100644 (file)
@@ -60,12 +60,6 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
                return 0;
        }
 
-       /*
-        * swapoff can easily use up all memory, so kill those first.
-        */
-       if (p->flags & PF_SWAPOFF)
-               return ULONG_MAX;
-
        /*
         * The memory size of the process is the basis for the badness.
         */
@@ -76,6 +70,12 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
         */
        task_unlock(p);
 
+       /*
+        * swapoff can easily use up all memory, so kill those first.
+        */
+       if (p->flags & PF_SWAPOFF)
+               return ULONG_MAX;
+
        /*
         * Processes which fork a lot of child processes are likely
         * a good choice. We add half the vmsize of the children if they
index 8c1a116875bc71d520069686dcb2483a79659604..a49f96b7ea4323667bf635f034c3950f5aa0f7b8 100644 (file)
@@ -711,6 +711,9 @@ static void __drain_pages(unsigned int cpu)
        for_each_zone(zone) {
                struct per_cpu_pageset *pset;
 
+               if (!populated_zone(zone))
+                       continue;
+
                pset = zone_pcp(zone, cpu);
                for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
                        struct per_cpu_pages *pcp;
@@ -3321,6 +3324,10 @@ void *__init alloc_large_system_hash(const char *tablename,
                        numentries >>= (scale - PAGE_SHIFT);
                else
                        numentries <<= (PAGE_SHIFT - scale);
+
+               /* Make sure we've got at least a 0-order allocation.. */
+               if (unlikely((numentries * bucketsize) < PAGE_SIZE))
+                       numentries = PAGE_SIZE / bucketsize;
        }
        numentries = roundup_pow_of_two(numentries);
 
index 0d4e57431de41c8d7fccfe9307bfb504087e5c03..c6100628a6ef7be2325a55e18f69b80a5a0f2006 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3281,7 +3281,7 @@ retry:
                                        flags | GFP_THISNODE, nid);
        }
 
-       if (!obj) {
+       if (!obj && !(flags & __GFP_NO_GROW)) {
                /*
                 * This allocation will be performed within the constraints
                 * of the current cpuset / memory policy requirements.
@@ -3310,7 +3310,7 @@ retry:
                                         */
                                        goto retry;
                        } else {
-                               kmem_freepages(cache, obj);
+                               /* cache_grow already freed obj */
                                obj = NULL;
                        }
                }
index b9fc0e5de6d50d1892dca42e02ae5d0ad48d7a47..a2d9bb4e80df7e3f977b07df8223584c78aba31a 100644 (file)
@@ -434,7 +434,7 @@ void free_swap_and_cache(swp_entry_t entry)
  *
  * This is needed for the suspend to disk (aka swsusp).
  */
-int swap_type_of(dev_t device, sector_t offset)
+int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
 {
        struct block_device *bdev = NULL;
        int i;
@@ -450,6 +450,9 @@ int swap_type_of(dev_t device, sector_t offset)
                        continue;
 
                if (!bdev) {
+                       if (bdev_p)
+                               *bdev_p = sis->bdev;
+
                        spin_unlock(&swap_lock);
                        return i;
                }
@@ -459,6 +462,9 @@ int swap_type_of(dev_t device, sector_t offset)
                        se = list_entry(sis->extent_list.next,
                                        struct swap_extent, list);
                        if (se->start_block == offset) {
+                               if (bdev_p)
+                                       *bdev_p = sis->bdev;
+
                                spin_unlock(&swap_lock);
                                bdput(bdev);
                                return i;
index 40fea4918390e6e69b96bc463b974a4ad31bb2bf..7430df68cb64aaf2856c7158920442c77fe1cf27 100644 (file)
@@ -1406,6 +1406,16 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
        return ret;
 }
 
+static unsigned long count_lru_pages(void)
+{
+       struct zone *zone;
+       unsigned long ret = 0;
+
+       for_each_zone(zone)
+               ret += zone->nr_active + zone->nr_inactive;
+       return ret;
+}
+
 /*
  * Try to free `nr_pages' of memory, system-wide, and return the number of
  * freed pages.
@@ -1420,7 +1430,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
        unsigned long ret = 0;
        int pass;
        struct reclaim_state reclaim_state;
-       struct zone *zone;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
                .may_swap = 0,
@@ -1431,10 +1440,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
        current->reclaim_state = &reclaim_state;
 
-       lru_pages = 0;
-       for_each_zone(zone)
-               lru_pages += zone->nr_active + zone->nr_inactive;
-
+       lru_pages = count_lru_pages();
        nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
        /* If slab caches are huge, it's better to hit them first */
        while (nr_slab >= lru_pages) {
@@ -1461,13 +1467,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
        for (pass = 0; pass < 5; pass++) {
                int prio;
 
-               /* Needed for shrinking slab caches later on */
-               if (!lru_pages)
-                       for_each_zone(zone) {
-                               lru_pages += zone->nr_active;
-                               lru_pages += zone->nr_inactive;
-                       }
-
                /* Force reclaiming mapped pages in the passes #3 and #4 */
                if (pass > 2) {
                        sc.may_swap = 1;
@@ -1483,7 +1482,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                                goto out;
 
                        reclaim_state.reclaimed_slab = 0;
-                       shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages);
+                       shrink_slab(sc.nr_scanned, sc.gfp_mask,
+                                       count_lru_pages());
                        ret += reclaim_state.reclaimed_slab;
                        if (ret >= nr_pages)
                                goto out;
@@ -1491,20 +1491,19 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                        if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
                                congestion_wait(WRITE, HZ / 10);
                }
-
-               lru_pages = 0;
        }
 
        /*
         * If ret = 0, we could not shrink LRUs, but there may be something
         * in slab caches
         */
-       if (!ret)
+       if (!ret) {
                do {
                        reclaim_state.reclaimed_slab = 0;
-                       shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+                       shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
                        ret += reclaim_state.reclaimed_slab;
                } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+       }
 
 out:
        current->reclaim_state = NULL;
index be04e9fb11f604639f92e447451098902a9977d6..ab166b48ce8dc02c8288b44d21d4d9c5ce7b2d22 100644 (file)
@@ -196,6 +196,9 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
 
        switch (CAPIMSG_SUBCOMMAND(skb->data)) {
        case CAPI_CONF:
+               if (skb->len < CAPI_MSG_BASELEN + 10)
+                       break;
+
                func = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 5);
                info = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 8);
 
@@ -226,6 +229,9 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
                        break;
 
                case CAPI_FUNCTION_GET_PROFILE:
+                       if (skb->len < CAPI_MSG_BASELEN + 11 + sizeof(capi_profile))
+                               break;
+
                        controller = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 11);
                        msgnum = CAPIMSG_MSGID(skb->data);
 
@@ -246,17 +252,26 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
                        break;
 
                case CAPI_FUNCTION_GET_MANUFACTURER:
+                       if (skb->len < CAPI_MSG_BASELEN + 15)
+                               break;
+
                        controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 10);
 
                        if (!info && ctrl) {
+                               int len = min_t(uint, CAPI_MANUFACTURER_LEN,
+                                               skb->data[CAPI_MSG_BASELEN + 14]);
+
+                               memset(ctrl->manu, 0, CAPI_MANUFACTURER_LEN);
                                strncpy(ctrl->manu,
-                                       skb->data + CAPI_MSG_BASELEN + 15,
-                                       skb->data[CAPI_MSG_BASELEN + 14]);
+                                       skb->data + CAPI_MSG_BASELEN + 15, len);
                        }
 
                        break;
 
                case CAPI_FUNCTION_GET_VERSION:
+                       if (skb->len < CAPI_MSG_BASELEN + 32)
+                               break;
+
                        controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 12);
 
                        if (!info && ctrl) {
@@ -269,13 +284,18 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
                        break;
 
                case CAPI_FUNCTION_GET_SERIAL_NUMBER:
+                       if (skb->len < CAPI_MSG_BASELEN + 17)
+                               break;
+
                        controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 12);
 
                        if (!info && ctrl) {
+                               int len = min_t(uint, CAPI_SERIAL_LEN,
+                                               skb->data[CAPI_MSG_BASELEN + 16]);
+
                                memset(ctrl->serial, 0, CAPI_SERIAL_LEN);
                                strncpy(ctrl->serial,
-                                       skb->data + CAPI_MSG_BASELEN + 17,
-                                       skb->data[CAPI_MSG_BASELEN + 16]);
+                                       skb->data + CAPI_MSG_BASELEN + 17, len);
                        }
 
                        break;
@@ -284,14 +304,18 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
                break;
 
        case CAPI_IND:
+               if (skb->len < CAPI_MSG_BASELEN + 6)
+                       break;
+
                func = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 3);
 
                if (func == CAPI_FUNCTION_LOOPBACK) {
+                       int len = min_t(uint, skb->len - CAPI_MSG_BASELEN - 6,
+                                               skb->data[CAPI_MSG_BASELEN + 5]);
                        appl = CAPIMSG_APPID(skb->data);
                        msgnum = CAPIMSG_MSGID(skb->data);
                        cmtp_send_interopmsg(session, CAPI_RESP, appl, msgnum, func,
-                                               skb->data + CAPI_MSG_BASELEN + 6,
-                                               skb->data[CAPI_MSG_BASELEN + 5]);
+                                               skb->data + CAPI_MSG_BASELEN + 6, len);
                }
 
                break;
@@ -309,6 +333,9 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
 
        BT_DBG("session %p skb %p len %d", session, skb, skb->len);
 
+       if (skb->len < CAPI_MSG_BASELEN)
+               return;
+
        if (CAPIMSG_COMMAND(skb->data) == CAPI_INTEROPERABILITY) {
                cmtp_recv_interopmsg(session, skb);
                return;
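
These CMTP hunks all apply the same defensive pattern: verify that skb->len covers the largest offset about to be read before parsing a CAPI field, and clamp string copies to both the destination buffer and the length byte advertised by the peer. A minimal sketch of the pattern in plain C, with hypothetical buffer and field names:

    #include <stddef.h>
    #include <string.h>

    /* Copy a length-prefixed field at buf[off] into a fixed-size,
     * zero-padded destination without trusting the advertised length. */
    static void copy_bounded(char *dst, size_t dst_len,
                             const unsigned char *buf, size_t buf_len, size_t off)
    {
            size_t len;

            if (buf_len < off + 1)          /* the length byte must be present */
                    return;
            len = buf[off];
            if (len > dst_len)              /* never overrun the destination */
                    len = dst_len;
            if (len > buf_len - off - 1)    /* nor read past the packet */
                    len = buf_len - off - 1;

            memset(dst, 0, dst_len);
            memcpy(dst, buf + off + 1, len);
    }
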
index d4c935692ccfe377d8fde81e297ab8cb458a7655..801d687ea4efbdf84d7b6982a5dda5607c2d9644 100644 (file)
@@ -242,7 +242,7 @@ static void add_conn(struct work_struct *work)
        struct hci_conn *conn = container_of(work, struct hci_conn, work);
        int i;
 
-       if (device_register(&conn->dev) < 0) {
+       if (device_add(&conn->dev) < 0) {
                BT_ERR("Failed to register connection device");
                return;
        }
@@ -272,6 +272,8 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
 
        dev_set_drvdata(&conn->dev, conn);
 
+       device_initialize(&conn->dev);
+
        INIT_WORK(&conn->work, add_conn);
 
        schedule_work(&conn->work);
@@ -287,6 +289,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
 {
        BT_DBG("conn %p", conn);
 
+       if (!device_is_registered(&conn->dev))
+               return;
+
        INIT_WORK(&conn->work, del_conn);
 
        schedule_work(&conn->work);
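
Splitting device_register() into device_initialize() at setup time and device_add() in the deferred work is what makes the new device_is_registered() check in hci_conn_del_sysfs() meaningful: a connection whose add_conn work never ran, or whose device_add() failed, can now be skipped safely. device_register() is just those two calls back to back, so the end state is unchanged. A reduced sketch of the pattern, with hypothetical helpers and error handling omitted:

    #include <linux/device.h>

    static void conn_dev_setup(struct device *dev)
    {
            device_initialize(dev);         /* kobject/refcount live from here on */
            /* ... later, deferred work calls device_add(dev) ... */
    }

    static void conn_dev_teardown(struct device *dev)
    {
            if (!device_is_registered(dev)) /* device_add() never completed */
                    return;
            device_del(dev);
            put_device(dev);
    }
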
index 544d65b7baa7de67fe796ec2e4cf68a0c6051e1f..cb7e855f0828619fdc34095786f86d23a3d0532e 100644 (file)
@@ -557,7 +557,6 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
        struct sock *sk = sock->sk;
        struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
        struct sk_buff *skb;
-       int err;
        int sent = 0;
 
        if (msg->msg_flags & MSG_OOB)
@@ -572,6 +571,7 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 
        while (len) {
                size_t size = min_t(size_t, len, d->mtu);
+               int err;
                
                skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE,
                                msg->msg_flags & MSG_DONTWAIT, &err);
@@ -582,13 +582,16 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
                if (err) {
                        kfree_skb(skb);
-                       sent = err;
+                       if (sent == 0)
+                               sent = err;
                        break;
                }
 
                err = rfcomm_dlc_send(d, skb);
                if (err < 0) {
                        kfree_skb(skb);
+                       if (sent == 0)
+                               sent = err;
                        break;
                }
 
@@ -598,7 +601,7 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 
        release_sock(sk);
 
-       return sent ? sent : err;
+       return sent;
 }
 
 static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
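
The sendmsg change above adopts the usual partial-write contract: once at least one skb has been queued, a later failure must not clobber the byte count, and an error code is returned only when nothing was sent at all; it also stops returning err from outside the loop, where it could be uninitialized when the loop never runs. The same shape in a standalone sketch, with a hypothetical file-descriptor writer standing in for rfcomm_dlc_send():

    #include <sys/types.h>
    #include <unistd.h>

    /* Return bytes written if any were written, otherwise the first error. */
    static ssize_t send_all(int fd, const char *buf, size_t len)
    {
            ssize_t sent = 0;

            while (len) {
                    ssize_t n = write(fd, buf, len);

                    if (n <= 0) {
                            if (sent == 0)
                                    sent = n;   /* nothing went out: surface the failure */
                            break;              /* otherwise keep the partial count */
                    }
                    buf += n;
                    len -= n;
                    sent += n;
            }
            return sent;
    }
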
index e0e0d09023b2f0607e295397a699e5d7b2c55c25..eb2b52484c70e42e86cb329d093478cc225dab25 100644 (file)
@@ -697,9 +697,13 @@ static int rfcomm_tty_write_room(struct tty_struct *tty)
 
        BT_DBG("tty %p", tty);
 
+       if (!dev || !dev->dlc)
+               return 0;
+
        room = rfcomm_room(dev->dlc) - atomic_read(&dev->wmem_alloc);
        if (room < 0)
                room = 0;
+
        return room;
 }
 
@@ -915,12 +919,14 @@ static void rfcomm_tty_unthrottle(struct tty_struct *tty)
 static int rfcomm_tty_chars_in_buffer(struct tty_struct *tty)
 {
        struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
-       struct rfcomm_dlc *dlc = dev->dlc;
 
        BT_DBG("tty %p dev %p", tty, dev);
 
-       if (!skb_queue_empty(&dlc->tx_queue))
-               return dlc->mtu;
+       if (!dev || !dev->dlc)
+               return 0;
+
+       if (!skb_queue_empty(&dev->dlc->tx_queue))
+               return dev->dlc->mtu;
 
        return 0;
 }
@@ -928,11 +934,12 @@ static int rfcomm_tty_chars_in_buffer(struct tty_struct *tty)
 static void rfcomm_tty_flush_buffer(struct tty_struct *tty)
 {
        struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
-       if (!dev)
-               return;
 
        BT_DBG("tty %p dev %p", tty, dev);
 
+       if (!dev || !dev->dlc)
+               return;
+
        skb_queue_purge(&dev->dlc->tx_queue);
 
        if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) && tty->ldisc.write_wakeup)
@@ -952,11 +959,12 @@ static void rfcomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
 static void rfcomm_tty_hangup(struct tty_struct *tty)
 {
        struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
-       if (!dev)
-               return;
 
        BT_DBG("tty %p dev %p", tty, dev);
 
+       if (!dev)
+               return;
+
        rfcomm_tty_flush_buffer(tty);
 
        if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags))
index bee558a41800e2700a4f00e84e06224f57d368b0..6c84ccb8c9d754d045bc6351812917f6cfbd3cb3 100644 (file)
@@ -610,7 +610,7 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
        struct ebt_entry_target *t;
        struct ebt_target *target;
        unsigned int i, j, hook = 0, hookmask = 0;
-       size_t gap = e->next_offset - e->target_offset;
+       size_t gap;
        int ret;
 
        /* don't mess with the struct ebt_entries */
@@ -660,6 +660,7 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
        if (ret != 0)
                goto cleanup_watchers;
        t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
+       gap = e->next_offset - e->target_offset;
        target = find_target_lock(t->u.name, &ret, &ebt_mutex);
        if (!target)
                goto cleanup_watchers;
index 1897a3a385d803808ac2f30a6abad2de3aa36967..04d4b93c68eb0b226cdd7c0a4385402d6be27aa9 100644 (file)
 #include <linux/seq_file.h>
 #include <linux/wait.h>
 #include <linux/etherdevice.h>
+#include <linux/kthread.h>
 #include <net/checksum.h>
 #include <net/ipv6.h>
 #include <net/addrconf.h>
@@ -360,8 +361,7 @@ struct pktgen_thread {
        spinlock_t if_lock;
        struct list_head if_list;       /* All device here */
        struct list_head th_list;
-       int removed;
-       char name[32];
+       struct task_struct *tsk;
        char result[512];
        u32 max_before_softirq; /* We'll call do_softirq to prevent starvation. */
 
@@ -1689,7 +1689,7 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
        BUG_ON(!t);
 
        seq_printf(seq, "Name: %s  max_before_softirq: %d\n",
-                  t->name, t->max_before_softirq);
+                  t->tsk->comm, t->max_before_softirq);
 
        seq_printf(seq, "Running: ");
 
@@ -3112,7 +3112,7 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
 {
        /* Remove from the thread list */
 
-       remove_proc_entry(t->name, pg_proc_dir);
+       remove_proc_entry(t->tsk->comm, pg_proc_dir);
 
        mutex_lock(&pktgen_thread_lock);
 
@@ -3260,58 +3260,40 @@ out:;
  * Main loop of the thread goes here
  */
 
-static void pktgen_thread_worker(struct pktgen_thread *t)
+static int pktgen_thread_worker(void *arg)
 {
        DEFINE_WAIT(wait);
+       struct pktgen_thread *t = arg;
        struct pktgen_dev *pkt_dev = NULL;
        int cpu = t->cpu;
-       sigset_t tmpsig;
        u32 max_before_softirq;
        u32 tx_since_softirq = 0;
 
-       daemonize("pktgen/%d", cpu);
-
-       /* Block all signals except SIGKILL, SIGSTOP and SIGTERM */
-
-       spin_lock_irq(&current->sighand->siglock);
-       tmpsig = current->blocked;
-       siginitsetinv(&current->blocked,
-                     sigmask(SIGKILL) | sigmask(SIGSTOP) | sigmask(SIGTERM));
-
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       /* Migrate to the right CPU */
-       set_cpus_allowed(current, cpumask_of_cpu(cpu));
-       if (smp_processor_id() != cpu)
-               BUG();
+       BUG_ON(smp_processor_id() != cpu);
 
        init_waitqueue_head(&t->queue);
 
-       t->control &= ~(T_TERMINATE);
-       t->control &= ~(T_RUN);
-       t->control &= ~(T_STOP);
-       t->control &= ~(T_REMDEVALL);
-       t->control &= ~(T_REMDEV);
-
        t->pid = current->pid;
 
        PG_DEBUG(printk("pktgen: starting pktgen/%d:  pid=%d\n", cpu, current->pid));
 
        max_before_softirq = t->max_before_softirq;
 
-       __set_current_state(TASK_INTERRUPTIBLE);
-       mb();
+       set_current_state(TASK_INTERRUPTIBLE);
 
-       while (1) {
-
-               __set_current_state(TASK_RUNNING);
+       while (!kthread_should_stop()) {
+               pkt_dev = next_to_run(t);
 
-               /*
-                * Get next dev to xmit -- if any.
-                */
+               if (!pkt_dev &&
+                   (t->control & (T_STOP | T_RUN | T_REMDEVALL | T_REMDEV))
+                   == 0) {
+                       prepare_to_wait(&(t->queue), &wait,
+                                       TASK_INTERRUPTIBLE);
+                       schedule_timeout(HZ / 10);
+                       finish_wait(&(t->queue), &wait);
+               }
 
-               pkt_dev = next_to_run(t);
+               __set_current_state(TASK_RUNNING);
 
                if (pkt_dev) {
 
@@ -3329,21 +3311,8 @@ static void pktgen_thread_worker(struct pktgen_thread *t)
                                        do_softirq();
                                tx_since_softirq = 0;
                        }
-               } else {
-                       prepare_to_wait(&(t->queue), &wait, TASK_INTERRUPTIBLE);
-                       schedule_timeout(HZ / 10);
-                       finish_wait(&(t->queue), &wait);
                }
 
-               /*
-                * Back from sleep, either due to the timeout or signal.
-                * We check if we have any "posted" work for us.
-                */
-
-               if (t->control & T_TERMINATE || signal_pending(current))
-                       /* we received a request to terminate ourself */
-                       break;
-
                if (t->control & T_STOP) {
                        pktgen_stop(t);
                        t->control &= ~(T_STOP);
@@ -3364,20 +3333,19 @@ static void pktgen_thread_worker(struct pktgen_thread *t)
                        t->control &= ~(T_REMDEV);
                }
 
-               if (need_resched())
-                       schedule();
+               set_current_state(TASK_INTERRUPTIBLE);
        }
 
-       PG_DEBUG(printk("pktgen: %s stopping all device\n", t->name));
+       PG_DEBUG(printk("pktgen: %s stopping all device\n", t->tsk->comm));
        pktgen_stop(t);
 
-       PG_DEBUG(printk("pktgen: %s removing all device\n", t->name));
+       PG_DEBUG(printk("pktgen: %s removing all device\n", t->tsk->comm));
        pktgen_rem_all_ifs(t);
 
-       PG_DEBUG(printk("pktgen: %s removing thread.\n", t->name));
+       PG_DEBUG(printk("pktgen: %s removing thread.\n", t->tsk->comm));
        pktgen_rem_thread(t);
 
-       t->removed = 1;
+       return 0;
 }
 
 static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
@@ -3495,37 +3463,11 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
        return add_dev_to_thread(t, pkt_dev);
 }
 
-static struct pktgen_thread *__init pktgen_find_thread(const char *name)
+static int __init pktgen_create_thread(int cpu)
 {
        struct pktgen_thread *t;
-
-       mutex_lock(&pktgen_thread_lock);
-
-       list_for_each_entry(t, &pktgen_threads, th_list)
-               if (strcmp(t->name, name) == 0) {
-                       mutex_unlock(&pktgen_thread_lock);
-                       return t;
-               }
-
-       mutex_unlock(&pktgen_thread_lock);
-       return NULL;
-}
-
-static int __init pktgen_create_thread(const char *name, int cpu)
-{
-       int err;
-       struct pktgen_thread *t = NULL;
        struct proc_dir_entry *pe;
-
-       if (strlen(name) > 31) {
-               printk("pktgen: ERROR:  Thread name cannot be more than 31 characters.\n");
-               return -EINVAL;
-       }
-
-       if (pktgen_find_thread(name)) {
-               printk("pktgen: ERROR: thread: %s already exists\n", name);
-               return -EINVAL;
-       }
+       struct task_struct *p;
 
        t = kzalloc(sizeof(struct pktgen_thread), GFP_KERNEL);
        if (!t) {
@@ -3533,14 +3475,29 @@ static int __init pktgen_create_thread(const char *name, int cpu)
                return -ENOMEM;
        }
 
-       strcpy(t->name, name);
        spin_lock_init(&t->if_lock);
        t->cpu = cpu;
 
-       pe = create_proc_entry(t->name, 0600, pg_proc_dir);
+       INIT_LIST_HEAD(&t->if_list);
+
+       list_add_tail(&t->th_list, &pktgen_threads);
+
+       p = kthread_create(pktgen_thread_worker, t, "kpktgend_%d", cpu);
+       if (IS_ERR(p)) {
+               printk("pktgen: kernel_thread() failed for cpu %d\n", t->cpu);
+               list_del(&t->th_list);
+               kfree(t);
+               return PTR_ERR(p);
+       }
+       kthread_bind(p, cpu);
+       t->tsk = p;
+
+       pe = create_proc_entry(t->tsk->comm, 0600, pg_proc_dir);
        if (!pe) {
                printk("pktgen: cannot create %s/%s procfs entry.\n",
-                      PG_PROC_DIR, t->name);
+                      PG_PROC_DIR, t->tsk->comm);
+               kthread_stop(p);
+               list_del(&t->th_list);
                kfree(t);
                return -EINVAL;
        }
@@ -3548,21 +3505,7 @@ static int __init pktgen_create_thread(const char *name, int cpu)
        pe->proc_fops = &pktgen_thread_fops;
        pe->data = t;
 
-       INIT_LIST_HEAD(&t->if_list);
-
-       list_add_tail(&t->th_list, &pktgen_threads);
-
-       t->removed = 0;
-
-       err = kernel_thread((void *)pktgen_thread_worker, (void *)t,
-                         CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
-       if (err < 0) {
-               printk("pktgen: kernel_thread() failed for cpu %d\n", t->cpu);
-               remove_proc_entry(t->name, pg_proc_dir);
-               list_del(&t->th_list);
-               kfree(t);
-               return err;
-       }
+       wake_up_process(p);
 
        return 0;
 }
@@ -3643,10 +3586,8 @@ static int __init pg_init(void)
 
        for_each_online_cpu(cpu) {
                int err;
-               char buf[30];
 
-               sprintf(buf, "kpktgend_%i", cpu);
-               err = pktgen_create_thread(buf, cpu);
+               err = pktgen_create_thread(cpu);
                if (err)
                        printk("pktgen: WARNING: Cannot create thread for cpu %d (%d)\n",
                                        cpu, err);
@@ -3674,9 +3615,8 @@ static void __exit pg_cleanup(void)
 
        list_for_each_safe(q, n, &pktgen_threads) {
                t = list_entry(q, struct pktgen_thread, th_list);
-               t->control |= (T_TERMINATE);
-
-               wait_event_interruptible_timeout(queue, (t->removed == 1), HZ);
+               kthread_stop(t->tsk);
+               kfree(t);
        }
 
        /* Un-register us from receiving netdevice events */
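
The pktgen rewrite above is a wholesale conversion from the old kernel_thread()/daemonize() idiom to the kthread API: kthread_create() builds the task, kthread_bind() pins it to its CPU before it first runs, wake_up_process() starts it, the worker loops on kthread_should_stop() instead of a private T_TERMINATE flag, and module unload calls kthread_stop(), which wakes the worker and blocks until it returns. A minimal sketch of that lifecycle with a made-up per-CPU worker:

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int worker_fn(void *arg)
    {
            while (!kthread_should_stop()) {
                    /* do one unit of work, or nap briefly when idle */
                    schedule_timeout_interruptible(HZ / 10);
            }
            return 0;                       /* handed back to kthread_stop() */
    }

    static struct task_struct *start_worker(int cpu)
    {
            struct task_struct *p;

            p = kthread_create(worker_fn, NULL, "sketch_worker/%d", cpu);
            if (IS_ERR(p))
                    return p;
            kthread_bind(p, cpu);           /* must precede the first wakeup */
            wake_up_process(p);
            return p;
    }

    /* teardown: kthread_stop(p) flags the worker, wakes it and waits for it. */
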
index 1144900d37f6dbd6b91b19e307770db265d9c18c..d60fd7321e6363f98a1c09bbfb2a3cc7d9853415 100644 (file)
@@ -305,7 +305,7 @@ lookup_protocol:
                sk->sk_reuse = 1;
 
        inet = inet_sk(sk);
-       inet->is_icsk = INET_PROTOSW_ICSK & answer_flags;
+       inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) == INET_PROTOSW_ICSK;
 
        if (SOCK_RAW == sock->type) {
                inet->num = protocol;
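
This one-liner (repeated for IPv6 further down) matters because is_icsk is a one-bit field in struct inet_sock: assigning the raw masked value keeps only bit 0, so a flag whose bit sits higher up is silently truncated to 0. Comparing against the mask yields a clean 0/1 before the assignment. A standalone illustration with a hypothetical flag value:

    #include <stdio.h>

    #define FLAG_ICSK 0x4   /* hypothetical; any flag above bit 0 shows the bug */

    struct s {
            unsigned char is_icsk:1;
    };

    int main(void)
    {
            unsigned int flags = FLAG_ICSK;
            struct s a, b;

            a.is_icsk = FLAG_ICSK & flags;                  /* truncates to 0 */
            b.is_icsk = (FLAG_ICSK & flags) == FLAG_ICSK;   /* 1, as intended */
            printf("%u %u\n", a.is_icsk, b.is_icsk);
            return 0;
    }
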
index 84bed40273adc1413bb6480257f4a1adca712e28..25c8a42965dff1d3eeea77f8ac8dad2809c33e28 100644 (file)
@@ -165,9 +165,8 @@ struct in_device *inetdev_init(struct net_device *dev)
                              NET_IPV4_NEIGH, "ipv4", NULL, NULL);
 #endif
 
-       /* Account for reference dev->ip_ptr */
+       /* Account for reference dev->ip_ptr (below) */
        in_dev_hold(in_dev);
-       rcu_assign_pointer(dev->ip_ptr, in_dev);
 
 #ifdef CONFIG_SYSCTL
        devinet_sysctl_register(in_dev, &in_dev->cnf);
@@ -176,6 +175,8 @@ struct in_device *inetdev_init(struct net_device *dev)
        if (dev->flags & IFF_UP)
                ip_mc_up(in_dev);
 out:
+       /* we can receive as soon as ip_ptr is set -- do this last */
+       rcu_assign_pointer(dev->ip_ptr, in_dev);
        return in_dev;
 out_kfree:
        kfree(in_dev);
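
The devinet hunk (and the matching addrconf change below) reorders initialization so that rcu_assign_pointer() is the very last step: as the new comment says, packets can be delivered the moment dev->ip_ptr is visible, so the in_device must be fully set up before it is published to RCU readers. The general init-then-publish shape, with hypothetical types:

    #include <linux/errno.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo { int a; int b; };
    static struct foo *global_foo;               /* readers use rcu_dereference() */

    static int publish_foo(void)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return -ENOMEM;
            f->a = 1;                            /* finish all initialization ... */
            f->b = 2;
            rcu_assign_pointer(global_foo, f);   /* ... then make it reachable */
            return 0;
    }
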
index a68966059b505ef76e9dd19a7b220752382d18e2..c47ce7076bd55fd66b4688d2d081daa9c2a3aca2 100644 (file)
@@ -15,16 +15,19 @@ int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type)
        struct flowi fl = {};
        struct dst_entry *odst;
        unsigned int hh_len;
+       unsigned int type;
 
+       type = inet_addr_type(iph->saddr);
        if (addr_type == RTN_UNSPEC)
-               addr_type = inet_addr_type(iph->saddr);
+               addr_type = type;
 
        /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
         * packets with foreign saddr to appear on the NF_IP_LOCAL_OUT hook.
         */
        if (addr_type == RTN_LOCAL) {
                fl.nl_u.ip4_u.daddr = iph->daddr;
-               fl.nl_u.ip4_u.saddr = iph->saddr;
+               if (type == RTN_LOCAL)
+                       fl.nl_u.ip4_u.saddr = iph->saddr;
                fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
                fl.oif = (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0;
                fl.mark = (*pskb)->mark;
index f6026d4ac428d1bab8f1f59080e6d55c315275a7..47bd3ad18b71eda5ca14eff95d9a67fd609eb59f 100644 (file)
@@ -6,8 +6,8 @@ menu "IP: Netfilter Configuration"
        depends on INET && NETFILTER
 
 config NF_CONNTRACK_IPV4
-       tristate "IPv4 connection tracking support (required for NAT) (EXPERIMENTAL)"
-       depends on EXPERIMENTAL && NF_CONNTRACK
+       tristate "IPv4 connection tracking support (required for NAT)"
+       depends on NF_CONNTRACK
        ---help---
          Connection tracking keeps a record of what packets have passed
          through your machine, in order to figure out how they are related
index 09696f16aa95269a367d2f97a16375bf1eefadfd..fc1f153c86ba1f33a261a038fa2da3e87b38531f 100644 (file)
@@ -919,13 +919,13 @@ copy_entries_to_user(unsigned int total_size,
 #ifdef CONFIG_COMPAT
 struct compat_delta {
        struct compat_delta *next;
-       u_int16_t offset;
+       unsigned int offset;
        short delta;
 };
 
 static struct compat_delta *compat_offsets = NULL;
 
-static int compat_add_offset(u_int16_t offset, short delta)
+static int compat_add_offset(unsigned int offset, short delta)
 {
        struct compat_delta *tmp;
 
@@ -957,7 +957,7 @@ static void compat_flush_offsets(void)
        }
 }
 
-static short compat_calc_jump(u_int16_t offset)
+static short compat_calc_jump(unsigned int offset)
 {
        struct compat_delta *tmp;
        short delta;
@@ -997,7 +997,7 @@ static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
                void *base, struct xt_table_info *newinfo)
 {
        struct ipt_entry_target *t;
-       u_int16_t entry_offset;
+       unsigned int entry_offset;
        int off, i, ret;
 
        off = 0;
@@ -1467,7 +1467,7 @@ check_compat_entry_size_and_hooks(struct ipt_entry *e,
 {
        struct ipt_entry_target *t;
        struct ipt_target *target;
-       u_int16_t entry_offset;
+       unsigned int entry_offset;
        int ret, off, h, j;
 
        duprintf("check_compat_entry_size_and_hooks %p\n", e);
index 28b9233956b57c1f6ab30931e3752f8be05f47d1..d669685afd04dac843529c8171bd4994af8b8813 100644 (file)
@@ -127,10 +127,13 @@ masquerade_target(struct sk_buff **pskb,
 static inline int
 device_cmp(struct ip_conntrack *i, void *ifindex)
 {
+       int ret;
 #ifdef CONFIG_NF_NAT_NEEDED
        struct nf_conn_nat *nat = nfct_nat(i);
+
+       if (!nat)
+               return 0;
 #endif
-       int ret;
 
        read_lock_bh(&masq_lock);
 #ifdef CONFIG_NF_NAT_NEEDED
index bf7a22412bcb9b25c928773322aa96b2754137c3..12de90a5047cf214cd4bd1851331ac14851c0783 100644 (file)
@@ -648,7 +648,7 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
                                   TCPOLEN_TIMESTAMP);
                rep.opt[1] = htonl(tcp_time_stamp);
                rep.opt[2] = htonl(ts);
-               arg.iov[0].iov_len = TCPOLEN_TSTAMP_ALIGNED;
+               arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
        }
 
        /* Swap the send and the receive. */
index 9b0a906431510d8acda8501bc0f40e42b8869a43..171e5b55d7d65aa973183b5b8c5d9ed6d311c6d6 100644 (file)
@@ -413,8 +413,6 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
        if (netif_carrier_ok(dev))
                ndev->if_flags |= IF_READY;
 
-       /* protected by rtnl_lock */
-       rcu_assign_pointer(dev->ip6_ptr, ndev);
 
        ipv6_mc_init_dev(ndev);
        ndev->tstamp = jiffies;
@@ -425,6 +423,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
                              NULL);
        addrconf_sysctl_register(ndev, &ndev->cnf);
 #endif
+       /* protected by rtnl_lock */
+       rcu_assign_pointer(dev->ip6_ptr, ndev);
        return ndev;
 }
 
index e5cd83b2205d41e9a2689224b12d4d8dbfaec2e9..832a5e6e2d7e6270808bbfc70a26eddba49250a6 100644 (file)
@@ -171,7 +171,7 @@ lookup_protocol:
                sk->sk_reuse = 1;
 
        inet = inet_sk(sk);
-       inet->is_icsk = INET_PROTOSW_ICSK & answer_flags;
+       inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) == INET_PROTOSW_ICSK;
 
        if (SOCK_RAW == sock->type) {
                inet->num = protocol;
index 1b853c34d301b628c2eb22600ca747dad68a4ca3..cd10e44db015d59c7e96bdc0207a1b3a5ec5631f 100644 (file)
@@ -44,8 +44,7 @@ choice
        depends on NF_CONNTRACK_ENABLED
 
 config NF_CONNTRACK_SUPPORT
-       bool "Layer 3 Independent Connection tracking (EXPERIMENTAL)"
-       depends on EXPERIMENTAL
+       bool "Layer 3 Independent Connection tracking"
        help
          Layer 3 independent connection tracking is experimental scheme
          which generalize ip_conntrack to support other layer 3 protocols.
@@ -122,7 +121,7 @@ config NF_CONNTRACK_EVENTS
 
 config NF_CT_PROTO_GRE
        tristate
-       depends on EXPERIMENTAL && NF_CONNTRACK
+       depends on NF_CONNTRACK
 
 config NF_CT_PROTO_SCTP
        tristate 'SCTP protocol connection tracking support (EXPERIMENTAL)'
@@ -136,8 +135,8 @@ config NF_CT_PROTO_SCTP
          Documentation/modules.txt.  If unsure, say `N'.
 
 config NF_CONNTRACK_AMANDA
-       tristate "Amanda backup protocol support (EXPERIMENTAL)"
-       depends on EXPERIMENTAL && NF_CONNTRACK
+       tristate "Amanda backup protocol support"
+       depends on NF_CONNTRACK
        select TEXTSEARCH
        select TEXTSEARCH_KMP
        help
@@ -151,8 +150,8 @@ config NF_CONNTRACK_AMANDA
          To compile it as a module, choose M here.  If unsure, say N.
 
 config NF_CONNTRACK_FTP
-       tristate "FTP protocol support (EXPERIMENTAL)"
-       depends on EXPERIMENTAL && NF_CONNTRACK
+       tristate "FTP protocol support"
+       depends on NF_CONNTRACK
        help
          Tracking FTP connections is problematic: special helpers are
          required for tracking them, and doing masquerading and other forms
@@ -184,8 +183,8 @@ config NF_CONNTRACK_H323
          To compile it as a module, choose M here.  If unsure, say N.
 
 config NF_CONNTRACK_IRC
-       tristate "IRC protocol support (EXPERIMENTAL)"
-       depends on EXPERIMENTAL && NF_CONNTRACK
+       tristate "IRC protocol support"
+       depends on NF_CONNTRACK
        help
          There is a commonly-used extension to IRC called
          Direct Client-to-Client Protocol (DCC).  This enables users to send
@@ -218,8 +217,8 @@ config NF_CONNTRACK_NETBIOS_NS
          To compile it as a module, choose M here.  If unsure, say N.
 
 config NF_CONNTRACK_PPTP
-       tristate "PPtP protocol support (EXPERIMENTAL)"
-       depends on EXPERIMENTAL && NF_CONNTRACK
+       tristate "PPtP protocol support"
+       depends on NF_CONNTRACK
        select NF_CT_PROTO_GRE
        help
          This module adds support for PPTP (Point to Point Tunnelling
@@ -249,8 +248,8 @@ config NF_CONNTRACK_SIP
          To compile it as a module, choose M here.  If unsure, say N.
 
 config NF_CONNTRACK_TFTP
-       tristate "TFTP protocol support (EXPERIMENTAL)"
-       depends on EXPERIMENTAL && NF_CONNTRACK
+       tristate "TFTP protocol support"
+       depends on NF_CONNTRACK
        help
          TFTP connection tracking helper, this is required depending
          on how restrictive your ruleset is.
index a5b234e444dc789c2ae5fe679408a329512bb847..2a48efdf0d6764bc85bf3b2bb0e6cadc4171ccf6 100644 (file)
@@ -89,6 +89,7 @@ static int help(struct sk_buff **pskb, unsigned int protoff,
 
        exp->expectfn             = NULL;
        exp->flags                = NF_CT_EXPECT_PERMANENT;
+       exp->helper               = NULL;
 
        nf_conntrack_expect_related(exp);
        nf_conntrack_expect_put(exp);
index a5a6e192ac2d07ae5fe188b81777b75e645f01cc..f28bf69d3d4270d835ffa279182377e90934bd95 100644 (file)
@@ -745,7 +745,7 @@ static int __init xt_hashlimit_init(void)
        }
        hashlimit_procdir6 = proc_mkdir("ip6t_hashlimit", proc_net);
        if (!hashlimit_procdir6) {
-               printk(KERN_ERR "xt_hashlimit: tnable to create proc dir "
+               printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
                                "entry\n");
                goto err4;
        }
index 4afc75f9e377b7bb167749b8a51ec50589182cf9..73e0ff469bff284c765bc80ebcc723af3e81f38e 100644 (file)
@@ -130,12 +130,12 @@ static int netlbl_cipsov4_add_common(struct genl_info *info,
 
        nla_for_each_nested(nla, info->attrs[NLBL_CIPSOV4_A_TAGLST], nla_rem)
                if (nla->nla_type == NLBL_CIPSOV4_A_TAG) {
-                       if (iter > CIPSO_V4_TAG_MAXCNT)
+                       if (iter >= CIPSO_V4_TAG_MAXCNT)
                                return -EINVAL;
                        doi_def->tags[iter++] = nla_get_u8(nla);
                }
-       if (iter < CIPSO_V4_TAG_MAXCNT)
-               doi_def->tags[iter] = CIPSO_V4_TAG_INVALID;
+       while (iter < CIPSO_V4_TAG_MAXCNT)
+               doi_def->tags[iter++] = CIPSO_V4_TAG_INVALID;
 
        return 0;
 }
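
Two things change in the CIPSO tag parser: the bound check becomes >= so a write can never land one slot past the tags[] array, and the trailing if becomes a while so every unused slot, not just the first, is marked invalid. A compact sketch of the same fill-and-pad discipline with hypothetical constants:

    #include <errno.h>

    #define MAXCNT 5

    static int fill_tags(unsigned char *tags, const unsigned char *src, int n)
    {
            int iter = 0;

            while (n--) {
                    if (iter >= MAXCNT)     /* '>' here allows one write past the end */
                            return -EINVAL;
                    tags[iter++] = *src++;
            }
            while (iter < MAXCNT)           /* pad every remaining slot, not just one */
                    tags[iter++] = 0xff;    /* hypothetical "invalid tag" marker */
            return 0;
    }
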
index 276131fe56dddb140afcfaf15e46bd6ead0263fa..383dd4e82ee150fc33a3a8b726c0d5e02ff7f4b8 100644 (file)
@@ -472,8 +472,7 @@ static int netlink_release(struct socket *sock)
                                NETLINK_URELEASE, &n);
        }       
 
-       if (nlk->module)
-               module_put(nlk->module);
+       module_put(nlk->module);
 
        netlink_table_grab();
        if (nlk->flags & NETLINK_KERNEL_SOCKET) {
index 52a2726d327fe68f6c7ea003894902b204f25e99..b5c80b18990224439e512fb819a96e75fb909b97 100644 (file)
@@ -484,8 +484,6 @@ out:
        return sk;
 }
 
-void x25_init_timers(struct sock *sk);
-
 static int x25_create(struct socket *sock, int protocol)
 {
        struct sock *sk;
index 9f42b9c9de37b67da9e663c4f9b393a37f1a1d3e..27f5cc7966f691414b26bdce5c228e6e940f512b 100644 (file)
@@ -254,7 +254,7 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
         *      They want reverse charging, we won't accept it.
         */
        if ((theirs.reverse & 0x01 ) && (ours->reverse & 0x01)) {
-               SOCK_DEBUG(sk, "X.25: rejecting reverse charging request");
+               SOCK_DEBUG(sk, "X.25: rejecting reverse charging request\n");
                return -1;
        }
 
@@ -262,29 +262,29 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
 
        if (theirs.throughput) {
                if (theirs.throughput < ours->throughput) {
-                       SOCK_DEBUG(sk, "X.25: throughput negotiated down");
+                       SOCK_DEBUG(sk, "X.25: throughput negotiated down\n");
                        new->throughput = theirs.throughput;
                }
        }
 
        if (theirs.pacsize_in && theirs.pacsize_out) {
                if (theirs.pacsize_in < ours->pacsize_in) {
-                       SOCK_DEBUG(sk, "X.25: packet size inwards negotiated down");
+                       SOCK_DEBUG(sk, "X.25: packet size inwards negotiated down\n");
                        new->pacsize_in = theirs.pacsize_in;
                }
                if (theirs.pacsize_out < ours->pacsize_out) {
-                       SOCK_DEBUG(sk, "X.25: packet size outwards negotiated down");
+                       SOCK_DEBUG(sk, "X.25: packet size outwards negotiated down\n");
                        new->pacsize_out = theirs.pacsize_out;
                }
        }
 
        if (theirs.winsize_in && theirs.winsize_out) {
                if (theirs.winsize_in < ours->winsize_in) {
-                       SOCK_DEBUG(sk, "X.25: window size inwards negotiated down");
+                       SOCK_DEBUG(sk, "X.25: window size inwards negotiated down\n");
                        new->winsize_in = theirs.winsize_in;
                }
                if (theirs.winsize_out < ours->winsize_out) {
-                       SOCK_DEBUG(sk, "X.25: window size outwards negotiated down");
+                       SOCK_DEBUG(sk, "X.25: window size outwards negotiated down\n");
                        new->winsize_out = theirs.winsize_out;
                }
        }
index e5372b11fc8f105bd4ecf6071c0418f71855760c..82f36d396fca5fa56f0bb44f8dbb4d7ad2db7bf0 100644 (file)
@@ -434,18 +434,19 @@ error_no_put:
        return NULL;
 }
 
-static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
+static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
+               struct rtattr **xfrma)
 {
        struct xfrm_usersa_info *p = NLMSG_DATA(nlh);
        struct xfrm_state *x;
        int err;
        struct km_event c;
 
-       err = verify_newsa_info(p, (struct rtattr **)xfrma);
+       err = verify_newsa_info(p, xfrma);
        if (err)
                return err;
 
-       x = xfrm_state_construct(p, (struct rtattr **)xfrma, &err);
+       x = xfrm_state_construct(p, xfrma, &err);
        if (!x)
                return err;
 
@@ -507,14 +508,15 @@ static struct xfrm_state *xfrm_user_state_lookup(struct xfrm_usersa_id *p,
        return x;
 }
 
-static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
+static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
+               struct rtattr **xfrma)
 {
        struct xfrm_state *x;
        int err = -ESRCH;
        struct km_event c;
        struct xfrm_usersa_id *p = NLMSG_DATA(nlh);
 
-       x = xfrm_user_state_lookup(p, (struct rtattr **)xfrma, &err);
+       x = xfrm_user_state_lookup(p, xfrma, &err);
        if (x == NULL)
                return err;
 
@@ -672,14 +674,15 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
        return skb;
 }
 
-static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
+static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
+               struct rtattr **xfrma)
 {
        struct xfrm_usersa_id *p = NLMSG_DATA(nlh);
        struct xfrm_state *x;
        struct sk_buff *resp_skb;
        int err = -ESRCH;
 
-       x = xfrm_user_state_lookup(p, (struct rtattr **)xfrma, &err);
+       x = xfrm_user_state_lookup(p, xfrma, &err);
        if (x == NULL)
                goto out_noput;
 
@@ -718,7 +721,8 @@ static int verify_userspi_info(struct xfrm_userspi_info *p)
        return 0;
 }
 
-static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
+static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
+               struct rtattr **xfrma)
 {
        struct xfrm_state *x;
        struct xfrm_userspi_info *p;
@@ -1013,7 +1017,8 @@ static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p,
        return NULL;
 }
 
-static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
+static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
+               struct rtattr **xfrma)
 {
        struct xfrm_userpolicy_info *p = NLMSG_DATA(nlh);
        struct xfrm_policy *xp;
@@ -1024,11 +1029,11 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr
        err = verify_newpolicy_info(p);
        if (err)
                return err;
-       err = verify_sec_ctx_len((struct rtattr **)xfrma);
+       err = verify_sec_ctx_len(xfrma);
        if (err)
                return err;
 
-       xp = xfrm_policy_construct(p, (struct rtattr **)xfrma, &err);
+       xp = xfrm_policy_construct(p, xfrma, &err);
        if (!xp)
                return err;
 
@@ -1227,7 +1232,8 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
        return skb;
 }
 
-static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
+static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
+               struct rtattr **xfrma)
 {
        struct xfrm_policy *xp;
        struct xfrm_userpolicy_id *p;
@@ -1239,7 +1245,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr
        p = NLMSG_DATA(nlh);
        delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
 
-       err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma);
+       err = copy_from_user_policy_type(&type, xfrma);
        if (err)
                return err;
 
@@ -1250,11 +1256,10 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr
        if (p->index)
                xp = xfrm_policy_byid(type, p->dir, p->index, delete);
        else {
-               struct rtattr **rtattrs = (struct rtattr **)xfrma;
-               struct rtattr *rt = rtattrs[XFRMA_SEC_CTX-1];
+               struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1];
                struct xfrm_policy tmp;
 
-               err = verify_sec_ctx_len(rtattrs);
+               err = verify_sec_ctx_len(xfrma);
                if (err)
                        return err;
 
@@ -1302,7 +1307,8 @@ out:
        return err;
 }
 
-static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
+static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
+               struct rtattr **xfrma)
 {
        struct km_event c;
        struct xfrm_usersa_flush *p = NLMSG_DATA(nlh);
@@ -1367,7 +1373,8 @@ nlmsg_failure:
        return -1;
 }
 
-static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
+static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
+               struct rtattr **xfrma)
 {
        struct xfrm_state *x;
        struct sk_buff *r_skb;
@@ -1415,7 +1422,8 @@ static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
        return err;
 }
 
-static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
+static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
+               struct rtattr **xfrma)
 {
        struct xfrm_state *x;
        struct km_event c;
@@ -1439,7 +1447,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
                goto out;
 
        spin_lock_bh(&x->lock);
-       err = xfrm_update_ae_params(x,(struct rtattr **)xfrma);
+       err = xfrm_update_ae_params(x, xfrma);
        spin_unlock_bh(&x->lock);
        if (err < 0)
                goto out;
@@ -1455,14 +1463,15 @@ out:
        return err;
 }
 
-static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
+static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
+               struct rtattr **xfrma)
 {
        struct km_event c;
        u8 type = XFRM_POLICY_TYPE_MAIN;
        int err;
        struct xfrm_audit audit_info;
 
-       err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma);
+       err = copy_from_user_policy_type(&type, xfrma);
        if (err)
                return err;
 
@@ -1477,7 +1486,8 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **x
        return 0;
 }
 
-static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
+static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
+               struct rtattr **xfrma)
 {
        struct xfrm_policy *xp;
        struct xfrm_user_polexpire *up = NLMSG_DATA(nlh);
@@ -1485,18 +1495,17 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, void *
        u8 type = XFRM_POLICY_TYPE_MAIN;
        int err = -ENOENT;
 
-       err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma);
+       err = copy_from_user_policy_type(&type, xfrma);
        if (err)
                return err;
 
        if (p->index)
                xp = xfrm_policy_byid(type, p->dir, p->index, 0);
        else {
-               struct rtattr **rtattrs = (struct rtattr **)xfrma;
-               struct rtattr *rt = rtattrs[XFRMA_SEC_CTX-1];
+               struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1];
                struct xfrm_policy tmp;
 
-               err = verify_sec_ctx_len(rtattrs);
+               err = verify_sec_ctx_len(xfrma);
                if (err)
                        return err;
 
@@ -1537,7 +1546,8 @@ out:
        return err;
 }
 
-static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
+static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
+               struct rtattr **xfrma)
 {
        struct xfrm_state *x;
        int err;
@@ -1568,7 +1578,8 @@ out:
        return err;
 }
 
-static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
+static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
+               struct rtattr **xfrma)
 {
        struct xfrm_policy *xp;
        struct xfrm_user_tmpl *ut;
@@ -1647,7 +1658,7 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
 #undef XMSGSIZE
 
 static struct xfrm_link {
-       int (*doit)(struct sk_buff *, struct nlmsghdr *, void **);
+       int (*doit)(struct sk_buff *, struct nlmsghdr *, struct rtattr **);
        int (*dump)(struct sk_buff *, struct netlink_callback *);
 } xfrm_dispatch[XFRM_NR_MSGTYPES] = {
        [XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa        },
@@ -1735,7 +1746,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *err
 
        if (link->doit == NULL)
                goto err_einval;
-       *errp = link->doit(skb, nlh, (void **) &xfrma);
+       *errp = link->doit(skb, nlh, xfrma);
 
        return *errp;
 
index 0b2fcc417f590a0eed79b5709f0e48d50c8dda11..a8ffc329666a18abd5fa5095f3e2ae940de229c1 100644 (file)
@@ -925,6 +925,8 @@ ConfigInfoView::ConfigInfoView(QWidget* parent, const char *name)
                configSettings->endGroup();
                connect(configApp, SIGNAL(aboutToQuit()), SLOT(saveSettings()));
        }
+
+       has_dbg_info = 0;
 }
 
 void ConfigInfoView::saveSettings(void)
@@ -953,10 +955,13 @@ void ConfigInfoView::setInfo(struct menu *m)
        if (menu == m)
                return;
        menu = m;
-       if (!menu)
+       if (!menu) {
+               has_dbg_info = 0;
                clear();
-       else
+       } else {
+               has_dbg_info = 1;
                menuInfo();
+       }
 }
 
 void ConfigInfoView::setSource(const QString& name)
@@ -991,6 +996,9 @@ void ConfigInfoView::symbolInfo(void)
 {
        QString str;
 
+       if (!has_dbg_info)
+               return;
+
        str += "<big>Symbol: <b>";
        str += print_filter(sym->name);
        str += "</b></big><br><br>value: ";
index 6fc1c5f14425162aab07eb5e35f8d24f0c8ccadc..a397edb5adcf0fafda192169bd0f75afa1b0172c 100644 (file)
@@ -273,6 +273,8 @@ protected:
        struct symbol *sym;
        struct menu *menu;
        bool _showDebug;
+
+       int has_dbg_info;
 };
 
 class ConfigSearchWindow : public QDialog {
index 0562bacb7b996f56432a63d3f6d03ebbca43bd50..2eee0dab524d831585624f722f40e8f3e86b61a3 100644 (file)
@@ -55,6 +55,29 @@ out:
        return rc;
 }
 
+/*
+ * Sets both levels in the MLS range of 'dst' to the low level of 'src'.
+ */
+static inline int mls_context_cpy_low(struct context *dst, struct context *src)
+{
+       int rc;
+
+       if (!selinux_mls_enabled)
+               return 0;
+
+       dst->range.level[0].sens = src->range.level[0].sens;
+       rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
+       if (rc)
+               goto out;
+
+       dst->range.level[1].sens = src->range.level[0].sens;
+       rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[0].cat);
+       if (rc)
+               ebitmap_destroy(&dst->range.level[0].cat);
+out:
+       return rc;
+}
+
 static inline int mls_context_cmp(struct context *c1, struct context *c2)
 {
        if (!selinux_mls_enabled)
index b4f682dc13ff36b14736166e84b8471c436a875d..4a8bab2f3c712445f020fbe3a42a8aa4f172442c 100644 (file)
@@ -270,7 +270,7 @@ int mls_context_to_sid(char oldc,
                if (!defcon)
                        goto out;
 
-               rc = mls_copy_context(context, defcon);
+               rc = mls_context_cpy(context, defcon);
                goto out;
        }
 
@@ -400,26 +400,6 @@ int mls_from_string(char *str, struct context *context, gfp_t gfp_mask)
        return rc;
 }
 
-/*
- * Copies the effective MLS range from `src' into `dst'.
- */
-static inline int mls_scopy_context(struct context *dst,
-                                    struct context *src)
-{
-       int l, rc = 0;
-
-       /* Copy the MLS range from the source context */
-       for (l = 0; l < 2; l++) {
-               dst->range.level[l].sens = src->range.level[0].sens;
-               rc = ebitmap_cpy(&dst->range.level[l].cat,
-                                &src->range.level[0].cat);
-               if (rc)
-                       break;
-       }
-
-       return rc;
-}
-
 /*
  * Copies the MLS range `range' into `context'.
  */
@@ -552,19 +532,19 @@ int mls_compute_sid(struct context *scontext,
        case AVTAB_CHANGE:
                if (tclass == SECCLASS_PROCESS)
                        /* Use the process MLS attributes. */
-                       return mls_copy_context(newcontext, scontext);
+                       return mls_context_cpy(newcontext, scontext);
                else
                        /* Use the process effective MLS attributes. */
-                       return mls_scopy_context(newcontext, scontext);
+                       return mls_context_cpy_low(newcontext, scontext);
        case AVTAB_MEMBER:
                /* Only polyinstantiate the MLS attributes if
                   the type is being polyinstantiated */
                if (newcontext->type != tcontext->type) {
                        /* Use the process effective MLS attributes. */
-                       return mls_scopy_context(newcontext, scontext);
+                       return mls_context_cpy_low(newcontext, scontext);
                } else {
                        /* Use the related object MLS attributes. */
-                       return mls_copy_context(newcontext, tcontext);
+                       return mls_context_cpy(newcontext, tcontext);
                }
        default:
                return -EINVAL;
index 661d6fc76966bdf18a536b933238e4a659c09c90..096d1b4ef7fbb7cd7759cd9148840df3e0e2af5c 100644 (file)
 #include "context.h"
 #include "policydb.h"
 
-/*
- * Copies the MLS range from `src' into `dst'.
- */
-static inline int mls_copy_context(struct context *dst,
-                                  struct context *src)
-{
-       int l, rc = 0;
-
-       /* Copy the MLS range from the source context */
-       for (l = 0; l < 2; l++) {
-               dst->range.level[l].sens = src->range.level[l].sens;
-               rc = ebitmap_cpy(&dst->range.level[l].cat,
-                                &src->range.level[l].cat);
-               if (rc)
-                       break;
-       }
-
-       return rc;
-}
-
 int mls_compute_context_len(struct context *context);
 void mls_sid_to_context(struct context *context, char **scontext);
 int mls_context_isvalid(struct policydb *p, struct context *c);
index ee0581557966498d91435600b2e208a9a411b9d0..3eb1fa9f0de18bbd77919c9954b206e41561d21b 100644 (file)
@@ -1916,11 +1916,10 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid)
        newcon.user = context1->user;
        newcon.role = context1->role;
        newcon.type = context1->type;
-       rc = mls_copy_context(&newcon, context2);
+       rc = mls_context_cpy(&newcon, context2);
        if (rc)
                goto out_unlock;
 
-
        /* Check the validity of the new context. */
        if (!policydb_context_isvalid(&policydb, &newcon)) {
                rc = convert_context_handle_invalid_context(&newcon);
@@ -2492,9 +2491,9 @@ static int selinux_netlbl_socket_setsid(struct socket *sock, u32 sid)
 
        rc = netlbl_socket_setattr(sock, &secattr);
        if (rc == 0) {
-               spin_lock(&sksec->nlbl_lock);
+               spin_lock_bh(&sksec->nlbl_lock);
                sksec->nlbl_state = NLBL_LABELED;
-               spin_unlock(&sksec->nlbl_lock);
+               spin_unlock_bh(&sksec->nlbl_lock);
        }
 
 netlbl_socket_setsid_return:
index 71c58df4af285569a55945757d4568849b5457d8..70face7e104862b38cf7545136e4a26141b32d23 100644 (file)
@@ -2198,7 +2198,8 @@ static int _snd_cmipci_uswitch_put(struct snd_kcontrol *kcontrol,
                val = inb(cm->iobase + args->reg);
        else
                val = snd_cmipci_read(cm, args->reg);
-       change = (val & args->mask) != (ucontrol->value.integer.value[0] ? args->mask : 0);
+       change = (val & args->mask) != (ucontrol->value.integer.value[0] ? 
+                       args->mask_on : (args->mask & ~args->mask_on));
        if (change) {
                val &= ~args->mask;
                if (ucontrol->value.integer.value[0])
index e31f0f11e3a875c095a07426bf3b1521722d72e0..91f5bff66d3f923fa870184bfc3705be62b1f7a9 100644 (file)
@@ -213,7 +213,7 @@ static void snd_echo_midi_output_write(unsigned long data)
        sent = bytes = 0;
        spin_lock_irqsave(&chip->lock, flags);
        chip->midi_full = 0;
-       if (chip->midi_out && !snd_rawmidi_transmit_empty(chip->midi_out)) {
+       if (!snd_rawmidi_transmit_empty(chip->midi_out)) {
                bytes = snd_rawmidi_transmit_peek(chip->midi_out, buf,
                                                  MIDI_OUT_BUFFER_SIZE - 1);
                DE_MID(("Try to send %d bytes...\n", bytes));
@@ -264,9 +264,11 @@ static void snd_echo_midi_output_trigger(struct snd_rawmidi_substream *substream
                }
        } else {
                if (chip->tinuse) {
-                       del_timer(&chip->timer);
                        chip->tinuse = 0;
+                       spin_unlock_irq(&chip->lock);
+                       del_timer_sync(&chip->timer);
                        DE_MID(("Timer removed\n"));
+                       return;
                }
        }
        spin_unlock_irq(&chip->lock);
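
The echoaudio trigger path above stops the MIDI timer with del_timer_sync() so the handler is guaranteed to have finished before tinuse is considered dead, and it drops chip->lock first: del_timer_sync() waits for the handler to complete, and the handler (snd_echo_midi_output_write above) takes the same lock, so calling it with the lock held would deadlock. The shape of the fix, with hypothetical names:

    #include <linux/spinlock.h>
    #include <linux/timer.h>

    static void stop_midi_timer(spinlock_t *lock, struct timer_list *t, int *in_use)
    {
            spin_lock_irq(lock);
            if (*in_use) {
                    *in_use = 0;
                    spin_unlock_irq(lock);  /* the handler may be spinning on this lock */
                    del_timer_sync(t);      /* so release it before waiting for the handler */
                    return;
            }
            spin_unlock_irq(lock);
    }
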
index 97e9af130b7109fcee9ac0933d360d82a667ea0f..1589d2f2917f23aa3cc31652501e7281a15d5dde 100644 (file)
@@ -485,8 +485,9 @@ static const char *get_input_type(struct hda_gnode *node, unsigned int *pinctl)
                        return "Front Aux";
                return "Aux";
        case AC_JACK_MIC_IN:
-               if (node->pin_caps &
-                   (AC_PINCAP_VREF_80 << AC_PINCAP_VREF_SHIFT))
+               if (pinctl &&
+                   (node->pin_caps &
+                    (AC_PINCAP_VREF_80 << AC_PINCAP_VREF_SHIFT)))
                        *pinctl |= AC_PINCTL_VREF_80;
                if ((location & 0x0f) == AC_JACK_LOC_FRONT)
                        return "Front Mic";
index 9fd34f85cad507a0847d1d582c1a6c157f884bb7..1a7e82104bb91f34a93bcfed92247a1b5ecd5bee 100644 (file)
@@ -83,6 +83,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
                         "{Intel, ICH7},"
                         "{Intel, ESB2},"
                         "{Intel, ICH8},"
+                        "{Intel, ICH9},"
                         "{ATI, SB450},"
                         "{ATI, SB600},"
                         "{ATI, RS600},"
@@ -1711,6 +1712,8 @@ static struct pci_device_id azx_ids[] = {
        { 0x8086, 0x27d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH7 */
        { 0x8086, 0x269a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ESB2 */
        { 0x8086, 0x284b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH8 */
+       { 0x8086, 0x293e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH9 */
+       { 0x8086, 0x293f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH9 */
        { 0x1002, 0x437b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATI }, /* ATI SB450 */
        { 0x1002, 0x4383, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATI }, /* ATI SB600 */
        { 0x1002, 0x793b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATIHDMI }, /* ATI RS600 HDMI */
@@ -1718,9 +1721,14 @@ static struct pci_device_id azx_ids[] = {
        { 0x1106, 0x3288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_VIA }, /* VIA VT8251/VT8237A */
        { 0x1039, 0x7502, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_SIS }, /* SIS966 */
        { 0x10b9, 0x5461, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ULI }, /* ULI M5461 */
-       { 0x10de, 0x026c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA 026c */
-       { 0x10de, 0x0371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA 0371 */
-       { 0x10de, 0x03f0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA 03f0 */
+       { 0x10de, 0x026c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA MCP51 */
+       { 0x10de, 0x0371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA MCP55 */
+       { 0x10de, 0x03e4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA MCP61 */
+       { 0x10de, 0x03f0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA MCP61 */
+       { 0x10de, 0x044a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA MCP65 */
+       { 0x10de, 0x044b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA MCP65 */
+       { 0x10de, 0x055c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA MCP67 */
+       { 0x10de, 0x055d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_NVIDIA }, /* NVIDIA MCP67 */
        { 0, }
 };
 MODULE_DEVICE_TABLE(pci, azx_ids);
index edeb3d3c4c7eb66364b2a50a67de1b5dc149444b..f5956d557f70089f6f9c544df07aacc684876c86 100644 (file)
@@ -1268,7 +1268,7 @@ static struct snd_pcm_hardware snd_cs4231_playback =
        .channels_min           = 1,
        .channels_max           = 2,
        .buffer_bytes_max       = (32*1024),
-       .period_bytes_min       = 4096,
+       .period_bytes_min       = 64,
        .period_bytes_max       = (32*1024),
        .periods_min            = 1,
        .periods_max            = 1024,
@@ -1288,7 +1288,7 @@ static struct snd_pcm_hardware snd_cs4231_capture =
        .channels_min           = 1,
        .channels_max           = 2,
        .buffer_bytes_max       = (32*1024),
-       .period_bytes_min       = 4096,
+       .period_bytes_min       = 64,
        .period_bytes_max       = (32*1024),
        .periods_min            = 1,
        .periods_max            = 1024,
@@ -1796,7 +1796,7 @@ static irqreturn_t snd_cs4231_sbus_interrupt(int irq, void *dev_id)
        snd_cs4231_outm(chip, CS4231_IRQ_STATUS, ~CS4231_ALL_IRQS | ~status, 0);
        spin_unlock_irqrestore(&chip->lock, flags);
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 /*
@@ -1821,7 +1821,6 @@ static int sbus_dma_request(struct cs4231_dma_control *dma_cont, dma_addr_t bus_
        if (!(csr & test))
                goto out;
        err = -EBUSY;
-       csr = sbus_readl(base->regs + APCCSR);
        test = APC_XINT_CNVA;
        if ( base->dir == APC_PLAY )
                test = APC_XINT_PNVA;
@@ -1862,17 +1861,16 @@ static void sbus_dma_enable(struct cs4231_dma_control *dma_cont, int on)
 
        spin_lock_irqsave(&base->lock, flags);
        if (!on) {
-               if (base->dir == APC_PLAY) { 
-                       sbus_writel(0, base->regs + base->dir + APCNVA); 
-                       sbus_writel(1, base->regs + base->dir + APCC); 
-               }
-               else
-               {
-                       sbus_writel(0, base->regs + base->dir + APCNC); 
-                       sbus_writel(0, base->regs + base->dir + APCVA); 
-               } 
+               sbus_writel(0, base->regs + base->dir + APCNC);
+               sbus_writel(0, base->regs + base->dir + APCNVA);
+               sbus_writel(0, base->regs + base->dir + APCC);
+               sbus_writel(0, base->regs + base->dir + APCVA);
+
+               /* ACK any APC interrupts. */
+               csr = sbus_readl(base->regs + APCCSR);
+               sbus_writel(csr, base->regs + APCCSR);
        } 
-       udelay(600); 
+       udelay(1000);
        csr = sbus_readl(base->regs + APCCSR);
        shift = 0;
        if ( base->dir == APC_PLAY )
index 3d7f36fb4cf02b514a9599cbf8db64adb1cfa8b6..19bdcc74c96c9aa376742135c0c65de753dc62e7 100644 (file)
@@ -2471,7 +2471,13 @@ static int parse_audio_format_rates(struct snd_usb_audio *chip, struct audioform
                fp->nr_rates = nr_rates;
                fp->rate_min = fp->rate_max = combine_triple(&fmt[8]);
                for (r = 0, idx = offset + 1; r < nr_rates; r++, idx += 3) {
-                       unsigned int rate = fp->rate_table[r] = combine_triple(&fmt[idx]);
+                       unsigned int rate = combine_triple(&fmt[idx]);
+                       /* C-Media CM6501 mislabels its 96 kHz altsetting */
+                       if (rate == 48000 && nr_rates == 1 &&
+                           chip->usb_id == USB_ID(0x0d8c, 0x0201) &&
+                           fp->altsetting == 5 && fp->maxpacksize == 392)
+                               rate = 96000;
+                       fp->rate_table[r] = rate;
                        if (rate < fp->rate_min)
                                fp->rate_min = rate;
                        else if (rate > fp->rate_max)
@@ -3280,6 +3286,7 @@ static void snd_usb_audio_create_proc(struct snd_usb_audio *chip)
 
 static int snd_usb_audio_free(struct snd_usb_audio *chip)
 {
+       usb_chip[chip->index] = NULL;
        kfree(chip);
        return 0;
 }
@@ -3541,7 +3548,6 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
                list_for_each(p, &chip->mixer_list) {
                        snd_usb_mixer_disconnect(p);
                }
-               usb_chip[chip->index] = NULL;
                mutex_unlock(&register_mutex);
                snd_card_free_when_closed(card);
        } else {
index e74eb1bc8d8778e97eb6fd93e65c2b60568b7fe5..7b3bf3545a3bd880cf911d4c39756aac93f1586e 100644 (file)
@@ -1526,7 +1526,7 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, unsi
                namelist[i] = kmalloc(MAX_ITEM_NAME_LEN, GFP_KERNEL);
                if (! namelist[i]) {
                        snd_printk(KERN_ERR "cannot malloc\n");
-                       while (--i > 0)
+                       while (i--)
                                kfree(namelist[i]);
                        kfree(namelist);
                        kfree(cval);
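
The closing hunk fixes the error-path cleanup loop: with entries 0..i-1 allocated, while (--i > 0) frees only entries i-1 down to 1 and leaks namelist[0], whereas while (i--) walks all the way down to 0. A tiny standalone demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
            int i;

            i = 3;                  /* pretend entries 0..2 were allocated */
            while (--i > 0)
                    printf("buggy frees %d\n", i);  /* 2, 1 -- entry 0 leaks */

            i = 3;
            while (i--)
                    printf("fixed frees %d\n", i);  /* 2, 1, 0 */
            return 0;
    }
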